author		David S. Miller <davem@davemloft.net>	2013-02-19 13:20:08 -0800
committer	David S. Miller <davem@davemloft.net>	2013-02-20 09:46:08 -0800
commit	bcd896bae0166b4443503482a26ecf84d9ba60ab (patch)
tree	9674d3bf3999833f9e9ee562bd6113509b94f684 /arch
parent	a55ee1ff751f88252207160087d8197bb7538d4c (diff)
sparc64: Handle hugepage TSB being NULL.
Accommodate the possibility that the TSB might be NULL at the point that update_mmu_cache() is invoked.  This is necessary because we will sometimes need to defer the TSB allocation to the first fault that happens in the 'mm'.

Separate out the hugepage PTE test into a separate function so that the logic is clearer.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r--	arch/sparc/mm/init_64.c | 38
1 file changed, 22 insertions(+), 16 deletions(-)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index c3b72423c84..0d0bc392c35 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -314,16 +314,31 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
 	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
 	unsigned long tag;
 
+	if (unlikely(!tsb))
+		return;
+
 	tsb += ((address >> tsb_hash_shift) &
 		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
 	tag = (address >> 22UL);
 	tsb_insert(tsb, tag, tte);
 }
 
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static inline bool is_hugetlb_pte(pte_t pte)
+{
+	if ((tlb_type == hypervisor &&
+	     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
+	    (tlb_type != hypervisor &&
+	     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
+		return true;
+	return false;
+}
+#endif
+
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
-	unsigned long tsb_index, tsb_hash_shift, flags;
 	struct mm_struct *mm;
+	unsigned long flags;
 	pte_t pte = *ptep;
 
 	if (tlb_type != hypervisor) {
@@ -335,25 +350,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 
 	mm = vma->vm_mm;
 
-	tsb_index = MM_TSB_BASE;
-	tsb_hash_shift = PAGE_SHIFT;
-
 	spin_lock_irqsave(&mm->context.lock, flags);
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
-		if ((tlb_type == hypervisor &&
-		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
-		    (tlb_type != hypervisor &&
-		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
-			tsb_index = MM_TSB_HUGE;
-			tsb_hash_shift = HPAGE_SHIFT;
-		}
-	}
+	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
+		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
+					address, pte_val(pte));
+	else
 #endif
-
-	__update_mmu_tsb_insert(mm, tsb_index, tsb_hash_shift,
-				address, pte_val(pte));
+	__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
+				address, pte_val(pte));
 
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
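For readers who want the shape of the change in isolation, the following is a minimal, self-contained sketch of the pattern the patch adopts; it is not the sparc64 code, and the names (tsb_block, is_huge_pte, tsb_insert_guarded, the PTE size-field constants) are hypothetical stand-ins. It shows the two ideas from the commit message: guard the TSB insert against a table whose allocation has been deferred, and factor the "is this a hugepage PTE?" test into a small predicate instead of open-coding it at the call site.

	/*
	 * Hypothetical sketch of the pattern above, not the sparc64 code:
	 * guard the insert against a NULL table and use a helper predicate
	 * for the huge-PTE test.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define PTE_SZ_MASK  0xf0UL   /* stand-in for _PAGE_SZALL_4U/4V  */
	#define PTE_SZ_HUGE  0x40UL   /* stand-in for _PAGE_SZHUGE_4U/4V */

	struct tsb_block {
		unsigned long *tsb;       /* may be NULL until the first fault */
		unsigned long  nentries;  /* power of two                      */
	};

	static bool is_huge_pte(unsigned long pte)
	{
		return (pte & PTE_SZ_MASK) == PTE_SZ_HUGE;
	}

	static void tsb_insert_guarded(struct tsb_block *blk, unsigned long addr,
				       unsigned long pte, unsigned int hash_shift)
	{
		unsigned long idx;

		if (!blk->tsb)            /* allocation deferred: nothing to do */
			return;

		idx = (addr >> hash_shift) & (blk->nentries - 1UL);
		blk->tsb[idx] = pte;      /* stand-in for tsb_insert(tsb, tag, tte) */
	}

	int main(void)
	{
		struct tsb_block base = { .tsb = NULL, .nentries = 512 };

		/* Safe even though the table has not been allocated yet. */
		tsb_insert_guarded(&base, 0x400000UL, 0x41UL, 13);

		printf("huge? %d\n", is_huge_pte(0x41UL));
		return 0;
	}

The guard mirrors the new early return in __update_mmu_tsb_insert(), and the predicate mirrors is_hugetlb_pte(); the real patch additionally keys the hugepage path off mm->context.huge_pte_count rather than off the hugepage TSB pointer, since that pointer may legitimately still be NULL.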