Diffstat (limited to 'arch/sparc/mm')
-rw-r--r-- | arch/sparc/mm/fault_64.c |  9
-rw-r--r-- | arch/sparc/mm/gup.c      | 59
-rw-r--r-- | arch/sparc/mm/init_64.c  | 62
-rw-r--r-- | arch/sparc/mm/tlb.c      | 11
-rw-r--r-- | arch/sparc/mm/tsb.c      |  2
5 files changed, 115 insertions, 28 deletions
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 097aee763af..5062ff389e8 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -472,8 +472,13 @@ good_area:
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	mm_rss = mm->context.huge_pte_count;
 	if (unlikely(mm_rss >
-		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
-		tsb_grow(mm, MM_TSB_HUGE, mm_rss);
+		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
+		if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
+			tsb_grow(mm, MM_TSB_HUGE, mm_rss);
+		else
+			hugetlb_setup(regs);
+
+	}
 #endif
 	return;
 
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index 42c55df3aec..01ee23dd724 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -66,6 +66,56 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 	return 1;
 }
 
+static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+			unsigned long end, int write, struct page **pages,
+			int *nr)
+{
+	struct page *head, *page, *tail;
+	u32 mask;
+	int refs;
+
+	mask = PMD_HUGE_PRESENT;
+	if (write)
+		mask |= PMD_HUGE_WRITE;
+	if ((pmd_val(pmd) & mask) != mask)
+		return 0;
+
+	refs = 0;
+	head = pmd_page(pmd);
+	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+	tail = page;
+	do {
+		VM_BUG_ON(compound_head(page) != head);
+		pages[*nr] = page;
+		(*nr)++;
+		page++;
+		refs++;
+	} while (addr += PAGE_SIZE, addr != end);
+
+	if (!page_cache_add_speculative(head, refs)) {
+		*nr -= refs;
+		return 0;
+	}
+
+	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
+		*nr -= refs;
+		while (refs--)
+			put_page(head);
+		return 0;
+	}
+
+	/* Any tail page need their mapcount reference taken before we
+	 * return.
+	 */
+	while (refs--) {
+		if (PageTail(tail))
+			get_huge_page_tail(tail);
+		tail++;
+	}
+
+	return 1;
+}
+
 static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 		int write, struct page **pages, int *nr)
 {
@@ -77,9 +127,14 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 		pmd_t pmd = *pmdp;
 
 		next = pmd_addr_end(addr, end);
-		if (pmd_none(pmd))
+		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
 			return 0;
-		if (!gup_pte_range(pmd, addr, next, write, pages, nr))
+		if (unlikely(pmd_large(pmd))) {
+			if (!gup_huge_pmd(pmdp, pmd, addr, next,
+					  write, pages, nr))
+				return 0;
+		} else if (!gup_pte_range(pmd, addr, next, write,
+					  pages, nr))
 			return 0;
 	} while (pmdp++, addr = next, addr != end);
 
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index c3b72423c84..82bbf048a5b 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -314,16 +314,31 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
 	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
 	unsigned long tag;
 
+	if (unlikely(!tsb))
+		return;
+
 	tsb += ((address >> tsb_hash_shift) &
 		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
 	tag = (address >> 22UL);
 	tsb_insert(tsb, tag, tte);
 }
 
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static inline bool is_hugetlb_pte(pte_t pte)
+{
+	if ((tlb_type == hypervisor &&
+	     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
+	    (tlb_type != hypervisor &&
+	     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
+		return true;
+	return false;
+}
+#endif
+
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
-	unsigned long tsb_index, tsb_hash_shift, flags;
 	struct mm_struct *mm;
+	unsigned long flags;
 	pte_t pte = *ptep;
 
 	if (tlb_type != hypervisor) {
@@ -335,25 +350,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 
 	mm = vma->vm_mm;
 
-	tsb_index = MM_TSB_BASE;
-	tsb_hash_shift = PAGE_SHIFT;
-
 	spin_lock_irqsave(&mm->context.lock, flags);
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
-		if ((tlb_type == hypervisor &&
-		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
-		    (tlb_type != hypervisor &&
-		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
-			tsb_index = MM_TSB_HUGE;
-			tsb_hash_shift = HPAGE_SHIFT;
-		}
-	}
+	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
+		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
+					address, pte_val(pte));
+	else
 #endif
-
-	__update_mmu_tsb_insert(mm, tsb_index, tsb_hash_shift,
-				address, pte_val(pte));
+		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
+					address, pte_val(pte));
 
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
@@ -2712,14 +2718,28 @@ static void context_reload(void *__data)
 		load_secondary_context(mm);
 }
 
-void hugetlb_setup(struct mm_struct *mm)
+void hugetlb_setup(struct pt_regs *regs)
 {
-	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
+	struct mm_struct *mm = current->mm;
+	struct tsb_config *tp;
 
-	if (likely(tp->tsb != NULL))
-		return;
+	if (in_atomic() || !mm) {
+		const struct exception_table_entry *entry;
+
+		entry = search_exception_tables(regs->tpc);
+		if (entry) {
+			regs->tpc = entry->fixup;
+			regs->tnpc = regs->tpc + 4;
+			return;
+		}
+		pr_alert("Unexpected HugeTLB setup in atomic context.\n");
+		die_if_kernel("HugeTSB in atomic", regs);
+	}
+
+	tp = &mm->context.tsb_block[MM_TSB_HUGE];
+	if (likely(tp->tsb == NULL))
+		tsb_grow(mm, MM_TSB_HUGE, 0);
 
-	tsb_grow(mm, MM_TSB_HUGE, 0);
 	tsb_context_switch(mm);
 	smp_tsb_sync(mm);
 
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 3e8fec391fe..ba6ae7ffdc2 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -135,8 +135,15 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			mm->context.huge_pte_count++;
 		else
 			mm->context.huge_pte_count--;
-		if (mm->context.huge_pte_count == 1)
-			hugetlb_setup(mm);
+
+		/* Do not try to allocate the TSB hash table if we
+		 * don't have one already.  We have various locks held
+		 * and thus we'll end up doing a GFP_KERNEL allocation
+		 * in an atomic context.
+		 *
+		 * Instead, we let the first TLB miss on a hugepage
+		 * take care of this.
+		 */
 	}
 
 	if (!pmd_none(orig)) {
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 7f647434749..428982b9bec 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -314,7 +314,7 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
 retry_tsb_alloc:
 	gfp_flags = GFP_KERNEL;
 	if (new_size > (PAGE_SIZE * 2))
-		gfp_flags = __GFP_NOWARN | __GFP_NORETRY;
+		gfp_flags |= __GFP_NOWARN | __GFP_NORETRY;
 
 	new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
 					gfp_flags, numa_node_id());