Diffstat (limited to 'arch/arm/mm/fault-armv.c')
-rw-r--r-- | arch/arm/mm/fault-armv.c | 85
1 file changed, 46 insertions(+), 39 deletions(-)
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 56ee15321b0..c9b97e9836a 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -36,28 +36,12 @@ static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
  * Therefore those configurations which might call adjust_pte (those
  * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
  */
-static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
+static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
+	unsigned long pfn, pte_t *ptep)
 {
-	pgd_t *pgd;
-	pmd_t *pmd;
-	pte_t *pte, entry;
+	pte_t entry = *ptep;
 	int ret;
 
-	pgd = pgd_offset(vma->vm_mm, address);
-	if (pgd_none(*pgd))
-		goto no_pgd;
-	if (pgd_bad(*pgd))
-		goto bad_pgd;
-
-	pmd = pmd_offset(pgd, address);
-	if (pmd_none(*pmd))
-		goto no_pmd;
-	if (pmd_bad(*pmd))
-		goto bad_pmd;
-
-	pte = pte_offset_map(pmd, address);
-	entry = *pte;
-
 	/*
 	 * If this page is present, it's actually being shared.
 	 */
@@ -68,33 +52,55 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 	 * fault (ie, is old), we can safely ignore any issues.
 	 */
 	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
-		unsigned long pfn = pte_pfn(entry);
 		flush_cache_page(vma, address, pfn);
 		outer_flush_range((pfn << PAGE_SHIFT),
 				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
 		pte_val(entry) &= ~L_PTE_MT_MASK;
 		pte_val(entry) |= shared_pte_mask;
-		set_pte_at(vma->vm_mm, address, pte, entry);
+		set_pte_at(vma->vm_mm, address, ptep, entry);
 		flush_tlb_page(vma, address);
 	}
-	pte_unmap(pte);
+
 	return ret;
+}
+
+static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
+	unsigned long pfn)
+{
+	spinlock_t *ptl;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	int ret;
+
+	pgd = pgd_offset(vma->vm_mm, address);
+	if (pgd_none_or_clear_bad(pgd))
+		return 0;
+
+	pmd = pmd_offset(pgd, address);
+	if (pmd_none_or_clear_bad(pmd))
+		return 0;
 
-bad_pgd:
-	pgd_ERROR(*pgd);
-	pgd_clear(pgd);
-no_pgd:
-	return 0;
-
-bad_pmd:
-	pmd_ERROR(*pmd);
-	pmd_clear(pmd);
-no_pmd:
-	return 0;
+	/*
+	 * This is called while another page table is mapped, so we
+	 * must use the nested version.  This also means we need to
+	 * open-code the spin-locking.
+	 */
+	ptl = pte_lockptr(vma->vm_mm, pmd);
+	pte = pte_offset_map_nested(pmd, address);
+	spin_lock(ptl);
+
+	ret = do_adjust_pte(vma, address, pfn, pte);
+
+	spin_unlock(ptl);
+	pte_unmap_nested(pte);
+
+	return ret;
 }
 
 static void
-make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
+make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
+	unsigned long addr, pte_t *ptep, unsigned long pfn)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *mpnt;
@@ -122,11 +128,11 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne
 		if (!(mpnt->vm_flags & VM_MAYSHARE))
 			continue;
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
-		aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
+		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
 	}
 	flush_dcache_mmap_unlock(mapping);
 	if (aliases)
-		adjust_pte(vma, addr);
+		do_adjust_pte(vma, addr, pfn, ptep);
 	else
 		flush_cache_page(vma, addr, pfn);
 }
@@ -144,9 +150,10 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne
  *
  * Note that the pte lock will be held.
  */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+	pte_t *ptep)
 {
-	unsigned long pfn = pte_pfn(pte);
+	unsigned long pfn = pte_pfn(*ptep);
 	struct address_space *mapping;
 	struct page *page;
 
@@ -168,7 +175,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 #endif
 	if (mapping) {
 		if (cache_is_vivt())
-			make_coherent(mapping, vma, addr, pfn);
+			make_coherent(mapping, vma, addr, ptep, pfn);
 		else if (vma->vm_flags & VM_EXEC)
 			__flush_icache_all();
 	}
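
For readers following the locking change: the new adjust_pte() open-codes the PTE lock because it runs while the faulting PTE is still mapped and locked (the doc comment above notes that "the pte lock will be held"). On highpte configurations pte_offset_map() occupies one atomic kmap slot, so a second page table must be mapped through pte_offset_map_nested(), and the usual pte_offset_map_lock() wrapper cannot be used since it maps via the non-nested slot. Below is a minimal sketch of that walk-and-lock shape, assuming the kernel APIs of this era; with_nested_pte() and its touch_pte() callback are hypothetical stand-ins for adjust_pte() and do_adjust_pte(), not kernel source.

#include <linux/mm.h>
#include <linux/spinlock.h>

/*
 * Sketch only: generalization of the pattern used by adjust_pte()
 * in the diff above.
 */
static int with_nested_pte(struct vm_area_struct *vma, unsigned long addr,
	int (*touch_pte)(struct vm_area_struct *, unsigned long, pte_t *))
{
	spinlock_t *ptl;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/* An empty or corrupt upper level means there is no PTE to touch. */
	pgd = pgd_offset(vma->vm_mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return 0;

	pmd = pmd_offset(pgd, addr);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * The caller already holds a mapping of another page table,
	 * so map this one with the nested variant and take the PTE
	 * lock by hand rather than via pte_offset_map_lock().
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map_nested(pmd, addr);
	spin_lock(ptl);

	ret = touch_pte(vma, addr, pte);

	spin_unlock(ptl);
	pte_unmap_nested(pte);

	return ret;
}

This is also why make_coherent() now takes ptep and calls do_adjust_pte() directly for the faulting address: that PTE is already mapped, and its lock is already held by the fault path, so going through adjust_pte() there would nest another kmap and attempt to take the same spinlock a second time.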