| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-09 16:23:15 +0900 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-09 16:23:15 +0900 |
| commit | 9e2d8656f5e8aa214e66b462680cf86b210b74a8 (patch) | |
| tree | f67d62e896cedf75599ea45f9ecf9999c6ad24cd /arch/sparc/include/asm | |
| parent | 1ea4f4f8405cc1ceec23f2d261bc3775785e6712 (diff) | |
| parent | 9e695d2ecc8451cc2c1603d60b5c8e7f5581923a (diff) | |
Merge branch 'akpm' (Andrew's patch-bomb)
Merge patches from Andrew Morton:
"A few misc things and very nearly all of the MM tree. A tremendous
amount of stuff (again), including a significant rbtree library
rework."
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (160 commits)
sparc64: Support transparent huge pages.
mm: thp: Use more portable PMD clearing sequence in zap_huge_pmd().
mm: Add and use update_mmu_cache_pmd() in transparent huge page code.
sparc64: Document PGD and PMD layout.
sparc64: Eliminate PTE table memory wastage.
sparc64: Halve the size of PTE tables
sparc64: Only support 4MB huge pages and 8KB base pages.
memory-hotplug: suppress "Trying to free nonexistent resource <XXXXXXXXXXXXXXXX-YYYYYYYYYYYYYYYY>" warning
mm: memcg: clean up mm_match_cgroup() signature
mm: document PageHuge somewhat
mm: use %pK for /proc/vmallocinfo
mm, thp: fix mlock statistics
mm, thp: fix mapped pages avoiding unevictable list on mlock
memory-hotplug: update memory block's state and notify userspace
memory-hotplug: preparation to notify memory block's state at memory hot remove
mm: avoid section mismatch warning for memblock_type_name
make GFP_NOTRACK definition unconditional
cma: decrease cc.nr_migratepages after reclaiming pagelist
CMA: migrate mlocked pages
kpageflags: fix wrong KPF_THP on non-huge compound pages
...
Diffstat (limited to 'arch/sparc/include/asm')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | arch/sparc/include/asm/hugetlb.h | 9 |
| -rw-r--r-- | arch/sparc/include/asm/mmu_64.h | 19 |
| -rw-r--r-- | arch/sparc/include/asm/mmu_context_64.h | 2 |
| -rw-r--r-- | arch/sparc/include/asm/page_64.h | 21 |
| -rw-r--r-- | arch/sparc/include/asm/pgalloc_64.h | 56 |
| -rw-r--r-- | arch/sparc/include/asm/pgtable_64.h | 253 |
| -rw-r--r-- | arch/sparc/include/asm/tsb.h | 106 |
7 files changed, 318 insertions, 148 deletions
```diff
diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
index 177061064ee..8c5eed6d267 100644
--- a/arch/sparc/include/asm/hugetlb.h
+++ b/arch/sparc/include/asm/hugetlb.h
@@ -10,7 +10,10 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep);
 
-void hugetlb_prefault_arch_hook(struct mm_struct *mm);
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+	hugetlb_setup(mm);
+}
 
 static inline int is_hugepage_only_range(struct mm_struct *mm,
 					 unsigned long addr,
@@ -82,4 +85,8 @@ static inline void arch_release_hugepage(struct page *page)
 {
 }
 
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
 #endif /* _ASM_SPARC64_HUGETLB_H */
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index 9067dc50053..76092c4dd27 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -30,22 +30,8 @@
 #define CTX_PGSZ_MASK		((CTX_PGSZ_BITS << CTX_PGSZ0_SHIFT) | \
 				 (CTX_PGSZ_BITS << CTX_PGSZ1_SHIFT))
 
-#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
 #define CTX_PGSZ_BASE	CTX_PGSZ_8KB
-#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
-#define CTX_PGSZ_BASE	CTX_PGSZ_64KB
-#else
-#error No page size specified in kernel configuration
-#endif
-
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
-#define CTX_PGSZ_HUGE	CTX_PGSZ_4MB
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
-#define CTX_PGSZ_HUGE	CTX_PGSZ_512KB
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-#define CTX_PGSZ_HUGE	CTX_PGSZ_64KB
-#endif
-
+#define CTX_PGSZ_HUGE	CTX_PGSZ_4MB
 #define CTX_PGSZ_KERN	CTX_PGSZ_4MB
 
 /* Thus, when running on UltraSPARC-III+ and later, we use the following
@@ -96,7 +82,7 @@ struct tsb_config {
 
 #define MM_TSB_BASE	0
 
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 #define MM_TSB_HUGE	1
 #define MM_NUM_TSBS	2
 #else
@@ -107,6 +93,7 @@ typedef struct {
 	spinlock_t		lock;
 	unsigned long		sparc64_ctx_val;
 	unsigned long		huge_pte_count;
+	struct page		*pgtable_page;
 	struct tsb_config	tsb_block[MM_NUM_TSBS];
 	struct hv_tsb_descr	tsb_descr[MM_NUM_TSBS];
 } mm_context_t;
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index a97fd085ceb..9191ca62ed9 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -36,7 +36,7 @@ static inline void tsb_context_switch(struct mm_struct *mm)
 {
 	__tsb_context_switch(__pa(mm->pgd),
 			     &mm->context.tsb_block[0],
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 			     (mm->context.tsb_block[1].tsb ?
 			      &mm->context.tsb_block[1] :
 			      NULL)
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index f0d09b40103..4b39f74d6ca 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -3,13 +3,7 @@
 
 #include <linux/const.h>
 
-#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
 #define PAGE_SHIFT   13
-#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
-#define PAGE_SHIFT   16
-#else
-#error No page size specified in kernel configuration
-#endif
 
 #define PAGE_SIZE    (_AC(1,UL) << PAGE_SHIFT)
 #define PAGE_MASK    (~(PAGE_SIZE-1))
@@ -21,15 +15,9 @@
 #define DCACHE_ALIASING_POSSIBLE
 #endif
 
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
 #define HPAGE_SHIFT		22
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
-#define HPAGE_SHIFT		19
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-#define HPAGE_SHIFT		16
-#endif
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 #define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
 #define HPAGE_MASK		(~(HPAGE_SIZE - 1UL))
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
@@ -38,6 +26,11 @@
 
 #ifndef __ASSEMBLY__
 
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+struct mm_struct;
+extern void hugetlb_setup(struct mm_struct *mm);
+#endif
+
 #define WANT_PAGE_VIRTUAL
 
 extern void _clear_page(void *page);
@@ -98,7 +91,7 @@ typedef unsigned long pgprot_t;
 
 #endif /* (STRICT_MM_TYPECHECKS) */
 
-typedef struct page *pgtable_t;
+typedef pte_t *pgtable_t;
 
 #define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_32BIT) ? \
 				 (_AC(0x0000000070000000,UL)) : \
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
index 40b2d7a7023..bcfe063bce2 100644
--- a/arch/sparc/include/asm/pgalloc_64.h
+++ b/arch/sparc/include/asm/pgalloc_64.h
@@ -38,51 +38,20 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 	kmem_cache_free(pgtable_cache, pmd);
 }
 
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
-					  unsigned long address)
-{
-	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
-}
-
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
-				      unsigned long address)
-{
-	struct page *page;
-	pte_t *pte;
-
-	pte = pte_alloc_one_kernel(mm, address);
-	if (!pte)
-		return NULL;
-	page = virt_to_page(pte);
-	pgtable_page_ctor(page);
-	return page;
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-	free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
-	pgtable_page_dtor(ptepage);
-	__free_page(ptepage);
-}
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+				   unsigned long address);
+extern pgtable_t pte_alloc_one(struct mm_struct *mm,
+			       unsigned long address);
+extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
+extern void pte_free(struct mm_struct *mm, pgtable_t ptepage);
 
-#define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(PMD, PTE)
-#define pmd_populate(MM,PMD,PTE_PAGE)		\
-	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
-#define pmd_pgtable(pmd)			pmd_page(pmd)
+#define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(MM, PMD, PTE)
+#define pmd_populate(MM, PMD, PTE)		pmd_set(MM, PMD, PTE)
+#define pmd_pgtable(PMD)			((pte_t *)__pmd_page(PMD))
 
 #define check_pgt_cache()	do { } while (0)
 
-static inline void pgtable_free(void *table, bool is_page)
-{
-	if (is_page)
-		free_page((unsigned long)table);
-	else
-		kmem_cache_free(pgtable_cache, table);
-}
+extern void pgtable_free(void *table, bool is_page);
 
 #ifdef CONFIG_SMP
 
@@ -113,11 +82,10 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page)
 }
 #endif /* !CONFIG_SMP */
 
-static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pte_t *pte,
 				  unsigned long address)
 {
-	pgtable_page_dtor(ptepage);
-	pgtable_free_tlb(tlb, page_address(ptepage), true);
+	pgtable_free_tlb(tlb, pte, true);
 }
 
 #define __pmd_free_tlb(tlb, pmd, addr)		      \
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 61210db139f..95515f1e7ce 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -45,40 +45,59 @@
 
 #define vmemmap			((struct page *)VMEMMAP_BASE)
 
-/* XXX All of this needs to be rethought so we can take advantage
- * XXX cheetah's full 64-bit virtual address space, ie. no more hole
- * XXX in the middle like on spitfire. -DaveM
- */
-/*
- * Given a virtual address, the lowest PAGE_SHIFT bits determine offset
- * into the page; the next higher PAGE_SHIFT-3 bits determine the pte#
- * in the proper pagetable (the -3 is from the 8 byte ptes, and each page
- * table is a single page long). The next higher PMD_BITS determine pmd#
- * in the proper pmdtable (where we must have PMD_BITS <= (PAGE_SHIFT-2)
- * since the pmd entries are 4 bytes, and each pmd page is a single page
- * long). Finally, the higher few bits determine pgde#.
- */
-
 /* PMD_SHIFT determines the size of the area a second-level page
  * table can map
  */
-#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
+#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-4))
 #define PMD_SIZE	(_AC(1,UL) << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
 #define PMD_BITS	(PAGE_SHIFT - 2)
 
 /* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
+#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-4) + PMD_BITS)
 #define PGDIR_SIZE	(_AC(1,UL) << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 #define PGDIR_BITS	(PAGE_SHIFT - 2)
 
+#if (PGDIR_SHIFT + PGDIR_BITS) != 44
+#error Page table parameters do not cover virtual address space properly.
+#endif
+
+#if (PMD_SHIFT != HPAGE_SHIFT)
+#error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages.
+#endif
+
+/* PMDs point to PTE tables which are 4K aligned.  */
+#define PMD_PADDR	_AC(0xfffffffe,UL)
+#define PMD_PADDR_SHIFT	_AC(11,UL)
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define PMD_ISHUGE	_AC(0x00000001,UL)
+
+/* This is the PMD layout when PMD_ISHUGE is set.  With 4MB huge
+ * pages, this frees up a bunch of bits in the layout that we can
+ * use for the protection settings and software metadata.
+ */
+#define PMD_HUGE_PADDR		_AC(0xfffff800,UL)
+#define PMD_HUGE_PROTBITS	_AC(0x000007ff,UL)
+#define PMD_HUGE_PRESENT	_AC(0x00000400,UL)
+#define PMD_HUGE_WRITE		_AC(0x00000200,UL)
+#define PMD_HUGE_DIRTY		_AC(0x00000100,UL)
+#define PMD_HUGE_ACCESSED	_AC(0x00000080,UL)
+#define PMD_HUGE_EXEC		_AC(0x00000040,UL)
+#define PMD_HUGE_SPLITTING	_AC(0x00000020,UL)
+#endif
+
+/* PGDs point to PMD tables which are 8K aligned.  */
+#define PGD_PADDR	_AC(0xfffffffc,UL)
+#define PGD_PADDR_SHIFT	_AC(11,UL)
+
 #ifndef __ASSEMBLY__
 
 #include <linux/sched.h>
 
 /* Entries per page directory level. */
-#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
+#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-4))
 #define PTRS_PER_PMD	(1UL << PMD_BITS)
 #define PTRS_PER_PGD	(1UL << PGDIR_BITS)
 
@@ -160,26 +179,11 @@
 #define _PAGE_SZ8K_4V	  _AC(0x0000000000000000,UL) /* 8K Page          */
 #define _PAGE_SZALL_4V	  _AC(0x0000000000000007,UL) /* All pgsz bits    */
 
-#if PAGE_SHIFT == 13
 #define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U
 #define _PAGE_SZBITS_4V	_PAGE_SZ8K_4V
-#elif PAGE_SHIFT == 16
-#define _PAGE_SZBITS_4U	_PAGE_SZ64K_4U
-#define _PAGE_SZBITS_4V	_PAGE_SZ64K_4V
-#else
-#error Wrong PAGE_SHIFT specified
-#endif
 
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
 #define _PAGE_SZHUGE_4U	_PAGE_SZ4MB_4U
 #define _PAGE_SZHUGE_4V	_PAGE_SZ4MB_4V
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
-#define _PAGE_SZHUGE_4U	_PAGE_SZ512K_4U
-#define _PAGE_SZHUGE_4V	_PAGE_SZ512K_4V
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-#define _PAGE_SZHUGE_4U	_PAGE_SZ64K_4U
-#define _PAGE_SZHUGE_4V	_PAGE_SZ64K_4V
-#endif
 
 /* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
 #define __P000	__pgprot(0)
@@ -218,7 +222,6 @@ extern unsigned long _PAGE_CACHE;
 
 extern unsigned long pg_iobits;
 extern unsigned long _PAGE_ALL_SZ_BITS;
-extern unsigned long _PAGE_SZBITS;
 
 extern struct page *mem_map_zero;
 #define ZERO_PAGE(vaddr)	(mem_map_zero)
@@ -231,25 +234,25 @@ extern struct page *mem_map_zero;
 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
 {
 	unsigned long paddr = pfn << PAGE_SHIFT;
-	unsigned long sz_bits;
-
-	sz_bits = 0UL;
-	if (_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL) {
-		__asm__ __volatile__(
-		"\n661:	sethi		%%uhi(%1), %0\n"
-		"	sllx		%0, 32, %0\n"
-		"	.section	.sun4v_2insn_patch, \"ax\"\n"
-		"	.word		661b\n"
-		"	mov		%2, %0\n"
-		"	nop\n"
-		"	.previous\n"
-		: "=r" (sz_bits)
-		: "i" (_PAGE_SZBITS_4U), "i" (_PAGE_SZBITS_4V));
-	}
-	return __pte(paddr | sz_bits | pgprot_val(prot));
+
+	BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
+	return __pte(paddr | pgprot_val(prot));
 }
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot);
+#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
+
+extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+	/* Do nothing, mk_pmd() does this part.  */
+	return pmd;
+}
+#endif
+
 /* This one can be done with two shifts.  */
 static inline unsigned long pte_pfn(pte_t pte)
 {
@@ -286,6 +289,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
 	 * Note: We encode this into 3 sun4v 2-insn patch sequences.
 	 */
 
+	BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
 	__asm__ __volatile__(
 	"\n661:	sethi		%%uhi(%2), %1\n"
 	"	sethi		%%hi(%2), %0\n"
@@ -307,10 +311,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
 	: "=r" (mask), "=r" (tmp)
 	: "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
 	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U |
-	       _PAGE_SZBITS_4U | _PAGE_SPECIAL),
+	       _PAGE_SPECIAL),
 	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
 	       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V |
-	       _PAGE_SZBITS_4V | _PAGE_SPECIAL));
+	       _PAGE_SPECIAL));
 
 	return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
 }
@@ -618,19 +622,130 @@ static inline unsigned long pte_special(pte_t pte)
 	return pte_val(pte) & _PAGE_SPECIAL;
 }
 
-#define pmd_set(pmdp, ptep)	\
-	(pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_young(pmd_t pmd)
+{
+	return pmd_val(pmd) & PMD_HUGE_ACCESSED;
+}
+
+static inline int pmd_write(pmd_t pmd)
+{
+	return pmd_val(pmd) & PMD_HUGE_WRITE;
+}
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+	unsigned long val = pmd_val(pmd) & PMD_HUGE_PADDR;
+
+	return val >> (PAGE_SHIFT - PMD_PADDR_SHIFT);
+}
+
+static inline int pmd_large(pmd_t pmd)
+{
+	return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) ==
+		(PMD_ISHUGE | PMD_HUGE_PRESENT);
+}
+
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+	return (pmd_val(pmd) & (PMD_ISHUGE|PMD_HUGE_SPLITTING)) ==
+		(PMD_ISHUGE|PMD_HUGE_SPLITTING);
+}
+
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+	return pmd_val(pmd) & PMD_ISHUGE;
+}
+
+#define has_transparent_hugepage() 1
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+	pmd_val(pmd) &= ~PMD_HUGE_ACCESSED;
+	return pmd;
+}
+
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+	pmd_val(pmd) &= ~PMD_HUGE_WRITE;
+	return pmd;
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+	pmd_val(pmd) |= PMD_HUGE_DIRTY;
+	return pmd;
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+	pmd_val(pmd) |= PMD_HUGE_ACCESSED;
+	return pmd;
+}
+
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+	pmd_val(pmd) |= PMD_HUGE_WRITE;
+	return pmd;
+}
+
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+	pmd_val(pmd) &= ~PMD_HUGE_PRESENT;
+	return pmd;
+}
+
+static inline pmd_t pmd_mksplitting(pmd_t pmd)
+{
+	pmd_val(pmd) |= PMD_HUGE_SPLITTING;
+	return pmd;
+}
+
+extern pgprot_t pmd_pgprot(pmd_t entry);
+#endif
+
+static inline int pmd_present(pmd_t pmd)
+{
+	return pmd_val(pmd) != 0U;
+}
+
+#define pmd_none(pmd)			(!pmd_val(pmd))
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+		       pmd_t *pmdp, pmd_t pmd);
+#else
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+			      pmd_t *pmdp, pmd_t pmd)
+{
+	*pmdp = pmd;
+}
+#endif
+
+static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
+{
+	unsigned long val = __pa((unsigned long) (ptep)) >> PMD_PADDR_SHIFT;
+
+	pmd_val(*pmdp) = val;
+}
+
 #define pud_set(pudp, pmdp)	\
-	(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)) >> 11UL))
-#define __pmd_page(pmd)		\
-	((unsigned long) __va((((unsigned long)pmd_val(pmd))<<11UL)))
+	(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)) >> PGD_PADDR_SHIFT))
+static inline unsigned long __pmd_page(pmd_t pmd)
+{
+	unsigned long paddr = (unsigned long) pmd_val(pmd);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	if (pmd_val(pmd) & PMD_ISHUGE)
+		paddr &= PMD_HUGE_PADDR;
+#endif
+	paddr <<= PMD_PADDR_SHIFT;
+	return ((unsigned long) __va(paddr));
+}
 #define pmd_page(pmd)			virt_to_page((void *)__pmd_page(pmd))
 #define pud_page_vaddr(pud)		\
-	((unsigned long) __va((((unsigned long)pud_val(pud))<<11UL)))
+	((unsigned long) __va((((unsigned long)pud_val(pud))<<PGD_PADDR_SHIFT)))
 #define pud_page(pud)			virt_to_page((void *)pud_page_vaddr(pud))
-#define pmd_none(pmd)			(!pmd_val(pmd))
 #define pmd_bad(pmd)			(0)
-#define pmd_present(pmd)		(pmd_val(pmd) != 0U)
 #define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0U)
 #define pud_none(pud)			(!pud_val(pud))
 #define pud_bad(pud)			(0)
@@ -664,6 +779,16 @@ static inline unsigned long pte_special(pte_t pte)
 extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 			  pte_t *ptep, pte_t orig, int fullmm);
 
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+				       unsigned long addr,
+				       pmd_t *pmdp)
+{
+	pmd_t pmd = *pmdp;
+	set_pmd_at(mm, addr, pmdp, __pmd(0U));
+	return pmd;
+}
+
 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, pte_t pte, int fullmm)
 {
@@ -719,6 +844,16 @@ extern void mmu_info(struct seq_file *);
 
 struct vm_area_struct;
 extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+				 pmd_t *pmd);
+
+#define __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
+
+#define __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);
+#endif
 
 /* Encode and de-code a swap entry */
 #define __swp_type(entry)	(((entry).val >> PAGE_SHIFT) & 0xffUL)
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index 1a8afd1ad04..b4c258de444 100644
--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -147,20 +147,96 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	brz,pn	REG1, FAIL_LABEL; \
 	 sllx	VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
 	srlx	REG2, 64 - PAGE_SHIFT, REG2; \
-	sllx	REG1, 11, REG1; \
+	sllx	REG1, PGD_PADDR_SHIFT, REG1; \
 	andn	REG2, 0x3, REG2; \
 	lduwa	[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
 	brz,pn	REG1, FAIL_LABEL; \
 	 sllx	VADDR, 64 - PMD_SHIFT, REG2; \
-	srlx	REG2, 64 - PAGE_SHIFT, REG2; \
-	sllx	REG1, 11, REG1; \
+	srlx	REG2, 64 - (PAGE_SHIFT - 1), REG2; \
+	sllx	REG1, PMD_PADDR_SHIFT, REG1; \
 	andn	REG2, 0x7, REG2; \
 	add	REG1, REG2, REG1;
 
-	/* Do a user page table walk in MMU globals.  Leaves physical PTE
-	 * pointer in REG1.  Jumps to FAIL_LABEL on early page table walk
-	 * termination.  Physical base of page tables is in PHYS_PGD which
-	 * will not be modified.
+	/* This macro exists only to make the PMD translator below easier
+	 * to read.  It hides the ELF section switch for the sun4v code
+	 * patching.
+	 */
+#define OR_PTE_BIT(REG, NAME)				\
+661:	or		REG, _PAGE_##NAME##_4U, REG;	\
+	.section	.sun4v_1insn_patch, "ax";	\
+	.word		661b;				\
+	or		REG, _PAGE_##NAME##_4V, REG;	\
+	.previous;
+
+	/* Load into REG the PTE value for VALID, CACHE, and SZHUGE.  */
+#define BUILD_PTE_VALID_SZHUGE_CACHE(REG)				   \
+661:	sethi		%uhi(_PAGE_VALID|_PAGE_SZHUGE_4U), REG;		   \
+	.section	.sun4v_1insn_patch, "ax";			   \
+	.word		661b;						   \
+	sethi		%uhi(_PAGE_VALID), REG;				   \
+	.previous;							   \
+	sllx		REG, 32, REG;					   \
+661:	or		REG, _PAGE_CP_4U|_PAGE_CV_4U, REG;		   \
+	.section	.sun4v_1insn_patch, "ax";			   \
+	.word		661b;						   \
+	or		REG, _PAGE_CP_4V|_PAGE_CV_4V|_PAGE_SZHUGE_4V, REG; \
+	.previous;
+
+	/* PMD has been loaded into REG1, interpret the value, seeing
+	 * if it is a HUGE PMD or a normal one.  If it is not valid
+	 * then jump to FAIL_LABEL.  If it is a HUGE PMD, and it
+	 * translates to a valid PTE, branch to PTE_LABEL.
+	 *
+	 * We translate the PMD by hand, one bit at a time,
+	 * constructing the huge PTE.
+	 *
+	 * So we construct the PTE in REG2 as follows:
+	 *
+	 * 1) Extract the PMD PFN from REG1 and place it into REG2.
+	 *
+	 * 2) Translate PMD protection bits in REG1 into REG2, one bit
+	 *    at a time using andcc tests on REG1 and OR's into REG2.
+	 *
+	 *    Only two bits to be concerned with here, EXEC and
+	 *    WRITE.  Now REG1 is freed up and we can use it as
+	 *    a temporary.
+	 *
+	 * 3) Construct the VALID, CACHE, and page size PTE bits in
+	 *    REG1, OR with REG2 to form final PTE.
+	 */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
+	brz,pn		REG1, FAIL_LABEL;		\
+	 andcc		REG1, PMD_ISHUGE, %g0;		\
+	be,pt		%xcc, 700f;			\
+	 and		REG1, PMD_HUGE_PRESENT|PMD_HUGE_ACCESSED, REG2;	\
+	cmp		REG2, PMD_HUGE_PRESENT|PMD_HUGE_ACCESSED;	\
+	bne,pn		%xcc, FAIL_LABEL;		\
+	 andn		REG1, PMD_HUGE_PROTBITS, REG2;	\
+	sllx		REG2, PMD_PADDR_SHIFT, REG2;	\
+	/* REG2 now holds PFN << PAGE_SHIFT */		\
+	andcc		REG1, PMD_HUGE_EXEC, %g0;	\
+	bne,a,pt	%xcc, 1f;			\
+	 OR_PTE_BIT(REG2, EXEC);			\
+1:	andcc		REG1, PMD_HUGE_WRITE, %g0;	\
+	bne,a,pt	%xcc, 1f;			\
+	 OR_PTE_BIT(REG2, W);				\
+	/* REG1 can now be clobbered, build final PTE */\
+1:	BUILD_PTE_VALID_SZHUGE_CACHE(REG1);		\
+	ba,pt		%xcc, PTE_LABEL;		\
+	 or		REG1, REG2, REG1;		\
+700:
+#else
+#define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
+	brz,pn		REG1, FAIL_LABEL;		\
+	 nop;
+#endif
+
+	/* Do a user page table walk in MMU globals.  Leaves final,
+	 * valid, PTE value in REG1.  Jumps to FAIL_LABEL on early
+	 * page table walk termination or if the PTE is not valid.
+	 *
+	 * Physical base of page tables is in PHYS_PGD which will not
+	 * be modified.
 	 *
 	 * VADDR will not be clobbered, but REG1 and REG2 will.
 	 */
@@ -172,15 +248,19 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	brz,pn	REG1, FAIL_LABEL; \
 	 sllx	VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
 	srlx	REG2, 64 - PAGE_SHIFT, REG2; \
-	sllx	REG1, 11, REG1; \
+	sllx	REG1, PGD_PADDR_SHIFT, REG1; \
 	andn	REG2, 0x3, REG2; \
 	lduwa	[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
-	brz,pn	REG1, FAIL_LABEL; \
-	 sllx	VADDR, 64 - PMD_SHIFT, REG2; \
-	srlx	REG2, 64 - PAGE_SHIFT, REG2; \
-	sllx	REG1, 11, REG1; \
+	USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, 800f) \
+	sllx	VADDR, 64 - PMD_SHIFT, REG2; \
+	srlx	REG2, 64 - (PAGE_SHIFT - 1), REG2; \
+	sllx	REG1, PMD_PADDR_SHIFT, REG1; \
+	andn	REG2, 0x7, REG2; \
+	add	REG1, REG2, REG1; \
+	ldxa	[REG1] ASI_PHYS_USE_EC, REG1; \
+	brgez,pn REG1, FAIL_LABEL; \
+	 nop; \
+800:
 
 	/* Lookup a OBP mapping on VADDR in the prom_trans[] table at TL>0.
 	 * If no entry is found, FAIL_LABEL will be branched to.  On success
```
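A note on the geometry these header changes encode: with 8KB base pages now hard-wired, dropping PTRS_PER_PTE from `1UL << (PAGE_SHIFT-3)` to `1UL << (PAGE_SHIFT-4)` halves each PTE table to 512 eight-byte entries, which is exactly the 4K-aligned table the new PMD_PADDR comment describes, and it makes one PTE table span exactly one 4MB region (PMD_SHIFT == HPAGE_SHIFT == 22). The sketch below re-derives the two new `#error` checks; it is a standalone C11 illustration with `DEMO_`-prefixed macros, not kernel code:

```c
/* Standalone re-derivation of the page table geometry enforced by the
 * new #error checks in pgtable_64.h, assuming the 8KB base page size
 * the series hard-wires (PAGE_SHIFT == 13).
 */
#define DEMO_PAGE_SHIFT		13
#define DEMO_HPAGE_SHIFT	22	/* 4MB huge pages */
#define DEMO_PMD_SHIFT		(DEMO_PAGE_SHIFT + (DEMO_PAGE_SHIFT - 4))   /* 22 */
#define DEMO_PMD_BITS		(DEMO_PAGE_SHIFT - 2)			    /* 11 */
#define DEMO_PGDIR_SHIFT	(DEMO_PMD_SHIFT + DEMO_PMD_BITS)	    /* 33 */
#define DEMO_PGDIR_BITS		(DEMO_PAGE_SHIFT - 2)			    /* 11 */
#define DEMO_PTRS_PER_PTE	(1UL << (DEMO_PAGE_SHIFT - 4))		    /* 512 */

/* One PTE table now occupies 512 * 8 = 4096 bytes: half a base page. */
_Static_assert(DEMO_PTRS_PER_PTE * 8 == 4096, "PTE tables are 4K");

/* A PMD entry maps exactly one potential 4MB huge page. */
_Static_assert(DEMO_PMD_SHIFT == DEMO_HPAGE_SHIFT, "PMD area == huge page");

/* 13 + 9 + 11 + 11 = 44 bits of virtual address are covered. */
_Static_assert(DEMO_PGDIR_SHIFT + DEMO_PGDIR_BITS == 44, "44-bit VA space");
```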
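The CONFIG_TRANSPARENT_HUGEPAGE block in pgtable_64.h packs a huge mapping's physical address into PMD_HUGE_PADDR and its software state into the low PMD_HUGE_PROTBITS. Below is a user-space sketch that decodes a PMD value the same way `pmd_large()`, `pmd_pfn()` and `__pmd_page()` do; the `demo_` names and the sample value are hypothetical, while the constants are copied from the diff:

```c
#include <stdio.h>

/* Constants copied from the new pgtable_64.h layout. */
#define DEMO_PAGE_SHIFT		13UL	/* 8KB base pages */
#define DEMO_PMD_PADDR_SHIFT	11UL	/* the PMD stores paddr >> 11 */
#define DEMO_PMD_ISHUGE		0x00000001UL
#define DEMO_PMD_HUGE_PADDR	0xfffff800UL
#define DEMO_PMD_HUGE_PRESENT	0x00000400UL

/* Mirrors pmd_large(): the mapping must be huge AND present. */
static int demo_pmd_large(unsigned long pmd)
{
	return (pmd & (DEMO_PMD_ISHUGE | DEMO_PMD_HUGE_PRESENT)) ==
	       (DEMO_PMD_ISHUGE | DEMO_PMD_HUGE_PRESENT);
}

/* Mirrors __pmd_page()'s address math: mask off the software bits,
 * then shift left by PMD_PADDR_SHIFT to recover the physical address. */
static unsigned long demo_pmd_paddr(unsigned long pmd)
{
	return (pmd & DEMO_PMD_HUGE_PADDR) << DEMO_PMD_PADDR_SHIFT;
}

/* Mirrors pmd_pfn(): the stored value is already paddr >> 11, so only
 * the remaining PAGE_SHIFT - PMD_PADDR_SHIFT bits need to be shifted. */
static unsigned long demo_pmd_pfn(unsigned long pmd)
{
	return (pmd & DEMO_PMD_HUGE_PADDR) >>
	       (DEMO_PAGE_SHIFT - DEMO_PMD_PADDR_SHIFT);
}

int main(void)
{
	/* Hypothetical huge PMD: physical address 0x200000000 encoded
	 * as (paddr >> 11), plus the ISHUGE and PRESENT bits. */
	unsigned long pmd = (0x200000000UL >> DEMO_PMD_PADDR_SHIFT) |
			    DEMO_PMD_ISHUGE | DEMO_PMD_HUGE_PRESENT;

	printf("large=%d paddr=%#lx pfn=%#lx\n",
	       demo_pmd_large(pmd), demo_pmd_paddr(pmd), demo_pmd_pfn(pmd));
	return 0;
}
```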
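`pgtable_trans_huge_deposit()` and `pgtable_trans_huge_withdraw()` are only declared in this header; the sparc64 bodies live in arch/sparc/mm. Their contract: when generic THP code installs a huge PMD, it deposits the PTE table that mapping displaced, and when the huge page is later split the table is withdrawn again, so the split path never has to allocate memory. A toy LIFO model of that contract, not the kernel implementation and with a hypothetical fixed-depth stack, looks like this:

```c
/* Toy model of the deposit/withdraw contract declared above.  The real
 * sparc64 code must also honor the new "typedef pte_t *pgtable_t". */
#include <assert.h>

typedef unsigned long pte_entry;
typedef pte_entry *pgtable_t;		/* matches the new sparc64 typedef */

struct demo_mm {
	pgtable_t deposited[16];	/* hypothetical fixed-depth stack */
	int depth;
};

/* Called when a huge PMD is installed: stash the displaced PTE table. */
static void demo_deposit(struct demo_mm *mm, pgtable_t pgtable)
{
	assert(mm->depth < 16);
	mm->deposited[mm->depth++] = pgtable;
}

/* Called when the huge page is split: get a table back, allocation-free. */
static pgtable_t demo_withdraw(struct demo_mm *mm)
{
	assert(mm->depth > 0);
	return mm->deposited[--mm->depth];
}

int main(void)
{
	static pte_entry table[512];	/* a 4K PTE table: 512 * 8 bytes */
	struct demo_mm mm = { .depth = 0 };

	demo_deposit(&mm, table);		/* huge PMD installed */
	assert(demo_withdraw(&mm) == table);	/* huge PMD split    */
	return 0;
}
```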