Diffstat (limited to 'arch/powerpc/include/asm/pgtable-ppc64.h')
-rw-r--r-- | arch/powerpc/include/asm/pgtable-ppc64.h | 7 |
1 files changed, 2 insertions, 5 deletions
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index b9dcc936e2d..43e6ad424c7 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -12,7 +12,7 @@
 #endif
 #include <asm/barrier.h>
 
-#define FIRST_USER_ADDRESS	0
+#define FIRST_USER_ADDRESS	0UL
 
 /*
  * Size of EA range mapped by our pagetables.
@@ -352,9 +352,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 #define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
 #define __pte_to_swp_entry(pte)	((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
 #define __swp_entry_to_pte(x)	((pte_t) { (x).val << PTE_RPN_SHIFT })
-#define pte_to_pgoff(pte)	(pte_val(pte) >> PTE_RPN_SHIFT)
-#define pgoff_to_pte(off)	((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
-#define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_RPN_SHIFT)
 
 void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
 void pgtable_cache_init(void);
@@ -389,7 +386,7 @@ void pgtable_cache_init(void);
 * The last three bits are intentionally left to zero. This memory location
 * are also used as normal page PTE pointers. So if we have any pointers
 * left around while we collapse a hugepage, we need to make sure
- * _PAGE_PRESENT and _PAGE_FILE bits of that are zero when we look at them
+ * _PAGE_PRESENT bit of that is zero when we look at them
 */
 static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
 {
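
For reference, below is a minimal user-space sketch of the swap-entry packing visible in the second hunk's context lines: the type goes into bits starting at 1 and the offset starts at bit 8, per the __swp_entry() macro kept by this commit. The __swp_type()/__swp_offset() decoders here are assumptions that simply mirror that encoder for illustration; the real header defines its own decoders with the kernel's exact field widths.

/* Sketch only, not kernel code: round-trip a (type, offset) pair through
 * an encoding shaped like the __swp_entry() macro in the diff context. */
#include <stdio.h>

typedef struct { unsigned long val; } swp_entry_t;

/* Encoder as shown in the diff context: type at bit 1, offset at bit 8. */
#define __swp_entry(type, offset) ((swp_entry_t){ ((type) << 1) | ((offset) << 8) })

/* Hypothetical decoders mirroring the encoder above (field widths assumed). */
#define __swp_type(entry)   (((entry).val >> 1) & 0x3fUL)
#define __swp_offset(entry) ((entry).val >> 8)

int main(void)
{
	swp_entry_t e = __swp_entry(3UL, 0x1234UL);

	/* Recover the type and offset that were packed in. */
	printf("type=%lu offset=%#lx\n", __swp_type(e), __swp_offset(e));
	return 0;
}

Note how bit 0 (_PAGE_PRESENT) is never set by this encoding, which is what the updated comment in the last hunk relies on: with _PAGE_FILE gone, only the present bit needs to be clear for such an entry to be distinguishable from a normal page PTE pointer.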