Diffstat (limited to 'include/asm-s390/pgtable.h')
-rw-r--r--  include/asm-s390/pgtable.h  |  429
1 file changed, 223 insertions(+), 206 deletions(-)
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 39bb5192dc3..f2cc25b74ad 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -13,8 +13,6 @@
#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H
-#include <asm-generic/4level-fixup.h>
-
/*
* The Linux memory management assumes a three-level page table setup. For
* s390 31 bit we "fold" the mid level into the top-level page table, so
@@ -35,9 +33,6 @@
#include <asm/bug.h>
#include <asm/processor.h>
-struct vm_area_struct; /* forward declaration (include/linux/mm.h) */
-struct mm_struct;
-
extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);
@@ -63,14 +58,18 @@ extern char empty_zero_page[PAGE_SIZE];
*/
#ifndef __s390x__
# define PMD_SHIFT 22
+# define PUD_SHIFT 22
# define PGDIR_SHIFT 22
#else /* __s390x__ */
# define PMD_SHIFT 21
+# define PUD_SHIFT 31
# define PGDIR_SHIFT 31
#endif /* __s390x__ */
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
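
For orientation, the new shift values work out as follows (a worked example, illustrative and not part of the patch):

/*
 * 64 bit: PUD_SIZE == 1UL << 31 == 2 GB, so one region-third-table
 * entry spans 2 GB; PMD_SIZE == 1UL << 21 == 2 MB, i.e. a pmd_t covers
 * two 1 MB hardware segments. 31 bit: PMD, PUD and PGDIR all fold to
 * 1UL << 22 == 4 MB per top-level entry.
 */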
@@ -83,10 +82,12 @@ extern char empty_zero_page[PAGE_SIZE];
#ifndef __s390x__
# define PTRS_PER_PTE 1024
# define PTRS_PER_PMD 1
+# define PTRS_PER_PUD 1
# define PTRS_PER_PGD 512
#else /* __s390x__ */
# define PTRS_PER_PTE 512
# define PTRS_PER_PMD 1024
+# define PTRS_PER_PUD 1
# define PTRS_PER_PGD 2048
#endif /* __s390x__ */
@@ -96,6 +97,8 @@ extern char empty_zero_page[PAGE_SIZE];
printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
+#define pud_ERROR(e) \
+ printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
@@ -195,7 +198,7 @@ extern unsigned long vmalloc_end;
* I Segment-Invalid Bit: Segment is not available for address-translation
* TT Type 01
* TF
- * TL Table lenght
+ * TL Table length
*
 * The 64 bit region table origin of S390 has the following format:
 * | region table origin | DTTL
@@ -221,6 +224,8 @@ extern unsigned long vmalloc_end;
/* Hardware bits in the page table entry */
#define _PAGE_RO 0x200 /* HW read-only bit */
#define _PAGE_INVALID 0x400 /* HW invalid bit */
+
+/* Software bits in the page table entry */
#define _PAGE_SWT 0x001 /* SW pte type bit t */
#define _PAGE_SWX 0x002 /* SW pte type bit x */
@@ -264,60 +269,75 @@ extern unsigned long vmalloc_end;
#ifndef __s390x__
-/* Bits in the segment table entry */
-#define _PAGE_TABLE_LEN 0xf /* only full page-tables */
-#define _PAGE_TABLE_COM 0x10 /* common page-table */
-#define _PAGE_TABLE_INV 0x20 /* invalid page-table */
-#define _SEG_PRESENT 0x001 /* Software (overlap with PTL) */
-
-/* Bits int the storage key */
-#define _PAGE_CHANGED 0x02 /* HW changed bit */
-#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
-
-#define _USER_SEG_TABLE_LEN 0x7f /* user-segment-table up to 2 GB */
-#define _KERNEL_SEG_TABLE_LEN 0x7f /* kernel-segment-table up to 2 GB */
-
-/*
- * User and Kernel pagetables are identical
- */
-#define _PAGE_TABLE _PAGE_TABLE_LEN
-#define _KERNPG_TABLE _PAGE_TABLE_LEN
-
-/*
- * The Kernel segment-tables includes the User segment-table
- */
+/* Bits in the segment table address-space-control-element */
+#define _ASCE_SPACE_SWITCH 0x80000000UL /* space switch event */
+#define _ASCE_ORIGIN_MASK 0x7ffff000UL /* segment table origin */
+#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
+#define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
+#define _ASCE_TABLE_LENGTH 0x7f /* 128 x 64 entries = 8k */
-#define _SEGMENT_TABLE (_USER_SEG_TABLE_LEN|0x80000000|0x100)
-#define _KERNSEG_TABLE _KERNEL_SEG_TABLE_LEN
+/* Bits in the segment table entry */
+#define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */
+#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
+#define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */
+#define _SEGMENT_ENTRY_PTL 0x0f /* page table length */
-#define USER_STD_MASK 0x00000080UL
+#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL)
+#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV)
#else /* __s390x__ */
+/* Bits in the segment/region table address-space-control-element */
+#define _ASCE_ORIGIN ~0xfffUL /* segment table origin */
+#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
+#define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
+#define _ASCE_SPACE_SWITCH 0x40 /* space switch event */
+#define _ASCE_REAL_SPACE 0x20 /* real space control */
+#define _ASCE_TYPE_MASK 0x0c /* asce table type mask */
+#define _ASCE_TYPE_REGION1 0x0c /* region first table type */
+#define _ASCE_TYPE_REGION2 0x08 /* region second table type */
+#define _ASCE_TYPE_REGION3 0x04 /* region third table type */
+#define _ASCE_TYPE_SEGMENT 0x00 /* segment table type */
+#define _ASCE_TABLE_LENGTH 0x03 /* region table length */
+
+/* Bits in the region table entry */
+#define _REGION_ENTRY_ORIGIN ~0xfffUL /* region/segment table origin */
+#define _REGION_ENTRY_INV 0x20 /* invalid region table entry */
+#define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
+#define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
+#define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */
+#define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */
+#define _REGION_ENTRY_LENGTH 0x03 /* region third length */
+
+#define _REGION1_ENTRY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
+#define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
+#define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
+#define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
+#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
+#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)
+
/* Bits in the segment table entry */
-#define _PMD_ENTRY_INV 0x20 /* invalid segment table entry */
-#define _PMD_ENTRY 0x00
+#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL /* segment table origin */
+#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
+#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
+
+#define _SEGMENT_ENTRY (0)
+#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV)
-/* Bits in the region third table entry */
-#define _PGD_ENTRY_INV 0x20 /* invalid region table entry */
-#define _PGD_ENTRY 0x07
+#endif /* __s390x__ */
/*
- * User and kernel page directory
+ * A user page table pointer has the space-switch-event bit, the
+ * private-space-control bit and the storage-alteration-event-control
+ * bit set. A kernel page table pointer doesn't need them.
*/
-#define _REGION_THIRD 0x4
-#define _REGION_THIRD_LEN 0x3
-#define _REGION_TABLE (_REGION_THIRD|_REGION_THIRD_LEN|0x40|0x100)
-#define _KERN_REGION_TABLE (_REGION_THIRD|_REGION_THIRD_LEN)
-
-#define USER_STD_MASK 0x0000000000000080UL
+#define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
+ _ASCE_ALT_EVENT)
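
As a minimal sketch of how these bits are meant to be combined with a table origin (make_user_asce is a hypothetical helper, not part of the patch), a user address-space-control element could be built like this:

static inline unsigned long make_user_asce(unsigned long table_origin)
{
	/* page-aligned table origin, plus user bits and table length */
	return table_origin | _ASCE_USER_BITS | _ASCE_TABLE_LENGTH;
}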
-/* Bits in the storage key */
+/* Bits in the storage key */
#define _PAGE_CHANGED 0x02 /* HW changed bit */
#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
-#endif /* __s390x__ */
-
/*
* Page protection definitions.
*/
@@ -358,65 +378,38 @@ extern unsigned long vmalloc_end;
#define __S111 PAGE_EX_RW
#ifndef __s390x__
-# define PMD_SHADOW_SHIFT 1
-# define PGD_SHADOW_SHIFT 1
+# define PxD_SHADOW_SHIFT 1
#else /* __s390x__ */
-# define PMD_SHADOW_SHIFT 2
-# define PGD_SHADOW_SHIFT 2
+# define PxD_SHADOW_SHIFT 2
#endif /* __s390x__ */
static inline struct page *get_shadow_page(struct page *page)
{
- if (s390_noexec && !list_empty(&page->lru))
- return virt_to_page(page->lru.next);
- return NULL;
-}
-
-static inline pte_t *get_shadow_pte(pte_t *ptep)
-{
- unsigned long pteptr = (unsigned long) (ptep);
-
- if (s390_noexec) {
- unsigned long offset = pteptr & (PAGE_SIZE - 1);
- void *addr = (void *) (pteptr ^ offset);
- struct page *page = virt_to_page(addr);
- if (!list_empty(&page->lru))
- return (pte_t *) ((unsigned long) page->lru.next |
- offset);
- }
+ if (s390_noexec && page->index)
+ return virt_to_page((void *)(addr_t) page->index);
return NULL;
}
-static inline pmd_t *get_shadow_pmd(pmd_t *pmdp)
+static inline void *get_shadow_pte(void *table)
{
- unsigned long pmdptr = (unsigned long) (pmdp);
+ unsigned long addr, offset;
+ struct page *page;
- if (s390_noexec) {
- unsigned long offset = pmdptr &
- ((PAGE_SIZE << PMD_SHADOW_SHIFT) - 1);
- void *addr = (void *) (pmdptr ^ offset);
- struct page *page = virt_to_page(addr);
- if (!list_empty(&page->lru))
- return (pmd_t *) ((unsigned long) page->lru.next |
- offset);
- }
- return NULL;
+ addr = (unsigned long) table;
+ offset = addr & (PAGE_SIZE - 1);
+ page = virt_to_page((void *)(addr ^ offset));
+ return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
}
-static inline pgd_t *get_shadow_pgd(pgd_t *pgdp)
+static inline void *get_shadow_table(void *table)
{
- unsigned long pgdptr = (unsigned long) (pgdp);
+ unsigned long addr, offset;
+ struct page *page;
- if (s390_noexec) {
- unsigned long offset = pgdptr &
- ((PAGE_SIZE << PGD_SHADOW_SHIFT) - 1);
- void *addr = (void *) (pgdptr ^ offset);
- struct page *page = virt_to_page(addr);
- if (!list_empty(&page->lru))
- return (pgd_t *) ((unsigned long) page->lru.next |
- offset);
- }
- return NULL;
+ addr = (unsigned long) table;
+ offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
+ page = virt_to_page((void *)(addr ^ offset));
+ return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
}
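
The lookups above rely on a convention that must hold on the allocation side: the struct page behind a primary table remembers its shadow's address in page->index, or 0 if no shadow exists. A sketch of that side (an assumption mirroring the lookup code; the allocator itself is not part of this hunk):

	/* when allocating a no-exec shadow table (sketch): */
	virt_to_page(table)->index = (addr_t) shadow_table;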
/*
@@ -424,7 +417,8 @@ static inline pgd_t *get_shadow_pgd(pgd_t *pgdp)
* within a page table are directly modified. Thus, the following
* hook is made available.
*/
-static inline void set_pte(pte_t *pteptr, pte_t pteval)
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *pteptr, pte_t pteval)
{
pte_t *shadow_pte = get_shadow_pte(pteptr);
@@ -437,7 +431,6 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
}
}
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
/*
* pgd/pmd/pte query functions
@@ -448,47 +441,50 @@ static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }
-static inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _SEG_PRESENT; }
-static inline int pmd_none(pmd_t pmd) { return pmd_val(pmd) & _PAGE_TABLE_INV; }
-static inline int pmd_bad(pmd_t pmd)
-{
- return (pmd_val(pmd) & (~PAGE_MASK & ~_PAGE_TABLE_INV)) != _PAGE_TABLE;
-}
+static inline int pud_present(pud_t pud) { return 1; }
+static inline int pud_none(pud_t pud) { return 0; }
+static inline int pud_bad(pud_t pud) { return 0; }
#else /* __s390x__ */
-static inline int pgd_present(pgd_t pgd)
+static inline int pgd_present(pgd_t pgd) { return 1; }
+static inline int pgd_none(pgd_t pgd) { return 0; }
+static inline int pgd_bad(pgd_t pgd) { return 0; }
+
+static inline int pud_present(pud_t pud)
{
- return (pgd_val(pgd) & ~PAGE_MASK) == _PGD_ENTRY;
+ return pud_val(pud) & _REGION_ENTRY_ORIGIN;
}
-static inline int pgd_none(pgd_t pgd)
+static inline int pud_none(pud_t pud)
{
- return pgd_val(pgd) & _PGD_ENTRY_INV;
+ return pud_val(pud) & _REGION_ENTRY_INV;
}
-static inline int pgd_bad(pgd_t pgd)
+static inline int pud_bad(pud_t pud)
{
- return (pgd_val(pgd) & (~PAGE_MASK & ~_PGD_ENTRY_INV)) != _PGD_ENTRY;
+ unsigned long mask = ~_REGION_ENTRY_ORIGIN & ~_REGION_ENTRY_INV;
+ return (pud_val(pud) & mask) != _REGION3_ENTRY;
}
+#endif /* __s390x__ */
+
static inline int pmd_present(pmd_t pmd)
{
- return (pmd_val(pmd) & ~PAGE_MASK) == _PMD_ENTRY;
+ return pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
}
static inline int pmd_none(pmd_t pmd)
{
- return pmd_val(pmd) & _PMD_ENTRY_INV;
+ return pmd_val(pmd) & _SEGMENT_ENTRY_INV;
}
static inline int pmd_bad(pmd_t pmd)
{
- return (pmd_val(pmd) & (~PAGE_MASK & ~_PMD_ENTRY_INV)) != _PMD_ENTRY;
+ unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
+ return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}
-#endif /* __s390x__ */
-
static inline int pte_none(pte_t pte)
{
return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
@@ -508,7 +504,8 @@ static inline int pte_file(pte_t pte)
return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}
-#define pte_same(a,b) (pte_val(a) == pte_val(b))
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(a,b) (pte_val(a) == pte_val(b))
/*
* query functions pte_write/pte_dirty/pte_young only work if
@@ -543,58 +540,52 @@ static inline int pte_young(pte_t pte)
#ifndef __s390x__
-static inline void pgd_clear(pgd_t * pgdp) { }
+#define pgd_clear(pgd) do { } while (0)
+#define pud_clear(pud) do { } while (0)
static inline void pmd_clear_kernel(pmd_t * pmdp)
{
- pmd_val(pmdp[0]) = _PAGE_TABLE_INV;
- pmd_val(pmdp[1]) = _PAGE_TABLE_INV;
- pmd_val(pmdp[2]) = _PAGE_TABLE_INV;
- pmd_val(pmdp[3]) = _PAGE_TABLE_INV;
-}
-
-static inline void pmd_clear(pmd_t * pmdp)
-{
- pmd_t *shadow_pmd = get_shadow_pmd(pmdp);
-
- pmd_clear_kernel(pmdp);
- if (shadow_pmd)
- pmd_clear_kernel(shadow_pmd);
+ pmd_val(pmdp[0]) = _SEGMENT_ENTRY_EMPTY;
+ pmd_val(pmdp[1]) = _SEGMENT_ENTRY_EMPTY;
+ pmd_val(pmdp[2]) = _SEGMENT_ENTRY_EMPTY;
+ pmd_val(pmdp[3]) = _SEGMENT_ENTRY_EMPTY;
}
#else /* __s390x__ */
-static inline void pgd_clear_kernel(pgd_t * pgdp)
+#define pgd_clear(pgd) do { } while (0)
+
+static inline void pud_clear_kernel(pud_t *pud)
{
- pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY;
+ pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}
-static inline void pgd_clear(pgd_t * pgdp)
+static inline void pud_clear(pud_t * pud)
{
- pgd_t *shadow_pgd = get_shadow_pgd(pgdp);
+ pud_t *shadow = get_shadow_table(pud);
- pgd_clear_kernel(pgdp);
- if (shadow_pgd)
- pgd_clear_kernel(shadow_pgd);
+ pud_clear_kernel(pud);
+ if (shadow)
+ pud_clear_kernel(shadow);
}
static inline void pmd_clear_kernel(pmd_t * pmdp)
{
- pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
- pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
+ pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+ pmd_val1(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}
+#endif /* __s390x__ */
+
static inline void pmd_clear(pmd_t * pmdp)
{
- pmd_t *shadow_pmd = get_shadow_pmd(pmdp);
+ pmd_t *shadow_pmd = get_shadow_table(pmdp);
pmd_clear_kernel(pmdp);
if (shadow_pmd)
pmd_clear_kernel(shadow_pmd);
}
-#endif /* __s390x__ */
-
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t *shadow_pte = get_shadow_pte(ptep);
@@ -663,24 +654,19 @@ static inline pte_t pte_mkyoung(pte_t pte)
return pte;
}
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
{
return 0;
}
-static inline int
-ptep_clear_flush_young(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep)
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
{
/* No need to flush TLB; bits are in storage key */
- return ptep_test_and_clear_young(vma, address, ptep);
-}
-
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
- pte_t pte = *ptep;
- pte_clear(mm, addr, ptep);
- return pte;
+ return 0;
}
static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
@@ -709,6 +695,32 @@ static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
__ptep_ipte(address, ptep);
}
+/*
+ * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
+ * both clear the TLB for the unmapped pte. The reason is that
+ * ptep_get_and_clear is used in common code (e.g. change_pte_range)
+ * to modify an active pte. The sequence is
+ * 1) ptep_get_and_clear
+ * 2) set_pte_at
+ * 3) flush_tlb_range
+ * On s390 the tlb needs to get flushed with the modification of the pte
+ * if the pte is active. The only way how this can be implemented is to
+ * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
+ * is a nop.
+ */
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define ptep_get_and_clear(__mm, __address, __ptep) \
+({ \
+ pte_t __pte = *(__ptep); \
+ if (atomic_read(&(__mm)->mm_users) > 1 || \
+ (__mm) != current->active_mm) \
+ ptep_invalidate(__address, __ptep); \
+ else \
+ pte_clear((__mm), (__address), (__ptep)); \
+ __pte; \
+})
+
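
The three-step sequence from the comment above, as it appears in generic code (simplified from mprotect's change_pte_range; illustrative only):

/*
 *	ptent = ptep_get_and_clear(mm, addr, pte);	(1) flushes on s390
 *	ptent = pte_modify(ptent, newprot);
 *	set_pte_at(mm, addr, pte, ptent);		(2)
 *	...
 *	flush_tlb_range(vma, start, end);		(3) nop on s390
 */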
+#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
@@ -717,12 +729,40 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
return pte;
}
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+/*
+ * The batched pte unmap code uses ptep_get_and_clear_full to clear the
+ * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
+ * tlbs of an mm if it can guarantee that the ptes of the mm_struct
+ * cannot be accessed while the batched unmap is running. In this case
+ * full==1 and a simple pte_clear is enough. See tlb.h.
+ */
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep, int full)
{
- pte_t old_pte = *ptep;
- set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+ pte_t pte = *ptep;
+
+ if (full)
+ pte_clear(mm, addr, ptep);
+ else
+ ptep_invalidate(addr, ptep);
+ return pte;
}
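
Call-site sketch (cf. zap_pte_range in mm/memory.c of this era; simplified): the batched unmap passes tlb->fullmm as the full argument, so a full mm teardown takes the cheap pte_clear path:

	ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);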
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define ptep_set_wrprotect(__mm, __addr, __ptep) \
+({ \
+ pte_t __pte = *(__ptep); \
+ if (pte_write(__pte)) { \
+ if (atomic_read(&(__mm)->mm_users) > 1 || \
+ (__mm) != current->active_mm) \
+ ptep_invalidate(__addr, __ptep); \
+ set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
+ } \
+})
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
({ \
int __changed = !pte_same(*(__ptep), __entry); \
@@ -740,11 +780,13 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
* should therefore only be called if it is not mapped in any
* address space.
*/
+#define __HAVE_ARCH_PAGE_TEST_DIRTY
static inline int page_test_dirty(struct page *page)
{
return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
}
+#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
static inline void page_clear_dirty(struct page *page)
{
page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
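
Sketch of how the generic rmap code consumes these two hooks (cf. page_mkclean of this era; simplified and illustrative):

/*
 *	if (page_test_dirty(page)) {
 *		page_clear_dirty(page);
 *		ret = 1;
 *	}
 */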
@@ -753,6 +795,7 @@ static inline void page_clear_dirty(struct page *page)
/*
* Test and clear referenced bit in storage key.
*/
+#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
static inline int page_test_and_clear_young(struct page *page)
{
unsigned long physpage = page_to_phys(page);
@@ -784,63 +827,48 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
return mk_pte_phys(physpage, pgprot);
}
-static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
-{
- unsigned long physpage = __pa((pfn) << PAGE_SHIFT);
-
- return mk_pte_phys(physpage, pgprot);
-}
-
-#ifdef __s390x__
-
-static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
-{
- unsigned long physpage = __pa((pfn) << PAGE_SHIFT);
-
- return __pmd(physpage + pgprot_val(pgprot));
-}
-
-#endif /* __s390x__ */
-
-#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
-#define pte_page(x) pfn_to_page(pte_pfn(x))
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
-#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
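
A worked example of the new index macros (64 bit, illustrative):

/*
 * For address 0x40001000:
 *	pgd_index = (0x40001000 >> 31) & 2047 = 0
 *	pud_index = 0		(PTRS_PER_PUD == 1, pud folded into pgd)
 *	pmd_index = (0x40001000 >> 21) & 1023 = 512
 *	pte_index = (0x40001000 >> 12) & 511 = 1
 */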
-#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
+#ifndef __s390x__
-#define pgd_page_vaddr(pgd) (pgd_val(pgd) & PAGE_MASK)
+#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
+#define pud_deref(pud) ({ BUG(); 0UL; })
+#define pgd_deref(pgd) ({ BUG(); 0UL; })
-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
+#define pud_offset(pgd, address) ((pud_t *) pgd)
+#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
-/* to find an entry in a page-table-directory */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+#else /* __s390x__ */
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
+#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
+#define pgd_deref(pgd) ({ BUG(); 0UL; })
-#ifndef __s390x__
+#define pud_offset(pgd, address) ((pud_t *) pgd)
-/* Find an entry in the second-level page table.. */
-static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
- return (pmd_t *) dir;
+ pmd_t *pmd = (pmd_t *) pud_deref(*pud);
+ return pmd + pmd_index(address);
}
-#else /* __s390x__ */
+#endif /* __s390x__ */
-/* Find an entry in the second-level page table.. */
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-#define pmd_offset(dir,addr) \
- ((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(addr))
+#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
+#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
+#define pte_page(x) pfn_to_page(pte_pfn(x))
-#endif /* __s390x__ */
+#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
-/* Find an entry in the third-level page table.. */
-#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
-#define pte_offset_kernel(pmd, address) \
- ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
+/* Find an entry in the lowest level page table.. */
+#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
+#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
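
Putting the pieces together, a full four-level walk with the reworked API looks like this (a sketch under the usual page table locking assumptions, not part of the patch):

	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud = pud_offset(pgd, addr);	/* no-op cast, pud is folded */
	pmd_t *pmd = pmd_offset(pud, addr);
	pte_t *pte = pte_offset_map(pmd, addr);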
@@ -930,17 +958,6 @@ extern int remove_shared_memory(unsigned long start, unsigned long size);
#define __HAVE_ARCH_MEMMAP_INIT
extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#define __HAVE_ARCH_PTE_SAME
-#define __HAVE_ARCH_PAGE_TEST_DIRTY
-#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
-#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#include <asm-generic/pgtable.h>
#endif /* _ASM_S390_PGTABLE_H */
-