Diffstat (limited to 'arch/s390/mm')
 arch/s390/mm/dump_pagetables.c |  18
 arch/s390/mm/fault.c           |   4
 arch/s390/mm/gup.c             |   6
 arch/s390/mm/hugetlbpage.c     | 124
 arch/s390/mm/init.c            |   1
 arch/s390/mm/pageattr.c        |   2
 arch/s390/mm/pgtable.c         |  87
 arch/s390/mm/vmem.c            |  15
 8 files changed, 182 insertions(+), 75 deletions(-)
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 3ad65b04ac1..46d517c3c76 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -53,7 +53,7 @@ static void print_prot(struct seq_file *m, unsigned int pr, int level)
 		seq_printf(m, "I\n");
 		return;
 	}
-	seq_printf(m, "%s", pr & _PAGE_RO ? "RO " : "RW ");
+	seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW ");
 	seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : "   ");
 	seq_putc(m, '\n');
 }
@@ -105,12 +105,12 @@ static void note_page(struct seq_file *m, struct pg_state *st,
 }
 
 /*
- * The actual page table walker functions. In order to keep the implementation
- * of print_prot() short, we only check and pass _PAGE_INVALID and _PAGE_RO
- * flags to note_page() if a region, segment or page table entry is invalid or
- * read-only.
- * After all it's just a hint that the current level being walked contains an
- * invalid or read-only entry.
+ * The actual page table walker functions. In order to keep the
+ * implementation of print_prot() short, we only check and pass
+ * _PAGE_INVALID and _PAGE_PROTECT flags to note_page() if a region,
+ * segment or page table entry is invalid or read-only.
+ * After all it's just a hint that the current level being walked
+ * contains an invalid or read-only entry.
  */
 static void walk_pte_level(struct seq_file *m, struct pg_state *st,
 			   pmd_t *pmd, unsigned long addr)
@@ -122,14 +122,14 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
 	for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
 		st->current_address = addr;
 		pte = pte_offset_kernel(pmd, addr);
-		prot = pte_val(*pte) & (_PAGE_RO | _PAGE_INVALID);
+		prot = pte_val(*pte) & (_PAGE_PROTECT | _PAGE_INVALID);
 		note_page(m, st, prot, 4);
 		addr += PAGE_SIZE;
 	}
 }
 
 #ifdef CONFIG_64BIT
-#define _PMD_PROT_MASK (_SEGMENT_ENTRY_RO | _SEGMENT_ENTRY_CO)
+#define _PMD_PROT_MASK (_SEGMENT_ENTRY_PROTECT | _SEGMENT_ENTRY_CO)
 #else
 #define _PMD_PROT_MASK 0
 #endif
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 047c3e4c59a..f00aefb66a4 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -639,8 +639,8 @@ out:
 	put_task_struct(tsk);
 }
 
-static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
-				       unsigned long action, void *hcpu)
+static int pfault_cpu_notify(struct notifier_block *self, unsigned long action,
+			     void *hcpu)
 {
 	struct thread_struct *thread, *next;
 	struct task_struct *tsk;
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 1f5315d1215..5d758db27bd 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -24,7 +24,7 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 	pte_t *ptep, pte;
 	struct page *page;
 
-	mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
+	mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
 
 	ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
 	do {
@@ -55,8 +55,8 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 	struct page *head, *page, *tail;
 	int refs;
 
-	result = write ? 0 : _SEGMENT_ENTRY_RO;
-	mask = result | _SEGMENT_ENTRY_INV;
+	result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
+	mask = result | _SEGMENT_ENTRY_INVALID;
 	if ((pmd_val(pmd) & mask) != result)
 		return 0;
 	VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
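The gup.c hunks rename the protection bits but keep the lockless-walker idiom intact: gup_pte_range() folds every disqualifying condition (invalid, special, and, for a write, write-protected) into one mask and rejects the pte with a single masked compare. A minimal user-space sketch of that idiom follows; the PTE_* constants are hypothetical stand-ins for the real _PAGE_* bit values, not the kernel's definitions.

	#include <stdio.h>

	#define PTE_INVALID 0x400UL	/* stand-in for _PAGE_INVALID */
	#define PTE_PROTECT 0x200UL	/* stand-in for _PAGE_PROTECT */
	#define PTE_SPECIAL 0x004UL	/* stand-in for _PAGE_SPECIAL */

	/* Nonzero if a lockless walker may use this pte for the requested
	 * access, mirroring the masked compare in gup_pte_range(). */
	static int pte_usable(unsigned long pte, int write)
	{
		unsigned long mask;

		mask = (write ? PTE_PROTECT : 0) | PTE_INVALID | PTE_SPECIAL;
		return (pte & mask) == 0;
	}

	int main(void)
	{
		printf("%d\n", pte_usable(0, 1));		/* 1: read-write pte */
		printf("%d\n", pte_usable(PTE_PROTECT, 1));	/* 0: write needs PROTECT clear */
		printf("%d\n", pte_usable(PTE_PROTECT, 0));	/* 1: read of a protected pte */
		return 0;
	}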
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 121089d5780..248445f9260 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -8,21 +8,127 @@
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
 
+static inline pmd_t __pte_to_pmd(pte_t pte)
+{
+	int none, young, prot;
+	pmd_t pmd;
+
+	/*
+	 * Convert encoding		pte bits	pmd bits
+	 *				.IR...wrdytp	..R...I...y.
+	 * empty			.10...000000 -> ..0...1...0.
+	 * prot-none, clean, old	.11...000001 -> ..0...1...1.
+	 * prot-none, clean, young	.11...000101 -> ..1...1...1.
+	 * prot-none, dirty, old	.10...001001 -> ..0...1...1.
+	 * prot-none, dirty, young	.10...001101 -> ..1...1...1.
+	 * read-only, clean, old	.11...010001 -> ..1...1...0.
+	 * read-only, clean, young	.01...010101 -> ..1...0...1.
+	 * read-only, dirty, old	.11...011001 -> ..1...1...0.
+	 * read-only, dirty, young	.01...011101 -> ..1...0...1.
+	 * read-write, clean, old	.11...110001 -> ..0...1...0.
+	 * read-write, clean, young	.01...110101 -> ..0...0...1.
+	 * read-write, dirty, old	.10...111001 -> ..0...1...0.
+	 * read-write, dirty, young	.00...111101 -> ..0...0...1.
+	 * Huge ptes are dirty by definition, a clean pte is made dirty
+	 * by the conversion.
+	 */
+	if (pte_present(pte)) {
+		pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
+		if (pte_val(pte) & _PAGE_INVALID)
+			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
+		none = (pte_val(pte) & _PAGE_PRESENT) &&
+			!(pte_val(pte) & _PAGE_READ) &&
+			!(pte_val(pte) & _PAGE_WRITE);
+		prot = (pte_val(pte) & _PAGE_PROTECT) &&
+			!(pte_val(pte) & _PAGE_WRITE);
+		young = pte_val(pte) & _PAGE_YOUNG;
+		if (none || young)
+			pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
+		if (prot || (none && young))
+			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+	} else
+		pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
+	return pmd;
+}
+
+static inline pte_t __pmd_to_pte(pmd_t pmd)
+{
+	pte_t pte;
+
+	/*
+	 * Convert encoding	pmd bits	pte bits
+	 *			..R...I...y.	.IR...wrdytp
+	 * empty		..0...1...0. -> .10...000000
+	 * prot-none, old	..0...1...1. -> .10...001001
+	 * prot-none, young	..1...1...1. -> .10...001101
+	 * read-only, old	..1...1...0. -> .11...011001
+	 * read-only, young	..1...0...1. -> .01...011101
+	 * read-write, old	..0...1...0. -> .10...111001
+	 * read-write, young	..0...0...1. -> .00...111101
+	 * Huge ptes are dirty by definition
+	 */
+	if (pmd_present(pmd)) {
+		pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY |
+			(pmd_val(pmd) & PAGE_MASK);
+		if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID)
+			pte_val(pte) |= _PAGE_INVALID;
+		if (pmd_prot_none(pmd)) {
+			if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
+				pte_val(pte) |= _PAGE_YOUNG;
+		} else {
+			pte_val(pte) |= _PAGE_READ;
+			if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
+				pte_val(pte) |= _PAGE_PROTECT;
+			else
+				pte_val(pte) |= _PAGE_WRITE;
+			if (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)
+				pte_val(pte) |= _PAGE_YOUNG;
+		}
+	} else
+		pte_val(pte) = _PAGE_INVALID;
+	return pte;
+}
 
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-		     pte_t *pteptr, pte_t pteval)
+		     pte_t *ptep, pte_t pte)
 {
-	pmd_t *pmdp = (pmd_t *) pteptr;
-	unsigned long mask;
+	pmd_t pmd;
 
+	pmd = __pte_to_pmd(pte);
 	if (!MACHINE_HAS_HPAGE) {
-		pteptr = (pte_t *) pte_page(pteval)[1].index;
-		mask = pte_val(pteval) &
-			(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
-		pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
+		pmd_val(pmd) |= pte_page(pte)[1].index;
+	} else
+		pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO;
+	*(pmd_t *) ptep = pmd;
+}
+
+pte_t huge_ptep_get(pte_t *ptep)
+{
+	unsigned long origin;
+	pmd_t pmd;
+
+	pmd = *(pmd_t *) ptep;
+	if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
+		origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
+		pmd_val(pmd) |= *(unsigned long *) origin;
 	}
+	return __pmd_to_pte(pmd);
+}
 
-	pmd_val(*pmdp) = pte_val(pteval);
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+			      unsigned long addr, pte_t *ptep)
+{
+	pmd_t *pmdp = (pmd_t *) ptep;
+	pte_t pte = huge_ptep_get(ptep);
+
+	if (MACHINE_HAS_IDTE)
+		__pmd_idte(addr, pmdp);
+	else
+		__pmd_csp(pmdp);
+	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+	return pte;
 }
 
 int arch_prepare_hugepage(struct page *page)
@@ -58,7 +164,7 @@ void arch_release_hugepage(struct page *page)
 	ptep = (pte_t *) page[1].index;
 	if (!ptep)
 		return;
-	clear_table((unsigned long *) ptep, _PAGE_TYPE_EMPTY,
+	clear_table((unsigned long *) ptep, _PAGE_INVALID,
 		    PTRS_PER_PTE * sizeof(pte_t));
 	page_table_free(&init_mm, (unsigned long *) ptep);
 	page[1].index = 0;
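The conversion tables above compress the pte's software bits into the few attribute bits a segment entry can hold. A standalone sketch of the present-pte branch of __pte_to_pmd(), using booleans instead of the real bit masks (the type and names below are illustrative only, not kernel definitions), shows how the prot-none and read-only states survive the trip:

	#include <stdbool.h>
	#include <stdio.h>

	struct seg_bits {
		bool young;	/* models _SEGMENT_ENTRY_YOUNG */
		bool protect;	/* models _SEGMENT_ENTRY_PROTECT */
	};

	/* Mirrors the bit logic of the present-pte branch in __pte_to_pmd(). */
	static struct seg_bits pte_to_seg(bool read, bool write, bool protect,
					  bool young)
	{
		struct seg_bits s = { false, false };
		bool none = !read && !write;	/* PROT_NONE mapping */
		bool prot = protect && !write;	/* effectively read-only */

		if (none || young)
			s.young = true;
		if (prot || (none && young))
			s.protect = true;
		return s;
	}

	int main(void)
	{
		/* prot-none + young encodes as young=1 protect=1, the pair
		 * that __pmd_to_pte() decodes back into a prot-none pte. */
		struct seg_bits s = pte_to_seg(false, false, true, true);

		printf("young=%d protect=%d\n", s.young, s.protect);
		return 0;
	}

As the comment in the hunk notes, dirtiness is not representable at this level: huge ptes are dirty by definition, so a clean pte is simply made dirty by the conversion.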
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index ce36ea80e4f..ad446b0c55b 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -69,6 +69,7 @@ static void __init setup_zero_pages(void)
 		order = 2;
 		break;
 	case 0x2827:	/* zEC12 */
+	case 0x2828:	/* zEC12 */
 	default:
 		order = 5;
 		break;
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 80adfbf7506..990397420e6 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -118,7 +118,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 		pte = pte_offset_kernel(pmd, address);
 		if (!enable) {
 			__ptep_ipte(address, pte);
-			pte_val(*pte) = _PAGE_TYPE_EMPTY;
+			pte_val(*pte) = _PAGE_INVALID;
 			continue;
 		}
 		pte_val(*pte) = __pa(address);
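The pageattr.c hunk is the DEBUG_PAGEALLOC path: when a page is unmapped for debugging, the TLB entry is removed first (__ptep_ipte) and only then is the pte rewritten, now with _PAGE_INVALID instead of the retired _PAGE_TYPE_EMPTY. A sketch of that flush-then-invalidate ordering, with hypothetical stand-ins for both the bit value and the flush primitive:

	#include <stdio.h>

	#define PTE_INVALID 0x400UL	/* stand-in for _PAGE_INVALID */

	/* Stand-in for __ptep_ipte(): on real hardware this purges the TLB
	 * entry for 'address' before the pte itself is rewritten. */
	static void flush_tlb_entry(unsigned long address, unsigned long *pte)
	{
		(void) address;
		(void) pte;
	}

	/* Sketch of the !enable branch of kernel_map_pages() in this patch. */
	static void debug_unmap_page(unsigned long address, unsigned long *pte)
	{
		flush_tlb_entry(address, pte);
		*pte = PTE_INVALID;	/* any later access now faults */
	}

	int main(void)
	{
		unsigned long pte = 0x12345000UL;

		debug_unmap_page(0x12345000UL, &pte);
		printf("pte=%#lx\n", pte);
		return 0;
	}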
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 967d0bf1c05..bf7c0dc64a7 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -161,7 +161,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
 	struct gmap_rmap *rmap;
 	struct page *page;
 
-	if (*table & _SEGMENT_ENTRY_INV)
+	if (*table & _SEGMENT_ENTRY_INVALID)
 		return 0;
 	page = pfn_to_page(*table >> PAGE_SHIFT);
 	mp = (struct gmap_pgtable *) page->index;
@@ -172,7 +172,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
 		kfree(rmap);
 		break;
 	}
-	*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+	*table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT;
 	return 1;
 }
 
@@ -258,7 +258,7 @@ static int gmap_alloc_table(struct gmap *gmap,
 		return -ENOMEM;
 	new = (unsigned long *) page_to_phys(page);
 	crst_table_init(new, init);
-	if (*table & _REGION_ENTRY_INV) {
+	if (*table & _REGION_ENTRY_INVALID) {
 		list_add(&page->lru, &gmap->crst_list);
 		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
 			(*table & _REGION_ENTRY_TYPE_MASK);
@@ -292,22 +292,22 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
 	for (off = 0; off < len; off += PMD_SIZE) {
 		/* Walk the guest addr space page table */
 		table = gmap->table + (((to + off) >> 53) & 0x7ff);
-		if (*table & _REGION_ENTRY_INV)
+		if (*table & _REGION_ENTRY_INVALID)
 			goto out;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + (((to + off) >> 42) & 0x7ff);
-		if (*table & _REGION_ENTRY_INV)
+		if (*table & _REGION_ENTRY_INVALID)
 			goto out;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + (((to + off) >> 31) & 0x7ff);
-		if (*table & _REGION_ENTRY_INV)
+		if (*table & _REGION_ENTRY_INVALID)
 			goto out;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + (((to + off) >> 20) & 0x7ff);
 
 		/* Clear segment table entry in guest address space. */
 		flush |= gmap_unlink_segment(gmap, table);
-		*table = _SEGMENT_ENTRY_INV;
+		*table = _SEGMENT_ENTRY_INVALID;
 	}
 out:
 	spin_unlock(&gmap->mm->page_table_lock);
@@ -345,17 +345,17 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 	for (off = 0; off < len; off += PMD_SIZE) {
 		/* Walk the gmap address space page table */
 		table = gmap->table + (((to + off) >> 53) & 0x7ff);
-		if ((*table & _REGION_ENTRY_INV) &&
+		if ((*table & _REGION_ENTRY_INVALID) &&
 		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
 			goto out_unmap;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + (((to + off) >> 42) & 0x7ff);
-		if ((*table & _REGION_ENTRY_INV) &&
+		if ((*table & _REGION_ENTRY_INVALID) &&
 		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
			goto out_unmap;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + (((to + off) >> 31) & 0x7ff);
-		if ((*table & _REGION_ENTRY_INV) &&
+		if ((*table & _REGION_ENTRY_INVALID) &&
 		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
 			goto out_unmap;
 		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
@@ -363,7 +363,8 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 
 		/* Store 'from' address in an invalid segment table entry. */
 		flush |= gmap_unlink_segment(gmap, table);
-		*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
+		*table = (from + off) | (_SEGMENT_ENTRY_INVALID |
+					 _SEGMENT_ENTRY_PROTECT);
 	}
 	spin_unlock(&gmap->mm->page_table_lock);
 	up_read(&gmap->mm->mmap_sem);
@@ -384,15 +385,15 @@ static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
 	unsigned long *table;
 
 	table = gmap->table + ((address >> 53) & 0x7ff);
-	if (unlikely(*table & _REGION_ENTRY_INV))
+	if (unlikely(*table & _REGION_ENTRY_INVALID))
 		return ERR_PTR(-EFAULT);
 	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 	table = table + ((address >> 42) & 0x7ff);
-	if (unlikely(*table & _REGION_ENTRY_INV))
+	if (unlikely(*table & _REGION_ENTRY_INVALID))
 		return ERR_PTR(-EFAULT);
 	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 	table = table + ((address >> 31) & 0x7ff);
-	if (unlikely(*table & _REGION_ENTRY_INV))
+	if (unlikely(*table & _REGION_ENTRY_INVALID))
 		return ERR_PTR(-EFAULT);
 	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 	table = table + ((address >> 20) & 0x7ff);
@@ -422,11 +423,11 @@ unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
 		return PTR_ERR(segment_ptr);
 	/* Convert the gmap address to an mm address. */
 	segment = *segment_ptr;
-	if (!(segment & _SEGMENT_ENTRY_INV)) {
+	if (!(segment & _SEGMENT_ENTRY_INVALID)) {
 		page = pfn_to_page(segment >> PAGE_SHIFT);
 		mp = (struct gmap_pgtable *) page->index;
 		return mp->vmaddr | (address & ~PMD_MASK);
-	} else if (segment & _SEGMENT_ENTRY_RO) {
+	} else if (segment & _SEGMENT_ENTRY_PROTECT) {
 		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
 		return vmaddr | (address & ~PMD_MASK);
 	}
@@ -517,8 +518,8 @@ static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 	mp = (struct gmap_pgtable *) page->index;
 	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
-		*rmap->entry =
-			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+		*rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID |
+					     _SEGMENT_ENTRY_PROTECT);
 		list_del(&rmap->list);
 		kfree(rmap);
 		flush = 1;
@@ -545,13 +546,13 @@ unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
 	/* Convert the gmap address to an mm address. */
 	while (1) {
 		segment = *segment_ptr;
-		if (!(segment & _SEGMENT_ENTRY_INV)) {
+		if (!(segment & _SEGMENT_ENTRY_INVALID)) {
 			/* Page table is present */
 			page = pfn_to_page(segment >> PAGE_SHIFT);
 			mp = (struct gmap_pgtable *) page->index;
 			return mp->vmaddr | (address & ~PMD_MASK);
 		}
-		if (!(segment & _SEGMENT_ENTRY_RO))
+		if (!(segment & _SEGMENT_ENTRY_PROTECT))
 			/* Nothing mapped in the gmap address space. */
 			break;
 		rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
@@ -586,25 +587,25 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
 	while (address < to) {
 		/* Walk the gmap address space page table */
 		table = gmap->table + ((address >> 53) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INV)) {
+		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
 			address = (address + PMD_SIZE) & PMD_MASK;
 			continue;
 		}
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + ((address >> 42) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INV)) {
+		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
 			address = (address + PMD_SIZE) & PMD_MASK;
 			continue;
 		}
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + ((address >> 31) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INV)) {
+		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
 			address = (address + PMD_SIZE) & PMD_MASK;
 			continue;
 		}
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + ((address >> 20) & 0x7ff);
-		if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
+		if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
 			address = (address + PMD_SIZE) & PMD_MASK;
 			continue;
 		}
@@ -687,7 +688,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
 			continue;
 		/* Set notification bit in the pgste of the pte */
 		entry = *ptep;
-		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_RO)) == 0) {
+		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
 			pgste = pgste_get_lock(ptep);
 			pgste_val(pgste) |= PGSTE_IN_BIT;
 			pgste_set_unlock(ptep, pgste);
@@ -757,8 +758,9 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
 	page->index = (unsigned long) mp;
 	atomic_set(&page->_mapcount, 0);
 	table = (unsigned long *) page_to_phys(page);
-	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
-	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+	clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
+	clear_table(table + PTRS_PER_PTE, PGSTE_HR_BIT | PGSTE_HC_BIT,
+		    PAGE_SIZE/2);
 	return table;
 }
 
@@ -796,26 +798,21 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 	pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
 	pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
 	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
-		unsigned long address, bits;
-		unsigned char skey;
+		unsigned long address, bits, skey;
 
 		address = pte_val(*ptep) & PAGE_MASK;
-		skey = page_get_storage_key(address);
+		skey = (unsigned long) page_get_storage_key(address);
 		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
+		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
 		/* Set storage key ACC and FP */
-		page_set_storage_key(address,
-				     (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)),
-				     !nq);
-
+		page_set_storage_key(address, skey, !nq);
 		/* Merge host changed & referenced into pgste */
 		pgste_val(new) |= bits << 52;
-		/* Transfer skey changed & referenced bit to kvm user bits */
-		pgste_val(new) |= bits << 45; /* PGSTE_UR_BIT & PGSTE_UC_BIT */
 	}
 	/* changing the guest storage key is considered a change of the page */
 	if ((pgste_val(new) ^ pgste_val(old)) &
 	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
-		pgste_val(new) |= PGSTE_UC_BIT;
+		pgste_val(new) |= PGSTE_HC_BIT;
 
 	pgste_set_unlock(ptep, new);
 	pte_unmap_unlock(*ptep, ptl);
@@ -888,7 +885,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
 		pgtable_page_ctor(page);
 		atomic_set(&page->_mapcount, 1);
 		table = (unsigned long *) page_to_phys(page);
-		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
+		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
 		spin_lock_bh(&mm->context.list_lock);
 		list_add(&page->lru, &mm->context.pgtable_list);
 	} else {
@@ -1017,7 +1014,6 @@ void tlb_table_flush(struct mmu_gather *tlb)
 	struct mmu_table_batch **batch = &tlb->batch;
 
 	if (*batch) {
-		__tlb_flush_mm(tlb->mm);
 		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
 		*batch = NULL;
 	}
@@ -1027,11 +1023,12 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 {
 	struct mmu_table_batch **batch = &tlb->batch;
 
+	tlb->mm->context.flush_mm = 1;
 	if (*batch == NULL) {
 		*batch = (struct mmu_table_batch *)
 			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
 		if (*batch == NULL) {
-			__tlb_flush_mm(tlb->mm);
+			__tlb_flush_mm_lazy(tlb->mm);
 			tlb_remove_table_one(table);
 			return;
 		}
@@ -1039,7 +1036,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 	}
 	(*batch)->tables[(*batch)->nr++] = table;
 	if ((*batch)->nr == MAX_TABLE_BATCH)
-		tlb_table_flush(tlb);
+		tlb_flush_mmu(tlb);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -1171,9 +1168,9 @@ int s390_enable_sie(void)
 	thp_split_mm(mm);
 	/* Reallocate the page tables with pgstes */
 	mm->context.has_pgste = 1;
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
 	page_table_realloc(&tlb, mm, 0, TASK_SIZE);
-	tlb_finish_mmu(&tlb, 0, -1);
+	tlb_finish_mmu(&tlb, 0, TASK_SIZE);
 	up_write(&mm->mmap_sem);
 	return mm->context.has_pgste ? 0 : -ENOMEM;
 }
@@ -1251,9 +1248,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
 		list_del(lh);
 	}
 	ptep = (pte_t *) pgtable;
-	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	pte_val(*ptep) = _PAGE_INVALID;
 	ptep++;
-	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	pte_val(*ptep) = _PAGE_INVALID;
 	return pgtable;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
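The gmap hunks above all repeat one walk pattern: each of the four table levels is indexed by an 11-bit slice of the guest address (2048 entries per table, hence the 0x7ff masks), and a set _REGION_ENTRY_INVALID or _SEGMENT_ENTRY_INVALID bit ends the walk. The index computation in isolation, runnable in user space, with the shift values taken straight from the hunks:

	#include <stdio.h>

	/* 11-bit table index for one level of the gmap walk, as computed in
	 * gmap_table_walk(), gmap_map_segment() and gmap_discard() above. */
	static unsigned int gmap_index(unsigned long addr, unsigned int shift)
	{
		return (addr >> shift) & 0x7ff;
	}

	int main(void)
	{
		static const unsigned int shifts[] = { 53, 42, 31, 20 };
		unsigned long addr = 0x0000004512345678UL; /* arbitrary example */
		unsigned int i;

		for (i = 0; i < 4; i++)
			printf("level %u: index %u\n", i + 1,
			       gmap_index(addr, shifts[i]));
		return 0;
	}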
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 8b268fcc461..bcfb70b60be 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -69,7 +69,7 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address)
 		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
 	if (!pte)
 		return NULL;
-	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
+	clear_table((unsigned long *) pte, _PAGE_INVALID,
 		    PTRS_PER_PTE * sizeof(pte_t));
 	return pte;
 }
@@ -101,7 +101,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
 			pud_val(*pu_dir) = __pa(address) |
 				_REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
-				(ro ? _REGION_ENTRY_RO : 0);
+				(ro ? _REGION_ENTRY_PROTECT : 0);
 			address += PUD_SIZE;
 			continue;
 		}
@@ -118,7 +118,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
 			pmd_val(*pm_dir) = __pa(address) |
 				_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
-				(ro ? _SEGMENT_ENTRY_RO : 0);
+				_SEGMENT_ENTRY_YOUNG |
+				(ro ? _SEGMENT_ENTRY_PROTECT : 0);
 			address += PMD_SIZE;
 			continue;
 		}
@@ -131,7 +132,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 		}
 
 		pt_dir = pte_offset_kernel(pm_dir, address);
-		pte_val(*pt_dir) = __pa(address) | (ro ? _PAGE_RO : 0);
+		pte_val(*pt_dir) = __pa(address) |
+			pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
 		address += PAGE_SIZE;
 	}
 	ret = 0;
@@ -154,7 +156,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 	pte_t *pt_dir;
 	pte_t  pte;
 
-	pte_val(pte) = _PAGE_TYPE_EMPTY;
+	pte_val(pte) = _PAGE_INVALID;
 	while (address < end) {
 		pg_dir = pgd_offset_k(address);
 		if (pgd_none(*pg_dir)) {
@@ -255,7 +257,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 			new_page =__pa(vmem_alloc_pages(0));
 			if (!new_page)
 				goto out;
-			pte_val(*pt_dir) = __pa(new_page);
+			pte_val(*pt_dir) =
+				__pa(new_page) | pgprot_val(PAGE_KERNEL);
 		}
 		address += PAGE_SIZE;
 	}
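The vmem.c hunks make the same change at the 4K level: instead of OR-ing a single read-only bit into the pte, the kernel mapping now takes its complete protection value from pgprot_val(PAGE_KERNEL) or pgprot_val(PAGE_KERNEL_RO), which carries the full set of pte attribute bits. A compact sketch of the construction; the PROT_* values are hypothetical placeholders, not the real pgprot contents:

	#include <stdio.h>

	#define PROT_KERNEL	0x000UL	/* stand-in for pgprot_val(PAGE_KERNEL) */
	#define PROT_KERNEL_RO	0x200UL	/* stand-in for pgprot_val(PAGE_KERNEL_RO) */

	/* Sketch of the pte construction in vmem_add_mem() in this patch. */
	static unsigned long mk_kernel_pte(unsigned long phys, int ro)
	{
		return phys | (ro ? PROT_KERNEL_RO : PROT_KERNEL);
	}

	int main(void)
	{
		printf("ro pte: %#lx\n", mk_kernel_pte(0x100000UL, 1));
		printf("rw pte: %#lx\n", mk_kernel_pte(0x200000UL, 0));
		return 0;
	}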