Diffstat (limited to 'arch/s390/mm')
-rw-r--r-- | arch/s390/mm/init.c    | 49
-rw-r--r-- | arch/s390/mm/pgtable.c | 44
-rw-r--r-- | arch/s390/mm/vmem.c    |  5
3 files changed, 53 insertions, 45 deletions
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 29f3a63806b..05598649b32 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -44,37 +44,34 @@ char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
 
 void show_mem(void)
 {
-	int i, total = 0, reserved = 0;
-	int shared = 0, cached = 0;
+	unsigned long i, total = 0, reserved = 0;
+	unsigned long shared = 0, cached = 0;
+	unsigned long flags;
 	struct page *page;
+	pg_data_t *pgdat;
 
 	printk("Mem-info:\n");
 	show_free_areas();
-	i = max_mapnr;
-	while (i-- > 0) {
-		if (!pfn_valid(i))
-			continue;
-		page = pfn_to_page(i);
-		total++;
-		if (PageReserved(page))
-			reserved++;
-		else if (PageSwapCache(page))
-			cached++;
-		else if (page_count(page))
-			shared += page_count(page) - 1;
+	for_each_online_pgdat(pgdat) {
+		pgdat_resize_lock(pgdat, &flags);
+		for (i = 0; i < pgdat->node_spanned_pages; i++) {
+			if (!pfn_valid(pgdat->node_start_pfn + i))
+				continue;
+			page = pfn_to_page(pgdat->node_start_pfn + i);
+			total++;
+			if (PageReserved(page))
+				reserved++;
+			else if (PageSwapCache(page))
+				cached++;
+			else if (page_count(page))
+				shared += page_count(page) - 1;
+		}
+		pgdat_resize_unlock(pgdat, &flags);
 	}
-	printk("%d pages of RAM\n", total);
-	printk("%d reserved pages\n", reserved);
-	printk("%d pages shared\n", shared);
-	printk("%d pages swap cached\n", cached);
-
-	printk("%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
-	printk("%lu pages writeback\n", global_page_state(NR_WRITEBACK));
-	printk("%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
-	printk("%lu pages slab\n",
-	       global_page_state(NR_SLAB_RECLAIMABLE) +
-	       global_page_state(NR_SLAB_UNRECLAIMABLE));
-	printk("%lu pages pagetables\n", global_page_state(NR_PAGETABLE));
+	printk("%ld pages of RAM\n", total);
+	printk("%ld reserved pages\n", reserved);
+	printk("%ld pages shared\n", shared);
+	printk("%ld pages swap cached\n", cached);
 }
 
 /*
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 5c1aea97cd1..3d98ba82ea6 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -254,36 +254,46 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
 int s390_enable_sie(void)
 {
 	struct task_struct *tsk = current;
-	struct mm_struct *mm;
-	int rc;
+	struct mm_struct *mm, *old_mm;
 
-	task_lock(tsk);
-
-	rc = 0;
+	/* Do we have pgstes? if yes, we are done */
 	if (tsk->mm->context.pgstes)
-		goto unlock;
+		return 0;
 
-	rc = -EINVAL;
+	/* lets check if we are allowed to replace the mm */
+	task_lock(tsk);
 	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
-	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list)
-		goto unlock;
+	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+		task_unlock(tsk);
+		return -EINVAL;
+	}
+	task_unlock(tsk);
 
-	tsk->mm->context.pgstes = 1;	/* dirty little tricks .. */
+	/* we copy the mm with pgstes enabled */
+	tsk->mm->context.pgstes = 1;
 	mm = dup_mm(tsk);
 	tsk->mm->context.pgstes = 0;
-
-	rc = -ENOMEM;
 	if (!mm)
-		goto unlock;
-	mmput(tsk->mm);
+		return -ENOMEM;
+
+	/* Now lets check again if somebody attached ptrace etc */
+	task_lock(tsk);
+	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
+	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+		mmput(mm);
+		task_unlock(tsk);
+		return -EINVAL;
+	}
+
+	/* ok, we are alone. No ptrace, no threads, etc. */
+	old_mm = tsk->mm;
 	tsk->mm = tsk->active_mm = mm;
 	preempt_disable();
 	update_mm(mm, tsk);
 	cpu_set(smp_processor_id(), mm->cpu_vm_mask);
 	preempt_enable();
-	rc = 0;
-unlock:
 	task_unlock(tsk);
-	return rc;
+	mmput(old_mm);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(s390_enable_sie);
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index beccacf907f..e4868bfc672 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -60,7 +60,7 @@ static inline pmd_t *vmem_pmd_alloc(void)
 	return pmd;
 }
 
-static pte_t __init_refok *vmem_pte_alloc(void)
+static pte_t __ref *vmem_pte_alloc(void)
 {
 	pte_t *pte;
 
@@ -221,6 +221,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 			*pt_dir = pte;
 		}
 	}
+	memset(start, 0, nr * sizeof(struct page));
 	ret = 0;
 out:
 	flush_tlb_kernel_range(start_addr, end_addr);
@@ -235,7 +236,7 @@ static int insert_memory_segment(struct memory_segment *seg)
 {
 	struct memory_segment *tmp;
 
-	if (seg->start + seg->size >= VMEM_MAX_PHYS ||
+	if (seg->start + seg->size > VMEM_MAX_PHYS ||
 	    seg->start + seg->size < seg->start)
 		return -ERANGE;
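
The s390_enable_sie() rework above replaces the single task_lock() critical section with a check / unlock / copy / re-lock / re-check sequence, so that dup_mm() does not run with the task lock held; the second check catches anyone (ptrace, a new thread) who attached to the old mm while it was being copied. A minimal user-space sketch of that pattern, assuming pthreads in place of task_lock()/task_unlock() and entirely hypothetical names (struct resource, make_copy, enable_feature) in place of the mm-specific ones:

/*
 * Stand-alone illustration of the locking pattern used by the reworked
 * s390_enable_sie(): check the preconditions under the lock, drop the
 * lock for the expensive copy (dup_mm in the patch), then re-take the
 * lock and re-check before committing the replacement.  Not kernel
 * code; all names here are made up for the example.
 */
#include <pthread.h>
#include <stdlib.h>

struct resource {
	int users;		/* stands in for mm_users */
	int has_feature;	/* stands in for mm->context.pgstes */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct resource *cur;

static struct resource *make_copy(const struct resource *old)
{
	/* the expensive step, done without holding the lock */
	struct resource *new = malloc(sizeof(*new));

	if (new) {
		*new = *old;
		new->has_feature = 1;
	}
	return new;
}

int enable_feature(void)
{
	struct resource *old, *new;

	/* Fast path: already enabled, nothing to do. */
	if (cur->has_feature)
		return 0;

	/* First check: are we allowed to replace the resource at all? */
	pthread_mutex_lock(&lock);
	if (cur->users > 1) {
		pthread_mutex_unlock(&lock);
		return -1;
	}
	pthread_mutex_unlock(&lock);

	new = make_copy(cur);
	if (!new)
		return -1;

	/* Re-check: another user may have attached while we copied. */
	pthread_mutex_lock(&lock);
	if (cur->users > 1) {
		pthread_mutex_unlock(&lock);
		free(new);
		return -1;
	}
	old = cur;
	cur = new;			/* commit the replacement */
	pthread_mutex_unlock(&lock);

	free(old);			/* drop the old copy last */
	return 0;
}

int main(void)
{
	cur = calloc(1, sizeof(*cur));
	if (!cur)
		return 1;
	cur->users = 1;
	return enable_feature() ? 1 : 0;
}

Without the second check, the swap would silently discard whatever attached to the old resource between the unlock and the commit, which is the race the "check again if somebody attached ptrace etc" block in the patch guards against.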