author | Heiko Carstens <heiko.carstens@de.ibm.com> | 2008-05-15 16:52:31 +0200 |
---|---|---|
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2008-05-15 16:52:38 +0200 |
commit | 2069e978d5a6e7b45d58027e3de7f879b8c5e488 (patch) | |
tree | e2fba2169e6d745b4cdb2e252b66dcaaacdadaeb /arch/s390/mm/vmem.c | |
parent | e0a45ee0b922b998f8d6737cf6e9e69a791252b7 (diff) |
[S390] sparsemem vmemmap: initialize memmap.
Let's just use the generic vmemmap_alloc_block() function, which
always returns initialized memory.
Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
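For context, the generic helper already handles the bootmem-versus-slab split that the removed s390-private vmem_alloc_pages() duplicated, and it hands back zeroed memory on both paths. The following is a simplified sketch of what mm/sparse-vmemmap.c's vmemmap_alloc_block() does around this kernel version, not the literal upstream source:

```c
/*
 * Simplified sketch of the generic allocator this patch switches to.
 * The point relevant to this commit: the page-allocator path passes
 * __GFP_ZERO and the bootmem path also returns zero-filled memory,
 * so callers always see an initialized block.
 */
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* Once the slab/page allocator is up, use it ... */
	if (slab_is_available()) {
		struct page *page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO, get_order(size));
		if (page)
			return page_address(page);
		return NULL;
	}
	/* ... otherwise fall back to (zero-filled) bootmem. */
	return __alloc_bootmem_node(NODE_DATA(node), size, size,
				    __pa(MAX_DMA_ADDRESS));
}
```

The allocation sizes are unchanged by the conversion: the old vmem_alloc_pages(2) returned an order-2 block, i.e. 4 pages, which matches the PAGE_SIZE * 4 now requested for the pud/pmd tables, and order 0 corresponds to the single PAGE_SIZE request in vmemmap_populate().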
Diffstat (limited to 'arch/s390/mm/vmem.c')
-rw-r--r-- | arch/s390/mm/vmem.c | 19 |
1 file changed, 6 insertions, 13 deletions
```diff
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index beccacf907f..ea2804808f3 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -27,19 +27,12 @@ struct memory_segment {
 
 static LIST_HEAD(mem_segs);
 
-static void __ref *vmem_alloc_pages(unsigned int order)
-{
-	if (slab_is_available())
-		return (void *)__get_free_pages(GFP_KERNEL, order);
-	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
-}
-
-static inline pud_t *vmem_pud_alloc(void)
+static pud_t *vmem_pud_alloc(void)
 {
 	pud_t *pud = NULL;
 
 #ifdef CONFIG_64BIT
-	pud = vmem_alloc_pages(2);
+	pud = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
 	if (!pud)
 		return NULL;
 	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -47,12 +40,12 @@ static inline pud_t *vmem_pud_alloc(void)
 	return pud;
 }
 
-static inline pmd_t *vmem_pmd_alloc(void)
+static pmd_t *vmem_pmd_alloc(void)
 {
 	pmd_t *pmd = NULL;
 
 #ifdef CONFIG_64BIT
-	pmd = vmem_alloc_pages(2);
+	pmd = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
 	if (!pmd)
 		return NULL;
 	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -60,7 +53,7 @@ static inline pmd_t *vmem_pmd_alloc(void)
 	return pmd;
 }
 
-static pte_t __init_refok *vmem_pte_alloc(void)
+static pte_t __ref *vmem_pte_alloc(void)
 {
 	pte_t *pte;
 
@@ -214,7 +207,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 		if (pte_none(*pt_dir)) {
 			unsigned long new_page;
 
-			new_page =__pa(vmem_alloc_pages(0));
+			new_page =__pa(vmemmap_alloc_block(PAGE_SIZE, 0));
 			if (!new_page)
 				goto out;
 			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
```