Diffstat (limited to 'arch/xtensa/mm')
-rw-r--r--   arch/xtensa/mm/Makefile |  3
-rw-r--r--   arch/xtensa/mm/init.c   | 78
-rw-r--r--   arch/xtensa/mm/misc.S   |  2
-rw-r--r--   arch/xtensa/mm/mmu.c    | 70
4 files changed, 84 insertions, 69 deletions
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
index 64e304a2f88..f0b646d2f84 100644
--- a/arch/xtensa/mm/Makefile
+++ b/arch/xtensa/mm/Makefile
@@ -2,4 +2,5 @@
 # Makefile for the Linux/Xtensa-specific parts of the memory manager.
 #
 
-obj-y	:= init.o fault.o tlb.o misc.o cache.o
+obj-y			:= init.o cache.o misc.o
+obj-$(CONFIG_MMU)	+= fault.o mmu.o tlb.o
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 34163cfaaff..427e14fa43c 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -24,15 +24,8 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 
-#include <asm/pgtable.h>
 #include <asm/bootparam.h>
-#include <asm/mmu_context.h>
-#include <asm/tlb.h>
 #include <asm/page.h>
-#include <asm/pgalloc.h>
-
-
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 /* References to section boundaries */
 
@@ -130,7 +123,8 @@ void __init bootmem_init(void)
 
 	/* Find an area to use for the bootmem bitmap. */
 
-	bootmap_size = bootmem_bootmap_pages(max_low_pfn) << PAGE_SHIFT;
+	bootmap_size = bootmem_bootmap_pages(max_low_pfn - min_low_pfn);
+	bootmap_size <<= PAGE_SHIFT;
 	bootmap_start = ~0;
 
 	for (i=0; i<sysmem.nr_banks; i++)
@@ -145,8 +139,9 @@ void __init bootmem_init(void)
 
 	/* Reserve the bootmem bitmap area */
 
 	mem_reserve(bootmap_start, bootmap_start + bootmap_size, 1);
-	bootmap_size = init_bootmem_node(NODE_DATA(0), min_low_pfn,
+	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_start >> PAGE_SHIFT,
+					 min_low_pfn,
 					 max_low_pfn);
 
 	/* Add all remaining memory pieces into the bootmem map */
@@ -158,14 +153,14 @@ void __init bootmem_init(void)
 }
 
 
-void __init paging_init(void)
+void __init zones_init(void)
 {
 	unsigned long zones_size[MAX_NR_ZONES];
 	int i;
 
 	/* All pages are DMA-able, so we put them all in the DMA zone. */
 
-	zones_size[ZONE_DMA] = max_low_pfn;
+	zones_size[ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET;
 	for (i = 1; i < MAX_NR_ZONES; i++)
 		zones_size[i] = 0;
 
@@ -173,40 +168,7 @@ void __init paging_init(void)
 	zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
 #endif
 
-	/* Initialize the kernel's page tables. */
-
-	memset(swapper_pg_dir, 0, PAGE_SIZE);
-
-	free_area_init(zones_size);
-}
-
-/*
- * Flush the mmu and reset associated register to default values.
- */
-
-void __init init_mmu (void)
-{
-	/* Writing zeros to the <t>TLBCFG special registers ensure
-	 * that valid values exist in the register. For existing
-	 * PGSZID<w> fields, zero selects the first element of the
-	 * page-size array. For nonexistent PGSZID<w> fields, zero is
-	 * the best value to write. Also, when changing PGSZID<w>
-	 * fields, the corresponding TLB must be flushed.
-	 */
-	set_itlbcfg_register (0);
-	set_dtlbcfg_register (0);
-	flush_tlb_all ();
-
-	/* Set rasid register to a known value. */
-
-	set_rasid_register (ASID_USER_FIRST);
-
-	/* Set PTEVADDR special register to the start of the page
-	 * table, which is in kernel mappable space (ie. not
-	 * statically mapped). This register's value is undefined on
-	 * reset.
-	 */
-	set_ptevaddr_register (PGTABLE_START);
+	free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
 }
 
 /*
@@ -218,8 +180,8 @@ void __init mem_init(void)
 {
 	unsigned long codesize, reservedpages, datasize, initsize;
 	unsigned long highmemsize, tmp, ram;
-	max_mapnr = num_physpages = max_low_pfn;
-	high_memory = (void *) __va(max_mapnr << PAGE_SHIFT);
+	max_mapnr = num_physpages = max_low_pfn - ARCH_PFN_OFFSET;
+	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
 	highmemsize = 0;
 
 #ifdef CONFIG_HIGHMEM
@@ -229,7 +191,7 @@ void __init mem_init(void)
 	totalram_pages += free_all_bootmem();
 
 	reservedpages = ram = 0;
-	for (tmp = 0; tmp < max_low_pfn; tmp++) {
+	for (tmp = 0; tmp < max_mapnr; tmp++) {
 		ram++;
 		if (PageReserved(mem_map+tmp))
 			reservedpages++;
@@ -279,23 +241,3 @@ void free_initmem(void)
 	printk("Freeing unused kernel memory: %dk freed\n",
 	       (&__init_end - &__init_begin) >> 10);
 }
-
-struct kmem_cache *pgtable_cache __read_mostly;
-
-static void pgd_ctor(void* addr)
-{
-	pte_t* ptep = (pte_t*)addr;
-	int i;
-
-	for (i = 0; i < 1024; i++, ptep++)
-		pte_clear(NULL, 0, ptep);
-
-}
-
-void __init pgtable_cache_init(void)
-{
-	pgtable_cache = kmem_cache_create("pgd",
-					  PAGE_SIZE, PAGE_SIZE,
-					  SLAB_HWCACHE_ALIGN,
-					  pgd_ctor);
-}
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
index c885664211d..b048406d875 100644
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -84,6 +84,7 @@ ENTRY(copy_page)
 
 	retw
 
+#ifdef CONFIG_MMU
 /*
  * If we have to deal with cache aliasing, we use temporary memory mappings
  * to ensure that the source and destination pages have the same color as
@@ -311,6 +312,7 @@ ENTRY(__invalidate_icache_page_alias)
 /* End of special treatment in tlb miss exception */
 
 ENTRY(__tlbtemp_mapping_end)
+#endif /* CONFIG_MMU */
 
 /*
  * void __invalidate_icache_page(ulong start)
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
new file mode 100644
index 00000000000..4bb91a970f1
--- /dev/null
+++ b/arch/xtensa/mm/mmu.c
@@ -0,0 +1,70 @@
+/*
+ * xtensa mmu stuff
+ *
+ * Extracted from init.c
+ */
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/cache.h>
+
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+void __init paging_init(void)
+{
+	memset(swapper_pg_dir, 0, PAGE_SIZE);
+}
+
+/*
+ * Flush the mmu and reset associated register to default values.
+ */
+void __init init_mmu(void)
+{
+	/* Writing zeros to the <t>TLBCFG special registers ensure
+	 * that valid values exist in the register. For existing
+	 * PGSZID<w> fields, zero selects the first element of the
+	 * page-size array. For nonexistent PGSZID<w> fields, zero is
+	 * the best value to write. Also, when changing PGSZID<w>
+	 * fields, the corresponding TLB must be flushed.
+	 */
+	set_itlbcfg_register(0);
+	set_dtlbcfg_register(0);
+	flush_tlb_all();
+
+	/* Set rasid register to a known value. */
+
+	set_rasid_register(ASID_USER_FIRST);
+
+	/* Set PTEVADDR special register to the start of the page
+	 * table, which is in kernel mappable space (ie. not
+	 * statically mapped). This register's value is undefined on
+	 * reset.
+	 */
+	set_ptevaddr_register(PGTABLE_START);
+}
+
+struct kmem_cache *pgtable_cache __read_mostly;
+
+static void pgd_ctor(void *addr)
+{
+	pte_t *ptep = (pte_t *)addr;
+	int i;
+
+	for (i = 0; i < 1024; i++, ptep++)
+		pte_clear(NULL, 0, ptep);
+
+}
+
+void __init pgtable_cache_init(void)
+{
+	pgtable_cache = kmem_cache_create("pgd",
+					  PAGE_SIZE, PAGE_SIZE,
+					  SLAB_HWCACHE_ALIGN,
+					  pgd_ctor);
+}