From 9bc974b927ad7f07206b8db9b36a29a146c1cfd1 Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Mon, 6 Feb 2012 23:23:37 +0900 Subject: microblaze: Fix typo in early_printk.c Correct spelling "remaping" to "remapping" in arch/microblaze/kernel/early_printk.c Signed-off-by: Masanari Iida Signed-off-by: Michal Simek --- arch/microblaze/kernel/early_printk.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c index 8356e47631c..742c247792c 100644 --- a/arch/microblaze/kernel/early_printk.c +++ b/arch/microblaze/kernel/early_printk.c @@ -171,7 +171,7 @@ void __init remap_early_printk(void) { if (!early_console_initialized || !early_console) return; - printk(KERN_INFO "early_printk_console remaping from 0x%x to ", + printk(KERN_INFO "early_printk_console remapping from 0x%x to ", base_addr); base_addr = (u32) ioremap(base_addr, PAGE_SIZE); printk(KERN_CONT "0x%x\n", base_addr); -- cgit v1.2.3-70-g09d2 From 00708d421a22a0f82de2dbb91ca6213b3dcc5267 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 5 Mar 2012 15:53:19 +0100 Subject: microblaze: Fix makefile to work with latest toolchain When building with latest binutils, vmlinux includes some sections which need to be stripped out when building the binary image. Signed-off-by: Michal Simek --- arch/microblaze/boot/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile index 0c796cf8158..34940c828de 100644 --- a/arch/microblaze/boot/Makefile +++ b/arch/microblaze/boot/Makefile @@ -8,7 +8,7 @@ obj-y += linked_dtb.o targets := linux.bin linux.bin.gz simpleImage.% -OBJCOPYFLAGS := -O binary +OBJCOPYFLAGS := -R .note -R .comment -R .note.gnu.build-id -O binary # Ensure system.dtb exists $(obj)/linked_dtb.o: $(obj)/system.dtb -- cgit v1.2.3-70-g09d2 From 2e7ff4784980211187ae9a235ff6cf630e7633c8 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Wed, 22 Feb 2012 13:50:13 +0100 Subject: microblaze: Add PVR version string for MB 8.20.b and 8.30.a Just extend PVR reg decoding. Signed-off-by: Michal Simek --- arch/microblaze/kernel/cpu/cpuinfo.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c index 54194b28574..eab6abf5652 100644 --- a/arch/microblaze/kernel/cpu/cpuinfo.c +++ b/arch/microblaze/kernel/cpu/cpuinfo.c @@ -35,6 +35,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = { {"8.00.b", 0x13}, {"8.10.a", 0x14}, {"8.20.a", 0x15}, + {"8.20.b", 0x16}, + {"8.30.a", 0x17}, {NULL, 0}, }; -- cgit v1.2.3-70-g09d2 From ee19b424b4ba8674a18943bf43d82d026d1e5bed Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 28 Feb 2012 10:49:33 -0800 Subject: microblaze: Use vsprintf extention %pf with builtin_return_address Emit the function name not the address when possible. builtin_return_address() gives an address. When building a kernel with CONFIG_KALLSYMS, emit the actual function name not the address. 
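For illustration only (the caller below is made up and is not part of this patch), the difference at a call site is:

#include <linux/kernel.h>

/* With CONFIG_KALLSYMS, %pf resolves the pointer returned by
 * __builtin_return_address(0) to a symbol name; plain %p would
 * print only the raw address. */
void report_caller(void)
{
	printk(KERN_INFO "called from %pf (raw address %p)\n",
	       __builtin_return_address(0),
	       __builtin_return_address(0));
}
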
Signed-off-by: Joe Perches Signed-off-by: Michal Simek --- arch/microblaze/mm/pgtable.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c index 59bf2335a4c..e3a68bb2da0 100644 --- a/arch/microblaze/mm/pgtable.c +++ b/arch/microblaze/mm/pgtable.c @@ -80,7 +80,7 @@ static void __iomem *__ioremap(phys_addr_t addr, unsigned long size, !(p >= virt_to_phys((unsigned long)&__bss_stop) && p < virt_to_phys((unsigned long)__bss_stop))) { printk(KERN_WARNING "__ioremap(): phys addr "PTE_FMT - " is RAM lr %p\n", (unsigned long)p, + " is RAM lr %pf\n", (unsigned long)p, __builtin_return_address(0)); return NULL; } -- cgit v1.2.3-70-g09d2 From f7f4786c7546534ca969cfa5eb753fa97e30c728 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Tue, 5 Apr 2011 15:49:22 +0200 Subject: microblaze: trivial: Fix typo fault in timer.c Signed-off-by: Michal Simek --- arch/microblaze/kernel/timer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c index 3cb0bf64013..78b82f30bdd 100644 --- a/arch/microblaze/kernel/timer.c +++ b/arch/microblaze/kernel/timer.c @@ -79,7 +79,7 @@ static inline void microblaze_timer0_start_periodic(unsigned long load_val) * !PWMA - disable pwm * TINT - clear interrupt status * ENT- enable timer itself - * EINT - enable interrupt + * ENIT - enable interrupt * !LOAD - clear the bit to let go * ARHT - auto reload * !CAPT - no external trigger -- cgit v1.2.3-70-g09d2 From 4e2e4124b7fe68b28e9f759b7ecc0ec16307fce6 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 15 Dec 2011 09:24:06 +0100 Subject: microblaze: mm: Use ZONE_DMA instead of ZONE_NORMAL We should use ZONE_DMA because all address space is dma-able. Signed-off-by: Michal Simek --- arch/microblaze/Kconfig | 3 +++ arch/microblaze/mm/init.c | 6 +----- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index c8d6efb99db..8e9da3d4281 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -27,6 +27,9 @@ config SWAP config RWSEM_GENERIC_SPINLOCK def_bool y +config ZONE_DMA + def_bool y + config RWSEM_XCHGADD_ALGORITHM bool diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 565d193c7eb..a72f42498c2 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -58,11 +58,7 @@ static void __init paging_init(void) /* Clean every zones */ memset(zones_size, 0, sizeof(zones_size)); - /* - * old: we can DMA to/from any address.put all page into ZONE_DMA - * We use only ZONE_NORMAL - */ - zones_size[ZONE_NORMAL] = max_mapnr; + zones_size[ZONE_DMA] = max_mapnr; free_area_init(zones_size); } -- cgit v1.2.3-70-g09d2 From 83a92529c1789f86481190743a6bb09f31ec39a8 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 19 Dec 2011 13:46:35 +0100 Subject: microblaze: mm: Fix lowmem max memory size limits Use CONFIG_LOWMEM_SIZE if system has larger ram size. For system with larger ram size, enable HIGMEM support. Also setup limitation for memblock and use memblock allocation in lowmem region. 
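In outline, the change keeps only the first CONFIG_LOWMEM_SIZE bytes of RAM permanently mapped and caps early memblock allocations to that region. The sketch below condenses the mmu_init()/setup_memory() hunks that follow; the helper name is illustrative only:

#include <linux/memblock.h>

/* Sketch of the lowmem/total-memory split done in mmu_init() below. */
static void __init split_lowmem(void)
{
	memory_start = (u32) memblock.memory.regions[0].base;
	lowmem_size = memory_size = (u32) memblock.memory.regions[0].size;

	if (lowmem_size > CONFIG_LOWMEM_SIZE) {
		lowmem_size = CONFIG_LOWMEM_SIZE;
		/* truncated here; HIGHMEM support comes later in this series */
		memory_size = lowmem_size;
	}

	/* keep early allocations (e.g. the unflattened DT) inside the mapping */
	memblock_set_current_limit(memory_start + lowmem_size - 1);
}
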
Signed-off-by: Michal Simek --- arch/microblaze/include/asm/page.h | 1 - arch/microblaze/include/asm/pgtable.h | 3 +- arch/microblaze/include/asm/uaccess.h | 2 +- arch/microblaze/mm/init.c | 67 ++++++++++++++++++++++------------- arch/microblaze/mm/pgtable.c | 7 +--- 5 files changed, 46 insertions(+), 34 deletions(-) diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h index a25e6b5e2ad..665f29330ce 100644 --- a/arch/microblaze/include/asm/page.h +++ b/arch/microblaze/include/asm/page.h @@ -135,7 +135,6 @@ extern unsigned long min_low_pfn; extern unsigned long max_pfn; extern unsigned long memory_start; -extern unsigned long memory_end; extern unsigned long memory_size; extern int page_is_ram(unsigned long pfn); diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h index b2af42311a1..d8f2c3c68d3 100644 --- a/arch/microblaze/include/asm/pgtable.h +++ b/arch/microblaze/include/asm/pgtable.h @@ -94,8 +94,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } /* Start and end of the vmalloc area. */ /* Make sure to map the vmalloc area above the pinned kernel memory area of 32Mb. */ -#define VMALLOC_START (CONFIG_KERNEL_START + \ - max(32 * 1024 * 1024UL, memory_size)) +#define VMALLOC_START (CONFIG_KERNEL_START + CONFIG_LOWMEM_SIZE) #define VMALLOC_END ioremap_bot #endif /* __ASSEMBLY__ */ diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index 072b0077abf..ef25f7538d4 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -80,7 +80,7 @@ extern unsigned long search_exception_table(unsigned long); static inline int ___range_ok(unsigned long addr, unsigned long size) { return ((addr < memory_start) || - ((addr + size) > memory_end)); + ((addr + size - 1) > (memory_start + memory_size - 1))); } #define __range_ok(addr, size) \ diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index a72f42498c2..2253e122aa8 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -44,9 +44,9 @@ char *klimit = _end; */ unsigned long memory_start; EXPORT_SYMBOL(memory_start); -unsigned long memory_end; /* due to mm/nommu.c */ unsigned long memory_size; EXPORT_SYMBOL(memory_size); +unsigned long lowmem_size; /* * paging_init() sets up the page tables - in fact we've already done this. 
@@ -58,7 +58,7 @@ static void __init paging_init(void) /* Clean every zones */ memset(zones_size, 0, sizeof(zones_size)); - zones_size[ZONE_DMA] = max_mapnr; + zones_size[ZONE_DMA] = max_pfn; free_area_init(zones_size); } @@ -74,32 +74,31 @@ void __init setup_memory(void) /* Find main memory where is the kernel */ for_each_memblock(memory, reg) { memory_start = (u32)reg->base; - memory_end = (u32) reg->base + reg->size; + lowmem_size = reg->size; if ((memory_start <= (u32)_text) && - ((u32)_text <= memory_end)) { - memory_size = memory_end - memory_start; + ((u32)_text <= (memory_start + lowmem_size - 1))) { + memory_size = lowmem_size; PAGE_OFFSET = memory_start; - printk(KERN_INFO "%s: Main mem: 0x%x-0x%x, " + printk(KERN_INFO "%s: Main mem: 0x%x, " "size 0x%08x\n", __func__, (u32) memory_start, - (u32) memory_end, (u32) memory_size); + (u32) memory_size); break; } } - if (!memory_start || !memory_end) { - panic("%s: Missing memory setting 0x%08x-0x%08x\n", - __func__, (u32) memory_start, (u32) memory_end); + if (!memory_start || !memory_size) { + panic("%s: Missing memory setting 0x%08x, size=0x%08x\n", + __func__, (u32) memory_start, (u32) memory_size); } /* reservation of region where is the kernel */ kernel_align_start = PAGE_DOWN((u32)_text); /* ALIGN can be remove because _end in vmlinux.lds.S is align */ kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start; - memblock_reserve(kernel_align_start, kernel_align_size); - printk(KERN_INFO "%s: kernel addr=0x%08x-0x%08x size=0x%08x\n", + printk(KERN_INFO "%s: kernel addr:0x%08x-0x%08x size=0x%08x\n", __func__, kernel_align_start, kernel_align_start + kernel_align_size, kernel_align_size); - + memblock_reserve(kernel_align_start, kernel_align_size); #endif /* * Kernel: @@ -116,11 +115,13 @@ void __init setup_memory(void) min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */ /* RAM is assumed contiguous */ num_physpages = max_mapnr = memory_size >> PAGE_SHIFT; - max_pfn = max_low_pfn = memory_end >> PAGE_SHIFT; + max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT; + max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT; printk(KERN_INFO "%s: max_mapnr: %#lx\n", __func__, max_mapnr); printk(KERN_INFO "%s: min_low_pfn: %#lx\n", __func__, min_low_pfn); printk(KERN_INFO "%s: max_low_pfn: %#lx\n", __func__, max_low_pfn); + printk(KERN_INFO "%s: max_pfn: %#lx\n", __func__, max_pfn); /* * Find an area to use for the bootmem bitmap. 
@@ -134,14 +135,25 @@ void __init setup_memory(void) memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size); /* free bootmem is whole main memory */ - free_bootmem(memory_start, memory_size); + free_bootmem(memory_start, lowmem_size); /* reserve allocate blocks */ for_each_memblock(reserved, reg) { - pr_debug("reserved - 0x%08x-0x%08x\n", - (u32) reg->base, (u32) reg->size); - reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); + unsigned long top = reg->base + reg->size - 1; + + pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n", + (u32) reg->base, (u32) reg->size, top, + memory_start + lowmem_size - 1); + + if (top <= (memory_start + lowmem_size - 1)) { + reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); + } else if (reg->base < (memory_start + lowmem_size - 1)) { + unsigned long trunc_size = memory_start + lowmem_size - + reg->base; + reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT); + } } + #ifdef CONFIG_MMU init_bootmem_done = 1; #endif @@ -186,7 +198,8 @@ void free_initmem(void) void __init mem_init(void) { - high_memory = (void *)__va(memory_end); + high_memory = (void *)__va(memory_start + lowmem_size - 1); + /* this will put all memory onto the freelists */ totalram_pages += free_all_bootmem(); @@ -222,7 +235,6 @@ static void mm_cmdline_setup(void) maxmem = memparse(p, &p); if (maxmem && memory_size > maxmem) { memory_size = maxmem; - memory_end = memory_start + memory_size; memblock.memory.regions[0].size = memory_size; } } @@ -272,9 +284,12 @@ asmlinkage void __init mmu_init(void) } /* Find main memory where the kernel is */ memory_start = (u32) memblock.memory.regions[0].base; - memory_end = (u32) memblock.memory.regions[0].base + - (u32) memblock.memory.regions[0].size; - memory_size = memory_end - memory_start; + lowmem_size = memory_size = (u32) memblock.memory.regions[0].size; + + if (lowmem_size > CONFIG_LOWMEM_SIZE) { + lowmem_size = CONFIG_LOWMEM_SIZE; + memory_size = lowmem_size; + } mm_cmdline_setup(); /* FIXME parse args from command line - not used */ @@ -307,9 +322,13 @@ asmlinkage void __init mmu_init(void) ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */ #endif /* CONFIG_HIGHMEM_START_BOOL */ ioremap_bot = ioremap_base; - /* Initialize the context management stuff */ mmu_context_init(); + + /* Shortly after that, the entire linear mapping will be available */ + /* This will also cause that unflatten device tree will be allocated + * inside 768MB limit */ + memblock_set_current_limit(memory_start + lowmem_size - 1); } /* This is only called until mem_init is done. */ diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c index e3a68bb2da0..68f5c01e4ad 100644 --- a/arch/microblaze/mm/pgtable.c +++ b/arch/microblaze/mm/pgtable.c @@ -44,11 +44,6 @@ unsigned long ioremap_base; unsigned long ioremap_bot; EXPORT_SYMBOL(ioremap_bot); -/* The maximum lowmem defaults to 768Mb, but this can be configured to - * another value. 
- */ -#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE - #ifndef CONFIG_SMP struct pgtable_cache_struct quicklists; #endif @@ -171,7 +166,7 @@ void __init mapin_ram(void) v = CONFIG_KERNEL_START; p = memory_start; - for (s = 0; s < memory_size; s += PAGE_SIZE) { + for (s = 0; s < CONFIG_LOWMEM_SIZE; s += PAGE_SIZE) { f = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_SHARED | _PAGE_HWEXEC; if ((char *) v < _stext || (char *) v >= _etext) -- cgit v1.2.3-70-g09d2 From 419387612c03fce2ca6d5a3d6aac3dae42069264 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 15 Dec 2011 14:33:32 +0100 Subject: microblaze: Introduce fixmap Fixmap will be used for highmem support. Signed-off-by: Michal Simek --- arch/microblaze/include/asm/fixmap.h | 101 +++++++++++++++++++++++++++++++++++ arch/microblaze/mm/init.c | 17 +++--- arch/microblaze/mm/pgtable.c | 11 ++++ 3 files changed, 123 insertions(+), 6 deletions(-) create mode 100644 arch/microblaze/include/asm/fixmap.h diff --git a/arch/microblaze/include/asm/fixmap.h b/arch/microblaze/include/asm/fixmap.h new file mode 100644 index 00000000000..dd89754f0a8 --- /dev/null +++ b/arch/microblaze/include/asm/fixmap.h @@ -0,0 +1,101 @@ +/* + * fixmap.h: compile-time virtual memory allocation + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1998 Ingo Molnar + * + * Copyright 2008 Freescale Semiconductor Inc. + * Port to powerpc added by Kumar Gala + * + * Copyright 2011 Michal Simek + * Copyright 2011 PetaLogix Qld Pty Ltd + * Port to Microblaze + */ + +#ifndef _ASM_FIXMAP_H +#define _ASM_FIXMAP_H + +#ifndef __ASSEMBLY__ +#include +#include + +#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE)) + +/* + * Here we define all the compile-time 'special' virtual + * addresses. The point is to have a constant address at + * compile time, but to set the physical address only + * in the boot process. We allocate these special addresses + * from the end of virtual memory (0xfffff000) backwards. + * Also this lets us do fail-safe vmalloc(), we + * can guarantee that these special addresses and + * vmalloc()-ed addresses never overlap. + * + * these 'compile-time allocated' memory buffers are + * fixed-size 4k pages. (or larger if used with an increment + * highger than 1) use fixmap_set(idx,phys) to associate + * physical memory with fixmap indices. + * + * TLB entries of such buffers will not be flushed across + * task switches. + */ +enum fixed_addresses { + FIX_HOLE, + __end_of_fixed_addresses +}; + +extern void __set_fixmap(enum fixed_addresses idx, + phys_addr_t phys, pgprot_t flags); + +#define set_fixmap(idx, phys) \ + __set_fixmap(idx, phys, PAGE_KERNEL) +/* + * Some hardware wants to get fixmapped without caching. + */ +#define set_fixmap_nocache(idx, phys) \ + __set_fixmap(idx, phys, PAGE_KERNEL_CI) + +#define clear_fixmap(idx) \ + __set_fixmap(idx, 0, __pgprot(0)) + +#define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) + +#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) +#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) + +extern void __this_fixmap_does_not_exist(void); + +/* + * 'index to address' translation. If anyone tries to use the idx + * directly without tranlation, we catch the bug with a NULL-deference + * kernel oops. Illegal ranges of incoming indices are caught too. 
+ */ +static __always_inline unsigned long fix_to_virt(const unsigned int idx) +{ + /* + * this branch gets completely eliminated after inlining, + * except when someone tries to use fixaddr indices in an + * illegal way. (such as mixing up address types or using + * out-of-range indices). + * + * If it doesn't get removed, the linker will complain + * loudly with a reasonably clear error message.. + */ + if (idx >= __end_of_fixed_addresses) + __this_fixmap_does_not_exist(); + + return __fix_to_virt(idx); +} + +static inline unsigned long virt_to_fix(const unsigned long vaddr) +{ + BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); + return __virt_to_fix(vaddr); +} + +#endif /* !__ASSEMBLY__ */ +#endif diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 2253e122aa8..af87fd71a83 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -24,6 +24,7 @@ #include #include #include +#include /* Use for MMU and noMMU because of PCI generic code */ int mem_init_done; @@ -54,6 +55,13 @@ unsigned long lowmem_size; static void __init paging_init(void) { unsigned long zones_size[MAX_NR_ZONES]; +#ifdef CONFIG_MMU + int idx; + + /* Setup fixmaps */ + for (idx = 0; idx < __end_of_fixed_addresses; idx++) + clear_fixmap(idx); +#endif /* Clean every zones */ memset(zones_size, 0, sizeof(zones_size)); @@ -316,12 +324,9 @@ asmlinkage void __init mmu_init(void) /* Map in all of RAM starting at CONFIG_KERNEL_START */ mapin_ram(); -#ifdef CONFIG_HIGHMEM_START_BOOL - ioremap_base = CONFIG_HIGHMEM_START; -#else - ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */ -#endif /* CONFIG_HIGHMEM_START_BOOL */ - ioremap_bot = ioremap_base; + /* Extend vmalloc and ioremap area as big as possible */ + ioremap_base = ioremap_bot = FIXADDR_START; + /* Initialize the context management stuff */ mmu_context_init(); diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c index 68f5c01e4ad..84905da83cb 100644 --- a/arch/microblaze/mm/pgtable.c +++ b/arch/microblaze/mm/pgtable.c @@ -37,6 +37,7 @@ #include #include #include +#include #define flush_HPTE(X, va, pg) _tlbie(va) @@ -249,3 +250,13 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, } return pte; } + +void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags) +{ + unsigned long address = __fix_to_virt(idx); + + if (idx >= __end_of_fixed_addresses) + BUG(); + + map_page(address, phys, pgprot_val(flags)); +} -- cgit v1.2.3-70-g09d2 From 832997990ab912ab8ed4ade08cb6ac5f471efa1e Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 19 Dec 2011 13:47:03 +0100 Subject: microblaze: Show more detailed information about memory Microblaze MMU is similar to ppc that's why ppc layout was reused. 
Signed-off-by: Michal Simek --- arch/microblaze/mm/init.c | 42 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 39 insertions(+), 3 deletions(-) diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index af87fd71a83..cbcdf24b1c8 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -206,14 +206,50 @@ void free_initmem(void) void __init mem_init(void) { + pg_data_t *pgdat; + unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize; + high_memory = (void *)__va(memory_start + lowmem_size - 1); /* this will put all memory onto the freelists */ totalram_pages += free_all_bootmem(); - printk(KERN_INFO "Memory: %luk/%luk available\n", - nr_free_pages() << (PAGE_SHIFT-10), - num_physpages << (PAGE_SHIFT-10)); + for_each_online_pgdat(pgdat) { + unsigned long i; + struct page *page; + + for (i = 0; i < pgdat->node_spanned_pages; i++) { + if (!pfn_valid(pgdat->node_start_pfn + i)) + continue; + page = pgdat_page_nr(pgdat, i); + if (PageReserved(page)) + reservedpages++; + } + } + + codesize = (unsigned long)&_sdata - (unsigned long)&_stext; + datasize = (unsigned long)&_edata - (unsigned long)&_sdata; + initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin; + bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start; + + pr_info("Memory: %luk/%luk available (%luk kernel code, " + "%luk reserved, %luk data, %luk bss, %luk init)\n", + nr_free_pages() << (PAGE_SHIFT-10), + num_physpages << (PAGE_SHIFT-10), + codesize >> 10, + reservedpages << (PAGE_SHIFT-10), + datasize >> 10, + bsssize >> 10, + initsize >> 10); + +#ifdef CONFIG_MMU + pr_info("Kernel virtual memory layout:\n"); + pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP); + pr_info(" * 0x%08lx..0x%08lx : early ioremap\n", + ioremap_bot, ioremap_base); + pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n", + (unsigned long)VMALLOC_START, VMALLOC_END); +#endif mem_init_done = 1; } -- cgit v1.2.3-70-g09d2 From baab8a828d2d6b5b073c192ebe777514bbf3c831 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 15 Dec 2011 15:47:16 +0100 Subject: microblaze: Use active regions Register lowmem active regions. 
Signed-off-by: Michal Simek --- arch/microblaze/Kconfig | 4 ++++ arch/microblaze/mm/init.c | 18 ++++++++++++++++-- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 8e9da3d4281..3267cc5065d 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -1,6 +1,7 @@ config MICROBLAZE def_bool y select HAVE_MEMBLOCK + select HAVE_MEMBLOCK_NODE_MAP select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_FUNCTION_GRAPH_TRACER @@ -30,6 +31,9 @@ config RWSEM_GENERIC_SPINLOCK config ZONE_DMA def_bool y +config ARCH_POPULATES_NODE_MAP + def_bool y + config RWSEM_XCHGADD_ALGORITHM bool diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index cbcdf24b1c8..43b3f604baf 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -68,7 +68,8 @@ static void __init paging_init(void) zones_size[ZONE_DMA] = max_pfn; - free_area_init(zones_size); + /* We don't have holes in memory map */ + free_area_init_nodes(zones_size); } void __init setup_memory(void) @@ -142,8 +143,18 @@ void __init setup_memory(void) PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn); memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size); + /* Add active regions with valid PFNs */ + for_each_memblock(memory, reg) { + unsigned long start_pfn, end_pfn; + + start_pfn = memblock_region_memory_base_pfn(reg); + end_pfn = memblock_region_memory_end_pfn(reg); + memblock_set_node(start_pfn << PAGE_SHIFT, + (end_pfn - start_pfn) << PAGE_SHIFT, 0); + } + /* free bootmem is whole main memory */ - free_bootmem(memory_start, lowmem_size); + free_bootmem_with_active_regions(0, max_low_pfn); /* reserve allocate blocks */ for_each_memblock(reserved, reg) { @@ -162,6 +173,9 @@ void __init setup_memory(void) } } + /* XXX need to clip this if using highmem? */ + sparse_memory_present_with_active_regions(0); + #ifdef CONFIG_MMU init_bootmem_done = 1; #endif -- cgit v1.2.3-70-g09d2 From 2f2f371f8907d169650f594850ca6096e2f73b77 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 15 Dec 2011 15:02:37 +0100 Subject: microblaze: Highmem support The first highmem implementation. Signed-off-by: Michal Simek --- arch/microblaze/Kconfig | 24 ++++----- arch/microblaze/include/asm/fixmap.h | 8 +++ arch/microblaze/include/asm/highmem.h | 96 +++++++++++++++++++++++++++++++++++ arch/microblaze/mm/Makefile | 1 + arch/microblaze/mm/highmem.c | 88 ++++++++++++++++++++++++++++++++ arch/microblaze/mm/init.c | 68 +++++++++++++++++++++++++ 6 files changed, 272 insertions(+), 13 deletions(-) create mode 100644 arch/microblaze/include/asm/highmem.h create mode 100644 arch/microblaze/mm/highmem.c diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 3267cc5065d..86ae27871f4 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -159,20 +159,18 @@ config XILINX_UNCACHED_SHADOW The feature requires the design to define the RAM memory controller window to be twice as large as the actual physical memory. -config HIGHMEM_START_BOOL - bool "Set high memory pool address" - depends on ADVANCED_OPTIONS && HIGHMEM - help - This option allows you to set the base address of the kernel virtual - area used to map high memory pages. This can be useful in - optimizing the layout of kernel virtual memory. - - Say N here unless you know what you are doing. 
- -config HIGHMEM_START - hex "Virtual start address of high memory pool" if HIGHMEM_START_BOOL +config HIGHMEM + bool "High memory support" depends on MMU - default "0xfe000000" + help + The address space of Microblaze processors is only 4 Gigabytes large + and it has to accommodate user address space, kernel address + space as well as some memory mapped IO. That means that, if you + have a large amount of physical memory and/or IO, not all of the + memory can be "permanently mapped" by the kernel. The physical + memory that is not permanently mapped is called "high memory". + + If unsure, say n. config LOWMEM_SIZE_BOOL bool "Set maximum low memory" diff --git a/arch/microblaze/include/asm/fixmap.h b/arch/microblaze/include/asm/fixmap.h index dd89754f0a8..f2b312e10b1 100644 --- a/arch/microblaze/include/asm/fixmap.h +++ b/arch/microblaze/include/asm/fixmap.h @@ -21,6 +21,10 @@ #ifndef __ASSEMBLY__ #include #include +#ifdef CONFIG_HIGHMEM +#include +#include +#endif #define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE)) @@ -44,6 +48,10 @@ */ enum fixed_addresses { FIX_HOLE, +#ifdef CONFIG_HIGHMEM + FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ + FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * num_possible_cpus()) - 1, +#endif __end_of_fixed_addresses }; diff --git a/arch/microblaze/include/asm/highmem.h b/arch/microblaze/include/asm/highmem.h new file mode 100644 index 00000000000..2446a73140a --- /dev/null +++ b/arch/microblaze/include/asm/highmem.h @@ -0,0 +1,96 @@ +/* + * highmem.h: virtual kernel memory mappings for high memory + * + * Used in CONFIG_HIGHMEM systems for memory pages which + * are not addressable by direct kernel virtual addresses. + * + * Copyright (C) 1999 Gerhard Wichert, Siemens AG + * Gerhard.Wichert@pdb.siemens.de + * + * + * Redesigned the x86 32-bit VM architecture to deal with + * up to 16 Terabyte physical memory. With current x86 CPUs + * we now support up to 64 Gigabytes physical RAM. + * + * Copyright (C) 1999 Ingo Molnar + */ +#ifndef _ASM_HIGHMEM_H +#define _ASM_HIGHMEM_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +extern pte_t *kmap_pte; +extern pgprot_t kmap_prot; +extern pte_t *pkmap_page_table; + +/* + * Right now we initialize only a single pte table. It can be extended + * easily, subsequent pte tables have to be allocated in one physical + * chunk of RAM. + */ +/* + * We use one full pte table with 4K pages. And with 16K/64K/256K pages pte + * table covers enough memory (32MB/512MB/2GB resp.), so that both FIXMAP + * and PKMAP can be placed in a single pte table. We use 512 pages for PKMAP + * in case of 16K/64K/256K page sizes. 
+ */ + +#define PKMAP_ORDER PTE_SHIFT +#define LAST_PKMAP (1 << PKMAP_ORDER) + +#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \ + & PMD_MASK) + +#define LAST_PKMAP_MASK (LAST_PKMAP - 1) +#define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT) +#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) + +extern void *kmap_high(struct page *page); +extern void kunmap_high(struct page *page); +extern void *kmap_atomic_prot(struct page *page, pgprot_t prot); +extern void __kunmap_atomic(void *kvaddr); + +static inline void *kmap(struct page *page) +{ + might_sleep(); + if (!PageHighMem(page)) + return page_address(page); + return kmap_high(page); +} + +static inline void kunmap(struct page *page) +{ + BUG_ON(in_interrupt()); + if (!PageHighMem(page)) + return; + kunmap_high(page); +} + +static inline void *__kmap_atomic(struct page *page) +{ + return kmap_atomic_prot(page, kmap_prot); +} + +static inline struct page *kmap_atomic_to_page(void *ptr) +{ + unsigned long idx, vaddr = (unsigned long) ptr; + pte_t *pte; + + if (vaddr < FIXADDR_START) + return virt_to_page(ptr); + + idx = virt_to_fix(vaddr); + pte = kmap_pte - (idx - FIX_KMAP_BEGIN); + return pte_page(*pte); +} + +#define flush_cache_kmaps() { flush_icache(); flush_dcache(); } + +#endif /* __KERNEL__ */ + +#endif /* _ASM_HIGHMEM_H */ diff --git a/arch/microblaze/mm/Makefile b/arch/microblaze/mm/Makefile index 09c49ed8723..7313bd8acbb 100644 --- a/arch/microblaze/mm/Makefile +++ b/arch/microblaze/mm/Makefile @@ -5,3 +5,4 @@ obj-y := consistent.o init.o obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o +obj-$(CONFIG_HIGHMEM) += highmem.o diff --git a/arch/microblaze/mm/highmem.c b/arch/microblaze/mm/highmem.c new file mode 100644 index 00000000000..7d78838e8bf --- /dev/null +++ b/arch/microblaze/mm/highmem.c @@ -0,0 +1,88 @@ +/* + * highmem.c: virtual kernel memory mappings for high memory + * + * PowerPC version, stolen from the i386 version. + * + * Used in CONFIG_HIGHMEM systems for memory pages which + * are not addressable by direct kernel virtual addresses. + * + * Copyright (C) 1999 Gerhard Wichert, Siemens AG + * Gerhard.Wichert@pdb.siemens.de + * + * + * Redesigned the x86 32-bit VM architecture to deal with + * up to 16 Terrabyte physical memory. With current x86 CPUs + * we now support up to 64 Gigabytes physical RAM. + * + * Copyright (C) 1999 Ingo Molnar + * + * Reworked for PowerPC by various contributors. Moved from + * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp. + */ + +#include +#include + +/* + * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap + * gives a more generic (and caching) interface. But kmap_atomic can + * be used in IRQ contexts, so in some (very limited) cases we need + * it. 
+ */ +#include + +void *kmap_atomic_prot(struct page *page, pgprot_t prot) +{ + + unsigned long vaddr; + int idx, type; + + /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ + pagefault_disable(); + if (!PageHighMem(page)) + return page_address(page); + + + type = kmap_atomic_idx_push(); + idx = type + KM_TYPE_NR*smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); +#ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(!pte_none(*(kmap_pte-idx))); +#endif + set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot)); + local_flush_tlb_page(NULL, vaddr); + + return (void *) vaddr; +} +EXPORT_SYMBOL(kmap_atomic_prot); + +void __kunmap_atomic(void *kvaddr) +{ + unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; + int type; + + if (vaddr < __fix_to_virt(FIX_KMAP_END)) { + pagefault_enable(); + return; + } + + type = kmap_atomic_idx(); +#ifdef CONFIG_DEBUG_HIGHMEM + { + unsigned int idx; + + idx = type + KM_TYPE_NR * smp_processor_id(); + BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); + + /* + * force other mappings to Oops if they'll try to access + * this pte without first remap it + */ + pte_clear(&init_mm, vaddr, kmap_pte-idx); + local_flush_tlb_page(NULL, vaddr); + } +#endif + kmap_atomic_idx_pop(); + pagefault_enable(); +} +EXPORT_SYMBOL(__kunmap_atomic); diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 43b3f604baf..95297b13dd9 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -49,6 +49,53 @@ unsigned long memory_size; EXPORT_SYMBOL(memory_size); unsigned long lowmem_size; +#ifdef CONFIG_HIGHMEM +pte_t *kmap_pte; +EXPORT_SYMBOL(kmap_pte); +pgprot_t kmap_prot; +EXPORT_SYMBOL(kmap_prot); + +static inline pte_t *virt_to_kpte(unsigned long vaddr) +{ + return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), + vaddr), vaddr); +} + +static void __init highmem_init(void) +{ + pr_debug("%x\n", (u32)PKMAP_BASE); + map_page(PKMAP_BASE, 0, 0); /* XXX gross */ + pkmap_page_table = virt_to_kpte(PKMAP_BASE); + + kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN)); + kmap_prot = PAGE_KERNEL; +} + +static unsigned long highmem_setup(void) +{ + unsigned long pfn; + unsigned long reservedpages = 0; + + for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) { + struct page *page = pfn_to_page(pfn); + + /* FIXME not sure about */ + if (memblock_is_reserved(pfn << PAGE_SHIFT)) + continue; + ClearPageReserved(page); + init_page_count(page); + __free_page(page); + totalhigh_pages++; + reservedpages++; + } + totalram_pages += totalhigh_pages; + printk(KERN_INFO "High memory: %luk\n", + totalhigh_pages << (PAGE_SHIFT-10)); + + return reservedpages; +} +#endif /* CONFIG_HIGHMEM */ + /* * paging_init() sets up the page tables - in fact we've already done this. 
*/ @@ -66,7 +113,14 @@ static void __init paging_init(void) /* Clean every zones */ memset(zones_size, 0, sizeof(zones_size)); +#ifdef CONFIG_HIGHMEM + highmem_init(); + + zones_size[ZONE_DMA] = max_low_pfn; + zones_size[ZONE_HIGHMEM] = max_pfn; +#else zones_size[ZONE_DMA] = max_pfn; +#endif /* We don't have holes in memory map */ free_area_init_nodes(zones_size); @@ -241,6 +295,10 @@ void __init mem_init(void) } } +#ifdef CONFIG_HIGHMEM + reservedpages -= highmem_setup(); +#endif + codesize = (unsigned long)&_sdata - (unsigned long)&_stext; datasize = (unsigned long)&_edata - (unsigned long)&_sdata; initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin; @@ -259,6 +317,10 @@ void __init mem_init(void) #ifdef CONFIG_MMU pr_info("Kernel virtual memory layout:\n"); pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP); +#ifdef CONFIG_HIGHMEM + pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n", + PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP)); +#endif /* CONFIG_HIGHMEM */ pr_info(" * 0x%08lx..0x%08lx : early ioremap\n", ioremap_bot, ioremap_base); pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n", @@ -346,7 +408,9 @@ asmlinkage void __init mmu_init(void) if (lowmem_size > CONFIG_LOWMEM_SIZE) { lowmem_size = CONFIG_LOWMEM_SIZE; +#ifndef CONFIG_HIGHMEM memory_size = lowmem_size; +#endif } mm_cmdline_setup(); /* FIXME parse args from command line - not used */ @@ -375,7 +439,11 @@ asmlinkage void __init mmu_init(void) mapin_ram(); /* Extend vmalloc and ioremap area as big as possible */ +#ifdef CONFIG_HIGHMEM + ioremap_base = ioremap_bot = PKMAP_BASE; +#else ioremap_base = ioremap_bot = FIXADDR_START; +#endif /* Initialize the context management stuff */ mmu_context_init(); -- cgit v1.2.3-70-g09d2 From 7c0d26150781cbd8a522259c9dea9e7ef23df8e3 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 22 Dec 2011 12:33:24 +0100 Subject: microblaze: Fix mapin_ram function Fix how many pages are allocated in mapin_ram. It is lowmem_size not setup CONFIG_LOWMEM_SIZE because it is the same for all systems. Which means that wrong pages are allocated if memory size is smaller than CONFIG_LOWMEM_SIZE. It has dramatic impact on bootup time. On sp605 MMU full hw design is 7s. 
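For a sense of scale (the 64 MB figure is a hypothetical example, not a measured configuration): with CONFIG_LOWMEM_SIZE at its 768 MB default, the old loop walked 768 MB / 4 KB = 196,608 page-table entries on a 64 MB system, where 64 MB / 4 KB = 16,384 would do - roughly twelve times the necessary mapping work at boot.
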
Signed-off-by: Michal Simek --- arch/microblaze/include/asm/page.h | 1 + arch/microblaze/mm/pgtable.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h index 665f29330ce..352cc2352bd 100644 --- a/arch/microblaze/include/asm/page.h +++ b/arch/microblaze/include/asm/page.h @@ -136,6 +136,7 @@ extern unsigned long max_pfn; extern unsigned long memory_start; extern unsigned long memory_size; +extern unsigned long lowmem_size; extern int page_is_ram(unsigned long pfn); diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c index 84905da83cb..d1c06d07fed 100644 --- a/arch/microblaze/mm/pgtable.c +++ b/arch/microblaze/mm/pgtable.c @@ -167,7 +167,7 @@ void __init mapin_ram(void) v = CONFIG_KERNEL_START; p = memory_start; - for (s = 0; s < CONFIG_LOWMEM_SIZE; s += PAGE_SIZE) { + for (s = 0; s < lowmem_size; s += PAGE_SIZE) { f = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_SHARED | _PAGE_HWEXEC; if ((char *) v < _stext || (char *) v >= _etext) -- cgit v1.2.3-70-g09d2 From cc5647a64e8c6691be87a83632d8b1c78b795023 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 7 Nov 2011 13:42:12 +0100 Subject: microblaze: Use node name instead of compatible string Change report in bootlog: Origin: xlnx,xps-intc-1.00.a #0 at 0xc8000000, num_irq=6, edge=0x4 xlnx,xps-timer-1.00.a #0 at 0xc8004000, irq=2 New: interrupt-controller #0 at 0xc8000000, num_irq=6, edge=0x4 system-timer #0 at 0xc8004000, irq=2 Signed-off-by: Michal Simek --- arch/microblaze/kernel/intc.c | 4 ++-- arch/microblaze/kernel/timer.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c index 44b177e2ab1..3003d2f9f55 100644 --- a/arch/microblaze/kernel/intc.c +++ b/arch/microblaze/kernel/intc.c @@ -131,8 +131,8 @@ void __init init_IRQ(void) #ifdef CONFIG_SELFMOD_INTC selfmod_function((int *) arr_func, intc_baseaddr); #endif - printk(KERN_INFO "XPS intc #0 at 0x%08x, num_irq=%d, edge=0x%x\n", - intc_baseaddr, nr_irq, intr_mask); + printk(KERN_INFO "%s #0 at 0x%08x, num_irq=%d, edge=0x%x\n", + intc->name, intc_baseaddr, nr_irq, intr_mask); /* * Disable all external interrupts until they are diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c index 78b82f30bdd..cadfd5608af 100644 --- a/arch/microblaze/kernel/timer.c +++ b/arch/microblaze/kernel/timer.c @@ -274,8 +274,8 @@ void __init time_init(void) #ifdef CONFIG_SELFMOD_TIMER selfmod_function((int *) arr_func, timer_baseaddr); #endif - printk(KERN_INFO "XPS timer #0 at 0x%08x, irq=%d\n", - timer_baseaddr, irq); + printk(KERN_INFO "%s #0 at 0x%08x, irq=%d\n", + timer->name, timer_baseaddr, irq); /* If there is clock-frequency property than use it */ prop = of_get_property(timer, "clock-frequency", NULL); -- cgit v1.2.3-70-g09d2 From 173701d7745d07888a929bf08d77d29996ca13dc Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Wed, 9 Nov 2011 15:39:58 +0100 Subject: microblaze: Clear all MSR flags on the first kernel instruction The main reason is bug because of dynamic TLB allocation. U-BOOT didn't disable dcache and then writing to physical address from ASM wan't visible for reading through MMU. Disabling caches and clearing all flags from previous code is good to do so. 
Signed-off-by: Michal Simek --- arch/microblaze/kernel/head.S | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S index 77320b8fc16..a5ba9925ae8 100644 --- a/arch/microblaze/kernel/head.S +++ b/arch/microblaze/kernel/head.S @@ -63,9 +63,7 @@ ENTRY(_start) real_start: #endif - mfs r1, rmsr - andi r1, r1, ~2 - mts rmsr, r1 + mts rmsr, r0 /* * According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc' * if the msrclr instruction is not enabled. We use this to detect @@ -73,6 +71,7 @@ real_start: * r8 == 0 - msr instructions are implemented * r8 != 0 - msr instructions are not implemented */ + mfs r1, rmsr msrclr r8, 0 /* clear nothing - just read msr for test */ cmpu r8, r8, r1 /* r1 must contain msr reg content */ -- cgit v1.2.3-70-g09d2 From 3a1d26769f61fe8a1f517a66dfcee935a76fd61c Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Wed, 13 Jul 2011 15:26:09 +0200 Subject: microblaze: Extend space for compiled-in FDT to 32kB Signed-off-by: Michal Simek --- arch/microblaze/kernel/head.S | 2 +- arch/microblaze/kernel/vmlinux.lds.S | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S index a5ba9925ae8..441dad80558 100644 --- a/arch/microblaze/kernel/head.S +++ b/arch/microblaze/kernel/head.S @@ -95,7 +95,7 @@ big_endian: _prepare_copy_fdt: or r11, r0, r0 /* incremment */ ori r4, r0, TOPHYS(_fdt_start) - ori r3, r0, (0x4000 - 4) + ori r3, r0, (0x8000 - 4) _copy_fdt: lw r12, r7, r11 /* r12 = r7 + r11 */ sw r12, r4, r11 /* addr[r4 + r11] = r12 */ diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S index ac0e1a5d478..109e9d86ade 100644 --- a/arch/microblaze/kernel/vmlinux.lds.S +++ b/arch/microblaze/kernel/vmlinux.lds.S @@ -44,7 +44,7 @@ SECTIONS { __fdt_blob : AT(ADDR(__fdt_blob) - LOAD_OFFSET) { _fdt_start = . ; /* place for fdt blob */ *(__fdt_blob) ; /* Any link-placed DTB */ - . = _fdt_start + 0x4000; /* Pad up to 16kbyte */ + . = _fdt_start + 0x8000; /* Pad up to 32kbyte */ _fdt_end = . ; } -- cgit v1.2.3-70-g09d2 From 95b0f9ea66661681f6ae081ea28416744d622c07 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 8 Feb 2010 16:41:38 +0100 Subject: microblaze: Improve TLB calculation for small systems Systems with small amount of memory need to be handled differently. Linux can't allocate the whole 32MB with two TLBs because then there is no MMU protection. 
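The pinned-TLB size selection added to head.S below is easier to read in C. The following sketch mirrors the GT1/GT2/GT4/GT8/GT16/GT17/GT20 branches; the function and parameter names are illustrative, and ksize stands for the kernel image size (_end - CONFIG_KERNEL_START) plus CONFIG_KERNEL_PAD:

/* Pick sizes for the two pinned TLB entries covering the kernel.
 * A slot size of 0 means that slot is left unused, so the rest of
 * memory keeps normal MMU protection instead of a blanket mapping. */
static void pick_kernel_tlbs(unsigned long ksize,
			     unsigned long *tlb0, unsigned long *tlb1)
{
	const unsigned long M = 1024 * 1024;

	*tlb0 = *tlb1 = 0;

	if (ksize >= 16 * M) {			/* GT16 */
		*tlb0 = 16 * M;
		ksize -= 16 * M;
		if (ksize >= 4 * M)		/* GT20 */
			*tlb1 = 16 * M;
		else if (ksize >= 1 * M)	/* GT17 */
			*tlb1 = 4 * M;
		else				/* GT1 */
			*tlb1 = 1 * M;
	} else if (ksize >= 8 * M) {		/* GT8 -> GT20 */
		*tlb1 = 16 * M;
	} else if (ksize >= 4 * M) {		/* GT4 */
		*tlb0 = 4 * M;
		*tlb1 = (ksize - 4 * M >= 1 * M) ? 4 * M : 1 * M;
	} else if (ksize >= 2 * M) {		/* GT2 */
		*tlb1 = 4 * M;
	} else {
		*tlb0 = 1 * M;
		if (ksize >= 1 * M)		/* GT1 */
			*tlb1 = 1 * M;
	}
}
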
Signed-off-by: Michal Simek --- arch/microblaze/Kconfig | 4 ++ arch/microblaze/include/asm/setup.h | 3 +- arch/microblaze/include/asm/system.h | 1 + arch/microblaze/kernel/head.S | 98 ++++++++++++++++++++++++++++++++++-- arch/microblaze/kernel/setup.c | 11 +++- arch/microblaze/mm/init.c | 14 ++++-- 6 files changed, 120 insertions(+), 11 deletions(-) diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 86ae27871f4..d64c10093b4 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -259,6 +259,10 @@ config MICROBLAZE_32K_PAGES endchoice +config KERNEL_PAD + hex "Kernel PAD for unpacking" if ADVANCED_OPTIONS + default "0x80000" if MMU + endmenu source "mm/Kconfig" diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h index 6c72ed7eba9..9f195c09473 100644 --- a/arch/microblaze/include/asm/setup.h +++ b/arch/microblaze/include/asm/setup.h @@ -39,7 +39,8 @@ extern void of_platform_reset_gpio_probe(void); void time_init(void); void init_IRQ(void); void machine_early_init(const char *cmdline, unsigned int ram, - unsigned int fdt, unsigned int msr); + unsigned int fdt, unsigned int msr, unsigned int tlb0, + unsigned int tlb1); void machine_restart(char *cmd); void machine_shutdown(void); diff --git a/arch/microblaze/include/asm/system.h b/arch/microblaze/include/asm/system.h index 5a433cbaafb..01228d2b135 100644 --- a/arch/microblaze/include/asm/system.h +++ b/arch/microblaze/include/asm/system.h @@ -83,6 +83,7 @@ void default_idle(void); void free_init_pages(char *what, unsigned long begin, unsigned long end); void free_initmem(void); extern char *klimit; +extern unsigned long kernel_tlb; extern void ret_from_fork(void); extern void *alloc_maybe_bootmem(size_t size, gfp_t mask); diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S index 441dad80558..49dd48f9e6e 100644 --- a/arch/microblaze/kernel/head.S +++ b/arch/microblaze/kernel/head.S @@ -168,6 +168,53 @@ _invalidate: addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */ tophys(r4,r3) /* Load the kernel physical address */ + /* start to do TLB calculation */ + addik r12, r0, _end + rsub r12, r3, r12 + addik r12, r12, CONFIG_KERNEL_PAD /* that's the pad */ + + or r9, r0, r0 /* TLB0 = 0 */ + or r10, r0, r0 /* TLB1 = 0 */ + + addik r11, r12, -0x1000000 + bgei r11, GT16 /* size is greater than 16MB */ + addik r11, r12, -0x0800000 + bgei r11, GT8 /* size is greater than 8MB */ + addik r11, r12, -0x0400000 + bgei r11, GT4 /* size is greater than 4MB */ + /* size is less than 4MB */ + addik r11, r12, -0x0200000 + bgei r11, GT2 /* size is greater than 2MB */ + addik r9, r0, 0x0100000 /* TLB0 must be 1MB */ + addik r11, r12, -0x0100000 + bgei r11, GT1 /* size is greater than 1MB */ + /* TLB1 is 0 which is setup above */ + bri tlb_end +GT4: /* r11 contains the rest - will be either 1 or 4 */ + ori r9, r0, 0x400000 /* TLB0 is 4MB */ + bri TLB1 +GT16: /* TLB0 is 16MB */ + addik r9, r0, 0x1000000 /* means TLB0 is 16MB */ +TLB1: + /* must be used r2 because of substract if failed */ + addik r2, r11, -0x0400000 + bgei r2, GT20 /* size is greater than 16MB */ + /* size is >16MB and <20MB */ + addik r11, r11, -0x0100000 + bgei r11, GT17 /* size is greater than 17MB */ + /* kernel is >16MB and < 17MB */ +GT1: + addik r10, r0, 0x0100000 /* means TLB1 is 1MB */ + bri tlb_end +GT2: /* TLB0 is 0 and TLB1 will be 4MB */ +GT17: /* TLB1 is 4MB - kernel size <20MB */ + addik r10, r0, 0x0400000 /* means TLB1 is 4MB */ + bri tlb_end +GT8: /* TLB0 is still zero that's 
why I can use only TLB1 */ +GT20: /* TLB1 is 16MB - kernel size >20MB */ + addik r10, r0, 0x1000000 /* means TLB1 is 16MB */ +tlb_end: + /* * Configure and load two entries into TLB slots 0 and 1. * In case we are pinning TLBs, these are reserved in by the @@ -177,16 +224,56 @@ _invalidate: andi r4,r4,0xfffffc00 /* Mask off the real page number */ ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ + /* TLB0 can be zeroes that's why we not setup it */ + beqi r9, jump_over + + /* look at the code below */ + ori r30, r0, 0x200 + andi r29, r9, 0x100000 + bneid r29, 1f + addik r30, r30, 0x80 + andi r29, r9, 0x400000 + bneid r29, 1f + addik r30, r30, 0x80 + andi r29, r9, 0x1000000 + bneid r29, 1f + addik r30, r30, 0x80 +1: + ori r11, r30, 0 + andi r3,r3,0xfffffc00 /* Mask off the effective page number */ - ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M)) + ori r3,r3,(TLB_VALID) + or r3, r3, r11 mts rtlbx,r0 /* TLB slow 0 */ mts rtlblo,r4 /* Load the data portion of the entry */ mts rtlbhi,r3 /* Load the tag portion of the entry */ - addik r4, r4, 0x01000000 /* Map next 16 M entries */ - addik r3, r3, 0x01000000 +jump_over: + /* TLB1 can be zeroes that's why we not setup it */ + beqi r10, jump_over2 + + /* look at the code below */ + ori r30, r0, 0x200 + andi r29, r10, 0x100000 + bneid r29, 1f + addik r30, r30, 0x80 + andi r29, r10, 0x400000 + bneid r29, 1f + addik r30, r30, 0x80 + andi r29, r10, 0x1000000 + bneid r29, 1f + addik r30, r30, 0x80 +1: + ori r12, r30, 0 + + addk r4, r4, r9 /* previous addr + TLB0 size */ + addk r3, r3, r9 + + andi r3,r3,0xfffffc00 /* Mask off the effective page number */ + ori r3,r3,(TLB_VALID) + or r3, r3, r12 ori r6,r0,1 /* TLB slot 1 */ mts rtlbx,r6 @@ -194,6 +281,7 @@ _invalidate: mts rtlblo,r4 /* Load the data portion of the entry */ mts rtlbhi,r3 /* Load the tag portion of the entry */ +jump_over2: /* * Load a TLB entry for LMB, since we need access to * the exception vectors, using a 4k real==virtual mapping. @@ -237,8 +325,8 @@ start_here: * Please see $(ARCH)/mach-$(SUBARCH)/setup.c for * the function. 
*/ - addik r9, r0, machine_early_init - brald r15, r9 + addik r11, r0, machine_early_init + brald r15, r11 nop #ifndef CONFIG_MMU diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c index 604cd9dd133..a1fa2a5813b 100644 --- a/arch/microblaze/kernel/setup.c +++ b/arch/microblaze/kernel/setup.c @@ -97,8 +97,11 @@ inline unsigned get_romfs_len(unsigned *addr) } #endif /* CONFIG_MTD_UCLINUX_EBSS */ +unsigned long kernel_tlb; + void __init machine_early_init(const char *cmdline, unsigned int ram, - unsigned int fdt, unsigned int msr) + unsigned int fdt, unsigned int msr, unsigned int tlb0, + unsigned int tlb1) { unsigned long *src, *dst; unsigned int offset = 0; @@ -145,6 +148,12 @@ void __init machine_early_init(const char *cmdline, unsigned int ram, setup_early_printk(NULL); #endif + /* setup kernel_tlb after BSS cleaning + * Maybe worth to move to asm code */ + kernel_tlb = tlb0 + tlb1; + /* printk("TLB1 0x%08x, TLB0 0x%08x, tlb 0x%x\n", tlb0, + tlb1, kernel_tlb); */ + printk("Ramdisk addr 0x%08x, ", ram); if (fdt) printk("FDT at 0x%08x\n", fdt); diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 95297b13dd9..ce80823051b 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -398,10 +398,16 @@ asmlinkage void __init mmu_init(void) machine_restart(NULL); } - if ((u32) memblock.memory.regions[0].size < 0x1000000) { - printk(KERN_EMERG "Memory must be greater than 16MB\n"); + if ((u32) memblock.memory.regions[0].size < 0x400000) { + printk(KERN_EMERG "Memory must be greater than 4MB\n"); machine_restart(NULL); } + + if ((u32) memblock.memory.regions[0].size < kernel_tlb) { + printk(KERN_EMERG "Kernel size is greater than memory node\n"); + machine_restart(NULL); + } + /* Find main memory where the kernel is */ memory_start = (u32) memblock.memory.regions[0].base; lowmem_size = memory_size = (u32) memblock.memory.regions[0].size; @@ -462,11 +468,11 @@ void __init *early_get_page(void) p = alloc_bootmem_pages(PAGE_SIZE); } else { /* - * Mem start + 32MB -> here is limit + * Mem start + kernel_tlb -> here is limit * because of mem mapping from head.S */ p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, - memory_start + 0x2000000)); + memory_start + kernel_tlb)); } return p; } -- cgit v1.2.3-70-g09d2 From 1451d1d88b9aa32ac9ee54180239e9b34b6f9e86 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 4 Apr 2011 15:46:03 +0200 Subject: microblaze: Introduce TLB skip size TLB skip size direct how many TLBs is skipped. Currently TLB0 and TLB1 are used for Linux kernel mapping that's why their are skipped. 
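The intended effect on the software-managed replacement index can be pictured with a small sketch (a simplified illustration, not the hw_exception_handler.S code itself, which the next patch reworks further):

/* Walk the 64-entry UTLB round-robin when loading a missing translation,
 * but never hand out the first MICROBLAZE_TLB_SKIP entries - those pin
 * the kernel mapping set up in head.S. */
static unsigned int next_victim_tlb(unsigned int last)
{
	unsigned int idx = (last + 1) & (MICROBLAZE_TLB_SIZE - 1);

	if (idx < MICROBLAZE_TLB_SKIP)
		idx = MICROBLAZE_TLB_SKIP;
	return idx;
}
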
Signed-off-by: Michal Simek --- arch/microblaze/include/asm/mmu.h | 1 + arch/microblaze/kernel/hw_exception_handler.S | 7 ++++--- arch/microblaze/kernel/misc.S | 4 ++-- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/arch/microblaze/include/asm/mmu.h b/arch/microblaze/include/asm/mmu.h index 8d6a654ceff..5198de8b122 100644 --- a/arch/microblaze/include/asm/mmu.h +++ b/arch/microblaze/include/asm/mmu.h @@ -68,6 +68,7 @@ extern void _tlbia(void); /* invalidate all TLB entries */ */ # define MICROBLAZE_TLB_SIZE 64 +# define MICROBLAZE_TLB_SKIP 2 /* * TLB entries are defined by a "high" tag portion and a "low" data diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S index e62be837960..b7249f4215a 100644 --- a/arch/microblaze/kernel/hw_exception_handler.S +++ b/arch/microblaze/kernel/hw_exception_handler.S @@ -821,18 +821,19 @@ ex_handler_done: * A common place to load the TLB. */ tlb_index: - .long 1 /* MS: storing last used tlb index */ + /* MS: storing last used tlb index */ + .long (MICROBLAZE_TLB_SKIP - 1) finish_tlb_load: /* MS: load the last used TLB index. */ lwi r5, r0, TOPHYS(tlb_index) addik r5, r5, 1 /* MS: inc tlb_index -> use next one */ /* MS: FIXME this is potential fault, because this is mask not count */ - andi r5, r5, (MICROBLAZE_TLB_SIZE-1) + andi r5, r5, MICROBLAZE_TLB_SIZE - 1 ori r6, r0, 1 cmp r31, r5, r6 blti r31, ex12 - addik r5, r6, 1 + addik r5, r6, MICROBLAZE_TLB_SKIP - 1 ex12: /* MS: save back current TLB index */ swi r5, r0, TOPHYS(tlb_index) diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S index 206da3da361..c9090d7973f 100644 --- a/arch/microblaze/kernel/misc.S +++ b/arch/microblaze/kernel/misc.S @@ -36,7 +36,7 @@ _tlbia_1: nop mts rtlbhi, r0 /* flush: ensure V is clear */ nop - addik r11, r12, -2 + addik r11, r12, -MICROBLAZE_TLB_SKIP bneid r11, _tlbia_1 /* loop for all entries */ addik r12, r12, -1 /* sync */ @@ -75,7 +75,7 @@ early_console_reg_tlb_alloc: * Load a TLB entry for the UART, so that microblaze_progress() can use * the UARTs nice and early. We use a 4k real==virtual mapping. */ - ori r4, r0, MICROBLAZE_TLB_SIZE - 1 + ori r4, r0, 63 mts rtlbx, r4 /* TLB slot 63 */ or r4,r5,r0 -- cgit v1.2.3-70-g09d2 From e02db0aa3e1976ae4e23a66077d252a2f3ba74c7 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 8 Feb 2010 16:41:38 +0100 Subject: microblaze: Handle TLB skip size dynamically This patch fix the problem with rootfs on JFFS2 with early printk console turned on. The origin version used TLB63 for temporary early printk mapping. The code expect that kernel is not able to use all 64 TLB entries till early printk console is remapped by ioremap. After that temporary mapping on TLB63 is silently lost. This expectation give the opportunity to have early console pretty early. Microblaze systems with JFFS2 rootfs with early printk console turned on used more than 64 TLB entries before kernel can remap early console. Based on that kernel does access to bad area because early printk mapping is rewritten. This patch introduces tlb_skip variable which dynamically stores number of skipped TLB entries from the TLB0. skip_tlb=2 means that TLB0 and TLB1 should be skipped. MICROBLAZE_TLB_SKIP defines how many TLB is skipped at the kernel start. They can be used for user purpose. TLB 63 is used for temporary LMB mapping (MICROBLAZE_LMB_TLB_ID). Also clean TLBLO when kernel starts. For specific kernel sizes kernel can use just one TLB. 
Detect this case and use the second TLB for general purpose. Change _tlbia function to flush TLB entries from tlb_skip to TLB_SIZE. Export tlb_skip size through debugfs. Signed-off-by: Michal Simek --- arch/microblaze/include/asm/mmu.h | 13 ++++++++- arch/microblaze/kernel/early_printk.c | 14 +++++++++ arch/microblaze/kernel/head.S | 42 +++++++++++++++++---------- arch/microblaze/kernel/hw_exception_handler.S | 10 +++++-- arch/microblaze/kernel/misc.S | 13 ++++++--- arch/microblaze/kernel/setup.c | 13 +++++++++ 6 files changed, 83 insertions(+), 22 deletions(-) diff --git a/arch/microblaze/include/asm/mmu.h b/arch/microblaze/include/asm/mmu.h index 5198de8b122..1f9edddf7f4 100644 --- a/arch/microblaze/include/asm/mmu.h +++ b/arch/microblaze/include/asm/mmu.h @@ -56,6 +56,12 @@ typedef struct _SEGREG { extern void _tlbie(unsigned long va); /* invalidate a TLB entry */ extern void _tlbia(void); /* invalidate all TLB entries */ + +/* + * tlb_skip size stores actual number skipped TLBs from TLB0 - every directy TLB + * mapping has to increase tlb_skip size. + */ +extern u32 tlb_skip; # endif /* __ASSEMBLY__ */ /* @@ -68,7 +74,12 @@ extern void _tlbia(void); /* invalidate all TLB entries */ */ # define MICROBLAZE_TLB_SIZE 64 -# define MICROBLAZE_TLB_SKIP 2 + +/* For cases when you want to skip some TLB entries */ +# define MICROBLAZE_TLB_SKIP 0 + +/* Use the last TLB for temporary access to LMB */ +# define MICROBLAZE_LMB_TLB_ID 63 /* * TLB entries are defined by a "high" tag portion and a "low" data diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c index 742c247792c..ec485876d0d 100644 --- a/arch/microblaze/kernel/early_printk.c +++ b/arch/microblaze/kernel/early_printk.c @@ -175,6 +175,20 @@ void __init remap_early_printk(void) base_addr); base_addr = (u32) ioremap(base_addr, PAGE_SIZE); printk(KERN_CONT "0x%x\n", base_addr); + + /* + * Early console is on the top of skipped TLB entries + * decrease tlb_skip size ensure that hardcoded TLB entry will be + * used by generic algorithm + * FIXME check if early console mapping is on the top by rereading + * TLB entry and compare baseaddr + * mts rtlbx, (tlb_skip - 1) + * nop + * mfs rX, rtlblo + * nop + * cmp rX, orig_base_addr + */ + tlb_skip -= 1; } void __init disable_early_printk(void) diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S index 49dd48f9e6e..98b17f9f904 100644 --- a/arch/microblaze/kernel/head.S +++ b/arch/microblaze/kernel/head.S @@ -149,6 +149,7 @@ _copy_bram: _invalidate: mts rtlbx, r3 mts rtlbhi, r0 /* flush: ensure V is clear */ + mts rtlblo, r0 bgtid r3, _invalidate /* loop for all entries */ addik r3, r3, -1 /* sync */ @@ -224,8 +225,14 @@ tlb_end: andi r4,r4,0xfffffc00 /* Mask off the real page number */ ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ - /* TLB0 can be zeroes that's why we not setup it */ - beqi r9, jump_over + /* + * TLB0 is always used - check if is not zero (r9 stores TLB0 value) + * if is use TLB1 value and clear it (r10 stores TLB1 value) + */ + bnei r9, tlb0_not_zero + add r9, r10, r0 + add r10, r0, r0 +tlb0_not_zero: /* look at the code below */ ori r30, r0, 0x200 @@ -239,18 +246,21 @@ tlb_end: bneid r29, 1f addik r30, r30, 0x80 1: - ori r11, r30, 0 - andi r3,r3,0xfffffc00 /* Mask off the effective page number */ ori r3,r3,(TLB_VALID) - or r3, r3, r11 + or r3, r3, r30 - mts rtlbx,r0 /* TLB slow 0 */ + /* Load tlb_skip size value which is index to first unused TLB entry */ + lwi r11, r0, TOPHYS(tlb_skip) + mts rtlbx,r11 
/* TLB slow 0 */ mts rtlblo,r4 /* Load the data portion of the entry */ mts rtlbhi,r3 /* Load the tag portion of the entry */ -jump_over: + /* Increase tlb_skip size */ + addik r11, r11, 1 + swi r11, r0, TOPHYS(tlb_skip) + /* TLB1 can be zeroes that's why we not setup it */ beqi r10, jump_over2 @@ -266,27 +276,30 @@ jump_over: bneid r29, 1f addik r30, r30, 0x80 1: - ori r12, r30, 0 - addk r4, r4, r9 /* previous addr + TLB0 size */ addk r3, r3, r9 andi r3,r3,0xfffffc00 /* Mask off the effective page number */ ori r3,r3,(TLB_VALID) - or r3, r3, r12 + or r3, r3, r30 - ori r6,r0,1 /* TLB slot 1 */ - mts rtlbx,r6 + lwi r11, r0, TOPHYS(tlb_skip) + mts rtlbx, r11 /* r11 is used from TLB0 */ mts rtlblo,r4 /* Load the data portion of the entry */ mts rtlbhi,r3 /* Load the tag portion of the entry */ + /* Increase tlb_skip size */ + addik r11, r11, 1 + swi r11, r0, TOPHYS(tlb_skip) + jump_over2: /* * Load a TLB entry for LMB, since we need access to * the exception vectors, using a 4k real==virtual mapping. */ - ori r6,r0,3 /* TLB slot 3 */ + /* Use temporary TLB_ID for LMB - clear this temporary mapping later */ + ori r6, r0, MICROBLAZE_LMB_TLB_ID mts rtlbx,r6 ori r4,r0,(TLB_WR | TLB_EX) @@ -355,8 +368,7 @@ start_here: /* Load up the kernel context */ kernel_load_context: - # Keep entry 0 and 1 valid. Entry 3 mapped to LMB can go away. - ori r5,r0,3 + ori r5, r0, MICROBLAZE_LMB_TLB_ID mts rtlbx,r5 nop mts rtlbhi,r0 diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S index b7249f4215a..aa510f450ac 100644 --- a/arch/microblaze/kernel/hw_exception_handler.S +++ b/arch/microblaze/kernel/hw_exception_handler.S @@ -820,9 +820,15 @@ ex_handler_done: * Upon exit, we reload everything and RFI. * A common place to load the TLB. */ +.section .data +.align 4 +.global tlb_skip + tlb_skip: + .long MICROBLAZE_TLB_SKIP tlb_index: /* MS: storing last used tlb index */ - .long (MICROBLAZE_TLB_SKIP - 1) + .long MICROBLAZE_TLB_SIZE/2 +.previous finish_tlb_load: /* MS: load the last used TLB index. */ lwi r5, r0, TOPHYS(tlb_index) @@ -833,7 +839,7 @@ ex_handler_done: ori r6, r0, 1 cmp r31, r5, r6 blti r31, ex12 - addik r5, r6, MICROBLAZE_TLB_SKIP - 1 + lwi r5, r0, TOPHYS(tlb_skip) ex12: /* MS: save back current TLB index */ swi r5, r0, TOPHYS(tlb_index) diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S index c9090d7973f..1dafddeb8a0 100644 --- a/arch/microblaze/kernel/misc.S +++ b/arch/microblaze/kernel/misc.S @@ -29,16 +29,16 @@ .type _tlbia, @function .align 4; _tlbia: - addik r12, r0, MICROBLAZE_TLB_SIZE - 1 /* flush all entries (63 - 3) */ + lwi r12, r0, tlb_skip; /* isync */ _tlbia_1: mts rtlbx, r12 nop mts rtlbhi, r0 /* flush: ensure V is clear */ nop - addik r11, r12, -MICROBLAZE_TLB_SKIP + rsubi r11, r12, MICROBLAZE_TLB_SIZE - 1 bneid r11, _tlbia_1 /* loop for all entries */ - addik r12, r12, -1 + addik r12, r12, 1 /* sync */ rtsd r15, 8 nop @@ -75,7 +75,7 @@ early_console_reg_tlb_alloc: * Load a TLB entry for the UART, so that microblaze_progress() can use * the UARTs nice and early. We use a 4k real==virtual mapping. 
*/ - ori r4, r0, 63 + lwi r4, r0, tlb_skip mts rtlbx, r4 /* TLB slot 63 */ or r4,r5,r0 @@ -89,6 +89,11 @@ early_console_reg_tlb_alloc: nop mts rtlbhi,r5 /* Load the tag portion of the entry */ nop + + lwi r5, r0, tlb_skip + addik r5, r5, 1 + swi r5, r0, tlb_skip + rtsd r15, 8 nop diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c index a1fa2a5813b..e4f5956ee13 100644 --- a/arch/microblaze/kernel/setup.c +++ b/arch/microblaze/kernel/setup.c @@ -208,6 +208,19 @@ static int microblaze_debugfs_init(void) return of_debugfs_root == NULL; } arch_initcall(microblaze_debugfs_init); + +static int __init debugfs_tlb(void) +{ + struct dentry *d; + + if (!of_debugfs_root) + return -ENODEV; + + d = debugfs_create_u32("tlb_skip", S_IRUGO, of_debugfs_root, &tlb_skip); + if (!d) + return -ENOMEM; +} +device_initcall(debugfs_tlb); #endif static int dflt_bus_notify(struct notifier_block *nb, -- cgit v1.2.3-70-g09d2