| author | Paul Mundt <lethal@linux-sh.org> | 2010-02-18 18:35:20 +0900 |
|---|---|---|
| committer | Paul Mundt <lethal@linux-sh.org> | 2010-02-18 18:35:20 +0900 |
| commit | 77f36fcc035a5af19e95f50a2e648cda2a6ef2b9 (patch) | |
| tree | a183a3289807a83da9c11e0d2d722cec60fce5d9 /arch/sh/kernel | |
| parent | 838a4a9dcee0cbaeb0943531da00ac44d578f315 (diff) | |
| parent | d01447b3197c2c470a14666be2c640407bbbfec7 (diff) | |
Merge branch 'sh/pmb-dynamic'
Diffstat (limited to 'arch/sh/kernel')
-rw-r--r-- | arch/sh/kernel/cpu/init.c    | 21
-rw-r--r-- | arch/sh/kernel/cpu/sh4/sq.c  | 13
-rw-r--r-- | arch/sh/kernel/head_32.S     | 52
-rw-r--r-- | arch/sh/kernel/setup.c       |  2
-rw-r--r-- | arch/sh/kernel/traps_32.c    |  7
-rw-r--r-- | arch/sh/kernel/vmlinux.lds.S |  7
6 files changed, 74 insertions, 28 deletions
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 6311b0b1789..c736422344e 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -24,6 +24,7 @@
 #include <asm/elf.h>
 #include <asm/io.h>
 #include <asm/smp.h>
+#include <asm/sh_bios.h>
 
 #ifdef CONFIG_SH_FPU
 #define cpu_has_fpu	1
@@ -342,9 +343,21 @@ asmlinkage void __init sh_cpu_init(void)
 	speculative_execution_init();
 	expmask_init();
 
-	/*
-	 * Boot processor to setup the FP and extended state context info.
-	 */
-	if (raw_smp_processor_id() == 0)
+	/* Do the rest of the boot processor setup */
+	if (raw_smp_processor_id() == 0) {
+		/* Save off the BIOS VBR, if there is one */
+		sh_bios_vbr_init();
+
+		/*
+		 * Setup VBR for boot CPU. Secondary CPUs do this through
+		 * start_secondary().
+		 */
+		per_cpu_trap_init();
+
+		/*
+		 * Boot processor to setup the FP and extended state
+		 * context info.
+		 */
 		init_thread_xstate();
+	}
 }
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index 97aea9d69b0..fc065f9da6e 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -100,7 +100,7 @@ static inline void sq_mapping_list_del(struct sq_mapping *map)
 	spin_unlock_irq(&sq_mapping_lock);
 }
 
-static int __sq_remap(struct sq_mapping *map, unsigned long flags)
+static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
 {
 #if defined(CONFIG_MMU)
 	struct vm_struct *vma;
@@ -113,7 +113,7 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
 
 	if (ioremap_page_range((unsigned long)vma->addr,
 			       (unsigned long)vma->addr + map->size,
-			       vma->phys_addr, __pgprot(flags))) {
+			       vma->phys_addr, prot)) {
 		vunmap(vma->addr);
 		return -EAGAIN;
 	}
@@ -135,14 +135,14 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
  * @phys: Physical address of mapping.
  * @size: Length of mapping.
  * @name: User invoking mapping.
- * @flags: Protection flags.
+ * @prot: Protection bits.
  *
  * Remaps the physical address @phys through the next available store queue
  * address of @size length. @name is logged at boot time as well as through
  * the sysfs interface.
  */
 unsigned long sq_remap(unsigned long phys, unsigned int size,
-		       const char *name, unsigned long flags)
+		       const char *name, pgprot_t prot)
 {
 	struct sq_mapping *map;
 	unsigned long end;
@@ -177,7 +177,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size,
 
 	map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);
 
-	ret = __sq_remap(map, pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
+	ret = __sq_remap(map, prot);
 	if (unlikely(ret != 0))
 		goto out;
 
@@ -309,8 +309,7 @@ static ssize_t mapping_store(const char *buf, size_t count)
 		return -EIO;
 
 	if (likely(len)) {
-		int ret = sq_remap(base, len, "Userspace",
-				   pgprot_val(PAGE_SHARED));
+		int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
 		if (ret < 0)
 			return ret;
 	} else
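The sq_remap() prototype change is visible to every caller: protection bits are now handed over as a pgprot_t rather than a raw pgprot_val(), and __sq_remap() no longer ORs in PAGE_KERNEL_NOCACHE on the caller's behalf. A minimal caller-side sketch of the before/after follows; the function name, driver string, and header choices are illustrative assumptions, not code from this commit:

```c
/*
 * Illustrative sketch only: a hypothetical store-queue user adapting to
 * the new sq_remap() prototype. The function and "example" name are made
 * up; header locations are assumed from the SH tree of this era.
 */
#include <linux/kernel.h>
#include <asm/pgtable.h>	/* PAGE_SHARED */
#include <cpu/sq.h>		/* sq_remap() */

static unsigned long example_map_region(unsigned long phys, unsigned int len)
{
	/*
	 * Before: sq_remap(phys, len, "example", pgprot_val(PAGE_SHARED)),
	 * with PAGE_KERNEL_NOCACHE OR'd in by __sq_remap() internally.
	 *
	 * After: the caller passes the pgprot_t directly and owns the
	 * complete protection value.
	 */
	return sq_remap(phys, len, "example", PAGE_SHARED);
}
```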
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index 83f2b84b58d..fe0b743881b 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -85,7 +85,7 @@ ENTRY(_stext)
 	ldc	r0, r7_bank	! ... and initial thread_info
 #endif
 
-#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
+#ifdef CONFIG_PMB
 	/*
 	 * Reconfigure the initial PMB mappings setup by the hardware.
 	 *
@@ -139,7 +139,6 @@ ENTRY(_stext)
 	mov.l	r0, @r1
 
 	mov.l	.LMEMORY_SIZE, r5
-	mov	r5, r7
 
 	mov	#PMB_E_SHIFT, r0
 	mov	#0x1, r4
@@ -150,8 +149,43 @@ ENTRY(_stext)
 	mov.l	.LFIRST_ADDR_ENTRY, r2
 	mov.l	.LPMB_ADDR, r3
 
+	/*
+	 * First we need to walk the PMB and figure out if there are any
+	 * existing mappings that match the initial mappings VPN/PPN.
+	 * If these have already been established by the bootloader, we
+	 * don't bother setting up new entries here, and let the late PMB
+	 * initialization take care of things instead.
+	 *
+	 * Note that we may need to coalesce and merge entries in order
+	 * to reclaim more available PMB slots, which is much more than
+	 * we want to do at this early stage.
+	 */
+	mov	#0, r10
+	mov	#NR_PMB_ENTRIES, r9
+
+	mov	r1, r7		/* temporary PMB_DATA iter */
+
+.Lvalidate_existing_mappings:
+
+	mov.l	@r7, r8
+	and	r0, r8
+	cmp/eq	r0, r8		/* Check for valid __MEMORY_START mappings */
+	bt	.Lpmb_done
+
+	add	#1, r10		/* Increment the loop counter */
+	cmp/eq	r9, r10
+	bf/s	.Lvalidate_existing_mappings
+	 add	r4, r7		/* Increment to the next PMB_DATA entry */
+
+	/*
+	 * If we've fallen through, continue with setting up the initial
+	 * mappings.
+	 */
+
+	mov	r5, r7		/* cached_to_uncached */
 	mov	#0, r10
 
+#ifdef CONFIG_UNCACHED_MAPPING
 	/*
 	 * Uncached mapping
 	 */
@@ -171,6 +205,7 @@ ENTRY(_stext)
 	add	r4, r1
 	add	r4, r3
 	add	#1, r10
+#endif
 
 	/*
 	 * Iterate over all of the available sizes from largest to
@@ -216,6 +251,7 @@ ENTRY(_stext)
 	__PMB_ITER_BY_SIZE(64)
 	__PMB_ITER_BY_SIZE(16)
 
+#ifdef CONFIG_UNCACHED_MAPPING
 	/*
 	 * Now that we can access it, update cached_to_uncached and
 	 * uncached_size.
@@ -228,6 +264,7 @@ ENTRY(_stext)
 	shll16	r7
 	shll8	r7
 	mov.l	r7, @r0
+#endif
 
 	/*
 	 * Clear the remaining PMB entries.
@@ -236,7 +273,7 @@ ENTRY(_stext)
 	 * r10 = number of entries we've setup so far
 	 */
 	mov	#0, r1
-	mov	#PMB_ENTRY_MAX, r0
+	mov	#NR_PMB_ENTRIES, r0
 
 .Lagain:
 	mov.l	r1, @r3		/* Clear PMB_ADDR entry */
@@ -248,7 +285,8 @@ ENTRY(_stext)
 	mov.l	6f, r0
 	icbi	@r0
 
-#endif /* !CONFIG_PMB_LEGACY */
+.Lpmb_done:
+#endif /* CONFIG_PMB */
 
 #ifndef CONFIG_SH_NO_BSS_INIT
 	/*
@@ -300,13 +338,15 @@ ENTRY(stack_start)
6:	.long	sh_cpu_init
7:	.long	init_thread_union
 
-#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
+#ifdef CONFIG_PMB
 .LPMB_ADDR:		.long	PMB_ADDR
 .LPMB_DATA:		.long	PMB_DATA
 .LFIRST_ADDR_ENTRY:	.long	PAGE_OFFSET | PMB_V
 .LFIRST_DATA_ENTRY:	.long	__MEMORY_START | PMB_V
 .LMMUCR:		.long	MMUCR
+.LMEMORY_SIZE:		.long	__MEMORY_SIZE
+#ifdef CONFIG_UNCACHED_MAPPING
 .Lcached_to_uncached:	.long	cached_to_uncached
 .Luncached_size:	.long	uncached_size
-.LMEMORY_SIZE:		.long	__MEMORY_SIZE
+#endif
 #endif
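For readers who don't follow SH assembly, the new .Lvalidate_existing_mappings walk above boils down to scanning the PMB data array for an entry whose bits already cover the initial data entry, and branching to .Lpmb_done if one is found so the late PMB initialization can take over. The C-only sketch below is an illustration, not kernel code: the helper name is invented, the real code runs pre-MMU in early assembly, and the header locations and the use of __MEMORY_START | PMB_V as the compare value are assumptions based on the surrounding context:

```c
/*
 * Illustrative C sketch of the .Lvalidate_existing_mappings loop in
 * head_32.S. Helper name is made up; constants are assumed to come from
 * asm/mmu.h and asm/page.h as in the SH tree of this era.
 */
#include <asm/io.h>	/* __raw_readl() */
#include <asm/mmu.h>	/* PMB_DATA, PMB_E_SHIFT, NR_PMB_ENTRIES, PMB_V */
#include <asm/page.h>	/* __MEMORY_START */

static int __init pmb_boot_mapping_present(void)
{
	/* The value the loop compares against (r0 in the assembly),
	 * i.e. the initial data entry from .LFIRST_DATA_ENTRY. */
	unsigned long first_data = __MEMORY_START | PMB_V;
	unsigned int i;

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr = PMB_DATA + (i << PMB_E_SHIFT);
		unsigned long data = __raw_readl((void __iomem *)addr);

		/* "and r0, r8; cmp/eq r0, r8": all expected bits set? */
		if ((data & first_data) == first_data)
			return 1;	/* bootloader did it: branch to .Lpmb_done */
	}

	return 0;	/* fall through and program the initial mappings */
}
```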
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index e187750dd31..3459e70eed7 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -421,6 +421,8 @@ void __init setup_arch(char **cmdline_p)
 
 	parse_early_param();
 
+	uncached_init();
+
 	plat_early_device_setup();
 
 	/* Let earlyprintk output early console messages */
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 9c090cb6887..c3d86fa71dd 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -30,7 +30,6 @@
 #include <asm/alignment.h>
 #include <asm/fpu.h>
 #include <asm/kprobes.h>
-#include <asm/sh_bios.h>
 
 #ifdef CONFIG_CPU_SH2
 # define TRAP_RESERVED_INST	4
@@ -848,12 +847,6 @@ void __init trap_init(void)
 #ifdef TRAP_UBC
 	set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler);
 #endif
-
-	/* Save off the BIOS VBR, if there is one */
-	sh_bios_vbr_init();
-
-	/* Setup VBR for boot cpu */
-	per_cpu_trap_init();
 }
 
 void show_stack(struct task_struct *tsk, unsigned long *sp)
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 0e66c7b30e0..7f8a709c3ad 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -14,11 +14,10 @@ OUTPUT_ARCH(sh)
 #include <asm/cache.h>
 #include <asm/vmlinux.lds.h>
 
-#if defined(CONFIG_29BIT) || defined(CONFIG_SUPERH64) || \
-    defined(CONFIG_PMB_LEGACY)
- #define MEMORY_OFFSET	__MEMORY_START
+#ifdef CONFIG_PMB
+ #define MEMORY_OFFSET	0
 #else
- #define MEMORY_OFFSET	0
+ #define MEMORY_OFFSET	__MEMORY_START
 #endif
 
 ENTRY(_start)
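The vmlinux.lds.S hunk inverts the MEMORY_OFFSET selection: with dynamic PMB the image links at PAGE_OFFSET directly, while everything else (29-bit, sh64, and the removed PMB_LEGACY case) keeps the physical RAM offset in the virtual link address. A standalone, userspace-only illustration of the resulting link bases, using assumed example values for PAGE_OFFSET and __MEMORY_START and ignoring the additional zero-page offset the script applies:

```c
/*
 * Standalone illustration (plain C, not kernel code) of the link base
 * implied by the MEMORY_OFFSET change. The constants below are assumed
 * example values; real ones depend on the board and configuration.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long page_offset  = 0x80000000UL; /* assumed PAGE_OFFSET */
	const unsigned long memory_start = 0x0c000000UL; /* assumed __MEMORY_START */

	/* CONFIG_PMB: MEMORY_OFFSET is 0, the PMB supplies the translation,
	 * so the image links at PAGE_OFFSET itself. */
	printf("PMB (32-bit) link base: 0x%08lx\n", page_offset + 0);

	/* Otherwise (29-bit mode): the physical RAM offset is part of the
	 * fixed virtual mapping, so it shows up in the link base. */
	printf("29-bit link base:       0x%08lx\n", page_offset + memory_start);

	return 0;
}
```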