Diffstat (limited to 'arch/sh/kernel')
-rw-r--r-- | arch/sh/kernel/head_32.S | 241
1 file changed, 241 insertions, 0 deletions
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index e5d421db4c8..8ee31a0b973 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -3,6 +3,7 @@
  * arch/sh/kernel/head.S
  *
  * Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
+ * Copyright (C) 2010  Matt Fleming
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
@@ -84,6 +85,236 @@ ENTRY(_stext)
 	ldc	r0, r7_bank	! ... and initial thread_info
 #endif
 
+#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
+	/*
+	 * Reconfigure the initial PMB mappings set up by the hardware.
+	 *
+	 * When we boot in 32-bit MMU mode there are 2 PMB entries already
+	 * set up for us.
+	 *
+	 * Entry	VPN		PPN		V	SZ	C	UB	WT
+	 * ---------------------------------------------------------------
+	 *   0		0x80000000	0x00000000	1	512MB	1	0	1
+	 *   1		0xA0000000	0x00000000	1	512MB	0	0	0
+	 *
+	 * But we reprogram them here because we want complete control over
+	 * our address space and the initial mappings may not map PAGE_OFFSET
+	 * to __MEMORY_START (or even map all of our RAM).
+	 *
+	 * Once we've set up cached and uncached mappings for all of RAM we
+	 * clear the rest of the PMB entries.
+	 *
+	 * This clearing also deals with the fact that PMB entries can persist
+	 * across reboots. The PMB could have been left in any state when the
+	 * reboot occurred, so to be safe we clear all entries and start with
+	 * a clean slate.
+	 */
+
+	mov.l	.LMMUCR, r1	/* Flush the TLB */
+	mov.l	@r1, r0
+	or	#MMUCR_TI, r0
+	mov.l	r0, @r1
+
+	mov.l	.LMEMORY_SIZE, r5
+	mov	r5, r7
+
+	mov	#PMB_E_SHIFT, r0
+	mov	#0x1, r4
+	shld	r0, r4
+
+	mov.l	.LFIRST_DATA_ENTRY, r0
+	mov.l	.LPMB_DATA, r1
+	mov.l	.LFIRST_ADDR_ENTRY, r2
+	mov.l	.LPMB_ADDR, r3
+
+	mov	#0, r10
+
+	/*
+	 * r0 = PMB_DATA data field
+	 * r1 = PMB_DATA address field
+	 * r2 = PMB_ADDR data field
+	 * r3 = PMB_ADDR address field
+	 * r4 = PMB_E_SHIFT
+	 * r5 = remaining amount of RAM to map
+	 * r6 = PMB mapping size we're trying to use
+	 * r7 = cached_to_uncached
+	 * r8 = scratch register
+	 * r9 = scratch register
+	 * r10 = number of PMB entries we've set up
+	 */
+.L512:
+	mov	#(512 >> 4), r6
+	shll16	r6
+	shll8	r6
+
+	cmp/hi	r5, r6
+	bt	.L128
+
+	mov	#(PMB_SZ_512M >> 2), r9
+	shll2	r9
+
+	/*
+	 * Cached mapping
+	 */
+	mov	#PMB_C, r8
+	or	r0, r8
+	or	r9, r8
+	mov.l	r8, @r1
+	mov.l	r2, @r3
+
+	add	r4, r1		/* Increment to the next PMB_DATA entry */
+	add	r4, r3		/* Increment to the next PMB_ADDR entry */
+
+	add	#1, r10		/* Increment number of PMB entries */
+
+	/*
+	 * Uncached mapping
+	 */
+	mov	#(PMB_UB >> 8), r8
+	shll8	r8
+
+	or	r0, r8
+	or	r9, r8
+	mov.l	r8, @r1
+	mov	r2, r8
+	add	r7, r8
+	mov.l	r8, @r3
+
+	add	r4, r1		/* Increment to the next PMB_DATA entry */
+	add	r4, r3		/* Increment to the next PMB_ADDR entry */
+
+	add	#1, r10		/* Increment number of PMB entries */
+
+	sub	r6, r5
+	add	r6, r0
+	add	r6, r2
+
+	bra	.L512
+
+.L128:
+	mov	#(128 >> 4), r6
+	shll16	r6
+	shll8	r6
+
+	cmp/hi	r5, r6
+	bt	.L64
+
+	mov	#(PMB_SZ_128M >> 2), r9
+	shll2	r9
+
+	/*
+	 * Cached mapping
+	 */
+	mov	#PMB_C, r8
+	or	r0, r8
+	or	r9, r8
+	mov.l	r8, @r1
+	mov.l	r2, @r3
+
+	add	r4, r1		/* Increment to the next PMB_DATA entry */
+	add	r4, r3		/* Increment to the next PMB_ADDR entry */
+
+	add	#1, r10		/* Increment number of PMB entries */
+
+	/*
+	 * Uncached mapping
+	 */
+	mov	#(PMB_UB >> 8), r8
+	shll8	r8
+
+	or	r0, r8
+	or	r9, r8
+	mov.l	r8, @r1
+	mov	r2, r8
+	add	r7, r8
+	mov.l	r8, @r3
+
+	add	r4, r1		/* Increment to the next PMB_DATA entry */
+	add	r4, r3		/* Increment to the next PMB_ADDR entry */
+
+	add	#1, r10		/* Increment number of PMB entries */
+
+	sub	r6, r5
+	add	r6, r0
+	add	r6, r2
+
+	bra	.L128
+
+.L64:
+	mov	#(64 >> 4), r6
+	shll16	r6
+	shll8	r6
+
+	cmp/hi	r5, r6
+	bt	.Ldone
+
+	mov	#(PMB_SZ_64M >> 2), r9
+	shll2	r9
+
+	/*
+	 * Cached mapping
+	 */
+	mov	#PMB_C, r8
+	or	r0, r8
+	or	r9, r8
+	mov.l	r8, @r1
+	mov.l	r2, @r3
+
+	add	r4, r1		/* Increment to the next PMB_DATA entry */
+	add	r4, r3		/* Increment to the next PMB_ADDR entry */
+
+	add	#1, r10		/* Increment number of PMB entries */
+
+	/*
+	 * Uncached mapping
+	 */
+	mov	#(PMB_UB >> 8), r8
+	shll8	r8
+
+	or	r0, r8
+	or	r9, r8
+	mov.l	r8, @r1
+	mov	r2, r8
+	add	r7, r8
+	mov.l	r8, @r3
+
+	add	r4, r1		/* Increment to the next PMB_DATA entry */
+	add	r4, r3		/* Increment to the next PMB_ADDR entry */
+
+	add	#1, r10		/* Increment number of PMB entries */
+
+	sub	r6, r5
+	add	r6, r0
+	add	r6, r2
+
+	bra	.L64
+
+.Ldone:
+	/* Update cached_to_uncached */
+	mov.l	.Lcached_to_uncached, r0
+	mov.l	r7, @r0
+
+	/*
+	 * Clear the remaining PMB entries.
+	 *
+	 * r3 = entry to begin clearing from
+	 * r10 = number of entries we've set up so far
+	 */
+	mov	#0, r1
+	mov	#PMB_ENTRY_MAX, r0
+
+.Lagain:
+	mov.l	r1, @r3		/* Clear PMB_ADDR entry */
+	add	#1, r10		/* Increment the loop counter */
+	cmp/eq	r0, r10
+	bf/s	.Lagain
+	 add	r4, r3		/* Increment to the next PMB_ADDR entry */
+
+	mov.l	6f, r0
+	icbi	@r0
+
+#endif /* CONFIG_PMB && !CONFIG_PMB_LEGACY */
+
 #ifndef CONFIG_SH_NO_BSS_INIT
 	/*
 	 * Don't clear BSS if running on slow platforms such as an RTL simulation,
@@ -133,3 +364,13 @@ ENTRY(stack_start)
 5:	.long	start_kernel
 6:	.long	sh_cpu_init
 7:	.long	init_thread_union
+
+#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
+.LPMB_ADDR:		.long	PMB_ADDR
+.LPMB_DATA:		.long	PMB_DATA
+.LFIRST_ADDR_ENTRY:	.long	PAGE_OFFSET | PMB_V
+.LFIRST_DATA_ENTRY:	.long	__MEMORY_START | PMB_V
+.LMMUCR:		.long	MMUCR
+.Lcached_to_uncached:	.long	cached_to_uncached
+.LMEMORY_SIZE:		.long	__MEMORY_SIZE
+#endif
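
Note: the added block above is a greedy sizing loop. It walks RAM from PAGE_OFFSET, programming the largest PMB page size that still fits (512MB, then 128MB, then 64MB), writing one cached and one uncached entry per chunk (the uncached alias sits at the cached VPN plus cached_to_uncached, which the code initialises to __MEMORY_SIZE via "mov r5, r7"), and finally zeroes every PMB_ADDR slot it did not program. The following user-space C sketch illustrates that loop under stated assumptions: pmb_write_entry() is a hypothetical stand-in that only prints what the assembly would program, the 128MB RAM size and the PAGE_OFFSET/__MEMORY_START values are illustrative, and PMB_ENTRY_MAX is 16 as on SH-4A. It is not the kernel's implementation.

	#include <stdint.h>
	#include <stdio.h>

	#define PMB_ENTRY_MAX	16			/* SH-4A has 16 PMB entries */
	#define MB(x)		((uint32_t)(x) << 20)

	static int next_entry;				/* plays the role of r10 */

	/* Hypothetical stand-in for programming one PMB_DATA/PMB_ADDR pair. */
	static void pmb_write_entry(uint32_t vpn, uint32_t ppn, uint32_t size, int cached)
	{
		printf("entry %2d: VPN 0x%08x -> PPN 0x%08x, %3u MB, %s\n",
		       next_entry++, (unsigned)vpn, (unsigned)ppn,
		       (unsigned)(size >> 20), cached ? "cached" : "uncached");
	}

	int main(void)
	{
		const uint32_t sizes[] = { MB(512), MB(128), MB(64) };
		uint32_t remaining = MB(128);		/* stand-in for __MEMORY_SIZE (r5) */
		uint32_t vpn = 0x80000000u;		/* stand-in for PAGE_OFFSET (r2) */
		uint32_t ppn = 0x08000000u;		/* stand-in for __MEMORY_START (r0) */
		uint32_t cached_to_uncached = remaining; /* mirrors "mov r5, r7" */
		unsigned i = 0;

		while (i < 3 && remaining) {
			if (sizes[i] > remaining) {	/* cmp/hi r5, r6; bt <next size> */
				i++;
				continue;
			}
			/* One cached and one uncached entry per chunk, as in
			 * the .L512/.L128/.L64 bodies. */
			pmb_write_entry(vpn, ppn, sizes[i], 1);
			pmb_write_entry(vpn + cached_to_uncached, ppn, sizes[i], 0);
			remaining -= sizes[i];		/* sub r6, r5 */
			vpn += sizes[i];		/* add r6, r2 */
			ppn += sizes[i];		/* add r6, r0 */
		}

		/* Clear every entry we did not program, like the .Lagain loop. */
		for (int e = next_entry; e < PMB_ENTRY_MAX; e++)
			printf("entry %2d: cleared\n", e);

		return 0;
	}

With 128MB of RAM this prints one cached and one uncached 128MB entry followed by 14 cleared entries; any remainder smaller than 64MB is left unmapped, exactly as the assembly's cmp/hi checks leave it.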