author		Jeff Garzik <jgarzik@pobox.com>	2005-08-10 13:46:28 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-08-10 13:46:28 -0400
commit		2f058256cb64e346f4fb4499ff4e0f1c2791a4b4 (patch)
tree		91e06602f4d3abb6812ea8c9bc9ba4501e14c84e /arch/arm/mm
parent		0274aa2506fd2fe89a58dd6cd64d3b3f7b976af8 (diff)
parent		86b3786078d63242d3194ffc58ae8dae1d1bbef3 (diff)
Merge /spare/repo/linux-2.6/
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/Kconfig		2
-rw-r--r--	arch/arm/mm/blockops.c		3
-rw-r--r--	arch/arm/mm/fault.c		81
-rw-r--r--	arch/arm/mm/init.c		92
-rw-r--r--	arch/arm/mm/mm-armv.c		96
-rw-r--r--	arch/arm/mm/proc-arm1020.S	4
-rw-r--r--	arch/arm/mm/proc-arm1020e.S	4
-rw-r--r--	arch/arm/mm/proc-v6.S		8
-rw-r--r--	arch/arm/mm/proc-xscale.S	136
9 files changed, 139 insertions, 287 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 95606b4a3ba..afbbeb6f465 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -101,7 +101,7 @@ config CPU_ARM922T
 # ARM925T
 config CPU_ARM925T
-	bool "Support ARM925T processor" if ARCH_OMAP
+	bool "Support ARM925T processor" if ARCH_OMAP1
 	depends on ARCH_OMAP1510
 	default y if ARCH_OMAP1510
 	select CPU_32v4
diff --git a/arch/arm/mm/blockops.c b/arch/arm/mm/blockops.c
index 806c6eeb1b0..4f5ee2d0899 100644
--- a/arch/arm/mm/blockops.c
+++ b/arch/arm/mm/blockops.c
@@ -25,13 +25,14 @@ blk_flush_kern_dcache_page(void *kaddr)
 {
 	asm(
 	"add	r1, r0, %0						\n\
+	sub	r1, r1, %1						\n\
 1:	.word	0xec401f0e	@ mcrr	p15, 0, r0, r1, c14, 0	@ blocking	\n\
 	mov	r0, #0							\n\
 	mcr	p15, 0, r0, c7, c5, 0					\n\
 	mcr	p15, 0, r0, c7, c10, 4					\n\
 	mov	pc, lr"
 	:
-	: "I" (PAGE_SIZE));
+	: "I" (PAGE_SIZE), "I" (L1_CACHE_BYTES));
 }
 
 /*
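Note on the blockops.c hunk: subtracting one cache line from the end operand suggests the mcrr range operation treats the end address as inclusive, so flushing kaddr..kaddr+PAGE_SIZE would have touched one line beyond the page. A minimal userspace C sketch of the corrected arithmetic (the constants mirror common values and are assumptions here, not taken from the patch):

    #include <stdio.h>

    #define PAGE_SIZE      4096UL
    #define L1_CACHE_BYTES 32UL   /* assumed cache line size, for illustration */

    int main(void)
    {
        unsigned long kaddr = 0x8000;  /* hypothetical page-aligned address */

        /* inclusive end operand: start of the last cache line *inside* the page */
        unsigned long end = kaddr + PAGE_SIZE - L1_CACHE_BYTES;

        printf("flush %#lx..%#lx (inclusive)\n", kaddr, end);
        return 0;
    }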
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index e25b4fd8412..0b6c4db44e0 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -238,9 +238,9 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	up_read(&mm->mmap_sem);
 
 	/*
-	 * Handle the "normal" case first
+	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
 	 */
-	if (fault > 0)
+	if (fault >= VM_FAULT_MINOR)
 		return 0;
 
 	/*
@@ -261,7 +261,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		do_exit(SIGKILL);
 		return 0;
 
-	case 0:
+	case VM_FAULT_SIGBUS:
 		/*
 		 * We had some memory, but were unable to
 		 * successfully fix up this page fault.
@@ -372,49 +372,50 @@ do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 static struct fsr_info {
 	int	(*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
 	int	sig;
+	int	code;
 	const char *name;
 } fsr_info[] = {
 	/*
 	 * The following are the standard ARMv3 and ARMv4 aborts.  ARMv5
 	 * defines these to be "precise" aborts.
 	 */
-	{ do_bad,		SIGSEGV, "vector exception"		   },
-	{ do_bad,		SIGILL,	 "alignment exception"		   },
-	{ do_bad,		SIGKILL, "terminal exception"		   },
-	{ do_bad,		SIGILL,	 "alignment exception"		   },
-	{ do_bad,		SIGBUS,	 "external abort on linefetch"	   },
-	{ do_translation_fault,	SIGSEGV, "section translation fault"	   },
-	{ do_bad,		SIGBUS,	 "external abort on linefetch"	   },
-	{ do_page_fault,	SIGSEGV, "page translation fault"	   },
-	{ do_bad,		SIGBUS,	 "external abort on non-linefetch" },
-	{ do_bad,		SIGSEGV, "section domain fault"		   },
-	{ do_bad,		SIGBUS,	 "external abort on non-linefetch" },
-	{ do_bad,		SIGSEGV, "page domain fault"		   },
-	{ do_bad,		SIGBUS,	 "external abort on translation"   },
-	{ do_sect_fault,	SIGSEGV, "section permission fault"	   },
-	{ do_bad,		SIGBUS,	 "external abort on translation"   },
-	{ do_page_fault,	SIGSEGV, "page permission fault"	   },
+	{ do_bad,		SIGSEGV, 0,		"vector exception"		   },
+	{ do_bad,		SIGILL,	 BUS_ADRALN,	"alignment exception"		   },
+	{ do_bad,		SIGKILL, 0,		"terminal exception"		   },
+	{ do_bad,		SIGILL,	 BUS_ADRALN,	"alignment exception"		   },
+	{ do_bad,		SIGBUS,	 0,		"external abort on linefetch"	   },
+	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"section translation fault"	   },
+	{ do_bad,		SIGBUS,	 0,		"external abort on linefetch"	   },
+	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"page translation fault"	   },
+	{ do_bad,		SIGBUS,	 0,		"external abort on non-linefetch"  },
+	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"section domain fault"		   },
+	{ do_bad,		SIGBUS,	 0,		"external abort on non-linefetch"  },
+	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"page domain fault"		   },
+	{ do_bad,		SIGBUS,	 0,		"external abort on translation"	   },
+	{ do_sect_fault,	SIGSEGV, SEGV_ACCERR,	"section permission fault"	   },
+	{ do_bad,		SIGBUS,	 0,		"external abort on translation"	   },
+	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"page permission fault"		   },
 	/*
 	 * The following are "imprecise" aborts, which are signalled by bit
 	 * 10 of the FSR, and may not be recoverable.  These are only
 	 * supported if the CPU abort handler supports bit 10.
 	 */
-	{ do_bad,		SIGBUS,	 "unknown 16"			   },
-	{ do_bad,		SIGBUS,	 "unknown 17"			   },
-	{ do_bad,		SIGBUS,	 "unknown 18"			   },
-	{ do_bad,		SIGBUS,	 "unknown 19"			   },
-	{ do_bad,		SIGBUS,	 "lock abort"			   },	/* xscale */
-	{ do_bad,		SIGBUS,	 "unknown 21"			   },
-	{ do_bad,		SIGBUS,	 "imprecise external abort"	   },	/* xscale */
-	{ do_bad,		SIGBUS,	 "unknown 23"			   },
-	{ do_bad,		SIGBUS,	 "dcache parity error"		   },	/* xscale */
-	{ do_bad,		SIGBUS,	 "unknown 25"			   },
-	{ do_bad,		SIGBUS,	 "unknown 26"			   },
-	{ do_bad,		SIGBUS,	 "unknown 27"			   },
-	{ do_bad,		SIGBUS,	 "unknown 28"			   },
-	{ do_bad,		SIGBUS,	 "unknown 29"			   },
-	{ do_bad,		SIGBUS,	 "unknown 30"			   },
-	{ do_bad,		SIGBUS,	 "unknown 31"			   }
+	{ do_bad,		SIGBUS,	 0,		"unknown 16"			   },
+	{ do_bad,		SIGBUS,	 0,		"unknown 17"			   },
+	{ do_bad,		SIGBUS,	 0,		"unknown 18"			   },
+	{ do_bad,		SIGBUS,	 0,		"unknown 19"			   },
+	{ do_bad,		SIGBUS,	 0,		"lock abort"			   },	/* xscale */
+	{ do_bad,		SIGBUS,	 0,		"unknown 21"			   },
+	{ do_bad,		SIGBUS,	 BUS_OBJERR,	"imprecise external abort"	   },	/* xscale */
+	{ do_bad,		SIGBUS,	 0,		"unknown 23"			   },
+	{ do_bad,		SIGBUS,	 0,		"dcache parity error"		   },	/* xscale */
+	{ do_bad,		SIGBUS,	 0,		"unknown 25"			   },
+	{ do_bad,		SIGBUS,	 0,		"unknown 26"			   },
+	{ do_bad,		SIGBUS,	 0,		"unknown 27"			   },
+	{ do_bad,		SIGBUS,	 0,		"unknown 28"			   },
+	{ do_bad,		SIGBUS,	 0,		"unknown 29"			   },
+	{ do_bad,		SIGBUS,	 0,		"unknown 30"			   },
+	{ do_bad,		SIGBUS,	 0,		"unknown 31"			   }
 };
 
 void __init
@@ -435,15 +436,19 @@ asmlinkage void
 do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
 	const struct fsr_info *inf = fsr_info + (fsr & 15) + ((fsr & (1 << 10)) >> 6);
+	struct siginfo info;
 
 	if (!inf->fn(addr, fsr, regs))
 		return;
 
 	printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
 		inf->name, fsr, addr);
-	force_sig(inf->sig, current);
-	show_pte(current->mm, addr);
-	die_if_kernel("Oops", regs, 0);
+
+	info.si_signo = inf->sig;
+	info.si_errno = 0;
+	info.si_code = inf->code;
+	info.si_addr = (void __user *)addr;
+	notify_die("", regs, &info, fsr, 0);
 }
 
 asmlinkage void
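Note on the fault.c hunks: instead of sending a bare signal number, the handler now fills a struct siginfo, so userspace sees a meaningful si_code (SEGV_MAPERR vs. SEGV_ACCERR, BUS_ADRALN, ...) and the faulting address in si_addr — that is why the fsr_info table grows a code column. A minimal userspace sketch of the same population pattern (the fsr_entry struct is an illustrative stand-in, not kernel code):

    #include <signal.h>
    #include <string.h>

    /* hypothetical stand-in for one fsr_info[] entry */
    struct fsr_entry {
        int sig;            /* e.g. SIGSEGV */
        int code;           /* e.g. SEGV_MAPERR */
        const char *name;
    };

    static void fill_siginfo(siginfo_t *info, const struct fsr_entry *inf,
                             unsigned long addr)
    {
        memset(info, 0, sizeof(*info));
        info->si_signo = inf->sig;
        info->si_errno = 0;
        info->si_code  = inf->code;     /* the new 'code' column */
        info->si_addr  = (void *)addr;  /* faulting address for the handler */
    }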
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 72a2b8cee31..edffa47a4b2 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -93,14 +93,7 @@ struct node_info {
 };
 
 #define O_PFN_DOWN(x)	((x) >> PAGE_SHIFT)
-#define V_PFN_DOWN(x)	O_PFN_DOWN(__pa(x))
-
 #define O_PFN_UP(x)	(PAGE_ALIGN(x) >> PAGE_SHIFT)
-#define V_PFN_UP(x)	O_PFN_UP(__pa(x))
-
-#define PFN_SIZE(x)	((x) >> PAGE_SHIFT)
-#define PFN_RANGE(s,e)	PFN_SIZE(PAGE_ALIGN((unsigned long)(e)) - \
-				(((unsigned long)(s)) & PAGE_MASK))
 
 /*
  * FIXME: We really want to avoid allocating the bootmap bitmap
@@ -113,7 +106,7 @@ find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
 {
 	unsigned int start_pfn, bank, bootmap_pfn;
 
-	start_pfn   = V_PFN_UP(&_end);
+	start_pfn   = O_PFN_UP(__pa(&_end));
 	bootmap_pfn = 0;
 
 	for (bank = 0; bank < mi->nr_banks; bank ++) {
@@ -122,9 +115,9 @@ find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
 		if (mi->bank[bank].node != node)
 			continue;
 
-		start = O_PFN_UP(mi->bank[bank].start);
-		end   = O_PFN_DOWN(mi->bank[bank].size +
-				   mi->bank[bank].start);
+		start = mi->bank[bank].start >> PAGE_SHIFT;
+		end   = (mi->bank[bank].size +
+			 mi->bank[bank].start) >> PAGE_SHIFT;
 
 		if (end < start_pfn)
 			continue;
@@ -191,8 +184,8 @@ find_memend_and_nodes(struct meminfo *mi, struct node_info *np)
 		/*
 		 * Get the start and end pfns for this bank
 		 */
-		start = O_PFN_UP(mi->bank[i].start);
-		end   = O_PFN_DOWN(mi->bank[i].start + mi->bank[i].size);
+		start = mi->bank[i].start >> PAGE_SHIFT;
+		end   = (mi->bank[i].start + mi->bank[i].size) >> PAGE_SHIFT;
 
 		if (np[node].start > start)
 			np[node].start = start;
@@ -444,7 +437,7 @@ void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
 	memtable_init(mi);
 	if (mdesc->map_io)
 		mdesc->map_io();
-	flush_tlb_all();
+	local_flush_tlb_all();
 
 	/*
 	 * initialise the zones within each node
@@ -529,6 +522,69 @@ static inline void free_area(unsigned long addr, unsigned long end, char *s)
 		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
 }
 
+static inline void
+free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
+{
+	struct page *start_pg, *end_pg;
+	unsigned long pg, pgend;
+
+	/*
+	 * Convert start_pfn/end_pfn to a struct page pointer.
+	 */
+	start_pg = pfn_to_page(start_pfn);
+	end_pg = pfn_to_page(end_pfn);
+
+	/*
+	 * Convert to physical addresses, and
+	 * round start upwards and end downwards.
+	 */
+	pg = PAGE_ALIGN(__pa(start_pg));
+	pgend = __pa(end_pg) & PAGE_MASK;
+
+	/*
+	 * If there are free pages between these,
+	 * free the section of the memmap array.
+	 */
+	if (pg < pgend)
+		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
+}
+
+/*
+ * The mem_map array can get very big.  Free the unused area of the memory map.
+ */
+static void __init free_unused_memmap_node(int node, struct meminfo *mi)
+{
+	unsigned long bank_start, prev_bank_end = 0;
+	unsigned int i;
+
+	/*
+	 * [FIXME] This relies on each bank being in address order.  This
+	 * may not be the case, especially if the user has provided the
+	 * information on the command line.
+	 */
+	for (i = 0; i < mi->nr_banks; i++) {
+		if (mi->bank[i].size == 0 || mi->bank[i].node != node)
+			continue;
+
+		bank_start = mi->bank[i].start >> PAGE_SHIFT;
+		if (bank_start < prev_bank_end) {
+			printk(KERN_ERR "MEM: unordered memory banks.  "
+				"Not freeing memmap.\n");
+			break;
+		}
+
+		/*
+		 * If we had a previous bank, and there is a space
+		 * between the current bank and the previous, free it.
+		 */
+		if (prev_bank_end && prev_bank_end != bank_start)
+			free_memmap(node, prev_bank_end, bank_start);
+
+		prev_bank_end = (mi->bank[i].start +
+				 mi->bank[i].size) >> PAGE_SHIFT;
+	}
+}
+
 /*
  * mem_init() marks the free areas in the mem_map and tells us how much
  * memory is free.  This is done after various parts of the system have
@@ -547,16 +603,12 @@ void __init mem_init(void)
 	max_mapnr   = virt_to_page(high_memory) - mem_map;
 #endif
 
-	/*
-	 * We may have non-contiguous memory.
-	 */
-	if (meminfo.nr_banks != 1)
-		create_memmap_holes(&meminfo);
-
 	/* this will put all unused low memory onto the freelists */
 	for_each_online_node(node) {
 		pg_data_t *pgdat = NODE_DATA(node);
 
+		free_unused_memmap_node(node, &meminfo);
+
 		if (pgdat->node_spanned_pages != 0)
 			totalram_pages += free_all_bootmem_node(pgdat);
 	}
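Note on the init.c hunks: free_unused_memmap_node() (moved here from mm-armv.c, see the removal below) frees the portion of the struct page array covering holes between memory banks, which is why it insists on banks being sorted by address. A small self-contained sketch of the same walk, assuming a simplified bank layout (the values are illustrative, not from the kernel):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    struct bank { unsigned long start, size; };  /* physical address, bytes */

    int main(void)
    {
        /* hypothetical layout: two 64MB banks with a hole between them */
        struct bank banks[] = {
            { 0x00000000, 0x04000000 },
            { 0x08000000, 0x04000000 },
        };
        unsigned long prev_end = 0, start;
        unsigned int i;

        for (i = 0; i < 2; i++) {
            start = banks[i].start >> PAGE_SHIFT;
            if (start < prev_end) {
                printf("unordered banks - stop\n");  /* mirrors the KERN_ERR path */
                break;
            }
            /* the memmap covering pfns [prev_end, start) backs no real memory */
            if (prev_end && prev_end != start)
                printf("free memmap for pfns %lu..%lu\n", prev_end, start);
            prev_end = (banks[i].start + banks[i].size) >> PAGE_SHIFT;
        }
        return 0;
    }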
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 2c2b93d77d4..e33fe4229d0 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -169,7 +169,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 
 	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
 
+	/*
+	 * Copy over the kernel and IO PGD entries
+	 */
 	init_pgd = pgd_offset_k(0);
+	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
+		       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
+
+	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
 
 	if (!vectors_high()) {
 		/*
@@ -198,14 +205,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 		spin_unlock(&mm->page_table_lock);
 	}
 
-	/*
-	 * Copy over the kernel and IO PGD entries
-	 */
-	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
-		       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
-
-	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
-
 	return new_pgd;
 
 no_pte:
@@ -400,7 +399,7 @@ static void __init build_mem_type_table(void)
 		ecc_mask = 0;
 	}
 
-	if (cpu_arch <= CPU_ARCH_ARMv5) {
+	if (cpu_arch <= CPU_ARCH_ARMv5TEJ) {
 		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 			if (mem_types[i].prot_l1)
 				mem_types[i].prot_l1 |= PMD_BIT4;
@@ -426,6 +425,9 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+
+		mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
+		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
 	}
 
 	cp = &cache_policies[cachepolicy];
@@ -585,7 +587,7 @@ void setup_mm_for_reboot(char mode)
 		pmdval = (i << PGDIR_SHIFT) |
 			 PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
 			 PMD_TYPE_SECT;
-		if (cpu_arch <= CPU_ARCH_ARMv5)
+		if (cpu_arch <= CPU_ARCH_ARMv5TEJ)
 			pmdval |= PMD_BIT4;
 		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
 		pmd[0] = __pmd(pmdval);
@@ -683,7 +685,7 @@ void __init memtable_init(struct meminfo *mi)
 	}
 
 	flush_cache_all();
-	flush_tlb_all();
+	local_flush_tlb_all();
 
 	top_pmd = pmd_off_k(0xffff0000);
 }
@@ -698,75 +700,3 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 	for (i = 0; i < nr; i++)
 		create_mapping(io_desc + i);
 }
-
-static inline void
-free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
-{
-	struct page *start_pg, *end_pg;
-	unsigned long pg, pgend;
-
-	/*
-	 * Convert start_pfn/end_pfn to a struct page pointer.
-	 */
-	start_pg = pfn_to_page(start_pfn);
-	end_pg = pfn_to_page(end_pfn);
-
-	/*
-	 * Convert to physical addresses, and
-	 * round start upwards and end downwards.
-	 */
-	pg = PAGE_ALIGN(__pa(start_pg));
-	pgend = __pa(end_pg) & PAGE_MASK;
-
-	/*
-	 * If there are free pages between these,
-	 * free the section of the memmap array.
-	 */
-	if (pg < pgend)
-		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
-}
-
-static inline void free_unused_memmap_node(int node, struct meminfo *mi)
-{
-	unsigned long bank_start, prev_bank_end = 0;
-	unsigned int i;
-
-	/*
-	 * [FIXME] This relies on each bank being in address order.  This
-	 * may not be the case, especially if the user has provided the
-	 * information on the command line.
-	 */
-	for (i = 0; i < mi->nr_banks; i++) {
-		if (mi->bank[i].size == 0 || mi->bank[i].node != node)
-			continue;
-
-		bank_start = mi->bank[i].start >> PAGE_SHIFT;
-		if (bank_start < prev_bank_end) {
-			printk(KERN_ERR "MEM: unordered memory banks.  "
-				"Not freeing memmap.\n");
-			break;
-		}
-
-		/*
-		 * If we had a previous bank, and there is a space
-		 * between the current bank and the previous, free it.
-		 */
-		if (prev_bank_end && prev_bank_end != bank_start)
-			free_memmap(node, prev_bank_end, bank_start);
-
-		prev_bank_end = PAGE_ALIGN(mi->bank[i].start +
-					   mi->bank[i].size) >> PAGE_SHIFT;
-	}
-}
-
-/*
- * The mem_map array can get very big.  Free
- * the unused area of the memory map.
- */
-void __init create_memmap_holes(struct meminfo *mi)
-{
-	int node;
-
-	for_each_online_node(node)
-		free_unused_memmap_node(node, mi);
-}
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 1f325231b9e..5c0ae5260d1 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -445,14 +445,14 @@ __arm1020_setup:
 	/*
 	 *  R
 	 * .RVI ZFRS BLDP WCAM
-	 * .0.1 1001 ..11 0101	/* FIXME: why no V bit? */
+	 * .011 1001 ..11 0101
 	 */
 	.type	arm1020_cr1_clear, #object
 	.type	arm1020_cr1_set, #object
 arm1020_cr1_clear:
 	.word	0x593f
 arm1020_cr1_set:
-	.word	0x1935
+	.word	0x3935
 
 	__INITDATA
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 142a2c2d6f0..d69389c4d4b 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -427,14 +427,14 @@ __arm1020e_setup:
 	/*
 	 *  R
 	 * .RVI ZFRS BLDP WCAM
-	 * .0.1 1001 ..11 0101	/* FIXME: why no V bit? */
+	 * .011 1001 ..11 0101
 	 */
 	.type	arm1020e_cr1_clear, #object
 	.type	arm1020e_cr1_set, #object
 arm1020e_cr1_clear:
 	.word	0x5f3f
 arm1020e_cr1_set:
-	.word	0x1935
+	.word	0x3935
 
 	__INITDATA
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 0aa73d41478..352db98ee26 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -132,8 +132,8 @@ ENTRY(cpu_v6_switch_mm)
  *	100x   1   0   1	r/o	no acc
  *	10x0   1   0   1	r/o	no acc
  *	1011   0   0   1	r/w	no acc
- *	110x   1   1   0	r/o	r/o
- *	11x0   1   1   0	r/o	r/o
+ *	110x   0   1   0	r/w	r/o
+ *	11x0   0   1   0	r/w	r/o
  *	1111   0   1   1	r/w	r/w
  */
 ENTRY(cpu_v6_set_pte)
@@ -150,7 +150,7 @@ ENTRY(cpu_v6_set_pte)
 	tst	r1, #L_PTE_USER
 	orrne	r2, r2, #AP1 | nG
 	tstne	r2, #APX
-	eorne	r2, r2, #AP0
+	bicne	r2, r2, #APX | AP0
 
 	tst	r1, #L_PTE_YOUNG
 	biceq	r2, r2, #APX | AP1 | AP0
@@ -200,7 +200,7 @@ __v6_setup:
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
#ifdef CONFIG_VFP
 	mrc	p15, 0, r0, c1, c0, 2
-	orr	r0, r0, #(3 << 20)
+	orr	r0, r0, #(0xf << 20)
 	mcr	p15, 0, r0, c1, c0, 2		@ Enable full access to VFP
#endif
 	mrc	p15, 0, r0, c1, c0, 0		@ read control register
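Note on the arm1020/arm1020e hunks above: they resolve the old "why no V bit?" FIXME by adding bit 13 — V, the high-vectors bit, which relocates the exception vector base to 0xffff0000 — to the cr1 set mask. A quick illustrative check of the two masks against the .RVI ZFRS BLDP WCAM legend (plain C, not from the patch):

    #include <stdio.h>

    int main(void)
    {
        unsigned cr_old = 0x1935, cr_new = 0x3935;

        /* bit 13 is V: exception vectors at 0xffff0000 when set */
        printf("old V=%u, new V=%u, diff=%#x\n",
               (cr_old >> 13) & 1, (cr_new >> 13) & 1, cr_old ^ cr_new);
        return 0;   /* prints: old V=0, new V=1, diff=0x2000 */
    }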
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 2d977b4eeea..b88de270014 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -370,142 +370,6 @@ ENTRY(cpu_xscale_dcache_clean_area)
 	bhi	1b
 	mov	pc, lr
 
-/* ================================ CACHE LOCKING============================
- *
- * The XScale MicroArchitecture implements support for locking entries into
- * the data and instruction cache.  The following functions implement the core
- * low level instructions needed to accomplish the locking.  The developer's
- * manual states that the code that performs the locking must be in non-cached
- * memory.  To accomplish this, the code in xscale-cache-lock.c copies the
- * following functions from the cache into a non-cached memory region that
- * is allocated through consistent_alloc().
- *
- */
-	.align	5
-/*
- * xscale_icache_lock
- *
- * r0: starting address to lock
- * r1: end address to lock
- */
-ENTRY(xscale_icache_lock)
-
-iLockLoop:
-	bic	r0, r0, #CACHELINESIZE - 1
-	mcr	p15, 0, r0, c9, c1, 0	@ lock into cache
-	cmp	r0, r1			@ are we done?
-	add	r0, r0, #CACHELINESIZE	@ advance to next cache line
-	bls	iLockLoop
-	mov	pc, lr
-
-/*
- * xscale_icache_unlock
- */
-ENTRY(xscale_icache_unlock)
-	mcr	p15, 0, r0, c9, c1, 1	@ Unlock icache
-	mov	pc, lr
-
-/*
- * xscale_dcache_lock
- *
- * r0: starting address to lock
- * r1: end address to lock
- */
-ENTRY(xscale_dcache_lock)
-	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
-	mov	r2, #1
-	mcr	p15, 0, r2, c9, c2, 0		@ Put dcache in lock mode
-	cpwait	ip				@ Wait for completion
-
-	mrs	r2, cpsr
-	orr	r3, r2, #PSR_F_BIT | PSR_I_BIT
-dLockLoop:
-	msr	cpsr_c, r3
-	mcr	p15, 0, r0, c7, c10, 1		@ Write back line if it is dirty
-	mcr	p15, 0, r0, c7, c6, 1		@ Flush/invalidate line
-	msr	cpsr_c, r2
-	ldr	ip, [r0], #CACHELINESIZE	@ Preload 32 bytes into cache from
-						@ location [r0]. Post-increment
-						@ r3 to next cache line
-	cmp	r0, r1				@ Are we done?
-	bls	dLockLoop
-
-	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
-	mov	r2, #0
-	mcr	p15, 0, r2, c9, c2, 0		@ Get out of lock mode
-	cpwait_ret lr, ip
-
-/*
- * xscale_dcache_unlock
- */
-ENTRY(xscale_dcache_unlock)
-	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
-	mcr	p15, 0, ip, c9, c2, 1		@ Unlock cache
-	mov	pc, lr
-
-/*
- * Needed to determine the length of the code that needs to be copied.
- */
-	.align	5
-ENTRY(xscale_cache_dummy)
-	mov	pc, lr
-
-/* ================================ TLB LOCKING==============================
- *
- * The XScale MicroArchitecture implements support for locking entries into
- * the Instruction and Data TLBs.  The following functions provide the
- * low level support for supporting these under Linux.  xscale-lock.c
- * implements some higher level management code.  Most of the following
- * is taken straight out of the Developer's Manual.
- */
-
-/*
- * Lock I-TLB entry
- *
- * r0: Virtual address to translate and lock
- */
-	.align	5
-ENTRY(xscale_itlb_lock)
-	mrs	r2, cpsr
-	orr	r3, r2, #PSR_F_BIT | PSR_I_BIT
-	msr	cpsr_c, r3			@ Disable interrupts
-	mcr	p15, 0, r0, c8, c5, 1		@ Invalidate I-TLB entry
-	mcr	p15, 0, r0, c10, c4, 0		@ Translate and lock
-	msr	cpsr_c, r2			@ Restore interrupts
-	cpwait_ret lr, ip
-
-/*
- * Lock D-TLB entry
- *
- * r0: Virtual address to translate and lock
- */
-	.align	5
-ENTRY(xscale_dtlb_lock)
-	mrs	r2, cpsr
-	orr	r3, r2, #PSR_F_BIT | PSR_I_BIT
-	msr	cpsr_c, r3			@ Disable interrupts
-	mcr	p15, 0, r0, c8, c6, 1		@ Invalidate D-TLB entry
-	mcr	p15, 0, r0, c10, c8, 0		@ Translate and lock
-	msr	cpsr_c, r2			@ Restore interrupts
-	cpwait_ret lr, ip
-
-/*
- * Unlock all I-TLB entries
- */
-	.align	5
-ENTRY(xscale_itlb_unlock)
-	mcr	p15, 0, ip, c10, c4, 1		@ Unlock I-TLB
-	mcr	p15, 0, ip, c8, c5, 0		@ Invalidate I-TLB
-	cpwait_ret lr, ip
-
-/*
- * Unlock all D-TLB entries
- */
-ENTRY(xscale_dtlb_unlock)
-	mcr	p15, 0, ip, c10, c8, 1		@ Unlock D-TBL
-	mcr	p15, 0, ip, c8, c6, 0		@ Invalidate D-TLB
-	cpwait_ret lr, ip
-
 /* =============================== PageTable ============================== */
 
#define PTE_CACHE_WRITE_ALLOCATE 0
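Note on the proc-xscale.S removal: the dropped xscale_icache_lock walks the [r0, r1] range one cache line at a time, locking each line via cp15 register c9. Rendered as illustrative C — lock_line() is a hypothetical stand-in for the mcr, and the loop is slightly simplified (the original tests the pre-increment address, so its edge behavior can differ by one line):

    #define CACHELINESIZE 32UL    /* XScale cache line size */

    /* stand-in for: mcr p15, 0, addr, c9, c1, 0 (lock into icache) */
    extern void lock_line(unsigned long addr);

    static void icache_lock(unsigned long start, unsigned long end)
    {
        unsigned long addr = start & ~(CACHELINESIZE - 1);  /* bic: align down */

        /* lock every line whose start address is <= end (bls: lower or same) */
        for (; addr <= end; addr += CACHELINESIZE)
            lock_line(addr);
    }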