From f76348a360fe92063e07a8f54b0c1ea67f91e76c Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 24 Dec 2009 12:57:01 +0000 Subject: ARM: remove unnecessary cache flush This cache flush occurs when we first insert a page into the page tables, where a page did not exist previously. There can be no cache lines associated with this virtual mapping, so this cache flush is redundant. Tested-by: Mike Rapoport Tested-by: Mikael Pettersson Signed-off-by: Russell King --- arch/arm/mm/fault-armv.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index c9b97e9836a..82df01a72f4 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -133,8 +133,6 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, flush_dcache_mmap_unlock(mapping); if (aliases) do_adjust_pte(vma, addr, pfn, ptep); - else - flush_cache_page(vma, addr, pfn); } /* -- cgit v1.2.3-70-g09d2 From 8e797a7e4f588fb3b9cfe9860b00dcd3b14f8b60 Mon Sep 17 00:00:00 2001 From: Srinidhi Kasagar Date: Sat, 3 Apr 2010 19:10:45 +0100 Subject: ARM: 6027/1: ux500: enable l2x0 support This enables the l2x0 support and ensures that the secondary CPU can see the page table and secondary data at this point. Signed-off-by: srinidhi kasagar Acked-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/mach-ux500/cpu-u8500.c | 17 +++++++++++++++++ arch/arm/mach-ux500/platsmp.c | 3 ++- arch/arm/mm/Kconfig | 3 ++- 3 files changed, 21 insertions(+), 2 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mach-ux500/cpu-u8500.c b/arch/arm/mach-ux500/cpu-u8500.c index 5fb44661ada..979f1c32ad0 100644 --- a/arch/arm/mach-ux500/cpu-u8500.c +++ b/arch/arm/mach-ux500/cpu-u8500.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -127,6 +128,7 @@ static struct map_desc u8500_io_desc[] __initdata = { __IO_DEV_DESC(U8500_GPIO2_BASE, SZ_4K), __IO_DEV_DESC(U8500_GPIO3_BASE, SZ_4K), __IO_DEV_DESC(U8500_GPIO5_BASE, SZ_4K), + __IO_DEV_DESC(U8500_L2CC_BASE, SZ_4K), }; static struct map_desc u8500ed_io_desc[] __initdata = { @@ -183,3 +185,18 @@ static void __init u8500_timer_init(void) struct sys_timer u8500_timer = { .init = u8500_timer_init, }; + +#ifdef CONFIG_CACHE_L2X0 +static int u8500_l2x0_init(void) +{ + void __iomem *l2x0_base; + + l2x0_base = __io_address(U8500_L2CC_BASE); + + /* 64KB way size, 8 way associativity, force WA */ + l2x0_init(l2x0_base, 0x3e060000, 0xc0000fff); + + return 0; +} +early_initcall(u8500_l2x0_init); +#endif diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c index 8dfe7ca245d..76dd9354bd1 100644 --- a/arch/arm/mach-ux500/platsmp.c +++ b/arch/arm/mach-ux500/platsmp.c @@ -75,7 +75,8 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) * that it has been released by resetting pen_release. 
*/ pen_release = cpu; - flush_cache_all(); + __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release)); + outer_clean_range(__pa(&pen_release), __pa(&pen_release) + 1); timeout = jiffies + (1 * HZ); while (time_before(jiffies, timeout)) { diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index c4ed9f93f64..22c1c839306 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -754,7 +754,8 @@ config CACHE_FEROCEON_L2_WRITETHROUGH config CACHE_L2X0 bool "Enable the L2x0 outer cache controller" depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \ - REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4 + REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || \ + ARCH_NOMADIK || ARCH_OMAP4 || ARCH_U8500 default y select OUTER_CACHE help -- cgit v1.2.3-70-g09d2 From 4260415f6a3b92c5c986398d96c314df37a4ccbf Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 19 Apr 2010 10:15:03 +0100 Subject: ARM: fix build error in arch/arm/kernel/process.c /tmp/ccJ3ssZW.s: Assembler messages: /tmp/ccJ3ssZW.s:1952: Error: can't resolve `.text' {.text section} - `.LFB1077' This is caused because: .section .data .section .text .section .text .previous does not return us to the .text section, but the .data section; this makes use of .previous dangerous if the ordering of previous sections is not known. Fix up the other users of .previous; .pushsection and .popsection are a safer pairing to use than .section and .previous. Signed-off-by: Russell King --- arch/arm/include/asm/assembler.h | 12 ++++++------ arch/arm/include/asm/futex.h | 16 +++++++-------- arch/arm/include/asm/uaccess.h | 40 +++++++++++++++++++------------------- arch/arm/kernel/entry-armv.S | 10 +++++----- arch/arm/kernel/ftrace.c | 8 ++++---- arch/arm/kernel/process.c | 8 ++++---- arch/arm/lib/backtrace.S | 4 ++-- arch/arm/lib/clear_user.S | 4 ++-- arch/arm/lib/copy_from_user.S | 4 ++-- arch/arm/lib/copy_to_user.S | 4 ++-- arch/arm/lib/csumpartialcopyuser.S | 4 ++-- arch/arm/lib/getuser.S | 4 ++-- arch/arm/lib/putuser.S | 4 ++-- arch/arm/lib/strncpy_from_user.S | 4 ++-- arch/arm/lib/strnlen_user.S | 4 ++-- arch/arm/lib/uaccess.S | 8 ++++---- arch/arm/mm/alignment.c | 24 +++++++++++------------ arch/arm/mm/proc-sa1100.S | 2 +- arch/arm/nwfpe/entry.S | 8 ++++---- 19 files changed, 86 insertions(+), 86 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 00f46d9ce29..6e8f05c8a1c 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -149,10 +149,10 @@ #define USER(x...) 
\ 9999: x; \ - .section __ex_table,"a"; \ + .pushsection __ex_table,"a"; \ .align 3; \ .long 9999b,9001f; \ - .previous + .popsection /* * SMP data memory barrier @@ -193,10 +193,10 @@ .error "Unsupported inc macro argument" .endif - .section __ex_table,"a" + .pushsection __ex_table,"a" .align 3 .long 9999b, \abort - .previous + .popsection .endm .macro usracc, instr, reg, ptr, inc, cond, rept, abort @@ -234,10 +234,10 @@ .error "Unsupported inc macro argument" .endif - .section __ex_table,"a" + .pushsection __ex_table,"a" .align 3 .long 9999b, \abort - .previous + .popsection .endr .endm diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index bfcc15929a7..540a044153a 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h @@ -21,14 +21,14 @@ "2: strt %0, [%2]\n" \ " mov %0, #0\n" \ "3:\n" \ - " .section __ex_table,\"a\"\n" \ + " .pushsection __ex_table,\"a\"\n" \ " .align 3\n" \ " .long 1b, 4f, 2b, 4f\n" \ - " .previous\n" \ - " .section .fixup,\"ax\"\n" \ + " .popsection\n" \ + " .pushsection .fixup,\"ax\"\n" \ "4: mov %0, %4\n" \ " b 3b\n" \ - " .previous" \ + " .popsection" \ : "=&r" (ret), "=&r" (oldval) \ : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ : "cc", "memory") @@ -102,14 +102,14 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) " it eq @ explicit IT needed for the 2b label\n" "2: streqt %2, [%3]\n" "3:\n" - " .section __ex_table,\"a\"\n" + " .pushsection __ex_table,\"a\"\n" " .align 3\n" " .long 1b, 4f, 2b, 4f\n" - " .previous\n" - " .section .fixup,\"ax\"\n" + " .popsection\n" + " .pushsection .fixup,\"ax\"\n" "4: mov %0, %4\n" " b 3b\n" - " .previous" + " .popsection" : "=&r" (val) : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) : "cc", "memory"); diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 1d6bd40a432..33e4a48fe10 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -229,16 +229,16 @@ do { \ __asm__ __volatile__( \ "1: ldrbt %1,[%2]\n" \ "2:\n" \ - " .section .fixup,\"ax\"\n" \ + " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ "3: mov %0, %3\n" \ " mov %1, #0\n" \ " b 2b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ + " .popsection\n" \ + " .pushsection __ex_table,\"a\"\n" \ " .align 3\n" \ " .long 1b, 3b\n" \ - " .previous" \ + " .popsection" \ : "+r" (err), "=&r" (x) \ : "r" (addr), "i" (-EFAULT) \ : "cc") @@ -265,16 +265,16 @@ do { \ __asm__ __volatile__( \ "1: ldrt %1,[%2]\n" \ "2:\n" \ - " .section .fixup,\"ax\"\n" \ + " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ "3: mov %0, %3\n" \ " mov %1, #0\n" \ " b 2b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ + " .popsection\n" \ + " .pushsection __ex_table,\"a\"\n" \ " .align 3\n" \ " .long 1b, 3b\n" \ - " .previous" \ + " .popsection" \ : "+r" (err), "=&r" (x) \ : "r" (addr), "i" (-EFAULT) \ : "cc") @@ -310,15 +310,15 @@ do { \ __asm__ __volatile__( \ "1: strbt %1,[%2]\n" \ "2:\n" \ - " .section .fixup,\"ax\"\n" \ + " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ "3: mov %0, %3\n" \ " b 2b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ + " .popsection\n" \ + " .pushsection __ex_table,\"a\"\n" \ " .align 3\n" \ " .long 1b, 3b\n" \ - " .previous" \ + " .popsection" \ : "+r" (err) \ : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ : "cc") @@ -343,15 +343,15 @@ do { \ __asm__ __volatile__( \ "1: strt %1,[%2]\n" \ "2:\n" \ - " .section .fixup,\"ax\"\n" \ + " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ "3: mov %0, %3\n" \ " b 2b\n" \ - " 
.previous\n" \ - " .section __ex_table,\"a\"\n" \ + " .popsection\n" \ + " .pushsection __ex_table,\"a\"\n" \ " .align 3\n" \ " .long 1b, 3b\n" \ - " .previous" \ + " .popsection" \ : "+r" (err) \ : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ : "cc") @@ -371,16 +371,16 @@ do { \ THUMB( "1: strt " __reg_oper1 ", [%1]\n" ) \ THUMB( "2: strt " __reg_oper0 ", [%1, #4]\n" ) \ "3:\n" \ - " .section .fixup,\"ax\"\n" \ + " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ "4: mov %0, %3\n" \ " b 3b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ + " .popsection\n" \ + " .pushsection __ex_table,\"a\"\n" \ " .align 3\n" \ " .long 1b, 4b\n" \ " .long 2b, 4b\n" \ - " .previous" \ + " .popsection" \ : "+r" (err), "+r" (__pu_addr) \ : "r" (x), "i" (-EFAULT) \ : "cc") diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 6c5cf369183..e6a0fb0f392 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -523,16 +523,16 @@ ENDPROC(__und_usr) /* * The out of line fixup for the ldrt above. */ - .section .fixup, "ax" + .pushsection .fixup, "ax" 4: mov pc, r9 - .previous - .section __ex_table,"a" + .popsection + .pushsection __ex_table,"a" .long 1b, 4b #if __LINUX_ARM_ARCH__ >= 7 .long 2b, 4b .long 3b, 4b #endif - .previous + .popsection /* * Check whether the instruction is a co-processor instruction. @@ -679,7 +679,7 @@ do_fpe: .data ENTRY(fp_enter) .word no_fp - .previous + .text ENTRY(no_fp) mov pc, lr diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index c6384276622..0298286ad4a 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -62,15 +62,15 @@ int ftrace_modify_code(unsigned long pc, unsigned char *old_code, " movne %0, #2 \n" "3:\n" - ".section .fixup, \"ax\"\n" + ".pushsection .fixup, \"ax\"\n" "4: mov %0, #1 \n" " b 3b \n" - ".previous\n" + ".popsection\n" - ".section __ex_table, \"a\"\n" + ".pushsection __ex_table, \"a\"\n" " .long 1b, 4b \n" " .long 2b, 4b \n" - ".previous\n" + ".popsection\n" : "=r"(err), "=r"(replaced) : "r"(pc), "r"(new), "r"(old), "0"(err), "1"(replaced) diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 0e12e0acbf2..acf5e6fdb6d 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -355,7 +355,7 @@ EXPORT_SYMBOL(dump_fpu); * the thread function, and r3 points to the exit function. */ extern void kernel_thread_helper(void); -asm( ".section .text\n" +asm( ".pushsection .text\n" " .align\n" " .type kernel_thread_helper, #function\n" "kernel_thread_helper:\n" @@ -363,11 +363,11 @@ asm( ".section .text\n" " mov lr, r3\n" " mov pc, r2\n" " .size kernel_thread_helper, . - kernel_thread_helper\n" -" .previous"); +" .popsection"); #ifdef CONFIG_ARM_UNWIND extern void kernel_thread_exit(long code); -asm( ".section .text\n" +asm( ".pushsection .text\n" " .align\n" " .type kernel_thread_exit, #function\n" "kernel_thread_exit:\n" @@ -377,7 +377,7 @@ asm( ".section .text\n" " nop\n" " .fnend\n" " .size kernel_thread_exit, . 
- kernel_thread_exit\n" -" .previous"); +" .popsection"); #else #define kernel_thread_exit do_exit #endif diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S index aaf7220d9e3..a673297b0cf 100644 --- a/arch/arm/lib/backtrace.S +++ b/arch/arm/lib/backtrace.S @@ -110,13 +110,13 @@ no_frame: ldmfd sp!, {r4 - r8, pc} ENDPROC(__backtrace) ENDPROC(c_backtrace) - .section __ex_table,"a" + .pushsection __ex_table,"a" .align 3 .long 1001b, 1006b .long 1002b, 1006b .long 1003b, 1006b .long 1004b, 1006b - .previous + .popsection #define instr r4 #define reg r5 diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S index 1279abd8b88..5e3f99620c0 100644 --- a/arch/arm/lib/clear_user.S +++ b/arch/arm/lib/clear_user.S @@ -46,8 +46,8 @@ USER( strnebt r2, [r0]) ldmfd sp!, {r1, pc} ENDPROC(__clear_user) - .section .fixup,"ax" + .pushsection .fixup,"ax" .align 0 9001: ldmfd sp!, {r0, pc} - .previous + .popsection diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S index e4fe124aced..66a477a3e3c 100644 --- a/arch/arm/lib/copy_from_user.S +++ b/arch/arm/lib/copy_from_user.S @@ -90,7 +90,7 @@ ENTRY(__copy_from_user) ENDPROC(__copy_from_user) - .section .fixup,"ax" + .pushsection .fixup,"ax" .align 0 copy_abort_preamble ldmfd sp!, {r1, r2} @@ -100,5 +100,5 @@ ENDPROC(__copy_from_user) bl __memzero ldr r0, [sp], #4 copy_abort_end - .previous + .popsection diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S index 1a71e158444..027b69bdbad 100644 --- a/arch/arm/lib/copy_to_user.S +++ b/arch/arm/lib/copy_to_user.S @@ -94,12 +94,12 @@ WEAK(__copy_to_user) ENDPROC(__copy_to_user) - .section .fixup,"ax" + .pushsection .fixup,"ax" .align 0 copy_abort_preamble ldmfd sp!, {r1, r2, r3} sub r0, r0, r1 rsb r0, r0, r2 copy_abort_end - .previous + .popsection diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S index fd0e9dcd9fd..59ff6fdc1e6 100644 --- a/arch/arm/lib/csumpartialcopyuser.S +++ b/arch/arm/lib/csumpartialcopyuser.S @@ -68,7 +68,7 @@ * so properly, we would have to add in whatever registers were loaded before * the fault, which, with the current asm above is not predictable. 
*/ - .section .fixup,"ax" + .pushsection .fixup,"ax" .align 4 9001: mov r4, #-EFAULT ldr r5, [fp, #4] @ *err_ptr @@ -80,4 +80,4 @@ strneb r0, [r1], #1 bne 9002b load_regs - .previous + .popsection diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S index a1814d92712..b1631a7dbe7 100644 --- a/arch/arm/lib/getuser.S +++ b/arch/arm/lib/getuser.S @@ -64,9 +64,9 @@ __get_user_bad: mov pc, lr ENDPROC(__get_user_bad) -.section __ex_table, "a" +.pushsection __ex_table, "a" .long 1b, __get_user_bad .long 2b, __get_user_bad .long 3b, __get_user_bad .long 4b, __get_user_bad -.previous +.popsection diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S index 02fedbf07c0..5a01a23c6c0 100644 --- a/arch/arm/lib/putuser.S +++ b/arch/arm/lib/putuser.S @@ -81,11 +81,11 @@ __put_user_bad: mov pc, lr ENDPROC(__put_user_bad) -.section __ex_table, "a" +.pushsection __ex_table, "a" .long 1b, __put_user_bad .long 2b, __put_user_bad .long 3b, __put_user_bad .long 4b, __put_user_bad .long 5b, __put_user_bad .long 6b, __put_user_bad -.previous +.popsection diff --git a/arch/arm/lib/strncpy_from_user.S b/arch/arm/lib/strncpy_from_user.S index 1c9814f346c..f202d7bd164 100644 --- a/arch/arm/lib/strncpy_from_user.S +++ b/arch/arm/lib/strncpy_from_user.S @@ -33,11 +33,11 @@ ENTRY(__strncpy_from_user) mov pc, lr ENDPROC(__strncpy_from_user) - .section .fixup,"ax" + .pushsection .fixup,"ax" .align 0 9001: mov r3, #0 strb r3, [r0, #0] @ null terminate mov r0, #-EFAULT mov pc, lr - .previous + .popsection diff --git a/arch/arm/lib/strnlen_user.S b/arch/arm/lib/strnlen_user.S index 7855b290665..0ecbb459c4f 100644 --- a/arch/arm/lib/strnlen_user.S +++ b/arch/arm/lib/strnlen_user.S @@ -33,8 +33,8 @@ ENTRY(__strnlen_user) mov pc, lr ENDPROC(__strnlen_user) - .section .fixup,"ax" + .pushsection .fixup,"ax" .align 0 9001: mov r0, #0 mov pc, lr - .previous + .popsection diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S index ffdd27498ce..fee9f6f88ad 100644 --- a/arch/arm/lib/uaccess.S +++ b/arch/arm/lib/uaccess.S @@ -279,10 +279,10 @@ USER( strgtbt r3, [r0], #1) @ May fault b .Lc2u_finished ENDPROC(__copy_to_user) - .section .fixup,"ax" + .pushsection .fixup,"ax" .align 0 9001: ldmfd sp!, {r0, r4 - r7, pc} - .previous + .popsection /* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n); * Purpose : copy a block from user memory to kernel memory @@ -545,7 +545,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault b .Lcfu_finished ENDPROC(__copy_from_user) - .section .fixup,"ax" + .pushsection .fixup,"ax" .align 0 /* * We took an exception. 
r0 contains a pointer to @@ -559,5 +559,5 @@ ENDPROC(__copy_from_user) blne __memzero mov r0, r4 ldmfd sp!, {r4 - r7, pc} - .previous + .popsection diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index edddd66faac..a2ab51fa73e 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -166,15 +166,15 @@ union offset_union { THUMB( "1: "ins" %1, [%2]\n" ) \ THUMB( " add %2, %2, #1\n" ) \ "2:\n" \ - " .section .fixup,\"ax\"\n" \ + " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ "3: mov %0, #1\n" \ " b 2b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ + " .popsection\n" \ + " .pushsection __ex_table,\"a\"\n" \ " .align 3\n" \ " .long 1b, 3b\n" \ - " .previous\n" \ + " .popsection\n" \ : "=r" (err), "=&r" (val), "=r" (addr) \ : "0" (err), "2" (addr)) @@ -226,16 +226,16 @@ union offset_union { " mov %1, %1, "NEXT_BYTE"\n" \ "2: "ins" %1, [%2]\n" \ "3:\n" \ - " .section .fixup,\"ax\"\n" \ + " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ "4: mov %0, #1\n" \ " b 3b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ + " .popsection\n" \ + " .pushsection __ex_table,\"a\"\n" \ " .align 3\n" \ " .long 1b, 4b\n" \ " .long 2b, 4b\n" \ - " .previous\n" \ + " .popsection\n" \ : "=r" (err), "=&r" (v), "=&r" (a) \ : "0" (err), "1" (v), "2" (a)); \ if (err) \ @@ -266,18 +266,18 @@ union offset_union { " mov %1, %1, "NEXT_BYTE"\n" \ "4: "ins" %1, [%2]\n" \ "5:\n" \ - " .section .fixup,\"ax\"\n" \ + " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ "6: mov %0, #1\n" \ " b 5b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ + " .popsection\n" \ + " .pushsection __ex_table,\"a\"\n" \ " .align 3\n" \ " .long 1b, 6b\n" \ " .long 2b, 6b\n" \ " .long 3b, 6b\n" \ " .long 4b, 6b\n" \ - " .previous\n" \ + " .popsection\n" \ : "=r" (err), "=&r" (v), "=&r" (a) \ : "0" (err), "1" (v), "2" (a)); \ if (err) \ diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index ee7700242c1..5c47760c206 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S @@ -45,7 +45,7 @@ ENTRY(cpu_sa1100_proc_init) mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland mov pc, lr - .previous + .section .text /* * cpu_sa1100_proc_fin() diff --git a/arch/arm/nwfpe/entry.S b/arch/arm/nwfpe/entry.S index 48bca0db460..cafa1835433 100644 --- a/arch/arm/nwfpe/entry.S +++ b/arch/arm/nwfpe/entry.S @@ -111,12 +111,12 @@ next: @ to fault. Emit the appropriate exception gunk to fix things up. @ ??? For some reason, faults can happen at .Lx2 even with a @ plain LDR instruction. Weird, but it seems harmless. 
- .section .fixup,"ax" + .pushsection .fixup,"ax" .align 2 .Lfix: mov pc, r9 @ let the user eat segfaults - .previous + .popsection - .section __ex_table,"a" + .pushsection __ex_table,"a" .align 3 .long .Lx1, .Lfix - .previous + .popsection -- cgit v1.2.3-70-g09d2 From a3be6327163c223c1715c8307a616941fbf8bf73 Mon Sep 17 00:00:00 2001 From: Hans Ulli Kroll Date: Tue, 27 Apr 2010 12:45:10 +0200 Subject: ARM: Gemini: fix compiler error in copypage-fa.c Fix compiler error in copypage-fs.c missing struct vm_area_struct *vma in function fa_copy_user_highpage Signed-off-by: Hans Ulli Kroll --- arch/arm/mm/copypage-fa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c index b2a6008b011..d2852e1635b 100644 --- a/arch/arm/mm/copypage-fa.c +++ b/arch/arm/mm/copypage-fa.c @@ -40,7 +40,7 @@ fa_copy_user_page(void *kto, const void *kfrom) } void fa_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr) + unsigned long vaddr, struct vm_area_struct *vma) { void *kto, *kfrom; -- cgit v1.2.3-70-g09d2 From fef88f10767cfd9f9b4eebb5d5490214c5e13ad5 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 28 Feb 2010 17:26:25 +0000 Subject: ARM: Add Versatile Express CA9x4 processor support Signed-off-by: Russell King --- arch/arm/mach-vexpress/Kconfig | 5 + arch/arm/mach-vexpress/Makefile | 1 + arch/arm/mach-vexpress/ct-ca9x4.c | 215 +++++++++++++++++++++++++ arch/arm/mach-vexpress/include/mach/ct-ca9x4.h | 43 +++++ arch/arm/mm/Kconfig | 3 +- 5 files changed, 266 insertions(+), 1 deletion(-) create mode 100644 arch/arm/mach-vexpress/ct-ca9x4.c create mode 100644 arch/arm/mach-vexpress/include/mach/ct-ca9x4.h (limited to 'arch/arm/mm') diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig index 751858c97e6..3f19b660a16 100644 --- a/arch/arm/mach-vexpress/Kconfig +++ b/arch/arm/mach-vexpress/Kconfig @@ -1,4 +1,9 @@ menu "Versatile Express platform type" depends on ARCH_VEXPRESS +config ARCH_VEXPRESS_CA9X4 + bool "Versatile Express Cortex-A9x4 tile" + select CPU_V7 + select ARM_GIC + endmenu diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile index b47cf732981..3c5e1609fc4 100644 --- a/arch/arm/mach-vexpress/Makefile +++ b/arch/arm/mach-vexpress/Makefile @@ -3,3 +3,4 @@ # obj-y := v2m.o +obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c new file mode 100644 index 00000000000..5a0449c6f50 --- /dev/null +++ b/arch/arm/mach-vexpress/ct-ca9x4.c @@ -0,0 +1,215 @@ +/* + * Versatile Express Core Tile Cortex A9x4 Support + */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include +#include + +#include "core.h" + +#include + +#define V2M_PA_CS7 0x10000000 + +static struct map_desc ct_ca9x4_io_desc[] __initdata = { + { + .virtual = __MMIO_P2V(CT_CA9X4_MPIC), + .pfn = __phys_to_pfn(CT_CA9X4_MPIC), + .length = SZ_16K, + .type = MT_DEVICE, + }, { + .virtual = __MMIO_P2V(CT_CA9X4_SP804_TIMER), + .pfn = __phys_to_pfn(CT_CA9X4_SP804_TIMER), + .length = SZ_4K, + .type = MT_DEVICE, + }, { + .virtual = __MMIO_P2V(CT_CA9X4_L2CC), + .pfn = __phys_to_pfn(CT_CA9X4_L2CC), + .length = SZ_4K, + .type = MT_DEVICE, + }, +}; + +static void __init ct_ca9x4_map_io(void) +{ + v2m_map_io(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc)); +} + +void __iomem *gic_cpu_base_addr; + +static void __init 
ct_ca9x4_init_irq(void) +{ + gic_cpu_base_addr = MMIO_P2V(A9_MPCORE_GIC_CPU); + gic_dist_init(0, MMIO_P2V(A9_MPCORE_GIC_DIST), 29); + gic_cpu_init(0, gic_cpu_base_addr); +} + +#if 0 +static void ct_ca9x4_timer_init(void) +{ + writel(0, MMIO_P2V(CT_CA9X4_TIMER0) + TIMER_CTRL); + writel(0, MMIO_P2V(CT_CA9X4_TIMER1) + TIMER_CTRL); + + sp804_clocksource_init(MMIO_P2V(CT_CA9X4_TIMER1)); + sp804_clockevents_init(MMIO_P2V(CT_CA9X4_TIMER0), IRQ_CT_CA9X4_TIMER0); +} + +static struct sys_timer ct_ca9x4_timer = { + .init = ct_ca9x4_timer_init, +}; +#endif + +static struct clcd_panel xvga_panel = { + .mode = { + .name = "XVGA", + .refresh = 60, + .xres = 1024, + .yres = 768, + .pixclock = 15384, + .left_margin = 168, + .right_margin = 8, + .upper_margin = 29, + .lower_margin = 3, + .hsync_len = 144, + .vsync_len = 6, + .sync = 0, + .vmode = FB_VMODE_NONINTERLACED, + }, + .width = -1, + .height = -1, + .tim2 = TIM2_BCD | TIM2_IPC, + .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1), + .bpp = 16, +}; + +static void ct_ca9x4_clcd_enable(struct clcd_fb *fb) +{ + v2m_cfg_write(SYS_CFG_MUXFPGA | SYS_CFG_SITE_DB1, 0); + v2m_cfg_write(SYS_CFG_DVIMODE | SYS_CFG_SITE_DB1, 2); +} + +static int ct_ca9x4_clcd_setup(struct clcd_fb *fb) +{ + unsigned long framesize = 1024 * 768 * 2; + dma_addr_t dma; + + fb->panel = &xvga_panel; + + fb->fb.screen_base = dma_alloc_writecombine(&fb->dev->dev, framesize, + &dma, GFP_KERNEL); + if (!fb->fb.screen_base) { + printk(KERN_ERR "CLCD: unable to map frame buffer\n"); + return -ENOMEM; + } + fb->fb.fix.smem_start = dma; + fb->fb.fix.smem_len = framesize; + + return 0; +} + +static int ct_ca9x4_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma) +{ + return dma_mmap_writecombine(&fb->dev->dev, vma, fb->fb.screen_base, + fb->fb.fix.smem_start, fb->fb.fix.smem_len); +} + +static void ct_ca9x4_clcd_remove(struct clcd_fb *fb) +{ + dma_free_writecombine(&fb->dev->dev, fb->fb.fix.smem_len, + fb->fb.screen_base, fb->fb.fix.smem_start); +} + +static struct clcd_board ct_ca9x4_clcd_data = { + .name = "CT-CA9X4", + .check = clcdfb_check, + .decode = clcdfb_decode, + .enable = ct_ca9x4_clcd_enable, + .setup = ct_ca9x4_clcd_setup, + .mmap = ct_ca9x4_clcd_mmap, + .remove = ct_ca9x4_clcd_remove, +}; + +static AMBA_DEVICE(clcd, "ct:clcd", CT_CA9X4_CLCDC, &ct_ca9x4_clcd_data); +static AMBA_DEVICE(dmc, "ct:dmc", CT_CA9X4_DMC, NULL); +static AMBA_DEVICE(smc, "ct:smc", CT_CA9X4_SMC, NULL); +static AMBA_DEVICE(gpio, "ct:gpio", CT_CA9X4_GPIO, NULL); + +static struct amba_device *ct_ca9x4_amba_devs[] __initdata = { + &clcd_device, + &dmc_device, + &smc_device, + &gpio_device, +}; + + +static long ct_round(struct clk *clk, unsigned long rate) +{ + return rate; +} + +static int ct_set(struct clk *clk, unsigned long rate) +{ + return v2m_cfg_write(SYS_CFG_OSC | SYS_CFG_SITE_DB1 | 1, rate); +} + +static const struct clk_ops osc1_clk_ops = { + .round = ct_round, + .set = ct_set, +}; + +static struct clk osc1_clk = { + .ops = &osc1_clk_ops, + .rate = 24000000, +}; + +static struct clk_lookup lookups[] = { + { /* CLCD */ + .dev_id = "ct:clcd", + .clk = &osc1_clk, + }, +}; + +static void ct_ca9x4_init(void) +{ + int i; + +#ifdef CONFIG_CACHE_L2X0 + l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff); +#endif + + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); + + for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++) + amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource); +} + +MACHINE_START(VEXPRESS, "ARM-Versatile Express CA9x4") + .phys_io = V2M_UART0, + .io_pg_offst = 
(__MMIO_P2V(V2M_UART0) >> 18) & 0xfffc, + .boot_params = PHYS_OFFSET + 0x00000100, + .map_io = ct_ca9x4_map_io, + .init_irq = ct_ca9x4_init_irq, +#if 0 + .timer = &ct_ca9x4_timer, +#else + .timer = &v2m_timer, +#endif + .init_machine = ct_ca9x4_init, +MACHINE_END diff --git a/arch/arm/mach-vexpress/include/mach/ct-ca9x4.h b/arch/arm/mach-vexpress/include/mach/ct-ca9x4.h new file mode 100644 index 00000000000..10718e654c6 --- /dev/null +++ b/arch/arm/mach-vexpress/include/mach/ct-ca9x4.h @@ -0,0 +1,43 @@ +#ifndef __MACH_CT_CA9X4_H +#define __MACH_CT_CA9X4_H + +/* + * Physical base addresses + */ +#define CT_CA9X4_CLCDC (0x10020000) +#define CT_CA9X4_AXIRAM (0x10060000) +#define CT_CA9X4_DMC (0x100e0000) +#define CT_CA9X4_SMC (0x100e1000) +#define CT_CA9X4_SCC (0x100e2000) +#define CT_CA9X4_SP804_TIMER (0x100e4000) +#define CT_CA9X4_SP805_WDT (0x100e5000) +#define CT_CA9X4_TZPC (0x100e6000) +#define CT_CA9X4_GPIO (0x100e8000) +#define CT_CA9X4_FASTAXI (0x100e9000) +#define CT_CA9X4_SLOWAXI (0x100ea000) +#define CT_CA9X4_TZASC (0x100ec000) +#define CT_CA9X4_CORESIGHT (0x10200000) +#define CT_CA9X4_MPIC (0x1e000000) +#define CT_CA9X4_SYSTIMER (0x1e004000) +#define CT_CA9X4_SYSWDT (0x1e007000) +#define CT_CA9X4_L2CC (0x1e00a000) + +#define CT_CA9X4_TIMER0 (CT_CA9X4_SP804_TIMER + 0x000) +#define CT_CA9X4_TIMER1 (CT_CA9X4_SP804_TIMER + 0x020) + +#define A9_MPCORE_SCU (CT_CA9X4_MPIC + 0x0000) +#define A9_MPCORE_GIC_CPU (CT_CA9X4_MPIC + 0x0100) +#define A9_MPCORE_GIT (CT_CA9X4_MPIC + 0x0200) +#define A9_MPCORE_GIC_DIST (CT_CA9X4_MPIC + 0x1000) + +/* + * Interrupts. Those in {} are for AMBA devices + */ +#define IRQ_CT_CA9X4_CLCDC { 76 } +#define IRQ_CT_CA9X4_DMC { -1 } +#define IRQ_CT_CA9X4_SMC { 77, 78 } +#define IRQ_CT_CA9X4_TIMER0 80 +#define IRQ_CT_CA9X4_TIMER1 81 +#define IRQ_CT_CA9X4_GPIO { 82 } + +#endif diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index c4ed9f93f64..270c8e2a449 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -754,7 +754,8 @@ config CACHE_FEROCEON_L2_WRITETHROUGH config CACHE_L2X0 bool "Enable the L2x0 outer cache controller" depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \ - REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4 + REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || \ + ARCH_NOMADIK || ARCH_OMAP4 || ARCH_VEXPRESS_CA9X4 default y select OUTER_CACHE help -- cgit v1.2.3-70-g09d2 From e220ba60223a9d63e70217e5b112160df8c21cea Mon Sep 17 00:00:00 2001 From: Dave Estes Date: Tue, 11 Aug 2009 17:58:49 -0400 Subject: arm: mm: qsd8x50: Fix incorrect permission faults Handle incorrectly reported permission faults for qsd8650. On permission faults, retry MVA to PA conversion. If retry detects translation fault. Report as translation fault. Cc: Jamie Lokier Signed-off-by: Dave Estes --- arch/arm/mm/Kconfig | 2 ++ arch/arm/mm/abort-ev7.S | 21 +++++++++++++++++++++ 2 files changed, 23 insertions(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 5bd7c89a604..d3022dabb4a 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -572,6 +572,8 @@ config CPU_TLB_V6 config CPU_TLB_V7 bool +config VERIFY_PERMISSION_FAULT + bool endif config CPU_HAS_ASID diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S index 2e6dc040c65..ec88b157d3b 100644 --- a/arch/arm/mm/abort-ev7.S +++ b/arch/arm/mm/abort-ev7.S @@ -29,5 +29,26 @@ ENTRY(v7_early_abort) * V6 code adjusts the returned DFSR. 
* New designs should not need to patch up faults. */ + +#if defined(CONFIG_VERIFY_PERMISSION_FAULT) + /* + * Detect erroneous permission failures and fix + */ + ldr r3, =0x40d @ On permission fault + and r3, r1, r3 + cmp r3, #0x0d + movne pc, lr + + mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR + isb + mrc p15, 0, r2, c7, c4, 0 @ Read the PAR + and r3, r2, #0x7b @ On translation fault + cmp r3, #0x0b + movne pc, lr + bic r1, r1, #0xf @ Fix up FSR FS[5:0] + and r2, r2, #0x7e + orr r1, r1, r2, LSR #1 +#endif + mov pc, lr ENDPROC(v7_early_abort) -- cgit v1.2.3-70-g09d2 From ea056df7965fc46cfff28fd3808bf3ada23d5059 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 4 May 2010 17:27:43 +0100 Subject: ARM: 6093/1: Fix kernel memory printing for sparsemem The show_mem() and mem_init() function are assuming that the page map is contiguous and calculates the start and end page of a bank using (map + pfn). This fails with SPARSEMEM where pfn_to_page() must be used. Tested-by: Will Deacon Tested-by: Marek Vasut Signed-off-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/mm/init.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 83db12a68d5..0ed29bfeba1 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -86,9 +86,6 @@ void show_mem(void) printk("Mem-info:\n"); show_free_areas(); for_each_online_node(node) { - pg_data_t *n = NODE_DATA(node); - struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn; - for_each_nodebank (i,mi,node) { struct membank *bank = &mi->bank[i]; unsigned int pfn1, pfn2; @@ -97,8 +94,8 @@ void show_mem(void) pfn1 = bank_pfn_start(bank); pfn2 = bank_pfn_end(bank); - page = map + pfn1; - end = map + pfn2; + page = pfn_to_page(pfn1); + end = pfn_to_page(pfn2 - 1) + 1; do { total++; @@ -603,9 +600,6 @@ void __init mem_init(void) reserved_pages = free_pages = 0; for_each_online_node(node) { - pg_data_t *n = NODE_DATA(node); - struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn; - for_each_nodebank(i, &meminfo, node) { struct membank *bank = &meminfo.bank[i]; unsigned int pfn1, pfn2; @@ -614,8 +608,8 @@ void __init mem_init(void) pfn1 = bank_pfn_start(bank); pfn2 = bank_pfn_end(bank); - page = map + pfn1; - end = map + pfn2; + page = pfn_to_page(pfn1); + end = pfn_to_page(pfn2 - 1) + 1; do { if (PageReserved(page)) -- cgit v1.2.3-70-g09d2 From b1a9ceb2e003aab7b96e30d990c1092453a0ea44 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 6 May 2010 15:14:09 +0100 Subject: ARM: 6105/1: Fix the __arm_ioremap_caller() definition in nommu.c Commit 31aa8fd6 introduced the __arm_ioremap_caller() function but the nommu.c version did not have the _caller suffix. 
Signed-off-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/mm/nommu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 9bfeb6b9509..f8791eed759 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -87,8 +87,8 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, } EXPORT_SYMBOL(__arm_ioremap); -void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, - unsigned int mtype, void *caller) +void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, + unsigned int mtype, void *caller) { return __arm_ioremap(phys_addr, size, mtype); } -- cgit v1.2.3-70-g09d2 From b5a07faadeb4e0cfd6dcee359e501d4755cab875 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 6 May 2010 15:15:28 +0100 Subject: ARM: 6106/1: Implement copy_to_user_page() for noMMU Commit 7959722 introduced calls to copy_(to|from)_user_page() from access_process_vm() in mm/nommu.c. The copy_to_user_page() was not implemented on noMMU ARM. Signed-off-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/mm/nommu.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index f8791eed759..33b327379f0 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -65,6 +65,15 @@ void flush_dcache_page(struct page *page) } EXPORT_SYMBOL(flush_dcache_page); +void copy_to_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long uaddr, void *dst, const void *src, + unsigned long len) +{ + memcpy(dst, src, len); + if (vma->vm_flags & VM_EXEC) + __cpuc_coherent_user_range(uaddr, uaddr + len); +} + void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, unsigned int mtype) { -- cgit v1.2.3-70-g09d2 From f4d6477f7f073b99220386d62f5bf54bec3482cc Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 7 May 2010 16:26:24 +0100 Subject: ARM: 6111/1: Implement read/write for ownership in the ARMv6 DMA cache ops The Snoop Control Unit on the ARM11MPCore hardware does not detect the cache operations and the dma_cache_maint*() functions may leave stale cache entries on other CPUs. The solution implemented in this patch performs a Read or Write For Ownership in the ARMv6 DMA cache maintenance functions. These LDR/STR instructions change the cache line state to shared or exclusive so that the cache maintenance operation has the desired effect. Tested-by: George G. 
Davis Signed-off-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/mm/cache-v6.S | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 9d89c67a1cc..e46ecd84713 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -211,6 +211,9 @@ v6_dma_inv_range: mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line #endif 1: +#ifdef CONFIG_SMP + str r0, [r0] @ write for ownership +#endif #ifdef HARVARD_CACHE mcr p15, 0, r0, c7, c6, 1 @ invalidate D line #else @@ -231,6 +234,9 @@ v6_dma_inv_range: v6_dma_clean_range: bic r0, r0, #D_CACHE_LINE_SIZE - 1 1: +#ifdef CONFIG_SMP + ldr r2, [r0] @ read for ownership +#endif #ifdef HARVARD_CACHE mcr p15, 0, r0, c7, c10, 1 @ clean D line #else @@ -251,6 +257,10 @@ v6_dma_clean_range: ENTRY(v6_dma_flush_range) bic r0, r0, #D_CACHE_LINE_SIZE - 1 1: +#ifdef CONFIG_SMP + ldr r2, [r0] @ read for ownership + str r2, [r0] @ write for ownership +#endif #ifdef HARVARD_CACHE mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line #else @@ -273,7 +283,9 @@ ENTRY(v6_dma_map_area) add r1, r1, r0 teq r2, #DMA_FROM_DEVICE beq v6_dma_inv_range - b v6_dma_clean_range + teq r2, #DMA_TO_DEVICE + beq v6_dma_clean_range + b v6_dma_flush_range ENDPROC(v6_dma_map_area) /* @@ -283,9 +295,6 @@ ENDPROC(v6_dma_map_area) * - dir - DMA direction */ ENTRY(v6_dma_unmap_area) - add r1, r1, r0 - teq r2, #DMA_TO_DEVICE - bne v6_dma_inv_range mov pc, lr ENDPROC(v6_dma_unmap_area) -- cgit v1.2.3-70-g09d2 From b8349b569aae661dea9d59d7d2ee587ccea3336c Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 7 May 2010 18:03:05 +0100 Subject: ARM: 6112/1: Use the Inner Shareable I-cache and BTB ops on ARMv7 SMP The standard I-cache Invalidate All (ICIALLU) and Branch Predication Invalidate All (BPIALL) operations are not automatically broadcast to the other CPUs in an ARMv7 MP system. The patch adds the Inner Shareable variants, ICIALLUIS and BPIALLIS, if ARMv7 and SMP. 
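For reference (illustrative only, mirroring the hunks below), the Inner Shareable forms differ from the existing local operations only in the CRm field of the cp15 encoding:

	/* local CPU only: ICIALLU, invalidate entire I-cache to PoU */
	asm("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
	/* ARMv7 MP: ICIALLUIS, broadcast within the inner shareable domain */
	asm("mcr p15, 0, %0, c7, c1, 0" : : "r" (0));

The branch predictor operation changes in the same way: BPIALL (c7, c5, 6) becomes BPIALLIS (c7, c1, 6).
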
Signed-off-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/include/asm/cacheflush.h | 4 ++++ arch/arm/include/asm/tlbflush.h | 29 ++++++++++++++++++++++++++++- arch/arm/mm/cache-v7.S | 4 ++++ arch/arm/mm/tlb-v7.S | 8 ++++++++ 4 files changed, 44 insertions(+), 1 deletion(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 0d08d4170b6..4656a24058d 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -371,6 +371,10 @@ static inline void __flush_icache_all(void) #ifdef CONFIG_ARM_ERRATA_411920 extern void v6_icache_inval_all(void); v6_icache_inval_all(); +#elif defined(CONFIG_SMP) && __LINUX_ARM_ARCH__ >= 7 + asm("mcr p15, 0, %0, c7, c1, 0 @ invalidate I-cache inner shareable\n" + : + : "r" (0)); #else asm("mcr p15, 0, %0, c7, c5, 0 @ invalidate I-cache\n" : diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index e085e2c545e..bd863d8608c 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -46,6 +46,9 @@ #define TLB_V7_UIS_FULL (1 << 20) #define TLB_V7_UIS_ASID (1 << 21) +/* Inner Shareable BTB operation (ARMv7 MP extensions) */ +#define TLB_V7_IS_BTB (1 << 22) + #define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */ #define TLB_DCLEAN (1 << 30) #define TLB_WB (1 << 31) @@ -183,7 +186,7 @@ #endif #ifdef CONFIG_SMP -#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \ +#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \ TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID) #else #define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \ @@ -339,6 +342,12 @@ static inline void local_flush_tlb_all(void) dsb(); isb(); } + if (tlb_flag(TLB_V7_IS_BTB)) { + /* flush the branch target cache */ + asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); + dsb(); + isb(); + } } static inline void local_flush_tlb_mm(struct mm_struct *mm) @@ -376,6 +385,12 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); dsb(); } + if (tlb_flag(TLB_V7_IS_BTB)) { + /* flush the branch target cache */ + asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); + dsb(); + isb(); + } } static inline void @@ -416,6 +431,12 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); dsb(); } + if (tlb_flag(TLB_V7_IS_BTB)) { + /* flush the branch target cache */ + asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); + dsb(); + isb(); + } } static inline void local_flush_tlb_kernel_page(unsigned long kaddr) @@ -454,6 +475,12 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr) dsb(); isb(); } + if (tlb_flag(TLB_V7_IS_BTB)) { + /* flush the branch target cache */ + asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); + dsb(); + isb(); + } } /* diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index bcd64f26587..06a90dcfc60 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -167,7 +167,11 @@ ENTRY(v7_coherent_user_range) cmp r0, r1 blo 1b mov r0, #0 +#ifdef CONFIG_SMP + mcr p15, 0, r0, c7, c1, 6 @ invalidate BTB Inner Shareable +#else mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB +#endif dsb isb mov pc, lr diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S index 0cb1848bd87..f3f288a9546 100644 --- a/arch/arm/mm/tlb-v7.S +++ b/arch/arm/mm/tlb-v7.S @@ -50,7 +50,11 @@ ENTRY(v7wbi_flush_user_tlb_range) cmp r0, r1 blo 1b mov ip, #0 +#ifdef CONFIG_SMP + mcr p15, 
0, ip, c7, c1, 6 @ flush BTAC/BTB Inner Shareable +#else mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB +#endif dsb mov pc, lr ENDPROC(v7wbi_flush_user_tlb_range) @@ -79,7 +83,11 @@ ENTRY(v7wbi_flush_kern_tlb_range) cmp r0, r1 blo 1b mov r2, #0 +#ifdef CONFIG_SMP + mcr p15, 0, r2, c7, c1, 6 @ flush BTAC/BTB Inner Shareable +#else mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB +#endif dsb isb mov pc, lr -- cgit v1.2.3-70-g09d2 From 66b196475031c748a5861390a4fadb915e14ccdc Mon Sep 17 00:00:00 2001 From: Haojian Zhuang Date: Wed, 28 Apr 2010 10:59:45 -0400 Subject: [ARM] mmp: enable L2 in mmp2 Enable Tauros2 L2 in mmp2. Tauros2 L2 is shared in Marvell ARM cores. Signed-off-by: Haojian Zhuang Signed-off-by: Eric Miao --- arch/arm/configs/mmp2_defconfig | 2 ++ arch/arm/mach-mmp/mmp2.c | 5 +++++ arch/arm/mm/Kconfig | 2 +- 3 files changed, 8 insertions(+), 1 deletion(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/configs/mmp2_defconfig b/arch/arm/configs/mmp2_defconfig index 03f76cfc941..db8936370b6 100644 --- a/arch/arm/configs/mmp2_defconfig +++ b/arch/arm/configs/mmp2_defconfig @@ -246,6 +246,8 @@ CONFIG_ARM_THUMB=y # CONFIG_CPU_ICACHE_DISABLE is not set # CONFIG_CPU_DCACHE_DISABLE is not set # CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_OUTER_CACHE=y +CONFIG_CACHE_TAUROS2=y CONFIG_ARM_L1_CACHE_SHIFT=5 # CONFIG_ARM_ERRATA_411920 is not set CONFIG_COMMON_CLKDEV=y diff --git a/arch/arm/mach-mmp/mmp2.c b/arch/arm/mach-mmp/mmp2.c index cca39929110..e236ec0c54f 100644 --- a/arch/arm/mach-mmp/mmp2.c +++ b/arch/arm/mach-mmp/mmp2.c @@ -15,6 +15,8 @@ #include #include +#include + #include #include #include @@ -99,6 +101,9 @@ static struct clk_lookup mmp2_clkregs[] = { static int __init mmp2_init(void) { if (cpu_is_mmp2()) { +#ifdef CONFIG_CACHE_TAUROS2 + tauros2_init(); +#endif mfp_init_base(MFPR_VIRT_BASE); mfp_init_addr(mmp2_addr_map); clkdev_add_table(ARRAY_AND_SIZE(mmp2_clkregs)); diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 5bd7c89a604..69891260238 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -769,7 +769,7 @@ config CACHE_L2X0 config CACHE_TAUROS2 bool "Enable the Tauros2 L2 cache controller" - depends on ARCH_DOVE + depends on (ARCH_DOVE || ARCH_MMP) default y select OUTER_CACHE help -- cgit v1.2.3-70-g09d2 From 0741b7d2699d1e7b4ceab27522c9e5518b2dbd2c Mon Sep 17 00:00:00 2001 From: Vasily Khoruzhick Date: Tue, 11 May 2010 09:55:10 +0300 Subject: ARM: RX1950: Add suspend/resume support for RX1950 Signed-off-by: Vasily Khoruzhick Signed-off-by: Ben Dooks --- arch/arm/mach-s3c2410/pm.c | 4 ++-- arch/arm/mm/mmu.c | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mach-s3c2410/pm.c b/arch/arm/mach-s3c2410/pm.c index 966119c8efe..6a61486ca75 100644 --- a/arch/arm/mach-s3c2410/pm.c +++ b/arch/arm/mach-s3c2410/pm.c @@ -60,10 +60,10 @@ static void s3c2410_pm_prepare(void) __raw_writel(calc, phys_to_virt(H1940_SUSPEND_CHECKSUM)); } - /* the RX3715 uses similar code and the same H1940 and the + /* RX3715 and RX1950 use similar to H1940 code and the * same offsets for resume and checksum pointers */ - if (machine_is_rx3715()) { + if (machine_is_rx3715() || machine_is_rx1950()) { void *base = phys_to_virt(H1940_SUSPEND_CHECK); unsigned long ptr; unsigned long calc = 0; diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 241c24a1c18..45a1bc275f0 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -869,9 +869,10 @@ void __init reserve_node_zero(pg_data_t *pgdat) if (machine_is_p720t()) res_size = 0x00014000; - 
/* H1940 and RX3715 need to reserve this for suspend */ + /* H1940, RX3715 and RX1950 need to reserve this for suspend */ - if (machine_is_h1940() || machine_is_rx3715()) { + if (machine_is_h1940() || machine_is_rx3715() + || machine_is_rx1950()) { reserve_bootmem_node(pgdat, 0x30003000, 0x1000, BOOTMEM_DEFAULT); reserve_bootmem_node(pgdat, 0x30081000, 0x1000, -- cgit v1.2.3-70-g09d2 From b7072c63c1b0611042ba6ecf0152a33c7b806e67 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Sun, 2 May 2010 12:40:35 +0300 Subject: ARM: convert /proc/cpu/aligment to seq_file Convert code away from ->read_proc/->write_proc interfaces. Switch to proc_create()/proc_create_data() which makes addition of proc entries reliable wrt NULL ->proc_fops, NULL ->data and so on. Problem with ->read_proc et al is described here commit 786d7e1612f0b0adb6046f19b906609e4fe8b1ba "Fix rmmod/read/write races in /proc entries" This patch is part of an effort to remove the old simple procfs PAGE_SIZE buffer interface. Signed-off-by: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Russell King --- arch/arm/mm/alignment.c | 53 ++++++++++++++++++++++++------------------------- 1 file changed, 26 insertions(+), 27 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index edddd66faac..28b7c277619 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -94,36 +95,29 @@ static const char *usermode_action[] = { "signal+warn" }; -static int -proc_alignment_read(char *page, char **start, off_t off, int count, int *eof, - void *data) +static int alignment_proc_show(struct seq_file *m, void *v) { - char *p = page; - int len; - - p += sprintf(p, "User:\t\t%lu\n", ai_user); - p += sprintf(p, "System:\t\t%lu\n", ai_sys); - p += sprintf(p, "Skipped:\t%lu\n", ai_skipped); - p += sprintf(p, "Half:\t\t%lu\n", ai_half); - p += sprintf(p, "Word:\t\t%lu\n", ai_word); + seq_printf(m, "User:\t\t%lu\n", ai_user); + seq_printf(m, "System:\t\t%lu\n", ai_sys); + seq_printf(m, "Skipped:\t%lu\n", ai_skipped); + seq_printf(m, "Half:\t\t%lu\n", ai_half); + seq_printf(m, "Word:\t\t%lu\n", ai_word); if (cpu_architecture() >= CPU_ARCH_ARMv5TE) - p += sprintf(p, "DWord:\t\t%lu\n", ai_dword); - p += sprintf(p, "Multi:\t\t%lu\n", ai_multi); - p += sprintf(p, "User faults:\t%i (%s)\n", ai_usermode, + seq_printf(m, "DWord:\t\t%lu\n", ai_dword); + seq_printf(m, "Multi:\t\t%lu\n", ai_multi); + seq_printf(m, "User faults:\t%i (%s)\n", ai_usermode, usermode_action[ai_usermode]); - len = (p - page) - off; - if (len < 0) - len = 0; - - *eof = (len <= count) ? 
1 : 0; - *start = page + off; + return 0; +} - return len; +static int alignment_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, alignment_proc_show, NULL); } -static int proc_alignment_write(struct file *file, const char __user *buffer, - unsigned long count, void *data) +static ssize_t alignment_proc_write(struct file *file, const char __user *buffer, + size_t count, loff_t *pos) { char mode; @@ -136,6 +130,13 @@ static int proc_alignment_write(struct file *file, const char __user *buffer, return count; } +static const struct file_operations alignment_proc_fops = { + .open = alignment_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = alignment_proc_write, +}; #endif /* CONFIG_PROC_FS */ union offset_union { @@ -901,12 +902,10 @@ static int __init alignment_init(void) #ifdef CONFIG_PROC_FS struct proc_dir_entry *res; - res = create_proc_entry("cpu/alignment", S_IWUSR | S_IRUGO, NULL); + res = proc_create("cpu/alignment", S_IWUSR | S_IRUGO, NULL, + &alignment_proc_fops); if (!res) return -ENOMEM; - - res->read_proc = proc_alignment_read; - res->write_proc = proc_alignment_write; #endif /* -- cgit v1.2.3-70-g09d2 From ceb683d3bc36f213aeef0c5d79e6fbb1e16bd459 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 25 Mar 2010 18:47:20 +0000 Subject: ARM: Ensure meminfo is sorted prior to sanity_check_meminfo Acked-by: Tony Lindgren Signed-off-by: Russell King --- arch/arm/mm/init.c | 10 ---------- arch/arm/mm/mmu.c | 10 ++++++++++ 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 7829cb5425f..8bbb9a972e7 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include @@ -387,21 +386,12 @@ static void arm_memory_present(struct meminfo *mi, int node) } #endif -static int __init meminfo_cmp(const void *_a, const void *_b) -{ - const struct membank *a = _a, *b = _b; - long cmp = bank_pfn_start(a) - bank_pfn_start(b); - return cmp < 0 ? -1 : cmp > 0 ? 1 : 0; -} - void __init bootmem_init(void) { struct meminfo *mi = &meminfo; unsigned long min, max_low, max_high; int node, initrd_node; - sort(&mi->bank, mi->nr_banks, sizeof(mi->bank[0]), meminfo_cmp, NULL); - /* * Locate which node contains the ramdisk image, if any. */ diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 9d4da6ac28e..91547196915 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -1013,6 +1014,13 @@ static void __init kmap_init(void) #endif } +static int __init meminfo_cmp(const void *_a, const void *_b) +{ + const struct membank *a = _a, *b = _b; + long cmp = bank_pfn_start(a) - bank_pfn_start(b); + return cmp < 0 ? -1 : cmp > 0 ? 1 : 0; +} + /* * paging_init() sets up the page tables, initialises the zone memory * maps, and sets up the zero page, bad page and bad page tables. 
@@ -1021,6 +1029,8 @@ void __init paging_init(struct machine_desc *mdesc) { void *zero_page; + sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); + build_mem_type_table(); sanity_check_meminfo(); prepare_page_table(); -- cgit v1.2.3-70-g09d2 From a2227120eead4ea7d2ea04d8ce0947f1dd23dedf Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 25 Mar 2010 18:56:05 +0000 Subject: ARM: Move memory mapping into mmu.c Acked-by: Tony Lindgren Signed-off-by: Russell King --- arch/arm/mm/init.c | 24 ------------------------ arch/arm/mm/mm.h | 3 --- arch/arm/mm/mmu.c | 29 ++++++++++++++++++++++++++++- 3 files changed, 28 insertions(+), 28 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 8bbb9a972e7..105d1d4f420 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -225,20 +225,6 @@ static int __init check_initrd(struct meminfo *mi) return initrd_node; } -static inline void map_memory_bank(struct membank *bank) -{ -#ifdef CONFIG_MMU - struct map_desc map; - - map.pfn = bank_pfn_start(bank); - map.virtual = __phys_to_virt(bank_phys_start(bank)); - map.length = bank_phys_size(bank); - map.type = MT_MEMORY; - - create_mapping(&map); -#endif -} - static void __init bootmem_init_node(int node, struct meminfo *mi, unsigned long start_pfn, unsigned long end_pfn) { @@ -247,16 +233,6 @@ static void __init bootmem_init_node(int node, struct meminfo *mi, pg_data_t *pgdat; int i; - /* - * Map the memory banks for this node. - */ - for_each_nodebank(i, mi, node) { - struct membank *bank = &mi->bank[i]; - - if (!bank->highmem) - map_memory_bank(bank); - } - /* * Allocate the bootmem bitmap page. */ diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index a888363398f..815d08eecbb 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -28,10 +28,7 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page #endif -struct map_desc; -struct meminfo; struct pglist_data; -void __init create_mapping(struct map_desc *md); void __init bootmem_init(void); void reserve_node_zero(struct pglist_data *pgdat); diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 91547196915..69852003675 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -600,7 +600,7 @@ static void __init create_36bit_mapping(struct map_desc *md, * offsets, and we take full advantage of sections and * supersections. */ -void __init create_mapping(struct map_desc *md) +static void __init create_mapping(struct map_desc *md) { unsigned long phys, addr, length, end; const struct mem_type *type; @@ -1014,6 +1014,32 @@ static void __init kmap_init(void) #endif } +static inline void map_memory_bank(struct membank *bank) +{ + struct map_desc map; + + map.pfn = bank_pfn_start(bank); + map.virtual = __phys_to_virt(bank_phys_start(bank)); + map.length = bank_phys_size(bank); + map.type = MT_MEMORY; + + create_mapping(&map); +} + +static void __init map_lowmem(void) +{ + struct meminfo *mi = &meminfo; + int i; + + /* Map all the lowmem memory banks. 
*/ + for (i = 0; i < mi->nr_banks; i++) { + struct membank *bank = &mi->bank[i]; + + if (!bank->highmem) + map_memory_bank(bank); + } +} + static int __init meminfo_cmp(const void *_a, const void *_b) { const struct membank *a = _a, *b = _b; @@ -1034,6 +1060,7 @@ void __init paging_init(struct machine_desc *mdesc) build_mem_type_table(); sanity_check_meminfo(); prepare_page_table(); + map_lowmem(); bootmem_init(); devicemaps_init(mdesc); kmap_init(); -- cgit v1.2.3-70-g09d2 From 64039be8226b9f6c80c704d94ac9891eee4a274c Mon Sep 17 00:00:00 2001 From: Jason McMullan Date: Wed, 5 May 2010 18:59:37 +0100 Subject: ARM: 6094/1: Extend cache-l2x0 to support the 16-way PL310 The L310 cache controller's interface is almost identical to the L210. One major difference is that the PL310 can have up to 16 ways. This change uses the cache's part ID and the Associativity bits in the AUX_CTRL register to determine the number of ways. Also, this version prints out the CACHE_ID and AUX_CTRL registers. Acked-by: Will Deacon Acked-by: Acked-by: Catalin Marinas Signed-off-by: Jason S. McMullan Signed-off-by: Russell King --- arch/arm/include/asm/hardware/cache-l2x0.h | 3 +++ arch/arm/mm/cache-l2x0.c | 39 ++++++++++++++++++++++++++---- 2 files changed, 37 insertions(+), 5 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h index cdb9022716f..6bcba48800f 100644 --- a/arch/arm/include/asm/hardware/cache-l2x0.h +++ b/arch/arm/include/asm/hardware/cache-l2x0.h @@ -21,6 +21,9 @@ #define __ASM_ARM_HARDWARE_L2X0_H #define L2X0_CACHE_ID 0x000 +#define L2X0_CACHE_ID_PART_MASK (0xf << 6) +#define L2X0_CACHE_ID_PART_L210 (1 << 6) +#define L2X0_CACHE_ID_PART_L310 (3 << 6) #define L2X0_CACHE_TYPE 0x004 #define L2X0_CTRL 0x100 #define L2X0_AUX_CTRL 0x104 diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 07334632d3e..78f0fc8595e 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -27,6 +27,7 @@ static void __iomem *l2x0_base; static DEFINE_SPINLOCK(l2x0_lock); +static uint32_t l2x0_way_mask; /* Bitmask of active ways */ static inline void cache_wait(void __iomem *reg, unsigned long mask) { @@ -99,8 +100,8 @@ static inline void l2x0_inv_all(void) /* invalidate all ways */ spin_lock_irqsave(&l2x0_lock, flags); - writel(0xff, l2x0_base + L2X0_INV_WAY); - cache_wait(l2x0_base + L2X0_INV_WAY, 0xff); + writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); + cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); cache_sync(); spin_unlock_irqrestore(&l2x0_lock, flags); } @@ -199,9 +200,37 @@ static void l2x0_flush_range(unsigned long start, unsigned long end) void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) { __u32 aux; + __u32 cache_id; + int ways; + const char *type; l2x0_base = base; + cache_id = readl(l2x0_base + L2X0_CACHE_ID); + aux = readl(l2x0_base + L2X0_AUX_CTRL); + + /* Determine the number of ways */ + switch (cache_id & L2X0_CACHE_ID_PART_MASK) { + case L2X0_CACHE_ID_PART_L310: + if (aux & (1 << 16)) + ways = 16; + else + ways = 8; + type = "L310"; + break; + case L2X0_CACHE_ID_PART_L210: + ways = (aux >> 13) & 0xf; + type = "L210"; + break; + default: + /* Assume unknown chips have 8 ways */ + ways = 8; + type = "L2x0 series"; + break; + } + + l2x0_way_mask = (1 << ways) - 1; + /* * Check if l2x0 controller is already enabled. 
* If you are booting from non-secure mode @@ -210,8 +239,6 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) if (!(readl(l2x0_base + L2X0_CTRL) & 1)) { /* l2x0 controller is disabled */ - - aux = readl(l2x0_base + L2X0_AUX_CTRL); aux &= aux_mask; aux |= aux_val; writel(aux, l2x0_base + L2X0_AUX_CTRL); @@ -226,5 +253,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) outer_cache.clean_range = l2x0_clean_range; outer_cache.flush_range = l2x0_flush_range; - printk(KERN_INFO "L2X0 cache controller enabled\n"); + printk(KERN_INFO "%s cache controller enabled\n", type); + printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n", + ways, cache_id, aux); } -- cgit v1.2.3-70-g09d2 From 8c0b742ca7a7d21de0ddc87eda6ef0b282e4de18 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Sat, 15 May 2010 09:57:06 +0100 Subject: ARM: 6134/1: Handle instruction cache maintenance fault properly Between "clean D line..." and "invalidate I line" operations in v7_coherent_user_range(), the memory page may get swapped out. And the fault on "invalidate I line" could not be properly handled causing the oops. In ARMv6 "external abort on linefetch" replaced by "instruction cache maintenance fault". Let's handle it as translation fault. It fixes the issue. I'm not sure if it's reasonable to check arch version in run-time. Let's do it in compile time for now. Reviewed-by: Catalin Marinas Signed-off-by: Siarhei Siamashka Signed-off-by: Kirill A. Shutemov Signed-off-by: Russell King --- arch/arm/mm/fault.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 9d40c341e07..92f5801f99c 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -463,7 +463,12 @@ static struct fsr_info { { do_bad, SIGILL, BUS_ADRALN, "alignment exception" }, { do_bad, SIGKILL, 0, "terminal exception" }, { do_bad, SIGILL, BUS_ADRALN, "alignment exception" }, +/* Do we need runtime check ? */ +#if __LINUX_ARM_ARCH__ < 6 { do_bad, SIGBUS, 0, "external abort on linefetch" }, +#else + { do_translation_fault, SIGSEGV, SEGV_MAPERR, "I-cache maintenance fault" }, +#endif { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" }, { do_bad, SIGBUS, 0, "external abort on linefetch" }, { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" }, -- cgit v1.2.3-70-g09d2 From 47ab0dee661dbd5aca67abe44a333e471134fbf9 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 15 May 2010 11:02:43 +0100 Subject: ARM: Optionally allow ARMv6 to use 'normal, bufferable' memory for DMA Provide a configuration option to allow the ARMv6 to use normal bufferable memory for coherent DMA. This option is forced to 'y' for ARMv7, and offered as a configuration option on ARMv6. Enabling this option requires drivers to have the necessary barriers to ensure that data in DMA coherent memory is visible prior to the DMA operation commencing. 
Reviewed-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/include/asm/pgtable.h | 2 +- arch/arm/include/asm/system.h | 2 +- arch/arm/mm/Kconfig | 19 +++++++++++++++++++ 3 files changed, 21 insertions(+), 2 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 11397687f42..ab68cf1ef80 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -314,7 +314,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED) #define pgprot_writecombine(prot) \ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE) -#if __LINUX_ARM_ARCH__ >= 7 +#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE #define pgprot_dmacoherent(prot) \ __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE) #else diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index ca88e6a8470..02f5d99adbc 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -137,7 +137,7 @@ extern unsigned int user_debug; #define dmb() __asm__ __volatile__ ("" : : : "memory") #endif -#if __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP) +#if defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) #define mb() dmb() #define rmb() dmb() #define wmb() dmb() diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index c4ed9f93f64..573528d9c6d 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -781,3 +781,22 @@ config ARM_L1_CACHE_SHIFT int default 6 if ARM_L1_CACHE_SHIFT_6 default 5 + +config ARM_DMA_MEM_BUFFERABLE + bool "Use non-cacheable memory for DMA" if CPU_V6 && !CPU_V7 + default y if CPU_V6 || CPU_V7 + help + Historically, the kernel has used strongly ordered mappings to + provide DMA coherent memory. With the advent of ARMv7, mapping + memory with differing types results in unpredictable behaviour, + so on these CPUs, this option is forced on. + + Multiple mappings with differing attributes is also unpredictable + on ARMv6 CPUs, but since they do not have aggressive speculative + prefetch, no harm appears to occur. + + However, drivers may be missing the necessary barriers for ARMv6, + and therefore turning this on may result in unpredictable driver + behaviour. Therefore, we offer this as an option. + + You are recommended say 'Y' here and debug any affected drivers. -- cgit v1.2.3-70-g09d2
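
To illustrate the driver-side requirement described in the last commit above, a minimal sketch (hypothetical foo_* names; assumes the descriptor ring was allocated with dma_alloc_coherent() and the doorbell is an MMIO register). With ARM_DMA_MEM_BUFFERABLE the coherent mapping is Normal/bufferable rather than strongly ordered, so the descriptor writes must be explicitly ordered before the doorbell write with wmb() (which this patch maps to dmb()):

	/* Hypothetical example: queue one TX descriptor that lives in memory
	 * obtained from dma_alloc_coherent().  Without the barrier, the
	 * descriptor stores may still sit in a write buffer when the doorbell
	 * write reaches the device.
	 */
	static void foo_queue_tx(struct foo_priv *priv, dma_addr_t buf, u32 len)
	{
		struct foo_desc *d = &priv->tx_ring[priv->tx_tail];

		d->addr  = cpu_to_le32(buf);
		d->len   = cpu_to_le32(len);
		d->flags = cpu_to_le32(FOO_DESC_OWN);	/* hand it to the device */

		wmb();	/* descriptor must be visible before the doorbell */
		writel(priv->tx_tail, priv->ioaddr + FOO_TX_DOORBELL);
	}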