From 82e6923e1862428b755ec306b3dbccf926849314 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Fri, 21 Jan 2011 11:04:45 +0000
Subject: ARM: lh7a40x: remove unmaintained platform support

lh7a40x has only been receiving updates to track changes in generic
code.  The last involvement from the maintainer according to the git
logs was in 2006.  As such, it is a maintenance burden with no benefit.

This gets rid of two defconfigs.

Signed-off-by: Russell King
---
 arch/arm/include/asm/setup.h | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index f1e5a9bca24..da8b52ec49c 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -192,11 +192,7 @@ static struct tagtable __tagtable_##fn __tag = { tag, fn }
 /*
  * Memory map description
  */
-#ifdef CONFIG_ARCH_LH7A40X
-# define NR_BANKS 16
-#else
-# define NR_BANKS 8
-#endif
+#define NR_BANKS 8
 
 struct membank {
 	unsigned long start;
-- cgit v1.2.3-70-g09d2

From 5756e9dd0de6d5c307773f8f734c0684b3098fdd Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Wed, 26 Jan 2011 18:34:26 +0100
Subject: ARM: 6640/1: Thumb-2: Symbol manipulation macros for function body
 copying

In low-level board support code, there is sometimes a need to copy a
function body to another location at run-time.  A straightforward call
to memcpy doesn't work in Thumb-2, because bit 0 of external Thumb
function symbols is set to 1, indicating that the function is Thumb.
Without corrective measures, this will cause an off-by-one copy, and
the copy may be called using the wrong instruction set.

This patch adds an fncpy() macro to help with such copies.  Particular
care is needed, because C doesn't guarantee any defined behaviour when
casting a function pointer to any other type.  This has been observed
to lead to strange optimisation side-effects when doing the arithmetic
which is required in order to copy/move function bodies correctly in
Thumb-2.

Thanks to Russell King and Nicolas Pitre for their input on this patch.
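
For illustration only (this helper is not part of the patch), the kind
of naive copy that fncpy() replaces might look like the sketch below;
the ignored Thumb bit is exactly the problem described above:

	#include <string.h>

	/*
	 * BROKEN in Thumb-2: for a Thumb function, funcp has bit 0 set,
	 * so the copy starts one byte into the function body, and the
	 * returned pointer has lost the Thumb bit, so calls through it
	 * use the wrong instruction set.
	 */
	static void *broken_fncpy(void *dest, void (*funcp)(void), size_t size)
	{
		return memcpy(dest, (void *)funcp, size);
	}
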
Signed-off-by: Dave Martin
Tested-by: Jean Pihet
Tested-by: Tony Lindgren
Tested-by: Kevin Hilman
Signed-off-by: Russell King
---
 arch/arm/include/asm/fncpy.h | 94 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 94 insertions(+)
 create mode 100644 arch/arm/include/asm/fncpy.h

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
new file mode 100644
index 00000000000..de535474692
--- /dev/null
+++ b/arch/arm/include/asm/fncpy.h
@@ -0,0 +1,94 @@
+/*
+ * arch/arm/include/asm/fncpy.h - helper macros for function body copying
+ *
+ * Copyright (C) 2011 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/*
+ * These macros are intended for use when there is a need to copy a low-level
+ * function body into special memory.
+ *
+ * For example, when reconfiguring the SDRAM controller, the code doing the
+ * reconfiguration may need to run from SRAM.
+ *
+ * NOTE: the copied function body must be entirely self-contained and
+ * position-independent in order for this to work properly.
+ *
+ * NOTE: in order for embedded literals and data to get referenced correctly,
+ * the alignment of functions must be preserved when copying.  To ensure this,
+ * the source and destination addresses for fncpy() must be aligned to a
+ * multiple of 8 bytes: you will get a BUG() if this condition is not met.
+ * You will typically need a ".align 3" directive in the assembler where the
+ * function to be copied is defined, and ensure that your allocator for the
+ * destination buffer returns 8-byte-aligned pointers.
+ *
+ * Typical usage example:
+ *
+ * extern int f(args);
+ * extern uint32_t size_of_f;
+ * int (*copied_f)(args);
+ * void *sram_buffer;
+ *
+ * copied_f = fncpy(sram_buffer, &f, size_of_f);
+ *
+ * ... later, call the function: ...
+ *
+ * copied_f(args);
+ *
+ * The size of the function to be copied can't be determined from C:
+ * this must be determined by other means, such as adding assembler directives
+ * in the file where f is defined.
+ */

+#ifndef __ASM_FNCPY_H
+#define __ASM_FNCPY_H
+
+#include <linux/types.h>
+#include <linux/string.h>
+
+#include <asm/bug.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Minimum alignment requirement for the source and destination addresses
+ * for function copying.
+ */
+#define FNCPY_ALIGN 8
+
+#define fncpy(dest_buf, funcp, size) ({					\
+	uintptr_t __funcp_address;					\
+	typeof(funcp) __result;						\
+									\
+	asm("" : "=r" (__funcp_address) : "0" (funcp));			\
+									\
+	/*								\
+	 * Ensure alignment of source and destination addresses,	\
+	 * disregarding the function's Thumb bit:			\
+	 */								\
+	BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) ||		\
+		(__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1)));	\
+									\
+	memcpy(dest_buf, (void const *)(__funcp_address & ~1), size);	\
+	flush_icache_range((unsigned long)(dest_buf),			\
+		(unsigned long)(dest_buf) + (size));			\
+									\
+	asm("" : "=r" (__result)					\
+		: "0" ((uintptr_t)(dest_buf) | (__funcp_address & 1)));	\
+									\
+	__result;							\
+})
+
+#endif /* !__ASM_FNCPY_H */
-- cgit v1.2.3-70-g09d2

From 6323f0ccedf756dfe5f46549cec69a2d6d97937b Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 16 Jan 2011 18:02:17 +0000
Subject: ARM: bitops: switch set/clear/change bitops to use ldrex/strex

Switch the set/clear/change bitops to use the word-based exclusive
operations, which are available on a wider range of ARM architectures
than the byte-based exclusive operations.
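
For reference, the word/mask arithmetic performed by the reworked
assembler below (nr >> 5 selects the word, 1 << (nr & 31) builds the
mask) can be sketched in C.  This is an illustration only, not kernel
code, and assumes 32-bit words:

	#include <stdio.h>

	static void locate_bit(unsigned int nr, unsigned int *word,
			       unsigned int *mask)
	{
		*word = nr >> 5;		/* word offset: nr / 32 */
		*mask = 1u << (nr & 31);	/* bit within that word */
	}

	int main(void)
	{
		unsigned int word, mask;

		locate_bit(37, &word, &mask);
		/* prints: bit 37 -> word 1, mask 0x00000020 */
		printf("bit 37 -> word %u, mask 0x%08x\n", word, mask);
		return 0;
	}
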
Tested record:
- Nicolas Pitre: ext3,rw,le
- Sourav Poddar: nfs,le
- Will Deacon: ext3,rw,le
- Tony Lindgren: ext3+nfs,le

Reviewed-by: Nicolas Pitre
Tested-by: Sourav Poddar
Tested-by: Will Deacon
Tested-by: Tony Lindgren
Signed-off-by: Russell King
---
 arch/arm/include/asm/bitops.h | 60 ++++++++++++++++---------------------------
 arch/arm/kernel/armksyms.c    | 18 +++++--------
 arch/arm/lib/bitops.h         | 38 ++++++++++++++-------------
 arch/arm/lib/changebit.S      | 10 ++------
 arch/arm/lib/clearbit.S       | 11 ++------
 arch/arm/lib/setbit.S         | 11 ++------
 arch/arm/lib/testchangebit.S  |  9 +++----
 arch/arm/lib/testclearbit.S   |  9 +++----
 arch/arm/lib/testsetbit.S     |  9 +++----
 9 files changed, 63 insertions(+), 112 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 7b1bb2bbaf8..af54ed102f5 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -148,15 +148,19 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
  * Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0.
  */
 
+/*
+ * Native endian assembly bitops.  nr = 0 -> word 0 bit 0.
+ */
+extern void _set_bit(int nr, volatile unsigned long * p);
+extern void _clear_bit(int nr, volatile unsigned long * p);
+extern void _change_bit(int nr, volatile unsigned long * p);
+extern int _test_and_set_bit(int nr, volatile unsigned long * p);
+extern int _test_and_clear_bit(int nr, volatile unsigned long * p);
+extern int _test_and_change_bit(int nr, volatile unsigned long * p);
+
 /*
  * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
  */
-extern void _set_bit_le(int nr, volatile unsigned long * p);
-extern void _clear_bit_le(int nr, volatile unsigned long * p);
-extern void _change_bit_le(int nr, volatile unsigned long * p);
-extern int _test_and_set_bit_le(int nr, volatile unsigned long * p);
-extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p);
-extern int _test_and_change_bit_le(int nr, volatile unsigned long * p);
 extern int _find_first_zero_bit_le(const void * p, unsigned size);
 extern int _find_next_zero_bit_le(const void * p, int size, int offset);
 extern int _find_first_bit_le(const unsigned long *p, unsigned size);
@@ -165,12 +169,6 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
 /*
  * Big endian assembly bitops.  nr = 0 -> byte 3 bit 0.
  */
-extern void _set_bit_be(int nr, volatile unsigned long * p);
-extern void _clear_bit_be(int nr, volatile unsigned long * p);
-extern void _change_bit_be(int nr, volatile unsigned long * p);
-extern int _test_and_set_bit_be(int nr, volatile unsigned long * p);
-extern int _test_and_clear_bit_be(int nr, volatile unsigned long * p);
-extern int _test_and_change_bit_be(int nr, volatile unsigned long * p);
 extern int _find_first_zero_bit_be(const void * p, unsigned size);
 extern int _find_next_zero_bit_be(const void * p, int size, int offset);
 extern int _find_first_bit_be(const unsigned long *p, unsigned size);
@@ -180,33 +178,26 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 /*
  * The __* form of bitops are non-atomic and may be reordered.
  */
-#define ATOMIC_BITOP_LE(name,nr,p)		\
-	(__builtin_constant_p(nr) ?		\
-	 ____atomic_##name(nr, p) :		\
-	 _##name##_le(nr,p))
-
-#define ATOMIC_BITOP_BE(name,nr,p)		\
-	(__builtin_constant_p(nr) ?		\
-	 ____atomic_##name(nr, p) :		\
-	 _##name##_be(nr,p))
+#define ATOMIC_BITOP(name,nr,p)			\
+	(__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr,p))
 #else
-#define ATOMIC_BITOP_LE(name,nr,p)	_##name##_le(nr,p)
-#define ATOMIC_BITOP_BE(name,nr,p)	_##name##_be(nr,p)
+#define ATOMIC_BITOP(name,nr,p)	_##name(nr,p)
 #endif
 
-#define NONATOMIC_BITOP(name,nr,p)	\
-	(____nonatomic_##name(nr, p))
+/*
+ * Native endian atomic definitions.
+ */
+#define set_bit(nr,p)			ATOMIC_BITOP(set_bit,nr,p)
+#define clear_bit(nr,p)			ATOMIC_BITOP(clear_bit,nr,p)
+#define change_bit(nr,p)		ATOMIC_BITOP(change_bit,nr,p)
+#define test_and_set_bit(nr,p)		ATOMIC_BITOP(test_and_set_bit,nr,p)
+#define test_and_clear_bit(nr,p)	ATOMIC_BITOP(test_and_clear_bit,nr,p)
+#define test_and_change_bit(nr,p)	ATOMIC_BITOP(test_and_change_bit,nr,p)
 
 #ifndef __ARMEB__
 /*
  * These are the little endian, atomic definitions.
  */
-#define set_bit(nr,p)			ATOMIC_BITOP_LE(set_bit,nr,p)
-#define clear_bit(nr,p)			ATOMIC_BITOP_LE(clear_bit,nr,p)
-#define change_bit(nr,p)		ATOMIC_BITOP_LE(change_bit,nr,p)
-#define test_and_set_bit(nr,p)		ATOMIC_BITOP_LE(test_and_set_bit,nr,p)
-#define test_and_clear_bit(nr,p)	ATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
-#define test_and_change_bit(nr,p)	ATOMIC_BITOP_LE(test_and_change_bit,nr,p)
 #define find_first_zero_bit(p,sz)	_find_first_zero_bit_le(p,sz)
 #define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_le(p,sz,off)
 #define find_first_bit(p,sz)		_find_first_bit_le(p,sz)
@@ -215,16 +206,9 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 #define WORD_BITOFF_TO_LE(x)		((x))
 
 #else
-
 /*
  * These are the big endian, atomic definitions.
  */
-#define set_bit(nr,p)			ATOMIC_BITOP_BE(set_bit,nr,p)
-#define clear_bit(nr,p)			ATOMIC_BITOP_BE(clear_bit,nr,p)
-#define change_bit(nr,p)		ATOMIC_BITOP_BE(change_bit,nr,p)
-#define test_and_set_bit(nr,p)		ATOMIC_BITOP_BE(test_and_set_bit,nr,p)
-#define test_and_clear_bit(nr,p)	ATOMIC_BITOP_BE(test_and_clear_bit,nr,p)
-#define test_and_change_bit(nr,p)	ATOMIC_BITOP_BE(test_and_change_bit,nr,p)
 #define find_first_zero_bit(p,sz)	_find_first_zero_bit_be(p,sz)
 #define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_be(p,sz,off)
 #define find_first_bit(p,sz)		_find_first_bit_be(p,sz)
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index e5e1e538767..d5d4185f0c2 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -140,24 +140,18 @@ EXPORT_SYMBOL(__aeabi_ulcmp);
 #endif
 
 	/* bitops */
-EXPORT_SYMBOL(_set_bit_le);
-EXPORT_SYMBOL(_test_and_set_bit_le);
-EXPORT_SYMBOL(_clear_bit_le);
-EXPORT_SYMBOL(_test_and_clear_bit_le);
-EXPORT_SYMBOL(_change_bit_le);
-EXPORT_SYMBOL(_test_and_change_bit_le);
+EXPORT_SYMBOL(_set_bit);
+EXPORT_SYMBOL(_test_and_set_bit);
+EXPORT_SYMBOL(_clear_bit);
+EXPORT_SYMBOL(_test_and_clear_bit);
+EXPORT_SYMBOL(_change_bit);
+EXPORT_SYMBOL(_test_and_change_bit);
 EXPORT_SYMBOL(_find_first_zero_bit_le);
 EXPORT_SYMBOL(_find_next_zero_bit_le);
 EXPORT_SYMBOL(_find_first_bit_le);
 EXPORT_SYMBOL(_find_next_bit_le);
 
 #ifdef __ARMEB__
-EXPORT_SYMBOL(_set_bit_be);
-EXPORT_SYMBOL(_test_and_set_bit_be);
-EXPORT_SYMBOL(_clear_bit_be);
-EXPORT_SYMBOL(_test_and_clear_bit_be);
-EXPORT_SYMBOL(_change_bit_be);
-EXPORT_SYMBOL(_test_and_change_bit_be);
 EXPORT_SYMBOL(_find_first_zero_bit_be);
 EXPORT_SYMBOL(_find_next_zero_bit_be);
 EXPORT_SYMBOL(_find_first_bit_be);
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index bd00551fb79..a9d9d152a75 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -1,15 +1,15 @@
-
-#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_32v6K)
+#if __LINUX_ARM_ARCH__ >= 6
 	.macro	bitop, instr
 	ands	ip, r1, #3
 	strneb	r1, [ip]		@ assert word-aligned
 	mov	r2, #1
-	and	r3, r0, #7		@ Get bit offset
-	add	r1, r1, r0, lsr #3	@ Get byte offset
+	and	r3, r0, #31		@ Get bit offset
+	mov	r0, r0, lsr #5
+	add	r1, r1, r0, lsl #2	@ Get word offset
 	mov	r3, r2, lsl r3
-1:	ldrexb	r2, [r1]
+1:	ldrex	r2, [r1]
 	\instr	r2, r2, r3
-	strexb	r0, r2, [r1]
+	strex	r0, r2, [r1]
 	cmp	r0, #0
 	bne	1b
 	mov	pc, lr
@@ -18,15 +18,16 @@
 	.macro	testop, instr, store
 	ands	ip, r1, #3
 	strneb	r1, [ip]		@ assert word-aligned
-	and	r3, r0, #7		@ Get bit offset
 	mov	r2, #1
-	add	r1, r1, r0, lsr #3	@ Get byte offset
+	and	r3, r0, #31		@ Get bit offset
+	mov	r0, r0, lsr #5
+	add	r1, r1, r0, lsl #2	@ Get word offset
 	mov	r3, r2, lsl r3		@ create mask
 	smp_dmb
-1:	ldrexb	r2, [r1]
+1:	ldrex	r2, [r1]
 	ands	r0, r2, r3		@ save old value of bit
-	\instr	r2, r2, r3		@ toggle bit
-	strexb	ip, r2, [r1]
+	\instr	r2, r2, r3	@ toggle bit
+	strex	ip, r2, [r1]
 	cmp	ip, #0
 	bne	1b
 	smp_dmb
@@ -38,13 +39,14 @@
 	.macro	bitop, instr
 	ands	ip, r1, #3
 	strneb	r1, [ip]		@ assert word-aligned
-	and	r2, r0, #7
+	and	r2, r0, #31
+	mov	r0, r0, lsr #5
 	mov	r3, #1
 	mov	r3, r3, lsl r2
 	save_and_disable_irqs ip
-	ldrb	r2, [r1, r0, lsr #3]
+	ldr	r2, [r1, r0, lsl #2]
 	\instr	r2, r2, r3
-	strb	r2, [r1, r0, lsr #3]
+	str	r2, [r1, r0, lsl #2]
 	restore_irqs ip
 	mov	pc, lr
 	.endm
@@ -60,11 +62,11 @@
 	.macro	testop, instr, store
 	ands	ip, r1, #3
 	strneb	r1, [ip]		@ assert word-aligned
-	add	r1, r1, r0, lsr #3
-	and	r3, r0, #7
-	mov	r0, #1
+	and	r3, r0, #31
+	mov	r0, r0, lsr #5
 	save_and_disable_irqs ip
-	ldrb	r2, [r1]
+	ldr	r2, [r1, r0, lsl #2]!
+	mov	r0, #1
 	tst	r2, r0, lsl r3
 	\instr	r2, r2, r0, lsl r3
 	\store	r2, [r1]
diff --git a/arch/arm/lib/changebit.S b/arch/arm/lib/changebit.S
index 80f3115cbee..68ed5b62e83 100644
--- a/arch/arm/lib/changebit.S
+++ b/arch/arm/lib/changebit.S
@@ -12,12 +12,6 @@
 #include "bitops.h"
 	.text
 
-/* Purpose  : Function to change a bit
- * Prototype: int change_bit(int bit, void *addr)
- */
-ENTRY(_change_bit_be)
-	eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_change_bit_le)
+ENTRY(_change_bit)
 	bitop	eor
-ENDPROC(_change_bit_be)
-ENDPROC(_change_bit_le)
+ENDPROC(_change_bit)
diff --git a/arch/arm/lib/clearbit.S b/arch/arm/lib/clearbit.S
index 1a63e43a1df..4c04c3b51ee 100644
--- a/arch/arm/lib/clearbit.S
+++ b/arch/arm/lib/clearbit.S
@@ -12,13 +12,6 @@
 #include "bitops.h"
 	.text
 
-/*
- * Purpose  : Function to clear a bit
- * Prototype: int clear_bit(int bit, void *addr)
- */
-ENTRY(_clear_bit_be)
-	eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_clear_bit_le)
+ENTRY(_clear_bit)
 	bitop	bic
-ENDPROC(_clear_bit_be)
-ENDPROC(_clear_bit_le)
+ENDPROC(_clear_bit)
diff --git a/arch/arm/lib/setbit.S b/arch/arm/lib/setbit.S
index 1dd7176c4b2..bbee5c66a23 100644
--- a/arch/arm/lib/setbit.S
+++ b/arch/arm/lib/setbit.S
@@ -12,13 +12,6 @@
 #include "bitops.h"
 	.text
 
-/*
- * Purpose  : Function to set a bit
- * Prototype: int set_bit(int bit, void *addr)
- */
-ENTRY(_set_bit_be)
-	eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_set_bit_le)
+ENTRY(_set_bit)
 	bitop	orr
-ENDPROC(_set_bit_be)
-ENDPROC(_set_bit_le)
+ENDPROC(_set_bit)
diff --git a/arch/arm/lib/testchangebit.S b/arch/arm/lib/testchangebit.S
index 5c98dc567f0..15a4d431f22 100644
--- a/arch/arm/lib/testchangebit.S
+++ b/arch/arm/lib/testchangebit.S
@@ -12,9 +12,6 @@
 #include "bitops.h"
 	.text
 
-ENTRY(_test_and_change_bit_be)
-	eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_test_and_change_bit_le)
-	testop	eor, strb
-ENDPROC(_test_and_change_bit_be)
-ENDPROC(_test_and_change_bit_le)
+ENTRY(_test_and_change_bit)
+	testop	eor, str
+ENDPROC(_test_and_change_bit)
diff --git a/arch/arm/lib/testclearbit.S b/arch/arm/lib/testclearbit.S
index 543d7094d18..521b66b5b95 100644
--- a/arch/arm/lib/testclearbit.S
+++ b/arch/arm/lib/testclearbit.S
@@ -12,9 +12,6 @@
 #include "bitops.h"
 	.text
 
-ENTRY(_test_and_clear_bit_be)
-	eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_test_and_clear_bit_le)
-	testop	bicne, strneb
-ENDPROC(_test_and_clear_bit_be)
-ENDPROC(_test_and_clear_bit_le)
+ENTRY(_test_and_clear_bit)
+	testop	bicne, strne
+ENDPROC(_test_and_clear_bit)
diff --git a/arch/arm/lib/testsetbit.S b/arch/arm/lib/testsetbit.S
index 0b3f390401c..1c98cc2185b 100644
--- a/arch/arm/lib/testsetbit.S
+++ b/arch/arm/lib/testsetbit.S
@@ -12,9 +12,6 @@
 #include "bitops.h"
 	.text
 
-ENTRY(_test_and_set_bit_be)
-	eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_test_and_set_bit_le)
-	testop	orreq, streqb
-ENDPROC(_test_and_set_bit_be)
-ENDPROC(_test_and_set_bit_le)
+ENTRY(_test_and_set_bit)
+	testop	orreq, streq
+ENDPROC(_test_and_set_bit)
-- cgit v1.2.3-70-g09d2

From 000d9c78eb5cd7f18e3d6a381d66e606bc9b8196 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 15 Jan 2011 16:22:12 +0000
Subject: ARM: v6k: remove CPU_32v6K dependencies in asm/spinlock.h

SMP requires at least the ARMv6K extensions to be present, so if we're
running on SMP, the WFE and SEV instructions must be available.

However, when we run on UP, the v6K extensions may not be available,
and so we don't want WFE/SEV to be in the instruction stream.  Use the
SMP alternatives infrastructure to replace these instructions with NOPs
if we build for SMP but run on UP.

Tested-by: Tony Lindgren
Tested-by: Sourav Poddar
Tested-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/include/asm/spinlock.h | 37 +++++++++++++++++++++++++------------
 1 file changed, 25 insertions(+), 12 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 17eb355707d..da1af524015 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,17 +5,36 @@
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
+/*
+ * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
+ * extensions, so when running on UP, we have to patch these instructions away.
+ */
+#define ALT_SMP(smp, up)					\
+	"9998:	" smp "\n"					\
+	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
+	"	.long	9998b\n"				\
+	"	" up "\n"					\
+	"	.popsection\n"
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define SEV		ALT_SMP("sev.w", "nop.w")
+#define WFE(cond)	ALT_SMP("wfe" cond ".w", "nop.w")
+#else
+#define SEV		ALT_SMP("sev", "nop")
+#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
+#endif
+
 static inline void dsb_sev(void)
 {
 #if __LINUX_ARM_ARCH__ >= 7
 	__asm__ __volatile__ (
 		"dsb\n"
-		"sev"
+		SEV
 	);
-#elif defined(CONFIG_CPU_32v6K)
+#else
 	__asm__ __volatile__ (
 		"mcr p15, 0, %0, c7, c10, 4\n"
-		"sev"
+		SEV
 		: : "r" (0)
 	);
 #endif
@@ -46,9 +65,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
-#ifdef CONFIG_CPU_32v6K
-"	wfene\n"
-#endif
+	WFE("ne")
 "	strexeq	%0, %2, [%1]\n"
 "	teqeq	%0, #0\n"
 "	bne	1b"
@@ -107,9 +124,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
-#ifdef CONFIG_CPU_32v6K
-"	wfene\n"
-#endif
+	WFE("ne")
 "	strexeq	%0, %2, [%1]\n"
 "	teq	%0, #0\n"
 "	bne	1b"
@@ -176,9 +191,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 "1:	ldrex	%0, [%2]\n"
 "	adds	%0, %0, #1\n"
 "	strexpl	%1, %0, [%2]\n"
-#ifdef CONFIG_CPU_32v6K
-"	wfemi\n"
-#endif
+	WFE("mi")
 "	rsbpls	%0, %1, #0\n"
 "	bmi	1b"
 	: "=&r" (tmp), "=&r" (tmp2)
-- cgit v1.2.3-70-g09d2

From e399b1a4e1d205bdc816cb550d2064f2eb1ddc4c Mon Sep 17 00:00:00 2001
From: Russell King
Date: Mon, 17 Jan 2011 15:08:32 +0000
Subject: ARM: v6k: introduce CPU_V6K option

Introduce a CPU_V6K configuration option for platforms to select if
they have a V6K CPU core.  This allows us to identify whether we need
to support ARMv6 CPUs without the V6K SMP extensions at build time.

Currently CPU_V6K is just an alias for CPU_V6, and all places which
reference CPU_V6 are replaced by (CPU_V6 || CPU_V6K).

Select CPU_V6K from platforms which are known to be V6K-only.

Acked-by: Tony Lindgren
Tested-by: Sourav Poddar
Tested-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/Kconfig                  | 10 ++++-----
 arch/arm/Makefile                 |  1 +
 arch/arm/boot/compressed/head.S   |  2 +-
 arch/arm/boot/compressed/misc.c   |  2 +-
 arch/arm/include/asm/cacheflush.h |  5 +++--
 arch/arm/include/asm/proc-fns.h   |  2 +-
 arch/arm/kernel/debug.S           |  2 +-
 arch/arm/kernel/perf_event_v6.c   |  4 ++--
 arch/arm/mm/Kconfig               | 47 +++++++++++++++++++++++++--------------
 arch/arm/mm/Makefile              |  1 +
 arch/arm/mm/mmap.c                |  2 +-
 11 files changed, 47 insertions(+), 31 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5cff165b7eb..95ba92ff0d4 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -24,7 +24,7 @@ config ARM
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_REGS_AND_STACK_ACCESS_API
-	select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V7))
+	select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
 	select HAVE_C_RECORDMCOUNT
 	select HAVE_GENERIC_HARDIRQS
 	select HAVE_SPARSE_IRQ
@@ -1048,7 +1048,7 @@ config XSCALE_PMU
 	default y
 
 config CPU_HAS_PMU
-	depends on (CPU_V6 || CPU_V7 || XSCALE_PMU) && \
+	depends on (CPU_V6 || CPU_V6K || CPU_V7 || XSCALE_PMU) && \
 		   (!ARCH_OMAP3 || OMAP3_EMU)
 	default y
 	bool
@@ -1064,7 +1064,7 @@ endif
 
 config ARM_ERRATA_411920
 	bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
-	depends on CPU_V6
+	depends on CPU_V6 || CPU_V6K
 	help
 	  Invalidation of the Instruction Cache operation can fail. This
 	  erratum is present in 1136 (before r1p4), 1156 and 1176.
@@ -1361,7 +1361,7 @@ config HZ
 
 config THUMB2_KERNEL
 	bool "Compile the kernel in Thumb-2 mode (EXPERIMENTAL)"
-	depends on CPU_V7 && !CPU_V6 && EXPERIMENTAL
+	depends on CPU_V7 && !CPU_V6 && !CPU_V6K && EXPERIMENTAL
 	select AEABI
 	select ARM_ASM_UNIFIED
 	help
@@ -1852,7 +1852,7 @@ config FPE_FASTFPE
 
 config VFP
 	bool "VFP-format floating point maths"
-	depends on CPU_V6 || CPU_ARM926T || CPU_V7 || CPU_FEROCEON
+	depends on CPU_V6 || CPU_V6K || CPU_ARM926T || CPU_V7 || CPU_FEROCEON
 	help
 	  Say Y to include VFP support code in the kernel. This is needed
 	  if your hardware includes a VFP unit.
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index c22c1adfedd..9c430525e13 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -89,6 +89,7 @@ tune-$(CONFIG_CPU_XSCALE)	:=$(call cc-option,-mtune=xscale,-mtune=strongarm110) 
 tune-$(CONFIG_CPU_XSC3)	:=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
 tune-$(CONFIG_CPU_FEROCEON) :=$(call cc-option,-mtune=marvell-f,-mtune=xscale)
 tune-$(CONFIG_CPU_V6)	:=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
+tune-$(CONFIG_CPU_V6K)	:=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
 
 ifeq ($(CONFIG_AEABI),y)
 CFLAGS_ABI	:=-mabi=aapcs-linux -mno-thumb-interwork
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 7193884ed8b..91f20f0b304 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -21,7 +21,7 @@
 
 #if defined(CONFIG_DEBUG_ICEDCC)
 
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 		.macro	loadsp, rb, tmp
 		.endm
 		.macro	writeb, ch, rb
diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c
index e653a6d3c8d..4657e877bf8 100644
--- a/arch/arm/boot/compressed/misc.c
+++ b/arch/arm/boot/compressed/misc.c
@@ -36,7 +36,7 @@ extern void error(char *x);
 
 #ifdef CONFIG_DEBUG_ICEDCC
 
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 
 static void icedcc_putc(int ch)
 {
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 3acd8fa25e3..7d0614f599a 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -116,7 +116,7 @@
 # define MULTI_CACHE 1
 #endif
 
-#if defined(CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 //# ifdef _CACHE
 # define MULTI_CACHE 1
 //# else
@@ -316,7 +316,8 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
  * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
  * will fall through to use __flush_icache_all_generic.
  */
-#if (defined(CONFIG_CPU_V7) && defined(CONFIG_CPU_V6)) || \
+#if (defined(CONFIG_CPU_V7) && \
+	(defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
 	defined(CONFIG_SMP_ON_UP)
 #define __flush_icache_preferred	__cpuc_flush_icache_all
 #elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index 8fdae9bc9ab..296ca47489f 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -231,7 +231,7 @@
 # endif
 #endif
 
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 # ifdef CPU_NAME
 #  undef  MULTI_CPU
 #  define MULTI_CPU
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index a0f07521ca8..d2d983be096 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -25,7 +25,7 @@
 		.macro	addruart, rp, rv
 		.endm
 
-#if defined(CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 
 		.macro	senduart, rd, rx
 		mcr	p14, 0, \rd, c0, c5, 0
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index c058bfc8532..6fc2d228db5 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -30,7 +30,7 @@
  * enable the interrupt.
  */
 
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 enum armv6_perf_types {
 	ARMV6_PERFCTR_ICACHE_MISS	= 0x0,
 	ARMV6_PERFCTR_IBUF_STALL	= 0x1,
@@ -669,4 +669,4 @@ static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
 {
 	return NULL;
 }
-#endif	/* CONFIG_CPU_V6 */
+#endif	/* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 9d30c6f804b..559e9330bb1 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -402,16 +402,18 @@ config CPU_V6
 	select CPU_TLB_V6 if MMU
 
 # ARMv6k
-config CPU_32v6K
-	bool "Support ARM V6K processor extensions" if !SMP
-	depends on CPU_V6 || CPU_V7
-	default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
-	help
-	  Say Y here if your ARMv6 processor supports the 'K' extension.
-	  This enables the kernel to use some instructions not present
-	  on previous processors, and as such a kernel build with this
-	  enabled will not boot on processors which do not support these
-	  instructions.
+config CPU_V6K
+	bool "Support ARM V6K processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX || ARCH_DOVE
+	select CPU_32v6
+	select CPU_32v6K if !ARCH_OMAP2
+	select CPU_ABRT_EV6
+	select CPU_PABRT_V6
+	select CPU_CACHE_V6
+	select CPU_CACHE_VIPT
+	select CPU_CP15_MMU
+	select CPU_HAS_ASID if MMU
+	select CPU_COPY_V6 if MMU
+	select CPU_TLB_V6 if MMU
 
 # ARMv7
 config CPU_V7
@@ -453,6 +455,17 @@ config CPU_32v6
 	bool
 	select TLS_REG_EMUL if !CPU_32v6K && !MMU
 
+config CPU_32v6K
+	bool "Support ARM V6K processor extensions" if !SMP
+	depends on CPU_V6 || CPU_V6K || CPU_V7
+	default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
+	help
+	  Say Y here if your ARMv6 processor supports the 'K' extension.
+	  This enables the kernel to use some instructions not present
+	  on previous processors, and as such a kernel build with this
+	  enabled will not boot on processors which do not support these
+	  instructions.
+
 config CPU_32v7
 	bool
 
@@ -623,7 +636,7 @@ comment "Processor Features"
 
 config ARM_THUMB
 	bool "Support Thumb user binaries"
-	depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V7 || CPU_FEROCEON
+	depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON
 	default y
 	help
 	  Say Y if you want to include kernel support for running user space
@@ -681,7 +694,7 @@ config CPU_BIG_ENDIAN
 config CPU_ENDIAN_BE8
 	bool
 	depends on CPU_BIG_ENDIAN
-	default CPU_V6 || CPU_V7
+	default CPU_V6 || CPU_V6K || CPU_V7
 	help
 	  Support for the BE-8 (big-endian) mode on ARMv6 and ARMv7 processors.
@@ -747,7 +760,7 @@ config CPU_CACHE_ROUND_ROBIN
 
 config CPU_BPREDICT_DISABLE
 	bool "Disable branch prediction"
-	depends on CPU_ARM1020 || CPU_V6 || CPU_MOHAWK || CPU_XSC3 || CPU_V7 || CPU_FA526
+	depends on CPU_ARM1020 || CPU_V6 || CPU_V6K || CPU_MOHAWK || CPU_XSC3 || CPU_V7 || CPU_FA526
 	help
 	  Say Y here to disable branch prediction. If unsure, say N.
@@ -767,7 +780,7 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
 
 config DMA_CACHE_RWFO
 	bool "Enable read/write for ownership DMA cache maintenance"
-	depends on CPU_V6 && SMP
+	depends on (CPU_V6 || CPU_V6K) && SMP
 	default y
 	help
 	  The Snoop Control Unit on ARM11MPCore does not detect the
@@ -823,7 +836,7 @@ config CACHE_L2X0
 config CACHE_PL310
 	bool
 	depends on CACHE_L2X0
-	default y if CPU_V7 && !CPU_V6
+	default y if CPU_V7 && !(CPU_V6 || CPU_V6K)
 	help
 	  This option enables optimisations for the PL310 cache
 	  controller.
@@ -851,10 +864,10 @@ config ARM_L1_CACHE_SHIFT
 	default 5
 
 config ARM_DMA_MEM_BUFFERABLE
-	bool "Use non-cacheable memory for DMA" if CPU_V6 && !CPU_V7
+	bool "Use non-cacheable memory for DMA" if (CPU_V6 || CPU_V6K) && !CPU_V7
 	depends on !(MACH_REALVIEW_PB1176 || REALVIEW_EB_ARM11MP || \
 		     MACH_REALVIEW_PB11MP)
-	default y if CPU_V6 || CPU_V7
+	default y if CPU_V6 || CPU_V6K || CPU_V7
 	help
 	  Historically, the kernel has used strongly ordered mappings to
 	  provide DMA coherent memory.
	  With the advent of ARMv7, mapping
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 00d74a04af3..bca7e61928c 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -90,6 +90,7 @@ obj-$(CONFIG_CPU_XSC3)		+= proc-xsc3.o
 obj-$(CONFIG_CPU_MOHAWK)	+= proc-mohawk.o
 obj-$(CONFIG_CPU_FEROCEON)	+= proc-feroceon.o
 obj-$(CONFIG_CPU_V6)		+= proc-v6.o
+obj-$(CONFIG_CPU_V6K)		+= proc-v6.o
 obj-$(CONFIG_CPU_V7)		+= proc-v7.o
 
 AFLAGS_proc-v6.o	:=-Wa,-march=armv6
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index b0a98305055..afe209e1e1f 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -31,7 +31,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long start_addr;
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 	unsigned int cache_type;
 	int do_align = 0, aliasing = 0;
-- cgit v1.2.3-70-g09d2

From 4ed67a53591db641543d57f31c182591a429dc93 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Mon, 17 Jan 2011 15:42:42 +0000
Subject: ARM: v6k: select cmpxchg code sequences according to V6 variants

If CONFIG_CPU_V6 is enabled, we must avoid the byte/halfword/doubleword
exclusive operations, which aren't implemented before V6K.  Use the
generic versions (or omit them) instead.

If CONFIG_CPU_V6 is not set, but CONFIG_CPU_32v6K is enabled, we have
the K extensions, so use these new instructions.

Acked-by: Tony Lindgren
Tested-by: Sourav Poddar
Tested-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/include/asm/system.h | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 97f6d60297d..9a87823642d 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -347,6 +347,7 @@ void cpu_idle_wait(void);
 #include <asm-generic/cmpxchg-local.h>
 
 #if __LINUX_ARM_ARCH__ < 6
+/* min ARCH < ARMv6 */
 
 #ifdef CONFIG_SMP
 #error "SMP is not supported on this platform"
@@ -365,7 +366,7 @@ void cpu_idle_wait(void);
 #include <asm-generic/cmpxchg.h>
 #endif
 
-#else	/* __LINUX_ARM_ARCH__ >= 6 */
+#else	/* min ARCH >= ARMv6 */
 
 extern void __bad_cmpxchg(volatile void *ptr, int size);
 
@@ -379,7 +380,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	unsigned long oldval, res;
 
 	switch (size) {
-#ifdef CONFIG_CPU_32v6K
+#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
 	case 1:
 		do {
 			asm volatile("@ __cmpxchg1\n"
@@ -404,7 +405,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 			: "memory", "cc");
 		} while (res);
 		break;
-#endif /* CONFIG_CPU_32v6K */
+#endif
 	case 4:
 		do {
 			asm volatile("@ __cmpxchg4\n"
@@ -450,12 +451,12 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 	unsigned long ret;
 
 	switch (size) {
-#ifndef CONFIG_CPU_32v6K
+#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
 	case 1:
 	case 2:
 		ret = __cmpxchg_local_generic(ptr, old, new, size);
 		break;
-#endif	/* !CONFIG_CPU_32v6K */
+#endif
 	default:
 		ret = __cmpxchg(ptr, old, new, size);
 	}
@@ -469,7 +470,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 				       (unsigned long)(n),		\
 				       sizeof(*(ptr))))
 
-#ifdef CONFIG_CPU_32v6K
+#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
 
 /*
  * Note : ARMv7-M (currently unsupported by Linux) does not support
@@ -524,11 +525,11 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
 					     (unsigned long long)(o),	\
 					     (unsigned long long)(n)))
 
-#else /* !CONFIG_CPU_32v6K */
+#else /* min ARCH = ARMv6 */
 
 #define cmpxchg64_local(ptr, o, n)	\
 	__cmpxchg64_local_generic((ptr), (o), (n))
 
-#endif /* CONFIG_CPU_32v6K */
+#endif
 
 #endif	/* __LINUX_ARM_ARCH__ >= 6 */
-- cgit v1.2.3-70-g09d2
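
As a reminder of the contract the sequences above implement, cmpxchg()
behaves like the following single-threaded model (illustration only;
the real implementations perform the compare-and-swap atomically, using
ldrex/strex where available):

	static unsigned long cmpxchg_model(unsigned long *ptr,
					   unsigned long old,
					   unsigned long new)
	{
		unsigned long cur = *ptr;

		if (cur == old)
			*ptr = new;
		return cur;	/* the swap happened iff return value == old */
	}
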
From 37bc618fe2689a7f8de8fac82e72b00ecea4d43d Mon Sep 17 00:00:00 2001
From: Russell King
Date: Mon, 17 Jan 2011 16:38:56 +0000
Subject: ARM: v6k: select TLS register code according to V6 variants

If CONFIG_CPU_V6 is enabled, we may or may not have the TLS register.
Use the conditional code which copes with this variability.

Otherwise, if CONFIG_CPU_32v6K is set, we know we have the TLS register
on all supported CPUs, so use it unconditionally.

Acked-by: Nicolas Pitre
Acked-by: Tony Lindgren
Tested-by: Sourav Poddar
Tested-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/include/asm/tls.h | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index e71d6ff8d10..60843eb0f61 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -28,15 +28,14 @@
 #define tls_emu		1
 #define has_tls_reg		1
 #define set_tls		set_tls_none
-#elif __LINUX_ARM_ARCH__ >= 7 ||					\
-	(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
-#define tls_emu		0
-#define has_tls_reg		1
-#define set_tls		set_tls_v6k
-#elif __LINUX_ARM_ARCH__ == 6
+#elif defined(CONFIG_CPU_V6)
 #define tls_emu		0
 #define has_tls_reg		(elf_hwcap & HWCAP_TLS)
 #define set_tls		set_tls_v6
+#elif defined(CONFIG_CPU_32v6K)
+#define tls_emu		0
+#define has_tls_reg		1
+#define set_tls		set_tls_v6k
 #else
 #define tls_emu		0
 #define has_tls_reg		0
-- cgit v1.2.3-70-g09d2

From 774c096bf9e49eebf7b5d2d9fdddf632c29ccea0 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 23 Jan 2011 13:04:53 +0000
Subject: ARM: v6/v7 cache: allow cache calls to be optimized

The v6 cache call optimization was disabled to allow the optional
block cache operations to be substituted on CPUs which supported those
operations.  However, as that functionality was removed, we no longer
need to prevent this optimization being taken advantage of.

The v7 cache call optimization was just a copy of the v6, so fix
that too.

Tested-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/include/asm/cacheflush.h | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 7d0614f599a..d9b4c42d62f 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -116,20 +116,20 @@
 # define MULTI_CACHE 1
 #endif
 
-#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
-//# ifdef _CACHE
+#if defined(CONFIG_CPU_CACHE_V6)
+# ifdef _CACHE
 # define MULTI_CACHE 1
-//# else
-//# define _CACHE v6
-//# endif
+# else
+# define _CACHE v6
+# endif
 #endif
 
-#if defined(CONFIG_CPU_V7)
-//# ifdef _CACHE
+#if defined(CONFIG_CPU_CACHE_V7)
+# ifdef _CACHE
 # define MULTI_CACHE 1
-//# else
-//# define _CACHE v7
-//# endif
+# else
+# define _CACHE v7
+# endif
 #endif
 
 #if !defined(_CACHE) && !defined(MULTI_CACHE)
-- cgit v1.2.3-70-g09d2

From 917692f5f7ec63de3b093c825913d68e910db282 Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Wed, 9 Feb 2011 12:06:59 +0100
Subject: ARM: 6655/1: Correct WFE() in asm/spinlock.h for Thumb-2

The content for ALT_SMP() in the definition of WFE() expands to 6 bytes
(IT cc ; WFEcc.W), which breaks the assumptions of the fixup code,
leading to lockups when the affected code gets run.

This patch works around the problem by explicitly using an IT +
WFEcc.N pair.
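
As an illustration (not part of the patch), WFE("ne") on a Thumb-2
kernel now expands to the sequence below; the explicit IT (2 bytes)
plus the narrow WFE (2 bytes) fill exactly the 4-byte slot that the
"nop.w" replacement and the fixup code expect:

	9998:	it	ne
		wfene.n
		.pushsection ".alt.smp.init", "a"
		.long	9998b
		nop.w
		.popsection
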
Signed-off-by: Dave Martin
Acked-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/include/asm/spinlock.h | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index da1af524015..fdd3820edff 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -18,7 +18,23 @@
 
 #ifdef CONFIG_THUMB2_KERNEL
 #define SEV		ALT_SMP("sev.w", "nop.w")
-#define WFE(cond)	ALT_SMP("wfe" cond ".w", "nop.w")
+/*
+ * For Thumb-2, special care is needed to ensure that the conditional WFE
+ * instruction really does assemble to exactly 4 bytes (as required by
+ * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
+ * assembler to insert an extra (16-bit) IT instruction, depending on the
+ * presence or absence of neighbouring conditional instructions.
+ *
+ * To avoid this unpredictability, an appropriate IT is inserted explicitly:
+ * the assembler won't change IT instructions which are explicitly present
+ * in the input.
+ */
+#define WFE(cond)	ALT_SMP(		\
+	"it " cond "\n\t"			\
+	"wfe" cond ".n",			\
+						\
+	"nop.w"					\
+)
 #else
 #define SEV		ALT_SMP("sev", "nop")
 #define WFE(cond)	ALT_SMP("wfe" cond, "nop")
-- cgit v1.2.3-70-g09d2

From 292ec42af7c6361435fe9df50cd59ec76f6741c6 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Fri, 4 Feb 2011 10:36:39 +0000
Subject: ARM: pm: add function to set WFI low-power mode for SMP CPUs

Add a function to set the SCU low-power mode for SMP CPUs.  This
centralizes this functionality rather than having to expose the SCU
register definitions to each platform.

Signed-off-by: Russell King
---
 arch/arm/include/asm/smp_scu.h |  7 +++++++
 arch/arm/kernel/smp_scu.c      | 23 +++++++++++++++++++++++
 2 files changed, 30 insertions(+)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h
index 2376835015d..4eb6d005ffa 100644
--- a/arch/arm/include/asm/smp_scu.h
+++ b/arch/arm/include/asm/smp_scu.h
@@ -1,7 +1,14 @@
 #ifndef __ASMARM_ARCH_SCU_H
 #define __ASMARM_ARCH_SCU_H
 
+#define SCU_PM_NORMAL	0
+#define SCU_PM_DORMANT	2
+#define SCU_PM_POWEROFF	3
+
+#ifndef __ASSEMBLER__
 unsigned int scu_get_core_count(void __iomem *);
 void scu_enable(void __iomem *);
+int scu_power_mode(void __iomem *, unsigned int);
+#endif
 
 #endif
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 9ab4149bd98..a1e757c3439 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -50,3 +50,26 @@ void __init scu_enable(void __iomem *scu_base)
 	 */
 	flush_cache_all();
 }
+
+/*
+ * Set the executing CPU's power mode as defined.  This will be in
+ * preparation for it executing a WFI instruction.
+ *
+ * This function must be called with preemption disabled, and as it
+ * has the side effect of disabling coherency, caches must have been
+ * flushed.  Interrupts must also have been disabled.
+ */
+int scu_power_mode(void __iomem *scu_base, unsigned int mode)
+{
+	unsigned int val;
+	int cpu = smp_processor_id();
+
+	if (mode > 3 || mode == 1 || cpu > 3)
+		return -EINVAL;
+
+	val = __raw_readb(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
+	val |= mode;
+	__raw_writeb(val, scu_base + SCU_CPU_STATUS + cpu);
+
+	return 0;
+}
-- cgit v1.2.3-70-g09d2
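
A hypothetical caller of the new function, sketched for illustration
only: example_enter_lowpower() and its scu_base argument are invented
names, scu_base is assumed to be the platform's ioremapped SCU base,
and error handling is omitted.  Per the comment above, the caller must
have preemption and interrupts disabled:

	#include <asm/cacheflush.h>
	#include <asm/proc-fns.h>
	#include <asm/smp_scu.h>

	static void example_enter_lowpower(void __iomem *scu_base)
	{
		flush_cache_all();		/* coherency is about to be lost */
		scu_power_mode(scu_base, SCU_PM_DORMANT);
		cpu_do_idle();			/* executes WFI */
		scu_power_mode(scu_base, SCU_PM_NORMAL);	/* on wakeup */
	}
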
From 753790e713d80b50b867fa1ed32ec0eb5e82ae8e Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 6 Feb 2011 15:32:24 +0000
Subject: ARM: move cache/processor/fault glue to separate include files

This allows the cache/processor/fault glue to be more easily used
from assembler code.

Tested on Assabet and Tegra 2.
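
The effect of the glue scheme being moved here can be demonstrated with
a small standalone program (illustration only, not kernel code): when a
single cache model is selected, _CACHE expands into the function
prefix, so __cpuc_flush_kern_all resolves directly to, for example,
v7_flush_kern_cache_all with no indirection:

	#include <stdio.h>

	#define ____glue(name,fn)	name##fn
	#define __glue(name,fn)		____glue(name,fn)

	#define _CACHE v7	/* as if only CONFIG_CPU_CACHE_V7 were set */

	#define STRINGIFY2(x)	#x
	#define STRINGIFY(x)	STRINGIFY2(x)

	int main(void)
	{
		/* prints: v7_flush_kern_cache_all */
		puts(STRINGIFY(__glue(_CACHE, _flush_kern_cache_all)));
		return 0;
	}
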
Tested-by: Colin Cross
Signed-off-by: Russell King
---
 arch/arm/include/asm/cacheflush.h  | 133 +----------------
 arch/arm/include/asm/cpu-multi32.h |  69 ---------
 arch/arm/include/asm/cpu-single.h  |  44 ------
 arch/arm/include/asm/glue-cache.h  | 146 ++++++++++++++++++
 arch/arm/include/asm/glue-df.h     | 110 ++++++++++++++
 arch/arm/include/asm/glue-pf.h     |  57 +++++++
 arch/arm/include/asm/glue-proc.h   | 261 ++++++++++++++++++++++++++++++++
 arch/arm/include/asm/glue.h        | 138 -----------------
 arch/arm/include/asm/proc-fns.h    | 299 ++++++++-----------------------------
 arch/arm/kernel/asm-offsets.c      |   2 +
 arch/arm/kernel/entry-armv.S       |   3 +-
 11 files changed, 644 insertions(+), 618 deletions(-)
 delete mode 100644 arch/arm/include/asm/cpu-multi32.h
 delete mode 100644 arch/arm/include/asm/cpu-single.h
 create mode 100644 arch/arm/include/asm/glue-cache.h
 create mode 100644 arch/arm/include/asm/glue-df.h
 create mode 100644 arch/arm/include/asm/glue-pf.h
 create mode 100644 arch/arm/include/asm/glue-proc.h

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 3acd8fa25e3..18a56640d97 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -12,130 +12,13 @@
 
 #include <linux/mm.h>
 
-#include <asm/glue.h>
+#include <asm/glue-cache.h>
 #include <asm/shmparam.h>
 #include <asm/cachetype.h>
 #include <asm/outercache.h>
 
 #define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
 
-/*
- * Cache Model
- * ===========
- */
-#undef _CACHE
-#undef MULTI_CACHE
-
-#if defined(CONFIG_CPU_CACHE_V3)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE v3
-# endif
-#endif
-
-#if defined(CONFIG_CPU_CACHE_V4)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE v4
-# endif
-#endif
-
-#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
-    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
-    defined(CONFIG_CPU_ARM1026)
-# define MULTI_CACHE 1
-#endif
-
-#if defined(CONFIG_CPU_FA526)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE fa
-# endif
-#endif
-
-#if defined(CONFIG_CPU_ARM926T)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE arm926
-# endif
-#endif
-
-#if defined(CONFIG_CPU_ARM940T)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE arm940
-# endif
-#endif
-
-#if defined(CONFIG_CPU_ARM946E)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE arm946
-# endif
-#endif
-
-#if defined(CONFIG_CPU_CACHE_V4WB)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE v4wb
-# endif
-#endif
-
-#if defined(CONFIG_CPU_XSCALE)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE xscale
-# endif
-#endif
-
-#if defined(CONFIG_CPU_XSC3)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE xsc3
-# endif
-#endif
-
-#if defined(CONFIG_CPU_MOHAWK)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE mohawk
-# endif
-#endif
-
-#if defined(CONFIG_CPU_FEROCEON)
-# define MULTI_CACHE 1
-#endif
-
-#if defined(CONFIG_CPU_V6)
-//# ifdef _CACHE
-# define MULTI_CACHE 1
-//# else
-//# define _CACHE v6
-//# endif
-#endif
-
-#if defined(CONFIG_CPU_V7)
-//# ifdef _CACHE
-# define MULTI_CACHE 1
-//# else
-//# define _CACHE v7
-//# endif
-#endif
-
-#if !defined(_CACHE) && !defined(MULTI_CACHE)
-#error Unknown cache maintainence model
-#endif
-
 /*
  * This flag is used to indicate that the page pointed to by a pte is clean
  * and does not require cleaning before returning it to the user.
  */
@@ -249,19 +132,11 @@ extern struct cpu_cache_fns cpu_cache;
  * visible to the CPU.
  */
 #define dmac_map_area			cpu_cache.dma_map_area
-#define dmac_unmap_area		cpu_cache.dma_unmap_area
+#define dmac_unmap_area			cpu_cache.dma_unmap_area
 #define dmac_flush_range		cpu_cache.dma_flush_range
 
 #else
 
-#define __cpuc_flush_icache_all	__glue(_CACHE,_flush_icache_all)
-#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
-#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
-#define __cpuc_flush_user_range	__glue(_CACHE,_flush_user_cache_range)
-#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
-#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
-#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
-
 extern void __cpuc_flush_icache_all(void);
 extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
@@ -276,10 +151,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_map_area			__glue(_CACHE,_dma_map_area)
-#define dmac_unmap_area		__glue(_CACHE,_dma_unmap_area)
-#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
-
 extern void dmac_map_area(const void *, size_t, int);
 extern void dmac_unmap_area(const void *, size_t, int);
 extern void dmac_flush_range(const void *, const void *);
diff --git a/arch/arm/include/asm/cpu-multi32.h b/arch/arm/include/asm/cpu-multi32.h
deleted file mode 100644
index e2b5b0b2116..00000000000
--- a/arch/arm/include/asm/cpu-multi32.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * arch/arm/include/asm/cpu-multi32.h
- *
- * Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <asm/page.h>
-
-struct mm_struct;
-
-/*
- * Don't change this structure - ASM code
- * relies on it.
- */
-extern struct processor {
-	/* MISC
-	 * get data abort address/flags
-	 */
-	void (*_data_abort)(unsigned long pc);
-	/*
-	 * Retrieve prefetch fault address
-	 */
-	unsigned long (*_prefetch_abort)(unsigned long lr);
-	/*
-	 * Set up any processor specifics
-	 */
-	void (*_proc_init)(void);
-	/*
-	 * Disable any processor specifics
-	 */
-	void (*_proc_fin)(void);
-	/*
-	 * Special stuff for a reset
-	 */
-	void (*reset)(unsigned long addr) __attribute__((noreturn));
-	/*
-	 * Idle the processor
-	 */
-	int (*_do_idle)(void);
-	/*
-	 * Processor architecture specific
-	 */
-	/*
-	 * clean a virtual address range from the
-	 * D-cache without flushing the cache.
-	 */
-	void (*dcache_clean_area)(void *addr, int size);
-
-	/*
-	 * Set the page table
-	 */
-	void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
-	/*
-	 * Set a possibly extended PTE.  Non-extended PTEs should
-	 * ignore 'ext'.
-	 */
-	void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
-} processor;
-
-#define cpu_proc_init()			processor._proc_init()
-#define cpu_proc_fin()			processor._proc_fin()
-#define cpu_reset(addr)			processor.reset(addr)
-#define cpu_do_idle()			processor._do_idle()
-#define cpu_dcache_clean_area(addr,sz)	processor.dcache_clean_area(addr,sz)
-#define cpu_set_pte_ext(ptep,pte,ext)	processor.set_pte_ext(ptep,pte,ext)
-#define cpu_do_switch_mm(pgd,mm)	processor.switch_mm(pgd,mm)
diff --git a/arch/arm/include/asm/cpu-single.h b/arch/arm/include/asm/cpu-single.h
deleted file mode 100644
index f073a6d2a40..00000000000
--- a/arch/arm/include/asm/cpu-single.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * arch/arm/include/asm/cpu-single.h
- *
- * Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-/*
- * Single CPU
- */
-#ifdef __STDC__
-#define __catify_fn(name,x)	name##x
-#else
-#define __catify_fn(name,x)	name/**/x
-#endif
-#define __cpu_fn(name,x)	__catify_fn(name,x)
-
-/*
- * If we are supporting multiple CPUs, then we must use a table of
- * function pointers for this lot.  Otherwise, we can optimise the
- * table away.
- */
-#define cpu_proc_init			__cpu_fn(CPU_NAME,_proc_init)
-#define cpu_proc_fin			__cpu_fn(CPU_NAME,_proc_fin)
-#define cpu_reset			__cpu_fn(CPU_NAME,_reset)
-#define cpu_do_idle			__cpu_fn(CPU_NAME,_do_idle)
-#define cpu_dcache_clean_area		__cpu_fn(CPU_NAME,_dcache_clean_area)
-#define cpu_do_switch_mm		__cpu_fn(CPU_NAME,_switch_mm)
-#define cpu_set_pte_ext			__cpu_fn(CPU_NAME,_set_pte_ext)
-
-#include <asm/page.h>
-
-struct mm_struct;
-
-/* declare all the functions as extern */
-extern void cpu_proc_init(void);
-extern void cpu_proc_fin(void);
-extern int cpu_do_idle(void);
-extern void cpu_dcache_clean_area(void *, int);
-extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
-extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
-extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
new file mode 100644
index 00000000000..0591d35001e
--- /dev/null
+++ b/arch/arm/include/asm/glue-cache.h
@@ -0,0 +1,146 @@
+/*
+ * arch/arm/include/asm/glue-cache.h
+ *
+ * Copyright (C) 1999-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ASM_GLUE_CACHE_H
+#define ASM_GLUE_CACHE_H
+
+#include <asm/glue.h>
+
+/*
+ * Cache Model
+ * ===========
+ */
+#undef _CACHE
+#undef MULTI_CACHE
+
+#if defined(CONFIG_CPU_CACHE_V3)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE v3
+# endif
+#endif
+
+#if defined(CONFIG_CPU_CACHE_V4)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE v4
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
+    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
+    defined(CONFIG_CPU_ARM1026)
+# define MULTI_CACHE 1
+#endif
+
+#if defined(CONFIG_CPU_FA526)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE fa
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM926T)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE arm926
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM940T)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE arm940
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM946E)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE arm946
+# endif
+#endif
+
+#if defined(CONFIG_CPU_CACHE_V4WB)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE v4wb
+# endif
+#endif
+
+#if defined(CONFIG_CPU_XSCALE)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE xscale
+# endif
+#endif
+
+#if defined(CONFIG_CPU_XSC3)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE xsc3
+# endif
+#endif
+
+#if defined(CONFIG_CPU_MOHAWK)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE mohawk
+# endif
+#endif
+
+#if defined(CONFIG_CPU_FEROCEON)
+# define MULTI_CACHE 1
+#endif
+
+#if defined(CONFIG_CPU_V6)
+//# ifdef _CACHE
+# define MULTI_CACHE 1
+//# else
+//# define _CACHE v6
+//# endif
+#endif
+
+#if defined(CONFIG_CPU_V7)
+//# ifdef _CACHE
+# define MULTI_CACHE 1
+//# else
+//# define _CACHE v7
+//# endif
+#endif
+
+#if !defined(_CACHE) && !defined(MULTI_CACHE)
+#error Unknown cache maintainence model
+#endif
+
+#ifndef MULTI_CACHE
+#define __cpuc_flush_icache_all	__glue(_CACHE,_flush_icache_all)
+#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
+#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
+#define __cpuc_flush_user_range	__glue(_CACHE,_flush_user_cache_range)
+#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
+#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
+#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
+
+#define dmac_map_area			__glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area		__glue(_CACHE,_dma_unmap_area)
+#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
new file mode 100644
index 00000000000..354d571e8bc
--- /dev/null
+++ b/arch/arm/include/asm/glue-df.h
@@ -0,0 +1,110 @@
+/*
+ * arch/arm/include/asm/glue-df.h
+ *
+ * Copyright (C) 1997-1999 Russell King
+ * Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ASM_GLUE_DF_H
+#define ASM_GLUE_DF_H
+
+#include <asm/glue.h>
+
+/*
+ * Data Abort Model
+ * ================
+ *
+ * We have the following to choose from:
+ *   arm6          - ARM6 style
+ *   arm7          - ARM7 style
+ *   v4_early      - ARMv4 without Thumb early abort handler
+ *   v4t_late      - ARMv4 with Thumb late abort handler
+ *   v4t_early     - ARMv4 with Thumb early abort handler
+ *   v5tej_early   - ARMv5 with Thumb and Java early abort handler
+ *   xscale        - ARMv5 with Thumb with Xscale extensions
+ *   v6_early      - ARMv6 generic early abort handler
+ *   v7_early      - ARMv7 generic early abort handler
+ */
+#undef CPU_DABORT_HANDLER
+#undef MULTI_DABORT
+
+#if defined(CONFIG_CPU_ARM610)
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER cpu_arm6_data_abort
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM710)
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER cpu_arm7_data_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_LV4T
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER v4t_late_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV4
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER v4_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV4T
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER v4t_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV5TJ
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER v5tj_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV5T
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER v5t_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV6
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER v6_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV7
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER v7_early_abort
+# endif
+#endif
+
+#ifndef CPU_DABORT_HANDLER
+#error Unknown data abort handler type
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/glue-pf.h b/arch/arm/include/asm/glue-pf.h
new file mode 100644
index 00000000000..d385f37c13f
--- /dev/null
+++ b/arch/arm/include/asm/glue-pf.h
@@ -0,0 +1,57 @@
+/*
+ * arch/arm/include/asm/glue-pf.h
+ *
+ * Copyright (C) 1997-1999 Russell King
+ * Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ASM_GLUE_PF_H
+#define ASM_GLUE_PF_H
+
+#include <asm/glue.h>
+
+/*
+ * Prefetch Abort Model
+ * ================
+ *
+ * We have the following to choose from:
+ *   legacy - no IFSR, no IFAR
+ *   v6     - ARMv6: IFSR, no IFAR
+ *   v7     - ARMv7: IFSR and IFAR
+ */
+
+#undef CPU_PABORT_HANDLER
+#undef MULTI_PABORT
+
+#ifdef CONFIG_CPU_PABRT_LEGACY
+# ifdef CPU_PABORT_HANDLER
+#  define MULTI_PABORT 1
+# else
+#  define CPU_PABORT_HANDLER legacy_pabort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_PABRT_V6
+# ifdef CPU_PABORT_HANDLER
+#  define MULTI_PABORT 1
+# else
+#  define CPU_PABORT_HANDLER v6_pabort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_PABRT_V7
+# ifdef CPU_PABORT_HANDLER
+#  define MULTI_PABORT 1
+# else
+#  define CPU_PABORT_HANDLER v7_pabort
+# endif
+#endif
+
+#ifndef CPU_PABORT_HANDLER
+#error Unknown prefetch abort handler type
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
new file mode 100644
index 00000000000..e3bf443f2d1
--- /dev/null
+++ b/arch/arm/include/asm/glue-proc.h
@@ -0,0 +1,261 @@
+/*
+ * arch/arm/include/asm/glue-proc.h
+ *
+ * Copyright (C) 1997-1999 Russell King
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ASM_GLUE_PROC_H
+#define ASM_GLUE_PROC_H
+
+#include <asm/glue.h>
+
+/*
+ * Work out if we need multiple CPU support
+ */
+#undef MULTI_CPU
+#undef CPU_NAME
+
+/*
+ * CPU_NAME - the prefix for CPU related functions
+ */
+
+#ifdef CONFIG_CPU_ARM610
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm6
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM7TDMI
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm7tdmi
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM710
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm7
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM720T
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm720
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM740T
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm740
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM9TDMI
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm9tdmi
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM920T
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm920
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM922T
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm922
+# endif
+#endif
+
+#ifdef CONFIG_CPU_FA526
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_fa526
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM925T
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm925
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM926T
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm926
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM940T
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm940
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM946E
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm946
+# endif
+#endif
+
+#ifdef CONFIG_CPU_SA110
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_sa110
endif +#endif + +#ifdef CONFIG_CPU_SA1100 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_sa1100 +# endif +#endif + +#ifdef CONFIG_CPU_ARM1020 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1020 +# endif +#endif + +#ifdef CONFIG_CPU_ARM1020E +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1020e +# endif +#endif + +#ifdef CONFIG_CPU_ARM1022 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1022 +# endif +#endif + +#ifdef CONFIG_CPU_ARM1026 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1026 +# endif +#endif + +#ifdef CONFIG_CPU_XSCALE +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_xscale +# endif +#endif + +#ifdef CONFIG_CPU_XSC3 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_xsc3 +# endif +#endif + +#ifdef CONFIG_CPU_MOHAWK +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_mohawk +# endif +#endif + +#ifdef CONFIG_CPU_FEROCEON +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_feroceon +# endif +#endif + +#ifdef CONFIG_CPU_V6 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_v6 +# endif +#endif + +#ifdef CONFIG_CPU_V7 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_v7 +# endif +#endif + +#ifndef MULTI_CPU +#define cpu_proc_init __glue(CPU_NAME,_proc_init) +#define cpu_proc_fin __glue(CPU_NAME,_proc_fin) +#define cpu_reset __glue(CPU_NAME,_reset) +#define cpu_do_idle __glue(CPU_NAME,_do_idle) +#define cpu_dcache_clean_area __glue(CPU_NAME,_dcache_clean_area) +#define cpu_do_switch_mm __glue(CPU_NAME,_switch_mm) +#define cpu_set_pte_ext __glue(CPU_NAME,_set_pte_ext) +#endif + +#endif diff --git a/arch/arm/include/asm/glue.h b/arch/arm/include/asm/glue.h index 234a3fc1c78..0ec35d1698a 100644 --- a/arch/arm/include/asm/glue.h +++ b/arch/arm/include/asm/glue.h @@ -15,7 +15,6 @@ */ #ifdef __KERNEL__ - #ifdef __STDC__ #define ____glue(name,fn) name##fn #else @@ -23,141 +22,4 @@ #endif #define __glue(name,fn) ____glue(name,fn) - - -/* - * Data Abort Model - * ================ - * - * We have the following to choose from: - * arm6 - ARM6 style - * arm7 - ARM7 style - * v4_early - ARMv4 without Thumb early abort handler - * v4t_late - ARMv4 with Thumb late abort handler - * v4t_early - ARMv4 with Thumb early abort handler - * v5tej_early - ARMv5 with Thumb and Java early abort handler - * xscale - ARMv5 with Thumb with Xscale extensions - * v6_early - ARMv6 generic early abort handler - * v7_early - ARMv7 generic early abort handler - */ -#undef CPU_DABORT_HANDLER -#undef MULTI_DABORT - -#if defined(CONFIG_CPU_ARM610) -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER cpu_arm6_data_abort -# endif -#endif - -#if defined(CONFIG_CPU_ARM710) -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER cpu_arm7_data_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_LV4T -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER v4t_late_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV4 -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER v4_early_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV4T -# ifdef CPU_DABORT_HANDLER -# 
define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER v4t_early_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV5TJ -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER v5tj_early_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV5T -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER v5t_early_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV6 -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER v6_early_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV7 -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER v7_early_abort -# endif -#endif - -#ifndef CPU_DABORT_HANDLER -#error Unknown data abort handler type -#endif - -/* - * Prefetch Abort Model - * ================ - * - * We have the following to choose from: - * legacy - no IFSR, no IFAR - * v6 - ARMv6: IFSR, no IFAR - * v7 - ARMv7: IFSR and IFAR - */ - -#undef CPU_PABORT_HANDLER -#undef MULTI_PABORT - -#ifdef CONFIG_CPU_PABRT_LEGACY -# ifdef CPU_PABORT_HANDLER -# define MULTI_PABORT 1 -# else -# define CPU_PABORT_HANDLER legacy_pabort -# endif -#endif - -#ifdef CONFIG_CPU_PABRT_V6 -# ifdef CPU_PABORT_HANDLER -# define MULTI_PABORT 1 -# else -# define CPU_PABORT_HANDLER v6_pabort -# endif -#endif - -#ifdef CONFIG_CPU_PABRT_V7 -# ifdef CPU_PABORT_HANDLER -# define MULTI_PABORT 1 -# else -# define CPU_PABORT_HANDLER v7_pabort -# endif -#endif - -#ifndef CPU_PABORT_HANDLER -#error Unknown prefetch abort handler type -#endif - #endif diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index 8fdae9bc9ab..69802150be2 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h @@ -13,248 +13,77 @@ #ifdef __KERNEL__ +#include +#include -/* - * Work out if we need multiple CPU support - */ -#undef MULTI_CPU -#undef CPU_NAME +#ifndef __ASSEMBLY__ + +struct mm_struct; /* - * CPU_NAME - the prefix for CPU related functions + * Don't change this structure - ASM code relies on it. 
*/ - -#ifdef CONFIG_CPU_ARM610 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm6 -# endif -#endif - -#ifdef CONFIG_CPU_ARM7TDMI -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm7tdmi -# endif -#endif - -#ifdef CONFIG_CPU_ARM710 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm7 -# endif -#endif - -#ifdef CONFIG_CPU_ARM720T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm720 -# endif -#endif - -#ifdef CONFIG_CPU_ARM740T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm740 -# endif -#endif - -#ifdef CONFIG_CPU_ARM9TDMI -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm9tdmi -# endif -#endif - -#ifdef CONFIG_CPU_ARM920T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm920 -# endif -#endif - -#ifdef CONFIG_CPU_ARM922T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm922 -# endif -#endif - -#ifdef CONFIG_CPU_FA526 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_fa526 -# endif -#endif - -#ifdef CONFIG_CPU_ARM925T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm925 -# endif -#endif - -#ifdef CONFIG_CPU_ARM926T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm926 -# endif -#endif - -#ifdef CONFIG_CPU_ARM940T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm940 -# endif -#endif - -#ifdef CONFIG_CPU_ARM946E -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm946 -# endif -#endif - -#ifdef CONFIG_CPU_SA110 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_sa110 -# endif -#endif - -#ifdef CONFIG_CPU_SA1100 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_sa1100 -# endif -#endif - -#ifdef CONFIG_CPU_ARM1020 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1020 -# endif -#endif - -#ifdef CONFIG_CPU_ARM1020E -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1020e -# endif -#endif - -#ifdef CONFIG_CPU_ARM1022 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1022 -# endif -#endif - -#ifdef CONFIG_CPU_ARM1026 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1026 -# endif -#endif - -#ifdef CONFIG_CPU_XSCALE -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_xscale -# endif -#endif - -#ifdef CONFIG_CPU_XSC3 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_xsc3 -# endif -#endif - -#ifdef CONFIG_CPU_MOHAWK -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_mohawk -# endif -#endif - -#ifdef CONFIG_CPU_FEROCEON -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_feroceon -# endif -#endif - -#ifdef CONFIG_CPU_V6 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_v6 -# endif -#endif - -#ifdef CONFIG_CPU_V7 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_v7 -# endif -#endif - -#ifndef __ASSEMBLY__ +extern struct processor 
{
+	/* MISC
+	 * get data abort address/flags
+	 */
+	void (*_data_abort)(unsigned long pc);
+	/*
+	 * Retrieve prefetch fault address
+	 */
+	unsigned long (*_prefetch_abort)(unsigned long lr);
+	/*
+	 * Set up any processor specifics
+	 */
+	void (*_proc_init)(void);
+	/*
+	 * Disable any processor specifics
+	 */
+	void (*_proc_fin)(void);
+	/*
+	 * Special stuff for a reset
+	 */
+	void (*reset)(unsigned long addr) __attribute__((noreturn));
+	/*
+	 * Idle the processor
+	 */
+	int (*_do_idle)(void);
+	/*
+	 * Processor architecture specific
+	 */
+	/*
+	 * clean a virtual address range from the
+	 * D-cache without flushing the cache.
+	 */
+	void (*dcache_clean_area)(void *addr, int size);
+
+	/*
+	 * Set the page table
+	 */
+	void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
+	/*
+	 * Set a possibly extended PTE.  Non-extended PTEs should
+	 * ignore 'ext'.
+	 */
+	void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
+} processor;

 #ifndef MULTI_CPU
-#include <asm/cpu-single.h>
+extern void cpu_proc_init(void);
+extern void cpu_proc_fin(void);
+extern int cpu_do_idle(void);
+extern void cpu_dcache_clean_area(void *, int);
+extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
 #else
-#include <asm/cpu-multi32.h>
+#define cpu_proc_init()			processor._proc_init()
+#define cpu_proc_fin()			processor._proc_fin()
+#define cpu_reset(addr)			processor.reset(addr)
+#define cpu_do_idle()			processor._do_idle()
+#define cpu_dcache_clean_area(addr,sz)	processor.dcache_clean_area(addr,sz)
+#define cpu_set_pte_ext(ptep,pte,ext)	processor.set_pte_ext(ptep,pte,ext)
+#define cpu_do_switch_mm(pgd,mm)	processor.switch_mm(pgd,mm)
 #endif
+
+#include <asm/memory.h>
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 82da6617213..5302a917271 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -13,6 +13,8 @@
 #include
 #include
 #include
+#include <asm/glue-df.h>
+#include <asm/glue-pf.h>
 #include
 #include
 #include
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 2b46fea36c9..e8d88567680 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -16,7 +16,8 @@
  */
 #include
-#include <asm/glue.h>
+#include <asm/glue-df.h>
+#include <asm/glue-pf.h>
 #include
 #include
 #include
--
cgit v1.2.3-70-g09d2


From f4117ac9e237b74afdf5e001d5ea26a4d15e9847 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Tue, 4 Jan 2011 18:07:14 +0000
Subject: ARM: P2V: separate PHYS_OFFSET from platform definitions

This uncouples PHYS_OFFSET from the platform definitions, thereby
facilitating run-time computation of the physical memory offset.
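To illustrate the resulting layering, here is a rough sketch paraphrasing
the headers this series touches; it is not literal patch text, and the
0x20000000 value is only an example platform constant:

	/* each mach/memory.h now supplies only the platform constant: */
	#define PLAT_PHYS_OFFSET	UL(0x20000000)	/* example value */

	/* generic code (asm/memory.h) decides what PHYS_OFFSET means;
	 * for now it simply falls back to the platform constant, which
	 * leaves room to redefine it as a run-time value later: */
	#define PHYS_OFFSET		PLAT_PHYS_OFFSET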
Acked-by: Nicolas Pitre Acked-by: Viresh Kumar Acked-by: H Hartley Sweeten Acked-by: Magnus Damm Acked-by: Tony Lindgren Acked-by: Jean-Christophe PLAGNIOL-VILLARD Acked-by: Wan ZongShun Acked-by: Kukjin Kim Acked-by: Eric Miao Acked-by: Jiandong Zheng Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 2 ++ arch/arm/kernel/tcm.c | 2 +- arch/arm/mach-aaec2000/include/mach/memory.h | 2 +- arch/arm/mach-at91/include/mach/memory.h | 2 +- arch/arm/mach-bcmring/include/mach/hardware.h | 2 +- arch/arm/mach-bcmring/include/mach/memory.h | 2 +- arch/arm/mach-clps711x/include/mach/memory.h | 2 +- arch/arm/mach-cns3xxx/include/mach/memory.h | 2 +- arch/arm/mach-davinci/include/mach/memory.h | 4 ++-- arch/arm/mach-dove/include/mach/memory.h | 2 +- arch/arm/mach-ebsa110/include/mach/memory.h | 2 +- arch/arm/mach-ep93xx/include/mach/memory.h | 10 +++++----- arch/arm/mach-footbridge/include/mach/memory.h | 2 +- arch/arm/mach-gemini/include/mach/memory.h | 4 ++-- arch/arm/mach-h720x/include/mach/memory.h | 2 +- arch/arm/mach-integrator/include/mach/memory.h | 2 +- arch/arm/mach-iop13xx/include/mach/memory.h | 2 +- arch/arm/mach-iop32x/include/mach/memory.h | 2 +- arch/arm/mach-iop33x/include/mach/memory.h | 2 +- arch/arm/mach-ixp2000/include/mach/memory.h | 2 +- arch/arm/mach-ixp23xx/include/mach/memory.h | 2 +- arch/arm/mach-ixp4xx/include/mach/memory.h | 2 +- arch/arm/mach-kirkwood/include/mach/memory.h | 2 +- arch/arm/mach-ks8695/include/mach/memory.h | 2 +- arch/arm/mach-lh7a40x/include/mach/memory.h | 2 +- arch/arm/mach-loki/include/mach/memory.h | 2 +- arch/arm/mach-lpc32xx/include/mach/memory.h | 2 +- arch/arm/mach-mmp/include/mach/memory.h | 2 +- arch/arm/mach-msm/board-msm7x30.c | 2 +- arch/arm/mach-msm/include/mach/memory.h | 10 +++++----- arch/arm/mach-mv78xx0/include/mach/memory.h | 2 +- arch/arm/mach-mx3/mach-kzm_arm11_01.c | 2 +- arch/arm/mach-netx/include/mach/memory.h | 2 +- arch/arm/mach-nomadik/include/mach/memory.h | 2 +- arch/arm/mach-ns9xxx/include/mach/memory.h | 2 +- arch/arm/mach-nuc93x/include/mach/memory.h | 2 +- arch/arm/mach-orion5x/include/mach/memory.h | 2 +- arch/arm/mach-pnx4008/include/mach/memory.h | 2 +- arch/arm/mach-pxa/include/mach/memory.h | 2 +- arch/arm/mach-realview/include/mach/memory.h | 4 ++-- arch/arm/mach-rpc/include/mach/memory.h | 2 +- arch/arm/mach-s3c2400/include/mach/memory.h | 2 +- arch/arm/mach-s3c2410/include/mach/memory.h | 2 +- arch/arm/mach-s3c24a0/include/mach/memory.h | 2 +- arch/arm/mach-s3c64xx/include/mach/memory.h | 2 +- arch/arm/mach-s5p6442/include/mach/memory.h | 2 +- arch/arm/mach-s5p64x0/include/mach/memory.h | 2 +- arch/arm/mach-s5pc100/include/mach/memory.h | 2 +- arch/arm/mach-s5pv210/include/mach/memory.h | 2 +- arch/arm/mach-s5pv310/include/mach/memory.h | 2 +- arch/arm/mach-sa1100/include/mach/memory.h | 2 +- arch/arm/mach-shark/include/mach/memory.h | 2 +- arch/arm/mach-shmobile/include/mach/memory.h | 2 +- arch/arm/mach-tegra/include/mach/memory.h | 2 +- arch/arm/mach-u300/include/mach/memory.h | 6 +++--- arch/arm/mach-u300/u300.c | 2 +- arch/arm/mach-ux500/include/mach/memory.h | 2 +- arch/arm/mach-versatile/include/mach/memory.h | 2 +- arch/arm/mach-vexpress/include/mach/memory.h | 2 +- arch/arm/mach-w90x900/include/mach/memory.h | 2 +- arch/arm/plat-mxc/include/mach/memory.h | 18 +++++++++--------- arch/arm/plat-omap/include/plat/memory.h | 4 ++-- arch/arm/plat-spear/include/plat/memory.h | 2 +- arch/arm/plat-stmp3xxx/include/mach/memory.h | 2 +- arch/arm/plat-tcc/include/mach/memory.h | 2 +- 65 files changed, 88 
insertions(+), 86 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index d0ee74b7cf8..2efec578a62 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -24,6 +24,8 @@ */ #define UL(x) _AC(x, UL) +#define PHYS_OFFSET PLAT_PHYS_OFFSET + #ifdef CONFIG_MMU /* diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c index 26685c2f7a4..f5cf660eefc 100644 --- a/arch/arm/kernel/tcm.c +++ b/arch/arm/kernel/tcm.c @@ -15,7 +15,7 @@ #include /* memcpy */ #include #include -#include +#include #include "tcm.h" static struct gen_pool *tcm_pool; diff --git a/arch/arm/mach-aaec2000/include/mach/memory.h b/arch/arm/mach-aaec2000/include/mach/memory.h index 4f93c567a35..4a10bf0bd36 100644 --- a/arch/arm/mach-aaec2000/include/mach/memory.h +++ b/arch/arm/mach-aaec2000/include/mach/memory.h @@ -12,6 +12,6 @@ #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0xf0000000) +#define PLAT_PHYS_OFFSET UL(0xf0000000) #endif /* __ASM_ARCH_MEMORY_H */ diff --git a/arch/arm/mach-at91/include/mach/memory.h b/arch/arm/mach-at91/include/mach/memory.h index 14f4ef4b6a9..c2cfe504064 100644 --- a/arch/arm/mach-at91/include/mach/memory.h +++ b/arch/arm/mach-at91/include/mach/memory.h @@ -23,6 +23,6 @@ #include -#define PHYS_OFFSET (AT91_SDRAM_BASE) +#define PLAT_PHYS_OFFSET (AT91_SDRAM_BASE) #endif diff --git a/arch/arm/mach-bcmring/include/mach/hardware.h b/arch/arm/mach-bcmring/include/mach/hardware.h index 447eb340c61..8bf3564fba5 100644 --- a/arch/arm/mach-bcmring/include/mach/hardware.h +++ b/arch/arm/mach-bcmring/include/mach/hardware.h @@ -31,7 +31,7 @@ * *_SIZE is the size of the region * *_BASE is the virtual address */ -#define RAM_START PHYS_OFFSET +#define RAM_START PLAT_PHYS_OFFSET #define RAM_SIZE (CFG_GLOBAL_RAM_SIZE-CFG_GLOBAL_RAM_SIZE_RESERVED) #define RAM_BASE PAGE_OFFSET diff --git a/arch/arm/mach-bcmring/include/mach/memory.h b/arch/arm/mach-bcmring/include/mach/memory.h index 114f942bb4f..15162e4c75f 100644 --- a/arch/arm/mach-bcmring/include/mach/memory.h +++ b/arch/arm/mach-bcmring/include/mach/memory.h @@ -23,7 +23,7 @@ * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. */ -#define PHYS_OFFSET CFG_GLOBAL_RAM_BASE +#define PLAT_PHYS_OFFSET CFG_GLOBAL_RAM_BASE /* * Maximum DMA memory allowed is 14M diff --git a/arch/arm/mach-clps711x/include/mach/memory.h b/arch/arm/mach-clps711x/include/mach/memory.h index f45c8e892cb..3a032a67725 100644 --- a/arch/arm/mach-clps711x/include/mach/memory.h +++ b/arch/arm/mach-clps711x/include/mach/memory.h @@ -23,7 +23,7 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0xc0000000) +#define PLAT_PHYS_OFFSET UL(0xc0000000) #if !defined(CONFIG_ARCH_CDB89712) && !defined (CONFIG_ARCH_AUTCPU12) diff --git a/arch/arm/mach-cns3xxx/include/mach/memory.h b/arch/arm/mach-cns3xxx/include/mach/memory.h index 3b6b769b7a2..dc16c5c5d86 100644 --- a/arch/arm/mach-cns3xxx/include/mach/memory.h +++ b/arch/arm/mach-cns3xxx/include/mach/memory.h @@ -13,7 +13,7 @@ /* * Physical DRAM offset. 
*/ -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #define __phys_to_bus(x) ((x) + PHYS_OFFSET) #define __bus_to_phys(x) ((x) - PHYS_OFFSET) diff --git a/arch/arm/mach-davinci/include/mach/memory.h b/arch/arm/mach-davinci/include/mach/memory.h index 22eb97c1c30..78822723f38 100644 --- a/arch/arm/mach-davinci/include/mach/memory.h +++ b/arch/arm/mach-davinci/include/mach/memory.h @@ -26,9 +26,9 @@ #if defined(CONFIG_ARCH_DAVINCI_DA8XX) && defined(CONFIG_ARCH_DAVINCI_DMx) #error Cannot enable DaVinci and DA8XX platforms concurrently #elif defined(CONFIG_ARCH_DAVINCI_DA8XX) -#define PHYS_OFFSET DA8XX_DDR_BASE +#define PLAT_PHYS_OFFSET DA8XX_DDR_BASE #else -#define PHYS_OFFSET DAVINCI_DDR_BASE +#define PLAT_PHYS_OFFSET DAVINCI_DDR_BASE #endif #define DDR2_SDRCR_OFFSET 0xc diff --git a/arch/arm/mach-dove/include/mach/memory.h b/arch/arm/mach-dove/include/mach/memory.h index d6687207494..bbc93fee6c7 100644 --- a/arch/arm/mach-dove/include/mach/memory.h +++ b/arch/arm/mach-dove/include/mach/memory.h @@ -5,6 +5,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/mach-ebsa110/include/mach/memory.h b/arch/arm/mach-ebsa110/include/mach/memory.h index 0ca66d080c6..8e49066ad85 100644 --- a/arch/arm/mach-ebsa110/include/mach/memory.h +++ b/arch/arm/mach-ebsa110/include/mach/memory.h @@ -19,7 +19,7 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) /* * Cache flushing area - SRAM diff --git a/arch/arm/mach-ep93xx/include/mach/memory.h b/arch/arm/mach-ep93xx/include/mach/memory.h index 554064e9030..c9400cf0051 100644 --- a/arch/arm/mach-ep93xx/include/mach/memory.h +++ b/arch/arm/mach-ep93xx/include/mach/memory.h @@ -6,15 +6,15 @@ #define __ASM_ARCH_MEMORY_H #if defined(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET) -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #elif defined(CONFIG_EP93XX_SDCE0_PHYS_OFFSET) -#define PHYS_OFFSET UL(0xc0000000) +#define PLAT_PHYS_OFFSET UL(0xc0000000) #elif defined(CONFIG_EP93XX_SDCE1_PHYS_OFFSET) -#define PHYS_OFFSET UL(0xd0000000) +#define PLAT_PHYS_OFFSET UL(0xd0000000) #elif defined(CONFIG_EP93XX_SDCE2_PHYS_OFFSET) -#define PHYS_OFFSET UL(0xe0000000) +#define PLAT_PHYS_OFFSET UL(0xe0000000) #elif defined(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET) -#define PHYS_OFFSET UL(0xf0000000) +#define PLAT_PHYS_OFFSET UL(0xf0000000) #else #error "Kconfig bug: No EP93xx PHYS_OFFSET set" #endif diff --git a/arch/arm/mach-footbridge/include/mach/memory.h b/arch/arm/mach-footbridge/include/mach/memory.h index 8d64f457408..5c6df377f96 100644 --- a/arch/arm/mach-footbridge/include/mach/memory.h +++ b/arch/arm/mach-footbridge/include/mach/memory.h @@ -62,7 +62,7 @@ extern unsigned long __bus_to_pfn(unsigned long); /* * Physical DRAM offset. 
*/ -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #define FLUSH_BASE_PHYS 0x50000000 diff --git a/arch/arm/mach-gemini/include/mach/memory.h b/arch/arm/mach-gemini/include/mach/memory.h index 2d14d5bf1f9..a50915f764d 100644 --- a/arch/arm/mach-gemini/include/mach/memory.h +++ b/arch/arm/mach-gemini/include/mach/memory.h @@ -11,9 +11,9 @@ #define __MACH_MEMORY_H #ifdef CONFIG_GEMINI_MEM_SWAP -# define PHYS_OFFSET UL(0x00000000) +# define PLAT_PHYS_OFFSET UL(0x00000000) #else -# define PHYS_OFFSET UL(0x10000000) +# define PLAT_PHYS_OFFSET UL(0x10000000) #endif #endif /* __MACH_MEMORY_H */ diff --git a/arch/arm/mach-h720x/include/mach/memory.h b/arch/arm/mach-h720x/include/mach/memory.h index ef4c1e26f18..9d368765146 100644 --- a/arch/arm/mach-h720x/include/mach/memory.h +++ b/arch/arm/mach-h720x/include/mach/memory.h @@ -7,7 +7,7 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x40000000) +#define PLAT_PHYS_OFFSET UL(0x40000000) /* * This is the maximum DMA address that can be DMAd to. * There should not be more than (0xd0000000 - 0xc0000000) diff --git a/arch/arm/mach-integrator/include/mach/memory.h b/arch/arm/mach-integrator/include/mach/memory.h index 991f24d2c11..334d5e27188 100644 --- a/arch/arm/mach-integrator/include/mach/memory.h +++ b/arch/arm/mach-integrator/include/mach/memory.h @@ -23,7 +23,7 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #define BUS_OFFSET UL(0x80000000) #define __virt_to_bus(x) ((x) - PAGE_OFFSET + BUS_OFFSET) diff --git a/arch/arm/mach-iop13xx/include/mach/memory.h b/arch/arm/mach-iop13xx/include/mach/memory.h index 3ad45531886..1afa99ef97f 100644 --- a/arch/arm/mach-iop13xx/include/mach/memory.h +++ b/arch/arm/mach-iop13xx/include/mach/memory.h @@ -6,7 +6,7 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #ifndef __ASSEMBLY__ diff --git a/arch/arm/mach-iop32x/include/mach/memory.h b/arch/arm/mach-iop32x/include/mach/memory.h index c30f6450ad5..169cc239f76 100644 --- a/arch/arm/mach-iop32x/include/mach/memory.h +++ b/arch/arm/mach-iop32x/include/mach/memory.h @@ -8,6 +8,6 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0xa0000000) +#define PLAT_PHYS_OFFSET UL(0xa0000000) #endif diff --git a/arch/arm/mach-iop33x/include/mach/memory.h b/arch/arm/mach-iop33x/include/mach/memory.h index a30a96aa6d2..8e1daf7006b 100644 --- a/arch/arm/mach-iop33x/include/mach/memory.h +++ b/arch/arm/mach-iop33x/include/mach/memory.h @@ -8,6 +8,6 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/mach-ixp2000/include/mach/memory.h b/arch/arm/mach-ixp2000/include/mach/memory.h index 98e3471be15..5f0c4fd4076 100644 --- a/arch/arm/mach-ixp2000/include/mach/memory.h +++ b/arch/arm/mach-ixp2000/include/mach/memory.h @@ -13,7 +13,7 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #include diff --git a/arch/arm/mach-ixp23xx/include/mach/memory.h b/arch/arm/mach-ixp23xx/include/mach/memory.h index 6ef65d813f1..6cf0704e946 100644 --- a/arch/arm/mach-ixp23xx/include/mach/memory.h +++ b/arch/arm/mach-ixp23xx/include/mach/memory.h @@ -17,7 +17,7 @@ /* * Physical DRAM offset. 
*/ -#define PHYS_OFFSET (0x00000000) +#define PLAT_PHYS_OFFSET (0x00000000) #define IXP23XX_PCI_SDRAM_OFFSET (*((volatile int *)IXP23XX_PCI_SDRAM_BAR) & 0xfffffff0) diff --git a/arch/arm/mach-ixp4xx/include/mach/memory.h b/arch/arm/mach-ixp4xx/include/mach/memory.h index 0136eaa2922..6d388c9d0e2 100644 --- a/arch/arm/mach-ixp4xx/include/mach/memory.h +++ b/arch/arm/mach-ixp4xx/include/mach/memory.h @@ -12,7 +12,7 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #if !defined(__ASSEMBLY__) && defined(CONFIG_PCI) diff --git a/arch/arm/mach-kirkwood/include/mach/memory.h b/arch/arm/mach-kirkwood/include/mach/memory.h index 45431e13146..4600b44e3ad 100644 --- a/arch/arm/mach-kirkwood/include/mach/memory.h +++ b/arch/arm/mach-kirkwood/include/mach/memory.h @@ -5,6 +5,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/mach-ks8695/include/mach/memory.h b/arch/arm/mach-ks8695/include/mach/memory.h index bace9a681ad..f7e1b9bce34 100644 --- a/arch/arm/mach-ks8695/include/mach/memory.h +++ b/arch/arm/mach-ks8695/include/mach/memory.h @@ -18,7 +18,7 @@ /* * Physical SRAM offset. */ -#define PHYS_OFFSET KS8695_SDRAM_PA +#define PLAT_PHYS_OFFSET KS8695_SDRAM_PA #ifndef __ASSEMBLY__ diff --git a/arch/arm/mach-lh7a40x/include/mach/memory.h b/arch/arm/mach-lh7a40x/include/mach/memory.h index edb8f5faf5d..f77bde80fe4 100644 --- a/arch/arm/mach-lh7a40x/include/mach/memory.h +++ b/arch/arm/mach-lh7a40x/include/mach/memory.h @@ -17,7 +17,7 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0xc0000000) +#define PLAT_PHYS_OFFSET UL(0xc0000000) /* * Sparsemem version of the above diff --git a/arch/arm/mach-loki/include/mach/memory.h b/arch/arm/mach-loki/include/mach/memory.h index 2ed7e6e732c..66366657a87 100644 --- a/arch/arm/mach-loki/include/mach/memory.h +++ b/arch/arm/mach-loki/include/mach/memory.h @@ -5,6 +5,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/mach-lpc32xx/include/mach/memory.h b/arch/arm/mach-lpc32xx/include/mach/memory.h index 044e1acecbe..a647dd624af 100644 --- a/arch/arm/mach-lpc32xx/include/mach/memory.h +++ b/arch/arm/mach-lpc32xx/include/mach/memory.h @@ -22,6 +22,6 @@ /* * Physical DRAM offset of bank 0 */ -#define PHYS_OFFSET UL(0x80000000) +#define PLAT_PHYS_OFFSET UL(0x80000000) #endif diff --git a/arch/arm/mach-mmp/include/mach/memory.h b/arch/arm/mach-mmp/include/mach/memory.h index bdb21d70714..d68b50a2d6a 100644 --- a/arch/arm/mach-mmp/include/mach/memory.h +++ b/arch/arm/mach-mmp/include/mach/memory.h @@ -9,6 +9,6 @@ #ifndef __ASM_MACH_MEMORY_H #define __ASM_MACH_MEMORY_H -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif /* __ASM_MACH_MEMORY_H */ diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c index 6f3b9735e97..decbf80b429 100644 --- a/arch/arm/mach-msm/board-msm7x30.c +++ b/arch/arm/mach-msm/board-msm7x30.c @@ -26,11 +26,11 @@ #include #include +#include #include #include #include -#include #include #include diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h index 070e17d237f..176875df241 100644 --- a/arch/arm/mach-msm/include/mach/memory.h +++ b/arch/arm/mach-msm/include/mach/memory.h @@ -18,15 +18,15 @@ /* physical offset of RAM */ #if defined(CONFIG_ARCH_QSD8X50) && 
defined(CONFIG_MSM_SOC_REV_A) -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #elif defined(CONFIG_ARCH_QSD8X50) -#define PHYS_OFFSET UL(0x20000000) +#define PLAT_PHYS_OFFSET UL(0x20000000) #elif defined(CONFIG_ARCH_MSM7X30) -#define PHYS_OFFSET UL(0x00200000) +#define PLAT_PHYS_OFFSET UL(0x00200000) #elif defined(CONFIG_ARCH_MSM8X60) -#define PHYS_OFFSET UL(0x40200000) +#define PLAT_PHYS_OFFSET UL(0x40200000) #else -#define PHYS_OFFSET UL(0x10000000) +#define PLAT_PHYS_OFFSET UL(0x10000000) #endif #endif diff --git a/arch/arm/mach-mv78xx0/include/mach/memory.h b/arch/arm/mach-mv78xx0/include/mach/memory.h index e663042d307..a648c51f2e4 100644 --- a/arch/arm/mach-mv78xx0/include/mach/memory.h +++ b/arch/arm/mach-mv78xx0/include/mach/memory.h @@ -5,6 +5,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/mach-mx3/mach-kzm_arm11_01.c b/arch/arm/mach-mx3/mach-kzm_arm11_01.c index a5f3eb24e4d..df1a6ce8e3e 100644 --- a/arch/arm/mach-mx3/mach-kzm_arm11_01.c +++ b/arch/arm/mach-mx3/mach-kzm_arm11_01.c @@ -27,6 +27,7 @@ #include #include +#include #include #include #include @@ -36,7 +37,6 @@ #include #include #include -#include #include "devices-imx31.h" #include "devices.h" diff --git a/arch/arm/mach-netx/include/mach/memory.h b/arch/arm/mach-netx/include/mach/memory.h index 9a363f297f9..59561496c36 100644 --- a/arch/arm/mach-netx/include/mach/memory.h +++ b/arch/arm/mach-netx/include/mach/memory.h @@ -20,7 +20,7 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x80000000) +#define PLAT_PHYS_OFFSET UL(0x80000000) #endif diff --git a/arch/arm/mach-nomadik/include/mach/memory.h b/arch/arm/mach-nomadik/include/mach/memory.h index 1e5689d98ec..d3325211ba6 100644 --- a/arch/arm/mach-nomadik/include/mach/memory.h +++ b/arch/arm/mach-nomadik/include/mach/memory.h @@ -23,6 +23,6 @@ /* * Physical DRAM offset. 
*/ -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/mach-ns9xxx/include/mach/memory.h b/arch/arm/mach-ns9xxx/include/mach/memory.h index 6107193adbf..5c65aee6e7a 100644 --- a/arch/arm/mach-ns9xxx/include/mach/memory.h +++ b/arch/arm/mach-ns9xxx/include/mach/memory.h @@ -19,6 +19,6 @@ #define NS9XXX_CS2STAT_LENGTH UL(0x1000) #define NS9XXX_CS3STAT_LENGTH UL(0x1000) -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/mach-nuc93x/include/mach/memory.h b/arch/arm/mach-nuc93x/include/mach/memory.h index 323ab0db3f7..ef9864b002a 100644 --- a/arch/arm/mach-nuc93x/include/mach/memory.h +++ b/arch/arm/mach-nuc93x/include/mach/memory.h @@ -16,6 +16,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/mach-orion5x/include/mach/memory.h b/arch/arm/mach-orion5x/include/mach/memory.h index 52a2955d0f8..6769917882f 100644 --- a/arch/arm/mach-orion5x/include/mach/memory.h +++ b/arch/arm/mach-orion5x/include/mach/memory.h @@ -7,6 +7,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/mach-pnx4008/include/mach/memory.h b/arch/arm/mach-pnx4008/include/mach/memory.h index 0e877008105..1275db61cee 100644 --- a/arch/arm/mach-pnx4008/include/mach/memory.h +++ b/arch/arm/mach-pnx4008/include/mach/memory.h @@ -16,6 +16,6 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0x80000000) +#define PLAT_PHYS_OFFSET UL(0x80000000) #endif diff --git a/arch/arm/mach-pxa/include/mach/memory.h b/arch/arm/mach-pxa/include/mach/memory.h index 92361a66b22..7f68724dcc2 100644 --- a/arch/arm/mach-pxa/include/mach/memory.h +++ b/arch/arm/mach-pxa/include/mach/memory.h @@ -15,7 +15,7 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0xa0000000) +#define PLAT_PHYS_OFFSET UL(0xa0000000) #if !defined(__ASSEMBLY__) && defined(CONFIG_MACH_ARMCORE) && defined(CONFIG_PCI) void cmx2xx_pci_adjust_zones(unsigned long *size, unsigned long *holes); diff --git a/arch/arm/mach-realview/include/mach/memory.h b/arch/arm/mach-realview/include/mach/memory.h index 5dafc157b27..e05fc2c4c08 100644 --- a/arch/arm/mach-realview/include/mach/memory.h +++ b/arch/arm/mach-realview/include/mach/memory.h @@ -24,9 +24,9 @@ * Physical DRAM offset. */ #ifdef CONFIG_REALVIEW_HIGH_PHYS_OFFSET -#define PHYS_OFFSET UL(0x70000000) +#define PLAT_PHYS_OFFSET UL(0x70000000) #else -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif #if !defined(__ASSEMBLY__) && defined(CONFIG_ZONE_DMA) diff --git a/arch/arm/mach-rpc/include/mach/memory.h b/arch/arm/mach-rpc/include/mach/memory.h index 78191bf2519..18a221093bf 100644 --- a/arch/arm/mach-rpc/include/mach/memory.h +++ b/arch/arm/mach-rpc/include/mach/memory.h @@ -21,7 +21,7 @@ /* * Physical DRAM offset. 
*/ -#define PHYS_OFFSET UL(0x10000000) +#define PLAT_PHYS_OFFSET UL(0x10000000) /* * Cache flushing area - ROM diff --git a/arch/arm/mach-s3c2400/include/mach/memory.h b/arch/arm/mach-s3c2400/include/mach/memory.h index cf5901ffd38..3f33670dd01 100644 --- a/arch/arm/mach-s3c2400/include/mach/memory.h +++ b/arch/arm/mach-s3c2400/include/mach/memory.h @@ -15,6 +15,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x0C000000) +#define PLAT_PHYS_OFFSET UL(0x0C000000) #endif diff --git a/arch/arm/mach-s3c2410/include/mach/memory.h b/arch/arm/mach-s3c2410/include/mach/memory.h index 6f1e5871ae4..f92b97b89c0 100644 --- a/arch/arm/mach-s3c2410/include/mach/memory.h +++ b/arch/arm/mach-s3c2410/include/mach/memory.h @@ -11,6 +11,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x30000000) +#define PLAT_PHYS_OFFSET UL(0x30000000) #endif diff --git a/arch/arm/mach-s3c24a0/include/mach/memory.h b/arch/arm/mach-s3c24a0/include/mach/memory.h index 7d74fd5c8d6..7d208a71b17 100644 --- a/arch/arm/mach-s3c24a0/include/mach/memory.h +++ b/arch/arm/mach-s3c24a0/include/mach/memory.h @@ -11,7 +11,7 @@ #ifndef __ASM_ARCH_24A0_MEMORY_H #define __ASM_ARCH_24A0_MEMORY_H __FILE__ -#define PHYS_OFFSET UL(0x10000000) +#define PLAT_PHYS_OFFSET UL(0x10000000) #define __virt_to_bus(x) __virt_to_phys(x) #define __bus_to_virt(x) __phys_to_virt(x) diff --git a/arch/arm/mach-s3c64xx/include/mach/memory.h b/arch/arm/mach-s3c64xx/include/mach/memory.h index 42cc54e2ee3..4760cdae1eb 100644 --- a/arch/arm/mach-s3c64xx/include/mach/memory.h +++ b/arch/arm/mach-s3c64xx/include/mach/memory.h @@ -13,7 +13,7 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x50000000) +#define PLAT_PHYS_OFFSET UL(0x50000000) #define CONSISTENT_DMA_SIZE SZ_8M diff --git a/arch/arm/mach-s5p6442/include/mach/memory.h b/arch/arm/mach-s5p6442/include/mach/memory.h index 9ddd877ba2e..cfe259dded3 100644 --- a/arch/arm/mach-s5p6442/include/mach/memory.h +++ b/arch/arm/mach-s5p6442/include/mach/memory.h @@ -13,7 +13,7 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x20000000) +#define PLAT_PHYS_OFFSET UL(0x20000000) #define CONSISTENT_DMA_SIZE SZ_8M #endif /* __ASM_ARCH_MEMORY_H */ diff --git a/arch/arm/mach-s5p64x0/include/mach/memory.h b/arch/arm/mach-s5p64x0/include/mach/memory.h index 1b036b0a24c..365a6eb4b88 100644 --- a/arch/arm/mach-s5p64x0/include/mach/memory.h +++ b/arch/arm/mach-s5p64x0/include/mach/memory.h @@ -13,7 +13,7 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H __FILE__ -#define PHYS_OFFSET UL(0x20000000) +#define PLAT_PHYS_OFFSET UL(0x20000000) #define CONSISTENT_DMA_SIZE SZ_8M #endif /* __ASM_ARCH_MEMORY_H */ diff --git a/arch/arm/mach-s5pc100/include/mach/memory.h b/arch/arm/mach-s5pc100/include/mach/memory.h index 4b60d18179f..bda4e79fd5f 100644 --- a/arch/arm/mach-s5pc100/include/mach/memory.h +++ b/arch/arm/mach-s5pc100/include/mach/memory.h @@ -13,6 +13,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x20000000) +#define PLAT_PHYS_OFFSET UL(0x20000000) #endif diff --git a/arch/arm/mach-s5pv210/include/mach/memory.h b/arch/arm/mach-s5pv210/include/mach/memory.h index d503e0c4ce4..7b5fcf0da0c 100644 --- a/arch/arm/mach-s5pv210/include/mach/memory.h +++ b/arch/arm/mach-s5pv210/include/mach/memory.h @@ -13,7 +13,7 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x20000000) +#define PLAT_PHYS_OFFSET UL(0x20000000) 
#define CONSISTENT_DMA_SIZE (SZ_8M + SZ_4M + SZ_2M) /* diff --git a/arch/arm/mach-s5pv310/include/mach/memory.h b/arch/arm/mach-s5pv310/include/mach/memory.h index 1dffb482324..470b01bf861 100644 --- a/arch/arm/mach-s5pv310/include/mach/memory.h +++ b/arch/arm/mach-s5pv310/include/mach/memory.h @@ -13,7 +13,7 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H __FILE__ -#define PHYS_OFFSET UL(0x40000000) +#define PLAT_PHYS_OFFSET UL(0x40000000) /* Maximum of 256MiB in one bank */ #define MAX_PHYSMEM_BITS 32 diff --git a/arch/arm/mach-sa1100/include/mach/memory.h b/arch/arm/mach-sa1100/include/mach/memory.h index 128a1dfa96b..a44da6a2916 100644 --- a/arch/arm/mach-sa1100/include/mach/memory.h +++ b/arch/arm/mach-sa1100/include/mach/memory.h @@ -12,7 +12,7 @@ /* * Physical DRAM offset is 0xc0000000 on the SA1100 */ -#define PHYS_OFFSET UL(0xc0000000) +#define PLAT_PHYS_OFFSET UL(0xc0000000) #ifndef __ASSEMBLY__ diff --git a/arch/arm/mach-shark/include/mach/memory.h b/arch/arm/mach-shark/include/mach/memory.h index d9c4812f1c3..9afb1700000 100644 --- a/arch/arm/mach-shark/include/mach/memory.h +++ b/arch/arm/mach-shark/include/mach/memory.h @@ -15,7 +15,7 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0x08000000) +#define PLAT_PHYS_OFFSET UL(0x08000000) #ifndef __ASSEMBLY__ diff --git a/arch/arm/mach-shmobile/include/mach/memory.h b/arch/arm/mach-shmobile/include/mach/memory.h index 377584e57e0..ad00c3c258f 100644 --- a/arch/arm/mach-shmobile/include/mach/memory.h +++ b/arch/arm/mach-shmobile/include/mach/memory.h @@ -1,7 +1,7 @@ #ifndef __ASM_MACH_MEMORY_H #define __ASM_MACH_MEMORY_H -#define PHYS_OFFSET UL(CONFIG_MEMORY_START) +#define PLAT_PHYS_OFFSET UL(CONFIG_MEMORY_START) #define MEM_SIZE UL(CONFIG_MEMORY_SIZE) /* DMA memory at 0xf6000000 - 0xffdfffff */ diff --git a/arch/arm/mach-tegra/include/mach/memory.h b/arch/arm/mach-tegra/include/mach/memory.h index 6151bab62af..537db3aa81a 100644 --- a/arch/arm/mach-tegra/include/mach/memory.h +++ b/arch/arm/mach-tegra/include/mach/memory.h @@ -22,7 +22,7 @@ #define __MACH_TEGRA_MEMORY_H /* physical offset of RAM */ -#define PHYS_OFFSET UL(0) +#define PLAT_PHYS_OFFSET UL(0) #endif diff --git a/arch/arm/mach-u300/include/mach/memory.h b/arch/arm/mach-u300/include/mach/memory.h index bf134bcc129..888e2e351ee 100644 --- a/arch/arm/mach-u300/include/mach/memory.h +++ b/arch/arm/mach-u300/include/mach/memory.h @@ -15,17 +15,17 @@ #ifdef CONFIG_MACH_U300_DUAL_RAM -#define PHYS_OFFSET UL(0x48000000) +#define PLAT_PHYS_OFFSET UL(0x48000000) #define BOOT_PARAMS_OFFSET (PHYS_OFFSET + 0x100) #else #ifdef CONFIG_MACH_U300_2MB_ALIGNMENT_FIX -#define PHYS_OFFSET (0x28000000 + \ +#define PLAT_PHYS_OFFSET (0x28000000 + \ (CONFIG_MACH_U300_ACCESS_MEM_SIZE - \ (CONFIG_MACH_U300_ACCESS_MEM_SIZE & 1))*1024*1024) #else -#define PHYS_OFFSET (0x28000000 + \ +#define PLAT_PHYS_OFFSET (0x28000000 + \ (CONFIG_MACH_U300_ACCESS_MEM_SIZE + \ (CONFIG_MACH_U300_ACCESS_MEM_SIZE & 1))*1024*1024) #endif diff --git a/arch/arm/mach-u300/u300.c b/arch/arm/mach-u300/u300.c index 07c35a84642..48b3b7f3996 100644 --- a/arch/arm/mach-u300/u300.c +++ b/arch/arm/mach-u300/u300.c @@ -19,9 +19,9 @@ #include #include #include -#include #include #include +#include static void __init u300_reserve(void) { diff --git a/arch/arm/mach-ux500/include/mach/memory.h b/arch/arm/mach-ux500/include/mach/memory.h index 510571a59e2..2ef697a6700 100644 --- a/arch/arm/mach-ux500/include/mach/memory.h +++ b/arch/arm/mach-ux500/include/mach/memory.h @@ -12,7 +12,7 @@ /* * Physical DRAM 
offset. */ -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #define BUS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/mach-versatile/include/mach/memory.h b/arch/arm/mach-versatile/include/mach/memory.h index 79aeab86b90..dacc9d8e4e6 100644 --- a/arch/arm/mach-versatile/include/mach/memory.h +++ b/arch/arm/mach-versatile/include/mach/memory.h @@ -23,6 +23,6 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/mach-vexpress/include/mach/memory.h b/arch/arm/mach-vexpress/include/mach/memory.h index be28232ae63..5b7fcd439d8 100644 --- a/arch/arm/mach-vexpress/include/mach/memory.h +++ b/arch/arm/mach-vexpress/include/mach/memory.h @@ -20,6 +20,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x60000000) +#define PLAT_PHYS_OFFSET UL(0x60000000) #endif diff --git a/arch/arm/mach-w90x900/include/mach/memory.h b/arch/arm/mach-w90x900/include/mach/memory.h index 971b80702c2..f02905ba774 100644 --- a/arch/arm/mach-w90x900/include/mach/memory.h +++ b/arch/arm/mach-w90x900/include/mach/memory.h @@ -18,6 +18,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/plat-mxc/include/mach/memory.h b/arch/arm/plat-mxc/include/mach/memory.h index 83861408133..5d51cbb9889 100644 --- a/arch/arm/plat-mxc/include/mach/memory.h +++ b/arch/arm/plat-mxc/include/mach/memory.h @@ -23,23 +23,23 @@ #if !defined(CONFIG_RUNTIME_PHYS_OFFSET) # if defined CONFIG_ARCH_MX1 -# define PHYS_OFFSET MX1_PHYS_OFFSET +# define PLAT_PHYS_OFFSET MX1_PHYS_OFFSET # elif defined CONFIG_MACH_MX21 -# define PHYS_OFFSET MX21_PHYS_OFFSET +# define PLAT_PHYS_OFFSET MX21_PHYS_OFFSET # elif defined CONFIG_ARCH_MX25 -# define PHYS_OFFSET MX25_PHYS_OFFSET +# define PLAT_PHYS_OFFSET MX25_PHYS_OFFSET # elif defined CONFIG_MACH_MX27 -# define PHYS_OFFSET MX27_PHYS_OFFSET +# define PLAT_PHYS_OFFSET MX27_PHYS_OFFSET # elif defined CONFIG_ARCH_MX3 -# define PHYS_OFFSET MX3x_PHYS_OFFSET +# define PLAT_PHYS_OFFSET MX3x_PHYS_OFFSET # elif defined CONFIG_ARCH_MXC91231 -# define PHYS_OFFSET MXC91231_PHYS_OFFSET +# define PLAT_PHYS_OFFSET MXC91231_PHYS_OFFSET # elif defined CONFIG_ARCH_MX50 -# define PHYS_OFFSET MX50_PHYS_OFFSET +# define PLAT_PHYS_OFFSET MX50_PHYS_OFFSET # elif defined CONFIG_ARCH_MX51 -# define PHYS_OFFSET MX51_PHYS_OFFSET +# define PLAT_PHYS_OFFSET MX51_PHYS_OFFSET # elif defined CONFIG_ARCH_MX53 -# define PHYS_OFFSET MX53_PHYS_OFFSET +# define PLAT_PHYS_OFFSET MX53_PHYS_OFFSET # endif #endif diff --git a/arch/arm/plat-omap/include/plat/memory.h b/arch/arm/plat-omap/include/plat/memory.h index f8d922fb558..e6720aa2d55 100644 --- a/arch/arm/plat-omap/include/plat/memory.h +++ b/arch/arm/plat-omap/include/plat/memory.h @@ -37,9 +37,9 @@ * Physical DRAM offset. 
*/ #if defined(CONFIG_ARCH_OMAP1) -#define PHYS_OFFSET UL(0x10000000) +#define PLAT_PHYS_OFFSET UL(0x10000000) #else -#define PHYS_OFFSET UL(0x80000000) +#define PLAT_PHYS_OFFSET UL(0x80000000) #endif /* diff --git a/arch/arm/plat-spear/include/plat/memory.h b/arch/arm/plat-spear/include/plat/memory.h index 27a4aba7734..7e3599e1104 100644 --- a/arch/arm/plat-spear/include/plat/memory.h +++ b/arch/arm/plat-spear/include/plat/memory.h @@ -15,6 +15,6 @@ #define __PLAT_MEMORY_H /* Physical DRAM offset */ -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif /* __PLAT_MEMORY_H */ diff --git a/arch/arm/plat-stmp3xxx/include/mach/memory.h b/arch/arm/plat-stmp3xxx/include/mach/memory.h index 7b875a07a1a..61fa54882e1 100644 --- a/arch/arm/plat-stmp3xxx/include/mach/memory.h +++ b/arch/arm/plat-stmp3xxx/include/mach/memory.h @@ -17,6 +17,6 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0x40000000) +#define PLAT_PHYS_OFFSET UL(0x40000000) #endif diff --git a/arch/arm/plat-tcc/include/mach/memory.h b/arch/arm/plat-tcc/include/mach/memory.h index cd91ba8a670..28a6e0cd13b 100644 --- a/arch/arm/plat-tcc/include/mach/memory.h +++ b/arch/arm/plat-tcc/include/mach/memory.h @@ -13,6 +13,6 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0x20000000) +#define PLAT_PHYS_OFFSET UL(0x20000000) #endif -- cgit v1.2.3-70-g09d2 From dc21af99fadcfa0ae65b52fd0895f85824f0c288 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 4 Jan 2011 19:09:43 +0000 Subject: ARM: P2V: introduce phys_to_virt/virt_to_phys runtime patching This idea came from Nicolas, Eric Miao produced an initial version, which was then rewritten into this. Patch the physical to virtual translations at runtime. As we modify the code, this makes it incompatible with XIP kernels, but allows us to achieve this with minimal loss of performance. As many translations are of the form: physical = virtual + (PHYS_OFFSET - PAGE_OFFSET) virtual = physical - (PHYS_OFFSET - PAGE_OFFSET) we generate an 'add' instruction for __virt_to_phys(), and a 'sub' instruction for __phys_to_virt(). We calculate at run time (PHYS_OFFSET - PAGE_OFFSET) by comparing the address prior to MMU initialization with where it should be once the MMU has been initialized, and place this constant into the above add/sub instructions. Once we have (PHYS_OFFSET - PAGE_OFFSET), we can calculate the real PHYS_OFFSET as PAGE_OFFSET is a build-time constant, and save this for the C-mode PHYS_OFFSET variable definition to use. At present, we are unable to support Realview with Sparsemem enabled as this uses a complex mapping function, and MSM as this requires a constant which will not fit in our math instruction. Add a module version magic string for this feature to prevent incompatible modules being loaded. Tested-by: Tony Lindgren Reviewed-by: Nicolas Pitre Tested-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/Kconfig | 13 +++++++++ arch/arm/include/asm/memory.h | 55 ++++++++++++++++++++++++++-------- arch/arm/include/asm/module.h | 15 ++++++++-- arch/arm/kernel/armksyms.c | 4 +++ arch/arm/kernel/head.S | 68 +++++++++++++++++++++++++++++++++++++++++++ arch/arm/kernel/module.c | 23 ++++++++++++++- arch/arm/kernel/vmlinux.lds.S | 4 +++ 7 files changed, 167 insertions(+), 15 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 5cff165b7eb..4147f76e798 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -191,6 +191,19 @@ config VECTORS_BASE help The base address of exception vectors. 
+config ARM_PATCH_PHYS_VIRT + bool "Patch physical to virtual translations at runtime (EXPERIMENTAL)" + depends on EXPERIMENTAL + depends on !XIP_KERNEL && !THUMB2_KERNEL && MMU + depends on !ARCH_MSM + depends on !ARCH_REALVIEW || !SPARSEMEM + help + Patch phys-to-virt translation functions at runtime according to + the position of the kernel in system memory. + + This can only be used with non-XIP, non-Thumb2, MMU kernels where + the base of physical memory is at a 16MB boundary. + source "init/Kconfig" source "kernel/Kconfig.freezer" diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 2efec578a62..7197879e1cb 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -24,8 +24,6 @@ */ #define UL(x) _AC(x, UL) -#define PHYS_OFFSET PLAT_PHYS_OFFSET - #ifdef CONFIG_MMU /* @@ -134,16 +132,6 @@ #define DTCM_OFFSET UL(0xfffe8000) #endif -/* - * Physical vs virtual RAM address space conversion. These are - * private definitions which should NOT be used outside memory.h - * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. - */ -#ifndef __virt_to_phys -#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET) -#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) -#endif - /* * Convert a physical address to a Page Frame Number and back */ @@ -158,6 +146,49 @@ #ifndef __ASSEMBLY__ +/* + * Physical vs virtual RAM address space conversion. These are + * private definitions which should NOT be used outside memory.h + * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. + */ +#ifndef __virt_to_phys +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT + +extern unsigned long __pv_phys_offset; +#define PHYS_OFFSET __pv_phys_offset + +#define __pv_stub(from,to,instr) \ + __asm__("@ __pv_stub\n" \ + "1: " instr " %0, %1, %2\n" \ + " .pushsection .pv_table,\"a\"\n" \ + " .long 1b\n" \ + " .popsection\n" \ + : "=r" (to) \ + : "r" (from), "I" (0x81000000)) + +static inline unsigned long __virt_to_phys(unsigned long x) +{ + unsigned long t; + __pv_stub(x, t, "add"); + return t; +} + +static inline unsigned long __phys_to_virt(unsigned long x) +{ + unsigned long t; + __pv_stub(x, t, "sub"); + return t; +} +#else +#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET) +#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) +#endif +#endif + +#ifndef PHYS_OFFSET +#define PHYS_OFFSET PLAT_PHYS_OFFSET +#endif + /* * The DMA mask corresponding to the maximum bus address allocatable * using GFP_DMA. The default here places no restriction on DMA diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h index 12c8e680cbf..d072c21332e 100644 --- a/arch/arm/include/asm/module.h +++ b/arch/arm/include/asm/module.h @@ -25,8 +25,19 @@ struct mod_arch_specific { }; /* - * Include the ARM architecture version. 
+ * Add the ARM architecture version to the version magic string */ -#define MODULE_ARCH_VERMAGIC "ARMv" __stringify(__LINUX_ARM_ARCH__) " " +#define MODULE_ARCH_VERMAGIC_ARMVSN "ARMv" __stringify(__LINUX_ARM_ARCH__) " " + +/* Add __virt_to_phys patching state as well */ +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT +#define MODULE_ARCH_VERMAGIC_P2V "p2v8 " +#else +#define MODULE_ARCH_VERMAGIC_P2V "" +#endif + +#define MODULE_ARCH_VERMAGIC \ + MODULE_ARCH_VERMAGIC_ARMVSN \ + MODULE_ARCH_VERMAGIC_P2V #endif /* _ASM_ARM_MODULE_H */ diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index e5e1e538767..9615423c37d 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -170,3 +170,7 @@ EXPORT_SYMBOL(mcount); #endif EXPORT_SYMBOL(__gnu_mcount_nc); #endif + +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT +EXPORT_SYMBOL(__pv_phys_offset); +#endif diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 03a588b6e15..1db8ead2e33 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -97,6 +97,9 @@ ENTRY(stext) bl __vet_atags #ifdef CONFIG_SMP_ON_UP bl __fixup_smp +#endif +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT + bl __fixup_pv_table #endif bl __create_page_tables @@ -438,4 +441,69 @@ smp_on_up: #endif +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT + +/* __fixup_pv_table - patch the stub instructions with the delta between + * PHYS_OFFSET and PAGE_OFFSET, which is assumed to be 16MiB aligned and + * can be expressed by an immediate shifter operand. The stub instruction + * has a form of '(add|sub) rd, rn, #imm'. + */ + __HEAD +__fixup_pv_table: + adr r0, 1f + ldmia r0, {r3-r5, r7} + sub r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET + add r4, r4, r3 @ adjust table start address + add r5, r5, r3 @ adjust table end address + str r8, [r7, r3]! @ save computed PHYS_OFFSET to __pv_phys_offset + mov r6, r3, lsr #24 @ constant for add/sub instructions + teq r3, r6, lsl #24 @ must be 16MiB aligned + bne __error + str r6, [r7, #4] @ save to __pv_offset + b __fixup_a_pv_table +ENDPROC(__fixup_pv_table) + + .align +1: .long . + .long __pv_table_begin + .long __pv_table_end +2: .long __pv_phys_offset + + .text +__fixup_a_pv_table: + b 3f +2: ldr ip, [r7, r3] + bic ip, ip, #0x000000ff + orr ip, ip, r6 + str ip, [r7, r3] +3: cmp r4, r5 + ldrcc r7, [r4], #4 @ use branch for delay slot + bcc 2b + mov pc, lr +ENDPROC(__fixup_a_pv_table) + +ENTRY(fixup_pv_table) + stmfd sp!, {r4 - r7, lr} + ldr r2, 2f @ get address of __pv_phys_offset + mov r3, #0 @ no offset + mov r4, r0 @ r0 = table start + add r5, r0, r1 @ r1 = table size + ldr r6, [r2, #4] @ get __pv_offset + bl __fixup_a_pv_table + ldmfd sp!, {r4 - r7, pc} +ENDPROC(fixup_pv_table) + + .align +2: .long __pv_phys_offset + + .data + .globl __pv_phys_offset + .type __pv_phys_offset, %object +__pv_phys_offset: + .long 0 + .size __pv_phys_offset, . 
- __pv_phys_offset +__pv_offset: + .long 0 +#endif + #include "head-common.S" diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index 2cfe8161b47..c5679f6d9f6 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -268,12 +268,28 @@ struct mod_unwind_map { const Elf_Shdr *txt_sec; }; +static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr, + const Elf_Shdr *sechdrs, const char *name) +{ + const Elf_Shdr *s, *se; + const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + + for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) + if (strcmp(name, secstrs + s->sh_name) == 0) + return s; + + return NULL; +} + +extern void fixup_pv_table(const void *, unsigned long); + int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod) { + const Elf_Shdr *s = NULL; #ifdef CONFIG_ARM_UNWIND const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; - const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum; + const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum; struct mod_unwind_map maps[ARM_SEC_MAX]; int i; @@ -314,6 +330,11 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, maps[i].unw_sec->sh_size, maps[i].txt_sec->sh_addr, maps[i].txt_sec->sh_size); +#endif +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT + s = find_mod_section(hdr, sechdrs, ".pv_table"); + if (s) + fixup_pv_table((void *)s->sh_addr, s->sh_size); #endif return 0; } diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 86b66f3f203..45b5651777e 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -57,6 +57,10 @@ SECTIONS __smpalt_end = .; #endif + __pv_table_begin = .; + *(.pv_table) + __pv_table_end = .; + INIT_SETUP(16) INIT_CALLS -- cgit v1.2.3-70-g09d2 From cada3c0841e1deaec4c0f92654610b028dc683ff Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 4 Jan 2011 19:39:29 +0000 Subject: ARM: P2V: extend to 16-bit translation offsets MSM's memory is aligned to 2MB, which is more than we can do with our existing method as we're limited to the upper 8 bits. Extend this by using two instructions to 16 bits, automatically selected when MSM is enabled. Acked-by: Tony Lindgren Reviewed-by: Nicolas Pitre Tested-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/Kconfig | 5 ++++- arch/arm/include/asm/memory.h | 21 +++++++++++++++++---- arch/arm/include/asm/module.h | 4 ++++ arch/arm/kernel/head.S | 15 ++++++++++++++- 4 files changed, 39 insertions(+), 6 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 4147f76e798..b357c29e7df 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -195,7 +195,6 @@ config ARM_PATCH_PHYS_VIRT bool "Patch physical to virtual translations at runtime (EXPERIMENTAL)" depends on EXPERIMENTAL depends on !XIP_KERNEL && !THUMB2_KERNEL && MMU - depends on !ARCH_MSM depends on !ARCH_REALVIEW || !SPARSEMEM help Patch phys-to-virt translation functions at runtime according to @@ -204,6 +203,10 @@ config ARM_PATCH_PHYS_VIRT This can only be used with non-XIP, non-Thumb2, MMU kernels where the base of physical memory is at a 16MB boundary. 
+config ARM_PATCH_PHYS_VIRT_16BIT + def_bool y + depends on ARM_PATCH_PHYS_VIRT && ARCH_MSM + source "init/Kconfig" source "kernel/Kconfig.freezer" diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 7197879e1cb..2398b3fc026 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -154,29 +154,42 @@ #ifndef __virt_to_phys #ifdef CONFIG_ARM_PATCH_PHYS_VIRT +/* + * Constants used to force the right instruction encodings and shifts + * so that all we need to do is modify the 8-bit constant field. + */ +#define __PV_BITS_31_24 0x81000000 +#define __PV_BITS_23_16 0x00810000 + extern unsigned long __pv_phys_offset; #define PHYS_OFFSET __pv_phys_offset -#define __pv_stub(from,to,instr) \ +#define __pv_stub(from,to,instr,type) \ __asm__("@ __pv_stub\n" \ "1: " instr " %0, %1, %2\n" \ " .pushsection .pv_table,\"a\"\n" \ " .long 1b\n" \ " .popsection\n" \ : "=r" (to) \ - : "r" (from), "I" (0x81000000)) + : "r" (from), "I" (type)) static inline unsigned long __virt_to_phys(unsigned long x) { unsigned long t; - __pv_stub(x, t, "add"); + __pv_stub(x, t, "add", __PV_BITS_31_24); +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT + __pv_stub(t, t, "add", __PV_BITS_23_16); +#endif return t; } static inline unsigned long __phys_to_virt(unsigned long x) { unsigned long t; - __pv_stub(x, t, "sub"); + __pv_stub(x, t, "sub", __PV_BITS_31_24); +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT + __pv_stub(t, t, "sub", __PV_BITS_23_16); +#endif return t; } #else diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h index d072c21332e..a2b775b81cf 100644 --- a/arch/arm/include/asm/module.h +++ b/arch/arm/include/asm/module.h @@ -31,7 +31,11 @@ struct mod_arch_specific { /* Add __virt_to_phys patching state as well */ #ifdef CONFIG_ARM_PATCH_PHYS_VIRT +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT +#define MODULE_ARCH_VERMAGIC_P2V "p2v16 " +#else #define MODULE_ARCH_VERMAGIC_P2V "p2v8 " +#endif #else #define MODULE_ARCH_VERMAGIC_P2V "" #endif diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 1db8ead2e33..a94dd99d54c 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -456,8 +456,13 @@ __fixup_pv_table: add r4, r4, r3 @ adjust table start address add r5, r5, r3 @ adjust table end address str r8, [r7, r3]! @ save computed PHYS_OFFSET to __pv_phys_offset +#ifndef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT mov r6, r3, lsr #24 @ constant for add/sub instructions teq r3, r6, lsl #24 @ must be 16MiB aligned +#else + mov r6, r3, lsr #16 @ constant for add/sub instructions + teq r3, r6, lsl #16 @ must be 64kiB aligned +#endif bne __error str r6, [r7, #4] @ save to __pv_offset b __fixup_a_pv_table @@ -471,10 +476,18 @@ ENDPROC(__fixup_pv_table) .text __fixup_a_pv_table: +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT + and r0, r6, #255 @ offset bits 23-16 + mov r6, r6, lsr #8 @ offset bits 31-24 +#else + mov r0, #0 @ just in case... +#endif b 3f 2: ldr ip, [r7, r3] bic ip, ip, #0x000000ff - orr ip, ip, r6 + tst ip, #0x400 @ rotate shift tells us LS or MS byte + orrne ip, ip, r6 @ mask in offset bits 31-24 + orreq ip, ip, r0 @ mask in offset bits 23-16 str ip, [r7, r3] 3: cmp r4, r5 ldrcc r7, [r4], #4 @ use branch for delay slot -- cgit v1.2.3-70-g09d2 From 3a6b1676c6f27f7fad1a3d6fab5a95f90b1e7402 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 15 Feb 2011 17:28:28 +0100 Subject: ARM: 6675/1: use phys_addr_t instead of unsigned long in conversion code The unsigned long datatype is not sufficient for mapping physical addresses >= 4GB. 
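As a hypothetical illustration of the truncation being guarded against (not
part of the patch): on a 32-bit kernel an unsigned long silently drops the
high bits of such an address, whereas phys_addr_t widens to 64 bits when
CONFIG_PHYS_ADDR_T_64BIT is enabled.

	/* illustrative only: 0x100000000 is the first address past 4GB */
	unsigned long truncated = 0x100000000ULL; /* wraps to 0 with a 32-bit long */
	phys_addr_t   preserved = 0x100000000ULL; /* intact with a 64-bit phys_addr_t */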
This patch ensures that the address conversion code in asm/memory.h casts to the correct type when handling physical addresses. The internal v2p macros only deal with lowmem addresses, so these do not need to be modified. Acked-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 2398b3fc026..431077c5a86 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -15,6 +15,7 @@ #include #include +#include #include #include @@ -135,8 +136,8 @@ /* * Convert a physical address to a Page Frame Number and back */ -#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT) -#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) +#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) +#define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT) /* * Convert a page to/from a physical address @@ -234,12 +235,12 @@ static inline unsigned long __phys_to_virt(unsigned long x) * translation for translating DMA addresses. Use the driver * DMA support - see dma-mapping.h. */ -static inline unsigned long virt_to_phys(const volatile void *x) +static inline phys_addr_t virt_to_phys(const volatile void *x) { return __virt_to_phys((unsigned long)(x)); } -static inline void *phys_to_virt(unsigned long x) +static inline void *phys_to_virt(phys_addr_t x) { return (void *)(__phys_to_virt((unsigned long)(x))); } -- cgit v1.2.3-70-g09d2 From f6b0fa02e8b0708d17d631afce456524eadf87ff Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 6 Feb 2011 15:48:39 +0000 Subject: ARM: pm: add generic CPU suspend/resume support This adds core support for saving and restoring CPU coprocessor registers for suspend/resume support. This contains support for suspend with ARM920, ARM926, SA11x0, PXA25x, PXA27x, PXA3xx, V6 and V7 CPUs. Tested on Assabet and Tegra 2. 
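As a rough illustration of how the new hooks fit together, consider this hedged C sketch (the kernel actually drives them from assembly in arch/arm/kernel/sleep.S; example_enter_sleep is hypothetical, and a MULTI_CPU kernel is assumed so that the processor vector exists):

  #include <linux/slab.h>
  #include <asm/proc-fns.h>

  /* Hypothetical caller, for illustration only. */
  static int example_enter_sleep(void)
  {
  	/* suspend_size says how much coprocessor state must be saved. */
  	void *state = kmalloc(processor.suspend_size, GFP_ATOMIC);

  	if (!state)
  		return -ENOMEM;

  	processor.do_suspend(state);	/* save CP15 state */
  	/*
  	 * ... power down here; on wakeup the boot path jumps to
  	 * cpu_resume, which restores the saved state via do_resume ...
  	 */
  	kfree(state);
  	return 0;
  }
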
Tested-by: Colin Cross Tested-by: Kukjin Kim Signed-off-by: Russell King --- arch/arm/include/asm/glue-proc.h | 3 + arch/arm/include/asm/proc-fns.h | 7 +++ arch/arm/kernel/Makefile | 1 + arch/arm/kernel/asm-offsets.c | 9 +++ arch/arm/kernel/sleep.S | 109 ++++++++++++++++++++++++++++++++++++ arch/arm/mm/proc-arm1020.S | 3 + arch/arm/mm/proc-arm1020e.S | 3 + arch/arm/mm/proc-arm1022.S | 3 + arch/arm/mm/proc-arm1026.S | 3 + arch/arm/mm/proc-arm6_7.S | 6 ++ arch/arm/mm/proc-arm720.S | 3 + arch/arm/mm/proc-arm740.S | 3 + arch/arm/mm/proc-arm7tdmi.S | 3 + arch/arm/mm/proc-arm920.S | 37 +++++++++++++ arch/arm/mm/proc-arm922.S | 3 + arch/arm/mm/proc-arm925.S | 3 + arch/arm/mm/proc-arm926.S | 37 +++++++++++++ arch/arm/mm/proc-arm940.S | 3 + arch/arm/mm/proc-arm946.S | 3 + arch/arm/mm/proc-arm9tdmi.S | 3 + arch/arm/mm/proc-fa526.S | 3 + arch/arm/mm/proc-feroceon.S | 3 + arch/arm/mm/proc-mohawk.S | 3 + arch/arm/mm/proc-sa110.S | 3 + arch/arm/mm/proc-sa1100.S | 39 +++++++++++++ arch/arm/mm/proc-v6.S | 50 +++++++++++++++++ arch/arm/mm/proc-v7.S | 116 +++++++++++++++++++++++++++++---------- arch/arm/mm/proc-xsc3.S | 48 +++++++++++++++- arch/arm/mm/proc-xscale.S | 45 ++++++++++++++- 29 files changed, 522 insertions(+), 33 deletions(-) create mode 100644 arch/arm/kernel/sleep.S (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h index e3bf443f2d1..6469521d092 100644 --- a/arch/arm/include/asm/glue-proc.h +++ b/arch/arm/include/asm/glue-proc.h @@ -256,6 +256,9 @@ #define cpu_dcache_clean_area __glue(CPU_NAME,_dcache_clean_area) #define cpu_do_switch_mm __glue(CPU_NAME,_switch_mm) #define cpu_set_pte_ext __glue(CPU_NAME,_set_pte_ext) +#define cpu_suspend_size __glue(CPU_NAME,_suspend_size) +#define cpu_do_suspend __glue(CPU_NAME,_do_suspend) +#define cpu_do_resume __glue(CPU_NAME,_do_resume) #endif #endif diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index 69802150be2..8ec535e11fd 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h @@ -66,6 +66,11 @@ extern struct processor { * ignore 'ext'. 
*/ void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext); + + /* Suspend/resume */ + unsigned int suspend_size; + void (*do_suspend)(void *); + void (*do_resume)(void *); } processor; #ifndef MULTI_CPU @@ -86,6 +91,8 @@ extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); #define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm) #endif +extern void cpu_resume(void); + #include #ifdef CONFIG_MMU diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 185ee822c93..74554f1742d 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o obj-$(CONFIG_ARTHUR) += arthur.o obj-$(CONFIG_ISA_DMA) += dma-isa.o obj-$(CONFIG_PCI) += bios32.o isa.o +obj-$(CONFIG_PM) += sleep.o obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o obj-$(CONFIG_SMP) += smp.o smp_tlb.o obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 5302a917271..927522cfc12 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -115,6 +116,14 @@ int main(void) #endif #ifdef MULTI_PABORT DEFINE(PROCESSOR_PABT_FUNC, offsetof(struct processor, _prefetch_abort)); +#endif +#ifdef MULTI_CPU + DEFINE(CPU_SLEEP_SIZE, offsetof(struct processor, suspend_size)); + DEFINE(CPU_DO_SUSPEND, offsetof(struct processor, do_suspend)); + DEFINE(CPU_DO_RESUME, offsetof(struct processor, do_resume)); +#endif +#ifdef MULTI_CACHE + DEFINE(CACHE_FLUSH_KERN_ALL, offsetof(struct cpu_cache_fns, flush_kern_all)); #endif BLANK(); DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S new file mode 100644 index 00000000000..2ba17946619 --- /dev/null +++ b/arch/arm/kernel/sleep.S @@ -0,0 +1,109 @@ +#include +#include +#include +#include +#include +#include + .text + +/* + * Save CPU state for a suspend + * r1 = v:p offset + * r3 = virtual return function + * Note: sp is decremented to allocate space for CPU state on stack + * r0-r3,r9,r10,lr corrupted + */ +ENTRY(cpu_suspend) + mov r9, lr +#ifdef MULTI_CPU + ldr r10, =processor + mov r2, sp @ current virtual SP + ldr r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state + ldr ip, [r10, #CPU_DO_RESUME] @ virtual resume function + sub sp, sp, r0 @ allocate CPU state on stack + mov r0, sp @ save pointer + add ip, ip, r1 @ convert resume fn to phys + stmfd sp!, {r1, r2, r3, ip} @ save v:p, virt SP, retfn, phys resume fn + ldr r3, =sleep_save_sp + add r2, sp, r1 @ convert SP to phys + str r2, [r3] @ save phys SP + mov lr, pc + ldr pc, [r10, #CPU_DO_SUSPEND] @ save CPU state +#else + mov r2, sp @ current virtual SP + ldr r0, =cpu_suspend_size + sub sp, sp, r0 @ allocate CPU state on stack + mov r0, sp @ save pointer + stmfd sp!, {r1, r2, r3} @ save v:p, virt SP, return fn + ldr r3, =sleep_save_sp + add r2, sp, r1 @ convert SP to phys + str r2, [r3] @ save phys SP + bl cpu_do_suspend +#endif + + @ flush data cache +#ifdef MULTI_CACHE + ldr r10, =cpu_cache + mov lr, r9 + ldr pc, [r10, #CACHE_FLUSH_KERN_ALL] +#else + mov lr, r9 + b __cpuc_flush_kern_all +#endif +ENDPROC(cpu_suspend) + .ltorg + +/* + * r0 = control register value + * r1 = v:p offset (preserved by cpu_do_resume) + * r2 = phys page table base + * r3 = L1 section flags + */ +ENTRY(cpu_resume_mmu) + adr r4, cpu_resume_turn_mmu_on + mov r4, r4, lsr #20 + orr r3, r3, r4, lsl #20 + ldr r5, [r2, r4, lsl #2] @ save old mapping + str r3, 
[r2, r4, lsl #2] @ setup 1:1 mapping for mmu code + sub r2, r2, r1 + ldr r3, =cpu_resume_after_mmu + bic r1, r0, #CR_C @ ensure D-cache is disabled + b cpu_resume_turn_mmu_on +ENDPROC(cpu_resume_mmu) + .ltorg + .align 5 +cpu_resume_turn_mmu_on: + mcr p15, 0, r1, c1, c0, 0 @ turn on MMU, I-cache, etc + mrc p15, 0, r1, c0, c0, 0 @ read id reg + mov r1, r1 + mov r1, r1 + mov pc, r3 @ jump to virtual address +ENDPROC(cpu_resume_turn_mmu_on) +cpu_resume_after_mmu: + str r5, [r2, r4, lsl #2] @ restore old mapping + mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache + mov pc, lr +ENDPROC(cpu_resume_after_mmu) + +/* + * Note: Yes, part of the following code is located into the .data section. + * This is to allow sleep_save_sp to be accessed with a relative load + * while we can't rely on any MMU translation. We could have put + * sleep_save_sp in the .text section as well, but some setups might + * insist on it to be truly read-only. + */ + .data + .align +ENTRY(cpu_resume) + ldr r0, sleep_save_sp @ stack phys addr + msr cpsr_c, #PSR_I_BIT | PSR_F_BIT | SVC_MODE @ set SVC, irqs off +#ifdef MULTI_CPU + ldmia r0!, {r1, sp, lr, pc} @ load v:p, stack, return fn, resume fn +#else + ldmia r0!, {r1, sp, lr} @ load v:p, stack, return fn + b cpu_do_resume +#endif +ENDPROC(cpu_resume) + +sleep_save_sp: + .word 0 @ preserve stack phys ptr here diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index bcf748d9f4e..226e3d8351c 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S @@ -493,6 +493,9 @@ arm1020_processor_functions: .word cpu_arm1020_dcache_clean_area .word cpu_arm1020_switch_mm .word cpu_arm1020_set_pte_ext + .word 0 + .word 0 + .word 0 .size arm1020_processor_functions, . - arm1020_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index ab7ec26657e..86d9c2cf0bc 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S @@ -474,6 +474,9 @@ arm1020e_processor_functions: .word cpu_arm1020e_dcache_clean_area .word cpu_arm1020e_switch_mm .word cpu_arm1020e_set_pte_ext + .word 0 + .word 0 + .word 0 .size arm1020e_processor_functions, . - arm1020e_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index 831c5e54e22..83d3dd34f84 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S @@ -457,6 +457,9 @@ arm1022_processor_functions: .word cpu_arm1022_dcache_clean_area .word cpu_arm1022_switch_mm .word cpu_arm1022_set_pte_ext + .word 0 + .word 0 + .word 0 .size arm1022_processor_functions, . - arm1022_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index e3f7e9a166b..686043ee728 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S @@ -452,6 +452,9 @@ arm1026_processor_functions: .word cpu_arm1026_dcache_clean_area .word cpu_arm1026_switch_mm .word cpu_arm1026_set_pte_ext + .word 0 + .word 0 + .word 0 .size arm1026_processor_functions, . - arm1026_processor_functions .section .rodata diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 6a7be1863ed..5f79dc4ce3f 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -284,6 +284,9 @@ ENTRY(arm6_processor_functions) .word cpu_arm6_dcache_clean_area .word cpu_arm6_switch_mm .word cpu_arm6_set_pte_ext + .word 0 + .word 0 + .word 0 .size arm6_processor_functions, . 
- arm6_processor_functions /* @@ -301,6 +304,9 @@ ENTRY(arm7_processor_functions) .word cpu_arm7_dcache_clean_area .word cpu_arm7_switch_mm .word cpu_arm7_set_pte_ext + .word 0 + .word 0 + .word 0 .size arm7_processor_functions, . - arm7_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S index c285395f44b..665266da143 100644 --- a/arch/arm/mm/proc-arm720.S +++ b/arch/arm/mm/proc-arm720.S @@ -185,6 +185,9 @@ ENTRY(arm720_processor_functions) .word cpu_arm720_dcache_clean_area .word cpu_arm720_switch_mm .word cpu_arm720_set_pte_ext + .word 0 + .word 0 + .word 0 .size arm720_processor_functions, . - arm720_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index 38b27dcba72..6f9d12effee 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S @@ -130,6 +130,9 @@ ENTRY(arm740_processor_functions) .word cpu_arm740_dcache_clean_area .word cpu_arm740_switch_mm .word 0 @ cpu_*_set_pte + .word 0 + .word 0 + .word 0 .size arm740_processor_functions, . - arm740_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S index 0c9786de20a..e4c165ca669 100644 --- a/arch/arm/mm/proc-arm7tdmi.S +++ b/arch/arm/mm/proc-arm7tdmi.S @@ -70,6 +70,9 @@ ENTRY(arm7tdmi_processor_functions) .word cpu_arm7tdmi_dcache_clean_area .word cpu_arm7tdmi_switch_mm .word 0 @ cpu_*_set_pte + .word 0 + .word 0 + .word 0 .size arm7tdmi_processor_functions, . - arm7tdmi_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 6109f278a90..219980ec8b6 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -387,6 +387,40 @@ ENTRY(cpu_arm920_set_pte_ext) #endif mov pc, lr +/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ +.globl cpu_arm920_suspend_size +.equ cpu_arm920_suspend_size, 4 * 3 +#ifdef CONFIG_PM +ENTRY(cpu_arm920_do_suspend) + stmfd sp!, {r4 - r7, lr} + mrc p15, 0, r4, c13, c0, 0 @ PID + mrc p15, 0, r5, c3, c0, 0 @ Domain ID + mrc p15, 0, r6, c2, c0, 0 @ TTB address + mrc p15, 0, r7, c1, c0, 0 @ Control register + stmia r0, {r4 - r7} + ldmfd sp!, {r4 - r7, pc} +ENDPROC(cpu_arm920_do_suspend) + +ENTRY(cpu_arm920_do_resume) + mov ip, #0 + mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs + mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches + ldmia r0, {r4 - r7} + mcr p15, 0, r4, c13, c0, 0 @ PID + mcr p15, 0, r5, c3, c0, 0 @ Domain ID + mcr p15, 0, r6, c2, c0, 0 @ TTB address + mov r0, r7 @ control register + mov r2, r6, lsr #14 @ get TTB0 base + mov r2, r2, lsl #14 + ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \ + PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE + b cpu_resume_mmu +ENDPROC(cpu_arm920_do_resume) +#else +#define cpu_arm920_do_suspend 0 +#define cpu_arm920_do_resume 0 +#endif + __CPUINIT .type __arm920_setup, #function @@ -432,6 +466,9 @@ arm920_processor_functions: .word cpu_arm920_dcache_clean_area .word cpu_arm920_switch_mm .word cpu_arm920_set_pte_ext + .word cpu_arm920_suspend_size + .word cpu_arm920_do_suspend + .word cpu_arm920_do_resume .size arm920_processor_functions, . 
- arm920_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index bb2f0f46a5e..36154b1e792 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S @@ -436,6 +436,9 @@ arm922_processor_functions: .word cpu_arm922_dcache_clean_area .word cpu_arm922_switch_mm .word cpu_arm922_set_pte_ext + .word 0 + .word 0 + .word 0 .size arm922_processor_functions, . - arm922_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index c13e01accfe..89c5e0009c4 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S @@ -503,6 +503,9 @@ arm925_processor_functions: .word cpu_arm925_dcache_clean_area .word cpu_arm925_switch_mm .word cpu_arm925_set_pte_ext + .word 0 + .word 0 + .word 0 .size arm925_processor_functions, . - arm925_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 42eb4315740..6a4bdb2c94a 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -401,6 +401,40 @@ ENTRY(cpu_arm926_set_pte_ext) #endif mov pc, lr +/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ +.globl cpu_arm926_suspend_size +.equ cpu_arm926_suspend_size, 4 * 3 +#ifdef CONFIG_PM +ENTRY(cpu_arm926_do_suspend) + stmfd sp!, {r4 - r7, lr} + mrc p15, 0, r4, c13, c0, 0 @ PID + mrc p15, 0, r5, c3, c0, 0 @ Domain ID + mrc p15, 0, r6, c2, c0, 0 @ TTB address + mrc p15, 0, r7, c1, c0, 0 @ Control register + stmia r0, {r4 - r7} + ldmfd sp!, {r4 - r7, pc} +ENDPROC(cpu_arm926_do_suspend) + +ENTRY(cpu_arm926_do_resume) + mov ip, #0 + mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs + mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches + ldmia r0, {r4 - r7} + mcr p15, 0, r4, c13, c0, 0 @ PID + mcr p15, 0, r5, c3, c0, 0 @ Domain ID + mcr p15, 0, r6, c2, c0, 0 @ TTB address + mov r0, r7 @ control register + mov r2, r6, lsr #14 @ get TTB0 base + mov r2, r2, lsl #14 + ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \ + PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE + b cpu_resume_mmu +ENDPROC(cpu_arm926_do_resume) +#else +#define cpu_arm926_do_suspend 0 +#define cpu_arm926_do_resume 0 +#endif + __CPUINIT .type __arm926_setup, #function @@ -456,6 +490,9 @@ arm926_processor_functions: .word cpu_arm926_dcache_clean_area .word cpu_arm926_switch_mm .word cpu_arm926_set_pte_ext + .word cpu_arm926_suspend_size + .word cpu_arm926_do_suspend + .word cpu_arm926_do_resume .size arm926_processor_functions, . - arm926_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index 7b11cdb9935..26aea3f71c2 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S @@ -363,6 +363,9 @@ ENTRY(arm940_processor_functions) .word cpu_arm940_dcache_clean_area .word cpu_arm940_switch_mm .word 0 @ cpu_*_set_pte + .word 0 + .word 0 + .word 0 .size arm940_processor_functions, . - arm940_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index 1a5bbf08034..8063345406f 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S @@ -419,6 +419,9 @@ ENTRY(arm946_processor_functions) .word cpu_arm946_dcache_clean_area .word cpu_arm946_switch_mm .word 0 @ cpu_*_set_pte + .word 0 + .word 0 + .word 0 .size arm946_processor_functions, . 
- arm946_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S index db67e3134d7..7b7ebd4d096 100644 --- a/arch/arm/mm/proc-arm9tdmi.S +++ b/arch/arm/mm/proc-arm9tdmi.S @@ -70,6 +70,9 @@ ENTRY(arm9tdmi_processor_functions) .word cpu_arm9tdmi_dcache_clean_area .word cpu_arm9tdmi_switch_mm .word 0 @ cpu_*_set_pte + .word 0 + .word 0 + .word 0 .size arm9tdmi_processor_functions, . - arm9tdmi_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S index 7c9ad621f0e..fc2a4ae15cf 100644 --- a/arch/arm/mm/proc-fa526.S +++ b/arch/arm/mm/proc-fa526.S @@ -195,6 +195,9 @@ fa526_processor_functions: .word cpu_fa526_dcache_clean_area .word cpu_fa526_switch_mm .word cpu_fa526_set_pte_ext + .word 0 + .word 0 + .word 0 .size fa526_processor_functions, . - fa526_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index b4597edbff9..d3883eed7a4 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S @@ -554,6 +554,9 @@ feroceon_processor_functions: .word cpu_feroceon_dcache_clean_area .word cpu_feroceon_switch_mm .word cpu_feroceon_set_pte_ext + .word 0 + .word 0 + .word 0 .size feroceon_processor_functions, . - feroceon_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 4458ee6aa71..9d4f2ae6337 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -388,6 +388,9 @@ mohawk_processor_functions: .word cpu_mohawk_dcache_clean_area .word cpu_mohawk_switch_mm .word cpu_mohawk_set_pte_ext + .word 0 + .word 0 + .word 0 .size mohawk_processor_functions, . - mohawk_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S index 5aa8d59c2e8..46f09ed16b9 100644 --- a/arch/arm/mm/proc-sa110.S +++ b/arch/arm/mm/proc-sa110.S @@ -203,6 +203,9 @@ ENTRY(sa110_processor_functions) .word cpu_sa110_dcache_clean_area .word cpu_sa110_switch_mm .word cpu_sa110_set_pte_ext + .word 0 + .word 0 + .word 0 .size sa110_processor_functions, . 
- sa110_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 2ac4e6f1071..74483d1977f 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S @@ -169,6 +169,42 @@ ENTRY(cpu_sa1100_set_pte_ext) #endif mov pc, lr +.globl cpu_sa1100_suspend_size +.equ cpu_sa1100_suspend_size, 4*4 +#ifdef CONFIG_PM +ENTRY(cpu_sa1100_do_suspend) + stmfd sp!, {r4 - r7, lr} + mrc p15, 0, r4, c3, c0, 0 @ domain ID + mrc p15, 0, r5, c2, c0, 0 @ translation table base addr + mrc p15, 0, r6, c13, c0, 0 @ PID + mrc p15, 0, r7, c1, c0, 0 @ control reg + stmia r0, {r4 - r7} @ store cp regs + ldmfd sp!, {r4 - r7, pc} +ENDPROC(cpu_sa1100_do_suspend) + +ENTRY(cpu_sa1100_do_resume) + ldmia r0, {r4 - r7} @ load cp regs + mov r1, #0 + mcr p15, 0, r1, c8, c7, 0 @ flush I+D TLBs + mcr p15, 0, r1, c7, c7, 0 @ flush I&D cache + mcr p15, 0, r1, c9, c0, 0 @ invalidate RB + mcr p15, 0, r1, c9, c0, 5 @ allow user space to use RB + + mcr p15, 0, r4, c3, c0, 0 @ domain ID + mcr p15, 0, r5, c2, c0, 0 @ translation table base addr + mcr p15, 0, r6, c13, c0, 0 @ PID + mov r0, r7 @ control register + mov r2, r5, lsr #14 @ get TTB0 base + mov r2, r2, lsl #14 + ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \ + PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE + b cpu_resume_mmu +ENDPROC(cpu_sa1100_do_resume) +#else +#define cpu_sa1100_do_suspend 0 +#define cpu_sa1100_do_resume 0 +#endif + __CPUINIT .type __sa1100_setup, #function @@ -218,6 +254,9 @@ ENTRY(sa1100_processor_functions) .word cpu_sa1100_dcache_clean_area .word cpu_sa1100_switch_mm .word cpu_sa1100_set_pte_ext + .word cpu_sa1100_suspend_size + .word cpu_sa1100_do_suspend + .word cpu_sa1100_do_resume .size sa1100_processor_functions, . - sa1100_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 59a7e1ffe7b..832b6bdc192 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -121,6 +121,53 @@ ENTRY(cpu_v6_set_pte_ext) #endif mov pc, lr +/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ +.globl cpu_v6_suspend_size +.equ cpu_v6_suspend_size, 4 * 8 +#ifdef CONFIG_PM +ENTRY(cpu_v6_do_suspend) + stmfd sp!, {r4 - r11, lr} + mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID + mrc p15, 0, r5, c13, c0, 1 @ Context ID + mrc p15, 0, r6, c3, c0, 0 @ Domain ID + mrc p15, 0, r7, c2, c0, 0 @ Translation table base 0 + mrc p15, 0, r8, c2, c0, 1 @ Translation table base 1 + mrc p15, 0, r9, c1, c0, 1 @ auxillary control register + mrc p15, 0, r10, c1, c0, 2 @ co-processor access control + mrc p15, 0, r11, c1, c0, 0 @ control register + stmia r0, {r4 - r11} + ldmfd sp!, {r4- r11, pc} +ENDPROC(cpu_v6_do_suspend) + +ENTRY(cpu_v6_do_resume) + mov ip, #0 + mcr p15, 0, ip, c7, c14, 0 @ clean+invalidate D cache + mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache + mcr p15, 0, ip, c7, c15, 0 @ clean+invalidate cache + mcr p15, 0, ip, c7, c10, 4 @ drain write buffer + ldmia r0, {r4 - r11} + mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID + mcr p15, 0, r5, c13, c0, 1 @ Context ID + mcr p15, 0, r6, c3, c0, 0 @ Domain ID + mcr p15, 0, r7, c2, c0, 0 @ Translation table base 0 + mcr p15, 0, r8, c2, c0, 1 @ Translation table base 1 + mcr p15, 0, r9, c1, c0, 1 @ auxillary control register + mcr p15, 0, r10, c1, c0, 2 @ co-processor access control + mcr p15, 0, ip, c2, c0, 2 @ TTB control register + mcr p15, 0, ip, c7, c5, 4 @ ISB + mov r0, r11 @ control register + mov r2, r7, lsr #14 @ get TTB0 base + mov r2, r2, lsl #14 + ldr r3, cpu_resume_l1_flags + b cpu_resume_mmu 
+ENDPROC(cpu_v6_do_resume) +cpu_resume_l1_flags: + ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP) + ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP) +#else +#define cpu_v6_do_suspend 0 +#define cpu_v6_do_resume 0 +#endif .type cpu_v6_name, #object @@ -206,6 +253,9 @@ ENTRY(v6_processor_functions) .word cpu_v6_dcache_clean_area .word cpu_v6_switch_mm .word cpu_v6_set_pte_ext + .word cpu_v6_suspend_size + .word cpu_v6_do_suspend + .word cpu_v6_do_resume .size v6_processor_functions, . - v6_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 0c1172b56b4..a5187ddfb26 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -171,6 +171,87 @@ cpu_v7_name: .ascii "ARMv7 Processor" .align + /* + * Memory region attributes with SCTLR.TRE=1 + * + * n = TEX[0],C,B + * TR = PRRR[2n+1:2n] - memory type + * IR = NMRR[2n+1:2n] - inner cacheable property + * OR = NMRR[2n+17:2n+16] - outer cacheable property + * + * n TR IR OR + * UNCACHED 000 00 + * BUFFERABLE 001 10 00 00 + * WRITETHROUGH 010 10 10 10 + * WRITEBACK 011 10 11 11 + * reserved 110 + * WRITEALLOC 111 10 01 01 + * DEV_SHARED 100 01 + * DEV_NONSHARED 100 01 + * DEV_WC 001 10 + * DEV_CACHED 011 10 + * + * Other attributes: + * + * DS0 = PRRR[16] = 0 - device shareable property + * DS1 = PRRR[17] = 1 - device shareable property + * NS0 = PRRR[18] = 0 - normal shareable property + * NS1 = PRRR[19] = 1 - normal shareable property + * NOS = PRRR[24+n] = 1 - not outer shareable + */ +.equ PRRR, 0xff0a81a8 +.equ NMRR, 0x40e040e0 + +/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ +.globl cpu_v7_suspend_size +.equ cpu_v7_suspend_size, 4 * 8 +#ifdef CONFIG_PM +ENTRY(cpu_v7_do_suspend) + stmfd sp!, {r4 - r11, lr} + mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID + mrc p15, 0, r5, c13, c0, 1 @ Context ID + mrc p15, 0, r6, c3, c0, 0 @ Domain ID + mrc p15, 0, r7, c2, c0, 0 @ TTB 0 + mrc p15, 0, r8, c2, c0, 1 @ TTB 1 + mrc p15, 0, r9, c1, c0, 0 @ Control register + mrc p15, 0, r10, c1, c0, 1 @ Auxiliary control register + mrc p15, 0, r11, c1, c0, 2 @ Co-processor access control + stmia r0, {r4 - r11} + ldmfd sp!, {r4 - r11, pc} +ENDPROC(cpu_v7_do_suspend) + +ENTRY(cpu_v7_do_resume) + mov ip, #0 + mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs + mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache + ldmia r0, {r4 - r11} + mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID + mcr p15, 0, r5, c13, c0, 1 @ Context ID + mcr p15, 0, r6, c3, c0, 0 @ Domain ID + mcr p15, 0, r7, c2, c0, 0 @ TTB 0 + mcr p15, 0, r8, c2, c0, 1 @ TTB 1 + mcr p15, 0, ip, c2, c0, 2 @ TTB control register + mcr p15, 0, r10, c1, c0, 1 @ Auxillary control register + mcr p15, 0, r11, c1, c0, 2 @ Co-processor access control + ldr r4, =PRRR @ PRRR + ldr r5, =NMRR @ NMRR + mcr p15, 0, r4, c10, c2, 0 @ write PRRR + mcr p15, 0, r5, c10, c2, 1 @ write NMRR + isb + mov r0, r9 @ control register + mov r2, r7, lsr #14 @ get TTB0 base + mov r2, r2, lsl #14 + ldr r3, cpu_resume_l1_flags + b cpu_resume_mmu +ENDPROC(cpu_v7_do_resume) +cpu_resume_l1_flags: + ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP) + ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP) +#else +#define cpu_v7_do_suspend 0 +#define cpu_v7_do_resume 0 +#endif + __CPUINIT /* @@ -276,36 +357,8 @@ __v7_setup: ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP) ALT_UP(orr r4, r4, #TTB_FLAGS_UP) mcr p15, 0, r4, c2, c0, 1 @ load TTB1 - /* - * Memory region attributes with SCTLR.TRE=1 - * - * n = TEX[0],C,B - * TR = PRRR[2n+1:2n] - memory type - * 
IR = NMRR[2n+1:2n] - inner cacheable property - * OR = NMRR[2n+17:2n+16] - outer cacheable property - * - * n TR IR OR - * UNCACHED 000 00 - * BUFFERABLE 001 10 00 00 - * WRITETHROUGH 010 10 10 10 - * WRITEBACK 011 10 11 11 - * reserved 110 - * WRITEALLOC 111 10 01 01 - * DEV_SHARED 100 01 - * DEV_NONSHARED 100 01 - * DEV_WC 001 10 - * DEV_CACHED 011 10 - * - * Other attributes: - * - * DS0 = PRRR[16] = 0 - device shareable property - * DS1 = PRRR[17] = 1 - device shareable property - * NS0 = PRRR[18] = 0 - normal shareable property - * NS1 = PRRR[19] = 1 - normal shareable property - * NOS = PRRR[24+n] = 1 - not outer shareable - */ - ldr r5, =0xff0a81a8 @ PRRR - ldr r6, =0x40e040e0 @ NMRR + ldr r5, =PRRR @ PRRR + ldr r6, =NMRR @ NMRR mcr p15, 0, r5, c10, c2, 0 @ write PRRR mcr p15, 0, r6, c10, c2, 1 @ write NMRR #endif @@ -351,6 +404,9 @@ ENTRY(v7_processor_functions) .word cpu_v7_dcache_clean_area .word cpu_v7_switch_mm .word cpu_v7_set_pte_ext + .word 0 + .word 0 + .word 0 .size v7_processor_functions, . - v7_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index ec26355cb7c..63d8b2044e8 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -413,9 +413,52 @@ ENTRY(cpu_xsc3_set_pte_ext) mov pc, lr .ltorg - .align +.globl cpu_xsc3_suspend_size +.equ cpu_xsc3_suspend_size, 4 * 8 +#ifdef CONFIG_PM +ENTRY(cpu_xsc3_do_suspend) + stmfd sp!, {r4 - r10, lr} + mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode + mrc p15, 0, r5, c15, c1, 0 @ CP access reg + mrc p15, 0, r6, c13, c0, 0 @ PID + mrc p15, 0, r7, c3, c0, 0 @ domain ID + mrc p15, 0, r8, c2, c0, 0 @ translation table base addr + mrc p15, 0, r9, c1, c0, 1 @ auxiliary control reg + mrc p15, 0, r10, c1, c0, 0 @ control reg + bic r4, r4, #2 @ clear frequency change bit + stmia r0, {r1, r4 - r10} @ store v:p offset + cp regs + ldmia sp!, {r4 - r10, pc} +ENDPROC(cpu_xsc3_do_suspend) + +ENTRY(cpu_xsc3_do_resume) + ldmia r0, {r1, r4 - r10} @ load v:p offset + cp regs + mov ip, #0 + mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB + mcr p15, 0, ip, c7, c10, 4 @ drain write (&fill) buffer + mcr p15, 0, ip, c7, c5, 4 @ flush prefetch buffer + mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs + mcr p14, 0, r4, c6, c0, 0 @ clock configuration, turbo mode. + mcr p15, 0, r5, c15, c1, 0 @ CP access reg + mcr p15, 0, r6, c13, c0, 0 @ PID + mcr p15, 0, r7, c3, c0, 0 @ domain ID + mcr p15, 0, r8, c2, c0, 0 @ translation table base addr + mcr p15, 0, r9, c1, c0, 1 @ auxiliary control reg + + @ temporarily map resume_turn_on_mmu into the page table, + @ otherwise prefetch abort occurs after MMU is turned on + mov r0, r10 @ control register + mov r2, r8, lsr #14 @ get TTB0 base + mov r2, r2, lsl #14 + ldr r3, =0x542e @ section flags + b cpu_resume_mmu +ENDPROC(cpu_xsc3_do_resume) +#else +#define cpu_xsc3_do_suspend 0 +#define cpu_xsc3_do_resume 0 +#endif + __CPUINIT .type __xsc3_setup, #function @@ -476,6 +519,9 @@ ENTRY(xsc3_processor_functions) .word cpu_xsc3_dcache_clean_area .word cpu_xsc3_switch_mm .word cpu_xsc3_set_pte_ext + .word cpu_xsc3_suspend_size + .word cpu_xsc3_do_suspend + .word cpu_xsc3_do_resume .size xsc3_processor_functions, . 
- xsc3_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 5a37c5e45c4..086038cd86a 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -513,11 +513,49 @@ ENTRY(cpu_xscale_set_pte_ext) xscale_set_pte_ext_epilogue mov pc, lr - .ltorg - .align +.globl cpu_xscale_suspend_size +.equ cpu_xscale_suspend_size, 4 * 7 +#ifdef CONFIG_PM +ENTRY(cpu_xscale_do_suspend) + stmfd sp!, {r4 - r10, lr} + mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode + mrc p15, 0, r5, c15, c1, 0 @ CP access reg + mrc p15, 0, r6, c13, c0, 0 @ PID + mrc p15, 0, r7, c3, c0, 0 @ domain ID + mrc p15, 0, r8, c2, c0, 0 @ translation table base addr + mrc p15, 0, r9, c1, c1, 0 @ auxiliary control reg + mrc p15, 0, r10, c1, c0, 0 @ control reg + bic r4, r4, #2 @ clear frequency change bit + stmia r0, {r4 - r10} @ store cp regs + ldmfd sp!, {r4 - r10, pc} +ENDPROC(cpu_xscale_do_suspend) + +ENTRY(cpu_xscale_do_resume) + ldmia r0, {r4 - r10} @ load cp regs + mov ip, #0 + mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs + mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB + mcr p14, 0, r4, c6, c0, 0 @ clock configuration, turbo mode. + mcr p15, 0, r5, c15, c1, 0 @ CP access reg + mcr p15, 0, r6, c13, c0, 0 @ PID + mcr p15, 0, r7, c3, c0, 0 @ domain ID + mcr p15, 0, r8, c2, c0, 0 @ translation table base addr + mcr p15, 0, r9, c1, c1, 0 @ auxiliary control reg + mov r0, r10 @ control register + mov r2, r8, lsr #14 @ get TTB0 base + mov r2, r2, lsl #14 + ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \ + PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE + b cpu_resume_mmu +ENDPROC(cpu_xscale_do_resume) +#else +#define cpu_xscale_do_suspend 0 +#define cpu_xscale_do_resume 0 +#endif + __CPUINIT .type __xscale_setup, #function @@ -565,6 +603,9 @@ ENTRY(xscale_processor_functions) .word cpu_xscale_dcache_clean_area .word cpu_xscale_switch_mm .word cpu_xscale_set_pte_ext + .word cpu_xscale_suspend_size + .word cpu_xscale_do_suspend + .word cpu_xscale_do_resume .size xscale_processor_functions, . - xscale_processor_functions .section ".rodata" -- cgit v1.2.3-70-g09d2 From 2bbd7e9b74271b2d6a14b4840fc44afbea83774d Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 8 Jan 2011 12:05:09 +0000 Subject: ARM: fix some sparse errors in generic ARM code arch/arm/kernel/return_address.c:37:6: warning: symbol 'return_address' was not declared. Should it be static? arch/arm/kernel/setup.c:76:14: warning: symbol 'processor_id' was not declared. Should it be static? arch/arm/kernel/traps.c:259:1: warning: symbol 'die_lock' was not declared. Should it be static? arch/arm/vfp/vfpmodule.c:156:6: warning: symbol 'vfp_raise_sigfpe' was not declared. Should it be static? 
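Both kinds of fix follow a standard pattern, sketched here rather than quoted from the diff:

  /* Pattern 1: the symbol is used in only one file, so give it internal
   * linkage (as done for die_lock and vfp_raise_sigfpe below). */
  static DEFINE_SPINLOCK(die_lock);

  /* Pattern 2: the symbol really is shared, so keep the extern
   * declaration in a header and include that header from the file that
   * defines the symbol (as done for processor_id via asm/cputype.h). */
  extern unsigned int processor_id;
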
Signed-off-by: Russell King --- arch/arm/include/asm/cputype.h | 3 ++- arch/arm/kernel/return_address.c | 1 + arch/arm/kernel/traps.c | 2 +- arch/arm/vfp/vfpmodule.c | 2 +- 4 files changed, 5 insertions(+), 3 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index 20ae96cc002..ed5bc9e05a4 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -23,6 +23,8 @@ #define CPUID_EXT_ISAR4 "c2, 4" #define CPUID_EXT_ISAR5 "c2, 5" +extern unsigned int processor_id; + #ifdef CONFIG_CPU_CP15 #define read_cpuid(reg) \ ({ \ @@ -43,7 +45,6 @@ __val; \ }) #else -extern unsigned int processor_id; #define read_cpuid(reg) (processor_id) #define read_cpuid_ext(reg) 0 #endif diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c index df246da4cec..0b13a72f855 100644 --- a/arch/arm/kernel/return_address.c +++ b/arch/arm/kernel/return_address.c @@ -9,6 +9,7 @@ * the Free Software Foundation. */ #include +#include #if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) #include diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index ee57640ba2b..7f53c3651c5 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -256,7 +256,7 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt return ret; } -DEFINE_SPINLOCK(die_lock); +static DEFINE_SPINLOCK(die_lock); /* * This function is protected against re-entrancy. diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 0797cb528b4..25b89d81710 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -153,7 +153,7 @@ static struct notifier_block vfp_notifier_block = { * Raise a SIGFPE for the current process. * sicode describes the signal being raised. */ -void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs) +static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs) { siginfo_t info; -- cgit v1.2.3-70-g09d2 From aaa50048f6ce44af66ce0389d4cc6a8348333271 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 25 Jan 2011 21:35:38 +0100 Subject: ARM: 6639/1: allow highmem on SMP platforms without h/w TLB ops broadcast In commit e616c591405c168f6dc3dfd1221e105adfe49b8d, highmem support was deactivated for SMP platforms without hardware TLB ops broadcast because usage of kmap_high_get() requires that IRQs be disabled when kmap_lock is locked which is incompatible with the IPI mechanism used by the software TLB ops broadcast invoked through flush_all_zero_pkmaps(). The reason for kmap_high_get() is to ensure that the currently kmap'd page usage count does not decrease to zero while we're using its existing virtual mapping in an atomic context. With a VIVT cache this is essential to do due to cache coherency issues, but with a VIPT cache this is only an optimization so not to pay the price of establishing a second mapping if an existing one can be used. However, on VIPT platforms without hardware TLB maintenance we can give up on that optimization in order to be able to use highmem. From ARMv7 onwards the TLB ops are broadcasted in hardware, so let's disable ARCH_NEEDS_KMAP_HIGH_GET only when CONFIG_SMP and CONFIG_CPU_TLB_V6 are defined. 
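For reference, a sketch of the calling pattern this preserves, modelled on the dcache flushing code (with the new stub, configurations that cannot support ARCH_NEEDS_KMAP_HIGH_GET simply see NULL and take their fallback path):

  #include <linux/highmem.h>
  #include <asm/cacheflush.h>

  void example_flush_highpage(struct page *page)	/* illustrative only */
  {
  	void *addr = kmap_high_get(page);

  	if (addr) {
  		/* Reuse the pinned kmap; safe in atomic context. */
  		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
  		kunmap_high(page);	/* drop the reference taken above */
  	}
  	/* else: no usable mapping exists; the caller falls back to a
  	 * temporary mapping or a physical-address-based flush. */
  }
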
Signed-off-by: Nicolas Pitre Tested-by: Saeed Bishara Signed-off-by: Russell King --- arch/arm/include/asm/highmem.h | 29 +++++++++++++++++++++++++++-- arch/arm/mm/mmu.c | 10 ---------- 2 files changed, 27 insertions(+), 12 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h index 7080e2c8fa6..a4edd19dd3d 100644 --- a/arch/arm/include/asm/highmem.h +++ b/arch/arm/include/asm/highmem.h @@ -19,11 +19,36 @@ extern pte_t *pkmap_page_table; +extern void *kmap_high(struct page *page); +extern void kunmap_high(struct page *page); + +/* + * The reason for kmap_high_get() is to ensure that the currently kmap'd + * page usage count does not decrease to zero while we're using its + * existing virtual mapping in an atomic context. With a VIVT cache this + * is essential to do, but with a VIPT cache this is only an optimization + * so not to pay the price of establishing a second mapping if an existing + * one can be used. However, on platforms without hardware TLB maintenance + * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since + * the locking involved must also disable IRQs which is incompatible with + * the IPI mechanism used by global TLB operations. + */ #define ARCH_NEEDS_KMAP_HIGH_GET +#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6) +#undef ARCH_NEEDS_KMAP_HIGH_GET +#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT) +#error "The sum of features in your kernel config cannot be supported together" +#endif +#endif -extern void *kmap_high(struct page *page); +#ifdef ARCH_NEEDS_KMAP_HIGH_GET extern void *kmap_high_get(struct page *page); -extern void kunmap_high(struct page *page); +#else +static inline void *kmap_high_get(struct page *page) +{ + return NULL; +} +#endif /* * The following functions are already defined by diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 3c67e92f7d5..ff7b43b5885 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -827,16 +827,6 @@ static void __init sanity_check_meminfo(void) * rather difficult. */ reason = "with VIPT aliasing cache"; - } else if (is_smp() && tlb_ops_need_broadcast()) { - /* - * kmap_high needs to occasionally flush TLB entries, - * however, if the TLB entries need to be broadcast - * we may deadlock: - * kmap_high(irqs off)->flush_all_zero_pkmaps-> - * flush_tlb_kernel_range->smp_call_function_many - * (must not be called with irqs off) - */ - reason = "without hardware TLB ops broadcasting"; } if (reason) { printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n", -- cgit v1.2.3-70-g09d2 From 425fc47adb5bb69f76285be77a09a3341a30799e Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 14 Feb 2011 14:31:09 +0100 Subject: ARM: 6668/1: ptrace: remove single-step emulation code PTRACE_SINGLESTEP is a ptrace request designed to offer single-stepping support to userspace when the underlying architecture has hardware support for this operation. On ARM, we set arch_has_single_step() to 1 and attempt to emulate hardware single-stepping by disassembling the current instruction to determine the next pc and placing a software breakpoint on that location. Unfortunately this has the following problems: 1.) Only a subset of ARMv7 instructions are supported 2.) Thumb-2 is unsupported 3.) The code is not SMP safe We could try to fix this code, but it turns out that because of the above issues it is rarely used in practice. 
GDB, for example, uses PTRACE_POKETEXT and PTRACE_PEEKTEXT to manage breakpoints itself and does not require any kernel assistance. This patch removes the single-step emulation code from ptrace meaning that the PTRACE_SINGLESTEP request will return -EIO on ARM. Portable code must check the return value from a ptrace call and handle the failure gracefully. Acked-by: Nicolas Pitre Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/processor.h | 12 -- arch/arm/include/asm/ptrace.h | 2 - arch/arm/include/asm/traps.h | 1 + arch/arm/kernel/ptrace.c | 383 +-------------------------------------- arch/arm/kernel/ptrace.h | 37 ---- arch/arm/kernel/signal.c | 9 - arch/arm/kernel/traps.c | 2 +- 7 files changed, 3 insertions(+), 443 deletions(-) delete mode 100644 arch/arm/kernel/ptrace.h (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index 67357baaeee..b439b41aeac 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h @@ -29,19 +29,7 @@ #define STACK_TOP_MAX TASK_SIZE #endif -union debug_insn { - u32 arm; - u16 thumb; -}; - -struct debug_entry { - u32 address; - union debug_insn insn; -}; - struct debug_info { - int nsaved; - struct debug_entry bp[2]; #ifdef CONFIG_HAVE_HW_BREAKPOINT struct perf_event *hbp[ARM_MAX_HBP_SLOTS]; #endif diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 783d50f3261..a8ff22b2a39 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -130,8 +130,6 @@ struct pt_regs { #ifdef __KERNEL__ -#define arch_has_single_step() (1) - #define user_mode(regs) \ (((regs)->ARM_cpsr & 0xf) == 0) diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h index 1b960d5ef6a..f90756dc16d 100644 --- a/arch/arm/include/asm/traps.h +++ b/arch/arm/include/asm/traps.h @@ -45,6 +45,7 @@ static inline int in_exception_text(unsigned long ptr) extern void __init early_trap_init(void); extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame); +extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs); extern void *vectors_page; diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 19c6816db61..eace844511f 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -26,8 +26,6 @@ #include #include -#include "ptrace.h" - #define REG_PC 15 #define REG_PSR 16 /* @@ -184,389 +182,12 @@ put_user_reg(struct task_struct *task, int offset, long data) return ret; } -static inline int -read_u32(struct task_struct *task, unsigned long addr, u32 *res) -{ - int ret; - - ret = access_process_vm(task, addr, res, sizeof(*res), 0); - - return ret == sizeof(*res) ? 0 : -EIO; -} - -static inline int -read_instr(struct task_struct *task, unsigned long addr, u32 *res) -{ - int ret; - - if (addr & 1) { - u16 val; - ret = access_process_vm(task, addr & ~1, &val, sizeof(val), 0); - ret = ret == sizeof(val) ? 0 : -EIO; - *res = val; - } else { - u32 val; - ret = access_process_vm(task, addr & ~3, &val, sizeof(val), 0); - ret = ret == sizeof(val) ? 
0 : -EIO; - *res = val; - } - return ret; -} - -/* - * Get value of register `rn' (in the instruction) - */ -static unsigned long -ptrace_getrn(struct task_struct *child, unsigned long insn) -{ - unsigned int reg = (insn >> 16) & 15; - unsigned long val; - - val = get_user_reg(child, reg); - if (reg == 15) - val += 8; - - return val; -} - -/* - * Get value of operand 2 (in an ALU instruction) - */ -static unsigned long -ptrace_getaluop2(struct task_struct *child, unsigned long insn) -{ - unsigned long val; - int shift; - int type; - - if (insn & 1 << 25) { - val = insn & 255; - shift = (insn >> 8) & 15; - type = 3; - } else { - val = get_user_reg (child, insn & 15); - - if (insn & (1 << 4)) - shift = (int)get_user_reg (child, (insn >> 8) & 15); - else - shift = (insn >> 7) & 31; - - type = (insn >> 5) & 3; - } - - switch (type) { - case 0: val <<= shift; break; - case 1: val >>= shift; break; - case 2: - val = (((signed long)val) >> shift); - break; - case 3: - val = (val >> shift) | (val << (32 - shift)); - break; - } - return val; -} - -/* - * Get value of operand 2 (in a LDR instruction) - */ -static unsigned long -ptrace_getldrop2(struct task_struct *child, unsigned long insn) -{ - unsigned long val; - int shift; - int type; - - val = get_user_reg(child, insn & 15); - shift = (insn >> 7) & 31; - type = (insn >> 5) & 3; - - switch (type) { - case 0: val <<= shift; break; - case 1: val >>= shift; break; - case 2: - val = (((signed long)val) >> shift); - break; - case 3: - val = (val >> shift) | (val << (32 - shift)); - break; - } - return val; -} - -#define OP_MASK 0x01e00000 -#define OP_AND 0x00000000 -#define OP_EOR 0x00200000 -#define OP_SUB 0x00400000 -#define OP_RSB 0x00600000 -#define OP_ADD 0x00800000 -#define OP_ADC 0x00a00000 -#define OP_SBC 0x00c00000 -#define OP_RSC 0x00e00000 -#define OP_ORR 0x01800000 -#define OP_MOV 0x01a00000 -#define OP_BIC 0x01c00000 -#define OP_MVN 0x01e00000 - -static unsigned long -get_branch_address(struct task_struct *child, unsigned long pc, unsigned long insn) -{ - u32 alt = 0; - - switch (insn & 0x0e000000) { - case 0x00000000: - case 0x02000000: { - /* - * data processing - */ - long aluop1, aluop2, ccbit; - - if ((insn & 0x0fffffd0) == 0x012fff10) { - /* - * bx or blx - */ - alt = get_user_reg(child, insn & 15); - break; - } - - - if ((insn & 0xf000) != 0xf000) - break; - - aluop1 = ptrace_getrn(child, insn); - aluop2 = ptrace_getaluop2(child, insn); - ccbit = get_user_reg(child, REG_PSR) & PSR_C_BIT ? 
1 : 0; - - switch (insn & OP_MASK) { - case OP_AND: alt = aluop1 & aluop2; break; - case OP_EOR: alt = aluop1 ^ aluop2; break; - case OP_SUB: alt = aluop1 - aluop2; break; - case OP_RSB: alt = aluop2 - aluop1; break; - case OP_ADD: alt = aluop1 + aluop2; break; - case OP_ADC: alt = aluop1 + aluop2 + ccbit; break; - case OP_SBC: alt = aluop1 - aluop2 + ccbit; break; - case OP_RSC: alt = aluop2 - aluop1 + ccbit; break; - case OP_ORR: alt = aluop1 | aluop2; break; - case OP_MOV: alt = aluop2; break; - case OP_BIC: alt = aluop1 & ~aluop2; break; - case OP_MVN: alt = ~aluop2; break; - } - break; - } - - case 0x04000000: - case 0x06000000: - /* - * ldr - */ - if ((insn & 0x0010f000) == 0x0010f000) { - unsigned long base; - - base = ptrace_getrn(child, insn); - if (insn & 1 << 24) { - long aluop2; - - if (insn & 0x02000000) - aluop2 = ptrace_getldrop2(child, insn); - else - aluop2 = insn & 0xfff; - - if (insn & 1 << 23) - base += aluop2; - else - base -= aluop2; - } - read_u32(child, base, &alt); - } - break; - - case 0x08000000: - /* - * ldm - */ - if ((insn & 0x00108000) == 0x00108000) { - unsigned long base; - unsigned int nr_regs; - - if (insn & (1 << 23)) { - nr_regs = hweight16(insn & 65535) << 2; - - if (!(insn & (1 << 24))) - nr_regs -= 4; - } else { - if (insn & (1 << 24)) - nr_regs = -4; - else - nr_regs = 0; - } - - base = ptrace_getrn(child, insn); - - read_u32(child, base + nr_regs, &alt); - break; - } - break; - - case 0x0a000000: { - /* - * bl or b - */ - signed long displ; - /* It's a branch/branch link: instead of trying to - * figure out whether the branch will be taken or not, - * we'll put a breakpoint at both locations. This is - * simpler, more reliable, and probably not a whole lot - * slower than the alternative approach of emulating the - * branch. - */ - displ = (insn & 0x00ffffff) << 8; - displ = (displ >> 6) + 8; - if (displ != 0 && displ != 4) - alt = pc + displ; - } - break; - } - - return alt; -} - -static int -swap_insn(struct task_struct *task, unsigned long addr, - void *old_insn, void *new_insn, int size) -{ - int ret; - - ret = access_process_vm(task, addr, old_insn, size, 0); - if (ret == size) - ret = access_process_vm(task, addr, new_insn, size, 1); - return ret; -} - -static void -add_breakpoint(struct task_struct *task, struct debug_info *dbg, unsigned long addr) -{ - int nr = dbg->nsaved; - - if (nr < 2) { - u32 new_insn = BREAKINST_ARM; - int res; - - res = swap_insn(task, addr, &dbg->bp[nr].insn, &new_insn, 4); - - if (res == 4) { - dbg->bp[nr].address = addr; - dbg->nsaved += 1; - } - } else - printk(KERN_ERR "ptrace: too many breakpoints\n"); -} - -/* - * Clear one breakpoint in the user program. We copy what the hardware - * does and use bit 0 of the address to indicate whether this is a Thumb - * breakpoint or an ARM breakpoint. 
- */ -static void clear_breakpoint(struct task_struct *task, struct debug_entry *bp) -{ - unsigned long addr = bp->address; - union debug_insn old_insn; - int ret; - - if (addr & 1) { - ret = swap_insn(task, addr & ~1, &old_insn.thumb, - &bp->insn.thumb, 2); - - if (ret != 2 || old_insn.thumb != BREAKINST_THUMB) - printk(KERN_ERR "%s:%d: corrupted Thumb breakpoint at " - "0x%08lx (0x%04x)\n", task->comm, - task_pid_nr(task), addr, old_insn.thumb); - } else { - ret = swap_insn(task, addr & ~3, &old_insn.arm, - &bp->insn.arm, 4); - - if (ret != 4 || old_insn.arm != BREAKINST_ARM) - printk(KERN_ERR "%s:%d: corrupted ARM breakpoint at " - "0x%08lx (0x%08x)\n", task->comm, - task_pid_nr(task), addr, old_insn.arm); - } -} - -void ptrace_set_bpt(struct task_struct *child) -{ - struct pt_regs *regs; - unsigned long pc; - u32 insn; - int res; - - regs = task_pt_regs(child); - pc = instruction_pointer(regs); - - if (thumb_mode(regs)) { - printk(KERN_WARNING "ptrace: can't handle thumb mode\n"); - return; - } - - res = read_instr(child, pc, &insn); - if (!res) { - struct debug_info *dbg = &child->thread.debug; - unsigned long alt; - - dbg->nsaved = 0; - - alt = get_branch_address(child, pc, insn); - if (alt) - add_breakpoint(child, dbg, alt); - - /* - * Note that we ignore the result of setting the above - * breakpoint since it may fail. When it does, this is - * not so much an error, but a forewarning that we may - * be receiving a prefetch abort shortly. - * - * If we don't set this breakpoint here, then we can - * lose control of the thread during single stepping. - */ - if (!alt || predicate(insn) != PREDICATE_ALWAYS) - add_breakpoint(child, dbg, pc + 4); - } -} - -/* - * Ensure no single-step breakpoint is pending. Returns non-zero - * value if child was being single-stepped. - */ -void ptrace_cancel_bpt(struct task_struct *child) -{ - int i, nsaved = child->thread.debug.nsaved; - - child->thread.debug.nsaved = 0; - - if (nsaved > 2) { - printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved); - nsaved = 2; - } - - for (i = 0; i < nsaved; i++) - clear_breakpoint(child, &child->thread.debug.bp[i]); -} - -void user_disable_single_step(struct task_struct *task) -{ - task->ptrace &= ~PT_SINGLESTEP; - ptrace_cancel_bpt(task); -} - -void user_enable_single_step(struct task_struct *task) -{ - task->ptrace |= PT_SINGLESTEP; -} - /* * Called by kernel/ptrace.c when detaching.. */ void ptrace_disable(struct task_struct *child) { - user_disable_single_step(child); + /* Nothing to do. */ } /* @@ -576,8 +197,6 @@ void ptrace_break(struct task_struct *tsk, struct pt_regs *regs) { siginfo_t info; - ptrace_cancel_bpt(tsk); - info.si_signo = SIGTRAP; info.si_errno = 0; info.si_code = TRAP_BRKPT; diff --git a/arch/arm/kernel/ptrace.h b/arch/arm/kernel/ptrace.h deleted file mode 100644 index 3926605b82e..00000000000 --- a/arch/arm/kernel/ptrace.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * linux/arch/arm/kernel/ptrace.h - * - * Copyright (C) 2000-2003 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#include - -extern void ptrace_cancel_bpt(struct task_struct *); -extern void ptrace_set_bpt(struct task_struct *); -extern void ptrace_break(struct task_struct *, struct pt_regs *); - -/* - * Send SIGTRAP if we're single-stepping - */ -static inline void single_step_trap(struct task_struct *task) -{ - if (task->ptrace & PT_SINGLESTEP) { - ptrace_cancel_bpt(task); - send_sig(SIGTRAP, task, 1); - } -} - -static inline void single_step_clear(struct task_struct *task) -{ - if (task->ptrace & PT_SINGLESTEP) - ptrace_cancel_bpt(task); -} - -static inline void single_step_set(struct task_struct *task) -{ - if (task->ptrace & PT_SINGLESTEP) - ptrace_set_bpt(task); -} diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 907d5a620bc..7709668c484 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -20,7 +20,6 @@ #include #include -#include "ptrace.h" #include "signal.h" #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) @@ -348,8 +347,6 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs) if (restore_sigframe(regs, frame)) goto badframe; - single_step_trap(current); - return regs->ARM_r0; badframe: @@ -383,8 +380,6 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT) goto badframe; - single_step_trap(current); - return regs->ARM_r0; badframe: @@ -704,8 +699,6 @@ static void do_signal(struct pt_regs *regs, int syscall) if (try_to_freeze()) goto no_signal; - single_step_clear(current); - signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { sigset_t *oldset; @@ -724,7 +717,6 @@ static void do_signal(struct pt_regs *regs, int syscall) if (test_thread_flag(TIF_RESTORE_SIGMASK)) clear_thread_flag(TIF_RESTORE_SIGMASK); } - single_step_set(current); return; } @@ -770,7 +762,6 @@ static void do_signal(struct pt_regs *regs, int syscall) sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); } } - single_step_set(current); } asmlinkage void diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 7f53c3651c5..21ac43f1c2d 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -32,7 +33,6 @@ #include #include -#include "ptrace.h" #include "signal.h" static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; -- cgit v1.2.3-70-g09d2 From 80f0aad77f3e1e9d9e518b09ac46963d628ae2be Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Fri, 25 Feb 2011 17:54:52 +0100 Subject: ARM: 6766/1: Thumb-2: Reflect ARM/Thumb-2 configuration in module vermagic Loading Thumb-2 modules into an ARM kernel or vice-versa isn't guaranteed to work safely, since the kernel is not interworking- aware everywhere. This patch adds "thumb2" to the module vermagic when CONFIG_THUMB2_KERNEL is enabled, to help avoid accidental loading of modules into the wrong kernel. 
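For example, combined with the P2V tag added earlier in this series, an ARMv7 Thumb-2 kernel with 16-bit P2V patching ends up with an architecture vermagic along these lines (an illustrative expansion of the macros in the diff below, not new code):

  #define MODULE_ARCH_VERMAGIC \
  	MODULE_ARCH_VERMAGIC_ARMVSN \
  	MODULE_ARCH_VERMAGIC_ARMTHUMB \
  	MODULE_ARCH_VERMAGIC_P2V
  /* e.g. "ARMv7 " "thumb2 " "p2v16 " -> "ARMv7 thumb2 p2v16 ", so an
   * ARM-mode module build refuses to load into a Thumb-2 kernel. */
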
Signed-off-by: Dave Martin
Acked-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/include/asm/module.h | 8 ++++++++
 1 file changed, 8 insertions(+)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index a2b775b81cf..543b44916d2 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -40,8 +40,16 @@ struct mod_arch_specific {
 #define MODULE_ARCH_VERMAGIC_P2V ""
 #endif
 
+/* Add instruction set architecture tag to distinguish ARM/Thumb kernels */
+#ifdef CONFIG_THUMB2_KERNEL
+#define MODULE_ARCH_VERMAGIC_ARMTHUMB "thumb2 "
+#else
+#define MODULE_ARCH_VERMAGIC_ARMTHUMB ""
+#endif
+
 #define MODULE_ARCH_VERMAGIC \
 	MODULE_ARCH_VERMAGIC_ARMVSN \
+	MODULE_ARCH_VERMAGIC_ARMTHUMB \
 	MODULE_ARCH_VERMAGIC_P2V
 
 #endif /* _ASM_ARM_MODULE_H */
-- cgit v1.2.3-70-g09d2

From 2839e06c95d12ada034cf9b63da60334c7c6358b Mon Sep 17 00:00:00 2001
From: Santosh Shilimkar
Date: Tue, 8 Mar 2011 06:59:54 +0100
Subject: ARM: 6795/1: l2x0: Errata fix for flush by Way operation can cause data corruption

PL310 implements the Clean & Invalidate by Way L2 cache maintenance
operation (offset 0x7FC). This operation runs in the background so that
PL310 can handle normal accesses while it is in progress. Under very
rare circumstances, due to this erratum, write data can be lost when
PL310 treats a cacheable write transaction during a Clean & Invalidate
by Way operation.

Workaround:
 Disable Write-Back and Cache Linefill (Debug Control Register)
 Clean & Invalidate by Way (0x7FC)
 Re-enable Write-Back and Cache Linefill (Debug Control Register)

This patch also removes any OMAP dependency on the PL310 errata.

Signed-off-by: Santosh Shilimkar
Acked-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/Kconfig | 15 ++++++++++++---
 arch/arm/include/asm/outercache.h | 1 +
 arch/arm/mm/cache-l2x0.c | 32 ++++++++++++++++++--------------
 3 files changed, 31 insertions(+), 17 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 65ea7bb57c4..ef41f7e39f6 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1135,7 +1135,7 @@ config ARM_ERRATA_742231
 
 config PL310_ERRATA_588369
 	bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
-	depends on CACHE_L2X0 && ARCH_OMAP4
+	depends on CACHE_L2X0
 	help
 	   The PL310 L2 cache controller implements three types of Clean &
 	   Invalidate maintenance operations: by Physical Address
@@ -1144,8 +1144,7 @@ config PL310_ERRATA_588369
 	   clean operation followed immediately by an invalidate operation,
 	   both performing to the same memory location. This functionality
 	   is not correctly implemented in PL310 as clean lines are not
-	   invalidated as a result of these operations. Note that this errata
-	   uses Texas Instrument's secure monitor api.
+	   invalidated as a result of these operations.
 
 config ARM_ERRATA_720789
 	bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
@@ -1172,6 +1171,16 @@ config ARM_ERRATA_743622
 	  visible impact on the overall performance or power consumption of the
 	  processor.
 
+config PL310_ERRATA_727915
+	bool "Background Clean & Invalidate by Way operation can cause data corruption"
+	depends on CACHE_L2X0
+	help
+	  PL310 implements the Clean & Invalidate by Way L2 cache maintenance
+	  operation (offset 0x7FC). This operation runs in background so that
+	  PL310 can handle normal accesses while it is in progress. Under very
Under very + rare circumstances, due to this erratum, write data can be lost when + PL310 treats a cacheable write transaction during a Clean & + Invalidate by Way operation. endmenu source "arch/arm/common/Kconfig" diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h index fc190092527..348d513afa9 100644 --- a/arch/arm/include/asm/outercache.h +++ b/arch/arm/include/asm/outercache.h @@ -31,6 +31,7 @@ struct outer_cache_fns { #ifdef CONFIG_OUTER_CACHE_SYNC void (*sync)(void); #endif + void (*set_debug)(unsigned long); }; #ifdef CONFIG_OUTER_CACHE diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 170c9bb9586..803bce8845a 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -67,18 +67,24 @@ static inline void l2x0_inv_line(unsigned long addr) writel_relaxed(addr, base + L2X0_INV_LINE_PA); } -#ifdef CONFIG_PL310_ERRATA_588369 -static void debug_writel(unsigned long val) -{ - extern void omap_smc1(u32 fn, u32 arg); +#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) - /* - * Texas Instrument secure monitor api to modify the - * PL310 Debug Control Register. - */ - omap_smc1(0x100, val); +#define debug_writel(val) outer_cache.set_debug(val) + +static void l2x0_set_debug(unsigned long val) +{ + writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL); } +#else +/* Optimised out for non-errata case */ +static inline void debug_writel(unsigned long val) +{ +} + +#define l2x0_set_debug NULL +#endif +#ifdef CONFIG_PL310_ERRATA_588369 static inline void l2x0_flush_line(unsigned long addr) { void __iomem *base = l2x0_base; @@ -91,11 +97,6 @@ static inline void l2x0_flush_line(unsigned long addr) } #else -/* Optimised out for non-errata case */ -static inline void debug_writel(unsigned long val) -{ -} - static inline void l2x0_flush_line(unsigned long addr) { void __iomem *base = l2x0_base; @@ -119,9 +120,11 @@ static void l2x0_flush_all(void) /* clean all ways */ spin_lock_irqsave(&l2x0_lock, flags); + debug_writel(0x03); writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY); cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask); cache_sync(); + debug_writel(0x00); spin_unlock_irqrestore(&l2x0_lock, flags); } @@ -329,6 +332,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) outer_cache.flush_all = l2x0_flush_all; outer_cache.inv_all = l2x0_inv_all; outer_cache.disable = l2x0_disable; + outer_cache.set_debug = l2x0_set_debug; printk(KERN_INFO "%s cache controller enabled\n", type); printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n", -- cgit v1.2.3-70-g09d2 From d7ed36a4ea84e3a850f9932e2058ceef987d1acd Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Wed, 2 Mar 2011 08:03:22 +0100 Subject: ARM: 6777/1: gic: Add hooks for architecture specific extensions A few architectures combine the GIC with an external interrupt controller. On such systems it may be necessary to update both the GIC registers and the external controller's registers to control IRQ behavior. This can be addressed in a couple of possible ways. 1. Export the common GIC routines along with 'struct irq_chip gic_chip' and allow architectures to override them with custom functions. 2. Provide architecture-specific function pointer hooks within the GIC library and leave platforms to add the necessary code as part of these hooks. The first might be non-intrusive, but it has a few shortcomings: each arch needs to have its own custom GIC library.
The locks used should be common, since they protect the same IRQs, and from a maintenance point of view the duplication leads to fixes spread across multiple files. The second approach is probably cleaner and more portable: it ensures that the common GIC infrastructure is not touched, and it still lets architectures address their specific issues. Cc: Russell King Signed-off-by: Santosh Shilimkar Acked-by: Colin Cross Tested-by: Colin Cross Signed-off-by: Russell King --- arch/arm/common/gic.c | 47 +++++++++++++++++++++++++++++++++++++ arch/arm/include/asm/hardware/gic.h | 1 + 2 files changed, 48 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index e21c1f4218d..cb6b041c39d 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c @@ -44,6 +44,19 @@ struct gic_chip_data { void __iomem *cpu_base; }; +/* + * Supported arch specific GIC irq extension. + * Default make them NULL. + */ +struct irq_chip gic_arch_extn = { + .irq_ack = NULL, + .irq_mask = NULL, + .irq_unmask = NULL, + .irq_retrigger = NULL, + .irq_set_type = NULL, + .irq_set_wake = NULL, +}; + #ifndef MAX_GIC_NR #define MAX_GIC_NR 1 #endif @@ -74,6 +87,8 @@ static inline unsigned int gic_irq(struct irq_data *d) static void gic_ack_irq(struct irq_data *d) { spin_lock(&irq_controller_lock); + if (gic_arch_extn.irq_ack) + gic_arch_extn.irq_ack(d); writel(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); spin_unlock(&irq_controller_lock); } @@ -84,6 +99,8 @@ static void gic_mask_irq(struct irq_data *d) { u32 mask = 1 << (d->irq % 32); spin_lock(&irq_controller_lock); writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); + if (gic_arch_extn.irq_mask) + gic_arch_extn.irq_mask(d); spin_unlock(&irq_controller_lock); } @@ -92,6 +109,8 @@ static void gic_unmask_irq(struct irq_data *d) { u32 mask = 1 << (d->irq % 32); spin_lock(&irq_controller_lock); + if (gic_arch_extn.irq_unmask) + gic_arch_extn.irq_unmask(d); writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); spin_unlock(&irq_controller_lock); } @@ -116,6 +135,9 @@ static int gic_set_type(struct irq_data *d, unsigned int type) spin_lock(&irq_controller_lock); + if (gic_arch_extn.irq_set_type) + gic_arch_extn.irq_set_type(d, type); + val = readl(base + GIC_DIST_CONFIG + confoff); if (type == IRQ_TYPE_LEVEL_HIGH) val &= ~confmask; @@ -141,6 +163,14 @@ static int gic_set_type(struct irq_data *d, unsigned int type) return 0; } +static int gic_retrigger(struct irq_data *d) +{ + if (gic_arch_extn.irq_retrigger) + return gic_arch_extn.irq_retrigger(d); + + return -ENXIO; +} + #ifdef CONFIG_SMP static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) @@ -166,6 +196,21 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, } #endif +#ifdef CONFIG_PM +static int gic_set_wake(struct irq_data *d, unsigned int on) +{ + int ret = -ENXIO; + + if (gic_arch_extn.irq_set_wake) + ret = gic_arch_extn.irq_set_wake(d, on); + + return ret; +} + +#else +#define gic_set_wake NULL +#endif + static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) { struct gic_chip_data *chip_data = get_irq_data(irq); @@ -201,9 +246,11 @@ static struct irq_chip gic_chip = { .irq_mask = gic_mask_irq, .irq_unmask = gic_unmask_irq, .irq_set_type = gic_set_type, + .irq_retrigger = gic_retrigger, #ifdef CONFIG_SMP .irq_set_affinity = gic_set_affinity, #endif + .irq_set_wake = gic_set_wake, }; void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) diff --git a/arch/arm/include/asm/hardware/gic.h
b/arch/arm/include/asm/hardware/gic.h index 84557d32100..0691f9dcc50 100644 --- a/arch/arm/include/asm/hardware/gic.h +++ b/arch/arm/include/asm/hardware/gic.h @@ -34,6 +34,7 @@ #ifndef __ASSEMBLY__ extern void __iomem *gic_cpu_base_addr; +extern struct irq_chip gic_arch_extn; void gic_init(unsigned int, unsigned int, void __iomem *, void __iomem *); void gic_secondary_init(unsigned int); -- cgit v1.2.3-70-g09d2 From 5dab26af1bacad9a7189d904fbc8b4fe8e95dd81 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 4 Mar 2011 12:38:54 +0100 Subject: ARM: 6784/1: errata: no automatic Store Buffer drain on Cortex-A9 On revisions of the Cortex-A9 prior to r2p0, the Store Buffer does not have any automatic draining mechanism and therefore a livelock may occur if an external agent continuously polls a memory location waiting to observe an update. This workaround defines cpu_relax() as smp_mb(), preventing correctly written polling loops from denying visibility of updates to memory. Acked-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/Kconfig | 11 +++++++++++ arch/arm/include/asm/processor.h | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) (limited to 'arch/arm/include') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index ec0f6589af0..d3f2de37a4b 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1213,6 +1213,17 @@ config ARM_ERRATA_754322 the new ASID. This workaround places two dsb instructions in the mm switching code so that no page table walks can cross the ASID switch. +config ARM_ERRATA_754327 + bool "ARM errata: no automatic Store Buffer drain" + depends on CPU_V7 && SMP + help + This option enables the workaround for the 754327 Cortex-A9 (prior to + r2p0) erratum. The Store Buffer does not have any automatic draining + mechanism and therefore a livelock may occur if an external agent + continuously polls a memory location waiting to observe an update. + This workaround defines cpu_relax() as smp_mb(), preventing correctly + written polling loops from denying visibility of updates to memory. + endmenu source "arch/arm/common/Kconfig" diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index 67357baaeee..7a1f03c10f1 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h @@ -95,7 +95,7 @@ extern void release_thread(struct task_struct *); unsigned long get_wchan(struct task_struct *p); -#if __LINUX_ARM_ARCH__ == 6 +#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327) #define cpu_relax() smp_mb() #else #define cpu_relax() barrier() -- cgit v1.2.3-70-g09d2 From 23bfdacf4eb525ff3404161429cedaa281c23e47 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 10 Mar 2011 14:03:01 +0100 Subject: ARM: 6798/1: aout-core: zero thread debug registers in a.out core dump The removal of the single-step emulation from ptrace on ARM means that thread_struct no longer has software breakpoint fields in its debug member. This patch fixes the a.out core dump code so that the debug registers are zeroed rather than trying to copy from non-existent fields. 
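A minimal sketch of the resulting dump behaviour (plain user-space C with a toy stand-in for struct user, since the real code operates on kernel-internal state):

    #include <stdio.h>
    #include <string.h>

    /* toy stand-in for the debug-register slots of the a.out 'struct user' */
    struct user_stub {
            int u_debugreg[8];
    };

    int main(void)
    {
            struct user_stub dump = { { 1, 2, 3, 4, 5, 6, 7, 8 } };
            int i;

            /* like the patched aout_dump_thread(): clear the slots wholesale
             * rather than copying from thread_struct fields that no longer
             * exist */
            memset(dump.u_debugreg, 0, sizeof(dump.u_debugreg));

            for (i = 0; i < 8; i++)
                    printf("%d", dump.u_debugreg[i]);   /* prints 00000000 */
            printf("\n");
            return 0;
    }

Consumers of such core files now simply see zeroed debug registers, matching the fact that the kernel no longer tracks that state.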
Cc: Nicolas Pitre Signed-off-by: Bryan Wu Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/a.out-core.h | 6 +----- arch/arm/include/asm/user.h | 2 +- 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h index 93d04acaa31..92f10cb5c70 100644 --- a/arch/arm/include/asm/a.out-core.h +++ b/arch/arm/include/asm/a.out-core.h @@ -32,11 +32,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT; dump->u_ssize = 0; - dump->u_debugreg[0] = tsk->thread.debug.bp[0].address; - dump->u_debugreg[1] = tsk->thread.debug.bp[1].address; - dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn.arm; - dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn.arm; - dump->u_debugreg[4] = tsk->thread.debug.nsaved; + memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg)); if (dump->start_stack < 0x04000000) dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT; diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h index 05ac4b06876..35917b3a97f 100644 --- a/arch/arm/include/asm/user.h +++ b/arch/arm/include/asm/user.h @@ -71,7 +71,7 @@ struct user{ /* the registers. */ unsigned long magic; /* To uniquely identify a core file */ char u_comm[32]; /* User command that was responsible */ - int u_debugreg[8]; + int u_debugreg[8]; /* No longer used */ struct user_fp u_fp; /* FP state */ struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */ /* the FP registers. */ -- cgit v1.2.3-70-g09d2 From 522d7decc0370070448a8c28982c8dfd8970489e Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Thu, 10 Mar 2011 18:47:31 -0800 Subject: futex: Remove redundant pagefault_disable in futex_atomic_cmpxchg_inatomic() kernel/futex.c disables page faults before calling futex_atomic_cmpxchg_inatomic(), so there is no need to do it again within that function. Signed-off-by: Michel Lespinasse Cc: Darren Hart Cc: Peter Zijlstra Cc: Matt Turner Cc: Russell King Cc: David Howells Cc: Tony Luck Cc: Michal Simek Cc: Ralf Baechle Cc: "James E.J. Bottomley" Cc: Benjamin Herrenschmidt Cc: Martin Schwidefsky Cc: Paul Mundt Cc: "David S. Miller" Cc: Chris Metcalf Cc: Linus Torvalds LKML-Reference: <20110311024731.GB26122@google.com> Signed-off-by: Thomas Gleixner --- arch/arm/include/asm/futex.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index b33fe7065b3..7133a862083 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h @@ -95,7 +95,8 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - pagefault_disable(); /* implies preempt_disable() */ + /* Note that preemption is disabled by futex_atomic_cmpxchg_inatomic + * call sites. 
*/ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" "1: " T(ldr) " %0, [%3]\n" @@ -115,8 +116,6 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) : "cc", "memory"); - pagefault_enable(); /* subsumes preempt_enable() */ - return val; } -- cgit v1.2.3-70-g09d2 From 37a9d912b24f96a0591773e6e6c3642991ae5a70 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Thu, 10 Mar 2011 18:48:51 -0800 Subject: futex: Sanitize cmpxchg_futex_value_locked API The cmpxchg_futex_value_locked API was funny in that it returned either the original, user-exposed futex value OR an error code such as -EFAULT. This was confusing at best, and could be a source of livelocks in places that retry the cmpxchg_futex_value_locked after trying to fix the issue by running fault_in_user_writeable(). This change makes the cmpxchg_futex_value_locked API more similar to the get_futex_value_locked one, returning an error code and updating the original value through a reference argument. Signed-off-by: Michel Lespinasse Acked-by: Chris Metcalf [tile] Acked-by: Tony Luck [ia64] Acked-by: Thomas Gleixner Tested-by: Michal Simek [microblaze] Acked-by: David Howells [frv] Cc: Darren Hart Cc: Peter Zijlstra Cc: Matt Turner Cc: Russell King Cc: Ralf Baechle Cc: "James E.J. Bottomley" Cc: Benjamin Herrenschmidt Cc: Martin Schwidefsky Cc: Paul Mundt Cc: "David S. Miller" Cc: Linus Torvalds LKML-Reference: <20110311024851.GC26122@google.com> Signed-off-by: Thomas Gleixner --- arch/alpha/include/asm/futex.h | 22 +++++++++--------- arch/arm/include/asm/futex.h | 18 ++++++++------- arch/frv/include/asm/futex.h | 3 ++- arch/ia64/include/asm/futex.h | 9 +++++--- arch/microblaze/include/asm/futex.h | 24 +++++++++++--------- arch/mips/include/asm/futex.h | 32 +++++++++++++------------- arch/parisc/include/asm/futex.h | 18 +++++++-------- arch/powerpc/include/asm/futex.h | 20 +++++++++-------- arch/s390/include/asm/futex.h | 4 ++-- arch/s390/include/asm/uaccess.h | 2 +- arch/s390/lib/uaccess.h | 4 ++-- arch/s390/lib/uaccess_pt.c | 13 ++++++----- arch/s390/lib/uaccess_std.c | 6 +++-- arch/sh/include/asm/futex-irq.h | 9 ++++---- arch/sh/include/asm/futex.h | 5 +++-- arch/sparc/include/asm/futex_64.h | 16 ++++++++----- arch/tile/include/asm/futex.h | 7 +++--- arch/x86/include/asm/futex.h | 16 +++++++------ include/asm-generic/futex.h | 3 ++- kernel/futex.c | 45 +++++++++++++------------------------ 20 files changed, 144 insertions(+), 132 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h index 945de222ab9..c4e5c2850cc 100644 --- a/arch/alpha/include/asm/futex.h +++ b/arch/alpha/include/asm/futex.h @@ -81,21 +81,22 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, + int oldval, int newval) { - int prev, cmp; + int ret = 0, prev, cmp; if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; __asm__ __volatile__ ( __ASM_SMP_MB - "1: ldl_l %0,0(%2)\n" - " cmpeq %0,%3,%1\n" - " beq %1,3f\n" - " mov %4,%1\n" - "2: stl_c %1,0(%2)\n" - " beq %1,4f\n" + "1: ldl_l %1,0(%3)\n" + " cmpeq %1,%4,%2\n" + " beq %2,3f\n" + " mov %5,%2\n" + "2: stl_c %2,0(%3)\n" + " beq %2,4f\n" "3: .subsection 2\n" "4: br 1b\n" " .previous\n" @@ -105,11 +106,12 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int 
newval) " .long 2b-.\n" " lda $31,3b-2b(%0)\n" " .previous\n" - : "=&r"(prev), "=&r"(cmp) + : "+r"(ret), "=&r"(prev), "=&r"(cmp) : "r"(uaddr), "r"((long)oldval), "r"(newval) : "memory"); - return prev; + *uval = prev; + return ret; } #endif /* __KERNEL__ */ diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index 7133a862083..d20b78fce75 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h @@ -88,9 +88,10 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, + int oldval, int newval) { - int val; + int ret = 0, val; if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; @@ -99,24 +100,25 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) * call sites. */ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" - "1: " T(ldr) " %0, [%3]\n" - " teq %0, %1\n" + "1: " T(ldr) " %1, [%4]\n" + " teq %1, %2\n" " it eq @ explicit IT needed for the 2b label\n" - "2: " T(streq) " %2, [%3]\n" + "2: " T(streq) " %3, [%4]\n" "3:\n" " .pushsection __ex_table,\"a\"\n" " .align 3\n" " .long 1b, 4f, 2b, 4f\n" " .popsection\n" " .pushsection .fixup,\"ax\"\n" - "4: mov %0, %4\n" + "4: mov %0, %5\n" " b 3b\n" " .popsection" - : "=&r" (val) + : "+r" (ret), "=&r" (val) : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) : "cc", "memory"); - return val; + *uval = val; + return ret; } #endif /* !SMP */ diff --git a/arch/frv/include/asm/futex.h b/arch/frv/include/asm/futex.h index 08b3d1da358..0548f8e4d11 100644 --- a/arch/frv/include/asm/futex.h +++ b/arch/frv/include/asm/futex.h @@ -10,7 +10,8 @@ extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr); static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, + int oldval, int newval) { return -ENOSYS; } diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h index c7f0f062239..b0728404dad 100644 --- a/arch/ia64/include/asm/futex.h +++ b/arch/ia64/include/asm/futex.h @@ -100,23 +100,26 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, + int oldval, int newval) { if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; { - register unsigned long r8 __asm ("r8"); + register unsigned long r8 __asm ("r8") = 0; + unsigned long prev; __asm__ __volatile__( " mf;; \n" " mov ar.ccv=%3;; \n" "[1:] cmpxchg4.acq %0=[%1],%2,ar.ccv \n" " .xdata4 \"__ex_table\", 1b-., 2f-. 
\n" "[2:]" - : "=r" (r8) + : "=r" (prev) : "r" (uaddr), "r" (newval), "rO" ((long) (unsigned) oldval) : "memory"); + *uval = prev; return r8; } } diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h index ad3fd61b2fe..fa019ed65df 100644 --- a/arch/microblaze/include/asm/futex.h +++ b/arch/microblaze/include/asm/futex.h @@ -94,31 +94,33 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, + int oldval, int newval) { - int prev, cmp; + int ret = 0, prev, cmp; if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - __asm__ __volatile__ ("1: lwx %0, %2, r0; \ - cmp %1, %0, %3; \ - beqi %1, 3f; \ - 2: swx %4, %2, r0; \ - addic %1, r0, 0; \ - bnei %1, 1b; \ + __asm__ __volatile__ ("1: lwx %1, %3, r0; \ + cmp %2, %1, %4; \ + beqi %2, 3f; \ + 2: swx %5, %3, r0; \ + addic %2, r0, 0; \ + bnei %2, 1b; \ 3: \ .section .fixup,\"ax\"; \ 4: brid 3b; \ - addik %0, r0, %5; \ + addik %0, r0, %6; \ .previous; \ .section __ex_table,\"a\"; \ .word 1b,4b,2b,4b; \ .previous;" \ - : "=&r" (prev), "=&r"(cmp) \ + : "+r" (ret), "=&r" (prev), "=&r"(cmp) \ : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)); - return prev; + *uval = prev; + return ret; } #endif /* __KERNEL__ */ diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h index b9cce90346c..692a24bd83b 100644 --- a/arch/mips/include/asm/futex.h +++ b/arch/mips/include/asm/futex.h @@ -132,9 +132,10 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, + int oldval, int newval) { - int retval; + int ret = 0, val; if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; @@ -145,25 +146,25 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) " .set push \n" " .set noat \n" " .set mips3 \n" - "1: ll %0, %2 \n" - " bne %0, %z3, 3f \n" + "1: ll %1, %3 \n" + " bne %1, %z4, 3f \n" " .set mips0 \n" - " move $1, %z4 \n" + " move $1, %z5 \n" " .set mips3 \n" - "2: sc $1, %1 \n" + "2: sc $1, %2 \n" " beqzl $1, 1b \n" __WEAK_LLSC_MB "3: \n" " .set pop \n" " .section .fixup,\"ax\" \n" - "4: li %0, %5 \n" + "4: li %0, %6 \n" " j 3b \n" " .previous \n" " .section __ex_table,\"a\" \n" " "__UA_ADDR "\t1b, 4b \n" " "__UA_ADDR "\t2b, 4b \n" " .previous \n" - : "=&r" (retval), "=R" (*uaddr) + : "+r" (ret), "=&r" (val), "=R" (*uaddr) : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) : "memory"); } else if (cpu_has_llsc) { @@ -172,31 +173,32 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) " .set push \n" " .set noat \n" " .set mips3 \n" - "1: ll %0, %2 \n" - " bne %0, %z3, 3f \n" + "1: ll %1, %3 \n" + " bne %1, %z4, 3f \n" " .set mips0 \n" - " move $1, %z4 \n" + " move $1, %z5 \n" " .set mips3 \n" - "2: sc $1, %1 \n" + "2: sc $1, %2 \n" " beqz $1, 1b \n" __WEAK_LLSC_MB "3: \n" " .set pop \n" " .section .fixup,\"ax\" \n" - "4: li %0, %5 \n" + "4: li %0, %6 \n" " j 3b \n" " .previous \n" " .section __ex_table,\"a\" \n" " "__UA_ADDR "\t1b, 4b \n" " "__UA_ADDR "\t2b, 4b \n" " .previous \n" - : "=&r" (retval), "=R" (*uaddr) + : "+r" (ret), "=&r" (val), "=R" (*uaddr) : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) : "memory"); } else return -ENOSYS; - return retval; + *uval = val; + return ret; } #endif diff --git 
a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h index 0c705c3a55e..4c6d8672325 100644 --- a/arch/parisc/include/asm/futex.h +++ b/arch/parisc/include/asm/futex.h @@ -51,10 +51,10 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) /* Non-atomic version */ static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, + int oldval, int newval) { - int err = 0; - int uval; + int val; /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is * our gateway page, and causes no end of trouble... @@ -65,12 +65,12 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - err = get_user(uval, uaddr); - if (err) return -EFAULT; - if (uval == oldval) - err = put_user(newval, uaddr); - if (err) return -EFAULT; - return uval; + if (get_user(val, uaddr)) + return -EFAULT; + if (val == oldval && put_user(newval, uaddr)) + return -EFAULT; + *uval = val; + return 0; } #endif /*__KERNEL__*/ diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h index 7c589ef81fb..631e8da6006 100644 --- a/arch/powerpc/include/asm/futex.h +++ b/arch/powerpc/include/asm/futex.h @@ -82,35 +82,37 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, + int oldval, int newval) { - int prev; + int ret = 0, prev; if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; __asm__ __volatile__ ( PPC_RELEASE_BARRIER -"1: lwarx %0,0,%2 # futex_atomic_cmpxchg_inatomic\n\ - cmpw 0,%0,%3\n\ +"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\ + cmpw 0,%1,%4\n\ bne- 3f\n" - PPC405_ERR77(0,%2) -"2: stwcx. %4,0,%2\n\ + PPC405_ERR77(0,%3) +"2: stwcx. %5,0,%3\n\ bne- 1b\n" PPC_ACQUIRE_BARRIER "3: .section .fixup,\"ax\"\n\ -4: li %0,%5\n\ +4: li %0,%6\n\ b 3b\n\ .previous\n\ .section __ex_table,\"a\"\n\ .align 3\n\ " PPC_LONG "1b,4b,2b,4b\n\ .previous" \ - : "=&r" (prev), "+m" (*uaddr) + : "+r" (ret), "=&r" (prev), "+m" (*uaddr) : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT) : "cc", "memory"); - return prev; + *uval = prev; + return ret; } #endif /* __KERNEL__ */ diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h index 5c5d02de49e..27ac515ef59 100644 --- a/arch/s390/include/asm/futex.h +++ b/arch/s390/include/asm/futex.h @@ -39,13 +39,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) return ret; } -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, +static inline int futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, int oldval, int newval) { if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - return uaccess.futex_atomic_cmpxchg(uaddr, oldval, newval); + return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval); } #endif /* __KERNEL__ */ diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index d6b1ed0ec52..549adf6a9b8 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -84,7 +84,7 @@ struct uaccess_ops { size_t (*strnlen_user)(size_t, const char __user *); size_t (*strncpy_from_user)(size_t, const char __user *, char *); int (*futex_atomic_op)(int op, int __user *, int oparg, int *old); - int (*futex_atomic_cmpxchg)(int __user *, int old, int new); + int (*futex_atomic_cmpxchg)(int *, int __user *, int old, int new); }; extern struct uaccess_ops uaccess; diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h index 126011df14f..89a80674e44 100644 --- a/arch/s390/lib/uaccess.h +++ b/arch/s390/lib/uaccess.h @@ -12,12 +12,12 @@ extern size_t copy_from_user_std(size_t, const void __user *, void *); extern size_t copy_to_user_std(size_t, void __user *, const void *); extern size_t strnlen_user_std(size_t, const char __user *); extern size_t strncpy_from_user_std(size_t, const char __user *, char *); -extern int futex_atomic_cmpxchg_std(int __user *, int, int); +extern int futex_atomic_cmpxchg_std(int *, int __user *, int, int); extern int futex_atomic_op_std(int, int __user *, int, int *); extern size_t copy_from_user_pt(size_t, const void __user *, void *); extern size_t copy_to_user_pt(size_t, void __user *, const void *); extern int futex_atomic_op_pt(int, int __user *, int, int *); -extern int futex_atomic_cmpxchg_pt(int __user *, int, int); +extern int futex_atomic_cmpxchg_pt(int *, int __user *, int, int); #endif /* __ARCH_S390_LIB_UACCESS_H */ diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index 404f2de296d..b3cebcd52f5 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -354,26 +354,29 @@ int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) return ret; } -static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) +static int __futex_atomic_cmpxchg_pt(int *uval, int __user *uaddr, + int oldval, int newval) { int ret; asm volatile("0: cs %1,%4,0(%5)\n" - "1: lr %0,%1\n" + "1: la %0,0\n" "2:\n" EX_TABLE(0b,2b) EX_TABLE(1b,2b) : "=d" (ret), "+d" (oldval), "=m" (*uaddr) : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) : "cc", "memory" ); + *uval = oldval; return ret; } -int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) +int futex_atomic_cmpxchg_pt(int *uval, int __user *uaddr, + int oldval, int newval) { int ret; if (segment_eq(get_fs(), KERNEL_DS)) - return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval); + return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); spin_lock(&current->mm->page_table_lock); uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); if (!uaddr) { @@ -382,7 +385,7 @@ int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) } get_page(virt_to_page(uaddr)); spin_unlock(&current->mm->page_table_lock); - ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval); + ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); put_page(virt_to_page(uaddr)); return ret; } diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c index a6c4f7ed24a..1d6643c0b95 100644 --- a/arch/s390/lib/uaccess_std.c +++ b/arch/s390/lib/uaccess_std.c @@ -287,19 +287,21 @@ int futex_atomic_op_std(int op,
int __user *uaddr, int oparg, int *old) return ret; } -int futex_atomic_cmpxchg_std(int __user *uaddr, int oldval, int newval) +int futex_atomic_cmpxchg_std(int *uval, int __user *uaddr, + int oldval, int newval) { int ret; asm volatile( " sacf 256\n" "0: cs %1,%4,0(%5)\n" - "1: lr %0,%1\n" + "1: la %0,0\n" "2: sacf 0\n" EX_TABLE(0b,2b) EX_TABLE(1b,2b) : "=d" (ret), "+d" (oldval), "=m" (*uaddr) : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) : "cc", "memory" ); + *uval = oldval; return ret; } diff --git a/arch/sh/include/asm/futex-irq.h b/arch/sh/include/asm/futex-irq.h index a9f16a7f9ae..7b701cbd1e8 100644 --- a/arch/sh/include/asm/futex-irq.h +++ b/arch/sh/include/asm/futex-irq.h @@ -88,7 +88,8 @@ static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, return ret; } -static inline int atomic_futex_op_cmpxchg_inatomic(int __user *uaddr, +static inline int atomic_futex_op_cmpxchg_inatomic(int *uval, + int __user *uaddr, int oldval, int newval) { unsigned long flags; @@ -102,10 +103,8 @@ static inline int atomic_futex_op_cmpxchg_inatomic(int __user *uaddr, local_irq_restore(flags); - if (ret) - return ret; - - return prev; + *uval = prev; + return ret; } #endif /* __ASM_SH_FUTEX_IRQ_H */ diff --git a/arch/sh/include/asm/futex.h b/arch/sh/include/asm/futex.h index 68256ec5fa3..a8a5125dc9b 100644 --- a/arch/sh/include/asm/futex.h +++ b/arch/sh/include/asm/futex.h @@ -65,12 +65,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, + int oldval, int newval) { if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - return atomic_futex_op_cmpxchg_inatomic(uaddr, oldval, newval); + return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval); } #endif /* __KERNEL__ */ diff --git a/arch/sparc/include/asm/futex_64.h b/arch/sparc/include/asm/futex_64.h index 47f95839dc6..e0862200d6a 100644 --- a/arch/sparc/include/asm/futex_64.h +++ b/arch/sparc/include/asm/futex_64.h @@ -85,26 +85,30 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, + int oldval, int newval) { + int ret = 0; + __asm__ __volatile__( - "\n1: casa [%3] %%asi, %2, %0\n" + "\n1: casa [%4] %%asi, %3, %1\n" "2:\n" " .section .fixup,#alloc,#execinstr\n" " .align 4\n" "3: sethi %%hi(2b), %0\n" " jmpl %0 + %%lo(2b), %%g0\n" - " mov %4, %0\n" + " mov %5, %0\n" " .previous\n" " .section __ex_table,\"a\"\n" " .align 4\n" " .word 1b, 3b\n" " .previous\n" - : "=r" (newval) - : "0" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT) + : "+r" (ret), "=r" (newval) + : "1" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT) : "memory"); - return newval; + *uval = newval; + return ret; } #endif /* !(_SPARC64_FUTEX_H) */ diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h index fe0d10dcae5..664b20aa258 100644 --- a/arch/tile/include/asm/futex.h +++ b/arch/tile/include/asm/futex.h @@ -119,8 +119,8 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) return ret; } -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, - int newval) +static inline int futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, + int oldval, int newval) { struct __get_user asm_ret; @@ -128,7 +128,8 
@@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, return -EFAULT; asm_ret = futex_cmpxchg(uaddr, oldval, newval); - return asm_ret.err ? asm_ret.err : asm_ret.val; + *uval = asm_ret.val; + return asm_ret.err; } #ifndef __tilegx__ diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h index 1f11ce44e95..884c0b5676f 100644 --- a/arch/x86/include/asm/futex.h +++ b/arch/x86/include/asm/futex.h @@ -109,9 +109,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) return ret; } -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, - int newval) +static inline int futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, + int oldval, int newval) { + int ret = 0; #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) /* Real i386 machines have no cmpxchg instruction */ @@ -122,18 +123,19 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n" + asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" "2:\t.section .fixup, \"ax\"\n" - "3:\tmov %2, %0\n" + "3:\tmov %3, %0\n" "\tjmp 2b\n" "\t.previous\n" _ASM_EXTABLE(1b, 3b) - : "=a" (oldval), "+m" (*uaddr) - : "i" (-EFAULT), "r" (newval), "0" (oldval) + : "+r" (ret), "=a" (oldval), "+m" (*uaddr) + : "i" (-EFAULT), "r" (newval), "1" (oldval) : "memory" ); - return oldval; + *uval = oldval; + return ret; } #endif diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h index 3c2344f4813..132bf5227b4 100644 --- a/include/asm-generic/futex.h +++ b/include/asm-generic/futex.h @@ -48,7 +48,8 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, + int oldval, int newval) { return -ENOSYS; } diff --git a/kernel/futex.c b/kernel/futex.c index 773815465ba..237f14bfc02 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -381,15 +381,16 @@ static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, return NULL; } -static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval) +static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr, + u32 uval, u32 newval) { - u32 curval; + int ret; pagefault_disable(); - curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); + ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval); pagefault_enable(); - return curval; + return ret; } static int get_futex_value_locked(u32 *dest, u32 __user *from) @@ -688,9 +689,7 @@ retry: if (set_waiters) newval |= FUTEX_WAITERS; - curval = cmpxchg_futex_value_locked(uaddr, 0, newval); - - if (unlikely(curval == -EFAULT)) + if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval))) return -EFAULT; /* @@ -728,9 +727,7 @@ retry: lock_taken = 1; } - curval = cmpxchg_futex_value_locked(uaddr, uval, newval); - - if (unlikely(curval == -EFAULT)) + if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))) return -EFAULT; if (unlikely(curval != uval)) goto retry; @@ -843,9 +840,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) newval = FUTEX_WAITERS | task_pid_vnr(new_owner); - curval = cmpxchg_futex_value_locked(uaddr, uval, newval); - - if (curval == -EFAULT) + if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) ret = -EFAULT; else if (curval != uval) ret = 
-EINVAL; @@ -880,10 +875,8 @@ static int unlock_futex_pi(u32 __user *uaddr, u32 uval) * There is no waiter, so we unlock the futex. The owner died * bit has not to be preserved here. We are the owner: */ - oldval = cmpxchg_futex_value_locked(uaddr, uval, 0); - - if (oldval == -EFAULT) - return oldval; + if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0)) + return -EFAULT; if (oldval != uval) return -EAGAIN; @@ -1578,9 +1571,7 @@ retry: while (1) { newval = (uval & FUTEX_OWNER_DIED) | newtid; - curval = cmpxchg_futex_value_locked(uaddr, uval, newval); - - if (curval == -EFAULT) + if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) goto handle_fault; if (curval == uval) break; @@ -2073,11 +2064,8 @@ retry: * again. If it succeeds then we can return without waking * anyone else up: */ - if (!(uval & FUTEX_OWNER_DIED)) - uval = cmpxchg_futex_value_locked(uaddr, vpid, 0); - - - if (unlikely(uval == -EFAULT)) + if (!(uval & FUTEX_OWNER_DIED) && + cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0)) goto pi_faulted; /* * Rare case: we managed to release the lock atomically, @@ -2464,9 +2452,7 @@ retry: * userspace. */ mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; - nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval); - - if (nval == -EFAULT) + if (futex_atomic_cmpxchg_inatomic(&nval, uaddr, uval, mval)) return -1; if (nval != uval) @@ -2679,8 +2665,7 @@ static int __init futex_init(void) * implementation, the non-functional ones will return * -ENOSYS. */ - curval = cmpxchg_futex_value_locked(NULL, 0, 0); - if (curval == -EFAULT) + if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT) futex_cmpxchg_enabled = 1; for (i = 0; i < ARRAY_SIZE(futex_queues); i++) { -- cgit v1.2.3-70-g09d2 From 8d7718aa082aaf30a0b4989e1f04858952f941bc Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Thu, 10 Mar 2011 18:50:58 -0800 Subject: futex: Sanitize futex ops argument types Change futex_atomic_op_inuser and futex_atomic_cmpxchg_inatomic prototypes to use u32 types for the futex as this is the data type the futex core code uses all over the place. Signed-off-by: Michel Lespinasse Cc: Darren Hart Cc: Peter Zijlstra Cc: Matt Turner Cc: Russell King Cc: David Howells Cc: Tony Luck Cc: Michal Simek Cc: Ralf Baechle Cc: "James E.J. Bottomley" Cc: Benjamin Herrenschmidt Cc: Martin Schwidefsky Cc: Paul Mundt Cc: "David S. 
Miller" Cc: Chris Metcalf Cc: Linus Torvalds LKML-Reference: <20110311025058.GD26122@google.com> Signed-off-by: Thomas Gleixner --- arch/alpha/include/asm/futex.h | 13 +++++++------ arch/arm/include/asm/futex.h | 13 +++++++------ arch/frv/include/asm/futex.h | 6 +++--- arch/frv/kernel/futex.c | 14 +++++++------- arch/ia64/include/asm/futex.h | 10 +++++----- arch/microblaze/include/asm/futex.h | 13 +++++++------ arch/mips/include/asm/futex.h | 13 +++++++------ arch/parisc/include/asm/futex.h | 12 ++++++------ arch/powerpc/include/asm/futex.h | 13 +++++++------ arch/s390/include/asm/futex.h | 10 +++++----- arch/s390/include/asm/uaccess.h | 4 ++-- arch/s390/lib/uaccess.h | 8 ++++---- arch/s390/lib/uaccess_pt.c | 12 ++++++------ arch/s390/lib/uaccess_std.c | 6 +++--- arch/sh/include/asm/futex-irq.h | 19 ++++++++++--------- arch/sh/include/asm/futex.h | 10 +++++----- arch/sparc/include/asm/futex_64.h | 8 ++++---- arch/tile/include/asm/futex.h | 24 ++++++++++++------------ arch/x86/include/asm/futex.h | 10 +++++----- include/asm-generic/futex.h | 8 ++++---- 20 files changed, 116 insertions(+), 110 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h index c4e5c2850cc..e8a761aee08 100644 --- a/arch/alpha/include/asm/futex.h +++ b/arch/alpha/include/asm/futex.h @@ -29,7 +29,7 @@ : "r" (uaddr), "r"(oparg) \ : "memory") -static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -39,7 +39,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -81,12 +81,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, - int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - int ret = 0, prev, cmp; + int ret = 0, cmp; + u32 prev; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; __asm__ __volatile__ ( diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index d20b78fce75..0e29d8e6a5c 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h @@ -35,7 +35,7 @@ : "cc", "memory") static inline int -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -46,7 +46,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); /* implies preempt_disable() */ @@ -88,12 +88,13 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, - int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - int ret = 0, val; + int ret = 0; + u32 val; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if 
(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; /* Note that preemption is disabled by futex_atomic_cmpxchg_inatomic diff --git a/arch/frv/include/asm/futex.h b/arch/frv/include/asm/futex.h index 0548f8e4d11..4bea27f50a7 100644 --- a/arch/frv/include/asm/futex.h +++ b/arch/frv/include/asm/futex.h @@ -7,11 +7,11 @@ #include #include -extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr); +extern int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr); static inline int -futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, - int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { return -ENOSYS; } diff --git a/arch/frv/kernel/futex.c b/arch/frv/kernel/futex.c index 14f64b054c7..d155ca9e509 100644 --- a/arch/frv/kernel/futex.c +++ b/arch/frv/kernel/futex.c @@ -18,7 +18,7 @@ * the various futex operations; MMU fault checking is ignored under no-MMU * conditions */ -static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_oldval) +static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, int *_oldval) { int oldval, ret; @@ -50,7 +50,7 @@ static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_o return ret; } -static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_oldval) +static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, int *_oldval) { int oldval, ret; @@ -83,7 +83,7 @@ static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_o return ret; } -static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_oldval) +static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, int *_oldval) { int oldval, ret; @@ -116,7 +116,7 @@ static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_ol return ret; } -static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_oldval) +static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, int *_oldval) { int oldval, ret; @@ -149,7 +149,7 @@ static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_o return ret; } -static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_oldval) +static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_oldval) { int oldval, ret; @@ -186,7 +186,7 @@ static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_o /* * do the futex operations */ -int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) +int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -197,7 +197,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h index b0728404dad..8428525ddb2 100644 --- a/arch/ia64/include/asm/futex.h +++ b/arch/ia64/include/asm/futex.h @@ -46,7 +46,7 @@ do { \ } while (0) static inline int -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -56,7 +56,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (encoded_op & 
(FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -100,10 +100,10 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, - int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; { diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h index fa019ed65df..b0526d2716f 100644 --- a/arch/microblaze/include/asm/futex.h +++ b/arch/microblaze/include/asm/futex.h @@ -29,7 +29,7 @@ }) static inline int -futex_atomic_op_inuser(int encoded_op, int __user *uaddr) +futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -39,7 +39,7 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -94,12 +94,13 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, - int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - int ret = 0, prev, cmp; + int ret = 0, cmp; + u32 prev; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; __asm__ __volatile__ ("1: lwx %1, %3, r0; \ diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h index 692a24bd83b..6ebf1734b41 100644 --- a/arch/mips/include/asm/futex.h +++ b/arch/mips/include/asm/futex.h @@ -75,7 +75,7 @@ } static inline int -futex_atomic_op_inuser(int encoded_op, int __user *uaddr) +futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -85,7 +85,7 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) + if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -132,12 +132,13 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, - int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - int ret = 0, val; + int ret = 0; + u32 val; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; if (cpu_has_llsc && R10000_LLSC_WAR) { diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h index 4c6d8672325..67a33cc27ef 100644 --- a/arch/parisc/include/asm/futex.h +++ b/arch/parisc/include/asm/futex.h @@ -8,7 +8,7 @@ #include static inline int -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -18,7 +18,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -51,10 +51,10 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) /* Non-atomic version */ static inline int -futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, - int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - int val; + u32 val; /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is * our gateway page, and causes no end of trouble... @@ -62,7 +62,7 @@ futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, if (segment_eq(KERNEL_DS, get_fs()) && !uaddr) return -EFAULT; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; if (get_user(val, uaddr)) diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h index 631e8da6006..c94e4a3fe2e 100644 --- a/arch/powerpc/include/asm/futex.h +++ b/arch/powerpc/include/asm/futex.h @@ -30,7 +30,7 @@ : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \ : "cr0", "memory") -static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -40,7 +40,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) + if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	pagefault_disable();
@@ -82,12 +82,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
-			      int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
 {
-	int ret = 0, prev;
+	int ret = 0;
+	u32 prev;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	__asm__ __volatile__ (
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index 27ac515ef59..81cf36b691f 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -7,7 +7,7 @@
 #include
 #include
 
-static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -18,7 +18,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	pagefault_disable();
@@ -39,10 +39,10 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
-						int oldval, int newval)
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+						u32 oldval, u32 newval)
 {
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 549adf6a9b8..2d9ea11f919 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -83,8 +83,8 @@ struct uaccess_ops {
 	size_t (*clear_user)(size_t, void __user *);
 	size_t (*strnlen_user)(size_t, const char __user *);
 	size_t (*strncpy_from_user)(size_t, const char __user *, char *);
-	int (*futex_atomic_op)(int op, int __user *, int oparg, int *old);
-	int (*futex_atomic_cmpxchg)(int *, int __user *, int old, int new);
+	int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old);
+	int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new);
 };
 
 extern struct uaccess_ops uaccess;
diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h
index 89a80674e44..1d2536cb630 100644
--- a/arch/s390/lib/uaccess.h
+++ b/arch/s390/lib/uaccess.h
@@ -12,12 +12,12 @@ extern size_t copy_from_user_std(size_t, const void __user *, void *);
 extern size_t copy_to_user_std(size_t, void __user *, const void *);
 extern size_t strnlen_user_std(size_t, const char __user *);
 extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
-extern int futex_atomic_cmpxchg_std(int *, int __user *, int, int);
-extern int futex_atomic_op_std(int, int __user *, int, int *);
+extern int futex_atomic_cmpxchg_std(u32 *, u32 __user *, u32, u32);
+extern int futex_atomic_op_std(int, u32 __user *, int, int *);
 
 extern size_t copy_from_user_pt(size_t, const void __user *, void *);
 extern size_t copy_to_user_pt(size_t, void __user *, const void *);
-extern int futex_atomic_op_pt(int, int __user *, int, int *);
-extern int futex_atomic_cmpxchg_pt(int *, int __user *, int, int);
+extern int futex_atomic_op_pt(int, u32 __user *, int, int *);
+extern int futex_atomic_cmpxchg_pt(u32 *, u32 __user *, u32, u32);
 
 #endif /* __ARCH_S390_LIB_UACCESS_H */
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index b3cebcd52f5..74833831417 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -302,7 +302,7 @@ fault:
 		: "0" (-EFAULT), "d" (oparg), "a" (uaddr),	\
 		  "m" (*uaddr) : "cc" );
 
-static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
+static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
 {
 	int oldval = 0, newval, ret;
 
@@ -335,7 +335,7 @@ static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
 	return ret;
 }
 
-int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
+int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
 {
 	int ret;
 
@@ -354,8 +354,8 @@ int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
 	return ret;
 }
 
-static int __futex_atomic_cmpxchg_pt(int *uval, int __user *uaddr,
-				     int oldval, int newval)
+static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
+				     u32 oldval, u32 newval)
 {
 	int ret;
 
@@ -370,8 +370,8 @@ static int __futex_atomic_cmpxchg_pt(int *uval, int __user *uaddr,
 	return ret;
 }
 
-int futex_atomic_cmpxchg_pt(int *uval, int __user *uaddr,
-			    int oldval, int newval)
+int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
+			    u32 oldval, u32 newval)
 {
 	int ret;
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index 1d6643c0b95..bb1a7eed42c 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -255,7 +255,7 @@ size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
 		: "0" (-EFAULT), "d" (oparg), "a" (uaddr),	\
 		  "m" (*uaddr) : "cc");
 
-int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old)
+int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old)
 {
 	int oldval = 0, newval, ret;
 
@@ -287,8 +287,8 @@ int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old)
 	return ret;
 }
 
-int futex_atomic_cmpxchg_std(int *uval, int __user *uaddr,
-			     int oldval, int newval)
+int futex_atomic_cmpxchg_std(u32 *uval, u32 __user *uaddr,
+			     u32 oldval, u32 newval)
 {
 	int ret;
diff --git a/arch/sh/include/asm/futex-irq.h b/arch/sh/include/asm/futex-irq.h
index 7b701cbd1e8..6cb9f193a95 100644
--- a/arch/sh/include/asm/futex-irq.h
+++ b/arch/sh/include/asm/futex-irq.h
@@ -3,7 +3,7 @@
 
 #include
 
-static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr,
+static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr,
 					   int *oldval)
 {
 	unsigned long flags;
@@ -20,7 +20,7 @@ static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr,
 	return ret;
 }
 
-static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr,
+static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr,
 					   int *oldval)
 {
 	unsigned long flags;
@@ -37,7 +37,7 @@ static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr,
 	return ret;
 }
 
-static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr,
+static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr,
 					  int *oldval)
 {
 	unsigned long flags;
@@ -54,7 +54,7 @@ static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr,
 	return ret;
 }
 
-static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr,
+static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr,
 					   int *oldval)
 {
 	unsigned long flags;
@@ -71,7 +71,7 @@ static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr,
 	return ret;
 }
 
-static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr,
+static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr,
 					   int *oldval)
 {
 	unsigned long flags;
@@ -88,12 +88,13 @@ static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr,
 	return ret;
 }
 
-static inline int atomic_futex_op_cmpxchg_inatomic(int *uval,
-						   int __user *uaddr,
-						   int oldval, int newval)
+static inline int atomic_futex_op_cmpxchg_inatomic(u32 *uval,
+						   u32 __user *uaddr,
+						   u32 oldval, u32 newval)
 {
 	unsigned long flags;
-	int ret, prev = 0;
+	int ret;
+	u32 prev = 0;
 
 	local_irq_save(flags);
diff --git a/arch/sh/include/asm/futex.h b/arch/sh/include/asm/futex.h
index a8a5125dc9b..7be39a646fb 100644
--- a/arch/sh/include/asm/futex.h
+++ b/arch/sh/include/asm/futex.h
@@ -10,7 +10,7 @@
 /* XXX: UP variants, fix for SH-4A and SMP.. */
 #include
 
-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -21,7 +21,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	pagefault_disable();
@@ -65,10 +65,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
-			      int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
 {
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval);
diff --git a/arch/sparc/include/asm/futex_64.h b/arch/sparc/include/asm/futex_64.h
index e0862200d6a..444e7bea23b 100644
--- a/arch/sparc/include/asm/futex_64.h
+++ b/arch/sparc/include/asm/futex_64.h
@@ -30,7 +30,7 @@
 	: "r" (uaddr), "r" (oparg), "i" (-EFAULT)	\
 	: "memory")
 
-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -38,7 +38,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret, tem;
 
-	if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(int))))
+	if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
 		return -EFAULT;
 	if (unlikely((((unsigned long) uaddr) & 0x3UL)))
 		return -EINVAL;
@@ -85,8 +85,8 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
-			      int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
 {
 	int ret = 0;
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
index 664b20aa258..d03ec124a59 100644
--- a/arch/tile/include/asm/futex.h
+++ b/arch/tile/include/asm/futex.h
@@ -29,16 +29,16 @@
 #include
 #include
 
-extern struct __get_user futex_set(int __user *v, int i);
-extern struct __get_user futex_add(int __user *v, int n);
-extern struct __get_user futex_or(int __user *v, int n);
-extern struct __get_user futex_andn(int __user *v, int n);
-extern struct __get_user futex_cmpxchg(int __user *v, int o, int n);
+extern struct __get_user futex_set(u32 __user *v, int i);
+extern struct __get_user futex_add(u32 __user *v, int n);
+extern struct __get_user futex_or(u32 __user *v, int n);
+extern struct __get_user futex_andn(u32 __user *v, int n);
+extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n);
 
 #ifndef __tilegx__
-extern struct __get_user futex_xor(int __user *v, int n);
+extern struct __get_user futex_xor(u32 __user *v, int n);
 #else
-static inline struct __get_user futex_xor(int __user *uaddr, int n)
+static inline struct __get_user futex_xor(u32 __user *uaddr, int n)
 {
 	struct __get_user asm_ret = __get_user_4(uaddr);
 	if (!asm_ret.err) {
@@ -53,7 +53,7 @@ static inline struct __get_user futex_xor(int __user *uaddr, int n)
 }
 #endif
 
-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -65,7 +65,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	pagefault_disable();
@@ -119,12 +119,12 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 	return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
-						int oldval, int newval)
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+						u32 oldval, u32 newval)
 {
 	struct __get_user asm_ret;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	asm_ret = futex_cmpxchg(uaddr, oldval, newval);
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index 884c0b5676f..d09bb03653f 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -37,7 +37,7 @@
 		     "+m" (*uaddr), "=&r" (tem)		\
 		     : "r" (oparg), "i" (-EFAULT), "1" (0))
 
-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -48,7 +48,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
@@ -109,8 +109,8 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 	return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
-						int oldval, int newval)
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+						u32 oldval, u32 newval)
 {
 	int ret = 0;
 
@@ -120,7 +120,7 @@ static inline int futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
 	return -ENOSYS;
 #endif
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index 132bf5227b4..01f227e1425 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -6,7 +6,7 @@
 #include
 
 static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -16,7 +16,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	pagefault_disable();
@@ -48,8 +48,8 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
-			      int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
 {
 	return -ENOSYS;
 }
-- cgit v1.2.3-70-g09d2
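A note on the encoded_op word that every futex_atomic_op_inuser() above decodes: it packs four
bit-fields (the operation in bits 28-30, with bit 31 flagging FUTEX_OP_OPARG_SHIFT; the comparison
in bits 24-27; and two sign-extended 12-bit arguments). The sketch below is a minimal userspace
demonstration of exactly that unpacking; it is illustrative only, and the numeric constants in
main() stand in for the uapi FUTEX_OP_* macros rather than being taken from the patches above.

#include <stdio.h>

/* Same shifts as the kernel code above; the left-then-right shift pairs
 * arithmetically sign-extend the two 12-bit argument fields. */
static void decode_futex_op(int encoded_op)
{
	int op = (encoded_op >> 28) & 7;	/* bits 28-30: operation */
	int cmp = (encoded_op >> 24) & 15;	/* bits 24-27: comparison */
	int oparg = (encoded_op << 8) >> 20;	/* bits 12-23, sign-extended */
	int cmparg = (encoded_op << 20) >> 20;	/* bits 0-11, sign-extended */

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
}

int main(void)
{
	/* op=1 (FUTEX_OP_ADD in the uapi numbering), cmp=0 (FUTEX_OP_CMP_EQ),
	 * oparg=1, cmparg=0, packed by hand for the demonstration. */
	decode_futex_op((1 << 28) | (0 << 24) | (1 << 12) | 0);
	return 0;
}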
From 07d5ecae2940ddd77746e2fb597dcf57d3c2e277 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Mon, 14 Mar 2011 20:00:30 +0100
Subject: arm: Remove bogus comment in futex_atomic_cmpxchg_inatomic()

commit 522d7dec(futex: Remove redundant pagefault_disable in
futex_atomic_cmpxchg_inatomic()) added a bogus comment.

 /* Note that preemption is disabled by futex_atomic_cmpxchg_inatomic
  * call sites. */

Bogus in two aspects:

1) pagefault_disable != preempt_disable even if the mechanism we use is
   the same

2) we have a call site which deliberately does not disable pagefaults
   as it wants the possible fault to be handled - though that has been
   changed for consistency reasons now.

Sigh. I really should have seen that when committing the above. :(

Catched-by-and-rightfully-ranted-at-by: Linus Torvalds
Signed-off-by: Thomas Gleixner
LKML-Reference:
Cc: Michel Lespinasse
Cc: Darren Hart
---
 arch/arm/include/asm/futex.h | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 0e29d8e6a5c..199a6b6de7f 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -97,9 +97,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	/* Note that preemption is disabled by futex_atomic_cmpxchg_inatomic
-	 * call sites. */
-
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	" T(ldr) "	%1, [%4]\n"
 	"	teq	%1, %2\n"
-- cgit v1.2.3-70-g09d2
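To make the first point above concrete: in kernels of this era, pagefault_disable() was built on
the very same preempt count that preempt_disable() uses, which is what makes the two so easy to
conflate. The sketch below paraphrases the include/linux/uaccess.h helpers from around 2.6.38;
treat the details as approximate rather than a verbatim copy of any one kernel version. Bumping
the preempt count is merely the mechanism by which the fault handler detects an atomic context
and fails a user-space fault fast instead of sleeping to page memory in.

static inline void pagefault_disable(void)
{
	inc_preempt_count();
	/* Keep the compiler from moving user accesses above this point. */
	barrier();
}

static inline void pagefault_enable(void)
{
	/* Keep user accesses from leaking below the count update. */
	barrier();
	dec_preempt_count();
	barrier();
	preempt_check_resched();
}

Same mechanism, different contract: preempt_disable() promises not to schedule, while
pagefault_disable() promises that a faulting user access returns an error instead of blocking.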
From 10a8c3839810ac9af1aec836d61b92e7a879f5fa Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Mon, 14 Mar 2011 14:00:30 +0100
Subject: ARM: 6806/1: irq: introduce entry and exit functions for chained handlers

Some chained IRQ handlers are written to cope with primary chips of
potentially different flow types. Whether this is a sensible thing to do
is a point of contention.

This patch introduces entry/exit functions for chained handlers which
infer the flow type of the primary chip as fasteoi or level-type by
checking whether or not the ->irq_eoi function pointer is present and
calling back to the primary chip as necessary. Other methods of flow
control are not considered.

Acked-by: Thomas Gleixner
Acked-by: Catalin Marinas
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/include/asm/mach/irq.h | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index 22ac140edd9..febe495d0c6 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -34,4 +34,35 @@ do {					\
 	raw_spin_unlock(&desc->lock);	\
 } while(0)
 
+#ifndef __ASSEMBLY__
+/*
+ * Entry/exit functions for chained handlers where the primary IRQ chip
+ * may implement either fasteoi or level-trigger flow control.
+ */
+static inline void chained_irq_enter(struct irq_chip *chip,
+				     struct irq_desc *desc)
+{
+	/* FastEOI controllers require no action on entry. */
+	if (chip->irq_eoi)
+		return;
+
+	if (chip->irq_mask_ack) {
+		chip->irq_mask_ack(&desc->irq_data);
+	} else {
+		chip->irq_mask(&desc->irq_data);
+		if (chip->irq_ack)
+			chip->irq_ack(&desc->irq_data);
+	}
+}
+
+static inline void chained_irq_exit(struct irq_chip *chip,
+				    struct irq_desc *desc)
+{
+	if (chip->irq_eoi)
+		chip->irq_eoi(&desc->irq_data);
+	else
+		chip->irq_unmask(&desc->irq_data);
+}
+#endif
+
 #endif
-- cgit v1.2.3-70-g09d2
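To illustrate how these helpers are meant to be used, here is a hypothetical demux handler for a
cascaded GPIO block, the style of chained handler this patch targets. Everything named example_*
(the mapped base address, the 0x10 pending-register offset, and the Linux IRQ base) is an invented
placeholder, not part of the patch; the real APIs used (irq_desc_get_chip(), generic_handle_irq(),
for_each_set_bit(), readl_relaxed()) all exist in kernels of this vintage. Assumed headers:
linux/irq.h, linux/io.h, linux/bitops.h and asm/mach/irq.h.

#define EXAMPLE_GPIO_IRQ_BASE	96	/* hypothetical Linux IRQ numbers for the GPIO lines */

static void __iomem *example_gpio_base;	/* hypothetical, ioremap()ed at probe time */

static void example_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending;
	int bit;

	/* Mask and ack the primary chip, unless it is fasteoi. */
	chained_irq_enter(chip, desc);

	pending = readl_relaxed(example_gpio_base + 0x10);	/* hypothetical pending register */
	for_each_set_bit(bit, &pending, 32)
		generic_handle_irq(EXAMPLE_GPIO_IRQ_BASE + bit);

	/* EOI (fasteoi) or unmask (level) the primary chip. */
	chained_irq_exit(chip, desc);
}

The handler would be installed with set_irq_chained_handler() on kernels of this era (the helper
was later renamed); whatever the primary chip's flow type, the enter/exit pair keeps the cascade
interrupt from re-triggering while the demux loop runs.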