Diffstat (limited to 'arch/arm')
50 files changed, 528 insertions, 197 deletions
diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 0ce9d0f71f2..12bfc1fa51f 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -157,6 +157,7 @@ machine-$(CONFIG_ARCH_EBSA110) += ebsa110 machine-$(CONFIG_ARCH_EFM32) += efm32 machine-$(CONFIG_ARCH_EP93XX) += ep93xx machine-$(CONFIG_ARCH_EXYNOS) += exynos +machine-$(CONFIG_ARCH_FOOTBRIDGE) += footbridge machine-$(CONFIG_ARCH_GEMINI) += gemini machine-$(CONFIG_ARCH_HIGHBANK) += highbank machine-$(CONFIG_ARCH_HISI) += hisi @@ -205,7 +206,6 @@ machine-$(CONFIG_ARCH_VEXPRESS) += vexpress machine-$(CONFIG_ARCH_VT8500) += vt8500 machine-$(CONFIG_ARCH_W90X900) += w90x900 machine-$(CONFIG_ARCH_ZYNQ) += zynq -machine-$(CONFIG_FOOTBRIDGE) += footbridge machine-$(CONFIG_PLAT_SPEAR) += spear # Platform directory name. This list is sorted alphanumerically diff --git a/arch/arm/boot/bootp/Makefile b/arch/arm/boot/bootp/Makefile index c394e305447..5761f003913 100644 --- a/arch/arm/boot/bootp/Makefile +++ b/arch/arm/boot/bootp/Makefile @@ -5,6 +5,8 @@ # architecture-specific flags and dependencies. # +GCOV_PROFILE := n + LDFLAGS_bootp :=-p --no-undefined -X \ --defsym initrd_phys=$(INITRD_PHYS) \ --defsym params_phys=$(PARAMS_PHYS) -T diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 76a50ecae1c..3ea230aa94b 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -37,6 +37,8 @@ ifeq ($(CONFIG_ARM_VIRT_EXT),y) OBJS += hyp-stub.o endif +GCOV_PROFILE := n + # # Architecture dependencies # diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts index e03fbf3c688..b40cdadb1f8 100644 --- a/arch/arm/boot/dts/dra7-evm.dts +++ b/arch/arm/boot/dts/dra7-evm.dts @@ -447,22 +447,19 @@ gpmc,device-width = <2>; gpmc,sync-clk-ps = <0>; gpmc,cs-on-ns = <0>; - gpmc,cs-rd-off-ns = <40>; - gpmc,cs-wr-off-ns = <40>; + gpmc,cs-rd-off-ns = <80>; + gpmc,cs-wr-off-ns = <80>; gpmc,adv-on-ns = <0>; - gpmc,adv-rd-off-ns = <30>; - gpmc,adv-wr-off-ns = <30>; - gpmc,we-on-ns = <5>; - gpmc,we-off-ns = <25>; - gpmc,oe-on-ns = <2>; - gpmc,oe-off-ns = <20>; - gpmc,access-ns = <20>; - gpmc,wr-access-ns = <40>; - gpmc,rd-cycle-ns = <40>; - gpmc,wr-cycle-ns = <40>; - gpmc,wait-pin = <0>; - gpmc,wait-on-read; - gpmc,wait-on-write; + gpmc,adv-rd-off-ns = <60>; + gpmc,adv-wr-off-ns = <60>; + gpmc,we-on-ns = <10>; + gpmc,we-off-ns = <50>; + gpmc,oe-on-ns = <4>; + gpmc,oe-off-ns = <40>; + gpmc,access-ns = <40>; + gpmc,wr-access-ns = <80>; + gpmc,rd-cycle-ns = <80>; + gpmc,wr-cycle-ns = <80>; gpmc,bus-turnaround-ns = <0>; gpmc,cycle2cycle-delay-ns = <0>; gpmc,clk-activation-ns = <0>; diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index c6c58c1c00e..6b675a02066 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi @@ -423,10 +423,14 @@ status = "disabled"; lvds-channel@0 { + #address-cells = <1>; + #size-cells = <0>; reg = <0>; status = "disabled"; - port { + port@0 { + reg = <0>; + lvds0_in: endpoint { remote-endpoint = <&ipu_di0_lvds0>; }; @@ -434,10 +438,14 @@ }; lvds-channel@1 { + #address-cells = <1>; + #size-cells = <0>; reg = <1>; status = "disabled"; - port { + port@1 { + reg = <1>; + lvds1_in: endpoint { remote-endpoint = <&ipu_di1_lvds1>; }; diff --git a/arch/arm/boot/dts/k2e-clocks.dtsi b/arch/arm/boot/dts/k2e-clocks.dtsi index 598afe91c67..4773d6af66a 100644 --- a/arch/arm/boot/dts/k2e-clocks.dtsi +++ b/arch/arm/boot/dts/k2e-clocks.dtsi @@ -40,7 +40,7 @@ clocks { #clock-cells = <0>; compatible = "ti,keystone,psc-clock"; clocks = 
<&chipclk16>; - clock-output-names = "usb"; + clock-output-names = "usb1"; reg = <0x02350004 0xb00>, <0x02350000 0x400>; reg-names = "control", "domain"; domain-id = <0>; @@ -60,8 +60,8 @@ clocks { #clock-cells = <0>; compatible = "ti,keystone,psc-clock"; clocks = <&chipclk12>; - clock-output-names = "pcie"; - reg = <0x0235006c 0xb00>, <0x02350000 0x400>; + clock-output-names = "pcie1"; + reg = <0x0235006c 0xb00>, <0x02350048 0x400>; reg-names = "control", "domain"; domain-id = <18>; }; diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts index b8698ca6864..429471aa7a1 100644 --- a/arch/arm/boot/dts/omap5-cm-t54.dts +++ b/arch/arm/boot/dts/omap5-cm-t54.dts @@ -353,13 +353,12 @@ }; ldo8_reg: ldo8 { - /* VDD_3v0: Does not go anywhere */ + /* VDD_3V_GP: act led/serial console */ regulator-name = "ldo8"; regulator-min-microvolt = <3000000>; regulator-max-microvolt = <3000000>; + regulator-always-on; regulator-boot-on; - /* Unused */ - status = "disabled"; }; ldo9_reg: ldo9 { diff --git a/arch/arm/configs/ep93xx_defconfig b/arch/arm/configs/ep93xx_defconfig index 1b650c85bdd..72233b9c9d0 100644 --- a/arch/arm/configs/ep93xx_defconfig +++ b/arch/arm/configs/ep93xx_defconfig @@ -107,5 +107,6 @@ CONFIG_DEBUG_SPINLOCK=y CONFIG_DEBUG_MUTEXES=y CONFIG_DEBUG_USER=y CONFIG_DEBUG_LL=y +CONFIG_DEBUG_LL_UART_PL01X=y # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_LIBCRC32C=y diff --git a/arch/arm/configs/versatile_defconfig b/arch/arm/configs/versatile_defconfig index d52b4ffe201..ea49d37564d 100644 --- a/arch/arm/configs/versatile_defconfig +++ b/arch/arm/configs/versatile_defconfig @@ -82,5 +82,6 @@ CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_USER=y CONFIG_DEBUG_LL=y +CONFIG_DEBUG_LL_UART_PL01X=y CONFIG_FONTS=y CONFIG_FONT_ACORN_8x8=y diff --git a/arch/arm/crypto/sha1-armv7-neon.S b/arch/arm/crypto/sha1-armv7-neon.S index 50013c0e286..dcd01f3f0bb 100644 --- a/arch/arm/crypto/sha1-armv7-neon.S +++ b/arch/arm/crypto/sha1-armv7-neon.S @@ -9,7 +9,7 @@ */ #include <linux/linkage.h> - +#include <asm/assembler.h> .syntax unified .code 32 @@ -61,13 +61,13 @@ #define RT3 r12 #define W0 q0 -#define W1 q1 +#define W1 q7 #define W2 q2 #define W3 q3 #define W4 q4 -#define W5 q5 -#define W6 q6 -#define W7 q7 +#define W5 q6 +#define W6 q5 +#define W7 q1 #define tmp0 q8 #define tmp1 q9 @@ -79,6 +79,11 @@ #define qK3 q14 #define qK4 q15 +#ifdef CONFIG_CPU_BIG_ENDIAN +#define ARM_LE(code...) +#else +#define ARM_LE(code...) code +#endif /* Round function macros. 
*/ @@ -150,45 +155,45 @@ #define W_PRECALC_00_15() \ add RWK, sp, #(WK_offs(0)); \ \ - vld1.32 {tmp0, tmp1}, [RDATA]!; \ - vrev32.8 W0, tmp0; /* big => little */ \ - vld1.32 {tmp2, tmp3}, [RDATA]!; \ + vld1.32 {W0, W7}, [RDATA]!; \ + ARM_LE(vrev32.8 W0, W0; ) /* big => little */ \ + vld1.32 {W6, W5}, [RDATA]!; \ vadd.u32 tmp0, W0, curK; \ - vrev32.8 W7, tmp1; /* big => little */ \ - vrev32.8 W6, tmp2; /* big => little */ \ + ARM_LE(vrev32.8 W7, W7; ) /* big => little */ \ + ARM_LE(vrev32.8 W6, W6; ) /* big => little */ \ vadd.u32 tmp1, W7, curK; \ - vrev32.8 W5, tmp3; /* big => little */ \ + ARM_LE(vrev32.8 W5, W5; ) /* big => little */ \ vadd.u32 tmp2, W6, curK; \ vst1.32 {tmp0, tmp1}, [RWK]!; \ vadd.u32 tmp3, W5, curK; \ vst1.32 {tmp2, tmp3}, [RWK]; \ #define WPRECALC_00_15_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vld1.32 {tmp0, tmp1}, [RDATA]!; \ + vld1.32 {W0, W7}, [RDATA]!; \ #define WPRECALC_00_15_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ add RWK, sp, #(WK_offs(0)); \ #define WPRECALC_00_15_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vrev32.8 W0, tmp0; /* big => little */ \ + ARM_LE(vrev32.8 W0, W0; ) /* big => little */ \ #define WPRECALC_00_15_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vld1.32 {tmp2, tmp3}, [RDATA]!; \ + vld1.32 {W6, W5}, [RDATA]!; \ #define WPRECALC_00_15_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ vadd.u32 tmp0, W0, curK; \ #define WPRECALC_00_15_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vrev32.8 W7, tmp1; /* big => little */ \ + ARM_LE(vrev32.8 W7, W7; ) /* big => little */ \ #define WPRECALC_00_15_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vrev32.8 W6, tmp2; /* big => little */ \ + ARM_LE(vrev32.8 W6, W6; ) /* big => little */ \ #define WPRECALC_00_15_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ vadd.u32 tmp1, W7, curK; \ #define WPRECALC_00_15_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ - vrev32.8 W5, tmp3; /* big => little */ \ + ARM_LE(vrev32.8 W5, W5; ) /* big => little */ \ #define WPRECALC_00_15_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ vadd.u32 tmp2, W6, curK; \ diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 79ecb4f34ff..10e78d00a0b 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -466,6 +466,7 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size) */ #define v7_exit_coherency_flush(level) \ asm volatile( \ + ".arch armv7-a \n\t" \ "stmfd sp!, {fp, ip} \n\t" \ "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \ "bic r0, r0, #"__stringify(CR_C)" \n\t" \ diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h index 39eb16b0066..bfe2a2f5a64 100644 --- a/arch/arm/include/asm/ftrace.h +++ b/arch/arm/include/asm/ftrace.h @@ -45,7 +45,7 @@ void *return_address(unsigned int); #else -extern inline void *return_address(unsigned int level) +static inline void *return_address(unsigned int level) { return NULL; } diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index 2ec765c39ab..18f5a554134 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -49,12 +49,6 @@ extern void smp_init_cpus(void); extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int)); /* - * Boot a secondary CPU, and assign it the specified idle task. - * This also gives us the initial stack to use for this CPU. 
- */ -extern int boot_secondary(unsigned int cpu, struct task_struct *); - -/* * Called from platform specific assembly code, this is the * secondary CPU entry point. */ diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h index 4651f6999b7..e86c985b8c7 100644 --- a/arch/arm/include/asm/syscall.h +++ b/arch/arm/include/asm/syscall.h @@ -63,8 +63,8 @@ static inline void syscall_get_arguments(struct task_struct *task, if (i + n > SYSCALL_MAX_ARGS) { unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i; unsigned int n_bad = n + i - SYSCALL_MAX_ARGS; - pr_warning("%s called with max args %d, handling only %d\n", - __func__, i + n, SYSCALL_MAX_ARGS); + pr_warn("%s called with max args %d, handling only %d\n", + __func__, i + n, SYSCALL_MAX_ARGS); memset(args_bad, 0, n_bad * sizeof(args[0])); n = SYSCALL_MAX_ARGS - i; } @@ -88,8 +88,8 @@ static inline void syscall_set_arguments(struct task_struct *task, return; if (i + n > SYSCALL_MAX_ARGS) { - pr_warning("%s called with max args %d, handling only %d\n", - __func__, i + n, SYSCALL_MAX_ARGS); + pr_warn("%s called with max args %d, handling only %d\n", + __func__, i + n, SYSCALL_MAX_ARGS); n = SYSCALL_MAX_ARGS - i; } diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h index 83259b87333..5f833f7adba 100644 --- a/arch/arm/include/asm/tls.h +++ b/arch/arm/include/asm/tls.h @@ -1,6 +1,9 @@ #ifndef __ASMARM_TLS_H #define __ASMARM_TLS_H +#include <linux/compiler.h> +#include <asm/thread_info.h> + #ifdef __ASSEMBLY__ #include <asm/asm-offsets.h> .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2 @@ -50,6 +53,49 @@ #endif #ifndef __ASSEMBLY__ + +static inline void set_tls(unsigned long val) +{ + struct thread_info *thread; + + thread = current_thread_info(); + + thread->tp_value[0] = val; + + /* + * This code runs with preemption enabled and therefore must + * be reentrant with respect to switch_tls. + * + * We need to ensure ordering between the shadow state and the + * hardware state, so that we don't corrupt the hardware state + * with a stale shadow state during context switch. + * + * If we're preempted here, switch_tls will load TPIDRURO from + * thread_info upon resuming execution and the following mcr + * is merely redundant. + */ + barrier(); + + if (!tls_emu) { + if (has_tls_reg) { + asm("mcr p15, 0, %0, c13, c0, 3" + : : "r" (val)); + } else { +#ifdef CONFIG_KUSER_HELPERS + /* + * User space must never try to access this + * directly. Expect your app to break + * eventually if you do so. The user helper + * at 0xffff0fe0 must be used instead. (see + * entry-armv.S for details) + */ + *((unsigned int *)0xffff0ff0) = val; +#endif + } + + } +} + static inline unsigned long get_tpuser(void) { unsigned long reg = 0; @@ -59,5 +105,23 @@ static inline unsigned long get_tpuser(void) return reg; } + +static inline void set_tpuser(unsigned long val) +{ + /* Since TPIDRURW is fully context-switched (unlike TPIDRURO), + * we need not update thread_info. 
+ */ + if (has_tls_reg && !tls_emu) { + asm("mcr p15, 0, %0, c13, c0, 2" + : : "r" (val)); + } +} + +static inline void flush_tls(void) +{ + set_tls(0); + set_tpuser(0); +} + #endif #endif /* __ASMARM_TLS_H */ diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index a4cd7af475e..4767eb9caa7 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -107,8 +107,11 @@ static inline void set_fs(mm_segment_t fs) extern int __get_user_1(void *); extern int __get_user_2(void *); extern int __get_user_4(void *); -extern int __get_user_lo8(void *); +extern int __get_user_32t_8(void *); extern int __get_user_8(void *); +extern int __get_user_64t_1(void *); +extern int __get_user_64t_2(void *); +extern int __get_user_64t_4(void *); #define __GUP_CLOBBER_1 "lr", "cc" #ifdef CONFIG_CPU_USE_DOMAINS @@ -117,7 +120,7 @@ extern int __get_user_8(void *); #define __GUP_CLOBBER_2 "lr", "cc" #endif #define __GUP_CLOBBER_4 "lr", "cc" -#define __GUP_CLOBBER_lo8 "lr", "cc" +#define __GUP_CLOBBER_32t_8 "lr", "cc" #define __GUP_CLOBBER_8 "lr", "cc" #define __get_user_x(__r2,__p,__e,__l,__s) \ @@ -131,12 +134,30 @@ extern int __get_user_8(void *); /* narrowing a double-word get into a single 32bit word register: */ #ifdef __ARMEB__ -#define __get_user_xb(__r2, __p, __e, __l, __s) \ - __get_user_x(__r2, __p, __e, __l, lo8) +#define __get_user_x_32t(__r2, __p, __e, __l, __s) \ + __get_user_x(__r2, __p, __e, __l, 32t_8) #else -#define __get_user_xb __get_user_x +#define __get_user_x_32t __get_user_x #endif +/* + * storing result into proper least significant word of 64bit target var, + * different only for big endian case where 64 bit __r2 lsw is r3: + */ +#ifdef __ARMEB__ +#define __get_user_x_64t(__r2, __p, __e, __l, __s) \ + __asm__ __volatile__ ( \ + __asmeq("%0", "r0") __asmeq("%1", "r2") \ + __asmeq("%3", "r1") \ + "bl __get_user_64t_" #__s \ + : "=&r" (__e), "=r" (__r2) \ + : "0" (__p), "r" (__l) \ + : __GUP_CLOBBER_##__s) +#else +#define __get_user_x_64t __get_user_x +#endif + + #define __get_user_check(x,p) \ ({ \ unsigned long __limit = current_thread_info()->addr_limit - 1; \ @@ -146,17 +167,26 @@ extern int __get_user_8(void *); register int __e asm("r0"); \ switch (sizeof(*(__p))) { \ case 1: \ - __get_user_x(__r2, __p, __e, __l, 1); \ + if (sizeof((x)) >= 8) \ + __get_user_x_64t(__r2, __p, __e, __l, 1); \ + else \ + __get_user_x(__r2, __p, __e, __l, 1); \ break; \ case 2: \ - __get_user_x(__r2, __p, __e, __l, 2); \ + if (sizeof((x)) >= 8) \ + __get_user_x_64t(__r2, __p, __e, __l, 2); \ + else \ + __get_user_x(__r2, __p, __e, __l, 2); \ break; \ case 4: \ - __get_user_x(__r2, __p, __e, __l, 4); \ + if (sizeof((x)) >= 8) \ + __get_user_x_64t(__r2, __p, __e, __l, 4); \ + else \ + __get_user_x(__r2, __p, __e, __l, 4); \ break; \ case 8: \ if (sizeof((x)) < 8) \ - __get_user_xb(__r2, __p, __e, __l, 4); \ + __get_user_x_32t(__r2, __p, __e, __l, 4); \ else \ __get_user_x(__r2, __p, __e, __l, 8); \ break; \ diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index f7b450f97e6..a88671cfe1f 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -98,6 +98,14 @@ EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(__get_user_1); EXPORT_SYMBOL(__get_user_2); EXPORT_SYMBOL(__get_user_4); +EXPORT_SYMBOL(__get_user_8); + +#ifdef __ARMEB__ +EXPORT_SYMBOL(__get_user_64t_1); +EXPORT_SYMBOL(__get_user_64t_2); +EXPORT_SYMBOL(__get_user_64t_4); +EXPORT_SYMBOL(__get_user_32t_8); +#endif EXPORT_SYMBOL(__put_user_1); EXPORT_SYMBOL(__put_user_2); diff 
--git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c index 7807ef58a2a..528f8af2add 100644 --- a/arch/arm/kernel/atags_parse.c +++ b/arch/arm/kernel/atags_parse.c @@ -130,7 +130,7 @@ static int __init parse_tag_cmdline(const struct tag *tag) strlcat(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE); #elif defined(CONFIG_CMDLINE_FORCE) - pr_warning("Ignoring tag cmdline (using the default kernel command line)\n"); + pr_warn("Ignoring tag cmdline (using the default kernel command line)\n"); #else strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE); diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 859f56cb122..2f5555d307b 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -372,6 +372,9 @@ ENDPROC(__fiq_abt) ARM( stmib sp, {r1 - r12} ) THUMB( stmia sp, {r0 - r12} ) + ATRAP( mrc p15, 0, r7, c1, c0, 0) + ATRAP( ldr r8, .LCcralign) + ldmia r0, {r3 - r5} add r0, sp, #S_PC @ here for interlock avoidance mov r6, #-1 @ "" "" "" "" @@ -379,6 +382,8 @@ ENDPROC(__fiq_abt) str r3, [sp] @ save the "real" r0 copied @ from the exception stack + ATRAP( ldr r8, [r8, #0]) + @ @ We are now ready to fill in the remaining blanks on the stack: @ @@ -392,10 +397,9 @@ ENDPROC(__fiq_abt) ARM( stmdb r0, {sp, lr}^ ) THUMB( store_user_sp_lr r0, r1, S_SP - S_PC ) - @ @ Enable the alignment trap while in kernel mode - @ - alignment_trap r0, .LCcralign + ATRAP( teq r8, r7) + ATRAP( mcrne p15, 0, r8, c1, c0, 0) @ @ Clear FP to mark the first stack frame diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index e52fe5a2d84..6bb09d4abde 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -366,7 +366,7 @@ ENTRY(vector_swi) str r0, [sp, #S_OLD_R0] @ Save OLD_R0 #endif zero_fp - alignment_trap ip, __cr_alignment + alignment_trap r10, ip, __cr_alignment enable_irq ct_user_exit get_thread_info tsk diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 0d91ca05d55..4176df721bf 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -37,11 +37,19 @@ #endif .endm - .macro alignment_trap, rtemp, label #ifdef CONFIG_ALIGNMENT_TRAP - ldr \rtemp, \label - ldr \rtemp, [\rtemp] - mcr p15, 0, \rtemp, c1, c0 +#define ATRAP(x...) x +#else +#define ATRAP(x...) 
+#endif + + .macro alignment_trap, rtmp1, rtmp2, label +#ifdef CONFIG_ALIGNMENT_TRAP + mrc p15, 0, \rtmp2, c1, c0, 0 + ldr \rtmp1, \label + ldr \rtmp1, [\rtmp1] + teq \rtmp1, \rtmp2 + mcrne p15, 0, \rtmp1, c1, c0, 0 #endif .endm diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 4d963fb66e3..b5b452f90f7 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -113,8 +113,8 @@ static u32 read_wb_reg(int n) GEN_READ_WB_REG_CASES(ARM_OP2_WVR, val); GEN_READ_WB_REG_CASES(ARM_OP2_WCR, val); default: - pr_warning("attempt to read from unknown breakpoint " - "register %d\n", n); + pr_warn("attempt to read from unknown breakpoint register %d\n", + n); } return val; @@ -128,8 +128,8 @@ static void write_wb_reg(int n, u32 val) GEN_WRITE_WB_REG_CASES(ARM_OP2_WVR, val); GEN_WRITE_WB_REG_CASES(ARM_OP2_WCR, val); default: - pr_warning("attempt to write to unknown breakpoint " - "register %d\n", n); + pr_warn("attempt to write to unknown breakpoint register %d\n", + n); } isb(); } @@ -292,7 +292,7 @@ int hw_breakpoint_slots(int type) case TYPE_DATA: return get_num_wrps(); default: - pr_warning("unknown slot type: %d\n", type); + pr_warn("unknown slot type: %d\n", type); return 0; } } @@ -365,7 +365,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) } if (i == max_slots) { - pr_warning("Can't find any breakpoint slot\n"); + pr_warn("Can't find any breakpoint slot\n"); return -EBUSY; } @@ -417,7 +417,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) } if (i == max_slots) { - pr_warning("Can't find any breakpoint slot\n"); + pr_warn("Can't find any breakpoint slot\n"); return; } @@ -894,8 +894,8 @@ static int debug_reg_trap(struct pt_regs *regs, unsigned int instr) { int cpu = smp_processor_id(); - pr_warning("Debug register access (0x%x) caused undefined instruction on CPU %d\n", - instr, cpu); + pr_warn("Debug register access (0x%x) caused undefined instruction on CPU %d\n", + instr, cpu); /* Set the error flag for this CPU and skip the faulting instruction. */ cpumask_set_cpu(cpu, &debug_err_mask); diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 2c425760451..88de943eebd 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -175,7 +175,7 @@ static bool migrate_one_irq(struct irq_desc *desc) c = irq_data_get_irq_chip(d); if (!c->irq_set_affinity) pr_debug("IRQ%u: unable to set affinity\n", d->irq); - else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret) + else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) cpumask_copy(d->affinity, affinity); return ret; @@ -205,8 +205,8 @@ void migrate_irqs(void) raw_spin_unlock(&desc->lock); if (affinity_broken && printk_ratelimit()) - pr_warning("IRQ%u no longer affine to CPU%u\n", i, - smp_processor_id()); + pr_warn("IRQ%u no longer affine to CPU%u\n", + i, smp_processor_id()); } local_irq_restore(flags); diff --git a/arch/arm/kernel/kprobes-test.c b/arch/arm/kernel/kprobes-test.c index 08d731294bc..b206d7790c7 100644 --- a/arch/arm/kernel/kprobes-test.c +++ b/arch/arm/kernel/kprobes-test.c @@ -110,10 +110,13 @@ * * @ TESTCASE_START * bl __kprobes_test_case_start - * @ start of inline data... + * .pushsection .rodata + * "10: * .ascii "mov r0, r7" @ text title for test case * .byte 0 - * .align 2, 0 + * .popsection + * @ start of inline data... 
+ * .word 10b @ pointer to title in .rodata section * * @ TEST_ARG_REG * .byte ARG_TYPE_REG @@ -971,7 +974,7 @@ void __naked __kprobes_test_case_start(void) __asm__ __volatile__ ( "stmdb sp!, {r4-r11} \n\t" "sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" - "bic r0, lr, #1 @ r0 = inline title string \n\t" + "bic r0, lr, #1 @ r0 = inline data \n\t" "mov r1, sp \n\t" "bl kprobes_test_case_start \n\t" "bx r0 \n\t" @@ -1349,15 +1352,14 @@ static unsigned long next_instruction(unsigned long pc) return pc + 4; } -static uintptr_t __used kprobes_test_case_start(const char *title, void *stack) +static uintptr_t __used kprobes_test_case_start(const char **title, void *stack) { struct test_arg *args; struct test_arg_end *end_arg; unsigned long test_code; - args = (struct test_arg *)PTR_ALIGN(title + strlen(title) + 1, 4); - - current_title = title; + current_title = *title++; + args = (struct test_arg *)title; current_args = args; current_stack = stack; diff --git a/arch/arm/kernel/kprobes-test.h b/arch/arm/kernel/kprobes-test.h index eecc90a0fd9..4430990e90e 100644 --- a/arch/arm/kernel/kprobes-test.h +++ b/arch/arm/kernel/kprobes-test.h @@ -111,11 +111,14 @@ struct test_arg_end { #define TESTCASE_START(title) \ __asm__ __volatile__ ( \ "bl __kprobes_test_case_start \n\t" \ + ".pushsection .rodata \n\t" \ + "10: \n\t" \ /* don't use .asciz here as 'title' may be */ \ /* multiple strings to be concatenated. */ \ ".ascii "#title" \n\t" \ ".byte 0 \n\t" \ - ".align 2, 0 \n\t" + ".popsection \n\t" \ + ".word 10b \n\t" #define TEST_ARG_REG(reg, val) \ ".byte "__stringify(ARG_TYPE_REG)" \n\t" \ diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index e6a6edbec61..eb2c4d55666 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -76,21 +76,15 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void) static void cpu_pmu_enable_percpu_irq(void *data) { - struct arm_pmu *cpu_pmu = data; - struct platform_device *pmu_device = cpu_pmu->plat_device; - int irq = platform_get_irq(pmu_device, 0); + int irq = *(int *)data; enable_percpu_irq(irq, IRQ_TYPE_NONE); - cpumask_set_cpu(smp_processor_id(), &cpu_pmu->active_irqs); } static void cpu_pmu_disable_percpu_irq(void *data) { - struct arm_pmu *cpu_pmu = data; - struct platform_device *pmu_device = cpu_pmu->plat_device; - int irq = platform_get_irq(pmu_device, 0); + int irq = *(int *)data; - cpumask_clear_cpu(smp_processor_id(), &cpu_pmu->active_irqs); disable_percpu_irq(irq); } @@ -103,7 +97,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu) irq = platform_get_irq(pmu_device, 0); if (irq >= 0 && irq_is_percpu(irq)) { - on_each_cpu(cpu_pmu_disable_percpu_irq, cpu_pmu, 1); + on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1); free_percpu_irq(irq, &percpu_pmu); } else { for (i = 0; i < irqs; ++i) { @@ -138,7 +132,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) irq); return err; } - on_each_cpu(cpu_pmu_enable_percpu_irq, cpu_pmu, 1); + on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1); } else { for (i = 0; i < irqs; ++i) { err = 0; @@ -152,8 +146,8 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) * continue. Otherwise, continue without this interrupt. 
*/ if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { - pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", - irq, i); + pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n", + irq, i); continue; } diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 52a21dafb27..a0a691d1cbe 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -333,6 +333,8 @@ void flush_thread(void) memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); memset(&thread->fpstate, 0, sizeof(union fp_state)); + flush_tls(); + thread_notify(THREAD_NOTIFY_FLUSH, thread); } @@ -471,19 +473,57 @@ int in_gate_area_no_mm(unsigned long addr) const char *arch_vma_name(struct vm_area_struct *vma) { - return is_gate_vma(vma) ? "[vectors]" : - (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ? - "[sigpage]" : NULL; + return is_gate_vma(vma) ? "[vectors]" : NULL; +} + +/* If possible, provide a placement hint at a random offset from the + * stack for the signal page. + */ +static unsigned long sigpage_addr(const struct mm_struct *mm, + unsigned int npages) +{ + unsigned long offset; + unsigned long first; + unsigned long last; + unsigned long addr; + unsigned int slots; + + first = PAGE_ALIGN(mm->start_stack); + + last = TASK_SIZE - (npages << PAGE_SHIFT); + + /* No room after stack? */ + if (first > last) + return 0; + + /* Just enough room? */ + if (first == last) + return first; + + slots = ((last - first) >> PAGE_SHIFT) + 1; + + offset = get_random_int() % slots; + + addr = first + (offset << PAGE_SHIFT); + + return addr; } static struct page *signal_page; extern struct page *get_signal_page(void); +static const struct vm_special_mapping sigpage_mapping = { + .name = "[sigpage]", + .pages = &signal_page, +}; + int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; unsigned long addr; - int ret; + unsigned long hint; + int ret = 0; if (!signal_page) signal_page = get_signal_page(); @@ -491,18 +531,23 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) return -ENOMEM; down_write(&mm->mmap_sem); - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); + hint = sigpage_addr(mm, 1); + addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; } - ret = install_special_mapping(mm, addr, PAGE_SIZE, + vma = _install_special_mapping(mm, addr, PAGE_SIZE, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, - &signal_page); + &sigpage_mapping); + + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto up_fail; + } - if (ret == 0) - mm->context.sigpage = addr; + mm->context.sigpage = addr; up_fail: up_write(&mm->mmap_sem); diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c index fafedd86885..98ea4b7eb40 100644 --- a/arch/arm/kernel/return_address.c +++ b/arch/arm/kernel/return_address.c @@ -59,15 +59,6 @@ void *return_address(unsigned int level) #else /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */ -#if defined(CONFIG_ARM_UNWIND) -#warning "TODO: return_address should use unwind tables" -#endif - -void *return_address(unsigned int level) -{ - return NULL; -} - #endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) / else */ EXPORT_SYMBOL_GPL(return_address); diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 9388a3d479e..39c74a2c3df 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -95,6 +95,9 @@ int __cpu_up(unsigned 
int cpu, struct task_struct *idle) { int ret; + if (!smp_ops.smp_boot_secondary) + return -ENOSYS; + /* * We need to tell the secondary core where to find * its stack and the page tables. @@ -113,7 +116,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) /* * Now bring the CPU into our world. */ - ret = boot_secondary(cpu, idle); + ret = smp_ops.smp_boot_secondary(cpu, idle); if (ret == 0) { /* * CPU was successfully started, wait for it @@ -142,13 +145,6 @@ void __init smp_init_cpus(void) smp_ops.smp_init_cpus(); } -int boot_secondary(unsigned int cpu, struct task_struct *idle) -{ - if (smp_ops.smp_boot_secondary) - return smp_ops.smp_boot_secondary(cpu, idle); - return -ENOSYS; -} - int platform_can_cpu_hotplug(void) { #ifdef CONFIG_HOTPLUG_CPU @@ -650,7 +646,7 @@ void smp_send_stop(void) udelay(1); if (num_online_cpus() > 1) - pr_warning("SMP: failed to stop secondary CPUs\n"); + pr_warn("SMP: failed to stop secondary CPUs\n"); } /* diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c index 67ca8578c6d..587fdfe1a72 100644 --- a/arch/arm/kernel/swp_emulate.c +++ b/arch/arm/kernel/swp_emulate.c @@ -142,14 +142,6 @@ static int emulate_swpX(unsigned int address, unsigned int *data, while (1) { unsigned long temp; - /* - * Barrier required between accessing protected resource and - * releasing a lock for it. Legacy code might not have done - * this, and we cannot determine that this is not the case - * being emulated, so insert always. - */ - smp_mb(); - if (type == TYPE_SWPB) __user_swpb_asm(*data, address, res, temp); else @@ -162,13 +154,6 @@ static int emulate_swpX(unsigned int address, unsigned int *data, } if (res == 0) { - /* - * Barrier also required between acquiring a lock for a - * protected resource and accessing the resource. Inserted for - * same reason as above. - */ - smp_mb(); - if (type == TYPE_SWPB) swpbcounter++; else diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c index 7b8403b7666..80f0d69205e 100644 --- a/arch/arm/kernel/thumbee.c +++ b/arch/arm/kernel/thumbee.c @@ -45,7 +45,7 @@ static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void switch (cmd) { case THREAD_NOTIFY_FLUSH: - thread->thumbee_state = 0; + teehbr_write(0); break; case THREAD_NOTIFY_SWITCH: current_thread_info()->thumbee_state = teehbr_read(); diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 439138d3437..0c8b10801d3 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -601,7 +601,6 @@ do_cache_op(unsigned long start, unsigned long end, int flags) #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE) asmlinkage int arm_syscall(int no, struct pt_regs *regs) { - struct thread_info *thread = current_thread_info(); siginfo_t info; if ((no >> 16) != (__ARM_NR_BASE>> 16)) @@ -652,21 +651,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) return regs->ARM_r0; case NR(set_tls): - thread->tp_value[0] = regs->ARM_r0; - if (tls_emu) - return 0; - if (has_tls_reg) { - asm ("mcr p15, 0, %0, c13, c0, 3" - : : "r" (regs->ARM_r0)); - } else { - /* - * User space must never try to access this directly. - * Expect your app to break eventually if you do so. - * The user helper at 0xffff0fe0 must be used instead. 
- * (see entry-armv.S for details) - */ - *((unsigned int *)0xffff0ff0) = regs->ARM_r0; - } + set_tls(regs->ARM_r0); return 0; #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c index a61a1dfbb0d..cbb85c5fabf 100644 --- a/arch/arm/kernel/unwind.c +++ b/arch/arm/kernel/unwind.c @@ -157,7 +157,7 @@ static const struct unwind_idx *search_index(unsigned long addr, if (likely(start->addr_offset <= addr_prel31)) return start; else { - pr_warning("unwind: Unknown symbol address %08lx\n", addr); + pr_warn("unwind: Unknown symbol address %08lx\n", addr); return NULL; } } @@ -225,7 +225,7 @@ static unsigned long unwind_get_byte(struct unwind_ctrl_block *ctrl) unsigned long ret; if (ctrl->entries <= 0) { - pr_warning("unwind: Corrupt unwind table\n"); + pr_warn("unwind: Corrupt unwind table\n"); return 0; } @@ -333,8 +333,8 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl) insn = (insn << 8) | unwind_get_byte(ctrl); mask = insn & 0x0fff; if (mask == 0) { - pr_warning("unwind: 'Refuse to unwind' instruction %04lx\n", - insn); + pr_warn("unwind: 'Refuse to unwind' instruction %04lx\n", + insn); return -URC_FAILURE; } @@ -357,8 +357,8 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl) unsigned long mask = unwind_get_byte(ctrl); if (mask == 0 || mask & 0xf0) { - pr_warning("unwind: Spare encoding %04lx\n", - (insn << 8) | mask); + pr_warn("unwind: Spare encoding %04lx\n", + (insn << 8) | mask); return -URC_FAILURE; } @@ -370,7 +370,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl) ctrl->vrs[SP] += 0x204 + (uleb128 << 2); } else { - pr_warning("unwind: Unhandled instruction %02lx\n", insn); + pr_warn("unwind: Unhandled instruction %02lx\n", insn); return -URC_FAILURE; } @@ -403,7 +403,7 @@ int unwind_frame(struct stackframe *frame) idx = unwind_find_idx(frame->pc); if (!idx) { - pr_warning("unwind: Index not found %08lx\n", frame->pc); + pr_warn("unwind: Index not found %08lx\n", frame->pc); return -URC_FAILURE; } @@ -422,8 +422,8 @@ int unwind_frame(struct stackframe *frame) /* only personality routine 0 supported in the index */ ctrl.insn = &idx->insn; else { - pr_warning("unwind: Unsupported personality routine %08lx in the index at %p\n", - idx->insn, idx); + pr_warn("unwind: Unsupported personality routine %08lx in the index at %p\n", + idx->insn, idx); return -URC_FAILURE; } @@ -435,8 +435,8 @@ int unwind_frame(struct stackframe *frame) ctrl.byte = 1; ctrl.entries = 1 + ((*ctrl.insn & 0x00ff0000) >> 16); } else { - pr_warning("unwind: Unsupported personality routine %08lx at %p\n", - *ctrl.insn, ctrl.insn); + pr_warn("unwind: Unsupported personality routine %08lx at %p\n", + *ctrl.insn, ctrl.insn); return -URC_FAILURE; } diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 6f57cb94367..8e95aa47457 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -219,8 +219,8 @@ SECTIONS __data_loc = ALIGN(4); /* location in binary */ . = PAGE_OFFSET + TEXT_OFFSET; #else - __init_end = .; . 
= ALIGN(THREAD_SIZE); + __init_end = .; __data_loc = .; #endif diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S index 938600098b8..8ecfd15c3a0 100644 --- a/arch/arm/lib/getuser.S +++ b/arch/arm/lib/getuser.S @@ -80,7 +80,7 @@ ENTRY(__get_user_8) ENDPROC(__get_user_8) #ifdef __ARMEB__ -ENTRY(__get_user_lo8) +ENTRY(__get_user_32t_8) check_uaccess r0, 8, r1, r2, __get_user_bad #ifdef CONFIG_CPU_USE_DOMAINS add r0, r0, #4 @@ -90,7 +90,37 @@ ENTRY(__get_user_lo8) #endif mov r0, #0 ret lr -ENDPROC(__get_user_lo8) +ENDPROC(__get_user_32t_8) + +ENTRY(__get_user_64t_1) + check_uaccess r0, 1, r1, r2, __get_user_bad8 +8: TUSER(ldrb) r3, [r0] + mov r0, #0 + ret lr +ENDPROC(__get_user_64t_1) + +ENTRY(__get_user_64t_2) + check_uaccess r0, 2, r1, r2, __get_user_bad8 +#ifdef CONFIG_CPU_USE_DOMAINS +rb .req ip +9: ldrbt r3, [r0], #1 +10: ldrbt rb, [r0], #0 +#else +rb .req r0 +9: ldrb r3, [r0] +10: ldrb rb, [r0, #1] +#endif + orr r3, rb, r3, lsl #8 + mov r0, #0 + ret lr +ENDPROC(__get_user_64t_2) + +ENTRY(__get_user_64t_4) + check_uaccess r0, 4, r1, r2, __get_user_bad8 +11: TUSER(ldr) r3, [r0] + mov r0, #0 + ret lr +ENDPROC(__get_user_64t_4) #endif __get_user_bad8: @@ -111,5 +141,9 @@ ENDPROC(__get_user_bad8) .long 6b, __get_user_bad8 #ifdef __ARMEB__ .long 7b, __get_user_bad + .long 8b, __get_user_bad8 + .long 9b, __get_user_bad8 + .long 10b, __get_user_bad8 + .long 11b, __get_user_bad8 #endif .popsection diff --git a/arch/arm/mach-imx/clk-gate2.c b/arch/arm/mach-imx/clk-gate2.c index 84acdfd1d71..5a75cdc8189 100644 --- a/arch/arm/mach-imx/clk-gate2.c +++ b/arch/arm/mach-imx/clk-gate2.c @@ -97,7 +97,7 @@ static int clk_gate2_is_enabled(struct clk_hw *hw) struct clk_gate2 *gate = to_clk_gate2(hw); if (gate->share_count) - return !!(*gate->share_count); + return !!__clk_get_enable_count(hw->clk); else return clk_gate2_reg_is_enabled(gate->reg, gate->bit_idx); } @@ -127,10 +127,6 @@ struct clk *clk_register_gate2(struct device *dev, const char *name, gate->bit_idx = bit_idx; gate->flags = clk_gate2_flags; gate->lock = lock; - - /* Initialize share_count per hardware state */ - if (share_count) - *share_count = clk_gate2_reg_is_enabled(reg, bit_idx) ? 
1 : 0; gate->share_count = share_count; init.name = name; diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index e7189dcc930..08d4167cc7c 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -1,9 +1,6 @@ menu "TI OMAP/AM/DM/DRA Family" depends on ARCH_MULTI_V6 || ARCH_MULTI_V7 -config ARCH_OMAP - bool - config ARCH_OMAP2 bool "TI OMAP2" depends on ARCH_MULTI_V6 diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 8fd87a3055b..9e91a4e7519 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -2065,7 +2065,7 @@ static void _reconfigure_io_chain(void) spin_lock_irqsave(&io_chain_lock, flags); - if (cpu_is_omap34xx() && omap3_has_io_chain_ctrl()) + if (cpu_is_omap34xx()) omap3xxx_prm_reconfigure_io_chain(); else if (cpu_is_omap44xx()) omap44xx_prm_reconfigure_io_chain(); diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c index 2458be6fc67..372de3edf4a 100644 --- a/arch/arm/mach-omap2/prm3xxx.c +++ b/arch/arm/mach-omap2/prm3xxx.c @@ -45,7 +45,7 @@ static struct omap_prcm_irq_setup omap3_prcm_irq_setup = { .ocp_barrier = &omap3xxx_prm_ocp_barrier, .save_and_clear_irqen = &omap3xxx_prm_save_and_clear_irqen, .restore_irqen = &omap3xxx_prm_restore_irqen, - .reconfigure_io_chain = &omap3xxx_prm_reconfigure_io_chain, + .reconfigure_io_chain = NULL, }; /* @@ -369,15 +369,30 @@ void __init omap3_prm_init_pm(bool has_uart4, bool has_iva) } /** - * omap3xxx_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain + * omap3430_pre_es3_1_reconfigure_io_chain - restart wake-up daisy chain + * + * The ST_IO_CHAIN bit does not exist in 3430 before es3.1. The only + * thing we can do is toggle EN_IO bit for earlier omaps. + */ +void omap3430_pre_es3_1_reconfigure_io_chain(void) +{ + omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, + PM_WKEN); + omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, + PM_WKEN); + omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN); +} + +/** + * omap3_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain * * Clear any previously-latched I/O wakeup events and ensure that the * I/O wakeup gates are aligned with the current mux settings. Works * by asserting WUCLKIN, waiting for WUCLKOUT to be asserted, and then * deasserting WUCLKIN and clearing the ST_IO_CHAIN WKST bit. No - * return value. + * return value. These registers are only available in 3430 es3.1 and later. 
*/ -void omap3xxx_prm_reconfigure_io_chain(void) +void omap3_prm_reconfigure_io_chain(void) { int i = 0; @@ -400,6 +415,15 @@ void omap3xxx_prm_reconfigure_io_chain(void) } /** + * omap3xxx_prm_reconfigure_io_chain - reconfigure I/O chain + */ +void omap3xxx_prm_reconfigure_io_chain(void) +{ + if (omap3_prcm_irq_setup.reconfigure_io_chain) + omap3_prcm_irq_setup.reconfigure_io_chain(); +} + +/** * omap3xxx_prm_enable_io_wakeup - enable wakeup events from I/O wakeup latches * * Activates the I/O wakeup event latches and allows events logged by @@ -656,6 +680,13 @@ static int omap3xxx_prm_late_init(void) if (!(prm_features & PRM_HAS_IO_WAKEUP)) return 0; + if (omap3_has_io_chain_ctrl()) + omap3_prcm_irq_setup.reconfigure_io_chain = + omap3_prm_reconfigure_io_chain; + else + omap3_prcm_irq_setup.reconfigure_io_chain = + omap3430_pre_es3_1_reconfigure_io_chain; + omap3xxx_prm_enable_io_wakeup(); ret = omap_prcm_register_chain_handler(&omap3_prcm_irq_setup); if (!ret) diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c index 630fa916bbc..04b013fbc98 100644 --- a/arch/arm/mach-pxa/generic.c +++ b/arch/arm/mach-pxa/generic.c @@ -61,7 +61,7 @@ EXPORT_SYMBOL(get_clock_tick_rate); /* * For non device-tree builds, keep legacy timer init */ -void pxa_timer_init(void) +void __init pxa_timer_init(void) { pxa_timer_nodt_init(IRQ_OST0, io_p2v(0x40a00000), get_clock_tick_rate()); diff --git a/arch/arm/mach-sa1100/Kconfig b/arch/arm/mach-sa1100/Kconfig index 04f9784ff0e..c6f6ed1cbed 100644 --- a/arch/arm/mach-sa1100/Kconfig +++ b/arch/arm/mach-sa1100/Kconfig @@ -58,6 +58,7 @@ config SA1100_H3100 bool "Compaq iPAQ H3100" select ARM_SA1110_CPUFREQ select HTC_EGPIO + select MFD_IPAQ_MICRO help Say Y here if you intend to run this kernel on the Compaq iPAQ H3100 handheld computer. Information about this machine and the @@ -69,6 +70,7 @@ config SA1100_H3600 bool "Compaq iPAQ H3600/H3700" select ARM_SA1110_CPUFREQ select HTC_EGPIO + select MFD_IPAQ_MICRO help Say Y here if you intend to run this kernel on the Compaq iPAQ H3600 handheld computer. Information about this machine and the diff --git a/arch/arm/mach-sa1100/h3xxx.c b/arch/arm/mach-sa1100/h3xxx.c index c79bf467fb7..b1d4faa12f9 100644 --- a/arch/arm/mach-sa1100/h3xxx.c +++ b/arch/arm/mach-sa1100/h3xxx.c @@ -25,6 +25,7 @@ #include <asm/mach/map.h> #include <mach/h3xxx.h> +#include <mach/irqs.h> #include "generic.h" @@ -244,9 +245,23 @@ static struct platform_device h3xxx_keys = { }, }; +static struct resource h3xxx_micro_resources[] = { + DEFINE_RES_MEM(0x80010000, SZ_4K), + DEFINE_RES_MEM(0x80020000, SZ_4K), + DEFINE_RES_IRQ(IRQ_Ser1UART), +}; + +struct platform_device h3xxx_micro_asic = { + .name = "ipaq-h3xxx-micro", + .id = -1, + .resource = h3xxx_micro_resources, + .num_resources = ARRAY_SIZE(h3xxx_micro_resources), +}; + static struct platform_device *h3xxx_devices[] = { &h3xxx_egpio, &h3xxx_keys, + &h3xxx_micro_asic, }; void __init h3xxx_mach_init(void) diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 0c1ab49e5f7..83792f4324e 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -41,6 +41,7 @@ * This code is not portable to processors with late data abort handling. 
*/ #define CODING_BITS(i) (i & 0x0e000000) +#define COND_BITS(i) (i & 0xf0000000) #define LDST_I_BIT(i) (i & (1 << 26)) /* Immediate constant */ #define LDST_P_BIT(i) (i & (1 << 24)) /* Preindex */ @@ -821,6 +822,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) break; case 0x04000000: /* ldr or str immediate */ + if (COND_BITS(instr) == 0xf0000000) /* NEON VLDn, VSTn */ + goto bad; offset.un = OFFSET_BITS(instr); handler = do_alignment_ldrstr; break; diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 5f2c988a06a..55f9d6e0cc8 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -21,6 +21,7 @@ #include <linux/init.h> #include <linux/smp.h> #include <linux/spinlock.h> +#include <linux/log2.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> @@ -945,6 +946,98 @@ static int l2_wt_override; * pass it though the device tree */ static u32 cache_id_part_number_from_dt; +/** + * l2x0_cache_size_of_parse() - read cache size parameters from DT + * @np: the device tree node for the l2 cache + * @aux_val: pointer to machine-supplied auxilary register value, to + * be augmented by the call (bits to be set to 1) + * @aux_mask: pointer to machine-supplied auxilary register mask, to + * be augmented by the call (bits to be set to 0) + * @associativity: variable to return the calculated associativity in + * @max_way_size: the maximum size in bytes for the cache ways + */ +static void __init l2x0_cache_size_of_parse(const struct device_node *np, + u32 *aux_val, u32 *aux_mask, + u32 *associativity, + u32 max_way_size) +{ + u32 mask = 0, val = 0; + u32 cache_size = 0, sets = 0; + u32 way_size_bits = 1; + u32 way_size = 0; + u32 block_size = 0; + u32 line_size = 0; + + of_property_read_u32(np, "cache-size", &cache_size); + of_property_read_u32(np, "cache-sets", &sets); + of_property_read_u32(np, "cache-block-size", &block_size); + of_property_read_u32(np, "cache-line-size", &line_size); + + if (!cache_size || !sets) + return; + + /* All these l2 caches have the same line = block size actually */ + if (!line_size) { + if (block_size) { + /* If linesize if not given, it is equal to blocksize */ + line_size = block_size; + } else { + /* Fall back to known size */ + pr_warn("L2C OF: no cache block/line size given: " + "falling back to default size %d bytes\n", + CACHE_LINE_SIZE); + line_size = CACHE_LINE_SIZE; + } + } + + if (line_size != CACHE_LINE_SIZE) + pr_warn("L2C OF: DT supplied line size %d bytes does " + "not match hardware line size of %d bytes\n", + line_size, + CACHE_LINE_SIZE); + + /* + * Since: + * set size = cache size / sets + * ways = cache size / (sets * line size) + * way size = cache size / (cache size / (sets * line size)) + * way size = sets * line size + * associativity = ways = cache size / way size + */ + way_size = sets * line_size; + *associativity = cache_size / way_size; + + if (way_size > max_way_size) { + pr_err("L2C OF: set size %dKB is too large\n", way_size); + return; + } + + pr_info("L2C OF: override cache size: %d bytes (%dKB)\n", + cache_size, cache_size >> 10); + pr_info("L2C OF: override line size: %d bytes\n", line_size); + pr_info("L2C OF: override way size: %d bytes (%dKB)\n", + way_size, way_size >> 10); + pr_info("L2C OF: override associativity: %d\n", *associativity); + + /* + * Calculates the bits 17:19 to set for way size: + * 512KB -> 6, 256KB -> 5, ... 
16KB -> 1 + */ + way_size_bits = ilog2(way_size >> 10) - 3; + if (way_size_bits < 1 || way_size_bits > 6) { + pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n", + way_size); + return; + } + + mask |= L2C_AUX_CTRL_WAY_SIZE_MASK; + val |= (way_size_bits << L2C_AUX_CTRL_WAY_SIZE_SHIFT); + + *aux_val &= ~mask; + *aux_val |= val; + *aux_mask &= ~mask; +} + static void __init l2x0_of_parse(const struct device_node *np, u32 *aux_val, u32 *aux_mask) { @@ -952,6 +1045,7 @@ static void __init l2x0_of_parse(const struct device_node *np, u32 tag = 0; u32 dirty = 0; u32 val = 0, mask = 0; + u32 assoc; of_property_read_u32(np, "arm,tag-latency", &tag); if (tag) { @@ -974,6 +1068,15 @@ static void __init l2x0_of_parse(const struct device_node *np, val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT; } + l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K); + if (assoc > 8) { + pr_err("l2x0 of: cache setting yield too high associativity\n"); + pr_err("l2x0 of: %d calculated, max 8\n", assoc); + } else { + mask |= L2X0_AUX_CTRL_ASSOC_MASK; + val |= (assoc << L2X0_AUX_CTRL_ASSOC_SHIFT); + } + *aux_val &= ~mask; *aux_val |= val; *aux_mask &= ~mask; @@ -1021,6 +1124,7 @@ static void __init l2c310_of_parse(const struct device_node *np, u32 data[3] = { 0, 0, 0 }; u32 tag[3] = { 0, 0, 0 }; u32 filter[2] = { 0, 0 }; + u32 assoc; of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag)); if (tag[0] && tag[1] && tag[2]) @@ -1047,6 +1151,23 @@ static void __init l2c310_of_parse(const struct device_node *np, writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN, l2x0_base + L310_ADDR_FILTER_START); } + + l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K); + switch (assoc) { + case 16: + *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; + *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16; + *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; + break; + case 8: + *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; + *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; + break; + default: + pr_err("PL310 OF: cache setting yield illegal associativity\n"); + pr_err("PL310 OF: %d calculated, only 8 and 16 legal\n", assoc); + break; + } } static const struct l2c_init_data of_l2c310_data __initconst = { diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c index c447ec70e86..e7a81cebbb2 100644 --- a/arch/arm/mm/idmap.c +++ b/arch/arm/mm/idmap.c @@ -27,7 +27,7 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) { pmd = pmd_alloc_one(&init_mm, addr); if (!pmd) { - pr_warning("Failed to allocate identity pmd.\n"); + pr_warn("Failed to allocate identity pmd.\n"); return; } /* diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 659c75d808d..9221645dd19 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -636,6 +636,11 @@ static int keep_initrd; void free_initrd_mem(unsigned long start, unsigned long end) { if (!keep_initrd) { + if (start == initrd_start) + start = round_down(start, PAGE_SIZE); + if (end == initrd_end) + end = round_up(end, PAGE_SIZE); + poison_init_mem((void *)start, PAGE_ALIGN(end) - start); free_reserved_area((void *)start, (void *)end, -1, "initrd"); } diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 8348ed6b2ef..9f98cec7fe1 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -223,13 +223,13 @@ early_param("ecc", early_ecc); static int __init early_cachepolicy(char *p) { - pr_warning("cachepolicy kernel parameter not supported without cp15\n"); + pr_warn("cachepolicy kernel 
parameter not supported without cp15\n"); } early_param("cachepolicy", early_cachepolicy); static int __init noalign_setup(char *__unused) { - pr_warning("noalign kernel parameter not supported without cp15\n"); + pr_warn("noalign kernel parameter not supported without cp15\n"); } __setup("noalign", noalign_setup); diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S index 1a24e9232ec..d3daed0ae0a 100644 --- a/arch/arm/mm/proc-v7-3level.S +++ b/arch/arm/mm/proc-v7-3level.S @@ -146,7 +146,6 @@ ENDPROC(cpu_v7_set_pte_ext) mov \tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits mov \ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT @ lower bits addls \ttbr1, \ttbr1, #TTBR1_OFFSET - adcls \tmp, \tmp, #0 mcrr p15, 1, \ttbr1, \tmp, c2 @ load TTBR1 mov \tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits mov \ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT @ lower bits @@ -158,9 +157,9 @@ ENDPROC(cpu_v7_set_pte_ext) * TFR EV X F IHD LR S * .EEE ..EE PUI. .TAT 4RVI ZWRS BLDP WCAM * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced - * 11 0 110 1 0011 1100 .111 1101 < we want + * 11 0 110 0 0011 1100 .111 1101 < we want */ .align 2 .type v7_crval, #object v7_crval: - crval clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c + crval clear=0x0122c302, mmuset=0x30c03c7d, ucset=0x00c01c7c diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index b5d67db2089..b3a947863ac 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -570,7 +570,7 @@ __v7_ca15mp_proc_info: __v7_b15mp_proc_info: .long 0x420f00f0 .long 0xff0ffff0 - __v7_proc __v7_b15mp_setup, hwcaps = HWCAP_IDIV + __v7_proc __v7_b15mp_setup .size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info /* diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig index 02fc10d2d63..d055db32ffc 100644 --- a/arch/arm/plat-omap/Kconfig +++ b/arch/arm/plat-omap/Kconfig @@ -1,3 +1,6 @@ +config ARCH_OMAP + bool + if ARCH_OMAP menu "TI OMAP Common Features" |