author     Ingo Molnar <mingo@kernel.org>    2013-12-16 14:51:32 +0100
committer  Ingo Molnar <mingo@kernel.org>    2013-12-16 14:51:32 +0100
commit     fe361cfcf40ad4612226347573a8669cd0d44799 (patch)
tree       e874ef5a29c3bfe3dd67dc2d8962562c00fc8e3a /arch/arm/kernel
parent     c7f2e3cd6c1f4932ccc4135d050eae3f7c7aef63 (diff)
parent     319e2e3f63c348a9b66db4667efa73178e18b17d (diff)
Merge tag 'v3.13-rc4' into perf/core
Merge Linux 3.13-rc4, to refresh this branch with the latest fixes.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/head-nommu.S        4
-rw-r--r--  arch/arm/kernel/head.S              9
-rw-r--r--  arch/arm/kernel/machine_kexec.c    17
-rw-r--r--  arch/arm/kernel/process.c           7
-rw-r--r--  arch/arm/kernel/relocate_kernel.S   8
-rw-r--r--  arch/arm/kernel/setup.c             3
-rw-r--r--  arch/arm/kernel/sigreturn_codes.S  40
-rw-r--r--  arch/arm/kernel/stacktrace.c        2
-rw-r--r--  arch/arm/kernel/traps.c             5
9 files changed, 66 insertions, 29 deletions
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 14235ba64a9..716249cc2ee 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -68,7 +68,7 @@ ENTRY(stext)
 
 #ifdef CONFIG_ARM_MPU
 	/* Calculate the size of a region covering just the kernel */
-	ldr	r5, =PHYS_OFFSET		@ Region start: PHYS_OFFSET
+	ldr	r5, =PLAT_PHYS_OFFSET		@ Region start: PHYS_OFFSET
 	ldr	r6, =(_end)			@ Cover whole kernel
 	sub	r6, r6, r5			@ Minimum size of region to map
 	clz	r6, r6				@ Region size must be 2^N...
@@ -213,7 +213,7 @@ ENTRY(__setup_mpu)
 	set_region_nr r0, #MPU_RAM_REGION
 	isb
 	/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
-	ldr	r0, =PHYS_OFFSET		@ RAM starts at PHYS_OFFSET
+	ldr	r0, =PLAT_PHYS_OFFSET		@ RAM starts at PHYS_OFFSET
 	ldr	r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)
 
 	setup_region r0, r5, r6, MPU_DATA_SIDE	@ PHYS_OFFSET, shared, enabled
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 7801866e626..32f317e5828 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -110,7 +110,7 @@ ENTRY(stext)
 	sub	r4, r3, r4			@ (PHYS_OFFSET - PAGE_OFFSET)
 	add	r8, r8, r4			@ PHYS_OFFSET
 #else
-	ldr	r8, =PHYS_OFFSET		@ always constant in this case
+	ldr	r8, =PLAT_PHYS_OFFSET		@ always constant in this case
 #endif
 
 	/*
@@ -508,6 +508,7 @@ __fixup_smp:
 	teq	r0, #0x0		@ '0' on actual UP A9 hardware
 	beq	__fixup_smp_on_up	@ So its an A9 UP
 	ldr	r0, [r0, #4]		@ read SCU Config
+ARM_BE8(rev	r0, r0)			@ byteswap if big endian
 	and	r0, r0, #0x3		@ number of CPUs
 	teq	r0, #0x0		@ is 1?
 	movne	pc, lr
@@ -644,7 +645,11 @@ ARM_BE8(rev16	ip, ip)
 	bcc	1b
 	bx	lr
 #else
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	moveq	r0, #0x00004000	@ set bit 22, mov to mvn instruction
+#else
 	moveq	r0, #0x400000	@ set bit 22, mov to mvn instruction
+#endif
 	b	2f
 1:	ldr	ip, [r7, r3]
 #ifdef CONFIG_CPU_ENDIAN_BE8
@@ -653,7 +658,7 @@ ARM_BE8(rev16	ip, ip)
 	tst	ip, #0x000f0000	@ check the rotation field
 	orrne	ip, ip, r6, lsl #24	@ mask in offset bits 31-24
 	biceq	ip, ip, #0x00004000	@ clear bit 22
-	orreq	ip, ip, r0, lsl #24	@ mask in offset bits 7-0
+	orreq	ip, ip, r0	@ mask in offset bits 7-0
 #else
 	bic	ip, ip, #0x000000ff
 	tst	ip, #0xf00	@ check the rotation field
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 57221e349a7..f0d180d8b29 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -14,11 +14,12 @@
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
+#include <asm/fncpy.h>
 #include <asm/mach-types.h>
 #include <asm/smp_plat.h>
 #include <asm/system_misc.h>
 
-extern const unsigned char relocate_new_kernel[];
+extern void relocate_new_kernel(void);
 extern const unsigned int relocate_new_kernel_size;
 
 extern unsigned long kexec_start_address;
@@ -142,6 +143,8 @@ void machine_kexec(struct kimage *image)
 {
 	unsigned long page_list;
 	unsigned long reboot_code_buffer_phys;
+	unsigned long reboot_entry = (unsigned long)relocate_new_kernel;
+	unsigned long reboot_entry_phys;
 	void *reboot_code_buffer;
 
 	/*
@@ -168,16 +171,16 @@ void machine_kexec(struct kimage *image)
 
 	/* copy our kernel relocation code to the control code page */
-	memcpy(reboot_code_buffer,
-	       relocate_new_kernel, relocate_new_kernel_size);
+	reboot_entry = fncpy(reboot_code_buffer,
+			     reboot_entry,
+			     relocate_new_kernel_size);
+	reboot_entry_phys = (unsigned long)reboot_entry +
+		(reboot_code_buffer_phys - (unsigned long)reboot_code_buffer);
-
-	flush_icache_range((unsigned long) reboot_code_buffer,
-			   (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
 	printk(KERN_INFO "Bye!\n");
 
 	if (kexec_reinit)
 		kexec_reinit();
-	soft_restart(reboot_code_buffer_phys);
+	soft_restart(reboot_entry_phys);
 }
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 94f6b05f9e2..92f7b15dd22 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -404,6 +404,7 @@ EXPORT_SYMBOL(dump_fpu);
 unsigned long get_wchan(struct task_struct *p)
 {
 	struct stackframe frame;
+	unsigned long stack_page;
 	int count = 0;
 	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
@@ -412,9 +413,11 @@ unsigned long get_wchan(struct task_struct *p)
 	frame.sp = thread_saved_sp(p);
 	frame.lr = 0;			/* recovered from the stack */
 	frame.pc = thread_saved_pc(p);
+	stack_page = (unsigned long)task_stack_page(p);
 	do {
-		int ret = unwind_frame(&frame);
-		if (ret < 0)
+		if (frame.sp < stack_page ||
+		    frame.sp >= stack_page + THREAD_SIZE ||
+		    unwind_frame(&frame) < 0)
 			return 0;
 		if (!in_sched_functions(frame.pc))
 			return frame.pc;
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index d0cdedf4864..95858966d84 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -2,10 +2,12 @@
  * relocate_kernel.S - put the kernel image in place to boot
  */
 
+#include <linux/linkage.h>
 #include <asm/kexec.h>
 
-	.globl relocate_new_kernel
-relocate_new_kernel:
+	.align	3	/* not needed for this code, but keeps fncpy() happy */
+
+ENTRY(relocate_new_kernel)
 
 	ldr	r0,kexec_indirection_page
 	ldr	r1,kexec_start_address
@@ -79,6 +81,8 @@ kexec_mach_type:
 kexec_boot_atags:
 	.long	0x0
 
+ENDPROC(relocate_new_kernel)
+
 relocate_new_kernel_end:
 
 	.globl relocate_new_kernel_size
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 6a1b8a81b1a..987a7f5bce5 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -873,8 +873,6 @@ void __init setup_arch(char **cmdline_p)
 	machine_desc = mdesc;
 	machine_name = mdesc->name;
 
-	setup_dma_zone(mdesc);
-
 	if (mdesc->reboot_mode != REBOOT_HARD)
 		reboot_mode = mdesc->reboot_mode;
 
@@ -892,6 +890,7 @@ void __init setup_arch(char **cmdline_p)
 	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
 
 	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
+	setup_dma_zone(mdesc);
 	sanity_check_meminfo();
 	arm_memblock_init(&meminfo, mdesc);
 
diff --git a/arch/arm/kernel/sigreturn_codes.S b/arch/arm/kernel/sigreturn_codes.S
index 3c5d0f2170f..b84d0cb1368 100644
--- a/arch/arm/kernel/sigreturn_codes.S
+++ b/arch/arm/kernel/sigreturn_codes.S
@@ -30,6 +30,27 @@
  * snippets.
  */
 
+/*
+ * In CPU_THUMBONLY case kernel arm opcodes are not allowed.
+ * Note in this case codes skips those instructions but it uses .org
+ * directive to keep correct layout of sigreturn_codes array.
+ */
+#ifndef CONFIG_CPU_THUMBONLY
+#define ARM_OK(code...) code
+#else
+#define ARM_OK(code...)
+#endif
+
+	.macro arm_slot n
+	.org	sigreturn_codes + 12 * (\n)
+ARM_OK(	.arm	)
+	.endm
+
+	.macro thumb_slot n
+	.org	sigreturn_codes + 12 * (\n) + 8
+	.thumb
+	.endm
+
 #if __LINUX_ARM_ARCH__ <= 4
 	/*
 	 * Note we manually set minimally required arch that supports
@@ -45,26 +66,27 @@
 	.global sigreturn_codes
 	.type	sigreturn_codes, #object
 
-	.arm
+	.align
 
 sigreturn_codes:
 
 	/* ARM sigreturn syscall code snippet */
-	mov	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
-	swi	#(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+	arm_slot 0
+ARM_OK(	mov	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)	)
+ARM_OK(	swi	#(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)	)
 
 	/* Thumb sigreturn syscall code snippet */
-	.thumb
+	thumb_slot 0
 	movs	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
 	swi	#0
 
 	/* ARM sigreturn_rt syscall code snippet */
-	.arm
-	mov	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
-	swi	#(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+	arm_slot 1
+ARM_OK(	mov	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)	)
+ARM_OK(	swi	#(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)	)
 
 	/* Thumb sigreturn_rt syscall code snippet */
-	.thumb
+	thumb_slot 1
 	movs	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
 	swi	#0
 
@@ -74,7 +96,7 @@ sigreturn_codes:
 	 * it is thumb case or not, so we need additional
	 * word after real last entry.
 	 */
-	.arm
+	arm_slot 2
 	.space	4
 
 	.size	sigreturn_codes, . - sigreturn_codes
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 00f79e59985..af4e8c8a542 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -31,7 +31,7 @@ int notrace unwind_frame(struct stackframe *frame)
 	high = ALIGN(low, THREAD_SIZE);
 
 	/* check current frame pointer is within bounds */
-	if (fp < (low + 12) || fp + 4 >= high)
+	if (fp < low + 12 || fp > high - 4)
 		return -EINVAL;
 
 	/* restore the registers from the stack frame */
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 6125f259b7b..7940241f057 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -509,9 +509,10 @@
 static inline int __do_cache_op(unsigned long start, unsigned long end)
 {
 	int ret;
-	unsigned long chunk = PAGE_SIZE;
 
 	do {
+		unsigned long chunk = min(PAGE_SIZE, end - start);
+
 		if (signal_pending(current)) {
 			struct thread_info *ti = current_thread_info();
@@ -856,7 +857,7 @@ static void __init kuser_init(void *vectors)
 	memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
 }
 #else
-static void __init kuser_init(void *vectors)
+static inline void __init kuser_init(void *vectors)
 {
 }
 #endif