Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--   arch/arm/kernel/Makefile        |    2
-rw-r--r--   arch/arm/kernel/arch.c          |   46
-rw-r--r--   arch/arm/kernel/armksyms.c      |    9
-rw-r--r--   arch/arm/kernel/ecard.c         |   12
-rw-r--r--   arch/arm/kernel/entry-armv.S    |  254
-rw-r--r--   arch/arm/kernel/entry-header.S  |    7
-rw-r--r--   arch/arm/kernel/head.S          |   64
-rw-r--r--   arch/arm/kernel/irq.c           |   14
-rw-r--r--   arch/arm/kernel/process.c       |    9
-rw-r--r--   arch/arm/kernel/setup.c         |   82
-rw-r--r--   arch/arm/kernel/signal.c        |   31
-rw-r--r--   arch/arm/kernel/signal.h        |   12
-rw-r--r--   arch/arm/kernel/smp.c           |  245
-rw-r--r--   arch/arm/kernel/time.c          |  107
-rw-r--r--   arch/arm/kernel/traps.c         |   33
15 files changed, 686 insertions, 241 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 4a2af55e134..3e1b0327e4d 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -6,7 +6,7 @@ AFLAGS_head.o := -DTEXTADDR=$(TEXTADDR) -DDATAADDR=$(DATAADDR)
 
 # Object file lists.
 
-obj-y        := arch.o compat.o dma.o entry-armv.o entry-common.o irq.o \
+obj-y        := compat.o dma.o entry-armv.o entry-common.o irq.o \
                 process.o ptrace.o semaphore.o setup.o signal.o sys_arm.o \
                 time.o traps.o
diff --git a/arch/arm/kernel/arch.c b/arch/arm/kernel/arch.c
deleted file mode 100644
index 4e02fbeb10a..00000000000
--- a/arch/arm/kernel/arch.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- *  linux/arch/arm/kernel/arch.c
- *
- *  Architecture specific fixups.
- */
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/types.h>
-
-#include <asm/elf.h>
-#include <asm/page.h>
-#include <asm/setup.h>
-#include <asm/mach/arch.h>
-
-unsigned int vram_size;
-
-#ifdef CONFIG_ARCH_ACORN
-
-unsigned int memc_ctrl_reg;
-unsigned int number_mfm_drives;
-
-static int __init parse_tag_acorn(const struct tag *tag)
-{
-        memc_ctrl_reg = tag->u.acorn.memc_control_reg;
-        number_mfm_drives = tag->u.acorn.adfsdrives;
-
-        switch (tag->u.acorn.vram_pages) {
-        case 512:
-                vram_size += PAGE_SIZE * 256;
-        case 256:
-                vram_size += PAGE_SIZE * 256;
-        default:
-                break;
-        }
-#if 0
-        if (vram_size) {
-                desc->video_start = 0x02000000;
-                desc->video_end   = 0x02000000 + vram_size;
-        }
-#endif
-        return 0;
-}
-
-__tagtable(ATAG_ACORN, parse_tag_acorn);
-
-#endif
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 4c38bd8bc29..835d450797a 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -30,9 +30,6 @@ extern void __lshrdi3(void);
 extern void __modsi3(void);
 extern void __muldi3(void);
 extern void __ucmpdi2(void);
-extern void __udivdi3(void);
-extern void __umoddi3(void);
-extern void __udivmoddi4(void);
 extern void __udivsi3(void);
 extern void __umodsi3(void);
 extern void __do_div64(void);
@@ -44,7 +41,10 @@ extern void fp_enter(void);
  * This has a special calling convention; it doesn't
  * modify any of the usual registers, except for LR.
  */
+#define EXPORT_CRC_ALIAS(sym) __CRC_SYMBOL(sym, "")
+
 #define EXPORT_SYMBOL_ALIAS(sym,orig)                   \
+        EXPORT_CRC_ALIAS(sym)                           \
         const struct kernel_symbol __ksymtab_##sym      \
         __attribute__((section("__ksymtab"))) =         \
         { (unsigned long)&orig, #sym };
@@ -134,9 +134,6 @@ EXPORT_SYMBOL(__lshrdi3);
 EXPORT_SYMBOL(__modsi3);
 EXPORT_SYMBOL(__muldi3);
 EXPORT_SYMBOL(__ucmpdi2);
-EXPORT_SYMBOL(__udivdi3);
-EXPORT_SYMBOL(__umoddi3);
-EXPORT_SYMBOL(__udivmoddi4);
 EXPORT_SYMBOL(__udivsi3);
 EXPORT_SYMBOL(__umodsi3);
 EXPORT_SYMBOL(__do_div64);
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
index 3dc15b131f5..6540db69133 100644
--- a/arch/arm/kernel/ecard.c
+++ b/arch/arm/kernel/ecard.c
@@ -866,19 +866,19 @@ static struct expansion_card *__init ecard_alloc_card(int type, int slot)
         return ec;
 }
 
-static ssize_t ecard_show_irq(struct device *dev, char *buf)
+static ssize_t ecard_show_irq(struct device *dev, struct device_attribute *attr, char *buf)
 {
         struct expansion_card *ec = ECARD_DEV(dev);
         return sprintf(buf, "%u\n", ec->irq);
 }
 
-static ssize_t ecard_show_dma(struct device *dev, char *buf)
+static ssize_t ecard_show_dma(struct device *dev, struct device_attribute *attr, char *buf)
 {
         struct expansion_card *ec = ECARD_DEV(dev);
         return sprintf(buf, "%u\n", ec->dma);
 }
 
-static ssize_t ecard_show_resources(struct device *dev, char *buf)
+static ssize_t ecard_show_resources(struct device *dev, struct device_attribute *attr, char *buf)
 {
         struct expansion_card *ec = ECARD_DEV(dev);
         char *str = buf;
@@ -893,19 +893,19 @@ static ssize_t ecard_show_resources(struct device *dev, char *buf)
         return str - buf;
 }
 
-static ssize_t ecard_show_vendor(struct device *dev, char *buf)
+static ssize_t ecard_show_vendor(struct device *dev, struct device_attribute *attr, char *buf)
 {
         struct expansion_card *ec = ECARD_DEV(dev);
         return sprintf(buf, "%u\n", ec->cid.manufacturer);
 }
 
-static ssize_t ecard_show_device(struct device *dev, char *buf)
+static ssize_t ecard_show_device(struct device *dev, struct device_attribute *attr, char *buf)
 {
         struct expansion_card *ec = ECARD_DEV(dev);
         return sprintf(buf, "%u\n", ec->cid.product);
 }
 
-static ssize_t ecard_show_type(struct device *dev, char *buf)
+static ssize_t ecard_show_type(struct device *dev, struct device_attribute *attr, char *buf)
 {
         struct expansion_card *ec = ECARD_DEV(dev);
         return sprintf(buf, "%s\n", ec->type == ECARD_EASI ? "EASI" : "IOC");
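
The ecard.c hunks above track a sysfs API change of this kernel generation: device-attribute show() methods now receive the struct device_attribute being read, so a single callback can serve several attributes. A minimal sketch of the new convention (the ec_probe attribute and its handler are invented for illustration, not part of the patch):

#include <linux/device.h>

/* New-style show method: sysfs passes the attribute as well as the
 * device, and supplies a PAGE_SIZE buffer to format into. */
static ssize_t ec_probe_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        /* return the number of bytes placed in buf */
        return sprintf(buf, "%s on %s\n", attr->attr.name, dev->bus_id);
}

/* expands to a struct device_attribute named dev_attr_ec_probe */
static DEVICE_ATTR(ec_probe, 0444, ec_probe_show, NULL);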
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index e14278d5988..39a6c1b0b9a 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -24,48 +24,91 @@
 #include "entry-header.S"
 
 /*
+ * Interrupt handling.  Preserves r7, r8, r9
+ */
+        .macro  irq_handler
+1:      get_irqnr_and_base r0, r6, r5, lr
+        movne   r1, sp
+        @
+        @ routine called with r0 = irq number, r1 = struct pt_regs *
+        @
+        adrne   lr, 1b
+        bne     asm_do_IRQ
+
+#ifdef CONFIG_SMP
+        /*
+         * XXX
+         *
+         * this macro assumes that irqstat (r6) and base (r5) are
+         * preserved from get_irqnr_and_base above
+         */
+        test_for_ipi r0, r6, r5, lr
+        movne   r0, sp
+        adrne   lr, 1b
+        bne     do_IPI
+#endif
+
+        .endm
+
+/*
  * Invalid mode handlers
  */
-        .macro  inv_entry, sym, reason
-        sub     sp, sp, #S_FRAME_SIZE           @ Allocate frame size in one go
-        stmia   sp, {r0 - lr}                   @ Save XXX r0 - lr
-        ldr     r4, .LC\sym
+        .macro  inv_entry, reason
+        sub     sp, sp, #S_FRAME_SIZE
+        stmib   sp, {r1 - lr}
         mov     r1, #\reason
         .endm
 
 __pabt_invalid:
-        inv_entry abt, BAD_PREFETCH
-        b       1f
+        inv_entry BAD_PREFETCH
+        b       common_invalid
 
 __dabt_invalid:
-        inv_entry abt, BAD_DATA
-        b       1f
+        inv_entry BAD_DATA
+        b       common_invalid
 
 __irq_invalid:
-        inv_entry irq, BAD_IRQ
-        b       1f
+        inv_entry BAD_IRQ
+        b       common_invalid
 
 __und_invalid:
-        inv_entry und, BAD_UNDEFINSTR
+        inv_entry BAD_UNDEFINSTR
+
+        @
+        @ XXX fall through to common_invalid
+        @
+
+@
+@ common_invalid - generic code for failed exception (re-entrant version of handlers)
+@
+common_invalid:
+        zero_fp
+
+        ldmia   r0, {r4 - r6}
+        add     r0, sp, #S_PC           @ here for interlock avoidance
+        mov     r7, #-1                 @  ""   ""    ""        ""
+        str     r4, [sp]                @ save preserved r0
+        stmia   r0, {r5 - r7}           @ lr_<exception>,
+                                        @ cpsr_<exception>, "old_r0"
 
-1:      zero_fp
-        ldmia   r4, {r5 - r7}                   @ Get XXX pc, cpsr, old_r0
-        add     r4, sp, #S_PC
-        stmia   r4, {r5 - r7}                   @ Save XXX pc, cpsr, old_r0
         mov     r0, sp
-        and     r2, r6, #31                     @ int mode
+        and     r2, r6, #0x1f
         b       bad_mode
 
 /*
  * SVC mode handlers
  */
-        .macro  svc_entry, sym
+        .macro  svc_entry
         sub     sp, sp, #S_FRAME_SIZE
-        stmia   sp, {r0 - r12}                  @ save r0 - r12
-        ldr     r2, .LC\sym
-        add     r0, sp, #S_FRAME_SIZE
-        ldmia   r2, {r2 - r4}                   @ get pc, cpsr
-        add     r5, sp, #S_SP
+        stmib   sp, {r1 - r12}
+
+        ldmia   r0, {r1 - r3}
+        add     r5, sp, #S_SP           @ here for interlock avoidance
+        mov     r4, #-1                 @  ""  ""      ""       ""
+        add     r0, sp, #S_FRAME_SIZE   @  ""  ""      ""       ""
+        str     r1, [sp]                @ save the "real" r0 copied
+                                        @ from the exception stack
+
         mov     r1, lr
         @
@@ -82,7 +125,7 @@ __und_invalid:
 
         .align  5
 __dabt_svc:
-        svc_entry abt
+        svc_entry
 
         @
         @ get ready to re-enable interrupts if appropriate
@@ -129,28 +172,24 @@ __dabt_svc:
 
         .align  5
 __irq_svc:
-        svc_entry irq
+        svc_entry
+
 #ifdef CONFIG_PREEMPT
-        get_thread_info r8
-        ldr     r9, [r8, #TI_PREEMPT]           @ get preempt count
-        add     r7, r9, #1                      @ increment it
-        str     r7, [r8, #TI_PREEMPT]
+        get_thread_info tsk
+        ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
+        add     r7, r8, #1                      @ increment it
+        str     r7, [tsk, #TI_PREEMPT]
 #endif
-1:      get_irqnr_and_base r0, r6, r5, lr
-        movne   r1, sp
-        @
-        @ routine called with r0 = irq number, r1 = struct pt_regs *
-        @
-        adrne   lr, 1b
-        bne     asm_do_IRQ
+
+        irq_handler
 #ifdef CONFIG_PREEMPT
-        ldr     r0, [r8, #TI_FLAGS]             @ get flags
+        ldr     r0, [tsk, #TI_FLAGS]            @ get flags
         tst     r0, #_TIF_NEED_RESCHED
         blne    svc_preempt
 preempt_return:
-        ldr     r0, [r8, #TI_PREEMPT]           @ read preempt value
+        ldr     r0, [tsk, #TI_PREEMPT]          @ read preempt value
+        str     r8, [tsk, #TI_PREEMPT]          @ restore preempt count
         teq     r0, r7
-        str     r9, [r8, #TI_PREEMPT]           @ restore preempt count
         strne   r0, [r0, -r0]                   @ bug()
 #endif
         ldr     r0, [sp, #S_PSR]                @ irqs are already disabled
@@ -161,7 +200,7 @@ preempt_return:
 
 #ifdef CONFIG_PREEMPT
 svc_preempt:
-        teq     r9, #0                          @ was preempt count = 0
+        teq     r8, #0                          @ was preempt count = 0
         ldreq   r6, .LCirq_stat
         movne   pc, lr                          @ no
         ldr     r0, [r6, #4]                    @ local_irq_count
@@ -169,9 +208,9 @@ svc_preempt:
         adds    r0, r0, r1
         movne   pc, lr
         mov     r7, #0                          @ preempt_schedule_irq
-        str     r7, [r8, #TI_PREEMPT]           @ expects preempt_count == 0
+        str     r7, [tsk, #TI_PREEMPT]          @ expects preempt_count == 0
 1:      bl      preempt_schedule_irq            @ irq en/disable is done inside
-        ldr     r0, [r8, #TI_FLAGS]             @ get new tasks TI_FLAGS
+        ldr     r0, [tsk, #TI_FLAGS]            @ get new tasks TI_FLAGS
         tst     r0, #_TIF_NEED_RESCHED
         beq     preempt_return                  @ go again
         b       1b
@@ -179,7 +218,7 @@ svc_preempt:
 
         .align  5
 __und_svc:
-        svc_entry und
+        svc_entry
 
         @
         @ call emulation code, which returns using r9 if it has emulated
@@ -209,7 +248,7 @@ __und_svc:
 
         .align  5
 __pabt_svc:
-        svc_entry abt
+        svc_entry
 
         @
         @ re-enable interrupts if appropriate
@@ -242,12 +281,8 @@ __pabt_svc:
         ldmia   sp, {r0 - pc}^                  @ load r0 - pc, cpsr
 
         .align  5
-.LCirq:
-        .word   __temp_irq
-.LCund:
-        .word   __temp_und
-.LCabt:
-        .word   __temp_abt
+.LCcralign:
+        .word   cr_alignment
 #ifdef MULTI_ABORT
 .LCprocfns:
         .word   processor
@@ -262,12 +297,16 @@ __pabt_svc:
 /*
  * User mode handlers
  */
-        .macro  usr_entry, sym
-        sub     sp, sp, #S_FRAME_SIZE           @ Allocate frame size in one go
-        stmia   sp, {r0 - r12}                  @ save r0 - r12
-        ldr     r7, .LC\sym
-        add     r5, sp, #S_PC
-        ldmia   r7, {r2 - r4}                   @ Get USR pc, cpsr
+        .macro  usr_entry
+        sub     sp, sp, #S_FRAME_SIZE
+        stmib   sp, {r1 - r12}
+
+        ldmia   r0, {r1 - r3}
+        add     r0, sp, #S_PC           @ here for interlock avoidance
+        mov     r4, #-1                 @  ""  ""     ""        ""
+
+        str     r1, [sp]                @ save the "real" r0 copied
+                                        @ from the exception stack
 
 #if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
         @ make sure our user space atomic helper is aborted
@@ -284,13 +323,13 @@ __pabt_svc:
         @
         @ Also, separately save sp_usr and lr_usr
         @
-        stmia   r5, {r2 - r4}
-        stmdb   r5, {sp, lr}^
+        stmia   r0, {r2 - r4}
+        stmdb   r0, {sp, lr}^
 
         @
         @ Enable the alignment trap while in kernel mode
         @
-        alignment_trap r7, r0, __temp_\sym
+        alignment_trap r0
 
         @
         @ Clear FP to mark the first stack frame
@@ -300,7 +339,7 @@ __pabt_svc:
 
         .align  5
 __dabt_usr:
-        usr_entry abt
+        usr_entry
 
         @
         @ Call the processor-specific abort handler:
@@ -329,30 +368,23 @@ __dabt_usr:
 
         .align  5
 __irq_usr:
-        usr_entry irq
+        usr_entry
 
+        get_thread_info tsk
 #ifdef CONFIG_PREEMPT
-        get_thread_info r8
-        ldr     r9, [r8, #TI_PREEMPT]           @ get preempt count
-        add     r7, r9, #1                      @ increment it
-        str     r7, [r8, #TI_PREEMPT]
+        ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
+        add     r7, r8, #1                      @ increment it
+        str     r7, [tsk, #TI_PREEMPT]
 #endif
-1:      get_irqnr_and_base r0, r6, r5, lr
-        movne   r1, sp
-        adrne   lr, 1b
-        @
-        @ routine called with r0 = irq number, r1 = struct pt_regs *
-        @
-        bne     asm_do_IRQ
+
+        irq_handler
 #ifdef CONFIG_PREEMPT
-        ldr     r0, [r8, #TI_PREEMPT]
+        ldr     r0, [tsk, #TI_PREEMPT]
+        str     r8, [tsk, #TI_PREEMPT]
         teq     r0, r7
-        str     r9, [r8, #TI_PREEMPT]
         strne   r0, [r0, -r0]
-        mov     tsk, r8
-#else
-        get_thread_info tsk
 #endif
+
         mov     why, #0
         b       ret_to_user
@@ -360,7 +392,7 @@ __irq_usr:
 
         .align  5
 __und_usr:
-        usr_entry und
+        usr_entry
 
         tst     r3, #PSR_T_BIT                  @ Thumb mode?
         bne     fpundefinstr                    @ ignore FP
@@ -476,7 +508,7 @@ fpundefinstr:
 
         .align  5
 __pabt_usr:
-        usr_entry abt
+        usr_entry
 
         enable_irq                              @ Enable interrupts
         mov     r0, r2                          @ address (pc)
@@ -741,29 +773,41 @@ __kuser_helper_end:
  *
  * Common stub entry macro:
  *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
+ *
+ * SP points to a minimal amount of processor-private memory, the address
+ * of which is copied into r0 for the mode specific abort handler.
  */
-        .macro  vector_stub, name, sym, correction=0
+        .macro  vector_stub, name, correction=0
         .align  5
 
 vector_\name:
-        ldr     r13, .LCs\sym
         .if \correction
         sub     lr, lr, #\correction
         .endif
-        str     lr, [r13]                       @ save lr_IRQ
+
+        @
+        @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
+        @ (parent CPSR)
+        @
+        stmia   sp, {r0, lr}            @ save r0, lr
         mrs     lr, spsr
-        str     lr, [r13, #4]                   @ save spsr_IRQ
+        str     lr, [sp, #8]            @ save spsr
+
         @
-        @ now branch to the relevant MODE handling routine
+        @ Prepare for SVC32 mode.  IRQs remain disabled.
         @
-        mrs     r13, cpsr
-        bic     r13, r13, #MODE_MASK
-        orr     r13, r13, #SVC_MODE
-        msr     spsr_cxsf, r13                  @ switch to SVC_32 mode
+        mrs     r0, cpsr
+        bic     r0, r0, #MODE_MASK
+        orr     r0, r0, #SVC_MODE
+        msr     spsr_cxsf, r0
 
-        and     lr, lr, #15
+        @
+        @ the branch table must immediately follow this code
+        @
+        mov     r0, sp
+        and     lr, lr, #0x0f
         ldr     lr, [pc, lr, lsl #2]
-        movs    pc, lr                          @ Changes mode and branches
+        movs    pc, lr                  @ branch to handler in SVC mode
         .endm
 
         .globl  __stubs_start
 __stubs_start:
 /*
  * Interrupt dispatcher
  */
-        vector_stub     irq, irq, 4
+        vector_stub     irq, 4
 
         .long   __irq_usr                       @  0  (USR_26 / USR_32)
         .long   __irq_invalid                   @  1  (FIQ_26 / FIQ_32)
@@ -794,7 +838,7 @@ __stubs_start:
  * Data abort dispatcher
  * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
  */
-        vector_stub     dabt, abt, 8
+        vector_stub     dabt, 8
 
         .long   __dabt_usr                      @  0  (USR_26 / USR_32)
         .long   __dabt_invalid                  @  1  (FIQ_26 / FIQ_32)
@@ -817,7 +861,7 @@ __stubs_start:
  * Prefetch abort dispatcher
  * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
  */
-        vector_stub     pabt, abt, 4
+        vector_stub     pabt, 4
 
         .long   __pabt_usr                      @  0 (USR_26 / USR_32)
         .long   __pabt_invalid                  @  1 (FIQ_26 / FIQ_32)
@@ -840,7 +884,7 @@ __stubs_start:
  * Undef instr entry dispatcher
  * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
  */
-        vector_stub     und, und
+        vector_stub     und
 
         .long   __und_usr                       @  0 (USR_26 / USR_32)
         .long   __und_invalid                   @  1 (FIQ_26 / FIQ_32)
@@ -894,13 +938,6 @@ vector_addrexcptn:
 .LCvswi:
         .word   vector_swi
 
-.LCsirq:
-        .word   __temp_irq
-.LCsund:
-        .word   __temp_und
-.LCsabt:
-        .word   __temp_abt
-
         .globl  __stubs_end
 __stubs_end:
 
@@ -922,23 +959,6 @@ __vectors_end:
 
         .data
 
-/*
- * Do not reorder these, and do not insert extra data between...
- */
-
-__temp_irq:
-        .word   0                               @ saved lr_irq
-        .word   0                               @ saved spsr_irq
-        .word   -1                              @ old_r0
-__temp_und:
-        .word   0                               @ Saved lr_und
-        .word   0                               @ Saved spsr_und
-        .word   -1                              @ old_r0
-__temp_abt:
-        .word   0                               @ Saved lr_abt
-        .word   0                               @ Saved spsr_abt
-        .word   -1                              @ old_r0
-
         .globl  cr_alignment
         .globl  cr_no_alignment
 cr_alignment:
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index a3d40a0e2b0..afef2127396 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -59,11 +59,10 @@
         mov     \rd, \rd, lsl #13
         .endm
 
-        .macro  alignment_trap, rbase, rtemp, sym
+        .macro  alignment_trap, rtemp
 #ifdef CONFIG_ALIGNMENT_TRAP
-#define OFF_CR_ALIGNMENT(x)     cr_alignment - x
-
-        ldr     \rtemp, [\rbase, #OFF_CR_ALIGNMENT(\sym)]
+        ldr     \rtemp, .LCcralign
+        ldr     \rtemp, [\rtemp]
         mcr     p15, 0, \rtemp, c1, c0
 #endif
         .endm
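
The combined effect of the entry-armv.S and entry-header.S changes: the old __temp_{irq,und,abt} save areas were single global buffers, which cannot work once a second CPU can take exceptions concurrently. Each exception mode now gets a small private stack (struct stack in setup.c, below), and the rewritten vector_stub parks three words there before switching to SVC mode with their address in r0. A C mirror of that three-word block, for orientation only; the authoritative layout is the assembly above, and the struct name is ours:

#include <linux/types.h>

/* What vector_stub leaves in the mode-private stack, in order; the
 * SVC-mode svc_entry/usr_entry macros receive its address in r0 and
 * fold the contents into the pt_regs frame they build. */
struct vector_save_area {
        u32 r0;         /* caller's r0; the stub needs a scratch register */
        u32 lr;         /* lr_<exception>: parent PC, correction applied */
        u32 spsr;       /* spsr_<exception>: parent CPSR */
};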
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 4733877296d..1155cf07c87 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -2,6 +2,8 @@
  *  linux/arch/arm/kernel/head.S
  *
  *  Copyright (C) 1994-2002 Russell King
+ *  Copyright (c) 2003 ARM Limited
+ *  All Rights Reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -165,6 +167,48 @@ __mmap_switched:
         stmia   r6, {r0, r4}                    @ Save control register values
         b       start_kernel
 
+#if defined(CONFIG_SMP)
+        .type   secondary_startup, #function
+ENTRY(secondary_startup)
+        /*
+         * Common entry point for secondary CPUs.
+         *
+         * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
+         * the processor type - there is no need to check the machine type
+         * as it has already been validated by the primary processor.
+         */
+        msr     cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC
+        bl      __lookup_processor_type
+        movs    r10, r5                         @ invalid processor?
+        moveq   r0, #'p'                        @ yes, error 'p'
+        beq     __error
+
+        /*
+         * Use the page tables supplied from __cpu_up.
+         */
+        adr     r4, __secondary_data
+        ldmia   r4, {r5, r6, r13}               @ address to jump to after
+        sub     r4, r4, r5                      @ mmu has been enabled
+        ldr     r4, [r6, r4]                    @ get secondary_data.pgdir
+        adr     lr, __enable_mmu                @ return address
+        add     pc, r10, #12                    @ initialise processor
+                                                @ (return control reg)
+
+        /*
+         * r6  = &secondary_data
+         */
+ENTRY(__secondary_switched)
+        ldr     sp, [r6, #4]                    @ get secondary_data.stack
+        mov     fp, #0
+        b       secondary_start_kernel
+
+        .type   __secondary_data, %object
+__secondary_data:
+        .long   .
+        .long   secondary_data
+        .long   __secondary_switched
+#endif /* defined(CONFIG_SMP) */
+
 
 /*
@@ -300,9 +344,9 @@ __create_page_tables:
         str     r6, [r0]
 #endif
 
+#ifdef CONFIG_DEBUG_LL
         bic     r7, r7, #0x0c                   @ turn off cacheable
                                                 @ and bufferable bits
-#ifdef CONFIG_DEBUG_LL
         /*
          * Map in IO space for serial debugging.
          * This allows debug messages to be output
@@ -328,28 +372,24 @@ __create_page_tables:
         teq     r1, #MACH_TYPE_NETWINDER
         teqne   r1, #MACH_TYPE_CATS
         bne     1f
-        add     r0, r4, #0x3fc0                 @ ff000000
-        mov     r3, #0x7c000000
-        orr     r3, r3, r7
-        str     r3, [r0], #4
-        add     r3, r3, #1 << 20
-        str     r3, [r0], #4
+        add     r0, r4, #0xff000000 >> 18
+        orr     r3, r7, #0x7c000000
+        str     r3, [r0]
 1:
 #endif
-#endif
 #ifdef CONFIG_ARCH_RPC
         /*
          * Map in screen at 0x02000000 & SCREEN2_BASE
          * Similar reasons here - for debug.  This is
          * only for Acorn RiscPC architectures.
          */
-        add     r0, r4, #0x80                   @ 02000000
-        mov     r3, #0x02000000
-        orr     r3, r3, r7
+        add     r0, r4, #0x02000000 >> 18
+        orr     r3, r7, #0x02000000
         str     r3, [r0]
-        add     r0, r4, #0x3600                 @ d8000000
+        add     r0, r4, #0xd8000000 >> 18
         str     r3, [r0]
+#endif
 #endif
         mov     pc, lr
 
         .ltorg
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index ff187f4308f..395137a8fad 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -4,6 +4,10 @@
  *  Copyright (C) 1992 Linus Torvalds
  *  Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
  *
+ *  Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation.
+ *  Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
+ *  Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -37,6 +41,7 @@
 #include <asm/irq.h>
 #include <asm/system.h>
 #include <asm/mach/irq.h>
+#include <asm/mach/time.h>
 
 /*
  * Maximum IRQ count.  Currently, this is arbitary.  However, it should
@@ -329,6 +334,15 @@ __do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
 
         spin_unlock(&irq_controller_lock);
 
+#ifdef CONFIG_NO_IDLE_HZ
+        if (!(action->flags & SA_TIMER) && system_timer->dyn_tick != NULL) {
+                write_seqlock(&xtime_lock);
+                if (system_timer->dyn_tick->state & DYN_TICK_ENABLED)
+                        system_timer->dyn_tick->handler(irq, 0, regs);
+                write_sequnlock(&xtime_lock);
+        }
+#endif
+
         if (!(action->flags & SA_INTERRUPT))
                 local_irq_enable();
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 8f146a4b475..409db6d5ec9 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -32,6 +32,7 @@
 #include <asm/leds.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
+#include <asm/mach/time.h>
 
 extern const char *processor_modes[];
 extern void setup_mm_for_reboot(char mode);
@@ -85,8 +86,10 @@ EXPORT_SYMBOL(pm_power_off);
 void default_idle(void)
 {
         local_irq_disable();
-        if (!need_resched() && !hlt_counter)
+        if (!need_resched() && !hlt_counter) {
+                timer_dyn_reprogram();
                 arch_idle();
+        }
         local_irq_enable();
 }
 
@@ -128,7 +131,6 @@ void machine_halt(void)
 {
 }
 
-EXPORT_SYMBOL(machine_halt);
 
 void machine_power_off(void)
 {
@@ -136,7 +138,6 @@ void machine_power_off(void)
                 pm_power_off();
 }
 
-EXPORT_SYMBOL(machine_power_off);
 
 void machine_restart(char * __unused)
 {
@@ -166,8 +167,6 @@ void machine_restart(char * __unused)
         while (1);
 }
 
-EXPORT_SYMBOL(machine_restart);
-
 void __show_regs(struct pt_regs *regs)
 {
         unsigned long flags = condition_codes(regs);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index c2a7da3ac0f..c9b69771f92 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -92,6 +92,14 @@ struct cpu_user_fns cpu_user;
 struct cpu_cache_fns cpu_cache;
 #endif
 
+struct stack {
+        u32 irq[3];
+        u32 abt[3];
+        u32 und[3];
+} ____cacheline_aligned;
+
+static struct stack stacks[NR_CPUS];
+
 char elf_platform[ELF_PLATFORM_SIZE];
 EXPORT_SYMBOL(elf_platform);
 
@@ -307,8 +315,6 @@ static void __init setup_processor(void)
                cpu_name, processor_id, (int)processor_id & 15,
                proc_arch[cpu_architecture()]);
 
-        dump_cpu_info(smp_processor_id());
-
         sprintf(system_utsname.machine, "%s%c", list->arch_name, ENDIANNESS);
         sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
         elf_hwcap = list->elf_hwcap;
@@ -316,6 +322,47 @@ static void __init setup_processor(void)
         cpu_proc_init();
 }
 
+/*
+ * cpu_init - initialise one CPU.
+ *
+ * cpu_init dumps the cache information, initialises SMP specific
+ * information, and sets up the per-CPU stacks.
+ */
+void cpu_init(void)
+{
+        unsigned int cpu = smp_processor_id();
+        struct stack *stk = &stacks[cpu];
+
+        if (cpu >= NR_CPUS) {
+                printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
+                BUG();
+        }
+
+        dump_cpu_info(cpu);
+
+        /*
+         * setup stacks for re-entrant exception handlers
+         */
+        __asm__ (
+        "msr    cpsr_c, %1\n\t"
+        "add    sp, %0, %2\n\t"
+        "msr    cpsr_c, %3\n\t"
+        "add    sp, %0, %4\n\t"
+        "msr    cpsr_c, %5\n\t"
+        "add    sp, %0, %6\n\t"
+        "msr    cpsr_c, %7"
+            :
+            : "r" (stk),
+              "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+              "I" (offsetof(struct stack, irq[0])),
+              "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+              "I" (offsetof(struct stack, abt[0])),
+              "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+              "I" (offsetof(struct stack, und[0])),
+              "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
+            : "r14");
+}
+
 static struct machine_desc * __init setup_machine(unsigned int nr)
 {
         struct machine_desc *list;
@@ -349,6 +396,20 @@ static void __init early_initrd(char **p)
 }
 __early_param("initrd=", early_initrd);
 
+static void __init add_memory(unsigned long start, unsigned long size)
+{
+        /*
+         * Ensure that start/size are aligned to a page boundary.
+         * Size is appropriately rounded down, start is rounded up.
+         */
+        size -= start & ~PAGE_MASK;
+
+        meminfo.bank[meminfo.nr_banks].start = PAGE_ALIGN(start);
+        meminfo.bank[meminfo.nr_banks].size  = size & PAGE_MASK;
+        meminfo.bank[meminfo.nr_banks].node  = PHYS_TO_NID(start);
+        meminfo.nr_banks += 1;
+}
+
 /*
  * Pick out the memory size.  We look for mem=size@start,
  * where start and size are "size[KkMm]"
@@ -373,10 +434,7 @@ static void __init early_mem(char **p)
         if (**p == '@')
                 start = memparse(*p + 1, p);
 
-        meminfo.bank[meminfo.nr_banks].start = start;
-        meminfo.bank[meminfo.nr_banks].size  = size;
-        meminfo.bank[meminfo.nr_banks].node  = PHYS_TO_NID(start);
-        meminfo.nr_banks += 1;
+        add_memory(start, size);
 }
 __early_param("mem=", early_mem);
 
@@ -518,11 +576,7 @@ static int __init parse_tag_mem32(const struct tag *tag)
                         tag->u.mem.start, tag->u.mem.size / 1024);
                 return -EINVAL;
         }
-        meminfo.bank[meminfo.nr_banks].start = tag->u.mem.start;
-        meminfo.bank[meminfo.nr_banks].size  = tag->u.mem.size;
-        meminfo.bank[meminfo.nr_banks].node  = PHYS_TO_NID(tag->u.mem.start);
-        meminfo.nr_banks += 1;
-
+        add_memory(tag->u.mem.start, tag->u.mem.size);
         return 0;
 }
 
@@ -683,8 +737,8 @@ void __init setup_arch(char **cmdline_p)
         if (mdesc->soft_reboot)
                 reboot_setup("s");
 
-        if (mdesc->param_offset)
-                tags = phys_to_virt(mdesc->param_offset);
+        if (mdesc->boot_params)
+                tags = phys_to_virt(mdesc->boot_params);
 
         /*
          * If we have the old style parameters, convert them to
@@ -715,6 +769,8 @@ void __init setup_arch(char **cmdline_p)
         paging_init(&meminfo, mdesc);
         request_standard_resources(&meminfo, mdesc);
 
+        cpu_init();
+
         /*
          * Set up various architecture-specific pointers
          */
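
add_memory() gives the two bank sources (the mem= parameter and ATAG_MEM, both above) a single rounding policy: start is rounded up to a page boundary, and size is shrunk by the misalignment and then truncated to whole pages. The same arithmetic as a stand-alone sketch, assuming 4K pages:

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        unsigned long start = 0x00000800;       /* not page aligned */
        unsigned long size  = 0x00100000;       /* 1MB bank */

        /* same steps as add_memory(): shrink size by the misalignment,
         * round start up, round size down to whole pages */
        size -= start & ~PAGE_MASK;
        printf("bank: start=0x%08lx size=0x%08lx\n",
               PAGE_ALIGN(start), size & PAGE_MASK);
        return 0;
}

which prints bank: start=0x00001000 size=0x000ff000, i.e. half a page is trimmed from each end rather than registering an unaligned bank.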
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 931919fd512..5e435e42dac 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -19,6 +19,7 @@
 #include <asm/unistd.h>
 
 #include "ptrace.h"
+#include "signal.h"
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
@@ -35,7 +36,7 @@
 #define SWI_THUMB_SIGRETURN     (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
 #define SWI_THUMB_RT_SIGRETURN  (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
 
-static const unsigned long retcodes[4] = {
+const unsigned long sigreturn_codes[4] = {
         SWI_SYS_SIGRETURN,      SWI_THUMB_SIGRETURN,
         SWI_SYS_RT_SIGRETURN,   SWI_THUMB_RT_SIGRETURN
 };
@@ -500,17 +501,25 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
                 if (ka->sa.sa_flags & SA_SIGINFO)
                         idx += 2;
 
-                if (__put_user(retcodes[idx], rc))
+                if (__put_user(sigreturn_codes[idx], rc))
                         return 1;
 
-                /*
-                 * Ensure that the instruction cache sees
-                 * the return code written onto the stack.
-                 */
-                flush_icache_range((unsigned long)rc,
-                                   (unsigned long)(rc + 1));
-
-                retcode = ((unsigned long)rc) + thumb;
+                if (cpsr & MODE32_BIT) {
+                        /*
+                         * 32-bit code can use the new high-page
+                         * signal return code support.
+                         */
+                        retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
+                } else {
+                        /*
+                         * Ensure that the instruction cache sees
+                         * the return code written onto the stack.
+                         */
+                        flush_icache_range((unsigned long)rc,
+                                           (unsigned long)(rc + 1));
+
+                        retcode = ((unsigned long)rc) + thumb;
+                }
         }
 
         regs->ARM_r0 = usig;
@@ -688,7 +697,7 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
         if (!user_mode(regs))
                 return 0;
 
-        if (try_to_freeze(0))
+        if (try_to_freeze())
                 goto no_signal;
 
         if (current->ptrace & PT_SINGLESTEP)
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
new file mode 100644
index 00000000000..91d26faca62
--- /dev/null
+++ b/arch/arm/kernel/signal.h
@@ -0,0 +1,12 @@
+/*
+ *  linux/arch/arm/kernel/signal.h
+ *
+ *  Copyright (C) 2005 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define KERN_SIGRETURN_CODE     0xffff0500
+
+extern const unsigned long sigreturn_codes[4];
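
signal.h exists so that signal.c and traps.c agree on where the trampolines live: KERN_SIGRETURN_CODE is offset 0x500 in the high vector page, and setup_return() now indexes the four-word sigreturn_codes copy there instead of writing instructions onto the user stack, which forced an icache flush per delivered signal. The address computation restated as a sketch (the helper name is ours, not the kernel's):

#define KERN_SIGRETURN_CODE     0xffff0500UL

/* sigreturn_codes[] holds, in order:
 *   [0] ARM sigreturn      [1] Thumb sigreturn
 *   [2] ARM rt_sigreturn   [3] Thumb rt_sigreturn
 * idx already encodes the Thumb choice (idx = thumb + 2*rt); adding
 * thumb once more sets bit 0 of the PC to resume in Thumb state. */
static unsigned long sigreturn_trampoline(unsigned int idx,
                                          unsigned int thumb)
{
        return KERN_SIGRETURN_CODE + (idx << 2) + thumb;
}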
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index ecc8c333240..b2085735a2b 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -24,6 +24,9 @@
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
@@ -33,10 +36,17 @@
  * The present bitmask indicates that the CPU is physically present.
  * The online bitmask indicates that the CPU is up and running.
  */
-cpumask_t cpu_present_mask;
+cpumask_t cpu_possible_map;
 cpumask_t cpu_online_map;
 
 /*
+ * as from 2.5, kernels no longer have an init_tasks structure
+ * so we need some other way of telling a new secondary core
+ * where to place its SVC stack
+ */
+struct secondary_data secondary_data;
+
+/*
  * structures for inter-processor calls
  * - A collection of single bit ipi messages.
  */
@@ -68,9 +78,11 @@ struct smp_call_struct {
 static struct smp_call_struct * volatile smp_call_function_data;
 static DEFINE_SPINLOCK(smp_call_function_lock);
 
-int __init __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu)
 {
         struct task_struct *idle;
+        pgd_t *pgd;
+        pmd_t *pmd;
         int ret;
 
         /*
@@ -84,11 +96,57 @@ int __init __cpu_up(unsigned int cpu)
         }
 
         /*
+         * Allocate initial page tables to allow the new CPU to
+         * enable the MMU safely.  This essentially means a set
+         * of our "standard" page tables, with the addition of
+         * a 1:1 mapping for the physical address of the kernel.
+         */
+        pgd = pgd_alloc(&init_mm);
+        pmd = pmd_offset(pgd, PHYS_OFFSET);
+        *pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
+                     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
+
+        /*
+         * We need to tell the secondary core where to find
+         * its stack and the page tables.
+         */
+        secondary_data.stack = (void *)idle->thread_info + THREAD_SIZE - 8;
+        secondary_data.pgdir = virt_to_phys(pgd);
+        wmb();
+
+        /*
          * Now bring the CPU into our world.
          */
         ret = boot_secondary(cpu, idle);
+        if (ret == 0) {
+                unsigned long timeout;
+
+                /*
+                 * CPU was successfully started, wait for it
+                 * to come online or time out.
+                 */
+                timeout = jiffies + HZ;
+                while (time_before(jiffies, timeout)) {
+                        if (cpu_online(cpu))
+                                break;
+
+                        udelay(10);
+                        barrier();
+                }
+
+                if (!cpu_online(cpu))
+                        ret = -EIO;
+        }
+
+        secondary_data.stack = 0;
+        secondary_data.pgdir = 0;
+
+        *pmd_offset(pgd, PHYS_OFFSET) = __pmd(0);
+        pgd_free(pgd);
+
         if (ret) {
-                printk(KERN_CRIT "cpu_up: processor %d failed to boot\n", cpu);
+                printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);
+
                 /*
                  * FIXME: We need to clean up the new idle thread. --rmk
                  */
@@ -98,10 +156,61 @@ int __init __cpu_up(unsigned int cpu)
 }
 
 /*
+ * This is the secondary CPU boot entry.  We're using this CPUs
+ * idle thread stack, but a set of temporary page tables.
+ */
+asmlinkage void __cpuinit secondary_start_kernel(void)
+{
+        struct mm_struct *mm = &init_mm;
+        unsigned int cpu = smp_processor_id();
+
+        printk("CPU%u: Booted secondary processor\n", cpu);
+
+        /*
+         * All kernel threads share the same mm context; grab a
+         * reference and switch to it.
+         */
+        atomic_inc(&mm->mm_users);
+        atomic_inc(&mm->mm_count);
+        current->active_mm = mm;
+        cpu_set(cpu, mm->cpu_vm_mask);
+        cpu_switch_mm(mm->pgd, mm);
+        enter_lazy_tlb(mm, current);
+        local_flush_tlb_all();
+
+        cpu_init();
+
+        /*
+         * Give the platform a chance to do its own initialisation.
+         */
+        platform_secondary_init(cpu);
+
+        /*
+         * Enable local interrupts.
+         */
+        local_irq_enable();
+        local_fiq_enable();
+
+        calibrate_delay();
+
+        smp_store_cpu_info(cpu);
+
+        /*
+         * OK, now it's safe to let the boot CPU continue
+         */
+        cpu_set(cpu, cpu_online_map);
+
+        /*
+         * OK, it's off to the idle thread for us
+         */
+        cpu_idle();
+}
+
+/*
  * Called by both boot and secondaries to move global data into
  * per-processor storage.
  */
-void __init smp_store_cpu_info(unsigned int cpuid)
+void __cpuinit smp_store_cpu_info(unsigned int cpuid)
 {
         struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
 
@@ -127,7 +236,8 @@ void __init smp_prepare_boot_cpu(void)
 {
         unsigned int cpu = smp_processor_id();
 
-        cpu_set(cpu, cpu_present_mask);
+        cpu_set(cpu, cpu_possible_map);
+        cpu_set(cpu, cpu_present_map);
         cpu_set(cpu, cpu_online_map);
 }
 
@@ -247,7 +357,7 @@ void show_ipi_list(struct seq_file *p)
 
         seq_puts(p, "IPI:");
 
-        for_each_online_cpu(cpu)
+        for_each_present_cpu(cpu)
                 seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);
 
         seq_putc(p, '\n');
@@ -394,3 +504,126 @@ int __init setup_profiling_timer(unsigned int multiplier)
 {
         return -EINVAL;
 }
+
+static int
+on_each_cpu_mask(void (*func)(void *), void *info, int retry, int wait,
+                 cpumask_t mask)
+{
+        int ret = 0;
+
+        preempt_disable();
+
+        ret = smp_call_function_on_cpu(func, info, retry, wait, mask);
+        if (cpu_isset(smp_processor_id(), mask))
+                func(info);
+
+        preempt_enable();
+
+        return ret;
+}
+
+/**********************************************************************/
+
+/*
+ * TLB operations
+ */
+struct tlb_args {
+        struct vm_area_struct *ta_vma;
+        unsigned long ta_start;
+        unsigned long ta_end;
+};
+
+static inline void ipi_flush_tlb_all(void *ignored)
+{
+        local_flush_tlb_all();
+}
+
+static inline void ipi_flush_tlb_mm(void *arg)
+{
+        struct mm_struct *mm = (struct mm_struct *)arg;
+
+        local_flush_tlb_mm(mm);
+}
+
+static inline void ipi_flush_tlb_page(void *arg)
+{
+        struct tlb_args *ta = (struct tlb_args *)arg;
+
+        local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+}
+
+static inline void ipi_flush_tlb_kernel_page(void *arg)
+{
+        struct tlb_args *ta = (struct tlb_args *)arg;
+
+        local_flush_tlb_kernel_page(ta->ta_start);
+}
+
+static inline void ipi_flush_tlb_range(void *arg)
+{
+        struct tlb_args *ta = (struct tlb_args *)arg;
+
+        local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+}
+
+static inline void ipi_flush_tlb_kernel_range(void *arg)
+{
+        struct tlb_args *ta = (struct tlb_args *)arg;
+
+        local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
+}
+
+void flush_tlb_all(void)
+{
+        on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+        cpumask_t mask = mm->cpu_vm_mask;
+
+        on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, 1, mask);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+        cpumask_t mask = vma->vm_mm->cpu_vm_mask;
+        struct tlb_args ta;
+
+        ta.ta_vma = vma;
+        ta.ta_start = uaddr;
+
+        on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, 1, mask);
+}
+
+void flush_tlb_kernel_page(unsigned long kaddr)
+{
+        struct tlb_args ta;
+
+        ta.ta_start = kaddr;
+
+        on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1, 1);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+                     unsigned long start, unsigned long end)
+{
+        cpumask_t mask = vma->vm_mm->cpu_vm_mask;
+        struct tlb_args ta;
+
+        ta.ta_vma = vma;
+        ta.ta_start = start;
+        ta.ta_end = end;
+
+        on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, 1, mask);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+        struct tlb_args ta;
+
+        ta.ta_start = start;
+        ta.ta_end = end;
+
+        on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1, 1);
+}
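
The page tables __cpu_up() builds for the secondary are the live init_mm tables plus one extra first-level entry: a section descriptor identity-mapping the megabyte at PHYS_OFFSET, so that secondary_startup (head.S, above) can enable the MMU while the program counter is still a physical address. A sketch of the descriptor that *pmd = __pmd(...) composes; the bit values are the classic ARM MMU encodings as we recall them, and PHYS_OFFSET is platform-specific (0x10000000 is only an example):

#define PHYS_OFFSET             0x10000000UL            /* example DRAM base */
#define PGDIR_MASK              (~((1UL << 21) - 1))    /* 2MB per pgd slot */
#define PMD_TYPE_SECT           0x00000002              /* 1MB section mapping */
#define PMD_SECT_AP_WRITE       (1 << 10)               /* kernel read/write */

/* maps virtual PHYS_OFFSET..+1MB straight onto the same physical
 * addresses; torn down again with __pmd(0) once the CPU is online */
static unsigned long identity_section(void)
{
        return (PHYS_OFFSET & PGDIR_MASK) | PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
}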
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index c232f24f4a6..1b7fcd50c3e 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -381,6 +381,99 @@ static struct sysdev_class timer_sysclass = {
         .resume         = timer_resume,
 };
 
+#ifdef CONFIG_NO_IDLE_HZ
+static int timer_dyn_tick_enable(void)
+{
+        struct dyn_tick_timer *dyn_tick = system_timer->dyn_tick;
+        unsigned long flags;
+        int ret = -ENODEV;
+
+        if (dyn_tick) {
+                write_seqlock_irqsave(&xtime_lock, flags);
+                ret = 0;
+                if (!(dyn_tick->state & DYN_TICK_ENABLED)) {
+                        ret = dyn_tick->enable();
+
+                        if (ret == 0)
+                                dyn_tick->state |= DYN_TICK_ENABLED;
+                }
+                write_sequnlock_irqrestore(&xtime_lock, flags);
+        }
+
+        return ret;
+}
+
+static int timer_dyn_tick_disable(void)
+{
+        struct dyn_tick_timer *dyn_tick = system_timer->dyn_tick;
+        unsigned long flags;
+        int ret = -ENODEV;
+
+        if (dyn_tick) {
+                write_seqlock_irqsave(&xtime_lock, flags);
+                ret = 0;
+                if (dyn_tick->state & DYN_TICK_ENABLED) {
+                        ret = dyn_tick->disable();
+
+                        if (ret == 0)
+                                dyn_tick->state &= ~DYN_TICK_ENABLED;
+                }
+                write_sequnlock_irqrestore(&xtime_lock, flags);
+        }
+
+        return ret;
+}
+
+/*
+ * Reprogram the system timer for at least the calculated time interval.
+ * This function should be called from the idle thread with IRQs disabled,
+ * immediately before sleeping.
+ */
+void timer_dyn_reprogram(void)
+{
+        struct dyn_tick_timer *dyn_tick = system_timer->dyn_tick;
+
+        write_seqlock(&xtime_lock);
+        if (dyn_tick->state & DYN_TICK_ENABLED)
+                dyn_tick->reprogram(next_timer_interrupt() - jiffies);
+        write_sequnlock(&xtime_lock);
+}
+
+static ssize_t timer_show_dyn_tick(struct sys_device *dev, char *buf)
+{
+        return sprintf(buf, "%i\n",
+                       (system_timer->dyn_tick->state & DYN_TICK_ENABLED) >> 1);
+}
+
+static ssize_t timer_set_dyn_tick(struct sys_device *dev, const char *buf,
+                                  size_t count)
+{
+        unsigned int enable = simple_strtoul(buf, NULL, 2);
+
+        if (enable)
+                timer_dyn_tick_enable();
+        else
+                timer_dyn_tick_disable();
+
+        return count;
+}
+static SYSDEV_ATTR(dyn_tick, 0644, timer_show_dyn_tick, timer_set_dyn_tick);
+
+/*
+ * dyntick=enable|disable
+ */
+static char dyntick_str[4] __initdata = "";
+
+static int __init dyntick_setup(char *str)
+{
+        if (str)
+                strlcpy(dyntick_str, str, sizeof(dyntick_str));
+        return 1;
+}
+
+__setup("dyntick=", dyntick_setup);
+#endif
+
 static int __init timer_init_sysfs(void)
 {
         int ret = sysdev_class_register(&timer_sysclass);
@@ -388,6 +481,20 @@ static int __init timer_init_sysfs(void)
                 system_timer->dev.cls = &timer_sysclass;
                 ret = sysdev_register(&system_timer->dev);
         }
+
+#ifdef CONFIG_NO_IDLE_HZ
+        if (ret == 0 && system_timer->dyn_tick) {
+                ret = sysdev_create_file(&system_timer->dev, &attr_dyn_tick);
+
+                /*
+                 * Turn on dynamic tick after calibrate delay
+                 * for correct bogomips
+                 */
+                if (ret == 0 && dyntick_str[0] == 'e')
+                        ret = timer_dyn_tick_enable();
+        }
+#endif
+
         return ret;
 }
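
A platform opts in to CONFIG_NO_IDLE_HZ by pointing system_timer->dyn_tick at a struct dyn_tick_timer before timer_init_sysfs() runs; the core then flips DYN_TICK_ENABLED under xtime_lock, calls reprogram() from the idle path and handler() from __do_irq() (irq.c, above). A hypothetical registration; the hook names are invented, and the struct definition itself lives in include/asm-arm/mach/time.h:

#include <linux/interrupt.h>
#include <asm/mach/time.h>

static int my_tick_enable(void) { /* switch to one-shot mode */ return 0; }
static int my_tick_disable(void) { /* back to periodic mode */ return 0; }

static void my_tick_reprogram(unsigned long ticks)
{
        /* program the match register 'ticks' jiffies into the future */
}

static irqreturn_t my_tick_handler(int irq, void *dev_id,
                                   struct pt_regs *regs)
{
        /* account for the jiffies slept through, then run as normal */
        return IRQ_HANDLED;
}

static struct dyn_tick_timer my_dyn_tick = {
        .enable         = my_tick_enable,
        .disable        = my_tick_disable,
        .reprogram      = my_tick_reprogram,
        .handler        = my_tick_handler,
};

/* in the platform timer init:  system_timer->dyn_tick = &my_dyn_tick; */

At run time the feature is toggled with dyntick=enable on the command line or through the new sysdev attribute (typically /sys/devices/system/timer/timer0/dyn_tick).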
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 45d2a032d89..d571c37ac30 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -30,6 +30,7 @@
 #include <asm/traps.h>
 
 #include "ptrace.h"
+#include "signal.h"
 
 const char *processor_modes[]=
 { "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
@@ -229,16 +230,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
         do_exit(SIGSEGV);
 }
 
-void die_if_kernel(const char *str, struct pt_regs *regs, int err)
-{
-        if (user_mode(regs))
-                return;
-
-        die(str, regs, err);
-}
-
-static void notify_die(const char *str, struct pt_regs *regs, siginfo_t *info,
-                unsigned long err, unsigned long trap)
+void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
+                unsigned long err, unsigned long trap)
 {
         if (user_mode(regs)) {
                 current->thread.error_code = err;
@@ -255,16 +248,20 @@ static DEFINE_SPINLOCK(undef_lock);
 
 void register_undef_hook(struct undef_hook *hook)
 {
-        spin_lock_irq(&undef_lock);
+        unsigned long flags;
+
+        spin_lock_irqsave(&undef_lock, flags);
         list_add(&hook->node, &undef_hook);
-        spin_unlock_irq(&undef_lock);
+        spin_unlock_irqrestore(&undef_lock, flags);
 }
 
 void unregister_undef_hook(struct undef_hook *hook)
 {
-        spin_lock_irq(&undef_lock);
+        unsigned long flags;
+
+        spin_lock_irqsave(&undef_lock, flags);
         list_del(&hook->node);
-        spin_unlock_irq(&undef_lock);
+        spin_unlock_irqrestore(&undef_lock, flags);
 }
 
 asmlinkage void do_undefinstr(struct pt_regs *regs)
@@ -683,6 +680,14 @@ void __init trap_init(void)
         memcpy((void *)0xffff0000, __vectors_start, __vectors_end - __vectors_start);
         memcpy((void *)0xffff0200, __stubs_start, __stubs_end - __stubs_start);
         memcpy((void *)0xffff1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+
+        /*
+         * Copy signal return handlers into the vector page, and
+         * set sigreturn to be a pointer to these.
+         */
+        memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes,
+               sizeof(sigreturn_codes));
+
         flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE);
 
         modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
 }
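
With the trap_init() change above, the high vector page is populated in four pieces; only their relative placement matters, since one flush_icache_range() covers the whole page. A comment map of the resulting layout, with offsets taken from the memcpy calls (kuser_sz depends on the number of helpers):

#define VECTORS_BASE            0xffff0000UL
#define KERN_SIGRETURN_CODE     (VECTORS_BASE + 0x500)

/*
 * 0xffff0000  hardware vector table   (__vectors_start..__vectors_end)
 * 0xffff0200  exception stubs         (__stubs_start..__stubs_end)
 * 0xffff0500  sigreturn trampolines   (sigreturn_codes, four words)
 * 0xffff1000 - kuser_sz               kuser helpers, packed against
 *                                     the end of the page
 */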