author     Linus Torvalds <torvalds@linux-foundation.org>   2013-02-26 19:53:12 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-02-26 19:53:12 -0800
commit     9626357371b519f2b955fef399647181034a77fe (patch)
tree       232abd741e773c7d3afb4ba6b02fcba03b82214d /arch/xtensa/kernel
parent     2b37e9a28afbd11f899738e912fb4a617a74b462 (diff)
parent     9cf81c759b7db1db593b2ca60b74ec350d5f9205 (diff)
Merge tag 'xtensa-next-20130225' of git://github.com/czankel/xtensa-linux
Pull xtensa update from Chris Zankel:
"Added features:
- add support for thread local storage (TLS)
- add accept4 and finit_module syscalls
- support medium-priority interrupts
- add support for dc232c processor variant
- support file-based simulated disk for ISS simulator
Bug fixes:
- fix return values returned by the str[n]cmp functions
- avoid mmap cache aliasing
- fix handling of 'windowed registers' in ptrace"
* tag 'xtensa-next-20130225' of git://github.com/czankel/xtensa-linux:
xtensa: add accept4 syscall
xtensa: add support for TLS
xtensa: add missing include asm/uaccess.h to checksum.h
xtensa: do not enable GENERIC_GPIO by default
xtensa: complete ptrace handling of register windows
xtensa: add support for oprofile
xtensa: move spill_registers to traps.h
xtensa: ISS: add host file-based simulated disk
xtensa: fix str[n]cmp return value
xtensa: avoid mmap cache aliasing
xtensa: add finit_module syscall
xtensa: pull signal definitions from signal-defs.h
xtensa: fix ipc_parse_version selection
xtensa: dispatch medium-priority interrupts
xtensa: Add config files for Diamond 233L - Rev C processor variant
xtensa: use new common dtc rule
xtensa: rename prom_update_property to of_update_property
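
Of the new syscalls, accept4() is the one most visible to ordinary userspace: it behaves like accept() but applies flags such as SOCK_CLOEXEC to the new socket atomically. A minimal usage sketch (standard C library API, nothing xtensa-specific; the port and address are made up for the demo):

#define _GNU_SOURCE
#include <sys/socket.h>
#include <netinet/in.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(8080),
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	int srv = socket(AF_INET, SOCK_STREAM, 0);

	if (srv < 0 || bind(srv, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(srv, 1) < 0) {
		perror("setup");
		return 1;
	}

	/* Like accept(), but the flags are applied to the new fd atomically,
	 * so there is no window where it could leak across exec().
	 * This call blocks until a client connects. */
	int conn = accept4(srv, NULL, NULL, SOCK_CLOEXEC);

	if (conn < 0)
		perror("accept4");
	else
		close(conn);
	close(srv);
	return 0;
}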
Diffstat (limited to 'arch/xtensa/kernel')
-rw-r--r--   arch/xtensa/kernel/asm-offsets.c |  1
-rw-r--r--   arch/xtensa/kernel/entry.S       | 69
-rw-r--r--   arch/xtensa/kernel/head.S        |  9
-rw-r--r--   arch/xtensa/kernel/process.c     |  5
-rw-r--r--   arch/xtensa/kernel/ptrace.c      | 35
-rw-r--r--   arch/xtensa/kernel/setup.c       | 42
-rw-r--r--   arch/xtensa/kernel/signal.c      |  6
-rw-r--r--   arch/xtensa/kernel/syscall.c     | 41
-rw-r--r--   arch/xtensa/kernel/traps.c       | 74
-rw-r--r--   arch/xtensa/kernel/vectors.S     | 57
-rw-r--r--   arch/xtensa/kernel/vmlinux.lds.S | 68
11 files changed, 333 insertions, 74 deletions
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index 0701fad170d..1915c7c889b 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -42,6 +42,7 @@ int main(void)
 	DEFINE(PT_ICOUNTLEVEL, offsetof (struct pt_regs, icountlevel));
 	DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
 	DEFINE(PT_SCOMPARE1, offsetof(struct pt_regs, scompare1));
+	DEFINE(PT_THREADPTR, offsetof(struct pt_regs, threadptr));
 	DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
 	DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
 	DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1]));
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 3777fec85e7..63845f95079 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -7,7 +7,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2004-2007 by Tensilica Inc.
+ * Copyright (C) 2004 - 2008 by Tensilica Inc.
  *
  * Chris Zankel <chris@zankel.net>
  *
@@ -130,6 +130,11 @@ _user_exception:
 	s32i	a3, a1, PT_SAR
 	s32i	a2, a1, PT_ICOUNTLEVEL
 
+#if XCHAL_HAVE_THREADPTR
+	rur	a2, threadptr
+	s32i	a2, a1, PT_THREADPTR
+#endif
+
 	/* Rotate ws so that the current windowbase is at bit0. */
 	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
@@ -349,15 +354,16 @@ common_exception:
 	 * so we can allow exceptions and interrupts (*) again.
 	 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
 	 *
-	 * (*) We only allow interrupts if PS.INTLEVEL was not set to 1 before
-	 *     (interrupts disabled) and if this exception is not an interrupt.
+	 * (*) We only allow interrupts of higher priority than current IRQ
 	 */
 
 	rsr	a3, ps
 	addi	a0, a0, -4
 	movi	a2, 1
-	extui	a3, a3, 0, 1		# a3 = PS.INTLEVEL[0]
-	moveqz	a3, a2, a0		# a3 = 1 iff interrupt exception
+	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+					# a3 = PS.INTLEVEL
+	movnez	a2, a3, a3		# a2 = 1: level-1, > 1: high priority
+	moveqz	a3, a2, a0		# a3 = IRQ level iff interrupt
 	movi	a2, 1 << PS_WOE_BIT
 	or	a3, a3, a2
 	rsr	a0, exccause
@@ -398,7 +404,7 @@ common_exception:
 	callx4	a4
 
 	/* Jump here for exception exit */
-
+
 	.global common_exception_return
 common_exception_return:

 	/* Jump if we are returning from kernel exceptions. */
@@ -509,6 +515,11 @@ user_exception_exit:
 	 *	 (if we have restored WSBITS-1 frames).
 	 */
 
+#if XCHAL_HAVE_THREADPTR
+	l32i	a3, a1, PT_THREADPTR
+	wur	a3, threadptr
+#endif
+
 2:	j	common_exception_exit

 	/* This is the kernel exception exit.
@@ -641,19 +652,51 @@ common_exception_exit:
 	l32i	a0, a1, PT_DEPC
 	l32i	a3, a1, PT_AREG3
+	_bltui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+
+	wsr	a0, depc
 	l32i	a2, a1, PT_AREG2
-	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+	l32i	a0, a1, PT_AREG0
+	l32i	a1, a1, PT_AREG1
+	rfde
 
+1:	/* Restore a0...a3 and return */
+	rsr	a0, ps
+	extui	a2, a0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+	movi	a0, 2f
+	slli	a2, a2, 4
+	add	a0, a2, a0
+	l32i	a2, a1, PT_AREG2
+	jx	a0
+
+	.macro	irq_exit_level level
+	.align	16
+	.if	XCHAL_EXCM_LEVEL >= \level
+	l32i	a0, a1, PT_PC
+	wsr	a0, epc\level
 	l32i	a0, a1, PT_AREG0
 	l32i	a1, a1, PT_AREG1
-	rfe
+	rfi	\level
+	.endif
+	.endm
 
-1:	wsr	a0, depc
+	.align	16
+2:
 	l32i	a0, a1, PT_AREG0
 	l32i	a1, a1, PT_AREG1
-	rfde
+	rfe
+
+	.align	16
+	/* no rfi for level-1 irq, handled by rfe above*/
+	nop
+
+	irq_exit_level 2
+	irq_exit_level 3
+	irq_exit_level 4
+	irq_exit_level 5
+	irq_exit_level 6
 
 ENDPROC(kernel_exception)
@@ -753,7 +796,7 @@ ENTRY(unrecoverable_exception)
 	wsr	a1, windowbase
 	rsync
 
-	movi	a1, (1 << PS_WOE_BIT) | 1
+	movi	a1, (1 << PS_WOE_BIT) | LOCKLEVEL
 	wsr	a1, ps
 	rsync
@@ -1474,7 +1517,7 @@ ENTRY(_spill_registers)
 	l32i	a1, a3, EXC_TABLE_KSTK
 	wsr	a3, excsave1
 
-	movi	a4, (1 << PS_WOE_BIT) | 1
+	movi	a4, (1 << PS_WOE_BIT) | LOCKLEVEL
 	wsr	a4, ps
 	rsync
@@ -1922,7 +1965,7 @@ ENTRY(_switch_to)
 	s32i	a6, a3, EXC_TABLE_FIXUP
 	s32i	a7, a3, EXC_TABLE_KSTK
 
-	/* restore context of the task that 'next' addresses */
+	/* restore context of the task 'next' */
 
 	l32i	a0, a13, THREAD_RA	# restore return address
 	l32i	a1, a13, THREAD_SP	# restore stack pointer
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 91d9095284d..df88f98737f 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -7,7 +7,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
  *
  * Chris Zankel <chris@zankel.net>
  * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
@@ -128,14 +128,14 @@ ENTRY(_startup)
 	wsr	a0, cpenable
 #endif
 
-	/* Set PS.INTLEVEL=1, PS.WOE=0, kernel stack, PS.EXCM=0
+	/* Set PS.INTLEVEL=LOCKLEVEL, PS.WOE=0, kernel stack, PS.EXCM=0
 	 *
 	 * Note: PS.EXCM must be cleared before using any loop
 	 *	 instructions; otherwise, they are silently disabled, and
 	 *	 at most one iteration of the loop is executed.
 	 */
 
-	movi	a1, 1
+	movi	a1, LOCKLEVEL
 	wsr	a1, ps
 	rsync
@@ -211,7 +211,8 @@ ENTRY(_startup)
 	movi	a1, init_thread_union
 	addi	a1, a1, KERNEL_STACK_SIZE
 
-	movi	a2, 0x00040001		# WOE=1, INTLEVEL=1, UM=0
+	movi	a2, (1 << PS_WOE_BIT) | LOCKLEVEL
+					# WOE=1, INTLEVEL=LOCKLEVEL, UM=0
 	wsr	a2, ps			# (enable reg-windows; progmode stack)
 	rsync
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 0dd5784416d..5cd82e9f601 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -259,9 +259,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
 			memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
 			       &regs->areg[XCHAL_NUM_AREGS - len/4], len);
 		}
-// FIXME: we need to set THREADPTR in thread_info...
+
+		/* The thread pointer is passed in the '4th argument' (= a5) */
 		if (clone_flags & CLONE_SETTLS)
-			childregs->areg[2] = childregs->areg[6];
+			childregs->threadptr = childregs->areg[5];
 	} else {
 		p->thread.ra = MAKE_RA_FOR_CALL(
 				(unsigned long)ret_from_kernel_thread, 1);
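
Taken together, the copy_thread() and entry.S changes give each thread its own THREADPTR value: it is seeded from a5 on CLONE_SETTLS and saved and restored through PT_THREADPTR on every kernel entry and exit. A rough userspace illustration of what that enables, using ordinary __thread data (the compiler and C library address it relative to the thread pointer; the values here are arbitrary, build with -pthread):

#include <pthread.h>
#include <stdio.h>

/* Each thread gets a private copy; on xtensa the access goes through the
 * THREADPTR register that the kernel now saves and restores per thread. */
static __thread int per_thread_value;

static void *worker(void *arg)
{
	per_thread_value = (int)(long)arg;	/* no effect on other threads */
	printf("thread %ld sees %d\n", (long)arg, per_thread_value);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, (void *)1L);
	pthread_create(&t2, NULL, worker, (void *)2L);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}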
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 61fb2e9e903..562fac66475 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -53,9 +53,8 @@ int ptrace_getregs(struct task_struct *child, void __user *uregs)
 {
 	struct pt_regs *regs = task_pt_regs(child);
 	xtensa_gregset_t __user *gregset = uregs;
-	unsigned long wm = regs->wmask;
 	unsigned long wb = regs->windowbase;
-	int live, i;
+	int i;
 
 	if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
 		return -EIO;
@@ -67,13 +66,11 @@ int ptrace_getregs(struct task_struct *child, void __user *uregs)
 	__put_user(regs->lcount, &gregset->lcount);
 	__put_user(regs->windowstart, &gregset->windowstart);
 	__put_user(regs->windowbase, &gregset->windowbase);
+	__put_user(regs->threadptr, &gregset->threadptr);
 
-	live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
-
-	for (i = 0; i < live; i++)
-		__put_user(regs->areg[i],gregset->a+((wb*4+i)%XCHAL_NUM_AREGS));
-	for (i = XCHAL_NUM_AREGS - (wm >> 4) * 4; i < XCHAL_NUM_AREGS; i++)
-		__put_user(regs->areg[i],gregset->a+((wb*4+i)%XCHAL_NUM_AREGS));
+	for (i = 0; i < XCHAL_NUM_AREGS; i++)
+		__put_user(regs->areg[i],
+			   gregset->a + ((wb * 4 + i) % XCHAL_NUM_AREGS));
 
 	return 0;
 }
@@ -84,7 +81,7 @@ int ptrace_setregs(struct task_struct *child, void __user *uregs)
 	xtensa_gregset_t *gregset = uregs;
 	const unsigned long ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;
 	unsigned long ps;
-	unsigned long wb;
+	unsigned long wb, ws;
 
 	if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
 		return -EIO;
@@ -94,21 +91,33 @@ int ptrace_setregs(struct task_struct *child, void __user *uregs)
 	__get_user(regs->lbeg, &gregset->lbeg);
 	__get_user(regs->lend, &gregset->lend);
 	__get_user(regs->lcount, &gregset->lcount);
-	__get_user(regs->windowstart, &gregset->windowstart);
+	__get_user(ws, &gregset->windowstart);
 	__get_user(wb, &gregset->windowbase);
+	__get_user(regs->threadptr, &gregset->threadptr);
 
 	regs->ps = (regs->ps & ~ps_mask) | (ps & ps_mask) | (1 << PS_EXCM_BIT);
 
 	if (wb >= XCHAL_NUM_AREGS / 4)
 		return -EFAULT;
 
-	regs->windowbase = wb;
+	if (wb != regs->windowbase || ws != regs->windowstart) {
+		unsigned long rotws, wmask;
+
+		rotws = (((ws | (ws << WSBITS)) >> wb) &
+			 ((1 << WSBITS) - 1)) & ~1;
+		wmask = ((rotws ? WSBITS + 1 - ffs(rotws) : 0) << 4) |
+			(rotws & 0xF) | 1;
+		regs->windowbase = wb;
+		regs->windowstart = ws;
+		regs->wmask = wmask;
+	}
 
 	if (wb != 0 &&  __copy_from_user(regs->areg + XCHAL_NUM_AREGS - wb * 4,
-				gregset->a, wb * 16))
+					 gregset->a, wb * 16))
 		return -EFAULT;
 
-	if (__copy_from_user(regs->areg, gregset->a + wb*4, (WSBITS-wb) * 16))
+	if (__copy_from_user(regs->areg, gregset->a + wb * 4,
+			     (WSBITS - wb) * 16))
 		return -EFAULT;
 
 	return 0;
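
The windowbase/windowstart handling added to ptrace_setregs() recomputes regs->wmask from the window state supplied by the debugger instead of keeping the stale value. A standalone rendering of that computation may make the bit manipulation easier to follow; WSBITS and the sample register values below are assumptions for the demo, while the expressions mirror the patch:

#include <stdio.h>
#include <strings.h>		/* ffs() */

#define WSBITS 16		/* XCHAL_NUM_AREGS / 4 on a 64-register core (assumed) */

/* Mirrors the wmask computation from ptrace_setregs() above. */
static unsigned long compute_wmask(unsigned long ws, unsigned long wb)
{
	unsigned long rotws, wmask;

	/* Rotate windowstart right by wb so the current frame's bit lands at
	 * bit 0, then drop that bit (the current frame itself). */
	rotws = (((ws | (ws << WSBITS)) >> wb) & ((1UL << WSBITS) - 1)) & ~1UL;

	/* Upper bits: WSBITS + 1 - ffs(rotws), the distance of the lowest set
	 * rotated bit from the top of the mask.
	 * Low nibble: the nearest rotated bits, with bit 0 always set. */
	wmask = ((rotws ? WSBITS + 1 - ffs(rotws) : 0) << 4) |
		(rotws & 0xF) | 1;
	return wmask;
}

int main(void)
{
	/* Hypothetical snapshot: live frames at windows 0, 3 and 5, wb = 5. */
	unsigned long ws = (1UL << 0) | (1UL << 3) | (1UL << 5);

	printf("wmask = 0x%lx\n", compute_wmask(ws, 5));
	return 0;
}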
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 24c1a57abb4..6dd25ecde3f 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -328,6 +328,27 @@ extern char _UserExceptionVector_literal_start;
 extern char _UserExceptionVector_text_end;
 extern char _DoubleExceptionVector_literal_start;
 extern char _DoubleExceptionVector_text_end;
+#if XCHAL_EXCM_LEVEL >= 2
+extern char _Level2InterruptVector_text_start;
+extern char _Level2InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+extern char _Level3InterruptVector_text_start;
+extern char _Level3InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+extern char _Level4InterruptVector_text_start;
+extern char _Level4InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+extern char _Level5InterruptVector_text_start;
+extern char _Level5InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+extern char _Level6InterruptVector_text_start;
+extern char _Level6InterruptVector_text_end;
+#endif
+
 
 #ifdef CONFIG_S32C1I_SELFTEST
@@ -482,6 +503,27 @@ void __init setup_arch(char **cmdline_p)
 	mem_reserve(__pa(&_DoubleExceptionVector_literal_start),
 		    __pa(&_DoubleExceptionVector_text_end), 0);
 
+#if XCHAL_EXCM_LEVEL >= 2
+	mem_reserve(__pa(&_Level2InterruptVector_text_start),
+		    __pa(&_Level2InterruptVector_text_end), 0);
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+	mem_reserve(__pa(&_Level3InterruptVector_text_start),
+		    __pa(&_Level3InterruptVector_text_end), 0);
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+	mem_reserve(__pa(&_Level4InterruptVector_text_start),
+		    __pa(&_Level4InterruptVector_text_end), 0);
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+	mem_reserve(__pa(&_Level5InterruptVector_text_start),
+		    __pa(&_Level5InterruptVector_text_end), 0);
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+	mem_reserve(__pa(&_Level6InterruptVector_text_start),
+		    __pa(&_Level6InterruptVector_text_end), 0);
+#endif
+
 	bootmem_init();
 
 #ifdef CONFIG_OF
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index d7590dddd08..718eca1850b 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -337,7 +337,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	struct rt_sigframe *frame;
 	int err = 0;
 	int signal;
-	unsigned long sp, ra;
+	unsigned long sp, ra, tp;
 
 	sp = regs->areg[1];
@@ -391,7 +391,8 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	 * Return context not modified until this point.
 	 */
 
-	/* Set up registers for signal handler */
+	/* Set up registers for signal handler; preserve the threadptr */
+	tp = regs->threadptr;
 	start_thread(regs, (unsigned long) ka->sa.sa_handler,
 		     (unsigned long) frame);
@@ -402,6 +403,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	regs->areg[6] = (unsigned long) signal;
 	regs->areg[7] = (unsigned long) &frame->info;
 	regs->areg[8] = (unsigned long) &frame->uc;
+	regs->threadptr = tp;
 
 	/* Set access mode to USER_DS.  Nomenclature is outdated, but
 	 * functionality is used in uaccess.h
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 54fa8425cee..5d3f7a119ed 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -36,6 +36,10 @@ syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= {
 #include <uapi/asm/unistd.h>
 };
 
+#define COLOUR_ALIGN(addr, pgoff) \
+	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
+	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
+
 asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
 {
 	unsigned long ret;
@@ -52,3 +56,40 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice,
 {
 	return sys_fadvise64_64(fd, offset, len, advice);
 }
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct vm_area_struct *vmm;
+
+	if (flags & MAP_FIXED) {
+		/* We do not accept a shared mapping if it would violate
+		 * cache aliasing constraints.
+		 */
+		if ((flags & MAP_SHARED) &&
+				((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+			return -EINVAL;
+		return addr;
+	}
+
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+	if (!addr)
+		addr = TASK_UNMAPPED_BASE;
+
+	if (flags & MAP_SHARED)
+		addr = COLOUR_ALIGN(addr, pgoff);
+	else
+		addr = PAGE_ALIGN(addr);
+
+	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
+		/* At this point:  (!vmm || addr < vmm->vm_end). */
+		if (TASK_SIZE - len < addr)
+			return -ENOMEM;
+		if (!vmm || addr + len <= vmm->vm_start)
+			return addr;
+		addr = vmm->vm_end;
+		if (flags & MAP_SHARED)
+			addr = COLOUR_ALIGN(addr, pgoff);
+	}
+}
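
The new arch_get_unmapped_area() above avoids cache aliasing by giving shared mappings the same cache colour as the file offset they map: COLOUR_ALIGN() rounds the address up to an SHMLBA boundary and adds the offset's colour bits back in, so congruent data ends up in the same cache sets. A tiny self-contained illustration of the arithmetic (the SHMLBA and PAGE_SHIFT values and the sample numbers are assumptions for the demo):

#include <stdio.h>

#define PAGE_SHIFT	12
#define SHMLBA		(4UL << PAGE_SHIFT)	/* assumed 16 KiB alias boundary */

/* Same arithmetic as the COLOUR_ALIGN() macro in the patch. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	return ((addr + SHMLBA - 1) & ~(SHMLBA - 1)) +
	       ((pgoff << PAGE_SHIFT) & (SHMLBA - 1));
}

int main(void)
{
	unsigned long addr  = 0x10003000;	/* mmap hint, not colour-aligned */
	unsigned long pgoff = 5;		/* file offset of 5 pages */
	unsigned long fixed = colour_align(addr, pgoff);

	/* The returned address and the file offset now agree in the bits
	 * below SHMLBA, so both views index the same cache lines. */
	printf("aligned address     = 0x%lx\n", fixed);
	printf("address colour bits = 0x%lx\n", fixed & (SHMLBA - 1));
	printf("offset  colour bits = 0x%lx\n", (pgoff << PAGE_SHIFT) & (SHMLBA - 1));
	return 0;
}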
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index ded955d4515..923db5c1527 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -37,6 +37,7 @@
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
+#include <asm/traps.h>
 
 #ifdef CONFIG_KGDB
 extern int gdb_enter;
@@ -193,28 +194,49 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause)
 }
 
 /*
- * Level-1 interrupt.
- * We currently have no priority encoding.
+ * IRQ handler.
+ * PS.INTLEVEL is the current IRQ priority level.
  */
 
-unsigned long ignored_level1_interrupts;
 extern void do_IRQ(int, struct pt_regs *);
 
-void do_interrupt (struct pt_regs *regs)
+void do_interrupt(struct pt_regs *regs)
 {
-	unsigned long intread = get_sr (interrupt);
-	unsigned long intenable = get_sr (intenable);
-	int i, mask;
-
-	/* Handle all interrupts (no priorities).
-	 * (Clear the interrupt before processing, in case it's
-	 *  edge-triggered or software-generated)
-	 */
+	static const unsigned int_level_mask[] = {
+		0,
+		XCHAL_INTLEVEL1_MASK,
+		XCHAL_INTLEVEL2_MASK,
+		XCHAL_INTLEVEL3_MASK,
+		XCHAL_INTLEVEL4_MASK,
+		XCHAL_INTLEVEL5_MASK,
+		XCHAL_INTLEVEL6_MASK,
+		XCHAL_INTLEVEL7_MASK,
+	};
+	unsigned level = get_sr(ps) & PS_INTLEVEL_MASK;
+
+	if (WARN_ON_ONCE(level >= ARRAY_SIZE(int_level_mask)))
+		return;
 
-	for (i=0, mask = 1; i < XCHAL_NUM_INTERRUPTS; i++, mask <<= 1) {
-		if (mask & (intread & intenable)) {
-			set_sr (mask, intclear);
-			do_IRQ (i,regs);
+	for (;;) {
+		unsigned intread = get_sr(interrupt);
+		unsigned intenable = get_sr(intenable);
+		unsigned int_at_level = intread & intenable &
+			int_level_mask[level];
+
+		if (!int_at_level)
+			return;
+
+		/*
+		 * Clear the interrupt before processing, in case it's
+		 *  edge-triggered or software-generated
+		 */
+		while (int_at_level) {
+			unsigned i = __ffs(int_at_level);
+			unsigned mask = 1 << i;
+
+			int_at_level ^= mask;
+			set_sr(mask, intclear);
+			do_IRQ(i, regs);
 		}
 	}
 }
@@ -392,26 +414,6 @@ static __always_inline unsigned long *stack_pointer(struct task_struct *task)
 	return sp;
 }
 
-static inline void spill_registers(void)
-{
-	unsigned int a0, ps;
-
-	__asm__ __volatile__ (
-		"movi	a14, " __stringify(PS_EXCM_BIT | 1) "\n\t"
-		"mov	a12, a0\n\t"
-		"rsr	a13, sar\n\t"
-		"xsr	a14, ps\n\t"
-		"movi	a0, _spill_registers\n\t"
-		"rsync\n\t"
-		"callx0 a0\n\t"
-		"mov	a0, a12\n\t"
-		"wsr	a13, sar\n\t"
-		"wsr	a14, ps\n\t"
-		:: "a" (&a0), "a" (&ps)
-		: "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15",
-		  "memory");
-}
-
 void show_trace(struct task_struct *task, unsigned long *sp)
 {
 	unsigned long a0, a1, pc;
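
The rewritten do_interrupt() only dispatches the pending, enabled IRQs whose priority equals the level the exception was taken at (PS.INTLEVEL); anything of higher priority stays deliverable while the handler runs. A small host-side sketch of that selection step, with made-up per-level masks standing in for the core's XCHAL_INTLEVELn_MASK constants:

#include <stdio.h>

/* Hypothetical per-level IRQ masks; a real core gets these from its
 * XCHAL_INTLEVELn_MASK configuration constants. */
static const unsigned int_level_mask[] = {
	0x00000000,	/* level 0: nothing     */
	0x000000ff,	/* level 1: IRQs 0..7   */
	0x00000f00,	/* level 2: IRQs 8..11  */
	0x0000f000,	/* level 3: IRQs 12..15 */
};

/* Which pending and enabled IRQs would be dispatched at this level? */
static unsigned irqs_at_level(unsigned level, unsigned pending, unsigned enabled)
{
	if (level >= sizeof(int_level_mask) / sizeof(int_level_mask[0]))
		return 0;
	return pending & enabled & int_level_mask[level];
}

int main(void)
{
	unsigned pending = 0x00000903;	/* IRQs 0, 1, 8 and 11 pending */
	unsigned enabled = 0xffffffff;

	printf("level 1 dispatches 0x%03x\n", irqs_at_level(1, pending, enabled));
	printf("level 2 dispatches 0x%03x\n", irqs_at_level(2, pending, enabled));
	return 0;
}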
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 68df35f66ce..82109b42e24 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -10,7 +10,7 @@
  * Public License.  See the file "COPYING" in the main directory of
  * this archive for more details.
  *
- * Copyright (C) 2005 Tensilica, Inc.
+ * Copyright (C) 2005 - 2008 Tensilica, Inc.
  *
  * Chris Zankel <chris@zankel.net>
  *
@@ -366,6 +366,41 @@ ENTRY(_DebugInterruptVector)
 
 ENDPROC(_DebugInterruptVector)
 
+
+/*
+ * Medium priority level interrupt vectors
+ *
+ * Each takes less than 16 (0x10) bytes, no literals, by placing
+ * the extra 8 bytes that would otherwise be required in the window
+ * vectors area where there is space.  With relocatable vectors,
+ * all vectors are within ~ 4 kB range of each other, so we can
+ * simply jump (J) to another vector without having to use JX.
+ *
+ * common_exception code gets current IRQ level in PS.INTLEVEL
+ * and preserves it for the IRQ handling time.
+ */
+
+	.macro	irq_entry_level level
+
+	.if	XCHAL_EXCM_LEVEL >= \level
+	.section .Level\level\()InterruptVector.text, "ax"
+ENTRY(_Level\level\()InterruptVector)
+	wsr	a0, epc1
+	rsr	a0, epc\level
+	xsr	a0, epc1
+	# branch to user or kernel vector
+	j	_SimulateUserKernelVectorException
+	.endif
+
+	.endm
+
+	irq_entry_level 2
+	irq_entry_level 3
+	irq_entry_level 4
+	irq_entry_level 5
+	irq_entry_level 6
+
 
 /* Window overflow and underflow handlers.
  * The handlers must be 64 bytes apart, first starting with the underflow
  * handlers underflow-4 to underflow-12, then the overflow handlers
@@ -396,6 +431,26 @@ ENTRY_ALIGN64(_WindowOverflow4)
 
 ENDPROC(_WindowOverflow4)
 
+#if XCHAL_EXCM_LEVEL >= 2
+	/* Not a window vector - but a convenient location
+	 * (where we know there's space) for continuation of
+	 * medium priority interrupt dispatch code.
+	 * On entry here, a0 contains PS, and EPC2 contains saved a0:
+	 */
+	.align 4
+_SimulateUserKernelVectorException:
+	wsr	a0, excsave2
+	movi	a0, 4			# LEVEL1_INTERRUPT cause
+	wsr	a0, exccause
+	rsr	a0, ps
+	bbsi.l	a0, PS_UM_BIT, 1f	# branch if user mode
+	rsr	a0, excsave2		# restore a0
+	j	_KernelExceptionVector	# simulate kernel vector exception
+1:	rsr	a0, excsave2		# restore a0
+	j	_UserExceptionVector	# simulate user vector exception
+#endif
+
+
 /* 4-Register Window Underflow Vector (Handler) */
 
 ENTRY_ALIGN64(_WindowUnderflow4)
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 255154f820b..14695240536 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -7,7 +7,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
  *
  * Chris Zankel <chris@zankel.net>
  * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
@@ -134,6 +134,26 @@ SECTIONS
 
     RELOCATE_ENTRY(_WindowVectors_text,
 		   .WindowVectors.text);
+#if XCHAL_EXCM_LEVEL >= 2
+    RELOCATE_ENTRY(_Level2InterruptVector_text,
+		   .Level2InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+    RELOCATE_ENTRY(_Level3InterruptVector_text,
+		   .Level3InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+    RELOCATE_ENTRY(_Level4InterruptVector_text,
+		   .Level4InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+    RELOCATE_ENTRY(_Level5InterruptVector_text,
+		   .Level5InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+    RELOCATE_ENTRY(_Level6InterruptVector_text,
+		   .Level6InterruptVector.text);
+#endif
     RELOCATE_ENTRY(_KernelExceptionVector_text,
 		   .KernelExceptionVector.text);
     RELOCATE_ENTRY(_UserExceptionVector_text,
@@ -177,11 +197,53 @@ SECTIONS
 		  XCHAL_DEBUG_VECTOR_VADDR,
 		  4,
 		  .DebugInterruptVector.literal)
 
+#undef LAST
+#define LAST	.DebugInterruptVector.text
+#if XCHAL_EXCM_LEVEL >= 2
+  SECTION_VECTOR (_Level2InterruptVector_text,
+		  .Level2InterruptVector.text,
+		  XCHAL_INTLEVEL2_VECTOR_VADDR,
+		  SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST	.Level2InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+  SECTION_VECTOR (_Level3InterruptVector_text,
+		  .Level3InterruptVector.text,
+		  XCHAL_INTLEVEL3_VECTOR_VADDR,
+		  SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST	.Level3InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+  SECTION_VECTOR (_Level4InterruptVector_text,
+		  .Level4InterruptVector.text,
+		  XCHAL_INTLEVEL4_VECTOR_VADDR,
+		  SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST	.Level4InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+  SECTION_VECTOR (_Level5InterruptVector_text,
+		  .Level5InterruptVector.text,
+		  XCHAL_INTLEVEL5_VECTOR_VADDR,
+		  SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST	.Level5InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+  SECTION_VECTOR (_Level6InterruptVector_text,
+		  .Level6InterruptVector.text,
+		  XCHAL_INTLEVEL6_VECTOR_VADDR,
+		  SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST	.Level6InterruptVector.text
+#endif
   SECTION_VECTOR (_KernelExceptionVector_literal,
 		  .KernelExceptionVector.literal,
 		  XCHAL_KERNEL_VECTOR_VADDR - 4,
-		  SIZEOF(.DebugInterruptVector.text),
-		  .DebugInterruptVector.text)
+		  SIZEOF(LAST), LAST)
+#undef LAST
 
   SECTION_VECTOR (_KernelExceptionVector_text,
 		  .KernelExceptionVector.text,
 		  XCHAL_KERNEL_VECTOR_VADDR,