author     Jiri Kosina <jkosina@suse.cz>    2010-06-16 18:08:13 +0200
committer  Jiri Kosina <jkosina@suse.cz>    2010-06-16 18:08:13 +0200
commit     f1bbbb6912662b9f6070c5bfc4ca9eb1f06a9d5b (patch)
tree       c2c130a74be25b0b2dff992e1a195e2728bdaadd /arch/parisc/kernel
parent     fd0961ff67727482bb20ca7e8ea97b83e9de2ddb (diff)
parent     7e27d6e778cd87b6f2415515d7127eba53fe5d02 (diff)
Merge branch 'master' into for-next
Diffstat (limited to 'arch/parisc/kernel')
-rw-r--r--   arch/parisc/kernel/asm-offsets.c | 15
-rw-r--r--   arch/parisc/kernel/entry.S       | 52
-rw-r--r--   arch/parisc/kernel/head.S        |  2
-rw-r--r--   arch/parisc/kernel/init_task.c   |  6
-rw-r--r--   arch/parisc/kernel/syscall.S     | 32
-rw-r--r--   arch/parisc/kernel/vmlinux.lds.S | 12
6 files changed, 40 insertions(+), 79 deletions(-)
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index ec787b411e9..dcd55103a4b 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -45,8 +45,12 @@
 #else
 #define FRAME_SIZE	64
 #endif
+#define FRAME_ALIGN	64
 
-#define align(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
+/* Add FRAME_SIZE to the size x and align it to y. All definitions
+ * that use align_frame will include space for a frame.
+ */
+#define align_frame(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
 
 int main(void)
 {
@@ -146,7 +150,8 @@ int main(void)
 	DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior));
 	BLANK();
 	DEFINE(TASK_SZ, sizeof(struct task_struct));
-	DEFINE(TASK_SZ_ALGN, align(sizeof(struct task_struct), 64));
+	/* TASK_SZ_ALGN includes space for a stack frame. */
+	DEFINE(TASK_SZ_ALGN, align_frame(sizeof(struct task_struct), FRAME_ALIGN));
 	BLANK();
 	DEFINE(PT_PSW, offsetof(struct pt_regs, gr[ 0]));
 	DEFINE(PT_GR1, offsetof(struct pt_regs, gr[ 1]));
@@ -233,7 +238,8 @@ int main(void)
 	DEFINE(PT_ISR, offsetof(struct pt_regs, isr));
 	DEFINE(PT_IOR, offsetof(struct pt_regs, ior));
 	DEFINE(PT_SIZE, sizeof(struct pt_regs));
-	DEFINE(PT_SZ_ALGN, align(sizeof(struct pt_regs), 64));
+	/* PT_SZ_ALGN includes space for a stack frame. */
+	DEFINE(PT_SZ_ALGN, align_frame(sizeof(struct pt_regs), FRAME_ALIGN));
 	BLANK();
 	DEFINE(TI_TASK, offsetof(struct thread_info, task));
 	DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
@@ -242,7 +248,8 @@ int main(void)
 	DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit));
 	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
 	DEFINE(THREAD_SZ, sizeof(struct thread_info));
-	DEFINE(THREAD_SZ_ALGN, align(sizeof(struct thread_info), 64));
+	/* THREAD_SZ_ALGN includes space for a stack frame. */
+	DEFINE(THREAD_SZ_ALGN, align_frame(sizeof(struct thread_info), FRAME_ALIGN));
 	BLANK();
 	DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
 	DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 3a44f7f704f..6337adef30f 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -364,32 +364,6 @@
 	.align		32
 	.endm
 
-	/* The following are simple 32 vs 64 bit instruction
-	 * abstractions for the macros */
-	.macro		EXTR	reg1,start,length,reg2
-#ifdef CONFIG_64BIT
-	extrd,u		\reg1,32+(\start),\length,\reg2
-#else
-	extrw,u		\reg1,\start,\length,\reg2
-#endif
-	.endm
-
-	.macro		DEP	reg1,start,length,reg2
-#ifdef CONFIG_64BIT
-	depd		\reg1,32+(\start),\length,\reg2
-#else
-	depw		\reg1,\start,\length,\reg2
-#endif
-	.endm
-
-	.macro		DEPI	val,start,length,reg
-#ifdef CONFIG_64BIT
-	depdi		\val,32+(\start),\length,\reg
-#else
-	depwi		\val,\start,\length,\reg
-#endif
-	.endm
-
 	/* In LP64, the space contains part of the upper 32 bits of the
 	 * fault.  We have to extract this and place it in the va,
 	 * zeroing the corresponding bits in the space register */
@@ -442,19 +416,19 @@
 	 */
 	.macro		L2_ptep	pmd,pte,index,va,fault
 #if PT_NLEVELS == 3
-	EXTR		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
+	extru		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
 #else
-	EXTR		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
+	extru		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
 #endif
-	DEP		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
+	dep		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
 	copy		%r0,\pte
 	ldw,s		\index(\pmd),\pmd
 	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
-	DEP		%r0,31,PxD_FLAG_SHIFT,\pmd	/* clear flags */
+	dep		%r0,31,PxD_FLAG_SHIFT,\pmd	/* clear flags */
 	copy		\pmd,%r9
 	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
-	EXTR		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
-	DEP		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
+	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
+	dep		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
 	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
 	LDREG		%r0(\pmd),\pte		/* pmd is now pte */
 	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
@@ -605,7 +579,7 @@
 	depdi		0,31,32,\tmp
 #endif
 	copy		\va,\tmp1
-	DEPI		0,31,23,\tmp1
+	depi		0,31,23,\tmp1
 	cmpb,COND(<>),n	\tmp,\tmp1,\fault
 	ldi		(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
 	depd,z		\prot,8,7,\prot
@@ -997,13 +971,6 @@ intr_restore:
 
 	rfi
 	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
 
 #ifndef CONFIG_PREEMPT
 # define intr_do_preempt	intr_restore
@@ -2076,9 +2043,10 @@ syscall_restore:
 	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */
 
 	/* NOTE: We use rsm/ssm pair to make this operation atomic */
+	LDREG	TASK_PT_GR30(%r1),%r1		   /* Get user sp */
 	rsm     PSW_SM_I, %r0
-	LDREG   TASK_PT_GR30(%r1),%r30             /* restore user sp */
-	mfsp	%sr3,%r1			   /* Get users space id */
+	copy    %r1,%r30			   /* Restore user sp */
+	mfsp	%sr3,%r1			   /* Get user space id */
 	mtsp    %r1,%sr7                           /* Restore sr7 */
 	ssm     PSW_SM_I, %r0
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 0e3d9f9b9e3..4dbdf0ed6fa 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -345,7 +345,7 @@ smp_slave_stext:
ENDPROC(stext)
 
 #ifndef CONFIG_64BIT
-	.section .data.read_mostly
+	.section .data..read_mostly
 
 	.align	4
 	.export	$global$,data
diff --git a/arch/parisc/kernel/init_task.c b/arch/parisc/kernel/init_task.c
index d020eae6525..4a91e433416 100644
--- a/arch/parisc/kernel/init_task.c
+++ b/arch/parisc/kernel/init_task.c
@@ -53,11 +53,11 @@ union thread_union init_thread_union __init_task_data
  * guarantee that global objects will be laid out in memory in the same order
  * as the order of declaration, so put these in different sections and use
  * the linker script to order them. */
-pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data.vm0.pmd"), aligned(PAGE_SIZE)));
+pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
 #endif
 
-pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data.vm0.pgd"), aligned(PAGE_SIZE)));
-pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data.vm0.pte"), aligned(PAGE_SIZE)));
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
+pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));
 
 /*
  * Initial task structure.
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index f5f96021caa..68e75ce838d 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -47,18 +47,17 @@ ENTRY(linux_gateway_page)
 	KILL_INSN
 	.endr
 
-	/* ADDRESS 0xb0 to 0xb4, lws uses 1 insns for entry */
+	/* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
 	/* Light-weight-syscall entry must always be located at 0xb0 */
 	/* WARNING: Keep this number updated with table size changes */
 #define __NR_lws_entries (2)
 
lws_entry:
-	/* Unconditional branch to lws_start, located on the
-	   same gateway page */
-	b,n	lws_start
+	gate	lws_start, %r0		/* increase privilege */
+	depi	3, 31, 2, %r31		/* Ensure we return into user mode. */
 
-	/* Fill from 0xb4 to 0xe0 */
-	.rept 11
+	/* Fill from 0xb8 to 0xe0 */
+	.rept 10
 	KILL_INSN
 	.endr
@@ -423,9 +422,6 @@ tracesys_sigexit:
 	*********************************************************/
 
lws_start:
-	/* Gate and ensure we return to userspace */
-	gate	.+8, %r0
-	depi	3, 31, 2, %r31	/* Ensure we return to userspace */
 
#ifdef CONFIG_64BIT
 	/* FIXME: If we are a 64-bit kernel just
@@ -442,7 +438,7 @@ lws_start:
#endif
 
	/* Is the lws entry number valid? */
-	comiclr,>>=	__NR_lws_entries, %r20, %r0
+	comiclr,>>	__NR_lws_entries, %r20, %r0
 	b,n	lws_exit_nosys
 
 	/* WARNING: Trashing sr2 and sr3 */
@@ -473,7 +469,7 @@ lws_exit:
 	/* now reset the lowest bit of sp if it was set */
 	xor	%r30,%r1,%r30
#endif
-	be,n	0(%sr3, %r31)
+	be,n	0(%sr7, %r31)
@@ -529,7 +525,6 @@ lws_compare_and_swap32:
#endif
 
lws_compare_and_swap:
-#ifdef CONFIG_SMP
 	/* Load start of lock table */
 	ldil	L%lws_lock_start, %r20
 	ldo	R%lws_lock_start(%r20), %r28
@@ -572,8 +567,6 @@ cas_wouldblock:
 	ldo	2(%r0), %r28			/* 2nd case */
 	b	lws_exit			/* Contended... */
 	ldo	-EAGAIN(%r0), %r21		/* Spin in userspace */
-#endif
-/* CONFIG_SMP */
 
 	/*
 		prev = *addr;
@@ -601,13 +594,11 @@ cas_action:
1:	ldw	0(%sr3,%r26), %r28
 	sub,<>	%r28, %r25, %r0
2:	stw	%r24, 0(%sr3,%r26)
-#ifdef CONFIG_SMP
 	/* Free lock */
 	stw	%r20, 0(%sr2,%r20)
-# if ENABLE_LWS_DEBUG
+#if ENABLE_LWS_DEBUG
 	/* Clear thread register indicator */
 	stw	%r0, 4(%sr2,%r20)
-# endif
#endif
 	/* Return to userspace, set no error */
 	b	lws_exit
@@ -615,12 +606,10 @@ cas_action:
 
3:
 	/* Error occured on load or store */
-#ifdef CONFIG_SMP
 	/* Free lock */
 	stw	%r20, 0(%sr2,%r20)
-# if ENABLE_LWS_DEBUG
+#if ENABLE_LWS_DEBUG
 	stw	%r0, 4(%sr2,%r20)
-# endif
#endif
 	b	lws_exit
 	ldo	-EFAULT(%r0),%r21	/* set errno */
@@ -672,7 +661,6 @@ ENTRY(sys_call_table64)
END(sys_call_table64)
#endif
 
-#ifdef CONFIG_SMP
 	/*
 		All light-weight-syscall atomic operations
 		will use this set of locks
@@ -694,8 +682,6 @@ ENTRY(lws_lock_start)
 	.endr
END(lws_lock_start)
 	.previous
-#endif
-/* CONFIG_SMP for lws_lock_start */
 
.end
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 9dab4a4e09f..d64a6bbec2a 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -94,8 +94,8 @@ SECTIONS
 
 	/* PA-RISC locks requires 16-byte alignment */
 	. = ALIGN(16);
-	.data.lock_aligned : {
-		*(.data.lock_aligned)
+	.data..lock_aligned : {
+		*(.data..lock_aligned)
 	}
 
 	/* End of data section */
@@ -105,10 +105,10 @@ SECTIONS
 	__bss_start = .;
 	/* page table entries need to be PAGE_SIZE aligned */
 	. = ALIGN(PAGE_SIZE);
-	.data.vmpages : {
-		*(.data.vm0.pmd)
-		*(.data.vm0.pgd)
-		*(.data.vm0.pte)
+	.data..vmpages : {
+		*(.data..vm0.pmd)
+		*(.data..vm0.pgd)
+		*(.data..vm0.pte)
 	}
 	.bss : {
 		*(.bss)
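Note on the asm-offsets.c hunk above, since it is the subtle part of this merge: align_frame() rounds the structure size up to the requested alignment and then adds FRAME_SIZE on top, so each *_SZ_ALGN constant (TASK_SZ_ALGN, PT_SZ_ALGN, THREAD_SZ_ALGN) now reserves room for one stack frame, exactly as the new comments state. A minimal userspace sketch of the same arithmetic, assuming the 32-bit FRAME_SIZE of 64 shown in the hunk (the CONFIG_64BIT branch, not visible here, uses a larger frame); the sample sizes are hypothetical stand-ins for the structure sizes:

#include <assert.h>
#include <stdio.h>

/* Illustrative values from the 32-bit branch of asm-offsets.c. */
#define FRAME_SIZE	64
#define FRAME_ALIGN	64

/* The renamed macro: round x up to a multiple of y, then add
 * FRAME_SIZE so the result always includes space for one frame. */
#define align_frame(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))

int main(void)
{
	/* Hypothetical sizes standing in for sizeof(struct task_struct),
	 * sizeof(struct pt_regs), sizeof(struct thread_info). */
	unsigned long sizes[] = { 1, 63, 64, 100, 1000 };

	for (size_t i = 0; i < sizeof(sizes)/sizeof(sizes[0]); i++) {
		unsigned long a = align_frame(sizes[i], FRAME_ALIGN);

		/* Result is FRAME_ALIGN-aligned and leaves at least
		 * FRAME_SIZE bytes beyond the object itself. */
		assert(a % FRAME_ALIGN == 0);
		assert(a >= sizes[i] + FRAME_SIZE);
		printf("align_frame(%4lu, 64) = %lu\n", sizes[i], a);
	}
	return 0;
}

Since FRAME_SIZE is itself a multiple of FRAME_ALIGN, the expression reduces to round_up(x, 64) + FRAME_SIZE, which is what lets the assembly use these constants to place a stack pointer above a task or register area with frame space already accounted for.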