author | Jiri Kosina <jkosina@suse.cz> | 2010-06-16 18:08:13 +0200
---|---|---
committer | Jiri Kosina <jkosina@suse.cz> | 2010-06-16 18:08:13 +0200
commit | f1bbbb6912662b9f6070c5bfc4ca9eb1f06a9d5b | (patch)
tree | c2c130a74be25b0b2dff992e1a195e2728bdaadd /arch/parisc |
parent | fd0961ff67727482bb20ca7e8ea97b83e9de2ddb | (diff)
parent | 7e27d6e778cd87b6f2415515d7127eba53fe5d02 | (diff)
Merge branch 'master' into for-next
Diffstat (limited to 'arch/parisc')
-rw-r--r-- | arch/parisc/Kconfig | 3
-rw-r--r-- | arch/parisc/include/asm/bug.h | 8
-rw-r--r-- | arch/parisc/include/asm/cache.h | 2
-rw-r--r-- | arch/parisc/include/asm/cacheflush.h | 16
-rw-r--r-- | arch/parisc/include/asm/scatterlist.h | 20
-rw-r--r-- | arch/parisc/include/asm/system.h | 2
-rw-r--r-- | arch/parisc/kernel/asm-offsets.c | 15
-rw-r--r-- | arch/parisc/kernel/entry.S | 52
-rw-r--r-- | arch/parisc/kernel/head.S | 2
-rw-r--r-- | arch/parisc/kernel/init_task.c | 6
-rw-r--r-- | arch/parisc/kernel/syscall.S | 32
-rw-r--r-- | arch/parisc/kernel/vmlinux.lds.S | 12
-rw-r--r-- | arch/parisc/math-emu/decode_exc.c | 1
-rw-r--r-- | arch/parisc/mm/fault.c | 7
14 files changed, 68 insertions, 110 deletions
```diff
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 9c4da3d63bf..05a366a5c4d 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -98,6 +98,9 @@ config STACKTRACE_SUPPORT
 config NEED_DMA_MAP_STATE
 	def_bool y
 
+config NEED_SG_DMA_LENGTH
+	def_bool y
+
 config ISA_DMA_API
 	bool
 
diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
index 75e46c557a1..72cfdb0cfdd 100644
--- a/arch/parisc/include/asm/bug.h
+++ b/arch/parisc/include/asm/bug.h
@@ -44,7 +44,7 @@
 #endif
 
 #ifdef CONFIG_DEBUG_BUGVERBOSE
-#define __WARN()						\
+#define __WARN_TAINT(taint)					\
 	do {							\
 		asm volatile("\n"				\
 			     "1:\t" PARISC_BUG_BREAK_ASM "\n"	\
@@ -54,11 +54,11 @@
 			     "\t.org 2b+%c3\n"			\
 			     "\t.popsection"			\
 			     : : "i" (__FILE__), "i" (__LINE__),\
-			     "i" (BUGFLAG_WARNING),		\
+			     "i" (BUGFLAG_TAINT(taint)),	\
 			     "i" (sizeof(struct bug_entry)) );	\
 	} while(0)
 #else
-#define __WARN()						\
+#define __WARN_TAINT(taint)					\
 	do {							\
 		asm volatile("\n"				\
 			     "1:\t" PARISC_BUG_BREAK_ASM "\n"	\
@@ -67,7 +67,7 @@
 			     "\t.short %c0\n"			\
 			     "\t.org 2b+%c1\n"			\
 			     "\t.popsection"			\
-			     : : "i" (BUGFLAG_WARNING),		\
+			     : : "i" (BUGFLAG_TAINT(taint)),	\
 			     "i" (sizeof(struct bug_entry)) );	\
 	} while(0)
 #endif
 
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
index 32c2cca7434..45effe6978f 100644
--- a/arch/parisc/include/asm/cache.h
+++ b/arch/parisc/include/asm/cache.h
@@ -28,7 +28,7 @@
 
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
 
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
 void parisc_cache_init(void);	/* initializes cache-flushing */
 void disable_sr_hashing_asm(int); /* low level support for above */
 
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 477277739da..4556d820128 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -2,6 +2,7 @@
 #define _PARISC_CACHEFLUSH_H
 
 #include <linux/mm.h>
+#include <linux/uaccess.h>
 
 /* The usual comment is "Caches aren't brain-dead on the <architecture>".
  * Unfortunately, that doesn't apply to PA-RISC. */
@@ -125,11 +126,20 @@ static inline void *kmap(struct page *page)
 
 #define kunmap(page)			kunmap_parisc(page_address(page))
 
-#define kmap_atomic(page, idx)		page_address(page)
+static inline void *kmap_atomic(struct page *page, enum km_type idx)
+{
+	pagefault_disable();
+	return page_address(page);
+}
 
-#define kunmap_atomic(addr, idx)	kunmap_parisc(addr)
+static inline void kunmap_atomic(void *addr, enum km_type idx)
+{
+	kunmap_parisc(addr);
+	pagefault_enable();
+}
 
-#define kmap_atomic_pfn(pfn, idx)	page_address(pfn_to_page(pfn))
+#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)
+#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
 #define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
 #endif
```
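The cacheflush.h hunk above is worth pausing on: kmap_atomic() stops being a bare page_address() alias and starts disabling pagefaults, which is what generic code expects from an atomic mapping. A minimal caller sketch against this 2.6.3x-era API — the function and its page arguments are hypothetical, while KM_USER0/KM_USER1 are the standard km_type slots of that era:

```c
#include <linux/highmem.h>

/* Copy one word between two pages using atomic kmaps.  PA-RISC has no
 * highmem, so kmap_atomic() is effectively page_address() -- but the
 * pagefault_disable()/pagefault_enable() pair added by this patch is
 * what makes the section genuinely atomic for generic code (a fault
 * inside it must take the fixup path rather than sleep). */
static void copy_word_between_pages(struct page *dst, struct page *src)
{
	unsigned long *d = kmap_atomic(dst, KM_USER0);	/* disables pagefaults */
	unsigned long *s = kmap_atomic(src, KM_USER1);

	*d = *s;

	kunmap_atomic(s, KM_USER1);	/* unmap in reverse order */
	kunmap_atomic(d, KM_USER0);	/* re-enables pagefaults */
}
```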
```diff
diff --git a/arch/parisc/include/asm/scatterlist.h b/arch/parisc/include/asm/scatterlist.h
index 62269b31ebf..2c3b79b54b2 100644
--- a/arch/parisc/include/asm/scatterlist.h
+++ b/arch/parisc/include/asm/scatterlist.h
@@ -3,25 +3,9 @@
 
 #include <asm/page.h>
 #include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long sg_magic;
-#endif
-	unsigned long page_link;
-	unsigned int offset;
-
-	unsigned int length;
-
-	/* an IOVA can be 64-bits on some PA-Risc platforms. */
-	dma_addr_t iova;	/* I/O Virtual Address */
-	__u32      iova_length; /* bytes mapped */
-};
-
-#define sg_virt_addr(sg) ((unsigned long)sg_virt(sg))
-#define sg_dma_address(sg) ((sg)->iova)
-#define sg_dma_len(sg)     ((sg)->iova_length)
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD (~0UL)
+#define sg_virt_addr(sg) ((unsigned long)sg_virt(sg))
 
 #endif /* _ASM_PARISC_SCATTERLIST_H */
 
diff --git a/arch/parisc/include/asm/system.h b/arch/parisc/include/asm/system.h
index 4653c77bf9d..2ab4af58ecb 100644
--- a/arch/parisc/include/asm/system.h
+++ b/arch/parisc/include/asm/system.h
@@ -174,7 +174,7 @@ static inline void set_eiem(unsigned long val)
 })
 
 #ifdef CONFIG_SMP
-# define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
+# define __lock_aligned __attribute__((__section__(".data..lock_aligned")))
 #endif
 
 #define arch_align_stack(x) (x)
 
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index ec787b411e9..dcd55103a4b 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -45,8 +45,12 @@
 #else
 #define FRAME_SIZE	64
 #endif
+#define FRAME_ALIGN	64
 
-#define align(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
+/* Add FRAME_SIZE to the size x and align it to y. All definitions
+ * that use align_frame will include space for a frame.
+ */
+#define align_frame(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
 
 int main(void)
 {
@@ -146,7 +150,8 @@ int main(void)
 	DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior));
 	BLANK();
 	DEFINE(TASK_SZ, sizeof(struct task_struct));
-	DEFINE(TASK_SZ_ALGN, align(sizeof(struct task_struct), 64));
+	/* TASK_SZ_ALGN includes space for a stack frame. */
+	DEFINE(TASK_SZ_ALGN, align_frame(sizeof(struct task_struct), FRAME_ALIGN));
 	BLANK();
 	DEFINE(PT_PSW, offsetof(struct pt_regs, gr[ 0]));
 	DEFINE(PT_GR1, offsetof(struct pt_regs, gr[ 1]));
@@ -233,7 +238,8 @@ int main(void)
 	DEFINE(PT_ISR, offsetof(struct pt_regs, isr));
 	DEFINE(PT_IOR, offsetof(struct pt_regs, ior));
 	DEFINE(PT_SIZE, sizeof(struct pt_regs));
-	DEFINE(PT_SZ_ALGN, align(sizeof(struct pt_regs), 64));
+	/* PT_SZ_ALGN includes space for a stack frame. */
+	DEFINE(PT_SZ_ALGN, align_frame(sizeof(struct pt_regs), FRAME_ALIGN));
 	BLANK();
 	DEFINE(TI_TASK, offsetof(struct thread_info, task));
 	DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
@@ -242,7 +248,8 @@ int main(void)
 	DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit));
 	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
 	DEFINE(THREAD_SZ, sizeof(struct thread_info));
-	DEFINE(THREAD_SZ_ALGN, align(sizeof(struct thread_info), 64));
+	/* THREAD_SZ_ALGN includes space for a stack frame. */
+	DEFINE(THREAD_SZ_ALGN, align_frame(sizeof(struct thread_info), FRAME_ALIGN));
 	BLANK();
 	DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
 	DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
```
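The align_frame() macro above replaces a misleadingly named align(): the result is not "x aligned to y" but "x plus a FRAME_SIZE stack frame, rounded up to a multiple of y". A standalone sanity check of the arithmetic, using a made-up 420-byte structure size (FRAME_SIZE is 64 on 32-bit parisc, per the hunk):

```c
#include <assert.h>

#define FRAME_SIZE  64
#define FRAME_ALIGN 64
#define align_frame(x, y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))

int main(void)
{
	/* 420 + 64 = 484 bytes needed; the next multiple of 64 is 512. */
	assert(align_frame(420, FRAME_ALIGN) == 512);
	/* The result is aligned and always leaves room for one frame. */
	assert(align_frame(420, FRAME_ALIGN) % FRAME_ALIGN == 0);
	assert(align_frame(420, FRAME_ALIGN) >= 420 + FRAME_SIZE);
	return 0;
}
```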
```diff
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 3a44f7f704f..6337adef30f 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -364,32 +364,6 @@
 	.align		32
 	.endm
 
-	/* The following are simple 32 vs 64 bit instruction
-	 * abstractions for the macros */
-	.macro		EXTR	reg1,start,length,reg2
-#ifdef CONFIG_64BIT
-	extrd,u		\reg1,32+(\start),\length,\reg2
-#else
-	extrw,u		\reg1,\start,\length,\reg2
-#endif
-	.endm
-
-	.macro		DEP	reg1,start,length,reg2
-#ifdef CONFIG_64BIT
-	depd		\reg1,32+(\start),\length,\reg2
-#else
-	depw		\reg1,\start,\length,\reg2
-#endif
-	.endm
-
-	.macro		DEPI	val,start,length,reg
-#ifdef CONFIG_64BIT
-	depdi		\val,32+(\start),\length,\reg
-#else
-	depwi		\val,\start,\length,\reg
-#endif
-	.endm
-
 	/* In LP64, the space contains part of the upper 32 bits of the
 	 * fault.  We have to extract this and place it in the va,
 	 * zeroing the corresponding bits in the space register */
@@ -442,19 +416,19 @@
 	 */
 	.macro		L2_ptep	pmd,pte,index,va,fault
 #if PT_NLEVELS == 3
-	EXTR		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
+	extru		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
 #else
-	EXTR		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
+	extru		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
 #endif
-	DEP		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
+	dep		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
 	copy		%r0,\pte
 	ldw,s		\index(\pmd),\pmd
 	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
-	DEP		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
+	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
 	copy		\pmd,%r9
 	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
-	EXTR		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
-	DEP		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
+	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
+	dep		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
 	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
 	LDREG		%r0(\pmd),\pte		/* pmd is now pte */
 	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
@@ -605,7 +579,7 @@
 	depdi		0,31,32,\tmp
 #endif
 	copy		\va,\tmp1
-	DEPI		0,31,23,\tmp1
+	depi		0,31,23,\tmp1
 	cmpb,COND(<>),n	\tmp,\tmp1,\fault
 	ldi		(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
 	depd,z		\prot,8,7,\prot
@@ -997,13 +971,6 @@ intr_restore:
 
 	rfi
 	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
 
 #ifndef CONFIG_PREEMPT
 # define intr_do_preempt	intr_restore
@@ -2076,9 +2043,10 @@ syscall_restore:
 	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */
 
 	/* NOTE: We use rsm/ssm pair to make this operation atomic */
+	LDREG	TASK_PT_GR30(%r1),%r1		   /* Get user sp */
 	rsm     PSW_SM_I, %r0
-	LDREG   TASK_PT_GR30(%r1),%r30             /* restore user sp */
-	mfsp	%sr3,%r1			   /* Get users space id */
+	copy    %r1,%r30                           /* Restore user sp */
+	mfsp	%sr3,%r1			   /* Get user space id */
 	mtsp    %r1,%sr7                           /* Restore sr7 */
 	ssm     PSW_SM_I, %r0
 
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 0e3d9f9b9e3..4dbdf0ed6fa 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -345,7 +345,7 @@ smp_slave_stext:
 ENDPROC(stext)
 
 #ifndef CONFIG_64BIT
-	.section .data.read_mostly
+	.section .data..read_mostly
 
 	.align	4
 	.export	$global$,data
 
diff --git a/arch/parisc/kernel/init_task.c b/arch/parisc/kernel/init_task.c
index d020eae6525..4a91e433416 100644
--- a/arch/parisc/kernel/init_task.c
+++ b/arch/parisc/kernel/init_task.c
@@ -53,11 +53,11 @@ union thread_union init_thread_union __init_task_data
  * guarantee that global objects will be laid out in memory in the same order
  * as the order of declaration, so put these in different sections and use
  * the linker script to order them. */
-pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data.vm0.pmd"), aligned(PAGE_SIZE)));
+pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
 #endif
 
-pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data.vm0.pgd"), aligned(PAGE_SIZE)));
-pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data.vm0.pte"), aligned(PAGE_SIZE)));
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
+pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));
 
 /*
  * Initial task structure.
```
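A pattern running through this merge (cache.h and system.h above, head.S and init_task.c here, vmlinux.lds.S below) is the rename of kernel-special data sections from .data.foo to .data..foo. The motivation, from the tree-wide cleanup this merge pulls in, is that gcc's -ffunction-sections/-fdata-sections emit compiler-generated sections named .data.<symbol>, which can collide with hand-named kernel sections; the double dot moves them into a namespace the compiler never produces. An illustrative use of one such marker (the variable is hypothetical, not part of this patch):

```c
#include <linux/cache.h>

/* Data that is read often but written rarely lands in the renamed
 * .data..read_mostly section, so it shares cache lines only with
 * other read-mostly variables.  Hypothetical example variable. */
static unsigned long parisc_cache_stride __read_mostly = 64;
```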
```diff
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index f5f96021caa..68e75ce838d 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -47,18 +47,17 @@ ENTRY(linux_gateway_page)
 	KILL_INSN
 	.endr
 
-	/* ADDRESS 0xb0 to 0xb4, lws uses 1 insns for entry */
+	/* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
 	/* Light-weight-syscall entry must always be located at 0xb0 */
 	/* WARNING: Keep this number updated with table size changes */
 #define __NR_lws_entries (2)
 
 lws_entry:
-	/* Unconditional branch to lws_start, located on the
-	   same gateway page */
-	b,n	lws_start
+	gate	lws_start, %r0		/* increase privilege */
+	depi	3, 31, 2, %r31		/* Ensure we return into user mode. */
 
-	/* Fill from 0xb4 to 0xe0 */
-	.rept 11
+	/* Fill from 0xb8 to 0xe0 */
+	.rept 10
 	KILL_INSN
 	.endr
 
@@ -423,9 +422,6 @@ tracesys_sigexit:
 
 	*********************************************************/
 lws_start:
-	/* Gate and ensure we return to userspace */
-	gate	.+8, %r0
-	depi	3, 31, 2, %r31	/* Ensure we return to userspace */
 
 #ifdef CONFIG_64BIT
 	/* FIXME: If we are a 64-bit kernel just
@@ -442,7 +438,7 @@ lws_start:
 #endif
 
 	/* Is the lws entry number valid? */
-	comiclr,>>=	__NR_lws_entries, %r20, %r0
+	comiclr,>>	__NR_lws_entries, %r20, %r0
 	b,n	lws_exit_nosys
 
 	/* WARNING: Trashing sr2 and sr3 */
@@ -473,7 +469,7 @@ lws_exit:
 	/* now reset the lowest bit of sp if it was set */
 	xor	%r30,%r1,%r30
 #endif
-	be,n	0(%sr3, %r31)
+	be,n	0(%sr7, %r31)
 
@@ -529,7 +525,6 @@ lws_compare_and_swap32:
 #endif
 
 lws_compare_and_swap:
-#ifdef CONFIG_SMP
 	/* Load start of lock table */
 	ldil	L%lws_lock_start, %r20
 	ldo	R%lws_lock_start(%r20), %r28
@@ -572,8 +567,6 @@ cas_wouldblock:
 	ldo	2(%r0), %r28			/* 2nd case */
 	b	lws_exit			/* Contended... */
 	ldo	-EAGAIN(%r0), %r21		/* Spin in userspace */
-#endif
-/* CONFIG_SMP */
 
 	/*
 		prev = *addr;
@@ -601,13 +594,11 @@ cas_action:
 1:	ldw	0(%sr3,%r26), %r28
 	sub,<>	%r28, %r25, %r0
 2:	stw	%r24, 0(%sr3,%r26)
-#ifdef CONFIG_SMP
 	/* Free lock */
 	stw	%r20, 0(%sr2,%r20)
-# if ENABLE_LWS_DEBUG
+#if ENABLE_LWS_DEBUG
 	/* Clear thread register indicator */
 	stw	%r0, 4(%sr2,%r20)
-# endif
 #endif
 	/* Return to userspace, set no error */
 	b	lws_exit
@@ -615,12 +606,10 @@ cas_action:
 
 3:
 	/* Error occured on load or store */
-#ifdef CONFIG_SMP
 	/* Free lock */
 	stw	%r20, 0(%sr2,%r20)
-# if ENABLE_LWS_DEBUG
+#if ENABLE_LWS_DEBUG
 	stw	%r0, 4(%sr2,%r20)
-# endif
 #endif
 	b	lws_exit
 	ldo	-EFAULT(%r0),%r21	/* set errno */
@@ -672,7 +661,6 @@ ENTRY(sys_call_table64)
 END(sys_call_table64)
 #endif
 
-#ifdef CONFIG_SMP
 	/*
 		All light-weight-syscall atomic operations
 		will use this set of locks
@@ -694,8 +682,6 @@ ENTRY(lws_lock_start)
 	.endr
 END(lws_lock_start)
 	.previous
-#endif
-/* CONFIG_SMP for lws_lock_start */
 
 .end
```
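Two of the syscall.S fixes are easy to miss at the assembly level. First, the gate instruction now sits at the fixed 0xb0 entry point itself, so privilege promotion happens on entry instead of after a branch to a later gate. Second, the entry-number check loses an off-by-one: comiclr,>>= also treated entry number __NR_lws_entries itself as valid. A C analogue of that comparison fix (a made-up harness, just to show the boundary):

```c
#include <stdio.h>

#define __NR_lws_entries 2	/* valid entries are 0 and 1 */

/* comiclr,>>= nullified the error branch when entries >= num ... */
static int old_is_valid(unsigned int num) { return __NR_lws_entries >= num; }
/* ... while comiclr,>> does so only when entries > num, rejecting 2. */
static int new_is_valid(unsigned int num) { return __NR_lws_entries > num; }

int main(void)
{
	for (unsigned int num = 0; num <= 2; num++)
		printf("lws entry %u: old=%s new=%s\n", num,
		       old_is_valid(num) ? "accepted" : "rejected",
		       new_is_valid(num) ? "accepted" : "rejected");
	return 0;	/* entry 2: old=accepted (bug), new=rejected */
}
```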
```diff
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 9dab4a4e09f..d64a6bbec2a 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -94,8 +94,8 @@ SECTIONS
 
 	/* PA-RISC locks requires 16-byte alignment */
 	. = ALIGN(16);
-	.data.lock_aligned : {
-		*(.data.lock_aligned)
+	.data..lock_aligned : {
+		*(.data..lock_aligned)
 	}
 
 	/* End of data section */
@@ -105,10 +105,10 @@ SECTIONS
 	__bss_start = .;
 	/* page table entries need to be PAGE_SIZE aligned */
 	. = ALIGN(PAGE_SIZE);
-	.data.vmpages : {
-		*(.data.vm0.pmd)
-		*(.data.vm0.pgd)
-		*(.data.vm0.pte)
+	.data..vmpages : {
+		*(.data..vm0.pmd)
+		*(.data..vm0.pgd)
+		*(.data..vm0.pte)
 	}
 	.bss : {
 		*(.bss)
 
diff --git a/arch/parisc/math-emu/decode_exc.c b/arch/parisc/math-emu/decode_exc.c
index 3ca1c614921..27a7492ddb0 100644
--- a/arch/parisc/math-emu/decode_exc.c
+++ b/arch/parisc/math-emu/decode_exc.c
@@ -342,6 +342,7 @@ decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[])
 		return SIGNALCODE(SIGFPE, FPE_FLTINV);
 	  case DIVISIONBYZEROEXCEPTION:
 		update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
+		Clear_excp_register(exception_index);
 		return SIGNALCODE(SIGFPE, FPE_FLTDIV);
 	  case INEXACTEXCEPTION:
 		update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
 
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index c6afbfc9577..18162ce4261 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -264,8 +264,7 @@ no_context:
 
   out_of_memory:
 	up_read(&mm->mmap_sem);
-	printk(KERN_CRIT "VM: killing process %s\n", current->comm);
-	if (user_mode(regs))
-		do_group_exit(SIGKILL);
-	goto no_context;
+	if (!user_mode(regs))
+		goto no_context;
+	pagefault_out_of_memory();
 }
```
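Finally, the fault.c hunk drops the arch-private "VM: killing process" path in favor of the generic helper. A paraphrase of the resulting control flow, wrapped in a function so it compiles standalone in concept (identifiers simplified; no_context() is a hypothetical stand-in for the goto target in the real code):

```c
#include <linux/mm.h>
#include <linux/sched.h>

static void parisc_fault_oom(struct pt_regs *regs, struct mm_struct *mm)
{
	up_read(&mm->mmap_sem);		/* never enter the OOM path holding mmap_sem */
	if (!user_mode(regs)) {
		/* Kernel-mode fault: still try exception fixup / oops. */
		no_context(regs);	/* hypothetical helper for the goto */
		return;
	}
	/*
	 * User-mode fault: defer to the generic OOM killer instead of
	 * unconditionally SIGKILLing the faulting task -- the victim
	 * it selects may be a different, larger process.
	 */
	pagefault_out_of_memory();
}
```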