Diffstat (limited to 'arch/parisc'): 47 files changed, 392 insertions, 407 deletions
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 9038f39d9d7..524d9352f17 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -16,6 +16,9 @@ config PARISC select RTC_DRV_GENERIC select INIT_ALL_POSSIBLE select BUG + select HAVE_PERF_EVENTS + select GENERIC_ATOMIC64 if !64BIT + select HAVE_ARCH_TRACEHOOK help The PA-RISC microprocessor is designed by Hewlett-Packard and used in many of their workstations & servers (HP9000 700 and 800 series, diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index da6f66901c9..55cca1dac43 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -118,8 +118,8 @@ define archhelp @echo '* vmlinux - Uncompressed kernel image (./vmlinux)' @echo ' palo - Bootable image (./lifimage)' @echo ' install - Install kernel using' - @echo ' (your) ~/bin/installkernel or' - @echo ' (distribution) /sbin/installkernel or' + @echo ' (your) ~/bin/$(INSTALLKERNEL) or' + @echo ' (distribution) /sbin/$(INSTALLKERNEL) or' @echo ' copy to $$(INSTALL_PATH)' endef diff --git a/arch/parisc/include/asm/agp.h b/arch/parisc/include/asm/agp.h index 9651660da63..d226ffa8fc1 100644 --- a/arch/parisc/include/asm/agp.h +++ b/arch/parisc/include/asm/agp.h @@ -11,10 +11,6 @@ #define unmap_page_from_agp(page) /* nothing */ #define flush_agp_cache() mb() -/* Convert a physical address to an address suitable for the GART. */ -#define phys_to_gart(x) (x) -#define gart_to_phys(x) (x) - /* GATT allocation. Returns/accepts GATT kernel virtual address. */ #define alloc_gatt_pages(order) \ ((char *)__get_free_pages(GFP_KERNEL, (order))) diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 7eeaff94436..8bc9e96699b 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -222,13 +222,13 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) -#define atomic_add(i,v) ((void)(__atomic_add_return( ((int)(i)),(v)))) -#define atomic_sub(i,v) ((void)(__atomic_add_return(-((int)(i)),(v)))) +#define atomic_add(i,v) ((void)(__atomic_add_return( (i),(v)))) +#define atomic_sub(i,v) ((void)(__atomic_add_return(-(i),(v)))) #define atomic_inc(v) ((void)(__atomic_add_return( 1,(v)))) #define atomic_dec(v) ((void)(__atomic_add_return( -1,(v)))) -#define atomic_add_return(i,v) (__atomic_add_return( ((int)(i)),(v))) -#define atomic_sub_return(i,v) (__atomic_add_return(-((int)(i)),(v))) +#define atomic_add_return(i,v) (__atomic_add_return( (i),(v))) +#define atomic_sub_return(i,v) (__atomic_add_return(-(i),(v))) #define atomic_inc_return(v) (__atomic_add_return( 1,(v))) #define atomic_dec_return(v) (__atomic_add_return( -1,(v))) @@ -336,7 +336,11 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) -#endif /* CONFIG_64BIT */ +#else /* CONFIG_64BIT */ + +#include <asm-generic/atomic64.h> + +#endif /* !CONFIG_64BIT */ #include <asm-generic/atomic-long.h> diff --git a/arch/parisc/include/asm/dma.h b/arch/parisc/include/asm/dma.h index 31ad0f05af3..f7a18f96870 100644 --- a/arch/parisc/include/asm/dma.h +++ b/arch/parisc/include/asm/dma.h @@ -1,5 +1,4 @@ -/* $Id: dma.h,v 1.2 1999/04/27 00:46:18 deller Exp $ - * linux/include/asm/dma.h: Defines for using and allocating dma channels. +/* asm/dma.h: Defines for using and allocating dma channels. * Written by Hennus Bergman, 1992. * High DMA channel support & info by Hannu Savolainen * and John Boyd, Nov. 1992. 
diff --git a/arch/parisc/include/asm/fixmap.h b/arch/parisc/include/asm/fixmap.h index de3fe3a1822..6fec4d4a1a1 100644 --- a/arch/parisc/include/asm/fixmap.h +++ b/arch/parisc/include/asm/fixmap.h @@ -21,9 +21,9 @@ #define KERNEL_MAP_END (TMPALIAS_MAP_START) #ifndef __ASSEMBLY__ -extern void *vmalloc_start; +extern void *parisc_vmalloc_start; #define PCXL_DMA_MAP_SIZE (8*1024*1024) -#define VMALLOC_START ((unsigned long)vmalloc_start) +#define VMALLOC_START ((unsigned long)parisc_vmalloc_start) #define VMALLOC_END (KERNEL_MAP_END) #endif /*__ASSEMBLY__*/ diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h index ce93133d511..0d68184a76c 100644 --- a/arch/parisc/include/asm/hardirq.h +++ b/arch/parisc/include/asm/hardirq.h @@ -1,29 +1,11 @@ /* hardirq.h: PA-RISC hard IRQ support. * * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx> - * - * The locking is really quite interesting. There's a cpu-local - * count of how many interrupts are being handled, and a global - * lock. An interrupt can only be serviced if the global lock - * is free. You can't be sure no more interrupts are being - * serviced until you've acquired the lock and then checked - * all the per-cpu interrupt counts are all zero. It's a specialised - * br_lock, and that's exactly how Sparc does it. We don't because - * it's more locking for us. This way is lock-free in the interrupt path. */ #ifndef _PARISC_HARDIRQ_H #define _PARISC_HARDIRQ_H -#include <linux/threads.h> -#include <linux/irq.h> - -typedef struct { - unsigned long __softirq_pending; /* set_bit is used on this */ -} ____cacheline_aligned irq_cpustat_t; - -#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ - -void ack_bad_irq(unsigned int irq); +#include <asm-generic/hardirq.h> #endif /* _PARISC_HARDIRQ_H */ diff --git a/arch/parisc/include/asm/mman.h b/arch/parisc/include/asm/mman.h index defe752cc99..9749c8afe83 100644 --- a/arch/parisc/include/asm/mman.h +++ b/arch/parisc/include/asm/mman.h @@ -22,6 +22,8 @@ #define MAP_GROWSDOWN 0x8000 /* stack-like segment */ #define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x20000 /* do not block on IO */ +#define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x80000 /* create a huge page mapping */ #define MS_SYNC 1 /* synchronous memory sync */ #define MS_ASYNC 2 /* sync memory asynchronously */ @@ -54,6 +56,9 @@ #define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */ #define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */ +#define MADV_MERGEABLE 65 /* KSM may merge identical pages */ +#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */ + /* compatibility flags */ #define MAP_FILE 0 #define MAP_VARIABLE 0 diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h index 7d842d699df..64c7aa590ae 100644 --- a/arch/parisc/include/asm/pci.h +++ b/arch/parisc/include/asm/pci.h @@ -233,7 +233,6 @@ static inline void pcibios_register_hba(struct pci_hba_data *x) * rp7420/8420 boxes and then revisit this issue. 
*/ #define pcibios_assign_all_busses() (1) -#define pcibios_scan_all_fns(a, b) (0) #define PCIBIOS_MIN_IO 0x10 #define PCIBIOS_MIN_MEM 0x1000 /* NBPG - but pci/setup-res.c dies */ diff --git a/arch/parisc/include/asm/perf_event.h b/arch/parisc/include/asm/perf_event.h new file mode 100644 index 00000000000..cc146427d8f --- /dev/null +++ b/arch/parisc/include/asm/perf_event.h @@ -0,0 +1,7 @@ +#ifndef __ASM_PARISC_PERF_EVENT_H +#define __ASM_PARISC_PERF_EVENT_H + +/* parisc only supports software events through this interface. */ +static inline void set_perf_event_pending(void) { } + +#endif /* __ASM_PARISC_PERF_EVENT_H */ diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index 9d64df8754b..9ce66e9d1c2 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h @@ -18,6 +18,7 @@ #include <asm/types.h> #include <asm/system.h> #include <asm/percpu.h> + #endif /* __ASSEMBLY__ */ #define KERNEL_STACK_SIZE (4*PAGE_SIZE) @@ -127,6 +128,8 @@ struct thread_struct { unsigned long flags; }; +#define task_pt_regs(tsk) ((struct pt_regs *)&((tsk)->thread.regs)) + /* Thread struct flags. */ #define PARISC_UAC_NOPRINT (1UL << 0) /* see prctl and unaligned.c */ #define PARISC_UAC_SIGBUS (1UL << 1) diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h index 302f68dc889..aead40b16dd 100644 --- a/arch/parisc/include/asm/ptrace.h +++ b/arch/parisc/include/asm/ptrace.h @@ -59,8 +59,11 @@ void user_enable_block_step(struct task_struct *task); #define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0) #define user_space(regs) (((regs)->iasq[1] != 0) ? 1 : 0) #define instruction_pointer(regs) ((regs)->iaoq[0] & ~3) +#define user_stack_pointer(regs) ((regs)->gr[30]) unsigned long profile_pc(struct pt_regs *); extern void show_regs(struct pt_regs *); -#endif + + +#endif /* __KERNEL__ */ #endif diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h index 21eb45a5262..2e73623feb6 100644 --- a/arch/parisc/include/asm/smp.h +++ b/arch/parisc/include/asm/smp.h @@ -30,7 +30,6 @@ extern void smp_send_all_nop(void); extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); -#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask #endif /* !ASSEMBLY */ diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h index 885472bf7b7..960b1e5d8e1 100644 --- a/arch/parisc/include/asm/socket.h +++ b/arch/parisc/include/asm/socket.h @@ -24,6 +24,8 @@ #define SO_RCVTIMEO 0x1006 #define SO_ERROR 0x1007 #define SO_TYPE 0x1008 +#define SO_PROTOCOL 0x1028 +#define SO_DOMAIN 0x1029 #define SO_PEERNAME 0x2000 #define SO_NO_CHECK 0x400b diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h new file mode 100644 index 00000000000..8bdfd2c8c39 --- /dev/null +++ b/arch/parisc/include/asm/syscall.h @@ -0,0 +1,40 @@ +/* syscall.h */ + +#ifndef _ASM_PARISC_SYSCALL_H_ +#define _ASM_PARISC_SYSCALL_H_ + +#include <linux/err.h> +#include <asm/ptrace.h> + +static inline long syscall_get_nr(struct task_struct *tsk, + struct pt_regs *regs) +{ + return regs->gr[20]; +} + +static inline void syscall_get_arguments(struct task_struct *tsk, + struct pt_regs *regs, unsigned int i, + unsigned int n, unsigned long *args) +{ + BUG_ON(i); + + switch (n) { + case 6: + args[5] = regs->gr[21]; + case 5: + args[4] = regs->gr[22]; + case 4: + args[3] = regs->gr[23]; + case 3: + args[2] = regs->gr[24]; + case 2: + args[1] 
= regs->gr[25]; + case 1: + args[0] = regs->gr[26]; + break; + default: + BUG(); + } +} + +#endif /*_ASM_PARISC_SYSCALL_H_*/ diff --git a/arch/parisc/include/asm/system.h b/arch/parisc/include/asm/system.h index ee80c920b46..d91357bca5b 100644 --- a/arch/parisc/include/asm/system.h +++ b/arch/parisc/include/asm/system.h @@ -168,8 +168,8 @@ static inline void set_eiem(unsigned long val) /* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */ #define __ldcw(a) ({ \ unsigned __ret; \ - __asm__ __volatile__(__LDCW " 0(%1),%0" \ - : "=r" (__ret) : "r" (a)); \ + __asm__ __volatile__(__LDCW " 0(%2),%0" \ + : "=r" (__ret), "+m" (*(a)) : "r" (a)); \ __ret; \ }) diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index 0407959da48..7ecc1039cfe 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h @@ -23,7 +23,7 @@ struct thread_info { .flags = 0, \ .cpu = 0, \ .addr_limit = KERNEL_DS, \ - .preempt_count = 1, \ + .preempt_count = INIT_PREEMPT_COUNT, \ .restart_block = { \ .fn = do_no_restart_syscall \ } \ @@ -32,6 +32,11 @@ struct thread_info { #define init_thread_info (init_thread_union.thread_info) #define init_stack (init_thread_union.stack) +/* how to get the thread information struct from C */ +#define current_thread_info() ((struct thread_info *)mfctl(30)) + +#endif /* !__ASSEMBLY */ + /* thread information allocation */ #define THREAD_SIZE_ORDER 2 @@ -40,11 +45,6 @@ struct thread_info { #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) #define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER) -/* how to get the thread information struct from C */ -#define current_thread_info() ((struct thread_info *)mfctl(30)) - -#endif /* !__ASSEMBLY */ - #define PREEMPT_ACTIVE_BIT 28 #define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT) @@ -59,6 +59,9 @@ struct thread_info { #define TIF_MEMDIE 5 #define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */ #define TIF_FREEZE 7 /* is freezing for suspend */ +#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ +#define TIF_SINGLESTEP 9 /* single stepping? */ +#define TIF_BLOCKSTEP 10 /* branch stepping? 
*/ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) @@ -67,8 +70,11 @@ struct thread_info { #define _TIF_32BIT (1 << TIF_32BIT) #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) #define _TIF_FREEZE (1 << TIF_FREEZE) +#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) +#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) -#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | \ +#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) #endif /* __KERNEL__ */ diff --git a/arch/parisc/include/asm/tlb.h b/arch/parisc/include/asm/tlb.h index 383b1db310e..07924903989 100644 --- a/arch/parisc/include/asm/tlb.h +++ b/arch/parisc/include/asm/tlb.h @@ -21,7 +21,7 @@ do { if (!(tlb)->fullmm) \ #include <asm-generic/tlb.h> -#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd) -#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte) +#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) +#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte) #endif diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h index 1f6fd4fc05b..8f1a8100bf2 100644 --- a/arch/parisc/include/asm/tlbflush.h +++ b/arch/parisc/include/asm/tlbflush.h @@ -12,14 +12,12 @@ * N class systems, only one PxTLB inter processor broadcast can be * active at any one time on the Merced bus. This tlb purge * synchronisation is fairly lightweight and harmless so we activate - * it on all SMP systems not just the N class. We also need to have - * preemption disabled on uniprocessor machines, and spin_lock does that - * nicely. + * it on all systems not just the N class. */ extern spinlock_t pa_tlb_lock; -#define purge_tlb_start(x) spin_lock(&pa_tlb_lock) -#define purge_tlb_end(x) spin_unlock(&pa_tlb_lock) +#define purge_tlb_start(flags) spin_lock_irqsave(&pa_tlb_lock, flags) +#define purge_tlb_end(flags) spin_unlock_irqrestore(&pa_tlb_lock, flags) extern void flush_tlb_all(void); extern void flush_tlb_all_local(void *); @@ -63,14 +61,16 @@ static inline void flush_tlb_mm(struct mm_struct *mm) static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { + unsigned long flags; + /* For one page, it's not worth testing the split_tlb variable */ mb(); mtsp(vma->vm_mm->context,1); - purge_tlb_start(); + purge_tlb_start(flags); pdtlb(addr); pitlb(addr); - purge_tlb_end(); + purge_tlb_end(flags); } void __flush_tlb_range(unsigned long sid, diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h index ef26b009dc5..cda158318c6 100644 --- a/arch/parisc/include/asm/unistd.h +++ b/arch/parisc/include/asm/unistd.h @@ -807,8 +807,12 @@ #define __NR_dup3 (__NR_Linux + 312) #define __NR_pipe2 (__NR_Linux + 313) #define __NR_inotify_init1 (__NR_Linux + 314) +#define __NR_preadv (__NR_Linux + 315) +#define __NR_pwritev (__NR_Linux + 316) +#define __NR_rt_tgsigqueueinfo (__NR_Linux + 317) +#define __NR_perf_event_open (__NR_Linux + 318) -#define __NR_Linux_syscalls (__NR_inotify_init1 + 1) +#define __NR_Linux_syscalls (__NR_perf_event_open + 1) #define __IGNORE_select /* newselect */ diff --git a/arch/parisc/install.sh b/arch/parisc/install.sh index 9632b3e164c..e593fc8d58b 100644 --- a/arch/parisc/install.sh +++ b/arch/parisc/install.sh @@ -21,8 +21,8 @@ # User may have a custom install script -if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi -if [ -x /sbin/installkernel ]; then exec /sbin/installkernel 
"$@"; fi +if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi +if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi # Default install diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index 699cf8ef211..fcd3c707bf1 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c @@ -270,8 +270,8 @@ int main(void) DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count)); DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop)); BLANK(); - DEFINE(PA_BLOCKSTEP_BIT, 31-PT_BLOCKSTEP_BIT); - DEFINE(PA_SINGLESTEP_BIT, 31-PT_SINGLESTEP_BIT); + DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP); + DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP); BLANK(); DEFINE(ASM_PMD_SHIFT, PMD_SHIFT); DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT); diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 837530ea32e..b6ed34de14e 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -1,5 +1,4 @@ -/* $Id: cache.c,v 1.4 2000/01/25 00:11:38 prumpf Exp $ - * +/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. @@ -398,12 +397,13 @@ EXPORT_SYMBOL(flush_kernel_icache_range_asm); void clear_user_page_asm(void *page, unsigned long vaddr) { + unsigned long flags; /* This function is implemented in assembly in pacache.S */ extern void __clear_user_page_asm(void *page, unsigned long vaddr); - purge_tlb_start(); + purge_tlb_start(flags); __clear_user_page_asm(page, vaddr); - purge_tlb_end(); + purge_tlb_end(flags); } #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */ @@ -444,20 +444,24 @@ extern void clear_user_page_asm(void *page, unsigned long vaddr); void clear_user_page(void *page, unsigned long vaddr, struct page *pg) { + unsigned long flags; + purge_kernel_dcache_page((unsigned long)page); - purge_tlb_start(); + purge_tlb_start(flags); pdtlb_kernel(page); - purge_tlb_end(); + purge_tlb_end(flags); clear_user_page_asm(page, vaddr); } EXPORT_SYMBOL(clear_user_page); void flush_kernel_dcache_page_addr(void *addr) { + unsigned long flags; + flush_kernel_dcache_page_asm(addr); - purge_tlb_start(); + purge_tlb_start(flags); pdtlb_kernel(addr); - purge_tlb_end(); + purge_tlb_end(flags); } EXPORT_SYMBOL(flush_kernel_dcache_page_addr); @@ -490,8 +494,10 @@ void __flush_tlb_range(unsigned long sid, unsigned long start, if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */ flush_tlb_all(); else { + unsigned long flags; + mtsp(sid, 1); - purge_tlb_start(); + purge_tlb_start(flags); if (split_tlb) { while (npages--) { pdtlb(start); @@ -504,7 +510,7 @@ void __flush_tlb_range(unsigned long sid, unsigned long start, start += PAGE_SIZE; } } - purge_tlb_end(); + purge_tlb_end(flags); } } diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index ae3e70cd1e1..3a44f7f704f 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -553,7 +553,7 @@ * on most of those machines only handles cache transactions. 
*/ extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0 - depi 1,12,1,\prot + depdi 1,12,1,\prot /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ convert_for_tlb_insert20 \pte @@ -948,7 +948,7 @@ intr_check_sig: /* As above */ mfctl %cr30,%r1 LDREG TI_FLAGS(%r1),%r19 - ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r20 + ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NOTIFY_RESUME), %r20 and,COND(<>) %r19, %r20, %r0 b,n intr_restore /* skip past if we've nothing to do */ @@ -2047,12 +2047,13 @@ syscall_do_signal: b,n syscall_check_sig syscall_restore: - /* Are we being ptraced? */ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 - ldw TASK_PTRACE(%r1), %r19 - bb,< %r19,31,syscall_restore_rfi - nop + /* Are we being ptraced? */ + ldw TASK_FLAGS(%r1),%r19 + ldi (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2 + and,COND(=) %r19,%r2,%r0 + b,n syscall_restore_rfi ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */ rest_fp %r19 @@ -2113,16 +2114,16 @@ syscall_restore_rfi: ldi 0x0b,%r20 /* Create new PSW */ depi -1,13,1,%r20 /* C, Q, D, and I bits */ - /* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are - * set in include/linux/ptrace.h and converted to PA bitmap + /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are + * set in thread_info.h and converted to PA bitmap * numbers in asm-offsets.c */ - /* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */ - extru,= %r19,PA_SINGLESTEP_BIT,1,%r0 + /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */ + extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0 depi -1,27,1,%r20 /* R bit */ - /* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */ - extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0 + /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */ + extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0 depi -1,7,1,%r20 /* T bit */ STREG %r20,TASK_PT_PSW(%r1) diff --git a/arch/parisc/kernel/init_task.c b/arch/parisc/kernel/init_task.c index 82974b20fc1..d020eae6525 100644 --- a/arch/parisc/kernel/init_task.c +++ b/arch/parisc/kernel/init_task.c @@ -43,8 +43,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); * way process stacks are handled. This is done by having a special * "init_task" linker map entry.. 
*/ -union thread_union init_thread_union - __attribute__((aligned(128))) __attribute__((__section__(".data.init_task"))) = +union thread_union init_thread_union __init_task_data + __attribute__((aligned(128))) = { INIT_THREAD_INFO(init_task) }; #if PT_NLEVELS == 3 diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c index bd1f7f1ff74..d228d823787 100644 --- a/arch/parisc/kernel/inventory.c +++ b/arch/parisc/kernel/inventory.c @@ -170,23 +170,27 @@ static void __init pagezero_memconfig(void) static int __init pat_query_module(ulong pcell_loc, ulong mod_index) { - pdc_pat_cell_mod_maddr_block_t pa_pdc_cell; + pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell; unsigned long bytecnt; unsigned long temp; /* 64-bit scratch value */ long status; /* PDC return value status */ struct parisc_device *dev; + pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL); + if (!pa_pdc_cell) + panic("couldn't allocate memory for PDC_PAT_CELL!"); + /* return cell module (PA or Processor view) */ status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index, - PA_VIEW, &pa_pdc_cell); + PA_VIEW, pa_pdc_cell); if (status != PDC_OK) { /* no more cell modules or error */ return status; } - temp = pa_pdc_cell.cba; - dev = alloc_pa_dev(PAT_GET_CBA(temp), &pa_pdc_cell.mod_path); + temp = pa_pdc_cell->cba; + dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path)); if (!dev) { return PDC_OK; } @@ -203,8 +207,8 @@ pat_query_module(ulong pcell_loc, ulong mod_index) /* save generic info returned from the call */ /* REVISIT: who is the consumer of this? not sure yet... */ - dev->mod_info = pa_pdc_cell.mod_info; /* pass to PAT_GET_ENTITY() */ - dev->pmod_loc = pa_pdc_cell.mod_location; + dev->mod_info = pa_pdc_cell->mod_info; /* pass to PAT_GET_ENTITY() */ + dev->pmod_loc = pa_pdc_cell->mod_location; register_parisc_device(dev); /* advertise device */ @@ -216,14 +220,14 @@ pat_query_module(ulong pcell_loc, ulong mod_index) case PAT_ENTITY_PROC: printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n", - pa_pdc_cell.mod[0]); + pa_pdc_cell->mod[0]); break; case PAT_ENTITY_MEM: printk(KERN_DEBUG "PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n", - pa_pdc_cell.mod[0], pa_pdc_cell.mod[1], - pa_pdc_cell.mod[2]); + pa_pdc_cell->mod[0], pa_pdc_cell->mod[1], + pa_pdc_cell->mod[2]); break; case PAT_ENTITY_CA: printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc); @@ -243,23 +247,26 @@ pat_query_module(ulong pcell_loc, ulong mod_index) print_ranges: pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index, IO_VIEW, &io_pdc_cell); - printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell.mod[1]); - for (i = 0; i < pa_pdc_cell.mod[1]; i++) { + printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]); + for (i = 0; i < pa_pdc_cell->mod[1]; i++) { printk(KERN_DEBUG " PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n", - i, pa_pdc_cell.mod[2 + i * 3], /* type */ - pa_pdc_cell.mod[3 + i * 3], /* start */ - pa_pdc_cell.mod[4 + i * 3]); /* finish (ie end) */ + i, pa_pdc_cell->mod[2 + i * 3], /* type */ + pa_pdc_cell->mod[3 + i * 3], /* start */ + pa_pdc_cell->mod[4 + i * 3]); /* finish (ie end) */ printk(KERN_DEBUG " IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n", - i, io_pdc_cell.mod[2 + i * 3], /* type */ - io_pdc_cell.mod[3 + i * 3], /* start */ - io_pdc_cell.mod[4 + i * 3]); /* finish (ie end) */ + i, io_pdc_cell->mod[2 + i * 3], /* type */ + io_pdc_cell->mod[3 + i * 3], /* start */ + io_pdc_cell->mod[4 + i * 3]); /* finish (ie end) */ } printk(KERN_DEBUG "\n"); break; } #endif /* DEBUG_PAT */ + + kfree(pa_pdc_cell); + return 
PDC_OK; } diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 8007f1e6572..2e7610cb33d 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -120,7 +120,7 @@ int cpu_check_affinity(unsigned int irq, const struct cpumask *dest) if (CHECK_IRQ_PER_CPU(irq)) { /* Bad linux design decision. The mask has already * been set; we must reset it */ - cpumask_setall(&irq_desc[irq].affinity); + cpumask_setall(irq_desc[irq].affinity); return -EINVAL; } @@ -138,13 +138,13 @@ static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest) if (cpu_dest < 0) return -1; - cpumask_copy(&irq_desc[irq].affinity, dest); + cpumask_copy(irq_desc[irq].affinity, dest); return 0; } #endif -static struct hw_interrupt_type cpu_interrupt_type = { +static struct irq_chip cpu_interrupt_type = { .typename = "CPU", .startup = cpu_startup_irq, .shutdown = cpu_disable_irq, @@ -299,7 +299,7 @@ int txn_alloc_irq(unsigned int bits_wide) unsigned long txn_affinity_addr(unsigned int irq, int cpu) { #ifdef CONFIG_SMP - cpumask_copy(&irq_desc[irq].affinity, cpumask_of(cpu)); + cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu)); #endif return per_cpu(cpu_data, cpu).txn_addr; @@ -356,7 +356,7 @@ void do_cpu_irq_mask(struct pt_regs *regs) irq = eirr_to_irq(eirr_val); #ifdef CONFIG_SMP - cpumask_copy(&dest, &irq_desc[irq].affinity); + cpumask_copy(&dest, irq_desc[irq].affinity); if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) && !cpu_isset(smp_processor_id(), dest)) { int cpu = first_cpu(dest); @@ -423,8 +423,3 @@ void __init init_IRQ(void) set_eiem(cpu_eiem); /* EIEM : enable all external intr */ } - -void ack_bad_irq(unsigned int irq) -{ - printk(KERN_WARNING "unexpected IRQ %d\n", irq); -} diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index ef5caf2e6ed..212074653df 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c @@ -86,8 +86,12 @@ * the bottom of the table, which has a maximum signed displacement of * 0x3fff; however, since we're only going forward, this becomes * 0x1fff, and thus, since each GOT entry is 8 bytes long we can have - * at most 1023 entries */ -#define MAX_GOTS 1023 + * at most 1023 entries. + * To overcome this 14bit displacement with some kernel modules, we'll + * use instead the unusal 16bit displacement method (see reassemble_16a) + * which gives us a maximum positive displacement of 0x7fff, and as such + * allows us to allocate up to 4095 GOT entries. */ +#define MAX_GOTS 4095 /* three functions to determine where in the module core * or init pieces the location is */ @@ -145,12 +149,40 @@ struct stub_entry { /* The reassemble_* functions prepare an immediate value for insertion into an opcode. pa-risc uses all sorts of weird bitfields in the instruction to hold the value. */ +static inline int sign_unext(int x, int len) +{ + int len_ones; + + len_ones = (1 << len) - 1; + return x & len_ones; +} + +static inline int low_sign_unext(int x, int len) +{ + int sign, temp; + + sign = (x >> (len-1)) & 1; + temp = sign_unext(x, len-1); + return (temp << 1) | sign; +} + static inline int reassemble_14(int as14) { return (((as14 & 0x1fff) << 1) | ((as14 & 0x2000) >> 13)); } +static inline int reassemble_16a(int as16) +{ + int s, t; + + /* Unusual 16-bit encoding, for wide mode only. 
*/ + t = (as16 << 1) & 0xffff; + s = (as16 & 0x8000); + return (t ^ s ^ (s >> 1)) | (s >> 15); +} + + static inline int reassemble_17(int as17) { return (((as17 & 0x10000) >> 16) | @@ -407,6 +439,7 @@ static Elf_Addr get_stub(struct module *me, unsigned long value, long addend, enum elf_stub_type stub_type, Elf_Addr loc0, unsigned int targetsec) { struct stub_entry *stub; + int __maybe_unused d; /* initialize stub_offset to point in front of the section */ if (!me->arch.section[targetsec].stub_offset) { @@ -460,12 +493,19 @@ static Elf_Addr get_stub(struct module *me, unsigned long value, long addend, */ switch (stub_type) { case ELF_STUB_GOT: - stub->insns[0] = 0x537b0000; /* ldd 0(%dp),%dp */ + d = get_got(me, value, addend); + if (d <= 15) { + /* Format 5 */ + stub->insns[0] = 0x0f6010db; /* ldd 0(%dp),%dp */ + stub->insns[0] |= low_sign_unext(d, 5) << 16; + } else { + /* Format 3 */ + stub->insns[0] = 0x537b0000; /* ldd 0(%dp),%dp */ + stub->insns[0] |= reassemble_16a(d); + } stub->insns[1] = 0x53610020; /* ldd 10(%dp),%r1 */ stub->insns[2] = 0xe820d000; /* bve (%r1) */ stub->insns[3] = 0x537b0030; /* ldd 18(%dp),%dp */ - - stub->insns[0] |= reassemble_14(get_got(me, value, addend) & 0x3fff); break; case ELF_STUB_MILLI: stub->insns[0] = 0x20200000; /* ldil 0,%r1 */ @@ -853,7 +893,7 @@ int module_finalize(const Elf_Ehdr *hdr, * ourselves */ for (i = 1; i < hdr->e_shnum; i++) { if(sechdrs[i].sh_type == SHT_SYMTAB - && (sechdrs[i].sh_type & SHF_ALLOC)) { + && (sechdrs[i].sh_flags & SHF_ALLOC)) { int strindex = sechdrs[i].sh_link; /* FIXME: AWFUL HACK * The cast is to drop the const from diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index 7d927eac932..c07f618ff7d 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -90,12 +90,14 @@ static inline int map_pte_uncached(pte_t * pte, if (end > PMD_SIZE) end = PMD_SIZE; do { + unsigned long flags; + if (!pte_none(*pte)) printk(KERN_ERR "map_pte_uncached: page already exists\n"); set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC)); - purge_tlb_start(); + purge_tlb_start(flags); pdtlb_kernel(orig_vaddr); - purge_tlb_end(); + purge_tlb_end(flags); vaddr += PAGE_SIZE; orig_vaddr += PAGE_SIZE; (*paddr_ptr) += PAGE_SIZE; @@ -168,11 +170,13 @@ static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr, if (end > PMD_SIZE) end = PMD_SIZE; do { + unsigned long flags; pte_t page = *pte; + pte_clear(&init_mm, vaddr, pte); - purge_tlb_start(); + purge_tlb_start(flags); pdtlb_kernel(orig_vaddr); - purge_tlb_end(); + purge_tlb_end(flags); vaddr += PAGE_SIZE; orig_vaddr += PAGE_SIZE; pte++; diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c index 6936386c986..f7064abc3bb 100644 --- a/arch/parisc/kernel/pci.c +++ b/arch/parisc/kernel/pci.c @@ -1,5 +1,4 @@ -/* $Id: pci.c,v 1.6 2000/01/29 00:12:05 grundler Exp $ - * +/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index 61c07078c07..1f3aa8db020 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -156,7 +156,7 @@ void machine_power_off(void) * software. The user has to press the button himself. 
*/ printk(KERN_EMERG "System shut down completed.\n" - KERN_EMERG "Please power this system off now."); + "Please power this system off now."); } void (*pm_power_off)(void) = machine_power_off; diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index e09d0f7fb6b..c8fb61ed32f 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c @@ -1,5 +1,4 @@ -/* $Id: processor.c,v 1.1 2002/07/20 16:27:06 rhirst Exp $ - * +/* * Initial setup-routines for HP 9000 based hardware. * * Copyright (C) 1991, 1992, 1995 Linus Torvalds @@ -121,22 +120,28 @@ static int __cpuinit processor_probe(struct parisc_device *dev) if (is_pdc_pat()) { ulong status; unsigned long bytecnt; - pdc_pat_cell_mod_maddr_block_t pa_pdc_cell; + pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell; #undef USE_PAT_CPUID #ifdef USE_PAT_CPUID struct pdc_pat_cpu_num cpu_info; #endif + pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL); + if (!pa_pdc_cell) + panic("couldn't allocate memory for PDC_PAT_CELL!"); + status = pdc_pat_cell_module(&bytecnt, dev->pcell_loc, - dev->mod_index, PA_VIEW, &pa_pdc_cell); + dev->mod_index, PA_VIEW, pa_pdc_cell); BUG_ON(PDC_OK != status); /* verify it's the same as what do_pat_inventory() found */ - BUG_ON(dev->mod_info != pa_pdc_cell.mod_info); - BUG_ON(dev->pmod_loc != pa_pdc_cell.mod_location); + BUG_ON(dev->mod_info != pa_pdc_cell->mod_info); + BUG_ON(dev->pmod_loc != pa_pdc_cell->mod_location); + + txn_addr = pa_pdc_cell->mod[0]; /* id_eid for IO sapic */ - txn_addr = pa_pdc_cell.mod[0]; /* id_eid for IO sapic */ + kfree(pa_pdc_cell); #ifdef USE_PAT_CPUID /* We need contiguous numbers for cpuid. Firmware's notion diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 927db3668b6..c4f49e45129 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -13,6 +13,7 @@ #include <linux/smp.h> #include <linux/errno.h> #include <linux/ptrace.h> +#include <linux/tracehook.h> #include <linux/user.h> #include <linux/personality.h> #include <linux/security.h> @@ -35,7 +36,8 @@ */ void ptrace_disable(struct task_struct *task) { - task->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP); + clear_tsk_thread_flag(task, TIF_SINGLESTEP); + clear_tsk_thread_flag(task, TIF_BLOCKSTEP); /* make sure the trap bits are not set */ pa_psw(task)->r = 0; @@ -55,8 +57,8 @@ void user_disable_single_step(struct task_struct *task) void user_enable_single_step(struct task_struct *task) { - task->ptrace &= ~PT_BLOCKSTEP; - task->ptrace |= PT_SINGLESTEP; + clear_tsk_thread_flag(task, TIF_BLOCKSTEP); + set_tsk_thread_flag(task, TIF_SINGLESTEP); if (pa_psw(task)->n) { struct siginfo si; @@ -98,8 +100,8 @@ void user_enable_single_step(struct task_struct *task) void user_enable_block_step(struct task_struct *task) { - task->ptrace &= ~PT_SINGLESTEP; - task->ptrace |= PT_BLOCKSTEP; + clear_tsk_thread_flag(task, TIF_SINGLESTEP); + set_tsk_thread_flag(task, TIF_BLOCKSTEP); /* Enable taken branch trap. */ pa_psw(task)->r = 0; @@ -263,22 +265,20 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, } #endif +long do_syscall_trace_enter(struct pt_regs *regs) +{ + if (test_thread_flag(TIF_SYSCALL_TRACE) && + tracehook_report_syscall_entry(regs)) + return -1L; + + return regs->gr[20]; +} -void syscall_trace(void) +void do_syscall_trace_exit(struct pt_regs *regs) { - if (!test_thread_flag(TIF_SYSCALL_TRACE)) - return; - if (!(current->ptrace & PT_PTRACED)) - return; - ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) - ? 
0x80 : 0)); - /* - * this isn't the same as continuing with a signal, but it will do - * for normal use. strace only continues with a signal if the - * stopping signal is not SIGTRAP. -brl - */ - if (current->exit_code) { - send_sig(current->exit_code, current, 1); - current->exit_code = 0; - } + int stepping = test_thread_flag(TIF_SINGLESTEP) || + test_thread_flag(TIF_BLOCKSTEP); + + if (stepping || test_thread_flag(TIF_SYSCALL_TRACE)) + tracehook_report_syscall_exit(regs, stepping); } diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 82131ca8e05..cb71f3dac99 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -1,5 +1,4 @@ -/* $Id: setup.c,v 1.8 2000/02/02 04:42:38 prumpf Exp $ - * +/* * Initial setup-routines for HP 9000 based hardware. * * Copyright (C) 1991, 1992, 1995 Linus Torvalds diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index f82544225e8..e8467e4aa8d 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -21,10 +21,12 @@ #include <linux/errno.h> #include <linux/wait.h> #include <linux/ptrace.h> +#include <linux/tracehook.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <linux/compat.h> #include <linux/elf.h> +#include <linux/tracehook.h> #include <asm/ucontext.h> #include <asm/rt_sigframe.h> #include <asm/uaccess.h> @@ -33,7 +35,6 @@ #include <asm/asm-offsets.h> #ifdef CONFIG_COMPAT -#include <linux/compat.h> #include "signal32.h" #endif @@ -467,6 +468,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, sigaddset(¤t->blocked,sig); recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); + + tracehook_signal_handler(sig, info, ka, regs, 0); + return 1; } @@ -645,4 +649,11 @@ void do_notify_resume(struct pt_regs *regs, long in_syscall) if (test_thread_flag(TIF_SIGPENDING) || test_thread_flag(TIF_RESTORE_SIGMASK)) do_signal(regs, in_syscall); + + if (test_thread_flag(TIF_NOTIFY_RESUME)) { + clear_thread_flag(TIF_NOTIFY_RESUME); + tracehook_notify_resume(regs); + if (current->replacement_session_keyring) + key_replace_session_keyring(); + } } diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c index 1adb40c8166..561388b17c9 100644 --- a/arch/parisc/kernel/sys_parisc32.c +++ b/arch/parisc/kernel/sys_parisc32.c @@ -18,7 +18,6 @@ #include <linux/signal.h> #include <linux/resource.h> #include <linux/times.h> -#include <linux/utsname.h> #include <linux/time.h> #include <linux/smp.h> #include <linux/smp_lock.h> @@ -174,68 +173,6 @@ asmlinkage long sys32_sched_rr_get_interval(pid_t pid, return ret; } -/*** copied from mips64 ***/ -/* - * Ooo, nasty. We need here to frob 32-bit unsigned longs to - * 64-bit unsigned longs. - */ - -static inline int -get_fd_set32(unsigned long n, u32 *ufdset, unsigned long *fdset) -{ - n = (n + 8*sizeof(u32) - 1) / (8*sizeof(u32)); - if (ufdset) { - unsigned long odd; - - if (!access_ok(VERIFY_WRITE, ufdset, n*sizeof(u32))) - return -EFAULT; - - odd = n & 1UL; - n &= ~1UL; - while (n) { - unsigned long h, l; - __get_user(l, ufdset); - __get_user(h, ufdset+1); - ufdset += 2; - *fdset++ = h << 32 | l; - n -= 2; - } - if (odd) - __get_user(*fdset, ufdset); - } else { - /* Tricky, must clear full unsigned long in the - * kernel fdset at the end, this makes sure that - * actually happens. 
- */ - memset(fdset, 0, ((n + 1) & ~1)*sizeof(u32)); - } - return 0; -} - -static inline void -set_fd_set32(unsigned long n, u32 *ufdset, unsigned long *fdset) -{ - unsigned long odd; - n = (n + 8*sizeof(u32) - 1) / (8*sizeof(u32)); - - if (!ufdset) - return; - - odd = n & 1UL; - n &= ~1UL; - while (n) { - unsigned long h, l; - l = *fdset++; - h = l >> 32; - __put_user(l, ufdset); - __put_user(h, ufdset+1); - ufdset += 2; - n -= 2; - } - if (odd) - __put_user(*fdset, ufdset); -} - struct msgbuf32 { int mtype; char mtext[1]; diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 59fc1a43ec3..f5f96021caa 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -288,18 +288,23 @@ tracesys: STREG %r18,PT_GR18(%r2) /* Finished saving things for the debugger */ - ldil L%syscall_trace,%r1 + copy %r2,%r26 + ldil L%do_syscall_trace_enter,%r1 ldil L%tracesys_next,%r2 - be R%syscall_trace(%sr7,%r1) + be R%do_syscall_trace_enter(%sr7,%r1) ldo R%tracesys_next(%r2),%r2 -tracesys_next: +tracesys_next: + /* do_syscall_trace_enter either returned the syscallno, or -1L, + * so we skip restoring the PT_GR20 below, since we pulled it from + * task->thread.regs.gr[20] above. + */ + copy %ret0,%r20 ldil L%sys_call_table,%r1 ldo R%sys_call_table(%r1), %r19 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ LDREG TI_TASK(%r1), %r1 - LDREG TASK_PT_GR20(%r1), %r20 LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */ LDREG TASK_PT_GR25(%r1), %r25 LDREG TASK_PT_GR24(%r1), %r24 @@ -336,7 +341,8 @@ tracesys_exit: #ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif - bl syscall_trace, %r2 + ldo TASK_REGS(%r1),%r26 + bl do_syscall_trace_exit,%r2 STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ LDREG TI_TASK(%r1), %r1 @@ -353,12 +359,12 @@ tracesys_exit: tracesys_sigexit: ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ - LDREG 0(%r1), %r1 + LDREG TI_TASK(%r1), %r1 #ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif - bl syscall_trace, %r2 - nop + bl do_syscall_trace_exit,%r2 + ldo TASK_REGS(%r1),%r26 ldil L%syscall_exit_rfi,%r1 be,n R%syscall_exit_rfi(%sr7,%r1) diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 03b9a01bc16..843f423dec6 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -413,6 +413,10 @@ ENTRY_SAME(dup3) ENTRY_SAME(pipe2) ENTRY_SAME(inotify_init1) + ENTRY_COMP(preadv) /* 315 */ + ENTRY_COMP(pwritev) + ENTRY_COMP(rt_tgsigqueueinfo) + ENTRY_SAME(perf_event_open) /* Nothing yet */ diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index d4dd05674c6..a79c6f9e7e2 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -56,9 +56,9 @@ static unsigned long clocktick __read_mostly; /* timer cycles per tick */ */ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id) { - unsigned long now; + unsigned long now, now2; unsigned long next_tick; - unsigned long cycles_elapsed, ticks_elapsed; + unsigned long cycles_elapsed, ticks_elapsed = 1; unsigned long cycles_remainder; unsigned int cpu = smp_processor_id(); struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); @@ -71,44 +71,24 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id) /* Initialize next_tick to the expected tick time. */ next_tick = cpuinfo->it_value; - /* Get current interval timer. - * CR16 reads as 64 bits in CPU wide mode. 
- * CR16 reads as 32 bits in CPU narrow mode. - */ + /* Get current cycle counter (Control Register 16). */ now = mfctl(16); cycles_elapsed = now - next_tick; - if ((cycles_elapsed >> 5) < cpt) { + if ((cycles_elapsed >> 6) < cpt) { /* use "cheap" math (add/subtract) instead * of the more expensive div/mul method */ cycles_remainder = cycles_elapsed; - ticks_elapsed = 1; while (cycles_remainder > cpt) { cycles_remainder -= cpt; ticks_elapsed++; } } else { + /* TODO: Reduce this to one fdiv op */ cycles_remainder = cycles_elapsed % cpt; - ticks_elapsed = 1 + cycles_elapsed / cpt; - } - - /* Can we differentiate between "early CR16" (aka Scenario 1) and - * "long delay" (aka Scenario 3)? I don't think so. - * - * We expected timer_interrupt to be delivered at least a few hundred - * cycles after the IT fires. But it's arbitrary how much time passes - * before we call it "late". I've picked one second. - */ - if (unlikely(ticks_elapsed > HZ)) { - /* Scenario 3: very long delay? bad in any case */ - printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!" - " cycles %lX rem %lX " - " next/now %lX/%lX\n", - cpu, - cycles_elapsed, cycles_remainder, - next_tick, now ); + ticks_elapsed += cycles_elapsed / cpt; } /* convert from "division remainder" to "remainder of clock tick" */ @@ -122,18 +102,56 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id) cpuinfo->it_value = next_tick; - /* Skip one clocktick on purpose if we are likely to miss next_tick. - * We want to avoid the new next_tick being less than CR16. - * If that happened, itimer wouldn't fire until CR16 wrapped. - * We'll catch the tick we missed on the tick after that. + /* Program the IT when to deliver the next interrupt. + * Only bottom 32-bits of next_tick are writable in CR16! */ - if (!(cycles_remainder >> 13)) - next_tick += cpt; - - /* Program the IT when to deliver the next interrupt. */ - /* Only bottom 32-bits of next_tick are written to cr16. */ mtctl(next_tick, 16); + /* Skip one clocktick on purpose if we missed next_tick. + * The new CR16 must be "later" than current CR16 otherwise + * itimer would not fire until CR16 wrapped - e.g 4 seconds + * later on a 1Ghz processor. We'll account for the missed + * tick on the next timer interrupt. + * + * "next_tick - now" will always give the difference regardless + * if one or the other wrapped. If "now" is "bigger" we'll end up + * with a very large unsigned number. + */ + now2 = mfctl(16); + if (next_tick - now2 > cpt) + mtctl(next_tick+cpt, 16); + +#if 1 +/* + * GGG: DEBUG code for how many cycles programming CR16 used. + */ + if (unlikely(now2 - now > 0x3000)) /* 12K cycles */ + printk (KERN_CRIT "timer_interrupt(CPU %d): SLOW! 0x%lx cycles!" + " cyc %lX rem %lX " + " next/now %lX/%lX\n", + cpu, now2 - now, cycles_elapsed, cycles_remainder, + next_tick, now ); +#endif + + /* Can we differentiate between "early CR16" (aka Scenario 1) and + * "long delay" (aka Scenario 3)? I don't think so. + * + * Timer_interrupt will be delivered at least a few hundred cycles + * after the IT fires. But it's arbitrary how much time passes + * before we call it "late". I've picked one second. + * + * It's important NO printk's are between reading CR16 and + * setting up the next value. May introduce huge variance. + */ + if (unlikely(ticks_elapsed > HZ)) { + /* Scenario 3: very long delay? bad in any case */ + printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!" 
+ " cycles %lX rem %lX " + " next/now %lX/%lX\n", + cpu, + cycles_elapsed, cycles_remainder, + next_tick, now ); + } /* Done mucking with unreliable delivery of interrupts. * Go do system house keeping. @@ -173,7 +191,7 @@ EXPORT_SYMBOL(profile_pc); /* clock source code */ -static cycle_t read_cr16(void) +static cycle_t read_cr16(struct clocksource *cs) { return get_cycles(); } diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index c32f5d6d778..8b58bf0b7d5 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -250,15 +250,14 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err) oops_enter(); /* Amuse the user in a SPARC fashion */ - if (err) printk( -KERN_CRIT " _______________________________ \n" -KERN_CRIT " < Your System ate a SPARC! Gah! >\n" -KERN_CRIT " ------------------------------- \n" -KERN_CRIT " \\ ^__^\n" -KERN_CRIT " \\ (xx)\\_______\n" -KERN_CRIT " (__)\\ )\\/\\\n" -KERN_CRIT " U ||----w |\n" -KERN_CRIT " || ||\n"); + if (err) printk(KERN_CRIT + " _______________________________ \n" + " < Your System ate a SPARC! Gah! >\n" + " ------------------------------- \n" + " \\ ^__^\n" + " (__)\\ )\\/\\\n" + " U ||----w |\n" + " || ||\n"); /* unlock the pdc lock if necessary */ pdc_emergency_unlock(); @@ -533,7 +532,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) /* Kill the user process later */ regs->iaoq[0] = 0 | 3; regs->iaoq[1] = regs->iaoq[0] + 4; - regs->iasq[0] = regs->iasq[0] = regs->sr[7]; + regs->iasq[0] = regs->iasq[1] = regs->sr[7]; regs->gr[0] &= ~PSW_B; return; } @@ -797,7 +796,8 @@ void notrace handle_interruption(int code, struct pt_regs *regs) else printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ", code); - printk("pid=%d command='%s'\n", task_pid_nr(current), current->comm); + printk(KERN_CONT "pid=%d command='%s'\n", + task_pid_nr(current), current->comm); show_regs(regs); #endif si.si_signo = SIGSEGV; diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c index 69dad5a850a..a36799e8569 100644 --- a/arch/parisc/kernel/unwind.c +++ b/arch/parisc/kernel/unwind.c @@ -28,7 +28,7 @@ #define dbg(x...) #endif -#define KERNEL_START (KERNEL_BINARY_TEXT_START - 0x1000) +#define KERNEL_START (KERNEL_BINARY_TEXT_START) extern struct unwind_table_entry __start___unwind[]; extern struct unwind_table_entry __stop___unwind[]; diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index fd2cc4fd2b6..9dab4a4e09f 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -28,6 +28,7 @@ #include <asm/cache.h> #include <asm/page.h> #include <asm/asm-offsets.h> +#include <asm/thread_info.h> /* ld script to make hppa Linux kernel */ #ifndef CONFIG_64BIT @@ -77,15 +78,6 @@ SECTIONS */ . = ALIGN(PAGE_SIZE); data_start = .; - . = ALIGN(16); - /* Exception table */ - __ex_table : { - __start___ex_table = .; - *(__ex_table) - __stop___ex_table = .; - } - - NOTES /* unwind info */ .PARISC.unwind : { @@ -94,23 +86,11 @@ SECTIONS __stop___unwind = .; } - /* rarely changed data like cpu maps */ - . = ALIGN(16); - .data.read_mostly : { - *(.data.read_mostly) - } + EXCEPTION_TABLE(16) + NOTES - . = ALIGN(L1_CACHE_BYTES); /* Data */ - .data : { - DATA_DATA - CONSTRUCTORS - } - - . = ALIGN(L1_CACHE_BYTES); - .data.cacheline_aligned : { - *(.data.cacheline_aligned) - } + RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) /* PA-RISC locks requires 16-byte alignment */ . 
= ALIGN(16); @@ -118,17 +98,6 @@ SECTIONS *(.data.lock_aligned) } - /* nosave data is really only used for software suspend...it's here - * just in case we ever implement it - */ - . = ALIGN(PAGE_SIZE); - __nosave_begin = .; - .data_nosave : { - *(.data.nosave) - } - . = ALIGN(PAGE_SIZE); - __nosave_end = .; - /* End of data section */ _edata = .; @@ -147,14 +116,6 @@ SECTIONS } __bss_stop = .; - - /* assembler code expects init_task to be 16k aligned */ - . = ALIGN(16384); - /* init_task */ - .data.init_task : { - *(.data.init_task) - } - #ifdef CONFIG_64BIT . = ALIGN(16); /* Linkage tables */ @@ -172,64 +133,17 @@ SECTIONS /* reserve space for interrupt stack by aligning __init* to 16k */ . = ALIGN(16384); __init_begin = .; - .init.text : { - _sinittext = .; - INIT_TEXT - _einittext = .; - } - .init.data : { - INIT_DATA - } - . = ALIGN(16); - .init.setup : { - __setup_start = .; - *(.init.setup) - __setup_end = .; - } - .initcall.init : { - __initcall_start = .; - INITCALLS - __initcall_end = .; - } - .con_initcall.init : { - __con_initcall_start = .; - *(.con_initcall.init) - __con_initcall_end = .; - } - SECURITY_INIT - - /* alternate instruction replacement. This is a mechanism x86 uses - * to detect the CPU type and replace generic instruction sequences - * with CPU specific ones. We don't currently do this in PA, but - * it seems like a good idea... - */ - . = ALIGN(4); - .altinstructions : { - __alt_instructions = .; - *(.altinstructions) - __alt_instructions_end = .; - } - .altinstr_replacement : { - *(.altinstr_replacement) - } - - /* .exit.text is discard at runtime, not link time, to deal with references - * from .altinstructions and .eh_frame - */ - .exit.text : { + INIT_TEXT_SECTION(16384) + INIT_DATA_SECTION(16) + /* we have to discard exit text and such at runtime, not link time */ + .exit.text : + { EXIT_TEXT } - .exit.data : { + .exit.data : + { EXIT_DATA } -#ifdef CONFIG_BLK_DEV_INITRD - . = ALIGN(PAGE_SIZE); - .init.ramfs : { - __initramfs_start = .; - *(.init.ramfs) - __initramfs_end = .; - } -#endif PERCPU(PAGE_SIZE) . = ALIGN(PAGE_SIZE); @@ -237,9 +151,12 @@ SECTIONS /* freed after init ends here */ _end = . ; + STABS_DEBUG + .note 0 : { *(.note) } + /* Sections to be discarded */ + DISCARDS /DISCARD/ : { - *(.exitcall.exit) #ifdef CONFIG_64BIT /* temporary hack until binutils is fixed to not emit these * for static binaries @@ -252,7 +169,4 @@ SECTIONS *(.gnu.hash) #endif } - - STABS_DEBUG - .note 0 : { *(.note) } } diff --git a/arch/parisc/lib/checksum.c b/arch/parisc/lib/checksum.c index 462696d30d3..ae66d31f9ec 100644 --- a/arch/parisc/lib/checksum.c +++ b/arch/parisc/lib/checksum.c @@ -13,8 +13,6 @@ * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. - * - * $Id: checksum.c,v 1.3 1997/12/01 17:57:34 ralf Exp $ */ #include <linux/module.h> #include <linux/types.h> diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c index bbda909c866..abf41f4632a 100644 --- a/arch/parisc/lib/memcpy.c +++ b/arch/parisc/lib/memcpy.c @@ -405,7 +405,7 @@ byte_copy: unaligned_copy: /* possibly we are aligned on a word, but not on a double... 
*/ - if (likely(t1 & (sizeof(unsigned int)-1)) == 0) { + if (likely((t1 & (sizeof(unsigned int)-1)) == 0)) { t2 = src & (sizeof(unsigned int) - 1); if (unlikely(t2 != 0)) { diff --git a/arch/parisc/math-emu/decode_exc.c b/arch/parisc/math-emu/decode_exc.c index 66c8a9f6a27..3ca1c614921 100644 --- a/arch/parisc/math-emu/decode_exc.c +++ b/arch/parisc/math-emu/decode_exc.c @@ -40,7 +40,7 @@ * END_DESC */ - +#include <linux/kernel.h> #include "float.h" #include "sgl_float.h" #include "dbl_float.h" diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index bfb6dd6ab38..c6afbfc9577 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -1,5 +1,4 @@ -/* $Id: fault.c,v 1.5 2000/01/26 16:20:29 jsm Exp $ - * +/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 4356ceb1e36..13b6e3e59b9 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -370,34 +370,22 @@ static void __init setup_bootmem(void) void free_initmem(void) { - unsigned long addr, init_begin, init_end; - - printk(KERN_INFO "Freeing unused kernel memory: "); + unsigned long addr; + unsigned long init_begin = (unsigned long)__init_begin; + unsigned long init_end = (unsigned long)__init_end; #ifdef CONFIG_DEBUG_KERNEL /* Attempt to catch anyone trying to execute code here * by filling the page with BRK insns. - * - * If we disable interrupts for all CPUs, then IPI stops working. - * Kinda breaks the global cache flushing. */ - local_irq_disable(); - - memset(__init_begin, 0x00, - (unsigned long)__init_end - (unsigned long)__init_begin); - - flush_data_cache(); - asm volatile("sync" : : ); - flush_icache_range((unsigned long)__init_begin, (unsigned long)__init_end); - asm volatile("sync" : : ); - - local_irq_enable(); + memset((void *)init_begin, 0x00, init_end - init_begin); + flush_icache_range(init_begin, init_end); #endif /* align __init_begin and __init_end to page size, ignoring linker script where we might have tried to save RAM */ - init_begin = PAGE_ALIGN((unsigned long)(__init_begin)); - init_end = PAGE_ALIGN((unsigned long)(__init_end)); + init_begin = PAGE_ALIGN(init_begin); + init_end = PAGE_ALIGN(init_end); for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) { ClearPageReserved(virt_to_page(addr)); init_page_count(virt_to_page(addr)); @@ -409,7 +397,8 @@ void free_initmem(void) /* set up a new led state on systems shipped LED State panel */ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE); - printk("%luk freed\n", (init_end - init_begin) >> 10); + printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n", + (init_end - init_begin) >> 10); } @@ -445,8 +434,8 @@ void mark_rodata_ro(void) #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \ & ~(VM_MAP_OFFSET-1))) -void *vmalloc_start __read_mostly; -EXPORT_SYMBOL(vmalloc_start); +void *parisc_vmalloc_start __read_mostly; +EXPORT_SYMBOL(parisc_vmalloc_start); #ifdef CONFIG_PA11 unsigned long pcxl_dma_start __read_mostly; @@ -507,17 +496,18 @@ void __init mem_init(void) #ifdef CONFIG_PA11 if (hppa_dma_ops == &pcxl_dma_ops) { pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START); - vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE); + parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + + PCXL_DMA_MAP_SIZE); } else { pcxl_dma_start = 0; - vmalloc_start = SET_MAP_OFFSET(MAP_START); + parisc_vmalloc_start = 
SET_MAP_OFFSET(MAP_START); } #else - vmalloc_start = SET_MAP_OFFSET(MAP_START); + parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START); #endif printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n", - (unsigned long)nr_free_pages() << (PAGE_SHIFT-10), + nr_free_pages() << (PAGE_SHIFT-10), num_physpages << (PAGE_SHIFT-10), codesize >> 10, reservedpages << (PAGE_SHIFT-10), |