author     Dmitry Torokhov <dtor@insightbb.com>  2007-05-01 00:24:54 -0400
committer  Dmitry Torokhov <dtor@insightbb.com>  2007-05-01 00:24:54 -0400
commit     bc95f3669f5e6f63cf0b84fe4922c3c6dd4aa775
tree       427fcf2a7287c16d4b5aa6cbf494d59579a6a8b1 /arch/mips/kernel
parent     3d29cdff999c37b3876082278a8134a0642a02cd
parent     dc87c3985e9b442c60994308a96f887579addc39
Merge master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
	drivers/usb/input/Makefile
	drivers/usb/input/gtco.c
Diffstat (limited to 'arch/mips/kernel')
29 files changed, 616 insertions, 719 deletions
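
One of the more self-contained additions pulled in by this merge is a minimal early boot console for MIPS (the new arch/mips/kernel/early_printk.c, hooked up from setup_arch() under CONFIG_EARLY_PRINTK). Condensed, the pattern is a struct console whose write hook forwards each character to the board's polled prom_putchar() routine and which is registered with the CON_BOOT flag, with disable_early_printk() tearing it down again once a real console is up. The sketch below is a simplification of that file for orientation, not a verbatim copy; like the patch, it assumes platform code provides prom_putchar().

#include <linux/console.h>
#include <linux/init.h>

extern void prom_putchar(char);	/* polled output routine, provided by board code */

static void early_console_write(struct console *con, const char *s, unsigned int n)
{
	while (n-- && *s) {
		if (*s == '\n')
			prom_putchar('\r');	/* CRLF for serial terminals */
		prom_putchar(*s);
		s++;
	}
}

static struct console early_console = {
	.name	= "early",
	.write	= early_console_write,
	.flags	= CON_PRINTBUFFER | CON_BOOT,	/* replay the printk buffer; boot-time console */
	.index	= -1,
};

void __init setup_early_printk(void)
{
	register_console(&early_console);
}

void __init disable_early_printk(void)
{
	unregister_console(&early_console);
}
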
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 1bf2c844891..49246264cc7 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -53,9 +53,9 @@ obj-$(CONFIG_MIPS_BOARDS_GEN) += irq-msc01.o obj-$(CONFIG_32BIT) += scall32-o32.o obj-$(CONFIG_64BIT) += scall64-64.o obj-$(CONFIG_BINFMT_IRIX) += binfmt_irix.o -obj-$(CONFIG_MIPS32_COMPAT) += linux32.o signal32.o +obj-$(CONFIG_MIPS32_COMPAT) += linux32.o ptrace32.o signal32.o obj-$(CONFIG_MIPS32_N32) += binfmt_elfn32.o scall64-n32.o signal_n32.o -obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o ptrace32.o +obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o obj-$(CONFIG_KGDB) += gdb-low.o gdb-stub.o obj-$(CONFIG_PROC_FS) += proc.o @@ -65,7 +65,6 @@ obj-$(CONFIG_64BIT) += cpu-bugs64.o obj-$(CONFIG_I8253) += i8253.o obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o +obj-$(CONFIG_EARLY_PRINTK) += early_printk.o CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) - -EXTRA_AFLAGS := $(CFLAGS) diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index c0b089d4718..761a779d5c4 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -64,6 +64,9 @@ void output_ptreg_defines(void) offset("#define PT_R31 ", struct pt_regs, regs[31]); offset("#define PT_LO ", struct pt_regs, lo); offset("#define PT_HI ", struct pt_regs, hi); +#ifdef CONFIG_CPU_HAS_SMARTMIPS + offset("#define PT_ACX ", struct pt_regs, acx); +#endif offset("#define PT_EPC ", struct pt_regs, cp0_epc); offset("#define PT_BVADDR ", struct pt_regs, cp0_badvaddr); offset("#define PT_STATUS ", struct pt_regs, cp0_status); @@ -99,7 +102,6 @@ void output_thread_info_defines(void) offset("#define TI_ADDR_LIMIT ", struct thread_info, addr_limit); offset("#define TI_RESTART_BLOCK ", struct thread_info, restart_block); offset("#define TI_REGS ", struct thread_info, regs); - constant("#define _THREAD_SIZE_ORDER ", THREAD_SIZE_ORDER); constant("#define _THREAD_SIZE ", THREAD_SIZE); constant("#define _THREAD_MASK ", THREAD_MASK); linefeed; @@ -246,6 +248,7 @@ void output_sc_defines(void) text("/* Linux sigcontext offsets. */"); offset("#define SC_REGS ", struct sigcontext, sc_regs); offset("#define SC_FPREGS ", struct sigcontext, sc_fpregs); + offset("#define SC_ACX ", struct sigcontext, sc_acx); offset("#define SC_MDHI ", struct sigcontext, sc_mdhi); offset("#define SC_MDLO ", struct sigcontext, sc_mdlo); offset("#define SC_PC ", struct sigcontext, sc_pc); diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index f59ef271d24..ab755ea26c6 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -16,6 +16,7 @@ #include <linux/ptrace.h> #include <linux/stddef.h> +#include <asm/bugs.h> #include <asm/cpu.h> #include <asm/fpu.h> #include <asm/mipsregs.h> @@ -97,7 +98,7 @@ static void au1k_wait(void) static int __initdata nowait = 0; -int __init wait_disable(char *s) +static int __init wait_disable(char *s) { nowait = 1; diff --git a/arch/mips/kernel/early_printk.c b/arch/mips/kernel/early_printk.c new file mode 100644 index 00000000000..304efdc5682 --- /dev/null +++ b/arch/mips/kernel/early_printk.c @@ -0,0 +1,40 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 2002, 2003, 06, 07 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2007 MIPS Technologies, Inc. + * written by Ralf Baechle (ralf@linux-mips.org) + */ +#include <linux/console.h> +#include <linux/init.h> + +extern void prom_putchar(char); + +static void early_console_write(struct console *con, const char *s, unsigned n) +{ + while (n-- && *s) { + if (*s == '\n') + prom_putchar('\r'); + prom_putchar(*s); + s++; + } +} + +static struct console early_console = { + .name = "early", + .write = early_console_write, + .flags = CON_PRINTBUFFER | CON_BOOT, + .index = -1 +}; + +void __init setup_early_printk(void) +{ + register_console(&early_console); +} + +void __init disable_early_printk(void) +{ + unregister_console(&early_console); +} diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index 0b78fcbf044..686249c5c32 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -121,7 +121,11 @@ FEXPORT(restore_partial) # restore partial frame SAVE_AT SAVE_TEMP LONG_L v0, PT_STATUS(sp) - and v0, 1 +#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) + and v0, ST0_IEP +#else + and v0, ST0_IE +#endif beqz v0, 1f jal trace_hardirqs_on b 2f diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index aacd4a005c5..297bd56c234 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -128,6 +128,37 @@ handle_vcei: .align 5 NESTED(handle_int, PT_SIZE, sp) +#ifdef CONFIG_TRACE_IRQFLAGS + /* + * Check to see if the interrupted code has just disabled + * interrupts and ignore this interrupt for now if so. + * + * local_irq_disable() disables interrupts and then calls + * trace_hardirqs_off() to track the state. If an interrupt is taken + * after interrupts are disabled but before the state is updated + * it will appear to restore_all that it is incorrectly returning with + * interrupts disabled + */ + .set push + .set noat + mfc0 k0, CP0_STATUS +#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) + and k0, ST0_IEP + bnez k0, 1f + + mfc0 k0, EP0_EPC + .set noreorder + j k0 + rfe +#else + and k0, ST0_IE + bnez k0, 1f + + eret +#endif +1: + .set pop +#endif SAVE_ALL CLI TRACE_IRQS_OFF @@ -181,13 +212,13 @@ NESTED(except_vec_vi, 0, sp) * during service by SMTC kernel, we also want to * pass the IM value to be cleared. */ -EXPORT(except_vec_vi_mori) +FEXPORT(except_vec_vi_mori) ori a0, $0, 0 #endif /* CONFIG_MIPS_MT_SMTC */ -EXPORT(except_vec_vi_lui) +FEXPORT(except_vec_vi_lui) lui v0, 0 /* Patched */ j except_vec_vi_handler -EXPORT(except_vec_vi_ori) +FEXPORT(except_vec_vi_ori) ori v0, 0 /* Patched */ .set pop END(except_vec_vi) @@ -220,7 +251,17 @@ NESTED(except_vec_vi_handler, 0, sp) _ehb #endif /* CONFIG_MIPS_MT_SMTC */ CLI +#ifdef CONFIG_TRACE_IRQFLAGS + move s0, v0 +#ifdef CONFIG_MIPS_MT_SMTC + move s1, a0 +#endif TRACE_IRQS_OFF +#ifdef CONFIG_MIPS_MT_SMTC + move a0, s1 +#endif + move v0, s0 +#endif LONG_L s0, TI_REGS($28) LONG_S sp, TI_REGS($28) diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c index b33ba6cd7f5..2345160e63f 100644 --- a/arch/mips/kernel/i8259.c +++ b/arch/mips/kernel/i8259.c @@ -28,7 +28,7 @@ * moves to arch independent land */ -static int i8259A_auto_eoi; +static int i8259A_auto_eoi = -1; DEFINE_SPINLOCK(i8259A_lock); /* some platforms call this... 
*/ void mask_and_ack_8259A(unsigned int); @@ -216,7 +216,8 @@ spurious_8259A_irq: static int i8259A_resume(struct sys_device *dev) { - init_8259A(i8259A_auto_eoi); + if (i8259A_auto_eoi >= 0) + init_8259A(i8259A_auto_eoi); return 0; } @@ -226,8 +227,10 @@ static int i8259A_shutdown(struct sys_device *dev) * the kernel initialization code can get it * out of. */ - outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ - outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */ + if (i8259A_auto_eoi >= 0) { + outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ + outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */ + } return 0; } @@ -252,7 +255,7 @@ static int __init i8259A_init_sysfs(void) device_initcall(i8259A_init_sysfs); -void __init init_8259A(int auto_eoi) +void init_8259A(int auto_eoi) { unsigned long flags; @@ -325,8 +328,8 @@ void __init init_i8259_irqs (void) { int i; - request_resource(&ioport_resource, &pic1_io_resource); - request_resource(&ioport_resource, &pic2_io_resource); + insert_resource(&ioport_resource, &pic1_io_resource); + insert_resource(&ioport_resource, &pic2_io_resource); init_8259A(0); diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c index 5929f883e46..c6580018c94 100644 --- a/arch/mips/kernel/kspd.c +++ b/arch/mips/kernel/kspd.c @@ -17,6 +17,7 @@ */ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/sched.h> #include <linux/unistd.h> #include <linux/file.h> #include <linux/fs.h> @@ -70,6 +71,7 @@ static int sp_stopping = 0; #define MTSP_SYSCALL_GETTIME (MTSP_SYSCALL_BASE + 7) #define MTSP_SYSCALL_PIPEFREQ (MTSP_SYSCALL_BASE + 8) #define MTSP_SYSCALL_GETTOD (MTSP_SYSCALL_BASE + 9) +#define MTSP_SYSCALL_IOCTL (MTSP_SYSCALL_BASE + 10) #define MTSP_O_RDONLY 0x0000 #define MTSP_O_WRONLY 0x0001 @@ -110,7 +112,8 @@ struct apsp_table syscall_command_table[] = { { MTSP_SYSCALL_CLOSE, __NR_close }, { MTSP_SYSCALL_READ, __NR_read }, { MTSP_SYSCALL_WRITE, __NR_write }, - { MTSP_SYSCALL_LSEEK32, __NR_lseek } + { MTSP_SYSCALL_LSEEK32, __NR_lseek }, + { MTSP_SYSCALL_IOCTL, __NR_ioctl } }; static int sp_syscall(int num, int arg0, int arg1, int arg2, int arg3) @@ -189,17 +192,22 @@ void sp_work_handle_request(void) struct mtsp_syscall_generic generic; struct mtsp_syscall_ret ret; struct kspd_notifications *n; + unsigned long written; + mm_segment_t old_fs; struct timeval tv; struct timezone tz; int cmd; char *vcwd; - mm_segment_t old_fs; int size; ret.retval = -1; - if (!rtlx_read(RTLX_CHANNEL_SYSIO, &sc, sizeof(struct mtsp_syscall), 0)) { + old_fs = get_fs(); + set_fs(KERNEL_DS); + + if (!rtlx_read(RTLX_CHANNEL_SYSIO, &sc, sizeof(struct mtsp_syscall))) { + set_fs(old_fs); printk(KERN_ERR "Expected request but nothing to read\n"); return; } @@ -207,7 +215,8 @@ void sp_work_handle_request(void) size = sc.size; if (size) { - if (!rtlx_read(RTLX_CHANNEL_SYSIO, &generic, size, 0)) { + if (!rtlx_read(RTLX_CHANNEL_SYSIO, &generic, size)) { + set_fs(old_fs); printk(KERN_ERR "Expected request but nothing to read\n"); return; } @@ -232,8 +241,6 @@ void sp_work_handle_request(void) if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv, (int)&tz, 0,0)) == 0) ret.retval = tv.tv_sec; - - ret.errno = errno; break; case MTSP_SYSCALL_EXIT: @@ -270,7 +277,6 @@ void sp_work_handle_request(void) if (cmd >= 0) { ret.retval = sp_syscall(cmd, generic.arg0, generic.arg1, generic.arg2, generic.arg3); - ret.errno = errno; } else printk(KERN_WARNING "KSPD: Unknown SP syscall number %d\n", sc.cmd); @@ -280,8 +286,11 @@ void sp_work_handle_request(void) if 
(vpe_getuid(SP_VPE)) sp_setfsuidgid( 0, 0); - if ((rtlx_write(RTLX_CHANNEL_SYSIO, &ret, sizeof(struct mtsp_syscall_ret), 0)) - < sizeof(struct mtsp_syscall_ret)) + old_fs = get_fs(); + set_fs(KERNEL_DS); + written = rtlx_write(RTLX_CHANNEL_SYSIO, &ret, sizeof(ret)); + set_fs(old_fs); + if (written < sizeof(ret)) printk("KSPD: sp_work_handle_request failed to send to SP\n"); } diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index fc4dd6c9dd8..37849edd064 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c @@ -166,34 +166,6 @@ out: return error; } -asmlinkage long -sysn32_waitid(int which, compat_pid_t pid, - siginfo_t __user *uinfo, int options, - struct compat_rusage __user *uru) -{ - struct rusage ru; - long ret; - mm_segment_t old_fs = get_fs(); - int si_signo; - - if (!access_ok(VERIFY_WRITE, uinfo, sizeof(*uinfo))) - return -EFAULT; - - set_fs (KERNEL_DS); - ret = sys_waitid(which, pid, uinfo, options, - uru ? (struct rusage __user *) &ru : NULL); - set_fs (old_fs); - - if (__get_user(si_signo, &uinfo->si_signo)) - return -EFAULT; - if (ret < 0 || si_signo == 0) - return ret; - - if (uru) - ret = put_compat_rusage(&ru, uru); - return ret; -} - #define RLIM_INFINITY32 0x7fffffff #define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x) @@ -339,6 +311,8 @@ asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid, return ret; } +#ifdef CONFIG_SYSVIPC + asmlinkage long sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth) { @@ -396,6 +370,16 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth) return err; } +#else + +asmlinkage long +sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth) +{ + return -ENOSYS; +} + +#endif /* CONFIG_SYSVIPC */ + #ifdef CONFIG_MIPS32_N32 asmlinkage long sysn32_semctl(int semid, int semnum, int cmd, u32 arg) { @@ -572,151 +556,6 @@ asmlinkage long sys32_sync_file_range(int fd, int __pad, flags); } -/* Argument list sizes for sys_socketcall */ -#define AL(x) ((x) * sizeof(unsigned int)) -static unsigned char socketcall_nargs[18]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), - AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), - AL(6),AL(2),AL(5),AL(5),AL(3),AL(3)}; -#undef AL - -/* - * System call vectors. - * - * Argument checking cleaned up. Saved 20% in size. - * This function doesn't need to set the kernel lock because - * it is set by the callees. 
- */ - -asmlinkage long sys32_socketcall(int call, unsigned int __user *args32) -{ - unsigned int a[6]; - unsigned int a0,a1; - int err; - - extern asmlinkage long sys_socket(int family, int type, int protocol); - extern asmlinkage long sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen); - extern asmlinkage long sys_connect(int fd, struct sockaddr __user *uservaddr, int addrlen); - extern asmlinkage long sys_listen(int fd, int backlog); - extern asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen); - extern asmlinkage long sys_getsockname(int fd, struct sockaddr __user *usockaddr, int __user *usockaddr_len); - extern asmlinkage long sys_getpeername(int fd, struct sockaddr __user *usockaddr, int __user *usockaddr_len); - extern asmlinkage long sys_socketpair(int family, int type, int protocol, int __user *usockvec); - extern asmlinkage long sys_send(int fd, void __user * buff, size_t len, unsigned flags); - extern asmlinkage long sys_sendto(int fd, void __user * buff, size_t len, unsigned flags, - struct sockaddr __user *addr, int addr_len); - extern asmlinkage long sys_recv(int fd, void __user * ubuf, size_t size, unsigned flags); - extern asmlinkage long sys_recvfrom(int fd, void __user * ubuf, size_t size, unsigned flags, - struct sockaddr __user *addr, int __user *addr_len); - extern asmlinkage long sys_shutdown(int fd, int how); - extern asmlinkage long sys_setsockopt(int fd, int level, int optname, char __user *optval, int optlen); - extern asmlinkage long sys_getsockopt(int fd, int level, int optname, char __user *optval, int __user *optlen); - extern asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); - extern asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, unsigned int flags); - - - if(call<1||call>SYS_RECVMSG) - return -EINVAL; - - /* copy_from_user should be SMP safe. 
*/ - if (copy_from_user(a, args32, socketcall_nargs[call])) - return -EFAULT; - - a0=a[0]; - a1=a[1]; - - switch(call) - { - case SYS_SOCKET: - err = sys_socket(a0,a1,a[2]); - break; - case SYS_BIND: - err = sys_bind(a0,(struct sockaddr __user *)A(a1), a[2]); - break; - case SYS_CONNECT: - err = sys_connect(a0, (struct sockaddr __user *)A(a1), a[2]); - break; - case SYS_LISTEN: - err = sys_listen(a0,a1); - break; - case SYS_ACCEPT: - err = sys_accept(a0,(struct sockaddr __user *)A(a1), (int __user *)A(a[2])); - break; - case SYS_GETSOCKNAME: - err = sys_getsockname(a0,(struct sockaddr __user *)A(a1), (int __user *)A(a[2])); - break; - case SYS_GETPEERNAME: - err = sys_getpeername(a0, (struct sockaddr __user *)A(a1), (int __user *)A(a[2])); - break; - case SYS_SOCKETPAIR: - err = sys_socketpair(a0,a1, a[2], (int __user *)A(a[3])); - break; - case SYS_SEND: - err = sys_send(a0, (void __user *)A(a1), a[2], a[3]); - break; - case SYS_SENDTO: - err = sys_sendto(a0,(void __user *)A(a1), a[2], a[3], - (struct sockaddr __user *)A(a[4]), a[5]); - break; - case SYS_RECV: - err = sys_recv(a0, (void __user *)A(a1), a[2], a[3]); - break; - case SYS_RECVFROM: - err = sys_recvfrom(a0, (void __user *)A(a1), a[2], a[3], - (struct sockaddr __user *)A(a[4]), (int __user *)A(a[5])); - break; - case SYS_SHUTDOWN: - err = sys_shutdown(a0,a1); - break; - case SYS_SETSOCKOPT: - err = sys_setsockopt(a0, a1, a[2], (char __user *)A(a[3]), a[4]); - break; - case SYS_GETSOCKOPT: - err = sys_getsockopt(a0, a1, a[2], (char __user *)A(a[3]), (int __user *)A(a[4])); - break; - case SYS_SENDMSG: - err = sys_sendmsg(a0, (struct msghdr __user *) A(a1), a[2]); - break; - case SYS_RECVMSG: - err = sys_recvmsg(a0, (struct msghdr __user *) A(a1), a[2]); - break; - default: - err = -EINVAL; - break; - } - return err; -} - -struct sigevent32 { - u32 sigev_value; - u32 sigev_signo; - u32 sigev_notify; - u32 payload[(64 / 4) - 3]; -}; - -extern asmlinkage long -sys_timer_create(clockid_t which_clock, - struct sigevent __user *timer_event_spec, - timer_t __user * created_timer_id); - -long -sys32_timer_create(u32 clock, struct sigevent32 __user *se32, timer_t __user *timer_id) -{ - struct sigevent __user *p = NULL; - if (se32) { - struct sigevent se; - p = compat_alloc_user_space(sizeof(struct sigevent)); - memset(&se, 0, sizeof(struct sigevent)); - if (get_user(se.sigev_value.sival_int, &se32->sigev_value) || - __get_user(se.sigev_signo, &se32->sigev_signo) || - __get_user(se.sigev_notify, &se32->sigev_notify) || - __copy_from_user(&se._sigev_un._pad, &se32->payload, - sizeof(se32->payload)) || - copy_to_user(p, &se, sizeof(se))) - return -EFAULT; - } - return sys_timer_create(clock, p, timer_id); -} - save_static_function(sys32_clone); __attribute_used__ noinline static int _sys32_clone(nabi_no_regargs struct pt_regs regs) @@ -737,49 +576,3 @@ _sys32_clone(nabi_no_regargs struct pt_regs regs) return do_fork(clone_flags, newsp, ®s, 0, parent_tidptr, child_tidptr); } - -/* - * Implement the event wait interface for the eventpoll file. It is the kernel - * part of the user space epoll_pwait(2). - */ -asmlinkage long compat_sys_epoll_pwait(int epfd, - struct epoll_event __user *events, int maxevents, int timeout, - const compat_sigset_t __user *sigmask, size_t sigsetsize) -{ - int error; - sigset_t ksigmask, sigsaved; - - /* - * If the caller wants a certain signal mask to be set during the wait, - * we apply it here. 
- */ - if (sigmask) { - if (sigsetsize != sizeof(sigset_t)) - return -EINVAL; - if (!access_ok(VERIFY_READ, sigmask, sizeof(ksigmask))) - return -EFAULT; - if (__copy_conv_sigset_from_user(&ksigmask, sigmask)) - return -EFAULT; - sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP)); - sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); - } - - error = sys_epoll_wait(epfd, events, maxevents, timeout); - - /* - * If we changed the signal mask, we need to restore the original one. - * In case we've got a signal while waiting, we do not restore the - * signal mask yet, and we allow do_signal() to deliver the signal on - * the way back to userspace, before the signal mask is restored. - */ - if (sigmask) { - if (error == -EINTR) { - memcpy(¤t->saved_sigmask, &sigsaved, - sizeof(sigsaved)); - set_thread_flag(TIF_RESTORE_SIGMASK); - } else - sigprocmask(SIG_SETMASK, &sigsaved, NULL); - } - - return error; -} diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c index e0ad754c7ed..8f42fa85ac9 100644 --- a/arch/mips/kernel/machine_kexec.c +++ b/arch/mips/kernel/machine_kexec.c @@ -13,8 +13,8 @@ #include <asm/cacheflush.h> #include <asm/page.h> -const extern unsigned char relocate_new_kernel[]; -const extern unsigned int relocate_new_kernel_size; +extern const unsigned char relocate_new_kernel[]; +extern const unsigned int relocate_new_kernel_size; extern unsigned long kexec_start_address; extern unsigned long kexec_indirection_page; diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c index 2ef857c3ee5..225755d0c1f 100644 --- a/arch/mips/kernel/mips_ksyms.c +++ b/arch/mips/kernel/mips_ksyms.c @@ -37,6 +37,7 @@ EXPORT_SYMBOL(kernel_thread); * Userspace access stuff. */ EXPORT_SYMBOL(__copy_user); +EXPORT_SYMBOL(__copy_user_inatomic); EXPORT_SYMBOL(__bzero); EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm); EXPORT_SYMBOL(__strncpy_from_user_asm); diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 04e5b38d327..6bdfb5a9fa1 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -26,7 +26,6 @@ #include <linux/completion.h> #include <linux/kallsyms.h> -#include <asm/abi.h> #include <asm/bootinfo.h> #include <asm/cpu.h> #include <asm/dsp.h> @@ -52,11 +51,11 @@ ATTRIB_NORET void cpu_idle(void) /* endless idle loop with no priority at all */ while (1) { while (!need_resched()) { -#ifdef CONFIG_MIPS_MT_SMTC +#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG extern void smtc_idle_loop_hook(void); smtc_idle_loop_hook(); -#endif /* CONFIG_MIPS_MT_SMTC */ +#endif if (cpu_wait) (*cpu_wait)(); } @@ -66,38 +65,6 @@ ATTRIB_NORET void cpu_idle(void) } } -/* - * Native o32 and N64 ABI without DSP ASE - */ -struct mips_abi mips_abi = { - .do_signal = do_signal, -#ifdef CONFIG_TRAD_SIGNALS - .setup_frame = setup_frame, -#endif - .setup_rt_frame = setup_rt_frame -}; - -#ifdef CONFIG_MIPS32_O32 -/* - * o32 compatibility on 64-bit kernels, without DSP ASE - */ -struct mips_abi mips_abi_32 = { - .do_signal = do_signal32, - .setup_frame = setup_frame_32, - .setup_rt_frame = setup_rt_frame_32 -}; -#endif /* CONFIG_MIPS32_O32 */ - -#ifdef CONFIG_MIPS32_N32 -/* - * N32 on 64-bit kernels, without DSP ASE - */ -struct mips_abi mips_abi_n32 = { - .do_signal = do_signal, - .setup_rt_frame = setup_rt_frame_n32 -}; -#endif /* CONFIG_MIPS32_N32 */ - asmlinkage void ret_from_fork(void); void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) @@ -246,7 +213,7 @@ int dump_task_fpu (struct task_struct *t, elf_fpregset_t *fpr) /* * Create 
a kernel thread */ -ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *)) +static ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *)) { do_exit(fn(arg)); } diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 258d74fd0b6..201ae194d1b 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -236,6 +236,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) case MMLO: tmp = regs->lo; break; +#ifdef CONFIG_CPU_HAS_SMARTMIPS + case ACX: + tmp = regs->acx; + break; +#endif case FPC_CSR: tmp = child->thread.fpu.fcr31; break; @@ -362,6 +367,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) case MMLO: regs->lo = data; break; +#ifdef CONFIG_CPU_HAS_SMARTMIPS + case ACX: + regs->acx = data; + break; +#endif case FPC_CSR: child->thread.fpu.fcr31 = data; break; diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 59c1577ecbb..dbd42adc52e 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S @@ -114,14 +114,6 @@ LEAF(_save_fp_context32) */ LEAF(_restore_fp_context) EX lw t0, SC_FPC_CSR(a0) - - /* Fail if the CSR has exceptions pending */ - srl t1, t0, 5 - and t1, t0 - andi t1, 0x1f << 7 - bnez t1, fault - nop - #ifdef CONFIG_64BIT EX ldc1 $f1, SC_FPREGS+8(a0) EX ldc1 $f3, SC_FPREGS+24(a0) @@ -165,14 +157,6 @@ LEAF(_restore_fp_context) LEAF(_restore_fp_context32) /* Restore an o32 sigcontext. */ EX lw t0, SC32_FPC_CSR(a0) - - /* Fail if the CSR has exceptions pending */ - srl t1, t0, 5 - and t1, t0 - andi t1, 0x1f << 7 - bnez t1, fault - nop - EX ldc1 $f0, SC32_FPREGS+0(a0) EX ldc1 $f2, SC32_FPREGS+16(a0) EX ldc1 $f4, SC32_FPREGS+32(a0) diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index d92c48e0d7a..bfc8ca168f8 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c @@ -53,7 +53,8 @@ static char module_name[] = "rtlx"; static struct chan_waitqueues { wait_queue_head_t rt_queue; wait_queue_head_t lx_queue; - int in_open; + atomic_t in_open; + struct mutex mutex; } channel_wqs[RTLX_CHANNELS]; static struct irqaction irq; @@ -146,110 +147,91 @@ static void stopping(int vpe) int rtlx_open(int index, int can_sleep) { - int ret; + struct rtlx_info **p; struct rtlx_channel *chan; - volatile struct rtlx_info **p; + enum rtlx_state state; + int ret = 0; if (index >= RTLX_CHANNELS) { printk(KERN_DEBUG "rtlx_open index out of range\n"); return -ENOSYS; } - if (channel_wqs[index].in_open) { - printk(KERN_DEBUG "rtlx_open channel %d already opened\n", index); - return -EBUSY; + if (atomic_inc_return(&channel_wqs[index].in_open) > 1) { + printk(KERN_DEBUG "rtlx_open channel %d already opened\n", + index); + ret = -EBUSY; + goto out_fail; } - channel_wqs[index].in_open++; - if (rtlx == NULL) { if( (p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) { if (can_sleep) { - DECLARE_WAITQUEUE(wait, current); - - /* go to sleep */ - add_wait_queue(&channel_wqs[index].lx_queue, &wait); - - set_current_state(TASK_INTERRUPTIBLE); - while ((p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) { - schedule(); - set_current_state(TASK_INTERRUPTIBLE); - } - - set_current_state(TASK_RUNNING); - remove_wait_queue(&channel_wqs[index].lx_queue, &wait); - - /* back running */ + __wait_event_interruptible(channel_wqs[index].lx_queue, + (p = vpe_get_shared(RTLX_TARG_VPE)), + ret); + if (ret) + goto out_fail; } else { - printk( KERN_DEBUG "No SP program loaded, and device " + printk(KERN_DEBUG "No SP program loaded, and device " "opened with 
O_NONBLOCK\n"); - channel_wqs[index].in_open = 0; - return -ENOSYS; + ret = -ENOSYS; + goto out_fail; } } + smp_rmb(); if (*p == NULL) { if (can_sleep) { - DECLARE_WAITQUEUE(wait, current); - - /* go to sleep */ - add_wait_queue(&channel_wqs[index].lx_queue, &wait); - - set_current_state(TASK_INTERRUPTIBLE); - while (*p == NULL) { - schedule(); - - /* reset task state to interruptable otherwise - we'll whizz round here like a very fast loopy - thing. schedule() appears to return with state - set to TASK_RUNNING. - - If the loaded SP program, for whatever reason, - doesn't set up the shared structure *p will never - become true. So whoever connected to either /dev/rt? - or if it was kspd, will then take up rather a lot of - processor cycles. - */ - - set_current_state(TASK_INTERRUPTIBLE); + DEFINE_WAIT(wait); + + for (;;) { + prepare_to_wait(&channel_wqs[index].lx_queue, &wait, TASK_INTERRUPTIBLE); + smp_rmb(); + if (*p != NULL) + break; + if (!signal_pending(current)) { + schedule(); + continue; + } + ret = -ERESTARTSYS; + goto out_fail; } - - set_current_state(TASK_RUNNING); - remove_wait_queue(&channel_wqs[index].lx_queue, &wait); - - /* back running */ - } - else { + finish_wait(&channel_wqs[index].lx_queue, &wait); + } else { printk(" *vpe_get_shared is NULL. " "Has an SP program been loaded?\n"); - channel_wqs[index].in_open = 0; - return -ENOSYS; + ret = -ENOSYS; + goto out_fail; } } if ((unsigned int)*p < KSEG0) { printk(KERN_WARNING "vpe_get_shared returned an invalid pointer " "maybe an error code %d\n", (int)*p); - channel_wqs[index].in_open = 0; - return -ENOSYS; + ret = -ENOSYS; + goto out_fail; } - if ((ret = rtlx_init(*p)) < 0) { - channel_wqs[index].in_open = 0; - return ret; - } + if ((ret = rtlx_init(*p)) < 0) + goto out_ret; } chan = &rtlx->channel[index]; - if (chan->lx_state == RTLX_STATE_OPENED) { - channel_wqs[index].in_open = 0; - return -EBUSY; - } + state = xchg(&chan->lx_state, RTLX_STATE_OPENED); + if (state == RTLX_STATE_OPENED) { + ret = -EBUSY; + goto out_fail; + } - chan->lx_state = RTLX_STATE_OPENED; - channel_wqs[index].in_open = 0; - return 0; +out_fail: + smp_mb(); + atomic_dec(&channel_wqs[index].in_open); + smp_mb(); + +out_ret: + return ret; } int rtlx_release(int index) @@ -270,30 +252,17 @@ unsigned int rtlx_read_poll(int index, int can_sleep) /* data available to read? 
*/ if (chan->lx_read == chan->lx_write) { if (can_sleep) { - DECLARE_WAITQUEUE(wait, current); - - /* go to sleep */ - add_wait_queue(&channel_wqs[index].lx_queue, &wait); - - set_current_state(TASK_INTERRUPTIBLE); - while (chan->lx_read == chan->lx_write) { - schedule(); + int ret = 0; - set_current_state(TASK_INTERRUPTIBLE); + __wait_event_interruptible(channel_wqs[index].lx_queue, + chan->lx_read != chan->lx_write || sp_stopping, + ret); + if (ret) + return ret; - if (sp_stopping) { - set_current_state(TASK_RUNNING); - remove_wait_queue(&channel_wqs[index].lx_queue, &wait); - return 0; - } - } - - set_current_state(TASK_RUNNING); - remove_wait_queue(&channel_wqs[index].lx_queue, &wait); - - /* back running */ - } - else + if (sp_stopping) + return 0; + } else return 0; } @@ -320,56 +289,53 @@ unsigned int rtlx_write_poll(int index) return write_spacefree(chan->rt_read, chan->rt_write, chan->buffer_size); } -static inline void copy_to(void *dst, void *src, size_t count, int user) +ssize_t rtlx_read(int index, void __user *buff, size_t count) { - if (user) - copy_to_user(dst, src, count); - else - memcpy(dst, src, count); -} - -static inline void copy_from(void *dst, void *src, size_t count, int user) -{ - if (user) - copy_from_user(dst, src, count); - else - memcpy(dst, src, count); -} - -ssize_t rtlx_read(int index, void *buff, size_t count, int user) -{ - size_t fl = 0L; + size_t lx_write, fl = 0L; struct rtlx_channel *lx; + unsigned long failed; if (rtlx == NULL) return -ENOSYS; lx = &rtlx->channel[index]; + mutex_lock(&channel_wqs[index].mutex); + smp_rmb(); + lx_write = lx->lx_write; + /* find out how much in total */ count = min(count, - (size_t)(lx->lx_write + lx->buffer_size - lx->lx_read) + (size_t)(lx_write + lx->buffer_size - lx->lx_read) % lx->buffer_size); /* then how much from the read pointer onwards */ - fl = min( count, (size_t)lx->buffer_size - lx->lx_read); + fl = min(count, (size_t)lx->buffer_size - lx->lx_read); - copy_to(buff, &lx->lx_buffer[lx->lx_read], fl, user); + failed = copy_to_user(buff, lx->lx_buffer + lx->lx_read, fl); + if (failed) + goto out; /* and if there is anything left at the beginning of the buffer */ - if ( count - fl ) - copy_to (buff + fl, lx->lx_buffer, count - fl, user); + if (count - fl) + failed = copy_to_user(buff + fl, lx->lx_buffer, count - fl); + +out: + count -= failed; - /* update the index */ - lx->lx_read += count; - lx->lx_read %= lx->buffer_size; + smp_wmb(); + lx->lx_read = (lx->lx_read + count) % lx->buffer_size; + smp_wmb(); + mutex_unlock(&channel_wqs[index].mutex); return count; } -ssize_t rtlx_write(int index, void *buffer, size_t count, int user) +ssize_t rtlx_write(int index, const void __user *buffer, size_t count) { struct rtlx_channel *rt; + unsigned long failed; + size_t rt_read; size_t fl; if (rtlx == NULL) @@ -377,24 +343,35 @@ ssize_t rtlx_write(int index, void *buffer, size_t count, int user) rt = &rtlx->channel[index]; + mutex_lock(&channel_wqs[index].mutex); + smp_rmb(); + rt_read = rt->rt_read; + /* total number of bytes to copy */ count = min(count, - (size_t)write_spacefree(rt->rt_read, rt->rt_write, - rt->buffer_size)); + (size_t)write_spacefree(rt_read, rt->rt_write, rt->buffer_size)); /* first bit from write pointer to the end of the buffer, or count */ fl = min(count, (size_t) rt->buffer_size - rt->rt_write); - copy_from (&rt->rt_buffer[rt->rt_write], buffer, fl, user); + failed = copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl); + if (failed) + goto out; /* if there's any left copy to the 
beginning of the buffer */ - if( count - fl ) - copy_from (rt->rt_buffer, buffer + fl, count - fl, user); + if (count - fl) { + failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl); + } - rt->rt_write += count; - rt->rt_write %= rt->buffer_size; +out: + count -= failed; - return(count); + smp_wmb(); + rt->rt_write = (rt->rt_write + count) % rt->buffer_size; + smp_wmb(); + mutex_unlock(&channel_wqs[index].mutex); + + return count; } @@ -446,7 +423,7 @@ static ssize_t file_read(struct file *file, char __user * buffer, size_t count, return 0; // -EAGAIN makes cat whinge } - return rtlx_read(minor, buffer, count, 1); + return rtlx_read(minor, buffer, count); } static ssize_t file_write(struct file *file, const char __user * buffer, @@ -454,28 +431,25 @@ static ssize_t file_write(struct file *file, const char __user * buffer, { int minor; struct rtlx_channel *rt; - DECLARE_WAITQUEUE(wait, current); minor = iminor(file->f_path.dentry->d_inode); rt = &rtlx->channel[minor]; /* any space left... */ if (!rtlx_write_poll(minor)) { + int ret = 0; if (file->f_flags & O_NONBLOCK) return -EAGAIN; - add_wait_queue(&channel_wqs[minor].rt_queue, &wait); - set_current_state(TASK_INTERRUPTIBLE); - - while (!rtlx_write_poll(minor)) - schedule(); - - set_current_state(TASK_RUNNING); - remove_wait_queue(&channel_wqs[minor].rt_queue, &wait); + __wait_event_interruptible(channel_wqs[minor].rt_queue, + rtlx_write_poll(minor), + ret); + if (ret) + return ret; } - return rtlx_write(minor, (void *)buffer, count, 1); + return rtlx_write(minor, buffer, count); } static const struct file_operations rtlx_fops = { @@ -513,7 +487,8 @@ static int rtlx_module_init(void) for (i = 0; i < RTLX_CHANNELS; i++) { init_waitqueue_head(&channel_wqs[i].rt_queue); init_waitqueue_head(&channel_wqs[i].lx_queue); - channel_wqs[i].in_open = 0; + atomic_set(&channel_wqs[i].in_open, 0); + mutex_init(&channel_wqs[i].mutex); dev = device_create(mt_class, NULL, MKDEV(major, i), "%s%d", module_name, i); diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 7c0b3936ba4..0c9a9ff8cd2 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -656,6 +656,8 @@ einval: li v0, -EINVAL sys sys_kexec_load 4 sys sys_getcpu 3 sys sys_epoll_pwait 6 + sys sys_ioprio_set 3 + sys sys_ioprio_get 2 .endm /* We pre-compute the number of _instruction_ bytes needed to diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 10e9a18630a..23f3b118f71 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -470,4 +470,7 @@ sys_call_table: PTR sys_get_robust_list PTR sys_kexec_load /* 5270 */ PTR sys_getcpu - PTR compat_sys_epoll_pwait + PTR sys_epoll_pwait + PTR sys_ioprio_set + PTR sys_ioprio_get + .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 2ceda4644a4..6eac2833742 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -340,7 +340,7 @@ EXPORT(sysn32_call_table) PTR compat_sys_statfs64 PTR compat_sys_fstatfs64 PTR sys_sendfile64 - PTR sys32_timer_create /* 6220 */ + PTR compat_sys_timer_create /* 6220 */ PTR compat_sys_timer_settime PTR compat_sys_timer_gettime PTR sys_timer_getoverrun @@ -361,7 +361,7 @@ EXPORT(sysn32_call_table) PTR compat_sys_mq_notify PTR compat_sys_mq_getsetattr PTR sys_ni_syscall /* 6240, sys_vserver */ - PTR sysn32_waitid + PTR compat_sys_waitid PTR sys_ni_syscall /* available, was setaltroot */ PTR sys_add_key PTR 
sys_request_key @@ -395,5 +395,8 @@ EXPORT(sysn32_call_table) PTR compat_sys_set_robust_list PTR compat_sys_get_robust_list PTR compat_sys_kexec_load - PTR sys_getcpu + PTR sys_getcpu /* 6275 */ PTR compat_sys_epoll_pwait + PTR sys_ioprio_set + PTR sys_ioprio_get + .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index c5f590ca99b..7e74b412a78 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -307,7 +307,7 @@ sys_call_table: PTR compat_sys_statfs PTR compat_sys_fstatfs /* 4100 */ PTR sys_ni_syscall /* sys_ioperm */ - PTR sys32_socketcall + PTR compat_sys_socketcall PTR sys_syslog PTR compat_sys_setitimer PTR compat_sys_getitimer /* 4105 */ @@ -462,7 +462,7 @@ sys_call_table: PTR sys_fadvise64_64 PTR compat_sys_statfs64 /* 4255 */ PTR compat_sys_fstatfs64 - PTR sys32_timer_create + PTR compat_sys_timer_create PTR compat_sys_timer_settime PTR compat_sys_timer_gettime PTR sys_timer_getoverrun /* 4260 */ @@ -518,5 +518,7 @@ sys_call_table: PTR compat_sys_get_robust_list /* 4310 */ PTR compat_sys_kexec_load PTR sys_getcpu - PTR sys_epoll_pwait + PTR compat_sys_epoll_pwait + PTR sys_ioprio_set + PTR sys_ioprio_get /* 4315 */ .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 394540fad76..4975da0bfb6 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -525,6 +525,14 @@ void __init setup_arch(char **cmdline_p) { cpu_probe(); prom_init(); + +#ifdef CONFIG_EARLY_PRINTK + { + extern void setup_early_printk(void); + + setup_early_printk(); + } +#endif cpu_report(); #if defined(CONFIG_VT) @@ -543,7 +551,7 @@ void __init setup_arch(char **cmdline_p) #endif } -int __init fpu_disable(char *s) +static int __init fpu_disable(char *s) { int i; @@ -555,7 +563,7 @@ int __init fpu_disable(char *s) __setup("nofpu", fpu_disable); -int __init dsp_disable(char *s) +static int __init dsp_disable(char *s) { cpu_data[0].ases &= ~MIPS_ASE_DSP; diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h index fdbdbdc65b5..c0faabd5201 100644 --- a/arch/mips/kernel/signal-common.h +++ b/arch/mips/kernel/signal-common.h @@ -31,4 +31,16 @@ extern void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, */ extern int install_sigtramp(unsigned int __user *tramp, unsigned int syscall); +/* Check and clear pending FPU exceptions in saved CSR */ +extern int fpcsr_pending(unsigned int __user *fpcsr); + +/* Make sure we will not lose FPU ownership */ +#ifdef CONFIG_PREEMPT +#define lock_fpu_owner() preempt_disable() +#define unlock_fpu_owner() preempt_enable() +#else +#define lock_fpu_owner() pagefault_disable() +#define unlock_fpu_owner() pagefault_enable() +#endif + #endif /* __SIGNAL_COMMON_H */ diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index b2e9ab1bb10..07d67309451 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -20,6 +20,7 @@ #include <linux/ptrace.h> #include <linux/unistd.h> #include <linux/compiler.h> +#include <linux/uaccess.h> #include <asm/abi.h> #include <asm/asm.h> @@ -27,7 +28,6 @@ #include <asm/cacheflush.h> #include <asm/fpu.h> #include <asm/sim.h> -#include <asm/uaccess.h> #include <asm/ucontext.h> #include <asm/cpu-features.h> #include <asm/war.h> @@ -78,10 +78,51 @@ struct rt_sigframe { /* * Helper routines */ +static int protected_save_fp_context(struct sigcontext __user *sc) +{ + int err; + while (1) { + lock_fpu_owner(); + 
own_fpu_inatomic(1); + err = save_fp_context(sc); /* this might fail */ + unlock_fpu_owner(); + if (likely(!err)) + break; + /* touch the sigcontext and try again */ + err = __put_user(0, &sc->sc_fpregs[0]) | + __put_user(0, &sc->sc_fpregs[31]) | + __put_user(0, &sc->sc_fpc_csr); + if (err) + break; /* really bad sigcontext */ + } + return err; +} + +static int protected_restore_fp_context(struct sigcontext __user *sc) +{ + int err, tmp; + while (1) { + lock_fpu_owner(); + own_fpu_inatomic(0); + err = restore_fp_context(sc); /* this might fail */ + unlock_fpu_owner(); + if (likely(!err)) + break; + /* touch the sigcontext and try again */ + err = __get_user(tmp, &sc->sc_fpregs[0]) | + __get_user(tmp, &sc->sc_fpregs[31]) | + __get_user(tmp, &sc->sc_fpc_csr); + if (err) + break; /* really bad sigcontext */ + } + return err; +} + int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { int err = 0; int i; + unsigned int used_math; err |= __put_user(regs->cp0_epc, &sc->sc_pc); @@ -89,6 +130,9 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) for (i = 1; i < 32; i++) err |= __put_user(regs->regs[i], &sc->sc_regs[i]); +#ifdef CONFIG_CPU_HAS_SMARTMIPS + err |= __put_user(regs->acx, &sc->sc_acx); +#endif err |= __put_user(regs->hi, &sc->sc_mdhi); err |= __put_user(regs->lo, &sc->sc_mdlo); if (cpu_has_dsp) { @@ -101,24 +145,48 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); } - err |= __put_user(!!used_math(), &sc->sc_used_math); + used_math = !!used_math(); + err |= __put_user(used_math, &sc->sc_used_math); - if (used_math()) { + if (used_math) { /* * Save FPU state to signal context. Signal handler * will "inherit" current FPU state. */ - preempt_disable(); + err |= protected_save_fp_context(sc); + } + return err; +} - if (!is_fpu_owner()) { - own_fpu(); - restore_fp(current); - } - err |= save_fp_context(sc); +int fpcsr_pending(unsigned int __user *fpcsr) +{ + int err, sig = 0; + unsigned int csr, enabled; - preempt_enable(); + err = __get_user(csr, fpcsr); + enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5); + /* + * If the signal handler set some FPU exceptions, clear it and + * send SIGFPE. + */ + if (csr & enabled) { + csr &= ~enabled; + err |= __put_user(csr, fpcsr); + sig = SIGFPE; } - return err; + return err ?: sig; +} + +static int +check_and_restore_fp_context(struct sigcontext __user *sc) +{ + int err, sig; + + err = sig = fpcsr_pending(&sc->sc_fpc_csr); + if (err > 0) + err = 0; + err |= protected_restore_fp_context(sc); + return err ?: sig; } int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) @@ -132,6 +200,10 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) current_thread_info()->restart_block.fn = do_no_restart_syscall; err |= __get_user(regs->cp0_epc, &sc->sc_pc); + +#ifdef CONFIG_CPU_HAS_SMARTMIPS + err |= __get_user(regs->acx, &sc->sc_acx); +#endif err |= __get_user(regs->hi, &sc->sc_mdhi); err |= __get_user(regs->lo, &sc->sc_mdlo); if (cpu_has_dsp) { @@ -150,19 +222,15 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) err |= __get_user(used_math, &sc->sc_used_math); conditional_used_math(used_math); - preempt_disable(); - - if (used_math()) { + if (used_math) { /* restore fpu context if we have used it before */ - own_fpu(); - err |= restore_fp_context(sc); + if (!err) + err = check_and_restore_fp_context(sc); } else { /* signal handler may have used FPU. Give it up. 
*/ - lose_fpu(); + lose_fpu(0); } - preempt_enable(); - return err; } @@ -325,6 +393,7 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs) { struct sigframe __user *frame; sigset_t blocked; + int sig; frame = (struct sigframe __user *) regs.regs[29]; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) @@ -338,8 +407,11 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs) recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); - if (restore_sigcontext(®s, &frame->sf_sc)) + sig = restore_sigcontext(®s, &frame->sf_sc); + if (sig < 0) goto badframe; + else if (sig) + force_sig(sig, current); /* * Don't let your children do this ... @@ -361,6 +433,7 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs) struct rt_sigframe __user *frame; sigset_t set; stack_t st; + int sig; frame = (struct rt_sigframe __user *) regs.regs[29]; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) @@ -374,8 +447,11 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs) recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); - if (restore_sigcontext(®s, &frame->rs_uc.uc_mcontext)) + sig = restore_sigcontext(®s, &frame->rs_uc.uc_mcontext); + if (sig < 0) goto badframe; + else if (sig) + force_sig(sig, current); if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st))) goto badframe; @@ -398,7 +474,7 @@ badframe: } #ifdef CONFIG_TRAD_SIGNALS -int setup_frame(struct k_sigaction * ka, struct pt_regs *regs, +static int setup_frame(struct k_sigaction * ka, struct pt_regs *regs, int signr, sigset_t *set) { struct sigframe __user *frame; @@ -443,7 +519,7 @@ give_sigsegv: } #endif -int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, +static int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info) { struct rt_sigframe __user *frame; @@ -501,6 +577,14 @@ give_sigsegv: return -EFAULT; } +struct mips_abi mips_abi = { +#ifdef CONFIG_TRAD_SIGNALS + .setup_frame = setup_frame, +#endif + .setup_rt_frame = setup_rt_frame, + .restart = __NR_restart_syscall +}; + static int handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) { @@ -539,7 +623,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info, return ret; } -void do_signal(struct pt_regs *regs) +static void do_signal(struct pt_regs *regs) { struct k_sigaction ka; sigset_t *oldset; @@ -589,7 +673,7 @@ void do_signal(struct pt_regs *regs) regs->cp0_epc -= 8; } if (regs->regs[2] == ERESTART_RESTARTBLOCK) { - regs->regs[2] = __NR_restart_syscall; + regs->regs[2] = current->thread.abi->restart; regs->regs[7] = regs->regs[26]; regs->cp0_epc -= 4; } @@ -615,5 +699,5 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, { /* deal with pending signal delivery */ if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) - current->thread.abi->do_signal(regs); + do_signal(regs); } diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index c28cb21514c..b9a014411f8 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -22,6 +22,7 @@ #include <linux/compat.h> #include <linux/suspend.h> #include <linux/compiler.h> +#include <linux/uaccess.h> #include <asm/abi.h> #include <asm/asm.h> @@ -29,7 +30,6 @@ #include <linux/bitops.h> #include <asm/cacheflush.h> #include <asm/sim.h> -#include <asm/uaccess.h> #include <asm/ucontext.h> #include <asm/system.h> #include <asm/fpu.h> @@ -104,17 +104,10 @@ typedef struct 
compat_siginfo { */ #define __NR_O32_sigreturn 4119 #define __NR_O32_rt_sigreturn 4193 -#define __NR_O32_restart_syscall 4253 +#define __NR_O32_restart_syscall 4253 /* 32-bit compatibility types */ -#define _NSIG_BPW32 32 -#define _NSIG_WORDS32 (_NSIG / _NSIG_BPW32) - -typedef struct { - unsigned int sig[_NSIG_WORDS32]; -} sigset_t32; - typedef unsigned int __sighandler32_t; typedef void (*vfptr_t)(void); @@ -136,7 +129,7 @@ struct ucontext32 { s32 uc_link; stack32_t uc_stack; struct sigcontext32 uc_mcontext; - sigset_t32 uc_sigmask; /* mask last for extensibility */ + compat_sigset_t uc_sigmask; /* mask last for extensibility */ }; /* @@ -150,7 +143,7 @@ struct sigframe32 { u32 sf_ass[4]; /* argument save space for o32 */ u32 sf_code[2]; /* signal trampoline */ struct sigcontext32 sf_sc; - sigset_t sf_mask; + compat_sigset_t sf_mask; }; struct rt_sigframe32 { @@ -166,7 +159,7 @@ struct sigframe32 { u32 sf_ass[4]; /* argument save space for o32 */ u32 sf_pad[2]; struct sigcontext32 sf_sc; /* hw context */ - sigset_t sf_mask; + compat_sigset_t sf_mask; u32 sf_code[8] ____cacheline_aligned; /* signal trampoline */ }; @@ -183,11 +176,52 @@ struct rt_sigframe32 { /* * sigcontext handlers */ +static int protected_save_fp_context32(struct sigcontext32 __user *sc) +{ + int err; + while (1) { + lock_fpu_owner(); + own_fpu_inatomic(1); + err = save_fp_context32(sc); /* this might fail */ + unlock_fpu_owner(); + if (likely(!err)) + break; + /* touch the sigcontext and try again */ + err = __put_user(0, &sc->sc_fpregs[0]) | + __put_user(0, &sc->sc_fpregs[31]) | + __put_user(0, &sc->sc_fpc_csr); + if (err) + break; /* really bad sigcontext */ + } + return err; +} + +static int protected_restore_fp_context32(struct sigcontext32 __user *sc) +{ + int err, tmp; + while (1) { + lock_fpu_owner(); + own_fpu_inatomic(0); + err = restore_fp_context32(sc); /* this might fail */ + unlock_fpu_owner(); + if (likely(!err)) + break; + /* touch the sigcontext and try again */ + err = __get_user(tmp, &sc->sc_fpregs[0]) | + __get_user(tmp, &sc->sc_fpregs[31]) | + __get_user(tmp, &sc->sc_fpc_csr); + if (err) + break; /* really bad sigcontext */ + } + return err; +} + static int setup_sigcontext32(struct pt_regs *regs, struct sigcontext32 __user *sc) { int err = 0; int i; + u32 used_math; err |= __put_user(regs->cp0_epc, &sc->sc_pc); @@ -207,26 +241,31 @@ static int setup_sigcontext32(struct pt_regs *regs, err |= __put_user(mflo3(), &sc->sc_lo3); } - err |= __put_user(!!used_math(), &sc->sc_used_math); + used_math = !!used_math(); + err |= __put_user(used_math, &sc->sc_used_math); - if (used_math()) { + if (used_math) { /* * Save FPU state to signal context. Signal handler * will "inherit" current FPU state. 
*/ - preempt_disable(); - - if (!is_fpu_owner()) { - own_fpu(); - restore_fp(current); - } - err |= save_fp_context32(sc); - - preempt_enable(); + err |= protected_save_fp_context32(sc); } return err; } +static int +check_and_restore_fp_context32(struct sigcontext32 __user *sc) +{ + int err, sig; + + err = sig = fpcsr_pending(&sc->sc_fpc_csr); + if (err > 0) + err = 0; + err |= protected_restore_fp_context32(sc); + return err ?: sig; +} + static int restore_sigcontext32(struct pt_regs *regs, struct sigcontext32 __user *sc) { @@ -257,19 +296,15 @@ static int restore_sigcontext32(struct pt_regs *regs, err |= __get_user(used_math, &sc->sc_used_math); conditional_used_math(used_math); - preempt_disable(); - - if (used_math()) { + if (used_math) { /* restore fpu context if we have used it before */ - own_fpu(); - err |= restore_fp_context32(sc); + if (!err) + err = check_and_restore_fp_context32(sc); } else { /* signal handler may have used FPU. Give it up. */ - lose_fpu(); + lose_fpu(0); } - preempt_enable(); - return err; } @@ -515,6 +550,7 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs) { struct sigframe32 __user *frame; sigset_t blocked; + int sig; frame = (struct sigframe32 __user *) regs.regs[29]; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) @@ -528,8 +564,11 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs) recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); - if (restore_sigcontext32(®s, &frame->sf_sc)) + sig = restore_sigcontext32(®s, &frame->sf_sc); + if (sig < 0) goto badframe; + else if (sig) + force_sig(sig, current); /* * Don't let your children do this ... @@ -552,6 +591,7 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) sigset_t set; stack_t st; s32 sp; + int sig; frame = (struct rt_sigframe32 __user *) regs.regs[29]; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) @@ -565,8 +605,11 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); - if (restore_sigcontext32(®s, &frame->rs_uc.uc_mcontext)) + sig = restore_sigcontext32(®s, &frame->rs_uc.uc_mcontext); + if (sig < 0) goto badframe; + else if (sig) + force_sig(sig, current); /* The ucontext contains a stack32_t, so we must convert! */ if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp)) @@ -598,7 +641,7 @@ badframe: force_sig(SIGSEGV, current); } -int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs, +static int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs, int signr, sigset_t *set) { struct sigframe32 __user *frame; @@ -644,7 +687,7 @@ give_sigsegv: return -EFAULT; } -int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs, +static int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info) { struct rt_sigframe32 __user *frame; @@ -704,110 +747,14 @@ give_sigsegv: return -EFAULT; } -static inline int handle_signal(unsigned long sig, siginfo_t *info, - struct k_sigaction *ka, sigset_t *oldset, struct pt_regs * regs) -{ - int ret; - - switch (regs->regs[0]) { - case ERESTART_RESTARTBLOCK: - case ERESTARTNOHAND: - regs->regs[2] = EINTR; - break; - case ERESTARTSYS: - if (!(ka->sa.sa_flags & SA_RESTART)) { - regs->regs[2] = EINTR; - break; - } - /* fallthrough */ - case ERESTARTNOINTR: /* Userland will reload $v0. */ - regs->regs[7] = regs->regs[26]; - regs->cp0_epc -= 8; - } - - regs->regs[0] = 0; /* Don't deal with this again. 
*/ - - if (ka->sa.sa_flags & SA_SIGINFO) - ret = current->thread.abi->setup_rt_frame(ka, regs, sig, oldset, info); - else - ret = current->thread.abi->setup_frame(ka, regs, sig, oldset); - - spin_lock_irq(¤t->sighand->siglock); - sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); - if (!(ka->sa.sa_flags & SA_NODEFER)) - sigaddset(¤t->blocked,sig); - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); - - return ret; -} - -void do_signal32(struct pt_regs *regs) -{ - struct k_sigaction ka; - sigset_t *oldset; - siginfo_t info; - int signr; - - /* - * We want the common case to go fast, which is why we may in certain - * cases get here from kernel mode. Just return without doing anything - * if so. - */ - if (!user_mode(regs)) - return; - - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - - signr = get_signal_to_deliver(&info, &ka, regs, NULL); - if (signr > 0) { - /* Whee! Actually deliver the signal. */ - if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { - /* - * A signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TIF_RESTORE_SIGMASK flag. - */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - clear_thread_flag(TIF_RESTORE_SIGMASK); - } - - return; - } - - /* - * Who's code doesn't conform to the restartable syscall convention - * dies here!!! The li instruction, a single machine instruction, - * must directly be followed by the syscall instruction. - */ - if (regs->regs[0]) { - if (regs->regs[2] == ERESTARTNOHAND || - regs->regs[2] == ERESTARTSYS || - regs->regs[2] == ERESTARTNOINTR) { - regs->regs[7] = regs->regs[26]; - regs->cp0_epc -= 8; - } - if (regs->regs[2] == ERESTART_RESTARTBLOCK) { - regs->regs[2] = __NR_O32_restart_syscall; - regs->regs[7] = regs->regs[26]; - regs->cp0_epc -= 4; - } - regs->regs[0] = 0; /* Don't deal with this again. */ - } - - /* - * If there's no signal to deliver, we just put the saved sigmask - * back - */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) { - clear_thread_flag(TIF_RESTORE_SIGMASK); - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); - } -} +/* + * o32 compatibility on 64-bit kernels, without DSP ASE + */ +struct mips_abi mips_abi_32 = { + .setup_frame = setup_frame_32, + .setup_rt_frame = setup_rt_frame_32, + .restart = __NR_O32_restart_syscall +}; asmlinkage int sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, struct sigaction32 __user *oact, diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index 7ca2a078841..a9202fa9598 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c @@ -29,6 +29,7 @@ #include <linux/compat.h> #include <linux/bitops.h> +#include <asm/abi.h> #include <asm/asm.h> #include <asm/cacheflush.h> #include <asm/compat-signal.h> @@ -126,6 +127,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) sigset_t set; stack_t st; s32 sp; + int sig; frame = (struct rt_sigframe_n32 __user *) regs.regs[29]; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) @@ -139,8 +141,11 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); - if (restore_sigcontext(®s, &frame->rs_uc.uc_mcontext)) + sig = restore_sigcontext(®s, &frame->rs_uc.uc_mcontext); + if (sig < 0) goto badframe; + else if (sig) + force_sig(sig, current); /* The ucontext contains a stack32_t, so we must convert! 
*/ if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp)) @@ -169,7 +174,7 @@ badframe: force_sig(SIGSEGV, current); } -int setup_rt_frame_n32(struct k_sigaction * ka, +static int setup_rt_frame_n32(struct k_sigaction * ka, struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info) { struct rt_sigframe_n32 __user *frame; @@ -228,3 +233,8 @@ give_sigsegv: force_sigsegv(signr, current); return -EFAULT; } + +struct mips_abi mips_abi_n32 = { + .setup_rt_frame = setup_rt_frame_n32, + .restart = __NR_N32_restart_syscall +}; diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 0555fc554f6..c46e479c992 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -51,31 +51,14 @@ int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ EXPORT_SYMBOL(phys_cpu_present_map); EXPORT_SYMBOL(cpu_online_map); +/* This happens early in bootup, can't really do it better */ static void smp_tune_scheduling (void) { struct cache_desc *cd = ¤t_cpu_data.scache; - unsigned long cachesize; /* kB */ - unsigned long cpu_khz; + unsigned long cachesize = cd->linesz * cd->sets * cd->ways; - /* - * Crude estimate until we actually meassure ... - */ - cpu_khz = loops_per_jiffy * 2 * HZ / 1000; - - /* - * Rough estimation for SMP scheduling, this is the number of - * cycles it takes for a fully memory-limited process to flush - * the SMP-local cache. - * - * (For a P5 this pretty much means we will choose another idle - * CPU almost always at wakeup time (this is due to the small - * L1 cache), on PIIs it's around 50-100 usecs, depending on - * the cache size) - */ - if (!cpu_khz) - return; - - cachesize = cd->linesz * cd->sets * cd->ways; + if (cachesize > max_cache_size) + max_cache_size = cachesize; } extern void __init calibrate_delay(void); diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 9251ea82493..5dcfab6b288 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -4,6 +4,7 @@ #include <linux/sched.h> #include <linux/cpumask.h> #include <linux/interrupt.h> +#include <linux/kernel_stat.h> #include <linux/module.h> #include <asm/cpu.h> @@ -14,6 +15,7 @@ #include <asm/hazards.h> #include <asm/mmu_context.h> #include <asm/smp.h> +#include <asm/mips-boards/maltaint.h> #include <asm/mipsregs.h> #include <asm/cacheflush.h> #include <asm/time.h> @@ -75,7 +77,7 @@ static struct smtc_ipi_q freeIPIq; void ipi_decode(struct smtc_ipi *); static void post_direct_ipi(int cpu, struct smtc_ipi *pipi); -static void setup_cross_vpe_interrupts(void); +static void setup_cross_vpe_interrupts(unsigned int nvpe); void init_smtc_stats(void); /* Global SMTC Status */ @@ -141,10 +143,7 @@ __setup("ipibufs=", ipibufs); __setup("nostlb", stlb_disable); __setup("asidmask=", asidmask_set); -/* Enable additional debug checks before going into CPU idle loop */ -#define SMTC_IDLE_HOOK_DEBUG - -#ifdef SMTC_IDLE_HOOK_DEBUG +#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG static int hang_trig = 0; @@ -171,12 +170,15 @@ __setup("tintq=", tintq); int imstuckcount[2][8]; /* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */ -int vpemask[2][8] = {{0,1,1,0,0,0,0,1},{0,1,0,0,0,0,0,1}}; +int vpemask[2][8] = { + {0, 0, 1, 0, 0, 0, 0, 1}, + {0, 0, 0, 0, 0, 0, 0, 1} +}; int tcnoprog[NR_CPUS]; static atomic_t idle_hook_initialized = {0}; static int clock_hang_reported[NR_CPUS]; -#endif /* SMTC_IDLE_HOOK_DEBUG */ +#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ /* Initialize shared TLB - the should probably migrate to smtc_setup_cpus() */ @@ -394,10 +396,10 @@ void mipsmt_prepare_cpus(void) 
printk("ASID mask value override to 0x%x\n", asidmask); /* Temporary */ -#ifdef SMTC_IDLE_HOOK_DEBUG +#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG if (hang_trig) printk("Logic Analyser Trigger on suspected TC hang\n"); -#endif /* SMTC_IDLE_HOOK_DEBUG */ +#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ /* Put MVPE's into 'configuration state' */ write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC ); @@ -504,8 +506,7 @@ void mipsmt_prepare_cpus(void) /* If we have multiple VPEs running, set up the cross-VPE interrupt */ - if (nvpe > 1) - setup_cross_vpe_interrupts(); + setup_cross_vpe_interrupts(nvpe); /* Set up queue of free IPI "messages". */ nipi = NR_CPUS * IPIBUF_PER_CPU; @@ -610,7 +611,12 @@ void smtc_cpus_done(void) int setup_irq_smtc(unsigned int irq, struct irqaction * new, unsigned long hwmask) { + unsigned int vpe = current_cpu_data.vpe_id; + irq_hwmask[irq] = hwmask; +#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG + vpemask[vpe][irq - MIPSCPU_INT_BASE] = 1; +#endif return setup_irq(irq, new); } @@ -815,12 +821,15 @@ void ipi_decode(struct smtc_ipi *pipi) smtc_ipi_nq(&freeIPIq, pipi); switch (type_copy) { case SMTC_CLOCK_TICK: + irq_enter(); + kstat_this_cpu.irqs[MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR]++; /* Invoke Clock "Interrupt" */ ipi_timer_latch[dest_copy] = 0; -#ifdef SMTC_IDLE_HOOK_DEBUG +#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG clock_hang_reported[dest_copy] = 0; -#endif /* SMTC_IDLE_HOOK_DEBUG */ +#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ local_timer_interrupt(0, NULL); + irq_exit(); break; case LINUX_SMP_IPI: switch ((int)arg_copy) { @@ -968,8 +977,11 @@ static void ipi_irq_dispatch(void) static struct irqaction irq_ipi; -static void setup_cross_vpe_interrupts(void) +static void setup_cross_vpe_interrupts(unsigned int nvpe) { + if (nvpe < 1) + return; + if (!cpu_has_vint) panic("SMTC Kernel requires Vectored Interupt support"); @@ -987,10 +999,17 @@ static void setup_cross_vpe_interrupts(void) /* * SMTC-specific hacks invoked from elsewhere in the kernel. + * + * smtc_ipi_replay is called from raw_local_irq_restore which is only ever + * called with interrupts disabled. We do rely on interrupts being disabled + * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would + * result in a recursive call to raw_local_irq_restore(). */ -void smtc_ipi_replay(void) +static void __smtc_ipi_replay(void) { + unsigned int cpu = smp_processor_id(); + /* * To the extent that we've ever turned interrupts off, * we may have accumulated deferred IPIs. This is subtle. @@ -1005,22 +1024,35 @@ void smtc_ipi_replay(void) * is clear, and we'll handle it as a real pseudo-interrupt * and not a pseudo-pseudo interrupt. 
*/
- if (IPIQ[smp_processor_id()].depth > 0) {
- struct smtc_ipi *pipi;
- extern void self_ipi(struct smtc_ipi *);
+ if (IPIQ[cpu].depth > 0) {
+ while (1) {
+ struct smtc_ipi_q *q = &IPIQ[cpu];
+ struct smtc_ipi *pipi;
+ extern void self_ipi(struct smtc_ipi *);
+
+ spin_lock(&q->lock);
+ pipi = __smtc_ipi_dq(q);
+ spin_unlock(&q->lock);
+ if (!pipi)
+ break;
- while ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()]))) {
self_ipi(pipi);
- smtc_cpu_stats[smp_processor_id()].selfipis++;
+ smtc_cpu_stats[cpu].selfipis++;
}
}
}
+void smtc_ipi_replay(void)
+{
+ raw_local_irq_disable();
+ __smtc_ipi_replay();
+}
+
EXPORT_SYMBOL(smtc_ipi_replay);
void smtc_idle_loop_hook(void)
{
-#ifdef SMTC_IDLE_HOOK_DEBUG
+#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
int im;
int flags;
int mtflags;
@@ -1113,14 +1145,20 @@ void smtc_idle_loop_hook(void)
local_irq_restore(flags);
if (pdb_msg != &id_ho_db_msg[0])
printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
-#endif /* SMTC_IDLE_HOOK_DEBUG */
+#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
/*
* Replay any accumulated deferred IPIs. If "Instant Replay"
* is in use, there should never be any.
*/
#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
- smtc_ipi_replay();
+ {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __smtc_ipi_replay();
+ local_irq_restore(flags);
+ }
#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
}
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 2a932cada24..493cb29b8a4 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -229,6 +229,9 @@ void show_regs(struct pt_regs *regs)
printk("\n");
}
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ printk("Acx : %0*lx\n", field, regs->acx);
+#endif
printk("Hi : %0*lx\n", field, regs->hi);
printk("Lo : %0*lx\n", field, regs->lo);
@@ -340,13 +343,9 @@ NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs)
extern const struct exception_table_entry __start___dbe_table[];
extern const struct exception_table_entry __stop___dbe_table[];
-void __declare_dbe_table(void)
-{
- __asm__ __volatile__(
- ".section\t__dbe_table,\"a\"\n\t"
- ".previous"
- );
-}
+__asm__(
+" .section __dbe_table, \"a\"\n"
+" .previous \n");
/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
@@ -611,16 +610,6 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
if (fcr31 & FPU_CSR_UNI_X) {
int sig;
- preempt_disable();
-
-#ifdef CONFIG_PREEMPT
- if (!is_fpu_owner()) {
- /* We might lose fpu before disabling preempt... */
- own_fpu();
- BUG_ON(!used_math());
- restore_fp(current);
- }
-#endif
/*
* Unimplemented operation exception. If we've got the full
* software emulator on-board, let's use it...
@@ -631,18 +620,12 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
* register operands before invoking the emulator, which seems
* a bit extreme for what should be an infrequent event.
*/
- save_fp(current);
/* Ensure 'resume' not overwrite saved fp context again. */
- lose_fpu();
-
- preempt_enable();
+ lose_fpu(1);
/* Run the emulator */
sig = fpu_emulator_cop1Handler (regs, &current->thread.fpu, 1);
- preempt_disable();
-
- own_fpu(); /* Using the FPU again. */
/*
* We can't allow the emulated instruction to leave any of
* the cause bit set in $fcr31.
@@ -650,9 +633,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
/* Restore the hardware register state */
- restore_fp(current);
-
- preempt_enable();
+ own_fpu(1); /* Using the FPU again. */
/* If something went wrong, signal */
if (sig)
@@ -669,7 +650,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
unsigned int opcode, bcode;
siginfo_t info;
- if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+ if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
/*
@@ -708,6 +689,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
die_if_kernel("Break instruction in kernel code", regs);
force_sig(SIGTRAP, current);
}
+ return;
out_sigsegv:
force_sig(SIGSEGV, current);
@@ -718,7 +700,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
unsigned int opcode, tcode = 0;
siginfo_t info;
- if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+ if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
/* Immediate versions don't provide a code. */
@@ -751,6 +733,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
die_if_kernel("Trap instruction in kernel code", regs);
force_sig(SIGTRAP, current);
}
+ return;
out_sigsegv:
force_sig(SIGSEGV, current);
@@ -790,21 +773,15 @@ asmlinkage void do_cpu(struct pt_regs *regs)
break;
case 1:
- preempt_disable();
-
- own_fpu();
- if (used_math()) { /* Using the FPU again. */
- restore_fp(current);
- } else { /* First time FPU user. */
+ if (used_math()) /* Using the FPU again. */
+ own_fpu(1);
+ else { /* First time FPU user. */
init_fpu();
set_used_math();
}
- if (cpu_has_fpu) {
- preempt_enable();
- } else {
+ if (!raw_cpu_has_fpu) {
int sig;
- preempt_enable();
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0);
if (sig)
@@ -845,7 +822,6 @@ asmlinkage void do_cpu(struct pt_regs *regs)
case 2:
case 3:
- die_if_kernel("do_cpu invoked from kernel context!", regs);
break;
}
@@ -1258,26 +1234,26 @@ static inline void mips_srs_init(void)
/*
* This is used by native signal handling
*/
-asmlinkage int (*save_fp_context)(struct sigcontext *sc);
-asmlinkage int (*restore_fp_context)(struct sigcontext *sc);
+asmlinkage int (*save_fp_context)(struct sigcontext __user *sc);
+asmlinkage int (*restore_fp_context)(struct sigcontext __user *sc);
-extern asmlinkage int _save_fp_context(struct sigcontext *sc);
-extern asmlinkage int _restore_fp_context(struct sigcontext *sc);
+extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
+extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
-extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
-extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);
+extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
+extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);
#ifdef CONFIG_SMP
-static int smp_save_fp_context(struct sigcontext *sc)
+static int smp_save_fp_context(struct sigcontext __user *sc)
{
- return cpu_has_fpu
+ return raw_cpu_has_fpu
? _save_fp_context(sc)
: fpu_emulator_save_context(sc);
}
-static int smp_restore_fp_context(struct sigcontext *sc)
+static int smp_restore_fp_context(struct sigcontext __user *sc)
{
- return cpu_has_fpu
+ return raw_cpu_has_fpu
? _restore_fp_context(sc)
: fpu_emulator_restore_context(sc);
}
@@ -1305,14 +1281,14 @@ static inline void signal_init(void)
/*
* This is used by 32-bit signal stuff on the 64-bit kernel
*/
-asmlinkage int (*save_fp_context32)(struct sigcontext32 *sc);
-asmlinkage int (*restore_fp_context32)(struct sigcontext32 *sc);
+asmlinkage int (*save_fp_context32)(struct sigcontext32 __user *sc);
+asmlinkage int (*restore_fp_context32)(struct sigcontext32 __user *sc);
-extern asmlinkage int _save_fp_context32(struct sigcontext32 *sc);
-extern asmlinkage int _restore_fp_context32(struct sigcontext32 *sc);
+extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
+extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
-extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 *sc);
-extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 *sc);
+extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc);
+extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc);
static inline void signal32_init(void)
{
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 7e7d5482348..24b7b053cfe 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -515,7 +515,7 @@ asmlinkage void do_ade(struct pt_regs *regs)
goto sigbus;
pc = (unsigned int __user *) exception_epc(regs);
- if ((current->thread.mflags & MF_FIXADE) == 0)
+ if (user_mode(regs) && (current->thread.mflags & MF_FIXADE) == 0)
goto sigbus;
/*
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 9aca871a307..c9ee9d2d585 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -1079,6 +1079,7 @@ static int getcwd(char *buff, int size)
static int vpe_open(struct inode *inode, struct file *filp)
{
int minor, ret;
+ enum vpe_state state;
struct vpe *v;
struct vpe_notifications *not;
@@ -1093,7 +1094,8 @@ static int vpe_open(struct inode *inode, struct file *filp)
return -ENODEV;
}
- if (v->state != VPE_STATE_UNUSED) {
+ state = xchg(&v->state, VPE_STATE_INUSE);
+ if (state != VPE_STATE_UNUSED) {
dvpe();
printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n");
@@ -1108,9 +1110,6 @@ static int vpe_open(struct inode *inode, struct file *filp)
cleanup_tc(get_tc(minor));
}
- // allocate it so when we get write ops we know it's expected.
- v->state = VPE_STATE_INUSE;
-
/* this of-course trashes what was there before... */
v->pbuffer = vmalloc(P_SIZE);
v->plen = P_SIZE;
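The signal32.c and signal_n32.c hunks above replace the dedicated do_signal32() path with mips_abi_32 / mips_abi_n32 operation tables that the generic signal code dispatches through, so only the frame builders and the restart syscall number differ per ABI. What follows is a minimal stand-alone sketch of that dispatch pattern; the struct, function names and the restart number are invented stand-ins for illustration, not the kernel's definitions, and it builds as ordinary user-space C.

/* Sketch of per-ABI dispatch through an operations table, in the spirit of
 * the mips_abi_32 / mips_abi_n32 structures added above.  All names here
 * are hypothetical. */
#include <stdio.h>

struct fake_regs { unsigned long cp0_epc; };

struct fake_abi {
    int (*setup_frame)(struct fake_regs *regs, int sig);
    int (*setup_rt_frame)(struct fake_regs *regs, int sig);
    unsigned int restart;               /* ABI-specific restart_syscall number */
};

static int setup_frame_o32(struct fake_regs *regs, int sig)
{
    printf("o32 classic frame for signal %d\n", sig);
    return 0;
}

static int setup_rt_frame_o32(struct fake_regs *regs, int sig)
{
    printf("o32 rt frame for signal %d\n", sig);
    return 0;
}

static const struct fake_abi abi_o32 = {
    .setup_frame    = setup_frame_o32,
    .setup_rt_frame = setup_rt_frame_o32,
    .restart        = 4353,            /* placeholder restart syscall number */
};

/* Generic delivery path: picks the right frame builder without knowing
 * which ABI the task uses. */
static int deliver(const struct fake_abi *abi, struct fake_regs *regs,
                   int sig, int want_siginfo)
{
    return want_siginfo ? abi->setup_rt_frame(regs, sig)
                        : abi->setup_frame(regs, sig);
}

int main(void)
{
    struct fake_regs regs = { 0 };
    return deliver(&abi_o32, &regs, 11, 1);
}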
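The deleted do_signal32() body in the signal32.c hunk also documents the o32 syscall-restart convention: on return from a syscall (regs[0] nonzero) with an ERESTART* code in v0 (regs[2]), the saved $a3 is put back into regs[7] and cp0_epc is backed up by 8 over the "li v0, __NR_xxx; syscall" pair, or by 4 with v0 rewritten to the restart syscall for ERESTART_RESTARTBLOCK. The user-space model below reproduces only that decision logic; the register struct, the error values and the restart-syscall number are illustrative stand-ins.

/* Minimal model of the MIPS syscall-restart decision made in the removed
 * do_signal32(); the struct and constants are simplified stand-ins. */
#include <stdio.h>

enum { ERESTARTSYS = 512, ERESTARTNOINTR = 513, ERESTARTNOHAND = 514,
       ERESTART_RESTARTBLOCK = 516 };            /* treat as illustrative */
#define NR_RESTART_SYSCALL 4353                  /* placeholder number    */

struct fake_regs {
    unsigned long regs[32];   /* regs[0] != 0 => returning from a syscall */
    unsigned long cp0_epc;    /* points just past the syscall instruction */
};

static void maybe_restart(struct fake_regs *r)
{
    if (!r->regs[0])
        return;                               /* not a syscall return */
    if (r->regs[2] == ERESTARTNOHAND ||
        r->regs[2] == ERESTARTSYS ||
        r->regs[2] == ERESTARTNOINTR) {
        r->regs[7] = r->regs[26];             /* restore saved $a3 (error flag) */
        r->cp0_epc -= 8;                      /* re-run "li v0, NR; syscall"    */
    } else if (r->regs[2] == ERESTART_RESTARTBLOCK) {
        r->regs[2] = NR_RESTART_SYSCALL;      /* redirect to restart_syscall()  */
        r->regs[7] = r->regs[26];
        r->cp0_epc -= 4;                      /* re-run only the syscall insn   */
    }
    r->regs[0] = 0;                           /* never restart twice */
}

int main(void)
{
    struct fake_regs r = { .cp0_epc = 0x400100 };
    r.regs[0] = 1;                /* pretend we came from a syscall */
    r.regs[2] = ERESTARTSYS;      /* interrupted, restart requested */
    r.regs[26] = 0;               /* saved $a3 */
    maybe_restart(&r);
    printf("epc now 0x%lx\n", r.cp0_epc);   /* 0x4000f8: backed up by 8 */
    return 0;
}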
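In the signal_n32.c hunk, sysn32_rt_sigreturn() now interprets the result of restore_sigcontext() three ways: negative means the frame was bad, zero means success, and a positive value is a signal number the caller must raise itself (presumably a deferred FPU exception such as SIGFPE; that interpretation is an assumption here, not stated in the hunk). A small sketch of the caller-side convention, with invented helper names:

/* Sketch of the three-way return convention: <0 bad frame, 0 success,
 * >0 a signal to raise.  Everything here is a simplified stand-in. */
#include <stdio.h>

#define FAKE_SIGSEGV 11
#define FAKE_SIGFPE   8

/* Pretend restore: flags select the outcome so all three paths can be seen. */
static int fake_restore_sigcontext(int frame_ok, int fpu_exception_pending)
{
    if (!frame_ok)
        return -1;                 /* unreadable or corrupt frame */
    if (fpu_exception_pending)
        return FAKE_SIGFPE;        /* restored FP state carries a pending signal */
    return 0;
}

static void raise_sig(int sig)
{
    printf("force_sig(%d)\n", sig);
}

static void fake_rt_sigreturn(int frame_ok, int fpu_exception_pending)
{
    int sig = fake_restore_sigcontext(frame_ok, fpu_exception_pending);

    if (sig < 0) {
        raise_sig(FAKE_SIGSEGV);   /* badframe: kill with SIGSEGV */
        return;
    }
    if (sig)
        raise_sig(sig);            /* propagate the pending signal */
    /* otherwise: resume the interrupted context normally */
}

int main(void)
{
    fake_rt_sigreturn(1, 0);       /* clean return    */
    fake_rt_sigreturn(1, 1);       /* pending signal  */
    fake_rt_sigreturn(0, 0);       /* corrupt frame   */
    return 0;
}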
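The smtc.c hunks rework deferred-IPI replay: __smtc_ipi_replay() pops one queued IPI at a time under a plain spinlock (the caller guarantees interrupts are already off, so spin_lock_irqsave() would recurse into raw_local_irq_restore()), handles it outside the lock, and stops when the queue is empty. Below is a user-space model of that drain loop; pthread_mutex_t stands in for the spinlock and the queue and handler types are invented.

/* Model of the __smtc_ipi_replay() drain loop: dequeue one entry under the
 * lock, process it outside the lock, repeat until empty. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ipi {
    int payload;
    struct ipi *next;
};

struct ipi_q {
    pthread_mutex_t lock;
    struct ipi *head;
    int depth;
};

static struct ipi *ipi_dq(struct ipi_q *q)   /* caller holds q->lock */
{
    struct ipi *p = q->head;
    if (p) {
        q->head = p->next;
        q->depth--;
    }
    return p;
}

static void self_ipi(struct ipi *p)          /* stand-in for the real handler */
{
    printf("replaying deferred IPI %d\n", p->payload);
    free(p);
}

static void ipi_replay(struct ipi_q *q)
{
    for (;;) {
        struct ipi *p;

        pthread_mutex_lock(&q->lock);
        p = ipi_dq(q);
        pthread_mutex_unlock(&q->lock);
        if (!p)
            break;                            /* queue drained */
        self_ipi(p);                          /* handled outside the lock */
    }
}

int main(void)
{
    struct ipi_q q = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };
    for (int i = 3; i > 0; i--) {             /* queue a few deferred IPIs */
        struct ipi *p = malloc(sizeof(*p));
        p->payload = i;
        p->next = q.head;
        q.head = p;
        q.depth++;
    }
    ipi_replay(&q);
    return 0;
}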
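The vpe.c hunk closes an open/open race: instead of testing v->state and assigning VPE_STATE_INUSE later, vpe_open() now claims the VPE with a single atomic xchg(), so two concurrent openers cannot both observe VPE_STATE_UNUSED. A sketch of the same idea using C11 atomics in place of the kernel's xchg(); the types and function names are invented for illustration.

/* Claim-by-swap instead of test-then-set, mirroring the vpe_open() change. */
#include <stdatomic.h>
#include <stdio.h>

enum vpe_state { VPE_STATE_UNUSED, VPE_STATE_INUSE, VPE_STATE_RUNNING };

struct fake_vpe {
    _Atomic enum vpe_state state;
};

/* Returns 0 if we claimed the VPE, -1 if someone else already holds it.
 * Because the read and the write are one atomic operation, two concurrent
 * callers cannot both see VPE_STATE_UNUSED. */
static int vpe_claim(struct fake_vpe *v)
{
    enum vpe_state old = atomic_exchange(&v->state, VPE_STATE_INUSE);

    if (old != VPE_STATE_UNUSED) {
        /* the real code dumps the in-use TC's registers and cleans it up here */
        return -1;
    }
    return 0;
}

int main(void)
{
    struct fake_vpe v = { VPE_STATE_UNUSED };

    printf("first open:  %d\n", vpe_claim(&v));   /* 0: claimed  */
    printf("second open: %d\n", vpe_claim(&v));   /* -1: in use  */
    return 0;
}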