Diffstat (limited to 'arch/s390')
30 files changed, 222 insertions(+), 362 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 9015060919a..e16390c0bca 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -122,6 +122,7 @@ config S390 select ARCH_INLINE_WRITE_UNLOCK_BH select ARCH_INLINE_WRITE_UNLOCK_IRQ select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE + select GENERIC_SMP_IDLE_THREAD config SCHED_OMIT_FRAME_POINTER def_bool y @@ -217,7 +218,7 @@ config COMPAT def_bool y prompt "Kernel support for 31 bit emulation" depends on 64BIT - select COMPAT_BINFMT_ELF + select COMPAT_BINFMT_ELF if BINFMT_ELF select ARCH_WANT_OLD_COMPAT_IPC help Select this option if you want to enable your system kernel to @@ -234,6 +235,25 @@ config KEYS_COMPAT config AUDIT_ARCH def_bool y +config HAVE_MARCH_Z900_FEATURES + def_bool n + +config HAVE_MARCH_Z990_FEATURES + def_bool n + select HAVE_MARCH_Z900_FEATURES + +config HAVE_MARCH_Z9_109_FEATURES + def_bool n + select HAVE_MARCH_Z990_FEATURES + +config HAVE_MARCH_Z10_FEATURES + def_bool n + select HAVE_MARCH_Z9_109_FEATURES + +config HAVE_MARCH_Z196_FEATURES + def_bool n + select HAVE_MARCH_Z10_FEATURES + comment "Code generation options" choice @@ -249,6 +269,7 @@ config MARCH_G5 config MARCH_Z900 bool "IBM zSeries model z800 and z900" + select HAVE_MARCH_Z900_FEATURES if 64BIT help Select this to enable optimizations for model z800/z900 (2064 and 2066 series). This will enable some optimizations that are not @@ -256,6 +277,7 @@ config MARCH_Z900 config MARCH_Z990 bool "IBM zSeries model z890 and z990" + select HAVE_MARCH_Z990_FEATURES if 64BIT help Select this to enable optimizations for model z890/z990 (2084 and 2086 series). The kernel will be slightly faster but will not work @@ -263,6 +285,7 @@ config MARCH_Z990 config MARCH_Z9_109 bool "IBM System z9" + select HAVE_MARCH_Z9_109_FEATURES if 64BIT help Select this to enable optimizations for IBM System z9 (2094 and 2096 series). The kernel will be slightly faster but will not work @@ -270,6 +293,7 @@ config MARCH_Z9_109 config MARCH_Z10 bool "IBM System z10" + select HAVE_MARCH_Z10_FEATURES if 64BIT help Select this to enable optimizations for IBM System z10 (2097 and 2098 series). The kernel will be slightly faster but will not work @@ -277,6 +301,7 @@ config MARCH_Z10 config MARCH_Z196 bool "IBM zEnterprise 114 and 196" + select HAVE_MARCH_Z196_FEATURES if 64BIT help Select this to enable optimizations for IBM zEnterprise 114 and 196 (2818 and 2817 series). The kernel will be slightly faster but will @@ -406,33 +431,6 @@ config CHSC_SCH comment "Misc" -config IPL - def_bool y - prompt "Builtin IPL record support" - help - If you want to use the produced kernel to IPL directly from a - device, you have to merge a bootsector specific to the device - into the first bytes of the kernel. You will have to select the - IPL device. - -choice - prompt "IPL method generated into head.S" - depends on IPL - default IPL_VM - help - Select "tape" if you want to IPL the image from a Tape. - - Select "vm_reader" if you are running under VM/ESA and want - to IPL the image from the emulated card reader. - -config IPL_TAPE - bool "tape" - -config IPL_VM - bool "vm_reader" - -endchoice - source "fs/Kconfig.binfmt" config FORCE_MAX_ZONEORDER @@ -569,7 +567,7 @@ config KEXEC config CRASH_DUMP bool "kernel crash dumps" - depends on 64BIT + depends on 64BIT && SMP select KEXEC help Generate crash dump after being started by kexec. 
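Note: the new HAVE_MARCH_*_FEATURES symbols form a cumulative chain, with each machine level selecting the symbol of the previous one, so the code changes further down (barrier.h, timex.h, entry64.S) can pick the fastest available instruction with a single compile-time test instead of a runtime machine flag. A minimal sketch of that pattern follows, using a hypothetical helper name; the patch itself applies it to mb(), get_clock() and the new STCK macro:

	/* Sketch only: compile-time selection based on the new march symbols.
	 * HAVE_MARCH_Z9_109_FEATURES implies the Z990 and Z900 features too,
	 * so a single #ifdef covers all newer machine levels.
	 */
	static inline unsigned long long read_tod_clock(void)
	{
		unsigned long long clk;

	#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
		/* store clock fast, available from System z9 onwards */
		asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
	#else
		/* store clock, works on every machine level */
		asm volatile("stck %0" : "=Q" (clk) : : "cc");
	#endif
		return clk;
	}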
diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 0ad2f1e1ce9..49e76e8b477 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -91,7 +91,6 @@ OBJCOPYFLAGS := -O binary head-y := arch/s390/kernel/head.o head-y += arch/s390/kernel/$(if $(CONFIG_64BIT),head64.o,head31.o) -head-y += arch/s390/kernel/init_task.o # See arch/s390/Kbuild for content of core part of the kernel core-y += arch/s390/ diff --git a/arch/s390/boot/.gitignore b/arch/s390/boot/.gitignore new file mode 100644 index 00000000000..017d5912ad2 --- /dev/null +++ b/arch/s390/boot/.gitignore @@ -0,0 +1,2 @@ +image +bzImage diff --git a/arch/s390/boot/compressed/.gitignore b/arch/s390/boot/compressed/.gitignore new file mode 100644 index 00000000000..ae06b9b4c02 --- /dev/null +++ b/arch/s390/boot/compressed/.gitignore @@ -0,0 +1,3 @@ +sizes.h +vmlinux +vmlinux.lds diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 1957a9dd256..37d2bf26796 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -155,7 +155,6 @@ CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_DES=m CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index 451273ad4d3..10a50880294 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -11,25 +11,28 @@ * Force strict CPU ordering. * And yes, this is required on UP too when we're talking * to devices. - * - * This is very similar to the ppc eieio/sync instruction in that is - * does a checkpoint syncronisation & makes sure that - * all memory ops have completed wrt other CPU's ( see 7-15 POP DJB ). */ -#define eieio() asm volatile("bcr 15,0" : : : "memory") -#define SYNC_OTHER_CORES(x) eieio() -#define mb() eieio() -#define rmb() eieio() -#define wmb() eieio() -#define read_barrier_depends() do { } while(0) -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#define smp_mb__before_clear_bit() smp_mb() -#define smp_mb__after_clear_bit() smp_mb() +static inline void mb(void) +{ +#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES + /* Fast-BCR without checkpoint synchronization */ + asm volatile("bcr 14,0" : : : "memory"); +#else + asm volatile("bcr 15,0" : : : "memory"); +#endif +} + +#define rmb() mb() +#define wmb() mb() +#define read_barrier_depends() do { } while(0) +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() +#define smp_mb__before_clear_bit() smp_mb() +#define smp_mb__after_clear_bit() smp_mb() -#define set_mb(var, value) do { var = value; mb(); } while (0) +#define set_mb(var, value) do { var = value; mb(); } while (0) #endif /* __ASM_BARRIER_H */ diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h index f2ea2c56a7e..f2ef34f6d6e 100644 --- a/arch/s390/include/asm/ccwgroup.h +++ b/arch/s390/include/asm/ccwgroup.h @@ -29,9 +29,7 @@ struct ccwgroup_device { /** * struct ccwgroup_driver - driver for ccw group devices - * @max_slaves: maximum number of slave devices - * @driver_id: unique id - * @probe: function called on probe + * @setup: function called during device creation to setup the device * @remove: function called on remove * @set_online: function called when device is set online * @set_offline: function called when device is set offline @@ -44,10 +42,7 @@ struct ccwgroup_device { * @driver: embedded driver 
structure */ struct ccwgroup_driver { - int max_slaves; - unsigned long driver_id; - - int (*probe) (struct ccwgroup_device *); + int (*setup) (struct ccwgroup_device *); void (*remove) (struct ccwgroup_device *); int (*set_online) (struct ccwgroup_device *); int (*set_offline) (struct ccwgroup_device *); @@ -63,9 +58,8 @@ struct ccwgroup_driver { extern int ccwgroup_driver_register (struct ccwgroup_driver *cdriver); extern void ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver); -int ccwgroup_create_from_string(struct device *root, unsigned int creator_id, - struct ccw_driver *cdrv, int num_devices, - const char *buf); +int ccwgroup_create_dev(struct device *root, struct ccwgroup_driver *gdrv, + int num_devices, const char *buf); extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev); extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev); diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index b7ff6afc3ca..27216d31799 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -38,11 +38,8 @@ static inline void * phys_to_virt(unsigned long address) return (void *) address; } -/* - * Convert a physical pointer to a virtual kernel pointer for /dev/mem - * access - */ -#define xlate_dev_mem_ptr(p) __va(p) +void *xlate_dev_mem_ptr(unsigned long phys); +void unxlate_dev_mem_ptr(unsigned long phys, void *addr); /* * Convert a virtual cached pointer to an uncached pointer diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index d75c8e78f7e..f039d86adf6 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@ -258,11 +258,6 @@ struct slsb { u8 val[QDIO_MAX_BUFFERS_PER_Q]; } __attribute__ ((packed, aligned(256))); -#define CHSC_AC2_MULTI_BUFFER_AVAILABLE 0x0080 -#define CHSC_AC2_MULTI_BUFFER_ENABLED 0x0040 -#define CHSC_AC2_DATA_DIV_AVAILABLE 0x0010 -#define CHSC_AC2_DATA_DIV_ENABLED 0x0002 - /** * struct qdio_outbuf_state - SBAL related asynchronous operation information * (for communication with upper layer programs) @@ -293,6 +288,8 @@ struct qdio_outbuf_state { #define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */ #define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */ +#define CHSC_AC2_MULTI_BUFFER_AVAILABLE 0x0080 +#define CHSC_AC2_MULTI_BUFFER_ENABLED 0x0040 #define CHSC_AC2_DATA_DIV_AVAILABLE 0x0010 #define CHSC_AC2_DATA_DIV_ENABLED 0x0002 @@ -328,11 +325,13 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int, int, int, unsigned long); /* qdio errors reported to the upper-layer program */ -#define QDIO_ERROR_SIGA_TARGET 0x02 -#define QDIO_ERROR_SIGA_ACCESS_EXCEPTION 0x10 -#define QDIO_ERROR_SIGA_BUSY 0x20 -#define QDIO_ERROR_ACTIVATE_CHECK_CONDITION 0x40 -#define QDIO_ERROR_SLSB_STATE 0x80 +#define QDIO_ERROR_ACTIVATE 0x0001 +#define QDIO_ERROR_GET_BUF_STATE 0x0002 +#define QDIO_ERROR_SET_BUF_STATE 0x0004 +#define QDIO_ERROR_SLSB_STATE 0x0100 + +#define QDIO_ERROR_FATAL 0x00ff +#define QDIO_ERROR_TEMPORARY 0xff00 /* for qdio_cleanup */ #define QDIO_FLAG_CLEANUP_USING_CLEAR 0x01 diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index b21e46e5d4b..7244e1f6412 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -82,7 +82,6 @@ extern unsigned int user_mode; #define MACHINE_FLAG_LPAR (1UL << 12) #define MACHINE_FLAG_SPP (1UL << 13) #define MACHINE_FLAG_TOPOLOGY (1UL << 14) -#define MACHINE_FLAG_STCKF (1UL << 15) #define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) #define MACHINE_IS_KVM 
(S390_lowcore.machine_flags & MACHINE_FLAG_KVM) @@ -101,7 +100,6 @@ extern unsigned int user_mode; #define MACHINE_HAS_PFMF (0) #define MACHINE_HAS_SPP (0) #define MACHINE_HAS_TOPOLOGY (0) -#define MACHINE_HAS_STCKF (0) #else /* __s390x__ */ #define MACHINE_HAS_IEEE (1) #define MACHINE_HAS_CSP (1) @@ -113,7 +111,6 @@ extern unsigned int user_mode; #define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF) #define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP) #define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY) -#define MACHINE_HAS_STCKF (S390_lowcore.machine_flags & MACHINE_FLAG_STCKF) #endif /* __s390x__ */ #define ZFCPDUMP_HSA_SIZE (32UL<<20) diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index c77c6de6f6c..0b6f586c138 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h @@ -16,7 +16,7 @@ extern struct mutex smp_cpu_state_mutex; extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; -extern int __cpu_up(unsigned int cpu); +extern int __cpu_up(unsigned int cpu, struct task_struct *tidle); extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index a73038155e0..003b04edcff 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -95,7 +95,6 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ #define TIF_SECCOMP 10 /* secure computing */ #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */ -#define TIF_SIE 12 /* guest execution active */ #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_31BIT 17 /* 32bit process */ @@ -114,7 +113,6 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1<<TIF_SECCOMP) #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) -#define _TIF_SIE (1<<TIF_SIE) #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) #define _TIF_31BIT (1<<TIF_31BIT) #define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index c447a27a7fd..239ece9e53c 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -73,11 +73,15 @@ static inline void local_tick_enable(unsigned long long comp) typedef unsigned long long cycles_t; -static inline unsigned long long get_clock (void) +static inline unsigned long long get_clock(void) { unsigned long long clk; +#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES + asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc"); +#else asm volatile("stck %0" : "=Q" (clk) : : "cc"); +#endif return clk; } @@ -86,17 +90,6 @@ static inline void get_clock_ext(char *clk) asm volatile("stcke %0" : "=Q" (*clk) : : "cc"); } -static inline unsigned long long get_clock_fast(void) -{ - unsigned long long clk; - - if (MACHINE_HAS_STCKF) - asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc"); - else - clk = get_clock(); - return clk; -} - static inline unsigned long long get_clock_xt(void) { unsigned char clk[16]; diff --git a/arch/s390/kernel/.gitignore b/arch/s390/kernel/.gitignore new file mode 100644 index 00000000000..c5f676c3c22 --- /dev/null +++ b/arch/s390/kernel/.gitignore @@ -0,0 +1 @@ +vmlinux.lds diff --git a/arch/s390/kernel/Makefile 
b/arch/s390/kernel/Makefile index 884b18afc86..9733b3f0eb6 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -28,7 +28,7 @@ obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \ obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) -extra-y += head.o init_task.o vmlinux.lds +extra-y += head.o vmlinux.lds extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o) obj-$(CONFIG_MODULES) += s390_ksyms.o module.o diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 28040fd5e8a..377c096ca4a 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -437,13 +437,6 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) sp = current->sas_ss_sp + current->sas_ss_size; } - /* This is the legacy signal stack switching. */ - else if (!user_mode(regs) && - !(ka->sa.sa_flags & SA_RESTORER) && - ka->sa.sa_restorer) { - sp = (unsigned long) ka->sa.sa_restorer; - } - return (void __user *)((sp - frame_size) & -8ul); } diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 9475e682727..d84181f1f5e 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -374,8 +374,6 @@ static __init void detect_machine_facilities(void) S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS; if (test_facility(40)) S390_lowcore.machine_flags |= MACHINE_FLAG_SPP; - if (test_facility(25)) - S390_lowcore.machine_flags |= MACHINE_FLAG_STCKF; #endif } diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 74ee563fe62..1ae93b573d7 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -145,22 +145,23 @@ STACK_SIZE = 1 << STACK_SHIFT * gpr2 = prev */ ENTRY(__switch_to) + stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task + st %r15,__THREAD_ksp(%r2) # store kernel stack of prev l %r4,__THREAD_info(%r2) # get thread_info of prev l %r5,__THREAD_info(%r3) # get thread_info of next + lr %r15,%r5 + ahi %r15,STACK_SIZE # end of kernel stack of next + st %r3,__LC_CURRENT # store task struct of next + st %r5,__LC_THREAD_INFO # store thread info of next + st %r15,__LC_KERNEL_STACK # store end of kernel stack + lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 + mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next + l %r15,__THREAD_ksp(%r3) # load kernel stack of next tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending? jz 0f ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev oi __TI_flags+3(%r5),_TIF_MCCK_PENDING # set it in next -0: stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task - st %r15,__THREAD_ksp(%r2) # store kernel stack of prev - l %r15,__THREAD_ksp(%r3) # load kernel stack of next - lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 
4 - lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task - st %r3,__LC_CURRENT # store task struct of next - mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next - st %r5,__LC_THREAD_INFO # store thread info of next - ahi %r5,STACK_SIZE # end of kernel stack of next - st %r5,__LC_KERNEL_STACK # store end of kernel stack +0: lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task br %r14 __critical_start: diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 4e1c292fa7e..229fe1d0774 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -81,16 +81,14 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) .macro HANDLE_SIE_INTERCEPT scratch #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) - tm __TI_flags+6(%r12),_TIF_SIE>>8 - jz .+42 - tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP - jz .+8 - .insn s,0xb2800000,BASED(.Lhost_id) # set host id + tmhh %r8,0x0001 # interrupting from user ? + jnz .+42 lgr \scratch,%r9 slg \scratch,BASED(.Lsie_loop) clg \scratch,BASED(.Lsie_length) - jhe .+10 + jhe .+22 lg %r9,BASED(.Lsie_loop) + SPP BASED(.Lhost_id) # set host id #endif .endm @@ -148,6 +146,14 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) ssm __LC_RETURN_PSW .endm + .macro STCK savearea +#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES + .insn s,0xb27c0000,\savearea # store clock fast +#else + .insn s,0xb2050000,\savearea # store clock +#endif + .endm + .section .kprobes.text, "ax" /* @@ -158,22 +164,23 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) * gpr2 = prev */ ENTRY(__switch_to) + stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task + stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev lg %r4,__THREAD_info(%r2) # get thread_info of prev lg %r5,__THREAD_info(%r3) # get thread_info of next + lgr %r15,%r5 + aghi %r15,STACK_SIZE # end of kernel stack of next + stg %r3,__LC_CURRENT # store task struct of next + stg %r5,__LC_THREAD_INFO # store thread info of next + stg %r15,__LC_KERNEL_STACK # store end of kernel stack + lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 + mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next + lg %r15,__THREAD_ksp(%r3) # load kernel stack of next tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending? jz 0f ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev oi __TI_flags+7(%r5),_TIF_MCCK_PENDING # set it in next -0: stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task - stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev - lg %r15,__THREAD_ksp(%r3) # load kernel stack of next - lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 
4 - lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task - stg %r3,__LC_CURRENT # store task struct of next - mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next - stg %r5,__LC_THREAD_INFO # store thread info of next - aghi %r5,STACK_SIZE # end of kernel stack of next - stg %r5,__LC_KERNEL_STACK # store end of kernel stack +0: lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task br %r14 __critical_start: @@ -458,7 +465,7 @@ pgm_svcper: * IO interrupt handler routine */ ENTRY(io_int_handler) - stck __LC_INT_CLOCK + STCK __LC_INT_CLOCK stpt __LC_ASYNC_ENTER_TIMER stmg %r8,%r15,__LC_SAVE_AREA_ASYNC lg %r10,__LC_LAST_BREAK @@ -604,7 +611,7 @@ io_notify_resume: * External interrupt handler routine */ ENTRY(ext_int_handler) - stck __LC_INT_CLOCK + STCK __LC_INT_CLOCK stpt __LC_ASYNC_ENTER_TIMER stmg %r8,%r15,__LC_SAVE_AREA_ASYNC lg %r10,__LC_LAST_BREAK @@ -622,6 +629,7 @@ ext_skip: mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC stmg %r8,%r9,__PT_PSW(%r11) TRACE_IRQS_OFF + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lghi %r1,4096 lgr %r2,%r11 # pass pointer to pt_regs llgf %r3,__LC_EXT_CPU_ADDR # get cpu address + interruption code @@ -638,7 +646,7 @@ ENTRY(psw_idle) larl %r1,psw_idle_lpsw+4 stg %r1,__SF_EMPTY+8(%r15) larl %r1,.Lvtimer_max - stck __IDLE_ENTER(%r2) + STCK __IDLE_ENTER(%r2) ltr %r5,%r5 stpt __VQ_IDLE_ENTER(%r3) jz psw_idle_lpsw @@ -654,7 +662,7 @@ __critical_end: * Machine check handler routines */ ENTRY(mcck_int_handler) - stck __LC_MCCK_CLOCK + STCK __LC_MCCK_CLOCK la %r1,4095 # revalidate r1 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs @@ -967,7 +975,6 @@ ENTRY(sie64a) xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # host id == 0 lmg %r0,%r13,0(%r3) # load guest gprs 0-13 lg %r14,__LC_THREAD_INFO # pointer thread_info struct - oi __TI_flags+6(%r14),_TIF_SIE>>8 sie_loop: lg %r14,__LC_THREAD_INFO # pointer thread_info struct tm __TI_flags+7(%r14),_TIF_EXIT_SIE @@ -985,7 +992,6 @@ sie_done: lg %r14,__LC_THREAD_INFO # pointer thread_info struct sie_exit: lctlg %c1,%c1,__LC_USER_ASCE # load primary asce - ni __TI_flags+6(%r14),255-(_TIF_SIE>>8) lg %r14,__SF_EMPTY+8(%r15) # load guest register save area stmg %r0,%r13,0(%r14) # save guest gprs 0-13 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers @@ -994,7 +1000,6 @@ sie_exit: sie_fault: lctlg %c1,%c1,__LC_USER_ASCE # load primary asce lg %r14,__LC_THREAD_INFO # pointer thread_info struct - ni __TI_flags+6(%r14),255-(_TIF_SIE>>8) lg %r14,__SF_EMPTY+8(%r15) # load guest register save area stmg %r0,%r13,0(%r14) # save guest gprs 0-13 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index adccd908ebc..4939d15375a 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -34,125 +34,7 @@ #endif __HEAD -#ifndef CONFIG_IPL - .org 0 - .long 0x00080000,0x80000000+startup # Just a restart PSW -#else -#ifdef CONFIG_IPL_TAPE -#define IPL_BS 1024 - .org 0 - .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded - .long 0x27000000,0x60000001 # by ipl to addresses 0-23. - .long 0x02000000,0x20000000+IPL_BS # (a PSW and two CCWs). 
- .long 0x00000000,0x00000000 # external old psw - .long 0x00000000,0x00000000 # svc old psw - .long 0x00000000,0x00000000 # program check old psw - .long 0x00000000,0x00000000 # machine check old psw - .long 0x00000000,0x00000000 # io old psw - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x000a0000,0x00000058 # external new psw - .long 0x000a0000,0x00000060 # svc new psw - .long 0x000a0000,0x00000068 # program check new psw - .long 0x000a0000,0x00000070 # machine check new psw - .long 0x00080000,0x80000000+.Lioint # io new psw - .org 0x100 -# -# subroutine for loading from tape -# Parameters: -# R1 = device number -# R2 = load address -.Lloader: - st %r14,.Lldret - la %r3,.Lorbread # r3 = address of orb - la %r5,.Lirb # r5 = address of irb - st %r2,.Lccwread+4 # initialize CCW data addresses - lctl %c6,%c6,.Lcr6 - slr %r2,%r2 -.Lldlp: - la %r6,3 # 3 retries -.Lssch: - ssch 0(%r3) # load chunk of IPL_BS bytes - bnz .Llderr -.Lw4end: - bas %r14,.Lwait4io - tm 8(%r5),0x82 # do we have a problem ? - bnz .Lrecov - slr %r7,%r7 - icm %r7,3,10(%r5) # get residual count - lcr %r7,%r7 - la %r7,IPL_BS(%r7) # IPL_BS-residual=#bytes read - ar %r2,%r7 # add to total size - tm 8(%r5),0x01 # found a tape mark ? - bnz .Ldone - l %r0,.Lccwread+4 # update CCW data addresses - ar %r0,%r7 - st %r0,.Lccwread+4 - b .Lldlp -.Ldone: - l %r14,.Lldret - br %r14 # r2 contains the total size -.Lrecov: - bas %r14,.Lsense # do the sensing - bct %r6,.Lssch # dec. retry count & branch - b .Llderr -# -# Sense subroutine -# -.Lsense: - st %r14,.Lsnsret - la %r7,.Lorbsense - ssch 0(%r7) # start sense command - bnz .Llderr - bas %r14,.Lwait4io - l %r14,.Lsnsret - tm 8(%r5),0x82 # do we have a problem ? - bnz .Llderr - br %r14 -# -# Wait for interrupt subroutine -# -.Lwait4io: - lpsw .Lwaitpsw -.Lioint: - c %r1,0xb8 # compare subchannel number - bne .Lwait4io - tsch 0(%r5) - slr %r0,%r0 - tm 8(%r5),0x82 # do we have a problem ? - bnz .Lwtexit - tm 8(%r5),0x04 # got device end ? - bz .Lwait4io -.Lwtexit: - br %r14 -.Llderr: - lpsw .Lcrash - - .align 8 -.Lorbread: - .long 0x00000000,0x0080ff00,.Lccwread - .align 8 -.Lorbsense: - .long 0x00000000,0x0080ff00,.Lccwsense - .align 8 -.Lccwread: - .long 0x02200000+IPL_BS,0x00000000 -.Lccwsense: - .long 0x04200001,0x00000000 -.Lwaitpsw: - .long 0x020a0000,0x80000000+.Lioint - -.Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 -.Lcr6: .long 0xff000000 - .align 8 -.Lcrash:.long 0x000a0000,0x00000000 -.Lldret:.long 0 -.Lsnsret: .long 0 -#endif /* CONFIG_IPL_TAPE */ - -#ifdef CONFIG_IPL_VM #define IPL_BS 0x730 .org 0 .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded @@ -256,7 +138,6 @@ __HEAD .long 0x02600050,0x00000000 .endr .long 0x02200050,0x00000000 -#endif /* CONFIG_IPL_VM */ iplstart: lh %r1,0xb8 # test if subchannel number @@ -325,7 +206,6 @@ iplstart: clc 0(3,%r2),.L_eof bz .Lagain2 -#ifdef CONFIG_IPL_VM # # reset files in VM reader # @@ -358,7 +238,6 @@ iplstart: .long 0x00080000,0x80000000+.Lrdrint .Lrdrwaitpsw: .long 0x020a0000,0x80000000+.Lrdrint -#endif # # everything loaded, go for it @@ -376,8 +255,6 @@ iplstart: .L_eof: .long 0xc5d6c600 /* C'EOF' */ .L_hdr: .long 0xc8c4d900 /* C'HDR' */ -#endif /* CONFIG_IPL */ - # # SALIPL loader support. Based on a patch by Rob van der Heij. 
# This entry point is called directly from the SALIPL loader and diff --git a/arch/s390/kernel/init_task.c b/arch/s390/kernel/init_task.c deleted file mode 100644 index 4d1c9fb0b54..00000000000 --- a/arch/s390/kernel/init_task.c +++ /dev/null @@ -1,38 +0,0 @@ -/* - * arch/s390/kernel/init_task.c - * - * S390 version - * - * Derived from "arch/i386/kernel/init_task.c" - */ - -#include <linux/mm.h> -#include <linux/fs.h> -#include <linux/module.h> -#include <linux/sched.h> -#include <linux/init_task.h> -#include <linux/mqueue.h> - -#include <asm/uaccess.h> -#include <asm/pgtable.h> - -static struct signal_struct init_signals = INIT_SIGNALS(init_signals); -static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -/* - * Initial thread structure. - * - * We need to make sure that this is THREAD_SIZE aligned due to the - * way process stacks are handled. This is done by having a special - * "init_task" linker map entry.. - */ -union thread_union init_thread_union __init_task_data = - { INIT_THREAD_INFO(init_task) }; - -/* - * Initial task structure. - * - * All other task structs will be allocated on slabs in fork.c - */ -struct task_struct init_task = INIT_TASK(init_task); - -EXPORT_SYMBOL(init_task); diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 02f300fbf07..4993e689b2c 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -719,7 +719,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) long ret = 0; /* Do the secure computing check first. */ - secure_computing(regs->gprs[2]); + secure_computing_strict(regs->gprs[2]); /* * The sysc_tracesys code in entry.S stored the system diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index f7582b27f60..8a4e2b760d5 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -235,13 +235,6 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) sp = current->sas_ss_sp + current->sas_ss_size; } - /* This is the legacy signal stack switching. */ - else if (!user_mode(regs) && - !(ka->sa.sa_flags & SA_RESTORER) && - ka->sa.sa_restorer) { - sp = (unsigned long) ka->sa.sa_restorer; - } - return (void __user *)((sp - frame_size) & -8ul); } @@ -414,15 +407,6 @@ void do_signal(struct pt_regs *regs) struct k_sigaction ka; sigset_t *oldset; - /* - * We want the common case to go fast, which - * is why we may in certain cases get here from - * kernel mode. Just return without doing anything - * if so. 
- */ - if (!user_mode(regs)) - return; - if (test_thread_flag(TIF_RESTORE_SIGMASK)) oldset = ¤t->saved_sigmask; else diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 1f77227669e..647ba942589 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -85,7 +85,6 @@ enum { struct pcpu { struct cpu cpu; - struct task_struct *idle; /* idle process for the cpu */ struct _lowcore *lowcore; /* lowcore page(s) for the cpu */ unsigned long async_stack; /* async stack for the cpu */ unsigned long panic_stack; /* panic stack for the cpu */ @@ -226,6 +225,8 @@ out: return -ENOMEM; } +#ifdef CONFIG_HOTPLUG_CPU + static void pcpu_free_lowcore(struct pcpu *pcpu) { pcpu_sigp_retry(pcpu, sigp_set_prefix, 0); @@ -247,6 +248,8 @@ static void pcpu_free_lowcore(struct pcpu *pcpu) } } +#endif /* CONFIG_HOTPLUG_CPU */ + static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) { struct _lowcore *lc = pcpu->lowcore; @@ -721,26 +724,9 @@ static void __cpuinit smp_start_secondary(void *cpuvoid) cpu_idle(); } -struct create_idle { - struct work_struct work; - struct task_struct *idle; - struct completion done; - int cpu; -}; - -static void __cpuinit smp_fork_idle(struct work_struct *work) -{ - struct create_idle *c_idle; - - c_idle = container_of(work, struct create_idle, work); - c_idle->idle = fork_idle(c_idle->cpu); - complete(&c_idle->done); -} - /* Upping and downing of CPUs */ -int __cpuinit __cpu_up(unsigned int cpu) +int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) { - struct create_idle c_idle; struct pcpu *pcpu; int rc; @@ -750,22 +736,12 @@ int __cpuinit __cpu_up(unsigned int cpu) if (pcpu_sigp_retry(pcpu, sigp_initial_cpu_reset, 0) != sigp_order_code_accepted) return -EIO; - if (!pcpu->idle) { - c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done); - INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle); - c_idle.cpu = cpu; - schedule_work(&c_idle.work); - wait_for_completion(&c_idle.done); - if (IS_ERR(c_idle.idle)) - return PTR_ERR(c_idle.idle); - pcpu->idle = c_idle.idle; - } - init_idle(pcpu->idle, cpu); + rc = pcpu_alloc_lowcore(pcpu, cpu); if (rc) return rc; pcpu_prepare_secondary(pcpu, cpu); - pcpu_attach_task(pcpu, pcpu->idle); + pcpu_attach_task(pcpu, tidle); pcpu_start_fn(pcpu, smp_start_secondary, NULL); while (!cpu_online(cpu)) cpu_relax(); @@ -852,7 +828,6 @@ void __init smp_prepare_boot_cpu(void) struct pcpu *pcpu = pcpu_devices; boot_cpu_address = stap(); - pcpu->idle = current; pcpu->state = CPU_STATE_CONFIGURED; pcpu->address = boot_cpu_address; pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix(); diff --git a/arch/s390/kernel/vdso32/.gitignore b/arch/s390/kernel/vdso32/.gitignore new file mode 100644 index 00000000000..e45fba9d0ce --- /dev/null +++ b/arch/s390/kernel/vdso32/.gitignore @@ -0,0 +1 @@ +vdso32.lds diff --git a/arch/s390/kernel/vdso64/.gitignore b/arch/s390/kernel/vdso64/.gitignore new file mode 100644 index 00000000000..3fd18cf9fec --- /dev/null +++ b/arch/s390/kernel/vdso64/.gitignore @@ -0,0 +1 @@ +vdso64.lds diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 46ef3fd0663..72cec9ecd96 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -294,7 +294,7 @@ static inline int do_exception(struct pt_regs *regs, int access) down_read(&mm->mmap_sem); #ifdef CONFIG_PGSTE - if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) { + if ((current->flags & PF_VCPU) && S390_lowcore.gmap) { address = __gmap_fault(address, (struct gmap *) S390_lowcore.gmap); if (address == -EFAULT) { @@ -549,19 
+549,15 @@ static void pfault_interrupt(struct ext_code ext_code, if ((subcode & 0xff00) != __SUBCODE_MASK) return; kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++; - if (subcode & 0x0080) { - /* Get the token (= pid of the affected task). */ - pid = sizeof(void *) == 4 ? param32 : param64; - rcu_read_lock(); - tsk = find_task_by_pid_ns(pid, &init_pid_ns); - if (tsk) - get_task_struct(tsk); - rcu_read_unlock(); - if (!tsk) - return; - } else { - tsk = current; - } + /* Get the token (= pid of the affected task). */ + pid = sizeof(void *) == 4 ? param32 : param64; + rcu_read_lock(); + tsk = find_task_by_pid_ns(pid, &init_pid_ns); + if (tsk) + get_task_struct(tsk); + rcu_read_unlock(); + if (!tsk) + return; spin_lock(&pfault_lock); if (subcode & 0x0080) { /* signal bit is set -> a page has been swapped in by VM */ @@ -574,6 +570,7 @@ static void pfault_interrupt(struct ext_code ext_code, tsk->thread.pfault_wait = 0; list_del(&tsk->thread.list); wake_up_process(tsk); + put_task_struct(tsk); } else { /* Completion interrupt was faster than initial * interrupt. Set pfault_wait to -1 so the initial @@ -585,24 +582,35 @@ static void pfault_interrupt(struct ext_code ext_code, if (tsk->state == TASK_RUNNING) tsk->thread.pfault_wait = -1; } - put_task_struct(tsk); } else { /* signal bit not set -> a real page is missing. */ - if (tsk->thread.pfault_wait == -1) { + if (WARN_ON_ONCE(tsk != current)) + goto out; + if (tsk->thread.pfault_wait == 1) { + /* Already on the list with a reference: put to sleep */ + __set_task_state(tsk, TASK_UNINTERRUPTIBLE); + set_tsk_need_resched(tsk); + } else if (tsk->thread.pfault_wait == -1) { /* Completion interrupt was faster than the initial * interrupt (pfault_wait == -1). Set pfault_wait * back to zero and exit. */ tsk->thread.pfault_wait = 0; } else { /* Initial interrupt arrived before completion - * interrupt. Let the task sleep. */ + * interrupt. Let the task sleep. + * An extra task reference is needed since a different + * cpu may set the task state to TASK_RUNNING again + * before the scheduler is reached. 
*/ + get_task_struct(tsk); tsk->thread.pfault_wait = 1; list_add(&tsk->thread.list, &pfault_list); - set_task_state(tsk, TASK_UNINTERRUPTIBLE); + __set_task_state(tsk, TASK_UNINTERRUPTIBLE); set_tsk_need_resched(tsk); } } +out: spin_unlock(&pfault_lock); + put_task_struct(tsk); } static int __cpuinit pfault_cpu_notify(struct notifier_block *self, @@ -620,6 +628,7 @@ static int __cpuinit pfault_cpu_notify(struct notifier_block *self, list_del(&thread->list); tsk = container_of(thread, struct task_struct, thread); wake_up_process(tsk); + put_task_struct(tsk); } spin_unlock_irq(&pfault_lock); break; diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index 597bb2d27c3..900de2b3cf2 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -58,6 +58,8 @@ void arch_release_hugepage(struct page *page) ptep = (pte_t *) page[1].index; if (!ptep) return; + clear_table((unsigned long *) ptep, _PAGE_TYPE_EMPTY, + PTRS_PER_PTE * sizeof(pte_t)); page_table_free(&init_mm, (unsigned long *) ptep); page[1].index = 0; } diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index e1335dc2b1b..795a0a9bb2e 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c @@ -12,6 +12,7 @@ #include <linux/types.h> #include <linux/errno.h> #include <linux/gfp.h> +#include <linux/cpu.h> #include <asm/ctl_reg.h> /* @@ -166,3 +167,69 @@ out: free_page((unsigned long) buf); return rc; } + +/* + * Check if physical address is within prefix or zero page + */ +static int is_swapped(unsigned long addr) +{ + unsigned long lc; + int cpu; + + if (addr < sizeof(struct _lowcore)) + return 1; + for_each_online_cpu(cpu) { + lc = (unsigned long) lowcore_ptr[cpu]; + if (addr > lc + sizeof(struct _lowcore) - 1 || addr < lc) + continue; + return 1; + } + return 0; +} + +/* + * Return swapped prefix or zero page address + */ +static unsigned long get_swapped(unsigned long addr) +{ + unsigned long prefix = store_prefix(); + + if (addr < sizeof(struct _lowcore)) + return addr + prefix; + if (addr >= prefix && addr < prefix + sizeof(struct _lowcore)) + return addr - prefix; + return addr; +} + +/* + * Convert a physical pointer for /dev/mem access + * + * For swapped prefix pages a new buffer is returned that contains a copy of + * the absolute memory. The buffer size is maximum one page large. + */ +void *xlate_dev_mem_ptr(unsigned long addr) +{ + void *bounce = (void *) addr; + unsigned long size; + + get_online_cpus(); + preempt_disable(); + if (is_swapped(addr)) { + size = PAGE_SIZE - (addr & ~PAGE_MASK); + bounce = (void *) __get_free_page(GFP_ATOMIC); + if (bounce) + memcpy_real(bounce, (void *) get_swapped(addr), size); + } + preempt_enable(); + put_online_cpus(); + return bounce; +} + +/* + * Free converted buffer for /dev/mem access (if necessary) + */ +void unxlate_dev_mem_ptr(unsigned long addr, void *buf) +{ + if ((void *) addr != buf) + free_page((unsigned long) buf); +} diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 6e765bf0067..a3db5a3ea08 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -822,6 +822,8 @@ int s390_enable_sie(void) /* we copy the mm and let dup_mm create the page tables with_pgstes */ tsk->mm->context.alloc_pgste = 1; + /* make sure that both mms have a correct rss state */ + sync_mm_rss(tsk->mm); mm = dup_mm(tsk); tsk->mm->context.alloc_pgste = 0; if (!mm) |
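Note: the xlate_dev_mem_ptr()/unxlate_dev_mem_ptr() pair added to maccess.c replaces the old __va() define in io.h so that accesses to the swapped prefix/zero page go through a bounce page filled with memcpy_real(); per the io.h comment, the /dev/mem access path is the intended consumer. A minimal caller-side sketch, assuming a hypothetical read_dev_mem_chunk() helper that is not part of this patch; the bounce copy covers at most one page, so the length is assumed to be clamped to the current page by the caller:

	#include <linux/uaccess.h>
	#include <asm/io.h>

	/* Sketch only: how a caller pairs the two new helpers. */
	static ssize_t read_dev_mem_chunk(unsigned long phys, char __user *buf,
					  size_t count)
	{
		ssize_t ret = count;
		void *ptr;

		ptr = xlate_dev_mem_ptr(phys);	/* may return a bounce copy */
		if (!ptr)			/* bounce page allocation failed */
			return -ENOMEM;
		if (copy_to_user(buf, ptr, count))
			ret = -EFAULT;
		unxlate_dev_mem_ptr(phys, ptr);	/* frees the bounce page, if any */
		return ret;
	}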