Diffstat (limited to 'arch/s390')
32 files changed, 2393 insertions, 884 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 0f293aa7b0f..e6ec418093e 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -41,6 +41,11 @@ config GENERIC_HWEIGHT config GENERIC_TIME def_bool y +config GENERIC_BUG + bool + depends on BUG + default y + config NO_IOMEM def_bool y @@ -514,6 +519,14 @@ config KEXEC current kernel, and to start another kernel. It is like a reboot but is independent of hardware/microcode support. +config ZFCPDUMP + tristate "zfcpdump support" + select SMP + default n + help + Select this option if you want to build an zfcpdump enabled kernel. + Refer to "Documentation/s390/zfcpdump.txt" for more details on this. + endmenu source "net/Kconfig" diff --git a/arch/s390/Makefile b/arch/s390/Makefile index b1e55849646..68441e0e74b 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -67,8 +67,10 @@ endif ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y) cflags-$(CONFIG_CHECK_STACK) += -mstack-size=$(STACK_SIZE) +ifneq ($(call cc-option-yn,-mstack-size=8192),y) cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD) endif +endif ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y) cflags-$(CONFIG_WARN_STACK) += -mwarn-dynamicstack @@ -103,6 +105,9 @@ install: vmlinux image: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ +zfcpdump: + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + archclean: $(Q)$(MAKE) $(clean)=$(boot) diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 0c3cf4b16ae..ee89b33145d 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -668,45 +668,7 @@ EXPORT_SYMBOL_GPL(appldata_register_ops); EXPORT_SYMBOL_GPL(appldata_unregister_ops); EXPORT_SYMBOL_GPL(appldata_diag); -#ifdef MODULE -/* - * Kernel symbols needed by appldata_mem and appldata_os modules. - * However, if this file is compiled as a module (for testing only), these - * symbols are not exported. In this case, we define them locally and export - * those. 
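The appldata_base.c hunk that starts here (the removal continues just below) drops the MODULE-only local stubs and instead exports the real si_swapinfo(), nr_threads, nr_running() and nr_iowait(), so appldata_mem and appldata_os can be built as modules against the genuine implementations. A minimal sketch of the consumer side, loosely modelled on appldata_os — the struct and field names are made up for this illustration:

#include <linux/kernel.h>	/* struct sysinfo */
#include <linux/swap.h>		/* si_swapinfo() */
#include <linux/sched.h>	/* nr_running(), nr_iowait(), nr_threads */

struct os_snapshot {		/* illustrative, not the driver's real record */
	unsigned long freeswap;
	unsigned long totalswap;
	unsigned long running;
	unsigned long iowait;
	int threads;
};

static void os_snapshot_fill(struct os_snapshot *s)
{
	struct sysinfo val;

	si_swapinfo(&val);	/* now the real implementation, not a stub */
	s->freeswap  = val.freeswap;
	s->totalswap = val.totalswap;
	s->running   = nr_running();
	s->iowait    = nr_iowait();
	s->threads   = nr_threads;
}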
- */ -void si_swapinfo(struct sysinfo *val) -{ - val->freeswap = -1ul; - val->totalswap = -1ul; -} - -unsigned long avenrun[3] = {-1 - FIXED_1/200, -1 - FIXED_1/200, - -1 - FIXED_1/200}; -int nr_threads = -1; - -void get_full_page_state(struct page_state *ps) -{ - memset(ps, -1, sizeof(struct page_state)); -} - -unsigned long nr_running(void) -{ - return -1; -} - -unsigned long nr_iowait(void) -{ - return -1; -} - -/*unsigned long nr_context_switches(void) -{ - return -1; -}*/ -#endif /* MODULE */ EXPORT_SYMBOL_GPL(si_swapinfo); EXPORT_SYMBOL_GPL(nr_threads); EXPORT_SYMBOL_GPL(nr_running); EXPORT_SYMBOL_GPL(nr_iowait); -//EXPORT_SYMBOL_GPL(nr_context_switches); diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c index f64b8c867ae..2180ac105b0 100644 --- a/arch/s390/appldata/appldata_net_sum.c +++ b/arch/s390/appldata/appldata_net_sum.c @@ -107,10 +107,7 @@ static void appldata_get_net_sum_data(void *data) tx_dropped = 0; collisions = 0; read_lock(&dev_base_lock); - for (dev = dev_base; dev != NULL; dev = dev->next) { - if (dev->get_stats == NULL) { - continue; - } + for_each_netdev(dev) { stats = dev->get_stats(dev); rx_packets += stats->rx_packets; tx_packets += stats->tx_packets; diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 91636353f6f..3660ca6a330 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -119,7 +119,8 @@ static struct crypto_alg aes_alg = { .cra_name = "aes", .cra_driver_name = "aes-s390", .cra_priority = CRYPT_S390_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER | + CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct s390_aes_ctx), .cra_module = THIS_MODULE, @@ -206,7 +207,8 @@ static struct crypto_alg ecb_aes_alg = { .cra_name = "ecb(aes)", .cra_driver_name = "ecb-aes-s390", .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | + CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct s390_aes_ctx), .cra_type = &crypto_blkcipher_type, @@ -300,7 +302,8 @@ static struct crypto_alg cbc_aes_alg = { .cra_name = "cbc(aes)", .cra_driver_name = "cbc-aes-s390", .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | + CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct s390_aes_ctx), .cra_type = &crypto_blkcipher_type, @@ -333,10 +336,14 @@ static int __init aes_init(void) return -EOPNOTSUPP; /* z9 109 and z9 BC/EC only support 128 bit key length */ - if (keylen_flag == AES_KEYLEN_128) + if (keylen_flag == AES_KEYLEN_128) { + aes_alg.cra_u.cipher.cia_max_keysize = AES_MIN_KEY_SIZE; + ecb_aes_alg.cra_u.blkcipher.max_keysize = AES_MIN_KEY_SIZE; + cbc_aes_alg.cra_u.blkcipher.max_keysize = AES_MIN_KEY_SIZE; printk(KERN_INFO "aes_s390: hardware acceleration only available for" "128 bit keys\n"); + } ret = crypto_register_alg(&aes_alg); if (ret) diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c index 969639f3197..af4460ec381 100644 --- a/arch/s390/crypto/sha1_s390.c +++ b/arch/s390/crypto/sha1_s390.c @@ -25,99 +25,100 @@ */ #include <linux/init.h> #include <linux/module.h> -#include <linux/mm.h> #include <linux/crypto.h> -#include <asm/scatterlist.h> -#include <asm/byteorder.h> + #include "crypt_s390.h" #define SHA1_DIGEST_SIZE 20 #define SHA1_BLOCK_SIZE 64 -struct 
crypt_s390_sha1_ctx { - u64 count; +struct s390_sha1_ctx { + u64 count; /* message length */ u32 state[5]; - u32 buf_len; - u8 buffer[2 * SHA1_BLOCK_SIZE]; + u8 buf[2 * SHA1_BLOCK_SIZE]; }; static void sha1_init(struct crypto_tfm *tfm) { - struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm); - - ctx->state[0] = 0x67452301; - ctx->state[1] = 0xEFCDAB89; - ctx->state[2] = 0x98BADCFE; - ctx->state[3] = 0x10325476; - ctx->state[4] = 0xC3D2E1F0; - - ctx->count = 0; - ctx->buf_len = 0; + struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); + + sctx->state[0] = 0x67452301; + sctx->state[1] = 0xEFCDAB89; + sctx->state[2] = 0x98BADCFE; + sctx->state[3] = 0x10325476; + sctx->state[4] = 0xC3D2E1F0; + sctx->count = 0; } static void sha1_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) { - struct crypt_s390_sha1_ctx *sctx; - long imd_len; - - sctx = crypto_tfm_ctx(tfm); - sctx->count += len * 8; /* message bit length */ - - /* anything in buffer yet? -> must be completed */ - if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) { - /* complete full block and hash */ - memcpy(sctx->buffer + sctx->buf_len, data, - SHA1_BLOCK_SIZE - sctx->buf_len); - crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, - SHA1_BLOCK_SIZE); - data += SHA1_BLOCK_SIZE - sctx->buf_len; - len -= SHA1_BLOCK_SIZE - sctx->buf_len; - sctx->buf_len = 0; + struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); + unsigned int index; + int ret; + + /* how much is already in the buffer? */ + index = sctx->count & 0x3f; + + sctx->count += len; + + if (index + len < SHA1_BLOCK_SIZE) + goto store; + + /* process one stored block */ + if (index) { + memcpy(sctx->buf + index, data, SHA1_BLOCK_SIZE - index); + ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf, + SHA1_BLOCK_SIZE); + BUG_ON(ret != SHA1_BLOCK_SIZE); + data += SHA1_BLOCK_SIZE - index; + len -= SHA1_BLOCK_SIZE - index; } - /* rest of data contains full blocks? */ - imd_len = len & ~0x3ful; - if (imd_len) { - crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len); - data += imd_len; - len -= imd_len; + /* process as many blocks as possible */ + if (len >= SHA1_BLOCK_SIZE) { + ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, + len & ~(SHA1_BLOCK_SIZE - 1)); + BUG_ON(ret != (len & ~(SHA1_BLOCK_SIZE - 1))); + data += ret; + len -= ret; } - /* anything left? store in buffer */ - if (len) { - memcpy(sctx->buffer + sctx->buf_len , data, len); - sctx->buf_len += len; - } -} +store: + /* anything left? */ + if (len) + memcpy(sctx->buf + index , data, len); +} -static void pad_message(struct crypt_s390_sha1_ctx* sctx) +/* Add padding and return the message digest. */ +static void sha1_final(struct crypto_tfm *tfm, u8 *out) { - int index; + struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); + u64 bits; + unsigned int index, end; + int ret; + + /* must perform manual padding */ + index = sctx->count & 0x3f; + end = (index < 56) ? SHA1_BLOCK_SIZE : (2 * SHA1_BLOCK_SIZE); - index = sctx->buf_len; - sctx->buf_len = (sctx->buf_len < 56) ? - SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE; /* start pad with 1 */ - sctx->buffer[index] = 0x80; + sctx->buf[index] = 0x80; + /* pad with zeros */ index++; - memset(sctx->buffer + index, 0x00, sctx->buf_len - index); - /* append length */ - memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count, - sizeof sctx->count); -} + memset(sctx->buf + index, 0x00, end - index - 8); -/* Add padding and return the message digest. 
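Before the sha1_final() rewrite that follows below, here is a standalone sketch of the buffering and padding scheme the new s390_sha1_ctx/s390_sha256_ctx code uses: count holds the message length in bytes, its low six bits give the fill level of the partial block, and the length is appended as a bit count at the end. process_blocks() stands in for crypt_s390_kimd(), and the big-endian length store is glossed over (a plain memcpy is fine on s390 itself):

#include <stdint.h>
#include <string.h>
#include <stddef.h>

#define BLOCK_SIZE 64	/* SHA-1 and SHA-256 share this block size */

struct hash_ctx {
	uint64_t count;			/* total message length in bytes */
	uint8_t buf[2 * BLOCK_SIZE];
};

/* placeholder for crypt_s390_kimd(); len is a multiple of BLOCK_SIZE */
static void process_blocks(struct hash_ctx *ctx, const uint8_t *data, size_t len)
{
}

static void hash_update(struct hash_ctx *ctx, const uint8_t *data, size_t len)
{
	size_t index = ctx->count & (BLOCK_SIZE - 1);	/* bytes already buffered */

	ctx->count += len;
	if (index + len < BLOCK_SIZE)
		goto store;
	if (index) {			/* top up and flush the partial block */
		memcpy(ctx->buf + index, data, BLOCK_SIZE - index);
		process_blocks(ctx, ctx->buf, BLOCK_SIZE);
		data += BLOCK_SIZE - index;
		len -= BLOCK_SIZE - index;
		index = 0;		/* any tail now starts at buf[0] */
	}
	if (len >= BLOCK_SIZE) {	/* hash full blocks straight from data */
		size_t full = len & ~(size_t)(BLOCK_SIZE - 1);

		process_blocks(ctx, data, full);
		data += full;
		len -= full;
	}
store:
	if (len)
		memcpy(ctx->buf + index, data, len);
}

static void hash_final(struct hash_ctx *ctx)
{
	size_t index = ctx->count & (BLOCK_SIZE - 1);
	size_t end = (index < 56) ? BLOCK_SIZE : 2 * BLOCK_SIZE;
	uint64_t bits = ctx->count * 8;	/* length goes in as a bit count */

	ctx->buf[index] = 0x80;				/* a single 1 bit ... */
	memset(ctx->buf + index + 1, 0, end - index - 9);	/* ... then zeros */
	memcpy(ctx->buf + end - 8, &bits, sizeof(bits));	/* big-endian on s390 */
	process_blocks(ctx, ctx->buf, end);
	/* the digest now sits in the (omitted) state words */
}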
*/ -static void sha1_final(struct crypto_tfm *tfm, u8 *out) -{ - struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); + /* append message length */ + bits = sctx->count * 8; + memcpy(sctx->buf + end - 8, &bits, sizeof(bits)); + + ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf, end); + BUG_ON(ret != end); - /* must perform manual padding */ - pad_message(sctx); - crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len); /* copy digest to out */ memcpy(out, sctx->state, SHA1_DIGEST_SIZE); + /* wipe context */ memset(sctx, 0, sizeof *sctx); } @@ -128,7 +129,7 @@ static struct crypto_alg alg = { .cra_priority = CRYPT_S390_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_DIGEST, .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx), + .cra_ctxsize = sizeof(struct s390_sha1_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(alg.cra_list), .cra_u = { .digest = { diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c index 78436c696d3..2ced3330bce 100644 --- a/arch/s390/crypto/sha256_s390.c +++ b/arch/s390/crypto/sha256_s390.c @@ -26,7 +26,7 @@ #define SHA256_BLOCK_SIZE 64 struct s390_sha256_ctx { - u64 count; + u64 count; /* message length */ u32 state[8]; u8 buf[2 * SHA256_BLOCK_SIZE]; }; @@ -54,10 +54,9 @@ static void sha256_update(struct crypto_tfm *tfm, const u8 *data, int ret; /* how much is already in the buffer? */ - index = sctx->count / 8 & 0x3f; + index = sctx->count & 0x3f; - /* update message bit length */ - sctx->count += len * 8; + sctx->count += len; if ((index + len) < SHA256_BLOCK_SIZE) goto store; @@ -87,12 +86,17 @@ store: memcpy(sctx->buf + index , data, len); } -static void pad_message(struct s390_sha256_ctx* sctx) +/* Add padding and return the message digest */ +static void sha256_final(struct crypto_tfm *tfm, u8 *out) { - int index, end; + struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm); + u64 bits; + unsigned int index, end; + int ret; - index = sctx->count / 8 & 0x3f; - end = index < 56 ? SHA256_BLOCK_SIZE : 2 * SHA256_BLOCK_SIZE; + /* must perform manual padding */ + index = sctx->count & 0x3f; + end = (index < 56) ? 
SHA256_BLOCK_SIZE : (2 * SHA256_BLOCK_SIZE); /* start pad with 1 */ sctx->buf[index] = 0x80; @@ -102,21 +106,11 @@ static void pad_message(struct s390_sha256_ctx* sctx) memset(sctx->buf + index, 0x00, end - index - 8); /* append message length */ - memcpy(sctx->buf + end - 8, &sctx->count, sizeof sctx->count); - - sctx->count = end * 8; -} - -/* Add padding and return the message digest */ -static void sha256_final(struct crypto_tfm *tfm, u8 *out) -{ - struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm); - - /* must perform manual padding */ - pad_message(sctx); + bits = sctx->count * 8; + memcpy(sctx->buf + end - 8, &bits, sizeof(bits)); - crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf, - sctx->count / 8); + ret = crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf, end); + BUG_ON(ret != end); /* copy digest to out */ memcpy(out, sctx->state, SHA256_DIGEST_SIZE); diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 741d2bbb2b3..0e4da8a7d82 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -12,6 +12,7 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y # CONFIG_ARCH_HAS_ILOG2_U64 is not set CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_TIME=y +CONFIG_GENERIC_BUG=y CONFIG_NO_IOMEM=y CONFIG_S390=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" @@ -166,6 +167,7 @@ CONFIG_NO_IDLE_HZ=y CONFIG_NO_IDLE_HZ_INIT=y CONFIG_S390_HYPFS_FS=y CONFIG_KEXEC=y +# CONFIG_ZFCPDUMP is not set # # Networking @@ -705,6 +707,7 @@ CONFIG_DEBUG_MUTEXES=y CONFIG_DEBUG_SPINLOCK_SLEEP=y # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set # CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y # CONFIG_DEBUG_INFO is not set # CONFIG_DEBUG_VM is not set # CONFIG_DEBUG_LIST is not set diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 5492d25d7d6..3195d375bd5 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -6,7 +6,7 @@ EXTRA_AFLAGS := -traditional obj-y := bitmap.o traps.o time.o process.o base.o early.o \ setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ - semaphore.o s390_ext.o debug.o irq.o ipl.o + semaphore.o s390_ext.o debug.o irq.o ipl.o dis.o obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 664c669b185..5236fdb17fc 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -495,29 +495,34 @@ sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo) * sys32_execve() executes a new program after the asm stub has set * things up for us. This should basically do what I want it to. 
*/ -asmlinkage long -sys32_execve(struct pt_regs regs) +asmlinkage long sys32_execve(void) { - int error; - char * filename; + struct pt_regs *regs = task_pt_regs(current); + char *filename; + unsigned long result; + int rc; - filename = getname(compat_ptr(regs.orig_gpr2)); - error = PTR_ERR(filename); - if (IS_ERR(filename)) + filename = getname(compat_ptr(regs->orig_gpr2)); + if (IS_ERR(filename)) { + result = PTR_ERR(filename); goto out; - error = compat_do_execve(filename, compat_ptr(regs.gprs[3]), - compat_ptr(regs.gprs[4]), ®s); - if (error == 0) - { - task_lock(current); - current->ptrace &= ~PT_DTRACE; - task_unlock(current); - current->thread.fp_regs.fpc=0; - asm volatile("sfpc %0,0" : : "d" (0)); } + rc = compat_do_execve(filename, compat_ptr(regs->gprs[3]), + compat_ptr(regs->gprs[4]), regs); + if (rc) { + result = rc; + goto out_putname; + } + task_lock(current); + current->ptrace &= ~PT_DTRACE; + task_unlock(current); + current->thread.fp_regs.fpc=0; + asm volatile("sfpc %0,0" : : "d" (0)); + result = regs->gprs[2]; +out_putname: putname(filename); out: - return error; + return result; } @@ -918,19 +923,20 @@ asmlinkage long sys32_write(unsigned int fd, char __user * buf, size_t count) return sys_write(fd, buf, count); } -asmlinkage long sys32_clone(struct pt_regs regs) +asmlinkage long sys32_clone(void) { - unsigned long clone_flags; - unsigned long newsp; + struct pt_regs *regs = task_pt_regs(current); + unsigned long clone_flags; + unsigned long newsp; int __user *parent_tidptr, *child_tidptr; - clone_flags = regs.gprs[3] & 0xffffffffUL; - newsp = regs.orig_gpr2 & 0x7fffffffUL; - parent_tidptr = compat_ptr(regs.gprs[4]); - child_tidptr = compat_ptr(regs.gprs[5]); - if (!newsp) - newsp = regs.gprs[15]; - return do_fork(clone_flags, newsp, ®s, 0, + clone_flags = regs->gprs[3] & 0xffffffffUL; + newsp = regs->orig_gpr2 & 0x7fffffffUL; + parent_tidptr = compat_ptr(regs->gprs[4]); + child_tidptr = compat_ptr(regs->gprs[5]); + if (!newsp) + newsp = regs->gprs[15]; + return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr); } diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 887a9881d0d..80a54a0149a 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -255,9 +255,9 @@ sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, } asmlinkage long -sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss, - struct pt_regs *regs) +sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss) { + struct pt_regs *regs = task_pt_regs(current); stack_t kss, koss; unsigned long ss_sp; int ret, err = 0; @@ -344,8 +344,9 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) return 0; } -asmlinkage long sys32_sigreturn(struct pt_regs *regs) +asmlinkage long sys32_sigreturn(void) { + struct pt_regs *regs = task_pt_regs(current); sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15]; sigset_t set; @@ -370,8 +371,9 @@ badframe: return 0; } -asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs) +asmlinkage long sys32_rt_sigreturn(void) { + struct pt_regs *regs = task_pt_regs(current); rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15]; sigset_t set; stack_t st; @@ -407,8 +409,8 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs) return regs->gprs[2]; badframe: - force_sig(SIGSEGV, current); - return 0; + force_sig(SIGSEGV, current); + return 0; } /* diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c new file 
mode 100644 index 00000000000..dabaf98943d --- /dev/null +++ b/arch/s390/kernel/dis.c @@ -0,0 +1,1278 @@ +/* + * arch/s390/kernel/dis.c + * + * Disassemble s390 instructions. + * + * Copyright IBM Corp. 2007 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), + */ + +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/ptrace.h> +#include <linux/timer.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/smp_lock.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/kallsyms.h> +#include <linux/reboot.h> +#include <linux/kprobes.h> + +#include <asm/system.h> +#include <asm/uaccess.h> +#include <asm/io.h> +#include <asm/atomic.h> +#include <asm/mathemu.h> +#include <asm/cpcmd.h> +#include <asm/s390_ext.h> +#include <asm/lowcore.h> +#include <asm/debug.h> +#include <asm/kdebug.h> + +#ifndef CONFIG_64BIT +#define ONELONG "%08lx: " +#else /* CONFIG_64BIT */ +#define ONELONG "%016lx: " +#endif /* CONFIG_64BIT */ + +#define OPERAND_GPR 0x1 /* Operand printed as %rx */ +#define OPERAND_FPR 0x2 /* Operand printed as %fx */ +#define OPERAND_AR 0x4 /* Operand printed as %ax */ +#define OPERAND_CR 0x8 /* Operand printed as %cx */ +#define OPERAND_DISP 0x10 /* Operand printed as displacement */ +#define OPERAND_BASE 0x20 /* Operand printed as base register */ +#define OPERAND_INDEX 0x40 /* Operand printed as index register */ +#define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */ +#define OPERAND_SIGNED 0x100 /* Operand printed as signed value */ +#define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */ + +enum { + UNUSED, /* Indicates the end of the operand list */ + R_8, /* GPR starting at position 8 */ + R_12, /* GPR starting at position 12 */ + R_16, /* GPR starting at position 16 */ + R_20, /* GPR starting at position 20 */ + R_24, /* GPR starting at position 24 */ + R_28, /* GPR starting at position 28 */ + R_32, /* GPR starting at position 32 */ + F_8, /* FPR starting at position 8 */ + F_12, /* FPR starting at position 12 */ + F_16, /* FPR starting at position 16 */ + F_20, /* FPR starting at position 16 */ + F_24, /* FPR starting at position 24 */ + F_28, /* FPR starting at position 28 */ + F_32, /* FPR starting at position 32 */ + A_8, /* Access reg. starting at position 8 */ + A_12, /* Access reg. starting at position 12 */ + A_24, /* Access reg. starting at position 24 */ + A_28, /* Access reg. starting at position 28 */ + C_8, /* Control reg. starting at position 8 */ + C_12, /* Control reg. 
starting at position 12 */ + B_16, /* Base register starting at position 16 */ + B_32, /* Base register starting at position 32 */ + X_12, /* Index register starting at position 12 */ + D_20, /* Displacement starting at position 20 */ + D_36, /* Displacement starting at position 36 */ + D20_20, /* 20 bit displacement starting at 20 */ + L4_8, /* 4 bit length starting at position 8 */ + L4_12, /* 4 bit length starting at position 12 */ + L8_8, /* 8 bit length starting at position 8 */ + U4_8, /* 4 bit unsigned value starting at 8 */ + U4_12, /* 4 bit unsigned value starting at 12 */ + U4_16, /* 4 bit unsigned value starting at 16 */ + U4_20, /* 4 bit unsigned value starting at 20 */ + U8_8, /* 8 bit unsigned value starting at 8 */ + U8_16, /* 8 bit unsigned value starting at 16 */ + I16_16, /* 16 bit signed value starting at 16 */ + U16_16, /* 16 bit unsigned value starting at 16 */ + J16_16, /* PC relative jump offset at 16 */ + J32_16, /* PC relative long offset at 16 */ + I32_16, /* 32 bit signed value starting at 16 */ + U32_16, /* 32 bit unsigned value starting at 16 */ + M_16, /* 4 bit optional mask starting at 16 */ + RO_28, /* optional GPR starting at position 28 */ +}; + +/* + * Enumeration of the different instruction formats. + * For details consult the principles of operation. + */ +enum { + INSTR_INVALID, + INSTR_E, INSTR_RIE_RRP, INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, + INSTR_RIL_UP, INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP, + INSTR_RRE_00, INSTR_RRE_0R, INSTR_RRE_AA, INSTR_RRE_AR, INSTR_RRE_F0, + INSTR_RRE_FF, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF, INSTR_RRE_RR, + INSTR_RRE_RR_OPT, INSTR_RRF_F0FF, INSTR_RRF_FUFF, INSTR_RRF_M0RR, + INSTR_RRF_R0RR, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, + INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR, + INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, INSTR_RSI_RRP, + INSTR_RSL_R0RD, INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, + INSTR_RSY_RURD, INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, + INSTR_RS_RRRD, INSTR_RS_RURD, INSTR_RXE_FRRD, INSTR_RXE_RRRD, + INSTR_RXF_FRRDF, INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RX_FRRD, + INSTR_RX_RRRD, INSTR_RX_URRD, INSTR_SIY_URD, INSTR_SI_URD, + INSTR_SSE_RDRD, INSTR_SSF_RRDRD, INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, + INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3, + INSTR_S_00, INSTR_S_RD, +}; + +struct operand { + int bits; /* The number of bits in the operand. */ + int shift; /* The number of bits to shift. */ + int flags; /* One bit syntax flags. 
*/ +}; + +struct insn { + const char name[5]; + unsigned char opfrag; + unsigned char format; +}; + +static const struct operand operands[] = +{ + [UNUSED] = { 0, 0, 0 }, + [R_8] = { 4, 8, OPERAND_GPR }, + [R_12] = { 4, 12, OPERAND_GPR }, + [R_16] = { 4, 16, OPERAND_GPR }, + [R_20] = { 4, 20, OPERAND_GPR }, + [R_24] = { 4, 24, OPERAND_GPR }, + [R_28] = { 4, 28, OPERAND_GPR }, + [R_32] = { 4, 32, OPERAND_GPR }, + [F_8] = { 4, 8, OPERAND_FPR }, + [F_12] = { 4, 12, OPERAND_FPR }, + [F_16] = { 4, 16, OPERAND_FPR }, + [F_20] = { 4, 16, OPERAND_FPR }, + [F_24] = { 4, 24, OPERAND_FPR }, + [F_28] = { 4, 28, OPERAND_FPR }, + [F_32] = { 4, 32, OPERAND_FPR }, + [A_8] = { 4, 8, OPERAND_AR }, + [A_12] = { 4, 12, OPERAND_AR }, + [A_24] = { 4, 24, OPERAND_AR }, + [A_28] = { 4, 28, OPERAND_AR }, + [C_8] = { 4, 8, OPERAND_CR }, + [C_12] = { 4, 12, OPERAND_CR }, + [B_16] = { 4, 16, OPERAND_BASE | OPERAND_GPR }, + [B_32] = { 4, 32, OPERAND_BASE | OPERAND_GPR }, + [X_12] = { 4, 12, OPERAND_INDEX | OPERAND_GPR }, + [D_20] = { 12, 20, OPERAND_DISP }, + [D_36] = { 12, 36, OPERAND_DISP }, + [D20_20] = { 20, 20, OPERAND_DISP | OPERAND_SIGNED }, + [L4_8] = { 4, 8, OPERAND_LENGTH }, + [L4_12] = { 4, 12, OPERAND_LENGTH }, + [L8_8] = { 8, 8, OPERAND_LENGTH }, + [U4_8] = { 4, 8, 0 }, + [U4_12] = { 4, 12, 0 }, + [U4_16] = { 4, 16, 0 }, + [U4_20] = { 4, 20, 0 }, + [U8_8] = { 8, 8, 0 }, + [U8_16] = { 8, 16, 0 }, + [I16_16] = { 16, 16, OPERAND_SIGNED }, + [U16_16] = { 16, 16, 0 }, + [J16_16] = { 16, 16, OPERAND_PCREL }, + [J32_16] = { 32, 16, OPERAND_PCREL }, + [I32_16] = { 32, 16, OPERAND_SIGNED }, + [U32_16] = { 32, 16, 0 }, + [M_16] = { 4, 16, 0 }, + [RO_28] = { 4, 28, OPERAND_GPR } +}; + +static const unsigned char formats[][7] = { + [INSTR_E] = { 0xff, 0,0,0,0,0,0 }, /* e.g. pr */ + [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxhg */ + [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 }, /* e.g. brasl */ + [INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 }, /* e.g. brcl */ + [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 }, /* e.g. afi */ + [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 }, /* e.g. alfi */ + [INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 }, /* e.g. ahi */ + [INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 }, /* e.g. brct */ + [INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 }, /* e.g. tml */ + [INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 }, /* e.g. brc */ + [INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. palb */ + [INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 }, /* e.g. tb */ + [INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 }, /* e.g. cpya */ + [INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 }, /* e.g. sar */ + [INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 }, /* e.g. sqer */ + [INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 }, /* e.g. debr */ + [INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 }, /* e.g. ipm */ + [INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 }, /* e.g. ear */ + [INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 }, /* e.g. cefbr */ + [INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 }, /* e.g. lura */ + [INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 }, /* efpc, sfpc */ + [INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 }, /* e.g. madbr */ + [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },/* e.g. didbr */ + [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },/* e.g. .insn */ + [INSTR_RRF_R0RR] = { 0xff, R_24,R_28,R_16,0,0,0 }, /* e.g. idte */ + [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, /* e.g. fixr */ + [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, /* e.g. cfebr */ + [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, /* e.g. 
sske */ + [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 }, /* e.g. adr */ + [INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 }, /* e.g. spm */ + [INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 }, /* e.g. lr */ + [INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 }, /* e.g. svc */ + [INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 }, /* e.g. bcr */ + [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. lmh */ + [INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lmh */ + [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icmh */ + [INSTR_RSL_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. tp */ + [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxh */ + [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },/* e.g. stmy */ + [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 }, + /* e.g. icmh */ + [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },/* e.g. lamy */ + [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },/* e.g. lamy */ + [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, /* e.g. lam */ + [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lctl */ + [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. sll */ + [INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. cs */ + [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icm */ + [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. axbr */ + [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. lg */ + [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 }, + /* e.g. madb */ + [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },/* e.g. ly */ + [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },/* e.g. ley */ + [INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. ae */ + [INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. l */ + [INSTR_RX_URRD] = { 0x00, U4_8,D_20,X_12,B_16,0,0 }, /* e.g. bc */ + [INSTR_SI_URD] = { 0x00, D_20,B_16,U8_8,0,0,0 }, /* e.g. cli */ + [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 }, /* e.g. tmy */ + [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 }, /* e.g. mvsdk */ + [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 }, + /* e.g. mvc */ + [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 }, + /* e.g. srp */ + [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 }, + /* e.g. pack */ + [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 }, + /* e.g. mvck */ + [INSTR_SS_RRRDRD2]= { 0xff, R_8,D_20,B_16,R_12,D_36,B_32 }, + /* e.g. plo */ + [INSTR_SS_RRRDRD3]= { 0xff, R_8,R_12,D_20,B_16,D_36,B_32 }, + /* e.g. lmd */ + [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. hsch */ + [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 }, /* e.g. lpsw */ + [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 }, + /* e.g. 
mvcos */ +}; + +static struct insn opcode[] = { +#ifdef CONFIG_64BIT + { "lmd", 0xef, INSTR_SS_RRRDRD3 }, +#endif + { "spm", 0x04, INSTR_RR_R0 }, + { "balr", 0x05, INSTR_RR_RR }, + { "bctr", 0x06, INSTR_RR_RR }, + { "bcr", 0x07, INSTR_RR_UR }, + { "svc", 0x0a, INSTR_RR_U0 }, + { "bsm", 0x0b, INSTR_RR_RR }, + { "bassm", 0x0c, INSTR_RR_RR }, + { "basr", 0x0d, INSTR_RR_RR }, + { "mvcl", 0x0e, INSTR_RR_RR }, + { "clcl", 0x0f, INSTR_RR_RR }, + { "lpr", 0x10, INSTR_RR_RR }, + { "lnr", 0x11, INSTR_RR_RR }, + { "ltr", 0x12, INSTR_RR_RR }, + { "lcr", 0x13, INSTR_RR_RR }, + { "nr", 0x14, INSTR_RR_RR }, + { "clr", 0x15, INSTR_RR_RR }, + { "or", 0x16, INSTR_RR_RR }, + { "xr", 0x17, INSTR_RR_RR }, + { "lr", 0x18, INSTR_RR_RR }, + { "cr", 0x19, INSTR_RR_RR }, + { "ar", 0x1a, INSTR_RR_RR }, + { "sr", 0x1b, INSTR_RR_RR }, + { "mr", 0x1c, INSTR_RR_RR }, + { "dr", 0x1d, INSTR_RR_RR }, + { "alr", 0x1e, INSTR_RR_RR }, + { "slr", 0x1f, INSTR_RR_RR }, + { "lpdr", 0x20, INSTR_RR_FF }, + { "lndr", 0x21, INSTR_RR_FF }, + { "ltdr", 0x22, INSTR_RR_FF }, + { "lcdr", 0x23, INSTR_RR_FF }, + { "hdr", 0x24, INSTR_RR_FF }, + { "ldxr", 0x25, INSTR_RR_FF }, + { "lrdr", 0x25, INSTR_RR_FF }, + { "mxr", 0x26, INSTR_RR_FF }, + { "mxdr", 0x27, INSTR_RR_FF }, + { "ldr", 0x28, INSTR_RR_FF }, + { "cdr", 0x29, INSTR_RR_FF }, + { "adr", 0x2a, INSTR_RR_FF }, + { "sdr", 0x2b, INSTR_RR_FF }, + { "mdr", 0x2c, INSTR_RR_FF }, + { "ddr", 0x2d, INSTR_RR_FF }, + { "awr", 0x2e, INSTR_RR_FF }, + { "swr", 0x2f, INSTR_RR_FF }, + { "lper", 0x30, INSTR_RR_FF }, + { "lner", 0x31, INSTR_RR_FF }, + { "lter", 0x32, INSTR_RR_FF }, + { "lcer", 0x33, INSTR_RR_FF }, + { "her", 0x34, INSTR_RR_FF }, + { "ledr", 0x35, INSTR_RR_FF }, + { "lrer", 0x35, INSTR_RR_FF }, + { "axr", 0x36, INSTR_RR_FF }, + { "sxr", 0x37, INSTR_RR_FF }, + { "ler", 0x38, INSTR_RR_FF }, + { "cer", 0x39, INSTR_RR_FF }, + { "aer", 0x3a, INSTR_RR_FF }, + { "ser", 0x3b, INSTR_RR_FF }, + { "mder", 0x3c, INSTR_RR_FF }, + { "mer", 0x3c, INSTR_RR_FF }, + { "der", 0x3d, INSTR_RR_FF }, + { "aur", 0x3e, INSTR_RR_FF }, + { "sur", 0x3f, INSTR_RR_FF }, + { "sth", 0x40, INSTR_RX_RRRD }, + { "la", 0x41, INSTR_RX_RRRD }, + { "stc", 0x42, INSTR_RX_RRRD }, + { "ic", 0x43, INSTR_RX_RRRD }, + { "ex", 0x44, INSTR_RX_RRRD }, + { "bal", 0x45, INSTR_RX_RRRD }, + { "bct", 0x46, INSTR_RX_RRRD }, + { "bc", 0x47, INSTR_RX_URRD }, + { "lh", 0x48, INSTR_RX_RRRD }, + { "ch", 0x49, INSTR_RX_RRRD }, + { "ah", 0x4a, INSTR_RX_RRRD }, + { "sh", 0x4b, INSTR_RX_RRRD }, + { "mh", 0x4c, INSTR_RX_RRRD }, + { "bas", 0x4d, INSTR_RX_RRRD }, + { "cvd", 0x4e, INSTR_RX_RRRD }, + { "cvb", 0x4f, INSTR_RX_RRRD }, + { "st", 0x50, INSTR_RX_RRRD }, + { "lae", 0x51, INSTR_RX_RRRD }, + { "n", 0x54, INSTR_RX_RRRD }, + { "cl", 0x55, INSTR_RX_RRRD }, + { "o", 0x56, INSTR_RX_RRRD }, + { "x", 0x57, INSTR_RX_RRRD }, + { "l", 0x58, INSTR_RX_RRRD }, + { "c", 0x59, INSTR_RX_RRRD }, + { "a", 0x5a, INSTR_RX_RRRD }, + { "s", 0x5b, INSTR_RX_RRRD }, + { "m", 0x5c, INSTR_RX_RRRD }, + { "d", 0x5d, INSTR_RX_RRRD }, + { "al", 0x5e, INSTR_RX_RRRD }, + { "sl", 0x5f, INSTR_RX_RRRD }, + { "std", 0x60, INSTR_RX_FRRD }, + { "mxd", 0x67, INSTR_RX_FRRD }, + { "ld", 0x68, INSTR_RX_FRRD }, + { "cd", 0x69, INSTR_RX_FRRD }, + { "ad", 0x6a, INSTR_RX_FRRD }, + { "sd", 0x6b, INSTR_RX_FRRD }, + { "md", 0x6c, INSTR_RX_FRRD }, + { "dd", 0x6d, INSTR_RX_FRRD }, + { "aw", 0x6e, INSTR_RX_FRRD }, + { "sw", 0x6f, INSTR_RX_FRRD }, + { "ste", 0x70, INSTR_RX_FRRD }, + { "ms", 0x71, INSTR_RX_RRRD }, + { "le", 0x78, INSTR_RX_FRRD }, + { "ce", 0x79, INSTR_RX_FRRD }, + { "ae", 0x7a, 
INSTR_RX_FRRD }, + { "se", 0x7b, INSTR_RX_FRRD }, + { "mde", 0x7c, INSTR_RX_FRRD }, + { "me", 0x7c, INSTR_RX_FRRD }, + { "de", 0x7d, INSTR_RX_FRRD }, + { "au", 0x7e, INSTR_RX_FRRD }, + { "su", 0x7f, INSTR_RX_FRRD }, + { "ssm", 0x80, INSTR_S_RD }, + { "lpsw", 0x82, INSTR_S_RD }, + { "diag", 0x83, INSTR_RS_RRRD }, + { "brxh", 0x84, INSTR_RSI_RRP }, + { "brxle", 0x85, INSTR_RSI_RRP }, + { "bxh", 0x86, INSTR_RS_RRRD }, + { "bxle", 0x87, INSTR_RS_RRRD }, + { "srl", 0x88, INSTR_RS_R0RD }, + { "sll", 0x89, INSTR_RS_R0RD }, + { "sra", 0x8a, INSTR_RS_R0RD }, + { "sla", 0x8b, INSTR_RS_R0RD }, + { "srdl", 0x8c, INSTR_RS_R0RD }, + { "sldl", 0x8d, INSTR_RS_R0RD }, + { "srda", 0x8e, INSTR_RS_R0RD }, + { "slda", 0x8f, INSTR_RS_R0RD }, + { "stm", 0x90, INSTR_RS_RRRD }, + { "tm", 0x91, INSTR_SI_URD }, + { "mvi", 0x92, INSTR_SI_URD }, + { "ts", 0x93, INSTR_S_RD }, + { "ni", 0x94, INSTR_SI_URD }, + { "cli", 0x95, INSTR_SI_URD }, + { "oi", 0x96, INSTR_SI_URD }, + { "xi", 0x97, INSTR_SI_URD }, + { "lm", 0x98, INSTR_RS_RRRD }, + { "trace", 0x99, INSTR_RS_RRRD }, + { "lam", 0x9a, INSTR_RS_AARD }, + { "stam", 0x9b, INSTR_RS_AARD }, + { "mvcle", 0xa8, INSTR_RS_RRRD }, + { "clcle", 0xa9, INSTR_RS_RRRD }, + { "stnsm", 0xac, INSTR_SI_URD }, + { "stosm", 0xad, INSTR_SI_URD }, + { "sigp", 0xae, INSTR_RS_RRRD }, + { "mc", 0xaf, INSTR_SI_URD }, + { "lra", 0xb1, INSTR_RX_RRRD }, + { "stctl", 0xb6, INSTR_RS_CCRD }, + { "lctl", 0xb7, INSTR_RS_CCRD }, + { "cs", 0xba, INSTR_RS_RRRD }, + { "cds", 0xbb, INSTR_RS_RRRD }, + { "clm", 0xbd, INSTR_RS_RURD }, + { "stcm", 0xbe, INSTR_RS_RURD }, + { "icm", 0xbf, INSTR_RS_RURD }, + { "mvn", 0xd1, INSTR_SS_L0RDRD }, + { "mvc", 0xd2, INSTR_SS_L0RDRD }, + { "mvz", 0xd3, INSTR_SS_L0RDRD }, + { "nc", 0xd4, INSTR_SS_L0RDRD }, + { "clc", 0xd5, INSTR_SS_L0RDRD }, + { "oc", 0xd6, INSTR_SS_L0RDRD }, + { "xc", 0xd7, INSTR_SS_L0RDRD }, + { "mvck", 0xd9, INSTR_SS_RRRDRD }, + { "mvcp", 0xda, INSTR_SS_RRRDRD }, + { "mvcs", 0xdb, INSTR_SS_RRRDRD }, + { "tr", 0xdc, INSTR_SS_L0RDRD }, + { "trt", 0xdd, INSTR_SS_L0RDRD }, + { "ed", 0xde, INSTR_SS_L0RDRD }, + { "edmk", 0xdf, INSTR_SS_L0RDRD }, + { "pku", 0xe1, INSTR_SS_L0RDRD }, + { "unpku", 0xe2, INSTR_SS_L0RDRD }, + { "mvcin", 0xe8, INSTR_SS_L0RDRD }, + { "pka", 0xe9, INSTR_SS_L0RDRD }, + { "unpka", 0xea, INSTR_SS_L0RDRD }, + { "plo", 0xee, INSTR_SS_RRRDRD2 }, + { "srp", 0xf0, INSTR_SS_LIRDRD }, + { "mvo", 0xf1, INSTR_SS_LLRDRD }, + { "pack", 0xf2, INSTR_SS_LLRDRD }, + { "unpk", 0xf3, INSTR_SS_LLRDRD }, + { "zap", 0xf8, INSTR_SS_LLRDRD }, + { "cp", 0xf9, INSTR_SS_LLRDRD }, + { "ap", 0xfa, INSTR_SS_LLRDRD }, + { "sp", 0xfb, INSTR_SS_LLRDRD }, + { "mp", 0xfc, INSTR_SS_LLRDRD }, + { "dp", 0xfd, INSTR_SS_LLRDRD }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_01[] = { +#ifdef CONFIG_64BIT + { "sam64", 0x0e, INSTR_E }, +#endif + { "pr", 0x01, INSTR_E }, + { "upt", 0x02, INSTR_E }, + { "sckpf", 0x07, INSTR_E }, + { "tam", 0x0b, INSTR_E }, + { "sam24", 0x0c, INSTR_E }, + { "sam31", 0x0d, INSTR_E }, + { "trap2", 0xff, INSTR_E }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_a5[] = { +#ifdef CONFIG_64BIT + { "iihh", 0x00, INSTR_RI_RU }, + { "iihl", 0x01, INSTR_RI_RU }, + { "iilh", 0x02, INSTR_RI_RU }, + { "iill", 0x03, INSTR_RI_RU }, + { "nihh", 0x04, INSTR_RI_RU }, + { "nihl", 0x05, INSTR_RI_RU }, + { "nilh", 0x06, INSTR_RI_RU }, + { "nill", 0x07, INSTR_RI_RU }, + { "oihh", 0x08, INSTR_RI_RU }, + { "oihl", 0x09, INSTR_RI_RU }, + { "oilh", 0x0a, INSTR_RI_RU }, + { "oill", 0x0b, INSTR_RI_RU }, + { "llihh", 0x0c, INSTR_RI_RU }, + { 
"llihl", 0x0d, INSTR_RI_RU }, + { "llilh", 0x0e, INSTR_RI_RU }, + { "llill", 0x0f, INSTR_RI_RU }, +#endif + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_a7[] = { +#ifdef CONFIG_64BIT + { "tmhh", 0x02, INSTR_RI_RU }, + { "tmhl", 0x03, INSTR_RI_RU }, + { "brctg", 0x07, INSTR_RI_RP }, + { "lghi", 0x09, INSTR_RI_RI }, + { "aghi", 0x0b, INSTR_RI_RI }, + { "mghi", 0x0d, INSTR_RI_RI }, + { "cghi", 0x0f, INSTR_RI_RI }, +#endif + { "tmlh", 0x00, INSTR_RI_RU }, + { "tmll", 0x01, INSTR_RI_RU }, + { "brc", 0x04, INSTR_RI_UP }, + { "bras", 0x05, INSTR_RI_RP }, + { "brct", 0x06, INSTR_RI_RP }, + { "lhi", 0x08, INSTR_RI_RI }, + { "ahi", 0x0a, INSTR_RI_RI }, + { "mhi", 0x0c, INSTR_RI_RI }, + { "chi", 0x0e, INSTR_RI_RI }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_b2[] = { +#ifdef CONFIG_64BIT + { "sske", 0x2b, INSTR_RRF_M0RR }, + { "stckf", 0x7c, INSTR_S_RD }, + { "cu21", 0xa6, INSTR_RRF_M0RR }, + { "cuutf", 0xa6, INSTR_RRF_M0RR }, + { "cu12", 0xa7, INSTR_RRF_M0RR }, + { "cutfu", 0xa7, INSTR_RRF_M0RR }, + { "stfle", 0xb0, INSTR_S_RD }, + { "lpswe", 0xb2, INSTR_S_RD }, +#endif + { "stidp", 0x02, INSTR_S_RD }, + { "sck", 0x04, INSTR_S_RD }, + { "stck", 0x05, INSTR_S_RD }, + { "sckc", 0x06, INSTR_S_RD }, + { "stckc", 0x07, INSTR_S_RD }, + { "spt", 0x08, INSTR_S_RD }, + { "stpt", 0x09, INSTR_S_RD }, + { "spka", 0x0a, INSTR_S_RD }, + { "ipk", 0x0b, INSTR_S_00 }, + { "ptlb", 0x0d, INSTR_S_00 }, + { "spx", 0x10, INSTR_S_RD }, + { "stpx", 0x11, INSTR_S_RD }, + { "stap", 0x12, INSTR_S_RD }, + { "sie", 0x14, INSTR_S_RD }, + { "pc", 0x18, INSTR_S_RD }, + { "sac", 0x19, INSTR_S_RD }, + { "cfc", 0x1a, INSTR_S_RD }, + { "ipte", 0x21, INSTR_RRE_RR }, + { "ipm", 0x22, INSTR_RRE_R0 }, + { "ivsk", 0x23, INSTR_RRE_RR }, + { "iac", 0x24, INSTR_RRE_R0 }, + { "ssar", 0x25, INSTR_RRE_R0 }, + { "epar", 0x26, INSTR_RRE_R0 }, + { "esar", 0x27, INSTR_RRE_R0 }, + { "pt", 0x28, INSTR_RRE_RR }, + { "iske", 0x29, INSTR_RRE_RR }, + { "rrbe", 0x2a, INSTR_RRE_RR }, + { "sske", 0x2b, INSTR_RRE_RR }, + { "tb", 0x2c, INSTR_RRE_0R }, + { "dxr", 0x2d, INSTR_RRE_F0 }, + { "pgin", 0x2e, INSTR_RRE_RR }, + { "pgout", 0x2f, INSTR_RRE_RR }, + { "csch", 0x30, INSTR_S_00 }, + { "hsch", 0x31, INSTR_S_00 }, + { "msch", 0x32, INSTR_S_RD }, + { "ssch", 0x33, INSTR_S_RD }, + { "stsch", 0x34, INSTR_S_RD }, + { "tsch", 0x35, INSTR_S_RD }, + { "tpi", 0x36, INSTR_S_RD }, + { "sal", 0x37, INSTR_S_00 }, + { "rsch", 0x38, INSTR_S_00 }, + { "stcrw", 0x39, INSTR_S_RD }, + { "stcps", 0x3a, INSTR_S_RD }, + { "rchp", 0x3b, INSTR_S_00 }, + { "schm", 0x3c, INSTR_S_00 }, + { "bakr", 0x40, INSTR_RRE_RR }, + { "cksm", 0x41, INSTR_RRE_RR }, + { "sqdr", 0x44, INSTR_RRE_F0 }, + { "sqer", 0x45, INSTR_RRE_F0 }, + { "stura", 0x46, INSTR_RRE_RR }, + { "msta", 0x47, INSTR_RRE_R0 }, + { "palb", 0x48, INSTR_RRE_00 }, + { "ereg", 0x49, INSTR_RRE_RR }, + { "esta", 0x4a, INSTR_RRE_RR }, + { "lura", 0x4b, INSTR_RRE_RR }, + { "tar", 0x4c, INSTR_RRE_AR }, + { "cpya", INSTR_RRE_AA }, + { "sar", 0x4e, INSTR_RRE_AR }, + { "ear", 0x4f, INSTR_RRE_RA }, + { "csp", 0x50, INSTR_RRE_RR }, + { "msr", 0x52, INSTR_RRE_RR }, + { "mvpg", 0x54, INSTR_RRE_RR }, + { "mvst", 0x55, INSTR_RRE_RR }, + { "cuse", 0x57, INSTR_RRE_RR }, + { "bsg", 0x58, INSTR_RRE_RR }, + { "bsa", 0x5a, INSTR_RRE_RR }, + { "clst", 0x5d, INSTR_RRE_RR }, + { "srst", 0x5e, INSTR_RRE_RR }, + { "cmpsc", 0x63, INSTR_RRE_RR }, + { "cmpsc", 0x63, INSTR_RRE_RR }, + { "siga", 0x74, INSTR_S_RD }, + { "xsch", 0x76, INSTR_S_00 }, + { "rp", 0x77, INSTR_S_RD }, + { "stcke", 0x78, INSTR_S_RD }, + { "sacf", 0x79, 
INSTR_S_RD }, + { "stsi", 0x7d, INSTR_S_RD }, + { "srnm", 0x99, INSTR_S_RD }, + { "stfpc", 0x9c, INSTR_S_RD }, + { "lfpc", 0x9d, INSTR_S_RD }, + { "tre", 0xa5, INSTR_RRE_RR }, + { "cuutf", 0xa6, INSTR_RRE_RR }, + { "cutfu", 0xa7, INSTR_RRE_RR }, + { "stfl", 0xb1, INSTR_S_RD }, + { "trap4", 0xff, INSTR_S_RD }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_b3[] = { +#ifdef CONFIG_64BIT + { "maylr", 0x38, INSTR_RRF_F0FF }, + { "mylr", 0x39, INSTR_RRF_F0FF }, + { "mayr", 0x3a, INSTR_RRF_F0FF }, + { "myr", 0x3b, INSTR_RRF_F0FF }, + { "mayhr", 0x3c, INSTR_RRF_F0FF }, + { "myhr", 0x3d, INSTR_RRF_F0FF }, + { "cegbr", 0xa4, INSTR_RRE_RR }, + { "cdgbr", 0xa5, INSTR_RRE_RR }, + { "cxgbr", 0xa6, INSTR_RRE_RR }, + { "cgebr", 0xa8, INSTR_RRF_U0RF }, + { "cgdbr", 0xa9, INSTR_RRF_U0RF }, + { "cgxbr", 0xaa, INSTR_RRF_U0RF }, + { "cfer", 0xb8, INSTR_RRF_U0RF }, + { "cfdr", 0xb9, INSTR_RRF_U0RF }, + { "cfxr", 0xba, INSTR_RRF_U0RF }, + { "cegr", 0xc4, INSTR_RRE_RR }, + { "cdgr", 0xc5, INSTR_RRE_RR }, + { "cxgr", 0xc6, INSTR_RRE_RR }, + { "cger", 0xc8, INSTR_RRF_U0RF }, + { "cgdr", 0xc9, INSTR_RRF_U0RF }, + { "cgxr", 0xca, INSTR_RRF_U0RF }, +#endif + { "lpebr", 0x00, INSTR_RRE_FF }, + { "lnebr", 0x01, INSTR_RRE_FF }, + { "ltebr", 0x02, INSTR_RRE_FF }, + { "lcebr", 0x03, INSTR_RRE_FF }, + { "ldebr", 0x04, INSTR_RRE_FF }, + { "lxdbr", 0x05, INSTR_RRE_FF }, + { "lxebr", 0x06, INSTR_RRE_FF }, + { "mxdbr", 0x07, INSTR_RRE_FF }, + { "kebr", 0x08, INSTR_RRE_FF }, + { "cebr", 0x09, INSTR_RRE_FF }, + { "aebr", 0x0a, INSTR_RRE_FF }, + { "sebr", 0x0b, INSTR_RRE_FF }, + { "mdebr", 0x0c, INSTR_RRE_FF }, + { "debr", 0x0d, INSTR_RRE_FF }, + { "maebr", 0x0e, INSTR_RRF_F0FF }, + { "msebr", 0x0f, INSTR_RRF_F0FF }, + { "lpdbr", 0x10, INSTR_RRE_FF }, + { "lndbr", 0x11, INSTR_RRE_FF }, + { "ltdbr", 0x12, INSTR_RRE_FF }, + { "lcdbr", 0x13, INSTR_RRE_FF }, + { "sqebr", 0x14, INSTR_RRE_FF }, + { "sqdbr", 0x15, INSTR_RRE_FF }, + { "sqxbr", 0x16, INSTR_RRE_FF }, + { "meebr", 0x17, INSTR_RRE_FF }, + { "kdbr", 0x18, INSTR_RRE_FF }, + { "cdbr", 0x19, INSTR_RRE_FF }, + { "adbr", 0x1a, INSTR_RRE_FF }, + { "sdbr", 0x1b, INSTR_RRE_FF }, + { "mdbr", 0x1c, INSTR_RRE_FF }, + { "ddbr", 0x1d, INSTR_RRE_FF }, + { "madbr", 0x1e, INSTR_RRF_F0FF }, + { "msdbr", 0x1f, INSTR_RRF_F0FF }, + { "lder", 0x24, INSTR_RRE_FF }, + { "lxdr", 0x25, INSTR_RRE_FF }, + { "lxer", 0x26, INSTR_RRE_FF }, + { "maer", 0x2e, INSTR_RRF_F0FF }, + { "mser", 0x2f, INSTR_RRF_F0FF }, + { "sqxr", 0x36, INSTR_RRE_FF }, + { "meer", 0x37, INSTR_RRE_FF }, + { "madr", 0x3e, INSTR_RRF_F0FF }, + { "msdr", 0x3f, INSTR_RRF_F0FF }, + { "lpxbr", 0x40, INSTR_RRE_FF }, + { "lnxbr", 0x41, INSTR_RRE_FF }, + { "ltxbr", 0x42, INSTR_RRE_FF }, + { "lcxbr", 0x43, INSTR_RRE_FF }, + { "ledbr", 0x44, INSTR_RRE_FF }, + { "ldxbr", 0x45, INSTR_RRE_FF }, + { "lexbr", 0x46, INSTR_RRE_FF }, + { "fixbr", 0x47, INSTR_RRF_U0FF }, + { "kxbr", 0x48, INSTR_RRE_FF }, + { "cxbr", 0x49, INSTR_RRE_FF }, + { "axbr", 0x4a, INSTR_RRE_FF }, + { "sxbr", 0x4b, INSTR_RRE_FF }, + { "mxbr", 0x4c, INSTR_RRE_FF }, + { "dxbr", 0x4d, INSTR_RRE_FF }, + { "tbedr", 0x50, INSTR_RRF_U0FF }, + { "tbdr", 0x51, INSTR_RRF_U0FF }, + { "diebr", 0x53, INSTR_RRF_FUFF }, + { "fiebr", 0x57, INSTR_RRF_U0FF }, + { "thder", 0x58, INSTR_RRE_RR }, + { "thdr", 0x59, INSTR_RRE_RR }, + { "didbr", 0x5b, INSTR_RRF_FUFF }, + { "fidbr", 0x5f, INSTR_RRF_U0FF }, + { "lpxr", 0x60, INSTR_RRE_FF }, + { "lnxr", 0x61, INSTR_RRE_FF }, + { "ltxr", 0x62, INSTR_RRE_FF }, + { "lcxr", 0x63, INSTR_RRE_FF }, + { "lxr", 0x65, INSTR_RRE_RR }, + { "lexr", 0x66, 
INSTR_RRE_FF }, + { "fixr", 0x67, INSTR_RRF_U0FF }, + { "cxr", 0x69, INSTR_RRE_FF }, + { "lzer", 0x74, INSTR_RRE_R0 }, + { "lzdr", 0x75, INSTR_RRE_R0 }, + { "lzxr", 0x76, INSTR_RRE_R0 }, + { "fier", 0x77, INSTR_RRF_U0FF }, + { "fidr", 0x7f, INSTR_RRF_U0FF }, + { "sfpc", 0x84, INSTR_RRE_RR_OPT }, + { "efpc", 0x8c, INSTR_RRE_RR_OPT }, + { "cefbr", 0x94, INSTR_RRE_RF }, + { "cdfbr", 0x95, INSTR_RRE_RF }, + { "cxfbr", 0x96, INSTR_RRE_RF }, + { "cfebr", 0x98, INSTR_RRF_U0RF }, + { "cfdbr", 0x99, INSTR_RRF_U0RF }, + { "cfxbr", 0x9a, INSTR_RRF_U0RF }, + { "cefr", 0xb4, INSTR_RRE_RF }, + { "cdfr", 0xb5, INSTR_RRE_RF }, + { "cxfr", 0xb6, INSTR_RRE_RF }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_b9[] = { +#ifdef CONFIG_64BIT + { "lpgr", 0x00, INSTR_RRE_RR }, + { "lngr", 0x01, INSTR_RRE_RR }, + { "ltgr", 0x02, INSTR_RRE_RR }, + { "lcgr", 0x03, INSTR_RRE_RR }, + { "lgr", 0x04, INSTR_RRE_RR }, + { "lurag", 0x05, INSTR_RRE_RR }, + { "lgbr", 0x06, INSTR_RRE_RR }, + { "lghr", 0x07, INSTR_RRE_RR }, + { "agr", 0x08, INSTR_RRE_RR }, + { "sgr", 0x09, INSTR_RRE_RR }, + { "algr", 0x0a, INSTR_RRE_RR }, + { "slgr", 0x0b, INSTR_RRE_RR }, + { "msgr", 0x0c, INSTR_RRE_RR }, + { "dsgr", 0x0d, INSTR_RRE_RR }, + { "eregg", 0x0e, INSTR_RRE_RR }, + { "lrvgr", 0x0f, INSTR_RRE_RR }, + { "lpgfr", 0x10, INSTR_RRE_RR }, + { "lngfr", 0x11, INSTR_RRE_RR }, + { "ltgfr", 0x12, INSTR_RRE_RR }, + { "lcgfr", 0x13, INSTR_RRE_RR }, + { "lgfr", 0x14, INSTR_RRE_RR }, + { "llgfr", 0x16, INSTR_RRE_RR }, + { "llgtr", 0x17, INSTR_RRE_RR }, + { "agfr", 0x18, INSTR_RRE_RR }, + { "sgfr", 0x19, INSTR_RRE_RR }, + { "algfr", 0x1a, INSTR_RRE_RR }, + { "slgfr", 0x1b, INSTR_RRE_RR }, + { "msgfr", 0x1c, INSTR_RRE_RR }, + { "dsgfr", 0x1d, INSTR_RRE_RR }, + { "cgr", 0x20, INSTR_RRE_RR }, + { "clgr", 0x21, INSTR_RRE_RR }, + { "sturg", 0x25, INSTR_RRE_RR }, + { "lbr", 0x26, INSTR_RRE_RR }, + { "lhr", 0x27, INSTR_RRE_RR }, + { "cgfr", 0x30, INSTR_RRE_RR }, + { "clgfr", 0x31, INSTR_RRE_RR }, + { "bctgr", 0x46, INSTR_RRE_RR }, + { "ngr", 0x80, INSTR_RRE_RR }, + { "ogr", 0x81, INSTR_RRE_RR }, + { "xgr", 0x82, INSTR_RRE_RR }, + { "flogr", 0x83, INSTR_RRE_RR }, + { "llgcr", 0x84, INSTR_RRE_RR }, + { "llghr", 0x85, INSTR_RRE_RR }, + { "mlgr", 0x86, INSTR_RRE_RR }, + { "dlgr", 0x87, INSTR_RRE_RR }, + { "alcgr", 0x88, INSTR_RRE_RR }, + { "slbgr", 0x89, INSTR_RRE_RR }, + { "cspg", 0x8a, INSTR_RRE_RR }, + { "idte", 0x8e, INSTR_RRF_R0RR }, + { "llcr", 0x94, INSTR_RRE_RR }, + { "llhr", 0x95, INSTR_RRE_RR }, + { "esea", 0x9d, INSTR_RRE_R0 }, + { "lptea", 0xaa, INSTR_RRF_RURR }, + { "cu14", 0xb0, INSTR_RRF_M0RR }, + { "cu24", 0xb1, INSTR_RRF_M0RR }, + { "cu41", 0xb2, INSTR_RRF_M0RR }, + { "cu42", 0xb3, INSTR_RRF_M0RR }, +#endif + { "kmac", 0x1e, INSTR_RRE_RR }, + { "lrvr", 0x1f, INSTR_RRE_RR }, + { "km", 0x2e, INSTR_RRE_RR }, + { "kmc", 0x2f, INSTR_RRE_RR }, + { "kimd", 0x3e, INSTR_RRE_RR }, + { "klmd", 0x3f, INSTR_RRE_RR }, + { "epsw", 0x8d, INSTR_RRE_RR }, + { "trtt", 0x90, INSTR_RRE_RR }, + { "trtt", 0x90, INSTR_RRF_M0RR }, + { "trto", 0x91, INSTR_RRE_RR }, + { "trto", 0x91, INSTR_RRF_M0RR }, + { "trot", 0x92, INSTR_RRE_RR }, + { "trot", 0x92, INSTR_RRF_M0RR }, + { "troo", 0x93, INSTR_RRE_RR }, + { "troo", 0x93, INSTR_RRF_M0RR }, + { "mlr", 0x96, INSTR_RRE_RR }, + { "dlr", 0x97, INSTR_RRE_RR }, + { "alcr", 0x98, INSTR_RRE_RR }, + { "slbr", 0x99, INSTR_RRE_RR }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_c0[] = { +#ifdef CONFIG_64BIT + { "lgfi", 0x01, INSTR_RIL_RI }, + { "xihf", 0x06, INSTR_RIL_RU }, + { "xilf", 0x07, INSTR_RIL_RU }, 
+ { "iihf", 0x08, INSTR_RIL_RU }, + { "iilf", 0x09, INSTR_RIL_RU }, + { "nihf", 0x0a, INSTR_RIL_RU }, + { "nilf", 0x0b, INSTR_RIL_RU }, + { "oihf", 0x0c, INSTR_RIL_RU }, + { "oilf", 0x0d, INSTR_RIL_RU }, + { "llihf", 0x0e, INSTR_RIL_RU }, + { "llilf", 0x0f, INSTR_RIL_RU }, +#endif + { "larl", 0x00, INSTR_RIL_RP }, + { "brcl", 0x04, INSTR_RIL_UP }, + { "brasl", 0x05, INSTR_RIL_RP }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_c2[] = { +#ifdef CONFIG_64BIT + { "slgfi", 0x04, INSTR_RIL_RU }, + { "slfi", 0x05, INSTR_RIL_RU }, + { "agfi", 0x08, INSTR_RIL_RI }, + { "afi", 0x09, INSTR_RIL_RI }, + { "algfi", 0x0a, INSTR_RIL_RU }, + { "alfi", 0x0b, INSTR_RIL_RU }, + { "cgfi", 0x0c, INSTR_RIL_RI }, + { "cfi", 0x0d, INSTR_RIL_RI }, + { "clgfi", 0x0e, INSTR_RIL_RU }, + { "clfi", 0x0f, INSTR_RIL_RU }, +#endif + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_c8[] = { +#ifdef CONFIG_64BIT + { "mvcos", 0x00, INSTR_SSF_RRDRD }, +#endif + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_e3[] = { +#ifdef CONFIG_64BIT + { "ltg", 0x02, INSTR_RXY_RRRD }, + { "lrag", 0x03, INSTR_RXY_RRRD }, + { "lg", 0x04, INSTR_RXY_RRRD }, + { "cvby", 0x06, INSTR_RXY_RRRD }, + { "ag", 0x08, INSTR_RXY_RRRD }, + { "sg", 0x09, INSTR_RXY_RRRD }, + { "alg", 0x0a, INSTR_RXY_RRRD }, + { "slg", 0x0b, INSTR_RXY_RRRD }, + { "msg", 0x0c, INSTR_RXY_RRRD }, + { "dsg", 0x0d, INSTR_RXY_RRRD }, + { "cvbg", 0x0e, INSTR_RXY_RRRD }, + { "lrvg", 0x0f, INSTR_RXY_RRRD }, + { "lt", 0x12, INSTR_RXY_RRRD }, + { "lray", 0x13, INSTR_RXY_RRRD }, + { "lgf", 0x14, INSTR_RXY_RRRD }, + { "lgh", 0x15, INSTR_RXY_RRRD }, + { "llgf", 0x16, INSTR_RXY_RRRD }, + { "llgt", 0x17, INSTR_RXY_RRRD }, + { "agf", 0x18, INSTR_RXY_RRRD }, + { "sgf", 0x19, INSTR_RXY_RRRD }, + { "algf", 0x1a, INSTR_RXY_RRRD }, + { "slgf", 0x1b, INSTR_RXY_RRRD }, + { "msgf", 0x1c, INSTR_RXY_RRRD }, + { "dsgf", 0x1d, INSTR_RXY_RRRD }, + { "cg", 0x20, INSTR_RXY_RRRD }, + { "clg", 0x21, INSTR_RXY_RRRD }, + { "stg", 0x24, INSTR_RXY_RRRD }, + { "cvdy", 0x26, INSTR_RXY_RRRD }, + { "cvdg", 0x2e, INSTR_RXY_RRRD }, + { "strvg", 0x2f, INSTR_RXY_RRRD }, + { "cgf", 0x30, INSTR_RXY_RRRD }, + { "clgf", 0x31, INSTR_RXY_RRRD }, + { "strvh", 0x3f, INSTR_RXY_RRRD }, + { "bctg", 0x46, INSTR_RXY_RRRD }, + { "sty", 0x50, INSTR_RXY_RRRD }, + { "msy", 0x51, INSTR_RXY_RRRD }, + { "ny", 0x54, INSTR_RXY_RRRD }, + { "cly", 0x55, INSTR_RXY_RRRD }, + { "oy", 0x56, INSTR_RXY_RRRD }, + { "xy", 0x57, INSTR_RXY_RRRD }, + { "ly", 0x58, INSTR_RXY_RRRD }, + { "cy", 0x59, INSTR_RXY_RRRD }, + { "ay", 0x5a, INSTR_RXY_RRRD }, + { "sy", 0x5b, INSTR_RXY_RRRD }, + { "aly", 0x5e, INSTR_RXY_RRRD }, + { "sly", 0x5f, INSTR_RXY_RRRD }, + { "sthy", 0x70, INSTR_RXY_RRRD }, + { "lay", 0x71, INSTR_RXY_RRRD }, + { "stcy", 0x72, INSTR_RXY_RRRD }, + { "icy", 0x73, INSTR_RXY_RRRD }, + { "lb", 0x76, INSTR_RXY_RRRD }, + { "lgb", 0x77, INSTR_RXY_RRRD }, + { "lhy", 0x78, INSTR_RXY_RRRD }, + { "chy", 0x79, INSTR_RXY_RRRD }, + { "ahy", 0x7a, INSTR_RXY_RRRD }, + { "shy", 0x7b, INSTR_RXY_RRRD }, + { "ng", 0x80, INSTR_RXY_RRRD }, + { "og", 0x81, INSTR_RXY_RRRD }, + { "xg", 0x82, INSTR_RXY_RRRD }, + { "mlg", 0x86, INSTR_RXY_RRRD }, + { "dlg", 0x87, INSTR_RXY_RRRD }, + { "alcg", 0x88, INSTR_RXY_RRRD }, + { "slbg", 0x89, INSTR_RXY_RRRD }, + { "stpq", 0x8e, INSTR_RXY_RRRD }, + { "lpq", 0x8f, INSTR_RXY_RRRD }, + { "llgc", 0x90, INSTR_RXY_RRRD }, + { "llgh", 0x91, INSTR_RXY_RRRD }, + { "llc", 0x94, INSTR_RXY_RRRD }, + { "llh", 0x95, INSTR_RXY_RRRD }, +#endif + { "lrv", 0x1e, INSTR_RXY_RRRD }, + { "lrvh", 0x1f, INSTR_RXY_RRRD }, 
+ { "strv", 0x3e, INSTR_RXY_RRRD }, + { "ml", 0x96, INSTR_RXY_RRRD }, + { "dl", 0x97, INSTR_RXY_RRRD }, + { "alc", 0x98, INSTR_RXY_RRRD }, + { "slb", 0x99, INSTR_RXY_RRRD }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_e5[] = { +#ifdef CONFIG_64BIT + { "strag", 0x02, INSTR_SSE_RDRD }, +#endif + { "lasp", 0x00, INSTR_SSE_RDRD }, + { "tprot", 0x01, INSTR_SSE_RDRD }, + { "mvcsk", 0x0e, INSTR_SSE_RDRD }, + { "mvcdk", 0x0f, INSTR_SSE_RDRD }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_eb[] = { +#ifdef CONFIG_64BIT + { "lmg", 0x04, INSTR_RSY_RRRD }, + { "srag", 0x0a, INSTR_RSY_RRRD }, + { "slag", 0x0b, INSTR_RSY_RRRD }, + { "srlg", 0x0c, INSTR_RSY_RRRD }, + { "sllg", 0x0d, INSTR_RSY_RRRD }, + { "tracg", 0x0f, INSTR_RSY_RRRD }, + { "csy", 0x14, INSTR_RSY_RRRD }, + { "rllg", 0x1c, INSTR_RSY_RRRD }, + { "clmh", 0x20, INSTR_RSY_RURD }, + { "clmy", 0x21, INSTR_RSY_RURD }, + { "stmg", 0x24, INSTR_RSY_RRRD }, + { "stctg", 0x25, INSTR_RSY_CCRD }, + { "stmh", 0x26, INSTR_RSY_RRRD }, + { "stcmh", 0x2c, INSTR_RSY_RURD }, + { "stcmy", 0x2d, INSTR_RSY_RURD }, + { "lctlg", 0x2f, INSTR_RSY_CCRD }, + { "csg", 0x30, INSTR_RSY_RRRD }, + { "cdsy", 0x31, INSTR_RSY_RRRD }, + { "cdsg", 0x3e, INSTR_RSY_RRRD }, + { "bxhg", 0x44, INSTR_RSY_RRRD }, + { "bxleg", 0x45, INSTR_RSY_RRRD }, + { "tmy", 0x51, INSTR_SIY_URD }, + { "mviy", 0x52, INSTR_SIY_URD }, + { "niy", 0x54, INSTR_SIY_URD }, + { "cliy", 0x55, INSTR_SIY_URD }, + { "oiy", 0x56, INSTR_SIY_URD }, + { "xiy", 0x57, INSTR_SIY_URD }, + { "icmh", 0x80, INSTR_RSE_RURD }, + { "icmh", 0x80, INSTR_RSY_RURD }, + { "icmy", 0x81, INSTR_RSY_RURD }, + { "clclu", 0x8f, INSTR_RSY_RRRD }, + { "stmy", 0x90, INSTR_RSY_RRRD }, + { "lmh", 0x96, INSTR_RSY_RRRD }, + { "lmy", 0x98, INSTR_RSY_RRRD }, + { "lamy", 0x9a, INSTR_RSY_AARD }, + { "stamy", 0x9b, INSTR_RSY_AARD }, +#endif + { "rll", 0x1d, INSTR_RSY_RRRD }, + { "mvclu", 0x8e, INSTR_RSY_RRRD }, + { "tp", 0xc0, INSTR_RSL_R0RD }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_ec[] = { +#ifdef CONFIG_64BIT + { "brxhg", 0x44, INSTR_RIE_RRP }, + { "brxlg", 0x45, INSTR_RIE_RRP }, +#endif + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_ed[] = { +#ifdef CONFIG_64BIT + { "mayl", 0x38, INSTR_RXF_FRRDF }, + { "myl", 0x39, INSTR_RXF_FRRDF }, + { "may", 0x3a, INSTR_RXF_FRRDF }, + { "my", 0x3b, INSTR_RXF_FRRDF }, + { "mayh", 0x3c, INSTR_RXF_FRRDF }, + { "myh", 0x3d, INSTR_RXF_FRRDF }, + { "ley", 0x64, INSTR_RXY_FRRD }, + { "ldy", 0x65, INSTR_RXY_FRRD }, + { "stey", 0x66, INSTR_RXY_FRRD }, + { "stdy", 0x67, INSTR_RXY_FRRD }, +#endif + { "ldeb", 0x04, INSTR_RXE_FRRD }, + { "lxdb", 0x05, INSTR_RXE_FRRD }, + { "lxeb", 0x06, INSTR_RXE_FRRD }, + { "mxdb", 0x07, INSTR_RXE_FRRD }, + { "keb", 0x08, INSTR_RXE_FRRD }, + { "ceb", 0x09, INSTR_RXE_FRRD }, + { "aeb", 0x0a, INSTR_RXE_FRRD }, + { "seb", 0x0b, INSTR_RXE_FRRD }, + { "mdeb", 0x0c, INSTR_RXE_FRRD }, + { "deb", 0x0d, INSTR_RXE_FRRD }, + { "maeb", 0x0e, INSTR_RXF_FRRDF }, + { "mseb", 0x0f, INSTR_RXF_FRRDF }, + { "tceb", 0x10, INSTR_RXE_FRRD }, + { "tcdb", 0x11, INSTR_RXE_FRRD }, + { "tcxb", 0x12, INSTR_RXE_FRRD }, + { "sqeb", 0x14, INSTR_RXE_FRRD }, + { "sqdb", 0x15, INSTR_RXE_FRRD }, + { "meeb", 0x17, INSTR_RXE_FRRD }, + { "kdb", 0x18, INSTR_RXE_FRRD }, + { "cdb", 0x19, INSTR_RXE_FRRD }, + { "adb", 0x1a, INSTR_RXE_FRRD }, + { "sdb", 0x1b, INSTR_RXE_FRRD }, + { "mdb", 0x1c, INSTR_RXE_FRRD }, + { "ddb", 0x1d, INSTR_RXE_FRRD }, + { "madb", 0x1e, INSTR_RXF_FRRDF }, + { "msdb", 0x1f, INSTR_RXF_FRRDF }, + { "lde", 0x24, INSTR_RXE_FRRD }, + { "lxd", 
0x25, INSTR_RXE_FRRD }, + { "lxe", 0x26, INSTR_RXE_FRRD }, + { "mae", 0x2e, INSTR_RXF_FRRDF }, + { "mse", 0x2f, INSTR_RXF_FRRDF }, + { "sqe", 0x34, INSTR_RXE_FRRD }, + { "mee", 0x37, INSTR_RXE_FRRD }, + { "mad", 0x3e, INSTR_RXF_FRRDF }, + { "msd", 0x3f, INSTR_RXF_FRRDF }, + { "", 0, INSTR_INVALID } +}; + +/* Extracts an operand value from an instruction. */ +static unsigned int extract_operand(unsigned char *code, + const struct operand *operand) +{ + unsigned int val; + int bits; + + /* Extract fragments of the operand byte for byte. */ + code += operand->shift / 8; + bits = (operand->shift & 7) + operand->bits; + val = 0; + do { + val <<= 8; + val |= (unsigned int) *code++; + bits -= 8; + } while (bits > 0); + val >>= -bits; + val &= ((1U << (operand->bits - 1)) << 1) - 1; + + /* Check for special long displacement case. */ + if (operand->bits == 20 && operand->shift == 20) + val = (val & 0xff) << 12 | (val & 0xfff00) >> 8; + + /* Sign extend value if the operand is signed or pc relative. */ + if ((operand->flags & (OPERAND_SIGNED | OPERAND_PCREL)) && + (val & (1U << (operand->bits - 1)))) + val |= (-1U << (operand->bits - 1)) << 1; + + /* Double value if the operand is pc relative. */ + if (operand->flags & OPERAND_PCREL) + val <<= 1; + + /* Length x in an instructions has real length x + 1. */ + if (operand->flags & OPERAND_LENGTH) + val++; + return val; +} + +static inline int insn_length(unsigned char code) +{ + return ((((int) code + 64) >> 7) + 1) << 1; +} + +static struct insn *find_insn(unsigned char *code) +{ + unsigned char opfrag = code[1]; + unsigned char opmask; + struct insn *table; + + switch (code[0]) { + case 0x01: + table = opcode_01; + break; + case 0xa5: + table = opcode_a5; + break; + case 0xa7: + table = opcode_a7; + break; + case 0xb2: + table = opcode_b2; + break; + case 0xb3: + table = opcode_b3; + break; + case 0xb9: + table = opcode_b9; + break; + case 0xc0: + table = opcode_c0; + break; + case 0xc2: + table = opcode_c2; + break; + case 0xc8: + table = opcode_c8; + break; + case 0xe3: + table = opcode_e3; + opfrag = code[5]; + break; + case 0xe5: + table = opcode_e5; + break; + case 0xeb: + table = opcode_eb; + opfrag = code[5]; + break; + case 0xec: + table = opcode_ec; + opfrag = code[5]; + break; + case 0xed: + table = opcode_ed; + opfrag = code[5]; + break; + default: + table = opcode; + opfrag = code[0]; + break; + } + while (table->format != INSTR_INVALID) { + opmask = formats[table->format][0]; + if (table->opfrag == (opfrag & opmask)) + return table; + table++; + } + return NULL; +} + +static int print_insn(char *buffer, unsigned char *code, unsigned long addr) +{ + struct insn *insn; + const unsigned char *ops; + const struct operand *operand; + unsigned int value; + char separator; + char *ptr; + + ptr = buffer; + insn = find_insn(code); + if (insn) { + ptr += sprintf(ptr, "%.5s\t", insn->name); + /* Extract the operands. 
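A quick aside on insn_length(), defined a few lines above: the two high bits of the first opcode byte encode the instruction length (00 selects 2 bytes, 01 and 10 select 4, 11 selects 6), and the shift arithmetic is just a branch-free way of computing that. A self-contained check, with sample opcodes taken from the tables above:

#include <stdio.h>

static int insn_length(unsigned char code)
{
	return ((((int) code + 64) >> 7) + 1) << 1;
}

int main(void)
{
	/* lr (RR format), l (RX format), larl (RIL format) */
	unsigned char samples[] = { 0x18, 0x58, 0xc0 };
	int i;

	for (i = 0; i < 3; i++)
		printf("opcode 0x%02x -> %d bytes\n",
		       samples[i], insn_length(samples[i]));
	return 0;	/* prints 2, 4 and 6 */
}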
*/ + separator = 0; + for (ops = formats[insn->format] + 1; *ops != 0; ops++) { + operand = operands + *ops; + value = extract_operand(code, operand); + if ((operand->flags & OPERAND_INDEX) && value == 0) + continue; + if ((operand->flags & OPERAND_BASE) && + value == 0 && separator == '(') { + separator = ','; + continue; + } + if (separator) + ptr += sprintf(ptr, "%c", separator); + if (operand->flags & OPERAND_GPR) + ptr += sprintf(ptr, "%%r%i", value); + else if (operand->flags & OPERAND_FPR) + ptr += sprintf(ptr, "%%f%i", value); + else if (operand->flags & OPERAND_AR) + ptr += sprintf(ptr, "%%a%i", value); + else if (operand->flags & OPERAND_CR) + ptr += sprintf(ptr, "%%c%i", value); + else if (operand->flags & OPERAND_PCREL) + ptr += sprintf(ptr, "%lx", value + addr); + else if (operand->flags & OPERAND_SIGNED) + ptr += sprintf(ptr, "%i", value); + else + ptr += sprintf(ptr, "%u", value); + if (operand->flags & OPERAND_DISP) + separator = '('; + else if (operand->flags & OPERAND_BASE) { + ptr += sprintf(ptr, ")"); + separator = ','; + } else + separator = ','; + } + } else + ptr += sprintf(ptr, "unknown"); + return (int) (ptr - buffer); +} + +void show_code(struct pt_regs *regs) +{ + char *mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl"; + unsigned char code[64]; + char buffer[64], *ptr; + mm_segment_t old_fs; + unsigned long addr; + int start, end, opsize, hops, i; + + /* Get a snapshot of the 64 bytes surrounding the fault address. */ + old_fs = get_fs(); + set_fs((regs->psw.mask & PSW_MASK_PSTATE) ? USER_DS : KERNEL_DS); + for (start = 32; start && regs->psw.addr >= 34 - start; start -= 2) { + addr = regs->psw.addr - 34 + start; + if (__copy_from_user(code + start - 2, + (char __user *) addr, 2)) + break; + } + for (end = 32; end < 64; end += 2) { + addr = regs->psw.addr + end - 32; + if (__copy_from_user(code + end, + (char __user *) addr, 2)) + break; + } + set_fs(old_fs); + /* Code snapshot useable ? */ + if ((regs->psw.addr & 1) || start >= end) { + printk("%s Code: Bad PSW.\n", mode); + return; + } + /* Find a starting point for the disassembly. */ + while (start < 32) { + hops = 0; + for (i = 0, hops = 0; start + i < 32 && hops < 3; hops++) { + if (!find_insn(code + start + i)) + break; + i += insn_length(code[start + i]); + } + if (start + i == 32) + /* Looks good, sequence ends at PSW. */ + break; + start += 2; + } + /* Decode the instructions. */ + ptr = buffer; + ptr += sprintf(ptr, "%s Code:", mode); + hops = 0; + while (start < end && hops < 8) { + *ptr++ = (start == 32) ? '>' : ' '; + addr = regs->psw.addr + start - 32; + ptr += sprintf(ptr, ONELONG, addr); + opsize = insn_length(code[start]); + if (start + opsize >= end) + break; + for (i = 0; i < opsize; i++) + ptr += sprintf(ptr, "%02x", code[start + i]); + *ptr++ = '\t'; + if (i < 6) + *ptr++ = '\t'; + ptr += print_insn(ptr, code + start, addr); + start += opsize; + printk(buffer); + ptr = buffer; + ptr += sprintf(ptr, "\n "); + hops++; + } + printk("\n"); +} diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 5e47936573f..50538e54561 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -253,11 +253,10 @@ static noinline __init void find_memory_chunks(unsigned long memsize) break; #endif /* - * Finish memory detection at the first hole, unless - * - we reached the hsa -> skip it. - * - we know there must be more. + * Finish memory detection at the first hole + * if storage size is unknown. 
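Stepping back to show_code() above: after taking the 64-byte snapshot it resynchronizes by sliding the start offset forward, two bytes at a time, until a short run of decoded instructions ends exactly at the PSW address. A compact userspace sketch of that resync idea (insn_length() is repeated so the snippet stands alone; the kernel loop additionally rejects offsets where find_insn() fails):

#include <stddef.h>

static int insn_length(unsigned char code)
{
	return ((((int) code + 64) >> 7) + 1) << 1;
}

/*
 * Slide the candidate start forward until at most three decoded
 * instructions land exactly on 'target' (the PSW offset within the
 * snapshot); fall back to the PSW itself if nothing lines up.
 */
static size_t find_start(const unsigned char *code, size_t target)
{
	size_t start, i;
	int hops;

	for (start = 0; start < target; start += 2) {
		i = start;
		for (hops = 0; i < target && hops < 3; hops++)
			i += insn_length(code[i]);
		if (i == target)
			return start;
	}
	return target;
}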
*/ - if (cc == -1UL && !memsize && old_addr != ADDR2G) + if (cc == -1UL && !memsize) break; if (memsize && addr >= memsize) break; diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index dddc3de3040..c8a2212014e 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -249,8 +249,6 @@ sysc_do_restart: bnz BASED(sysc_tracesys) basr %r14,%r8 # call sys_xxxx st %r2,SP_R2(%r15) # store return value (change R2 on stack) - # ATTENTION: check sys_execve_glue before - # changing anything here !! sysc_return: tm SP_PSW+1(%r15),0x01 # returning to user ? @@ -381,50 +379,37 @@ ret_from_fork: b BASED(sysc_return) # -# clone, fork, vfork, exec and sigreturn need glue, -# because they all expect pt_regs as parameter, -# but are called with different parameter. -# return-address is set up above +# kernel_execve function needs to deal with pt_regs that is not +# at the usual place # -sys_clone_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - l %r1,BASED(.Lclone) - br %r1 # branch to sys_clone - -sys_fork_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - l %r1,BASED(.Lfork) - br %r1 # branch to sys_fork - -sys_vfork_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - l %r1,BASED(.Lvfork) - br %r1 # branch to sys_vfork - -sys_execve_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - l %r1,BASED(.Lexecve) - lr %r12,%r14 # save return address - basr %r14,%r1 # call sys_execve - ltr %r2,%r2 # check if execve failed - bnz 0(%r12) # it did fail -> store result in gpr2 - b 4(%r12) # SKIP ST 2,SP_R2(15) after BASR 14,8 - # in system_call/sysc_tracesys - -sys_sigreturn_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs as parameter - l %r1,BASED(.Lsigreturn) - br %r1 # branch to sys_sigreturn - -sys_rt_sigreturn_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs as parameter - l %r1,BASED(.Lrt_sigreturn) - br %r1 # branch to sys_sigreturn - -sys_sigaltstack_glue: - la %r4,SP_PTREGS(%r15) # load pt_regs as parameter - l %r1,BASED(.Lsigaltstack) - br %r1 # branch to sys_sigreturn + .globl kernel_execve +kernel_execve: + stm %r12,%r15,48(%r15) + lr %r14,%r15 + l %r13,__LC_SVC_NEW_PSW+4 + s %r15,BASED(.Lc_spsize) + st %r14,__SF_BACKCHAIN(%r15) + la %r12,SP_PTREGS(%r15) + xc 0(__PT_SIZE,%r12),0(%r12) + l %r1,BASED(.Ldo_execve) + lr %r5,%r12 + basr %r14,%r1 + ltr %r2,%r2 + be BASED(0f) + a %r15,BASED(.Lc_spsize) + lm %r12,%r15,48(%r15) + br %r14 + # execve succeeded. 
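The kernel_execve stub that replaces the old sys_execve_glue builds an all-zero pt_regs area in its own stack frame, lets do_execve() fill it in, and only on success switches to the kernel stack, copies the register frame there and leaves through execve_tail and sysc_return; on failure it simply returns to its caller. A stand-alone C sketch of that control flow, for orientation only — the real entry point has to stay in assembly because it rewrites %r15 and the return PSW, and every name with a _model suffix is invented for illustration:

#include <stdio.h>
#include <string.h>

struct pt_regs_model { unsigned long gprs[16]; unsigned long psw_addr; };

/* stands in for do_execve(); fills the register frame, returns 0 on success */
static int do_execve_model(const char *file, struct pt_regs_model *regs)
{
	if (!file || !*file)
		return -1;
	regs->psw_addr = 0x400000;	/* entry point of the new image */
	regs->gprs[15] = 0x7fff0000;	/* new user-space stack pointer */
	return 0;
}

static int kernel_execve_model(const char *file)
{
	struct pt_regs_model regs;

	memset(&regs, 0, sizeof(regs));	/* xc 0(__PT_SIZE,%r12),0(%r12) */
	if (do_execve_model(file, &regs))
		return -1;		/* failure: plain return to the caller */
	/* success: the assembly now switches to the kernel stack, copies
	 * the pt_regs there, calls execve_tail() and ends in sysc_return */
	printf("would resume at %#lx with user sp %#lx\n",
	       regs.psw_addr, regs.gprs[15]);
	return 0;
}

int main(void)
{
	return kernel_execve_model("/sbin/init") ? 1 : 0;
}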
+0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts + l %r15,__LC_KERNEL_STACK # load ksp + s %r15,BASED(.Lc_spsize) # make room for registers & psw + l %r9,__LC_THREAD_INFO + mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs + xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) + stosm __SF_EMPTY(%r15),0x03 # reenable interrupts + l %r1,BASED(.Lexecve_tail) + basr %r14,%r1 + b BASED(sysc_return) /* * Program check handler routine @@ -1031,19 +1016,11 @@ cleanup_io_leave_insn: .Ldo_extint: .long do_extint .Ldo_signal: .long do_signal .Lhandle_per: .long do_single_step +.Ldo_execve: .long do_execve +.Lexecve_tail: .long execve_tail .Ljump_table: .long pgm_check_table .Lschedule: .long schedule -.Lclone: .long sys_clone -.Lexecve: .long sys_execve -.Lfork: .long sys_fork -.Lrt_sigreturn: .long sys_rt_sigreturn -.Lrt_sigsuspend: - .long sys_rt_sigsuspend -.Lsigreturn: .long sys_sigreturn -.Lsigsuspend: .long sys_sigsuspend -.Lsigaltstack: .long sys_sigaltstack .Ltrace: .long syscall_trace -.Lvfork: .long sys_vfork .Lschedtail: .long schedule_tail .Lsysc_table: .long sys_call_table #ifdef CONFIG_TRACE_IRQFLAGS diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 0f758c329a5..93745fd8f55 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -244,8 +244,6 @@ sysc_noemu: jnz sysc_tracesys basr %r14,%r8 # call sys_xxxx stg %r2,SP_R2(%r15) # store return value (change R2 on stack) - # ATTENTION: check sys_execve_glue before - # changing anything here !! sysc_return: tm SP_PSW+1(%r15),0x01 # returning to user ? @@ -371,77 +369,35 @@ ret_from_fork: j sysc_return # -# clone, fork, vfork, exec and sigreturn need glue, -# because they all expect pt_regs as parameter, -# but are called with different parameter. -# return-address is set up above +# kernel_execve function needs to deal with pt_regs that is not +# at the usual place # -sys_clone_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - jg sys_clone # branch to sys_clone - -#ifdef CONFIG_COMPAT -sys32_clone_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - jg sys32_clone # branch to sys32_clone -#endif - -sys_fork_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - jg sys_fork # branch to sys_fork - -sys_vfork_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - jg sys_vfork # branch to sys_vfork - -sys_execve_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - lgr %r12,%r14 # save return address - brasl %r14,sys_execve # call sys_execve - ltgr %r2,%r2 # check if execve failed - bnz 0(%r12) # it did fail -> store result in gpr2 - b 6(%r12) # SKIP STG 2,SP_R2(15) in - # system_call/sysc_tracesys -#ifdef CONFIG_COMPAT -sys32_execve_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - lgr %r12,%r14 # save return address - brasl %r14,sys32_execve # call sys32_execve - ltgr %r2,%r2 # check if execve failed - bnz 0(%r12) # it did fail -> store result in gpr2 - b 6(%r12) # SKIP STG 2,SP_R2(15) in - # system_call/sysc_tracesys -#endif - -sys_sigreturn_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs as parameter - jg sys_sigreturn # branch to sys_sigreturn - -#ifdef CONFIG_COMPAT -sys32_sigreturn_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs as parameter - jg sys32_sigreturn # branch to sys32_sigreturn -#endif - -sys_rt_sigreturn_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs as parameter - jg sys_rt_sigreturn # branch to sys_sigreturn - -#ifdef CONFIG_COMPAT -sys32_rt_sigreturn_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs as parameter - jg sys32_rt_sigreturn # branch to sys32_sigreturn -#endif - -sys_sigaltstack_glue: - la 
%r4,SP_PTREGS(%r15) # load pt_regs as parameter - jg sys_sigaltstack # branch to sys_sigreturn - -#ifdef CONFIG_COMPAT -sys32_sigaltstack_glue: - la %r4,SP_PTREGS(%r15) # load pt_regs as parameter - jg sys32_sigaltstack_wrapper # branch to sys_sigreturn -#endif + .globl kernel_execve +kernel_execve: + stmg %r12,%r15,96(%r15) + lgr %r14,%r15 + aghi %r15,-SP_SIZE + stg %r14,__SF_BACKCHAIN(%r15) + la %r12,SP_PTREGS(%r15) + xc 0(__PT_SIZE,%r12),0(%r12) + lgr %r5,%r12 + brasl %r14,do_execve + ltgfr %r2,%r2 + je 0f + aghi %r15,SP_SIZE + lmg %r12,%r15,96(%r15) + br %r14 + # execve succeeded. +0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts + lg %r15,__LC_KERNEL_STACK # load ksp + aghi %r15,-SP_SIZE # make room for registers & psw + lg %r13,__LC_SVC_NEW_PSW+8 + lg %r9,__LC_THREAD_INFO + mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) + stosm __SF_EMPTY(%r15),0x03 # reenable interrupts + brasl %r14,execve_tail + j sysc_return /* * Program check handler routine diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 37010709fe6..a87b1976d40 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -39,7 +39,69 @@ startup_continue: basr %r13,0 # get base .LPG1: sll %r13,1 # remove high order bit srl %r13,1 - lhi %r1,1 # mode 1 = esame + +#ifdef CONFIG_ZFCPDUMP + + # check if we have been ipled using zfcp dump: + + tm 0xb9,0x01 # test if subchannel is enabled + jno .nodump # subchannel disabled + l %r1,0xb8 + la %r5,.Lipl_schib-.LPG1(%r13) + stsch 0(%r5) # get schib of subchannel + jne .nodump # schib not available + tm 5(%r5),0x01 # devno valid? + jno .nodump + tm 4(%r5),0x80 # qdio capable device? + jno .nodump + l %r2,20(%r0) # address of ipl parameter block + lhi %r3,0 + ic %r3,0x148(%r2) # get opt field + chi %r3,0x20 # load with dump? + jne .nodump + + # store all prefix registers in case of load with dump: + + la %r7,0 # base register for 0 page + la %r8,0 # first cpu + l %r11,.Lpref_arr_ptr-.LPG1(%r13) # address of prefix array + ahi %r11,4 # skip boot cpu + lr %r12,%r11 + ahi %r12,(CONFIG_NR_CPUS*4) # end of prefix array + stap .Lcurrent_cpu+2-.LPG1(%r13) # store current cpu addr +1: + cl %r8,.Lcurrent_cpu-.LPG1(%r13) # is ipl cpu ? + je 4f # if yes get next cpu +2: + lr %r9,%r7 + sigp %r9,%r8,0x9 # stop & store status of cpu + brc 8,3f # accepted + brc 4,4f # status stored: next cpu + brc 2,2b # busy: try again + brc 1,4f # not op: next cpu +3: + mvc 0(4,%r11),264(%r7) # copy prefix register to prefix array + ahi %r11,4 # next element in prefix array + clr %r11,%r12 + je 5f # no more space in prefix array +4: + ahi %r8,1 # next cpu (r8 += 1) + cl %r8,.Llast_cpu-.LPG1(%r13) # is last possible cpu ? 
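Because the prefix registers of the stopped CPUs are lost when the dump kernel switches from 31-bit to 64-bit mode, the zfcpdump path above walks every possible CPU address, stops it with sigp stop-and-store-status and copies its prefix register into zfcpdump_prefix_array; smp_get_save_areas() in smp.c picks the values up again later. A stand-alone sketch of that harvesting loop — sigp_stop_store_model(), the constants and the save-area layout are invented stand-ins, not the real interfaces:

#include <stdio.h>

#define LAST_CPU_MODEL	16	/* the real loop scans addresses up to 0xffff */
#define NR_CPUS_MODEL	4

enum sigp_cc_model { SIGP_ACCEPTED, SIGP_STATUS_STORED, SIGP_BUSY, SIGP_NOT_OP };

static unsigned int prefix_array_model[NR_CPUS_MODEL];

/* pretend CPU addresses 0-2 exist and report a prefix value for each */
static enum sigp_cc_model sigp_stop_store_model(unsigned int addr,
						unsigned int *prefix)
{
	if (addr > 2)
		return SIGP_NOT_OP;
	*prefix = 0x10000 * (addr + 1);
	return SIGP_ACCEPTED;
}

int main(void)
{
	unsigned int addr, prefix, slot = 0, ipl_cpu = 0;

	for (addr = 0; addr <= LAST_CPU_MODEL && slot < NR_CPUS_MODEL; addr++) {
		if (addr == ipl_cpu)
			continue;	/* the IPL CPU keeps running */
		if (sigp_stop_store_model(addr, &prefix) != SIGP_ACCEPTED)
			continue;	/* not operational: try the next one */
		prefix_array_model[slot++] = prefix;	/* mvc 0(4,%r11),264(%r7) */
	}
	for (addr = 0; addr < slot; addr++)
		printf("slot %u: prefix %#x\n", addr, prefix_array_model[addr]);
	return 0;
}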
+ jl 1b # jump if not last cpu +5: + lhi %r1,2 # mode 2 = esame (dump) + j 6f + .align 4 +.Lipl_schib: + .rept 13 + .long 0 + .endr +.nodump: + lhi %r1,1 # mode 1 = esame (normal ipl) +6: +#else + lhi %r1,1 # mode 1 = esame (normal ipl) +#endif /* CONFIG_ZFCPDUMP */ mvi __LC_AR_MODE_ID,1 # set esame flag slr %r0,%r0 # set cpuid to zero sigp %r1,%r0,0x12 # switch to esame mode @@ -149,6 +211,14 @@ startup_continue: .L4malign:.quad 0xffffffffffc00000 .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 .Lnop: .long 0x07000700 +#ifdef CONFIG_ZFCPDUMP +.Lcurrent_cpu: + .long 0x0 +.Llast_cpu: + .long 0x0000ffff +.Lpref_arr_ptr: + .long zfcpdump_prefix_array +#endif /* CONFIG_ZFCPDUMP */ .Lparmaddr: .quad PARMAREA .align 64 diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index f731185bf2b..0ea048d350d 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -29,36 +29,21 @@ #define SCCB_LOADPARM (&s390_readinfo_sccb.loadparm) #define SCCB_FLAG (s390_readinfo_sccb.flags) -enum ipl_type { - IPL_TYPE_NONE = 1, - IPL_TYPE_UNKNOWN = 2, - IPL_TYPE_CCW = 4, - IPL_TYPE_FCP = 8, - IPL_TYPE_NSS = 16, -}; - -#define IPL_NONE_STR "none" -#define IPL_UNKNOWN_STR "unknown" -#define IPL_CCW_STR "ccw" -#define IPL_FCP_STR "fcp" -#define IPL_NSS_STR "nss" - -/* - * Must be in data section since the bss section - * is not cleared when these are accessed. - */ -u16 ipl_devno __attribute__((__section__(".data"))) = 0; -u32 ipl_flags __attribute__((__section__(".data"))) = 0; +#define IPL_UNKNOWN_STR "unknown" +#define IPL_CCW_STR "ccw" +#define IPL_FCP_STR "fcp" +#define IPL_FCP_DUMP_STR "fcp_dump" +#define IPL_NSS_STR "nss" static char *ipl_type_str(enum ipl_type type) { switch (type) { - case IPL_TYPE_NONE: - return IPL_NONE_STR; case IPL_TYPE_CCW: return IPL_CCW_STR; case IPL_TYPE_FCP: return IPL_FCP_STR; + case IPL_TYPE_FCP_DUMP: + return IPL_FCP_DUMP_STR; case IPL_TYPE_NSS: return IPL_NSS_STR; case IPL_TYPE_UNKNOWN: @@ -67,15 +52,55 @@ static char *ipl_type_str(enum ipl_type type) } } +enum dump_type { + DUMP_TYPE_NONE = 1, + DUMP_TYPE_CCW = 2, + DUMP_TYPE_FCP = 4, +}; + +#define DUMP_NONE_STR "none" +#define DUMP_CCW_STR "ccw" +#define DUMP_FCP_STR "fcp" + +static char *dump_type_str(enum dump_type type) +{ + switch (type) { + case DUMP_TYPE_NONE: + return DUMP_NONE_STR; + case DUMP_TYPE_CCW: + return DUMP_CCW_STR; + case DUMP_TYPE_FCP: + return DUMP_FCP_STR; + default: + return NULL; + } +} + +/* + * Must be in data section since the bss section + * is not cleared when these are accessed. 
+ */ +static u16 ipl_devno __attribute__((__section__(".data"))) = 0; +u32 ipl_flags __attribute__((__section__(".data"))) = 0; + enum ipl_method { - IPL_METHOD_NONE, - IPL_METHOD_CCW_CIO, - IPL_METHOD_CCW_DIAG, - IPL_METHOD_CCW_VM, - IPL_METHOD_FCP_RO_DIAG, - IPL_METHOD_FCP_RW_DIAG, - IPL_METHOD_FCP_RO_VM, - IPL_METHOD_NSS, + REIPL_METHOD_CCW_CIO, + REIPL_METHOD_CCW_DIAG, + REIPL_METHOD_CCW_VM, + REIPL_METHOD_FCP_RO_DIAG, + REIPL_METHOD_FCP_RW_DIAG, + REIPL_METHOD_FCP_RO_VM, + REIPL_METHOD_FCP_DUMP, + REIPL_METHOD_NSS, + REIPL_METHOD_DEFAULT, +}; + +enum dump_method { + DUMP_METHOD_NONE, + DUMP_METHOD_CCW_CIO, + DUMP_METHOD_CCW_DIAG, + DUMP_METHOD_CCW_VM, + DUMP_METHOD_FCP_DIAG, }; enum shutdown_action { @@ -107,15 +132,15 @@ static int diag308_set_works = 0; static int reipl_capabilities = IPL_TYPE_UNKNOWN; static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN; -static enum ipl_method reipl_method = IPL_METHOD_NONE; +static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT; static struct ipl_parameter_block *reipl_block_fcp; static struct ipl_parameter_block *reipl_block_ccw; static char reipl_nss_name[NSS_NAME_SIZE + 1]; -static int dump_capabilities = IPL_TYPE_NONE; -static enum ipl_type dump_type = IPL_TYPE_NONE; -static enum ipl_method dump_method = IPL_METHOD_NONE; +static int dump_capabilities = DUMP_TYPE_NONE; +static enum dump_type dump_type = DUMP_TYPE_NONE; +static enum dump_method dump_method = DUMP_METHOD_NONE; static struct ipl_parameter_block *dump_block_fcp; static struct ipl_parameter_block *dump_block_ccw; @@ -134,11 +159,12 @@ int diag308(unsigned long subcode, void *addr) : "d" (subcode) : "cc", "memory"); return _rc; } +EXPORT_SYMBOL_GPL(diag308); /* SYSFS */ #define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \ -static ssize_t sys_##_prefix##_##_name##_show(struct subsystem *subsys, \ +static ssize_t sys_##_prefix##_##_name##_show(struct kset *kset, \ char *page) \ { \ return sprintf(page, _format, _value); \ @@ -147,13 +173,13 @@ static struct subsys_attribute sys_##_prefix##_##_name##_attr = \ __ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL); #define DEFINE_IPL_ATTR_RW(_prefix, _name, _fmt_out, _fmt_in, _value) \ -static ssize_t sys_##_prefix##_##_name##_show(struct subsystem *subsys, \ +static ssize_t sys_##_prefix##_##_name##_show(struct kset *kset, \ char *page) \ { \ return sprintf(page, _fmt_out, \ (unsigned long long) _value); \ } \ -static ssize_t sys_##_prefix##_##_name##_store(struct subsystem *subsys,\ +static ssize_t sys_##_prefix##_##_name##_store(struct kset *kset, \ const char *buf, size_t len) \ { \ unsigned long long value; \ @@ -168,12 +194,12 @@ static struct subsys_attribute sys_##_prefix##_##_name##_attr = \ sys_##_prefix##_##_name##_store); #define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\ -static ssize_t sys_##_prefix##_##_name##_show(struct subsystem *subsys, \ +static ssize_t sys_##_prefix##_##_name##_show(struct kset *kset, \ char *page) \ { \ return sprintf(page, _fmt_out, _value); \ } \ -static ssize_t sys_##_prefix##_##_name##_store(struct subsystem *subsys,\ +static ssize_t sys_##_prefix##_##_name##_store(struct kset *kset, \ const char *buf, size_t len) \ { \ if (sscanf(buf, _fmt_in, _value) != 1) \ @@ -197,7 +223,7 @@ static void make_attrs_ro(struct attribute **attrs) * ipl section */ -static enum ipl_type ipl_get_type(void) +static __init enum ipl_type get_ipl_type(void) { struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; @@ -211,24 +237,57 @@ static enum ipl_type ipl_get_type(void) 
return IPL_TYPE_UNKNOWN; if (ipl->hdr.pbt != DIAG308_IPL_TYPE_FCP) return IPL_TYPE_UNKNOWN; + if (ipl->ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP) + return IPL_TYPE_FCP_DUMP; return IPL_TYPE_FCP; } -static ssize_t ipl_type_show(struct subsystem *subsys, char *page) +void __init setup_ipl_info(void) +{ + ipl_info.type = get_ipl_type(); + switch (ipl_info.type) { + case IPL_TYPE_CCW: + ipl_info.data.ccw.dev_id.devno = ipl_devno; + ipl_info.data.ccw.dev_id.ssid = 0; + break; + case IPL_TYPE_FCP: + case IPL_TYPE_FCP_DUMP: + ipl_info.data.fcp.dev_id.devno = + IPL_PARMBLOCK_START->ipl_info.fcp.devno; + ipl_info.data.fcp.dev_id.ssid = 0; + ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn; + ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun; + break; + case IPL_TYPE_NSS: + strncpy(ipl_info.data.nss.name, kernel_nss_name, + sizeof(ipl_info.data.nss.name)); + break; + case IPL_TYPE_UNKNOWN: + default: + /* We have no info to copy */ + break; + } +} + +struct ipl_info ipl_info; +EXPORT_SYMBOL_GPL(ipl_info); + +static ssize_t ipl_type_show(struct kset *kset, char *page) { - return sprintf(page, "%s\n", ipl_type_str(ipl_get_type())); + return sprintf(page, "%s\n", ipl_type_str(ipl_info.type)); } static struct subsys_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); -static ssize_t sys_ipl_device_show(struct subsystem *subsys, char *page) +static ssize_t sys_ipl_device_show(struct kset *kset, char *page) { struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; - switch (ipl_get_type()) { + switch (ipl_info.type) { case IPL_TYPE_CCW: return sprintf(page, "0.0.%04x\n", ipl_devno); case IPL_TYPE_FCP: + case IPL_TYPE_FCP_DUMP: return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno); default: return 0; @@ -312,7 +371,7 @@ static struct attribute_group ipl_fcp_attr_group = { /* CCW ipl device attributes */ -static ssize_t ipl_ccw_loadparm_show(struct subsystem *subsys, char *page) +static ssize_t ipl_ccw_loadparm_show(struct kset *kset, char *page) { char loadparm[LOADPARM_LEN + 1] = {}; @@ -410,7 +469,7 @@ static void reipl_get_ascii_loadparm(char *loadparm) strstrip(loadparm); } -static ssize_t reipl_ccw_loadparm_show(struct subsystem *subsys, char *page) +static ssize_t reipl_ccw_loadparm_show(struct kset *kset, char *page) { char buf[LOADPARM_LEN + 1]; @@ -418,7 +477,7 @@ static ssize_t reipl_ccw_loadparm_show(struct subsystem *subsys, char *page) return sprintf(page, "%s\n", buf); } -static ssize_t reipl_ccw_loadparm_store(struct subsystem *subsys, +static ssize_t reipl_ccw_loadparm_store(struct kset *kset, const char *buf, size_t len) { int i, lp_len; @@ -485,34 +544,40 @@ static int reipl_set_type(enum ipl_type type) switch(type) { case IPL_TYPE_CCW: if (MACHINE_IS_VM) - reipl_method = IPL_METHOD_CCW_VM; + reipl_method = REIPL_METHOD_CCW_VM; else - reipl_method = IPL_METHOD_CCW_CIO; + reipl_method = REIPL_METHOD_CCW_CIO; break; case IPL_TYPE_FCP: if (diag308_set_works) - reipl_method = IPL_METHOD_FCP_RW_DIAG; + reipl_method = REIPL_METHOD_FCP_RW_DIAG; else if (MACHINE_IS_VM) - reipl_method = IPL_METHOD_FCP_RO_VM; + reipl_method = REIPL_METHOD_FCP_RO_VM; else - reipl_method = IPL_METHOD_FCP_RO_DIAG; + reipl_method = REIPL_METHOD_FCP_RO_DIAG; + break; + case IPL_TYPE_FCP_DUMP: + reipl_method = REIPL_METHOD_FCP_DUMP; break; case IPL_TYPE_NSS: - reipl_method = IPL_METHOD_NSS; + reipl_method = REIPL_METHOD_NSS; + break; + case IPL_TYPE_UNKNOWN: + reipl_method = REIPL_METHOD_DEFAULT; break; default: - reipl_method = IPL_METHOD_NONE; + BUG(); } reipl_type = type; return 0; } 
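With the rework the dump configuration gets its own DUMP_TYPE_*/DUMP_METHOD_* enums and string constants instead of reusing the IPL ones, and setup_ipl_info() caches the IPL parameters once at boot so later code tests ipl_info.type rather than re-reading the parameter block. A stand-alone sketch of the type-to-method mapping that dump_set_type() below performs — machine_is_vm_model stands in for the real MACHINE_IS_VM test:

#include <stdio.h>

enum dump_type_model   { DT_NONE, DT_CCW, DT_FCP };
enum dump_method_model { DM_NONE, DM_CCW_CIO, DM_CCW_VM, DM_CCW_DIAG, DM_FCP_DIAG };

static int machine_is_vm_model = 1;	/* pretend we run under z/VM */

static enum dump_method_model dump_method_for(enum dump_type_model type)
{
	switch (type) {
	case DT_CCW:
		/* CCW dump goes through CP under z/VM, through cio otherwise */
		return machine_is_vm_model ? DM_CCW_VM : DM_CCW_CIO;
	case DT_FCP:
		return DM_FCP_DIAG;	/* FCP dump always uses diag 308 */
	default:
		return DM_NONE;		/* "none" disables the dump action */
	}
}

int main(void)
{
	printf("ccw -> method %d\n", dump_method_for(DT_CCW));
	printf("fcp -> method %d\n", dump_method_for(DT_FCP));
	return 0;
}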
-static ssize_t reipl_type_show(struct subsystem *subsys, char *page) +static ssize_t reipl_type_show(struct kset *kset, char *page) { return sprintf(page, "%s\n", ipl_type_str(reipl_type)); } -static ssize_t reipl_type_store(struct subsystem *subsys, const char *buf, +static ssize_t reipl_type_store(struct kset *kset, const char *buf, size_t len) { int rc = -EINVAL; @@ -579,43 +644,43 @@ static struct attribute_group dump_ccw_attr_group = { /* dump type */ -static int dump_set_type(enum ipl_type type) +static int dump_set_type(enum dump_type type) { if (!(dump_capabilities & type)) return -EINVAL; switch(type) { - case IPL_TYPE_CCW: + case DUMP_TYPE_CCW: if (MACHINE_IS_VM) - dump_method = IPL_METHOD_CCW_VM; + dump_method = DUMP_METHOD_CCW_VM; else - dump_method = IPL_METHOD_CCW_CIO; + dump_method = DUMP_METHOD_CCW_CIO; break; - case IPL_TYPE_FCP: - dump_method = IPL_METHOD_FCP_RW_DIAG; + case DUMP_TYPE_FCP: + dump_method = DUMP_METHOD_FCP_DIAG; break; default: - dump_method = IPL_METHOD_NONE; + dump_method = DUMP_METHOD_NONE; } dump_type = type; return 0; } -static ssize_t dump_type_show(struct subsystem *subsys, char *page) +static ssize_t dump_type_show(struct kset *kset, char *page) { - return sprintf(page, "%s\n", ipl_type_str(dump_type)); + return sprintf(page, "%s\n", dump_type_str(dump_type)); } -static ssize_t dump_type_store(struct subsystem *subsys, const char *buf, +static ssize_t dump_type_store(struct kset *kset, const char *buf, size_t len) { int rc = -EINVAL; - if (strncmp(buf, IPL_NONE_STR, strlen(IPL_NONE_STR)) == 0) - rc = dump_set_type(IPL_TYPE_NONE); - else if (strncmp(buf, IPL_CCW_STR, strlen(IPL_CCW_STR)) == 0) - rc = dump_set_type(IPL_TYPE_CCW); - else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0) - rc = dump_set_type(IPL_TYPE_FCP); + if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0) + rc = dump_set_type(DUMP_TYPE_NONE); + else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0) + rc = dump_set_type(DUMP_TYPE_CCW); + else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0) + rc = dump_set_type(DUMP_TYPE_FCP); return (rc != 0) ? 
rc : len; } @@ -632,12 +697,12 @@ static decl_subsys(shutdown_actions, NULL, NULL); /* on panic */ -static ssize_t on_panic_show(struct subsystem *subsys, char *page) +static ssize_t on_panic_show(struct kset *kset, char *page) { return sprintf(page, "%s\n", shutdown_action_str(on_panic_action)); } -static ssize_t on_panic_store(struct subsystem *subsys, const char *buf, +static ssize_t on_panic_store(struct kset *kset, const char *buf, size_t len) { if (strncmp(buf, SHUTDOWN_REIPL_STR, strlen(SHUTDOWN_REIPL_STR)) == 0) @@ -664,14 +729,14 @@ void do_reipl(void) char loadparm[LOADPARM_LEN + 1]; switch (reipl_method) { - case IPL_METHOD_CCW_CIO: + case REIPL_METHOD_CCW_CIO: devid.devno = reipl_block_ccw->ipl_info.ccw.devno; - if (ipl_get_type() == IPL_TYPE_CCW && devid.devno == ipl_devno) + if (ipl_info.type == IPL_TYPE_CCW && devid.devno == ipl_devno) diag308(DIAG308_IPL, NULL); devid.ssid = 0; reipl_ccw_dev(&devid); break; - case IPL_METHOD_CCW_VM: + case REIPL_METHOD_CCW_VM: reipl_get_ascii_loadparm(loadparm); if (strlen(loadparm) == 0) sprintf(buf, "IPL %X", @@ -681,30 +746,32 @@ void do_reipl(void) reipl_block_ccw->ipl_info.ccw.devno, loadparm); __cpcmd(buf, NULL, 0, NULL); break; - case IPL_METHOD_CCW_DIAG: + case REIPL_METHOD_CCW_DIAG: diag308(DIAG308_SET, reipl_block_ccw); diag308(DIAG308_IPL, NULL); break; - case IPL_METHOD_FCP_RW_DIAG: + case REIPL_METHOD_FCP_RW_DIAG: diag308(DIAG308_SET, reipl_block_fcp); diag308(DIAG308_IPL, NULL); break; - case IPL_METHOD_FCP_RO_DIAG: + case REIPL_METHOD_FCP_RO_DIAG: diag308(DIAG308_IPL, NULL); break; - case IPL_METHOD_FCP_RO_VM: + case REIPL_METHOD_FCP_RO_VM: __cpcmd("IPL", NULL, 0, NULL); break; - case IPL_METHOD_NSS: + case REIPL_METHOD_NSS: sprintf(buf, "IPL %s", reipl_nss_name); __cpcmd(buf, NULL, 0, NULL); break; - case IPL_METHOD_NONE: - default: + case REIPL_METHOD_DEFAULT: if (MACHINE_IS_VM) __cpcmd("IPL", NULL, 0, NULL); diag308(DIAG308_IPL, NULL); break; + case REIPL_METHOD_FCP_DUMP: + default: + break; } signal_processor(smp_processor_id(), sigp_stop_and_store_status); } @@ -715,28 +782,28 @@ static void do_dump(void) static char buf[100]; switch (dump_method) { - case IPL_METHOD_CCW_CIO: + case DUMP_METHOD_CCW_CIO: smp_send_stop(); devid.devno = dump_block_ccw->ipl_info.ccw.devno; devid.ssid = 0; reipl_ccw_dev(&devid); break; - case IPL_METHOD_CCW_VM: + case DUMP_METHOD_CCW_VM: smp_send_stop(); sprintf(buf, "STORE STATUS"); __cpcmd(buf, NULL, 0, NULL); sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno); __cpcmd(buf, NULL, 0, NULL); break; - case IPL_METHOD_CCW_DIAG: + case DUMP_METHOD_CCW_DIAG: diag308(DIAG308_SET, dump_block_ccw); diag308(DIAG308_DUMP, NULL); break; - case IPL_METHOD_FCP_RW_DIAG: + case DUMP_METHOD_FCP_DIAG: diag308(DIAG308_SET, dump_block_fcp); diag308(DIAG308_DUMP, NULL); break; - case IPL_METHOD_NONE: + case DUMP_METHOD_NONE: default: return; } @@ -777,12 +844,13 @@ static int __init ipl_init(void) rc = firmware_register(&ipl_subsys); if (rc) return rc; - switch (ipl_get_type()) { + switch (ipl_info.type) { case IPL_TYPE_CCW: rc = sysfs_create_group(&ipl_subsys.kset.kobj, &ipl_ccw_attr_group); break; case IPL_TYPE_FCP: + case IPL_TYPE_FCP_DUMP: rc = ipl_register_fcp_files(); break; case IPL_TYPE_NSS: @@ -852,7 +920,7 @@ static int __init reipl_ccw_init(void) /* FIXME: check for diag308_set_works when enabling diag ccw reipl */ if (!MACHINE_IS_VM) sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO; - if (ipl_get_type() == IPL_TYPE_CCW) + if (ipl_info.type == IPL_TYPE_CCW) reipl_block_ccw->ipl_info.ccw.devno 
= ipl_devno; reipl_capabilities |= IPL_TYPE_CCW; return 0; @@ -862,9 +930,9 @@ static int __init reipl_fcp_init(void) { int rc; - if ((!diag308_set_works) && (ipl_get_type() != IPL_TYPE_FCP)) + if ((!diag308_set_works) && (ipl_info.type != IPL_TYPE_FCP)) return 0; - if ((!diag308_set_works) && (ipl_get_type() == IPL_TYPE_FCP)) + if ((!diag308_set_works) && (ipl_info.type == IPL_TYPE_FCP)) make_attrs_ro(reipl_fcp_attrs); reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL); @@ -875,7 +943,7 @@ static int __init reipl_fcp_init(void) free_page((unsigned long)reipl_block_fcp); return rc; } - if (ipl_get_type() == IPL_TYPE_FCP) { + if (ipl_info.type == IPL_TYPE_FCP) { memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); } else { reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN; @@ -909,7 +977,7 @@ static int __init reipl_init(void) rc = reipl_nss_init(); if (rc) return rc; - rc = reipl_set_type(ipl_get_type()); + rc = reipl_set_type(ipl_info.type); if (rc) return rc; return 0; @@ -931,7 +999,7 @@ static int __init dump_ccw_init(void) dump_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION; dump_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN; dump_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW; - dump_capabilities |= IPL_TYPE_CCW; + dump_capabilities |= DUMP_TYPE_CCW; return 0; } @@ -956,7 +1024,7 @@ static int __init dump_fcp_init(void) dump_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN; dump_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP; dump_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_DUMP; - dump_capabilities |= IPL_TYPE_FCP; + dump_capabilities |= DUMP_TYPE_FCP; return 0; } @@ -995,7 +1063,7 @@ static int __init dump_init(void) rc = dump_fcp_init(); if (rc) return rc; - dump_set_type(IPL_TYPE_NONE); + dump_set_type(DUMP_TYPE_NONE); return 0; } @@ -1038,6 +1106,27 @@ static int __init s390_ipl_init(void) __initcall(s390_ipl_init); +void __init ipl_save_parameters(void) +{ + struct cio_iplinfo iplinfo; + unsigned int *ipl_ptr; + void *src, *dst; + + if (cio_get_iplinfo(&iplinfo)) + return; + + ipl_devno = iplinfo.devno; + ipl_flags |= IPL_DEVNO_VALID; + if (!iplinfo.is_qdio) + return; + ipl_flags |= IPL_PARMBLOCK_VALID; + ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR; + src = (void *)(unsigned long)*ipl_ptr; + dst = (void *)IPL_PARMBLOCK_ORIGIN; + memmove(dst, src, PAGE_SIZE); + *ipl_ptr = IPL_PARMBLOCK_ORIGIN; +} + static LIST_HEAD(rcall); static DEFINE_MUTEX(rcall_mutex); diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 993f3538149..23c61f6d965 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -516,7 +516,7 @@ out: return 1; } -static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) +int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); @@ -603,7 +603,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, ret = NOTIFY_STOP; break; case DIE_TRAP: - case DIE_PAGE_FAULT: /* kprobe_running() needs smp_processor_id() */ preempt_disable(); if (kprobe_running() && diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 39d1dd75252..59b4e796680 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -31,6 +31,7 @@ #include <linux/string.h> #include <linux/kernel.h> #include <linux/moduleloader.h> +#include <linux/bug.h> #if 0 #define DEBUGP printk @@ -398,9 +399,10 @@ int module_finalize(const Elf_Ehdr *hdr, struct module *me) { vfree(me->arch.syminfo); - return 0; + 
return module_bug_finalize(hdr, sechdrs, me); } void module_arch_cleanup(struct module *mod) { + module_bug_cleanup(mod); } diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 5acfac654f9..11d9b019762 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -280,24 +280,26 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp, return 0; } -asmlinkage long sys_fork(struct pt_regs regs) +asmlinkage long sys_fork(void) { - return do_fork(SIGCHLD, regs.gprs[15], ®s, 0, NULL, NULL); + struct pt_regs *regs = task_pt_regs(current); + return do_fork(SIGCHLD, regs->gprs[15], regs, 0, NULL, NULL); } -asmlinkage long sys_clone(struct pt_regs regs) +asmlinkage long sys_clone(void) { - unsigned long clone_flags; - unsigned long newsp; + struct pt_regs *regs = task_pt_regs(current); + unsigned long clone_flags; + unsigned long newsp; int __user *parent_tidptr, *child_tidptr; - clone_flags = regs.gprs[3]; - newsp = regs.orig_gpr2; - parent_tidptr = (int __user *) regs.gprs[4]; - child_tidptr = (int __user *) regs.gprs[5]; - if (!newsp) - newsp = regs.gprs[15]; - return do_fork(clone_flags, newsp, ®s, 0, + clone_flags = regs->gprs[3]; + newsp = regs->orig_gpr2; + parent_tidptr = (int __user *) regs->gprs[4]; + child_tidptr = (int __user *) regs->gprs[5]; + if (!newsp) + newsp = regs->gprs[15]; + return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr); } @@ -311,40 +313,52 @@ asmlinkage long sys_clone(struct pt_regs regs) * do not have enough call-clobbered registers to hold all * the information you need. */ -asmlinkage long sys_vfork(struct pt_regs regs) +asmlinkage long sys_vfork(void) { + struct pt_regs *regs = task_pt_regs(current); return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, - regs.gprs[15], ®s, 0, NULL, NULL); + regs->gprs[15], regs, 0, NULL, NULL); +} + +asmlinkage void execve_tail(void) +{ + task_lock(current); + current->ptrace &= ~PT_DTRACE; + task_unlock(current); + current->thread.fp_regs.fpc = 0; + if (MACHINE_HAS_IEEE) + asm volatile("sfpc %0,%0" : : "d" (0)); } /* * sys_execve() executes a new program. */ -asmlinkage long sys_execve(struct pt_regs regs) +asmlinkage long sys_execve(void) { - int error; - char * filename; - - filename = getname((char __user *) regs.orig_gpr2); - error = PTR_ERR(filename); - if (IS_ERR(filename)) - goto out; - error = do_execve(filename, (char __user * __user *) regs.gprs[3], - (char __user * __user *) regs.gprs[4], ®s); - if (error == 0) { - task_lock(current); - current->ptrace &= ~PT_DTRACE; - task_unlock(current); - current->thread.fp_regs.fpc = 0; - if (MACHINE_HAS_IEEE) - asm volatile("sfpc %0,%0" : : "d" (0)); + struct pt_regs *regs = task_pt_regs(current); + char *filename; + unsigned long result; + int rc; + + filename = getname((char __user *) regs->orig_gpr2); + if (IS_ERR(filename)) { + result = PTR_ERR(filename); + goto out; } - putname(filename); + rc = do_execve(filename, (char __user * __user *) regs->gprs[3], + (char __user * __user *) regs->gprs[4], regs); + if (rc) { + result = rc; + goto out_putname; + } + execve_tail(); + result = regs->gprs[2]; +out_putname: + putname(filename); out: - return error; + return result; } - /* * fill in the FPU structure for a core dump. */ diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 863c8d08c02..6bfb0889eb1 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -65,7 +65,7 @@ long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | * User copy operations. 
*/ struct uaccess_ops uaccess; -EXPORT_SYMBOL_GPL(uaccess); +EXPORT_SYMBOL(uaccess); /* * Machine setup.. @@ -74,6 +74,8 @@ unsigned int console_mode = 0; unsigned int console_devno = -1; unsigned int console_irq = -1; unsigned long machine_flags = 0; +unsigned long elf_hwcap = 0; +char elf_platform[ELF_PLATFORM_SIZE]; struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS]; volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ @@ -285,6 +287,26 @@ static void __init conmode_default(void) } } +#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) +static void __init setup_zfcpdump(unsigned int console_devno) +{ + static char str[64]; + + if (ipl_info.type != IPL_TYPE_FCP_DUMP) + return; + if (console_devno != -1) + sprintf(str, "cio_ignore=all,!0.0.%04x,!0.0.%04x", + ipl_info.data.fcp.dev_id.devno, console_devno); + else + sprintf(str, "cio_ignore=all,!0.0.%04x", + ipl_info.data.fcp.dev_id.devno); + strcat(COMMAND_LINE, str); + console_loglevel = 2; +} +#else +static inline void setup_zfcpdump(unsigned int console_devno) {} +#endif /* CONFIG_ZFCPDUMP */ + #ifdef CONFIG_SMP void (*_machine_restart)(char *command) = machine_restart_smp; void (*_machine_halt)(void) = machine_halt_smp; @@ -586,13 +608,20 @@ setup_resources(void) } } +unsigned long real_memory_size; +EXPORT_SYMBOL_GPL(real_memory_size); + static void __init setup_memory_end(void) { - unsigned long real_size, memory_size; + unsigned long memory_size; unsigned long max_mem, max_phys; int i; - memory_size = real_size = 0; +#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) + if (ipl_info.type == IPL_TYPE_FCP_DUMP) + memory_end = ZFCPDUMP_HSA_SIZE; +#endif + memory_size = 0; max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE; memory_end &= PAGE_MASK; @@ -601,7 +630,8 @@ static void __init setup_memory_end(void) for (i = 0; i < MEMORY_CHUNKS; i++) { struct mem_chunk *chunk = &memory_chunk[i]; - real_size = max(real_size, chunk->addr + chunk->size); + real_memory_size = max(real_memory_size, + chunk->addr + chunk->size); if (chunk->addr >= max_mem) { memset(chunk, 0, sizeof(*chunk)); continue; @@ -721,6 +751,98 @@ setup_memory(void) #endif } +static __init unsigned int stfl(void) +{ + asm volatile( + " .insn s,0xb2b10000,0(0)\n" /* stfl */ + "0:\n" + EX_TABLE(0b,0b)); + return S390_lowcore.stfl_fac_list; +} + +static __init int stfle(unsigned long long *list, int doublewords) +{ + typedef struct { unsigned long long _[doublewords]; } addrtype; + register unsigned long __nr asm("0") = doublewords - 1; + + asm volatile(".insn s,0xb2b00000,%0" /* stfle */ + : "=m" (*(addrtype *) list), "+d" (__nr) : : "cc"); + return __nr + 1; +} + +/* + * Setup hardware capabilities. + */ +static void __init setup_hwcaps(void) +{ + static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 }; + struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data; + unsigned long long facility_list_extended; + unsigned int facility_list; + int i; + + facility_list = stfl(); + /* + * The store facility list bits numbers as found in the principles + * of operation are numbered with bit 1UL<<31 as number 0 to + * bit 1UL<<0 as number 31. 
+ * Bit 0: instructions named N3, "backported" to esa-mode + * Bit 2: z/Architecture mode is active + * Bit 7: the store-facility-list-extended facility is installed + * Bit 17: the message-security assist is installed + * Bit 19: the long-displacement facility is installed + * Bit 21: the extended-immediate facility is installed + * These get translated to: + * HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1, + * HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3, + * HWCAP_S390_LDISP bit 4, and HWCAP_S390_EIMM bit 5. + */ + for (i = 0; i < 6; i++) + if (facility_list & (1UL << (31 - stfl_bits[i]))) + elf_hwcap |= 1UL << i; + + /* + * Check for additional facilities with store-facility-list-extended. + * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0 + * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information + * as stored by stfl, bits 32-xxx contain additional facilities. + * How many facility words are stored depends on the number of + * doublewords passed to the instruction. The additional facilites + * are: + * Bit 43: decimal floating point facility is installed + * translated to: + * HWCAP_S390_DFP bit 6. + */ + if ((elf_hwcap & (1UL << 2)) && + stfle(&facility_list_extended, 1) > 0) { + if (facility_list_extended & (1ULL << (64 - 43))) + elf_hwcap |= 1UL << 6; + } + + switch (cpuinfo->cpu_id.machine) { + case 0x9672: +#if !defined(CONFIG_64BIT) + default: /* Use "g5" as default for 31 bit kernels. */ +#endif + strcpy(elf_platform, "g5"); + break; + case 0x2064: + case 0x2066: +#if defined(CONFIG_64BIT) + default: /* Use "z900" as default for 64 bit kernels. */ +#endif + strcpy(elf_platform, "z900"); + break; + case 0x2084: + case 0x2086: + strcpy(elf_platform, "z990"); + break; + case 0x2094: + strcpy(elf_platform, "z9-109"); + break; + } +} + /* * Setup function called from init/main.c just after the banner * was printed. @@ -765,6 +887,7 @@ setup_arch(char **cmdline_p) parse_early_param(); + setup_ipl_info(); setup_memory_end(); setup_addressing_mode(); setup_memory(); @@ -776,12 +899,20 @@ setup_arch(char **cmdline_p) smp_setup_cpu_possible_map(); /* + * Setup capabilities (ELF_HWCAP & ELF_PLATFORM). + */ + setup_hwcaps(); + + /* * Create kernel page tables and switch to virtual addressing. 
*/ paging_init(); /* Setup default console */ conmode_default(); + + /* Setup zfcpdump support */ + setup_zfcpdump(console_devno); } void print_cpu_info(struct cpuinfo_S390 *cpuinfo) @@ -807,8 +938,12 @@ void print_cpu_info(struct cpuinfo_S390 *cpuinfo) static int show_cpuinfo(struct seq_file *m, void *v) { + static const char *hwcap_str[7] = { + "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp" + }; struct cpuinfo_S390 *cpuinfo; unsigned long n = (unsigned long) v - 1; + int i; s390_adjust_jiffies(); preempt_disable(); @@ -818,7 +953,13 @@ static int show_cpuinfo(struct seq_file *m, void *v) "bogomips per cpu: %lu.%02lu\n", num_online_cpus(), loops_per_jiffy/(500000/HZ), (loops_per_jiffy/(5000/HZ))%100); + seq_puts(m, "features\t: "); + for (i = 0; i < 7; i++) + if (hwcap_str[i] && (elf_hwcap & (1UL << i))) + seq_printf(m, "%s ", hwcap_str[i]); + seq_puts(m, "\n"); } + if (cpu_online(n)) { #ifdef CONFIG_SMP if (smp_processor_id() == n) diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 554f9cf7499..3c41907799a 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -102,9 +102,9 @@ sys_sigaction(int sig, const struct old_sigaction __user *act, } asmlinkage long -sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, - struct pt_regs *regs) +sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss) { + struct pt_regs *regs = task_pt_regs(current); return do_sigaltstack(uss, uoss, regs->gprs[15]); } @@ -163,8 +163,9 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) return 0; } -asmlinkage long sys_sigreturn(struct pt_regs *regs) +asmlinkage long sys_sigreturn(void) { + struct pt_regs *regs = task_pt_regs(current); sigframe __user *frame = (sigframe __user *)regs->gprs[15]; sigset_t set; @@ -189,8 +190,9 @@ badframe: return 0; } -asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) +asmlinkage long sys_rt_sigreturn(void) { + struct pt_regs *regs = task_pt_regs(current); rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15]; sigset_t set; diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 97764f710bb..3754e2031b3 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -1,12 +1,12 @@ /* * arch/s390/kernel/smp.c * - * Copyright (C) IBM Corp. 1999,2006 + * Copyright IBM Corp. 1999,2007 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), - * Martin Schwidefsky (schwidefsky@de.ibm.com) - * Heiko Carstens (heiko.carstens@de.ibm.com) + * Martin Schwidefsky (schwidefsky@de.ibm.com) + * Heiko Carstens (heiko.carstens@de.ibm.com) * - * based on other smp stuff by + * based on other smp stuff by * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net> * (c) 1998 Ingo Molnar * @@ -31,6 +31,7 @@ #include <linux/interrupt.h> #include <linux/cpu.h> #include <linux/timex.h> +#include <linux/bootmem.h> #include <asm/ipl.h> #include <asm/setup.h> #include <asm/sigp.h> @@ -40,17 +41,19 @@ #include <asm/cpcmd.h> #include <asm/tlbflush.h> #include <asm/timer.h> - -extern volatile int __cpu_logical_map[]; +#include <asm/lowcore.h> /* * An array with a pointer the lowcore of every CPU. 
*/ - struct _lowcore *lowcore_ptr[NR_CPUS]; +EXPORT_SYMBOL(lowcore_ptr); cpumask_t cpu_online_map = CPU_MASK_NONE; +EXPORT_SYMBOL(cpu_online_map); + cpumask_t cpu_possible_map = CPU_MASK_NONE; +EXPORT_SYMBOL(cpu_possible_map); static struct task_struct *current_set[NR_CPUS]; @@ -70,7 +73,7 @@ struct call_data_struct { int wait; }; -static struct call_data_struct * call_data; +static struct call_data_struct *call_data; /* * 'Call function' interrupt callback @@ -150,8 +153,8 @@ out: * * Run a function on all other CPUs. * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler. You may call it from a bottom half. + * You must not call this function with disabled interrupts, from a + * hardware interrupt handler or from a bottom half. */ int smp_call_function(void (*func) (void *info), void *info, int nonatomic, int wait) @@ -177,11 +180,11 @@ EXPORT_SYMBOL(smp_call_function); * * Run a function on one processor. * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler. You may call it from a bottom half. + * You must not call this function with disabled interrupts, from a + * hardware interrupt handler or from a bottom half. */ int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic, - int wait, int cpu) + int wait, int cpu) { cpumask_t map = CPU_MASK_NONE; @@ -195,9 +198,9 @@ EXPORT_SYMBOL(smp_call_function_on); static void do_send_stop(void) { - int cpu, rc; + int cpu, rc; - /* stop all processors */ + /* stop all processors */ for_each_online_cpu(cpu) { if (cpu == smp_processor_id()) continue; @@ -209,9 +212,9 @@ static void do_send_stop(void) static void do_store_status(void) { - int cpu, rc; + int cpu, rc; - /* store status of all processors in their lowcores (real 0) */ + /* store status of all processors in their lowcores (real 0) */ for_each_online_cpu(cpu) { if (cpu == smp_processor_id()) continue; @@ -219,8 +222,8 @@ static void do_store_status(void) rc = signal_processor_p( (__u32)(unsigned long) lowcore_ptr[cpu], cpu, sigp_store_status_at_address); - } while(rc == sigp_busy); - } + } while (rc == sigp_busy); + } } static void do_wait_for_stop(void) @@ -231,7 +234,7 @@ static void do_wait_for_stop(void) for_each_online_cpu(cpu) { if (cpu == smp_processor_id()) continue; - while(!smp_cpu_not_running(cpu)) + while (!smp_cpu_not_running(cpu)) cpu_relax(); } } @@ -245,7 +248,7 @@ void smp_send_stop(void) /* Disable all interrupts/machine checks */ __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); - /* write magic number to zero page (absolute 0) */ + /* write magic number to zero page (absolute 0) */ lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC; /* stop other processors. */ @@ -261,8 +264,7 @@ void smp_send_stop(void) /* * Reboot, halt and power_off routines for SMP. */ - -void machine_restart_smp(char * __unused) +void machine_restart_smp(char *__unused) { smp_send_stop(); do_reipl(); @@ -293,17 +295,17 @@ void machine_power_off_smp(void) static void do_ext_call_interrupt(__u16 code) { - unsigned long bits; + unsigned long bits; - /* - * handle bit signal external calls - * - * For the ec_schedule signal we have to do nothing. All the work - * is done automatically when we return from the interrupt. - */ + /* + * handle bit signal external calls + * + * For the ec_schedule signal we have to do nothing. All the work + * is done automatically when we return from the interrupt. 
+ */ bits = xchg(&S390_lowcore.ext_call_fast, 0); - if (test_bit(ec_call_function, &bits)) + if (test_bit(ec_call_function, &bits)) do_call_function(); } @@ -313,11 +315,11 @@ static void do_ext_call_interrupt(__u16 code) */ static void smp_ext_bitcall(int cpu, ec_bit_sig sig) { - /* - * Set signaling bit in lowcore of target cpu and kick it - */ + /* + * Set signaling bit in lowcore of target cpu and kick it + */ set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); - while(signal_processor(cpu, sigp_emergency_signal) == sigp_busy) + while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy) udelay(10); } @@ -332,7 +334,7 @@ void smp_ptlb_callback(void *info) void smp_ptlb_all(void) { - on_each_cpu(smp_ptlb_callback, NULL, 0, 1); + on_each_cpu(smp_ptlb_callback, NULL, 0, 1); } EXPORT_SYMBOL(smp_ptlb_all); #endif /* ! CONFIG_64BIT */ @@ -344,7 +346,7 @@ EXPORT_SYMBOL(smp_ptlb_all); */ void smp_send_reschedule(int cpu) { - smp_ext_bitcall(cpu, ec_schedule); + smp_ext_bitcall(cpu, ec_schedule); } /* @@ -358,11 +360,12 @@ struct ec_creg_mask_parms { /* * callback for setting/clearing control bits */ -static void smp_ctl_bit_callback(void *info) { +static void smp_ctl_bit_callback(void *info) +{ struct ec_creg_mask_parms *pp = info; unsigned long cregs[16]; int i; - + __ctl_store(cregs, 0, 15); for (i = 0; i <= 15; i++) cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i]; @@ -381,6 +384,7 @@ void smp_ctl_set_bit(int cr, int bit) parms.orvals[cr] = 1 << bit; on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); } +EXPORT_SYMBOL(smp_ctl_set_bit); /* * Clear a bit in a control register of all cpus @@ -394,13 +398,72 @@ void smp_ctl_clear_bit(int cr, int bit) parms.andvals[cr] = ~(1L << bit); on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); } +EXPORT_SYMBOL(smp_ctl_clear_bit); + +#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) + +/* + * zfcpdump_prefix_array holds prefix registers for the following scenario: + * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to + * save its prefix registers, since they get lost, when switching from 31 bit + * to 64 bit. + */ +unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \ + __attribute__((__section__(".data"))); + +static void __init smp_get_save_areas(void) +{ + unsigned int cpu, cpu_num, rc; + __u16 boot_cpu_addr; + + if (ipl_info.type != IPL_TYPE_FCP_DUMP) + return; + boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr; + cpu_num = 1; + for (cpu = 0; cpu <= 65535; cpu++) { + if ((u16) cpu == boot_cpu_addr) + continue; + __cpu_logical_map[1] = (__u16) cpu; + if (signal_processor(1, sigp_sense) == sigp_not_operational) + continue; + if (cpu_num >= NR_CPUS) { + printk("WARNING: Registers for cpu %i are not " + "saved, since dump kernel was compiled with" + "NR_CPUS=%i!\n", cpu_num, NR_CPUS); + continue; + } + zfcpdump_save_areas[cpu_num] = + alloc_bootmem(sizeof(union save_area)); + while (1) { + rc = signal_processor(1, sigp_stop_and_store_status); + if (rc != sigp_busy) + break; + cpu_relax(); + } + memcpy(zfcpdump_save_areas[cpu_num], + (void *)(unsigned long) store_prefix() + + SAVE_AREA_BASE, SAVE_AREA_SIZE); +#ifdef __s390x__ + /* copy original prefix register */ + zfcpdump_save_areas[cpu_num]->s390x.pref_reg = + zfcpdump_prefix_array[cpu_num]; +#endif + cpu_num++; + } +} + +union save_area *zfcpdump_save_areas[NR_CPUS + 1]; +EXPORT_SYMBOL_GPL(zfcpdump_save_areas); + +#else +#define smp_get_save_areas() do { } while (0) +#endif /* * Lets check how many CPUs we have. 
*/ -static unsigned int -__init smp_count_cpus(void) +static unsigned int __init smp_count_cpus(void) { unsigned int cpu, num_cpus; __u16 boot_cpu_addr; @@ -416,31 +479,30 @@ __init smp_count_cpus(void) if ((__u16) cpu == boot_cpu_addr) continue; __cpu_logical_map[1] = (__u16) cpu; - if (signal_processor(1, sigp_sense) == - sigp_not_operational) + if (signal_processor(1, sigp_sense) == sigp_not_operational) continue; num_cpus++; } - printk("Detected %d CPU's\n",(int) num_cpus); + printk("Detected %d CPU's\n", (int) num_cpus); printk("Boot cpu address %2X\n", boot_cpu_addr); return num_cpus; } /* - * Activate a secondary processor. + * Activate a secondary processor. */ int __devinit start_secondary(void *cpuvoid) { - /* Setup the cpu */ - cpu_init(); + /* Setup the cpu */ + cpu_init(); preempt_disable(); /* Enable TOD clock interrupts on the secondary cpu. */ - init_cpu_timer(); + init_cpu_timer(); #ifdef CONFIG_VIRT_TIMER /* Enable cpu timer interrupts on the secondary cpu. */ - init_cpu_vtimer(); + init_cpu_vtimer(); #endif /* Enable pfault pseudo page faults on this cpu. */ pfault_init(); @@ -449,11 +511,11 @@ int __devinit start_secondary(void *cpuvoid) cpu_set(smp_processor_id(), cpu_online_map); /* Switch on interrupts */ local_irq_enable(); - /* Print info about this processor */ - print_cpu_info(&S390_lowcore.cpu_data); - /* cpu_idle will call schedule for us */ - cpu_idle(); - return 0; + /* Print info about this processor */ + print_cpu_info(&S390_lowcore.cpu_data); + /* cpu_idle will call schedule for us */ + cpu_idle(); + return 0; } static void __init smp_create_idle(unsigned int cpu) @@ -470,56 +532,13 @@ static void __init smp_create_idle(unsigned int cpu) current_set[cpu] = p; } -/* Reserving and releasing of CPUs */ - -static DEFINE_SPINLOCK(smp_reserve_lock); -static int smp_cpu_reserved[NR_CPUS]; - -int -smp_get_cpu(cpumask_t cpu_mask) -{ - unsigned long flags; - int cpu; - - spin_lock_irqsave(&smp_reserve_lock, flags); - /* Try to find an already reserved cpu. */ - for_each_cpu_mask(cpu, cpu_mask) { - if (smp_cpu_reserved[cpu] != 0) { - smp_cpu_reserved[cpu]++; - /* Found one. */ - goto out; - } - } - /* Reserve a new cpu from cpu_mask. 
*/ - for_each_cpu_mask(cpu, cpu_mask) { - if (cpu_online(cpu)) { - smp_cpu_reserved[cpu]++; - goto out; - } - } - cpu = -ENODEV; -out: - spin_unlock_irqrestore(&smp_reserve_lock, flags); - return cpu; -} - -void -smp_put_cpu(int cpu) -{ - unsigned long flags; - - spin_lock_irqsave(&smp_reserve_lock, flags); - smp_cpu_reserved[cpu]--; - spin_unlock_irqrestore(&smp_reserve_lock, flags); -} - -static int -cpu_stopped(int cpu) +static int cpu_stopped(int cpu) { __u32 status; /* Check for stopped state */ - if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) { + if (signal_processor_ps(&status, 0, cpu, sigp_sense) == + sigp_status_stored) { if (status & 0x40) return 1; } @@ -528,14 +547,13 @@ cpu_stopped(int cpu) /* Upping and downing of CPUs */ -int -__cpu_up(unsigned int cpu) +int __cpu_up(unsigned int cpu) { struct task_struct *idle; - struct _lowcore *cpu_lowcore; + struct _lowcore *cpu_lowcore; struct stack_frame *sf; - sigp_ccode ccode; - int curr_cpu; + sigp_ccode ccode; + int curr_cpu; for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) { __cpu_logical_map[cpu] = (__u16) curr_cpu; @@ -548,7 +566,7 @@ __cpu_up(unsigned int cpu) ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), cpu, sigp_set_prefix); - if (ccode){ + if (ccode) { printk("sigp_set_prefix failed for cpu %d " "with condition code %d\n", (int) cpu, (int) ccode); @@ -556,9 +574,9 @@ __cpu_up(unsigned int cpu) } idle = current_set[cpu]; - cpu_lowcore = lowcore_ptr[cpu]; + cpu_lowcore = lowcore_ptr[cpu]; cpu_lowcore->kernel_stack = (unsigned long) - task_stack_page(idle) + (THREAD_SIZE); + task_stack_page(idle) + THREAD_SIZE; sf = (struct stack_frame *) (cpu_lowcore->kernel_stack - sizeof(struct pt_regs) - sizeof(struct stack_frame)); @@ -570,11 +588,11 @@ __cpu_up(unsigned int cpu) " stam 0,15,0(%0)" : : "a" (&cpu_lowcore->access_regs_save_area) : "memory"); cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; - cpu_lowcore->current_task = (unsigned long) idle; - cpu_lowcore->cpu_data.cpu_nr = cpu; + cpu_lowcore->current_task = (unsigned long) idle; + cpu_lowcore->cpu_data.cpu_nr = cpu; eieio(); - while (signal_processor(cpu,sigp_restart) == sigp_busy) + while (signal_processor(cpu, sigp_restart) == sigp_busy) udelay(10); while (!cpu_online(cpu)) @@ -589,6 +607,7 @@ void __init smp_setup_cpu_possible_map(void) { unsigned int phy_cpus, pos_cpus, cpu; + smp_get_save_areas(); phy_cpus = smp_count_cpus(); pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS); @@ -620,18 +639,11 @@ static int __init setup_possible_cpus(char *s) } early_param("possible_cpus", setup_possible_cpus); -int -__cpu_disable(void) +int __cpu_disable(void) { - unsigned long flags; struct ec_creg_mask_parms cr_parms; int cpu = smp_processor_id(); - spin_lock_irqsave(&smp_reserve_lock, flags); - if (smp_cpu_reserved[cpu] != 0) { - spin_unlock_irqrestore(&smp_reserve_lock, flags); - return -EBUSY; - } cpu_clear(cpu, cpu_online_map); /* Disable pfault pseudo page faults on this cpu. 
*/ @@ -642,24 +654,23 @@ __cpu_disable(void) /* disable all external interrupts */ cr_parms.orvals[0] = 0; - cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 | - 1<<11 | 1<<10 | 1<< 6 | 1<< 4); + cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 | + 1 << 11 | 1 << 10 | 1 << 6 | 1 << 4); /* disable all I/O interrupts */ cr_parms.orvals[6] = 0; - cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 | - 1<<27 | 1<<26 | 1<<25 | 1<<24); + cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 | + 1 << 27 | 1 << 26 | 1 << 25 | 1 << 24); /* disable most machine checks */ cr_parms.orvals[14] = 0; - cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24); + cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 | + 1 << 25 | 1 << 24); smp_ctl_bit_callback(&cr_parms); - spin_unlock_irqrestore(&smp_reserve_lock, flags); return 0; } -void -__cpu_die(unsigned int cpu) +void __cpu_die(unsigned int cpu) { /* Wait until target cpu is down */ while (!smp_cpu_not_running(cpu)) @@ -667,13 +678,12 @@ __cpu_die(unsigned int cpu) printk("Processor %d spun down\n", cpu); } -void -cpu_die(void) +void cpu_die(void) { idle_task_exit(); signal_processor(smp_processor_id(), sigp_stop); BUG(); - for(;;); + for (;;); } #endif /* CONFIG_HOTPLUG_CPU */ @@ -686,36 +696,36 @@ void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned long stack; unsigned int cpu; - int i; - - /* request the 0x1201 emergency signal external interrupt */ - if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) - panic("Couldn't request external interrupt 0x1201"); - memset(lowcore_ptr,0,sizeof(lowcore_ptr)); - /* - * Initialize prefix pages and stacks for all possible cpus - */ + int i; + + /* request the 0x1201 emergency signal external interrupt */ + if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) + panic("Couldn't request external interrupt 0x1201"); + memset(lowcore_ptr, 0, sizeof(lowcore_ptr)); + /* + * Initialize prefix pages and stacks for all possible cpus + */ print_cpu_info(&S390_lowcore.cpu_data); - for_each_possible_cpu(i) { + for_each_possible_cpu(i) { lowcore_ptr[i] = (struct _lowcore *) - __get_free_pages(GFP_KERNEL|GFP_DMA, - sizeof(void*) == 8 ? 1 : 0); - stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER); - if (lowcore_ptr[i] == NULL || stack == 0ULL) + __get_free_pages(GFP_KERNEL | GFP_DMA, + sizeof(void*) == 8 ? 
1 : 0); + stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); + if (!lowcore_ptr[i] || !stack) panic("smp_boot_cpus failed to allocate memory\n"); *(lowcore_ptr[i]) = S390_lowcore; - lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE); - stack = __get_free_pages(GFP_KERNEL,0); - if (stack == 0ULL) + lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE; + stack = __get_free_pages(GFP_KERNEL, 0); + if (!stack) panic("smp_boot_cpus failed to allocate memory\n"); - lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE); + lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE; #ifndef CONFIG_64BIT if (MACHINE_HAS_IEEE) { lowcore_ptr[i]->extended_save_area_addr = - (__u32) __get_free_pages(GFP_KERNEL,0); - if (lowcore_ptr[i]->extended_save_area_addr == 0) + (__u32) __get_free_pages(GFP_KERNEL, 0); + if (!lowcore_ptr[i]->extended_save_area_addr) panic("smp_boot_cpus failed to " "allocate memory\n"); } @@ -754,34 +764,63 @@ void smp_cpus_done(unsigned int max_cpus) */ int setup_profiling_timer(unsigned int multiplier) { - return 0; + return 0; } static DEFINE_PER_CPU(struct cpu, cpu_devices); +static ssize_t show_capability(struct sys_device *dev, char *buf) +{ + unsigned int capability; + int rc; + + rc = get_cpu_capability(&capability); + if (rc) + return rc; + return sprintf(buf, "%u\n", capability); +} +static SYSDEV_ATTR(capability, 0444, show_capability, NULL); + +static int __cpuinit smp_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned int)(long)hcpu; + struct cpu *c = &per_cpu(cpu_devices, cpu); + struct sys_device *s = &c->sysdev; + + switch (action) { + case CPU_ONLINE: + if (sysdev_create_file(s, &attr_capability)) + return NOTIFY_BAD; + break; + case CPU_DEAD: + sysdev_remove_file(s, &attr_capability); + break; + } + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata smp_cpu_nb = { + .notifier_call = smp_cpu_notify, +}; + static int __init topology_init(void) { int cpu; - int ret; + + register_cpu_notifier(&smp_cpu_nb); for_each_possible_cpu(cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); + struct sys_device *s = &c->sysdev; c->hotpluggable = 1; - ret = register_cpu(c, cpu); - if (ret) - printk(KERN_WARNING "topology_init: register_cpu %d " - "failed (%d)\n", cpu, ret); + register_cpu(c, cpu); + if (!cpu_online(cpu)) + continue; + s = &c->sysdev; + sysdev_create_file(s, &attr_capability); } return 0; } - subsys_initcall(topology_init); - -EXPORT_SYMBOL(cpu_online_map); -EXPORT_SYMBOL(cpu_possible_map); -EXPORT_SYMBOL(lowcore_ptr); -EXPORT_SYMBOL(smp_ctl_set_bit); -EXPORT_SYMBOL(smp_ctl_clear_bit); -EXPORT_SYMBOL(smp_get_cpu); -EXPORT_SYMBOL(smp_put_cpu); diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c index 584ed95f338..3a77c22cda7 100644 --- a/arch/s390/kernel/sys_s390.c +++ b/arch/s390/kernel/sys_s390.c @@ -266,23 +266,3 @@ s390_fadvise64_64(struct fadvise64_64_args __user *args) return -EFAULT; return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice); } - -/* - * Do a system call from kernel instead of calling sys_execve so we - * end up with proper pt_regs. 
- */ -int kernel_execve(const char *filename, char *const argv[], char *const envp[]) -{ - register const char *__arg1 asm("2") = filename; - register char *const*__arg2 asm("3") = argv; - register char *const*__arg3 asm("4") = envp; - register long __svcres asm("2"); - asm volatile( - "svc %b1" - : "=d" (__svcres) - : "i" (__NR_execve), - "0" (__arg1), - "d" (__arg2), - "d" (__arg3) : "memory"); - return __svcres; -} diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index c774f1069e1..cd8d321cd0c 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -10,7 +10,7 @@ NI_SYSCALL /* 0 */ SYSCALL(sys_exit,sys_exit,sys32_exit_wrapper) -SYSCALL(sys_fork_glue,sys_fork_glue,sys_fork_glue) +SYSCALL(sys_fork,sys_fork,sys_fork) SYSCALL(sys_read,sys_read,sys32_read_wrapper) SYSCALL(sys_write,sys_write,sys32_write_wrapper) SYSCALL(sys_open,sys_open,sys32_open_wrapper) /* 5 */ @@ -19,7 +19,7 @@ SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall) SYSCALL(sys_creat,sys_creat,sys32_creat_wrapper) SYSCALL(sys_link,sys_link,sys32_link_wrapper) SYSCALL(sys_unlink,sys_unlink,sys32_unlink_wrapper) /* 10 */ -SYSCALL(sys_execve_glue,sys_execve_glue,sys32_execve_glue) +SYSCALL(sys_execve,sys_execve,sys32_execve) SYSCALL(sys_chdir,sys_chdir,sys32_chdir_wrapper) SYSCALL(sys_time,sys_ni_syscall,sys32_time_wrapper) /* old time syscall */ SYSCALL(sys_mknod,sys_mknod,sys32_mknod_wrapper) @@ -127,8 +127,8 @@ SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper) /* 115 */ SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper) SYSCALL(sys_ipc,sys_ipc,sys32_ipc_wrapper) SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper) -SYSCALL(sys_sigreturn_glue,sys_sigreturn_glue,sys32_sigreturn_glue) -SYSCALL(sys_clone_glue,sys_clone_glue,sys32_clone_glue) /* 120 */ +SYSCALL(sys_sigreturn,sys_sigreturn,sys32_sigreturn) +SYSCALL(sys_clone,sys_clone,sys32_clone) /* 120 */ SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper) SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper) NI_SYSCALL /* modify_ldt for i386 */ @@ -181,7 +181,7 @@ SYSCALL(sys_nfsservctl,sys_nfsservctl,compat_sys_nfsservctl_wrapper) SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper) /* 170 old setresgid16 syscall */ SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper) /* old getresgid16 syscall */ SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper) -SYSCALL(sys_rt_sigreturn_glue,sys_rt_sigreturn_glue,sys32_rt_sigreturn_glue) +SYSCALL(sys_rt_sigreturn,sys_rt_sigreturn,sys32_rt_sigreturn) SYSCALL(sys_rt_sigaction,sys_rt_sigaction,sys32_rt_sigaction_wrapper) SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,sys32_rt_sigprocmask_wrapper) /* 175 */ SYSCALL(sys_rt_sigpending,sys_rt_sigpending,sys32_rt_sigpending_wrapper) @@ -194,11 +194,11 @@ SYSCALL(sys_chown16,sys_ni_syscall,sys32_chown16_wrapper) /* old chown16 syscall SYSCALL(sys_getcwd,sys_getcwd,sys32_getcwd_wrapper) SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper) SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper) /* 185 */ -SYSCALL(sys_sigaltstack_glue,sys_sigaltstack_glue,sys32_sigaltstack_glue) +SYSCALL(sys_sigaltstack,sys_sigaltstack,sys32_sigaltstack) SYSCALL(sys_sendfile,sys_sendfile64,sys32_sendfile_wrapper) NI_SYSCALL /* streams1 */ NI_SYSCALL /* streams2 */ -SYSCALL(sys_vfork_glue,sys_vfork_glue,sys_vfork_glue) /* 190 */ +SYSCALL(sys_vfork,sys_vfork,sys_vfork) /* 190 */ SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit_wrapper) SYSCALL(sys_mmap2,sys_mmap2,sys32_mmap2_wrapper) 
SYSCALL(sys_truncate64,sys_ni_syscall,sys32_truncate64_wrapper) diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index e1ad464b6f2..711dae8da7a 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -280,7 +280,6 @@ static void clock_comparator_interrupt(__u16 code) } static void etr_reset(void); -static void etr_init(void); static void etr_ext_handler(__u16); /* @@ -355,7 +354,6 @@ void __init time_init(void) #ifdef CONFIG_VIRT_TIMER vtime_init(); #endif - etr_init(); } /* @@ -426,11 +424,11 @@ static struct etr_aib etr_port1; static int etr_port1_uptodate; static unsigned long etr_events; static struct timer_list etr_timer; -static struct tasklet_struct etr_tasklet; static DEFINE_PER_CPU(atomic_t, etr_sync_word); static void etr_timeout(unsigned long dummy); -static void etr_tasklet_fn(unsigned long dummy); +static void etr_work_fn(struct work_struct *work); +static DECLARE_WORK(etr_work, etr_work_fn); /* * The etr get_clock function. It will write the current clock value @@ -507,29 +505,31 @@ static void etr_reset(void) } } -static void etr_init(void) +static int __init etr_init(void) { struct etr_aib aib; if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) - return; + return 0; /* Check if this machine has the steai instruction. */ if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0) set_bit(ETR_FLAG_STEAI, &etr_flags); setup_timer(&etr_timer, etr_timeout, 0UL); - tasklet_init(&etr_tasklet, etr_tasklet_fn, 0); if (!etr_port0_online && !etr_port1_online) set_bit(ETR_FLAG_EACCES, &etr_flags); if (etr_port0_online) { set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } if (etr_port1_online) { set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } + return 0; } +arch_initcall(etr_init); + /* * Two sorts of ETR machine checks. The architecture reads: * "When a machine-check niterruption occurs and if a switch-to-local or @@ -549,7 +549,7 @@ void etr_switch_to_local(void) return; etr_disable_sync_clock(NULL); set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } /* @@ -564,7 +564,7 @@ void etr_sync_check(void) return; etr_disable_sync_clock(NULL); set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } /* @@ -591,13 +591,13 @@ static void etr_ext_handler(__u16 code) * Both ports are not up-to-date now. */ set_bit(ETR_EVENT_PORT_ALERT, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } static void etr_timeout(unsigned long dummy) { set_bit(ETR_EVENT_UPDATE, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } /* @@ -927,7 +927,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib, if (!eacr.e0 && !eacr.e1) return eacr; - /* Update port0 or port1 with aib stored in etr_tasklet_fn. */ + /* Update port0 or port1 with aib stored in etr_work_fn. */ if (aib->esw.q == 0) { /* Information for port 0 stored. */ if (eacr.p0 && !etr_port0_uptodate) { @@ -1007,7 +1007,7 @@ static void etr_update_eacr(struct etr_eacr eacr) * particular this is the only function that calls etr_update_eacr(), * it "controls" the etr control register. */ -static void etr_tasklet_fn(unsigned long dummy) +static void etr_work_fn(struct work_struct *work) { unsigned long long now; struct etr_eacr eacr; @@ -1220,13 +1220,13 @@ static ssize_t etr_online_store(struct sys_device *dev, return count; /* Nothing to do. 
*/ etr_port0_online = value; set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } else { if (etr_port1_online == value) return count; /* Nothing to do. */ etr_port1_online = value; set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } return count; } diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index f0e5a320e2e..49dec830373 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -30,7 +30,7 @@ #include <linux/kallsyms.h> #include <linux/reboot.h> #include <linux/kprobes.h> - +#include <linux/bug.h> #include <asm/system.h> #include <asm/uaccess.h> #include <asm/io.h> @@ -188,18 +188,31 @@ void dump_stack(void) EXPORT_SYMBOL(dump_stack); +static inline int mask_bits(struct pt_regs *regs, unsigned long bits) +{ + return (regs->psw.mask & bits) / ((~bits + 1) & bits); +} + void show_registers(struct pt_regs *regs) { - mm_segment_t old_fs; char *mode; - int i; mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl"; printk("%s PSW : %p %p", mode, (void *) regs->psw.mask, (void *) regs->psw.addr); print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); - printk("%s GPRS: " FOURLONG, mode, + printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " + "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER), + mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO), + mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY), + mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), + mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), + mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); +#ifdef CONFIG_64BIT + printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS)); +#endif + printk("\n%s GPRS: " FOURLONG, mode, regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); printk(" " FOURLONG, regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); @@ -208,41 +221,7 @@ void show_registers(struct pt_regs *regs) printk(" " FOURLONG, regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); -#if 0 - /* FIXME: this isn't needed any more but it changes the ksymoops - * input. To remove or not to remove ... */ - save_access_regs(regs->acrs); - printk("%s ACRS: %08x %08x %08x %08x\n", mode, - regs->acrs[0], regs->acrs[1], regs->acrs[2], regs->acrs[3]); - printk(" %08x %08x %08x %08x\n", - regs->acrs[4], regs->acrs[5], regs->acrs[6], regs->acrs[7]); - printk(" %08x %08x %08x %08x\n", - regs->acrs[8], regs->acrs[9], regs->acrs[10], regs->acrs[11]); - printk(" %08x %08x %08x %08x\n", - regs->acrs[12], regs->acrs[13], regs->acrs[14], regs->acrs[15]); -#endif - - /* - * Print the first 20 byte of the instruction stream at the - * time of the fault. 
- */ - old_fs = get_fs(); - if (regs->psw.mask & PSW_MASK_PSTATE) - set_fs(USER_DS); - else - set_fs(KERNEL_DS); - printk("%s Code: ", mode); - for (i = 0; i < 20; i++) { - unsigned char c; - if (__get_user(c, (char __user *)(regs->psw.addr + i))) { - printk(" Bad PSW."); - break; - } - printk("%02x ", c); - } - set_fs(old_fs); - - printk("\n"); + show_code(regs); } /* This is called from fs/proc/array.c */ @@ -318,6 +297,11 @@ report_user_fault(long interruption_code, struct pt_regs *regs) #endif } +int is_valid_bugaddr(unsigned long addr) +{ + return 1; +} + static void __kprobes inline do_trap(long interruption_code, int signr, char *str, struct pt_regs *regs, siginfo_t *info) @@ -344,8 +328,14 @@ static void __kprobes inline do_trap(long interruption_code, int signr, fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); if (fixup) regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; - else - die(str, regs, interruption_code); + else { + enum bug_trap_type btt; + + btt = report_bug(regs->psw.addr & PSW_ADDR_INSN); + if (btt == BUG_TRAP_TYPE_WARN) + return; + die(str, regs, interruption_code); + } } } diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index c30716ae130..418f6426a94 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -45,6 +45,8 @@ SECTIONS __ex_table : { *(__ex_table) } __stop___ex_table = .; + BUG_TABLE + .data : { /* Data */ *(.data) CONSTRUCTORS @@ -77,6 +79,12 @@ SECTIONS *(.init.text) _einittext = .; } + /* + * .exit.text is discarded at runtime, not link time, + * to deal with references from __bug_table + */ + .exit.text : { *(.exit.text) } + .init.data : { *(.init.data) } . = ALIGN(256); __setup_start = .; @@ -116,7 +124,7 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { - *(.exit.text) *(.exit.data) *(.exitcall.exit) + *(.exit.data) *(.exitcall.exit) } /* Stabs debugging sections. 
*/ diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 9d5b02801b4..1e1a6ee2cac 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -128,7 +128,7 @@ static inline void set_vtimer(__u64 expires) S390_lowcore.last_update_timer = expires; /* store expire time for this CPU timer */ - per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires; + __get_cpu_var(virt_cpu_timer).to_expire = expires; } #else static inline void set_vtimer(__u64 expires) @@ -137,7 +137,7 @@ static inline void set_vtimer(__u64 expires) asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer)); /* store expire time for this CPU timer */ - per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires; + __get_cpu_var(virt_cpu_timer).to_expire = expires; } #endif @@ -145,7 +145,7 @@ static void start_cpu_timer(void) { struct vtimer_queue *vt_list; - vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); + vt_list = &__get_cpu_var(virt_cpu_timer); /* CPU timer interrupt is pending, don't reprogramm it */ if (vt_list->idle & 1LL<<63) @@ -159,7 +159,7 @@ static void stop_cpu_timer(void) { struct vtimer_queue *vt_list; - vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); + vt_list = &__get_cpu_var(virt_cpu_timer); /* nothing to do */ if (list_empty(&vt_list->list)) { @@ -219,7 +219,7 @@ static void do_callbacks(struct list_head *cb_list) if (list_empty(cb_list)) return; - vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); + vt_list = &__get_cpu_var(virt_cpu_timer); list_for_each_entry_safe(event, tmp, cb_list, entry) { fn = event->function; @@ -244,7 +244,6 @@ static void do_callbacks(struct list_head *cb_list) */ static void do_cpu_timer_interrupt(__u16 error_code) { - int cpu; __u64 next, delta; struct vtimer_queue *vt_list; struct vtimer_list *event, *tmp; @@ -253,8 +252,7 @@ static void do_cpu_timer_interrupt(__u16 error_code) struct list_head cb_list; INIT_LIST_HEAD(&cb_list); - cpu = smp_processor_id(); - vt_list = &per_cpu(virt_cpu_timer, cpu); + vt_list = &__get_cpu_var(virt_cpu_timer); /* walk timer list, fire all expired events */ spin_lock(&vt_list->lock); @@ -534,7 +532,7 @@ void init_cpu_vtimer(void) /* enable cpu timer interrupts */ __ctl_set_bit(0,10); - vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); + vt_list = &__get_cpu_var(virt_cpu_timer); INIT_LIST_HEAD(&vt_list->list); spin_lock_init(&vt_list->lock); vt_list->to_expire = 0; diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile index 7a44fed21b3..59aea65ce99 100644 --- a/arch/s390/lib/Makefile +++ b/arch/s390/lib/Makefile @@ -5,6 +5,6 @@ EXTRA_AFLAGS := -traditional lib-y += delay.o string.o uaccess_std.o uaccess_pt.o qrnnd.o -lib-$(CONFIG_32BIT) += div64.o +obj-$(CONFIG_32BIT) += div64.o lib-$(CONFIG_64BIT) += uaccess_mvcos.o lib-$(CONFIG_SMP) += spinlock.o diff --git a/arch/s390/lib/div64.c b/arch/s390/lib/div64.c index 0481f3424a1..a5f8300bf3e 100644 --- a/arch/s390/lib/div64.c +++ b/arch/s390/lib/div64.c @@ -147,5 +147,3 @@ uint32_t __div64_32(uint64_t *n, uint32_t base) } #endif /* MARCH_G5 */ - -EXPORT_SYMBOL(__div64_32); diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 7462aebd3eb..91f705adc3f 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -26,9 +26,9 @@ #include <linux/module.h> #include <linux/hardirq.h> #include <linux/kprobes.h> +#include <linux/uaccess.h> #include <asm/system.h> -#include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/kdebug.h> #include <asm/s390_ext.h> @@ -52,34 +52,24 @@ extern int sysctl_userprocess_debug; 
extern void die(const char *,struct pt_regs *,long); #ifdef CONFIG_KPROBES -static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain); -int register_page_fault_notifier(struct notifier_block *nb) +static inline int notify_page_fault(struct pt_regs *regs, long err) { - return atomic_notifier_chain_register(¬ify_page_fault_chain, nb); -} + int ret = 0; + + /* kprobe_running() needs smp_processor_id() */ + if (!user_mode(regs)) { + preempt_disable(); + if (kprobe_running() && kprobe_fault_handler(regs, 14)) + ret = 1; + preempt_enable(); + } -int unregister_page_fault_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_unregister(¬ify_page_fault_chain, nb); -} - -static inline int notify_page_fault(enum die_val val, const char *str, - struct pt_regs *regs, long err, int trap, int sig) -{ - struct die_args args = { - .regs = regs, - .str = str, - .err = err, - .trapnr = trap, - .signr = sig - }; - return atomic_notifier_call_chain(¬ify_page_fault_chain, val, &args); + return ret; } #else -static inline int notify_page_fault(enum die_val val, const char *str, - struct pt_regs *regs, long err, int trap, int sig) +static inline int notify_page_fault(struct pt_regs *regs, long err) { - return NOTIFY_DONE; + return 0; } #endif @@ -170,74 +160,127 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code, force_sig_info(SIGSEGV, &si, current); } +static void do_no_context(struct pt_regs *regs, unsigned long error_code, + unsigned long address) +{ + const struct exception_table_entry *fixup; + + /* Are we prepared to handle this kernel fault? */ + fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK); + if (fixup) { + regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; + return; + } + + /* + * Oops. The kernel tried to access some bad page. We'll have to + * terminate things with extreme prejudice. + */ + if (check_space(current) == 0) + printk(KERN_ALERT "Unable to handle kernel pointer dereference" + " at virtual kernel address %p\n", (void *)address); + else + printk(KERN_ALERT "Unable to handle kernel paging request" + " at virtual user address %p\n", (void *)address); + + die("Oops", regs, error_code); + do_exit(SIGKILL); +} + +static void do_low_address(struct pt_regs *regs, unsigned long error_code) +{ + /* Low-address protection hit in kernel mode means + NULL pointer write access in kernel mode. */ + if (regs->psw.mask & PSW_MASK_PSTATE) { + /* Low-address protection hit in user mode 'cannot happen'. */ + die ("Low-address protection", regs, error_code); + do_exit(SIGKILL); + } + + do_no_context(regs, error_code, 0); +} + +/* + * We ran out of memory, or some other thing happened to us that made + * us unable to handle the page fault gracefully. + */ +static int do_out_of_memory(struct pt_regs *regs, unsigned long error_code, + unsigned long address) +{ + struct task_struct *tsk = current; + struct mm_struct *mm = tsk->mm; + + up_read(&mm->mmap_sem); + if (is_init(tsk)) { + yield(); + down_read(&mm->mmap_sem); + return 1; + } + printk("VM: killing process %s\n", tsk->comm); + if (regs->psw.mask & PSW_MASK_PSTATE) + do_exit(SIGKILL); + do_no_context(regs, error_code, address); + return 0; +} + +static void do_sigbus(struct pt_regs *regs, unsigned long error_code, + unsigned long address) +{ + struct task_struct *tsk = current; + struct mm_struct *mm = tsk->mm; + + up_read(&mm->mmap_sem); + /* + * Send a sigbus, regardless of whether we were in kernel + * or user mode. 
+ */ + tsk->thread.prot_addr = address; + tsk->thread.trap_no = error_code; + force_sig(SIGBUS, tsk); + + /* Kernel mode? Handle exceptions or die */ + if (!(regs->psw.mask & PSW_MASK_PSTATE)) + do_no_context(regs, error_code, address); +} + #ifdef CONFIG_S390_EXEC_PROTECT extern long sys_sigreturn(struct pt_regs *regs); extern long sys_rt_sigreturn(struct pt_regs *regs); extern long sys32_sigreturn(struct pt_regs *regs); extern long sys32_rt_sigreturn(struct pt_regs *regs); -static inline void do_sigreturn(struct mm_struct *mm, struct pt_regs *regs, - int rt) +static int signal_return(struct mm_struct *mm, struct pt_regs *regs, + unsigned long address, unsigned long error_code) { + u16 instruction; + int rc, compat; + + pagefault_disable(); + rc = __get_user(instruction, (u16 __user *) regs->psw.addr); + pagefault_enable(); + if (rc) + return -EFAULT; + up_read(&mm->mmap_sem); clear_tsk_thread_flag(current, TIF_SINGLE_STEP); #ifdef CONFIG_COMPAT - if (test_tsk_thread_flag(current, TIF_31BIT)) { - if (rt) - sys32_rt_sigreturn(regs); - else - sys32_sigreturn(regs); - return; - } -#endif /* CONFIG_COMPAT */ - if (rt) - sys_rt_sigreturn(regs); + compat = test_tsk_thread_flag(current, TIF_31BIT); + if (compat && instruction == 0x0a77) + sys32_sigreturn(regs); + else if (compat && instruction == 0x0aad) + sys32_rt_sigreturn(regs); else +#endif + if (instruction == 0x0a77) sys_sigreturn(regs); - return; -} - -static int signal_return(struct mm_struct *mm, struct pt_regs *regs, - unsigned long address, unsigned long error_code) -{ - pgd_t *pgd; - pmd_t *pmd; - pte_t *pte; - u16 *instruction; - unsigned long pfn, uaddr = regs->psw.addr; - - spin_lock(&mm->page_table_lock); - pgd = pgd_offset(mm, uaddr); - if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) - goto out_fault; - pmd = pmd_offset(pgd, uaddr); - if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) - goto out_fault; - pte = pte_offset_map(pmd_offset(pgd_offset(mm, uaddr), uaddr), uaddr); - if (!pte || !pte_present(*pte)) - goto out_fault; - pfn = pte_pfn(*pte); - if (!pfn_valid(pfn)) - goto out_fault; - spin_unlock(&mm->page_table_lock); - - instruction = (u16 *) ((pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE-1))); - if (*instruction == 0x0a77) - do_sigreturn(mm, regs, 0); - else if (*instruction == 0x0aad) - do_sigreturn(mm, regs, 1); + else if (instruction == 0x0aad) + sys_rt_sigreturn(regs); else { - printk("- XXX - do_exception: task = %s, primary, NO EXEC " - "-> SIGSEGV\n", current->comm); - up_read(&mm->mmap_sem); current->thread.prot_addr = address; current->thread.trap_no = error_code; do_sigsegv(regs, error_code, SEGV_MAPERR, address); } return 0; -out_fault: - spin_unlock(&mm->page_table_lock); - return -EFAULT; } #endif /* CONFIG_S390_EXEC_PROTECT */ @@ -253,49 +296,23 @@ out_fault: * 3b Region third trans. -> Not present (nullification) */ static inline void -do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) +do_exception(struct pt_regs *regs, unsigned long error_code, int write) { - struct task_struct *tsk; - struct mm_struct *mm; - struct vm_area_struct * vma; - unsigned long address; - const struct exception_table_entry *fixup; - int si_code; + struct task_struct *tsk; + struct mm_struct *mm; + struct vm_area_struct *vma; + unsigned long address; int space; + int si_code; - tsk = current; - mm = tsk->mm; - - if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, - SIGSEGV) == NOTIFY_STOP) + if (notify_page_fault(regs, error_code)) return; - /* - * Check for low-address protection. 
This needs to be treated - * as a special case because the translation exception code - * field is not guaranteed to contain valid data in this case. - */ - if (is_protection && !(S390_lowcore.trans_exc_code & 4)) { - - /* Low-address protection hit in kernel mode means - NULL pointer write access in kernel mode. */ - if (!(regs->psw.mask & PSW_MASK_PSTATE)) { - address = 0; - space = 0; - goto no_context; - } - - /* Low-address protection hit in user mode 'cannot happen'. */ - die ("Low-address protection", regs, error_code); - do_exit(SIGKILL); - } + tsk = current; + mm = tsk->mm; - /* - * get the failing address - * more specific the segment and page table portion of - * the address - */ - address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK; + /* get the failing address and the affected space */ + address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK; space = check_space(tsk); /* @@ -313,7 +330,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) */ local_irq_enable(); - down_read(&mm->mmap_sem); + down_read(&mm->mmap_sem); si_code = SEGV_MAPERR; vma = find_vma(mm, address); @@ -330,19 +347,19 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) return; #endif - if (vma->vm_start <= address) - goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; - if (expand_stack(vma, address)) - goto bad_area; + if (vma->vm_start <= address) + goto good_area; + if (!(vma->vm_flags & VM_GROWSDOWN)) + goto bad_area; + if (expand_stack(vma, address)) + goto bad_area; /* * Ok, we have a good vm_area for this memory access, so * we can handle it.. */ good_area: si_code = SEGV_ACCERR; - if (!is_protection) { + if (!write) { /* page not present, check vm flags */ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) goto bad_area; @@ -357,7 +374,7 @@ survive: * make sure we exit gracefully rather than endlessly redo * the fault. */ - switch (handle_mm_fault(mm, vma, address, is_protection)) { + switch (handle_mm_fault(mm, vma, address, write)) { case VM_FAULT_MINOR: tsk->min_flt++; break; @@ -365,9 +382,12 @@ survive: tsk->maj_flt++; break; case VM_FAULT_SIGBUS: - goto do_sigbus; + do_sigbus(regs, error_code, address); + return; case VM_FAULT_OOM: - goto out_of_memory; + if (do_out_of_memory(regs, error_code, address)) + goto survive; + return; default: BUG(); } @@ -385,75 +405,34 @@ survive: * Fix it, but check if it's kernel or user first.. */ bad_area: - up_read(&mm->mmap_sem); + up_read(&mm->mmap_sem); - /* User mode accesses just cause a SIGSEGV */ - if (regs->psw.mask & PSW_MASK_PSTATE) { - tsk->thread.prot_addr = address; - tsk->thread.trap_no = error_code; + /* User mode accesses just cause a SIGSEGV */ + if (regs->psw.mask & PSW_MASK_PSTATE) { + tsk->thread.prot_addr = address; + tsk->thread.trap_no = error_code; do_sigsegv(regs, error_code, si_code, address); - return; + return; } no_context: - /* Are we prepared to handle this kernel fault? */ - fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK); - if (fixup) { - regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; - return; - } - -/* - * Oops. The kernel tried to access some bad page. We'll have to - * terminate things with extreme prejudice. 
- */
-	if (space == 0)
-		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
-		       " at virtual kernel address %p\n", (void *)address);
-	else
-		printk(KERN_ALERT "Unable to handle kernel paging request"
-		       " at virtual user address %p\n", (void *)address);
-
-	die("Oops", regs, error_code);
-	do_exit(SIGKILL);
-
-
-/*
- * We ran out of memory, or some other thing happened to us that made
- * us unable to handle the page fault gracefully.
-*/
-out_of_memory:
-	up_read(&mm->mmap_sem);
-	if (is_init(tsk)) {
-		yield();
-		down_read(&mm->mmap_sem);
-		goto survive;
-	}
-	printk("VM: killing process %s\n", tsk->comm);
-	if (regs->psw.mask & PSW_MASK_PSTATE)
-		do_exit(SIGKILL);
-	goto no_context;
-
-do_sigbus:
-	up_read(&mm->mmap_sem);
-
-	/*
-	 * Send a sigbus, regardless of whether we were in kernel
-	 * or user mode.
-	 */
-	tsk->thread.prot_addr = address;
-	tsk->thread.trap_no = error_code;
-	force_sig(SIGBUS, tsk);
-
-	/* Kernel mode? Handle exceptions or die */
-	if (!(regs->psw.mask & PSW_MASK_PSTATE))
-		goto no_context;
+	do_no_context(regs, error_code, address);
 }
 
 void __kprobes do_protection_exception(struct pt_regs *regs,
 				       unsigned long error_code)
 {
+	/* Protection exception is supressing, decrement psw address. */
 	regs->psw.addr -= (error_code >> 16);
+	/*
+	 * Check for low-address protection. This needs to be treated
+	 * as a special case because the translation exception code
+	 * field is not guaranteed to contain valid data in this case.
+	 */
+	if (unlikely(!(S390_lowcore.trans_exc_code & 4))) {
+		do_low_address(regs, error_code);
+		return;
+	}
 	do_exception(regs, 4, 1);
 }
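
A note on the __cpu_disable() hunk in arch/s390/kernel/smp.c above: the orvals values are zero and only the andvals masks take effect, so the calls clear the listed subclass-mask bits in control registers 0, 6 and 14 to shut off external, I/O and most machine-check interrupts on the outgoing CPU. The standalone sketch below only illustrates that and-mask arithmetic; the starting register value is made up, and only the bit positions are taken from the hunk.

#include <stdio.h>

int main(void)
{
	/* Bit positions cleared in control register 0 by the hunk above;
	 * orvals is 0, so only the and-mask changes the register image. */
	unsigned long cr0_and = ~(1UL << 15 | 1UL << 14 | 1UL << 13 | 1UL << 12 |
				  1UL << 11 | 1UL << 10 | 1UL << 6 | 1UL << 4);
	unsigned long cr0 = 0xffffUL;	/* made-up register image */

	printf("CR0 %#lx -> %#lx\n", cr0, cr0 & cr0_and);
	return 0;
}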
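
The show_registers() change in arch/s390/kernel/traps.c prints the individual PSW fields through the new mask_bits() helper, which right-aligns a masked field by dividing by the lowest set bit of the mask ((~bits + 1) & bits is the two's-complement trick that isolates that bit). A minimal standalone illustration of the same trick, using a made-up two-bit field instead of the real PSW_MASK_* constants:

#include <stdio.h>

/* Right-align a bit field: mask it, then divide by the lowest set bit
 * of the mask, the same arithmetic as the mask_bits() helper above. */
static unsigned long mask_bits(unsigned long word, unsigned long bits)
{
	return (word & bits) / ((~bits + 1) & bits);
}

int main(void)
{
	unsigned long mask = 0x60;	/* made-up 2-bit field at bits 5..6 */
	unsigned long word = 0x5c;	/* field value is binary 10 */

	printf("field = %lu\n", mask_bits(word, mask));	/* prints 2 */
	return 0;
}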
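
In the reworked signal_return() in arch/s390/mm/fault.c, the handler fetches the faulting halfword with __get_user() under pagefault_disable() and compares it against 0x0a77 and 0x0aad. 0x0a is the s390 svc opcode and the low byte is the immediate system call number, so these should correspond to the svc 119 (sigreturn) and svc 173 (rt_sigreturn) trampolines. The decode sketch below is illustrative userspace code, not kernel code:

#include <stdio.h>

/* Decode an s390 svc halfword: the high byte 0x0a is the svc opcode,
 * the low byte is the immediate system call number. */
static int svc_number(unsigned short insn)
{
	if ((insn >> 8) != 0x0a)
		return -1;		/* not an svc instruction */
	return insn & 0xff;
}

int main(void)
{
	printf("0x0a77 -> svc %d\n", svc_number(0x0a77));	/* 119, sigreturn */
	printf("0x0aad -> svc %d\n", svc_number(0x0aad));	/* 173, rt_sigreturn */
	return 0;
}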