Diffstat (limited to 'arch/s390/kernel')
-rw-r--r-- | arch/s390/kernel/compat_linux.c | 9
-rw-r--r-- | arch/s390/kernel/compat_signal.c | 10
-rw-r--r-- | arch/s390/kernel/crash_dump.c | 219
-rw-r--r-- | arch/s390/kernel/dumpstack.c | 20
-rw-r--r-- | arch/s390/kernel/entry.h | 18
-rw-r--r-- | arch/s390/kernel/ftrace.c | 5
-rw-r--r-- | arch/s390/kernel/irq.c | 85
-rw-r--r-- | arch/s390/kernel/kprobes.c | 144
-rw-r--r-- | arch/s390/kernel/machine_kexec.c | 2
-rw-r--r-- | arch/s390/kernel/perf_cpum_cf.c | 4
-rw-r--r-- | arch/s390/kernel/perf_event.c | 5
-rw-r--r-- | arch/s390/kernel/runtime_instr.c | 4
-rw-r--r-- | arch/s390/kernel/smp.c | 2
-rw-r--r-- | arch/s390/kernel/suspend.c | 1
14 files changed, 385 insertions, 143 deletions
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 8b6e4f5288a..1f1b8c70ab9 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -221,25 +221,26 @@ static int groups16_from_user(struct group_info *group_info, u16 __user *groupli
 
 asmlinkage long sys32_getgroups16(int gidsetsize, u16 __user *grouplist)
 {
+	const struct cred *cred = current_cred();
 	int i;
 
 	if (gidsetsize < 0)
 		return -EINVAL;
 
-	get_group_info(current->cred->group_info);
-	i = current->cred->group_info->ngroups;
+	get_group_info(cred->group_info);
+	i = cred->group_info->ngroups;
 	if (gidsetsize) {
 		if (i > gidsetsize) {
 			i = -EINVAL;
 			goto out;
 		}
-		if (groups16_to_user(grouplist, current->cred->group_info)) {
+		if (groups16_to_user(grouplist, cred->group_info)) {
 			i = -EFAULT;
 			goto out;
 		}
 	}
 out:
-	put_group_info(current->cred->group_info);
+	put_group_info(cred->group_info);
 	return i;
 }
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index c439ac9ced0..1389b637dae 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -332,9 +332,9 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
 	/* Set up to return from userspace. If provided, use a stub
 	   already in userspace. */
 	if (ka->sa.sa_flags & SA_RESTORER) {
-		regs->gprs[14] = (__u64) ka->sa.sa_restorer | PSW32_ADDR_AMODE;
+		regs->gprs[14] = (__u64 __force) ka->sa.sa_restorer | PSW32_ADDR_AMODE;
 	} else {
-		regs->gprs[14] = (__u64) frame->retcode | PSW32_ADDR_AMODE;
+		regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE;
 		if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn,
 			       (u16 __force __user *)(frame->retcode)))
 			goto give_sigsegv;
@@ -400,9 +400,9 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
 	/* Set up to return from userspace. If provided, use a stub
 	   already in userspace. */
 	if (ka->sa.sa_flags & SA_RESTORER) {
-		regs->gprs[14] = (__u64) ka->sa.sa_restorer | PSW32_ADDR_AMODE;
+		regs->gprs[14] = (__u64 __force) ka->sa.sa_restorer | PSW32_ADDR_AMODE;
 	} else {
-		regs->gprs[14] = (__u64) frame->retcode | PSW32_ADDR_AMODE;
+		regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE;
 		err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
 				  (u16 __force __user *)(frame->retcode));
 	}
@@ -417,7 +417,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
 	regs->psw.mask = PSW_MASK_BA |
 		(psw_user_bits & PSW_MASK_ASC) |
 		(regs->psw.mask & ~PSW_MASK_ASC);
-	regs->psw.addr = (__u64) ka->sa.sa_handler;
+	regs->psw.addr = (__u64 __force) ka->sa.sa_handler;
 
 	regs->gprs[2] = map_signal(sig);
 	regs->gprs[3] = (__force __u64) &frame->info;
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index d8f35565717..c84f33d51f7 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -16,6 +16,7 @@
 #include <asm/os_info.h>
 #include <asm/elf.h>
 #include <asm/ipl.h>
+#include <asm/sclp.h>
 
 #define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
 #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
@@ -64,22 +65,46 @@ static ssize_t copy_page_real(void *buf, void *src, size_t csize)
 }
 
 /*
- * Copy one page from "oldmem"
+ * Pointer to ELF header in new kernel
+ */
+static void *elfcorehdr_newmem;
+
+/*
+ * Copy one page from zfcpdump "oldmem"
+ *
+ * For pages below ZFCPDUMP_HSA_SIZE memory from the HSA is copied. Otherwise
+ * real memory copy is used.
+ */
+static ssize_t copy_oldmem_page_zfcpdump(char *buf, size_t csize,
+					 unsigned long src, int userbuf)
+{
+	int rc;
+
+	if (src < ZFCPDUMP_HSA_SIZE) {
+		rc = memcpy_hsa(buf, src, csize, userbuf);
+	} else {
+		if (userbuf)
+			rc = copy_to_user_real((void __force __user *) buf,
+					       (void *) src, csize);
+		else
+			rc = memcpy_real(buf, (void *) src, csize);
+	}
+	return rc ? rc : csize;
+}
+
+/*
+ * Copy one page from kdump "oldmem"
  *
  * For the kdump reserved memory this functions performs a swap operation:
  * - [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE] is mapped to [0 - OLDMEM_SIZE].
  * - [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE]
  */
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
-			 size_t csize, unsigned long offset, int userbuf)
+static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
+				      unsigned long src, int userbuf)
+
 {
-	unsigned long src;
 	int rc;
 
-	if (!csize)
-		return 0;
-
-	src = (pfn << PAGE_SHIFT) + offset;
 	if (src < OLDMEM_SIZE)
 		src += OLDMEM_BASE;
 	else if (src > OLDMEM_BASE &&
@@ -90,7 +115,88 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 			       (void *) src, csize);
 	else
 		rc = copy_page_real(buf, (void *) src, csize);
-	return (rc == 0) ? csize : rc;
+	return (rc == 0) ? rc : csize;
+}
+
+/*
+ * Copy one page from "oldmem"
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
+			 unsigned long offset, int userbuf)
+{
+	unsigned long src;
+
+	if (!csize)
+		return 0;
+	src = (pfn << PAGE_SHIFT) + offset;
+	if (OLDMEM_BASE)
+		return copy_oldmem_page_kdump(buf, csize, src, userbuf);
+	else
+		return copy_oldmem_page_zfcpdump(buf, csize, src, userbuf);
+}
+
+/*
+ * Remap "oldmem" for kdump
+ *
+ * For the kdump reserved memory this functions performs a swap operation:
+ * [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE]
+ */
+static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma,
+					unsigned long from, unsigned long pfn,
+					unsigned long size, pgprot_t prot)
+{
+	unsigned long size_old;
+	int rc;
+
+	if (pfn < OLDMEM_SIZE >> PAGE_SHIFT) {
+		size_old = min(size, OLDMEM_SIZE - (pfn << PAGE_SHIFT));
+		rc = remap_pfn_range(vma, from,
+				     pfn + (OLDMEM_BASE >> PAGE_SHIFT),
+				     size_old, prot);
+		if (rc || size == size_old)
+			return rc;
+		size -= size_old;
+		from += size_old;
+		pfn += size_old >> PAGE_SHIFT;
+	}
+	return remap_pfn_range(vma, from, pfn, size, prot);
+}
+
+/*
+ * Remap "oldmem" for zfcpdump
+ *
+ * We only map available memory above ZFCPDUMP_HSA_SIZE. Memory below
+ * ZFCPDUMP_HSA_SIZE is read on demand using the copy_oldmem_page() function.
+ */
+static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma,
+					   unsigned long from,
+					   unsigned long pfn,
+					   unsigned long size, pgprot_t prot)
+{
+	unsigned long size_hsa;
+
+	if (pfn < ZFCPDUMP_HSA_SIZE >> PAGE_SHIFT) {
+		size_hsa = min(size, ZFCPDUMP_HSA_SIZE - (pfn << PAGE_SHIFT));
+		if (size == size_hsa)
+			return 0;
+		size -= size_hsa;
+		from += size_hsa;
+		pfn += size_hsa >> PAGE_SHIFT;
+	}
+	return remap_pfn_range(vma, from, pfn, size, prot);
+}
+
+/*
+ * Remap "oldmem" for kdump or zfcpdump
+ */
+int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
+			   unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+	if (OLDMEM_BASE)
+		return remap_oldmem_pfn_range_kdump(vma, from, pfn, size, prot);
+	else
+		return remap_oldmem_pfn_range_zfcpdump(vma, from, pfn, size,
+						       prot);
 }
 
 /*
@@ -101,11 +207,21 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
 	unsigned long copied = 0;
 	int rc;
 
-	if ((unsigned long) src < OLDMEM_SIZE) {
-		copied = min(count, OLDMEM_SIZE - (unsigned long) src);
-		rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
-		if (rc)
-			return rc;
+	if (OLDMEM_BASE) {
+		if ((unsigned long) src < OLDMEM_SIZE) {
+			copied = min(count, OLDMEM_SIZE - (unsigned long) src);
+			rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
+			if (rc)
+				return rc;
+		}
+	} else {
+		if ((unsigned long) src < ZFCPDUMP_HSA_SIZE) {
+			copied = min(count,
+				     ZFCPDUMP_HSA_SIZE - (unsigned long) src);
+			rc = memcpy_hsa(dest, (unsigned long) src, copied, 0);
+			if (rc)
+				return rc;
+		}
 	}
 	return memcpy_real(dest + copied, src + copied, count - copied);
 }
@@ -368,14 +484,6 @@ static int get_mem_chunk_cnt(void)
 }
 
 /*
- * Relocate pointer in order to allow vmcore code access the data
- */
-static inline unsigned long relocate(unsigned long addr)
-{
-	return OLDMEM_BASE + addr;
-}
-
-/*
  * Initialize ELF loads (new kernel)
  */
 static int loads_init(Elf64_Phdr *phdr, u64 loads_offset)
@@ -426,7 +534,7 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
 	ptr = nt_vmcoreinfo(ptr);
 	memset(phdr, 0, sizeof(*phdr));
 	phdr->p_type = PT_NOTE;
-	phdr->p_offset = relocate(notes_offset);
+	phdr->p_offset = notes_offset;
 	phdr->p_filesz = (unsigned long) PTR_SUB(ptr, ptr_start);
 	phdr->p_memsz = phdr->p_filesz;
 	return ptr;
@@ -435,7 +543,7 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
 /*
  * Create ELF core header (new kernel)
  */
-static void s390_elf_corehdr_create(char **elfcorebuf, size_t *elfcorebuf_sz)
+int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
 {
 	Elf64_Phdr *phdr_notes, *phdr_loads;
 	int mem_chunk_cnt;
@@ -443,6 +551,12 @@ static void s390_elf_corehdr_create(char **elfcorebuf, size_t *elfcorebuf_sz)
 	u32 alloc_size;
 	u64 hdr_off;
 
+	/* If we are not in kdump or zfcpdump mode return */
+	if (!OLDMEM_BASE && ipl_info.type != IPL_TYPE_FCP_DUMP)
+		return 0;
+	/* If elfcorehdr= has been passed via cmdline, we use that one */
+	if (elfcorehdr_addr != ELFCORE_ADDR_MAX)
+		return 0;
 	mem_chunk_cnt = get_mem_chunk_cnt();
 
 	alloc_size = 0x1000 + get_cpu_cnt() * 0x300 +
@@ -460,27 +574,52 @@ static void s390_elf_corehdr_create(char **elfcorebuf, size_t *elfcorebuf_sz)
 	ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
 	/* Init loads */
 	hdr_off = PTR_DIFF(ptr, hdr);
-	loads_init(phdr_loads, ((unsigned long) hdr) + hdr_off);
-	*elfcorebuf_sz = hdr_off;
-	*elfcorebuf = (void *) relocate((unsigned long) hdr);
-	BUG_ON(*elfcorebuf_sz > alloc_size);
+	loads_init(phdr_loads, hdr_off);
+	*addr = (unsigned long long) hdr;
+	elfcorehdr_newmem = hdr;
+	*size = (unsigned long long) hdr_off;
+	BUG_ON(elfcorehdr_size > alloc_size);
+	return 0;
 }
 
 /*
- * Create kdump ELF core header in new kernel, if it has not been passed via
- * the "elfcorehdr" kernel parameter
+ * Free ELF core header (new kernel)
  */
-static int setup_kdump_elfcorehdr(void)
+void elfcorehdr_free(unsigned long long addr)
 {
-	size_t elfcorebuf_sz;
-	char *elfcorebuf;
+	if (!elfcorehdr_newmem)
+		return;
+	kfree((void *)(unsigned long)addr);
+}
 
-	if (!OLDMEM_BASE || is_kdump_kernel())
-		return -EINVAL;
-	s390_elf_corehdr_create(&elfcorebuf, &elfcorebuf_sz);
-	elfcorehdr_addr = (unsigned long long) elfcorebuf;
-	elfcorehdr_size = elfcorebuf_sz;
-	return 0;
+/*
+ * Read from ELF header
+ */
+ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
+{
+	void *src = (void *)(unsigned long)*ppos;
+
+	src = elfcorehdr_newmem ? src : src - OLDMEM_BASE;
+	memcpy(buf, src, count);
+	*ppos += count;
+	return count;
 }
 
-subsys_initcall(setup_kdump_elfcorehdr);
+/*
+ * Read from ELF notes data
+ */
+ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
+{
+	void *src = (void *)(unsigned long)*ppos;
+	int rc;
+
+	if (elfcorehdr_newmem) {
+		memcpy(buf, src, count);
+	} else {
+		rc = copy_from_oldmem(buf, src, count);
+		if (rc)
+			return rc;
+	}
+	*ppos += count;
+	return count;
+}
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 87acc38f73c..99e7f603589 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -40,14 +40,15 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high)
 {
 	struct stack_frame *sf;
 	struct pt_regs *regs;
+	unsigned long addr;
 
 	while (1) {
 		sp = sp & PSW_ADDR_INSN;
 		if (sp < low || sp > high - sizeof(*sf))
 			return sp;
 		sf = (struct stack_frame *) sp;
-		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
-		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
+		addr = sf->gprs[8] & PSW_ADDR_INSN;
+		printk("([<%016lx>] %pSR)\n", addr, (void *)addr);
 		/* Follow the backchain. */
 		while (1) {
 			low = sp;
@@ -57,16 +58,16 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high)
 			if (sp <= low || sp > high - sizeof(*sf))
 				return sp;
 			sf = (struct stack_frame *) sp;
-			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
-			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
+			addr = sf->gprs[8] & PSW_ADDR_INSN;
+			printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
 		}
 		/* Zero backchain detected, check for interrupt frame. */
 		sp = (unsigned long) (sf + 1);
 		if (sp <= low || sp > high - sizeof(*regs))
 			return sp;
 		regs = (struct pt_regs *) sp;
-		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
-		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
+		addr = regs->psw.addr & PSW_ADDR_INSN;
+		printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
 		low = sp;
 		sp = regs->gprs[15];
 	}
@@ -128,8 +129,7 @@ static void show_last_breaking_event(struct pt_regs *regs)
 {
 #ifdef CONFIG_64BIT
 	printk("Last Breaking-Event-Address:\n");
-	printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
-	print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
+	printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
 #endif
 }
 
@@ -143,10 +143,10 @@ void show_registers(struct pt_regs *regs)
 	char *mode;
 
 	mode = user_mode(regs) ? "User" : "Krnl";
-	printk("%s PSW : %p %p",
+	printk("%s PSW : %p %p (%pSR)\n",
 	       mode, (void *) regs->psw.mask,
+	       (void *) regs->psw.addr,
 	       (void *) regs->psw.addr);
-	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
 	printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
 	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
 	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 3ddbc26d246..e9b04c33d38 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -53,27 +53,21 @@ void handle_signal32(unsigned long sig, struct k_sigaction *ka,
 		    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs);
 void do_notify_resume(struct pt_regs *regs);
 
-struct ext_code;
-void do_extint(struct pt_regs *regs);
+void __init init_IRQ(void);
+void do_IRQ(struct pt_regs *regs, int irq);
 void do_restart(void);
 void __init startup_init(void);
 void die(struct pt_regs *regs, const char *str);
-
+int setup_profiling_timer(unsigned int multiplier);
 void __init time_init(void);
+int pfn_is_nosave(unsigned long);
+void s390_early_resume(void);
+unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);
 
 struct s390_mmap_arg_struct;
 struct fadvise64_64_args;
 struct old_sigaction;
 
-long sys_mmap2(struct s390_mmap_arg_struct __user *arg);
-long sys_s390_ipc(uint call, int first, unsigned long second,
-		  unsigned long third, void __user *ptr);
-long sys_s390_personality(unsigned int personality);
-long sys_s390_fadvise64(int fd, u32 offset_high, u32 offset_low,
-			size_t len, int advice);
-long sys_s390_fadvise64_64(struct fadvise64_64_args __user *args);
-long sys_s390_fallocate(int fd, int mode, loff_t offset, u32 len_high,
-			u32 len_low);
 long sys_sigreturn(void);
 long sys_rt_sigreturn(void);
 long sys32_sigreturn(void);
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index e3043aef87a..1014ad5f769 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -15,6 +15,7 @@
 #include <linux/kprobes.h>
 #include <trace/syscall.h>
 #include <asm/asm-offsets.h>
+#include "entry.h"
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
@@ -177,7 +178,7 @@ int ftrace_enable_ftrace_graph_caller(void)
 
 	offset = ((void *) prepare_ftrace_return -
 		  (void *) ftrace_graph_caller) / 2;
-	return probe_kernel_write(ftrace_graph_caller + 2,
+	return probe_kernel_write((void *) ftrace_graph_caller + 2,
 				  &offset, sizeof(offset));
 }
 
@@ -185,7 +186,7 @@ int ftrace_disable_ftrace_graph_caller(void)
 {
 	static unsigned short offset = 0x0002;
 
-	return probe_kernel_write(ftrace_graph_caller + 2,
+	return probe_kernel_write((void *) ftrace_graph_caller + 2,
 				  &offset, sizeof(offset));
 }
 
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index b34ba0ea96a..8ac2097f13d 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -196,21 +196,23 @@ asmlinkage void do_softirq(void)
  * ext_int_hash[index] is the list head for all external interrupts that hash
  * to this index.
 */
-static struct list_head ext_int_hash[256];
+static struct hlist_head ext_int_hash[32] ____cacheline_aligned;
 
 struct ext_int_info {
 	ext_int_handler_t handler;
-	u16 code;
-	struct list_head entry;
+	struct hlist_node entry;
 	struct rcu_head rcu;
+	u16 code;
 };
 
 /* ext_int_hash_lock protects the handler lists for external interrupts */
-DEFINE_SPINLOCK(ext_int_hash_lock);
+static DEFINE_SPINLOCK(ext_int_hash_lock);
 
 static inline int ext_hash(u16 code)
 {
-	return (code + (code >> 9)) & 0xff;
+	BUILD_BUG_ON(!is_power_of_2(ARRAY_SIZE(ext_int_hash)));
+
+	return (code + (code >> 9)) & (ARRAY_SIZE(ext_int_hash) - 1);
 }
 
 int register_external_interrupt(u16 code, ext_int_handler_t handler)
@@ -227,7 +229,7 @@ int register_external_interrupt(u16 code, ext_int_handler_t handler)
 
 	index = ext_hash(code);
 	spin_lock_irqsave(&ext_int_hash_lock, flags);
-	list_add_rcu(&p->entry, &ext_int_hash[index]);
+	hlist_add_head_rcu(&p->entry, &ext_int_hash[index]);
 	spin_unlock_irqrestore(&ext_int_hash_lock, flags);
 	return 0;
 }
@@ -240,9 +242,9 @@ int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
 	int index = ext_hash(code);
 
 	spin_lock_irqsave(&ext_int_hash_lock, flags);
-	list_for_each_entry_rcu(p, &ext_int_hash[index], entry) {
+	hlist_for_each_entry_rcu(p, &ext_int_hash[index], entry) {
 		if (p->code == code && p->handler == handler) {
-			list_del_rcu(&p->entry);
+			hlist_del_rcu(&p->entry);
 			kfree_rcu(p, rcu);
 		}
 	}
@@ -264,12 +266,12 @@ static irqreturn_t do_ext_interrupt(int irq, void *dummy)
 
 	index = ext_hash(ext_code.code);
 	rcu_read_lock();
-	list_for_each_entry_rcu(p, &ext_int_hash[index], entry)
-		if (likely(p->code == ext_code.code))
-			p->handler(ext_code, regs->int_parm,
-				   regs->int_parm_long);
+	hlist_for_each_entry_rcu(p, &ext_int_hash[index], entry) {
+		if (unlikely(p->code != ext_code.code))
+			continue;
+		p->handler(ext_code, regs->int_parm, regs->int_parm_long);
+	}
 	rcu_read_unlock();
-
 	return IRQ_HANDLED;
 }
 
@@ -283,55 +285,32 @@ void __init init_ext_interrupts(void)
 	int idx;
 
 	for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++)
-		INIT_LIST_HEAD(&ext_int_hash[idx]);
+		INIT_HLIST_HEAD(&ext_int_hash[idx]);
 	irq_set_chip_and_handler(EXT_INTERRUPT, &dummy_irq_chip,
 				 handle_percpu_irq);
 	setup_irq(EXT_INTERRUPT, &external_interrupt);
 }
 
-static DEFINE_SPINLOCK(sc_irq_lock);
-static int sc_irq_refcount;
-
-void service_subclass_irq_register(void)
-{
-	spin_lock(&sc_irq_lock);
-	if (!sc_irq_refcount)
-		ctl_set_bit(0, 9);
-	sc_irq_refcount++;
-	spin_unlock(&sc_irq_lock);
-}
-EXPORT_SYMBOL(service_subclass_irq_register);
-
-void service_subclass_irq_unregister(void)
-{
-	spin_lock(&sc_irq_lock);
-	sc_irq_refcount--;
-	if (!sc_irq_refcount)
-		ctl_clear_bit(0, 9);
-	spin_unlock(&sc_irq_lock);
-}
-EXPORT_SYMBOL(service_subclass_irq_unregister);
-
-static DEFINE_SPINLOCK(ma_subclass_lock);
-static int ma_subclass_refcount;
+static DEFINE_SPINLOCK(irq_subclass_lock);
+static unsigned char irq_subclass_refcount[64];
 
-void measurement_alert_subclass_register(void)
+void irq_subclass_register(enum irq_subclass subclass)
 {
-	spin_lock(&ma_subclass_lock);
-	if (!ma_subclass_refcount)
-		ctl_set_bit(0, 5);
-	ma_subclass_refcount++;
-	spin_unlock(&ma_subclass_lock);
+	spin_lock(&irq_subclass_lock);
+	if (!irq_subclass_refcount[subclass])
+		ctl_set_bit(0, subclass);
+	irq_subclass_refcount[subclass]++;
+	spin_unlock(&irq_subclass_lock);
 }
-EXPORT_SYMBOL(measurement_alert_subclass_register);
+EXPORT_SYMBOL(irq_subclass_register);
 
-void measurement_alert_subclass_unregister(void)
+void irq_subclass_unregister(enum irq_subclass subclass)
 {
-	spin_lock(&ma_subclass_lock);
-	ma_subclass_refcount--;
-	if (!ma_subclass_refcount)
-		ctl_clear_bit(0, 5);
-	spin_unlock(&ma_subclass_lock);
+	spin_lock(&irq_subclass_lock);
+	irq_subclass_refcount[subclass]--;
+	if (!irq_subclass_refcount[subclass])
+		ctl_clear_bit(0, subclass);
+	spin_unlock(&irq_subclass_lock);
 }
-EXPORT_SYMBOL(measurement_alert_subclass_unregister);
+EXPORT_SYMBOL(irq_subclass_unregister);
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index adbbe7f1cb0..0ce9fb24503 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -37,6 +37,26 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 struct kretprobe_blackpoint kretprobe_blacklist[] = { };
 
+DEFINE_INSN_CACHE_OPS(dmainsn);
+
+static void *alloc_dmainsn_page(void)
+{
+	return (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+}
+
+static void free_dmainsn_page(void *page)
+{
+	free_page((unsigned long)page);
+}
+
+struct kprobe_insn_cache kprobe_dmainsn_slots = {
+	.mutex = __MUTEX_INITIALIZER(kprobe_dmainsn_slots.mutex),
+	.alloc = alloc_dmainsn_page,
+	.free = free_dmainsn_page,
+	.pages = LIST_HEAD_INIT(kprobe_dmainsn_slots.pages),
+	.insn_size = MAX_INSN_SIZE,
+};
+
 static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
 {
 	switch (insn[0] >> 8) {
@@ -100,9 +120,8 @@ static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
 		fixup |= FIXUP_RETURN_REGISTER;
 		break;
 	case 0xc0:
-		if ((insn[0] & 0x0f) == 0x00 || /* larl */
-		    (insn[0] & 0x0f) == 0x05) /* brasl */
-			fixup |= FIXUP_RETURN_REGISTER;
+		if ((insn[0] & 0x0f) == 0x05) /* brasl */
+			fixup |= FIXUP_RETURN_REGISTER;
 		break;
 	case 0xeb:
 		switch (insn[2] & 0xff) {
@@ -134,18 +153,128 @@ static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
 	return fixup;
 }
 
+static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn)
+{
+	/* Check if we have a RIL-b or RIL-c format instruction which
+	 * we need to modify in order to avoid instruction emulation. */
+	switch (insn[0] >> 8) {
+	case 0xc0:
+		if ((insn[0] & 0x0f) == 0x00) /* larl */
+			return true;
+		break;
+	case 0xc4:
+		switch (insn[0] & 0x0f) {
+		case 0x02: /* llhrl */
+		case 0x04: /* lghrl */
+		case 0x05: /* lhrl */
+		case 0x06: /* llghrl */
+		case 0x07: /* sthrl */
+		case 0x08: /* lgrl */
+		case 0x0b: /* stgrl */
+		case 0x0c: /* lgfrl */
+		case 0x0d: /* lrl */
+		case 0x0e: /* llgfrl */
+		case 0x0f: /* strl */
+			return true;
+		}
+		break;
+	case 0xc6:
+		switch (insn[0] & 0x0f) {
+		case 0x00: /* exrl */
+		case 0x02: /* pfdrl */
+		case 0x04: /* cghrl */
+		case 0x05: /* chrl */
+		case 0x06: /* clghrl */
+		case 0x07: /* clhrl */
+		case 0x08: /* cgrl */
+		case 0x0a: /* clgrl */
+		case 0x0c: /* cgfrl */
+		case 0x0d: /* crl */
+		case 0x0e: /* clgfrl */
+		case 0x0f: /* clrl */
+			return true;
+		}
+		break;
+	}
+	return false;
+}
+
+static void __kprobes copy_instruction(struct kprobe *p)
+{
+	s64 disp, new_disp;
+	u64 addr, new_addr;
+
+	memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);
+	if (!is_insn_relative_long(p->ainsn.insn))
+		return;
+	/*
+	 * For pc-relative instructions in RIL-b or RIL-c format patch the
+	 * RI2 displacement field. We have already made sure that the insn
+	 * slot for the patched instruction is within the same 2GB area
+	 * as the original instruction (either kernel image or module area).
+	 * Therefore the new displacement will always fit.
+	 */
+	disp = *(s32 *)&p->ainsn.insn[1];
+	addr = (u64)(unsigned long)p->addr;
+	new_addr = (u64)(unsigned long)p->ainsn.insn;
+	new_disp = ((addr + (disp * 2)) - new_addr) / 2;
+	*(s32 *)&p->ainsn.insn[1] = new_disp;
+}
+
+static inline int is_kernel_addr(void *addr)
+{
+	return addr < (void *)_end;
+}
+
+static inline int is_module_addr(void *addr)
+{
+#ifdef CONFIG_64BIT
+	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
+	if (addr < (void *)MODULES_VADDR)
+		return 0;
+	if (addr > (void *)MODULES_END)
+		return 0;
+#endif
+	return 1;
+}
+
+static int __kprobes s390_get_insn_slot(struct kprobe *p)
+{
+	/*
+	 * Get an insn slot that is within the same 2GB area like the original
+	 * instruction. That way instructions with a 32bit signed displacement
+	 * field can be patched and executed within the insn slot.
+	 */
+	p->ainsn.insn = NULL;
+	if (is_kernel_addr(p->addr))
+		p->ainsn.insn = get_dmainsn_slot();
+	if (is_module_addr(p->addr))
+		p->ainsn.insn = get_insn_slot();
+	return p->ainsn.insn ? 0 : -ENOMEM;
+}
+
+static void __kprobes s390_free_insn_slot(struct kprobe *p)
+{
+	if (!p->ainsn.insn)
+		return;
+	if (is_kernel_addr(p->addr))
+		free_dmainsn_slot(p->ainsn.insn, 0);
+	else
+		free_insn_slot(p->ainsn.insn, 0);
+	p->ainsn.insn = NULL;
+}
+
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	if ((unsigned long) p->addr & 0x01)
 		return -EINVAL;
-
 	/* Make sure the probe isn't going on a difficult instruction */
 	if (is_prohibited_opcode(p->addr))
 		return -EINVAL;
-
+	if (s390_get_insn_slot(p))
+		return -ENOMEM;
 	p->opcode = *p->addr;
-	memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);
-
+	copy_instruction(p);
 	return 0;
 }
 
@@ -186,6 +315,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
+	s390_free_insn_slot(p);
 }
 
 static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index ac2178161ec..719e27b2cf2 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -50,7 +50,7 @@ static void add_elf_notes(int cpu)
 /*
  * Initialize CPU ELF notes
  */
-void setup_regs(void)
+static void setup_regs(void)
 {
 	unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
 	int cpu, this_cpu;
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index fb99c2057b8..1105502bf6e 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -274,7 +274,7 @@ static int reserve_pmc_hardware(void)
 	int flags = PMC_INIT;
 
 	on_each_cpu(setup_pmc_cpu, &flags, 1);
-	measurement_alert_subclass_register();
+	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
 
 	return 0;
 }
@@ -285,7 +285,7 @@ static void release_pmc_hardware(void)
 	int flags = PMC_RELEASE;
 
 	on_each_cpu(setup_pmc_cpu, &flags, 1);
-	measurement_alert_subclass_unregister();
+	irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
 }
 
 /* Release the PMU if event is the last perf event */
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 500aa1029bc..2343c218b8f 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -105,13 +105,10 @@ void perf_event_print_debug(void)
 	cpu = smp_processor_id();
 	memset(&cf_info, 0, sizeof(cf_info));
 
-	if (!qctri(&cf_info)) {
+	if (!qctri(&cf_info))
 		pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n",
 			cpu, cf_info.cfvn, cf_info.csvn,
 			cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl);
-		print_hex_dump_bytes("CPUMF Query: ", DUMP_PREFIX_OFFSET,
-				     &cf_info, sizeof(cf_info));
-	}
 
 	local_irq_restore(flags);
 }
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
index 077a99389b0..e1c9d1c292f 100644
--- a/arch/s390/kernel/runtime_instr.c
+++ b/arch/s390/kernel/runtime_instr.c
@@ -139,10 +139,10 @@ static int __init runtime_instr_init(void)
 	if (!runtime_instr_avail())
 		return 0;
 
-	measurement_alert_subclass_register();
+	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
 	rc = register_external_interrupt(0x1407, runtime_instr_int_handler);
 	if (rc)
-		measurement_alert_subclass_unregister();
+		irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
 	else
 		pr_info("Runtime instrumentation facility initialized\n");
 	return rc;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index d386c4e9d2e..1a4313a1b60 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -362,7 +362,7 @@ void smp_yield_cpu(int cpu)
  * Send cpus emergency shutdown signal. This gives the cpus the
  * opportunity to complete outstanding interrupts.
  */
-void smp_emergency_stop(cpumask_t *cpumask)
+static void smp_emergency_stop(cpumask_t *cpumask)
 {
 	u64 end;
 	int cpu;
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index 737bff38e3e..a7a7537ce1e 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -13,6 +13,7 @@
 #include <asm/ipl.h>
 #include <asm/cio.h>
 #include <asm/pci.h>
+#include "entry.h"
 
 /*
  * References to section boundaries
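
The crash_dump.c hunks above split copy_oldmem_page() into a kdump path, which swaps the [0, OLDMEM_SIZE] and [OLDMEM_BASE, OLDMEM_BASE + OLDMEM_SIZE] ranges, and a zfcpdump path, which reads everything below ZFCPDUMP_HSA_SIZE from the HSA. A minimal sketch of the kdump address swap, using made-up stand-in values rather than the real OLDMEM_BASE/OLDMEM_SIZE:

/* Sketch only: mirrors the swap in copy_oldmem_page_kdump() with
 * hypothetical constants instead of OLDMEM_BASE/OLDMEM_SIZE. */
static unsigned long oldmem_swap_example(unsigned long src)
{
	unsigned long base = 0x20000000UL;	/* stand-in for OLDMEM_BASE */
	unsigned long size = 0x10000000UL;	/* stand-in for OLDMEM_SIZE */

	if (src < size)				/* e.g. 0x1000 -> 0x20001000 */
		src += base;
	else if (src > base && src < base + size)
		src -= base;			/* e.g. 0x20001000 -> 0x1000 */
	return src;
}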
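
The irq.c hunks above fold the service-subclass and measurement-alert helpers into one refcounted pair, irq_subclass_register()/irq_subclass_unregister(), indexed by the control register 0 bit to set. The sketch below shows the calling pattern, modeled on the runtime_instr.c hunk in this same diff; the handler and init function names are hypothetical, and the handler signature follows the p->handler() call in do_ext_interrupt() above.

/* Sketch of a caller of the new irq_subclass_* API (not part of the patch). */
static void example_handler(struct ext_code ext_code,
			    unsigned int param32, unsigned long param64)
{
	/* handle the measurement-alert external interrupt (code 0x1407) */
}

static int __init example_init(void)
{
	int rc;

	/* Refcounted: only the first caller sets the CR0 subclass bit. */
	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
	rc = register_external_interrupt(0x1407, example_handler);
	if (rc)
		/* Nothing registered, so drop the subclass refcount again. */
		irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
	return rc;
}

perf_cpum_cf.c and runtime_instr.c in this diff are converted to exactly this pattern.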
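
copy_instruction() in the kprobes.c hunk re-targets the RI2 field of RIL-b/RIL-c instructions when the probed instruction is copied into its insn slot; the field is a signed offset counted in halfwords, hence the * 2 and / 2. A small self-check of that arithmetic with made-up addresses (illustration only, not part of the patch):

/* Sketch only: verifies the displacement formula from copy_instruction()
 * above using hypothetical addresses. */
static void ri2_retarget_example(void)
{
	u64 addr = 0x100000;		/* original instruction address */
	s64 disp = 0x800;		/* RI2 offset in halfwords */
	u64 target = addr + disp * 2;	/* 0x101000 */
	u64 new_addr = 0x200000;	/* address of the insn slot */
	s64 new_disp = ((addr + (disp * 2)) - new_addr) / 2;

	/* The copied instruction still resolves to the same target. */
	BUG_ON(new_addr + new_disp * 2 != target);
}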