Diffstat (limited to 'arch/s390')
67 files changed, 1681 insertions, 1205 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 90d77bd078f..ed5cb5af528 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -81,6 +81,7 @@ config S390
 	select INIT_ALL_POSSIBLE
 	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_LZMA
@@ -273,11 +274,11 @@ config MARCH_Z10
 	  on older machines.
 
 config MARCH_Z196
-	bool "IBM zEnterprise 196"
+	bool "IBM zEnterprise 114 and 196"
 	help
-	  Select this to enable optimizations for IBM zEnterprise 196
-	  (2817 series). The kernel will be slightly faster but will not work
-	  on older machines.
+	  Select this to enable optimizations for IBM zEnterprise 114 and 196
+	  (2818 and 2817 series). The kernel will be slightly faster but will
+	  not work on older machines.
 
 endchoice
 
@@ -579,6 +580,7 @@ config S390_GUEST
 	def_bool y
 	prompt "s390 guest support for KVM (EXPERIMENTAL)"
 	depends on 64BIT && EXPERIMENTAL
+	select VIRTUALIZATION
 	select VIRTIO
 	select VIRTIO_RING
 	select VIRTIO_CONSOLE
diff --git a/arch/s390/boot/compressed/head31.S b/arch/s390/boot/compressed/head31.S
index 2a5523a32bc..e8c9e18b803 100644
--- a/arch/s390/boot/compressed/head31.S
+++ b/arch/s390/boot/compressed/head31.S
@@ -7,14 +7,14 @@
  */
 
 #include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/page.h>
 #include "sizes.h"
 
 __HEAD
-	.globl	startup_continue
-startup_continue:
+ENTRY(startup_continue)
 	basr	%r13,0			# get base
 .LPG1:
 	# setup stack
diff --git a/arch/s390/boot/compressed/head64.S b/arch/s390/boot/compressed/head64.S
index 2982cb14055..f86a4eef28a 100644
--- a/arch/s390/boot/compressed/head64.S
+++ b/arch/s390/boot/compressed/head64.S
@@ -7,14 +7,14 @@
  */
 
 #include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/page.h>
 #include "sizes.h"
 
 __HEAD
-	.globl	startup_continue
-startup_continue:
+ENTRY(startup_continue)
 	basr	%r13,0			# get base
 .LPG1:
 	# setup stack
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 5ed8d64fc2e..0317a3547cb 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -1,15 +1,12 @@
 /*
  * Cryptographic API.
  *
- * s390 implementation of the SHA256 Secure Hash Algorithm.
+ * s390 implementation of the SHA256 and SHA224 Secure Hash Algorithm.
  *
  * s390 Version:
- *   Copyright IBM Corp. 2005,2007
+ *   Copyright IBM Corp. 2005,2011
  *   Author(s): Jan Glauber (jang@de.ibm.com)
  *
- * Derived from "crypto/sha256_generic.c"
- * and "arch/s390/crypto/sha1_s390.c"
- *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
@@ -65,7 +62,7 @@ static int sha256_import(struct shash_desc *desc, const void *in)
 	return 0;
 }
 
-static struct shash_alg alg = {
+static struct shash_alg sha256_alg = {
 	.digestsize	=	SHA256_DIGEST_SIZE,
 	.init		=	sha256_init,
 	.update		=	s390_sha_update,
@@ -84,22 +81,69 @@ static struct shash_alg alg = {
 	}
 };
 
-static int sha256_s390_init(void)
+static int sha224_init(struct shash_desc *desc)
 {
+	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+
+	sctx->state[0] = SHA224_H0;
+	sctx->state[1] = SHA224_H1;
+	sctx->state[2] = SHA224_H2;
+	sctx->state[3] = SHA224_H3;
+	sctx->state[4] = SHA224_H4;
+	sctx->state[5] = SHA224_H5;
+	sctx->state[6] = SHA224_H6;
+	sctx->state[7] = SHA224_H7;
+	sctx->count = 0;
+	sctx->func = KIMD_SHA_256;
+
+	return 0;
+}
+
+static struct shash_alg sha224_alg = {
+	.digestsize	=	SHA224_DIGEST_SIZE,
+	.init		=	sha224_init,
+	.update		=	s390_sha_update,
+	.final		=	s390_sha_final,
+	.export		=	sha256_export,
+	.import		=	sha256_import,
+	.descsize	=	sizeof(struct s390_sha_ctx),
+	.statesize	=	sizeof(struct sha256_state),
+	.base		=	{
+		.cra_name	=	"sha224",
+		.cra_driver_name=	"sha224-s390",
+		.cra_priority	=	CRYPT_S390_PRIORITY,
+		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize	=	SHA224_BLOCK_SIZE,
+		.cra_module	=	THIS_MODULE,
+	}
+};
+
+static int __init sha256_s390_init(void)
+{
+	int ret;
+
 	if (!crypt_s390_func_available(KIMD_SHA_256, CRYPT_S390_MSA))
 		return -EOPNOTSUPP;
-
-	return crypto_register_shash(&alg);
+	ret = crypto_register_shash(&sha256_alg);
+	if (ret < 0)
+		goto out;
+	ret = crypto_register_shash(&sha224_alg);
+	if (ret < 0)
+		crypto_unregister_shash(&sha256_alg);
+out:
+	return ret;
 }
 
 static void __exit sha256_s390_fini(void)
 {
-	crypto_unregister_shash(&alg);
+	crypto_unregister_shash(&sha224_alg);
+	crypto_unregister_shash(&sha256_alg);
 }
 
 module_init(sha256_s390_init);
 module_exit(sha256_s390_fini);
 
 MODULE_ALIAS("sha256");
+MODULE_ALIAS("sha224");
 
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm");
+MODULE_DESCRIPTION("SHA256 and SHA224 Secure Hash Algorithm");
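Once sha224-s390 is registered as above, any kernel user reaches it through the generic shash API, and its cra_priority lets it win over sha224-generic. A hedged sketch of a caller using only the generic interfaces of this kernel generation; sha224_demo() and its parameters are invented names, not part of the patch:

    #include <crypto/hash.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    static int sha224_demo(const u8 *data, unsigned int len, u8 *out)
    {
    	struct crypto_shash *tfm;
    	struct shash_desc *desc;
    	int ret;

    	tfm = crypto_alloc_shash("sha224", 0, 0);	/* picks sha224-s390 when loaded */
    	if (IS_ERR(tfm))
    		return PTR_ERR(tfm);
    	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
    	if (!desc) {
    		crypto_free_shash(tfm);
    		return -ENOMEM;
    	}
    	desc->tfm = tfm;
    	desc->flags = 0;			/* shash_desc still has flags in this era */
    	ret = crypto_shash_digest(desc, data, len, out);	/* out: 28 bytes */
    	kfree(desc);
    	crypto_free_shash(tfm);
    	return ret;
    }

The new MODULE_ALIAS("sha224") is what lets such a request autoload the module.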
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index d9db13810d1..8517d2ae3b5 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -93,7 +93,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return old;
 }
 
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -105,10 +105,9 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 			break;
 		c = old;
 	}
-	return c != u;
+	return c;
 }
 
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #undef __CS_LOOP
 
@@ -332,6 +331,4 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 #define smp_mb__before_atomic_inc()	smp_mb()
 #define smp_mb__after_atomic_inc()	smp_mb()
 
-#include <asm-generic/atomic-long.h>
-
 #endif /* __ARCH_S390_ATOMIC__ */
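The s390 primitive now returns the old counter value instead of a truth value because, in this kernel series, the shared header derives the boolean helpers from __atomic_add_unless() and also takes over the asm-generic/atomic-long.h include. Roughly, paraphrased from include/linux/atomic.h of the same series:

    /* paraphrase of include/linux/atomic.h (same series), shown for context */
    static inline int atomic_add_unless(atomic_t *v, int a, int u)
    {
    	return __atomic_add_unless(v, a, u) != u;	/* true if the add happened */
    }

    #define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)

That is why the arch-local atomic_inc_not_zero define and the trailing include could be deleted here.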
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 667c6e9f6a3..e5beb490959 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -832,10 +832,7 @@ static inline int find_next_bit_le(void *vaddr, unsigned long size,
 
 #include <asm-generic/bitops/le.h>
 
-#define ext2_set_bit_atomic(lock, nr, addr)	\
-	test_and_set_bit_le(nr, addr)
-#define ext2_clear_bit_atomic(lock, nr, addr)	\
-	test_and_clear_bit_le(nr, addr)
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
 
 #endif /* __KERNEL__ */
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 5e95d95450b..97cc4403fab 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -167,5 +167,6 @@ enum diag308_rc {
 };
 
 extern int diag308(unsigned long subcode, void *addr);
+extern void diag308_reset(void);
 
 #endif /* _ASM_S390_IPL_H */
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index 865d6d891ac..38fdf451feb 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -29,42 +29,42 @@
 })
 
 /* set system mask. */
-static inline void __arch_local_irq_ssm(unsigned long flags)
+static inline notrace void __arch_local_irq_ssm(unsigned long flags)
 {
 	asm volatile("ssm %0" : : "Q" (flags) : "memory");
 }
 
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
 {
 	return __arch_local_irq_stosm(0x00);
 }
 
-static inline unsigned long arch_local_irq_save(void)
+static inline notrace unsigned long arch_local_irq_save(void)
 {
 	return __arch_local_irq_stnsm(0xfc);
 }
 
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
 {
 	arch_local_irq_save();
 }
 
-static inline void arch_local_irq_enable(void)
+static inline notrace void arch_local_irq_enable(void)
 {
 	__arch_local_irq_stosm(0x03);
 }
 
-static inline void arch_local_irq_restore(unsigned long flags)
+static inline notrace void arch_local_irq_restore(unsigned long flags)
 {
 	__arch_local_irq_ssm(flags);
 }
 
-static inline bool arch_irqs_disabled_flags(unsigned long flags)
+static inline notrace bool arch_irqs_disabled_flags(unsigned long flags)
 {
 	return !(flags & (3UL << (BITS_PER_LONG - 8)));
 }
 
-static inline bool arch_irqs_disabled(void)
+static inline notrace bool arch_irqs_disabled(void)
 {
 	return arch_irqs_disabled_flags(arch_local_save_flags());
 }
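Tagging the irqflags helpers notrace keeps them out of function instrumentation: tracing code itself saves and restores interrupt state, so instrumenting these helpers could recurse. For reference, the kernel defines the annotation as a GCC attribute (include/linux/compiler.h):

    #define notrace	__attribute__((no_instrument_function))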
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index cef7dbf69df..00ff00dfb24 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -93,9 +93,7 @@ struct kvm_s390_sie_block {
 	__u32	scaol;			/* 0x0064 */
 	__u8	reserved68[4];		/* 0x0068 */
 	__u32	todpr;			/* 0x006c */
-	__u8	reserved70[16];		/* 0x0070 */
-	__u64	gmsor;			/* 0x0080 */
-	__u64	gmslm;			/* 0x0088 */
+	__u8	reserved70[32];		/* 0x0070 */
 	psw_t	gpsw;			/* 0x0090 */
 	__u64	gg14;			/* 0x00a0 */
 	__u64	gg15;			/* 0x00a8 */
@@ -138,6 +136,7 @@ struct kvm_vcpu_stat {
 	u32 instruction_chsc;
 	u32 instruction_stsi;
 	u32 instruction_stfl;
+	u32 instruction_tprot;
 	u32 instruction_sigp_sense;
 	u32 instruction_sigp_emergency;
 	u32 instruction_sigp_stop;
@@ -175,6 +174,10 @@ struct kvm_s390_prefix_info {
 	__u32 address;
 };
 
+struct kvm_s390_emerg_info {
+	__u16 code;
+};
+
 struct kvm_s390_interrupt_info {
 	struct list_head list;
 	u64	type;
@@ -182,6 +185,7 @@ struct kvm_s390_interrupt_info {
 		struct kvm_s390_io_info io;
 		struct kvm_s390_ext_info ext;
 		struct kvm_s390_pgm_info pgm;
+		struct kvm_s390_emerg_info emerg;
 		struct kvm_s390_prefix_info prefix;
 	};
 };
@@ -226,6 +230,7 @@ struct kvm_vcpu_arch {
 		struct cpuid	cpu_id;
 		u64		stidp_data;
 	};
+	struct gmap *gmap;
 };
 
 struct kvm_vm_stat {
@@ -236,6 +241,7 @@ struct kvm_arch{
 	struct sca_block *sca;
 	debug_info_t *dbf;
 	struct kvm_s390_float_interrupt float_int;
+	struct gmap *gmap;
 };
 
 extern int sie64a(struct kvm_s390_sie_block *, unsigned long *);
diff --git a/arch/s390/include/asm/linkage.h b/arch/s390/include/asm/linkage.h
index 291c2d01c44..fc8a8284778 100644
--- a/arch/s390/include/asm/linkage.h
+++ b/arch/s390/include/asm/linkage.h
@@ -1,6 +1,9 @@
 #ifndef __ASM_LINKAGE_H
 #define __ASM_LINKAGE_H
 
-/* Nothing to see here... */
+#include <linux/stringify.h>
+
+#define __ALIGN .align 4, 0x07
+#define __ALIGN_STR __stringify(__ALIGN)
 
 #endif
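With __ALIGN defined, the generic ENTRY() macro from include/linux/linkage.h becomes usable in s390 assembly; every ".globl name / name:" pair converted in this series expands to roughly the following (paraphrased):

    /* paraphrased expansion of ENTRY() with the s390 __ALIGN above */
    #define ENTRY(name) \
    	.globl name; \
    	.align 4, 0x07; \
    	name:

The fill byte 0x07 decodes as a nopr on s390, so the alignment padding between symbols stays harmless even if execution falls through into it.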
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 228cf0b295d..e85c911aabf 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -18,6 +18,7 @@ void system_call(void);
 void pgm_check_handler(void);
 void mcck_int_handler(void);
 void io_int_handler(void);
+void psw_restart_int_handler(void);
 
 #ifdef CONFIG_32BIT
 
@@ -150,7 +151,10 @@ struct _lowcore {
 	 */
 	__u32	ipib;			/* 0x0e00 */
 	__u32	ipib_checksum;		/* 0x0e04 */
-	__u8	pad_0x0e08[0x0f00-0x0e08];	/* 0x0e08 */
+
+	/* 64 bit save area */
+	__u64	save_area_64;		/* 0x0e08 */
+	__u8	pad_0x0e10[0x0f00-0x0e10];	/* 0x0e10 */
 
 	/* Extended facility list */
 	__u64	stfle_fac_list[32];	/* 0x0f00 */
@@ -268,7 +272,7 @@ struct _lowcore {
 	__u64	vdso_per_cpu_data;	/* 0x0358 */
 	__u64	machine_flags;		/* 0x0360 */
 	__u64	ftrace_func;		/* 0x0368 */
-	__u64	sie_hook;		/* 0x0370 */
+	__u64	gmap;			/* 0x0370 */
 	__u64	cmf_hpp;		/* 0x0378 */
 
 	/* Interrupt response block. */
@@ -286,7 +290,10 @@ struct _lowcore {
 	 */
 	__u64	ipib;			/* 0x0e00 */
 	__u32	ipib_checksum;		/* 0x0e08 */
-	__u8	pad_0x0e0c[0x0f00-0x0e0c];	/* 0x0e0c */
+
+	/* 64 bit save area */
+	__u64	save_area_64;		/* 0x0e0c */
+	__u8	pad_0x0e14[0x0f00-0x0e14];	/* 0x0e14 */
 
 	/* Extended facility list */
 	__u64	stfle_fac_list[32];	/* 0x0f00 */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 82d0847896a..4506791adcd 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -6,6 +6,7 @@ typedef struct {
 	unsigned int flush_mm;
 	spinlock_t list_lock;
 	struct list_head pgtable_list;
+	struct list_head gmap_list;
 	unsigned long asce_bits;
 	unsigned long asce_limit;
 	unsigned long vdso_base;
@@ -17,6 +18,7 @@ typedef struct {
 
 #define INIT_MM_CONTEXT(name)						      \
 	.context.list_lock    = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
-	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list),
+	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list),    \
+	.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
 
 #endif
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 38e71ebcd3c..8eef9b5b3cf 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -20,7 +20,7 @@
 unsigned long *crst_table_alloc(struct mm_struct *);
 void crst_table_free(struct mm_struct *, unsigned long *);
 
-unsigned long *page_table_alloc(struct mm_struct *);
+unsigned long *page_table_alloc(struct mm_struct *, unsigned long);
 void page_table_free(struct mm_struct *, unsigned long *);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 void page_table_free_rcu(struct mmu_gather *, unsigned long *);
@@ -115,6 +115,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	spin_lock_init(&mm->context.list_lock);
 	INIT_LIST_HEAD(&mm->context.pgtable_list);
+	INIT_LIST_HEAD(&mm->context.gmap_list);
 	return (pgd_t *) crst_table_alloc(mm);
 }
 #define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
@@ -133,8 +134,8 @@ static inline void pmd_populate(struct mm_struct *mm,
 /*
  * page table entry allocation/free routines.
  */
-#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
-#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
+#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr))
+#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr))
 
 #define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
 #define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 801fbe1d837..519eb5f187e 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -654,6 +654,48 @@ static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
 #endif
 }
 
+/**
+ * struct gmap_struct - guest address space
+ * @mm: pointer to the parent mm_struct
+ * @table: pointer to the page directory
+ * @crst_list: list of all crst tables used in the guest address space
+ */
+struct gmap {
+	struct list_head list;
+	struct mm_struct *mm;
+	unsigned long *table;
+	struct list_head crst_list;
+};
+
+/**
+ * struct gmap_rmap - reverse mapping for segment table entries
+ * @next: pointer to the next gmap_rmap structure in the list
+ * @entry: pointer to a segment table entry
+ */
+struct gmap_rmap {
+	struct list_head list;
+	unsigned long *entry;
+};
+
+/**
+ * struct gmap_pgtable - gmap information attached to a page table
+ * @vmaddr: address of the 1MB segment in the process virtual memory
+ * @mapper: list of segment table entries maping a page table
+ */
+struct gmap_pgtable {
+	unsigned long vmaddr;
+	struct list_head mapper;
+};
+
+struct gmap *gmap_alloc(struct mm_struct *mm);
+void gmap_free(struct gmap *gmap);
+void gmap_enable(struct gmap *gmap);
+void gmap_disable(struct gmap *gmap);
+int gmap_map_segment(struct gmap *gmap, unsigned long from,
+		     unsigned long to, unsigned long length);
+int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
+unsigned long gmap_fault(unsigned long address, struct gmap *);
+
 /*
  * Certain architectures need to do special things when PTEs
  * within a page table are directly modified. Thus, the following
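A hypothetical usage sketch of the gmap interface declared above (illustrative only, not from the patch; run_sie_loop(), guest_origin and guest_size are invented placeholders): a region of the host mm is mapped as guest real storage, the gmap is activated around guest execution, and missing segments are resolved on fault.

    static int run_guest(unsigned long guest_origin, unsigned long guest_size)
    {
    	struct gmap *gmap;
    	int rc;

    	gmap = gmap_alloc(current->mm);	/* guest space on top of this mm */
    	if (!gmap)
    		return -ENOMEM;
    	/* back guest real 0 .. guest_size with host VA guest_origin .. */
    	rc = gmap_map_segment(gmap, guest_origin, 0, guest_size);
    	if (rc)
    		goto out_free;
    	gmap_enable(gmap);		/* switch to the guest ASCE */
    	rc = run_sie_loop(gmap);	/* placeholder for the SIE entry/exit loop */
    	gmap_disable(gmap);
    out_free:
    	gmap_free(gmap);
    	return rc;
    }

On a translation exit the host side would call gmap_fault(address, gmap) to fault in the missing 1 MB segment on demand.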
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 1300c302533..a4b6229e5d4 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -80,6 +80,7 @@ struct thread_struct {
 	mm_segment_t mm_segment;
 	unsigned long prot_addr;	/* address of protection-excep. */
 	unsigned int trap_no;
+	unsigned long gmap_addr;	/* address of last gmap fault. */
 	struct per_regs per_user;	/* User specified PER registers */
 	struct per_event per_event;	/* Cause of the last PER trap */
 	/* pfault_wait is used to block the process on a pfault event */
@@ -118,14 +119,12 @@ struct stack_frame {
  * Do necessary setup to start up a new thread.
  */
 #define start_thread(regs, new_psw, new_stackp) do {		\
-	set_fs(USER_DS);					\
 	regs->psw.mask	= psw_user_bits;			\
 	regs->psw.addr	= new_psw | PSW_ADDR_AMODE;		\
 	regs->gprs[15]	= new_stackp;				\
 } while (0)
 
 #define start_thread31(regs, new_psw, new_stackp) do {		\
-	set_fs(USER_DS);					\
 	regs->psw.mask	= psw_user32_bits;			\
 	regs->psw.addr	= new_psw | PSW_ADDR_AMODE;		\
 	regs->gprs[15]	= new_stackp;				\
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 9ad628a8574..62fd80c9e98 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -539,7 +539,6 @@ struct user_regs_struct
  * These are defined as per linux/ptrace.h, which see.
  */
 #define arch_has_single_step()	(1)
-extern void show_regs(struct pt_regs * regs);
 
 #define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
 #define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index d382629a017..6582f69f238 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -113,6 +113,7 @@ extern void pfault_fini(void);
 extern void cmma_init(void);
 
 extern int memcpy_real(void *, void *, size_t);
+extern void copy_to_absolute_zero(void *dest, void *src, size_t count);
 
 #define finish_arch_switch(prev) do {					     \
 	set_fs(current->thread.mm_segment);				     \
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index ad1382f7932..1a5dbb6f149 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -94,6 +94,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SYSCALL_AUDIT	9	/* syscall auditing active */
 #define TIF_SECCOMP		10	/* secure computing */
 #define TIF_SYSCALL_TRACEPOINT	11	/* syscall tracepoint instrumentation */
+#define TIF_SIE			12	/* guest execution active */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling
 					   TIF_NEED_RESCHED */
 #define TIF_31BIT		17	/* 32bit process */
@@ -113,6 +114,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SIE		(1<<TIF_SIE)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_31BIT		(1<<TIF_31BIT)
 #define _TIF_SINGLE_STEP	(1<<TIF_FREEZE)
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index b7a4f2eb005..30444538238 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -80,7 +80,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 	 * on all cpus instead of doing a local flush if the mm
 	 * only ran on the local cpu.
 	 */
-	if (MACHINE_HAS_IDTE)
+	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
 		__tlb_flush_idte((unsigned long) mm->pgd |
 				 mm->context.asce_bits);
 	else
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index edfbd17d708..532fd432215 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -27,12 +27,9 @@ int main(void)
 	BLANK();
 	DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
 	BLANK();
-	DEFINE(__THREAD_per_cause,
-	       offsetof(struct task_struct, thread.per_event.cause));
-	DEFINE(__THREAD_per_address,
-	       offsetof(struct task_struct, thread.per_event.address));
-	DEFINE(__THREAD_per_paid,
-	       offsetof(struct task_struct, thread.per_event.paid));
+	DEFINE(__THREAD_per_cause, offsetof(struct task_struct, thread.per_event.cause));
+	DEFINE(__THREAD_per_address, offsetof(struct task_struct, thread.per_event.address));
+	DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid));
 	BLANK();
 	DEFINE(__TI_task, offsetof(struct thread_info, task));
 	DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
@@ -142,6 +139,7 @@ int main(void)
 	DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area));
 	DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
 	DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
+	DEFINE(__LC_SAVE_AREA_64, offsetof(struct _lowcore, save_area_64));
 #ifdef CONFIG_32BIT
 	DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
 #else /* CONFIG_32BIT */
@@ -151,7 +149,7 @@ int main(void)
 	DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
 	DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
 	DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
-	DEFINE(__LC_SIE_HOOK, offsetof(struct _lowcore, sie_hook));
+	DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
 	DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
 #endif /* CONFIG_32BIT */
 	return 0;
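asm-offsets.c works by emitting each DEFINE() as a marker in the compiler's assembly output, which Kbuild post-processes into asm-offsets.h; the mechanism (from include/linux/kbuild.h) is:

    #define DEFINE(sym, val) \
    	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

This is what lets low-level code refer to the new lowcore fields symbolically, for example __LC_SAVE_AREA_64 and __LC_GMAP.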
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 15e46ca9433..255435663bf 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -6,13 +6,13 @@
  *	Michael Holzheu <holzheu@de.ibm.com>
  */
 
+#include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
 
 #ifdef CONFIG_64BIT
 
-	.globl	s390_base_mcck_handler
-s390_base_mcck_handler:
+ENTRY(s390_base_mcck_handler)
 	basr	%r13,0
 0:	lg	%r15,__LC_PANIC_STACK	# load panic stack
 	aghi	%r15,-STACK_FRAME_OVERHEAD
@@ -26,13 +26,13 @@ s390_base_mcck_handler:
 	lpswe	__LC_MCK_OLD_PSW
 
 	.section .bss
+	.align 8
 	.globl	s390_base_mcck_handler_fn
 s390_base_mcck_handler_fn:
 	.quad	0
 	.previous
 
-	.globl	s390_base_ext_handler
-s390_base_ext_handler:
+ENTRY(s390_base_ext_handler)
 	stmg	%r0,%r15,__LC_SAVE_AREA
 	basr	%r13,0
 0:	aghi	%r15,-STACK_FRAME_OVERHEAD
@@ -46,13 +46,13 @@ s390_base_ext_handler:
 	lpswe	__LC_EXT_OLD_PSW
 
 	.section .bss
+	.align 8
 	.globl	s390_base_ext_handler_fn
 s390_base_ext_handler_fn:
 	.quad	0
 	.previous
 
-	.globl	s390_base_pgm_handler
-s390_base_pgm_handler:
+ENTRY(s390_base_pgm_handler)
 	stmg	%r0,%r15,__LC_SAVE_AREA
 	basr	%r13,0
 0:	aghi	%r15,-STACK_FRAME_OVERHEAD
@@ -70,15 +70,51 @@ disabled_wait_psw:
 	.quad	0x0002000180000000,0x0000000000000000 + s390_base_pgm_handler
 
 	.section .bss
+	.align 8
 	.globl	s390_base_pgm_handler_fn
 s390_base_pgm_handler_fn:
 	.quad	0
 	.previous
 
+#
+# Calls diag 308 subcode 1 and continues execution
+#
+# The following conditions must be ensured before calling this function:
+# * Prefix register = 0
+# * Lowcore protection is disabled
+#
+ENTRY(diag308_reset)
+	larl	%r4,.Lctlregs		# Save control registers
+	stctg	%c0,%c15,0(%r4)
+	larl	%r4,.Lrestart_psw	# Setup restart PSW at absolute 0
+	lghi	%r3,0
+	lg	%r4,0(%r4)		# Save PSW
+	sturg	%r4,%r3			# Use sturg, because of large pages
+	lghi	%r1,1
+	diag	%r1,%r1,0x308
+.Lrestart_part2:
+	lhi	%r0,0			# Load r0 with zero
+	lhi	%r1,2			# Use mode 2 = ESAME (dump)
+	sigp	%r1,%r0,0x12		# Switch to ESAME mode
+	sam64				# Switch to 64 bit addressing mode
+	larl	%r4,.Lctlregs		# Restore control registers
+	lctlg	%c0,%c15,0(%r4)
+	br	%r14
+.align 16
+.Lrestart_psw:
+	.long	0x00080000,0x80000000 + .Lrestart_part2
+
+	.section .bss
+.align 8
+.Lctlregs:
+	.rept	16
+	.quad	0
+	.endr
+	.previous
+
 #else /* CONFIG_64BIT */
 
-	.globl	s390_base_mcck_handler
-s390_base_mcck_handler:
+ENTRY(s390_base_mcck_handler)
 	basr	%r13,0
 0:	l	%r15,__LC_PANIC_STACK	# load panic stack
 	ahi	%r15,-STACK_FRAME_OVERHEAD
@@ -93,13 +129,13 @@ s390_base_mcck_handler:
 2:	.long	s390_base_mcck_handler_fn
 
 	.section .bss
+	.align 4
 	.globl	s390_base_mcck_handler_fn
 s390_base_mcck_handler_fn:
 	.long	0
 	.previous
 
-	.globl	s390_base_ext_handler
-s390_base_ext_handler:
+ENTRY(s390_base_ext_handler)
 	stm	%r0,%r15,__LC_SAVE_AREA
 	basr	%r13,0
 0:	ahi	%r15,-STACK_FRAME_OVERHEAD
@@ -115,13 +151,13 @@ s390_base_ext_handler:
 2:	.long	s390_base_ext_handler_fn
 
 	.section .bss
+	.align 4
 	.globl	s390_base_ext_handler_fn
 s390_base_ext_handler_fn:
 	.long	0
 	.previous
 
-	.globl	s390_base_pgm_handler
-s390_base_pgm_handler:
+ENTRY(s390_base_pgm_handler)
 	stm	%r0,%r15,__LC_SAVE_AREA
 	basr	%r13,0
 0:	ahi	%r15,-STACK_FRAME_OVERHEAD
@@ -142,6 +178,7 @@ disabled_wait_psw:
 	.long	0x000a0000,0x00000000 + s390_base_pgm_handler
 
 	.section .bss
+	.align 4
 	.globl	s390_base_pgm_handler_fn
 s390_base_pgm_handler_fn:
 	.long	0
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index eee999853a7..a9a285b8c4a 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -380,20 +380,13 @@ asmlinkage long sys32_sigreturn(void)
 		goto badframe;
 	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
 		goto badframe;
-
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
+	set_current_blocked(&set);
 	if (restore_sigregs32(regs, &frame->sregs))
 		goto badframe;
 	if (restore_sigregs_gprs_high(regs, frame->gprs_high))
 		goto badframe;
-
 	return regs->gprs[2];
-
 badframe:
 	force_sig(SIGSEGV, current);
 	return 0;
@@ -413,31 +406,22 @@ asmlinkage long sys32_rt_sigreturn(void)
 		goto badframe;
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
-
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
+	set_current_blocked(&set);
 	if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 	if (restore_sigregs_gprs_high(regs, frame->gprs_high))
 		goto badframe;
-
 	err = __get_user(ss_sp, &frame->uc.uc_stack.ss_sp);
 	st.ss_sp = compat_ptr(ss_sp);
 	err |= __get_user(st.ss_size, &frame->uc.uc_stack.ss_size);
 	err |= __get_user(st.ss_flags, &frame->uc.uc_stack.ss_flags);
 	if (err)
 		goto badframe;
-
 	set_fs (KERNEL_DS);
 	do_sigaltstack((stack_t __force __user *)&st, NULL, regs->gprs[15]);
 	set_fs (old_fs);
-
 	return regs->gprs[2];
-
 badframe:
 	force_sig(SIGSEGV, current);
 	return 0;
@@ -605,10 +589,10 @@ give_sigsegv:
  * OK, we're invoking a handler
  */
 
-int
-handle_signal32(unsigned long sig, struct k_sigaction *ka,
-		siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
+int handle_signal32(unsigned long sig, struct k_sigaction *ka,
+		    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
 {
+	sigset_t blocked;
 	int ret;
 
 	/* Set up the stack frame */
@@ -616,15 +600,12 @@ handle_signal32(unsigned long sig, struct k_sigaction *ka,
 		ret = setup_rt_frame32(sig, ka, info, oldset, regs);
 	else
 		ret = setup_frame32(sig, ka, oldset, regs);
-
-	if (ret == 0) {
-		spin_lock_irq(&current->sighand->siglock);
-		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
-		if (!(ka->sa.sa_flags & SA_NODEFER))
-			sigaddset(&current->blocked,sig);
-		recalc_sigpending();
-		spin_unlock_irq(&current->sighand->siglock);
-	}
-	return ret;
+	if (ret)
+		return ret;
+	sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
+	if (!(ka->sa.sa_flags & SA_NODEFER))
+		sigaddset(&blocked, sig);
+	set_current_blocked(&blocked);
+	return 0;
 }
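set_current_blocked() centralizes the locking that sys32_sigreturn() and friends used to open-code. A hedged paraphrase of the kernel/signal.c helper of the same series (the real one funnels through __set_task_blocked(), which also retargets shared pending signals):

    /* simplified paraphrase; see kernel/signal.c for the real helper */
    void set_current_blocked(sigset_t *newset)
    {
    	struct task_struct *tsk = current;

    	spin_lock_irq(&tsk->sighand->siglock);
    	tsk->blocked = *newset;		/* simplified: see __set_task_blocked() */
    	recalc_sigpending();
    	spin_unlock_irq(&tsk->sighand->siglock);
    }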
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 1f5eb789c3a..7526db6bf50 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -7,86 +7,74 @@
 *  Thomas Spatzier (tspat@de.ibm.com)
 */
 
-	.globl	sys32_exit_wrapper
-sys32_exit_wrapper:
+#include <linux/linkage.h>
+
+ENTRY(sys32_exit_wrapper)
 	lgfr	%r2,%r2			# int
 	jg	sys_exit		# branch to sys_exit
 
-	.globl	sys32_read_wrapper
-sys32_read_wrapper:
+ENTRY(sys32_read_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# char *
 	llgfr	%r4,%r4			# size_t
 	jg	sys32_read		# branch to sys_read
 
-	.globl	sys32_write_wrapper
-sys32_write_wrapper:
+ENTRY(sys32_write_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# const char *
 	llgfr	%r4,%r4			# size_t
 	jg	sys32_write		# branch to system call
 
-	.globl	sys32_open_wrapper
-sys32_open_wrapper:
+ENTRY(sys32_open_wrapper)
 	llgtr	%r2,%r2			# const char *
 	lgfr	%r3,%r3			# int
 	lgfr	%r4,%r4			# int
 	jg	sys_open		# branch to system call
 
-	.globl	sys32_close_wrapper
-sys32_close_wrapper:
+ENTRY(sys32_close_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	jg	sys_close		# branch to system call
 
-	.globl	sys32_creat_wrapper
-sys32_creat_wrapper:
+ENTRY(sys32_creat_wrapper)
 	llgtr	%r2,%r2			# const char *
 	lgfr	%r3,%r3			# int
 	jg	sys_creat		# branch to system call
 
-	.globl	sys32_link_wrapper
-sys32_link_wrapper:
+ENTRY(sys32_link_wrapper)
 	llgtr	%r2,%r2			# const char *
 	llgtr	%r3,%r3			# const char *
 	jg	sys_link		# branch to system call
 
-	.globl	sys32_unlink_wrapper
-sys32_unlink_wrapper:
+ENTRY(sys32_unlink_wrapper)
 	llgtr	%r2,%r2			# const char *
 	jg	sys_unlink		# branch to system call
 
-	.globl	sys32_chdir_wrapper
-sys32_chdir_wrapper:
+ENTRY(sys32_chdir_wrapper)
 	llgtr	%r2,%r2			# const char *
 	jg	sys_chdir		# branch to system call
 
-	.globl	sys32_time_wrapper
-sys32_time_wrapper:
+ENTRY(sys32_time_wrapper)
 	llgtr	%r2,%r2			# int *
 	jg	compat_sys_time		# branch to system call
 
-	.globl	sys32_mknod_wrapper
-sys32_mknod_wrapper:
+ENTRY(sys32_mknod_wrapper)
 	llgtr	%r2,%r2			# const char *
 	lgfr	%r3,%r3			# int
 	llgfr	%r4,%r4			# dev
 	jg	sys_mknod		# branch to system call
 
-	.globl	sys32_chmod_wrapper
-sys32_chmod_wrapper:
+ENTRY(sys32_chmod_wrapper)
 	llgtr	%r2,%r2			# const char *
 	llgfr	%r3,%r3			# mode_t
 	jg	sys_chmod		# branch to system call
 
-	.globl	sys32_lchown16_wrapper
-sys32_lchown16_wrapper:
+ENTRY(sys32_lchown16_wrapper)
 	llgtr	%r2,%r2			# const char *
 	llgfr	%r3,%r3			# __kernel_old_uid_emu31_t
 	llgfr	%r4,%r4			# __kernel_old_uid_emu31_t
 	jg	sys32_lchown16		# branch to system call
 
-	.globl	sys32_lseek_wrapper
-sys32_lseek_wrapper:
+ENTRY(sys32_lseek_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	lgfr	%r3,%r3			# off_t
 	llgfr	%r4,%r4			# unsigned int
@@ -94,8 +82,7 @@ sys32_lseek_wrapper:
 
 #sys32_getpid_wrapper				# void
 
-	.globl	sys32_mount_wrapper
-sys32_mount_wrapper:
+ENTRY(sys32_mount_wrapper)
 	llgtr	%r2,%r2			# char *
 	llgtr	%r3,%r3			# char *
 	llgtr	%r4,%r4			# char *
@@ -103,102 +90,85 @@ sys32_mount_wrapper:
 	llgtr	%r6,%r6			# void *
 	jg	compat_sys_mount	# branch to system call
 
-	.globl	sys32_oldumount_wrapper
-sys32_oldumount_wrapper:
+ENTRY(sys32_oldumount_wrapper)
 	llgtr	%r2,%r2			# char *
 	jg	sys_oldumount		# branch to system call
 
-	.globl	sys32_setuid16_wrapper
-sys32_setuid16_wrapper:
+ENTRY(sys32_setuid16_wrapper)
 	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t
 	jg	sys32_setuid16		# branch to system call
 
 #sys32_getuid16_wrapper			# void
 
-	.globl	sys32_ptrace_wrapper
-sys32_ptrace_wrapper:
+ENTRY(sys32_ptrace_wrapper)
 	lgfr	%r2,%r2			# long
 	lgfr	%r3,%r3			# long
 	llgtr	%r4,%r4			# long
 	llgfr	%r5,%r5			# long
 	jg	compat_sys_ptrace	# branch to system call
 
-	.globl	sys32_alarm_wrapper
-sys32_alarm_wrapper:
+ENTRY(sys32_alarm_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	jg	sys_alarm		# branch to system call
 
-	.globl	compat_sys_utime_wrapper
-compat_sys_utime_wrapper:
+ENTRY(compat_sys_utime_wrapper)
 	llgtr	%r2,%r2			# char *
 	llgtr	%r3,%r3			# struct compat_utimbuf *
 	jg	compat_sys_utime	# branch to system call
 
-	.globl	sys32_access_wrapper
-sys32_access_wrapper:
+ENTRY(sys32_access_wrapper)
 	llgtr	%r2,%r2			# const char *
 	lgfr	%r3,%r3			# int
 	jg	sys_access		# branch to system call
 
-	.globl	sys32_nice_wrapper
-sys32_nice_wrapper:
+ENTRY(sys32_nice_wrapper)
 	lgfr	%r2,%r2			# int
 	jg	sys_nice		# branch to system call
 
 #sys32_sync_wrapper			# void
 
-	.globl	sys32_kill_wrapper
-sys32_kill_wrapper:
+ENTRY(sys32_kill_wrapper)
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# int
 	jg	sys_kill		# branch to system call
 
-	.globl	sys32_rename_wrapper
-sys32_rename_wrapper:
+ENTRY(sys32_rename_wrapper)
 	llgtr	%r2,%r2			# const char *
 	llgtr	%r3,%r3			# const char *
 	jg	sys_rename		# branch to system call
 
-	.globl	sys32_mkdir_wrapper
-sys32_mkdir_wrapper:
+ENTRY(sys32_mkdir_wrapper)
 	llgtr	%r2,%r2			# const char *
 	lgfr	%r3,%r3			# int
 	jg	sys_mkdir		# branch to system call
 
-	.globl	sys32_rmdir_wrapper
-sys32_rmdir_wrapper:
+ENTRY(sys32_rmdir_wrapper)
 	llgtr	%r2,%r2			# const char *
 	jg	sys_rmdir		# branch to system call
 
-	.globl	sys32_dup_wrapper
-sys32_dup_wrapper:
+ENTRY(sys32_dup_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	jg	sys_dup			# branch to system call
 
-	.globl	sys32_pipe_wrapper
-sys32_pipe_wrapper:
+ENTRY(sys32_pipe_wrapper)
 	llgtr	%r2,%r2			# u32 *
 	jg	sys_pipe		# branch to system call
 
-	.globl	compat_sys_times_wrapper
-compat_sys_times_wrapper:
+ENTRY(compat_sys_times_wrapper)
 	llgtr	%r2,%r2			# struct compat_tms *
 	jg	compat_sys_times	# branch to system call
 
-	.globl	sys32_brk_wrapper
-sys32_brk_wrapper:
+ENTRY(sys32_brk_wrapper)
 	llgtr	%r2,%r2			# unsigned long
 	jg	sys_brk			# branch to system call
 
-	.globl	sys32_setgid16_wrapper
-sys32_setgid16_wrapper:
+ENTRY(sys32_setgid16_wrapper)
 	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t
 	jg	sys32_setgid16		# branch to system call
 
 #sys32_getgid16_wrapper			# void
 
-	.globl	sys32_signal_wrapper
-sys32_signal_wrapper:
+ENTRY(sys32_signal_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# __sighandler_t
 	jg	sys_signal
@@ -207,55 +177,46 @@ sys32_signal_wrapper:
 
 #sys32_getegid16_wrapper		# void
 
-	.globl	sys32_acct_wrapper
-sys32_acct_wrapper:
+ENTRY(sys32_acct_wrapper)
 	llgtr	%r2,%r2			# char *
 	jg	sys_acct		# branch to system call
 
-	.globl	sys32_umount_wrapper
-sys32_umount_wrapper:
+ENTRY(sys32_umount_wrapper)
 	llgtr	%r2,%r2			# char *
 	lgfr	%r3,%r3			# int
 	jg	sys_umount		# branch to system call
 
-	.globl	compat_sys_ioctl_wrapper
-compat_sys_ioctl_wrapper:
+ENTRY(compat_sys_ioctl_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# unsigned int
 	llgfr	%r4,%r4			# unsigned int
 	jg	compat_sys_ioctl	# branch to system call
 
-	.globl	compat_sys_fcntl_wrapper
-compat_sys_fcntl_wrapper:
+ENTRY(compat_sys_fcntl_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# unsigned int
 	llgfr	%r4,%r4			# unsigned long
 	jg	compat_sys_fcntl	# branch to system call
 
-	.globl	sys32_setpgid_wrapper
-sys32_setpgid_wrapper:
+ENTRY(sys32_setpgid_wrapper)
 	lgfr	%r2,%r2			# pid_t
 	lgfr	%r3,%r3			# pid_t
 	jg	sys_setpgid		# branch to system call
 
-	.globl	sys32_umask_wrapper
-sys32_umask_wrapper:
+ENTRY(sys32_umask_wrapper)
 	lgfr	%r2,%r2			# int
 	jg	sys_umask		# branch to system call
 
-	.globl	sys32_chroot_wrapper
-sys32_chroot_wrapper:
+ENTRY(sys32_chroot_wrapper)
 	llgtr	%r2,%r2			# char *
 	jg	sys_chroot		# branch to system call
 
-	.globl	sys32_ustat_wrapper
-sys32_ustat_wrapper:
+ENTRY(sys32_ustat_wrapper)
 	llgfr	%r2,%r2			# dev_t
 	llgtr	%r3,%r3			# struct ustat *
 	jg	compat_sys_ustat
 
-	.globl	sys32_dup2_wrapper
-sys32_dup2_wrapper:
+ENTRY(sys32_dup2_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# unsigned int
 	jg	sys_dup2		# branch to system call
@@ -266,262 +227,220 @@ sys32_dup2_wrapper:
 
 #sys32_setsid_wrapper			# void
 
-	.globl	sys32_sigaction_wrapper
-sys32_sigaction_wrapper:
+ENTRY(sys32_sigaction_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# const struct old_sigaction *
 	llgtr	%r4,%r4			# struct old_sigaction32 *
 	jg	sys32_sigaction		# branch to system call
 
-	.globl	sys32_setreuid16_wrapper
-sys32_setreuid16_wrapper:
+ENTRY(sys32_setreuid16_wrapper)
 	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t
 	llgfr	%r3,%r3			# __kernel_old_uid_emu31_t
 	jg	sys32_setreuid16	# branch to system call
 
-	.globl	sys32_setregid16_wrapper
-sys32_setregid16_wrapper:
+ENTRY(sys32_setregid16_wrapper)
 	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t
 	llgfr	%r3,%r3			# __kernel_old_gid_emu31_t
 	jg	sys32_setregid16	# branch to system call
 
-	.globl	sys_sigsuspend_wrapper
-sys_sigsuspend_wrapper:
+ENTRY(sys_sigsuspend_wrapper)
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# int
 	llgfr	%r4,%r4			# old_sigset_t
 	jg	sys_sigsuspend
 
-	.globl	compat_sys_sigpending_wrapper
-compat_sys_sigpending_wrapper:
+ENTRY(compat_sys_sigpending_wrapper)
 	llgtr	%r2,%r2			# compat_old_sigset_t *
 	jg	compat_sys_sigpending	# branch to system call
 
-	.globl	sys32_sethostname_wrapper
-sys32_sethostname_wrapper:
+ENTRY(sys32_sethostname_wrapper)
 	llgtr	%r2,%r2			# char *
 	lgfr	%r3,%r3			# int
 	jg	sys_sethostname		# branch to system call
 
-	.globl	compat_sys_setrlimit_wrapper
-compat_sys_setrlimit_wrapper:
+ENTRY(compat_sys_setrlimit_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# struct rlimit_emu31 *
 	jg	compat_sys_setrlimit	# branch to system call
 
-	.globl	compat_sys_old_getrlimit_wrapper
-compat_sys_old_getrlimit_wrapper:
+ENTRY(compat_sys_old_getrlimit_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# struct rlimit_emu31 *
 	jg	compat_sys_old_getrlimit # branch to system call
 
-	.globl	compat_sys_getrlimit_wrapper
-compat_sys_getrlimit_wrapper:
+ENTRY(compat_sys_getrlimit_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# struct rlimit_emu31 *
 	jg	compat_sys_getrlimit	# branch to system call
 
-	.globl	sys32_mmap2_wrapper
-sys32_mmap2_wrapper:
+ENTRY(sys32_mmap2_wrapper)
 	llgtr	%r2,%r2			# struct mmap_arg_struct_emu31 *
 	jg	sys32_mmap2		# branch to system call
 
-	.globl	compat_sys_getrusage_wrapper
-compat_sys_getrusage_wrapper:
+ENTRY(compat_sys_getrusage_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# struct rusage_emu31 *
 	jg	compat_sys_getrusage	# branch to system call
 
-	.globl	compat_sys_gettimeofday_wrapper
-compat_sys_gettimeofday_wrapper:
+ENTRY(compat_sys_gettimeofday_wrapper)
 	llgtr	%r2,%r2			# struct timeval_emu31 *
 	llgtr	%r3,%r3			# struct timezone *
 	jg	compat_sys_gettimeofday	# branch to system call
 
-	.globl	compat_sys_settimeofday_wrapper
-compat_sys_settimeofday_wrapper:
+ENTRY(compat_sys_settimeofday_wrapper)
 	llgtr	%r2,%r2			# struct timeval_emu31 *
 	llgtr	%r3,%r3			# struct timezone *
 	jg	compat_sys_settimeofday	# branch to system call
 
-	.globl	sys32_getgroups16_wrapper
-sys32_getgroups16_wrapper:
+ENTRY(sys32_getgroups16_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# __kernel_old_gid_emu31_t *
 	jg	sys32_getgroups16	# branch to system call
 
-	.globl	sys32_setgroups16_wrapper
-sys32_setgroups16_wrapper:
+ENTRY(sys32_setgroups16_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# __kernel_old_gid_emu31_t *
 	jg	sys32_setgroups16	# branch to system call
 
-	.globl	sys32_symlink_wrapper
-sys32_symlink_wrapper:
+ENTRY(sys32_symlink_wrapper)
 	llgtr	%r2,%r2			# const char *
 	llgtr	%r3,%r3			# const char *
 	jg	sys_symlink		# branch to system call
 
-	.globl	sys32_readlink_wrapper
-sys32_readlink_wrapper:
+ENTRY(sys32_readlink_wrapper)
 	llgtr	%r2,%r2			# const char *
 	llgtr	%r3,%r3			# char *
 	lgfr	%r4,%r4			# int
 	jg	sys_readlink		# branch to system call
 
-	.globl	sys32_uselib_wrapper
-sys32_uselib_wrapper:
+ENTRY(sys32_uselib_wrapper)
 	llgtr	%r2,%r2			# const char *
 	jg	sys_uselib		# branch to system call
 
-	.globl	sys32_swapon_wrapper
-sys32_swapon_wrapper:
+ENTRY(sys32_swapon_wrapper)
 	llgtr	%r2,%r2			# const char *
 	lgfr	%r3,%r3			# int
 	jg	sys_swapon		# branch to system call
 
-	.globl	sys32_reboot_wrapper
-sys32_reboot_wrapper:
+ENTRY(sys32_reboot_wrapper)
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# int
 	llgfr	%r4,%r4			# unsigned int
 	llgtr	%r5,%r5			# void *
 	jg	sys_reboot		# branch to system call
 
-	.globl	old32_readdir_wrapper
-old32_readdir_wrapper:
+ENTRY(old32_readdir_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# void *
 	llgfr	%r4,%r4			# unsigned int
 	jg	compat_sys_old_readdir	# branch to system call
 
-	.globl	old32_mmap_wrapper
-old32_mmap_wrapper:
+ENTRY(old32_mmap_wrapper)
 	llgtr	%r2,%r2			# struct mmap_arg_struct_emu31 *
 	jg	old32_mmap		# branch to system call
 
-	.globl	sys32_munmap_wrapper
-sys32_munmap_wrapper:
+ENTRY(sys32_munmap_wrapper)
 	llgfr	%r2,%r2			# unsigned long
 	llgfr	%r3,%r3			# size_t
 	jg	sys_munmap		# branch to system call
 
-	.globl	sys32_truncate_wrapper
-sys32_truncate_wrapper:
+ENTRY(sys32_truncate_wrapper)
 	llgtr	%r2,%r2			# const char *
 	lgfr	%r3,%r3			# long
 	jg	sys_truncate		# branch to system call
 
-	.globl	sys32_ftruncate_wrapper
-sys32_ftruncate_wrapper:
+ENTRY(sys32_ftruncate_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# unsigned long
 	jg	sys_ftruncate		# branch to system call
 
-	.globl	sys32_fchmod_wrapper
-sys32_fchmod_wrapper:
+ENTRY(sys32_fchmod_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# mode_t
 	jg	sys_fchmod		# branch to system call
 
-	.globl	sys32_fchown16_wrapper
-sys32_fchown16_wrapper:
+ENTRY(sys32_fchown16_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# compat_uid_t
 	llgfr	%r4,%r4			# compat_uid_t
 	jg	sys32_fchown16		# branch to system call
 
-	.globl	sys32_getpriority_wrapper
-sys32_getpriority_wrapper:
+ENTRY(sys32_getpriority_wrapper)
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# int
 	jg	sys_getpriority		# branch to system call
 
-	.globl	sys32_setpriority_wrapper
-sys32_setpriority_wrapper:
+ENTRY(sys32_setpriority_wrapper)
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# int
 	lgfr	%r4,%r4			# int
 	jg	sys_setpriority		# branch to system call
 
-	.globl	compat_sys_statfs_wrapper
-compat_sys_statfs_wrapper:
+ENTRY(compat_sys_statfs_wrapper)
 	llgtr	%r2,%r2			# char *
 	llgtr	%r3,%r3			# struct compat_statfs *
 	jg	compat_sys_statfs	# branch to system call
 
-	.globl	compat_sys_fstatfs_wrapper
-compat_sys_fstatfs_wrapper:
+ENTRY(compat_sys_fstatfs_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# struct compat_statfs *
 	jg	compat_sys_fstatfs	# branch to system call
 
-	.globl	compat_sys_socketcall_wrapper
-compat_sys_socketcall_wrapper:
+ENTRY(compat_sys_socketcall_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# u32 *
 	jg	compat_sys_socketcall	# branch to system call
 
-	.globl	sys32_syslog_wrapper
-sys32_syslog_wrapper:
+ENTRY(sys32_syslog_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# char *
 	lgfr	%r4,%r4			# int
 	jg	sys_syslog		# branch to system call
 
-	.globl	compat_sys_setitimer_wrapper
-compat_sys_setitimer_wrapper:
+ENTRY(compat_sys_setitimer_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# struct itimerval_emu31 *
 	llgtr	%r4,%r4			# struct itimerval_emu31 *
 	jg	compat_sys_setitimer	# branch to system call
 
-	.globl	compat_sys_getitimer_wrapper
-compat_sys_getitimer_wrapper:
+ENTRY(compat_sys_getitimer_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# struct itimerval_emu31 *
 	jg	compat_sys_getitimer	# branch to system call
 
-	.globl	compat_sys_newstat_wrapper
-compat_sys_newstat_wrapper:
+ENTRY(compat_sys_newstat_wrapper)
 	llgtr	%r2,%r2			# char *
 	llgtr	%r3,%r3			# struct stat_emu31 *
 	jg	compat_sys_newstat	# branch to system call
 
-	.globl	compat_sys_newlstat_wrapper
-compat_sys_newlstat_wrapper:
+ENTRY(compat_sys_newlstat_wrapper)
 	llgtr	%r2,%r2			# char *
 	llgtr	%r3,%r3			# struct stat_emu31 *
 	jg	compat_sys_newlstat	# branch to system call
 
-	.globl	compat_sys_newfstat_wrapper
-compat_sys_newfstat_wrapper:
+ENTRY(compat_sys_newfstat_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# struct stat_emu31 *
 	jg	compat_sys_newfstat	# branch to system call
 
 #sys32_vhangup_wrapper			# void
 
-	.globl	compat_sys_wait4_wrapper
-compat_sys_wait4_wrapper:
+ENTRY(compat_sys_wait4_wrapper)
 	lgfr	%r2,%r2			# pid_t
 	llgtr	%r3,%r3			# unsigned int *
 	lgfr	%r4,%r4			# int
 	llgtr	%r5,%r5			# struct rusage *
 	jg	compat_sys_wait4	# branch to system call
 
-	.globl	sys32_swapoff_wrapper
-sys32_swapoff_wrapper:
+ENTRY(sys32_swapoff_wrapper)
 	llgtr	%r2,%r2			# const char *
 	jg	sys_swapoff		# branch to system call
 
-	.globl	compat_sys_sysinfo_wrapper
-compat_sys_sysinfo_wrapper:
+ENTRY(compat_sys_sysinfo_wrapper)
 	llgtr	%r2,%r2			# struct sysinfo_emu31 *
 	jg	compat_sys_sysinfo	# branch to system call
 
-	.globl	sys32_ipc_wrapper
-sys32_ipc_wrapper:
+ENTRY(sys32_ipc_wrapper)
 	llgfr	%r2,%r2			# uint
 	lgfr	%r3,%r3			# int
 	lgfr	%r4,%r4			# int
@@ -529,8 +448,7 @@ sys32_ipc_wrapper:
 	llgfr	%r6,%r6			# u32
 	jg	sys32_ipc		# branch to system call
 
-	.globl	sys32_fsync_wrapper
-sys32_fsync_wrapper:
+ENTRY(sys32_fsync_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	jg	sys_fsync		# branch to system call
 
@@ -538,97 +456,81 @@ sys32_fsync_wrapper:
 
 #sys32_clone_wrapper			# done in clone_glue
 
-	.globl	sys32_setdomainname_wrapper
-sys32_setdomainname_wrapper:
+ENTRY(sys32_setdomainname_wrapper)
 	llgtr	%r2,%r2			# char *
 	lgfr	%r3,%r3			# int
 	jg	sys_setdomainname	# branch to system call
 
-	.globl	sys32_newuname_wrapper
-sys32_newuname_wrapper:
+ENTRY(sys32_newuname_wrapper)
 	llgtr	%r2,%r2			# struct new_utsname *
 	jg	sys_newuname		# branch to system call
 
-	.globl	compat_sys_adjtimex_wrapper
-compat_sys_adjtimex_wrapper:
+ENTRY(compat_sys_adjtimex_wrapper)
 	llgtr	%r2,%r2			# struct compat_timex *
 	jg	compat_sys_adjtimex	# branch to system call
 
-	.globl	sys32_mprotect_wrapper
-sys32_mprotect_wrapper:
+ENTRY(sys32_mprotect_wrapper)
 	llgtr	%r2,%r2			# unsigned long (actually pointer
 	llgfr	%r3,%r3			# size_t
 	llgfr	%r4,%r4			# unsigned long
 	jg	sys_mprotect		# branch to system call
 
-	.globl	compat_sys_sigprocmask_wrapper
-compat_sys_sigprocmask_wrapper:
+ENTRY(compat_sys_sigprocmask_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# compat_old_sigset_t *
 	llgtr	%r4,%r4			# compat_old_sigset_t *
 	jg	compat_sys_sigprocmask	# branch to system call
 
-	.globl	sys_init_module_wrapper
-sys_init_module_wrapper:
+ENTRY(sys_init_module_wrapper)
 	llgtr	%r2,%r2			# void *
 	llgfr	%r3,%r3			# unsigned long
 	llgtr	%r4,%r4			# char *
 	jg	sys_init_module		# branch to system call
 
-	.globl	sys_delete_module_wrapper
-sys_delete_module_wrapper:
+ENTRY(sys_delete_module_wrapper)
 	llgtr	%r2,%r2			# const char *
 	llgfr	%r3,%r3			# unsigned int
 	jg	sys_delete_module	# branch to system call
 
-	.globl	sys32_quotactl_wrapper
-sys32_quotactl_wrapper:
+ENTRY(sys32_quotactl_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# const char *
 	llgfr	%r4,%r4			# qid_t
 	llgtr	%r5,%r5			# caddr_t
 	jg	sys_quotactl		# branch to system call
 
-	.globl	sys32_getpgid_wrapper
-sys32_getpgid_wrapper:
+ENTRY(sys32_getpgid_wrapper)
 	lgfr	%r2,%r2			# pid_t
 	jg	sys_getpgid		# branch to system call
 
-	.globl	sys32_fchdir_wrapper
-sys32_fchdir_wrapper:
+ENTRY(sys32_fchdir_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	jg	sys_fchdir		# branch to system call
 
-	.globl	sys32_bdflush_wrapper
-sys32_bdflush_wrapper:
+ENTRY(sys32_bdflush_wrapper)
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# long
 	jg	sys_bdflush		# branch to system call
 
-	.globl	sys32_sysfs_wrapper
-sys32_sysfs_wrapper:
+ENTRY(sys32_sysfs_wrapper)
 	lgfr	%r2,%r2			# int
 	llgfr	%r3,%r3			# unsigned long
 	llgfr	%r4,%r4			# unsigned long
 	jg	sys_sysfs		# branch to system call
 
-	.globl	sys32_personality_wrapper
-sys32_personality_wrapper:
+ENTRY(sys32_personality_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	jg	sys_s390_personality	# branch to system call
 
-	.globl	sys32_setfsuid16_wrapper
-sys32_setfsuid16_wrapper:
+ENTRY(sys32_setfsuid16_wrapper)
 	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t
 	jg	sys32_setfsuid16	# branch to system call
 
-	.globl	sys32_setfsgid16_wrapper
-sys32_setfsgid16_wrapper:
+ENTRY(sys32_setfsgid16_wrapper)
 	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t
 	jg	sys32_setfsgid16	# branch to system call
 
-	.globl	sys32_llseek_wrapper
-sys32_llseek_wrapper:
+ENTRY(sys32_llseek_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# unsigned long
 	llgfr	%r4,%r4			# unsigned long
@@ -636,15 +538,13 @@ sys32_llseek_wrapper:
 	llgfr	%r6,%r6			# unsigned int
 	jg	sys_llseek		# branch to system call
 
-	.globl	sys32_getdents_wrapper
-sys32_getdents_wrapper:
+ENTRY(sys32_getdents_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# void *
 	llgfr	%r4,%r4			# unsigned int
 	jg	compat_sys_getdents	# branch to system call
 
-	.globl	compat_sys_select_wrapper
-compat_sys_select_wrapper:
+ENTRY(compat_sys_select_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# compat_fd_set *
 	llgtr	%r4,%r4			# compat_fd_set *
@@ -652,112 +552,94 @@ compat_sys_select_wrapper:
 	llgtr	%r6,%r6			# struct compat_timeval *
 	jg	compat_sys_select	# branch to system call
 
-	.globl	sys32_flock_wrapper
-sys32_flock_wrapper:
+ENTRY(sys32_flock_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# unsigned int
 	jg	sys_flock		# branch to system call
 
-	.globl	sys32_msync_wrapper
-sys32_msync_wrapper:
+ENTRY(sys32_msync_wrapper)
 	llgfr	%r2,%r2			# unsigned long
 	llgfr	%r3,%r3			# size_t
 	lgfr	%r4,%r4			# int
 	jg	sys_msync		# branch to system call
 
-	.globl	compat_sys_readv_wrapper
-compat_sys_readv_wrapper:
+ENTRY(compat_sys_readv_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# const struct compat_iovec *
 	llgfr	%r4,%r4			# unsigned long
 	jg	compat_sys_readv	# branch to system call
 
-	.globl	compat_sys_writev_wrapper
-compat_sys_writev_wrapper:
+ENTRY(compat_sys_writev_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# const struct compat_iovec *
 	llgfr	%r4,%r4			# unsigned long
 	jg	compat_sys_writev	# branch to system call
 
-	.globl	sys32_getsid_wrapper
-sys32_getsid_wrapper:
+ENTRY(sys32_getsid_wrapper)
 	lgfr	%r2,%r2			# pid_t
 	jg	sys_getsid		# branch to system call
 
-	.globl	sys32_fdatasync_wrapper
-sys32_fdatasync_wrapper:
+ENTRY(sys32_fdatasync_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	jg	sys_fdatasync		# branch to system call
 
-	.globl	sys32_mlock_wrapper
-sys32_mlock_wrapper:
+ENTRY(sys32_mlock_wrapper)
 	llgfr	%r2,%r2			# unsigned long
 	llgfr	%r3,%r3			# size_t
 	jg	sys_mlock		# branch to system call
 
-	.globl	sys32_munlock_wrapper
-sys32_munlock_wrapper:
+ENTRY(sys32_munlock_wrapper)
 	llgfr	%r2,%r2			# unsigned long
 	llgfr	%r3,%r3			# size_t
 	jg	sys_munlock		# branch to system call
 
-	.globl	sys32_mlockall_wrapper
-sys32_mlockall_wrapper:
+ENTRY(sys32_mlockall_wrapper)
 	lgfr	%r2,%r2			# int
 	jg	sys_mlockall		# branch to system call
 
 #sys32_munlockall_wrapper		# void
 
-	.globl	sys32_sched_setparam_wrapper
-sys32_sched_setparam_wrapper:
+ENTRY(sys32_sched_setparam_wrapper)
 	lgfr	%r2,%r2			# pid_t
 	llgtr	%r3,%r3			# struct sched_param *
 	jg	sys_sched_setparam	# branch to system call
 
-	.globl	sys32_sched_getparam_wrapper
-sys32_sched_getparam_wrapper:
+ENTRY(sys32_sched_getparam_wrapper)
 	lgfr	%r2,%r2			# pid_t
 	llgtr	%r3,%r3			# struct sched_param *
 	jg	sys_sched_getparam	# branch to system call
 
-	.globl	sys32_sched_setscheduler_wrapper
-sys32_sched_setscheduler_wrapper:
+ENTRY(sys32_sched_setscheduler_wrapper)
 	lgfr	%r2,%r2			# pid_t
 	lgfr	%r3,%r3			# int
 	llgtr	%r4,%r4			# struct sched_param *
 	jg	sys_sched_setscheduler	# branch to system call
 
-	.globl	sys32_sched_getscheduler_wrapper
-sys32_sched_getscheduler_wrapper:
+ENTRY(sys32_sched_getscheduler_wrapper)
 	lgfr	%r2,%r2			# pid_t
 	jg	sys_sched_getscheduler	# branch to system call
 
 #sys32_sched_yield_wrapper		# void
 
-	.globl	sys32_sched_get_priority_max_wrapper
-sys32_sched_get_priority_max_wrapper:
+ENTRY(sys32_sched_get_priority_max_wrapper)
 	lgfr	%r2,%r2			# int
 	jg	sys_sched_get_priority_max	# branch to system call
 
-	.globl	sys32_sched_get_priority_min_wrapper
-sys32_sched_get_priority_min_wrapper:
+ENTRY(sys32_sched_get_priority_min_wrapper)
 	lgfr	%r2,%r2			# int
 	jg	sys_sched_get_priority_min	# branch to system call
 
-	.globl	sys32_sched_rr_get_interval_wrapper
-sys32_sched_rr_get_interval_wrapper:
+ENTRY(sys32_sched_rr_get_interval_wrapper)
 	lgfr	%r2,%r2			# pid_t
 	llgtr	%r3,%r3			# struct compat_timespec *
 	jg	sys32_sched_rr_get_interval	# branch to system call
 
-	.globl	compat_sys_nanosleep_wrapper
-compat_sys_nanosleep_wrapper:
+ENTRY(compat_sys_nanosleep_wrapper)
 	llgtr	%r2,%r2			# struct compat_timespec *
 	llgtr	%r3,%r3			# struct compat_timespec *
 	jg	compat_sys_nanosleep	# branch to system call
 
-	.globl	sys32_mremap_wrapper
-sys32_mremap_wrapper:
+ENTRY(sys32_mremap_wrapper)
 	llgfr	%r2,%r2			# unsigned long
 	llgfr	%r3,%r3			# unsigned long
 	llgfr	%r4,%r4			# unsigned long
@@ -765,50 +647,37 @@ sys32_mremap_wrapper:
 	llgfr	%r6,%r6			# unsigned long
 	jg	sys_mremap		# branch to system call
 
-	.globl	sys32_setresuid16_wrapper
-sys32_setresuid16_wrapper:
+ENTRY(sys32_setresuid16_wrapper)
 	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t
 	llgfr	%r3,%r3			# __kernel_old_uid_emu31_t
 	llgfr	%r4,%r4			# __kernel_old_uid_emu31_t
 	jg	sys32_setresuid16	# branch to system call
 
-	.globl	sys32_getresuid16_wrapper
-sys32_getresuid16_wrapper:
+ENTRY(sys32_getresuid16_wrapper)
 	llgtr	%r2,%r2			# __kernel_old_uid_emu31_t *
 	llgtr	%r3,%r3			# __kernel_old_uid_emu31_t *
 	llgtr	%r4,%r4			# __kernel_old_uid_emu31_t *
 	jg	sys32_getresuid16	# branch to system call
 
-	.globl	sys32_poll_wrapper
-sys32_poll_wrapper:
+ENTRY(sys32_poll_wrapper)
 	llgtr	%r2,%r2			# struct pollfd *
 	llgfr	%r3,%r3			# unsigned int
 	lgfr	%r4,%r4			# long
 	jg	sys_poll		# branch to system call
 
-	.globl	compat_sys_nfsservctl_wrapper
-compat_sys_nfsservctl_wrapper:
-	lgfr	%r2,%r2			# int
-	llgtr	%r3,%r3			# struct compat_nfsctl_arg*
-	llgtr	%r4,%r4			# union compat_nfsctl_res*
-	jg	compat_sys_nfsservctl	# branch to system call
-
-	.globl	sys32_setresgid16_wrapper
-sys32_setresgid16_wrapper:
+ENTRY(sys32_setresgid16_wrapper)
 	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t
 	llgfr	%r3,%r3			# __kernel_old_gid_emu31_t
 	llgfr	%r4,%r4			# __kernel_old_gid_emu31_t
 	jg	sys32_setresgid16	# branch to system call
 
-	.globl	sys32_getresgid16_wrapper
-sys32_getresgid16_wrapper:
+ENTRY(sys32_getresgid16_wrapper)
 	llgtr	%r2,%r2			# __kernel_old_gid_emu31_t *
 	llgtr	%r3,%r3			# __kernel_old_gid_emu31_t *
 	llgtr	%r4,%r4			# __kernel_old_gid_emu31_t *
 	jg	sys32_getresgid16	# branch to system call
 
-	.globl	sys32_prctl_wrapper
-sys32_prctl_wrapper:
+ENTRY(sys32_prctl_wrapper)
 	lgfr	%r2,%r2			# int
 	llgfr	%r3,%r3			# unsigned long
 	llgfr	%r4,%r4			# unsigned long
@@ -818,51 +687,44 @@ sys32_prctl_wrapper:
 
 #sys32_rt_sigreturn_wrapper		# done in rt_sigreturn_glue
 
-	.globl	sys32_rt_sigaction_wrapper
-sys32_rt_sigaction_wrapper:
+ENTRY(sys32_rt_sigaction_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# const struct sigaction_emu31 *
 	llgtr	%r4,%r4			# const struct sigaction_emu31 *
 	llgfr	%r5,%r5			# size_t
 	jg	sys32_rt_sigaction	# branch to system call
 
-	.globl	sys32_rt_sigprocmask_wrapper
-sys32_rt_sigprocmask_wrapper:
+ENTRY(sys32_rt_sigprocmask_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# old_sigset_emu31 *
 	llgtr	%r4,%r4			# old_sigset_emu31 *
 	llgfr	%r5,%r5			# size_t
 	jg	sys32_rt_sigprocmask	# branch to system call
 
-	.globl	sys32_rt_sigpending_wrapper
-sys32_rt_sigpending_wrapper:
+ENTRY(sys32_rt_sigpending_wrapper)
 	llgtr	%r2,%r2			# sigset_emu31 *
 	llgfr	%r3,%r3			# size_t
 	jg	sys32_rt_sigpending	# branch to system call
 
-	.globl	compat_sys_rt_sigtimedwait_wrapper
-compat_sys_rt_sigtimedwait_wrapper:
+ENTRY(compat_sys_rt_sigtimedwait_wrapper)
 	llgtr	%r2,%r2			# const sigset_emu31_t *
 	llgtr	%r3,%r3			# siginfo_emu31_t *
 	llgtr	%r4,%r4			# const struct compat_timespec *
 	llgfr	%r5,%r5			# size_t
 	jg	compat_sys_rt_sigtimedwait	# branch to system call
 
-	.globl	sys32_rt_sigqueueinfo_wrapper
-sys32_rt_sigqueueinfo_wrapper:
+ENTRY(sys32_rt_sigqueueinfo_wrapper)
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# int
 	llgtr	%r4,%r4			# siginfo_emu31_t *
 	jg	sys32_rt_sigqueueinfo	# branch to system call
 
-	.globl	compat_sys_rt_sigsuspend_wrapper
-compat_sys_rt_sigsuspend_wrapper:
+ENTRY(compat_sys_rt_sigsuspend_wrapper)
 	llgtr	%r2,%r2			# compat_sigset_t *
 	llgfr	%r3,%r3			# compat_size_t
 	jg	compat_sys_rt_sigsuspend
 
-	.globl	sys32_pread64_wrapper
-sys32_pread64_wrapper:
+ENTRY(sys32_pread64_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# char *
 	llgfr	%r4,%r4			# size_t
@@ -870,8 +732,7 @@ sys32_pread64_wrapper:
 	llgfr	%r6,%r6			# u32
 	jg	sys32_pread64		# branch to system call
 
-	.globl	sys32_pwrite64_wrapper
-sys32_pwrite64_wrapper:
+ENTRY(sys32_pwrite64_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# const char *
 	llgfr	%r4,%r4			# size_t
@@ -879,39 +740,33 @@ sys32_pwrite64_wrapper:
 	llgfr	%r6,%r6			# u32
 	jg	sys32_pwrite64		# branch to system call
 
-	.globl	sys32_chown16_wrapper
-sys32_chown16_wrapper:
+ENTRY(sys32_chown16_wrapper)
 	llgtr	%r2,%r2			# const char *
 	llgfr	%r3,%r3			# __kernel_old_uid_emu31_t
 	llgfr	%r4,%r4			# __kernel_old_gid_emu31_t
 	jg	sys32_chown16		# branch to system call
 
-	.globl	sys32_getcwd_wrapper
-sys32_getcwd_wrapper:
+ENTRY(sys32_getcwd_wrapper)
 	llgtr	%r2,%r2			# char *
 	llgfr	%r3,%r3			# unsigned long
 	jg	sys_getcwd		# branch to system call
 
-	.globl	sys32_capget_wrapper
-sys32_capget_wrapper:
+ENTRY(sys32_capget_wrapper)
 	llgtr	%r2,%r2			# cap_user_header_t
 	llgtr	%r3,%r3			# cap_user_data_t
 	jg	sys_capget		# branch to system call
 
-	.globl	sys32_capset_wrapper
-sys32_capset_wrapper:
+ENTRY(sys32_capset_wrapper)
 	llgtr	%r2,%r2			# cap_user_header_t
 	llgtr	%r3,%r3			# const cap_user_data_t
 	jg	sys_capset		# branch to system call
 
-	.globl	sys32_sigaltstack_wrapper
-sys32_sigaltstack_wrapper:
+ENTRY(sys32_sigaltstack_wrapper)
 	llgtr	%r2,%r2			# const stack_emu31_t *
 	llgtr	%r3,%r3			# stack_emu31_t *
 	jg	sys32_sigaltstack
 
-	.globl	sys32_sendfile_wrapper
-sys32_sendfile_wrapper:
+ENTRY(sys32_sendfile_wrapper)
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# int
 	llgtr	%r4,%r4			# __kernel_off_emu31_t *
@@ -920,22 +775,19 @@ sys32_sendfile_wrapper:
 
 #sys32_vfork_wrapper			# done in vfork_glue
 
-	.globl	sys32_truncate64_wrapper
-sys32_truncate64_wrapper:
+ENTRY(sys32_truncate64_wrapper)
 	llgtr	%r2,%r2			# const char *
 	llgfr	%r3,%r3			# unsigned long
 	llgfr	%r4,%r4			# unsigned long
 	jg	sys32_truncate64	# branch to system call
 
-	.globl	sys32_ftruncate64_wrapper
-sys32_ftruncate64_wrapper:
+ENTRY(sys32_ftruncate64_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# unsigned long
 	llgfr	%r4,%r4			# unsigned long
 	jg	sys32_ftruncate64	# branch to system call
 
-	.globl	sys32_lchown_wrapper
-sys32_lchown_wrapper:
+ENTRY(sys32_lchown_wrapper)
 	llgtr	%r2,%r2			# const char *
 	llgfr	%r3,%r3			# uid_t
 	llgfr	%r4,%r4			# gid_t
 	jg	sys_lchown		# branch to system call
@@ -946,156 +798,131 @@ sys32_lchown_wrapper:
 
 #sys32_geteuid_wrapper			# void
 #sys32_getegid_wrapper			# void
 
-	.globl	sys32_setreuid_wrapper
-sys32_setreuid_wrapper:
+ENTRY(sys32_setreuid_wrapper)
 	llgfr	%r2,%r2			# uid_t
 	llgfr	%r3,%r3			# uid_t
 	jg	sys_setreuid		# branch to system call
 
-	.globl	sys32_setregid_wrapper
-sys32_setregid_wrapper:
+ENTRY(sys32_setregid_wrapper)
 	llgfr	%r2,%r2			# gid_t
 	llgfr	%r3,%r3			# gid_t
 	jg	sys_setregid		# branch to system call
 
-	.globl	sys32_getgroups_wrapper
-sys32_getgroups_wrapper:
+ENTRY(sys32_getgroups_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# gid_t *
 	jg	sys_getgroups		# branch to system call
 
-	.globl	sys32_setgroups_wrapper
-sys32_setgroups_wrapper:
+ENTRY(sys32_setgroups_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# gid_t *
 	jg	sys_setgroups		# branch to system call
 
-	.globl	sys32_fchown_wrapper
-sys32_fchown_wrapper:
+ENTRY(sys32_fchown_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# uid_t
 	llgfr	%r4,%r4			# gid_t
 	jg	sys_fchown		# branch to system call
 
-	.globl	sys32_setresuid_wrapper
-sys32_setresuid_wrapper:
+ENTRY(sys32_setresuid_wrapper)
 	llgfr	%r2,%r2			# uid_t
 	llgfr	%r3,%r3			# uid_t
 	llgfr	%r4,%r4			# uid_t
 	jg	sys_setresuid		# branch to system call
 
-	.globl	sys32_getresuid_wrapper
-sys32_getresuid_wrapper:
+ENTRY(sys32_getresuid_wrapper)
 	llgtr	%r2,%r2			# uid_t *
 	llgtr	%r3,%r3			# uid_t *
 	llgtr	%r4,%r4			# uid_t *
 	jg	sys_getresuid		# branch to system call
 
-	.globl	sys32_setresgid_wrapper
-sys32_setresgid_wrapper:
+ENTRY(sys32_setresgid_wrapper)
 	llgfr	%r2,%r2			# gid_t
 	llgfr	%r3,%r3			# gid_t
 	llgfr	%r4,%r4			# gid_t
 	jg	sys_setresgid		# branch to system call
 
-	.globl	sys32_getresgid_wrapper
-sys32_getresgid_wrapper:
+ENTRY(sys32_getresgid_wrapper)
 	llgtr	%r2,%r2			# gid_t *
 	llgtr	%r3,%r3			# gid_t *
 	llgtr	%r4,%r4			# gid_t *
 	jg	sys_getresgid		# branch to system call
-sys32_chown_wrapper: +ENTRY(sys32_chown_wrapper) llgtr %r2,%r2 # const char * llgfr %r3,%r3 # uid_t llgfr %r4,%r4 # gid_t jg sys_chown # branch to system call - .globl sys32_setuid_wrapper -sys32_setuid_wrapper: +ENTRY(sys32_setuid_wrapper) llgfr %r2,%r2 # uid_t jg sys_setuid # branch to system call - .globl sys32_setgid_wrapper -sys32_setgid_wrapper: +ENTRY(sys32_setgid_wrapper) llgfr %r2,%r2 # gid_t jg sys_setgid # branch to system call - .globl sys32_setfsuid_wrapper -sys32_setfsuid_wrapper: +ENTRY(sys32_setfsuid_wrapper) llgfr %r2,%r2 # uid_t jg sys_setfsuid # branch to system call - .globl sys32_setfsgid_wrapper -sys32_setfsgid_wrapper: +ENTRY(sys32_setfsgid_wrapper) llgfr %r2,%r2 # gid_t jg sys_setfsgid # branch to system call - .globl sys32_pivot_root_wrapper -sys32_pivot_root_wrapper: +ENTRY(sys32_pivot_root_wrapper) llgtr %r2,%r2 # const char * llgtr %r3,%r3 # const char * jg sys_pivot_root # branch to system call - .globl sys32_mincore_wrapper -sys32_mincore_wrapper: +ENTRY(sys32_mincore_wrapper) llgfr %r2,%r2 # unsigned long llgfr %r3,%r3 # size_t llgtr %r4,%r4 # unsigned char * jg sys_mincore # branch to system call - .globl sys32_madvise_wrapper -sys32_madvise_wrapper: +ENTRY(sys32_madvise_wrapper) llgfr %r2,%r2 # unsigned long llgfr %r3,%r3 # size_t lgfr %r4,%r4 # int jg sys_madvise # branch to system call - .globl sys32_getdents64_wrapper -sys32_getdents64_wrapper: +ENTRY(sys32_getdents64_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # void * llgfr %r4,%r4 # unsigned int jg sys_getdents64 # branch to system call - .globl compat_sys_fcntl64_wrapper -compat_sys_fcntl64_wrapper: +ENTRY(compat_sys_fcntl64_wrapper) llgfr %r2,%r2 # unsigned int llgfr %r3,%r3 # unsigned int llgfr %r4,%r4 # unsigned long jg compat_sys_fcntl64 # branch to system call - .globl sys32_stat64_wrapper -sys32_stat64_wrapper: +ENTRY(sys32_stat64_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # struct stat64 * jg sys32_stat64 # branch to system call - .globl sys32_lstat64_wrapper -sys32_lstat64_wrapper: +ENTRY(sys32_lstat64_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # struct stat64 * jg sys32_lstat64 # branch to system call - .globl sys32_stime_wrapper -sys32_stime_wrapper: +ENTRY(sys32_stime_wrapper) llgtr %r2,%r2 # long * jg compat_sys_stime # branch to system call - .globl sys32_sysctl_wrapper -sys32_sysctl_wrapper: +ENTRY(sys32_sysctl_wrapper) llgtr %r2,%r2 # struct compat_sysctl_args * jg compat_sys_sysctl - .globl sys32_fstat64_wrapper -sys32_fstat64_wrapper: +ENTRY(sys32_fstat64_wrapper) llgfr %r2,%r2 # unsigned long llgtr %r3,%r3 # struct stat64 * jg sys32_fstat64 # branch to system call - .globl compat_sys_futex_wrapper -compat_sys_futex_wrapper: +ENTRY(compat_sys_futex_wrapper) llgtr %r2,%r2 # u32 * lgfr %r3,%r3 # int lgfr %r4,%r4 # int @@ -1105,8 +932,7 @@ compat_sys_futex_wrapper: stg %r0,160(%r15) jg compat_sys_futex # branch to system call - .globl sys32_setxattr_wrapper -sys32_setxattr_wrapper: +ENTRY(sys32_setxattr_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # char * llgtr %r4,%r4 # void * @@ -1114,8 +940,7 @@ sys32_setxattr_wrapper: lgfr %r6,%r6 # int jg sys_setxattr - .globl sys32_lsetxattr_wrapper -sys32_lsetxattr_wrapper: +ENTRY(sys32_lsetxattr_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # char * llgtr %r4,%r4 # void * @@ -1123,8 +948,7 @@ sys32_lsetxattr_wrapper: lgfr %r6,%r6 # int jg sys_lsetxattr - .globl sys32_fsetxattr_wrapper -sys32_fsetxattr_wrapper: +ENTRY(sys32_fsetxattr_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # char * llgtr %r4,%r4 # void * @@ -1132,124 +956,106 
@@ sys32_fsetxattr_wrapper: lgfr %r6,%r6 # int jg sys_fsetxattr - .globl sys32_getxattr_wrapper -sys32_getxattr_wrapper: +ENTRY(sys32_getxattr_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # char * llgtr %r4,%r4 # void * llgfr %r5,%r5 # size_t jg sys_getxattr - .globl sys32_lgetxattr_wrapper -sys32_lgetxattr_wrapper: +ENTRY(sys32_lgetxattr_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # char * llgtr %r4,%r4 # void * llgfr %r5,%r5 # size_t jg sys_lgetxattr - .globl sys32_fgetxattr_wrapper -sys32_fgetxattr_wrapper: +ENTRY(sys32_fgetxattr_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # char * llgtr %r4,%r4 # void * llgfr %r5,%r5 # size_t jg sys_fgetxattr - .globl sys32_listxattr_wrapper -sys32_listxattr_wrapper: +ENTRY(sys32_listxattr_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # char * llgfr %r4,%r4 # size_t jg sys_listxattr - .globl sys32_llistxattr_wrapper -sys32_llistxattr_wrapper: +ENTRY(sys32_llistxattr_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # char * llgfr %r4,%r4 # size_t jg sys_llistxattr - .globl sys32_flistxattr_wrapper -sys32_flistxattr_wrapper: +ENTRY(sys32_flistxattr_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # char * llgfr %r4,%r4 # size_t jg sys_flistxattr - .globl sys32_removexattr_wrapper -sys32_removexattr_wrapper: +ENTRY(sys32_removexattr_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # char * jg sys_removexattr - .globl sys32_lremovexattr_wrapper -sys32_lremovexattr_wrapper: +ENTRY(sys32_lremovexattr_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # char * jg sys_lremovexattr - .globl sys32_fremovexattr_wrapper -sys32_fremovexattr_wrapper: +ENTRY(sys32_fremovexattr_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # char * jg sys_fremovexattr - .globl sys32_sched_setaffinity_wrapper -sys32_sched_setaffinity_wrapper: +ENTRY(sys32_sched_setaffinity_wrapper) lgfr %r2,%r2 # int llgfr %r3,%r3 # unsigned int llgtr %r4,%r4 # unsigned long * jg compat_sys_sched_setaffinity - .globl sys32_sched_getaffinity_wrapper -sys32_sched_getaffinity_wrapper: +ENTRY(sys32_sched_getaffinity_wrapper) lgfr %r2,%r2 # int llgfr %r3,%r3 # unsigned int llgtr %r4,%r4 # unsigned long * jg compat_sys_sched_getaffinity - .globl sys32_exit_group_wrapper -sys32_exit_group_wrapper: +ENTRY(sys32_exit_group_wrapper) lgfr %r2,%r2 # int jg sys_exit_group # branch to system call - .globl sys32_set_tid_address_wrapper -sys32_set_tid_address_wrapper: +ENTRY(sys32_set_tid_address_wrapper) llgtr %r2,%r2 # int * jg sys_set_tid_address # branch to system call - .globl sys_epoll_create_wrapper -sys_epoll_create_wrapper: +ENTRY(sys_epoll_create_wrapper) lgfr %r2,%r2 # int jg sys_epoll_create # branch to system call - .globl sys_epoll_ctl_wrapper -sys_epoll_ctl_wrapper: +ENTRY(sys_epoll_ctl_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int lgfr %r4,%r4 # int llgtr %r5,%r5 # struct epoll_event * jg sys_epoll_ctl # branch to system call - .globl sys_epoll_wait_wrapper -sys_epoll_wait_wrapper: +ENTRY(sys_epoll_wait_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # struct epoll_event * lgfr %r4,%r4 # int lgfr %r5,%r5 # int jg sys_epoll_wait # branch to system call - .globl sys32_lookup_dcookie_wrapper -sys32_lookup_dcookie_wrapper: +ENTRY(sys32_lookup_dcookie_wrapper) sllg %r2,%r2,32 # get high word of 64bit dcookie or %r2,%r3 # get low word of 64bit dcookie llgtr %r3,%r4 # char * llgfr %r4,%r5 # size_t jg sys_lookup_dcookie - .globl sys32_fadvise64_wrapper -sys32_fadvise64_wrapper: +ENTRY(sys32_fadvise64_wrapper) lgfr %r2,%r2 # int sllg %r3,%r3,32 # get high word of 64bit loff_t or %r3,%r4 # get low word of 64bit loff_t @@ -1257,81 
+1063,68 @@ sys32_fadvise64_wrapper: lgfr %r5,%r6 # int jg sys32_fadvise64 - .globl sys32_fadvise64_64_wrapper -sys32_fadvise64_64_wrapper: +ENTRY(sys32_fadvise64_64_wrapper) llgtr %r2,%r2 # struct fadvise64_64_args * jg sys32_fadvise64_64 - .globl sys32_clock_settime_wrapper -sys32_clock_settime_wrapper: +ENTRY(sys32_clock_settime_wrapper) lgfr %r2,%r2 # clockid_t (int) llgtr %r3,%r3 # struct compat_timespec * jg compat_sys_clock_settime - .globl sys32_clock_gettime_wrapper -sys32_clock_gettime_wrapper: +ENTRY(sys32_clock_gettime_wrapper) lgfr %r2,%r2 # clockid_t (int) llgtr %r3,%r3 # struct compat_timespec * jg compat_sys_clock_gettime - .globl sys32_clock_getres_wrapper -sys32_clock_getres_wrapper: +ENTRY(sys32_clock_getres_wrapper) lgfr %r2,%r2 # clockid_t (int) llgtr %r3,%r3 # struct compat_timespec * jg compat_sys_clock_getres - .globl sys32_clock_nanosleep_wrapper -sys32_clock_nanosleep_wrapper: +ENTRY(sys32_clock_nanosleep_wrapper) lgfr %r2,%r2 # clockid_t (int) lgfr %r3,%r3 # int llgtr %r4,%r4 # struct compat_timespec * llgtr %r5,%r5 # struct compat_timespec * jg compat_sys_clock_nanosleep - .globl sys32_timer_create_wrapper -sys32_timer_create_wrapper: +ENTRY(sys32_timer_create_wrapper) lgfr %r2,%r2 # timer_t (int) llgtr %r3,%r3 # struct compat_sigevent * llgtr %r4,%r4 # timer_t * jg compat_sys_timer_create - .globl sys32_timer_settime_wrapper -sys32_timer_settime_wrapper: +ENTRY(sys32_timer_settime_wrapper) lgfr %r2,%r2 # timer_t (int) lgfr %r3,%r3 # int llgtr %r4,%r4 # struct compat_itimerspec * llgtr %r5,%r5 # struct compat_itimerspec * jg compat_sys_timer_settime - .globl sys32_timer_gettime_wrapper -sys32_timer_gettime_wrapper: +ENTRY(sys32_timer_gettime_wrapper) lgfr %r2,%r2 # timer_t (int) llgtr %r3,%r3 # struct compat_itimerspec * jg compat_sys_timer_gettime - .globl sys32_timer_getoverrun_wrapper -sys32_timer_getoverrun_wrapper: +ENTRY(sys32_timer_getoverrun_wrapper) lgfr %r2,%r2 # timer_t (int) jg sys_timer_getoverrun - .globl sys32_timer_delete_wrapper -sys32_timer_delete_wrapper: +ENTRY(sys32_timer_delete_wrapper) lgfr %r2,%r2 # timer_t (int) jg sys_timer_delete - .globl sys32_io_setup_wrapper -sys32_io_setup_wrapper: +ENTRY(sys32_io_setup_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # u32 * jg compat_sys_io_setup - .globl sys32_io_destroy_wrapper -sys32_io_destroy_wrapper: +ENTRY(sys32_io_destroy_wrapper) llgfr %r2,%r2 # (aio_context_t) u32 jg sys_io_destroy - .globl sys32_io_getevents_wrapper -sys32_io_getevents_wrapper: +ENTRY(sys32_io_getevents_wrapper) llgfr %r2,%r2 # (aio_context_t) u32 lgfr %r3,%r3 # long lgfr %r4,%r4 # long @@ -1339,49 +1132,42 @@ sys32_io_getevents_wrapper: llgtr %r6,%r6 # struct compat_timespec * jg compat_sys_io_getevents - .globl sys32_io_submit_wrapper -sys32_io_submit_wrapper: +ENTRY(sys32_io_submit_wrapper) llgfr %r2,%r2 # (aio_context_t) u32 lgfr %r3,%r3 # long llgtr %r4,%r4 # struct iocb ** jg compat_sys_io_submit - .globl sys32_io_cancel_wrapper -sys32_io_cancel_wrapper: +ENTRY(sys32_io_cancel_wrapper) llgfr %r2,%r2 # (aio_context_t) u32 llgtr %r3,%r3 # struct iocb * llgtr %r4,%r4 # struct io_event * jg sys_io_cancel - .globl compat_sys_statfs64_wrapper -compat_sys_statfs64_wrapper: +ENTRY(compat_sys_statfs64_wrapper) llgtr %r2,%r2 # const char * llgfr %r3,%r3 # compat_size_t llgtr %r4,%r4 # struct compat_statfs64 * jg compat_sys_statfs64 - .globl compat_sys_fstatfs64_wrapper -compat_sys_fstatfs64_wrapper: +ENTRY(compat_sys_fstatfs64_wrapper) llgfr %r2,%r2 # unsigned int fd llgfr %r3,%r3 # compat_size_t llgtr %r4,%r4 # struct 
compat_statfs64 * jg compat_sys_fstatfs64 - .globl compat_sys_mq_open_wrapper -compat_sys_mq_open_wrapper: +ENTRY(compat_sys_mq_open_wrapper) llgtr %r2,%r2 # const char * lgfr %r3,%r3 # int llgfr %r4,%r4 # mode_t llgtr %r5,%r5 # struct compat_mq_attr * jg compat_sys_mq_open - .globl sys32_mq_unlink_wrapper -sys32_mq_unlink_wrapper: +ENTRY(sys32_mq_unlink_wrapper) llgtr %r2,%r2 # const char * jg sys_mq_unlink - .globl compat_sys_mq_timedsend_wrapper -compat_sys_mq_timedsend_wrapper: +ENTRY(compat_sys_mq_timedsend_wrapper) lgfr %r2,%r2 # mqd_t llgtr %r3,%r3 # const char * llgfr %r4,%r4 # size_t @@ -1389,8 +1175,7 @@ compat_sys_mq_timedsend_wrapper: llgtr %r6,%r6 # const struct compat_timespec * jg compat_sys_mq_timedsend - .globl compat_sys_mq_timedreceive_wrapper -compat_sys_mq_timedreceive_wrapper: +ENTRY(compat_sys_mq_timedreceive_wrapper) lgfr %r2,%r2 # mqd_t llgtr %r3,%r3 # char * llgfr %r4,%r4 # size_t @@ -1398,21 +1183,18 @@ compat_sys_mq_timedreceive_wrapper: llgtr %r6,%r6 # const struct compat_timespec * jg compat_sys_mq_timedreceive - .globl compat_sys_mq_notify_wrapper -compat_sys_mq_notify_wrapper: +ENTRY(compat_sys_mq_notify_wrapper) lgfr %r2,%r2 # mqd_t llgtr %r3,%r3 # struct compat_sigevent * jg compat_sys_mq_notify - .globl compat_sys_mq_getsetattr_wrapper -compat_sys_mq_getsetattr_wrapper: +ENTRY(compat_sys_mq_getsetattr_wrapper) lgfr %r2,%r2 # mqd_t llgtr %r3,%r3 # struct compat_mq_attr * llgtr %r4,%r4 # struct compat_mq_attr * jg compat_sys_mq_getsetattr - .globl compat_sys_add_key_wrapper -compat_sys_add_key_wrapper: +ENTRY(compat_sys_add_key_wrapper) llgtr %r2,%r2 # const char * llgtr %r3,%r3 # const char * llgtr %r4,%r4 # const void * @@ -1420,16 +1202,14 @@ compat_sys_add_key_wrapper: llgfr %r6,%r6 # (key_serial_t) u32 jg sys_add_key - .globl compat_sys_request_key_wrapper -compat_sys_request_key_wrapper: +ENTRY(compat_sys_request_key_wrapper) llgtr %r2,%r2 # const char * llgtr %r3,%r3 # const char * llgtr %r4,%r4 # const void * llgfr %r5,%r5 # (key_serial_t) u32 jg sys_request_key - .globl sys32_remap_file_pages_wrapper -sys32_remap_file_pages_wrapper: +ENTRY(sys32_remap_file_pages_wrapper) llgfr %r2,%r2 # unsigned long llgfr %r3,%r3 # unsigned long llgfr %r4,%r4 # unsigned long @@ -1437,8 +1217,7 @@ sys32_remap_file_pages_wrapper: llgfr %r6,%r6 # unsigned long jg sys_remap_file_pages - .globl compat_sys_waitid_wrapper -compat_sys_waitid_wrapper: +ENTRY(compat_sys_waitid_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # pid_t llgtr %r4,%r4 # siginfo_emu31_t * @@ -1446,65 +1225,56 @@ compat_sys_waitid_wrapper: llgtr %r6,%r6 # struct rusage_emu31 * jg compat_sys_waitid - .globl compat_sys_kexec_load_wrapper -compat_sys_kexec_load_wrapper: +ENTRY(compat_sys_kexec_load_wrapper) llgfr %r2,%r2 # unsigned long llgfr %r3,%r3 # unsigned long llgtr %r4,%r4 # struct kexec_segment * llgfr %r5,%r5 # unsigned long jg compat_sys_kexec_load - .globl sys_ioprio_set_wrapper -sys_ioprio_set_wrapper: +ENTRY(sys_ioprio_set_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int lgfr %r4,%r4 # int jg sys_ioprio_set - .globl sys_ioprio_get_wrapper -sys_ioprio_get_wrapper: +ENTRY(sys_ioprio_get_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int jg sys_ioprio_get - .globl sys_inotify_add_watch_wrapper -sys_inotify_add_watch_wrapper: +ENTRY(sys_inotify_add_watch_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const char * llgfr %r4,%r4 # u32 jg sys_inotify_add_watch - .globl sys_inotify_rm_watch_wrapper -sys_inotify_rm_watch_wrapper: +ENTRY(sys_inotify_rm_watch_wrapper) lgfr %r2,%r2 # int llgfr %r3,%r3 # u32 jg 
sys_inotify_rm_watch - .globl compat_sys_openat_wrapper -compat_sys_openat_wrapper: +ENTRY(compat_sys_openat_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # const char * lgfr %r4,%r4 # int lgfr %r5,%r5 # int jg compat_sys_openat - .globl sys_mkdirat_wrapper -sys_mkdirat_wrapper: +ENTRY(sys_mkdirat_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const char * lgfr %r4,%r4 # int jg sys_mkdirat - .globl sys_mknodat_wrapper -sys_mknodat_wrapper: +ENTRY(sys_mknodat_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const char * lgfr %r4,%r4 # int llgfr %r5,%r5 # unsigned int jg sys_mknodat - .globl sys_fchownat_wrapper -sys_fchownat_wrapper: +ENTRY(sys_fchownat_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const char * llgfr %r4,%r4 # uid_t @@ -1512,38 +1282,33 @@ sys_fchownat_wrapper: lgfr %r6,%r6 # int jg sys_fchownat - .globl compat_sys_futimesat_wrapper -compat_sys_futimesat_wrapper: +ENTRY(compat_sys_futimesat_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # char * llgtr %r4,%r4 # struct timeval * jg compat_sys_futimesat - .globl sys32_fstatat64_wrapper -sys32_fstatat64_wrapper: +ENTRY(sys32_fstatat64_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # char * llgtr %r4,%r4 # struct stat64 * lgfr %r5,%r5 # int jg sys32_fstatat64 - .globl sys_unlinkat_wrapper -sys_unlinkat_wrapper: +ENTRY(sys_unlinkat_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const char * lgfr %r4,%r4 # int jg sys_unlinkat - .globl sys_renameat_wrapper -sys_renameat_wrapper: +ENTRY(sys_renameat_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const char * lgfr %r4,%r4 # int llgtr %r5,%r5 # const char * jg sys_renameat - .globl sys_linkat_wrapper -sys_linkat_wrapper: +ENTRY(sys_linkat_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const char * lgfr %r4,%r4 # int @@ -1551,37 +1316,32 @@ sys_linkat_wrapper: lgfr %r6,%r6 # int jg sys_linkat - .globl sys_symlinkat_wrapper -sys_symlinkat_wrapper: +ENTRY(sys_symlinkat_wrapper) llgtr %r2,%r2 # const char * lgfr %r3,%r3 # int llgtr %r4,%r4 # const char * jg sys_symlinkat - .globl sys_readlinkat_wrapper -sys_readlinkat_wrapper: +ENTRY(sys_readlinkat_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const char * llgtr %r4,%r4 # char * lgfr %r5,%r5 # int jg sys_readlinkat - .globl sys_fchmodat_wrapper -sys_fchmodat_wrapper: +ENTRY(sys_fchmodat_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const char * llgfr %r4,%r4 # mode_t jg sys_fchmodat - .globl sys_faccessat_wrapper -sys_faccessat_wrapper: +ENTRY(sys_faccessat_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const char * lgfr %r4,%r4 # int jg sys_faccessat - .globl compat_sys_pselect6_wrapper -compat_sys_pselect6_wrapper: +ENTRY(compat_sys_pselect6_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # fd_set * llgtr %r4,%r4 # fd_set * @@ -1591,8 +1351,7 @@ compat_sys_pselect6_wrapper: stg %r0,160(%r15) jg compat_sys_pselect6 - .globl compat_sys_ppoll_wrapper -compat_sys_ppoll_wrapper: +ENTRY(compat_sys_ppoll_wrapper) llgtr %r2,%r2 # struct pollfd * llgfr %r3,%r3 # unsigned int llgtr %r4,%r4 # struct timespec * @@ -1600,26 +1359,22 @@ compat_sys_ppoll_wrapper: llgfr %r6,%r6 # size_t jg compat_sys_ppoll - .globl sys_unshare_wrapper -sys_unshare_wrapper: +ENTRY(sys_unshare_wrapper) llgfr %r2,%r2 # unsigned long jg sys_unshare - .globl compat_sys_set_robust_list_wrapper -compat_sys_set_robust_list_wrapper: +ENTRY(compat_sys_set_robust_list_wrapper) llgtr %r2,%r2 # struct compat_robust_list_head * llgfr %r3,%r3 # size_t jg compat_sys_set_robust_list - .globl compat_sys_get_robust_list_wrapper -compat_sys_get_robust_list_wrapper: 
+ENTRY(compat_sys_get_robust_list_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # compat_uptr_t * llgtr %r4,%r4 # compat_size_t * jg compat_sys_get_robust_list - .globl sys_splice_wrapper -sys_splice_wrapper: +ENTRY(sys_splice_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # loff_t * lgfr %r4,%r4 # int @@ -1629,8 +1384,7 @@ sys_splice_wrapper: stg %r0,160(%r15) jg sys_splice - .globl sys_sync_file_range_wrapper -sys_sync_file_range_wrapper: +ENTRY(sys_sync_file_range_wrapper) lgfr %r2,%r2 # int sllg %r3,%r3,32 # get high word of 64bit loff_t or %r3,%r4 # get low word of 64bit loff_t @@ -1639,31 +1393,27 @@ sys_sync_file_range_wrapper: llgf %r5,164(%r15) # unsigned int jg sys_sync_file_range - .globl sys_tee_wrapper -sys_tee_wrapper: +ENTRY(sys_tee_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int llgfr %r4,%r4 # size_t llgfr %r5,%r5 # unsigned int jg sys_tee - .globl compat_sys_vmsplice_wrapper -compat_sys_vmsplice_wrapper: +ENTRY(compat_sys_vmsplice_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # compat_iovec * llgfr %r4,%r4 # unsigned int llgfr %r5,%r5 # unsigned int jg compat_sys_vmsplice - .globl sys_getcpu_wrapper -sys_getcpu_wrapper: +ENTRY(sys_getcpu_wrapper) llgtr %r2,%r2 # unsigned * llgtr %r3,%r3 # unsigned * llgtr %r4,%r4 # struct getcpu_cache * jg sys_getcpu - .globl compat_sys_epoll_pwait_wrapper -compat_sys_epoll_pwait_wrapper: +ENTRY(compat_sys_epoll_pwait_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # struct compat_epoll_event * lgfr %r4,%r4 # int @@ -1673,34 +1423,29 @@ compat_sys_epoll_pwait_wrapper: stg %r0,160(%r15) jg compat_sys_epoll_pwait - .globl compat_sys_utimes_wrapper -compat_sys_utimes_wrapper: +ENTRY(compat_sys_utimes_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # struct compat_timeval * jg compat_sys_utimes - .globl compat_sys_utimensat_wrapper -compat_sys_utimensat_wrapper: +ENTRY(compat_sys_utimensat_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # char * llgtr %r4,%r4 # struct compat_timespec * lgfr %r5,%r5 # int jg compat_sys_utimensat - .globl compat_sys_signalfd_wrapper -compat_sys_signalfd_wrapper: +ENTRY(compat_sys_signalfd_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # compat_sigset_t * llgfr %r4,%r4 # compat_size_t jg compat_sys_signalfd - .globl sys_eventfd_wrapper -sys_eventfd_wrapper: +ENTRY(sys_eventfd_wrapper) llgfr %r2,%r2 # unsigned int jg sys_eventfd - .globl sys_fallocate_wrapper -sys_fallocate_wrapper: +ENTRY(sys_fallocate_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int sllg %r4,%r4,32 # get high word of 64bit loff_t @@ -1709,94 +1454,80 @@ sys_fallocate_wrapper: l %r5,164(%r15) # get low word of 64bit loff_t jg sys_fallocate - .globl sys_timerfd_create_wrapper -sys_timerfd_create_wrapper: +ENTRY(sys_timerfd_create_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int jg sys_timerfd_create - .globl compat_sys_timerfd_settime_wrapper -compat_sys_timerfd_settime_wrapper: +ENTRY(compat_sys_timerfd_settime_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int llgtr %r4,%r4 # struct compat_itimerspec * llgtr %r5,%r5 # struct compat_itimerspec * jg compat_sys_timerfd_settime - .globl compat_sys_timerfd_gettime_wrapper -compat_sys_timerfd_gettime_wrapper: +ENTRY(compat_sys_timerfd_gettime_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # struct compat_itimerspec * jg compat_sys_timerfd_gettime - .globl compat_sys_signalfd4_wrapper -compat_sys_signalfd4_wrapper: +ENTRY(compat_sys_signalfd4_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # compat_sigset_t * llgfr %r4,%r4 # compat_size_t lgfr %r5,%r5 # int jg compat_sys_signalfd4 - .globl sys_eventfd2_wrapper -sys_eventfd2_wrapper:
+ENTRY(sys_eventfd2_wrapper) llgfr %r2,%r2 # unsigned int lgfr %r3,%r3 # int jg sys_eventfd2 - .globl sys_inotify_init1_wrapper -sys_inotify_init1_wrapper: +ENTRY(sys_inotify_init1_wrapper) lgfr %r2,%r2 # int jg sys_inotify_init1 - .globl sys_pipe2_wrapper -sys_pipe2_wrapper: +ENTRY(sys_pipe2_wrapper) llgtr %r2,%r2 # u32 * lgfr %r3,%r3 # int jg sys_pipe2 # branch to system call - .globl sys_dup3_wrapper -sys_dup3_wrapper: +ENTRY(sys_dup3_wrapper) llgfr %r2,%r2 # unsigned int llgfr %r3,%r3 # unsigned int lgfr %r4,%r4 # int jg sys_dup3 # branch to system call - .globl sys_epoll_create1_wrapper -sys_epoll_create1_wrapper: +ENTRY(sys_epoll_create1_wrapper) lgfr %r2,%r2 # int jg sys_epoll_create1 # branch to system call - .globl sys32_readahead_wrapper -sys32_readahead_wrapper: +ENTRY(sys32_readahead_wrapper) lgfr %r2,%r2 # int llgfr %r3,%r3 # u32 llgfr %r4,%r4 # u32 lgfr %r5,%r5 # s32 jg sys32_readahead # branch to system call - .globl sys32_sendfile64_wrapper -sys32_sendfile64_wrapper: +ENTRY(sys32_sendfile64_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int llgtr %r4,%r4 # compat_loff_t * lgfr %r5,%r5 # s32 jg sys32_sendfile64 # branch to system call - .globl sys_tkill_wrapper -sys_tkill_wrapper: +ENTRY(sys_tkill_wrapper) lgfr %r2,%r2 # pid_t lgfr %r3,%r3 # int jg sys_tkill # branch to system call - .globl sys_tgkill_wrapper -sys_tgkill_wrapper: +ENTRY(sys_tgkill_wrapper) lgfr %r2,%r2 # pid_t lgfr %r3,%r3 # pid_t lgfr %r4,%r4 # int jg sys_tgkill # branch to system call - .globl compat_sys_keyctl_wrapper -compat_sys_keyctl_wrapper: +ENTRY(compat_sys_keyctl_wrapper) llgfr %r2,%r2 # u32 llgfr %r3,%r3 # u32 llgfr %r4,%r4 # u32 @@ -1804,8 +1535,7 @@ compat_sys_keyctl_wrapper: llgfr %r6,%r6 # u32 jg compat_sys_keyctl # branch to system call - .globl compat_sys_preadv_wrapper -compat_sys_preadv_wrapper: +ENTRY(compat_sys_preadv_wrapper) llgfr %r2,%r2 # unsigned long llgtr %r3,%r3 # compat_iovec * llgfr %r4,%r4 # unsigned long @@ -1813,8 +1543,7 @@ compat_sys_preadv_wrapper: llgfr %r6,%r6 # u32 jg compat_sys_preadv # branch to system call - .globl compat_sys_pwritev_wrapper -compat_sys_pwritev_wrapper: +ENTRY(compat_sys_pwritev_wrapper) llgfr %r2,%r2 # unsigned long llgtr %r3,%r3 # compat_iovec * llgfr %r4,%r4 # unsigned long @@ -1822,16 +1551,14 @@ compat_sys_pwritev_wrapper: llgfr %r6,%r6 # u32 jg compat_sys_pwritev # branch to system call - .globl compat_sys_rt_tgsigqueueinfo_wrapper -compat_sys_rt_tgsigqueueinfo_wrapper: +ENTRY(compat_sys_rt_tgsigqueueinfo_wrapper) lgfr %r2,%r2 # compat_pid_t lgfr %r3,%r3 # compat_pid_t lgfr %r4,%r4 # int llgtr %r5,%r5 # struct compat_siginfo * jg compat_sys_rt_tgsigqueueinfo # branch to system call - .globl sys_perf_event_open_wrapper -sys_perf_event_open_wrapper: +ENTRY(sys_perf_event_open_wrapper) llgtr %r2,%r2 # const struct perf_event_attr * lgfr %r3,%r3 # pid_t lgfr %r4,%r4 # int @@ -1839,29 +1566,25 @@ sys_perf_event_open_wrapper: llgfr %r6,%r6 # unsigned long jg sys_perf_event_open # branch to system call - .globl sys_clone_wrapper -sys_clone_wrapper: +ENTRY(sys_clone_wrapper) llgfr %r2,%r2 # unsigned long llgfr %r3,%r3 # unsigned long llgtr %r4,%r4 # int * llgtr %r5,%r5 # int * jg sys_clone # branch to system call - .globl sys32_execve_wrapper -sys32_execve_wrapper: +ENTRY(sys32_execve_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # compat_uptr_t * llgtr %r4,%r4 # compat_uptr_t * jg sys32_execve # branch to system call - .globl sys_fanotify_init_wrapper -sys_fanotify_init_wrapper: +ENTRY(sys_fanotify_init_wrapper) llgfr %r2,%r2 # unsigned int llgfr
%r3,%r3 # unsigned int jg sys_fanotify_init # branch to system call - .globl sys_fanotify_mark_wrapper -sys_fanotify_mark_wrapper: +ENTRY(sys_fanotify_mark_wrapper) lgfr %r2,%r2 # int llgfr %r3,%r3 # unsigned int sllg %r4,%r4,32 # get high word of 64bit mask @@ -1870,16 +1593,14 @@ sys_fanotify_mark_wrapper: llgt %r6,164(%r15) # char * jg sys_fanotify_mark # branch to system call - .globl sys_prlimit64_wrapper -sys_prlimit64_wrapper: +ENTRY(sys_prlimit64_wrapper) lgfr %r2,%r2 # pid_t llgfr %r3,%r3 # unsigned int llgtr %r4,%r4 # const struct rlimit64 __user * llgtr %r5,%r5 # struct rlimit64 __user * jg sys_prlimit64 # branch to system call - .globl sys_name_to_handle_at_wrapper -sys_name_to_handle_at_wrapper: +ENTRY(sys_name_to_handle_at_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const char __user * llgtr %r4,%r4 # struct file_handle __user * @@ -1887,21 +1608,18 @@ sys_name_to_handle_at_wrapper: lgfr %r6,%r6 # int jg sys_name_to_handle_at - .globl compat_sys_open_by_handle_at_wrapper -compat_sys_open_by_handle_at_wrapper: +ENTRY(compat_sys_open_by_handle_at_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # struct file_handle __user * lgfr %r4,%r4 # int jg compat_sys_open_by_handle_at - .globl compat_sys_clock_adjtime_wrapper -compat_sys_clock_adjtime_wrapper: +ENTRY(compat_sys_clock_adjtime_wrapper) lgfr %r2,%r2 # clockid_t (int) llgtr %r3,%r3 # struct compat_timex __user * jg compat_sys_clock_adjtime - .globl sys_syncfs_wrapper -sys_syncfs_wrapper: +ENTRY(sys_syncfs_wrapper) lgfr %r2,%r2 # int jg sys_syncfs diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index 1ca3d1d6a86..45df6d456aa 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -27,7 +27,7 @@ #include <asm/system.h> #include <asm/uaccess.h> #include <asm/io.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <asm/mathemu.h> #include <asm/cpcmd.h> #include <asm/lowcore.h> diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 068f8465c4e..f297456dba7 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -396,17 +396,19 @@ static __init void detect_machine_facilities(void) static __init void rescue_initrd(void) { #ifdef CONFIG_BLK_DEV_INITRD + unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20); /* - * Move the initrd right behind the bss section in case it starts - * within the bss section. So we don't overwrite it when the bss - * section gets cleared. + * Just like in case of IPL from VM reader we make sure there is a + * gap of 4MB between end of kernel and start of initrd. + * That way we can also be sure that saving an NSS will succeed, + * which however only requires different segments. 
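+ * If the initrd does start below that boundary, the code below moves
+ * it up to min_initrd_addr with memmove and updates INITRD_START.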
*/ if (!INITRD_START || !INITRD_SIZE) return; - if (INITRD_START >= (unsigned long) __bss_stop) + if (INITRD_START >= min_initrd_addr) return; - memmove(__bss_stop, (void *) INITRD_START, INITRD_SIZE); - INITRD_START = (unsigned long) __bss_stop; + memmove((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE); + INITRD_START = min_initrd_addr; #endif } diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 0476174dfff..02ec8fe7d03 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -9,8 +9,8 @@ * Heiko Carstens <heiko.carstens@de.ibm.com> */ -#include <linux/linkage.h> #include <linux/init.h> +#include <linux/linkage.h> #include <asm/cache.h> #include <asm/errno.h> #include <asm/ptrace.h> @@ -197,8 +197,7 @@ STACK_SIZE = 1 << STACK_SHIFT * Returns: * gpr2 = prev */ - .globl __switch_to -__switch_to: +ENTRY(__switch_to) basr %r1,0 0: l %r4,__THREAD_info(%r2) # get thread_info of prev l %r5,__THREAD_info(%r3) # get thread_info of next @@ -224,8 +223,7 @@ __critical_start: * are executed with interrupts enabled. */ - .globl system_call -system_call: +ENTRY(system_call) stpt __LC_SYNC_ENTER_TIMER sysc_saveall: SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA @@ -388,8 +386,7 @@ sysc_tracenogo: # # a new process exits the kernel with ret_from_fork # - .globl ret_from_fork -ret_from_fork: +ENTRY(ret_from_fork) l %r13,__LC_SVC_NEW_PSW+4 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct tm SP_PSW+1(%r15),0x01 # forking a kernel thread ? @@ -405,8 +402,7 @@ ret_from_fork: # kernel_execve function needs to deal with pt_regs that is not # at the usual place # - .globl kernel_execve -kernel_execve: +ENTRY(kernel_execve) stm %r12,%r15,48(%r15) lr %r14,%r15 l %r13,__LC_SVC_NEW_PSW+4 @@ -438,8 +434,7 @@ kernel_execve: * Program check handler routine */ - .globl pgm_check_handler -pgm_check_handler: +ENTRY(pgm_check_handler) /* * First we need to check for a special case: * Single stepping an instruction that disables the PER event mask will @@ -565,8 +560,7 @@ kernel_per: * IO interrupt handler routine */ - .globl io_int_handler -io_int_handler: +ENTRY(io_int_handler) stck __LC_INT_CLOCK stpt __LC_ASYNC_ENTER_TIMER SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 @@ -703,8 +697,7 @@ io_notify_resume: * External interrupt handler routine */ - .globl ext_int_handler -ext_int_handler: +ENTRY(ext_int_handler) stck __LC_INT_CLOCK stpt __LC_ASYNC_ENTER_TIMER SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 @@ -731,8 +724,7 @@ __critical_end: * Machine check handler routines */ - .globl mcck_int_handler -mcck_int_handler: +ENTRY(mcck_int_handler) stck __LC_MCCK_CLOCK spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs @@ -818,8 +810,7 @@ mcck_return: */ #ifdef CONFIG_SMP __CPUINIT - .globl restart_int_handler -restart_int_handler: +ENTRY(restart_int_handler) basr %r1,0 restart_base: spt restart_vtime-restart_base(%r1) @@ -848,8 +839,7 @@ restart_vtime: /* * If we do not run with SMP enabled, let the new CPU crash ... 
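 * It does this by loading a disabled wait PSW from restart_crash.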
*/ - .globl restart_int_handler -restart_int_handler: +ENTRY(restart_int_handler) basr %r1,0 restart_base: lpsw restart_crash-restart_base(%r1) @@ -859,6 +849,34 @@ restart_crash: restart_go: #endif +# +# PSW restart interrupt handler +# +ENTRY(psw_restart_int_handler) + st %r15,__LC_SAVE_AREA_64(%r0) # save r15 + basr %r15,0 +0: l %r15,.Lrestart_stack-0b(%r15) # load restart stack + l %r15,0(%r15) + ahi %r15,-SP_SIZE # make room for pt_regs + stm %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack + mvc SP_R15(4,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack + mvc SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw + xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 + basr %r14,0 +1: l %r14,.Ldo_restart-1b(%r14) + basr %r14,%r14 + + basr %r14,0 # load disabled wait PSW if +2: lpsw restart_psw_crash-2b(%r14) # do_restart returns + .align 4 +.Ldo_restart: + .long do_restart +.Lrestart_stack: + .long restart_stack + .align 8 +restart_psw_crash: + .long 0x000a0000,0x00000000 + restart_psw_crash + .section .kprobes.text, "ax" #ifdef CONFIG_CHECK_STACK diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index 17a6f83a2d6..66729eb7bbc 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h @@ -5,10 +5,9 @@ #include <linux/signal.h> #include <asm/ptrace.h> -typedef void pgm_check_handler_t(struct pt_regs *, long, unsigned long); -extern pgm_check_handler_t *pgm_check_table[128]; -pgm_check_handler_t do_protection_exception; -pgm_check_handler_t do_dat_exception; +void do_protection_exception(struct pt_regs *, long, unsigned long); +void do_dat_exception(struct pt_regs *, long, unsigned long); +void do_asce_exception(struct pt_regs *, long, unsigned long); extern int sysctl_userprocess_debug; diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index d61967e2eab..5f729d627ce 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -9,8 +9,8 @@ * Heiko Carstens <heiko.carstens@de.ibm.com> */ -#include <linux/linkage.h> #include <linux/init.h> +#include <linux/linkage.h> #include <asm/cache.h> #include <asm/errno.h> #include <asm/ptrace.h> @@ -56,15 +56,28 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_MCCK_PENDING) _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8) +_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) #define BASED(name) name-system_call(%r13) + .macro SPP newpp +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) + tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP + jz .+8 + .insn s,0xb2800000,\newpp +#endif + .endm + .macro HANDLE_SIE_INTERCEPT #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) - lg %r3,__LC_SIE_HOOK - ltgr %r3,%r3 + tm __TI_flags+6(%r12),_TIF_SIE>>8 jz 0f - basr %r14,%r3 + SPP __LC_CMF_HPP # set host id + clc SP_PSW+8(8,%r15),BASED(.Lsie_loop) + jl 0f + clc SP_PSW+8(8,%r15),BASED(.Lsie_done) + jhe 0f + mvc SP_PSW+8(8,%r15),BASED(.Lsie_loop) 0: #endif .endm @@ -206,8 +219,7 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ * Returns: * gpr2 = prev */ - .globl __switch_to -__switch_to: +ENTRY(__switch_to) lg %r4,__THREAD_info(%r2) # get thread_info of prev lg %r5,__THREAD_info(%r3) # get thread_info of next tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending? @@ -232,8 +244,7 @@ __critical_start: * are executed with interrupts enabled. 
*/ - .globl system_call -system_call: +ENTRY(system_call) stpt __LC_SYNC_ENTER_TIMER sysc_saveall: SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA @@ -395,8 +406,7 @@ sysc_tracenogo: # # a new process exits the kernel with ret_from_fork # - .globl ret_from_fork -ret_from_fork: +ENTRY(ret_from_fork) lg %r13,__LC_SVC_NEW_PSW+8 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct tm SP_PSW+1(%r15),0x01 # forking a kernel thread ? @@ -411,8 +421,7 @@ ret_from_fork: # kernel_execve function needs to deal with pt_regs that is not # at the usual place # - .globl kernel_execve -kernel_execve: +ENTRY(kernel_execve) stmg %r12,%r15,96(%r15) lgr %r14,%r15 aghi %r15,-SP_SIZE @@ -442,8 +451,7 @@ kernel_execve: * Program check handler routine */ - .globl pgm_check_handler -pgm_check_handler: +ENTRY(pgm_check_handler) /* * First we need to check for a special case: * Single stepping an instruction that disables the PER event mask will @@ -465,6 +473,7 @@ pgm_check_handler: xc SP_ILC(4,%r15),SP_ILC(%r15) mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct + HANDLE_SIE_INTERCEPT tm SP_PSW+1(%r15),0x01 # interrupting from user ? jz pgm_no_vtime UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER @@ -472,7 +481,6 @@ pgm_check_handler: mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER LAST_BREAK pgm_no_vtime: - HANDLE_SIE_INTERCEPT stg %r11,SP_ARGS(%r15) lgf %r3,__LC_PGM_ILC # load program interruption code lg %r4,__LC_TRANS_EXC_CODE @@ -507,6 +515,7 @@ pgm_per_std: CREATE_STACK_FRAME __LC_SAVE_AREA mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct + HANDLE_SIE_INTERCEPT tm SP_PSW+1(%r15),0x01 # interrupting from user ? jz pgm_no_vtime2 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER @@ -514,7 +523,6 @@ pgm_per_std: mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER LAST_BREAK pgm_no_vtime2: - HANDLE_SIE_INTERCEPT lg %r1,__TI_task(%r12) tm SP_PSW+1(%r15),0x01 # kernel per event ? jz kernel_per @@ -571,14 +579,14 @@ kernel_per: /* * IO interrupt handler routine */ - .globl io_int_handler -io_int_handler: +ENTRY(io_int_handler) stck __LC_INT_CLOCK stpt __LC_ASYNC_ENTER_TIMER SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+40 CREATE_STACK_FRAME __LC_SAVE_AREA+40 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct + HANDLE_SIE_INTERCEPT tm SP_PSW+1(%r15),0x01 # interrupting from user ? jz io_no_vtime UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER @@ -586,7 +594,6 @@ io_int_handler: mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER LAST_BREAK io_no_vtime: - HANDLE_SIE_INTERCEPT TRACE_IRQS_OFF la %r2,SP_PTREGS(%r15) # address of register-save area brasl %r14,do_IRQ # call standard irq handler @@ -706,14 +713,14 @@ io_notify_resume: /* * External interrupt handler routine */ - .globl ext_int_handler -ext_int_handler: +ENTRY(ext_int_handler) stck __LC_INT_CLOCK stpt __LC_ASYNC_ENTER_TIMER SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+40 CREATE_STACK_FRAME __LC_SAVE_AREA+40 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct + HANDLE_SIE_INTERCEPT tm SP_PSW+1(%r15),0x01 # interrupting from user ? 
jz ext_no_vtime UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER @@ -721,7 +728,6 @@ ext_int_handler: mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER LAST_BREAK ext_no_vtime: - HANDLE_SIE_INTERCEPT TRACE_IRQS_OFF lghi %r1,4096 la %r2,SP_PTREGS(%r15) # address of register-save area @@ -736,8 +742,7 @@ __critical_end: /* * Machine check handler routines */ - .globl mcck_int_handler -mcck_int_handler: +ENTRY(mcck_int_handler) stck __LC_MCCK_CLOCK la %r1,4095 # revalidate r1 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer @@ -785,6 +790,7 @@ mcck_int_main: lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? jno mcck_no_vtime # no -> no timer update + HANDLE_SIE_INTERCEPT tm SP_PSW+1(%r15),0x01 # interrupting from user ? jz mcck_no_vtime UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER @@ -804,7 +810,6 @@ mcck_no_vtime: stosm __SF_EMPTY(%r15),0x04 # turn dat on tm __TI_flags+7(%r12),_TIF_MCCK_PENDING jno mcck_return - HANDLE_SIE_INTERCEPT TRACE_IRQS_OFF brasl %r14,s390_handle_mcck TRACE_IRQS_ON @@ -823,8 +828,7 @@ mcck_done: */ #ifdef CONFIG_SMP __CPUINIT - .globl restart_int_handler -restart_int_handler: +ENTRY(restart_int_handler) basr %r1,0 restart_base: spt restart_vtime-restart_base(%r1) @@ -851,8 +855,7 @@ restart_vtime: /* * If we do not run with SMP enabled, let the new CPU crash ... */ - .globl restart_int_handler -restart_int_handler: +ENTRY(restart_int_handler) basr %r1,0 restart_base: lpswe restart_crash-restart_base(%r1) @@ -862,6 +865,26 @@ restart_crash: restart_go: #endif +# +# PSW restart interrupt handler +# +ENTRY(psw_restart_int_handler) + stg %r15,__LC_SAVE_AREA_64(%r0) # save r15 + larl %r15,restart_stack # load restart stack + lg %r15,0(%r15) + aghi %r15,-SP_SIZE # make room for pt_regs + stmg %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack + mvc SP_R15(8,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack + mvc SP_PSW(16,%r15),__LC_RST_OLD_PSW(%r0)# store restart old psw + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 + brasl %r14,do_restart + + larl %r14,restart_psw_crash # load disabled wait PSW if + lpswe 0(%r14) # do_restart returns + .align 8 +restart_psw_crash: + .quad 0x0002000080000000,0x0000000000000000 + restart_psw_crash + .section .kprobes.text, "ax" #ifdef CONFIG_CHECK_STACK @@ -1036,6 +1059,56 @@ cleanup_io_restore_insn: .Lcritical_end: .quad __critical_end +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +/* + * sie64a calling convention: + * %r2 pointer to sie control block + * %r3 guest register save area + */ +ENTRY(sie64a) + stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers + stg %r2,__SF_EMPTY(%r15) # save control block pointer + stg %r3,__SF_EMPTY+8(%r15) # save guest register save area + lmg %r0,%r13,0(%r3) # load guest gprs 0-13 + lg %r14,__LC_THREAD_INFO # pointer thread_info struct + oi __TI_flags+6(%r14),_TIF_SIE>>8 +sie_loop: + lg %r14,__LC_THREAD_INFO # pointer thread_info struct + tm __TI_flags+7(%r14),_TIF_EXIT_SIE + jnz sie_exit + lg %r14,__SF_EMPTY(%r15) # get control block pointer + SPP __SF_EMPTY(%r15) # set guest id + sie 0(%r14) +sie_done: + SPP __LC_CMF_HPP # set host id + lg %r14,__LC_THREAD_INFO # pointer thread_info struct +sie_exit: + ni __TI_flags+6(%r14),255-(_TIF_SIE>>8) + lg %r14,__SF_EMPTY+8(%r15) # load guest register save area + stmg %r0,%r13,0(%r14) # save guest gprs 0-13 + lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers + lghi %r2,0 + br %r14 +sie_fault: + lg 
%r14,__LC_THREAD_INFO # pointer thread_info struct + ni __TI_flags+6(%r14),255-(_TIF_SIE>>8) + lg %r14,__SF_EMPTY+8(%r15) # load guest register save area + stmg %r0,%r13,0(%r14) # save guest gprs 0-13 + lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers + lghi %r2,-EFAULT + br %r14 + + .align 8 +.Lsie_loop: + .quad sie_loop +.Lsie_done: + .quad sie_done + + .section __ex_table,"a" + .quad sie_loop,sie_fault + .previous +#endif + .section .rodata, "a" #define SYSCALL(esa,esame,emu) .long esame .globl sys_call_table diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index fb317bf2c37..2d781bab37b 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -22,6 +22,7 @@ */ #include <linux/init.h> +#include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/page.h> @@ -383,8 +384,7 @@ iplstart: # doesn't need a builtin ipl record. # .org 0x800 - .globl start -start: +ENTRY(start) stm %r0,%r15,0x07b0 # store registers basr %r12,%r0 .base: @@ -448,8 +448,7 @@ start: # or linload or SALIPL # .org 0x10000 - .globl startup -startup: +ENTRY(startup) basr %r13,0 # get base .LPG0: xc 0x200(256),0x200 # partially clear lowcore diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index b8f8dc12610..f21954b44dc 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S @@ -11,13 +11,13 @@ */ #include <linux/init.h> +#include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/page.h> __HEAD - .globl startup_continue -startup_continue: +ENTRY(startup_continue) basr %r13,0 # get base .LPG1: @@ -45,7 +45,7 @@ startup_continue: # virtual and never return ... .align 8 .Lentry:.long 0x00080000,0x80000000 + _stext -.Lctl: .long 0x04b50002 # cr0: various things +.Lctl: .long 0x04b50000 # cr0: various things .long 0 # cr1: primary space segment table .long .Lduct # cr2: dispatchable unit control table .long 0 # cr3: instruction authorization @@ -78,8 +78,7 @@ startup_continue: .Lbase_cc: .long sched_clock_base_cc - .globl _ehead -_ehead: +ENTRY(_ehead) #ifdef CONFIG_SHARED_KERNEL .org 0x100000 - 0x11000 # head.o ends at 0x11000 @@ -88,8 +87,8 @@ _ehead: # # startup-code, running in absolute addressing mode # - .globl _stext -_stext: basr %r13,0 # get base +ENTRY(_stext) + basr %r13,0 # get base .LPG3: # check control registers stctl %c0,%c15,0(%r15) diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index cdef6871741..ae5d492b069 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -11,13 +11,13 @@ */ #include <linux/init.h> +#include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/page.h> __HEAD - .globl startup_continue -startup_continue: +ENTRY(startup_continue) larl %r1,sched_clock_base_cc mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK larl %r13,.LPG1 # get base @@ -46,7 +46,7 @@ startup_continue: .align 16 .LPG1: .Lentry:.quad 0x0000000180000000,_stext -.Lctl: .quad 0x04350002 # cr0: various things +.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space .quad 0 # cr1: primary space segment table .quad .Lduct # cr2: dispatchable unit control table .quad 0 # cr3: instruction authorization @@ -76,8 +76,7 @@ startup_continue: .long 0x80000000,0,0,0 # invalid access-list entries .endr - .globl _ehead -_ehead: +ENTRY(_ehead) #ifdef CONFIG_SHARED_KERNEL .org 0x100000 - 0x11000 # head.o ends at 0x11000 @@ -86,8 +85,8 @@ _ehead: # # startup-code, running in absolute addressing mode # - .globl _stext -_stext: basr 
%r13,0 # get base +ENTRY(_stext) + basr %r13,0 # get base .LPG3: # check control registers stctg %c0,%c15,0(%r15) diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index a689070be28..48c71020636 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -45,11 +45,13 @@ * - halt * - power off * - reipl + * - restart */ #define ON_PANIC_STR "on_panic" #define ON_HALT_STR "on_halt" #define ON_POFF_STR "on_poff" #define ON_REIPL_STR "on_reboot" +#define ON_RESTART_STR "on_restart" struct shutdown_action; struct shutdown_trigger { @@ -1218,7 +1220,7 @@ static int __init reipl_fcp_init(void) /* sysfs: create fcp kset for mixing attr group and bin attrs */ reipl_fcp_kset = kset_create_and_add(IPL_FCP_STR, NULL, &reipl_kset->kobj); - if (!reipl_kset) { + if (!reipl_fcp_kset) { free_page((unsigned long) reipl_block_fcp); return -ENOMEM; } @@ -1544,17 +1546,20 @@ static char vmcmd_on_reboot[128]; static char vmcmd_on_panic[128]; static char vmcmd_on_halt[128]; static char vmcmd_on_poff[128]; +static char vmcmd_on_restart[128]; DEFINE_IPL_ATTR_STR_RW(vmcmd, on_reboot, "%s\n", "%s\n", vmcmd_on_reboot); DEFINE_IPL_ATTR_STR_RW(vmcmd, on_panic, "%s\n", "%s\n", vmcmd_on_panic); DEFINE_IPL_ATTR_STR_RW(vmcmd, on_halt, "%s\n", "%s\n", vmcmd_on_halt); DEFINE_IPL_ATTR_STR_RW(vmcmd, on_poff, "%s\n", "%s\n", vmcmd_on_poff); +DEFINE_IPL_ATTR_STR_RW(vmcmd, on_restart, "%s\n", "%s\n", vmcmd_on_restart); static struct attribute *vmcmd_attrs[] = { &sys_vmcmd_on_reboot_attr.attr, &sys_vmcmd_on_panic_attr.attr, &sys_vmcmd_on_halt_attr.attr, &sys_vmcmd_on_poff_attr.attr, + &sys_vmcmd_on_restart_attr.attr, NULL, }; @@ -1576,6 +1581,8 @@ static void vmcmd_run(struct shutdown_trigger *trigger) cmd = vmcmd_on_halt; else if (strcmp(trigger->name, ON_POFF_STR) == 0) cmd = vmcmd_on_poff; + else if (strcmp(trigger->name, ON_RESTART_STR) == 0) + cmd = vmcmd_on_restart; else return; @@ -1611,7 +1618,8 @@ static struct shutdown_action vmcmd_action = {SHUTDOWN_ACTION_VMCMD_STR, static void stop_run(struct shutdown_trigger *trigger) { - if (strcmp(trigger->name, ON_PANIC_STR) == 0) + if (strcmp(trigger->name, ON_PANIC_STR) == 0 || + strcmp(trigger->name, ON_RESTART_STR) == 0) disabled_wait((unsigned long) __builtin_return_address(0)); while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) cpu_relax(); @@ -1707,6 +1715,34 @@ static void do_panic(void) stop_run(&on_panic_trigger); } +/* on restart */ + +static struct shutdown_trigger on_restart_trigger = {ON_RESTART_STR, + &stop_action}; + +static ssize_t on_restart_show(struct kobject *kobj, + struct kobj_attribute *attr, char *page) +{ + return sprintf(page, "%s\n", on_restart_trigger.action->name); +} + +static ssize_t on_restart_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t len) +{ + return set_trigger(buf, &on_restart_trigger, len); +} + +static struct kobj_attribute on_restart_attr = + __ATTR(on_restart, 0644, on_restart_show, on_restart_store); + +void do_restart(void) +{ + smp_send_stop(); + on_restart_trigger.action->fn(&on_restart_trigger); + stop_run(&on_restart_trigger); +} + /* on halt */ static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action}; @@ -1783,7 +1819,9 @@ static void __init shutdown_triggers_init(void) if (sysfs_create_file(&shutdown_actions_kset->kobj, &on_poff_attr.attr)) goto fail; - + if (sysfs_create_file(&shutdown_actions_kset->kobj, + &on_restart_attr.attr)) + goto fail; return; fail: panic("shutdown_triggers_init failed\n"); @@ -1959,6 +1997,12 @@ static void 
do_reset_calls(void) { struct reset_call *reset; +#ifdef CONFIG_64BIT + if (diag308_set_works) { + diag308_reset(); + return; + } +#endif list_for_each_entry(reset, &rcall, list) reset->fn(); } diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index e3264f6a972..1f4050d45f7 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c @@ -88,15 +88,6 @@ int show_interrupts(struct seq_file *p, void *v) } /* - * For compatibilty only. S/390 specific setup of interrupts et al. is done - * much later in init_channel_subsystem(). - */ -void __init init_IRQ(void) -{ - /* nothing... */ -} - -/* * Switch to the asynchronous interrupt stack for softirq execution. */ asmlinkage void do_softirq(void) @@ -144,28 +135,45 @@ void init_irq_proc(void) #endif /* - * ext_int_hash[index] is the start of the list for all external interrupts - * that hash to this index. With the current set of external interrupts - * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000 - * iucv and 0x2603 pfault) this is always the first element. + * ext_int_hash[index] is the list head for all external interrupts that hash + * to this index. */ +static struct list_head ext_int_hash[256]; struct ext_int_info { - struct ext_int_info *next; ext_int_handler_t handler; u16 code; + struct list_head entry; + struct rcu_head rcu; }; -static struct ext_int_info *ext_int_hash[256]; +/* ext_int_hash_lock protects the handler lists for external interrupts */ +DEFINE_SPINLOCK(ext_int_hash_lock); + +static void __init init_external_interrupts(void) +{ + int idx; + + for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++) + INIT_LIST_HEAD(&ext_int_hash[idx]); +} static inline int ext_hash(u16 code) { return (code + (code >> 9)) & 0xff; } +static void ext_int_hash_update(struct rcu_head *head) +{ + struct ext_int_info *p = container_of(head, struct ext_int_info, rcu); + + kfree(p); +} + int register_external_interrupt(u16 code, ext_int_handler_t handler) { struct ext_int_info *p; + unsigned long flags; int index; p = kmalloc(sizeof(*p), GFP_ATOMIC); @@ -174,33 +182,27 @@ int register_external_interrupt(u16 code, ext_int_handler_t handler) p->code = code; p->handler = handler; index = ext_hash(code); - p->next = ext_int_hash[index]; - ext_int_hash[index] = p; + + spin_lock_irqsave(&ext_int_hash_lock, flags); + list_add_rcu(&p->entry, &ext_int_hash[index]); + spin_unlock_irqrestore(&ext_int_hash_lock, flags); return 0; } EXPORT_SYMBOL(register_external_interrupt); int unregister_external_interrupt(u16 code, ext_int_handler_t handler) { - struct ext_int_info *p, *q; - int index; + struct ext_int_info *p; + unsigned long flags; + int index = ext_hash(code); - index = ext_hash(code); - q = NULL; - p = ext_int_hash[index]; - while (p) { - if (p->code == code && p->handler == handler) - break; - q = p; - p = p->next; - } - if (!p) - return -ENOENT; - if (q) - q->next = p->next; - else - ext_int_hash[index] = p->next; - kfree(p); + spin_lock_irqsave(&ext_int_hash_lock, flags); + list_for_each_entry_rcu(p, &ext_int_hash[index], entry) + if (p->code == code && p->handler == handler) { + list_del_rcu(&p->entry); + call_rcu(&p->rcu, ext_int_hash_update); + } + spin_unlock_irqrestore(&ext_int_hash_lock, flags); return 0; } EXPORT_SYMBOL(unregister_external_interrupt); @@ -224,15 +226,22 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code, kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; if (code != 0x1004) __get_cpu_var(s390_idle).nohz_delay = 1; + index = ext_hash(code); - for (p = 
ext_int_hash[index]; p; p = p->next) { + rcu_read_lock(); + list_for_each_entry_rcu(p, &ext_int_hash[index], entry) if (likely(p->code == code)) p->handler(ext_int_code, param32, param64); - } + rcu_read_unlock(); irq_exit(); set_irq_regs(old_regs); } +void __init init_IRQ(void) +{ + init_external_interrupts(); +} + static DEFINE_SPINLOCK(sc_irq_lock); static int sc_irq_refcount; diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 1e6a5579562..7e2c38ba137 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -5,21 +5,19 @@ * */ +#include <linux/linkage.h> #include <asm/asm-offsets.h> .section .kprobes.text, "ax" - .globl ftrace_stub -ftrace_stub: +ENTRY(ftrace_stub) br %r14 - .globl _mcount -_mcount: +ENTRY(_mcount) #ifdef CONFIG_DYNAMIC_FTRACE br %r14 - .globl ftrace_caller -ftrace_caller: +ENTRY(ftrace_caller) #endif stm %r2,%r5,16(%r15) bras %r1,2f @@ -41,8 +39,7 @@ ftrace_caller: #ifdef CONFIG_FUNCTION_GRAPH_TRACER l %r2,100(%r15) l %r3,152(%r15) - .globl ftrace_graph_caller -ftrace_graph_caller: +ENTRY(ftrace_graph_caller) # The bras instruction gets runtime patched to call prepare_ftrace_return. # See ftrace_enable_ftrace_graph_caller. The patched instruction is: # bras %r14,prepare_ftrace_return @@ -56,8 +53,7 @@ ftrace_graph_caller: #ifdef CONFIG_FUNCTION_GRAPH_TRACER - .globl return_to_handler -return_to_handler: +ENTRY(return_to_handler) stm %r2,%r5,16(%r15) st %r14,56(%r15) lr %r0,%r15 diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S index e73667286ac..f70cadec68f 100644 --- a/arch/s390/kernel/mcount64.S +++ b/arch/s390/kernel/mcount64.S @@ -5,21 +5,19 @@ * */ +#include <linux/linkage.h> #include <asm/asm-offsets.h> .section .kprobes.text, "ax" - .globl ftrace_stub -ftrace_stub: +ENTRY(ftrace_stub) br %r14 - .globl _mcount -_mcount: +ENTRY(_mcount) #ifdef CONFIG_DYNAMIC_FTRACE br %r14 - .globl ftrace_caller -ftrace_caller: +ENTRY(ftrace_caller) #endif larl %r1,function_trace_stop icm %r1,0xf,0(%r1) @@ -37,8 +35,7 @@ ftrace_caller: #ifdef CONFIG_FUNCTION_GRAPH_TRACER lg %r2,168(%r15) lg %r3,272(%r15) - .globl ftrace_graph_caller -ftrace_graph_caller: +ENTRY(ftrace_graph_caller) # The bras instruction gets runtime patched to call prepare_ftrace_return. # See ftrace_enable_ftrace_graph_caller. 
The patched instruction is: # bras %r14,prepare_ftrace_return @@ -52,8 +49,7 @@ ftrace_graph_caller: #ifdef CONFIG_FUNCTION_GRAPH_TRACER - .globl return_to_handler -return_to_handler: +ENTRY(return_to_handler) stmg %r2,%r5,32(%r15) lgr %r1,%r15 aghi %r15,-160 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index f7167ee4604..dfcb3436bad 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -45,13 +45,6 @@ #define PLT_ENTRY_SIZE 20 #endif /* CONFIG_64BIT */ -void *module_alloc(unsigned long size) -{ - if (size == 0) - return NULL; - return vmalloc(size); -} - /* Free memory returned from module_alloc */ void module_free(struct module *mod, void *module_region) { @@ -176,15 +169,6 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, return 0; } -int -apply_relocate(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex, - unsigned int relsec, struct module *me) -{ - printk(KERN_ERR "module %s: RELOCATION unsupported\n", - me->name); - return -ENOEXEC; -} - static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, struct module *me) @@ -409,7 +393,3 @@ int module_finalize(const Elf_Ehdr *hdr, me->arch.syminfo = NULL; return 0; } - -void module_arch_cleanup(struct module *mod) -{ -} diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S index cb899d9f850..303d961c3bb 100644 --- a/arch/s390/kernel/reipl.S +++ b/arch/s390/kernel/reipl.S @@ -6,14 +6,15 @@ * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com) */ +#include <linux/linkage.h> #include <asm/asm-offsets.h> # # do_reipl_asm # Parameter: r2 = schid of reipl device # - .globl do_reipl_asm -do_reipl_asm: basr %r13,0 +ENTRY(do_reipl_asm) + basr %r13,0 .Lpg0: lpsw .Lnewpsw-.Lpg0(%r13) .Lpg1: # do store status of all registers diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S index 9eabbc90795..e690975403f 100644 --- a/arch/s390/kernel/reipl64.S +++ b/arch/s390/kernel/reipl64.S @@ -1,35 +1,79 @@ /* - * Copyright IBM Corp 2000,2009 + * Copyright IBM Corp 2000,2011 * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>, * Denis Joseph Barrow, */ +#include <linux/linkage.h> #include <asm/asm-offsets.h> # +# store_status +# +# Prerequisites to run this function: +# - Prefix register is set to zero +# - Original prefix register is stored in "dump_prefix_page" +# - Lowcore protection is off +# +ENTRY(store_status) + /* Save register one and load save area base */ + stg %r1,__LC_SAVE_AREA_64(%r0) + lghi %r1,SAVE_AREA_BASE + /* General purpose registers */ + stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + lg %r2,__LC_SAVE_AREA_64(%r0) + stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1) + /* Control registers */ + stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + /* Access registers */ + stam %a0,%a15,__LC_AREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + /* Floating point registers */ + std %f0, 0x00 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f1, 0x08 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f2, 0x10 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f3, 0x18 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f4, 0x20 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f5, 0x28 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f6, 0x30 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f7, 0x38 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f8, 0x40 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f9, 0x48 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f10,0x50 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std 
%f11,0x58 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f12,0x60 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f13,0x68 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f14,0x70 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f15,0x78 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + /* Floating point control register */ + stfpc __LC_FP_CREG_SAVE_AREA-SAVE_AREA_BASE(%r1) + /* CPU timer */ + stpt __LC_CPU_TIMER_SAVE_AREA-SAVE_AREA_BASE(%r1) + /* Saved prefix register */ + larl %r2,dump_prefix_page + mvc __LC_PREFIX_SAVE_AREA-SAVE_AREA_BASE(4,%r1),0(%r2) + /* Clock comparator - seven bytes */ + larl %r2,.Lclkcmp + stckc 0(%r2) + mvc __LC_CLOCK_COMP_SAVE_AREA-SAVE_AREA_BASE + 1(7,%r1),1(%r2) + /* Program status word */ + epsw %r2,%r3 + st %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 0(%r1) + st %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1) + larl %r2,store_status + stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1) + br %r14 +.align 8 +.Lclkcmp: .quad 0x0000000000000000 + +# # do_reipl_asm # Parameter: r2 = schid of reipl device # - .globl do_reipl_asm -do_reipl_asm: basr %r13,0 +ENTRY(do_reipl_asm) + basr %r13,0 .Lpg0: lpswe .Lnewpsw-.Lpg0(%r13) -.Lpg1: # do store status of all registers - - stg %r1,.Lregsave-.Lpg0(%r13) - lghi %r1,0x1000 - stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-0x1000(%r1) - lg %r0,.Lregsave-.Lpg0(%r13) - stg %r0,__LC_GPREGS_SAVE_AREA-0x1000+8(%r1) - stctg %c0,%c15,__LC_CREGS_SAVE_AREA-0x1000(%r1) - stam %a0,%a15,__LC_AREGS_SAVE_AREA-0x1000(%r1) - lg %r10,.Ldump_pfx-.Lpg0(%r13) - mvc __LC_PREFIX_SAVE_AREA-0x1000(4,%r1),0(%r10) - stfpc __LC_FP_CREG_SAVE_AREA-0x1000(%r1) - stckc .Lclkcmp-.Lpg0(%r13) - mvc __LC_CLOCK_COMP_SAVE_AREA-0x1000(7,%r1),.Lclkcmp-.Lpg0(%r13) - stpt __LC_CPU_TIMER_SAVE_AREA-0x1000(%r1) - stg %r13, __LC_PSW_SAVE_AREA-0x1000+8(%r1) +.Lpg1: brasl %r14,store_status lctlg %c6,%c6,.Lall-.Lpg0(%r13) lgr %r1,%r2 @@ -66,10 +110,7 @@ do_reipl_asm: basr %r13,0 st %r14,.Ldispsw+12-.Lpg0(%r13) lpswe .Ldispsw-.Lpg0(%r13) .align 8 -.Lclkcmp: .quad 0x0000000000000000 .Lall: .quad 0x00000000ff000000 -.Ldump_pfx: .quad dump_prefix_page -.Lregsave: .quad 0x0000000000000000 .align 16 /* * These addresses have to be 31 bit otherwise diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S index 3b456b80bce..c91d70aede9 100644 --- a/arch/s390/kernel/relocate_kernel.S +++ b/arch/s390/kernel/relocate_kernel.S @@ -8,6 +8,8 @@ * */ +#include <linux/linkage.h> + /* * moves the new kernel to its destination... * %r2 = pointer to first kimage_entry_t @@ -22,8 +24,7 @@ */ .text - .globl relocate_kernel - relocate_kernel: +ENTRY(relocate_kernel) basr %r13,0 # base address .base: stnsm sys_msk-.base(%r13),0xfb # disable DAT @@ -112,6 +113,7 @@ .byte 0 .align 8 relocate_kernel_end: + .align 8 .globl relocate_kernel_len relocate_kernel_len: .quad relocate_kernel_end - relocate_kernel diff --git a/arch/s390/kernel/relocate_kernel64.S b/arch/s390/kernel/relocate_kernel64.S index 1f9ea2067b5..7c3ce589a7f 100644 --- a/arch/s390/kernel/relocate_kernel64.S +++ b/arch/s390/kernel/relocate_kernel64.S @@ -8,6 +8,8 @@ * */ +#include <linux/linkage.h> + /* * moves the new kernel to its destination... 
* %r2 = pointer to first kimage_entry_t @@ -23,8 +25,7 @@ */ .text - .globl relocate_kernel - relocate_kernel: +ENTRY(relocate_kernel) basr %r13,0 # base address .base: stnsm sys_msk-.base(%r13),0xfb # disable DAT @@ -115,6 +116,7 @@ .byte 0 .align 8 relocate_kernel_end: + .align 8 .globl relocate_kernel_len relocate_kernel_len: .quad relocate_kernel_end - relocate_kernel diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c index 656fcbb9bd8..57b536649b0 100644 --- a/arch/s390/kernel/s390_ksyms.c +++ b/arch/s390/kernel/s390_ksyms.c @@ -1,6 +1,10 @@ #include <linux/module.h> +#include <linux/kvm_host.h> #include <asm/ftrace.h> #ifdef CONFIG_FUNCTION_TRACER EXPORT_SYMBOL(_mcount); #endif +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +EXPORT_SYMBOL(sie64a); +#endif diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S index 2e82fdd8932..95792d846bb 100644 --- a/arch/s390/kernel/sclp.S +++ b/arch/s390/kernel/sclp.S @@ -8,6 +8,8 @@ * */ +#include <linux/linkage.h> + LC_EXT_NEW_PSW = 0x58 # addr of ext int handler LC_EXT_NEW_PSW_64 = 0x1b0 # addr of ext int handler 64 bit LC_EXT_INT_PARAM = 0x80 # addr of ext int parameter @@ -260,8 +262,7 @@ _sclp_print: # R2 = 0 on success, 1 on failure # - .globl _sclp_print_early -_sclp_print_early: +ENTRY(_sclp_print_early) stm %r6,%r15,24(%r15) # save registers ahi %r15,-96 # create stack frame #ifdef CONFIG_64BIT diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 0c35dee10b0..7b371c37061 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -346,7 +346,7 @@ setup_lowcore(void) lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; lc->restart_psw.addr = - PSW_ADDR_AMODE | (unsigned long) restart_int_handler; + PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; if (user_mode != HOME_SPACE_MODE) lc->restart_psw.mask |= PSW_ASC_HOME; lc->external_new_psw.mask = psw_kernel_bits; @@ -529,6 +529,27 @@ static void __init setup_memory_end(void) memory_end = memory_size; } +void *restart_stack __attribute__((__section__(".data"))); + +/* + * Setup new PSW and allocate stack for PSW restart interrupt + */ +static void __init setup_restart_psw(void) +{ + psw_t psw; + + restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0); + restart_stack += ASYNC_SIZE; + + /* + * Setup restart PSW for absolute zero lowcore. 
This is necessary + * if PSW restart is done on an offline CPU that has lowcore zero + */ + psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; + psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; + copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw)); +} + static void __init setup_memory(void) { @@ -731,6 +752,7 @@ static void __init setup_hwcaps(void) strcpy(elf_platform, "z10"); break; case 0x2817: + case 0x2818: strcpy(elf_platform, "z196"); break; } @@ -792,6 +814,7 @@ setup_arch(char **cmdline_p) setup_addressing_mode(); setup_memory(); setup_resources(); + setup_restart_psw(); setup_lowcore(); cpu_init(); diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index abbb3c3c7aa..9a40e1cc5ec 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -57,17 +57,15 @@ typedef struct */ SYSCALL_DEFINE3(sigsuspend, int, history0, int, history1, old_sigset_t, mask) { - mask &= _BLOCKABLE; - spin_lock_irq(&current->sighand->siglock); - current->saved_sigmask = current->blocked; - siginitset(&current->blocked, mask); - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + sigset_t blocked; + current->saved_sigmask = current->blocked; + mask &= _BLOCKABLE; + siginitset(&blocked, mask); + set_current_blocked(&blocked); set_current_state(TASK_INTERRUPTIBLE); schedule(); - set_thread_flag(TIF_RESTORE_SIGMASK); - + set_restore_sigmask(); return -ERESTARTNOHAND; } @@ -172,18 +170,11 @@ SYSCALL_DEFINE0(sigreturn) goto badframe; if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE)) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); - + set_current_blocked(&set); if (restore_sigregs(regs, &frame->sregs)) goto badframe; - return regs->gprs[2]; - badframe: force_sig(SIGSEGV, current); return 0; @@ -199,21 +190,14 @@ SYSCALL_DEFINE0(rt_sigreturn) goto badframe; if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); - + set_current_blocked(&set); if (restore_sigregs(regs, &frame->uc.uc_mcontext)) goto badframe; - if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->gprs[15]) == -EFAULT) goto badframe; return regs->gprs[2]; - badframe: force_sig(SIGSEGV, current); return 0; @@ -385,14 +369,11 @@ give_sigsegv: return -EFAULT; } -/* - * OK, we're invoking a handler - */ - -static int -handle_signal(unsigned long sig, struct k_sigaction *ka, - siginfo_t *info, sigset_t *oldset, struct pt_regs * regs) +static int handle_signal(unsigned long sig, struct k_sigaction *ka, + siginfo_t *info, sigset_t *oldset, + struct pt_regs *regs) { + sigset_t blocked; int ret; /* Set up the stack frame */ @@ -400,17 +381,13 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, ret = setup_rt_frame(sig, ka, info, oldset, regs); else ret = setup_frame(sig, ka, oldset, regs); - - if (ret == 0) { - spin_lock_irq(&current->sighand->siglock); - sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); - if (!(ka->sa.sa_flags & SA_NODEFER)) - sigaddset(&current->blocked,sig); - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); - } - - return ret; + if (ret) + return ret; + sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask); + if (!(ka->sa.sa_flags & SA_NODEFER)) + sigaddset(&blocked, sig); + set_current_blocked(&blocked); + return 0; } /* diff --git 
a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 52420d2785b..6ab16ac64d2 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -262,7 +262,7 @@ void smp_ctl_set_bit(int cr, int bit) memset(&parms.orvals, 0, sizeof(parms.orvals)); memset(&parms.andvals, 0xff, sizeof(parms.andvals)); - parms.orvals[cr] = 1 << bit; + parms.orvals[cr] = 1UL << bit; on_each_cpu(smp_ctl_bit_callback, &parms, 1); } EXPORT_SYMBOL(smp_ctl_set_bit); @@ -276,7 +276,7 @@ void smp_ctl_clear_bit(int cr, int bit) memset(&parms.orvals, 0, sizeof(parms.orvals)); memset(&parms.andvals, 0xff, sizeof(parms.andvals)); - parms.andvals[cr] = ~(1L << bit); + parms.andvals[cr] = ~(1UL << bit); on_each_cpu(smp_ctl_bit_callback, &parms, 1); } EXPORT_SYMBOL(smp_ctl_clear_bit); @@ -452,23 +452,27 @@ out: */ int __cpuinit start_secondary(void *cpuvoid) { - /* Setup the cpu */ cpu_init(); preempt_disable(); - /* Enable TOD clock interrupts on the secondary cpu. */ init_cpu_timer(); - /* Enable cpu timer interrupts on the secondary cpu. */ init_cpu_vtimer(); - /* Enable pfault pseudo page faults on this cpu. */ pfault_init(); - /* call cpu notifiers */ notify_cpu_starting(smp_processor_id()); - /* Mark this cpu as online */ ipi_call_lock(); set_cpu_online(smp_processor_id(), true); ipi_call_unlock(); - /* Switch on interrupts */ + __ctl_clear_bit(0, 28); /* Disable lowcore protection */ + S390_lowcore.restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; + S390_lowcore.restart_psw.addr = + PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; + __ctl_set_bit(0, 28); /* Enable lowcore protection */ + /* + * Wait until the cpu which brought this one up marked it + * active before enabling interrupts. + */ + while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask)) + cpu_relax(); local_irq_enable(); /* cpu_idle will call schedule for us */ cpu_idle(); @@ -507,7 +511,11 @@ static int __cpuinit smp_alloc_lowcore(int cpu) memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512); lowcore->async_stack = async_stack + ASYNC_SIZE; lowcore->panic_stack = panic_stack + PAGE_SIZE; - + lowcore->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; + lowcore->restart_psw.addr = + PSW_ADDR_AMODE | (unsigned long) restart_int_handler; + if (user_mode != HOME_SPACE_MODE) + lowcore->restart_psw.mask |= PSW_ASC_HOME; #ifndef CONFIG_64BIT if (MACHINE_HAS_IEEE) { unsigned long save_area; @@ -654,7 +662,8 @@ int __cpu_disable(void) /* disable all external interrupts */ cr_parms.orvals[0] = 0; cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 | - 1 << 10 | 1 << 9 | 1 << 6 | 1 << 4); + 1 << 10 | 1 << 9 | 1 << 6 | 1 << 5 | + 1 << 4); /* disable all I/O interrupts */ cr_parms.orvals[6] = 0; cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 | diff --git a/arch/s390/kernel/switch_cpu.S b/arch/s390/kernel/switch_cpu.S index 20530dd2eab..bfe070bc765 100644 --- a/arch/s390/kernel/switch_cpu.S +++ b/arch/s390/kernel/switch_cpu.S @@ -5,6 +5,7 @@ * */ +#include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/ptrace.h> @@ -16,9 +17,7 @@ # %r6 - destination cpu .section .text - .align 4 - .globl smp_switch_to_cpu -smp_switch_to_cpu: +ENTRY(smp_switch_to_cpu) stm %r6,%r15,__SF_GPRS(%r15) lr %r1,%r15 ahi %r15,-STACK_FRAME_OVERHEAD @@ -33,8 +32,7 @@ smp_switch_to_cpu: brc 2,2b /* busy, try again */ 3: j 3b - .globl smp_restart_cpu -smp_restart_cpu: +ENTRY(smp_restart_cpu) basr %r13,0 0: la %r1,.gprregs_addr-0b(%r13) l %r1,0(%r1) diff --git a/arch/s390/kernel/switch_cpu64.S b/arch/s390/kernel/switch_cpu64.S index 
5be3f43898f..fcc42d799e4 100644 --- a/arch/s390/kernel/switch_cpu64.S +++ b/arch/s390/kernel/switch_cpu64.S @@ -5,6 +5,7 @@ * */ +#include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/ptrace.h> @@ -16,9 +17,7 @@ # %r6 - destination cpu .section .text - .align 4 - .globl smp_switch_to_cpu -smp_switch_to_cpu: +ENTRY(smp_switch_to_cpu) stmg %r6,%r15,__SF_GPRS(%r15) lgr %r1,%r15 aghi %r15,-STACK_FRAME_OVERHEAD @@ -31,8 +30,7 @@ smp_switch_to_cpu: brc 2,2b /* busy, try again */ 3: j 3b - .globl smp_restart_cpu -smp_restart_cpu: +ENTRY(smp_restart_cpu) larl %r1,.gprregs lmg %r0,%r15,0(%r1) 1: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */ diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index 1f066e46e83..51bcdb50a23 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S @@ -7,6 +7,7 @@ * Michael Holzheu <holzheu@linux.vnet.ibm.com> */ +#include <linux/linkage.h> #include <asm/page.h> #include <asm/ptrace.h> #include <asm/thread_info.h> @@ -22,9 +23,7 @@ * This function runs with disabled interrupts. */ .section .text - .align 4 - .globl swsusp_arch_suspend -swsusp_arch_suspend: +ENTRY(swsusp_arch_suspend) stmg %r6,%r15,__SF_GPRS(%r15) lgr %r1,%r15 aghi %r15,-STACK_FRAME_OVERHEAD @@ -112,8 +111,7 @@ swsusp_arch_suspend: * Then we return to the function that called swsusp_arch_suspend(). * swsusp_arch_resume() runs with disabled interrupts. */ - .globl swsusp_arch_resume -swsusp_arch_resume: +ENTRY(swsusp_arch_resume) stmg %r6,%r15,__SF_GPRS(%r15) lgr %r1,%r15 aghi %r15,-STACK_FRAME_OVERHEAD diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 6ee39ef8fe4..73eb08c874f 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -177,7 +177,7 @@ SYSCALL(sys_getresuid16,sys_ni_syscall,sys32_getresuid16_wrapper) /* 165 old get NI_SYSCALL /* for vm86 */ NI_SYSCALL /* old sys_query_module */ SYSCALL(sys_poll,sys_poll,sys32_poll_wrapper) -SYSCALL(sys_nfsservctl,sys_nfsservctl,compat_sys_nfsservctl_wrapper) +NI_SYSCALL /* old nfsservctl */ SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper) /* 170 old setresgid16 syscall */ SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper) /* old getresgid16 syscall */ SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper) diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index a65d2e82f61..ffabcd9d336 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -18,7 +18,7 @@ #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> -#include <linux/tracehook.h> +#include <linux/ptrace.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/smp.h> @@ -36,21 +36,17 @@ #include <asm/system.h> #include <asm/uaccess.h> #include <asm/io.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <asm/mathemu.h> #include <asm/cpcmd.h> #include <asm/lowcore.h> #include <asm/debug.h> #include "entry.h" -pgm_check_handler_t *pgm_check_table[128]; +void (*pgm_check_table[128])(struct pt_regs *, long, unsigned long); int show_unhandled_signals; -extern pgm_check_handler_t do_protection_exception; -extern pgm_check_handler_t do_dat_exception; -extern pgm_check_handler_t do_asce_exception; - #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) #ifndef CONFIG_64BIT @@ -329,10 +325,17 @@ static inline void __user *get_psw_address(struct pt_regs *regs, void __kprobes do_per_trap(struct pt_regs *regs) { + siginfo_t info; + if (notify_die(DIE_SSTEP, "sstep", regs, 
0, 0, SIGTRAP) == NOTIFY_STOP) return; - if (tracehook_consider_fatal_signal(current, SIGTRAP)) - force_sig(SIGTRAP, current); + if (!current->ptrace) + return; + info.si_signo = SIGTRAP; + info.si_errno = 0; + info.si_code = TRAP_HWBKPT; + info.si_addr = (void *) current->thread.per_event.address; + force_sig_info(SIGTRAP, &info, current); } static void default_trap_handler(struct pt_regs *regs, long pgm_int_code, @@ -425,9 +428,13 @@ static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code, if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) return; if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { - if (tracehook_consider_fatal_signal(current, SIGTRAP)) - force_sig(SIGTRAP, current); - else + if (current->ptrace) { + info.si_signo = SIGTRAP; + info.si_errno = 0; + info.si_code = TRAP_BRKPT; + info.si_addr = location; + force_sig_info(SIGTRAP, &info, current); + } else signal = SIGILL; #ifdef CONFIG_MATHEMU } else if (opcode[0] == 0xb3) { @@ -489,9 +496,8 @@ static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code, #ifdef CONFIG_MATHEMU -asmlinkage void specification_exception(struct pt_regs *regs, - long pgm_int_code, - unsigned long trans_exc_code) +void specification_exception(struct pt_regs *regs, long pgm_int_code, + unsigned long trans_exc_code) { __u8 opcode[6]; __u16 __user *location = NULL; @@ -648,7 +654,7 @@ static void space_switch_exception(struct pt_regs *regs, long pgm_int_code, do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info); } -asmlinkage void __kprobes kernel_stack_overflow(struct pt_regs * regs) +void __kprobes kernel_stack_overflow(struct pt_regs * regs) { bust_spinlocks(1); printk("Kernel stack overflow.\n"); diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig index f66a1bdbb61..a21634173a6 100644 --- a/arch/s390/kvm/Kconfig +++ b/arch/s390/kvm/Kconfig @@ -37,6 +37,5 @@ config KVM # OK, it's a little counter-intuitive to do this, but it puts it neatly under # the virtualization menu. source drivers/vhost/Kconfig -source drivers/virtio/Kconfig endif # VIRTUALIZATION diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile index 860d26514c0..3975722bb19 100644 --- a/arch/s390/kvm/Makefile +++ b/arch/s390/kvm/Makefile @@ -10,5 +10,5 @@ common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o) ccflags-y := -Ivirt/kvm -Iarch/s390/kvm -kvm-objs := $(common-objs) kvm-s390.o sie64a.o intercept.o interrupt.o priv.o sigp.o diag.o +kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o diag.o obj-$(CONFIG_KVM) += kvm.o diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index 03c716a0f01..c86f6ae43f7 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h @@ -1,5 +1,5 @@ /* - * gaccess.h - access guest memory + * access.h - access guest memory * * Copyright IBM Corp. 
2008,2009 * @@ -22,20 +22,13 @@ static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu, unsigned long guestaddr) { unsigned long prefix = vcpu->arch.sie_block->prefix; - unsigned long origin = vcpu->arch.sie_block->gmsor; - unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu); if (guestaddr < 2 * PAGE_SIZE) guestaddr += prefix; else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE)) guestaddr -= prefix; - if (guestaddr > memsize) - return (void __user __force *) ERR_PTR(-EFAULT); - - guestaddr += origin; - - return (void __user *) guestaddr; + return (void __user *) gmap_fault(guestaddr, vcpu->arch.gmap); } static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr, @@ -141,11 +134,11 @@ static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr, static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, unsigned long guestdest, - const void *from, unsigned long n) + void *from, unsigned long n) { int rc; unsigned long i; - const u8 *data = from; + u8 *data = from; for (i = 0; i < n; i++) { rc = put_guest_u8(vcpu, guestdest++, *(data++)); @@ -155,12 +148,95 @@ static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, return 0; } +static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu, + unsigned long guestdest, + void *from, unsigned long n) +{ + int r; + void __user *uptr; + unsigned long size; + + if (guestdest + n < guestdest) + return -EFAULT; + + /* simple case: all within one segment table entry? */ + if ((guestdest & PMD_MASK) == ((guestdest+n) & PMD_MASK)) { + uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap); + + if (IS_ERR((void __force *) uptr)) + return PTR_ERR((void __force *) uptr); + + r = copy_to_user(uptr, from, n); + + if (r) + r = -EFAULT; + + goto out; + } + + /* copy first segment */ + uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap); + + if (IS_ERR((void __force *) uptr)) + return PTR_ERR((void __force *) uptr); + + size = PMD_SIZE - (guestdest & ~PMD_MASK); + + r = copy_to_user(uptr, from, size); + + if (r) { + r = -EFAULT; + goto out; + } + from += size; + n -= size; + guestdest += size; + + /* copy full segments */ + while (n >= PMD_SIZE) { + uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap); + + if (IS_ERR((void __force *) uptr)) + return PTR_ERR((void __force *) uptr); + + r = copy_to_user(uptr, from, PMD_SIZE); + + if (r) { + r = -EFAULT; + goto out; + } + from += PMD_SIZE; + n -= PMD_SIZE; + guestdest += PMD_SIZE; + } + + /* copy the tail segment */ + if (n) { + uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap); + + if (IS_ERR((void __force *) uptr)) + return PTR_ERR((void __force *) uptr); + + r = copy_to_user(uptr, from, n); + + if (r) + r = -EFAULT; + } +out: + return r; +} + +static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, + unsigned long guestdest, + void *from, unsigned long n) +{ + return __copy_to_guest_fast(vcpu, guestdest, from, n); +} + static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest, - const void *from, unsigned long n) + void *from, unsigned long n) { unsigned long prefix = vcpu->arch.sie_block->prefix; - unsigned long origin = vcpu->arch.sie_block->gmsor; - unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu); if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE)) goto slowpath; @@ -177,15 +253,7 @@ static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest, else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE)) 
guestdest -= prefix; - if (guestdest + n > memsize) - return -EFAULT; - - if (guestdest + n < guestdest) - return -EFAULT; - - guestdest += origin; - - return copy_to_user((void __user *) guestdest, from, n); + return __copy_to_guest_fast(vcpu, guestdest, from, n); slowpath: return __copy_to_guest_slow(vcpu, guestdest, from, n); } @@ -206,74 +274,113 @@ static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to, return 0; } -static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to, - unsigned long guestsrc, unsigned long n) +static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to, + unsigned long guestsrc, + unsigned long n) { - unsigned long prefix = vcpu->arch.sie_block->prefix; - unsigned long origin = vcpu->arch.sie_block->gmsor; - unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu); + int r; + void __user *uptr; + unsigned long size; - if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE)) - goto slowpath; + if (guestsrc + n < guestsrc) + return -EFAULT; - if ((guestsrc < prefix) && (guestsrc + n > prefix)) - goto slowpath; + /* simple case: all within one segment table entry? */ + if ((guestsrc & PMD_MASK) == ((guestsrc+n) & PMD_MASK)) { + uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap); - if ((guestsrc < prefix + 2 * PAGE_SIZE) - && (guestsrc + n > prefix + 2 * PAGE_SIZE)) - goto slowpath; + if (IS_ERR((void __force *) uptr)) + return PTR_ERR((void __force *) uptr); - if (guestsrc < 2 * PAGE_SIZE) - guestsrc += prefix; - else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE)) - guestsrc -= prefix; + r = copy_from_user(to, uptr, n); - if (guestsrc + n > memsize) - return -EFAULT; + if (r) + r = -EFAULT; - if (guestsrc + n < guestsrc) - return -EFAULT; + goto out; + } - guestsrc += origin; + /* copy first segment */ + uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap); - return copy_from_user(to, (void __user *) guestsrc, n); -slowpath: - return __copy_from_guest_slow(vcpu, to, guestsrc, n); -} + if (IS_ERR((void __force *) uptr)) + return PTR_ERR((void __force *) uptr); -static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, - unsigned long guestdest, - const void *from, unsigned long n) -{ - unsigned long origin = vcpu->arch.sie_block->gmsor; - unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu); + size = PMD_SIZE - (guestsrc & ~PMD_MASK); - if (guestdest + n > memsize) - return -EFAULT; + r = copy_from_user(to, uptr, size); - if (guestdest + n < guestdest) - return -EFAULT; + if (r) { + r = -EFAULT; + goto out; + } + to += size; + n -= size; + guestsrc += size; + + /* copy full segments */ + while (n >= PMD_SIZE) { + uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap); + + if (IS_ERR((void __force *) uptr)) + return PTR_ERR((void __force *) uptr); + + r = copy_from_user(to, uptr, PMD_SIZE); + + if (r) { + r = -EFAULT; + goto out; + } + to += PMD_SIZE; + n -= PMD_SIZE; + guestsrc += PMD_SIZE; + } + + /* copy the tail segment */ + if (n) { + uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap); - guestdest += origin; + if (IS_ERR((void __force *) uptr)) + return PTR_ERR((void __force *) uptr); - return copy_to_user((void __user *) guestdest, from, n); + r = copy_from_user(to, uptr, n); + + if (r) + r = -EFAULT; + } +out: + return r; } static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to, unsigned long guestsrc, unsigned long n) { - unsigned long origin = vcpu->arch.sie_block->gmsor; - unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu); + 
return __copy_from_guest_fast(vcpu, to, guestsrc, n); +} - if (guestsrc + n > memsize) - return -EFAULT; +static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to, + unsigned long guestsrc, unsigned long n) +{ + unsigned long prefix = vcpu->arch.sie_block->prefix; - if (guestsrc + n < guestsrc) - return -EFAULT; + if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE)) + goto slowpath; - guestsrc += origin; + if ((guestsrc < prefix) && (guestsrc + n > prefix)) + goto slowpath; + + if ((guestsrc < prefix + 2 * PAGE_SIZE) + && (guestsrc + n > prefix + 2 * PAGE_SIZE)) + goto slowpath; + + if (guestsrc < 2 * PAGE_SIZE) + guestsrc += prefix; + else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE)) + guestsrc -= prefix; - return copy_from_user(to, (void __user *) guestsrc, n); + return __copy_from_guest_fast(vcpu, to, guestsrc, n); +slowpath: + return __copy_from_guest_slow(vcpu, to, guestsrc, n); } #endif diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index f7b6df45d8b..c7c51898984 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -105,6 +105,7 @@ static intercept_handler_t instruction_handlers[256] = { [0xae] = kvm_s390_handle_sigp, [0xb2] = kvm_s390_handle_b2, [0xb7] = handle_lctl, + [0xe5] = kvm_s390_handle_e5, [0xeb] = handle_lctlg, }; @@ -159,22 +160,42 @@ static int handle_stop(struct kvm_vcpu *vcpu) static int handle_validity(struct kvm_vcpu *vcpu) { + unsigned long vmaddr; int viwhy = vcpu->arch.sie_block->ipb >> 16; int rc; vcpu->stat.exit_validity++; - if ((viwhy == 0x37) && (vcpu->arch.sie_block->prefix - <= kvm_s390_vcpu_get_memsize(vcpu) - 2*PAGE_SIZE)) { - rc = fault_in_pages_writeable((char __user *) - vcpu->arch.sie_block->gmsor + - vcpu->arch.sie_block->prefix, - 2*PAGE_SIZE); - if (rc) + if (viwhy == 0x37) { + vmaddr = gmap_fault(vcpu->arch.sie_block->prefix, + vcpu->arch.gmap); + if (IS_ERR_VALUE(vmaddr)) { + rc = -EOPNOTSUPP; + goto out; + } + rc = fault_in_pages_writeable((char __user *) vmaddr, + PAGE_SIZE); + if (rc) { + /* user will receive sigsegv, exit to user */ + rc = -EOPNOTSUPP; + goto out; + } + vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE, + vcpu->arch.gmap); + if (IS_ERR_VALUE(vmaddr)) { + rc = -EOPNOTSUPP; + goto out; + } + rc = fault_in_pages_writeable((char __user *) vmaddr, + PAGE_SIZE); + if (rc) { /* user will receive sigsegv, exit to user */ rc = -EOPNOTSUPP; + goto out; + } } else rc = -EOPNOTSUPP; +out: if (rc) VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d", viwhy); diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 35c21bf910c..c9aeb4b4d0b 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -128,6 +128,10 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, if (rc == -EFAULT) exception = 1; + rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, inti->emerg.code); + if (rc == -EFAULT) + exception = 1; + rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); if (rc == -EFAULT) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 67345ae7ce8..f17296e4fc8 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -62,6 +62,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "instruction_chsc", VCPU_STAT(instruction_chsc) }, { "instruction_stsi", VCPU_STAT(instruction_stsi) }, { "instruction_stfl", VCPU_STAT(instruction_stfl) }, + { "instruction_tprot", VCPU_STAT(instruction_tprot) }, { "instruction_sigp_sense", 
VCPU_STAT(instruction_sigp_sense) }, { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) }, { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) }, @@ -189,7 +190,13 @@ int kvm_arch_init_vm(struct kvm *kvm) debug_register_view(kvm->arch.dbf, &debug_sprintf_view); VM_EVENT(kvm, 3, "%s", "vm created"); + kvm->arch.gmap = gmap_alloc(current->mm); + if (!kvm->arch.gmap) + goto out_nogmap; + return 0; +out_nogmap: + debug_unregister(kvm->arch.dbf); out_nodbf: free_page((unsigned long)(kvm->arch.sca)); out_err: @@ -234,11 +241,13 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kvm_free_vcpus(kvm); free_page((unsigned long)(kvm->arch.sca)); debug_unregister(kvm->arch.dbf); + gmap_free(kvm->arch.gmap); } /* Section: vcpu related */ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { + vcpu->arch.gmap = vcpu->kvm->arch.gmap; return 0; } @@ -284,8 +293,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { - atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH); - set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests); + atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM); vcpu->arch.sie_block->ecb = 6; vcpu->arch.sie_block->eca = 0xC1002001U; vcpu->arch.sie_block->fac = (int) (long) facilities; @@ -453,6 +461,7 @@ static void __vcpu_run(struct kvm_vcpu *vcpu) local_irq_disable(); kvm_guest_enter(); local_irq_enable(); + gmap_enable(vcpu->arch.gmap); VCPU_EVENT(vcpu, 6, "entering sie flags %x", atomic_read(&vcpu->arch.sie_block->cpuflags)); if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) { @@ -461,6 +470,7 @@ static void __vcpu_run(struct kvm_vcpu *vcpu) } VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", vcpu->arch.sie_block->icptcode); + gmap_disable(vcpu->arch.gmap); local_irq_disable(); kvm_guest_exit(); local_irq_enable(); @@ -474,17 +484,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) sigset_t sigsaved; rerun_vcpu: - if (vcpu->requests) - if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) - kvm_s390_vcpu_set_mem(vcpu); - - /* verify, that memory has been registered */ - if (!vcpu->arch.sie_block->gmslm) { - vcpu_put(vcpu); - VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu"); - return -EINVAL; - } - if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); @@ -545,7 +544,7 @@ rerun_vcpu: return rc; } -static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from, +static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from, unsigned long n, int prefix) { if (prefix) @@ -562,7 +561,7 @@ static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from, */ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) { - const unsigned char archmode = 1; + unsigned char archmode = 1; int prefix; if (addr == KVM_S390_STORE_STATUS_NOADDR) { @@ -680,10 +679,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, if (mem->guest_phys_addr) return -EINVAL; - if (mem->userspace_addr & (PAGE_SIZE - 1)) + if (mem->userspace_addr & 0xffffful) return -EINVAL; - if (mem->memory_size & (PAGE_SIZE - 1)) + if (mem->memory_size & 0xffffful) return -EINVAL; if (!user_alloc) @@ -697,15 +696,14 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_memory_slot old, int user_alloc) { - int i; - struct kvm_vcpu *vcpu; + int rc; - /* request update of sie control block for all available vcpus */ - kvm_for_each_vcpu(i, vcpu, kvm) { - if (test_and_set_bit(KVM_REQ_MMU_RELOAD, 
&vcpu->requests)) - continue; - kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP); - } + + rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, + mem->guest_phys_addr, mem->memory_size); + if (rc) + printk(KERN_WARNING "kvm-s390: failed to commit memory region\n"); + return; } void kvm_arch_flush_shadow(struct kvm *kvm) diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index a7b7586626d..99b0b759711 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -58,35 +58,9 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action); -static inline long kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu) -{ - return vcpu->arch.sie_block->gmslm - - vcpu->arch.sie_block->gmsor - - VIRTIODESCSPACE + 1ul; -} - -static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu) -{ - int idx; - struct kvm_memory_slot *mem; - struct kvm_memslots *memslots; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - memslots = kvm_memslots(vcpu->kvm); - - mem = &memslots->memslots[0]; - - vcpu->arch.sie_block->gmsor = mem->userspace_addr; - vcpu->arch.sie_block->gmslm = - mem->userspace_addr + - (mem->npages << PAGE_SHIFT) + - VIRTIODESCSPACE - 1ul; - - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - /* implemented in priv.c */ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu); +int kvm_s390_handle_e5(struct kvm_vcpu *vcpu); /* implemented in sigp.c */ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 73c47bd95db..39162636108 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -326,3 +326,52 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu) } return -EOPNOTSUPP; } + +static int handle_tprot(struct kvm_vcpu *vcpu) +{ + int base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28; + int disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16; + int base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12; + int disp2 = vcpu->arch.sie_block->ipb & 0x0fff; + u64 address1 = disp1 + base1 ? vcpu->arch.guest_gprs[base1] : 0; + u64 address2 = disp2 + base2 ? vcpu->arch.guest_gprs[base2] : 0; + struct vm_area_struct *vma; + + vcpu->stat.instruction_tprot++; + + /* we only handle the Linux memory detection case: + * access key == 0 + * guest DAT == off + * everything else goes to userspace. */ + if (address2 & 0xf0) + return -EOPNOTSUPP; + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) + return -EOPNOTSUPP; + + + down_read(&current->mm->mmap_sem); + vma = find_vma(current->mm, + (unsigned long) __guestaddr_to_user(vcpu, address1)); + if (!vma) { + up_read(&current->mm->mmap_sem); + return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); + } + + vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); + if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ)) + vcpu->arch.sie_block->gpsw.mask |= (1ul << 44); + if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ)) + vcpu->arch.sie_block->gpsw.mask |= (2ul << 44); + + up_read(&current->mm->mmap_sem); + return 0; +} + +int kvm_s390_handle_e5(struct kvm_vcpu *vcpu) +{ + /* For e5xx... instructions we only handle TPROT */ + if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01) + return handle_tprot(vcpu); + return -EOPNOTSUPP; +} + diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S deleted file mode 100644 index 5faa1b1b23f..00000000000 --- a/arch/s390/kvm/sie64a.S +++ /dev/null @@ -1,98 +0,0 @@ -/* - * sie64a.S - low level sie call - * - * Copyright IBM Corp. 
2008,2010 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License (version 2 only) - * as published by the Free Software Foundation. - * - * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> - * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> - */ - -#include <linux/errno.h> -#include <asm/asm-offsets.h> -#include <asm/setup.h> -#include <asm/asm-offsets.h> -#include <asm/ptrace.h> -#include <asm/thread_info.h> - -_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) - -/* - * offsets into stackframe - * SP_ = offsets into stack sie64 is called with - * SPI_ = offsets into irq stack - */ -SP_GREGS = __SF_EMPTY -SP_HOOK = __SF_EMPTY+8 -SP_GPP = __SF_EMPTY+16 -SPI_PSW = STACK_FRAME_OVERHEAD + __PT_PSW - - - .macro SPP newpp - tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP - jz 0f - .insn s,0xb2800000,\newpp -0: - .endm - -sie_irq_handler: - SPP __LC_CMF_HPP # set host id - larl %r2,sie_inst - clg %r2,SPI_PSW+8(0,%r15) # intercepted sie - jne 1f - xc __LC_SIE_HOOK(8),__LC_SIE_HOOK - lg %r2,__LC_THREAD_INFO # pointer thread_info struct - tm __TI_flags+7(%r2),_TIF_EXIT_SIE - jz 0f - larl %r2,sie_exit # work pending, leave sie - stg %r2,SPI_PSW+8(0,%r15) - br %r14 -0: larl %r2,sie_reenter # re-enter with guest id - stg %r2,SPI_PSW+8(0,%r15) -1: br %r14 - -/* - * sie64a calling convention: - * %r2 pointer to sie control block - * %r3 guest register save area - */ - .globl sie64a -sie64a: - stg %r3,SP_GREGS(%r15) # save guest register save area - stmg %r6,%r14,__SF_GPRS(%r15) # save registers on entry - lgr %r14,%r2 # pointer to sie control block - larl %r5,sie_irq_handler - stg %r2,SP_GPP(%r15) - stg %r5,SP_HOOK(%r15) # save hook target - lmg %r0,%r13,0(%r3) # load guest gprs 0-13 -sie_reenter: - mvc __LC_SIE_HOOK(8),SP_HOOK(%r15) - SPP SP_GPP(%r15) # set guest id -sie_inst: - sie 0(%r14) - xc __LC_SIE_HOOK(8),__LC_SIE_HOOK - SPP __LC_CMF_HPP # set host id -sie_exit: - lg %r14,SP_GREGS(%r15) - stmg %r0,%r13,0(%r14) # save guest gprs 0-13 - lghi %r2,0 - lmg %r6,%r14,__SF_GPRS(%r15) - br %r14 - -sie_err: - xc __LC_SIE_HOOK(8),__LC_SIE_HOOK - SPP __LC_CMF_HPP # set host id - lg %r14,SP_GREGS(%r15) - stmg %r0,%r13,0(%r14) # save guest gprs 0-13 - lghi %r2,-EFAULT - lmg %r6,%r14,__SF_GPRS(%r15) - br %r14 - - .section __ex_table,"a" - .quad sie_inst,sie_err - .quad sie_exit,sie_err - .quad sie_reenter,sie_err - .previous diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 702276f5e2f..d6a50c1fb2e 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -189,10 +189,8 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, /* make sure that the new value is valid memory */ address = address & 0x7fffe000u; - if ((copy_from_user(&tmp, (void __user *) - (address + vcpu->arch.sie_block->gmsor) , 1)) || - (copy_from_user(&tmp, (void __user *)(address + - vcpu->arch.sie_block->gmsor + PAGE_SIZE), 1))) { + if (copy_from_guest_absolute(vcpu, &tmp, address, 1) || + copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) { *reg |= SIGP_STAT_INVALID_PARAMETER; return 1; /* invalid parameter */ } diff --git a/arch/s390/lib/qrnnd.S b/arch/s390/lib/qrnnd.S index eb1df632e74..d321329130e 100644 --- a/arch/s390/lib/qrnnd.S +++ b/arch/s390/lib/qrnnd.S @@ -1,5 +1,7 @@ # S/390 __udiv_qrnnd +#include <linux/linkage.h> + # r2 : &__r # r3 : upper half of 64 bit word n # r4 : lower half of 64 bit word n @@ -8,8 +10,7 @@ # the quotient q is to be returned .text - .globl 
__udiv_qrnnd -__udiv_qrnnd: +ENTRY(__udiv_qrnnd) st %r2,24(%r15) # store pointer to remainder for later lr %r0,%r3 # reload n lr %r1,%r4 diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index fe103e891e7..9564fc779b2 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -299,13 +299,28 @@ static inline int do_exception(struct pt_regs *regs, int access, goto out; address = trans_exc_code & __FAIL_ADDR_MASK; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); flags = FAULT_FLAG_ALLOW_RETRY; if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400) flags |= FAULT_FLAG_WRITE; -retry: down_read(&mm->mmap_sem); +#ifdef CONFIG_PGSTE + if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) { + address = gmap_fault(address, + (struct gmap *) S390_lowcore.gmap); + if (address == -EFAULT) { + fault = VM_FAULT_BADMAP; + goto out_up; + } + if (address == -ENOMEM) { + fault = VM_FAULT_OOM; + goto out_up; + } + } +#endif + +retry: fault = VM_FAULT_BADMAP; vma = find_vma(mm, address); if (!vma) @@ -345,17 +360,18 @@ retry: if (flags & FAULT_FLAG_ALLOW_RETRY) { if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); } else { tsk->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } if (fault & VM_FAULT_RETRY) { /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk * of starvation. */ flags &= ~FAULT_FLAG_ALLOW_RETRY; + down_read(&mm->mmap_sem); goto retry; } } diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index a4d856db915..597bb2d27c3 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -35,7 +35,7 @@ int arch_prepare_hugepage(struct page *page) if (MACHINE_HAS_HPAGE) return 0; - ptep = (pte_t *) pte_alloc_one(&init_mm, address); + ptep = (pte_t *) pte_alloc_one(&init_mm, addr); if (!ptep) return -ENOMEM; diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index 51e5cd9b906..5dbbaa6e594 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c @@ -85,3 +85,19 @@ int memcpy_real(void *dest, void *src, size_t count) arch_local_irq_restore(flags); return rc; } + +/* + * Copy memory to absolute zero + */ +void copy_to_absolute_zero(void *dest, void *src, size_t count) +{ + unsigned long cr0; + + BUG_ON((unsigned long) dest + count >= sizeof(struct _lowcore)); + preempt_disable(); + __ctl_store(cr0, 0, 0); + __ctl_clear_bit(0, 28); /* disable lowcore protection */ + memcpy_real(dest + store_prefix(), src, count); + __ctl_load(cr0, 0, 0); + preempt_enable(); +} diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 37a23c22370..4d1f2bce87b 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -16,6 +16,7 @@ #include <linux/module.h> #include <linux/quicklist.h> #include <linux/rcupdate.h> +#include <linux/slab.h> #include <asm/system.h> #include <asm/pgtable.h> @@ -133,30 +134,374 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) } #endif -static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits) +#ifdef CONFIG_PGSTE + +/** + * gmap_alloc - allocate a guest address space + * @mm: pointer to the parent mm_struct + * + * Returns a guest address space structure. 
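
For orientation, a minimal sketch of how the gmap interface added here is consumed by the rest of this patch, pieced together from the kvm-s390.c hunks above (error handling trimmed; not a literal excerpt):

	/* VM creation (kvm_arch_init_vm) */
	kvm->arch.gmap = gmap_alloc(current->mm);

	/* back a guest physical range with user memory
	   (kvm_arch_commit_memory_region) */
	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);

	/* around each SIE entry (__vcpu_run) */
	gmap_enable(vcpu->arch.gmap);
	rc = sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
	gmap_disable(vcpu->arch.gmap);

	/* VM teardown (kvm_arch_destroy_vm) */
	gmap_free(kvm->arch.gmap);
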
+ */ +struct gmap *gmap_alloc(struct mm_struct *mm) { - unsigned int old, new; + struct gmap *gmap; + struct page *page; + unsigned long *table; - do { - old = atomic_read(v); - new = old ^ bits; - } while (atomic_cmpxchg(v, old, new) != old); - return new; + gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL); + if (!gmap) + goto out; + INIT_LIST_HEAD(&gmap->crst_list); + gmap->mm = mm; + page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); + if (!page) + goto out_free; + list_add(&page->lru, &gmap->crst_list); + table = (unsigned long *) page_to_phys(page); + crst_table_init(table, _REGION1_ENTRY_EMPTY); + gmap->table = table; + list_add(&gmap->list, &mm->context.gmap_list); + return gmap; + +out_free: + kfree(gmap); +out: + return NULL; } +EXPORT_SYMBOL_GPL(gmap_alloc); -/* - * page table entry allocation/free routines. +static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table) +{ + struct gmap_pgtable *mp; + struct gmap_rmap *rmap; + struct page *page; + + if (*table & _SEGMENT_ENTRY_INV) + return 0; + page = pfn_to_page(*table >> PAGE_SHIFT); + mp = (struct gmap_pgtable *) page->index; + list_for_each_entry(rmap, &mp->mapper, list) { + if (rmap->entry != table) + continue; + list_del(&rmap->list); + kfree(rmap); + break; + } + *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr; + return 1; +} + +static void gmap_flush_tlb(struct gmap *gmap) +{ + if (MACHINE_HAS_IDTE) + __tlb_flush_idte((unsigned long) gmap->table | + _ASCE_TYPE_REGION1); + else + __tlb_flush_global(); +} + +/** + * gmap_free - free a guest address space + * @gmap: pointer to the guest address space structure */ -#ifdef CONFIG_PGSTE -static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm) +void gmap_free(struct gmap *gmap) +{ + struct page *page, *next; + unsigned long *table; + int i; + + + /* Flush tlb. */ + if (MACHINE_HAS_IDTE) + __tlb_flush_idte((unsigned long) gmap->table | + _ASCE_TYPE_REGION1); + else + __tlb_flush_global(); + + /* Free all segment & region tables. */ + down_read(&gmap->mm->mmap_sem); + list_for_each_entry_safe(page, next, &gmap->crst_list, lru) { + table = (unsigned long *) page_to_phys(page); + if ((*table & _REGION_ENTRY_TYPE_MASK) == 0) + /* Remove gmap rmap structures for segment table. */ + for (i = 0; i < PTRS_PER_PMD; i++, table++) + gmap_unlink_segment(gmap, table); + __free_pages(page, ALLOC_ORDER); + } + up_read(&gmap->mm->mmap_sem); + list_del(&gmap->list); + kfree(gmap); +} +EXPORT_SYMBOL_GPL(gmap_free); + +/** + * gmap_enable - switch primary space to the guest address space + * @gmap: pointer to the guest address space structure + */ +void gmap_enable(struct gmap *gmap) +{ + /* Load primary space page table origin. */ + S390_lowcore.user_asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH | + _ASCE_USER_BITS | __pa(gmap->table); + asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) ); + S390_lowcore.gmap = (unsigned long) gmap; +} +EXPORT_SYMBOL_GPL(gmap_enable); + +/** + * gmap_disable - switch back to the standard primary address space + * @gmap: pointer to the guest address space structure + */ +void gmap_disable(struct gmap *gmap) +{ + /* Load primary space page table origin. 
*/ + S390_lowcore.user_asce = + gmap->mm->context.asce_bits | __pa(gmap->mm->pgd); + asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) ); + S390_lowcore.gmap = 0UL; +} +EXPORT_SYMBOL_GPL(gmap_disable); + +static int gmap_alloc_table(struct gmap *gmap, + unsigned long *table, unsigned long init) +{ + struct page *page; + unsigned long *new; + + page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); + if (!page) + return -ENOMEM; + new = (unsigned long *) page_to_phys(page); + crst_table_init(new, init); + down_read(&gmap->mm->mmap_sem); + if (*table & _REGION_ENTRY_INV) { + list_add(&page->lru, &gmap->crst_list); + *table = (unsigned long) new | _REGION_ENTRY_LENGTH | + (*table & _REGION_ENTRY_TYPE_MASK); + } else + __free_pages(page, ALLOC_ORDER); + up_read(&gmap->mm->mmap_sem); + return 0; +} + +/** + * gmap_unmap_segment - unmap segment from the guest address space + * @gmap: pointer to the guest address space structure + * @to: address in the guest address space + * @len: length of the memory area to unmap + * + * Returns 0 if the unmap succeeded, -EINVAL if not. + */ +int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) +{ + unsigned long *table; + unsigned long off; + int flush; + + if ((to | len) & (PMD_SIZE - 1)) + return -EINVAL; + if (len == 0 || to + len < to) + return -EINVAL; + + flush = 0; + down_read(&gmap->mm->mmap_sem); + for (off = 0; off < len; off += PMD_SIZE) { + /* Walk the guest addr space page table */ + table = gmap->table + (((to + off) >> 53) & 0x7ff); + if (*table & _REGION_ENTRY_INV) + return 0; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + table = table + (((to + off) >> 42) & 0x7ff); + if (*table & _REGION_ENTRY_INV) + return 0; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + table = table + (((to + off) >> 31) & 0x7ff); + if (*table & _REGION_ENTRY_INV) + return 0; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + table = table + (((to + off) >> 20) & 0x7ff); + + /* Clear segment table entry in guest address space. */ + flush |= gmap_unlink_segment(gmap, table); + *table = _SEGMENT_ENTRY_INV; + } + up_read(&gmap->mm->mmap_sem); + if (flush) + gmap_flush_tlb(gmap); + return 0; +} +EXPORT_SYMBOL_GPL(gmap_unmap_segment); + +/** + * gmap_map_segment - map a segment to the guest address space + * @gmap: pointer to the guest address space structure + * @from: source address in the parent address space + * @to: target address in the guest address space + * @len: length of the memory area to map + * + * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not. 
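
The walkers above repeat the expression (addr >> shift) & 0x7ff for shifts 53, 42, 31 and 20: four table levels with 11 index bits (2048 entries) each, the segment level covering bits 20-30 of the guest address (PMD_SIZE, i.e. 1 MB segments). A hypothetical helper, purely illustrative and not part of the patch, makes the arithmetic explicit:

	static inline unsigned long gmap_idx(unsigned long addr, unsigned int shift)
	{
		return (addr >> shift) & 0x7ffUL;	/* 11 index bits per level */
	}
	/* region-first: gmap_idx(addr, 53), region-second: gmap_idx(addr, 42),
	   region-third: gmap_idx(addr, 31), segment: gmap_idx(addr, 20) */
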
+ */ +int gmap_map_segment(struct gmap *gmap, unsigned long from, + unsigned long to, unsigned long len) +{ + unsigned long *table; + unsigned long off; + int flush; + + if ((from | to | len) & (PMD_SIZE - 1)) + return -EINVAL; + if (len == 0 || from + len > PGDIR_SIZE || + from + len < from || to + len < to) + return -EINVAL; + + flush = 0; + down_read(&gmap->mm->mmap_sem); + for (off = 0; off < len; off += PMD_SIZE) { + /* Walk the gmap address space page table */ + table = gmap->table + (((to + off) >> 53) & 0x7ff); + if ((*table & _REGION_ENTRY_INV) && + gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY)) + goto out_unmap; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + table = table + (((to + off) >> 42) & 0x7ff); + if ((*table & _REGION_ENTRY_INV) && + gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY)) + goto out_unmap; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + table = table + (((to + off) >> 31) & 0x7ff); + if ((*table & _REGION_ENTRY_INV) && + gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY)) + goto out_unmap; + table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN); + table = table + (((to + off) >> 20) & 0x7ff); + + /* Store 'from' address in an invalid segment table entry. */ + flush |= gmap_unlink_segment(gmap, table); + *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off); + } + up_read(&gmap->mm->mmap_sem); + if (flush) + gmap_flush_tlb(gmap); + return 0; + +out_unmap: + up_read(&gmap->mm->mmap_sem); + gmap_unmap_segment(gmap, to, len); + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(gmap_map_segment); + +unsigned long gmap_fault(unsigned long address, struct gmap *gmap) +{ + unsigned long *table, vmaddr, segment; + struct mm_struct *mm; + struct gmap_pgtable *mp; + struct gmap_rmap *rmap; + struct vm_area_struct *vma; + struct page *page; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + + current->thread.gmap_addr = address; + mm = gmap->mm; + /* Walk the gmap address space page table */ + table = gmap->table + ((address >> 53) & 0x7ff); + if (unlikely(*table & _REGION_ENTRY_INV)) + return -EFAULT; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + table = table + ((address >> 42) & 0x7ff); + if (unlikely(*table & _REGION_ENTRY_INV)) + return -EFAULT; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + table = table + ((address >> 31) & 0x7ff); + if (unlikely(*table & _REGION_ENTRY_INV)) + return -EFAULT; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + table = table + ((address >> 20) & 0x7ff); + + /* Convert the gmap address to an mm address. */ + segment = *table; + if (likely(!(segment & _SEGMENT_ENTRY_INV))) { + page = pfn_to_page(segment >> PAGE_SHIFT); + mp = (struct gmap_pgtable *) page->index; + return mp->vmaddr | (address & ~PMD_MASK); + } else if (segment & _SEGMENT_ENTRY_RO) { + vmaddr = segment & _SEGMENT_ENTRY_ORIGIN; + vma = find_vma(mm, vmaddr); + if (!vma || vma->vm_start > vmaddr) + return -EFAULT; + + /* Walk the parent mm page table */ + pgd = pgd_offset(mm, vmaddr); + pud = pud_alloc(mm, pgd, vmaddr); + if (!pud) + return -ENOMEM; + pmd = pmd_alloc(mm, pud, vmaddr); + if (!pmd) + return -ENOMEM; + if (!pmd_present(*pmd) && + __pte_alloc(mm, vma, pmd, vmaddr)) + return -ENOMEM; + /* pmd now points to a valid segment table entry. */ + rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT); + if (!rmap) + return -ENOMEM; + /* Link gmap segment table entry location to page table. 
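
Note that gmap_fault() folds its error codes into the unsigned long return value, so callers either test with IS_ERR_VALUE() or compare against -EFAULT/-ENOMEM directly, as the intercept.c and fault.c hunks elsewhere in this patch do. A minimal caller sketch (gaddr and gmap stand for the caller's values):

	unsigned long vmaddr;

	vmaddr = gmap_fault(gaddr, gmap);
	if (IS_ERR_VALUE(vmaddr))
		return (long) vmaddr;	/* -EFAULT or -ENOMEM */
	/* vmaddr is now the parent mm address backing gaddr */
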
*/ + page = pmd_page(*pmd); + mp = (struct gmap_pgtable *) page->index; + rmap->entry = table; + list_add(&rmap->list, &mp->mapper); + /* Set gmap segment table entry to page table. */ + *table = pmd_val(*pmd) & PAGE_MASK; + return vmaddr | (address & ~PMD_MASK); + } + return -EFAULT; + +} +EXPORT_SYMBOL_GPL(gmap_fault); + +void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table) +{ + struct gmap_rmap *rmap, *next; + struct gmap_pgtable *mp; + struct page *page; + int flush; + + flush = 0; + spin_lock(&mm->page_table_lock); + page = pfn_to_page(__pa(table) >> PAGE_SHIFT); + mp = (struct gmap_pgtable *) page->index; + list_for_each_entry_safe(rmap, next, &mp->mapper, list) { + *rmap->entry = + _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr; + list_del(&rmap->list); + kfree(rmap); + flush = 1; + } + spin_unlock(&mm->page_table_lock); + if (flush) + __tlb_flush_global(); +} + +static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, + unsigned long vmaddr) { struct page *page; unsigned long *table; + struct gmap_pgtable *mp; page = alloc_page(GFP_KERNEL|__GFP_REPEAT); if (!page) return NULL; + mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT); + if (!mp) { + __free_page(page); + return NULL; + } pgtable_page_ctor(page); + mp->vmaddr = vmaddr & PMD_MASK; + INIT_LIST_HEAD(&mp->mapper); + page->index = (unsigned long) mp; atomic_set(&page->_mapcount, 3); table = (unsigned long *) page_to_phys(page); clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2); @@ -167,24 +512,58 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm) static inline void page_table_free_pgste(unsigned long *table) { struct page *page; + struct gmap_pgtable *mp; page = pfn_to_page(__pa(table) >> PAGE_SHIFT); + mp = (struct gmap_pgtable *) page->index; + BUG_ON(!list_empty(&mp->mapper)); pgtable_page_ctor(page); atomic_set(&page->_mapcount, -1); + kfree(mp); __free_page(page); } -#endif -unsigned long *page_table_alloc(struct mm_struct *mm) +#else /* CONFIG_PGSTE */ + +static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, + unsigned long vmaddr) +{ + return NULL; +} + +static inline void page_table_free_pgste(unsigned long *table) +{ +} + +static inline void gmap_unmap_notifier(struct mm_struct *mm, + unsigned long *table) +{ +} + +#endif /* CONFIG_PGSTE */ + +static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits) +{ + unsigned int old, new; + + do { + old = atomic_read(v); + new = old ^ bits; + } while (atomic_cmpxchg(v, old, new) != old); + return new; +} + +/* + * page table entry allocation/free routines. 
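
page_table_alloc_pgste() above parks a descriptor pointer in page->index; from the way this hunk uses mp->vmaddr, mp->mapper, rmap->entry and rmap->list, the two structures look roughly like this (a reading aid only; the real declarations live in the s390 headers, not in this diff):

	struct gmap_rmap {
		struct list_head list;		/* links into gmap_pgtable.mapper */
		unsigned long *entry;		/* gmap segment table slot to fix up */
	};

	struct gmap_pgtable {
		unsigned long vmaddr;		/* parent mm address, PMD aligned */
		struct list_head mapper;	/* gmap_rmap entries for this table */
	};
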
+ */ +unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr) { struct page *page; unsigned long *table; unsigned int mask, bit; -#ifdef CONFIG_PGSTE if (mm_has_pgste(mm)) - return page_table_alloc_pgste(mm); -#endif + return page_table_alloc_pgste(mm, vmaddr); /* Allocate fragments of a 4K page as 1K/2K page table */ spin_lock_bh(&mm->context.list_lock); mask = FRAG_MASK; @@ -222,10 +601,10 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) struct page *page; unsigned int bit, mask; -#ifdef CONFIG_PGSTE - if (mm_has_pgste(mm)) + if (mm_has_pgste(mm)) { + gmap_unmap_notifier(mm, table); return page_table_free_pgste(table); -#endif + } /* Free 1K/2K page table fragment of a 4K page */ page = pfn_to_page(__pa(table) >> PAGE_SHIFT); bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t))); @@ -249,10 +628,8 @@ static void __page_table_free_rcu(void *table, unsigned bit) { struct page *page; -#ifdef CONFIG_PGSTE if (bit == FRAG_MASK) return page_table_free_pgste(table); -#endif /* Free 1K/2K page table fragment of a 4K page */ page = pfn_to_page(__pa(table) >> PAGE_SHIFT); if (atomic_xor_bits(&page->_mapcount, bit) == 0) { @@ -269,13 +646,12 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table) unsigned int bit, mask; mm = tlb->mm; -#ifdef CONFIG_PGSTE if (mm_has_pgste(mm)) { + gmap_unmap_notifier(mm, table); table = (unsigned long *) (__pa(table) | FRAG_MASK); tlb_remove_table(tlb, table); return; } -#endif bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t))); page = pfn_to_page(__pa(table) >> PAGE_SHIFT); spin_lock_bh(&mm->context.list_lock); diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index 8c1970d1dd9..781ff516956 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -61,12 +61,12 @@ static inline pmd_t *vmem_pmd_alloc(void) return pmd; } -static pte_t __ref *vmem_pte_alloc(void) +static pte_t __ref *vmem_pte_alloc(unsigned long address) { pte_t *pte; if (slab_is_available()) - pte = (pte_t *) page_table_alloc(&init_mm); + pte = (pte_t *) page_table_alloc(&init_mm, address); else pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t)); if (!pte) @@ -120,7 +120,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) } #endif if (pmd_none(*pm_dir)) { - pt_dir = vmem_pte_alloc(); + pt_dir = vmem_pte_alloc(address); if (!pt_dir) goto out; pmd_populate(&init_mm, pm_dir, pt_dir); @@ -205,7 +205,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) pm_dir = pmd_offset(pu_dir, address); if (pmd_none(*pm_dir)) { - pt_dir = vmem_pte_alloc(); + pt_dir = vmem_pte_alloc(address); if (!pt_dir) goto out; pmd_populate(&init_mm, pm_dir, pt_dir); diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index 5995e9bc72d..6efc18b5e60 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c @@ -13,8 +13,6 @@ #include <linux/oprofile.h> #include <linux/init.h> #include <linux/errno.h> -#include <linux/oprofile.h> -#include <linux/errno.h> #include <linux/fs.h> #include "../../../drivers/oprofile/oprof.h" @@ -25,7 +23,7 @@ extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth); #include "hwsampler.h" -#define DEFAULT_INTERVAL 4096 +#define DEFAULT_INTERVAL 4127518 #define DEFAULT_SDBT_BLOCKS 1 #define DEFAULT_SDB_BLOCKS 511 @@ -151,6 +149,12 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops) if (oprofile_max_interval == 0) return -ENODEV; + /* The initial value should be sane */ + if 
(oprofile_hw_interval < oprofile_min_interval) + oprofile_hw_interval = oprofile_min_interval; + if (oprofile_hw_interval > oprofile_max_interval) + oprofile_hw_interval = oprofile_max_interval; + if (oprofile_timer_init(ops)) return -ENODEV;
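
The two range checks added here clamp the initial sampling interval into the window the hardware sampler reports; with the clamp() helper from linux/kernel.h the same fix could be spelled in one statement (an equivalent form, not what the patch uses):

	oprofile_hw_interval = clamp(oprofile_hw_interval,
				     oprofile_min_interval,
				     oprofile_max_interval);
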