Diffstat (limited to 'arch/s390')
-rw-r--r--   arch/s390/Kconfig                  |  28
-rw-r--r--   arch/s390/include/asm/bug.h        |   2
-rw-r--r--   arch/s390/include/asm/cputime.h    |   6
-rw-r--r--   arch/s390/include/asm/spinlock.h   |  29
-rw-r--r--   arch/s390/kernel/early.c           |   9
-rw-r--r--   arch/s390/kernel/entry.S           |   8
-rw-r--r--   arch/s390/kernel/entry64.S         |   8
-rw-r--r--   arch/s390/kernel/ftrace.c          |  67
-rw-r--r--   arch/s390/kernel/ipl.c             |   7
-rw-r--r--   arch/s390/kernel/smp.c             |   7
-rw-r--r--   arch/s390/kernel/swsusp_asm64.S    |   2
11 files changed, 60 insertions, 113 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 43c0acad716..16c673096a2 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -95,6 +95,34 @@ config S390
 	select HAVE_ARCH_TRACEHOOK
 	select INIT_ALL_POSSIBLE
 	select HAVE_PERF_EVENTS
+	select ARCH_INLINE_SPIN_TRYLOCK
+	select ARCH_INLINE_SPIN_TRYLOCK_BH
+	select ARCH_INLINE_SPIN_LOCK
+	select ARCH_INLINE_SPIN_LOCK_BH
+	select ARCH_INLINE_SPIN_LOCK_IRQ
+	select ARCH_INLINE_SPIN_LOCK_IRQSAVE
+	select ARCH_INLINE_SPIN_UNLOCK
+	select ARCH_INLINE_SPIN_UNLOCK_BH
+	select ARCH_INLINE_SPIN_UNLOCK_IRQ
+	select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
+	select ARCH_INLINE_READ_TRYLOCK
+	select ARCH_INLINE_READ_LOCK
+	select ARCH_INLINE_READ_LOCK_BH
+	select ARCH_INLINE_READ_LOCK_IRQ
+	select ARCH_INLINE_READ_LOCK_IRQSAVE
+	select ARCH_INLINE_READ_UNLOCK
+	select ARCH_INLINE_READ_UNLOCK_BH
+	select ARCH_INLINE_READ_UNLOCK_IRQ
+	select ARCH_INLINE_READ_UNLOCK_IRQRESTORE
+	select ARCH_INLINE_WRITE_TRYLOCK
+	select ARCH_INLINE_WRITE_LOCK
+	select ARCH_INLINE_WRITE_LOCK_BH
+	select ARCH_INLINE_WRITE_LOCK_IRQ
+	select ARCH_INLINE_WRITE_LOCK_IRQSAVE
+	select ARCH_INLINE_WRITE_UNLOCK
+	select ARCH_INLINE_WRITE_UNLOCK_BH
+	select ARCH_INLINE_WRITE_UNLOCK_IRQ
+	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
 
 config SCHED_OMIT_FRAME_POINTER
 	bool
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
index 7efd0abe888..efb74fd5156 100644
--- a/arch/s390/include/asm/bug.h
+++ b/arch/s390/include/asm/bug.h
@@ -49,7 +49,7 @@
 
 #define BUG() do {					\
 	__EMIT_BUG(0);					\
-	for (;;);					\
+	unreachable();					\
 } while (0)
 
 #define WARN_ON(x) ({					\
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 24b1244aadb..f23961ada7f 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -78,7 +78,7 @@ cputime64_to_jiffies64(cputime64_t cputime)
 static inline unsigned int
 cputime_to_msecs(const cputime_t cputime)
 {
-	return __div(cputime, 4096000);
+	return cputime_div(cputime, 4096000);
 }
 
 static inline cputime_t
@@ -160,7 +160,7 @@ cputime_to_timeval(const cputime_t cputime, struct timeval *value)
 static inline clock_t
 cputime_to_clock_t(cputime_t cputime)
 {
-	return __div(cputime, 4096000000ULL / USER_HZ);
+	return cputime_div(cputime, 4096000000ULL / USER_HZ);
 }
 
 static inline cputime_t
@@ -175,7 +175,7 @@ clock_t_to_cputime(unsigned long x)
 static inline clock_t
 cputime64_to_clock_t(cputime64_t cputime)
 {
-	return __div(cputime, 4096000000ULL / USER_HZ);
+	return cputime_div(cputime, 4096000000ULL / USER_HZ);
 }
 
 struct s390_idle_data {
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 41ce6861174..c9af0d19c7a 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -191,33 +191,4 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()
 
-#define __always_inline__spin_lock
-#define __always_inline__read_lock
-#define __always_inline__write_lock
-#define __always_inline__spin_lock_bh
-#define __always_inline__read_lock_bh
-#define __always_inline__write_lock_bh
-#define __always_inline__spin_lock_irq
-#define __always_inline__read_lock_irq
-#define __always_inline__write_lock_irq
-#define __always_inline__spin_lock_irqsave
-#define __always_inline__read_lock_irqsave
-#define __always_inline__write_lock_irqsave
-#define __always_inline__spin_trylock
-#define __always_inline__read_trylock
-#define __always_inline__write_trylock
-#define __always_inline__spin_trylock_bh
-#define __always_inline__spin_unlock
-#define __always_inline__read_unlock
-#define __always_inline__write_unlock
-#define __always_inline__spin_unlock_bh
-#define __always_inline__read_unlock_bh
-#define __always_inline__write_unlock_bh
-#define __always_inline__spin_unlock_irq
-#define __always_inline__read_unlock_irq
-#define __always_inline__write_unlock_irq
-#define __always_inline__spin_unlock_irqrestore
-#define __always_inline__read_unlock_irqrestore
-#define __always_inline__write_unlock_irqrestore
-
 #endif /* __ASM_SPINLOCK_H */
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index bf8b4ae7ff2..e49e9e0c69f 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -55,6 +55,7 @@ static void __init reset_tod_clock(void)
 		disabled_wait(0);
 
 	sched_clock_base_cc = TOD_UNIX_EPOCH;
+	S390_lowcore.last_update_clock = sched_clock_base_cc;
 }
 
 #ifdef CONFIG_SHARED_KERNEL
@@ -167,6 +168,14 @@ static noinline __init void create_kernel_nss(void)
 		return;
 	}
 
+	/* re-initialize cputime accounting. */
+	sched_clock_base_cc = get_clock();
+	S390_lowcore.last_update_clock = sched_clock_base_cc;
+	S390_lowcore.last_update_timer = 0x7fffffffffffffffULL;
+	S390_lowcore.user_timer = 0;
+	S390_lowcore.system_timer = 0;
+	asm volatile("SPT 0(%0)" : : "a" (&S390_lowcore.last_update_timer));
+
 	/* re-setup boot command line with new ipl vm parms */
 	ipl_update_parameters();
 	setup_boot_command_line();
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index f43d2ee5446..48215d15762 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -565,10 +565,10 @@ pgm_svcper:
 	lh	%r7,0x8a		# get svc number from lowcore
 	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	TRACE_IRQS_OFF
-	l	%r1,__TI_task(%r9)
-	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
-	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
-	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
+	l	%r8,__TI_task(%r9)
+	mvc	__THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
+	mvc	__THREAD_per+__PER_address(4,%r8),__LC_PER_ADDRESS
+	mvc	__THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
 	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
 	TRACE_IRQS_ON
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index a6f7b20df61..9aff1d449b6 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -543,10 +543,10 @@ pgm_svcper:
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 	llgh	%r7,__LC_SVC_INT_CODE	# get svc number from lowcore
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
-	lg	%r1,__TI_task(%r9)
-	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
-	mvc	__THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
-	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
+	lg	%r8,__TI_task(%r9)
+	mvc	__THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
+	mvc	__THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS
+	mvc	__THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
 	oi	__TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
 	TRACE_IRQS_ON
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index f5fe34dd821..5a82bc68193 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -203,73 +203,10 @@ out:
 
 #ifdef CONFIG_FTRACE_SYSCALLS
 
-extern unsigned long __start_syscalls_metadata[];
-extern unsigned long __stop_syscalls_metadata[];
 extern unsigned int sys_call_table[];
 
-static struct syscall_metadata **syscalls_metadata;
-
-struct syscall_metadata *syscall_nr_to_meta(int nr)
-{
-	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
-		return NULL;
-
-	return syscalls_metadata[nr];
-}
-
-int syscall_name_to_nr(char *name)
-{
-	int i;
-
-	if (!syscalls_metadata)
-		return -1;
-	for (i = 0; i < NR_syscalls; i++)
-		if (syscalls_metadata[i])
-			if (!strcmp(syscalls_metadata[i]->name, name))
-				return i;
-	return -1;
-}
-
-void set_syscall_enter_id(int num, int id)
-{
-	syscalls_metadata[num]->enter_id = id;
-}
-
-void set_syscall_exit_id(int num, int id)
+unsigned long __init arch_syscall_addr(int nr)
 {
-	syscalls_metadata[num]->exit_id = id;
-}
-
-static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
-{
-	struct syscall_metadata *start;
-	struct syscall_metadata *stop;
-	char str[KSYM_SYMBOL_LEN];
-
-	start = (struct syscall_metadata *)__start_syscalls_metadata;
-	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
-	kallsyms_lookup(syscall, NULL, NULL, NULL, str);
-
-	for ( ; start < stop; start++) {
-		if (start->name && !strcmp(start->name + 3, str + 3))
-			return start;
-	}
-	return NULL;
-}
-
-static int __init arch_init_ftrace_syscalls(void)
-{
-	struct syscall_metadata *meta;
-	int i;
-	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls,
-				    GFP_KERNEL);
-	if (!syscalls_metadata)
-		return -ENOMEM;
-	for (i = 0; i < NR_syscalls; i++) {
-		meta = find_syscall_meta((unsigned long)sys_call_table[i]);
-		syscalls_metadata[i] = meta;
-	}
-	return 0;
+	return (unsigned long)sys_call_table[nr];
 }
-arch_initcall(arch_init_ftrace_syscalls);
 #endif
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index ee57a42e6e9..4890ac6d7fa 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1595,10 +1595,9 @@ static void stop_run(struct shutdown_trigger *trigger)
 {
 	if (strcmp(trigger->name, ON_PANIC_STR) == 0)
 		disabled_wait((unsigned long) __builtin_return_address(0));
-	else {
-		signal_processor(smp_processor_id(), sigp_stop);
-		for (;;);
-	}
+	while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy)
+		cpu_relax();
+	for (;;);
 }
 
 static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR,
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index c932caa5e85..93e52039321 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -76,7 +76,6 @@ static int cpu_stopped(int cpu)
 	__u32 status;
 
 	switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
-	case sigp_order_code_accepted:
 	case sigp_status_stored:
 		/* Check for stopped and check stop state */
 		if (status & 0x50)
@@ -638,6 +637,8 @@ void __cpu_die(unsigned int cpu)
 	/* Wait until target cpu is down */
 	while (!cpu_stopped(cpu))
 		cpu_relax();
+	while (signal_processor_p(0, cpu, sigp_set_prefix) == sigp_busy)
+		udelay(10);
 	smp_free_lowcore(cpu);
 	pr_info("Processor %d stopped\n", cpu);
 }
@@ -645,8 +646,8 @@
 void cpu_die(void)
 {
 	idle_task_exit();
-	signal_processor(smp_processor_id(), sigp_stop);
-	BUG();
+	while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy)
+		cpu_relax();
 	for (;;);
 }
 
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index 7c8653e27db..0c26cc1898e 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -199,6 +199,7 @@ pgm_check_entry:
 	brc	2,4b				/* busy, try again */
5:
 	sigp	%r9,%r2,__SIGP_STOP		/* stop resume (current) CPU */
+	brc	2,5b				/* busy, try again */
6:	j	6b
 
restart_suspend:
@@ -206,6 +207,7 @@ restart_suspend:
 	llgh	%r2,0(%r1)
7:
 	sigp	%r9,%r2,__SIGP_SENSE		/* Wait for resume CPU */
+	brc	8,7b				/* accepted, status 0, still running */
 	brc	2,7b				/* busy, try again */
 	tmll	%r9,0x40			/* Test if resume CPU is stopped */
 	jz	7b