author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-07-24 09:49:09 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-07-24 09:49:09 -0700
commit | e017507f37d5cb8b541df165a824958bc333bec3 (patch)
tree | 87ab0986474486623d9957efda9f3f9ea3b14069 /arch/s390/kernel
parent | 759e2a253aaefbb6da9253d517a5a8fe1801ce0f (diff)
parent | 27f6b416626a240e1b46f646d2e0c5266f4eac95 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 changes from Martin Schwidefsky:
"No new functions, a few changes to make the code more robust, some
cleanups and bug fixes."
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (21 commits)
s390/vtimer: rework virtual timer interface
s390/dis: Add the servc instruction to the disassembler.
s390/comments: unify copyright messages and remove file names
s390/lgr: Add init check to lgr_info_log()
s390/cpu init: use __get_cpu_var instead of per_cpu
s390/idle: reduce size of s390_idle_data structure
s390/idle: fix sequence handling vs cpu hotplug
s390/ap: resend enable adapter interrupt request.
s390/hypfs: Add missing get_next_ino()
s390/dasd: add shutdown action
s390/ipl: Fix ipib handling for "dumpreipl" shutdown action
s390/smp: make absolute lowcore / cpu restart parameter accesses more robust
s390/vmlogrdr: cleanup driver attribute usage
s390/vmlogrdr: cleanup device attribute usage
s390/ccwgroup: remove unused ccwgroup_device member
s390/cio/chp: cleanup attribute usage
s390/sigp: use sigp order code defines in assembly code
s390/smp: use sigp cpu status definitions
s390/smp/kvm: unify sigp definitions
s390/smp: remove redundant check
...
Diffstat (limited to 'arch/s390/kernel')
48 files changed, 302 insertions(+), 497 deletions(-)
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 83e6edf5cf1..45ef1a7b08f 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -9,7 +9,6 @@
 #include <linux/kbuild.h>
 #include <linux/sched.h>
 #include <asm/cputime.h>
-#include <asm/timer.h>
 #include <asm/vdso.h>
 #include <asm/pgtable.h>
@@ -72,11 +71,10 @@ int main(void)
 DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
 BLANK();
 /* idle data offsets */
- DEFINE(__IDLE_ENTER, offsetof(struct s390_idle_data, idle_enter));
- DEFINE(__IDLE_EXIT, offsetof(struct s390_idle_data, idle_exit));
- /* vtimer queue offsets */
- DEFINE(__VQ_IDLE_ENTER, offsetof(struct vtimer_queue, idle_enter));
- DEFINE(__VQ_IDLE_EXIT, offsetof(struct vtimer_queue, idle_exit));
+ DEFINE(__CLOCK_IDLE_ENTER, offsetof(struct s390_idle_data, clock_idle_enter));
+ DEFINE(__CLOCK_IDLE_EXIT, offsetof(struct s390_idle_data, clock_idle_exit));
+ DEFINE(__TIMER_IDLE_ENTER, offsetof(struct s390_idle_data, timer_idle_enter));
+ DEFINE(__TIMER_IDLE_EXIT, offsetof(struct s390_idle_data, timer_idle_exit));
 /* lowcore offsets */
 DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params));
 DEFINE(__LC_EXT_CPU_ADDR, offsetof(struct _lowcore, ext_cpu_addr));
@@ -131,6 +129,8 @@ int main(void)
 DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
 DEFINE(__LC_RESTART_STACK, offsetof(struct _lowcore, restart_stack));
 DEFINE(__LC_RESTART_FN, offsetof(struct _lowcore, restart_fn));
+ DEFINE(__LC_RESTART_DATA, offsetof(struct _lowcore, restart_data));
+ DEFINE(__LC_RESTART_SOURCE, offsetof(struct _lowcore, restart_source));
 DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
 DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
 DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));

diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index c880ff72db4..797a823a227 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -1,7 +1,7 @@
 /*
 * arch/s390/kernel/base.S
 *
- * Copyright IBM Corp. 2006,2007
+ * Copyright IBM Corp. 2006, 2007
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 * Michael Holzheu <holzheu@de.ibm.com>
 */
@@ -9,6 +9,7 @@
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
+#include <asm/sigp.h>
 
 #ifdef CONFIG_64BIT
@@ -100,7 +101,7 @@ ENTRY(diag308_reset)
 .Lrestart_part2:
 lhi %r0,0 # Load r0 with zero
 lhi %r1,2 # Use mode 2 = ESAME (dump)
- sigp %r1,%r0,0x12 # Switch to ESAME mode
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to ESAME mode
 sam64 # Switch to 64 bit addressing mode
 larl %r4,.Lctlregs # Restore control registers
 lctlg %c0,%c15,0(%r4)

diff --git a/arch/s390/kernel/bitmap.c b/arch/s390/kernel/bitmap.c
index 3ae4757b006..102da5e2303 100644
--- a/arch/s390/kernel/bitmap.c
+++ b/arch/s390/kernel/bitmap.c
@@ -2,7 +2,7 @@
 * Bitmaps for set_bit, clear_bit, test_and_set_bit, ...
 * See include/asm/{bitops.h|posix_types.h} for details
 *
- * Copyright IBM Corp. 1999,2009
+ * Copyright IBM Corp. 1999, 2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 */

diff --git a/arch/s390/kernel/compat_exec_domain.c b/arch/s390/kernel/compat_exec_domain.c
index 914d49444f9..765fabdada9 100644
--- a/arch/s390/kernel/compat_exec_domain.c
+++ b/arch/s390/kernel/compat_exec_domain.c
@@ -1,7 +1,7 @@
 /*
 * Support for 32-bit Linux for S390 personality.
 *
- * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2000
 * Author(s): Gerhard Tonn (ton@de.ibm.com)
 *
 *
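The DEFINE() lines in the asm-offsets.c hunk above are not ordinary C: they feed the kbuild asm-offsets mechanism, which turns offsetof() values into assembler constants such as __CLOCK_IDLE_ENTER so that entry.S can address C struct fields. A minimal standalone sketch of the pattern (the DEFINE/BLANK macros follow include/linux/kbuild.h; the struct is a stand-in for the real s390_idle_data):

    #include <stddef.h>

    /* Emit "->SYMBOL value" markers into the generated assembly; a build
     * script greps them into asm-offsets.h as #define lines. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))
    #define BLANK() asm volatile("\n->" : : )

    struct s390_idle_data_sketch {          /* stand-in, not the real layout */
            unsigned long long clock_idle_enter;
            unsigned long long clock_idle_exit;
            unsigned long long timer_idle_enter;
            unsigned long long timer_idle_exit;
    };

    int main(void)
    {
            /* compile with "gcc -S" and grep '^->' to see the markers */
            DEFINE(__CLOCK_IDLE_ENTER,
                   offsetof(struct s390_idle_data_sketch, clock_idle_enter));
            DEFINE(__TIMER_IDLE_ENTER,
                   offsetof(struct s390_idle_data_sketch, timer_idle_enter));
            BLANK();
            return 0;
    }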
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 65426525d9f..d1225089a4b 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -1,8 +1,6 @@
 /*
- * arch/s390x/kernel/linux32.c
- *
 * S390 version
- * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2000
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 * Gerhard Tonn (ton@de.ibm.com)
 * Thomas Spatzier (tspat@de.ibm.com)

diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 3c0c19830c3..a1e8a8694bb 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -1,7 +1,5 @@
 /*
- * arch/s390/kernel/compat_signal.c
- *
- * Copyright (C) IBM Corp. 2000,2006
+ * Copyright IBM Corp. 2000, 2006
 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 * Gerhard Tonn (ton@de.ibm.com)
 *

diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index ff605a39cf4..e835d6d5b7f 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1,8 +1,7 @@
 /*
-* arch/s390/kernel/compat_wrapper.S
 * wrapper for 31 bit compatible system calls.
 *
-* Copyright (C) IBM Corp. 2000,2006
+* Copyright IBM Corp. 2000, 2006
 * Author(s): Gerhard Tonn (ton@de.ibm.com),
 * Thomas Spatzier (tspat@de.ibm.com)
 */

diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index e3dd886e1b3..d7b0c4d2788 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -1,8 +1,6 @@
 /*
- * arch/s390/kernel/cpcmd.c
- *
 * S390 version
- * Copyright IBM Corp. 1999,2007
+ * Copyright IBM Corp. 1999, 2007
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 * Christian Borntraeger (cborntra@de.ibm.com),
 */

diff --git a/arch/s390/kernel/crash.c b/arch/s390/kernel/crash.c
index 8cc7c9fa64f..3819153de8b 100644
--- a/arch/s390/kernel/crash.c
+++ b/arch/s390/kernel/crash.c
@@ -1,7 +1,5 @@
 /*
- * arch/s390/kernel/crash.c
- *
- * (C) Copyright IBM Corp. 2005
+ * Copyright IBM Corp. 2005
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 *

diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 19e5e9eba54..21be961e8a4 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1,5 +1,4 @@
 /*
- * arch/s390/kernel/debug.c
 * S/390 debug facility
 *
 * Copyright IBM Corp. 1999, 2012

diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 3221c6fca8b..1f6b428e276 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1,6 +1,4 @@
 /*
- * arch/s390/kernel/dis.c
- *
 * Disassemble s390 instructions.
 *
 * Copyright IBM Corp. 2007
@@ -613,6 +611,7 @@ static struct insn opcode_b2[] = {
 { "sie", 0x14, INSTR_S_RD },
 { "pc", 0x18, INSTR_S_RD },
 { "sac", 0x19, INSTR_S_RD },
+ { "servc", 0x20, INSTR_RRE_RR },
 { "cfc", 0x1a, INSTR_S_RD },
 { "ipte", 0x21, INSTR_RRE_RR },
 { "ipm", 0x22, INSTR_RRE_R0 },

diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 6684fff1755..bc95a8ebd9c 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -1,6 +1,4 @@
 /*
- * arch/s390/kernel/early.c
- *
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Hongjie Yang <hongjie@us.ibm.com>,
 * Heiko Carstens <heiko.carstens@de.ibm.com>
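The dis.c hunk above adds the service-call instruction (opcode 0xb220) to the opcode_b2 table. Entries in these tables pair the low opcode byte with a mnemonic and an operand format, and the disassembler scans for a match. A simplified, self-contained sketch of that lookup (names modeled on the hunk, not the kernel's exact API):

    #include <stdio.h>

    enum { INSTR_S_RD, INSTR_RRE_RR };      /* subset of the format codes */

    struct insn {
            const char *name;
            unsigned char opfrag;           /* second byte of a 0xb2xx opcode */
            int format;
    };

    static const struct insn opcode_b2[] = {
            { "sie",   0x14, INSTR_S_RD },
            { "servc", 0x20, INSTR_RRE_RR },        /* entry added above */
            { "ipte",  0x21, INSTR_RRE_RR },
    };

    static const struct insn *find_insn_b2(unsigned char opfrag)
    {
            for (size_t i = 0; i < sizeof(opcode_b2) / sizeof(opcode_b2[0]); i++)
                    if (opcode_b2[i].opfrag == opfrag)
                            return &opcode_b2[i];
            return NULL;                    /* unknown instruction */
    }

    int main(void)
    {
            const struct insn *insn = find_insn_b2(0x20);
            printf("%s\n", insn ? insn->name : "?");  /* prints "servc" */
            return 0;
    }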
diff --git a/arch/s390/kernel/ebcdic.c b/arch/s390/kernel/ebcdic.c
index cc0dc609d73..b971c6be629 100644
--- a/arch/s390/kernel/ebcdic.c
+++ b/arch/s390/kernel/ebcdic.c
@@ -1,10 +1,9 @@
 /*
- * arch/s390/kernel/ebcdic.c
 * ECBDIC -> ASCII, ASCII -> ECBDIC,
 * upper to lower case (EBCDIC) conversion tables.
 *
 * S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Martin Peschke <peschke@fh-brandenburg.de>
 */

diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 1ae93b573d7..870bad6d56f 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -1,8 +1,7 @@
 /*
- * arch/s390/kernel/entry.S
 * S390 low-level entry points.
 *
- * Copyright (C) IBM Corp. 1999,2012
+ * Copyright IBM Corp. 1999, 2012
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 * Hartmut Penner (hp@de.ibm.com),
 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
@@ -18,6 +17,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
 #include <asm/page.h>
+#include <asm/sigp.h>
 
 __PT_R0 = __PT_GPRS
 __PT_R1 = __PT_GPRS + 4
@@ -616,17 +616,13 @@ ext_skip:
 * Load idle PSW. The second "half" of this function is in cleanup_idle.
 */
 ENTRY(psw_idle)
- st %r4,__SF_EMPTY(%r15)
+ st %r3,__SF_EMPTY(%r15)
 basr %r1,0
 la %r1,psw_idle_lpsw+4-.(%r1)
 st %r1,__SF_EMPTY+4(%r15)
 oi __SF_EMPTY+4(%r15),0x80
- la %r1,.Lvtimer_max-psw_idle_lpsw-4(%r1)
- stck __IDLE_ENTER(%r2)
- ltr %r5,%r5
- stpt __VQ_IDLE_ENTER(%r3)
- jz psw_idle_lpsw
- spt 0(%r1)
+ stck __CLOCK_IDLE_ENTER(%r2)
+ stpt __TIMER_IDLE_ENTER(%r2)
 psw_idle_lpsw:
 lpsw __SF_EMPTY(%r15)
 br %r14
@@ -723,15 +719,17 @@ ENTRY(restart_int_handler)
 mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw
 ahi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
 xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
- lm %r1,%r3,__LC_RESTART_FN # load fn, parm & source cpu
+ l %r1,__LC_RESTART_FN # load fn, parm & source cpu
+ l %r2,__LC_RESTART_DATA
+ l %r3,__LC_RESTART_SOURCE
 ltr %r3,%r3 # test source cpu address
 jm 1f # negative -> skip source stop
-0: sigp %r4,%r3,1 # sigp sense to source cpu
+0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
 brc 10,0b # wait for status stored
 1: basr %r14,%r1 # call function
 stap __SF_EMPTY(%r15) # store cpu address
 lh %r3,__SF_EMPTY(%r15)
-2: sigp %r4,%r3,5 # sigp stop to current cpu
+2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
 brc 2,2b
 3: j 3b
@@ -883,33 +881,28 @@ cleanup_io_restore_insn:
 cleanup_idle:
 # copy interrupt clock & cpu timer
- mvc __IDLE_EXIT(8,%r2),__LC_INT_CLOCK
- mvc __VQ_IDLE_EXIT(8,%r3),__LC_ASYNC_ENTER_TIMER
+ mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
+ mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
 chi %r11,__LC_SAVE_AREA_ASYNC
 je 0f
- mvc __IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
- mvc __VQ_IDLE_EXIT(8,%r3),__LC_MCCK_ENTER_TIMER
+ mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
+ mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
 0: # check if stck has been executed
 cl %r9,BASED(cleanup_idle_insn)
 jhe 1f
- mvc __IDLE_ENTER(8,%r2),__IDLE_EXIT(%r2)
- mvc __VQ_IDLE_ENTER(8,%r3),__VQ_IDLE_EXIT(%r3)
- j 2f
-1: # check if the cpu timer has been reprogrammed
- ltr %r5,%r5
- jz 2f
- spt __VQ_IDLE_ENTER(%r3)
-2: # account system time going idle
+ mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
+ mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
+1: # account system time going idle
 lm %r9,%r10,__LC_STEAL_TIMER
- ADD64 %r9,%r10,__IDLE_ENTER(%r2)
+ ADD64 %r9,%r10,__CLOCK_IDLE_ENTER(%r2)
 SUB64 %r9,%r10,__LC_LAST_UPDATE_CLOCK
 stm %r9,%r10,__LC_STEAL_TIMER
- mvc __LC_LAST_UPDATE_CLOCK(8),__IDLE_EXIT(%r2)
+ mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
 lm %r9,%r10,__LC_SYSTEM_TIMER
 ADD64 %r9,%r10,__LC_LAST_UPDATE_TIMER
- SUB64 %r9,%r10,__VQ_IDLE_ENTER(%r3)
+ SUB64 %r9,%r10,__TIMER_IDLE_ENTER(%r2)
 stm %r9,%r10,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__VQ_IDLE_EXIT(%r3)
+ mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
 # prepare return psw
 n %r8,BASED(cleanup_idle_wait) # clear wait state bit
 l %r9,24(%r11) # return from psw_idle
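After the rework, psw_idle receives only the s390_idle_data pointer (%r2) and the psw mask: it stamps the TOD clock (stck) and the CPU timer (stpt) into the idle data on the way into the wait PSW, and cleanup_idle fills in the exit stamps from the lowcore on the wakeup interrupt. Pieced together from the field names in the hunks, the per-cpu bookkeeping looks roughly like this (a sketch; the real struct lives in the s390 headers and may order fields differently):

    struct s390_idle_data {
            unsigned int sequence;          /* odd while an update is in flight */
            unsigned long long idle_count;
            unsigned long long idle_time;
            unsigned long long clock_idle_enter;   /* stck at idle entry */
            unsigned long long clock_idle_exit;    /* TOD clock of wakeup irq */
            unsigned long long timer_idle_enter;   /* stpt at idle entry */
            unsigned long long timer_idle_exit;    /* CPU timer of wakeup irq */
            int nohz_delay;
    };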
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index f66a229ab0b..a5f4dc42a5d 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -5,7 +5,6 @@
 #include <linux/signal.h>
 #include <asm/ptrace.h>
 #include <asm/cputime.h>
-#include <asm/timer.h>
 
 extern void (*pgm_check_table[128])(struct pt_regs *);
 extern void *restart_stack;
@@ -17,8 +16,7 @@ void io_int_handler(void);
 void mcck_int_handler(void);
 void restart_int_handler(void);
 void restart_call_handler(void);
-void psw_idle(struct s390_idle_data *, struct vtimer_queue *,
- unsigned long, int);
+void psw_idle(struct s390_idle_data *, unsigned long);
 
 asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
 asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);

diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 229fe1d0774..349b7eeb348 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -1,8 +1,7 @@
 /*
- * arch/s390/kernel/entry64.S
 * S390 low-level entry points.
 *
- * Copyright (C) IBM Corp. 1999,2012
+ * Copyright IBM Corp. 1999, 2012
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 * Hartmut Penner (hp@de.ibm.com),
 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
@@ -18,6 +17,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
 #include <asm/page.h>
+#include <asm/sigp.h>
 
 __PT_R0 = __PT_GPRS
 __PT_R1 = __PT_GPRS + 8
@@ -642,15 +642,11 @@ ext_skip:
 * Load idle PSW. The second "half" of this function is in cleanup_idle.
 */
 ENTRY(psw_idle)
- stg %r4,__SF_EMPTY(%r15)
+ stg %r3,__SF_EMPTY(%r15)
 larl %r1,psw_idle_lpsw+4
 stg %r1,__SF_EMPTY+8(%r15)
- larl %r1,.Lvtimer_max
- STCK __IDLE_ENTER(%r2)
- ltr %r5,%r5
- stpt __VQ_IDLE_ENTER(%r3)
- jz psw_idle_lpsw
- spt 0(%r1)
+ STCK __CLOCK_IDLE_ENTER(%r2)
+ stpt __TIMER_IDLE_ENTER(%r2)
 psw_idle_lpsw:
 lpswe __SF_EMPTY(%r15)
 br %r14
@@ -750,15 +746,17 @@ ENTRY(restart_int_handler)
 mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
 aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
 xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
- lmg %r1,%r3,__LC_RESTART_FN # load fn, parm & source cpu
+ lg %r1,__LC_RESTART_FN # load fn, parm & source cpu
+ lg %r2,__LC_RESTART_DATA
+ lg %r3,__LC_RESTART_SOURCE
 ltgr %r3,%r3 # test source cpu address
 jm 1f # negative -> skip source stop
-0: sigp %r4,%r3,1 # sigp sense to source cpu
+0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
 brc 10,0b # wait for status stored
 1: basr %r14,%r1 # call function
 stap __SF_EMPTY(%r15) # store cpu address
 llgh %r3,__SF_EMPTY(%r15)
-2: sigp %r4,%r3,5 # sigp stop to current cpu
+2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
 brc 2,2b
 3: j 3b
@@ -916,33 +914,28 @@ cleanup_io_restore_insn:
 cleanup_idle:
 # copy interrupt clock & cpu timer
- mvc __IDLE_EXIT(8,%r2),__LC_INT_CLOCK
- mvc __VQ_IDLE_EXIT(8,%r3),__LC_ASYNC_ENTER_TIMER
+ mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
+ mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
 cghi %r11,__LC_SAVE_AREA_ASYNC
 je 0f
- mvc __IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
- mvc __VQ_IDLE_EXIT(8,%r3),__LC_MCCK_ENTER_TIMER
+ mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
+ mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
 0: # check if stck & stpt have been executed
 clg %r9,BASED(cleanup_idle_insn)
 jhe 1f
- mvc __IDLE_ENTER(8,%r2),__IDLE_EXIT(%r2)
- mvc __VQ_IDLE_ENTER(8,%r3),__VQ_IDLE_EXIT(%r3)
- j 2f
-1: # check if the cpu timer has been reprogrammed
- ltr %r5,%r5
- jz 2f
- spt __VQ_IDLE_ENTER(%r3)
-2: # account system time going idle
+ mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
+ mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
+1: # account system time going idle
 lg %r9,__LC_STEAL_TIMER
- alg %r9,__IDLE_ENTER(%r2)
+ alg %r9,__CLOCK_IDLE_ENTER(%r2)
 slg %r9,__LC_LAST_UPDATE_CLOCK
 stg %r9,__LC_STEAL_TIMER
- mvc __LC_LAST_UPDATE_CLOCK(8),__IDLE_EXIT(%r2)
+ mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
 lg %r9,__LC_SYSTEM_TIMER
 alg %r9,__LC_LAST_UPDATE_TIMER
- slg %r9,__VQ_IDLE_ENTER(%r3)
+ slg %r9,__TIMER_IDLE_ENTER(%r2)
 stg %r9,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__VQ_IDLE_EXIT(%r3)
+ mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
 # prepare return psw
 nihh %r8,0xfffd # clear wait state bit
 lg %r9,48(%r11) # return from psw_idle
@@ -958,8 +951,6 @@ cleanup_idle_insn:
 .quad __critical_start
 .Lcritical_length:
 .quad __critical_end - __critical_start
-.Lvtimer_max:
- .quad 0x7fffffffffffffff
 
 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
@@ -974,7 +965,6 @@ ENTRY(sie64a)
 stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
 xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # host id == 0
 lmg %r0,%r13,0(%r3) # load guest gprs 0-13
- lg %r14,__LC_THREAD_INFO # pointer thread_info struct
 sie_loop:
 lg %r14,__LC_THREAD_INFO # pointer thread_info struct
 tm __TI_flags+7(%r14),_TIF_EXIT_SIE

diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 4939d15375a..805b6686b64 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -1,5 +1,5 @@
 /*
- * Copyright IBM Corp. 1999,2010
+ * Copyright IBM Corp. 1999, 2010
 *
 * Author(s): Hartmut Penner <hp@de.ibm.com>
 * Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index d3f1ab7d90a..a1372ae24ae 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -1,7 +1,5 @@
 /*
- * arch/s390/kernel/head31.S
- *
- * Copyright (C) IBM Corp. 2005,2010
+ * Copyright IBM Corp. 2005, 2010
 *
 * Author(s): Hartmut Penner <hp@de.ibm.com>
 * Martin Schwidefsky <schwidefsky@de.ibm.com>

diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 99348c0eaa4..c108af28bbe 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -1,7 +1,5 @@
 /*
- * arch/s390/kernel/head64.S
- *
- * Copyright (C) IBM Corp. 1999,2010
+ * Copyright IBM Corp. 1999, 2010
 *
 * Author(s): Hartmut Penner <hp@de.ibm.com>
 * Martin Schwidefsky <schwidefsky@de.ibm.com>

diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/kernel/head_kdump.S
index 796c976b5fd..acaaaf4b705 100644
--- a/arch/s390/kernel/head_kdump.S
+++ b/arch/s390/kernel/head_kdump.S
@@ -5,6 +5,8 @@
 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */
 
+#include <asm/sigp.h>
+
 #define DATAMOVER_ADDR 0x4000
 #define COPY_PAGE_ADDR 0x6000
@@ -19,7 +21,7 @@
 .align 2
 .Lep_startup_kdump:
 lhi %r1,2 # mode 2 = esame (dump)
- sigp %r1,%r0,0x12 # Switch to esame mode
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to esame mode
 sam64 # Switch to 64 bit addressing
 basr %r13,0
 .Lbase:
@@ -88,7 +90,7 @@ startup_kdump_relocated:
 sam31 # Switch to 31 bit addr mode
 sr %r1,%r1 # Erase register r1
 sr %r2,%r2 # Erase register r2
- sigp %r1,%r2,0x12 # Switch to 31 bit arch mode
+ sigp %r1,%r2,SIGP_SET_ARCHITECTURE # Switch to 31 bit arch mode
 lpsw 0 # Start new kernel...
 .align 8
 .Lrestart_psw:

diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 2f6cfd460cb..e64d141555c 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1,8 +1,7 @@
 /*
- * arch/s390/kernel/ipl.c
 * ipl/reipl/dump support for Linux on s390.
 *
- * Copyright IBM Corp. 2005,2012
+ * Copyright IBM Corp. 2005, 2012
 * Author(s): Michael Holzheu <holzheu@de.ibm.com>
 * Heiko Carstens <heiko.carstens@de.ibm.com>
 * Volker Sameske <sameske@de.ibm.com>
@@ -1528,15 +1527,12 @@ static struct shutdown_action __refdata dump_action = {
 
 static void dump_reipl_run(struct shutdown_trigger *trigger)
 {
- struct {
- void *addr;
- __u32 csum;
- } __packed ipib;
+ unsigned long ipib = (unsigned long) reipl_block_actual;
+ unsigned int csum;
 
- ipib.csum = csum_partial(reipl_block_actual,
- reipl_block_actual->hdr.len, 0);
- ipib.addr = reipl_block_actual;
- memcpy_absolute(&S390_lowcore.ipib, &ipib, sizeof(ipib));
+ csum = csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
+ mem_assign_absolute(S390_lowcore.ipib, ipib);
+ mem_assign_absolute(S390_lowcore.ipib_checksum, csum);
 
 dump_run(trigger);
 }

diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index b4f4a7133fa..dd7630d8aab 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -1,5 +1,5 @@
 /*
- * Copyright IBM Corp. 2004,2011
+ * Copyright IBM Corp. 2004, 2011
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 * Holger Smolinski <Holger.Smolinski@de.ibm.com>,
 * Thomas Spatzier <tspat@de.ibm.com>,
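The dump_reipl_run() rewrite above drops the packed addr+csum staging struct: instead of one memcpy_absolute() of a composite, each absolute-zero lowcore field is assigned on its own with mem_assign_absolute(). As an illustration only — the real helper is an s390 header macro built on the "store using real address" instructions, and the asm below is an assumption, not the kernel's implementation — a field-wise assignment could look like:

    /* Assign one long into the absolute-zero lowcore, bypassing the CPU's
     * prefix redirection. Sketch only: the instruction choice (sturg) and
     * constraints are assumptions about how such a macro can be built. */
    #define mem_assign_absolute_sketch(dest, val) do {              \
            unsigned long __v = (unsigned long) (val);              \
            void *__p = &(dest);                                    \
            asm volatile(                                           \
                    "       sturg   %0,%1\n" /* store using real address */ \
                    : : "d" (__v), "a" (__p) : "memory");           \
    } while (0)

The gain over the memcpy_absolute() version is visible in the hunk itself: no on-stack staging struct whose layout must mirror the lowcore, and each field assignment is type-checked on its own.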
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 64b761aef00..8aa634f5944 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -15,7 +15,7 @@
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
- * Copyright (C) IBM Corporation, 2002, 2006
+ * Copyright IBM Corp. 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */

diff --git a/arch/s390/kernel/lgr.c b/arch/s390/kernel/lgr.c
index 87f080b17af..eca94e74d19 100644
--- a/arch/s390/kernel/lgr.c
+++ b/arch/s390/kernel/lgr.c
@@ -45,7 +45,7 @@ struct lgr_info {
 /*
 * LGR globals
 */
-static void *lgr_page;
+static char lgr_page[PAGE_SIZE] __aligned(PAGE_SIZE);
 static struct lgr_info lgr_info_last;
 static struct lgr_info lgr_info_cur;
 static struct debug_info *lgr_dbf;
@@ -74,7 +74,7 @@ static void cpascii(char *dst, char *src, int size)
 */
 static void lgr_stsi_1_1_1(struct lgr_info *lgr_info)
 {
- struct sysinfo_1_1_1 *si = lgr_page;
+ struct sysinfo_1_1_1 *si = (void *) lgr_page;
 
 if (stsi(si, 1, 1, 1) == -ENOSYS)
 return;
@@ -91,7 +91,7 @@ static void lgr_stsi_1_1_1(struct lgr_info *lgr_info)
 */
 static void lgr_stsi_2_2_2(struct lgr_info *lgr_info)
 {
- struct sysinfo_2_2_2 *si = lgr_page;
+ struct sysinfo_2_2_2 *si = (void *) lgr_page;
 
 if (stsi(si, 2, 2, 2) == -ENOSYS)
 return;
@@ -105,7 +105,7 @@ static void lgr_stsi_2_2_2(struct lgr_info *lgr_info)
 */
 static void lgr_stsi_3_2_2(struct lgr_info *lgr_info)
 {
- struct sysinfo_3_2_2 *si = lgr_page;
+ struct sysinfo_3_2_2 *si = (void *) lgr_page;
 int i;
 
 if (stsi(si, 3, 2, 2) == -ENOSYS)
@@ -183,14 +183,9 @@ static void lgr_timer_set(void)
 */
 static int __init lgr_init(void)
 {
- lgr_page = (void *) __get_free_pages(GFP_KERNEL, 0);
- if (!lgr_page)
- return -ENOMEM;
 lgr_dbf = debug_register("lgr", 1, 1, sizeof(struct lgr_info));
- if (!lgr_dbf) {
- free_page((unsigned long) lgr_page);
+ if (!lgr_dbf)
 return -ENOMEM;
- }
 debug_register_view(lgr_dbf, &debug_hex_ascii_view);
 lgr_info_get(&lgr_info_last);
 debug_event(lgr_dbf, 1, &lgr_info_last, sizeof(lgr_info_last));

diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index cdacf8f91b2..493304bdf1c 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -1,7 +1,5 @@
 /*
- * arch/s390/kernel/machine_kexec.c
- *
- * Copyright IBM Corp. 2005,2011
+ * Copyright IBM Corp. 2005, 2011
 *
 * Author(s): Rolf Adelsberger,
 * Heiko Carstens <heiko.carstens@de.ibm.com>

diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 7e2c38ba137..4567ce20d90 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -1,5 +1,5 @@
 /*
- * Copyright IBM Corp. 2008,2009
+ * Copyright IBM Corp. 2008, 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *

diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
index f70cadec68f..11332193db3 100644
--- a/arch/s390/kernel/mcount64.S
+++ b/arch/s390/kernel/mcount64.S
@@ -1,5 +1,5 @@
 /*
- * Copyright IBM Corp. 2008,2009
+ * Copyright IBM Corp. 2008, 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
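The lgr.c change above swaps a page allocated in lgr_init() for a statically allocated, page-aligned buffer. That removes an allocation-failure path and — matching the "Add init check to lgr_info_log()" commit in the shortlog — keeps the buffer usable regardless of allocator state. The pattern in isolation (a sketch, with PAGE_SIZE and the aligned attribute spelled out for a standalone build):

    #define PAGE_SIZE 4096UL
    #define __aligned(x) __attribute__((__aligned__(x)))

    /* before: static void *lgr_page;
     *         lgr_page = (void *) __get_free_pages(GFP_KERNEL, 0);  (can fail)
     * after:  always present, page aligned, zero-initialized: */
    static char lgr_page[PAGE_SIZE] __aligned(PAGE_SIZE);

    struct sysinfo_1_1_1;                   /* opaque here */

    static struct sysinfo_1_1_1 *lgr_buffer(void)
    {
            /* the (void *) cast is what the lgr_stsi_* hunks above add */
            return (struct sysinfo_1_1_1 *) (void *) lgr_page;
    }

    int main(void)
    {
            return lgr_buffer() == (void *) lgr_page ? 0 : 1;
    }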
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index dfcb3436bad..46412b1d7e1 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -1,9 +1,8 @@
 /*
- * arch/s390/kernel/module.c - Kernel module help for s390.
+ * Kernel module help for s390.
 *
 * S390 version
- * Copyright (C) 2002, 2003 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
+ * Copyright IBM Corp. 2002, 2003
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 * Martin Schwidefsky (schwidefsky@de.ibm.com)
 *

diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 8c372ca6135..a6daa5c5cdb 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -1,7 +1,7 @@
 /*
 * Machine check handler
 *
- * Copyright IBM Corp. 2000,2009
+ * Copyright IBM Corp. 2000, 2009
 * Author(s): Ingo Adlung <adlung@de.ibm.com>,
 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
 * Cornelia Huck <cornelia.huck@de.ibm.com>,

diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c
index 95fa5ac6c4c..46480d81df0 100644
--- a/arch/s390/kernel/os_info.c
+++ b/arch/s390/kernel/os_info.c
@@ -60,7 +60,7 @@ void __init os_info_init(void)
 os_info.version_minor = OS_INFO_VERSION_MINOR;
 os_info.magic = OS_INFO_MAGIC;
 os_info.csum = os_info_csum(&os_info);
- memcpy_absolute(&S390_lowcore.os_info, &ptr, sizeof(ptr));
+ mem_assign_absolute(S390_lowcore.os_info, (unsigned long) ptr);
 }
 
 #ifdef CONFIG_CRASH_DUMP

diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 60055cefdd0..733175373a4 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -1,7 +1,7 @@
 /*
 * This file handles the architecture dependent parts of process handling.
 *
- * Copyright IBM Corp. 1999,2009
+ * Copyright IBM Corp. 1999, 2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 * Hartmut Penner <hp@de.ibm.com>,
 * Denis Joseph Barrow,
@@ -25,8 +25,8 @@
 #include <linux/module.h>
 #include <asm/io.h>
 #include <asm/processor.h>
+#include <asm/vtimer.h>
 #include <asm/irq.h>
-#include <asm/timer.h>
 #include <asm/nmi.h>
 #include <asm/smp.h>
 #include <asm/switch_to.h>

diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 6e0073e43f5..572d4c9cb33 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -1,6 +1,4 @@
 /*
- * arch/s390/kernel/processor.c
- *
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
@@ -25,13 +23,15 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id);
 */
 void __cpuinit cpu_init(void)
 {
- struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
+ struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
+ struct cpuid *id = &__get_cpu_var(cpu_id);
 
 get_cpu_id(id);
 atomic_inc(&init_mm.mm_count);
 current->active_mm = &init_mm;
 BUG_ON(current->mm);
 enter_lazy_tlb(&init_mm, current);
+ memset(idle, 0, sizeof(*idle));
 }
 
 /*

diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 4993e689b2c..f4eb37680b9 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -1,7 +1,7 @@
 /*
 * Ptrace user space interface.
 *
- * Copyright IBM Corp. 1999,2010
+ * Copyright IBM Corp. 1999, 2010
 * Author(s): Denis Joseph Barrow
 * Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
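In the processor.c hunk above, cpu_init() stops spelling out per_cpu(cpu_id, smp_processor_id()) and uses __get_cpu_var(cpu_id), which resolves this CPU's instance directly. A userspace model of the difference (the macros here only mimic the access pattern, not the kernel's per-cpu implementation):

    #include <stdio.h>

    #define NR_CPUS 4
    struct cpuid { unsigned long long ident; };

    static struct cpuid cpu_id[NR_CPUS];    /* one instance per cpu */
    static int current_cpu;                 /* stand-in for smp_processor_id() */

    #define per_cpu(var, cpu)  ((var)[cpu])         /* explicit cpu index */
    #define __get_cpu_var(var) ((var)[current_cpu]) /* current cpu, direct */

    int main(void)
    {
            current_cpu = 2;
            struct cpuid *a = &per_cpu(cpu_id, current_cpu); /* old style */
            struct cpuid *b = &__get_cpu_var(cpu_id);        /* new style */
            printf("%d\n", a == b);                          /* prints 1 */
            return 0;
    }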
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index ad67c214be0..dd8016b0477 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -1,13 +1,12 @@
 /*
- * arch/s390/kernel/reipl.S
- *
 * S390 version
- * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2000
 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
 */
 
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/sigp.h>
 
 #
 # store_status: Empty implementation until kdump is supported on 31 bit
@@ -60,7 +59,7 @@ ENTRY(do_reipl_asm)
 bas %r14,.Ldisab-.Lpg0(%r13)
 .L003: st %r1,__LC_SUBCHANNEL_ID
 lpsw 0
- sigp 0,0,0(6)
+ sigp 0,0,SIGP_RESTART
 .Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13)
 lpsw .Ldispsw-.Lpg0(%r13)
 .align 8

diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index 36b32658fb2..dc3b1273c4d 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -1,11 +1,12 @@
 /*
- * Copyright IBM Corp 2000,2011
+ * Copyright IBM Corp 2000, 2011
 * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
 * Denis Joseph Barrow,
 */
 
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/sigp.h>
 
 #
 # store_status
@@ -106,7 +107,7 @@ ENTRY(do_reipl_asm)
 .L003: st %r1,__LC_SUBCHANNEL_ID
 lhi %r1,0 # mode 0 = esa
 slr %r0,%r0 # set cpuid to zero
- sigp %r1,%r0,0x12 # switch to esa mode
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esa mode
 lpsw 0
 .Ldisab: sll %r14,1
 srl %r14,1 # need to kill hi bit to avoid specification exceptions.

diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index c91d70aede9..f4e6f20e117 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -1,7 +1,5 @@
 /*
- * arch/s390/kernel/relocate_kernel.S
- *
- * (C) Copyright IBM Corp. 2005
+ * Copyright IBM Corp. 2005
 *
 * Author(s): Rolf Adelsberger,
 * Heiko Carstens <heiko.carstens@de.ibm.com>
@@ -9,6 +7,7 @@
 */
 
 #include <linux/linkage.h>
+#include <asm/sigp.h>
 
 /*
 * moves the new kernel to its destination...
@@ -93,7 +92,7 @@ ENTRY(relocate_kernel)
 .no_diag308:
 sr %r1,%r1 # clear %r1
 sr %r2,%r2 # clear %r2
- sigp %r1,%r2,0x12 # set cpuid to zero
+ sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero
 lpsw 0 # hopefully start new kernel...
 
 .align 8

diff --git a/arch/s390/kernel/relocate_kernel64.S b/arch/s390/kernel/relocate_kernel64.S
index 7c3ce589a7f..cfac28330b0 100644
--- a/arch/s390/kernel/relocate_kernel64.S
+++ b/arch/s390/kernel/relocate_kernel64.S
@@ -1,7 +1,5 @@
 /*
- * arch/s390/kernel/relocate_kernel64.S
- *
- * (C) Copyright IBM Corp. 2005
+ * Copyright IBM Corp. 2005
 *
 * Author(s): Rolf Adelsberger,
 * Heiko Carstens <heiko.carstens@de.ibm.com>
@@ -9,6 +7,7 @@
 */
 
 #include <linux/linkage.h>
+#include <asm/sigp.h>
 
 /*
 * moves the new kernel to its destination...
@@ -45,7 +44,7 @@ ENTRY(relocate_kernel)
 diag %r0,%r0,0x308
 .back:
 lhi %r1,1 # mode 1 = esame
- sigp %r1,%r0,0x12 # switch to esame mode
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esame mode
 sam64 # switch to 64 bit addressing mode
 basr %r13,0
 .back_base:
@@ -96,7 +95,7 @@ ENTRY(relocate_kernel)
 sam31 # 31 bit mode
 sr %r1,%r1 # erase register r1
 sr %r2,%r2 # erase register r2
- sigp %r1,%r2,0x12 # set cpuid to zero
+ sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero
 lpsw 0 # hopefully start new kernel...
 .align 8
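All of the sigp magic numbers patched out above (0x12, 5, 6, 1, 11, ...) come from one architecture-defined order-code set; the new <asm/sigp.h> turns them into plain defines usable from both C and assembly. The values below are taken verbatim from the enums that the smp.c hunk further down removes, so this is essentially what the shared header must provide:

    /* SIGP order codes */
    #define SIGP_SENSE                    1
    #define SIGP_EXTERNAL_CALL            2
    #define SIGP_EMERGENCY_SIGNAL         3
    #define SIGP_START                    4
    #define SIGP_STOP                     5
    #define SIGP_RESTART                  6
    #define SIGP_STOP_AND_STORE_STATUS    9
    #define SIGP_INITIAL_CPU_RESET       11
    #define SIGP_CPU_RESET               12
    #define SIGP_SET_PREFIX              13
    #define SIGP_STORE_STATUS_AT_ADDRESS 14
    #define SIGP_SET_ARCHITECTURE        18   /* the literal 0x12 above */
    #define SIGP_SENSE_RUNNING           21

    /* SIGP condition codes */
    #define SIGP_CC_ORDER_CODE_ACCEPTED 0
    #define SIGP_CC_STATUS_STORED       1
    #define SIGP_CC_BUSY                2
    #define SIGP_CC_NOT_OPERATIONAL     3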
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index 95792d846bb..bf053898630 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -1,7 +1,7 @@
 /*
 * Mini SCLP driver.
 *
- * Copyright IBM Corp. 2004,2009
+ * Copyright IBM Corp. 2004, 2009
 *
 * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>,
 * Heiko Carstens <heiko.carstens@de.ibm.com>,

diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 489d1d8d96b..34d75b50526 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -1,8 +1,6 @@
 /*
- * arch/s390/kernel/setup.c
- *
 * S390 version
- * Copyright (C) IBM Corp. 1999,2012
+ * Copyright IBM Corp. 1999, 2012
 * Author(s): Hartmut Penner (hp@de.ibm.com),
 * Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
@@ -430,10 +428,11 @@ static void __init setup_lowcore(void)
 lc->restart_source = -1UL;
 
 /* Setup absolute zero lowcore */
- memcpy_absolute(&S390_lowcore.restart_stack, &lc->restart_stack,
- 4 * sizeof(unsigned long));
- memcpy_absolute(&S390_lowcore.restart_psw, &lc->restart_psw,
- sizeof(lc->restart_psw));
+ mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
+ mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
+ mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
+ mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
+ mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);
 
 set_prefix((u32)(unsigned long) lc);
 lowcore_ptr[0] = lc;
@@ -598,9 +597,7 @@ static void __init setup_memory_end(void)
 static void __init setup_vmcoreinfo(void)
 {
 #ifdef CONFIG_KEXEC
- unsigned long ptr = paddr_vmcoreinfo_note();
-
- memcpy_absolute(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr));
+ mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
 #endif
 }

diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index ac565b44aab..c13a2a37ef0 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -1,7 +1,5 @@
 /*
- * arch/s390/kernel/signal.c
- *
- * Copyright (C) IBM Corp. 1999,2006
+ * Copyright IBM Corp. 1999, 2006
 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *
 * Based on Intel version
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 8dca9c248ac..720fda1620f 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1,7 +1,7 @@
 /*
 * SMP related functions
 *
- * Copyright IBM Corp. 1999,2012
+ * Copyright IBM Corp. 1999, 2012
 * Author(s): Denis Joseph Barrow,
 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
 * Heiko Carstens <heiko.carstens@de.ibm.com>,
@@ -38,40 +38,16 @@
 #include <asm/setup.h>
 #include <asm/irq.h>
 #include <asm/tlbflush.h>
-#include <asm/timer.h>
+#include <asm/vtimer.h>
 #include <asm/lowcore.h>
 #include <asm/sclp.h>
 #include <asm/vdso.h>
 #include <asm/debug.h>
 #include <asm/os_info.h>
+#include <asm/sigp.h>
 #include "entry.h"
 
 enum {
- sigp_sense = 1,
- sigp_external_call = 2,
- sigp_emergency_signal = 3,
- sigp_start = 4,
- sigp_stop = 5,
- sigp_restart = 6,
- sigp_stop_and_store_status = 9,
- sigp_initial_cpu_reset = 11,
- sigp_cpu_reset = 12,
- sigp_set_prefix = 13,
- sigp_store_status_at_address = 14,
- sigp_store_extended_status_at_address = 15,
- sigp_set_architecture = 18,
- sigp_conditional_emergency_signal = 19,
- sigp_sense_running = 21,
-};
-
-enum {
- sigp_order_code_accepted = 0,
- sigp_status_stored = 1,
- sigp_busy = 2,
- sigp_not_operational = 3,
-};
-
-enum {
 ec_schedule = 0,
 ec_call_function,
 ec_call_function_single,
@@ -124,7 +100,7 @@ static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
 
 while (1) {
 cc = __pcpu_sigp(addr, order, parm, status);
- if (cc != sigp_busy)
+ if (cc != SIGP_CC_BUSY)
 return cc;
 cpu_relax();
 }
@@ -136,7 +112,7 @@ static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
 
 for (retry = 0; ; retry++) {
 cc = __pcpu_sigp(pcpu->address, order, parm, &pcpu->status);
- if (cc != sigp_busy)
+ if (cc != SIGP_CC_BUSY)
 break;
 if (retry >= 3)
 udelay(10);
@@ -146,20 +122,19 @@ static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
 
 static inline int pcpu_stopped(struct pcpu *pcpu)
 {
- if (__pcpu_sigp(pcpu->address, sigp_sense,
- 0, &pcpu->status) != sigp_status_stored)
+ if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
+ 0, &pcpu->status) != SIGP_CC_STATUS_STORED)
 return 0;
- /* Check for stopped and check stop state */
- return !!(pcpu->status & 0x50);
+ return !!(pcpu->status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
 }
 
 static inline int pcpu_running(struct pcpu *pcpu)
 {
- if (__pcpu_sigp(pcpu->address, sigp_sense_running,
- 0, &pcpu->status) != sigp_status_stored)
+ if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
+ 0, &pcpu->status) != SIGP_CC_STATUS_STORED)
 return 1;
- /* Check for running status */
- return !(pcpu->status & 0x400);
+ /* Status stored condition code is equivalent to cpu not running. */
+ return 0;
 }
 
 /*
@@ -181,7 +156,7 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
 
 set_bit(ec_bit, &pcpu->ec_mask);
 order = pcpu_running(pcpu) ?
- sigp_external_call : sigp_emergency_signal;
+ SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
 pcpu_sigp_retry(pcpu, order, 0);
 }
@@ -214,7 +189,7 @@ static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 goto out;
 #endif
 lowcore_ptr[cpu] = lc;
- pcpu_sigp_retry(pcpu, sigp_set_prefix, (u32)(unsigned long) lc);
+ pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
 return 0;
 out:
 if (pcpu != &pcpu_devices[0]) {
@@ -229,7 +204,7 @@ out:
 
 static void pcpu_free_lowcore(struct pcpu *pcpu)
 {
- pcpu_sigp_retry(pcpu, sigp_set_prefix, 0);
+ pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
 lowcore_ptr[pcpu - pcpu_devices] = NULL;
 #ifndef CONFIG_64BIT
 if (MACHINE_HAS_IEEE) {
@@ -288,7 +263,7 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
 lc->restart_fn = (unsigned long) func;
 lc->restart_data = (unsigned long) data;
 lc->restart_source = -1UL;
- pcpu_sigp_retry(pcpu, sigp_restart, 0);
+ pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
 }
 
 /*
@@ -298,26 +273,26 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
 void *data, unsigned long stack)
 {
 struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
- struct {
- unsigned long stack;
- void *func;
- void *data;
- unsigned long source;
- } restart = { stack, func, data, stap() };
+ unsigned long source_cpu = stap();
 
 __load_psw_mask(psw_kernel_bits);
- if (pcpu->address == restart.source)
+ if (pcpu->address == source_cpu)
 func(data); /* should not return */
 /* Stop target cpu (if func returns this stops the current cpu). */
- pcpu_sigp_retry(pcpu, sigp_stop, 0);
+ pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
 /* Restart func on the target cpu and stop the current cpu. */
- memcpy_absolute(&lc->restart_stack, &restart, sizeof(restart));
+ mem_assign_absolute(lc->restart_stack, stack);
+ mem_assign_absolute(lc->restart_fn, (unsigned long) func);
+ mem_assign_absolute(lc->restart_data, (unsigned long) data);
+ mem_assign_absolute(lc->restart_source, source_cpu);
 asm volatile(
- "0: sigp 0,%0,6 # sigp restart to target cpu\n"
+ "0: sigp 0,%0,%2 # sigp restart to target cpu\n"
 " brc 2,0b # busy, try again\n"
- "1: sigp 0,%1,5 # sigp stop to current cpu\n"
+ "1: sigp 0,%1,%3 # sigp stop to current cpu\n"
 " brc 2,1b # busy, try again\n"
- : : "d" (pcpu->address), "d" (restart.source) : "0", "1", "cc");
+ : : "d" (pcpu->address), "d" (source_cpu),
+ "K" (SIGP_RESTART), "K" (SIGP_STOP)
+ : "0", "1", "cc");
 for (;;) ;
 }
@@ -388,8 +363,8 @@ void smp_emergency_stop(cpumask_t *cpumask)
 for_each_cpu(cpu, cpumask) {
 struct pcpu *pcpu = pcpu_devices + cpu;
 set_bit(ec_stop_cpu, &pcpu->ec_mask);
- while (__pcpu_sigp(pcpu->address, sigp_emergency_signal,
- 0, NULL) == sigp_busy &&
+ while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
+ 0, NULL) == SIGP_CC_BUSY &&
 get_clock() < end)
 cpu_relax();
 }
@@ -425,7 +400,7 @@ void smp_send_stop(void)
 /* stop all processors */
 for_each_cpu(cpu, &cpumask) {
 struct pcpu *pcpu = pcpu_devices + cpu;
- pcpu_sigp_retry(pcpu, sigp_stop, 0);
+ pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
 while (!pcpu_stopped(pcpu))
 cpu_relax();
 }
@@ -436,7 +411,7 @@ void smp_send_stop(void)
 */
 void smp_stop_cpu(void)
 {
- pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
+ pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
 for (;;) ;
 }
@@ -590,7 +565,7 @@ static void __init smp_get_save_area(int cpu, u16 address)
 }
 #endif
 /* Get the registers of a non-boot cpu. */
- __pcpu_sigp_relax(address, sigp_stop_and_store_status, 0, NULL);
+ __pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL);
 memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area));
 }
@@ -599,8 +574,8 @@ int smp_store_status(int cpu)
 struct pcpu *pcpu;
 
 pcpu = pcpu_devices + cpu;
- if (__pcpu_sigp_relax(pcpu->address, sigp_stop_and_store_status,
- 0, NULL) != sigp_order_code_accepted)
+ if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
+ 0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
 return -EIO;
 return 0;
 }
@@ -621,8 +596,8 @@ static struct sclp_cpu_info *smp_get_cpu_info(void)
 if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
 use_sigp_detection = 1;
 for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
- if (__pcpu_sigp_relax(address, sigp_sense, 0, NULL) ==
- sigp_not_operational)
+ if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
+ SIGP_CC_NOT_OPERATIONAL)
 continue;
 info->cpu[info->configured].address = address;
 info->configured++;
@@ -732,8 +707,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
 pcpu = pcpu_devices + cpu;
 if (pcpu->state != CPU_STATE_CONFIGURED)
 return -EIO;
- if (pcpu_sigp_retry(pcpu, sigp_initial_cpu_reset, 0) !=
- sigp_order_code_accepted)
+ if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
+ SIGP_CC_ORDER_CODE_ACCEPTED)
 return -EIO;
 rc = pcpu_alloc_lowcore(pcpu, cpu);
@@ -793,7 +768,7 @@ void __cpu_die(unsigned int cpu)
 void __noreturn cpu_die(void)
 {
 idle_task_exit();
- pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
+ pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
 for (;;) ;
 }
@@ -940,7 +915,7 @@ static ssize_t show_idle_count(struct device *dev,
 do {
 sequence = ACCESS_ONCE(idle->sequence);
 idle_count = ACCESS_ONCE(idle->idle_count);
- if (ACCESS_ONCE(idle->idle_enter))
+ if (ACCESS_ONCE(idle->clock_idle_enter))
 idle_count++;
 } while ((sequence & 1) || (idle->sequence != sequence));
 return sprintf(buf, "%llu\n", idle_count);
@@ -958,8 +933,8 @@ static ssize_t show_idle_time(struct device *dev,
 now = get_clock();
 sequence = ACCESS_ONCE(idle->sequence);
 idle_time = ACCESS_ONCE(idle->idle_time);
- idle_enter = ACCESS_ONCE(idle->idle_enter);
- idle_exit = ACCESS_ONCE(idle->idle_exit);
+ idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
+ idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
 } while ((sequence & 1) || (idle->sequence != sequence));
 idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
 return sprintf(buf, "%llu\n", idle_time >> 12);
@@ -982,14 +957,11 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
 unsigned int cpu = (unsigned int)(long)hcpu;
 struct cpu *c = &pcpu_devices[cpu].cpu;
 struct device *s = &c->dev;
- struct s390_idle_data *idle;
 int err = 0;
 
 switch (action) {
 case CPU_ONLINE:
 case CPU_ONLINE_FROZEN:
- idle = &per_cpu(s390_idle, cpu);
- memset(idle, 0, sizeof(struct s390_idle_data));
 err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
 break;
 case CPU_DEAD:
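The do/while loops in show_idle_count() and show_idle_time() above are a hand-rolled seqcount: vtime_stop_cpu() increments ->sequence before and after publishing new idle stamps, so a reader that sees an odd or changed sequence retries instead of taking a lock. The same protocol as a standalone sketch (volatile stands in for ACCESS_ONCE and the barriers the kernel uses):

    #include <stdio.h>

    struct idle_data {
            volatile unsigned int sequence;
            volatile unsigned long long clock_idle_enter;
            volatile unsigned long long clock_idle_exit;
            volatile unsigned long long idle_time;
    };

    static unsigned long long read_idle_time(struct idle_data *idle,
                                             unsigned long long now)
    {
            unsigned long long time, enter, exit;
            unsigned int seq;

            do {
                    seq = idle->sequence;
                    time = idle->idle_time;
                    enter = idle->clock_idle_enter;
                    exit = idle->clock_idle_exit;
            } while ((seq & 1) || idle->sequence != seq);
            /* still idle? add the interval that is currently open */
            return time + (enter ? (exit ? exit : now) - enter : 0);
    }

    int main(void)
    {
            struct idle_data d = { 0, 100, 250, 1000 };
            printf("%llu\n", read_idle_time(&d, 300)); /* 1000 + (250-100) */
            return 0;
    }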
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 8841919ef7e..1785cd82253 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -1,9 +1,7 @@
 /*
- * arch/s390/kernel/stacktrace.c
- *
 * Stack trace management functions
 *
- * Copyright (C) IBM Corp. 2006
+ * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index dd70ef04605..d4ca4e0617b 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -12,6 +12,7 @@
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
+#include <asm/sigp.h>
 
 /*
 * Save register context in absolute 0 lowcore and call swsusp_save() to
@@ -163,7 +164,7 @@ ENTRY(swsusp_arch_resume)
 diag %r0,%r0,0x308
 restart_entry:
 lhi %r1,1
- sigp %r1,%r0,0x12
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE
 sam64
 larl %r1,.Lnew_pgm_check_psw
 lpswe 0(%r1)
@@ -179,7 +180,7 @@ pgm_check_entry:
 larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */
 mvc __LC_RST_NEW_PSW(16,%r0),0(%r4)
 3:
- sigp %r9,%r1,11 /* sigp initial cpu reset */
+ sigp %r9,%r1,SIGP_INITIAL_CPU_RESET /* sigp initial cpu reset */
 brc 8,4f /* accepted */
 brc 2,3b /* busy, try again */
@@ -190,16 +191,16 @@ pgm_check_entry:
 larl %r3,_sclp_print_early
 lghi %r1,0
 sam31
- sigp %r1,%r0,0x12
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE
 basr %r14,%r3
 larl %r3,.Ldisabled_wait_31
 lpsw 0(%r3)
 4:
 /* Switch to suspend CPU */
- sigp %r9,%r1,6 /* sigp restart to suspend CPU */
+ sigp %r9,%r1,SIGP_RESTART /* sigp restart to suspend CPU */
 brc 2,4b /* busy, try again */
 5:
- sigp %r9,%r2,5 /* sigp stop to current resume CPU */
+ sigp %r9,%r2,SIGP_STOP /* sigp stop to current resume CPU */
 brc 2,5b /* busy, try again */
 6: j 6b
@@ -207,7 +208,7 @@ restart_suspend:
 larl %r1,.Lresume_cpu
 llgh %r2,0(%r1)
 7:
- sigp %r9,%r2,1 /* sigp sense, wait for resume CPU */
+ sigp %r9,%r2,SIGP_SENSE /* sigp sense, wait for resume CPU */
 brc 8,7b /* accepted, status 0, still running */
 brc 2,7b /* busy, try again */
 tmll %r9,0x40 /* Test if resume CPU is stopped */

diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 78ea1948ff5..b4a29eee41b 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -1,8 +1,6 @@
 /*
- * arch/s390/kernel/sys_s390.c
- *
 * S390 version
- * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999, 2000
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 * Thomas Spatzier (tspat@de.ibm.com)
 *

diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index d4e1cb1dbcd..dcec960fc72 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -1,5 +1,4 @@
 /*
- * arch/s390/kernel/time.c
 * Time of day based timer functions.
 *
 * S390 version
@@ -45,7 +44,7 @@
 #include <asm/vdso.h>
 #include <asm/irq.h>
 #include <asm/irq_regs.h>
-#include <asm/timer.h>
+#include <asm/vtimer.h>
 #include <asm/etr.h>
 #include <asm/cio.h>
 #include "entry.h"

diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 4f8dc942257..05151e06c38 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -1,5 +1,5 @@
 /*
- * Copyright IBM Corp. 2007,2011
+ * Copyright IBM Corp. 2007, 2011
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 77cdf4234eb..af2421a0f31 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -1,8 +1,6 @@
 /*
- * arch/s390/kernel/traps.c
- *
 * S390 version
- * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999, 2000
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 39ebff50694..4fc97b40a6e 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -1,71 +1,82 @@
 /*
- * arch/s390/kernel/vtime.c
 * Virtual cpu timer based timer functions.
 *
- * S390 version
- * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2004, 2012
 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */
 
-#include <linux/module.h>
+#include <linux/kernel_stat.h>
+#include <linux/notifier.h>
+#include <linux/kprobes.h>
+#include <linux/export.h>
 #include <linux/kernel.h>
-#include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/types.h>
 #include <linux/timex.h>
-#include <linux/notifier.h>
-#include <linux/kernel_stat.h>
-#include <linux/rcupdate.h>
-#include <linux/posix-timers.h>
+#include <linux/types.h>
+#include <linux/time.h>
 #include <linux/cpu.h>
-#include <linux/kprobes.h>
+#include <linux/smp.h>
 
-#include <asm/timer.h>
 #include <asm/irq_regs.h>
 #include <asm/cputime.h>
+#include <asm/vtimer.h>
 #include <asm/irq.h>
 #include "entry.h"
 
-static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
+static void virt_timer_expire(void);
 
 DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
 
-static inline __u64 get_vtimer(void)
+static LIST_HEAD(virt_timer_list);
+static DEFINE_SPINLOCK(virt_timer_lock);
+static atomic64_t virt_timer_current;
+static atomic64_t virt_timer_elapsed;
+
+static inline u64 get_vtimer(void)
 {
- __u64 timer;
+ u64 timer;
 
- asm volatile("STPT %0" : "=m" (timer));
+ asm volatile("stpt %0" : "=m" (timer));
 return timer;
 }
 
-static inline void set_vtimer(__u64 expires)
+static inline void set_vtimer(u64 expires)
 {
- __u64 timer;
+ u64 timer;
 
- asm volatile (" STPT %0\n" /* Store current cpu timer value */
- " SPT %1" /* Set new value immediately afterwards */
- : "=m" (timer) : "m" (expires) );
+ asm volatile(
+ " stpt %0\n" /* Store current cpu timer value */
+ " spt %1" /* Set new value imm. afterwards */
+ : "=m" (timer) : "m" (expires));
 S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
 S390_lowcore.last_update_timer = expires;
 }
 
+static inline int virt_timer_forward(u64 elapsed)
+{
+ BUG_ON(!irqs_disabled());
+
+ if (list_empty(&virt_timer_list))
+ return 0;
+ elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
+ return elapsed >= atomic64_read(&virt_timer_current);
+}
+
 /*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
-static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
+static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
 {
 struct thread_info *ti = task_thread_info(tsk);
- __u64 timer, clock, user, system, steal;
+ u64 timer, clock, user, system, steal;
 
 timer = S390_lowcore.last_update_timer;
 clock = S390_lowcore.last_update_clock;
- asm volatile (" STPT %0\n" /* Store current cpu timer value */
- " STCK %1" /* Store current tod clock value */
- : "=m" (S390_lowcore.last_update_timer),
- "=m" (S390_lowcore.last_update_clock) );
+ asm volatile(
+ " stpt %0\n" /* Store current cpu timer value */
+ " stck %1" /* Store current tod clock value */
+ : "=m" (S390_lowcore.last_update_timer),
+ "=m" (S390_lowcore.last_update_clock));
 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
 S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
@@ -84,6 +95,8 @@ static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
 S390_lowcore.steal_timer = 0;
 account_steal_time(steal);
 }
+
+ return virt_timer_forward(user + system);
 }
 
 void account_vtime(struct task_struct *prev, struct task_struct *next)
@@ -101,7 +114,8 @@ void account_vtime(struct task_struct *prev, struct task_struct *next)
 
 void account_process_tick(struct task_struct *tsk, int user_tick)
 {
- do_account_vtime(tsk, HARDIRQ_OFFSET);
+ if (do_account_vtime(tsk, HARDIRQ_OFFSET))
+ virt_timer_expire();
 }
 
 /*
@@ -111,7 +125,7 @@ void account_process_tick(struct task_struct *tsk, int user_tick)
 void account_system_vtime(struct task_struct *tsk)
 {
 struct thread_info *ti = task_thread_info(tsk);
- __u64 timer, system;
+ u64 timer, system;
 
 timer = S390_lowcore.last_update_timer;
 S390_lowcore.last_update_timer = get_vtimer();
@@ -121,13 +135,14 @@ void account_system_vtime(struct task_struct *tsk)
 S390_lowcore.steal_timer -= system;
 ti->system_timer = S390_lowcore.system_timer;
 account_system_time(tsk, 0, system, system);
+
+ virt_timer_forward(system);
 }
 EXPORT_SYMBOL_GPL(account_system_vtime);
 
 void __kprobes vtime_stop_cpu(void)
 {
 struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
- struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
 unsigned long long idle_time;
 unsigned long psw_mask;
@@ -141,7 +156,7 @@ void __kprobes vtime_stop_cpu(void)
 idle->nohz_delay = 0;
 
 /* Call the assembler magic in entry.S */
- psw_idle(idle, vq, psw_mask, !list_empty(&vq->list));
+ psw_idle(idle, psw_mask);
 
 /* Reenable preemption tracer. */
 start_critical_timings();
@@ -149,9 +164,9 @@ void __kprobes vtime_stop_cpu(void)
 /* Account time spent with enabled wait psw loaded as idle time. */
 idle->sequence++;
 smp_wmb();
- idle_time = idle->idle_exit - idle->idle_enter;
+ idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
+ idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
 idle->idle_time += idle_time;
- idle->idle_enter = idle->idle_exit = 0ULL;
 idle->idle_count++;
 account_idle_time(idle_time);
 smp_wmb();
@@ -167,10 +182,10 @@ cputime64_t s390_get_idle_time(int cpu)
 do {
 now = get_clock();
 sequence = ACCESS_ONCE(idle->sequence);
- idle_enter = ACCESS_ONCE(idle->idle_enter);
- idle_exit = ACCESS_ONCE(idle->idle_exit);
+ idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
+ idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
 } while ((sequence & 1) || (idle->sequence != sequence));
- return idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
+ return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
 }
 
 /*
@@ -179,11 +194,11 @@
 */
 static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
 {
- struct vtimer_list *event;
+ struct vtimer_list *tmp;
 
- list_for_each_entry(event, head, entry) {
- if (event->expires > timer->expires) {
- list_add_tail(&timer->entry, &event->entry);
+ list_for_each_entry(tmp, head, entry) {
+ if (tmp->expires > timer->expires) {
+ list_add_tail(&timer->entry, &tmp->entry);
 return;
 }
 }
@@ -191,82 +206,45 @@ static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
 }
 
 /*
- * Do the callback functions of expired vtimer events.
- * Called from within the interrupt handler.
- */
-static void do_callbacks(struct list_head *cb_list)
-{
- struct vtimer_queue *vq;
- struct vtimer_list *event, *tmp;
-
- if (list_empty(cb_list))
- return;
-
- vq = &__get_cpu_var(virt_cpu_timer);
-
- list_for_each_entry_safe(event, tmp, cb_list, entry) {
- list_del_init(&event->entry);
- (event->function)(event->data);
- if (event->interval) {
- /* Recharge interval timer */
- event->expires = event->interval + vq->elapsed;
- spin_lock(&vq->lock);
- list_add_sorted(event, &vq->list);
- spin_unlock(&vq->lock);
- }
- }
-}
-
-/*
- * Handler for the virtual CPU timer.
+ * Handler for expired virtual CPU timer.
 */
-static void do_cpu_timer_interrupt(struct ext_code ext_code,
- unsigned int param32, unsigned long param64)
+static void virt_timer_expire(void)
 {
- struct vtimer_queue *vq;
- struct vtimer_list *event, *tmp;
- struct list_head cb_list; /* the callback queue */
- __u64 elapsed, next;
-
- kstat_cpu(smp_processor_id()).irqs[EXTINT_TMR]++;
- INIT_LIST_HEAD(&cb_list);
- vq = &__get_cpu_var(virt_cpu_timer);
-
- /* walk timer list, fire all expired events */
- spin_lock(&vq->lock);
-
- elapsed = vq->elapsed + (vq->timer - S390_lowcore.async_enter_timer);
- BUG_ON((s64) elapsed < 0);
- vq->elapsed = 0;
- list_for_each_entry_safe(event, tmp, &vq->list, entry) {
- if (event->expires < elapsed)
+ struct vtimer_list *timer, *tmp;
+ unsigned long elapsed;
+ LIST_HEAD(cb_list);
+
+ /* walk timer list, fire all expired timers */
+ spin_lock(&virt_timer_lock);
+ elapsed = atomic64_read(&virt_timer_elapsed);
+ list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
+ if (timer->expires < elapsed)
 /* move expired timer to the callback queue */
- list_move_tail(&event->entry, &cb_list);
+ list_move_tail(&timer->entry, &cb_list);
 else
- event->expires -= elapsed;
+ timer->expires -= elapsed;
 }
- spin_unlock(&vq->lock);
-
- do_callbacks(&cb_list);
-
- /* next event is first in list */
- next = VTIMER_MAX_SLICE;
- spin_lock(&vq->lock);
- if (!list_empty(&vq->list)) {
- event = list_first_entry(&vq->list, struct vtimer_list, entry);
- next = event->expires;
+ if (!list_empty(&virt_timer_list)) {
+ timer = list_first_entry(&virt_timer_list,
+ struct vtimer_list, entry);
+ atomic64_set(&virt_timer_current, timer->expires);
+ }
+ atomic64_sub(elapsed, &virt_timer_elapsed);
+ spin_unlock(&virt_timer_lock);
+
+ /* Do callbacks and recharge periodic timers */
+ list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
+ list_del_init(&timer->entry);
+ timer->function(timer->data);
+ if (timer->interval) {
+ /* Recharge interval timer */
+ timer->expires = timer->interval +
+ atomic64_read(&virt_timer_elapsed);
+ spin_lock(&virt_timer_lock);
+ list_add_sorted(timer, &virt_timer_list);
+ spin_unlock(&virt_timer_lock);
+ }
 }
- spin_unlock(&vq->lock);
- /*
- * To improve precision add the time spent by the
- * interrupt handler to the elapsed time.
- * Note: CPU timer counts down and we got an interrupt,
- * the current content is negative
- */
- elapsed = S390_lowcore.async_enter_timer - get_vtimer();
- set_vtimer(next - elapsed);
- vq->timer = next - elapsed;
- vq->elapsed = elapsed;
 }
 
 void init_virt_timer(struct vtimer_list *timer)
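virt_timer_expire() above is the consumer half of the reworked accounting: the producers (do_account_vtime() and account_system_vtime()) funnel consumed CPU time through virt_timer_forward(), which adds it to the global virt_timer_elapsed counter and compares against the cached earliest expiry in virt_timer_current. A compilable model of that core, with C11 atomics standing in for the kernel's atomic64_t:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_llong virt_timer_current;   /* earliest pending expiry */
    static atomic_llong virt_timer_elapsed;   /* time accounted since base */
    static bool list_empty_stub = true;       /* stand-in for list_empty() */

    /* returns true when the expiry handler should run */
    static bool virt_timer_forward(long long elapsed)
    {
            long long total;

            if (list_empty_stub)
                    return false;
            total = atomic_fetch_add(&virt_timer_elapsed, elapsed) + elapsed;
            return total >= atomic_load(&virt_timer_current);
    }

    int main(void)
    {
            list_empty_stub = false;            /* pretend a timer is queued */
            atomic_store(&virt_timer_current, 1000);
            return virt_timer_forward(1500) ? 0 : 1;  /* 1500 >= 1000: run */
    }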
@@ -278,179 +256,108 @@ EXPORT_SYMBOL(init_virt_timer);
 
 static inline int vtimer_pending(struct vtimer_list *timer)
 {
- return (!list_empty(&timer->entry));
+ return !list_empty(&timer->entry);
 }
 
-/*
- * this function should only run on the specified CPU
- */
 static void internal_add_vtimer(struct vtimer_list *timer)
 {
- struct vtimer_queue *vq;
- unsigned long flags;
- __u64 left, expires;
-
- vq = &per_cpu(virt_cpu_timer, timer->cpu);
- spin_lock_irqsave(&vq->lock, flags);
-
- BUG_ON(timer->cpu != smp_processor_id());
-
- if (list_empty(&vq->list)) {
- /* First timer on this cpu, just program it. */
- list_add(&timer->entry, &vq->list);
- set_vtimer(timer->expires);
- vq->timer = timer->expires;
- vq->elapsed = 0;
+ if (list_empty(&virt_timer_list)) {
+ /* First timer, just program it. */
+ atomic64_set(&virt_timer_current, timer->expires);
+ atomic64_set(&virt_timer_elapsed, 0);
+ list_add(&timer->entry, &virt_timer_list);
 } else {
- /* Check progress of old timers. */
- expires = timer->expires;
- left = get_vtimer();
- if (likely((s64) expires < (s64) left)) {
+ /* Update timer against current base. */
+ timer->expires += atomic64_read(&virt_timer_elapsed);
+ if (likely((s64) timer->expires <
+ (s64) atomic64_read(&virt_timer_current)))
 /* The new timer expires before the current timer. */
- set_vtimer(expires);
- vq->elapsed += vq->timer - left;
- vq->timer = expires;
- } else {
- vq->elapsed += vq->timer - left;
- vq->timer = left;
- }
- /* Insert new timer into per cpu list. */
- timer->expires += vq->elapsed;
- list_add_sorted(timer, &vq->list);
+ atomic64_set(&virt_timer_current, timer->expires);
+ /* Insert new timer into the list. */
+ list_add_sorted(timer, &virt_timer_list);
 }
-
- spin_unlock_irqrestore(&vq->lock, flags);
- /* release CPU acquired in prepare_vtimer or mod_virt_timer() */
- put_cpu();
 }
 
-static inline void prepare_vtimer(struct vtimer_list *timer)
+static void __add_vtimer(struct vtimer_list *timer, int periodic)
 {
- BUG_ON(!timer->function);
- BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE);
- BUG_ON(vtimer_pending(timer));
- timer->cpu = get_cpu();
+ unsigned long flags;
+
+ timer->interval = periodic ? timer->expires : 0;
+ spin_lock_irqsave(&virt_timer_lock, flags);
+ internal_add_vtimer(timer);
+ spin_unlock_irqrestore(&virt_timer_lock, flags);
 }
 
 /*
 * add_virt_timer - add an oneshot virtual CPU timer
 */
-void add_virt_timer(void *new)
+void add_virt_timer(struct vtimer_list *timer)
 {
- struct vtimer_list *timer;
-
- timer = (struct vtimer_list *)new;
- prepare_vtimer(timer);
- timer->interval = 0;
- internal_add_vtimer(timer);
+ __add_vtimer(timer, 0);
 }
 EXPORT_SYMBOL(add_virt_timer);
 
 /*
 * add_virt_timer_int - add an interval virtual CPU timer
 */
-void add_virt_timer_periodic(void *new)
+void add_virt_timer_periodic(struct vtimer_list *timer)
 {
- struct vtimer_list *timer;
-
- timer = (struct vtimer_list *)new;
- prepare_vtimer(timer);
- timer->interval = timer->expires;
- internal_add_vtimer(timer);
+ __add_vtimer(timer, 1);
 }
 EXPORT_SYMBOL(add_virt_timer_periodic);
 
-static int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic)
+static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
 {
- struct vtimer_queue *vq;
 unsigned long flags;
- int cpu;
+ int rc;
 
 BUG_ON(!timer->function);
- BUG_ON(!expires || expires > VTIMER_MAX_SLICE);
 
 if (timer->expires == expires && vtimer_pending(timer))
 return 1;
-
- cpu = get_cpu();
- vq = &per_cpu(virt_cpu_timer, cpu);
-
- /* disable interrupts before test if timer is pending */
- spin_lock_irqsave(&vq->lock, flags);
-
- /* if timer isn't pending add it on the current CPU */
- if (!vtimer_pending(timer)) {
- spin_unlock_irqrestore(&vq->lock, flags);
-
- if (periodic)
- timer->interval = expires;
- else
- timer->interval = 0;
- timer->expires = expires;
- timer->cpu = cpu;
- internal_add_vtimer(timer);
- return 0;
- }
-
- /* check if we run on the right CPU */
- BUG_ON(timer->cpu != cpu);
-
- list_del_init(&timer->entry);
+ spin_lock_irqsave(&virt_timer_lock, flags);
+ rc = vtimer_pending(timer);
+ if (rc)
+ list_del_init(&timer->entry);
+ timer->interval = periodic ? expires : 0;
 timer->expires = expires;
- if (periodic)
- timer->interval = expires;
-
- /* the timer can't expire anymore so we can release the lock */
- spin_unlock_irqrestore(&vq->lock, flags);
 internal_add_vtimer(timer);
- return 1;
+ spin_unlock_irqrestore(&virt_timer_lock, flags);
+ return rc;
 }
 
 /*
- * If we change a pending timer the function must be called on the CPU
- * where the timer is running on.
- *
 * returns whether it has modified a pending timer (1) or not (0)
 */
-int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
+int mod_virt_timer(struct vtimer_list *timer, u64 expires)
 {
 return __mod_vtimer(timer, expires, 0);
 }
 EXPORT_SYMBOL(mod_virt_timer);
 
 /*
- * If we change a pending timer the function must be called on the CPU
- * where the timer is running on.
- *
 * returns whether it has modified a pending timer (1) or not (0)
 */
-int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires)
+int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
 {
 return __mod_vtimer(timer, expires, 1);
 }
 EXPORT_SYMBOL(mod_virt_timer_periodic);
 
 /*
- * delete a virtual timer
+ * Delete a virtual timer.
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
 int del_virt_timer(struct vtimer_list *timer)
 {
 unsigned long flags;
- struct vtimer_queue *vq;
 
- /* check if timer is pending */
 if (!vtimer_pending(timer))
 return 0;
-
- vq = &per_cpu(virt_cpu_timer, timer->cpu);
- spin_lock_irqsave(&vq->lock, flags);
-
- /* we don't interrupt a running timer, just let it expire! */
+ spin_lock_irqsave(&virt_timer_lock, flags);
 list_del_init(&timer->entry);
+ spin_unlock_irqrestore(&virt_timer_lock, flags);
 return 1;
 }
 EXPORT_SYMBOL(del_virt_timer);
@@ -458,20 +365,10 @@ EXPORT_SYMBOL(del_virt_timer);
 /*
 * Start the virtual CPU timer on the current CPU.
 */
-void init_cpu_vtimer(void)
+void __cpuinit init_cpu_vtimer(void)
 {
- struct vtimer_queue *vq;
-
- /* initialize per cpu vtimer structure */
- vq = &__get_cpu_var(virt_cpu_timer);
- INIT_LIST_HEAD(&vq->list);
- spin_lock_init(&vq->lock);
-
- /* enable cpu timer interrupts */
- __ctl_set_bit(0,10);
-
 /* set initial cpu timer */
- set_vtimer(0x7fffffffffffffffULL);
+ set_vtimer(VTIMER_MAX_SLICE);
 }
 
 static int __cpuinit s390_nohz_notify(struct notifier_block *self,
@@ -493,12 +390,7 @@ static int __cpuinit s390_nohz_notify(struct notifier_block *self,
 
 void __init vtime_init(void)
 {
- /* request the cpu timer external interrupt */
- if (register_external_interrupt(0x1005, do_cpu_timer_interrupt))
- panic("Couldn't request external interrupt 0x1005");
-
 /* Enable cpu timer interrupts on the boot cpu. */
 init_cpu_vtimer();
 cpu_notifier(s390_nohz_notify, 0);
 }
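Seen from a caller, the rework mostly shows up in the typed interface: the void* parameters of add_virt_timer()/add_virt_timer_periodic() are gone and the per-timer ->cpu field no longer exists. A usage sketch against the functions visible in this diff (the surrounding driver context is invented; the vtimer_list field names are those the diff itself uses):

    static void my_timer_fn(unsigned long data)
    {
            /* runs from virt_timer_expire(), i.e. out of the tick path */
    }

    static struct vtimer_list my_timer;

    static void my_driver_setup(void)
    {
            init_virt_timer(&my_timer);
            my_timer.function = my_timer_fn;
            my_timer.data = 0;
            my_timer.expires = 1ULL << 32;          /* cpu-timer units */
            add_virt_timer_periodic(&my_timer);     /* interval = expires */

            /* reprogram; returns 1 if a pending timer was modified */
            mod_virt_timer_periodic(&my_timer, 1ULL << 33);

            del_virt_timer(&my_timer);              /* 1 if it was pending */
    }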