Diffstat (limited to 'arch/s390/kernel')
 arch/s390/kernel/asm-offsets.c   |  17
 arch/s390/kernel/compat_signal.c |   2
 arch/s390/kernel/crash_dump.c    |  83
 arch/s390/kernel/early.c         |   6
 arch/s390/kernel/entry.S         |  91
 arch/s390/kernel/entry64.S       |  86
 arch/s390/kernel/head.S          |   8
 arch/s390/kernel/head31.S        |   1
 arch/s390/kernel/nmi.c           |   8
 arch/s390/kernel/process.c       |   6
 arch/s390/kernel/ptrace.c        |   4
 arch/s390/kernel/setup.c         | 459
 arch/s390/kernel/signal.c        |  12
 arch/s390/kernel/smp.c           |  20
 arch/s390/kernel/time.c          |   2
 arch/s390/kernel/topology.c     |  24
 16 files changed, 365 insertions(+), 464 deletions(-)
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index cc10cdd4d6a..afe1715a4eb 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -50,6 +50,7 @@ int main(void)
 	DEFINE(__PT_INT_CODE, offsetof(struct pt_regs, int_code));
 	DEFINE(__PT_INT_PARM, offsetof(struct pt_regs, int_parm));
 	DEFINE(__PT_INT_PARM_LONG, offsetof(struct pt_regs, int_parm_long));
+	DEFINE(__PT_FLAGS, offsetof(struct pt_regs, flags));
 	DEFINE(__PT_SIZE, sizeof(struct pt_regs));
 	BLANK();
 	DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain));
@@ -89,16 +90,22 @@ int main(void)
 	DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc));
 	DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code));
 	DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code));
-	DEFINE(__LC_PER_CAUSE, offsetof(struct _lowcore, per_perc_atmid));
+	DEFINE(__LC_MON_CLASS_NR, offsetof(struct _lowcore, mon_class_num));
+	DEFINE(__LC_PER_CODE, offsetof(struct _lowcore, per_code));
+	DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_atmid));
 	DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address));
-	DEFINE(__LC_PER_PAID, offsetof(struct _lowcore, per_access_id));
-	DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id));
+	DEFINE(__LC_EXC_ACCESS_ID, offsetof(struct _lowcore, exc_access_id));
+	DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id));
+	DEFINE(__LC_OP_ACCESS_ID, offsetof(struct _lowcore, op_access_id));
+	DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_mode_id));
+	DEFINE(__LC_MON_CODE, offsetof(struct _lowcore, monitor_code));
 	DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id));
 	DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr));
 	DEFINE(__LC_IO_INT_PARM, offsetof(struct _lowcore, io_int_parm));
 	DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word));
 	DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list));
 	DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code));
+	DEFINE(__LC_MCCK_EXT_DAM_CODE, offsetof(struct _lowcore, external_damage_code));
 	DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw));
 	DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw));
 	DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw));
@@ -115,6 +122,7 @@ int main(void)
 	DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync));
 	DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async));
 	DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart));
+	DEFINE(__LC_CPU_FLAGS, offsetof(struct _lowcore, cpu_flags));
 	DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw));
 	DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw));
 	DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer));
@@ -142,7 +150,6 @@ int main(void)
 	DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
 	DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
 	DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
-	DEFINE(__LC_IRB, offsetof(struct _lowcore, irb));
 	DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
 	BLANK();
 	DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
@@ -156,6 +163,8 @@ int main(void)
 #ifdef CONFIG_32BIT
 	DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
 #else /* CONFIG_32BIT */
+	DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code));
+	DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address));
 	DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
 	DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area));
 	DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 7df5ed9f44d..f204d692036 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -213,7 +213,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
 		sizeof(current->thread.fp_regs));
 
 	restore_fp_regs(current->thread.fp_regs.fprs);
-	clear_thread_flag(TIF_SYSCALL);	/* No longer in a system call */
+	clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
 	return 0;
 }
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index d7658c4b2ed..a3b9150e680 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/bootmem.h>
 #include <linux/elf.h>
+#include <linux/memblock.h>
 #include <asm/os_info.h>
 #include <asm/elf.h>
 #include <asm/ipl.h>
@@ -22,6 +23,24 @@
 #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
 #define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
 
+static struct memblock_region oldmem_region;
+
+static struct memblock_type oldmem_type = {
+	.cnt = 1,
+	.max = 1,
+	.total_size = 0,
+	.regions = &oldmem_region,
+};
+
+#define for_each_dump_mem_range(i, nid, p_start, p_end, p_nid)		\
+	for (i = 0, __next_mem_range(&i, nid, &memblock.physmem,	\
+				     &oldmem_type, p_start,		\
+				     p_end, p_nid);			\
+	     i != (u64)ULLONG_MAX;					\
+	     __next_mem_range(&i, nid, &memblock.physmem,		\
+			      &oldmem_type,				\
+			      p_start, p_end, p_nid))
+
 struct dump_save_areas dump_save_areas;
 
 /*
@@ -264,19 +283,6 @@ static void *kzalloc_panic(int len)
 }
 
 /*
- * Get memory layout and create hole for oldmem
- */
-static struct mem_chunk *get_memory_layout(void)
-{
-	struct mem_chunk *chunk_array;
-
-	chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk));
-	detect_memory_layout(chunk_array, 0);
-	create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE);
-	return chunk_array;
-}
-
-/*
  * Initialize ELF note
  */
 static void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len,
@@ -490,52 +496,33 @@ static int get_cpu_cnt(void)
  */
 static int get_mem_chunk_cnt(void)
 {
-	struct mem_chunk *chunk_array, *mem_chunk;
-	int i, cnt = 0;
+	int cnt = 0;
+	u64 idx;
 
-	chunk_array = get_memory_layout();
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		mem_chunk = &chunk_array[i];
-		if (chunk_array[i].type != CHUNK_READ_WRITE &&
-		    chunk_array[i].type != CHUNK_READ_ONLY)
-			continue;
-		if (mem_chunk->size == 0)
-			continue;
+	for_each_dump_mem_range(idx, NUMA_NO_NODE, NULL, NULL, NULL)
 		cnt++;
-	}
-	kfree(chunk_array);
 	return cnt;
 }
 
 /*
  * Initialize ELF loads (new kernel)
  */
-static int loads_init(Elf64_Phdr *phdr, u64 loads_offset)
+static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
 {
-	struct mem_chunk *chunk_array, *mem_chunk;
-	int i;
+	phys_addr_t start, end;
+	u64 idx;
 
-	chunk_array = get_memory_layout();
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		mem_chunk = &chunk_array[i];
-		if (mem_chunk->size == 0)
-			continue;
-		if (chunk_array[i].type != CHUNK_READ_WRITE &&
-		    chunk_array[i].type != CHUNK_READ_ONLY)
-			continue;
-		else
-			phdr->p_filesz = mem_chunk->size;
+	for_each_dump_mem_range(idx, NUMA_NO_NODE, &start, &end, NULL) {
+		phdr->p_filesz = end - start;
 		phdr->p_type = PT_LOAD;
-		phdr->p_offset = mem_chunk->addr;
-		phdr->p_vaddr = mem_chunk->addr;
-		phdr->p_paddr = mem_chunk->addr;
-		phdr->p_memsz = mem_chunk->size;
+		phdr->p_offset = start;
+		phdr->p_vaddr = start;
+		phdr->p_paddr = start;
+		phdr->p_memsz = end - start;
 		phdr->p_flags = PF_R | PF_W | PF_X;
 		phdr->p_align = PAGE_SIZE;
 		phdr++;
 	}
-	kfree(chunk_array);
-	return i;
 }
 
 /*
@@ -584,6 +571,14 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
 	/* If we cannot get HSA size for zfcpdump return error */
 	if (ipl_info.type == IPL_TYPE_FCP_DUMP && !sclp_get_hsa_size())
 		return -ENODEV;
+
+	/* For kdump, exclude previous crashkernel memory */
+	if (OLDMEM_BASE) {
+		oldmem_region.base = OLDMEM_BASE;
+		oldmem_region.size = OLDMEM_SIZE;
+		oldmem_type.total_size = OLDMEM_SIZE;
+	}
+
 	mem_chunk_cnt = get_mem_chunk_cnt();
 
 	alloc_size = 0x1000 + get_cpu_cnt() * 0x300 +
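The clear_pt_regs_flag() call introduced above relies on a small helper family around the new pt_regs->flags word (exported to assembler as __PT_FLAGS in asm-offsets.c). A minimal sketch, assuming the asm/ptrace.h definitions of this series; the PIF bit numbers are assumptions, not verified against the header:

/*
 * Sketch of the per-pt_regs flag helpers used by the hunks above.
 * Bit numbers are assumptions; _PIF_SYSCALL etc. would be 1 << PIF_*.
 */
#define PIF_SYSCALL	0	/* inside a system call */
#define PIF_PER_TRAP	1	/* deliver per trap on exit to user space */

static inline void set_pt_regs_flag(struct pt_regs *regs, int flag)
{
	regs->flags |= (1U << flag);
}

static inline void clear_pt_regs_flag(struct pt_regs *regs, int flag)
{
	regs->flags &= ~(1U << flag);
}

static inline int test_pt_regs_flag(struct pt_regs *regs, int flag)
{
	return !!(regs->flags & (1U << flag));
}

Keeping these bits in pt_regs rather than in thread_info makes them per interruption context: each nested entry gets a zeroed flags word, which is why the entry code below gains an xc __PT_FLAGS(...) on every interrupt path.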
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index a734f3585ce..0dff972a169 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -258,13 +258,19 @@ static __init void setup_topology(void)
 static void early_pgm_check_handler(void)
 {
 	const struct exception_table_entry *fixup;
+	unsigned long cr0, cr0_new;
 	unsigned long addr;
 
 	addr = S390_lowcore.program_old_psw.addr;
 	fixup = search_exception_tables(addr & PSW_ADDR_INSN);
 	if (!fixup)
 		disabled_wait(0);
+	/* Disable low address protection before storing into lowcore. */
+	__ctl_store(cr0, 0, 0);
+	cr0_new = cr0 & ~(1UL << 28);
+	__ctl_load(cr0_new, 0, 0);
 	S390_lowcore.program_old_psw.addr = extable_fixup(fixup)|PSW_ADDR_AMODE;
+	__ctl_load(cr0, 0, 0);
 }
 
 static noinline __init void setup_lowcore_early(void)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 1662038516c..70203265196 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -10,6 +10,7 @@
 
 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <asm/processor.h>
 #include <asm/cache.h>
 #include <asm/errno.h>
 #include <asm/ptrace.h>
@@ -37,18 +38,16 @@
 __PT_R13	= __PT_GPRS + 52
 __PT_R14	= __PT_GPRS + 56
 __PT_R15	= __PT_GPRS + 60
 
-_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-		 _TIF_MCCK_PENDING | _TIF_PER_TRAP | _TIF_ASCE)
-_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-		 _TIF_MCCK_PENDING | _TIF_ASCE)
-_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
-	      _TIF_SYSCALL_TRACEPOINT)
-_TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT)
-
 STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
 STACK_SIZE  = 1 << STACK_SHIFT
 STACK_INIT  = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
 
+_TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
+_TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
+		   _TIF_SYSCALL_TRACEPOINT)
+_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE)
+_PIF_WORK	= (_PIF_PER_TRAP)
+
 #define BASED(name) name-system_call(%r13)
 
 	.macro	TRACE_IRQS_ON
@@ -160,13 +159,7 @@ ENTRY(__switch_to)
 	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
 	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3)	# store pid of next
 	l	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
-	lhi	%r6,_TIF_TRANSFER		# transfer TIF bits
-	n	%r6,__TI_flags(%r4)		# isolate TIF bits
-	jz	0f
-	o	%r6,__TI_flags(%r5)		# set TIF bits of next
-	st	%r6,__TI_flags(%r5)
-	ni	__TI_flags+3(%r4),255-_TIF_TRANSFER # clear TIF bits of prev
-0:	lm	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
+	lm	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 	br	%r14
 
 __critical_start:
@@ -181,6 +174,7 @@ sysc_stm:
 	stm	%r8,%r15,__LC_SAVE_AREA_SYNC
 	l	%r12,__LC_THREAD_INFO
 	l	%r13,__LC_SVC_NEW_PSW+4
+	lhi	%r14,_PIF_SYSCALL
 sysc_per:
 	l	%r15,__LC_KERNEL_STACK
 	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
@@ -190,8 +184,8 @@ sysc_vtime:
 	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
 	mvc	__PT_PSW(8,%r11),__LC_SVC_OLD_PSW
 	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
+	st	%r14,__PT_FLAGS(%r11)
 sysc_do_svc:
-	oi	__TI_flags+3(%r12),_TIF_SYSCALL
 	l	%r10,__TI_sysc_table(%r12)	# 31 bit system call table
 	lh	%r8,__PT_INT_CODE+2(%r11)
 	sla	%r8,2				# shift and test for svc0
@@ -207,7 +201,7 @@ sysc_nr_ok:
 	st	%r2,__PT_ORIG_GPR2(%r11)
 	st	%r7,STACK_FRAME_OVERHEAD(%r15)
 	l	%r9,0(%r8,%r10)			# get system call addr.
-	tm	__TI_flags+2(%r12),_TIF_TRACE >> 8
+	tm	__TI_flags+3(%r12),_TIF_TRACE
 	jnz	sysc_tracesys
 	basr	%r14,%r9			# call sys_xxxx
 	st	%r2,__PT_R2(%r11)		# store return value
@@ -217,9 +211,12 @@ sysc_return:
 sysc_tif:
 	tm	__PT_PSW+1(%r11),0x01		# returning to user ?
 	jno	sysc_restore
-	tm	__TI_flags+3(%r12),_TIF_WORK_SVC
-	jnz	sysc_work			# check for work
-	ni	__TI_flags+3(%r12),255-_TIF_SYSCALL
+	tm	__PT_FLAGS+3(%r11),_PIF_WORK
+	jnz	sysc_work
+	tm	__TI_flags+3(%r12),_TIF_WORK
+	jnz	sysc_work			# check for thread work
+	tm	__LC_CPU_FLAGS+3,_CIF_WORK
+	jnz	sysc_work
 sysc_restore:
 	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
 	stpt	__LC_EXIT_TIMER
@@ -231,17 +228,17 @@ sysc_done:
 # One of the work bits is on. Find out which one.
 #
 sysc_work:
-	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
+	tm	__LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
 	jo	sysc_mcck_pending
 	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
 	jo	sysc_reschedule
-	tm	__TI_flags+3(%r12),_TIF_PER_TRAP
+	tm	__PT_FLAGS+3(%r11),_PIF_PER_TRAP
 	jo	sysc_singlestep
 	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
 	jo	sysc_sigpending
 	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
 	jo	sysc_notify_resume
-	tm	__TI_flags+3(%r12),_TIF_ASCE
+	tm	__LC_CPU_FLAGS+3,_CIF_ASCE
 	jo	sysc_uaccess
 	j	sysc_return		# beware of critical section cleanup
@@ -254,7 +251,7 @@ sysc_reschedule:
 	br	%r1			# call schedule
 
 #
-# _TIF_MCCK_PENDING is set, call handler
+# _CIF_MCCK_PENDING is set, call handler
 #
 sysc_mcck_pending:
 	l	%r1,BASED(.Lhandle_mcck)
@@ -262,10 +259,10 @@ sysc_mcck_pending:
 	br	%r1			# TIF bit will be cleared by handler
 
 #
-# _TIF_ASCE is set, load user space asce
+# _CIF_ASCE is set, load user space asce
 #
 sysc_uaccess:
-	ni	__TI_flags+3(%r12),255-_TIF_ASCE
+	ni	__LC_CPU_FLAGS+3,255-_CIF_ASCE
 	lctl	%c1,%c1,__LC_USER_ASCE	# load primary asce
 	j	sysc_return
@@ -276,7 +273,7 @@ sysc_sigpending:
 	lr	%r2,%r11		# pass pointer to pt_regs
 	l	%r1,BASED(.Ldo_signal)
 	basr	%r14,%r1		# call do_signal
-	tm	__TI_flags+3(%r12),_TIF_SYSCALL
+	tm	__PT_FLAGS+3(%r11),_PIF_SYSCALL
 	jno	sysc_return
 	lm	%r2,%r7,__PT_R2(%r11)	# load svc arguments
 	l	%r10,__TI_sysc_table(%r12)	# 31 bit system call table
@@ -297,10 +294,10 @@ sysc_notify_resume:
 	br	%r1			# call do_notify_resume
 
 #
-# _TIF_PER_TRAP is set, call do_per_trap
+# _PIF_PER_TRAP is set, call do_per_trap
 #
 sysc_singlestep:
-	ni	__TI_flags+3(%r12),255-_TIF_PER_TRAP
+	ni	__PT_FLAGS+3(%r11),255-_PIF_PER_TRAP
 	lr	%r2,%r11		# pass pointer to pt_regs
 	l	%r1,BASED(.Ldo_per_trap)
 	la	%r14,BASED(sysc_return)
@@ -330,7 +327,7 @@ sysc_tracego:
 	basr	%r14,%r9		# call sys_xxx
 	st	%r2,__PT_R2(%r11)	# store return value
sysc_tracenogo:
-	tm	__TI_flags+2(%r12),_TIF_TRACE >> 8
+	tm	__TI_flags+3(%r12),_TIF_TRACE
 	jz	sysc_return
 	l	%r1,BASED(.Ltrace_exit)
 	lr	%r2,%r11		# pass pointer to pt_regs
@@ -384,15 +381,16 @@ ENTRY(pgm_check_handler)
 	stm	%r8,%r9,__PT_PSW(%r11)
 	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
 	mvc	__PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE
+	xc	__PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
 	tm	__LC_PGM_ILC+3,0x80	# check for per exception
 	jz	0f
 	l	%r1,__TI_task(%r12)
 	tmh	%r8,0x0001		# kernel per event ?
 	jz	pgm_kprobe
-	oi	__TI_flags+3(%r12),_TIF_PER_TRAP
+	oi	__PT_FLAGS+3(%r11),_PIF_PER_TRAP
 	mvc	__THREAD_per_address(4,%r1),__LC_PER_ADDRESS
-	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CAUSE
-	mvc	__THREAD_per_paid(1,%r1),__LC_PER_PAID
+	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CODE
+	mvc	__THREAD_per_paid(1,%r1),__LC_PER_ACCESS_ID
0:	REENABLE_IRQS
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
 	l	%r1,BASED(.Ljump_table)
@@ -420,9 +418,9 @@ pgm_kprobe:
 # single stepped system call
 #
pgm_svcper:
-	oi	__TI_flags+3(%r12),_TIF_PER_TRAP
 	mvc	__LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
 	mvc	__LC_RETURN_PSW+4(4),BASED(.Lsysc_per)
+	lhi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
 	lpsw	__LC_RETURN_PSW		# branch to sysc_per and enable irqs
 
/*
@@ -445,6 +443,7 @@ io_skip:
 	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
 	stm	%r8,%r9,__PT_PSW(%r11)
 	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
+	xc	__PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
 	TRACE_IRQS_OFF
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
io_loop:
@@ -466,8 +465,10 @@ io_return:
 	LOCKDEP_SYS_EXIT
 	TRACE_IRQS_ON
io_tif:
-	tm	__TI_flags+3(%r12),_TIF_WORK_INT
+	tm	__TI_flags+3(%r12),_TIF_WORK
 	jnz	io_work			# there is work to do (signals etc.)
+	tm	__LC_CPU_FLAGS+3,_CIF_WORK
+	jnz	io_work
io_restore:
 	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
 	stpt	__LC_EXIT_TIMER
@@ -477,7 +478,7 @@ io_done:
 
 #
 # There is work todo, find out in which context we have been interrupted:
-# 1) if we return to user space we can do all _TIF_WORK_INT work
+# 1) if we return to user space we can do all _TIF_WORK work
 # 2) if we return to kernel code and preemptive scheduling is enabled check
 #    the preemption counter and if it is zero call preempt_schedule_irq
 # Before any work can be done, a switch to the kernel stack is required.
@@ -520,11 +521,9 @@ io_work_user:
 
 #
 # One of the work bits is on. Find out which one.
-# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
-# and _TIF_MCCK_PENDING
 #
io_work_tif:
-	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
+	tm	__LC_CPU_FLAGS+3(%r12),_CIF_MCCK_PENDING
 	jo	io_mcck_pending
 	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
 	jo	io_reschedule
@@ -532,12 +531,12 @@ io_work_tif:
 	jo	io_sigpending
 	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
 	jo	io_notify_resume
-	tm	__TI_flags+3(%r12),_TIF_ASCE
+	tm	__LC_CPU_FLAGS+3,_CIF_ASCE
 	jo	io_uaccess
 	j	io_return		# beware of critical section cleanup
 
 #
-# _TIF_MCCK_PENDING is set, call handler
+# _CIF_MCCK_PENDING is set, call handler
 #
io_mcck_pending:
 	# TRACE_IRQS_ON already done at io_return
@@ -547,10 +546,10 @@ io_mcck_pending:
 	j	io_return
 
 #
-# _TIF_ASCE is set, load user space asce
+# _CIF_ASCE is set, load user space asce
 #
io_uaccess:
-	ni	__TI_flags+3(%r12),255-_TIF_ASCE
+	ni	__LC_CPU_FLAGS+3,255-_CIF_ASCE
 	lctl	%c1,%c1,__LC_USER_ASCE	# load primary asce
 	j	io_return
@@ -613,6 +612,7 @@ ext_skip:
 	stm	%r8,%r9,__PT_PSW(%r11)
 	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
 	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
+	xc	__PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
 	TRACE_IRQS_OFF
 	l	%r1,BASED(.Ldo_IRQ)
 	lr	%r2,%r11		# pass pointer to pt_regs
@@ -677,6 +677,7 @@ mcck_skip:
 	stm	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
 	stm	%r8,%r9,__PT_PSW(%r11)
+	xc	__PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
 	l	%r1,BASED(.Ldo_machine_check)
 	lr	%r2,%r11		# pass pointer to pt_regs
@@ -689,7 +690,7 @@ mcck_skip:
 	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	lr	%r15,%r1
 	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
-	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
+	tm	__LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
 	jno	mcck_return
 	TRACE_IRQS_OFF
 	l	%r1,BASED(.Lhandle_mcck)
@@ -842,6 +843,8 @@ cleanup_system_call:
 	stm	%r0,%r7,__PT_R0(%r9)
 	mvc	__PT_PSW(8,%r9),__LC_SVC_OLD_PSW
 	mvc	__PT_INT_CODE(4,%r9),__LC_SVC_ILC
+	xc	__PT_FLAGS(4,%r9),__PT_FLAGS(%r9)
+	mvi	__PT_FLAGS+3(%r9),_PIF_SYSCALL
 	# setup saved register 15
 	st	%r15,28(%r11)		# r15 stack pointer
 	# set new psw address and exit
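The _CIF_* tests above go to the lowcore instead of thread_info: per-cpu state such as a pending machine check no longer has to be moved between tasks in __switch_to, which is why the _TIF_TRANSFER logic could be deleted. A minimal C-side sketch of the helpers, assuming the asm/processor.h definitions of this series (bit numbers are assumptions):

/* Sketch of the per-cpu flag helpers assumed by the entry code above. */
#define CIF_MCCK_PENDING	0	/* machine check handling is pending */
#define CIF_ASCE		1	/* user asce needs to be loaded */

static inline void set_cpu_flag(int flag)
{
	S390_lowcore.cpu_flags |= (1U << flag);
}

static inline void clear_cpu_flag(int flag)
{
	S390_lowcore.cpu_flags &= ~(1U << flag);
}

static inline int test_cpu_flag(int flag)
{
	return !!(S390_lowcore.cpu_flags & (1U << flag));
}

Because cpu_flags sits at a fixed lowcore offset (__LC_CPU_FLAGS), the entry code can test it with a single tm instruction without first loading a thread_info pointer.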
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 5963e43618b..f2e674c702e 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -42,13 +42,11 @@ STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
 STACK_SIZE  = 1 << STACK_SHIFT
 STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
 
-_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-		 _TIF_MCCK_PENDING | _TIF_PER_TRAP | _TIF_ASCE)
-_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-		 _TIF_MCCK_PENDING | _TIF_ASCE)
-_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
-	      _TIF_SYSCALL_TRACEPOINT)
-_TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT)
+_TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
+_TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
+		   _TIF_SYSCALL_TRACEPOINT)
+_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE)
+_PIF_WORK	= (_PIF_PER_TRAP)
 
 #define BASED(name) name-system_call(%r13)
 
@@ -190,13 +188,7 @@ ENTRY(__switch_to)
 	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
 	mvc	__LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
 	lg	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
-	llill	%r6,_TIF_TRANSFER		# transfer TIF bits
-	ng	%r6,__TI_flags(%r4)		# isolate TIF bits
-	jz	0f
-	og	%r6,__TI_flags(%r5)		# set TIF bits of next
-	stg	%r6,__TI_flags(%r5)
-	ni	__TI_flags+7(%r4),255-_TIF_TRANSFER # clear TIF bits of prev
-0:	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
+	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 	br	%r14
 
 __critical_start:
@@ -211,6 +203,7 @@ sysc_stmg:
 	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
 	lg	%r10,__LC_LAST_BREAK
 	lg	%r12,__LC_THREAD_INFO
+	lghi	%r14,_PIF_SYSCALL
sysc_per:
 	lg	%r15,__LC_KERNEL_STACK
 	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
@@ -221,8 +214,8 @@ sysc_vtime:
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
 	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
+	stg	%r14,__PT_FLAGS(%r11)
sysc_do_svc:
-	oi	__TI_flags+7(%r12),_TIF_SYSCALL
 	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
 	llgh	%r8,__PT_INT_CODE+2(%r11)
 	slag	%r8,%r8,2			# shift and test for svc 0
@@ -238,7 +231,7 @@ sysc_nr_ok:
 	stg	%r2,__PT_ORIG_GPR2(%r11)
 	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
 	lgf	%r9,0(%r8,%r10)			# get system call add.
-	tm	__TI_flags+6(%r12),_TIF_TRACE >> 8
+	tm	__TI_flags+7(%r12),_TIF_TRACE
 	jnz	sysc_tracesys
 	basr	%r14,%r9			# call sys_xxxx
 	stg	%r2,__PT_R2(%r11)		# store return value
@@ -248,9 +241,12 @@ sysc_return:
sysc_tif:
 	tm	__PT_PSW+1(%r11),0x01		# returning to user ?
 	jno	sysc_restore
-	tm	__TI_flags+7(%r12),_TIF_WORK_SVC
+	tm	__PT_FLAGS+7(%r11),_PIF_WORK
+	jnz	sysc_work
+	tm	__TI_flags+7(%r12),_TIF_WORK
 	jnz	sysc_work			# check for work
-	ni	__TI_flags+7(%r12),255-_TIF_SYSCALL
+	tm	__LC_CPU_FLAGS+7,_CIF_WORK
+	jnz	sysc_work
sysc_restore:
 	lg	%r14,__LC_VDSO_PER_CPU
 	lmg	%r0,%r10,__PT_R0(%r11)
@@ -265,17 +261,17 @@ sysc_done:
 # One of the work bits is on. Find out which one.
 #
sysc_work:
-	tm	__TI_flags+7(%r12),_TIF_MCCK_PENDING
+	tm	__LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
 	jo	sysc_mcck_pending
 	tm	__TI_flags+7(%r12),_TIF_NEED_RESCHED
 	jo	sysc_reschedule
-	tm	__TI_flags+7(%r12),_TIF_PER_TRAP
+	tm	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
 	jo	sysc_singlestep
 	tm	__TI_flags+7(%r12),_TIF_SIGPENDING
 	jo	sysc_sigpending
 	tm	__TI_flags+7(%r12),_TIF_NOTIFY_RESUME
 	jo	sysc_notify_resume
-	tm	__TI_flags+7(%r12),_TIF_ASCE
+	tm	__LC_CPU_FLAGS+7,_CIF_ASCE
 	jo	sysc_uaccess
 	j	sysc_return		# beware of critical section cleanup
@@ -287,17 +283,17 @@ sysc_reschedule:
 	jg	schedule
 
 #
-# _TIF_MCCK_PENDING is set, call handler
+# _CIF_MCCK_PENDING is set, call handler
 #
sysc_mcck_pending:
 	larl	%r14,sysc_return
 	jg	s390_handle_mcck	# TIF bit will be cleared by handler
 
 #
-# _TIF_ASCE is set, load user space asce
+# _CIF_ASCE is set, load user space asce
 #
sysc_uaccess:
-	ni	__TI_flags+7(%r12),255-_TIF_ASCE
+	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE
 	lctlg	%c1,%c1,__LC_USER_ASCE	# load primary asce
 	j	sysc_return
@@ -307,7 +303,7 @@ sysc_uaccess:
sysc_sigpending:
 	lgr	%r2,%r11		# pass pointer to pt_regs
 	brasl	%r14,do_signal
-	tm	__TI_flags+7(%r12),_TIF_SYSCALL
+	tm	__PT_FLAGS+7(%r11),_PIF_SYSCALL
 	jno	sysc_return
 	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
 	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
@@ -327,10 +323,10 @@ sysc_notify_resume:
 	jg	do_notify_resume
 
 #
-# _TIF_PER_TRAP is set, call do_per_trap
+# _PIF_PER_TRAP is set, call do_per_trap
 #
sysc_singlestep:
-	ni	__TI_flags+7(%r12),255-_TIF_PER_TRAP
+	ni	__PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
 	lgr	%r2,%r11		# pass pointer to pt_regs
 	larl	%r14,sysc_return
 	jg	do_per_trap
@@ -357,7 +353,7 @@ sysc_tracego:
 	basr	%r14,%r9		# call sys_xxx
 	stg	%r2,__PT_R2(%r11)	# store return value
sysc_tracenogo:
-	tm	__TI_flags+6(%r12),_TIF_TRACE >> 8
+	tm	__TI_flags+7(%r12),_TIF_TRACE
 	jz	sysc_return
 	lgr	%r2,%r11		# pass pointer to pt_regs
 	larl	%r14,sysc_return
@@ -416,15 +412,16 @@ ENTRY(pgm_check_handler)
 	stmg	%r8,%r9,__PT_PSW(%r11)
 	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
 	mvc	__PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
+	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 	stg	%r10,__PT_ARGS(%r11)
 	tm	__LC_PGM_ILC+3,0x80	# check for per exception
 	jz	0f
 	tmhh	%r8,0x0001		# kernel per event ?
 	jz	pgm_kprobe
-	oi	__TI_flags+7(%r12),_TIF_PER_TRAP
+	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
 	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
-	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CAUSE
-	mvc	__THREAD_per_paid(1,%r14),__LC_PER_PAID
+	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
+	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
0:	REENABLE_IRQS
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 	larl	%r1,pgm_check_table
@@ -451,10 +448,10 @@ pgm_kprobe:
 # single stepped system call
 #
pgm_svcper:
-	oi	__TI_flags+7(%r12),_TIF_PER_TRAP
 	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
 	larl	%r14,sysc_per
 	stg	%r14,__LC_RETURN_PSW+8
+	lghi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
 	lpswe	__LC_RETURN_PSW		# branch to sysc_per and enable irqs
 
/*
@@ -479,6 +476,7 @@ io_skip:
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
 	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
+	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 	TRACE_IRQS_OFF
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
io_loop:
@@ -499,8 +497,10 @@ io_return:
 	LOCKDEP_SYS_EXIT
 	TRACE_IRQS_ON
io_tif:
-	tm	__TI_flags+7(%r12),_TIF_WORK_INT
+	tm	__TI_flags+7(%r12),_TIF_WORK
 	jnz	io_work			# there is work to do (signals etc.)
+	tm	__LC_CPU_FLAGS+7,_CIF_WORK
+	jnz	io_work
io_restore:
 	lg	%r14,__LC_VDSO_PER_CPU
 	lmg	%r0,%r10,__PT_R0(%r11)
@@ -513,7 +513,7 @@ io_done:
 
 #
 # There is work todo, find out in which context we have been interrupted:
-# 1) if we return to user space we can do all _TIF_WORK_INT work
+# 1) if we return to user space we can do all _TIF_WORK work
 # 2) if we return to kernel code and kvm is enabled check if we need to
 #    modify the psw to leave SIE
 # 3) if we return to kernel code and preemptive scheduling is enabled check
@@ -557,11 +557,9 @@ io_work_user:
 
 #
 # One of the work bits is on. Find out which one.
-# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
-# and _TIF_MCCK_PENDING
 #
io_work_tif:
-	tm	__TI_flags+7(%r12),_TIF_MCCK_PENDING
+	tm	__LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
 	jo	io_mcck_pending
 	tm	__TI_flags+7(%r12),_TIF_NEED_RESCHED
 	jo	io_reschedule
@@ -569,12 +567,12 @@ io_work_tif:
 	jo	io_sigpending
 	tm	__TI_flags+7(%r12),_TIF_NOTIFY_RESUME
 	jo	io_notify_resume
-	tm	__TI_flags+7(%r12),_TIF_ASCE
+	tm	__LC_CPU_FLAGS+7,_CIF_ASCE
 	jo	io_uaccess
 	j	io_return		# beware of critical section cleanup
 
 #
-# _TIF_MCCK_PENDING is set, call handler
+# _CIF_MCCK_PENDING is set, call handler
 #
io_mcck_pending:
 	# TRACE_IRQS_ON already done at io_return
@@ -583,10 +581,10 @@ io_mcck_pending:
 	j	io_return
 
 #
-# _TIF_ASCE is set, load user space asce
+# _CIF_ASCE is set, load user space asce
 #
io_uaccess:
-	ni	__TI_flags+7(%r12),255-_TIF_ASCE
+	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE
 	lctlg	%c1,%c1,__LC_USER_ASCE	# load primary asce
 	j	io_return
@@ -650,6 +648,7 @@ ext_skip:
 	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
 	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
 	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
+	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 	TRACE_IRQS_OFF
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 	lgr	%r2,%r11		# pass pointer to pt_regs
@@ -716,6 +715,7 @@ mcck_skip:
 	stmg	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(64,%r11),0(%r14)
 	stmg	%r8,%r9,__PT_PSW(%r11)
+	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 	lgr	%r2,%r11		# pass pointer to pt_regs
 	brasl	%r14,s390_do_machine_check
@@ -727,7 +727,7 @@ mcck_skip:
 	la	%r11,STACK_FRAME_OVERHEAD(%r1)
 	lgr	%r15,%r1
 	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
-	tm	__TI_flags+7(%r12),_TIF_MCCK_PENDING
+	tm	__LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
 	jno	mcck_return
 	TRACE_IRQS_OFF
 	brasl	%r14,s390_handle_mcck
@@ -884,6 +884,8 @@ cleanup_system_call:
 	stmg	%r0,%r7,__PT_R0(%r9)
 	mvc	__PT_PSW(16,%r9),__LC_SVC_OLD_PSW
 	mvc	__PT_INT_CODE(4,%r9),__LC_SVC_ILC
+	xc	__PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
+	mvi	__PT_FLAGS+7(%r9),_PIF_SYSCALL
 	# setup saved register r15
 	stg	%r15,56(%r11)		# r15 stack pointer
 	# set new psw address and exit
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 429afcc480c..7ba7d678451 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -437,13 +437,13 @@ ENTRY(startup_kdump)
 #if defined(CONFIG_64BIT)
 #if defined(CONFIG_MARCH_ZEC12)
-	.long 3, 0xc100efe3, 0xf46ce800, 0x00400000
+	.long 3, 0xc100efea, 0xf46ce800, 0x00400000
 #elif defined(CONFIG_MARCH_Z196)
-	.long 2, 0xc100efe3, 0xf46c0000
+	.long 2, 0xc100efea, 0xf46c0000
 #elif defined(CONFIG_MARCH_Z10)
-	.long 2, 0xc100efe3, 0xf0680000
+	.long 2, 0xc100efea, 0xf0680000
 #elif defined(CONFIG_MARCH_Z9_109)
-	.long 1, 0xc100efc3
+	.long 1, 0xc100efc2
 #elif defined(CONFIG_MARCH_Z990)
 	.long 1, 0xc0002000
 #elif defined(CONFIG_MARCH_Z900)
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index 9a99856df1c..6dbe80983a2 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -59,7 +59,6 @@ ENTRY(startup_continue)
 	.long	0			# cr13: home space segment table
 	.long	0xc0000000		# cr14: machine check handling off
 	.long	0			# cr15: linkage stack operations
-.Lmchunk:.long	memory_chunk
.Lbss_bgn:  .long __bss_start
.Lbss_end:  .long _end
.Lparmaddr: .long PARMAREA
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index c4c03381987..210e1285f75 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -55,7 +55,7 @@ void s390_handle_mcck(void)
 	local_mcck_disable();
 	mcck = __get_cpu_var(cpu_mcck);
 	memset(&__get_cpu_var(cpu_mcck), 0, sizeof(struct mcck_struct));
-	clear_thread_flag(TIF_MCCK_PENDING);
+	clear_cpu_flag(CIF_MCCK_PENDING);
 	local_mcck_enable();
 	local_irq_restore(flags);
@@ -313,7 +313,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 			 */
 			mcck->kill_task = 1;
 			mcck->mcck_code = *(unsigned long long *) mci;
-			set_thread_flag(TIF_MCCK_PENDING);
+			set_cpu_flag(CIF_MCCK_PENDING);
 		} else {
 			/*
 			 * Couldn't restore all register contents while in
@@ -352,12 +352,12 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 	if (mci->cp) {
 		/* Channel report word pending */
 		mcck->channel_report = 1;
-		set_thread_flag(TIF_MCCK_PENDING);
+		set_cpu_flag(CIF_MCCK_PENDING);
 	}
 	if (mci->w) {
 		/* Warning pending */
 		mcck->warning = 1;
-		set_thread_flag(TIF_MCCK_PENDING);
+		set_cpu_flag(CIF_MCCK_PENDING);
 	}
 	nmi_exit();
 }
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index dd145321d21..93b9ca42e5c 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -64,7 +64,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 void arch_cpu_idle(void)
 {
 	local_mcck_disable();
-	if (test_thread_flag(TIF_MCCK_PENDING)) {
+	if (test_cpu_flag(CIF_MCCK_PENDING)) {
 		local_mcck_enable();
 		local_irq_enable();
 		return;
@@ -76,7 +76,7 @@ void arch_cpu_idle(void)
 
 void arch_cpu_idle_exit(void)
 {
-	if (test_thread_flag(TIF_MCCK_PENDING))
+	if (test_cpu_flag(CIF_MCCK_PENDING))
 		s390_handle_mcck();
 }
@@ -123,7 +123,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 	memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
 	memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
 	clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
-	clear_tsk_thread_flag(p, TIF_PER_TRAP);
 	/* Initialize per thread user and system timer values */
 	ti = task_thread_info(p);
 	ti->user_timer = 0;
@@ -152,6 +151,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 	}
 	frame->childregs = *current_pt_regs();
 	frame->childregs.gprs[2] = 0;	/* child returns 0 on fork. */
+	frame->childregs.flags = 0;
 	if (new_stackp)
 		frame->childregs.gprs[15] = new_stackp;
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 1c82619eb4f..2d716734b5b 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -136,7 +136,7 @@ void ptrace_disable(struct task_struct *task)
 	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
 	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
 	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
-	clear_tsk_thread_flag(task, TIF_PER_TRAP);
+	clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
 	task->thread.per_flags = 0;
 }
@@ -813,7 +813,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
 		 * debugger stored an invalid system call number. Skip
 		 * the system call and the system call restart handling.
 		 */
-		clear_thread_flag(TIF_SYSCALL);
+		clear_pt_regs_flag(regs, PIF_SYSCALL);
 		ret = -1;
 	}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 88d1ca81e2d..1e2264b46e4 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -78,10 +78,9 @@ EXPORT_SYMBOL(console_irq);
 unsigned long elf_hwcap = 0;
 char elf_platform[ELF_PLATFORM_SIZE];
 
-struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
-
 int __initdata memory_end_set;
 unsigned long __initdata memory_end;
+unsigned long __initdata max_physmem_end;
 
 unsigned long VMALLOC_START;
 EXPORT_SYMBOL(VMALLOC_START);
@@ -212,7 +211,7 @@ static void __init conmode_default(void)
 	}
 }
 
-#ifdef CONFIG_ZFCPDUMP
+#ifdef CONFIG_CRASH_DUMP
 static void __init setup_zfcpdump(void)
 {
 	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
@@ -224,7 +223,7 @@ static void __init setup_zfcpdump(void)
 }
 #else
 static inline void setup_zfcpdump(void) {}
-#endif /* CONFIG_ZFCPDUMP */
+#endif /* CONFIG_CRASH_DUMP */
 
 /*
  * Reboot, halt and power_off stubs. They just call _machine_restart,
@@ -273,6 +272,7 @@ EXPORT_SYMBOL_GPL(pm_power_off);
 static int __init early_parse_mem(char *p)
 {
 	memory_end = memparse(p, &p);
+	memory_end &= PAGE_MASK;
 	memory_end_set = 1;
 	return 0;
 }
@@ -373,6 +373,10 @@ static void __init setup_lowcore(void)
 	mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
 	mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);
 
+#ifdef CONFIG_SMP
+	lc->spinlock_lockval = arch_spin_lockval(0);
+#endif
+
 	set_prefix((u32)(unsigned long) lc);
 	lowcore_ptr[0] = lc;
 }
@@ -401,7 +405,8 @@ static struct resource __initdata *standard_resources[] = {
 static void __init setup_resources(void)
 {
 	struct resource *res, *std_res, *sub_res;
-	int i, j;
+	struct memblock_region *reg;
+	int j;
 
 	code_resource.start = (unsigned long) &_text;
 	code_resource.end = (unsigned long) &_etext - 1;
@@ -410,24 +415,13 @@ static void __init setup_resources(void)
 	bss_resource.start = (unsigned long) &__bss_start;
 	bss_resource.end = (unsigned long) &__bss_stop - 1;
 
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		if (!memory_chunk[i].size)
-			continue;
+	for_each_memblock(memory, reg) {
 		res = alloc_bootmem_low(sizeof(*res));
 		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
-		switch (memory_chunk[i].type) {
-		case CHUNK_READ_WRITE:
-			res->name = "System RAM";
-			break;
-		case CHUNK_READ_ONLY:
-			res->name = "System ROM";
-			res->flags |= IORESOURCE_READONLY;
-			break;
-		default:
-			res->name = "reserved";
-		}
-		res->start = memory_chunk[i].addr;
-		res->end = res->start + memory_chunk[i].size - 1;
+
+		res->name = "System RAM";
+		res->start = reg->base;
+		res->end = reg->base + reg->size - 1;
 		request_resource(&iomem_resource, res);
 
 		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
@@ -451,48 +445,11 @@ static void __init setup_resources(void)
 static void __init setup_memory_end(void)
 {
 	unsigned long vmax, vmalloc_size, tmp;
-	unsigned long real_memory_size = 0;
-	int i;
-
-
-#ifdef CONFIG_ZFCPDUMP
-	if (ipl_info.type == IPL_TYPE_FCP_DUMP &&
-	    !OLDMEM_BASE && sclp_get_hsa_size()) {
-		memory_end = sclp_get_hsa_size();
-		memory_end_set = 1;
-	}
-#endif
-	memory_end &= PAGE_MASK;
-
-	/*
-	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
-	 * extra checks that HOLES_IN_ZONE would require.
-	 */
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		unsigned long start, end;
-		struct mem_chunk *chunk;
-		unsigned long align;
-
-		chunk = &memory_chunk[i];
-		if (!chunk->size)
-			continue;
-		align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
-		start = (chunk->addr + align - 1) & ~(align - 1);
-		end = (chunk->addr + chunk->size) & ~(align - 1);
-		if (start >= end)
-			memset(chunk, 0, sizeof(*chunk));
-		else {
-			chunk->addr = start;
-			chunk->size = end - start;
-		}
-		real_memory_size = max(real_memory_size,
-				       chunk->addr + chunk->size);
-	}
 
 	/* Choose kernel address space layout: 2, 3, or 4 levels. */
 #ifdef CONFIG_64BIT
 	vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
-	tmp = (memory_end ?: real_memory_size) / PAGE_SIZE;
+	tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
 	tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size;
 	if (tmp <= (1UL << 42))
 		vmax = 1UL << 42;	/* 3-level kernel page table */
@@ -520,21 +477,11 @@ static void __init setup_memory_end(void)
 	vmemmap = (struct page *) tmp;
 
 	/* Take care that memory_end is set and <= vmemmap */
-	memory_end = min(memory_end ?: real_memory_size, tmp);
-
-	/* Fixup memory chunk array to fit into 0..memory_end */
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		struct mem_chunk *chunk = &memory_chunk[i];
+	memory_end = min(memory_end ?: max_physmem_end, tmp);
+	max_pfn = max_low_pfn = PFN_DOWN(memory_end);
+	memblock_remove(memory_end, ULONG_MAX);
 
-		if (!chunk->size)
-			continue;
-		if (chunk->addr >= memory_end) {
-			memset(chunk, 0, sizeof(*chunk));
-			continue;
-		}
-		if (chunk->addr + chunk->size > memory_end)
-			chunk->size = memory_end - chunk->addr;
-	}
+	pr_notice("Max memory size: %luMB\n", memory_end >> 20);
 }
 
 static void __init setup_vmcoreinfo(void)
@@ -545,89 +492,6 @@ static void __init setup_vmcoreinfo(void)
 #ifdef CONFIG_CRASH_DUMP
 
 /*
- * Find suitable location for crashkernel memory
- */
-static unsigned long __init find_crash_base(unsigned long crash_size,
-					    char **msg)
-{
-	unsigned long crash_base;
-	struct mem_chunk *chunk;
-	int i;
-
-	if (memory_chunk[0].size < crash_size) {
-		*msg = "first memory chunk must be at least crashkernel size";
-		return 0;
-	}
-	if (OLDMEM_BASE && crash_size == OLDMEM_SIZE)
-		return OLDMEM_BASE;
-
-	for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
-		chunk = &memory_chunk[i];
-		if (chunk->size == 0)
-			continue;
-		if (chunk->type != CHUNK_READ_WRITE)
-			continue;
-		if (chunk->size < crash_size)
-			continue;
-		crash_base = (chunk->addr + chunk->size) - crash_size;
-		if (crash_base < crash_size)
-			continue;
-		if (crash_base < sclp_get_hsa_size())
-			continue;
-		if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE)
-			continue;
-		return crash_base;
-	}
-	*msg = "no suitable area found";
-	return 0;
-}
-
-/*
- * Check if crash_base and crash_size is valid
- */
-static int __init verify_crash_base(unsigned long crash_base,
-				    unsigned long crash_size,
-				    char **msg)
-{
-	struct mem_chunk *chunk;
-	int i;
-
-	/*
-	 * Because we do the swap to zero, we must have at least 'crash_size'
-	 * bytes free space before crash_base
-	 */
-	if (crash_size > crash_base) {
-		*msg = "crashkernel offset must be greater than size";
-		return -EINVAL;
-	}
-
-	/* First memory chunk must be at least crash_size */
-	if (memory_chunk[0].size < crash_size) {
-		*msg = "first memory chunk must be at least crashkernel size";
-		return -EINVAL;
-	}
-	/* Check if we fit into the respective memory chunk */
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		chunk = &memory_chunk[i];
-		if (chunk->size == 0)
-			continue;
-		if (crash_base < chunk->addr)
-			continue;
-		if (crash_base >= chunk->addr + chunk->size)
-			continue;
-		/* we have found the memory chunk */
-		if (crash_base + crash_size > chunk->addr + chunk->size) {
-			*msg = "selected memory chunk is too small for "
-				"crashkernel memory";
-			return -EINVAL;
-		}
-		return 0;
-	}
-	*msg = "invalid memory range specified";
-	return -EINVAL;
-}
-
-/*
  * When kdump is enabled, we have to ensure that no memory from
  * the area [0 - crashkernel memory size] and
  * [crashk_res.start - crashk_res.end] is set offline.
@@ -653,23 +517,44 @@ static struct notifier_block kdump_mem_nb = {
 #endif
 
 /*
+ * Make sure that the area behind memory_end is protected
+ */
+static void reserve_memory_end(void)
+{
+#ifdef CONFIG_CRASH_DUMP
+	if (ipl_info.type == IPL_TYPE_FCP_DUMP &&
+	    !OLDMEM_BASE && sclp_get_hsa_size()) {
+		memory_end = sclp_get_hsa_size();
+		memory_end &= PAGE_MASK;
+		memory_end_set = 1;
+	}
+#endif
+	if (!memory_end_set)
+		return;
+	memblock_reserve(memory_end, ULONG_MAX);
+}
+
+/*
  * Make sure that oldmem, where the dump is stored, is protected
  */
 static void reserve_oldmem(void)
 {
 #ifdef CONFIG_CRASH_DUMP
-	unsigned long real_size = 0;
-	int i;
-
-	if (!OLDMEM_BASE)
-		return;
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		struct mem_chunk *chunk = &memory_chunk[i];
+	if (OLDMEM_BASE)
+		/* Forget all memory above the running kdump system */
+		memblock_reserve(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
+#endif
+}
 
-		real_size = max(real_size, chunk->addr + chunk->size);
-	}
-	create_mem_hole(memory_chunk, OLDMEM_BASE, OLDMEM_SIZE);
-	create_mem_hole(memory_chunk, OLDMEM_SIZE, real_size - OLDMEM_SIZE);
+/*
+ * Make sure that oldmem, where the dump is stored, is protected
+ */
+static void remove_oldmem(void)
+{
+#ifdef CONFIG_CRASH_DUMP
+	if (OLDMEM_BASE)
+		/* Forget all memory above the running kdump system */
+		memblock_remove(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
 #endif
 }
 
@@ -680,167 +565,132 @@ static void __init reserve_crashkernel(void)
 {
 #ifdef CONFIG_CRASH_DUMP
 	unsigned long long crash_base, crash_size;
-	char *msg = NULL;
+	phys_addr_t low, high;
 	int rc;
 
 	rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
			       &crash_base);
-	if (rc || crash_size == 0)
-		return;
+
 	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
 	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
-	if (register_memory_notifier(&kdump_mem_nb))
+	if (rc || crash_size == 0)
 		return;
-	if (!crash_base)
-		crash_base = find_crash_base(crash_size, &msg);
-	if (!crash_base) {
-		pr_info("crashkernel reservation failed: %s\n", msg);
-		unregister_memory_notifier(&kdump_mem_nb);
+
+	if (memblock.memory.regions[0].size < crash_size) {
+		pr_info("crashkernel reservation failed: %s\n",
+			"first memory chunk must be at least crashkernel size");
 		return;
 	}
-	if (verify_crash_base(crash_base, crash_size, &msg)) {
-		pr_info("crashkernel reservation failed: %s\n", msg);
-		unregister_memory_notifier(&kdump_mem_nb);
+
+	low = crash_base ?: OLDMEM_BASE;
+	high = low + crash_size;
+	if (low >= OLDMEM_BASE && high <= OLDMEM_BASE + OLDMEM_SIZE) {
+		/* The crashkernel fits into OLDMEM, reuse OLDMEM */
+		crash_base = low;
+	} else {
+		/* Find suitable area in free memory */
+		low = max_t(unsigned long, crash_size, sclp_get_hsa_size());
+		high = crash_base ? crash_base + crash_size : ULONG_MAX;
+
+		if (crash_base && crash_base < low) {
+			pr_info("crashkernel reservation failed: %s\n",
+				"crash_base too low");
+			return;
+		}
+		low = crash_base ?: low;
+		crash_base = memblock_find_in_range(low, high, crash_size,
+						    KEXEC_CRASH_MEM_ALIGN);
+	}
+
+	if (!crash_base) {
+		pr_info("crashkernel reservation failed: %s\n",
			"no suitable area found");
 		return;
 	}
+
+	if (register_memory_notifier(&kdump_mem_nb))
+		return;
+
 	if (!OLDMEM_BASE && MACHINE_IS_VM)
 		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
 	crashk_res.start = crash_base;
 	crashk_res.end = crash_base + crash_size - 1;
 	insert_resource(&iomem_resource, &crashk_res);
-	create_mem_hole(memory_chunk, crash_base, crash_size);
+	memblock_remove(crash_base, crash_size);
 	pr_info("Reserving %lluMB of memory at %lluMB "
		"for crashkernel (System RAM: %luMB)\n",
-		crash_size >> 20, crash_base >> 20, memory_end >> 20);
+		crash_size >> 20, crash_base >> 20,
+		(unsigned long)memblock.memory.total_size >> 20);
 	os_info_crashkernel_add(crash_base, crash_size);
 #endif
 }
 
-static void __init setup_memory(void)
+/*
+ * Reserve the initrd from being used by memblock
+ */
+static void __init reserve_initrd(void)
 {
-	unsigned long bootmap_size;
-	unsigned long start_pfn, end_pfn;
-	int i;
+#ifdef CONFIG_BLK_DEV_INITRD
+	initrd_start = INITRD_START;
+	initrd_end = initrd_start + INITRD_SIZE;
+	memblock_reserve(INITRD_START, INITRD_SIZE);
+#endif
+}
 
-	/*
-	 * partially used pages are not usable - thus
-	 * we are rounding upwards:
-	 */
+/*
+ * Check for initrd being in usable memory
+ */
+static void __init check_initrd(void)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (INITRD_START && INITRD_SIZE &&
+	    !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
+		pr_err("initrd does not fit memory.\n");
+		memblock_free(INITRD_START, INITRD_SIZE);
+		initrd_start = initrd_end = 0;
+	}
+#endif
+}
+
+/*
+ * Reserve all kernel text
+ */
+static void __init reserve_kernel(void)
+{
+	unsigned long start_pfn;
 
 	start_pfn = PFN_UP(__pa(&_end));
-	end_pfn = max_pfn = PFN_DOWN(memory_end);
 
-#ifdef CONFIG_BLK_DEV_INITRD
 	/*
-	 * Move the initrd in case the bitmap of the bootmem allocater
-	 * would overwrite it.
+	 * Reserve memory used for lowcore/command line/kernel image.
	 */
+	memblock_reserve(0, (unsigned long)_ehead);
+	memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
+			 - (unsigned long)_stext);
+}
 
-	if (INITRD_START && INITRD_SIZE) {
-		unsigned long bmap_size;
-		unsigned long start;
-
-		bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1);
-		bmap_size = PFN_PHYS(bmap_size);
-
-		if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
-			start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;
-
+static void __init reserve_elfcorehdr(void)
+{
 #ifdef CONFIG_CRASH_DUMP
-			if (OLDMEM_BASE) {
-				/* Move initrd behind kdump oldmem */
-				if (start + INITRD_SIZE > OLDMEM_BASE &&
-				    start < OLDMEM_BASE + OLDMEM_SIZE)
-					start = OLDMEM_BASE + OLDMEM_SIZE;
-			}
-#endif
-			if (start + INITRD_SIZE > memory_end) {
-				pr_err("initrd extends beyond end of "
-				       "memory (0x%08lx > 0x%08lx) "
-				       "disabling initrd\n",
-				       start + INITRD_SIZE, memory_end);
-				INITRD_START = INITRD_SIZE = 0;
-			} else {
-				pr_info("Moving initrd (0x%08lx -> "
-					"0x%08lx, size: %ld)\n",
-					INITRD_START, start, INITRD_SIZE);
-				memmove((void *) start, (void *) INITRD_START,
-					INITRD_SIZE);
-				INITRD_START = start;
-			}
-		}
-	}
+	if (is_kdump_kernel())
+		memblock_reserve(elfcorehdr_addr - OLDMEM_BASE,
+				 PAGE_ALIGN(elfcorehdr_size));
 #endif
+}
 
-	/*
-	 * Initialize the boot-time allocator
-	 */
-	bootmap_size = init_bootmem(start_pfn, end_pfn);
+static void __init setup_memory(void)
+{
+	struct memblock_region *reg;
 
 	/*
-	 * Register RAM areas with the bootmem allocator.
+	 * Init storage key for present memory
	 */
-
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		unsigned long start_chunk, end_chunk, pfn;
-
-		if (!memory_chunk[i].size)
-			continue;
-		start_chunk = PFN_DOWN(memory_chunk[i].addr);
-		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
-		end_chunk = min(end_chunk, end_pfn);
-		if (start_chunk >= end_chunk)
-			continue;
-		memblock_add_node(PFN_PHYS(start_chunk),
-				  PFN_PHYS(end_chunk - start_chunk), 0);
-		pfn = max(start_chunk, start_pfn);
-		storage_key_init_range(PFN_PHYS(pfn), PFN_PHYS(end_chunk));
+	for_each_memblock(memory, reg) {
+		storage_key_init_range(reg->base, reg->base + reg->size);
	}
-
 	psw_set_key(PAGE_DEFAULT_KEY);
 
-	free_bootmem_with_active_regions(0, max_pfn);
-
-	/*
-	 * Reserve memory used for lowcore/command line/kernel image.
-	 */
-	reserve_bootmem(0, (unsigned long)_ehead, BOOTMEM_DEFAULT);
-	reserve_bootmem((unsigned long)_stext,
-			PFN_PHYS(start_pfn) - (unsigned long)_stext,
-			BOOTMEM_DEFAULT);
-	/*
-	 * Reserve the bootmem bitmap itself as well. We do this in two
-	 * steps (first step was init_bootmem()) because this catches
-	 * the (very unlikely) case of us accidentally initializing the
-	 * bootmem allocator with an invalid RAM area.
-	 */
-	reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size,
-			BOOTMEM_DEFAULT);
-
-#ifdef CONFIG_CRASH_DUMP
-	if (crashk_res.start)
-		reserve_bootmem(crashk_res.start,
-				crashk_res.end - crashk_res.start + 1,
-				BOOTMEM_DEFAULT);
-	if (is_kdump_kernel())
-		reserve_bootmem(elfcorehdr_addr - OLDMEM_BASE,
-				PAGE_ALIGN(elfcorehdr_size), BOOTMEM_DEFAULT);
-#endif
-#ifdef CONFIG_BLK_DEV_INITRD
-	if (INITRD_START && INITRD_SIZE) {
-		if (INITRD_START + INITRD_SIZE <= memory_end) {
-			reserve_bootmem(INITRD_START, INITRD_SIZE,
-					BOOTMEM_DEFAULT);
-			initrd_start = INITRD_START;
-			initrd_end = initrd_start + INITRD_SIZE;
-		} else {
-			pr_err("initrd extends beyond end of "
-			       "memory (0x%08lx > 0x%08lx) "
-			       "disabling initrd\n",
-			       initrd_start + INITRD_SIZE, memory_end);
-			initrd_start = initrd_end = 0;
-		}
-	}
-#endif
+	/* Only cosmetics */
+	memblock_enforce_memory_limit(memblock_end_of_DRAM());
 }
 
 /*
@@ -989,23 +839,46 @@ void __init setup_arch(char **cmdline_p)
 
 	ROOT_DEV = Root_RAM0;
 
+	/* Is init_mm really needed? */
 	init_mm.start_code = PAGE_OFFSET;
 	init_mm.end_code = (unsigned long) &_etext;
 	init_mm.end_data = (unsigned long) &_edata;
 	init_mm.brk = (unsigned long) &_end;
 
 	parse_early_param();
-	detect_memory_layout(memory_chunk, memory_end);
 	os_info_init();
 	setup_ipl();
+
+	/* Do some memory reservations *before* memory is added to memblock */
+	reserve_memory_end();
 	reserve_oldmem();
+	reserve_kernel();
+	reserve_initrd();
+	reserve_elfcorehdr();
+	memblock_allow_resize();
+
+	/* Get information about *all* installed memory */
+	detect_memory_memblock();
+
+	remove_oldmem();
+
+	/*
+	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
+	 * extra checks that HOLES_IN_ZONE would require.
+	 *
+	 * Is this still required?
+	 */
+	memblock_trim_memory(1UL << (MAX_ORDER - 1 + PAGE_SHIFT));
+
 	setup_memory_end();
-	reserve_crashkernel();
 	setup_memory();
+
+	check_initrd();
+	reserve_crashkernel();
+
 	setup_resources();
 	setup_vmcoreinfo();
 	setup_lowcore();
-
 	smp_fill_possible_mask();
 	cpu_init();
 	s390_init_cpu_topology();
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index d8fd508ccd1..42b49f9e19b 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -113,7 +113,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
 		sizeof(current->thread.fp_regs));
 
 	restore_fp_regs(current->thread.fp_regs.fprs);
-	clear_thread_flag(TIF_SYSCALL);	/* No longer in a system call */
+	clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
 	return 0;
 }
@@ -356,7 +356,7 @@ void do_signal(struct pt_regs *regs)
	 * call information.
	 */
 	current_thread_info()->system_call =
-		test_thread_flag(TIF_SYSCALL) ? regs->int_code : 0;
+		test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0;
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 
 	if (signr > 0) {
@@ -384,7 +384,7 @@ void do_signal(struct pt_regs *regs)
 			}
 		}
 		/* No longer in a system call */
-		clear_thread_flag(TIF_SYSCALL);
+		clear_pt_regs_flag(regs, PIF_SYSCALL);
 
 		if (is_compat_task())
 			handle_signal32(signr, &ka, &info, oldset, regs);
@@ -394,7 +394,7 @@ void do_signal(struct pt_regs *regs)
 	}
 
 	/* No handlers present - check for system call restart */
-	clear_thread_flag(TIF_SYSCALL);
+	clear_pt_regs_flag(regs, PIF_SYSCALL);
 	if (current_thread_info()->system_call) {
 		regs->int_code = current_thread_info()->system_call;
 		switch (regs->gprs[2]) {
@@ -407,9 +407,9 @@ void do_signal(struct pt_regs *regs)
 		case -ERESTARTNOINTR:
 			/* Restart system call with magic TIF bit. */
 			regs->gprs[2] = regs->orig_gpr2;
-			set_thread_flag(TIF_SYSCALL);
+			set_pt_regs_flag(regs, PIF_SYSCALL);
 			if (test_thread_flag(TIF_SINGLE_STEP))
-				set_thread_flag(TIF_PER_TRAP);
+				clear_pt_regs_flag(regs, PIF_PER_TRAP);
 			break;
 		}
 	}
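The setup_arch() reordering above is the heart of the memblock conversion: every reservation is recorded before detect_memory_memblock() adds storage, so early allocations can never land in the kernel image, the initrd, or the kdump oldmem area. A condensed sketch of the resulting bring-up order (function names exactly as in the hunks above):

/* Condensed sketch of the new memory bring-up order in setup_arch(). */
reserve_memory_end();		/* zfcpdump: clamp memory_end at the HSA size */
reserve_oldmem();		/* kdump: protect everything above oldmem */
reserve_kernel();		/* lowcore, command line, kernel image */
reserve_initrd();
reserve_elfcorehdr();
memblock_allow_resize();	/* region arrays may be reallocated from now on */
detect_memory_memblock();	/* add *all* installed memory */
remove_oldmem();		/* kdump: drop memory above the crash kernel */
memblock_trim_memory(1UL << (MAX_ORDER - 1 + PAGE_SHIFT));
setup_memory_end();		/* memblock_remove() everything above memory_end */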
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 86e65ec3422..243c7e51260 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -170,6 +170,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 	lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
 		- STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
 	lc->cpu_nr = cpu;
+	lc->spinlock_lockval = arch_spin_lockval(cpu);
 #ifndef CONFIG_64BIT
 	if (MACHINE_HAS_IEEE) {
 		lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
@@ -226,6 +227,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
 	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
 	atomic_inc(&init_mm.context.attach_count);
 	lc->cpu_nr = cpu;
+	lc->spinlock_lockval = arch_spin_lockval(cpu);
 	lc->percpu_offset = __per_cpu_offset[cpu];
 	lc->kernel_asce = S390_lowcore.kernel_asce;
 	lc->machine_flags = S390_lowcore.machine_flags;
@@ -403,15 +405,6 @@ void smp_send_stop(void)
 }
 
 /*
- * Stop the current cpu.
- */
-void smp_stop_cpu(void)
-{
-	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
-	for (;;) ;
-}
-
-/*
  * This is the main routine where commands issued by other
  * cpus are handled.
  */
@@ -519,7 +512,7 @@ void smp_ctl_clear_bit(int cr, int bit)
 }
 EXPORT_SYMBOL(smp_ctl_clear_bit);
 
-#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)
+#ifdef CONFIG_CRASH_DUMP
 
 static void __init smp_get_save_area(int cpu, u16 address)
 {
@@ -534,14 +527,12 @@ static void __init smp_get_save_area(int cpu, u16 address)
 	save_area = dump_save_area_create(cpu);
 	if (!save_area)
 		panic("could not allocate memory for save area\n");
-#ifdef CONFIG_CRASH_DUMP
 	if (address == boot_cpu_address) {
 		/* Copy the registers of the boot cpu. */
 		copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
 				 SAVE_AREA_BASE - PAGE_SIZE, 0);
 		return;
 	}
-#endif
 	/* Get the registers of a non-boot cpu. */
 	__pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL);
 	memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area));
@@ -558,11 +549,11 @@ int smp_store_status(int cpu)
 	return 0;
 }
 
-#else /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */
+#else /* CONFIG_CRASH_DUMP */
 
 static inline void smp_get_save_area(int cpu, u16 address) { }
 
-#endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */
+#endif /* CONFIG_CRASH_DUMP */
 
 void smp_cpu_set_polarization(int cpu, int val)
 {
@@ -809,6 +800,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 void __init smp_setup_processor_id(void)
 {
 	S390_lowcore.cpu_nr = 0;
+	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
 }
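The new spinlock_lockval initialization pairs with the spinlock rework of the same kernel series, in which a taken lock stores the owner's lockval instead of a plain 1. A sketch under that assumption; the ~cpu encoding and the _raw_compare_and_swap() name are taken from the 3.16-era asm/spinlock.h and are not part of this diff:

/* Sketch: 0 always means "unlocked", even for cpu 0, because the
 * lockval is assumed to be the bitwise complement of the cpu number. */
static inline int arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	/* acquire by swapping 0 -> this cpu's lockval (illustrative only) */
	return _raw_compare_and_swap(&lp->lock, 0,
				     S390_lowcore.spinlock_lockval);
}

Seeding lc->spinlock_lockval in pcpu_alloc_lowcore(), pcpu_prepare_secondary() and smp_setup_processor_id() ensures the value is valid before the first lock is taken on that cpu.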
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 386d37a228b..0931b110c82 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -226,7 +226,7 @@ void update_vsyscall(struct timekeeper *tk)
 	vdso_data->wtom_clock_sec =
 		tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
 	vdso_data->wtom_clock_nsec = tk->xtime_nsec +
-		+ (tk->wall_to_monotonic.tv_nsec << tk->shift);
+		+ ((u64) tk->wall_to_monotonic.tv_nsec << tk->shift);
 	nsecps = (u64) NSEC_PER_SEC << tk->shift;
 	while (vdso_data->wtom_clock_nsec >= nsecps) {
 		vdso_data->wtom_clock_nsec -= nsecps;
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 6298fed11ce..355a16c5570 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -333,7 +333,9 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
 		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
 	nr_masks = max(nr_masks, 1);
 	for (i = 0; i < nr_masks; i++) {
-		mask->next = alloc_bootmem(sizeof(struct mask_info));
+		mask->next = alloc_bootmem_align(
+			roundup_pow_of_two(sizeof(struct mask_info)),
+			roundup_pow_of_two(sizeof(struct mask_info)));
 		mask = mask->next;
 	}
 }
@@ -443,6 +445,23 @@ int topology_cpu_init(struct cpu *cpu)
 	return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
 }
 
+const struct cpumask *cpu_coregroup_mask(int cpu)
+{
+	return &cpu_topology[cpu].core_mask;
+}
+
+static const struct cpumask *cpu_book_mask(int cpu)
+{
+	return &cpu_topology[cpu].book_mask;
+}
+
+static struct sched_domain_topology_level s390_topology[] = {
+	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
+	{ cpu_book_mask, SD_INIT_NAME(BOOK) },
+	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
+	{ NULL, },
+};
+
 static int __init topology_init(void)
 {
 	if (!MACHINE_HAS_TOPOLOGY) {
@@ -451,6 +470,9 @@ static int __init topology_init(void)
 	}
 	set_topology_timer();
 out:
+
+	set_sched_topology(s390_topology);
+
 	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
 }
 device_initcall(topology_init);
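The s390_topology[] table above hooks into the generic scheduler via set_sched_topology(): the scheduler walks the array up to the NULL-terminated sentinel entry and builds one sched_domain level per row from the cpumask returned by each ->mask callback. A hedged consumer sketch, not the scheduler's actual code:

/* Sketch: how a NULL-terminated topology table is conceptually consumed. */
static void walk_topology(struct sched_domain_topology_level *tl, int cpu)
{
	for (; tl->mask; tl++) {
		const struct cpumask *span = tl->mask(cpu);
		/* one sched_domain spanning 'span' is built per level */
	}
}

With explicit MC, BOOK and DIE levels, load balancing can prefer to keep work inside a core group or a book before spilling across the whole machine.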