From 6551fbdfd8b85d1ab5822ac98abb4fb449bcfae0 Mon Sep 17 00:00:00 2001
From: Martin Schwidefsky
Date: Thu, 28 Feb 2013 16:28:41 +0100
Subject: s390: critical section cleanup vs. machine checks

The current machine check code uses the registers stored by the machine
in the lowcore at __LC_GPREGS_SAVE_AREA as the registers of the
interrupted context. The registers 0-7 of a user process can get
clobbered if a machine check interrupts the execution of a critical
section in entry[64].S.

The reason is that the critical section cleanup code may need to modify
the PSW and the registers of the previous context to get to the end of
a critical section. If registers 0-7 have to be replaced, the relevant
copy will be in the registers, which invalidates the copy in the
lowcore. The machine check handler needs to explicitly store registers
0-7 to the stack.

Cc: stable@vger.kernel.org
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/entry.S   | 3 ++-
 arch/s390/kernel/entry64.S | 5 +++--
 2 files changed, 5 insertions(+), 3 deletions(-)

(limited to 'arch/s390/kernel')

diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 55022852326..94feff7d613 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -636,7 +636,8 @@ ENTRY(mcck_int_handler)
 	UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
 mcck_skip:
 	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
-	mvc	__PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA
+	stm	%r0,%r7,__PT_R0(%r11)
+	mvc	__PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
 	stm	%r8,%r9,__PT_PSW(%r11)
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
 	l	%r1,BASED(.Ldo_machine_check)
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 9c837c10129..2e6d60c55f9 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -678,8 +678,9 @@ ENTRY(mcck_int_handler)
 	UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
 	LAST_BREAK %r14
 mcck_skip:
-	lghi	%r14,__LC_GPREGS_SAVE_AREA
-	mvc	__PT_R0(128,%r11),0(%r14)
+	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
+	stmg	%r0,%r7,__PT_R0(%r11)
+	mvc	__PT_R8(64,%r11),0(%r14)
 	stmg	%r8,%r9,__PT_PSW(%r11)
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 	lgr	%r2,%r11			# pass pointer to pt_regs
-- 
cgit v1.2.3-70-g09d2

From a7bb1ae749e8051434e54936dcefd37ef1cfa753 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Thu, 28 Feb 2013 11:16:26 +0100
Subject: s390/mm: fix vmemmap size calculation

The size of the vmemmap must be a multiple of PAGES_PER_SECTION, since
the common code always initializes the vmemmap in such pieces. So we
must round up so that the vmemmap does not end up too small.

Fixes an IPL crash on 31 bit with more than 1920MB.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/setup.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/s390/kernel')

diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index a5360de85ec..29268859d8e 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -571,6 +571,8 @@ static void __init setup_memory_end(void)
 
 	/* Split remaining virtual space between 1:1 mapping & vmemmap array */
 	tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
+	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
+	tmp = SECTION_ALIGN_UP(tmp);
 	tmp = VMALLOC_START - tmp * sizeof(struct page);
 	tmp &= ~((vmax >> 11) - 1);	/* align to page table level */
 	tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
-- 
cgit v1.2.3-70-g09d2
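
A minimal C sketch of the rounding the setup.c change relies on, assuming
SECTION_ALIGN_UP rounds a page count up to the next PAGES_PER_SECTION
boundary as in the kernel's sparse-memory headers (the PAGES_PER_SECTION
value below is illustrative, not the real s390 configuration):

    #include <stdio.h>

    /* Illustrative section size; the real value depends on the kernel config. */
    #define PAGES_PER_SECTION   (1UL << 8)
    /* Round a page count up to the next multiple of PAGES_PER_SECTION. */
    #define SECTION_ALIGN_UP(x) (((x) + PAGES_PER_SECTION - 1) & ~(PAGES_PER_SECTION - 1))

    int main(void)
    {
            /* Pretend the vmemmap would need this many struct pages. */
            unsigned long pages = 1000;

            /* Without rounding, the last partial section would not be covered. */
            printf("raw:     %lu pages\n", pages);
            printf("rounded: %lu pages\n", SECTION_ALIGN_UP(pages));
            return 0;
    }

With PAGES_PER_SECTION = 256 this prints 1000 and 1024: the vmemmap is sized
to cover whole sections, which is what the common code expects when it
initializes the vmemmap section by section.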