Diffstat (limited to 'arch/powerpc/kernel/entry_64.S')
-rw-r--r-- | arch/powerpc/kernel/entry_64.S | 60
1 files changed, 28 insertions, 32 deletions
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 748e74fcf54..2551c0884af 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -28,6 +28,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/cputable.h>
 #include <asm/firmware.h>
+#include <asm/bug.h>
 
 /*
  * System calls.
@@ -87,15 +88,19 @@ system_call_common:
 	addi	r9,r1,STACK_FRAME_OVERHEAD
 	ld	r11,exception_marker@toc(r2)
 	std	r11,-16(r9)		/* "regshere" marker */
+	li	r10,1
+	stb	r10,PACASOFTIRQEN(r13)
+	stb	r10,PACAHARDIRQEN(r13)
+	std	r10,SOFTE(r1)
 #ifdef CONFIG_PPC_ISERIES
 BEGIN_FW_FTR_SECTION
 	/* Hack for handling interrupts when soft-enabling on iSeries */
 	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
 	andi.	r10,r12,MSR_PR		/* from kernel */
 	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
-	beq	hardware_interrupt_entry
-	lbz	r10,PACAPROCENABLED(r13)
-	std	r10,SOFTE(r1)
+	bne	2f
+	b	hardware_interrupt_entry
+2:
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif
 	mfmsr	r11
@@ -460,9 +465,9 @@ _GLOBAL(ret_from_except_lite)
 #endif
 
 restore:
+	ld	r5,SOFTE(r1)
 #ifdef CONFIG_PPC_ISERIES
 BEGIN_FW_FTR_SECTION
-	ld	r5,SOFTE(r1)
 	cmpdi	0,r5,0
 	beq	4f
 	/* Check for pending interrupts (iSeries) */
@@ -472,21 +477,25 @@ BEGIN_FW_FTR_SECTION
 	beq+	4f			/* skip do_IRQ if no interrupts */
 
 	li	r3,0
-	stb	r3,PACAPROCENABLED(r13)	/* ensure we are soft-disabled */
+	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
 	ori	r10,r10,MSR_EE
 	mtmsrd	r10			/* hard-enable again */
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.do_IRQ
 	b	.ret_from_except_lite		/* loop back and handle more */
-
-4:	stb	r5,PACAPROCENABLED(r13)
+4:
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif
+	stb	r5,PACASOFTIRQEN(r13)
 
 	ld	r3,_MSR(r1)
 	andi.	r0,r3,MSR_RI
 	beq-	unrecov_restore
 
+	/* extract EE bit and use it to restore paca->hard_enabled */
+	rldicl	r4,r3,49,63		/* r0 = (r3 >> 15) & 1 */
+	stb	r4,PACAHARDIRQEN(r13)
+
 	andi.	r0,r3,MSR_PR
 
 	/*
@@ -538,25 +547,15 @@ do_work:
 	/* Check that preempt_count() == 0 and interrupts are enabled */
 	lwz	r8,TI_PREEMPT(r9)
 	cmpwi	cr1,r8,0
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
 	ld	r0,SOFTE(r1)
 	cmpdi	r0,0
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
-BEGIN_FW_FTR_SECTION
-	andi.	r0,r3,MSR_EE
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
 	crandc	eq,cr1*4+eq,eq
 	bne	restore
 	/* here we are preempting the current task */
 1:
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
 	li	r0,1
-	stb	r0,PACAPROCENABLED(r13)
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
+	stb	r0,PACASOFTIRQEN(r13)
+	stb	r0,PACAHARDIRQEN(r13)
 	ori	r10,r10,MSR_EE
 	mtmsrd	r10,1	/* reenable interrupts */
 	bl	.preempt_schedule
@@ -636,20 +635,21 @@ _GLOBAL(enter_rtas)
 	li	r0,0
 	mtcr	r0
 
+#ifdef CONFIG_BUG
 	/* There is no way it is acceptable to get here with interrupts enabled,
 	 * check it with the asm equivalent of WARN_ON
 	 */
-	mfmsr	r6
-	andi.	r0,r6,MSR_EE
+	lbz	r0,PACASOFTIRQEN(r13)
 1:	tdnei	r0,0
-.section __bug_table,"a"
-	.llong	1b,__LINE__ + 0x1000000, 1f, 2f
-.previous
-.section .rodata,"a"
-1:	.asciz	__FILE__
-2:	.asciz	"enter_rtas"
-.previous
+	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
+#endif
 
+	/* Hard-disable interrupts */
+	mfmsr	r6
+	rldicl	r7,r6,48,1
+	rotldi	r7,r7,16
+	mtmsrd	r7,1
+
 	/* Unfortunately, the stack pointer and the MSR are also clobbered,
 	 * so they are saved in the PACA which allows us to restore
 	 * our original state after RTAS returns.
@@ -735,8 +735,6 @@ _STATIC(rtas_restore_regs)
 
 #endif /* CONFIG_PPC_RTAS */
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
-
 _GLOBAL(enter_prom)
 	mflr	r0
 	std	r0,16(r1)
@@ -821,5 +819,3 @@ _GLOBAL(enter_prom)
 	ld	r0,16(r1)
 	mtlr	r0
 	blr
-
-#endif /* CONFIG_PPC_MULTIPLATFORM */
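
A note on the bit manipulation visible in the diff: the patch replaces the single PACAPROCENABLED byte with two per-CPU bytes in the PACA, PACASOFTIRQEN and PACAHARDIRQEN, and derives the hard-enable state from the EE bit of the saved MSR. The "rldicl r4,r3,49,63" sequence computes (msr >> 15) & 1, and the "rldicl r7,r6,48,1; rotldi r7,r7,16" pair in enter_rtas clears MSR_EE while leaving the other MSR bits intact. The C program below is a minimal stand-alone sketch of that arithmetic only; the names fake_paca and msr_hard_disabled are illustrative inventions, not kernel definitions, and the only kernel fact assumed is that MSR_EE is bit 15 (1 << 15).

/* Stand-alone sketch (not kernel code): models the EE-bit handling above. */
#include <stdint.h>
#include <stdio.h>

#define MSR_EE (1UL << 15)		/* external-interrupt enable bit in the MSR */

/* toy stand-in for the two PACA bytes introduced by this patch */
struct fake_paca {
	uint8_t soft_enabled;		/* PACASOFTIRQEN */
	uint8_t hard_enabled;		/* PACAHARDIRQEN */
};

int main(void)
{
	struct fake_paca paca;
	uint64_t msr = 0x8000000000009032ULL;	/* arbitrary example MSR with EE set */

	/* syscall entry: li r10,1 ; stb r10,PACASOFTIRQEN ; stb r10,PACAHARDIRQEN */
	paca.soft_enabled = 1;
	paca.hard_enabled = 1;

	/* restore path: rldicl r4,r3,49,63 == (msr >> 15) & 1 */
	paca.hard_enabled = (msr >> 15) & 1;

	/* enter_rtas: rldicl r7,r6,48,1 ; rotldi r7,r7,16 == msr & ~MSR_EE */
	uint64_t msr_hard_disabled = msr & ~MSR_EE;

	printf("soft=%u hard=%u hard-disabled MSR=0x%016llx\n",
	       paca.soft_enabled, paca.hard_enabled,
	       (unsigned long long)msr_hard_disabled);
	return 0;
}

Keeping the enable state as PACA bytes is what lets the new enter_rtas check read a single byte (lbz r0,PACASOFTIRQEN(r13)) where the old code needed an mfmsr/andi. pair.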