author    John David Anglin <dave.anglin@bell.net>    2013-05-07 00:07:25 +0000
committer Helge Deller <deller@gmx.de>                2013-05-07 20:33:03 +0200
commit    c207a76bf155cb5cf24cf849c08f6555e9180594 (patch)
tree      f9f05b4d721dd215e28318337617beada3c238dd /arch
parent    f21dda025ab00da197ac30b6c6690c380d838683 (diff)
parisc: only re-enable interrupts if we need to schedule or deliver signals when returning to userspace
Helge and I have found that we have a kernel stack overflow problem which causes a variety of random failures.

Currently, we re-enable interrupts when returning from an external interrupt in case we need to schedule or deliver signals. As a result, a potentially unlimited number of interrupts can occur while we are running on the kernel stack, which is very limited in space (currently 16k).

This change defers enabling interrupts until we have actually decided to schedule or deliver signals, which only happens when we are about to return to userspace. This limits the number of interrupts on the kernel stack to one. In all other cases, interrupts remain disabled until the final return from interrupt (rfi).

Signed-off-by: John David Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>
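The ordering change is easier to follow in C. Below is a minimal, self-contained sketch of the new return path (illustrative only: the real change is PA-RISC assembly in entry.S, and the flag and function names here are stand-ins, not kernel APIs):

/* Sketch of the idea behind this patch: defer enabling interrupts until we
 * actually decide to reschedule or deliver a signal, so at most one external
 * interrupt can nest on the limited (16k) kernel stack.  All names below are
 * illustrative stand-ins, not the real parisc entry code. */

#include <stdbool.h>
#include <stdio.h>

#define TIF_NEED_RESCHED (1u << 0)    /* stand-in for sched.h TIF_NEED_RESCHED */
#define TIF_SIGPENDING   (1u << 1)    /* stand-in for pending-signal flag */

static void enable_interrupts(void) { puts("ssm PSW_SM_I   (interrupts on)"); }
static void do_schedule(void)       { puts("schedule()"); }
static void deliver_signals(void)   { puts("do_signal()"); }

/* New ordering: test first, enable interrupts only when there is work to do. */
static void intr_return(unsigned ti_flags, bool returning_to_user)
{
    if (ti_flags & TIF_NEED_RESCHED) {
        enable_interrupts();          /* previously done unconditionally, before any test */
        do_schedule();
        return;
    }
    if (returning_to_user && (ti_flags & TIF_SIGPENDING)) {
        enable_interrupts();          /* only on the userspace signal-delivery path */
        deliver_signals();
        return;
    }
    /* Otherwise interrupts stay disabled until the final rfi. */
}

int main(void)
{
    intr_return(TIF_NEED_RESCHED, false);   /* reschedule path */
    intr_return(TIF_SIGPENDING, true);      /* signal-delivery path */
    intr_return(0, true);                   /* nothing pending: interrupts stay off */
    return 0;
}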
Diffstat (limited to 'arch')
-rw-r--r--  arch/parisc/kernel/entry.S  14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 36f4f1dcb77..3f3326d876f 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -833,11 +833,6 @@ ENTRY(syscall_exit_rfi)
STREG %r19,PT_SR7(%r16)
intr_return:
- /* NOTE: Need to enable interrupts incase we schedule. */
- ssm PSW_SM_I, %r0
-
-intr_check_resched:
-
/* check for reschedule */
mfctl %cr30,%r1
LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
@@ -864,6 +859,11 @@ intr_check_sig:
LDREG PT_IASQ1(%r16), %r20
cmpib,COND(=),n 0,%r20,intr_restore /* backward */
+ /* NOTE: We need to enable interrupts if we have to deliver
+ * signals. We used to do this earlier but it caused kernel
+ * stack overflows. */
+ ssm PSW_SM_I, %r0
+
copy %r0, %r25 /* long in_syscall = 0 */
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
@@ -915,6 +915,10 @@ intr_do_resched:
cmpib,COND(=) 0, %r20, intr_do_preempt
nop
+ /* NOTE: We need to enable interrupts if we schedule. We used
+ * to do this earlier but it caused kernel stack overflows. */
+ ssm PSW_SM_I, %r0
+
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif