Diffstat (limited to 'arch/powerpc/lib')
-rw-r--r--  arch/powerpc/lib/code-patching.c    |  2
-rw-r--r--  arch/powerpc/lib/copyuser_power7.S  | 35
-rw-r--r--  arch/powerpc/lib/memcpy_power7.S    |  8
-rw-r--r--  arch/powerpc/lib/sstep.c            | 36
4 files changed, 41 insertions(+), 40 deletions(-)
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index dd223b3eb33..17e5b236431 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -20,7 +20,7 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
{
int err;
- err = __put_user(instr, addr);
+ __put_user_size(instr, addr, 4, err);
if (err)
return err;
asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (addr));
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index f9ede7c6606..0d24ff15f5f 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -288,7 +288,7 @@ err1; stb r0,0(r3)
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
bl .enter_vmx_usercopy
- cmpwi r3,0
+ cmpwi cr1,r3,0
ld r0,STACKFRAMESIZE+16(r1)
ld r3,STACKFRAMESIZE+48(r1)
ld r4,STACKFRAMESIZE+56(r1)
@@ -326,38 +326,7 @@ err1; stb r0,0(r3)
dcbt r0,r8,0b01010 /* GO */
.machine pop
- /*
- * We prefetch both the source and destination using enhanced touch
- * instructions. We use a stream ID of 0 for the load side and
- * 1 for the store side.
- */
- clrrdi r6,r4,7
- clrrdi r9,r3,7
- ori r9,r9,1 /* stream=1 */
-
- srdi r7,r5,7 /* length in cachelines, capped at 0x3FF */
- cmpldi cr1,r7,0x3FF
- ble cr1,1f
- li r7,0x3FF
-1: lis r0,0x0E00 /* depth=7 */
- sldi r7,r7,7
- or r7,r7,r0
- ori r10,r7,1 /* stream=1 */
-
- lis r8,0x8000 /* GO=1 */
- clrldi r8,r8,32
-
-.machine push
-.machine "power4"
- dcbt r0,r6,0b01000
- dcbt r0,r7,0b01010
- dcbtst r0,r9,0b01000
- dcbtst r0,r10,0b01010
- eieio
- dcbt r0,r8,0b01010 /* GO */
-.machine pop
-
- beq .Lunwind_stack_nonvmx_copy
+ beq cr1,.Lunwind_stack_nonvmx_copy
/*
* If source and destination are not relatively aligned we use a
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 0efdc51bc71..0663630baf3 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -222,7 +222,7 @@ _GLOBAL(memcpy_power7)
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
bl .enter_vmx_copy
- cmpwi r3,0
+ cmpwi cr1,r3,0
ld r0,STACKFRAMESIZE+16(r1)
ld r3,STACKFRAMESIZE+48(r1)
ld r4,STACKFRAMESIZE+56(r1)
@@ -239,8 +239,8 @@ _GLOBAL(memcpy_power7)
ori r9,r9,1 /* stream=1 */
srdi r7,r5,7 /* length in cachelines, capped at 0x3FF */
- cmpldi cr1,r7,0x3FF
- ble cr1,1f
+ cmpldi r7,0x3FF
+ ble 1f
li r7,0x3FF
1: lis r0,0x0E00 /* depth=7 */
sldi r7,r7,7
@@ -260,7 +260,7 @@ _GLOBAL(memcpy_power7)
dcbt r0,r8,0b01010 /* GO */
.machine pop
- beq .Lunwind_stack_nonvmx_copy
+ beq cr1,.Lunwind_stack_nonvmx_copy
/*
* If source and destination are not relatively aligned we use a
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 9a52349874e..e15c521846c 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -566,7 +566,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
unsigned long int ea;
unsigned int cr, mb, me, sh;
int err;
- unsigned long old_ra;
+ unsigned long old_ra, val3;
long ival;
opcode = instr >> 26;
@@ -1486,11 +1486,43 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
goto ldst_done;
case 36: /* stw */
- case 37: /* stwu */
val = regs->gpr[rd];
err = write_mem(val, dform_ea(instr, regs), 4, regs);
goto ldst_done;
+ case 37: /* stwu */
+ val = regs->gpr[rd];
+ val3 = dform_ea(instr, regs);
+ /*
+ * On PPC32 the stack pointer in r1 is always updated with stwu, so
+ * this emulated store may corrupt the exception frame. We therefore
+ * have to provide an exception frame trampoline, which is pushed
+ * below the kprobed function's stack. Here we only update gpr[1] and
+ * don't emulate the real store operation; the real store is done
+ * safely in the exception return code, which checks this flag.
+ */
+ if ((ra == 1) && !(regs->msr & MSR_PR) \
+ && (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
+ /*
+ * Check whether this would overflow the kernel stack
+ */
+ if (val3 - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
+ printk(KERN_CRIT "Can't kprobe this since the kernel stack would overflow.\n");
+ err = -EINVAL;
+ break;
+ }
+
+ /*
+ * Check if the flag is already set, since that means we'd
+ * lose the previous value.
+ */
+ WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
+ set_thread_flag(TIF_EMULATE_STACK_STORE);
+ err = 0;
+ } else
+ err = write_mem(val, val3, 4, regs);
+ goto ldst_done;
+
case 38: /* stb */
case 39: /* stbu */
val = regs->gpr[rd];