Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile              |   1
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c         |   3
-rw-r--r--  arch/powerpc/kernel/cpu_setup_fsl_booke.S |  74
-rw-r--r--  arch/powerpc/kernel/cputable.c            |   4
-rw-r--r--  arch/powerpc/kernel/dbell.c               |   2
-rw-r--r--  arch/powerpc/kernel/dma-iommu.c           |   9
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c         |  22
-rw-r--r--  arch/powerpc/kernel/dma.c                 |   3
-rw-r--r--  arch/powerpc/kernel/entry_32.S            |  47
-rw-r--r--  arch/powerpc/kernel/entry_64.S            |  58
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S      | 212
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S      | 130
-rw-r--r--  arch/powerpc/kernel/fadump.c              |   3
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S      |  46
-rw-r--r--  arch/powerpc/kernel/hw_breakpoint.c       |  27
-rw-r--r--  arch/powerpc/kernel/ibmebus.c             |   1
-rw-r--r--  arch/powerpc/kernel/idle_power7.S         |   2
-rw-r--r--  arch/powerpc/kernel/iommu.c               |   5
-rw-r--r--  arch/powerpc/kernel/irq.c                 |   8
-rw-r--r--  arch/powerpc/kernel/kgdb.c                |  27
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c       |  14
-rw-r--r--  arch/powerpc/kernel/paca.c                |   1
-rw-r--r--  arch/powerpc/kernel/pci-common.c          |  36
-rw-r--r--  arch/powerpc/kernel/ppc32.h               |  51
-rw-r--r--  arch/powerpc/kernel/process.c             |  31
-rw-r--r--  arch/powerpc/kernel/prom.c                |   4
-rw-r--r--  arch/powerpc/kernel/prom_init.c           |  68
-rw-r--r--  arch/powerpc/kernel/ptrace.c              |   3
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c          |   7
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c            |   5
-rw-r--r--  arch/powerpc/kernel/setup_64.c            |   2
-rw-r--r--  arch/powerpc/kernel/signal.c              |   8
-rw-r--r--  arch/powerpc/kernel/smp.c                 |  25
-rw-r--r--  arch/powerpc/kernel/sys_ppc32.c           |  45
-rw-r--r--  arch/powerpc/kernel/syscalls.c            |   8
-rw-r--r--  arch/powerpc/kernel/sysfs.c               |  10
-rw-r--r--  arch/powerpc/kernel/time.c                |  70
-rw-r--r--  arch/powerpc/kernel/traps.c               |   4
-rw-r--r--  arch/powerpc/kernel/uprobes.c             | 184
-rw-r--r--  arch/powerpc/kernel/vdso.c                |   4
-rw-r--r--  arch/powerpc/kernel/vio.c                 |   1
41 files changed, 948 insertions(+), 317 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index bb282dd8161..cde12f8a4eb 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -96,6 +96,7 @@ obj-$(CONFIG_MODULES) += ppc_ksyms.o
obj-$(CONFIG_BOOTX_TEXT) += btext.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_UPROBES) += uprobes.o
obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 85b05c463fa..7523539cfe9 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -76,6 +76,7 @@ int main(void)
DEFINE(SIGSEGV, SIGSEGV);
DEFINE(NMI_MASK, NMI_MASK);
DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
+ DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit));
#else
DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
#endif /* CONFIG_PPC64 */
@@ -205,6 +206,7 @@ int main(void)
DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost));
+ DEFINE(PACA_SPRG3, offsetof(struct paca_struct, sprg3));
#endif /* CONFIG_PPC64 */
/* RTAS */
@@ -533,7 +535,6 @@ int main(void)
HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
- HSTATE_FIELD(HSTATE_SPRG3, sprg3);
HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
HSTATE_FIELD(HSTATE_NAPPING, napping);
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 69fdd2322a6..dcd881937f7 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -16,6 +16,8 @@
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
+#include <asm/mmu-book3e.h>
+#include <asm/asm-offsets.h>
_GLOBAL(__e500_icache_setup)
mfspr r0, SPRN_L1CSR1
@@ -73,27 +75,81 @@ _GLOBAL(__setup_cpu_e500v2)
mtlr r4
blr
_GLOBAL(__setup_cpu_e500mc)
- mr r5, r4
- mflr r4
+_GLOBAL(__setup_cpu_e5500)
+ mflr r5
bl __e500_icache_setup
bl __e500_dcache_setup
bl __setup_e500mc_ivors
- mtlr r4
+ /*
+ * We only want to touch IVOR38-41 if we're running on hardware
+ * that supports category E.HV. The architectural way to determine
+ * this is MMUCFG[LPIDSIZE].
+ */
+ mfspr r3, SPRN_MMUCFG
+ rlwinm. r3, r3, 0, MMUCFG_LPIDSIZE
+ beq 1f
+ bl __setup_ehv_ivors
+ b 2f
+1:
+ lwz r3, CPU_SPEC_FEATURES(r4)
+ /* We need this check as cpu_setup is also called for
+ * the secondary cores. So, if we have already cleared
+ * the feature on the primary core, avoid doing it on the
+ * secondary core.
+ */
+ andis. r6, r3, CPU_FTR_EMB_HV@h
+ beq 2f
+ rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV
+ stw r3, CPU_SPEC_FEATURES(r4)
+2:
+ mtlr r5
blr
#endif
-/* Right now, restore and setup are the same thing */
+
+#ifdef CONFIG_PPC_BOOK3E_64
_GLOBAL(__restore_cpu_e5500)
-_GLOBAL(__setup_cpu_e5500)
mflr r4
bl __e500_icache_setup
bl __e500_dcache_setup
-#ifdef CONFIG_PPC_BOOK3E_64
bl .__setup_base_ivors
bl .setup_perfmon_ivor
bl .setup_doorbell_ivors
+ /*
+ * We only want to touch IVOR38-41 if we're running on hardware
+ * that supports category E.HV. The architectural way to determine
+ * this is MMUCFG[LPIDSIZE].
+ */
+ mfspr r10,SPRN_MMUCFG
+ rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
+ beq 1f
bl .setup_ehv_ivors
-#else
- bl __setup_e500mc_ivors
-#endif
+1:
mtlr r4
blr
+
+_GLOBAL(__setup_cpu_e5500)
+ mflr r5
+ bl __e500_icache_setup
+ bl __e500_dcache_setup
+ bl .__setup_base_ivors
+ bl .setup_perfmon_ivor
+ bl .setup_doorbell_ivors
+ /*
+ * We only want to touch IVOR38-41 if we're running on hardware
+ * that supports category E.HV. The architectural way to determine
+ * this is MMUCFG[LPIDSIZE].
+ */
+ mfspr r10,SPRN_MMUCFG
+ rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
+ beq 1f
+ bl .setup_ehv_ivors
+ b 2f
+1:
+ ld r10,CPU_SPEC_FEATURES(r4)
+ LOAD_REG_IMMEDIATE(r9,CPU_FTR_EMB_HV)
+ andc r10,r10,r9
+ std r10,CPU_SPEC_FEATURES(r4)
+2:
+ mtlr r5
+ blr
+#endif
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 455faa38987..0514c21f138 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -2016,7 +2016,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_cpu_type = "ppc/e500mc",
.oprofile_type = PPC_OPROFILE_FSL_EMB,
.cpu_setup = __setup_cpu_e5500,
+#ifndef CONFIG_PPC32
.cpu_restore = __restore_cpu_e5500,
+#endif
.machine_check = machine_check_e500mc,
.platform = "ppce5500",
},
@@ -2034,7 +2036,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_cpu_type = "ppc/e6500",
.oprofile_type = PPC_OPROFILE_FSL_EMB,
.cpu_setup = __setup_cpu_e5500,
+#ifndef CONFIG_PPC32
.cpu_restore = __restore_cpu_e5500,
+#endif
.machine_check = machine_check_e500mc,
.platform = "ppce6500",
},
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index 5b25c8060fd..a892680668d 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -28,6 +28,8 @@ void doorbell_setup_this_cpu(void)
void doorbell_cause_ipi(int cpu, unsigned long data)
{
+ /* Order previous accesses vs. msgsnd, which is treated as a store */
+ mb();
ppc_msgsnd(PPC_DBELL, 0, data);
}
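
msgsnd is architecturally treated as a store, so the new mb() orders any data the IPI handler will consume ahead of the doorbell. A minimal sketch of the contract this hunk establishes (the payload variable is hypothetical; only mb() and ppc_msgsnd() come from the patch):

    /* sender: publish data, then ring the doorbell */
    ipi_payload[cpu] = work;        /* hypothetical shared data */
    mb();                           /* order the store vs. msgsnd */
    ppc_msgsnd(PPC_DBELL, 0, data);

    /* receiver: the doorbell handler may now read ipi_payload[cpu]
     * without risk of observing a stale value */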
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 2d7bb8ced13..e4897523de4 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -83,11 +83,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
return 0;
}
- if ((tbl->it_offset + tbl->it_size) > (mask >> IOMMU_PAGE_SHIFT)) {
- dev_info(dev, "Warning: IOMMU window too big for device mask\n");
- dev_info(dev, "mask: 0x%08llx, table end: 0x%08lx\n",
- mask, (tbl->it_offset + tbl->it_size) <<
- IOMMU_PAGE_SHIFT);
+ if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT)) {
+ dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
+ dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
+ mask, tbl->it_offset << IOMMU_PAGE_SHIFT);
return 0;
} else
return 1;
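
A worked example of the relaxed test, with hypothetical numbers: for a device with a 32-bit DMA mask and 4 KiB IOMMU pages, the addressable window ends at page 0xfffff. The old check rejected any table whose end crossed that page; the new one rejects only tables that begin beyond it, since the allocator can still satisfy mappings from the addressable head of the window:

    u64 mask = 0xffffffffULL;                       /* 32-bit device */
    unsigned long last = mask >> IOMMU_PAGE_SHIFT;  /* page 0xfffff */
    /* old: fail if it_offset + it_size > last  (window end)   */
    /* new: fail only if it_offset > last       (window start) */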
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 46943651da2..bd1a2aba599 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -12,6 +12,7 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
@@ -20,7 +21,6 @@
#include <asm/machdep.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
-#include <asm/abs_addr.h>
unsigned int ppc_swiotlb_enable;
@@ -105,3 +105,23 @@ int __init swiotlb_setup_bus_notifier(void)
&ppc_swiotlb_plat_bus_notifier);
return 0;
}
+
+void swiotlb_detect_4g(void)
+{
+ if ((memblock_end_of_DRAM() - 1) > 0xffffffff)
+ ppc_swiotlb_enable = 1;
+}
+
+static int __init swiotlb_late_init(void)
+{
+ if (ppc_swiotlb_enable) {
+ swiotlb_print_info();
+ set_pci_dma_ops(&swiotlb_dma_ops);
+ ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
+ } else {
+ swiotlb_free();
+ }
+
+ return 0;
+}
+subsys_initcall(swiotlb_late_init);
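
A sketch of how a platform is expected to consume the new hook; the board function below is hypothetical, only swiotlb_detect_4g() and swiotlb_late_init() come from this patch:

    void __init my_board_setup_arch(void)      /* hypothetical */
    {
            /* request bounce buffers iff RAM extends above 4 GiB */
            swiotlb_detect_4g();
    }

If the flag is set, swiotlb_late_init() installs the swiotlb DMA ops at subsys_initcall time; otherwise it returns the bounce buffer to the allocator via swiotlb_free().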
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 355b9d84b0f..8032b97ccdc 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -14,7 +14,6 @@
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/bug.h>
-#include <asm/abs_addr.h>
#include <asm/machdep.h>
/*
@@ -50,7 +49,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
return NULL;
ret = page_address(page);
memset(ret, 0, size);
- *dma_handle = virt_to_abs(ret) + get_dma_offset(dev);
+ *dma_handle = __pa(ret) + get_dma_offset(dev);
return ret;
#endif
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index ead5016b02d..af37528da49 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -831,19 +831,56 @@ restore_user:
bnel- load_dbcr0
#endif
-#ifdef CONFIG_PREEMPT
b restore
/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
- /* check current_thread_info->preempt_count */
+ /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
CURRENT_THREAD_INFO(r9, r1)
+ lwz r8,TI_FLAGS(r9)
+ andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
+ beq+ 1f
+
+ addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
+
+ lwz r3,GPR1(r1)
+ subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
+ mr r4,r1 /* src: current exception frame */
+ mr r1,r3 /* Reroute the trampoline frame to r1 */
+
+ /* Copy from the original to the trampoline. */
+ li r5,INT_FRAME_SIZE/4 /* size: INT_FRAME_SIZE */
+ li r6,0 /* start offset: 0 */
+ mtctr r5
+2: lwzx r0,r6,r4
+ stwx r0,r6,r3
+ addi r6,r6,4
+ bdnz 2b
+
+ /* Do real store operation to complete stwu */
+ lwz r5,GPR1(r1)
+ stw r8,0(r5)
+
+ /* Clear _TIF_EMULATE_STACK_STORE flag */
+ lis r11,_TIF_EMULATE_STACK_STORE@h
+ addi r5,r9,TI_FLAGS
+0: lwarx r8,0,r5
+ andc r8,r8,r11
+#ifdef CONFIG_IBM405_ERR77
+ dcbt 0,r5
+#endif
+ stwcx. r8,0,r5
+ bne- 0b
+1:
+
+#ifdef CONFIG_PREEMPT
+ /* check current_thread_info->preempt_count */
lwz r0,TI_PREEMPT(r9)
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore
- lwz r0,TI_FLAGS(r9)
- andi. r0,r0,_TIF_NEED_RESCHED
+ andi. r8,r8,_TIF_NEED_RESCHED
beq+ restore
+ lwz r3,_MSR(r1)
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -864,8 +901,6 @@ resume_kernel:
*/
bl trace_hardirqs_on
#endif
-#else
-resume_kernel:
#endif /* CONFIG_PREEMPT */
/* interrupts are hard-disabled at this point */
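
Rendered as C-like pseudocode (a sketch for readability, not kernel code), the emulated-store fixup above works as follows: the probed stwu could not be performed at trap time because the exception frame occupies the memory the new stack frame needs, so the exit path first slides the exception frame down and only then completes the deferred back-chain store:

    if (ti->flags & _TIF_EMULATE_STACK_STORE) {
            void *dst = (void *)regs->gpr[1] - INT_FRAME_SIZE;
            memcpy(dst, frame, INT_FRAME_SIZE);   /* relocate the frame */
            /* finish "stwu r1,-N(r1)": store the old r1 as the
             * back chain at the new stack pointer */
            *(unsigned long *)regs->gpr[1] =
                    (unsigned long)frame + INT_FRAME_SIZE;
            clear_bit(TIF_EMULATE_STACK_STORE, &ti->flags);  /* atomic */
    }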
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 4b01a25e29e..0e931aaffca 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -370,6 +370,12 @@ _GLOBAL(ret_from_fork)
li r3,0
b syscall_exit
+ .section ".toc","aw"
+DSCR_DEFAULT:
+ .tc dscr_default[TC],dscr_default
+
+ .section ".text"
+
/*
* This routine switches between two different tasks. The process
* state of one is saved on its kernel stack. Then the state
@@ -509,9 +515,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
mr r1,r8 /* start using new stack pointer */
std r7,PACAKSAVE(r13)
- ld r6,_CCR(r1)
- mtcrf 0xFF,r6
-
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
ld r0,THREAD_VRSAVE(r4)
@@ -520,14 +523,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
+ lwz r6,THREAD_DSCR_INHERIT(r4)
+ ld r7,DSCR_DEFAULT@toc(2)
ld r0,THREAD_DSCR(r4)
- cmpd r0,r25
- beq 1f
+ cmpwi r6,0
+ bne 1f
+ ld r0,0(r7)
+1: cmpd r0,r25
+ beq 2f
mtspr SPRN_DSCR,r0
-1:
+2:
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif
+ ld r6,_CCR(r1)
+ mtcrf 0xFF,r6
+
/* r3-r13 are destroyed -- Cort */
REST_8GPRS(14, r1)
REST_10GPRS(22, r1)
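
The DSCR hunk implements, in effect (illustrative C, not kernel code): a thread that has explicitly written its DSCR keeps that value across switches, while every other thread follows the current system-wide default rather than a value snapshotted at fork:

    u64 dscr = next->thread.dscr_inherit ? next->thread.dscr
                                         : dscr_default;
    if (dscr != prev_dscr)          /* previous value cached in r25 */
            mtspr(SPRN_DSCR, dscr);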
@@ -582,6 +593,41 @@ _GLOBAL(ret_from_except_lite)
b .ret_from_except
resume_kernel:
+ /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
+ CURRENT_THREAD_INFO(r9, r1)
+ ld r8,TI_FLAGS(r9)
+ andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
+ beq+ 1f
+
+ addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
+
+ lwz r3,GPR1(r1)
+ subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
+ mr r4,r1 /* src: current exception frame */
+ mr r1,r3 /* Reroute the trampoline frame to r1 */
+
+ /* Copy from the original to the trampoline. */
+ li r5,INT_FRAME_SIZE/8 /* size: INT_FRAME_SIZE */
+ li r6,0 /* start offset: 0 */
+ mtctr r5
+2: ldx r0,r6,r4
+ stdx r0,r6,r3
+ addi r6,r6,8
+ bdnz 2b
+
+ /* Do real store operation to complete stwu */
+ lwz r5,GPR1(r1)
+ std r8,0(r5)
+
+ /* Clear _TIF_EMULATE_STACK_STORE flag */
+ lis r11,_TIF_EMULATE_STACK_STORE@h
+ addi r5,r9,TI_FLAGS
+ ldarx r4,0,r5
+ andc r4,r4,r11
+ stdcx. r4,0,r5
+ bne- 0b
+1:
+
#ifdef CONFIG_PREEMPT
/* Check if we need to preempt */
andi. r0,r4,_TIF_NEED_RESCHED
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 98be7f0cd22..4684e33a26c 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -25,6 +25,8 @@
#include <asm/ppc-opcode.h>
#include <asm/mmu.h>
#include <asm/hw_irq.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_booke_hv_asm.h>
/* XXX This will ultimately add space for a special exception save
* structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
@@ -35,16 +37,18 @@
#define SPECIAL_EXC_FRAME_SIZE INT_FRAME_SIZE
/* Exception prolog code for all exceptions */
-#define EXCEPTION_PROLOG(n, type, addition) \
+#define EXCEPTION_PROLOG(n, intnum, type, addition) \
mtspr SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */ \
mfspr r13,SPRN_SPRG_PACA; /* get PACA */ \
std r10,PACA_EX##type+EX_R10(r13); \
std r11,PACA_EX##type+EX_R11(r13); \
+ PROLOG_STORE_RESTORE_SCRATCH_##type; \
mfcr r10; /* save CR */ \
+ mfspr r11,SPRN_##type##_SRR1;/* what are we coming from */ \
+ DO_KVM intnum,SPRN_##type##_SRR1; /* KVM hook */ \
+ stw r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \
addition; /* additional code for that exc. */ \
std r1,PACA_EX##type+EX_R1(r13); /* save old r1 in the PACA */ \
- stw r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \
- mfspr r11,SPRN_##type##_SRR1;/* what are we coming from */ \
type##_SET_KSTACK; /* get special stack if necessary */\
andi. r10,r11,MSR_PR; /* save stack pointer */ \
beq 1f; /* branch around if supervisor */ \
@@ -59,6 +63,10 @@
#define SPRN_GEN_SRR0 SPRN_SRR0
#define SPRN_GEN_SRR1 SPRN_SRR1
+#define GDBELL_SET_KSTACK GEN_SET_KSTACK
+#define SPRN_GDBELL_SRR0 SPRN_GSRR0
+#define SPRN_GDBELL_SRR1 SPRN_GSRR1
+
#define CRIT_SET_KSTACK \
ld r1,PACA_CRIT_STACK(r13); \
subi r1,r1,SPECIAL_EXC_FRAME_SIZE;
@@ -77,29 +85,46 @@
#define SPRN_MC_SRR0 SPRN_MCSRR0
#define SPRN_MC_SRR1 SPRN_MCSRR1
-#define NORMAL_EXCEPTION_PROLOG(n, addition) \
- EXCEPTION_PROLOG(n, GEN, addition##_GEN(n))
+#define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \
+ EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))
+
+#define CRIT_EXCEPTION_PROLOG(n, intnum, addition) \
+ EXCEPTION_PROLOG(n, intnum, CRIT, addition##_CRIT(n))
-#define CRIT_EXCEPTION_PROLOG(n, addition) \
- EXCEPTION_PROLOG(n, CRIT, addition##_CRIT(n))
+#define DBG_EXCEPTION_PROLOG(n, intnum, addition) \
+ EXCEPTION_PROLOG(n, intnum, DBG, addition##_DBG(n))
-#define DBG_EXCEPTION_PROLOG(n, addition) \
- EXCEPTION_PROLOG(n, DBG, addition##_DBG(n))
+#define MC_EXCEPTION_PROLOG(n, intnum, addition) \
+ EXCEPTION_PROLOG(n, intnum, MC, addition##_MC(n))
-#define MC_EXCEPTION_PROLOG(n, addition) \
- EXCEPTION_PROLOG(n, MC, addition##_MC(n))
+#define GDBELL_EXCEPTION_PROLOG(n, intnum, addition) \
+ EXCEPTION_PROLOG(n, intnum, GDBELL, addition##_GDBELL(n))
+/*
+ * Store user-visible scratch in PACA exception slots and restore proper value
+ */
+#define PROLOG_STORE_RESTORE_SCRATCH_GEN
+#define PROLOG_STORE_RESTORE_SCRATCH_GDBELL
+#define PROLOG_STORE_RESTORE_SCRATCH_DBG
+#define PROLOG_STORE_RESTORE_SCRATCH_MC
+
+#define PROLOG_STORE_RESTORE_SCRATCH_CRIT \
+ mfspr r10,SPRN_SPRG_CRIT_SCRATCH; /* get r13 */ \
+ std r10,PACA_EXCRIT+EX_R13(r13); \
+ ld r11,PACA_SPRG3(r13); \
+ mtspr SPRN_SPRG_CRIT_SCRATCH,r11;
/* Variants of the "addition" argument for the prolog
*/
#define PROLOG_ADDITION_NONE_GEN(n)
+#define PROLOG_ADDITION_NONE_GDBELL(n)
#define PROLOG_ADDITION_NONE_CRIT(n)
#define PROLOG_ADDITION_NONE_DBG(n)
#define PROLOG_ADDITION_NONE_MC(n)
#define PROLOG_ADDITION_MASKABLE_GEN(n) \
- lbz r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \
- cmpwi cr0,r11,0; /* yes -> go out of line */ \
+ lbz r10,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \
+ cmpwi cr0,r10,0; /* yes -> go out of line */ \
beq masked_interrupt_book3e_##n
#define PROLOG_ADDITION_2REGS_GEN(n) \
@@ -233,9 +258,9 @@ exc_##n##_bad_stack: \
1:
-#define MASKABLE_EXCEPTION(trapnum, label, hdlr, ack) \
+#define MASKABLE_EXCEPTION(trapnum, intnum, label, hdlr, ack) \
START_EXCEPTION(label); \
- NORMAL_EXCEPTION_PROLOG(trapnum, PROLOG_ADDITION_MASKABLE) \
+ NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\
EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE) \
ack(r8); \
CHECK_NAPPING(); \
@@ -286,7 +311,8 @@ interrupt_end_book3e:
/* Critical Input Interrupt */
START_EXCEPTION(critical_input);
- CRIT_EXCEPTION_PROLOG(0x100, PROLOG_ADDITION_NONE)
+ CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
+ PROLOG_ADDITION_NONE)
// EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE)
// bl special_reg_save_crit
// CHECK_NAPPING();
@@ -297,7 +323,8 @@ interrupt_end_book3e:
/* Machine Check Interrupt */
START_EXCEPTION(machine_check);
- CRIT_EXCEPTION_PROLOG(0x200, PROLOG_ADDITION_NONE)
+ MC_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_MACHINE_CHECK,
+ PROLOG_ADDITION_NONE)
// EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE)
// bl special_reg_save_mc
// addi r3,r1,STACK_FRAME_OVERHEAD
@@ -308,7 +335,8 @@ interrupt_end_book3e:
/* Data Storage Interrupt */
START_EXCEPTION(data_storage)
- NORMAL_EXCEPTION_PROLOG(0x300, PROLOG_ADDITION_2REGS)
+ NORMAL_EXCEPTION_PROLOG(0x300, BOOKE_INTERRUPT_DATA_STORAGE,
+ PROLOG_ADDITION_2REGS)
mfspr r14,SPRN_DEAR
mfspr r15,SPRN_ESR
EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_DISABLE)
@@ -316,18 +344,21 @@ interrupt_end_book3e:
/* Instruction Storage Interrupt */
START_EXCEPTION(instruction_storage);
- NORMAL_EXCEPTION_PROLOG(0x400, PROLOG_ADDITION_2REGS)
+ NORMAL_EXCEPTION_PROLOG(0x400, BOOKE_INTERRUPT_INST_STORAGE,
+ PROLOG_ADDITION_2REGS)
li r15,0
mr r14,r10
EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_DISABLE)
b storage_fault_common
/* External Input Interrupt */
- MASKABLE_EXCEPTION(0x500, external_input, .do_IRQ, ACK_NONE)
+ MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL,
+ external_input, .do_IRQ, ACK_NONE)
/* Alignment */
START_EXCEPTION(alignment);
- NORMAL_EXCEPTION_PROLOG(0x600, PROLOG_ADDITION_2REGS)
+ NORMAL_EXCEPTION_PROLOG(0x600, BOOKE_INTERRUPT_ALIGNMENT,
+ PROLOG_ADDITION_2REGS)
mfspr r14,SPRN_DEAR
mfspr r15,SPRN_ESR
EXCEPTION_COMMON(0x600, PACA_EXGEN, INTS_KEEP)
@@ -335,7 +366,8 @@ interrupt_end_book3e:
/* Program Interrupt */
START_EXCEPTION(program);
- NORMAL_EXCEPTION_PROLOG(0x700, PROLOG_ADDITION_1REG)
+ NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM,
+ PROLOG_ADDITION_1REG)
mfspr r14,SPRN_ESR
EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE)
std r14,_DSISR(r1)
@@ -347,7 +379,8 @@ interrupt_end_book3e:
/* Floating Point Unavailable Interrupt */
START_EXCEPTION(fp_unavailable);
- NORMAL_EXCEPTION_PROLOG(0x800, PROLOG_ADDITION_NONE)
+ NORMAL_EXCEPTION_PROLOG(0x800, BOOKE_INTERRUPT_FP_UNAVAIL,
+ PROLOG_ADDITION_NONE)
/* we can probably do a shorter exception entry for that one... */
EXCEPTION_COMMON(0x800, PACA_EXGEN, INTS_KEEP)
ld r12,_MSR(r1)
@@ -362,14 +395,17 @@ interrupt_end_book3e:
b .ret_from_except
/* Decrementer Interrupt */
- MASKABLE_EXCEPTION(0x900, decrementer, .timer_interrupt, ACK_DEC)
+ MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
+ decrementer, .timer_interrupt, ACK_DEC)
/* Fixed Interval Timer Interrupt */
- MASKABLE_EXCEPTION(0x980, fixed_interval, .unknown_exception, ACK_FIT)
+ MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT,
+ fixed_interval, .unknown_exception, ACK_FIT)
/* Watchdog Timer Interrupt */
START_EXCEPTION(watchdog);
- CRIT_EXCEPTION_PROLOG(0x9f0, PROLOG_ADDITION_NONE)
+ CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG,
+ PROLOG_ADDITION_NONE)
// EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE)
// bl special_reg_save_crit
// CHECK_NAPPING();
@@ -388,7 +424,8 @@ interrupt_end_book3e:
/* Auxiliary Processor Unavailable Interrupt */
START_EXCEPTION(ap_unavailable);
- NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE)
+ NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL,
+ PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_DISABLE)
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -397,7 +434,8 @@ interrupt_end_book3e:
/* Debug exception as a critical interrupt */
START_EXCEPTION(debug_crit);
- CRIT_EXCEPTION_PROLOG(0xd00, PROLOG_ADDITION_2REGS)
+ CRIT_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
+ PROLOG_ADDITION_2REGS)
/*
* If there is a single step or branch-taken exception in an
@@ -431,7 +469,7 @@ interrupt_end_book3e:
mtcr r10
ld r10,PACA_EXCRIT+EX_R10(r13) /* restore registers */
ld r11,PACA_EXCRIT+EX_R11(r13)
- mfspr r13,SPRN_SPRG_CRIT_SCRATCH
+ ld r13,PACA_EXCRIT+EX_R13(r13)
rfci
/* Normal debug exception */
@@ -444,7 +482,7 @@ interrupt_end_book3e:
/* Now we mash up things to make it look like we are coming on a
* normal exception
*/
- mfspr r15,SPRN_SPRG_CRIT_SCRATCH
+ ld r15,PACA_EXCRIT+EX_R13(r13)
mtspr SPRN_SPRG_GEN_SCRATCH,r15
mfspr r14,SPRN_DBSR
EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE)
@@ -462,7 +500,8 @@ kernel_dbg_exc:
/* Debug exception as a debug interrupt */
START_EXCEPTION(debug_debug);
- DBG_EXCEPTION_PROLOG(0xd08, PROLOG_ADDITION_2REGS)
+ DBG_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
+ PROLOG_ADDITION_2REGS)
/*
* If there is a single step or branch-taken exception in an
@@ -523,18 +562,21 @@ kernel_dbg_exc:
b .ret_from_except
START_EXCEPTION(perfmon);
- NORMAL_EXCEPTION_PROLOG(0x260, PROLOG_ADDITION_NONE)
+ NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
+ PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x260, PACA_EXGEN, INTS_DISABLE)
addi r3,r1,STACK_FRAME_OVERHEAD
bl .performance_monitor_exception
b .ret_from_except_lite
/* Doorbell interrupt */
- MASKABLE_EXCEPTION(0x280, doorbell, .doorbell_exception, ACK_NONE)
+ MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
+ doorbell, .doorbell_exception, ACK_NONE)
/* Doorbell critical Interrupt */
START_EXCEPTION(doorbell_crit);
- CRIT_EXCEPTION_PROLOG(0x2a0, PROLOG_ADDITION_NONE)
+ CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL,
+ PROLOG_ADDITION_NONE)
// EXCEPTION_COMMON(0x2a0, PACA_EXCRIT, INTS_DISABLE)
// bl special_reg_save_crit
// CHECK_NAPPING();
@@ -543,12 +585,24 @@ kernel_dbg_exc:
// b ret_from_crit_except
b .
-/* Guest Doorbell */
- MASKABLE_EXCEPTION(0x2c0, guest_doorbell, .unknown_exception, ACK_NONE)
+/*
+ * Guest doorbell interrupt
+ * This general exception uses the GSRRx save/restore registers
+ */
+ START_EXCEPTION(guest_doorbell);
+ GDBELL_EXCEPTION_PROLOG(0x2c0, BOOKE_INTERRUPT_GUEST_DBELL,
+ PROLOG_ADDITION_NONE)
+ EXCEPTION_COMMON(0x2c0, PACA_EXGEN, INTS_KEEP)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .save_nvgprs
+ INTS_RESTORE_HARD
+ bl .unknown_exception
+ b .ret_from_except
/* Guest Doorbell critical Interrupt */
START_EXCEPTION(guest_doorbell_crit);
- CRIT_EXCEPTION_PROLOG(0x2e0, PROLOG_ADDITION_NONE)
+ CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT,
+ PROLOG_ADDITION_NONE)
// EXCEPTION_COMMON(0x2e0, PACA_EXCRIT, INTS_DISABLE)
// bl special_reg_save_crit
// CHECK_NAPPING();
@@ -559,7 +613,8 @@ kernel_dbg_exc:
/* Hypervisor call */
START_EXCEPTION(hypercall);
- NORMAL_EXCEPTION_PROLOG(0x310, PROLOG_ADDITION_NONE)
+ NORMAL_EXCEPTION_PROLOG(0x310, BOOKE_INTERRUPT_HV_SYSCALL,
+ PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x310, PACA_EXGEN, INTS_KEEP)
addi r3,r1,STACK_FRAME_OVERHEAD
bl .save_nvgprs
@@ -569,7 +624,8 @@ kernel_dbg_exc:
/* Embedded Hypervisor privileged */
START_EXCEPTION(ehpriv);
- NORMAL_EXCEPTION_PROLOG(0x320, PROLOG_ADDITION_NONE)
+ NORMAL_EXCEPTION_PROLOG(0x320, BOOKE_INTERRUPT_HV_PRIV,
+ PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x320, PACA_EXGEN, INTS_KEEP)
addi r3,r1,STACK_FRAME_OVERHEAD
bl .save_nvgprs
@@ -582,44 +638,42 @@ kernel_dbg_exc:
* accordingly and if the interrupt is level sensitive, we hard disable
*/
+.macro masked_interrupt_book3e paca_irq full_mask
+ lbz r10,PACAIRQHAPPENED(r13)
+ ori r10,r10,\paca_irq
+ stb r10,PACAIRQHAPPENED(r13)
+
+ .if \full_mask == 1
+ rldicl r10,r11,48,1 /* clear MSR_EE */
+ rotldi r11,r10,16
+ mtspr SPRN_SRR1,r11
+ .endif
+
+ lwz r11,PACA_EXGEN+EX_CR(r13)
+ mtcr r11
+ ld r10,PACA_EXGEN+EX_R10(r13)
+ ld r11,PACA_EXGEN+EX_R11(r13)
+ mfspr r13,SPRN_SPRG_GEN_SCRATCH
+ rfi
+ b .
+.endm
+
masked_interrupt_book3e_0x500:
- /* XXX When adding support for EPR, use PACA_IRQ_EE_EDGE */
- li r11,PACA_IRQ_EE
- b masked_interrupt_book3e_full_mask
+ // XXX When adding support for EPR, use PACA_IRQ_EE_EDGE
+ masked_interrupt_book3e PACA_IRQ_EE 1
masked_interrupt_book3e_0x900:
- ACK_DEC(r11);
- li r11,PACA_IRQ_DEC
- b masked_interrupt_book3e_no_mask
+ ACK_DEC(r10);
+ masked_interrupt_book3e PACA_IRQ_DEC 0
+
masked_interrupt_book3e_0x980:
- ACK_FIT(r11);
- li r11,PACA_IRQ_DEC
- b masked_interrupt_book3e_no_mask
+ ACK_FIT(r10);
+ masked_interrupt_book3e PACA_IRQ_DEC 0
+
masked_interrupt_book3e_0x280:
masked_interrupt_book3e_0x2c0:
- li r11,PACA_IRQ_DBELL
- b masked_interrupt_book3e_no_mask
+ masked_interrupt_book3e PACA_IRQ_DBELL 0
-masked_interrupt_book3e_no_mask:
- mtcr r10
- lbz r10,PACAIRQHAPPENED(r13)
- or r10,r10,r11
- stb r10,PACAIRQHAPPENED(r13)
- b 1f
-masked_interrupt_book3e_full_mask:
- mtcr r10
- lbz r10,PACAIRQHAPPENED(r13)
- or r10,r10,r11
- stb r10,PACAIRQHAPPENED(r13)
- mfspr r10,SPRN_SRR1
- rldicl r11,r10,48,1 /* clear MSR_EE */
- rotldi r10,r11,16
- mtspr SPRN_SRR1,r10
-1: ld r10,PACA_EXGEN+EX_R10(r13);
- ld r11,PACA_EXGEN+EX_R11(r13);
- mfspr r13,SPRN_SPRG_GEN_SCRATCH;
- rfi
- b .
/*
* Called from arch_local_irq_enable when an interrupt needs
* to be resent. r3 contains either 0x500,0x900,0x260 or 0x280
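
Rendered as C for clarity (an approximation of the macro's effect, not actual code), each masked-interrupt stub now does:

    local_paca->irq_happened |= paca_irq;   /* remember for replay */
    if (full_mask)                          /* level-sensitive source */
            srr1 &= ~MSR_EE;                /* return with EE still off */
    /* restore CR, r10, r11 and r13 from PACA_EXGEN, then rfi */

so edge sources (decrementer, doorbells) are simply latched, while level sources (external input) are also hard-disabled until the replay from arch_local_irq_enable().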
@@ -1302,25 +1356,11 @@ _GLOBAL(setup_perfmon_ivor)
_GLOBAL(setup_doorbell_ivors)
SET_IVOR(36, 0x280) /* Processor Doorbell */
SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */
-
- /* Check MMUCFG[LPIDSIZE] to determine if we have category E.HV */
- mfspr r10,SPRN_MMUCFG
- rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
- beqlr
-
- SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
- SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
blr
_GLOBAL(setup_ehv_ivors)
- /*
- * We may be running as a guest and lack E.HV even on a chip
- * that normally has it.
- */
- mfspr r10,SPRN_MMUCFG
- rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
- beqlr
-
SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */
SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */
+ SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
+ SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
blr
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e894515e77b..10b658ad65e 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -186,7 +186,7 @@ hardware_interrupt_hv:
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
- MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)
+ STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
@@ -275,6 +275,31 @@ vsx_unavailable_pSeries_1:
STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
+ . = 0x1500
+ .global denorm_exception_hv
+denorm_exception_hv:
+ HMT_MEDIUM
+ mtspr SPRN_SPRG_HSCRATCH0,r13
+ mfspr r13,SPRN_SPRG_HPACA
+ std r9,PACA_EXGEN+EX_R9(r13)
+ std r10,PACA_EXGEN+EX_R10(r13)
+ std r11,PACA_EXGEN+EX_R11(r13)
+ std r12,PACA_EXGEN+EX_R12(r13)
+ mfspr r9,SPRN_SPRG_HSCRATCH0
+ std r9,PACA_EXGEN+EX_R13(r13)
+ mfcr r9
+
+#ifdef CONFIG_PPC_DENORMALISATION
+ mfspr r10,SPRN_HSRR1
+ mfspr r11,SPRN_HSRR0 /* save HSRR0 */
+ andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
+ addi r11,r11,-4 /* HSRR0 is next instruction */
+ bne+ denorm_assist
+#endif
+
+ EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
+ KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
+
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
@@ -336,6 +361,103 @@ do_stab_bolted_pSeries:
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
+#ifdef CONFIG_PPC_DENORMALISATION
+denorm_assist:
+BEGIN_FTR_SECTION
+/*
+ * To denormalise we need to move a copy of the register to itself.
+ * For POWER6 do that here for all FP regs.
+ */
+ mfmsr r10
+ ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
+ xori r10,r10,(MSR_FE0|MSR_FE1)
+ mtmsrd r10
+ sync
+ fmr 0,0
+ fmr 1,1
+ fmr 2,2
+ fmr 3,3
+ fmr 4,4
+ fmr 5,5
+ fmr 6,6
+ fmr 7,7
+ fmr 8,8
+ fmr 9,9
+ fmr 10,10
+ fmr 11,11
+ fmr 12,12
+ fmr 13,13
+ fmr 14,14
+ fmr 15,15
+ fmr 16,16
+ fmr 17,17
+ fmr 18,18
+ fmr 19,19
+ fmr 20,20
+ fmr 21,21
+ fmr 22,22
+ fmr 23,23
+ fmr 24,24
+ fmr 25,25
+ fmr 26,26
+ fmr 27,27
+ fmr 28,28
+ fmr 29,29
+ fmr 30,30
+ fmr 31,31
+FTR_SECTION_ELSE
+/*
+ * To denormalise we need to move a copy of the register to itself.
+ * For POWER7 do that here for the first 32 VSX registers only.
+ */
+ mfmsr r10
+ oris r10,r10,MSR_VSX@h
+ mtmsrd r10
+ sync
+ XVCPSGNDP(0,0,0)
+ XVCPSGNDP(1,1,1)
+ XVCPSGNDP(2,2,2)
+ XVCPSGNDP(3,3,3)
+ XVCPSGNDP(4,4,4)
+ XVCPSGNDP(5,5,5)
+ XVCPSGNDP(6,6,6)
+ XVCPSGNDP(7,7,7)
+ XVCPSGNDP(8,8,8)
+ XVCPSGNDP(9,9,9)
+ XVCPSGNDP(10,10,10)
+ XVCPSGNDP(11,11,11)
+ XVCPSGNDP(12,12,12)
+ XVCPSGNDP(13,13,13)
+ XVCPSGNDP(14,14,14)
+ XVCPSGNDP(15,15,15)
+ XVCPSGNDP(16,16,16)
+ XVCPSGNDP(17,17,17)
+ XVCPSGNDP(18,18,18)
+ XVCPSGNDP(19,19,19)
+ XVCPSGNDP(20,20,20)
+ XVCPSGNDP(21,21,21)
+ XVCPSGNDP(22,22,22)
+ XVCPSGNDP(23,23,23)
+ XVCPSGNDP(24,24,24)
+ XVCPSGNDP(25,25,25)
+ XVCPSGNDP(26,26,26)
+ XVCPSGNDP(27,27,27)
+ XVCPSGNDP(28,28,28)
+ XVCPSGNDP(29,29,29)
+ XVCPSGNDP(30,30,30)
+ XVCPSGNDP(31,31,31)
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
+ mtspr SPRN_HSRR0,r11
+ mtcrf 0x80,r9
+ ld r9,PACA_EXGEN+EX_R9(r13)
+ ld r10,PACA_EXGEN+EX_R10(r13)
+ ld r11,PACA_EXGEN+EX_R11(r13)
+ ld r12,PACA_EXGEN+EX_R12(r13)
+ ld r13,PACA_EXGEN+EX_R13(r13)
+ HRFID
+ b .
+#endif
+
.align 7
/* moved from 0xe00 */
STD_EXCEPTION_HV(., 0xe02, h_data_storage)
@@ -486,6 +608,7 @@ machine_check_common:
STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
+ STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
@@ -494,6 +617,7 @@ machine_check_common:
STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
+ STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
#ifdef CONFIG_ALTIVEC
STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
@@ -959,7 +1083,9 @@ _GLOBAL(do_stab_bolted)
rldimi r10,r11,7,52 /* r10 = first ste of the group */
/* Calculate VSID */
- /* This is a kernel address, so protovsid = ESID */
+ /* This is a kernel address, so protovsid = ESID | 1 << 37 */
+ li r9,0x1
+ rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
ASM_VSID_SCRAMBLE(r11, r9, 256M)
rldic r9,r11,12,16 /* r9 = vsid << 12 */
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 18bdf74fa16..06c8202a69c 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -289,8 +289,7 @@ int __init fadump_reserve_mem(void)
else
memory_limit = memblock_end_of_DRAM();
printk(KERN_INFO "Adjusted memory_limit for firmware-assisted"
- " dump, now %#016llx\n",
- (unsigned long long)memory_limit);
+ " dump, now %#016llx\n", memory_limit);
}
if (memory_limit)
memory_boundary = memory_limit;
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 0f59863c3ad..6f62a737f60 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -895,15 +895,11 @@ _GLOBAL(__setup_e500mc_ivors)
mtspr SPRN_IVOR36,r3
li r3,CriticalDoorbell@l
mtspr SPRN_IVOR37,r3
+ sync
+ blr
- /*
- * We only want to touch IVOR38-41 if we're running on hardware
- * that supports category E.HV. The architectural way to determine
- * this is MMUCFG[LPIDSIZE].
- */
- mfspr r3, SPRN_MMUCFG
- andis. r3, r3, MMUCFG_LPIDSIZE@h
- beq no_hv
+/* setup ehv ivors */
+_GLOBAL(__setup_ehv_ivors)
li r3,GuestDoorbell@l
mtspr SPRN_IVOR38,r3
li r3,CriticalGuestDoorbell@l
@@ -912,14 +908,8 @@ _GLOBAL(__setup_e500mc_ivors)
mtspr SPRN_IVOR40,r3
li r3,Ehvpriv@l
mtspr SPRN_IVOR41,r3
-skip_hv_ivors:
sync
blr
-no_hv:
- lwz r3, CPU_SPEC_FEATURES(r5)
- rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV
- stw r3, CPU_SPEC_FEATURES(r5)
- b skip_hv_ivors
#ifdef CONFIG_SPE
/*
@@ -1043,6 +1033,34 @@ _GLOBAL(flush_dcache_L1)
blr
+/* Flush L1 d-cache, invalidate and disable d-cache and i-cache */
+_GLOBAL(__flush_disable_L1)
+ mflr r10
+ bl flush_dcache_L1 /* Flush L1 d-cache */
+ mtlr r10
+
+ mfspr r4, SPRN_L1CSR0 /* Invalidate and disable d-cache */
+ li r5, 2
+ rlwimi r4, r5, 0, 3
+
+ msync
+ isync
+ mtspr SPRN_L1CSR0, r4
+ isync
+
+1: mfspr r4, SPRN_L1CSR0 /* Wait for the invalidate to finish */
+ andi. r4, r4, 2
+ bne 1b
+
+ mfspr r4, SPRN_L1CSR1 /* Invalidate and disable i-cache */
+ li r5, 2
+ rlwimi r4, r5, 0, 3
+
+ mtspr SPRN_L1CSR1, r4
+ isync
+
+ blr
+
#ifdef CONFIG_SMP
/* When we get here, r24 needs to hold the CPU # */
.globl __secondary_start
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index f3a82dde61d..a89cae481b0 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -73,7 +73,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
* If so, DABR will be populated in single_step_dabr_instruction().
*/
if (current->thread.last_hit_ubp != bp)
- set_dabr(info->address | info->type | DABR_TRANSLATION);
+ set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
return 0;
}
@@ -97,7 +97,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
}
*slot = NULL;
- set_dabr(0);
+ set_dabr(0, 0);
}
/*
@@ -170,6 +170,13 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
info->address = bp->attr.bp_addr;
info->len = bp->attr.bp_len;
+ info->dabrx = DABRX_ALL;
+ if (bp->attr.exclude_user)
+ info->dabrx &= ~DABRX_USER;
+ if (bp->attr.exclude_kernel)
+ info->dabrx &= ~DABRX_KERNEL;
+ if (bp->attr.exclude_hv)
+ info->dabrx &= ~DABRX_HYP;
/*
* Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
@@ -197,7 +204,7 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
info = counter_arch_bp(tsk->thread.last_hit_ubp);
regs->msr &= ~MSR_SE;
- set_dabr(info->address | info->type | DABR_TRANSLATION);
+ set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
tsk->thread.last_hit_ubp = NULL;
}
@@ -215,7 +222,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
unsigned long dar = regs->dar;
/* Disable breakpoints during exception handling */
- set_dabr(0);
+ set_dabr(0, 0);
/*
* The counter may be concurrently released but that can only
@@ -253,7 +260,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
/* Do not emulate user-space instructions, instead single-step them */
if (user_mode(regs)) {
- bp->ctx->task->thread.last_hit_ubp = bp;
+ current->thread.last_hit_ubp = bp;
regs->msr |= MSR_SE;
goto out;
}
@@ -281,7 +288,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
if (!info->extraneous_interrupt)
perf_bp_event(bp, regs);
- set_dabr(info->address | info->type | DABR_TRANSLATION);
+ set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
out:
rcu_read_unlock();
return rc;
@@ -294,7 +301,7 @@ int __kprobes single_step_dabr_instruction(struct die_args *args)
{
struct pt_regs *regs = args->regs;
struct perf_event *bp = NULL;
- struct arch_hw_breakpoint *bp_info;
+ struct arch_hw_breakpoint *info;
bp = current->thread.last_hit_ubp;
/*
@@ -304,16 +311,16 @@ int __kprobes single_step_dabr_instruction(struct die_args *args)
if (!bp)
return NOTIFY_DONE;
- bp_info = counter_arch_bp(bp);
+ info = counter_arch_bp(bp);
/*
* We shall invoke the user-defined callback function in the single
* stepping handler to conform to 'trigger-after-execute' semantics
*/
- if (!bp_info->extraneous_interrupt)
+ if (!info->extraneous_interrupt)
perf_bp_event(bp, regs);
- set_dabr(bp_info->address | bp_info->type | DABR_TRANSLATION);
+ set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
current->thread.last_hit_ubp = NULL;
/*
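
With the new dabrx derivation, a perf consumer can scope a watchpoint by privilege state. A hypothetical user-space-only write watchpoint would be configured with the standard perf ABI attributes:

    struct perf_event_attr attr = {
            .type           = PERF_TYPE_BREAKPOINT,
            .bp_type        = HW_BREAKPOINT_W,
            .bp_addr        = (__u64)(unsigned long)&watched,  /* hypothetical */
            .bp_len         = HW_BREAKPOINT_LEN_8,
            .exclude_kernel = 1,
            .exclude_hv     = 1,
    };
    /* arch_validate_hwbkpt_settings() then clears DABRX_KERNEL and
     * DABRX_HYP from DABRX_ALL, so the DABR matches only in user state */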
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index b01d14eeca8..8220baa46fa 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -47,7 +47,6 @@
#include <linux/stat.h>
#include <linux/of_platform.h>
#include <asm/ibmebus.h>
-#include <asm/abs_addr.h>
static struct device ibmebus_bus_device = { /* fake "parent" device */
.init_name = "ibmebus",
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 7140d838339..e11863f4e59 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -28,7 +28,9 @@ _GLOBAL(power7_idle)
lwz r4,ADDROFF(powersave_nap)(r3)
cmpwi 0,r4,0
beqlr
+ /* fall through */
+_GLOBAL(power7_nap)
/* NAP is a state loss, we create a regs frame on the
* stack, fill it up with the state we care about and
* stick a pointer to it in PACAR1. We really only
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index ff5a6ce027b..8226c6cb348 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -215,7 +215,8 @@ static unsigned long iommu_range_alloc(struct device *dev,
spin_lock_irqsave(&(pool->lock), flags);
again:
- if ((pass == 0) && handle && *handle)
+ if ((pass == 0) && handle && *handle &&
+ (*handle >= pool->start) && (*handle < pool->end))
start = *handle;
else
start = pool->hint;
@@ -236,7 +237,9 @@ again:
* but on second pass, start at 0 in pool 0.
*/
if ((start & mask) >= limit || pass > 0) {
+ spin_unlock(&(pool->lock));
pool = &(tbl->pools[0]);
+ spin_lock(&(pool->lock));
start = pool->start;
} else {
start &= mask;
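
The paired unlock/lock turns the fall-back to pool 0 into a hand-over-hand transfer. Previously the function kept holding the failing pool's lock and, on exit, unlocked pools[0].lock, which it had never taken. Sketch of the corrected flow (illustrative):

    spin_lock_irqsave(&pool->lock, flags);
    /* ... allocation attempt fails, fall back ... */
    spin_unlock(&pool->lock);           /* drop the failing pool */
    pool = &tbl->pools[0];
    spin_lock(&pool->lock);             /* continue under pool 0 */
    /* ... */
    spin_unlock_irqrestore(&pool->lock, flags);  /* now matches */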
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 1f017bb7a7c..71413f41278 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -489,10 +489,10 @@ void do_IRQ(struct pt_regs *regs)
struct pt_regs *old_regs = set_irq_regs(regs);
unsigned int irq;
- trace_irq_entry(regs);
-
irq_enter();
+ trace_irq_entry(regs);
+
check_stack_overflow();
/*
@@ -511,10 +511,10 @@ void do_IRQ(struct pt_regs *regs)
else
__get_cpu_var(irq_stat).spurious_irqs++;
+ trace_irq_exit(regs);
+
irq_exit();
set_irq_regs(old_regs);
-
- trace_irq_exit(regs);
}
void __init init_IRQ(void)
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 782bd0a3c2f..c470a40b29f 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -25,6 +25,7 @@
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/debug.h>
+#include <linux/slab.h>
/*
* This table contains the mapping between PowerPC hardware trap types, and
@@ -101,6 +102,21 @@ static int computeSignal(unsigned int tt)
return SIGHUP; /* default for things we don't know about */
}
+/**
+ * kgdb_skipexception - Bail out of KGDB when we've been triggered.
+ * @exception: Exception vector number
+ * @regs: Current &struct pt_regs.
+ *
+ * On some architectures we need to skip a breakpoint exception when
+ * it occurs after a breakpoint has been removed.
+ *
+ */
+int kgdb_skipexception(int exception, struct pt_regs *regs)
+{
+ return kgdb_isremovedbreak(regs->nip);
+}
+
static int kgdb_call_nmi_hook(struct pt_regs *regs)
{
kgdb_nmicallback(raw_smp_processor_id(), regs);
@@ -138,6 +154,8 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
static int kgdb_singlestep(struct pt_regs *regs)
{
struct thread_info *thread_info, *exception_thread_info;
+ struct thread_info *backup_current_thread_info =
+ (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL);
if (user_mode(regs))
return 0;
@@ -155,13 +173,17 @@ static int kgdb_singlestep(struct pt_regs *regs)
thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1));
exception_thread_info = current_thread_info();
- if (thread_info != exception_thread_info)
+ if (thread_info != exception_thread_info) {
+ /* Save the original current_thread_info. */
+ memcpy(backup_current_thread_info, exception_thread_info, sizeof *thread_info);
memcpy(exception_thread_info, thread_info, sizeof *thread_info);
+ }
kgdb_handle_exception(0, SIGTRAP, 0, regs);
if (thread_info != exception_thread_info)
- memcpy(thread_info, exception_thread_info, sizeof *thread_info);
+ /* Restore current_thread_info lastly. */
+ memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info);
return 1;
}
@@ -410,7 +432,6 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
#else
linux_regs->msr |= MSR_SE;
#endif
- kgdb_single_step = 1;
atomic_set(&kgdb_cpu_doing_single_step,
raw_smp_processor_id());
}
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 5df77779440..fa9f6c72f55 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -165,7 +165,7 @@ void __init reserve_crashkernel(void)
if (memory_limit && memory_limit <= crashk_res.end) {
memory_limit = crashk_res.end + 1;
printk("Adjusted memory limit for crashkernel, now 0x%llx\n",
- (unsigned long long)memory_limit);
+ memory_limit);
}
printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
@@ -204,6 +204,12 @@ static struct property crashk_size_prop = {
.value = &crashk_size,
};
+static struct property memory_limit_prop = {
+ .name = "linux,memory-limit",
+ .length = sizeof(unsigned long long),
+ .value = &memory_limit,
+};
+
static void __init export_crashk_values(struct device_node *node)
{
struct property *prop;
@@ -223,6 +229,12 @@ static void __init export_crashk_values(struct device_node *node)
crashk_size = resource_size(&crashk_res);
prom_add_property(node, &crashk_size_prop);
}
+
+ /*
+ * memory_limit is required by the kexec-tools to limit the
+ * crash regions to the actual memory used.
+ */
+ prom_update_property(node, &memory_limit_prop);
}
static int __init kexec_setup(void)
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index fbe1a12dc7f..cd6da855090 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -142,6 +142,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
new_paca->hw_cpu_id = 0xffff;
new_paca->kexec_state = KEXEC_STATE_NONE;
new_paca->__current = &init_task;
+ new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
#ifdef CONFIG_PPC_STD_MMU_64
new_paca->slb_shadow_ptr = &slb_shadow[cpu];
#endif /* CONFIG_PPC_STD_MMU_64 */
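
The poison value pairs with the early_setup() change further down in this series: per-cpu addressing on ppc64 adds the paca's data_offset to the static per-cpu symbol, so an access made before setup_per_cpu_areas() now produces a wild pointer and faults loudly instead of silently aliasing the boot copy. Roughly (illustrative):

    /* ppc64 per-cpu access boils down to: */
    void *p = (char *)&per_cpu_symbol + local_paca->data_offset;
    /* data_offset == 0xfeeeeeeeeeeeeeeeULL  =>  immediate fault */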
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 2aa04f29e1d..7f94f760dd0 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -99,6 +99,26 @@ void pcibios_free_controller(struct pci_controller *phb)
kfree(phb);
}
+/*
+ * Return the minimal alignment for memory or I/O windows of the
+ * associated P2P bridge: by default, 4KiB for I/O windows and
+ * 1MiB for memory windows.
+ */
+resource_size_t pcibios_window_alignment(struct pci_bus *bus,
+ unsigned long type)
+{
+ if (ppc_md.pcibios_window_alignment)
+ return ppc_md.pcibios_window_alignment(bus, type);
+
+ /*
+ * PCI core will figure out the default
+ * alignment: 4KiB for I/O and 1MiB for
+ * memory window.
+ */
+ return 1;
+}
+
static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
#ifdef CONFIG_PPC64
@@ -960,13 +980,14 @@ static void __devinit pcibios_fixup_bridge(struct pci_bus *bus)
if (i >= 3 && bus->self->transparent)
continue;
- /* If we are going to re-assign everything, mark the resource
- * as unset and move it down to 0
+ /* If we're going to reassign everything, we can shrink
+ * the P2P resource to a size of 0 in order to save
+ * space.
+ */
if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
res->flags |= IORESOURCE_UNSET;
- res->end -= res->start;
res->start = 0;
+ res->end = -1;
continue;
}
@@ -1228,7 +1249,14 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus)
pr_warning("PCI: Cannot allocate resource region "
"%d of PCI bridge %d, will remap\n", i, bus->number);
clear_resource:
- res->start = res->end = 0;
+ /* The resource may be sized later, during reassignment,
+ * based on the resources required by the downstream PCI
+ * devices. Here we set its size to 0 in order to save
+ * space.
+ */
+ res->start = 0;
+ res->end = -1;
res->flags = 0;
}
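
Clearing a window to start = 0, end = -1 (rather than start = end = 0) makes its size genuinely zero, because resource_size() is end - start + 1 and the all-ones end wraps to 0. A quick check (illustrative):

    struct resource res = { .start = 0, .end = -1 };
    /* resource_size(&res) == res.end - res.start + 1 == 0: the
     * bridge window occupies no space until reassignment sizes it
     * from the downstream devices' requirements */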
diff --git a/arch/powerpc/kernel/ppc32.h b/arch/powerpc/kernel/ppc32.h
index dc16aefe1dd..02fb0ee2609 100644
--- a/arch/powerpc/kernel/ppc32.h
+++ b/arch/powerpc/kernel/ppc32.h
@@ -16,57 +16,6 @@
/* These are here to support 32-bit syscalls on a 64-bit kernel. */
-typedef struct compat_siginfo {
- int si_signo;
- int si_errno;
- int si_code;
-
- union {
- int _pad[SI_PAD_SIZE32];
-
- /* kill() */
- struct {
- compat_pid_t _pid; /* sender's pid */
- compat_uid_t _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- compat_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- compat_sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- compat_pid_t _pid; /* sender's pid */
- compat_uid_t _uid; /* sender's uid */
- compat_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- compat_pid_t _pid; /* which child */
- compat_uid_t _uid; /* sender's uid */
- int _status; /* exit code */
- compat_clock_t _utime;
- compat_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
- struct {
- unsigned int _addr; /* faulting insn/memory ref. */
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
- } _sifields;
-} compat_siginfo_t;
-
#define __old_sigaction32 old_sigaction32
struct __old_sigaction32 {
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 710f400476d..d5ad666efd8 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -258,6 +258,7 @@ void do_send_trap(struct pt_regs *regs, unsigned long address,
{
siginfo_t info;
+ current->thread.trap_nr = signal_code;
if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
11, SIGSEGV) == NOTIFY_STOP)
return;
@@ -275,6 +276,7 @@ void do_dabr(struct pt_regs *regs, unsigned long address,
{
siginfo_t info;
+ current->thread.trap_nr = TRAP_HWBKPT;
if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
11, SIGSEGV) == NOTIFY_STOP)
return;
@@ -283,7 +285,7 @@ void do_dabr(struct pt_regs *regs, unsigned long address,
return;
/* Clear the DABR */
- set_dabr(0);
+ set_dabr(0, 0);
/* Deliver the signal to userspace */
info.si_signo = SIGTRAP;
@@ -364,18 +366,19 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
{
if (thread->dabr) {
thread->dabr = 0;
- set_dabr(0);
+ thread->dabrx = 0;
+ set_dabr(0, 0);
}
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
-int set_dabr(unsigned long dabr)
+int set_dabr(unsigned long dabr, unsigned long dabrx)
{
__get_cpu_var(current_dabr) = dabr;
if (ppc_md.set_dabr)
- return ppc_md.set_dabr(dabr);
+ return ppc_md.set_dabr(dabr, dabrx);
/* XXX should we have a CPU_FTR_HAS_DABR ? */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
@@ -385,9 +388,8 @@ int set_dabr(unsigned long dabr)
#endif
#elif defined(CONFIG_PPC_BOOK3S)
mtspr(SPRN_DABR, dabr);
+ mtspr(SPRN_DABRX, dabrx);
#endif
-
-
return 0;
}
@@ -480,7 +482,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
*/
#ifndef CONFIG_HAVE_HW_BREAKPOINT
if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
- set_dabr(new->thread.dabr);
+ set_dabr(new->thread.dabr, new->thread.dabrx);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif
@@ -514,9 +516,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
local_irq_save(flags);
- account_system_vtime(current);
- account_process_vtime(current);
-
/*
* We can't take a PMU exception inside _switch() since there is a
* window where the kernel stack SLB and the kernel stack are out
@@ -802,16 +801,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
if (cpu_has_feature(CPU_FTR_DSCR)) {
- if (current->thread.dscr_inherit) {
- p->thread.dscr_inherit = 1;
- p->thread.dscr = current->thread.dscr;
- } else if (0 != dscr_default) {
- p->thread.dscr_inherit = 1;
- p->thread.dscr = dscr_default;
- } else {
- p->thread.dscr_inherit = 0;
- p->thread.dscr = 0;
- }
+ p->thread.dscr_inherit = current->thread.dscr_inherit;
+ p->thread.dscr = current->thread.dscr;
}
#endif
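
Callers of the widened set_dabr() now state the match criteria explicitly. A hypothetical user-only write watchpoint, for instance, would be armed and torn down as:

    /* address | match-mode bits, plus the new DABRX argument */
    set_dabr(addr | DABR_DATA_WRITE | DABR_TRANSLATION, DABRX_USER);
    /* ... */
    set_dabr(0, 0);                 /* clear both registers */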
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index f191bf02943..37725e86651 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -78,7 +78,7 @@ static int __init early_parse_mem(char *p)
return 1;
memory_limit = PAGE_ALIGN(memparse(p, &p));
- DBG("memory limit = 0x%llx\n", (unsigned long long)memory_limit);
+ DBG("memory limit = 0x%llx\n", memory_limit);
return 0;
}
@@ -661,7 +661,7 @@ void __init early_init_devtree(void *params)
/* make sure we've parsed cmdline for mem= before this */
if (memory_limit)
- first_memblock_size = min(first_memblock_size, memory_limit);
+ first_memblock_size = min_t(u64, first_memblock_size, memory_limit);
setup_initial_memory_limit(memstart_addr, first_memblock_size);
/* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
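
min() type-checks its operands, and after this series memory_limit (unsigned long long) and first_memblock_size (phys_addr_t, which can be 32-bit on ppc32 builds) may be distinct types, so min_t() pins the comparison type. In general (illustrative):

    u32 a = 4;
    u64 b = 5;
    /* min(a, b)        -> build warning: comparison of distinct types */
    /* min_t(u64, a, b) -> both widened to u64, result is 4 */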
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 0794a3017b1..cb6c123722a 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -705,6 +705,7 @@ static void __init early_cmdline_parse(void)
#endif
#define OV5_TYPE1_AFFINITY 0x80 /* Type 1 NUMA affinity */
#define OV5_PFO_HW_RNG 0x80 /* PFO Random Number Generator */
+#define OV5_PFO_HW_842 0x40 /* PFO Compression Accelerator */
#define OV5_PFO_HW_ENCR 0x20 /* PFO Encryption Accelerator */
/* Option Vector 6: IBM PAPR hints */
@@ -774,8 +775,7 @@ static unsigned char ibm_architecture_vec[] = {
0,
0,
0,
- OV5_PFO_HW_RNG | OV5_PFO_HW_ENCR,
-
+ OV5_PFO_HW_RNG | OV5_PFO_HW_ENCR | OV5_PFO_HW_842,
/* option vector 6: IBM PAPR hints */
4 - 2, /* length */
0,
@@ -1624,6 +1624,63 @@ static void __init prom_instantiate_rtas(void)
#ifdef CONFIG_PPC64
/*
+ * Allocate room for and instantiate Stored Measurement Log (SML)
+ */
+static void __init prom_instantiate_sml(void)
+{
+ phandle ibmvtpm_node;
+ ihandle ibmvtpm_inst;
+ u32 entry = 0, size = 0;
+ u64 base;
+
+ prom_debug("prom_instantiate_sml: start...\n");
+
+ ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/ibm,vtpm"));
+ prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
+ if (!PHANDLE_VALID(ibmvtpm_node))
+ return;
+
+ ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/ibm,vtpm"));
+ if (!IHANDLE_VALID(ibmvtpm_inst)) {
+ prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
+ return;
+ }
+
+ if (call_prom_ret("call-method", 2, 2, &size,
+ ADDR("sml-get-handover-size"),
+ ibmvtpm_inst) != 0 || size == 0) {
+ prom_printf("SML get handover size failed\n");
+ return;
+ }
+
+ base = alloc_down(size, PAGE_SIZE, 0);
+ if (base == 0)
+ prom_panic("Could not allocate memory for sml\n");
+
+ prom_printf("instantiating sml at 0x%x...", base);
+
+ if (call_prom_ret("call-method", 4, 2, &entry,
+ ADDR("sml-handover"),
+ ibmvtpm_inst, size, base) != 0 || entry == 0) {
+ prom_printf("SML handover failed\n");
+ return;
+ }
+ prom_printf(" done\n");
+
+ reserve_mem(base, size);
+
+ prom_setprop(ibmvtpm_node, "/ibm,vtpm", "linux,sml-base",
+ &base, sizeof(base));
+ prom_setprop(ibmvtpm_node, "/ibm,vtpm", "linux,sml-size",
+ &size, sizeof(size));
+
+ prom_debug("sml base = 0x%x\n", base);
+ prom_debug("sml size = 0x%x\n", (long)size);
+
+ prom_debug("prom_instantiate_sml: end...\n");
+}
+
+/*
* Allocate room for and initialize TCE tables
*/
static void __init prom_initialize_tce_table(void)
@@ -1691,7 +1748,7 @@ static void __init prom_initialize_tce_table(void)
* else will impact performance, so we always allocate 8MB.
* Anton
*/
- if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p))
+ if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p))
minsize = 8UL << 20;
else
minsize = 4UL << 20;
@@ -2916,6 +2973,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
prom_instantiate_opal();
#endif
+#ifdef CONFIG_PPC64
+ /* instantiate sml */
+ prom_instantiate_sml();
+#endif
+
/*
* On non-powermacs, put all CPUs in spin-loops.
*
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index c10fc28b909..79d8e56470d 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -960,6 +960,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
thread->ptrace_bps[0] = bp;
ptrace_put_breakpoints(task);
thread->dabr = data;
+ thread->dabrx = DABRX_ALL;
return 0;
}
@@ -983,6 +984,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
/* Move contents to the DABR register */
task->thread.dabr = data;
+ task->thread.dabrx = DABRX_ALL;
#else /* CONFIG_PPC_ADV_DEBUG_REGS */
/* As described above, it was assumed 3 bits were passed with the data
* address, but we will assume only the mode bits will be passed
@@ -1397,6 +1399,7 @@ static long ppc_set_hwdebug(struct task_struct *child,
dabr |= DABR_DATA_WRITE;
child->thread.dabr = dabr;
+ child->thread.dabrx = DABRX_ALL;
return 1;
#endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 2c0ee640563..20b0120db0c 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -21,7 +21,6 @@
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <asm/rtas.h>
-#include <asm/abs_addr.h>
#define MODULE_VERS "1.0"
#define MODULE_NAME "rtas_flash"
@@ -582,7 +581,7 @@ static void rtas_flash_firmware(int reboot_type)
flist = (struct flash_block_list *)&rtas_data_buf[0];
flist->num_blocks = 0;
flist->next = rtas_firmware_flash_list;
- rtas_block_list = virt_to_abs(flist);
+ rtas_block_list = __pa(flist);
if (rtas_block_list >= 4UL*1024*1024*1024) {
printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n");
spin_unlock(&rtas_data_buf_lock);
@@ -596,13 +595,13 @@ static void rtas_flash_firmware(int reboot_type)
for (f = flist; f; f = next) {
/* Translate data addrs to absolute */
for (i = 0; i < f->num_blocks; i++) {
- f->blocks[i].data = (char *)virt_to_abs(f->blocks[i].data);
+ f->blocks[i].data = (char *)__pa(f->blocks[i].data);
image_size += f->blocks[i].length;
}
next = f->next;
/* Don't translate NULL pointer for last entry */
if (f->next)
- f->next = (struct flash_block_list *)virt_to_abs(f->next);
+ f->next = (struct flash_block_list *)__pa(f->next);
else
f->next = NULL;
/* make num_blocks into the version/length field */
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 179af906dcd..6de63e3250b 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -81,7 +81,7 @@ int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
return PCIBIOS_DEVICE_NOT_FOUND;
if (returnval == EEH_IO_ERROR_VALUE(size) &&
- eeh_dn_check_failure (pdn->node, NULL))
+ eeh_dev_check_failure(of_node_to_eeh_dev(pdn->node)))
return PCIBIOS_DEVICE_NOT_FOUND;
return PCIBIOS_SUCCESSFUL;
@@ -275,9 +275,6 @@ void __init find_and_init_phbs(void)
of_node_put(root);
pci_devs_phb_init();
- /* Create EEH devices for all PHBs */
- eeh_dev_phb_init();
-
/*
* PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
* in chosen.
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 389bd4f0cdb..efb6a41b313 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -208,6 +208,8 @@ void __init early_setup(unsigned long dt_ptr)
/* Fix up paca fields required for the boot cpu */
get_paca()->cpu_start = 1;
+ /* Allow percpu accesses to "work" until we setup percpu data */
+ get_paca()->data_offset = 0;
/* Probe the machine type */
probe_machine();
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 5c023c9cf16..a2dc75793bd 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -11,6 +11,7 @@
#include <linux/tracehook.h>
#include <linux/signal.h>
+#include <linux/uprobes.h>
#include <linux/key.h>
#include <asm/hw_breakpoint.h>
#include <asm/uaccess.h>
@@ -130,7 +131,7 @@ static int do_signal(struct pt_regs *regs)
* triggered inside the kernel.
*/
if (current->thread.dabr)
- set_dabr(current->thread.dabr);
+ set_dabr(current->thread.dabr, current->thread.dabrx);
#endif
/* Re-enable the breakpoints for the signal stack */
thread_change_pc(current, regs);
@@ -157,6 +158,11 @@ static int do_signal(struct pt_regs *regs)
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
{
+ if (thread_info_flags & _TIF_UPROBE) {
+ clear_thread_flag(TIF_UPROBE);
+ uprobe_notify_resume(regs);
+ }
+
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
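
_TIF_UPROBE is checked before _TIF_SIGPENDING so that probe processing finishes before any signal is delivered; a handler that ran first would capture a NIP still pointing into the uprobe machinery. Condensed shape of the resume path (names as in the hunk above):

	if (flags & _TIF_UPROBE) {		/* 1: finish uprobe work */
		clear_thread_flag(TIF_UPROBE);
		uprobe_notify_resume(regs);
	}
	if (flags & _TIF_SIGPENDING)		/* 2: only then deliver signals */
		do_signal(regs);
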
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 0321007086f..2b952b5386f 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -102,7 +102,7 @@ int __devinit smp_generic_kick_cpu(int nr)
* Ok it's not there, so it might be soft-unplugged, let's
* try to bring it back
*/
- per_cpu(cpu_state, nr) = CPU_UP_PREPARE;
+ generic_set_cpu_up(nr);
smp_wmb();
smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */
@@ -171,7 +171,7 @@ int smp_request_message_ipi(int virq, int msg)
}
#endif
err = request_irq(virq, smp_ipi_action[msg],
- IRQF_PERCPU | IRQF_NO_THREAD,
+ IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
smp_ipi_name[msg], 0);
WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
virq, smp_ipi_name[msg], err);
@@ -198,8 +198,15 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
struct cpu_messages *info = &per_cpu(ipi_message, cpu);
char *message = (char *)&info->messages;
+ /*
+ * Order previous accesses before accesses in the IPI handler.
+ */
+ smp_mb();
message[msg] = 1;
- mb();
+ /*
+ * cause_ipi functions are required to include a full barrier
+ * before doing whatever causes the IPI.
+ */
smp_ops->cause_ipi(cpu, info->data);
}
@@ -211,7 +218,7 @@ irqreturn_t smp_ipi_demux(void)
mb(); /* order any irq clear */
do {
- all = xchg_local(&info->messages, 0);
+ all = xchg(&info->messages, 0);
#ifdef __BIG_ENDIAN
if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
@@ -406,6 +413,16 @@ void generic_set_cpu_dead(unsigned int cpu)
per_cpu(cpu_state, cpu) = CPU_DEAD;
}
+/*
+ * cpu_state should be set back to CPU_UP_PREPARE in kick_cpu(); otherwise it
+ * stays CPU_DEAD from the last generic_set_cpu_dead() call, so the wait loop
+ * in generic_cpu_die() would see CPU_DEAD immediately and skip its delay.
+ */
+void generic_set_cpu_up(unsigned int cpu)
+{
+ per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+}
+
int generic_check_cpu_restart(unsigned int cpu)
{
return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
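
The barrier changes pair up across CPUs: the sender's smp_mb() publishes any payload before the message flag is set, and the receiver's xchg() (a full barrier, unlike the old xchg_local()) keeps the flag read from passing the payload read. A shape-only sketch of the two sides, with hypothetical helper names:

	static void ipi_send(struct cpu_messages *info, int msg)
	{
		smp_mb();			/* publish payload before the flag */
		((char *)&info->messages)[msg] = 1;
		/* cause_ipi() must begin with a full barrier of its own */
	}

	static unsigned long ipi_receive(struct cpu_messages *info)
	{
		return xchg(&info->messages, 0);	/* full barrier on the consume side */
	}
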
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index 81c570633ea..abd1112da54 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -143,48 +143,17 @@ long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t pt
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
-asmlinkage long compat_sys_sendfile(u32 out_fd, u32 in_fd, compat_off_t __user * offset, u32 count)
+asmlinkage long compat_sys_sendfile_wrapper(u32 out_fd, u32 in_fd,
+ compat_off_t __user *offset, u32 count)
{
- mm_segment_t old_fs = get_fs();
- int ret;
- off_t of;
- off_t __user *up;
-
- if (offset && get_user(of, offset))
- return -EFAULT;
-
- /* The __user pointer cast is valid because of the set_fs() */
- set_fs(KERNEL_DS);
- up = offset ? (off_t __user *) &of : NULL;
- ret = sys_sendfile((int)out_fd, (int)in_fd, up, count);
- set_fs(old_fs);
-
- if (offset && put_user(of, offset))
- return -EFAULT;
-
- return ret;
+ return compat_sys_sendfile((int)out_fd, (int)in_fd, offset, count);
}
-asmlinkage int compat_sys_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, s32 count)
+asmlinkage long compat_sys_sendfile64_wrapper(u32 out_fd, u32 in_fd,
+ compat_loff_t __user *offset, u32 count)
{
- mm_segment_t old_fs = get_fs();
- int ret;
- loff_t lof;
- loff_t __user *up;
-
- if (offset && get_user(lof, offset))
- return -EFAULT;
-
- /* The __user pointer cast is valid because of the set_fs() */
- set_fs(KERNEL_DS);
- up = offset ? (loff_t __user *) &lof : NULL;
- ret = sys_sendfile64(out_fd, in_fd, up, count);
- set_fs(old_fs);
-
- if (offset && put_user(lof, offset))
- return -EFAULT;
-
- return ret;
+ return sys_sendfile((int)out_fd, (int)in_fd,
+ (off_t __user *)offset, count);
}
long compat_sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
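
The 64-bit wrapper can forward to sys_sendfile() because, on a 64-bit kernel, off_t is a 64-bit long while compat_loff_t is a 64-bit s64, so the pointer cast above is lossless. A compile-time statement of that assumption (illustrative, not part of the patch):

	static inline void sendfile64_wrapper_assumptions(void)
	{
		/* both sides of the (off_t __user *) cast are 64 bits wide */
		BUILD_BUG_ON(sizeof(off_t) != sizeof(compat_loff_t));
	}
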
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index f2496f2faec..4e3cc47f26b 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -107,11 +107,11 @@ long ppc64_personality(unsigned long personality)
long ret;
if (personality(current->personality) == PER_LINUX32
- && personality == PER_LINUX)
- personality = PER_LINUX32;
+ && personality(personality) == PER_LINUX)
+ personality = (personality & ~PER_MASK) | PER_LINUX32;
ret = sys_personality(personality);
- if (ret == PER_LINUX32)
- ret = PER_LINUX;
+ if (personality(ret) == PER_LINUX32)
+ ret = (ret & ~PER_MASK) | PER_LINUX;
return ret;
}
#endif
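
The fix matters because a personality value is a base (the low PER_MASK bits) plus flag bits such as ADDR_NO_RANDOMIZE; the old code compared and overwrote the whole word, destroying those flags. A small demonstration of what the masking preserves (shape only):

	static unsigned long to_linux32(unsigned long p)
	{
		/* swap only the base personality; flag bits survive */
		if (personality(p) == PER_LINUX)
			p = (p & ~PER_MASK) | PER_LINUX32;
		return p;	/* e.g. ADDR_NO_RANDOMIZE stays set */
	}
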
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 3529446c2ab..8302af64921 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -194,6 +194,14 @@ static ssize_t show_dscr_default(struct device *dev,
return sprintf(buf, "%lx\n", dscr_default);
}
+static void update_dscr(void *dummy)
+{
+ if (!current->thread.dscr_inherit) {
+ current->thread.dscr = dscr_default;
+ mtspr(SPRN_DSCR, dscr_default);
+ }
+}
+
static ssize_t __used store_dscr_default(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
@@ -206,6 +214,8 @@ static ssize_t __used store_dscr_default(struct device *dev,
return -EINVAL;
dscr_default = val;
+ on_each_cpu(update_dscr, NULL, 1);
+
return count;
}
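
update_dscr() runs via on_each_cpu() with wait == 1, so every online CPU has loaded the new default before the sysfs store returns; threads that set dscr_inherit keep their private value. The same pattern fits any short, non-sleeping per-CPU SPR update:

	static void set_spr_everywhere(void *val)
	{
		mtspr(SPRN_DSCR, (unsigned long)val);	/* runs on each CPU */
	}

	/* caller context: */
	on_each_cpu(set_spr_everywhere, (void *)new_dscr, 1);	/* 1 == wait */
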
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index be171ee73bf..c9986fd400d 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -291,13 +291,12 @@ static inline u64 calculate_stolen_time(u64 stop_tb)
* Account time for a transition between system, hard irq
* or soft irq state.
*/
-void account_system_vtime(struct task_struct *tsk)
+static u64 vtime_delta(struct task_struct *tsk,
+ u64 *sys_scaled, u64 *stolen)
{
- u64 now, nowscaled, delta, deltascaled;
- unsigned long flags;
- u64 stolen, udelta, sys_scaled, user_scaled;
+ u64 now, nowscaled, deltascaled;
+ u64 udelta, delta, user_scaled;
- local_irq_save(flags);
now = mftb();
nowscaled = read_spurr(now);
get_paca()->system_time += now - get_paca()->starttime;
@@ -305,7 +304,7 @@ void account_system_vtime(struct task_struct *tsk)
deltascaled = nowscaled - get_paca()->startspurr;
get_paca()->startspurr = nowscaled;
- stolen = calculate_stolen_time(now);
+ *stolen = calculate_stolen_time(now);
delta = get_paca()->system_time;
get_paca()->system_time = 0;
@@ -322,35 +321,45 @@ void account_system_vtime(struct task_struct *tsk)
* the user ticks get saved up in paca->user_time_scaled to be
* used by account_process_tick.
*/
- sys_scaled = delta;
+ *sys_scaled = delta;
user_scaled = udelta;
if (deltascaled != delta + udelta) {
if (udelta) {
- sys_scaled = deltascaled * delta / (delta + udelta);
- user_scaled = deltascaled - sys_scaled;
+ *sys_scaled = deltascaled * delta / (delta + udelta);
+ user_scaled = deltascaled - *sys_scaled;
} else {
- sys_scaled = deltascaled;
+ *sys_scaled = deltascaled;
}
}
get_paca()->user_time_scaled += user_scaled;
- if (in_interrupt() || idle_task(smp_processor_id()) != tsk) {
- account_system_time(tsk, 0, delta, sys_scaled);
- if (stolen)
- account_steal_time(stolen);
- } else {
- account_idle_time(delta + stolen);
- }
- local_irq_restore(flags);
+ return delta;
+}
+
+void vtime_account_system(struct task_struct *tsk)
+{
+ u64 delta, sys_scaled, stolen;
+
+ delta = vtime_delta(tsk, &sys_scaled, &stolen);
+ account_system_time(tsk, 0, delta, sys_scaled);
+ if (stolen)
+ account_steal_time(stolen);
+}
+
+void vtime_account_idle(struct task_struct *tsk)
+{
+ u64 delta, sys_scaled, stolen;
+
+ delta = vtime_delta(tsk, &sys_scaled, &stolen);
+ account_idle_time(delta + stolen);
}
-EXPORT_SYMBOL_GPL(account_system_vtime);
/*
* Transfer the user and system times accumulated in the paca
* by the exception entry and exit code to the generic process
* user and system time records.
* Must be called with interrupts disabled.
- * Assumes that account_system_vtime() has been called recently
+ * Assumes that vtime_account() has been called recently
* (i.e. since the last entry from usermode) so that
* get_paca()->user_time_scaled is up to date.
*/
@@ -366,6 +375,12 @@ void account_process_tick(struct task_struct *tsk, int user_tick)
account_user_time(tsk, utime, utimescaled);
}
+void vtime_task_switch(struct task_struct *prev)
+{
+ vtime_account(prev);
+ account_process_tick(prev, 0);
+}
+
#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#endif
@@ -493,8 +508,6 @@ void timer_interrupt(struct pt_regs * regs)
*/
may_hard_irq_enable();
- trace_timer_interrupt_entry(regs);
-
__get_cpu_var(irq_stat).timer_irqs++;
#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
@@ -505,6 +518,8 @@ void timer_interrupt(struct pt_regs * regs)
old_regs = set_irq_regs(regs);
irq_enter();
+ trace_timer_interrupt_entry(regs);
+
if (test_irq_work_pending()) {
clear_irq_work_pending();
irq_work_run();
@@ -529,10 +544,19 @@ void timer_interrupt(struct pt_regs * regs)
}
#endif
+ trace_timer_interrupt_exit(regs);
+
irq_exit();
set_irq_regs(old_regs);
+}
- trace_timer_interrupt_exit(regs);
+/*
+ * Hypervisor decrementer interrupts shouldn't occur but are sometimes
+ * left pending on exit from a KVM guest. We don't need to do anything
+ * to clear them, as they are edge-triggered.
+ */
+void hdec_interrupt(struct pt_regs *regs)
+{
}
#ifdef CONFIG_SUSPEND
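
The split leaves the powerpc code responsible only for computing the deltas, while the system-versus-idle decision moves to the callers. The generic dispatcher these two hooks slot into looks roughly like this (scheduler-side shape, interrupt disabling elided, not part of this patch):

	void vtime_account(struct task_struct *tsk)
	{
		if (in_interrupt() || !is_idle_task(tsk))
			vtime_account_system(tsk);
		else
			vtime_account_idle(tsk);
	}
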
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 158972341a2..32518401af6 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -251,6 +251,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
local_irq_enable();
+ current->thread.trap_nr = code;
memset(&info, 0, sizeof(info));
info.si_signo = signr;
info.si_code = code;
@@ -972,8 +973,9 @@ static int emulate_instruction(struct pt_regs *regs)
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mtdscr, regs);
rd = (instword >> 21) & 0x1f;
- mtspr(SPRN_DSCR, regs->gpr[rd]);
+ current->thread.dscr = regs->gpr[rd];
current->thread.dscr_inherit = 1;
+ mtspr(SPRN_DSCR, current->thread.dscr);
return 0;
}
#endif
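
Writing thread.dscr and not just the SPR matters because the context-switch path reloads DSCR from the thread struct; an mtspr alone would be silently undone at the next switch. Condensed shape of the restore side (not part of this patch):

	static void switch_in_dscr(struct task_struct *new)
	{
		if (new->thread.dscr_inherit)
			mtspr(SPRN_DSCR, new->thread.dscr);	/* per-thread value */
		else
			mtspr(SPRN_DSCR, dscr_default);		/* system-wide default */
	}
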
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
new file mode 100644
index 00000000000..d2d46d1014f
--- /dev/null
+++ b/arch/powerpc/kernel/uprobes.c
@@ -0,0 +1,184 @@
+/*
+ * User-space Probes (UProbes) for powerpc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2007-2012
+ *
+ * Adapted from the x86 port by Ananth N Mavinakayanahalli <ananth@in.ibm.com>
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/uprobes.h>
+#include <linux/uaccess.h>
+#include <linux/kdebug.h>
+
+#include <asm/sstep.h>
+
+#define UPROBE_TRAP_NR UINT_MAX
+
+/**
+ * arch_uprobe_analyze_insn
+ * @auprobe: the probepoint information.
+ * @mm: the probed address space.
+ * @addr: vaddr to probe.
+ * Return 0 on success or a negative errno on error.
+ */
+int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
+ struct mm_struct *mm, unsigned long addr)
+{
+ if (addr & 0x03)
+ return -EINVAL;
+
+ /*
+	 * We currently don't support placing a uprobe on top of a
+	 * pre-existing breakpoint instruction at the probed address.
+ */
+ if (is_trap(auprobe->ainsn))
+ return -ENOTSUPP;
+ return 0;
+}
+
+/**
+ * arch_uprobe_pre_xol - prepare to execute out of line.
+ * @auprobe: the probepoint information.
+ * @regs: reflects the saved user state of current task.
+ */
+int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct arch_uprobe_task *autask = &current->utask->autask;
+
+ autask->saved_trap_nr = current->thread.trap_nr;
+ current->thread.trap_nr = UPROBE_TRAP_NR;
+ regs->nip = current->utask->xol_vaddr;
+ return 0;
+}
+
+/**
+ * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
+ * @regs: Reflects the saved state of the task after it has hit a breakpoint
+ * instruction.
+ * Return the address of the breakpoint instruction.
+ */
+unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+ return instruction_pointer(regs);
+}
+
+/*
+ * If xol insn itself traps and generates a signal (SIGILL/SIGSEGV/etc),
+ * then detect the case where a singlestepped instruction jumps back to its
+ * own address. It is assumed that anything like do_page_fault/do_trap/etc
+ * sets thread.trap_nr != UINT_MAX.
+ *
+ * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
+ * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
+ * UPROBE_TRAP_NR == UINT_MAX set by arch_uprobe_pre_xol().
+ */
+bool arch_uprobe_xol_was_trapped(struct task_struct *t)
+{
+ if (t->thread.trap_nr != UPROBE_TRAP_NR)
+ return true;
+
+ return false;
+}
+
+/*
+ * Called after single-stepping. To avoid the SMP problems that can
+ * occur when we temporarily put back the original opcode to
+ * single-step, we single-stepped a copy of the instruction.
+ *
+ * This function prepares to resume execution after the single-step.
+ */
+int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
+
+ current->thread.trap_nr = utask->autask.saved_trap_nr;
+
+ /*
+ * On powerpc, except for loads and stores, most instructions
+ * including ones that alter code flow (branches, calls, returns)
+	 * are emulated in the kernel. We get here only if that emulation
+	 * support is missing and we have to fix up the address of the
+	 * next instruction to be executed.
+ */
+ regs->nip = utask->vaddr + MAX_UINSN_BYTES;
+ return 0;
+}
+
+/* callback routine for handling exceptions. */
+int arch_uprobe_exception_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct die_args *args = data;
+ struct pt_regs *regs = args->regs;
+
+ /* regs == NULL is a kernel bug */
+ if (WARN_ON(!regs))
+ return NOTIFY_DONE;
+
+ /* We are only interested in userspace traps */
+ if (!user_mode(regs))
+ return NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_BPT:
+ if (uprobe_pre_sstep_notifier(regs))
+ return NOTIFY_STOP;
+ break;
+ case DIE_SSTEP:
+ if (uprobe_post_sstep_notifier(regs))
+ return NOTIFY_STOP;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+/*
+ * This function is called when the XOL instruction either traps or the
+ * thread receives a fatal signal; it resets the instruction pointer to
+ * the probed address.
+ */
+void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ current->thread.trap_nr = utask->autask.saved_trap_nr;
+ instruction_pointer_set(regs, utask->vaddr);
+}
+
+/*
+ * See if the instruction can be emulated.
+ * Returns true if instruction was emulated, false otherwise.
+ */
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ int ret;
+
+ /*
+ * emulate_step() returns 1 if the insn was successfully emulated.
+ * For all other cases, we need to single-step in hardware.
+ */
+ ret = emulate_step(regs, auprobe->ainsn);
+ if (ret > 0)
+ return true;
+
+ return false;
+}
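
These arch hooks plug into the generic uprobes core; nothing powerpc-specific is visible to a client. A minimal consumer sketch, assuming the generic API from linux/uprobes.h (the file offset is hypothetical):

	static int on_hit(struct uprobe_consumer *uc, struct pt_regs *regs)
	{
		pr_info("uprobe hit, nip=%lx\n", regs->nip);
		return 0;	/* keep the probe installed */
	}

	static struct uprobe_consumer hit_uc = { .handler = on_hit };

	static int attach_probe(struct inode *inode)
	{
		/* probe the instruction at byte offset 0x1234 of the file */
		return uprobe_register(inode, 0x1234, &hit_uc);
	}
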
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index b67db22e102..1b2076f049c 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -723,9 +723,7 @@ int __cpuinit vdso_getcpu_init(void)
val = (cpu & 0xfff) | ((node & 0xffff) << 16);
mtspr(SPRN_SPRG3, val);
-#ifdef CONFIG_KVM_BOOK3S_HANDLER
- get_paca()->kvm_hstate.sprg3 = val;
-#endif
+ get_paca()->sprg3 = val;
put_cpu();
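
SPRG3 is readable from problem state via its user alias, which is what lets a VDSO getcpu avoid a system call. A sketch of the consumer side unpacking the value written above (SPR number per the user-readable SPRG3 alias; illustrative):

	static inline void read_cpu_node(unsigned int *cpu, unsigned int *node)
	{
		unsigned long val;

		/* mfspr from SPR 259, the user-readable SPRG3 alias */
		asm volatile("mfspr %0, 259" : "=r" (val));
		*cpu  = val & 0xfff;		/* low 12 bits: cpu id */
		*node = (val >> 16) & 0xffff;	/* bits 16..31: node id */
	}
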
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 02b32216bbc..201ba59738b 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -33,7 +33,6 @@
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/tce.h>
-#include <asm/abs_addr.h>
#include <asm/page.h>
#include <asm/hvcall.h>