Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig | 4
-rw-r--r--  arch/powerpc/Kconfig.debug | 15
-rw-r--r--  arch/powerpc/Makefile | 5
-rw-r--r--  arch/powerpc/boot/dts/canyonlands.dts | 5
-rw-r--r--  arch/powerpc/boot/dts/glacier.dts | 8
-rw-r--r--  arch/powerpc/boot/dts/mpc8568mds.dts | 2
-rw-r--r--  arch/powerpc/boot/dts/p1010rdb.dts | 280
-rw-r--r--  arch/powerpc/boot/dts/p1010si.dtsi | 376
-rw-r--r--  arch/powerpc/boot/dts/p1022ds.dts | 2
-rw-r--r--  arch/powerpc/boot/dts/p1023rds.dts | 546
-rw-r--r--  arch/powerpc/boot/dts/p2040rdb.dts | 166
-rw-r--r--  arch/powerpc/boot/dts/p2040si.dtsi | 623
-rw-r--r--  arch/powerpc/boot/dts/p3041ds.dts | 214
-rw-r--r--  arch/powerpc/boot/dts/p3041si.dtsi | 660
-rw-r--r--  arch/powerpc/boot/dts/p4080ds.dts | 533
-rw-r--r--  arch/powerpc/boot/dts/p4080si.dtsi | 661
-rw-r--r--  arch/powerpc/boot/dts/p5020ds.dts | 215
-rw-r--r--  arch/powerpc/boot/dts/p5020si.dtsi | 652
-rw-r--r--  arch/powerpc/boot/dts/sequoia.dts | 12
-rw-r--r--  arch/powerpc/boot/dts/socrates.dts | 2
-rw-r--r--  arch/powerpc/boot/dts/taishan.dts | 4
-rw-r--r--  arch/powerpc/boot/dts/tqm8540.dts | 42
-rw-r--r--  arch/powerpc/boot/dts/tqm8548-bigflash.dts | 2
-rw-r--r--  arch/powerpc/boot/dts/tqm8548.dts | 2
-rw-r--r--  arch/powerpc/boot/dts/tqm8560.dts | 2
-rw-r--r--  arch/powerpc/boot/dts/xpedite5200.dts | 2
-rw-r--r--  arch/powerpc/boot/dts/xpedite5200_xmon.dts | 2
-rw-r--r--  arch/powerpc/boot/treeboot-iss4xx.c | 23
-rw-r--r--  arch/powerpc/configs/44x/iss476-smp_defconfig | 6
-rw-r--r--  arch/powerpc/configs/85xx/p1023rds_defconfig | 173
-rw-r--r--  arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig | 5
-rw-r--r--  arch/powerpc/configs/corenet32_smp_defconfig | 187
-rw-r--r--  arch/powerpc/configs/corenet64_smp_defconfig (renamed from arch/powerpc/configs/e55xx_smp_defconfig) | 0
-rw-r--r--  arch/powerpc/configs/mpc85xx_defconfig | 12
-rw-r--r--  arch/powerpc/configs/mpc85xx_smp_defconfig | 10
-rw-r--r--  arch/powerpc/configs/ppc64_defconfig | 16
-rw-r--r--  arch/powerpc/configs/pseries_defconfig | 3
-rw-r--r--  arch/powerpc/include/asm/8253pit.h | 3
-rw-r--r--  arch/powerpc/include/asm/atomic.h | 19
-rw-r--r--  arch/powerpc/include/asm/bitops.h | 5
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 14
-rw-r--r--  arch/powerpc/include/asm/dbell.h | 2
-rw-r--r--  arch/powerpc/include/asm/ehv_pic.h | 40
-rw-r--r--  arch/powerpc/include/asm/elf.h | 4
-rw-r--r--  arch/powerpc/include/asm/emulated_ops.h | 6
-rw-r--r--  arch/powerpc/include/asm/epapr_hcalls.h | 502
-rw-r--r--  arch/powerpc/include/asm/exception-64e.h | 52
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h | 136
-rw-r--r--  arch/powerpc/include/asm/fsl_hcalls.h | 655
-rw-r--r--  arch/powerpc/include/asm/hvcall.h | 5
-rw-r--r--  arch/powerpc/include/asm/hvsi.h | 94
-rw-r--r--  arch/powerpc/include/asm/hw_breakpoint.h | 2
-rw-r--r--  arch/powerpc/include/asm/irq.h | 4
-rw-r--r--  arch/powerpc/include/asm/jump_label.h | 47
-rw-r--r--  arch/powerpc/include/asm/kvm.h | 15
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h | 4
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 196
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h | 4
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h | 41
-rw-r--r--  arch/powerpc/include/asm/kvm_booke.h | 4
-rw-r--r--  arch/powerpc/include/asm/kvm_e500.h | 30
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 169
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h | 41
-rw-r--r--  arch/powerpc/include/asm/local.h | 2
-rw-r--r--  arch/powerpc/include/asm/macio.h | 2
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h | 10
-rw-r--r--  arch/powerpc/include/asm/mmu.h | 12
-rw-r--r--  arch/powerpc/include/asm/pSeries_reconfig.h | 2
-rw-r--r--  arch/powerpc/include/asm/paca.h | 10
-rw-r--r--  arch/powerpc/include/asm/pci-bridge.h | 79
-rw-r--r--  arch/powerpc/include/asm/pci.h | 5
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h | 3
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 40
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h | 28
-rw-r--r--  arch/powerpc/include/asm/processor.h | 5
-rw-r--r--  arch/powerpc/include/asm/prom.h | 16
-rw-r--r--  arch/powerpc/include/asm/reg.h | 29
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h | 1
-rw-r--r--  arch/powerpc/include/asm/setup.h | 4
-rw-r--r--  arch/powerpc/include/asm/smp.h | 3
-rw-r--r--  arch/powerpc/include/asm/smu.h | 2
-rw-r--r--  arch/powerpc/include/asm/system.h | 1
-rw-r--r--  arch/powerpc/include/asm/udbg.h | 1
-rw-r--r--  arch/powerpc/kernel/Makefile | 1
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 193
-rw-r--r--  arch/powerpc/kernel/cpu_setup_power7.S | 22
-rw-r--r--  arch/powerpc/kernel/cpu_setup_ppc970.S | 26
-rw-r--r--  arch/powerpc/kernel/crash.c | 6
-rw-r--r--  arch/powerpc/kernel/dma.c | 4
-rw-r--r--  arch/powerpc/kernel/e500-pmu.c | 5
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S | 22
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 228
-rw-r--r--  arch/powerpc/kernel/head_44x.S | 42
-rw-r--r--  arch/powerpc/kernel/head_64.S | 2
-rw-r--r--  arch/powerpc/kernel/head_booke.h | 42
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S | 57
-rw-r--r--  arch/powerpc/kernel/idle_e500.S | 12
-rw-r--r--  arch/powerpc/kernel/idle_power7.S | 2
-rw-r--r--  arch/powerpc/kernel/irq.c | 50
-rw-r--r--  arch/powerpc/kernel/jump_label.c | 23
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c | 4
-rw-r--r--  arch/powerpc/kernel/misc_64.S | 4
-rw-r--r--  arch/powerpc/kernel/module.c | 18
-rw-r--r--  arch/powerpc/kernel/module_32.c | 11
-rw-r--r--  arch/powerpc/kernel/module_64.c | 10
-rw-r--r--  arch/powerpc/kernel/mpc7450-pmu.c | 7
-rw-r--r--  arch/powerpc/kernel/of_platform.c | 2
-rw-r--r--  arch/powerpc/kernel/paca.c | 2
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 53
-rw-r--r--  arch/powerpc/kernel/pci_32.c | 171
-rw-r--r--  arch/powerpc/kernel/pci_64.c | 4
-rw-r--r--  arch/powerpc/kernel/pci_dn.c | 47
-rw-r--r--  arch/powerpc/kernel/pci_of_scan.c | 9
-rw-r--r--  arch/powerpc/kernel/perf_event.c | 8
-rw-r--r--  arch/powerpc/kernel/perf_event_fsl_emb.c | 6
-rw-r--r--  arch/powerpc/kernel/power4-pmu.c | 7
-rw-r--r--  arch/powerpc/kernel/power5+-pmu.c | 7
-rw-r--r--  arch/powerpc/kernel/power5-pmu.c | 7
-rw-r--r--  arch/powerpc/kernel/power6-pmu.c | 7
-rw-r--r--  arch/powerpc/kernel/power7-pmu.c | 7
-rw-r--r--  arch/powerpc/kernel/ppc970-pmu.c | 7
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c | 2
-rw-r--r--  arch/powerpc/kernel/process.c | 8
-rw-r--r--  arch/powerpc/kernel/prom.c | 16
-rw-r--r--  arch/powerpc/kernel/ptrace.c | 4
-rw-r--r--  arch/powerpc/kernel/rtas.c | 3
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c | 1
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c | 2
-rw-r--r--  arch/powerpc/kernel/rtasd.c | 2
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 30
-rw-r--r--  arch/powerpc/kernel/setup_32.c | 4
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 9
-rw-r--r--  arch/powerpc/kernel/smp-tbsync.c | 2
-rw-r--r--  arch/powerpc/kernel/smp.c | 33
-rw-r--r--  arch/powerpc/kernel/time.c | 2
-rw-r--r--  arch/powerpc/kernel/traps.c | 5
-rw-r--r--  arch/powerpc/kernel/udbg.c | 5
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c | 4
-rw-r--r--  arch/powerpc/kvm/Kconfig | 35
-rw-r--r--  arch/powerpc/kvm/Makefile | 27
-rw-r--r--  arch/powerpc/kvm/book3s.c | 1007
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c | 54
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 180
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio_hv.c | 73
-rw-r--r--  arch/powerpc/kvm/book3s_exports.c | 9
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 1269
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c | 155
-rw-r--r--  arch/powerpc/kvm/book3s_hv_interrupts.S | 166
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 370
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 1345
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S | 21
-rw-r--r--  arch/powerpc/kvm/book3s_mmu_hpte.c | 71
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c | 1029
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S | 102
-rw-r--r--  arch/powerpc/kvm/book3s_segment.S | 117
-rw-r--r--  arch/powerpc/kvm/booke.c | 132
-rw-r--r--  arch/powerpc/kvm/booke.h | 23
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S | 66
-rw-r--r--  arch/powerpc/kvm/e500.c | 7
-rw-r--r--  arch/powerpc/kvm/e500_emulate.c | 4
-rw-r--r--  arch/powerpc/kvm/e500_tlb.c | 800
-rw-r--r--  arch/powerpc/kvm/e500_tlb.h | 13
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 78
-rw-r--r--  arch/powerpc/kvm/timing.c | 9
-rw-r--r--  arch/powerpc/kvm/trace.h | 4
-rw-r--r--  arch/powerpc/mm/44x_mmu.c | 13
-rw-r--r--  arch/powerpc/mm/fault.c | 6
-rw-r--r--  arch/powerpc/mm/hash_native_64.c | 6
-rw-r--r--  arch/powerpc/mm/init_32.c | 32
-rw-r--r--  arch/powerpc/mm/init_64.c | 16
-rw-r--r--  arch/powerpc/mm/mem.c | 35
-rw-r--r--  arch/powerpc/mm/tlb_hash32.c | 4
-rw-r--r--  arch/powerpc/mm/tlb_low_64e.S | 206
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c | 64
-rw-r--r--  arch/powerpc/net/Makefile | 4
-rw-r--r--  arch/powerpc/net/bpf_jit.h | 227
-rw-r--r--  arch/powerpc/net/bpf_jit_64.S | 138
-rw-r--r--  arch/powerpc/net/bpf_jit_comp.c | 694
-rw-r--r--  arch/powerpc/platforms/40x/ep405.c | 2
-rw-r--r--  arch/powerpc/platforms/40x/ppc40x_simple.c | 2
-rw-r--r--  arch/powerpc/platforms/40x/walnut.c | 2
-rw-r--r--  arch/powerpc/platforms/44x/canyonlands.c | 2
-rw-r--r--  arch/powerpc/platforms/44x/ebony.c | 2
-rw-r--r--  arch/powerpc/platforms/44x/ppc44x_simple.c | 2
-rw-r--r--  arch/powerpc/platforms/44x/sam440ep.c | 2
-rw-r--r--  arch/powerpc/platforms/52xx/Kconfig | 8
-rw-r--r--  arch/powerpc/platforms/52xx/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_gpio.c | 380
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pci.c | 10
-rw-r--r--  arch/powerpc/platforms/82xx/pq2.c | 2
-rw-r--r--  arch/powerpc/platforms/83xx/km83xx.c | 4
-rw-r--r--  arch/powerpc/platforms/83xx/mpc832x_mds.c | 4
-rw-r--r--  arch/powerpc/platforms/83xx/mpc834x_itx.c | 2
-rw-r--r--  arch/powerpc/platforms/83xx/mpc834x_mds.c | 4
-rw-r--r--  arch/powerpc/platforms/83xx/mpc836x_mds.c | 4
-rw-r--r--  arch/powerpc/platforms/83xx/sbc834x.c | 2
-rw-r--r--  arch/powerpc/platforms/83xx/usb.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/Kconfig | 31
-rw-r--r--  arch/powerpc/platforms/85xx/Makefile | 3
-rw-r--r--  arch/powerpc/platforms/85xx/corenet_ds.c | 41
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_cds.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ds.c | 3
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_mds.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_rdb.c | 5
-rw-r--r--  arch/powerpc/platforms/85xx/p1010rdb.c | 122
-rw-r--r--  arch/powerpc/platforms/85xx/p1022_ds.c | 18
-rw-r--r--  arch/powerpc/platforms/85xx/p1023_rds.c | 162
-rw-r--r--  arch/powerpc/platforms/85xx/p2040_rdb.c | 88
-rw-r--r--  arch/powerpc/platforms/85xx/p3041_ds.c | 28
-rw-r--r--  arch/powerpc/platforms/85xx/p4080_ds.c | 38
-rw-r--r--  arch/powerpc/platforms/85xx/p5020_ds.c | 32
-rw-r--r--  arch/powerpc/platforms/85xx/sbc8548.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/sbc8560.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c | 30
-rw-r--r--  arch/powerpc/platforms/85xx/xes_mpc85xx.c | 2
-rw-r--r--  arch/powerpc/platforms/86xx/mpc8610_hpcd.c | 107
-rw-r--r--  arch/powerpc/platforms/Kconfig | 6
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 2
-rw-r--r--  arch/powerpc/platforms/amigaone/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/cell/celleb_scc_epci.c | 8
-rw-r--r--  arch/powerpc/platforms/cell/celleb_scc_pciex.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/cpufreq_spudemand.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/smp.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/spu_manage.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c | 11
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c | 29
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h | 2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/syscalls.c | 22
-rw-r--r--  arch/powerpc/platforms/chrp/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/chrp/pci.c | 4
-rw-r--r--  arch/powerpc/platforms/chrp/smp.c | 2
-rw-r--r--  arch/powerpc/platforms/iseries/exception.S | 2
-rw-r--r--  arch/powerpc/platforms/iseries/exception.h | 4
-rw-r--r--  arch/powerpc/platforms/iseries/smp.c | 4
-rw-r--r--  arch/powerpc/platforms/maple/setup.c | 41
-rw-r--r--  arch/powerpc/platforms/pasemi/dma_lib.c | 2
-rw-r--r--  arch/powerpc/platforms/powermac/backlight.c | 2
-rw-r--r--  arch/powerpc/platforms/powermac/nvram.c | 4
-rw-r--r--  arch/powerpc/platforms/powermac/pci.c | 15
-rw-r--r--  arch/powerpc/platforms/powermac/setup.c | 3
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c | 4
-rw-r--r--  arch/powerpc/platforms/powermac/time.c | 2
-rw-r--r--  arch/powerpc/platforms/prep/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/pseries/dlpar.c | 10
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_cache.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c | 10
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-memory.c | 46
-rw-r--r--  arch/powerpc/platforms/pseries/hvconsole.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 191
-rw-r--r--  arch/powerpc/platforms/pseries/pseries.h | 3
-rw-r--r--  arch/powerpc/platforms/pseries/reconfig.c | 28
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 5
-rw-r--r--  arch/powerpc/platforms/pseries/smp.c | 5
-rw-r--r--  arch/powerpc/platforms/wsp/smp.c | 2
-rw-r--r--  arch/powerpc/sysdev/Makefile | 1
-rw-r--r--  arch/powerpc/sysdev/axonram.c | 2
-rw-r--r--  arch/powerpc/sysdev/cpm1.c | 2
-rw-r--r--  arch/powerpc/sysdev/cpm_common.c | 2
-rw-r--r--  arch/powerpc/sysdev/dart_iommu.c | 2
-rw-r--r--  arch/powerpc/sysdev/ehv_pic.c | 302
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.c | 2
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c | 99
-rw-r--r--  arch/powerpc/sysdev/fsl_rio.c | 2
-rw-r--r--  arch/powerpc/sysdev/fsl_soc.c | 29
-rw-r--r--  arch/powerpc/sysdev/fsl_soc.h | 3
-rw-r--r--  arch/powerpc/sysdev/grackle.c | 2
-rw-r--r--  arch/powerpc/sysdev/ipic.c | 2
-rw-r--r--  arch/powerpc/sysdev/mmio_nvram.c | 2
-rw-r--r--  arch/powerpc/sysdev/mpc8xx_pic.c | 2
-rw-r--r--  arch/powerpc/sysdev/mpic.c | 38
-rw-r--r--  arch/powerpc/sysdev/mv64x60_udbg.c | 4
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_pci.c | 165
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe_ic.c | 2
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe_io.c | 2
-rw-r--r--  arch/powerpc/sysdev/tsi108_dev.c | 2
-rw-r--r--  arch/powerpc/sysdev/xics/icp-native.c | 11
279 files changed, 16030 insertions, 3992 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2729c6663d8..374c475e56a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -134,6 +134,8 @@ config PPC
select GENERIC_IRQ_SHOW_LEVEL
select HAVE_RCU_TABLE_FREE if SMP
select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_BPF_JIT if (PPC64 && NET)
+ select HAVE_ARCH_JUMP_LABEL
config EARLY_PRINTK
bool
@@ -841,7 +843,7 @@ config LOWMEM_CAM_NUM
config RELOCATABLE
bool "Build a relocatable kernel (EXPERIMENTAL)"
- depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM && FSL_BOOKE
+ depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM && (FSL_BOOKE || PPC_47x)
help
This builds a kernel image that is capable of running at the
location the kernel is loaded at (some alignment restrictions may
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index e72dcf6a421..067cb848074 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -167,6 +167,13 @@ config PPC_EARLY_DEBUG_LPAR
Select this to enable early debugging for a machine with a HVC
console on vterm 0.
+config PPC_EARLY_DEBUG_LPAR_HVSI
+ bool "LPAR HVSI Console"
+ depends on PPC_PSERIES
+ help
+ Select this to enable early debugging for a machine with a HVSI
+ console on a specified vterm.
+
config PPC_EARLY_DEBUG_G5
bool "Apple G5"
depends on PPC_PMAC64
@@ -253,6 +260,14 @@ config PPC_EARLY_DEBUG_WSP
endchoice
+config PPC_EARLY_DEBUG_HVSI_VTERMNO
+ hex "vterm number to use with early debug HVSI"
+ depends on PPC_EARLY_DEBUG_LPAR_HVSI
+ default "0x30000000"
+ help
+ You probably want 0x30000000 for your first serial port and
+ 0x30000001 for your second one
+
config PPC_EARLY_DEBUG_44x_PHYSLOW
hex "Low 32 bits of early debug UART physical address"
depends on PPC_EARLY_DEBUG_44x
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index b7212b619c5..57af16edc19 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -67,7 +67,7 @@ LDFLAGS_vmlinux-yy := -Bstatic
LDFLAGS_vmlinux-$(CONFIG_PPC64)$(CONFIG_RELOCATABLE) := -pie
LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-yy)
-CFLAGS-$(CONFIG_PPC64) := -mminimal-toc -mtraceback=none -mcall-aixdesc
+CFLAGS-$(CONFIG_PPC64) := -mminimal-toc -mtraceback=no -mcall-aixdesc
CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple
KBUILD_CPPFLAGS += -Iarch/$(ARCH)
KBUILD_AFLAGS += -Iarch/$(ARCH)
@@ -154,7 +154,8 @@ core-y += arch/powerpc/kernel/ \
arch/powerpc/lib/ \
arch/powerpc/sysdev/ \
arch/powerpc/platforms/ \
- arch/powerpc/math-emu/
+ arch/powerpc/math-emu/ \
+ arch/powerpc/net/
core-$(CONFIG_XMON) += arch/powerpc/xmon/
core-$(CONFIG_KVM) += arch/powerpc/kvm/
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index 22dd6ae84da..3dc75deafbb 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -143,6 +143,11 @@
interrupts = <0x1d 0x4>;
};
+ HWRNG: hwrng@110000 {
+ compatible = "amcc,ppc460ex-rng", "ppc4xx-rng";
+ reg = <4 0x00110000 0x50>;
+ };
+
MAL0: mcmal {
compatible = "ibm,mcmal-460ex", "ibm,mcmal2";
dcr-reg = <0x180 0x062>;
diff --git a/arch/powerpc/boot/dts/glacier.dts b/arch/powerpc/boot/dts/glacier.dts
index e618fc4cbc9..2000060386d 100644
--- a/arch/powerpc/boot/dts/glacier.dts
+++ b/arch/powerpc/boot/dts/glacier.dts
@@ -130,12 +130,18 @@
};
CRYPTO: crypto@180000 {
- compatible = "amcc,ppc460gt-crypto", "amcc,ppc4xx-crypto";
+ compatible = "amcc,ppc460gt-crypto", "amcc,ppc460ex-crypto",
+ "amcc,ppc4xx-crypto";
reg = <4 0x00180000 0x80400>;
interrupt-parent = <&UIC0>;
interrupts = <0x1d 0x4>;
};
+ HWRNG: hwrng@110000 {
+ compatible = "amcc,ppc460ex-rng", "ppc4xx-rng";
+ reg = <4 0x00110000 0x50>;
+ };
+
MAL0: mcmal {
compatible = "ibm,mcmal-460gt", "ibm,mcmal2";
dcr-reg = <0x180 0x062>;
diff --git a/arch/powerpc/boot/dts/mpc8568mds.dts b/arch/powerpc/boot/dts/mpc8568mds.dts
index 30cf0e098bb..647daf8e729 100644
--- a/arch/powerpc/boot/dts/mpc8568mds.dts
+++ b/arch/powerpc/boot/dts/mpc8568mds.dts
@@ -60,6 +60,8 @@
compatible = "fsl,mpc8568-localbus", "fsl,pq3-localbus",
"simple-bus";
reg = <0xe0005000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <19 2>;
ranges = <0x0 0x0 0xfe000000 0x02000000
0x1 0x0 0xf8000000 0x00008000
diff --git a/arch/powerpc/boot/dts/p1010rdb.dts b/arch/powerpc/boot/dts/p1010rdb.dts
new file mode 100644
index 00000000000..6b33b73a5ba
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb.dts
@@ -0,0 +1,280 @@
+/*
+ * P1010 RDB Device Tree Source
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/include/ "p1010si.dtsi"
+
+/ {
+ model = "fsl,P1010RDB";
+ compatible = "fsl,P1010RDB";
+
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial1;
+ ethernet0 = &enet0;
+ ethernet1 = &enet1;
+ ethernet2 = &enet2;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ };
+
+ memory {
+ device_type = "memory";
+ };
+
+ ifc@ffe1e000 {
+ /* NOR, NAND Flashes and CPLD on board */
+ ranges = <0x0 0x0 0x0 0xee000000 0x02000000
+ 0x1 0x0 0x0 0xff800000 0x00010000
+ 0x3 0x0 0x0 0xffb00000 0x00000020>;
+
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x2000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ partition@40000 {
+ /* 256KB for DTB Image */
+ reg = <0x00040000 0x00040000>;
+ label = "NOR DTB Image";
+ };
+
+ partition@80000 {
+ /* 7 MB for Linux Kernel Image */
+ reg = <0x00080000 0x00700000>;
+ label = "NOR Linux Kernel Image";
+ };
+
+ partition@800000 {
+ /* 20MB for JFFS2 based Root file System */
+ reg = <0x00800000 0x01400000>;
+ label = "NOR JFFS2 Root File System";
+ };
+
+ partition@1f00000 {
+ /* This location must not be altered */
+ /* 512KB for u-boot Bootloader Image */
+ /* 512KB for u-boot Environment Variables */
+ reg = <0x01f00000 0x00100000>;
+ label = "NOR U-Boot Image";
+ read-only;
+ };
+ };
+
+ nand@1,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,ifc-nand";
+ reg = <0x1 0x0 0x10000>;
+
+ partition@0 {
+ /* This location must not be altered */
+ /* 1MB for u-boot Bootloader Image */
+ reg = <0x0 0x00100000>;
+ label = "NAND U-Boot Image";
+ read-only;
+ };
+
+ partition@100000 {
+ /* 1MB for DTB Image */
+ reg = <0x00100000 0x00100000>;
+ label = "NAND DTB Image";
+ };
+
+ partition@200000 {
+ /* 4MB for Linux Kernel Image */
+ reg = <0x00200000 0x00400000>;
+ label = "NAND Linux Kernel Image";
+ };
+
+ partition@600000 {
+ /* 4MB for Compressed Root file System Image */
+ reg = <0x00600000 0x00400000>;
+ label = "NAND Compressed RFS Image";
+ };
+
+ partition@a00000 {
+ /* 15MB for JFFS2 based Root file System */
+ reg = <0x00a00000 0x00f00000>;
+ label = "NAND JFFS2 Root File System";
+ };
+
+ partition@1900000 {
+ /* 7MB for User Area */
+ reg = <0x01900000 0x00700000>;
+ label = "NAND User area";
+ };
+ };
+
+ cpld@3,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,p1010rdb-cpld";
+ reg = <0x3 0x0 0x0000020>;
+ bank-width = <1>;
+ device-width = <1>;
+ };
+ };
+
+ soc@ffe00000 {
+ spi@7000 {
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,s25sl12801";
+ reg = <0>;
+ spi-max-frequency = <50000000>;
+
+ partition@0 {
+ /* 1MB for u-boot Bootloader Image */
+ /* 1MB for Environment */
+ reg = <0x0 0x00100000>;
+ label = "SPI Flash U-Boot Image";
+ read-only;
+ };
+
+ partition@100000 {
+ /* 512KB for DTB Image */
+ reg = <0x00100000 0x00080000>;
+ label = "SPI Flash DTB Image";
+ };
+
+ partition@180000 {
+ /* 4MB for Linux Kernel Image */
+ reg = <0x00180000 0x00400000>;
+ label = "SPI Flash Linux Kernel Image";
+ };
+
+ partition@580000 {
+ /* 4MB for Compressed RFS Image */
+ reg = <0x00580000 0x00400000>;
+ label = "SPI Flash Compressed RFSImage";
+ };
+
+ partition@980000 {
+ /* 6.5MB for JFFS2 based RFS */
+ reg = <0x00980000 0x00680000>;
+ label = "SPI Flash JFFS2 RFS";
+ };
+ };
+ };
+
+ can0@1c000 {
+ fsl,flexcan-clock-source = "platform";
+ };
+
+ can1@1d000 {
+ fsl,flexcan-clock-source = "platform";
+ };
+
+ usb@22000 {
+ phy_type = "utmi";
+ };
+
+ mdio@24000 {
+ phy0: ethernet-phy@0 {
+ interrupt-parent = <&mpic>;
+ interrupts = <3 1>;
+ reg = <0x1>;
+ };
+
+ phy1: ethernet-phy@1 {
+ interrupt-parent = <&mpic>;
+ interrupts = <2 1>;
+ reg = <0x0>;
+ };
+
+ phy2: ethernet-phy@2 {
+ interrupt-parent = <&mpic>;
+ interrupts = <2 1>;
+ reg = <0x2>;
+ };
+ };
+
+ enet0: ethernet@b0000 {
+ phy-handle = <&phy0>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ enet1: ethernet@b1000 {
+ phy-handle = <&phy1>;
+ tbi-handle = <&tbi0>;
+ phy-connection-type = "sgmii";
+ };
+
+ enet2: ethernet@b2000 {
+ phy-handle = <&phy2>;
+ tbi-handle = <&tbi1>;
+ phy-connection-type = "sgmii";
+ };
+ };
+
+ pci0: pcie@ffe09000 {
+ ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
+ pcie@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x4 0x1
+ 0000 0x0 0x0 0x2 &mpic 0x5 0x1
+ 0000 0x0 0x0 0x3 &mpic 0x6 0x1
+ 0000 0x0 0x0 0x4 &mpic 0x7 0x1
+ >;
+
+ ranges = <0x2000000 0x0 0xa0000000
+ 0x2000000 0x0 0xa0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci1: pcie@ffe0a000 {
+ ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
+ pcie@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x4 0x1
+ 0000 0x0 0x0 0x2 &mpic 0x5 0x1
+ 0000 0x0 0x0 0x3 &mpic 0x6 0x1
+ 0000 0x0 0x0 0x4 &mpic 0x7 0x1
+ >;
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/p1010si.dtsi b/arch/powerpc/boot/dts/p1010si.dtsi
new file mode 100644
index 00000000000..7f51104f2e3
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010si.dtsi
@@ -0,0 +1,376 @@
+/*
+ * P1010si Device Tree Source
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/dts-v1/;
+/ {
+ compatible = "fsl,P1010";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,P1010@0 {
+ device_type = "cpu";
+ reg = <0x0>;
+ next-level-cache = <&L2>;
+ };
+ };
+
+ ifc@ffe1e000 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,ifc", "simple-bus";
+ reg = <0x0 0xffe1e000 0 0x2000>;
+ interrupts = <16 2 19 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ soc@ffe00000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "fsl,p1010-immr", "simple-bus";
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
+ bus-frequency = <0>; // Filled out by uboot.
+
+ ecm-law@0 {
+ compatible = "fsl,ecm-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <12>;
+ };
+
+ ecm@1000 {
+ compatible = "fsl,p1010-ecm", "fsl,ecm";
+ reg = <0x1000 0x1000>;
+ interrupts = <16 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ memory-controller@2000 {
+ compatible = "fsl,p1010-memory-controller";
+ reg = <0x2000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ };
+
+ i2c@3000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ compatible = "fsl-i2c";
+ reg = <0x3000 0x100>;
+ interrupts = <43 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ };
+
+ i2c@3100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ compatible = "fsl-i2c";
+ reg = <0x3100 0x100>;
+ interrupts = <43 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ };
+
+ serial0: serial@4500 {
+ cell-index = <0>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x4500 0x100>;
+ clock-frequency = <0>;
+ interrupts = <42 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ serial1: serial@4600 {
+ cell-index = <1>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x4600 0x100>;
+ clock-frequency = <0>;
+ interrupts = <42 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ spi@7000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,mpc8536-espi";
+ reg = <0x7000 0x1000>;
+ interrupts = <59 0x2>;
+ interrupt-parent = <&mpic>;
+ fsl,espi-num-chipselects = <1>;
+ };
+
+ gpio: gpio-controller@f000 {
+ #gpio-cells = <2>;
+ compatible = "fsl,mpc8572-gpio";
+ reg = <0xf000 0x100>;
+ interrupts = <47 0x2>;
+ interrupt-parent = <&mpic>;
+ gpio-controller;
+ };
+
+ sata@18000 {
+ compatible = "fsl,pq-sata-v2";
+ reg = <0x18000 0x1000>;
+ cell-index = <1>;
+ interrupts = <74 0x2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ sata@19000 {
+ compatible = "fsl,pq-sata-v2";
+ reg = <0x19000 0x1000>;
+ cell-index = <2>;
+ interrupts = <41 0x2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ can0@1c000 {
+ compatible = "fsl,flexcan-v1.0";
+ reg = <0x1c000 0x1000>;
+ interrupts = <48 0x2>;
+ interrupt-parent = <&mpic>;
+ fsl,flexcan-clock-divider = <2>;
+ };
+
+ can1@1d000 {
+ compatible = "fsl,flexcan-v1.0";
+ reg = <0x1d000 0x1000>;
+ interrupts = <61 0x2>;
+ interrupt-parent = <&mpic>;
+ fsl,flexcan-clock-divider = <2>;
+ };
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,p1010-l2-cache-controller",
+ "fsl,p1014-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x40000>; // L2,256K
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ };
+
+ dma@21300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,p1010-dma", "fsl,eloplus-dma";
+ reg = <0x21300 0x4>;
+ ranges = <0x0 0x21100 0x200>;
+ cell-index = <0>;
+ dma-channel@0 {
+ compatible = "fsl,p1010-dma-channel", "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupt-parent = <&mpic>;
+ interrupts = <20 2>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,p1010-dma-channel", "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupt-parent = <&mpic>;
+ interrupts = <21 2>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,p1010-dma-channel", "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupt-parent = <&mpic>;
+ interrupts = <22 2>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,p1010-dma-channel", "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupt-parent = <&mpic>;
+ interrupts = <23 2>;
+ };
+ };
+
+ usb@22000 {
+ compatible = "fsl-usb2-dr";
+ reg = <0x22000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupt-parent = <&mpic>;
+ interrupts = <28 0x2>;
+ dr_mode = "host";
+ };
+
+ mdio@24000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,etsec2-mdio";
+ reg = <0x24000 0x1000 0xb0030 0x4>;
+ };
+
+ mdio@25000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,etsec2-tbi";
+ reg = <0x25000 0x1000 0xb1030 0x4>;
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@26000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,etsec2-tbi";
+ reg = <0x26000 0x1000 0xb1030 0x4>;
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ sdhci@2e000 {
+ compatible = "fsl,esdhc";
+ reg = <0x2e000 0x1000>;
+ interrupts = <72 0x8>;
+ interrupt-parent = <&mpic>;
+ /* Filled in by U-Boot */
+ clock-frequency = <0>;
+ fsl,sdhci-auto-cmd12;
+ };
+
+ enet0: ethernet@b0000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "fsl,etsec2";
+ fsl,num_rx_queues = <0x8>;
+ fsl,num_tx_queues = <0x8>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupt-parent = <&mpic>;
+
+ queue-group@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb0000 0x1000>;
+ fsl,rx-bit-map = <0xff>;
+ fsl,tx-bit-map = <0xff>;
+ interrupts = <29 2 30 2 34 2>;
+ };
+
+ };
+
+ enet1: ethernet@b1000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "fsl,etsec2";
+ fsl,num_rx_queues = <0x8>;
+ fsl,num_tx_queues = <0x8>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupt-parent = <&mpic>;
+
+ queue-group@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb1000 0x1000>;
+ fsl,rx-bit-map = <0xff>;
+ fsl,tx-bit-map = <0xff>;
+ interrupts = <35 2 36 2 40 2>;
+ };
+
+ };
+
+ enet2: ethernet@b2000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "fsl,etsec2";
+ fsl,num_rx_queues = <0x8>;
+ fsl,num_tx_queues = <0x8>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupt-parent = <&mpic>;
+
+ queue-group@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb2000 0x1000>;
+ fsl,rx-bit-map = <0xff>;
+ fsl,tx-bit-map = <0xff>;
+ interrupts = <31 2 32 2 33 2>;
+ };
+
+ };
+
+ mpic: pic@40000 {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ reg = <0x40000 0x40000>;
+ compatible = "chrp,open-pic";
+ device_type = "open-pic";
+ };
+
+ msi@41600 {
+ compatible = "fsl,p1010-msi", "fsl,mpic-msi";
+ reg = <0x41600 0x80>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xe0 0
+ 0xe1 0
+ 0xe2 0
+ 0xe3 0
+ 0xe4 0
+ 0xe5 0
+ 0xe6 0
+ 0xe7 0>;
+ interrupt-parent = <&mpic>;
+ };
+
+ global-utilities@e0000 { //global utilities block
+ compatible = "fsl,p1010-guts";
+ reg = <0xe0000 0x1000>;
+ fsl,has-rstcr;
+ };
+ };
+
+ pci0: pcie@ffe09000 {
+ compatible = "fsl,p1010-pcie", "fsl,qoriq-pcie-v2.3", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0 0xffe09000 0 0x1000>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ };
+
+ pci1: pcie@ffe0a000 {
+ compatible = "fsl,p1010-pcie", "fsl,qoriq-pcie-v2.3", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0 0xffe0a000 0 0x1000>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/p1022ds.dts b/arch/powerpc/boot/dts/p1022ds.dts
index 98d9426d4b8..1be9743ab5e 100644
--- a/arch/powerpc/boot/dts/p1022ds.dts
+++ b/arch/powerpc/boot/dts/p1022ds.dts
@@ -412,7 +412,6 @@
fsl,magic-packet;
fsl,wake-on-filer;
local-mac-address = [ 00 00 00 00 00 00 ];
- fixed-link = <1 1 1000 0 0>;
phy-handle = <&phy0>;
phy-connection-type = "rgmii-id";
queue-group@0{
@@ -439,7 +438,6 @@
fsl,num_rx_queues = <0x8>;
fsl,num_tx_queues = <0x8>;
local-mac-address = [ 00 00 00 00 00 00 ];
- fixed-link = <1 1 1000 0 0>;
phy-handle = <&phy1>;
phy-connection-type = "rgmii-id";
queue-group@0{
diff --git a/arch/powerpc/boot/dts/p1023rds.dts b/arch/powerpc/boot/dts/p1023rds.dts
new file mode 100644
index 00000000000..bfa96aa8f2c
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1023rds.dts
@@ -0,0 +1,546 @@
+/*
+ * P1023 RDS Device Tree Source
+ *
+ * Copyright 2010-2011 Freescale Semiconductor Inc.
+ *
+ * Author: Roy Zang <tie-fei.zang@freescale.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+/ {
+ model = "fsl,P1023";
+ compatible = "fsl,P1023RDS";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial1;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ pci2 = &pci2;
+
+ crypto = &crypto;
+ sec_jr0 = &sec_jr0;
+ sec_jr1 = &sec_jr1;
+ sec_jr2 = &sec_jr2;
+ sec_jr3 = &sec_jr3;
+ rtic_a = &rtic_a;
+ rtic_b = &rtic_b;
+ rtic_c = &rtic_c;
+ rtic_d = &rtic_d;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: PowerPC,P1023@0 {
+ device_type = "cpu";
+ reg = <0x0>;
+ next-level-cache = <&L2>;
+ };
+
+ cpu1: PowerPC,P1023@1 {
+ device_type = "cpu";
+ reg = <0x1>;
+ next-level-cache = <&L2>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ };
+
+ soc@ff600000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "fsl,p1023-immr", "simple-bus";
+ ranges = <0x0 0x0 0xff600000 0x200000>;
+ bus-frequency = <0>; // Filled out by uboot.
+
+ ecm-law@0 {
+ compatible = "fsl,ecm-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <12>;
+ };
+
+ ecm@1000 {
+ compatible = "fsl,p1023-ecm", "fsl,ecm";
+ reg = <0x1000 0x1000>;
+ interrupts = <16 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ memory-controller@2000 {
+ compatible = "fsl,p1023-memory-controller";
+ reg = <0x2000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ };
+
+ i2c@3000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ compatible = "fsl-i2c";
+ reg = <0x3000 0x100>;
+ interrupts = <43 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ rtc@68 {
+ compatible = "dallas,ds1374";
+ reg = <0x68>;
+ };
+ };
+
+ i2c@3100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ compatible = "fsl-i2c";
+ reg = <0x3100 0x100>;
+ interrupts = <43 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ };
+
+ serial0: serial@4500 {
+ cell-index = <0>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x4500 0x100>;
+ clock-frequency = <0>;
+ interrupts = <42 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ serial1: serial@4600 {
+ cell-index = <1>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x4600 0x100>;
+ clock-frequency = <0>;
+ interrupts = <42 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ spi@7000 {
+ cell-index = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,p1023-espi", "fsl,mpc8536-espi";
+ reg = <0x7000 0x1000>;
+ interrupts = <59 0x2>;
+ interrupt-parent = <&mpic>;
+ fsl,espi-num-chipselects = <4>;
+
+ fsl_dataflash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "atmel,at45db081d";
+ reg = <0>;
+ spi-max-frequency = <40000000>; /* input clock */
+ partition@u-boot {
+ /* 512KB for u-boot Bootloader Image */
+ label = "u-boot-spi";
+ reg = <0x00000000 0x00080000>;
+ read-only;
+ };
+ partition@dtb {
+ /* 512KB for DTB Image */
+ label = "dtb-spi";
+ reg = <0x00080000 0x00080000>;
+ read-only;
+ };
+ };
+ };
+
+ gpio: gpio-controller@f000 {
+ #gpio-cells = <2>;
+ compatible = "fsl,qoriq-gpio";
+ reg = <0xf000 0x100>;
+ interrupts = <47 0x2>;
+ interrupt-parent = <&mpic>;
+ gpio-controller;
+ };
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,p1023-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x40000>; // L2,256K
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ };
+
+ dma@21300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,eloplus-dma";
+ reg = <0x21300 0x4>;
+ ranges = <0x0 0x21100 0x200>;
+ cell-index = <0>;
+ dma-channel@0 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupt-parent = <&mpic>;
+ interrupts = <20 2>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupt-parent = <&mpic>;
+ interrupts = <21 2>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupt-parent = <&mpic>;
+ interrupts = <22 2>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupt-parent = <&mpic>;
+ interrupts = <23 2>;
+ };
+ };
+
+ usb@22000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl-usb2-dr";
+ reg = <0x22000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <28 0x2>;
+ dr_mode = "host";
+ phy_type = "ulpi";
+ };
+
+ crypto: crypto@300000 {
+ compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x30000 0x10000>;
+ ranges = <0 0x30000 0x10000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <58 2>;
+
+ sec_jr0: jr@1000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x1000 0x1000>;
+ interrupts = <45 2>;
+ };
+
+ sec_jr1: jr@2000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x2000 0x1000>;
+ interrupts = <45 2>;
+ };
+
+ sec_jr2: jr@3000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x3000 0x1000>;
+ interrupts = <57 2>;
+ };
+
+ sec_jr3: jr@4000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x4000 0x1000>;
+ interrupts = <57 2>;
+ };
+
+ rtic@6000 {
+ compatible = "fsl,sec-v4.2-rtic",
+ "fsl,sec-v4.0-rtic";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x6000 0x100>;
+ ranges = <0x0 0x6100 0xe00>;
+
+ rtic_a: rtic-a@0 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x00 0x20 0x100 0x80>;
+ };
+
+ rtic_b: rtic-b@20 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x20 0x20 0x200 0x80>;
+ };
+
+ rtic_c: rtic-c@40 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x40 0x20 0x300 0x80>;
+ };
+
+ rtic_d: rtic-d@60 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x60 0x20 0x500 0x80>;
+ };
+ };
+ };
+
+ power@e0070{
+ compatible = "fsl,mpc8536-pmc", "fsl,mpc8548-pmc",
+ "fsl,p1022-pmc";
+ reg = <0xe0070 0x20>;
+ etsec1_clk: soc-clk@B0{
+ fsl,pmcdr-mask = <0x00000080>;
+ };
+ etsec2_clk: soc-clk@B1{
+ fsl,pmcdr-mask = <0x00000040>;
+ };
+ etsec3_clk: soc-clk@B2{
+ fsl,pmcdr-mask = <0x00000020>;
+ };
+ };
+
+ mpic: pic@40000 {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ reg = <0x40000 0x40000>;
+ compatible = "chrp,open-pic";
+ device_type = "open-pic";
+ };
+
+ msi@41600 {
+ compatible = "fsl,p1023-msi", "fsl,mpic-msi";
+ reg = <0x41600 0x80>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xe0 0
+ 0xe1 0
+ 0xe2 0
+ 0xe3 0
+ 0xe4 0
+ 0xe5 0
+ 0xe6 0
+ 0xe7 0>;
+ interrupt-parent = <&mpic>;
+ };
+
+ global-utilities@e0000 { //global utilities block
+ compatible = "fsl,p1023-guts";
+ reg = <0xe0000 0x1000>;
+ fsl,has-rstcr;
+ };
+ };
+
+ localbus@ff605000 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,p1023-elbc", "fsl,elbc", "simple-bus";
+ reg = <0 0xff605000 0 0x1000>;
+ interrupts = <19 2>;
+ interrupt-parent = <&mpic>;
+
+ /* NOR Flash, BCSR */
+ ranges = <0x0 0x0 0x0 0xee000000 0x02000000
+ 0x1 0x0 0x0 0xe0000000 0x00008000>;
+
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x02000000>;
+ bank-width = <1>;
+ device-width = <1>;
+ partition@0 {
+ label = "ramdisk";
+ reg = <0x00000000 0x01c00000>;
+ };
+ partition@1c00000 {
+ label = "kernel";
+ reg = <0x01c00000 0x002e0000>;
+ };
+ partiton@1ee0000 {
+ label = "dtb";
+ reg = <0x01ee0000 0x00020000>;
+ };
+ partition@1f00000 {
+ label = "firmware";
+ reg = <0x01f00000 0x00080000>;
+ read-only;
+ };
+ partition@1f80000 {
+ label = "u-boot";
+ reg = <0x01f80000 0x00080000>;
+ read-only;
+ };
+ };
+
+ fpga@1,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,p1023rds-fpga";
+ reg = <1 0 0x8000>;
+ ranges = <0 1 0 0x8000>;
+
+ bcsr@20 {
+ compatible = "fsl,p1023rds-bcsr";
+ reg = <0x20 0x20>;
+ };
+ };
+ };
+
+ pci0: pcie@ff60a000 {
+ compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2";
+ cell-index = <1>;
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0 0xff60a000 0 0x1000>;
+ bus-range = <0 255>;
+ ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
+ clock-frequency = <33333333>;
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ pcie@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 0 1
+ 0000 0 0 2 &mpic 1 1
+ 0000 0 0 3 &mpic 2 1
+ 0000 0 0 4 &mpic 3 1
+ >;
+ ranges = <0x2000000 0x0 0xc0000000
+ 0x2000000 0x0 0xc0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci1: pcie@ff609000 {
+ compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2";
+ cell-index = <2>;
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0 0xff609000 0 0x1000>;
+ bus-range = <0 255>;
+ ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
+ clock-frequency = <33333333>;
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ pcie@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 4 1
+ 0000 0 0 2 &mpic 5 1
+ 0000 0 0 3 &mpic 6 1
+ 0000 0 0 4 &mpic 7 1
+ >;
+ ranges = <0x2000000 0x0 0xa0000000
+ 0x2000000 0x0 0xa0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci2: pcie@ff60b000 {
+ cell-index = <3>;
+ compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0 0xff60b000 0 0x1000>;
+ bus-range = <0 255>;
+ ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
+ clock-frequency = <33333333>;
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ pcie@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 8 1
+ 0000 0 0 2 &mpic 9 1
+ 0000 0 0 3 &mpic 10 1
+ 0000 0 0 4 &mpic 11 1
+ >;
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/p2040rdb.dts b/arch/powerpc/boot/dts/p2040rdb.dts
new file mode 100644
index 00000000000..7d84e391c63
--- /dev/null
+++ b/arch/powerpc/boot/dts/p2040rdb.dts
@@ -0,0 +1,166 @@
+/*
+ * P2040RDB Device Tree Source
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "p2040si.dtsi"
+
+/ {
+ model = "fsl,P2040RDB";
+ compatible = "fsl,P2040RDB";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ memory {
+ device_type = "memory";
+ };
+
+ soc: soc@ffe000000 {
+ spi@110000 {
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,s25sl12801";
+ reg = <0>;
+ spi-max-frequency = <40000000>; /* input clock */
+ partition@u-boot {
+ label = "u-boot";
+ reg = <0x00000000 0x00100000>;
+ read-only;
+ };
+ partition@kernel {
+ label = "kernel";
+ reg = <0x00100000 0x00500000>;
+ read-only;
+ };
+ partition@dtb {
+ label = "dtb";
+ reg = <0x00600000 0x00100000>;
+ read-only;
+ };
+ partition@fs {
+ label = "file system";
+ reg = <0x00700000 0x00900000>;
+ };
+ };
+ };
+
+ i2c@118000 {
+ lm75b@48 {
+ compatible = "nxp,lm75a";
+ reg = <0x48>;
+ };
+ eeprom@50 {
+ compatible = "at24,24c256";
+ reg = <0x50>;
+ };
+ rtc@68 {
+ compatible = "pericom,pt7c4338";
+ reg = <0x68>;
+ };
+ };
+
+ i2c@118100 {
+ eeprom@50 {
+ compatible = "at24,24c256";
+ reg = <0x50>;
+ };
+ };
+
+ usb0: usb@210000 {
+ phy_type = "utmi";
+ };
+
+ usb1: usb@211000 {
+ dr_mode = "host";
+ phy_type = "utmi";
+ };
+ };
+
+ localbus@ffe124000 {
+ reg = <0xf 0xfe124000 0 0x1000>;
+ ranges = <0 0 0xf 0xe8000000 0x08000000>;
+
+ flash@0,0 {
+ compatible = "cfi-flash";
+ reg = <0 0 0x08000000>;
+ bank-width = <2>;
+ device-width = <2>;
+ };
+ };
+
+ pci0: pcie@ffe200000 {
+ reg = <0xf 0xfe200000 0 0x1000>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
+ 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+
+ pci1: pcie@ffe201000 {
+ reg = <0xf 0xfe201000 0 0x1000>;
+ ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
+ 0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+
+ pci2: pcie@ffe202000 {
+ reg = <0xf 0xfe202000 0 0x1000>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
+ 0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/p2040si.dtsi b/arch/powerpc/boot/dts/p2040si.dtsi
new file mode 100644
index 00000000000..5fdbb24c076
--- /dev/null
+++ b/arch/powerpc/boot/dts/p2040si.dtsi
@@ -0,0 +1,623 @@
+/*
+ * P2040 Silicon Device Tree Source
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+/ {
+ compatible = "fsl,P2040";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ ccsr = &soc;
+
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ serial3 = &serial3;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ pci2 = &pci2;
+ usb0 = &usb0;
+ usb1 = &usb1;
+ dma0 = &dma0;
+ dma1 = &dma1;
+ sdhc = &sdhc;
+ msi0 = &msi0;
+ msi1 = &msi1;
+ msi2 = &msi2;
+
+ crypto = &crypto;
+ sec_jr0 = &sec_jr0;
+ sec_jr1 = &sec_jr1;
+ sec_jr2 = &sec_jr2;
+ sec_jr3 = &sec_jr3;
+ rtic_a = &rtic_a;
+ rtic_b = &rtic_b;
+ rtic_c = &rtic_c;
+ rtic_d = &rtic_d;
+ sec_mon = &sec_mon;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: PowerPC,e500mc@0 {
+ device_type = "cpu";
+ reg = <0>;
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu1: PowerPC,e500mc@1 {
+ device_type = "cpu";
+ reg = <1>;
+ next-level-cache = <&L2_1>;
+ L2_1: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu2: PowerPC,e500mc@2 {
+ device_type = "cpu";
+ reg = <2>;
+ next-level-cache = <&L2_2>;
+ L2_2: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu3: PowerPC,e500mc@3 {
+ device_type = "cpu";
+ reg = <3>;
+ next-level-cache = <&L2_3>;
+ L2_3: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ };
+
+ soc: soc@ffe000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "simple-bus";
+ ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+ reg = <0xf 0xfe000000 0 0x00001000>;
+
+ soc-sram-error {
+ compatible = "fsl,soc-sram-error";
+ interrupts = <16 2 1 29>;
+ };
+
+ corenet-law@0 {
+ compatible = "fsl,corenet-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <32>;
+ };
+
+ memory-controller@8000 {
+ compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
+ reg = <0x8000 0x1000>;
+ interrupts = <16 2 1 23>;
+ };
+
+ cpc: l3-cache-controller@10000 {
+ compatible = "fsl,p2040-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache";
+ reg = <0x10000 0x1000>;
+ interrupts = <16 2 1 27>;
+ };
+
+ corenet-cf@18000 {
+ compatible = "fsl,corenet-cf";
+ reg = <0x18000 0x1000>;
+ interrupts = <16 2 1 31>;
+ fsl,ccf-num-csdids = <32>;
+ fsl,ccf-num-snoopids = <32>;
+ };
+
+ iommu@20000 {
+ compatible = "fsl,pamu-v1.0", "fsl,pamu";
+ reg = <0x20000 0x4000>;
+ interrupts = <
+ 24 2 0 0
+ 16 2 1 30>;
+ };
+
+ mpic: pic@40000 {
+ clock-frequency = <0>;
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <4>;
+ reg = <0x40000 0x40000>;
+ compatible = "fsl,mpic", "chrp,open-pic";
+ device_type = "open-pic";
+ };
+
+ msi0: msi@41600 {
+ compatible = "fsl,mpic-msi";
+ reg = <0x41600 0x200>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xe0 0 0 0
+ 0xe1 0 0 0
+ 0xe2 0 0 0
+ 0xe3 0 0 0
+ 0xe4 0 0 0
+ 0xe5 0 0 0
+ 0xe6 0 0 0
+ 0xe7 0 0 0>;
+ };
+
+ msi1: msi@41800 {
+ compatible = "fsl,mpic-msi";
+ reg = <0x41800 0x200>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xe8 0 0 0
+ 0xe9 0 0 0
+ 0xea 0 0 0
+ 0xeb 0 0 0
+ 0xec 0 0 0
+ 0xed 0 0 0
+ 0xee 0 0 0
+ 0xef 0 0 0>;
+ };
+
+ msi2: msi@41a00 {
+ compatible = "fsl,mpic-msi";
+ reg = <0x41a00 0x200>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xf0 0 0 0
+ 0xf1 0 0 0
+ 0xf2 0 0 0
+ 0xf3 0 0 0
+ 0xf4 0 0 0
+ 0xf5 0 0 0
+ 0xf6 0 0 0
+ 0xf7 0 0 0>;
+ };
+
+ guts: global-utilities@e0000 {
+ compatible = "fsl,qoriq-device-config-1.0";
+ reg = <0xe0000 0xe00>;
+ fsl,has-rstcr;
+ #sleep-cells = <1>;
+ fsl,liodn-bits = <12>;
+ };
+
+ pins: global-utilities@e0e00 {
+ compatible = "fsl,qoriq-pin-control-1.0";
+ reg = <0xe0e00 0x200>;
+ #sleep-cells = <2>;
+ };
+
+ clockgen: global-utilities@e1000 {
+ compatible = "fsl,p2040-clockgen", "fsl,qoriq-clockgen-1.0";
+ reg = <0xe1000 0x1000>;
+ clock-frequency = <0>;
+ };
+
+ rcpm: global-utilities@e2000 {
+ compatible = "fsl,qoriq-rcpm-1.0";
+ reg = <0xe2000 0x1000>;
+ #sleep-cells = <1>;
+ };
+
+ sfp: sfp@e8000 {
+ compatible = "fsl,p2040-sfp", "fsl,qoriq-sfp-1.0";
+ reg = <0xe8000 0x1000>;
+ };
+
+ serdes: serdes@ea000 {
+ compatible = "fsl,p2040-serdes";
+ reg = <0xea000 0x1000>;
+ };
+
+ dma0: dma@100300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,p2040-dma", "fsl,eloplus-dma";
+ reg = <0x100300 0x4>;
+ ranges = <0x0 0x100100 0x200>;
+ cell-index = <0>;
+ dma-channel@0 {
+ compatible = "fsl,p2040-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupts = <28 2 0 0>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,p2040-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupts = <29 2 0 0>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,p2040-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupts = <30 2 0 0>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,p2040-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupts = <31 2 0 0>;
+ };
+ };
+
+ dma1: dma@101300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,p2040-dma", "fsl,eloplus-dma";
+ reg = <0x101300 0x4>;
+ ranges = <0x0 0x101100 0x200>;
+ cell-index = <1>;
+ dma-channel@0 {
+ compatible = "fsl,p2040-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupts = <32 2 0 0>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,p2040-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupts = <33 2 0 0>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,p2040-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupts = <34 2 0 0>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,p2040-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupts = <35 2 0 0>;
+ };
+ };
+
+ spi@110000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,p2040-espi", "fsl,mpc8536-espi";
+ reg = <0x110000 0x1000>;
+ interrupts = <53 0x2 0 0>;
+ fsl,espi-num-chipselects = <4>;
+ };
+
+ sdhc: sdhc@114000 {
+ compatible = "fsl,p2040-esdhc", "fsl,esdhc";
+ reg = <0x114000 0x1000>;
+ interrupts = <48 2 0 0>;
+ sdhci,auto-cmd12;
+ clock-frequency = <0>;
+ };
+
+ i2c@118000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ compatible = "fsl-i2c";
+ reg = <0x118000 0x100>;
+ interrupts = <38 2 0 0>;
+ dfsrr;
+ };
+
+ i2c@118100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ compatible = "fsl-i2c";
+ reg = <0x118100 0x100>;
+ interrupts = <38 2 0 0>;
+ dfsrr;
+ };
+
+ i2c@119000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <2>;
+ compatible = "fsl-i2c";
+ reg = <0x119000 0x100>;
+ interrupts = <39 2 0 0>;
+ dfsrr;
+ };
+
+ i2c@119100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <3>;
+ compatible = "fsl-i2c";
+ reg = <0x119100 0x100>;
+ interrupts = <39 2 0 0>;
+ dfsrr;
+ };
+
+ serial0: serial@11c500 {
+ cell-index = <0>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x11c500 0x100>;
+ clock-frequency = <0>;
+ interrupts = <36 2 0 0>;
+ };
+
+ serial1: serial@11c600 {
+ cell-index = <1>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x11c600 0x100>;
+ clock-frequency = <0>;
+ interrupts = <36 2 0 0>;
+ };
+
+ serial2: serial@11d500 {
+ cell-index = <2>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x11d500 0x100>;
+ clock-frequency = <0>;
+ interrupts = <37 2 0 0>;
+ };
+
+ serial3: serial@11d600 {
+ cell-index = <3>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x11d600 0x100>;
+ clock-frequency = <0>;
+ interrupts = <37 2 0 0>;
+ };
+
+ gpio0: gpio@130000 {
+ compatible = "fsl,p2040-gpio", "fsl,qoriq-gpio";
+ reg = <0x130000 0x1000>;
+ interrupts = <55 2 0 0>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
+
+ usb0: usb@210000 {
+ compatible = "fsl,p2040-usb2-mph",
+ "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
+ reg = <0x210000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <44 0x2 0 0>;
+ port0;
+ };
+
+ usb1: usb@211000 {
+ compatible = "fsl,p2040-usb2-dr",
+ "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
+ reg = <0x211000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <45 0x2 0 0>;
+ };
+
+ sata@220000 {
+ compatible = "fsl,p2040-sata", "fsl,pq-sata-v2";
+ reg = <0x220000 0x1000>;
+ interrupts = <68 0x2 0 0>;
+ };
+
+ sata@221000 {
+ compatible = "fsl,p2040-sata", "fsl,pq-sata-v2";
+ reg = <0x221000 0x1000>;
+ interrupts = <69 0x2 0 0>;
+ };
+
+ crypto: crypto@300000 {
+ compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x300000 0x10000>;
+ ranges = <0 0x300000 0x10000>;
+ interrupts = <92 2 0 0>;
+
+ sec_jr0: jr@1000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x1000 0x1000>;
+ interrupts = <88 2 0 0>;
+ };
+
+ sec_jr1: jr@2000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x2000 0x1000>;
+ interrupts = <89 2 0 0>;
+ };
+
+ sec_jr2: jr@3000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x3000 0x1000>;
+ interrupts = <90 2 0 0>;
+ };
+
+ sec_jr3: jr@4000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x4000 0x1000>;
+ interrupts = <91 2 0 0>;
+ };
+
+ rtic@6000 {
+ compatible = "fsl,sec-v4.2-rtic",
+ "fsl,sec-v4.0-rtic";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x6000 0x100>;
+ ranges = <0x0 0x6100 0xe00>;
+
+ rtic_a: rtic-a@0 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x00 0x20 0x100 0x80>;
+ };
+
+ rtic_b: rtic-b@20 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x20 0x20 0x200 0x80>;
+ };
+
+ rtic_c: rtic-c@40 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x40 0x20 0x300 0x80>;
+ };
+
+ rtic_d: rtic-d@60 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x60 0x20 0x500 0x80>;
+ };
+ };
+ };
+
+ sec_mon: sec_mon@314000 {
+ compatible = "fsl,sec-v4.2-mon", "fsl,sec-v4.0-mon";
+ reg = <0x314000 0x1000>;
+ interrupts = <93 2 0 0>;
+ };
+ };
+
+ localbus@ffe124000 {
+ compatible = "fsl,p2040-elbc", "fsl,elbc", "simple-bus";
+ interrupts = <25 2 0 0>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+ };
+
+ pci0: pcie@ffe200000 {
+ compatible = "fsl,p2040-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <0x1fca055>;
+ fsl,msi = <&msi0>;
+ interrupts = <16 2 1 15>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 15>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 40 1 0 0
+ 0000 0 0 2 &mpic 1 1 0 0
+ 0000 0 0 3 &mpic 2 1 0 0
+ 0000 0 0 4 &mpic 3 1 0 0
+ >;
+ };
+ };
+
+ pci1: pcie@ffe201000 {
+ compatible = "fsl,p2040-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 0xff>;
+ clock-frequency = <0x1fca055>;
+ fsl,msi = <&msi1>;
+ interrupts = <16 2 1 14>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 14>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 41 1 0 0
+ 0000 0 0 2 &mpic 5 1 0 0
+ 0000 0 0 3 &mpic 6 1 0 0
+ 0000 0 0 4 &mpic 7 1 0 0
+ >;
+ };
+ };
+
+ pci2: pcie@ffe202000 {
+ compatible = "fsl,p2040-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <0x1fca055>;
+ fsl,msi = <&msi2>;
+ interrupts = <16 2 1 13>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 13>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 42 1 0 0
+ 0000 0 0 2 &mpic 9 1 0 0
+ 0000 0 0 3 &mpic 10 1 0 0
+ 0000 0 0 4 &mpic 11 1 0 0
+ >;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/p3041ds.dts b/arch/powerpc/boot/dts/p3041ds.dts
new file mode 100644
index 00000000000..69cae674f39
--- /dev/null
+++ b/arch/powerpc/boot/dts/p3041ds.dts
@@ -0,0 +1,214 @@
+/*
+ * P3041DS Device Tree Source
+ *
+ * Copyright 2010-2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "p3041si.dtsi"
+
+/ {
+ model = "fsl,P3041DS";
+ compatible = "fsl,P3041DS";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ memory {
+ device_type = "memory";
+ };
+
+ soc: soc@ffe000000 {
+ spi@110000 {
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,s25sl12801";
+ reg = <0>;
+ spi-max-frequency = <40000000>; /* input clock */
+ partition@u-boot {
+ label = "u-boot";
+ reg = <0x00000000 0x00100000>;
+ read-only;
+ };
+ partition@kernel {
+ label = "kernel";
+ reg = <0x00100000 0x00500000>;
+ read-only;
+ };
+ partition@dtb {
+ label = "dtb";
+ reg = <0x00600000 0x00100000>;
+ read-only;
+ };
+ partition@fs {
+ label = "file system";
+ reg = <0x00700000 0x00900000>;
+ };
+ };
+ };
+
+ i2c@118100 {
+ eeprom@51 {
+ compatible = "at24,24c256";
+ reg = <0x51>;
+ };
+ eeprom@52 {
+ compatible = "at24,24c256";
+ reg = <0x52>;
+ };
+ };
+
+ i2c@119100 {
+ rtc@68 {
+ compatible = "dallas,ds3232";
+ reg = <0x68>;
+ interrupts = <0x1 0x1 0 0>;
+ };
+ };
+ };
+
+ localbus@ffe124000 {
+ reg = <0xf 0xfe124000 0 0x1000>;
+ ranges = <0 0 0xf 0xe8000000 0x08000000
+ 2 0 0xf 0xffa00000 0x00040000
+ 3 0 0xf 0xffdf0000 0x00008000>;
+
+ flash@0,0 {
+ compatible = "cfi-flash";
+ reg = <0 0 0x08000000>;
+ bank-width = <2>;
+ device-width = <2>;
+ };
+
+ nand@2,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,elbc-fcm-nand";
+ reg = <0x2 0x0 0x40000>;
+
+ partition@0 {
+ label = "NAND U-Boot Image";
+ reg = <0x0 0x02000000>;
+ read-only;
+ };
+
+ partition@2000000 {
+ label = "NAND Root File System";
+ reg = <0x02000000 0x10000000>;
+ };
+
+ partition@12000000 {
+ label = "NAND Compressed RFS Image";
+ reg = <0x12000000 0x08000000>;
+ };
+
+ partition@1a000000 {
+ label = "NAND Linux Kernel Image";
+ reg = <0x1a000000 0x04000000>;
+ };
+
+ partition@1e000000 {
+ label = "NAND DTB Image";
+ reg = <0x1e000000 0x01000000>;
+ };
+
+ partition@1f000000 {
+ label = "NAND Writable User area";
+ reg = <0x1f000000 0x21000000>;
+ };
+ };
+
+ board-control@3,0 {
+ compatible = "fsl,p3041ds-pixis";
+ reg = <3 0 0x20>;
+ };
+ };
+
+ pci0: pcie@ffe200000 {
+ reg = <0xf 0xfe200000 0 0x1000>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
+ 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+
+ pci1: pcie@ffe201000 {
+ reg = <0xf 0xfe201000 0 0x1000>;
+ ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
+ 0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+
+ pci2: pcie@ffe202000 {
+ reg = <0xf 0xfe202000 0 0x1000>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
+ 0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+
+ pci3: pcie@ffe203000 {
+ reg = <0xf 0xfe203000 0 0x1000>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x60000000 0 0x20000000
+ 0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/p3041si.dtsi b/arch/powerpc/boot/dts/p3041si.dtsi
new file mode 100644
index 00000000000..8b695801f50
--- /dev/null
+++ b/arch/powerpc/boot/dts/p3041si.dtsi
@@ -0,0 +1,660 @@
+/*
+ * P3041 Silicon Device Tree Source
+ *
+ * Copyright 2010-2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+/ {
+ compatible = "fsl,P3041";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ ccsr = &soc;
+
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ serial3 = &serial3;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ pci2 = &pci2;
+ pci3 = &pci3;
+ usb0 = &usb0;
+ usb1 = &usb1;
+ dma0 = &dma0;
+ dma1 = &dma1;
+ sdhc = &sdhc;
+ msi0 = &msi0;
+ msi1 = &msi1;
+ msi2 = &msi2;
+
+ crypto = &crypto;
+ sec_jr0 = &sec_jr0;
+ sec_jr1 = &sec_jr1;
+ sec_jr2 = &sec_jr2;
+ sec_jr3 = &sec_jr3;
+ rtic_a = &rtic_a;
+ rtic_b = &rtic_b;
+ rtic_c = &rtic_c;
+ rtic_d = &rtic_d;
+ sec_mon = &sec_mon;
+
+/*
+ rio0 = &rapidio0;
+ */
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: PowerPC,e500mc@0 {
+ device_type = "cpu";
+ reg = <0>;
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu1: PowerPC,e500mc@1 {
+ device_type = "cpu";
+ reg = <1>;
+ next-level-cache = <&L2_1>;
+ L2_1: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu2: PowerPC,e500mc@2 {
+ device_type = "cpu";
+ reg = <2>;
+ next-level-cache = <&L2_2>;
+ L2_2: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu3: PowerPC,e500mc@3 {
+ device_type = "cpu";
+ reg = <3>;
+ next-level-cache = <&L2_3>;
+ L2_3: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ };
+
+ soc: soc@ffe000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "simple-bus";
+ ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+ reg = <0xf 0xfe000000 0 0x00001000>;
+
+ soc-sram-error {
+ compatible = "fsl,soc-sram-error";
+ interrupts = <16 2 1 29>;
+ };
+
+ corenet-law@0 {
+ compatible = "fsl,corenet-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <32>;
+ };
+
+ memory-controller@8000 {
+ compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
+ reg = <0x8000 0x1000>;
+ interrupts = <16 2 1 23>;
+ };
+
+ cpc: l3-cache-controller@10000 {
+ compatible = "fsl,p3041-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache";
+ reg = <0x10000 0x1000>;
+ interrupts = <16 2 1 27>;
+ };
+
+ corenet-cf@18000 {
+ compatible = "fsl,corenet-cf";
+ reg = <0x18000 0x1000>;
+ interrupts = <16 2 1 31>;
+ fsl,ccf-num-csdids = <32>;
+ fsl,ccf-num-snoopids = <32>;
+ };
+
+ iommu@20000 {
+ compatible = "fsl,pamu-v1.0", "fsl,pamu";
+ reg = <0x20000 0x4000>;
+ interrupts = <
+ 24 2 0 0
+ 16 2 1 30>;
+ };
+
+ mpic: pic@40000 {
+ clock-frequency = <0>;
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <4>;
+ reg = <0x40000 0x40000>;
+ compatible = "fsl,mpic", "chrp,open-pic";
+ device_type = "open-pic";
+ };
+
+ msi0: msi@41600 {
+ compatible = "fsl,mpic-msi";
+ reg = <0x41600 0x200>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xe0 0 0 0
+ 0xe1 0 0 0
+ 0xe2 0 0 0
+ 0xe3 0 0 0
+ 0xe4 0 0 0
+ 0xe5 0 0 0
+ 0xe6 0 0 0
+ 0xe7 0 0 0>;
+ };
+
+ msi1: msi@41800 {
+ compatible = "fsl,mpic-msi";
+ reg = <0x41800 0x200>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xe8 0 0 0
+ 0xe9 0 0 0
+ 0xea 0 0 0
+ 0xeb 0 0 0
+ 0xec 0 0 0
+ 0xed 0 0 0
+ 0xee 0 0 0
+ 0xef 0 0 0>;
+ };
+
+ msi2: msi@41a00 {
+ compatible = "fsl,mpic-msi";
+ reg = <0x41a00 0x200>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xf0 0 0 0
+ 0xf1 0 0 0
+ 0xf2 0 0 0
+ 0xf3 0 0 0
+ 0xf4 0 0 0
+ 0xf5 0 0 0
+ 0xf6 0 0 0
+ 0xf7 0 0 0>;
+ };
+
+ guts: global-utilities@e0000 {
+ compatible = "fsl,qoriq-device-config-1.0";
+ reg = <0xe0000 0xe00>;
+ fsl,has-rstcr;
+ #sleep-cells = <1>;
+ fsl,liodn-bits = <12>;
+ };
+
+ pins: global-utilities@e0e00 {
+ compatible = "fsl,qoriq-pin-control-1.0";
+ reg = <0xe0e00 0x200>;
+ #sleep-cells = <2>;
+ };
+
+ clockgen: global-utilities@e1000 {
+ compatible = "fsl,p3041-clockgen", "fsl,qoriq-clockgen-1.0";
+ reg = <0xe1000 0x1000>;
+ clock-frequency = <0>;
+ };
+
+ rcpm: global-utilities@e2000 {
+ compatible = "fsl,qoriq-rcpm-1.0";
+ reg = <0xe2000 0x1000>;
+ #sleep-cells = <1>;
+ };
+
+ sfp: sfp@e8000 {
+ compatible = "fsl,p3041-sfp", "fsl,qoriq-sfp-1.0";
+ reg = <0xe8000 0x1000>;
+ };
+
+ serdes: serdes@ea000 {
+ compatible = "fsl,p3041-serdes";
+ reg = <0xea000 0x1000>;
+ };
+
+ dma0: dma@100300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,p3041-dma", "fsl,eloplus-dma";
+ reg = <0x100300 0x4>;
+ ranges = <0x0 0x100100 0x200>;
+ cell-index = <0>;
+ dma-channel@0 {
+ compatible = "fsl,p3041-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupts = <28 2 0 0>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,p3041-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupts = <29 2 0 0>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,p3041-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupts = <30 2 0 0>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,p3041-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupts = <31 2 0 0>;
+ };
+ };
+
+ dma1: dma@101300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,p3041-dma", "fsl,eloplus-dma";
+ reg = <0x101300 0x4>;
+ ranges = <0x0 0x101100 0x200>;
+ cell-index = <1>;
+ dma-channel@0 {
+ compatible = "fsl,p3041-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupts = <32 2 0 0>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,p3041-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupts = <33 2 0 0>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,p3041-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupts = <34 2 0 0>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,p3041-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupts = <35 2 0 0>;
+ };
+ };
+
+ spi@110000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,p3041-espi", "fsl,mpc8536-espi";
+ reg = <0x110000 0x1000>;
+ interrupts = <53 0x2 0 0>;
+ fsl,espi-num-chipselects = <4>;
+ };
+
+ sdhc: sdhc@114000 {
+ compatible = "fsl,p3041-esdhc", "fsl,esdhc";
+ reg = <0x114000 0x1000>;
+ interrupts = <48 2 0 0>;
+ sdhci,auto-cmd12;
+ clock-frequency = <0>;
+ };
+
+ i2c@118000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ compatible = "fsl-i2c";
+ reg = <0x118000 0x100>;
+ interrupts = <38 2 0 0>;
+ dfsrr;
+ };
+
+ i2c@118100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ compatible = "fsl-i2c";
+ reg = <0x118100 0x100>;
+ interrupts = <38 2 0 0>;
+ dfsrr;
+ };
+
+ i2c@119000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <2>;
+ compatible = "fsl-i2c";
+ reg = <0x119000 0x100>;
+ interrupts = <39 2 0 0>;
+ dfsrr;
+ };
+
+ i2c@119100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <3>;
+ compatible = "fsl-i2c";
+ reg = <0x119100 0x100>;
+ interrupts = <39 2 0 0>;
+ dfsrr;
+ };
+
+ serial0: serial@11c500 {
+ cell-index = <0>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x11c500 0x100>;
+ clock-frequency = <0>;
+ interrupts = <36 2 0 0>;
+ };
+
+ serial1: serial@11c600 {
+ cell-index = <1>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x11c600 0x100>;
+ clock-frequency = <0>;
+ interrupts = <36 2 0 0>;
+ };
+
+ serial2: serial@11d500 {
+ cell-index = <2>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x11d500 0x100>;
+ clock-frequency = <0>;
+ interrupts = <37 2 0 0>;
+ };
+
+ serial3: serial@11d600 {
+ cell-index = <3>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x11d600 0x100>;
+ clock-frequency = <0>;
+ interrupts = <37 2 0 0>;
+ };
+
+ gpio0: gpio@130000 {
+ compatible = "fsl,p3041-gpio", "fsl,qoriq-gpio";
+ reg = <0x130000 0x1000>;
+ interrupts = <55 2 0 0>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
+
+ usb0: usb@210000 {
+ compatible = "fsl,p3041-usb2-mph",
+ "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
+ reg = <0x210000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <44 0x2 0 0>;
+ phy_type = "utmi";
+ port0;
+ };
+
+ usb1: usb@211000 {
+ compatible = "fsl,p3041-usb2-dr",
+ "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
+ reg = <0x211000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <45 0x2 0 0>;
+ dr_mode = "host";
+ phy_type = "utmi";
+ };
+
+ sata@220000 {
+ compatible = "fsl,p3041-sata", "fsl,pq-sata-v2";
+ reg = <0x220000 0x1000>;
+ interrupts = <68 0x2 0 0>;
+ };
+
+ sata@221000 {
+ compatible = "fsl,p3041-sata", "fsl,pq-sata-v2";
+ reg = <0x221000 0x1000>;
+ interrupts = <69 0x2 0 0>;
+ };
+
+ crypto: crypto@300000 {
+ compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x300000 0x10000>;
+ ranges = <0 0x300000 0x10000>;
+ interrupts = <92 2 0 0>;
+
+ sec_jr0: jr@1000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x1000 0x1000>;
+ interrupts = <88 2 0 0>;
+ };
+
+ sec_jr1: jr@2000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x2000 0x1000>;
+ interrupts = <89 2 0 0>;
+ };
+
+ sec_jr2: jr@3000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x3000 0x1000>;
+ interrupts = <90 2 0 0>;
+ };
+
+ sec_jr3: jr@4000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x4000 0x1000>;
+ interrupts = <91 2 0 0>;
+ };
+
+ rtic@6000 {
+ compatible = "fsl,sec-v4.2-rtic",
+ "fsl,sec-v4.0-rtic";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x6000 0x100>;
+ ranges = <0x0 0x6100 0xe00>;
+
+ rtic_a: rtic-a@0 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x00 0x20 0x100 0x80>;
+ };
+
+ rtic_b: rtic-b@20 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x20 0x20 0x200 0x80>;
+ };
+
+ rtic_c: rtic-c@40 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x40 0x20 0x300 0x80>;
+ };
+
+ rtic_d: rtic-d@60 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x60 0x20 0x500 0x80>;
+ };
+ };
+ };
+
+ sec_mon: sec_mon@314000 {
+ compatible = "fsl,sec-v4.2-mon", "fsl,sec-v4.0-mon";
+ reg = <0x314000 0x1000>;
+ interrupts = <93 2 0 0>;
+ };
+ };
+
+/*
+ rapidio0: rapidio@ffe0c0000
+*/
+
+ localbus@ffe124000 {
+ compatible = "fsl,p3041-elbc", "fsl,elbc", "simple-bus";
+ interrupts = <25 2 0 0>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+ };
+
+ pci0: pcie@ffe200000 {
+ compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <0x1fca055>;
+ fsl,msi = <&msi0>;
+ interrupts = <16 2 1 15>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 15>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 40 1 0 0
+ 0000 0 0 2 &mpic 1 1 0 0
+ 0000 0 0 3 &mpic 2 1 0 0
+ 0000 0 0 4 &mpic 3 1 0 0
+ >;
+ };
+ };
+
+ pci1: pcie@ffe201000 {
+ compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 0xff>;
+ clock-frequency = <0x1fca055>;
+ fsl,msi = <&msi1>;
+ interrupts = <16 2 1 14>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 14>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 41 1 0 0
+ 0000 0 0 2 &mpic 5 1 0 0
+ 0000 0 0 3 &mpic 6 1 0 0
+ 0000 0 0 4 &mpic 7 1 0 0
+ >;
+ };
+ };
+
+ pci2: pcie@ffe202000 {
+ compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <0x1fca055>;
+ fsl,msi = <&msi2>;
+ interrupts = <16 2 1 13>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 13>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 42 1 0 0
+ 0000 0 0 2 &mpic 9 1 0 0
+ 0000 0 0 3 &mpic 10 1 0 0
+ 0000 0 0 4 &mpic 11 1 0 0
+ >;
+ };
+ };
+
+ pci3: pcie@ffe203000 {
+ compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <0x1fca055>;
+ fsl,msi = <&msi2>;
+ interrupts = <16 2 1 12>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 12>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 43 1 0 0
+ 0000 0 0 2 &mpic 0 1 0 0
+ 0000 0 0 3 &mpic 4 1 0 0
+ 0000 0 0 4 &mpic 8 1 0 0
+ >;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/p4080ds.dts b/arch/powerpc/boot/dts/p4080ds.dts
index 927f94d16e9..eb11098bb68 100644
--- a/arch/powerpc/boot/dts/p4080ds.dts
+++ b/arch/powerpc/boot/dts/p4080ds.dts
@@ -3,258 +3,50 @@
*
* Copyright 2009-2011 Freescale Semiconductor Inc.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/dts-v1/;
+/include/ "p4080si.dtsi"
/ {
model = "fsl,P4080DS";
compatible = "fsl,P4080DS";
#address-cells = <2>;
#size-cells = <2>;
-
- aliases {
- ccsr = &soc;
-
- serial0 = &serial0;
- serial1 = &serial1;
- serial2 = &serial2;
- serial3 = &serial3;
- pci0 = &pci0;
- pci1 = &pci1;
- pci2 = &pci2;
- usb0 = &usb0;
- usb1 = &usb1;
- dma0 = &dma0;
- dma1 = &dma1;
- sdhc = &sdhc;
-
- crypto = &crypto;
- sec_jr0 = &sec_jr0;
- sec_jr1 = &sec_jr1;
- sec_jr2 = &sec_jr2;
- sec_jr3 = &sec_jr3;
- rtic_a = &rtic_a;
- rtic_b = &rtic_b;
- rtic_c = &rtic_c;
- rtic_d = &rtic_d;
- sec_mon = &sec_mon;
-
- rio0 = &rapidio0;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu0: PowerPC,4080@0 {
- device_type = "cpu";
- reg = <0>;
- next-level-cache = <&L2_0>;
- L2_0: l2-cache {
- };
- };
- cpu1: PowerPC,4080@1 {
- device_type = "cpu";
- reg = <1>;
- next-level-cache = <&L2_1>;
- L2_1: l2-cache {
- };
- };
- cpu2: PowerPC,4080@2 {
- device_type = "cpu";
- reg = <2>;
- next-level-cache = <&L2_2>;
- L2_2: l2-cache {
- };
- };
- cpu3: PowerPC,4080@3 {
- device_type = "cpu";
- reg = <3>;
- next-level-cache = <&L2_3>;
- L2_3: l2-cache {
- };
- };
- cpu4: PowerPC,4080@4 {
- device_type = "cpu";
- reg = <4>;
- next-level-cache = <&L2_4>;
- L2_4: l2-cache {
- };
- };
- cpu5: PowerPC,4080@5 {
- device_type = "cpu";
- reg = <5>;
- next-level-cache = <&L2_5>;
- L2_5: l2-cache {
- };
- };
- cpu6: PowerPC,4080@6 {
- device_type = "cpu";
- reg = <6>;
- next-level-cache = <&L2_6>;
- L2_6: l2-cache {
- };
- };
- cpu7: PowerPC,4080@7 {
- device_type = "cpu";
- reg = <7>;
- next-level-cache = <&L2_7>;
- L2_7: l2-cache {
- };
- };
- };
+ interrupt-parent = <&mpic>;
memory {
device_type = "memory";
};
soc: soc@ffe000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "simple-bus";
- ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
- reg = <0xf 0xfe000000 0 0x00001000>;
-
- corenet-law@0 {
- compatible = "fsl,corenet-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <32>;
- };
-
- memory-controller@8000 {
- compatible = "fsl,p4080-memory-controller";
- reg = <0x8000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <0x12 2>;
- };
-
- memory-controller@9000 {
- compatible = "fsl,p4080-memory-controller";
- reg = <0x9000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <0x12 2>;
- };
-
- corenet-cf@18000 {
- compatible = "fsl,corenet-cf";
- reg = <0x18000 0x1000>;
- fsl,ccf-num-csdids = <32>;
- fsl,ccf-num-snoopids = <32>;
- };
-
- iommu@20000 {
- compatible = "fsl,p4080-pamu";
- reg = <0x20000 0x10000>;
- interrupts = <24 2>;
- interrupt-parent = <&mpic>;
- };
-
- mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
- };
-
- dma0: dma@100300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,p4080-dma", "fsl,eloplus-dma";
- reg = <0x100300 0x4>;
- ranges = <0x0 0x100100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,p4080-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <28 2>;
- };
- dma-channel@80 {
- compatible = "fsl,p4080-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <29 2>;
- };
- dma-channel@100 {
- compatible = "fsl,p4080-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <30 2>;
- };
- dma-channel@180 {
- compatible = "fsl,p4080-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <31 2>;
- };
- };
-
- dma1: dma@101300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,p4080-dma", "fsl,eloplus-dma";
- reg = <0x101300 0x4>;
- ranges = <0x0 0x101100 0x200>;
- cell-index = <1>;
- dma-channel@0 {
- compatible = "fsl,p4080-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <32 2>;
- };
- dma-channel@80 {
- compatible = "fsl,p4080-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <33 2>;
- };
- dma-channel@100 {
- compatible = "fsl,p4080-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <34 2>;
- };
- dma-channel@180 {
- compatible = "fsl,p4080-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <35 2>;
- };
- };
-
spi@110000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,p4080-espi", "fsl,mpc8536-espi";
- reg = <0x110000 0x1000>;
- interrupts = <53 0x2>;
- interrupt-parent = <&mpic>;
- fsl,espi-num-chipselects = <4>;
-
flash@0 {
#address-cells = <1>;
#size-cells = <1>;
@@ -283,35 +75,7 @@
};
};
- sdhc: sdhc@114000 {
- compatible = "fsl,p4080-esdhc", "fsl,esdhc";
- reg = <0x114000 0x1000>;
- interrupts = <48 2>;
- interrupt-parent = <&mpic>;
- voltage-ranges = <3300 3300>;
- sdhci,auto-cmd12;
- };
-
- i2c@118000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x118000 0x100>;
- interrupts = <38 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
i2c@118100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x118100 0x100>;
- interrupts = <38 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
eeprom@51 {
compatible = "at24,24c256";
reg = <0x51>;
@@ -323,198 +87,27 @@
rtc@68 {
compatible = "dallas,ds3232";
reg = <0x68>;
- interrupts = <0 0x1>;
- interrupt-parent = <&mpic>;
+ interrupts = <0x1 0x1 0 0>;
};
};
- i2c@119000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <2>;
- compatible = "fsl-i2c";
- reg = <0x119000 0x100>;
- interrupts = <39 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- i2c@119100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <3>;
- compatible = "fsl-i2c";
- reg = <0x119100 0x100>;
- interrupts = <39 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- serial0: serial@11c500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11c500 0x100>;
- clock-frequency = <0>;
- interrupts = <36 2>;
- interrupt-parent = <&mpic>;
- };
-
- serial1: serial@11c600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11c600 0x100>;
- clock-frequency = <0>;
- interrupts = <36 2>;
- interrupt-parent = <&mpic>;
- };
-
- serial2: serial@11d500 {
- cell-index = <2>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11d500 0x100>;
- clock-frequency = <0>;
- interrupts = <37 2>;
- interrupt-parent = <&mpic>;
- };
-
- serial3: serial@11d600 {
- cell-index = <3>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11d600 0x100>;
- clock-frequency = <0>;
- interrupts = <37 2>;
- interrupt-parent = <&mpic>;
- };
-
- gpio0: gpio@130000 {
- compatible = "fsl,p4080-gpio";
- reg = <0x130000 0x1000>;
- interrupts = <55 2>;
- interrupt-parent = <&mpic>;
- #gpio-cells = <2>;
- gpio-controller;
- };
-
usb0: usb@210000 {
- compatible = "fsl,p4080-usb2-mph",
- "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
- reg = <0x210000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <44 0x2>;
phy_type = "ulpi";
};
usb1: usb@211000 {
- compatible = "fsl,p4080-usb2-dr",
- "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
- reg = <0x211000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <45 0x2>;
dr_mode = "host";
phy_type = "ulpi";
};
-
- crypto: crypto@300000 {
- compatible = "fsl,sec-v4.0";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x300000 0x10000>;
- ranges = <0 0x300000 0x10000>;
- interrupt-parent = <&mpic>;
- interrupts = <92 2>;
-
- sec_jr0: jr@1000 {
- compatible = "fsl,sec-v4.0-job-ring";
- reg = <0x1000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <88 2>;
- };
-
- sec_jr1: jr@2000 {
- compatible = "fsl,sec-v4.0-job-ring";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <89 2>;
- };
-
- sec_jr2: jr@3000 {
- compatible = "fsl,sec-v4.0-job-ring";
- reg = <0x3000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <90 2>;
- };
-
- sec_jr3: jr@4000 {
- compatible = "fsl,sec-v4.0-job-ring";
- reg = <0x4000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <91 2>;
- };
-
- rtic@6000 {
- compatible = "fsl,sec-v4.0-rtic";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x6000 0x100>;
- ranges = <0x0 0x6100 0xe00>;
-
- rtic_a: rtic-a@0 {
- compatible = "fsl,sec-v4.0-rtic-memory";
- reg = <0x00 0x20 0x100 0x80>;
- };
-
- rtic_b: rtic-b@20 {
- compatible = "fsl,sec-v4.0-rtic-memory";
- reg = <0x20 0x20 0x200 0x80>;
- };
-
- rtic_c: rtic-c@40 {
- compatible = "fsl,sec-v4.0-rtic-memory";
- reg = <0x40 0x20 0x300 0x80>;
- };
-
- rtic_d: rtic-d@60 {
- compatible = "fsl,sec-v4.0-rtic-memory";
- reg = <0x60 0x20 0x500 0x80>;
- };
- };
- };
-
- sec_mon: sec_mon@314000 {
- compatible = "fsl,sec-v4.0-mon";
- reg = <0x314000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <93 2>;
- };
};
rapidio0: rapidio@ffe0c0000 {
- #address-cells = <2>;
- #size-cells = <2>;
- compatible = "fsl,rapidio-delta";
reg = <0xf 0xfe0c0000 0 0x20000>;
- ranges = <0 0 0xf 0xf5000000 0 0x01000000>;
- interrupt-parent = <&mpic>;
- /* err_irq bell_outb_irq bell_inb_irq
- msg1_tx_irq msg1_rx_irq msg2_tx_irq msg2_rx_irq */
- interrupts = <16 2 56 2 57 2 60 2 61 2 62 2 63 2>;
+ ranges = <0 0 0xc 0x20000000 0 0x01000000>;
};
localbus@ffe124000 {
- compatible = "fsl,p4080-elbc", "fsl,elbc", "simple-bus";
reg = <0xf 0xfe124000 0 0x1000>;
- interrupts = <25 2>;
- #address-cells = <2>;
- #size-cells = <1>;
-
ranges = <0 0 0xf 0xe8000000 0x08000000>;
flash@0,0 {
@@ -526,32 +119,10 @@
};
pci0: pcie@ffe200000 {
- compatible = "fsl,p4080-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
reg = <0xf 0xfe200000 0 0x1000>;
- bus-range = <0x0 0xff>;
ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
- clock-frequency = <0x1fca055>;
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
-
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 40 1
- 0000 0 0 2 &mpic 1 1
- 0000 0 0 3 &mpic 2 1
- 0000 0 0 4 &mpic 3 1
- >;
pcie@0 {
- reg = <0 0 0 0 0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
0 0x20000000
@@ -563,31 +134,10 @@
};
pci1: pcie@ffe201000 {
- compatible = "fsl,p4080-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
reg = <0xf 0xfe201000 0 0x1000>;
- bus-range = <0 0xff>;
ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
- clock-frequency = <0x1fca055>;
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 41 1
- 0000 0 0 2 &mpic 5 1
- 0000 0 0 3 &mpic 6 1
- 0000 0 0 4 &mpic 7 1
- >;
pcie@0 {
- reg = <0 0 0 0 0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
0 0x20000000
@@ -599,31 +149,10 @@
};
pci2: pcie@ffe202000 {
- compatible = "fsl,p4080-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
reg = <0xf 0xfe202000 0 0x1000>;
- bus-range = <0x0 0xff>;
ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
- clock-frequency = <0x1fca055>;
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 42 1
- 0000 0 0 2 &mpic 9 1
- 0000 0 0 3 &mpic 10 1
- 0000 0 0 4 &mpic 11 1
- >;
pcie@0 {
- reg = <0 0 0 0 0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
0 0x20000000
diff --git a/arch/powerpc/boot/dts/p4080si.dtsi b/arch/powerpc/boot/dts/p4080si.dtsi
new file mode 100644
index 00000000000..b71051f506c
--- /dev/null
+++ b/arch/powerpc/boot/dts/p4080si.dtsi
@@ -0,0 +1,661 @@
+/*
+ * P4080 Silicon Device Tree Source
+ *
+ * Copyright 2009-2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+/ {
+ compatible = "fsl,P4080";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ ccsr = &soc;
+
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ serial3 = &serial3;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ pci2 = &pci2;
+ usb0 = &usb0;
+ usb1 = &usb1;
+ dma0 = &dma0;
+ dma1 = &dma1;
+ sdhc = &sdhc;
+ msi0 = &msi0;
+ msi1 = &msi1;
+ msi2 = &msi2;
+
+ crypto = &crypto;
+ sec_jr0 = &sec_jr0;
+ sec_jr1 = &sec_jr1;
+ sec_jr2 = &sec_jr2;
+ sec_jr3 = &sec_jr3;
+ rtic_a = &rtic_a;
+ rtic_b = &rtic_b;
+ rtic_c = &rtic_c;
+ rtic_d = &rtic_d;
+ sec_mon = &sec_mon;
+
+ rio0 = &rapidio0;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: PowerPC,4080@0 {
+ device_type = "cpu";
+ reg = <0>;
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu1: PowerPC,4080@1 {
+ device_type = "cpu";
+ reg = <1>;
+ next-level-cache = <&L2_1>;
+ L2_1: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu2: PowerPC,4080@2 {
+ device_type = "cpu";
+ reg = <2>;
+ next-level-cache = <&L2_2>;
+ L2_2: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu3: PowerPC,4080@3 {
+ device_type = "cpu";
+ reg = <3>;
+ next-level-cache = <&L2_3>;
+ L2_3: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu4: PowerPC,4080@4 {
+ device_type = "cpu";
+ reg = <4>;
+ next-level-cache = <&L2_4>;
+ L2_4: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu5: PowerPC,4080@5 {
+ device_type = "cpu";
+ reg = <5>;
+ next-level-cache = <&L2_5>;
+ L2_5: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu6: PowerPC,4080@6 {
+ device_type = "cpu";
+ reg = <6>;
+ next-level-cache = <&L2_6>;
+ L2_6: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu7: PowerPC,4080@7 {
+ device_type = "cpu";
+ reg = <7>;
+ next-level-cache = <&L2_7>;
+ L2_7: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ };
+
+ soc: soc@ffe000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "simple-bus";
+ ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+ reg = <0xf 0xfe000000 0 0x00001000>;
+
+ soc-sram-error {
+ compatible = "fsl,soc-sram-error";
+ interrupts = <16 2 1 29>;
+ };
+
+ corenet-law@0 {
+ compatible = "fsl,corenet-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <32>;
+ };
+
+ memory-controller@8000 {
+ compatible = "fsl,qoriq-memory-controller-v4.4", "fsl,qoriq-memory-controller";
+ reg = <0x8000 0x1000>;
+ interrupts = <16 2 1 23>;
+ };
+
+ memory-controller@9000 {
+		compatible = "fsl,qoriq-memory-controller-v4.4", "fsl,qoriq-memory-controller";
+ reg = <0x9000 0x1000>;
+ interrupts = <16 2 1 22>;
+ };
+
+ cpc: l3-cache-controller@10000 {
+ compatible = "fsl,p4080-l3-cache-controller", "cache";
+ reg = <0x10000 0x1000
+ 0x11000 0x1000>;
+ interrupts = <16 2 1 27
+ 16 2 1 26>;
+ };
+
+ corenet-cf@18000 {
+ compatible = "fsl,corenet-cf";
+ reg = <0x18000 0x1000>;
+ interrupts = <16 2 1 31>;
+ fsl,ccf-num-csdids = <32>;
+ fsl,ccf-num-snoopids = <32>;
+ };
+
+ iommu@20000 {
+ compatible = "fsl,pamu-v1.0", "fsl,pamu";
+ reg = <0x20000 0x5000>;
+ interrupts = <
+ 24 2 0 0
+ 16 2 1 30>;
+ };
+
+ mpic: pic@40000 {
+ clock-frequency = <0>;
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <4>;
+ reg = <0x40000 0x40000>;
+ compatible = "fsl,mpic", "chrp,open-pic";
+ device_type = "open-pic";
+ };
+
+ msi0: msi@41600 {
+ compatible = "fsl,mpic-msi";
+ reg = <0x41600 0x200>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xe0 0 0 0
+ 0xe1 0 0 0
+ 0xe2 0 0 0
+ 0xe3 0 0 0
+ 0xe4 0 0 0
+ 0xe5 0 0 0
+ 0xe6 0 0 0
+ 0xe7 0 0 0>;
+ };
+
+ msi1: msi@41800 {
+ compatible = "fsl,mpic-msi";
+ reg = <0x41800 0x200>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xe8 0 0 0
+ 0xe9 0 0 0
+ 0xea 0 0 0
+ 0xeb 0 0 0
+ 0xec 0 0 0
+ 0xed 0 0 0
+ 0xee 0 0 0
+ 0xef 0 0 0>;
+ };
+
+ msi2: msi@41a00 {
+ compatible = "fsl,mpic-msi";
+ reg = <0x41a00 0x200>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xf0 0 0 0
+ 0xf1 0 0 0
+ 0xf2 0 0 0
+ 0xf3 0 0 0
+ 0xf4 0 0 0
+ 0xf5 0 0 0
+ 0xf6 0 0 0
+ 0xf7 0 0 0>;
+ };
+
+ guts: global-utilities@e0000 {
+ compatible = "fsl,qoriq-device-config-1.0";
+ reg = <0xe0000 0xe00>;
+ fsl,has-rstcr;
+ #sleep-cells = <1>;
+ fsl,liodn-bits = <12>;
+ };
+
+ pins: global-utilities@e0e00 {
+ compatible = "fsl,qoriq-pin-control-1.0";
+ reg = <0xe0e00 0x200>;
+ #sleep-cells = <2>;
+ };
+
+ clockgen: global-utilities@e1000 {
+ compatible = "fsl,p4080-clockgen", "fsl,qoriq-clockgen-1.0";
+ reg = <0xe1000 0x1000>;
+ clock-frequency = <0>;
+ };
+
+ rcpm: global-utilities@e2000 {
+ compatible = "fsl,qoriq-rcpm-1.0";
+ reg = <0xe2000 0x1000>;
+ #sleep-cells = <1>;
+ };
+
+ sfp: sfp@e8000 {
+ compatible = "fsl,p4080-sfp", "fsl,qoriq-sfp-1.0";
+ reg = <0xe8000 0x1000>;
+ };
+
+ serdes: serdes@ea000 {
+ compatible = "fsl,p4080-serdes";
+ reg = <0xea000 0x1000>;
+ };
+
+ dma0: dma@100300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,p4080-dma", "fsl,eloplus-dma";
+ reg = <0x100300 0x4>;
+ ranges = <0x0 0x100100 0x200>;
+ cell-index = <0>;
+ dma-channel@0 {
+ compatible = "fsl,p4080-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupts = <28 2 0 0>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,p4080-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupts = <29 2 0 0>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,p4080-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupts = <30 2 0 0>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,p4080-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupts = <31 2 0 0>;
+ };
+ };
+
+ dma1: dma@101300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,p4080-dma", "fsl,eloplus-dma";
+ reg = <0x101300 0x4>;
+ ranges = <0x0 0x101100 0x200>;
+ cell-index = <1>;
+ dma-channel@0 {
+ compatible = "fsl,p4080-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupts = <32 2 0 0>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,p4080-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupts = <33 2 0 0>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,p4080-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupts = <34 2 0 0>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,p4080-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupts = <35 2 0 0>;
+ };
+ };
+
+ spi@110000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,p4080-espi", "fsl,mpc8536-espi";
+ reg = <0x110000 0x1000>;
+ interrupts = <53 0x2 0 0>;
+ fsl,espi-num-chipselects = <4>;
+ };
+
+ sdhc: sdhc@114000 {
+ compatible = "fsl,p4080-esdhc", "fsl,esdhc";
+ reg = <0x114000 0x1000>;
+ interrupts = <48 2 0 0>;
+ voltage-ranges = <3300 3300>;
+ sdhci,auto-cmd12;
+ clock-frequency = <0>;
+ };
+
+ i2c@118000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ compatible = "fsl-i2c";
+ reg = <0x118000 0x100>;
+ interrupts = <38 2 0 0>;
+ dfsrr;
+ };
+
+ i2c@118100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ compatible = "fsl-i2c";
+ reg = <0x118100 0x100>;
+ interrupts = <38 2 0 0>;
+ dfsrr;
+ };
+
+ i2c@119000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <2>;
+ compatible = "fsl-i2c";
+ reg = <0x119000 0x100>;
+ interrupts = <39 2 0 0>;
+ dfsrr;
+ };
+
+ i2c@119100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <3>;
+ compatible = "fsl-i2c";
+ reg = <0x119100 0x100>;
+ interrupts = <39 2 0 0>;
+ dfsrr;
+ };
+
+ serial0: serial@11c500 {
+ cell-index = <0>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x11c500 0x100>;
+ clock-frequency = <0>;
+ interrupts = <36 2 0 0>;
+ };
+
+ serial1: serial@11c600 {
+ cell-index = <1>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x11c600 0x100>;
+ clock-frequency = <0>;
+ interrupts = <36 2 0 0>;
+ };
+
+ serial2: serial@11d500 {
+ cell-index = <2>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x11d500 0x100>;
+ clock-frequency = <0>;
+ interrupts = <37 2 0 0>;
+ };
+
+ serial3: serial@11d600 {
+ cell-index = <3>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x11d600 0x100>;
+ clock-frequency = <0>;
+ interrupts = <37 2 0 0>;
+ };
+
+ gpio0: gpio@130000 {
+ compatible = "fsl,p4080-gpio", "fsl,qoriq-gpio";
+ reg = <0x130000 0x1000>;
+ interrupts = <55 2 0 0>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
+
+ usb0: usb@210000 {
+ compatible = "fsl,p4080-usb2-mph",
+ "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
+ reg = <0x210000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <44 0x2 0 0>;
+ };
+
+ usb1: usb@211000 {
+ compatible = "fsl,p4080-usb2-dr",
+ "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
+ reg = <0x211000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <45 0x2 0 0>;
+ };
+
+ crypto: crypto@300000 {
+ compatible = "fsl,sec-v4.0";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x300000 0x10000>;
+ ranges = <0 0x300000 0x10000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <92 2 0 0>;
+
+ sec_jr0: jr@1000 {
+ compatible = "fsl,sec-v4.0-job-ring";
+ reg = <0x1000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <88 2 0 0>;
+ };
+
+ sec_jr1: jr@2000 {
+ compatible = "fsl,sec-v4.0-job-ring";
+ reg = <0x2000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <89 2 0 0>;
+ };
+
+ sec_jr2: jr@3000 {
+ compatible = "fsl,sec-v4.0-job-ring";
+ reg = <0x3000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <90 2 0 0>;
+ };
+
+ sec_jr3: jr@4000 {
+ compatible = "fsl,sec-v4.0-job-ring";
+ reg = <0x4000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <91 2 0 0>;
+ };
+
+ rtic@6000 {
+ compatible = "fsl,sec-v4.0-rtic";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x6000 0x100>;
+ ranges = <0x0 0x6100 0xe00>;
+
+ rtic_a: rtic-a@0 {
+ compatible = "fsl,sec-v4.0-rtic-memory";
+ reg = <0x00 0x20 0x100 0x80>;
+ };
+
+ rtic_b: rtic-b@20 {
+ compatible = "fsl,sec-v4.0-rtic-memory";
+ reg = <0x20 0x20 0x200 0x80>;
+ };
+
+ rtic_c: rtic-c@40 {
+ compatible = "fsl,sec-v4.0-rtic-memory";
+ reg = <0x40 0x20 0x300 0x80>;
+ };
+
+ rtic_d: rtic-d@60 {
+ compatible = "fsl,sec-v4.0-rtic-memory";
+ reg = <0x60 0x20 0x500 0x80>;
+ };
+ };
+ };
+
+ sec_mon: sec_mon@314000 {
+ compatible = "fsl,sec-v4.0-mon";
+ reg = <0x314000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <93 2 0 0>;
+ };
+ };
+
+ rapidio0: rapidio@ffe0c0000 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ compatible = "fsl,rapidio-delta";
+ interrupts = <
+ 16 2 1 11 /* err_irq */
+ 56 2 0 0 /* bell_outb_irq */
+ 57 2 0 0 /* bell_inb_irq */
+ 60 2 0 0 /* msg1_tx_irq */
+ 61 2 0 0 /* msg1_rx_irq */
+ 62 2 0 0 /* msg2_tx_irq */
+ 63 2 0 0>; /* msg2_rx_irq */
+ };
+
+ localbus@ffe124000 {
+ compatible = "fsl,p4080-elbc", "fsl,elbc", "simple-bus";
+ interrupts = <25 2 0 0>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+ };
+
+ pci0: pcie@ffe200000 {
+ compatible = "fsl,p4080-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <0x1fca055>;
+ fsl,msi = <&msi0>;
+ interrupts = <16 2 1 15>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 15>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 40 1 0 0
+ 0000 0 0 2 &mpic 1 1 0 0
+ 0000 0 0 3 &mpic 2 1 0 0
+ 0000 0 0 4 &mpic 3 1 0 0
+ >;
+ };
+ };
+
+ pci1: pcie@ffe201000 {
+ compatible = "fsl,p4080-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 0xff>;
+ clock-frequency = <0x1fca055>;
+ fsl,msi = <&msi1>;
+ interrupts = <16 2 1 14>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 14>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 41 1 0 0
+ 0000 0 0 2 &mpic 5 1 0 0
+ 0000 0 0 3 &mpic 6 1 0 0
+ 0000 0 0 4 &mpic 7 1 0 0
+ >;
+ };
+ };
+
+ pci2: pcie@ffe202000 {
+ compatible = "fsl,p4080-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <0x1fca055>;
+ fsl,msi = <&msi2>;
+ interrupts = <16 2 1 13>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 13>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 42 1 0 0
+ 0000 0 0 2 &mpic 9 1 0 0
+ 0000 0 0 3 &mpic 10 1 0 0
+ 0000 0 0 4 &mpic 11 1 0 0
+ >;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/p5020ds.dts b/arch/powerpc/boot/dts/p5020ds.dts
new file mode 100644
index 00000000000..8366e2fd2fb
--- /dev/null
+++ b/arch/powerpc/boot/dts/p5020ds.dts
@@ -0,0 +1,215 @@
+/*
+ * P5020DS Device Tree Source
+ *
+ * Copyright 2010-2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "p5020si.dtsi"
+
+/ {
+ model = "fsl,P5020DS";
+ compatible = "fsl,P5020DS";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ memory {
+ device_type = "memory";
+ };
+
+ soc: soc@ffe000000 {
+ spi@110000 {
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,s25sl12801";
+ reg = <0>;
+ spi-max-frequency = <40000000>; /* input clock */
+ partition@u-boot {
+ label = "u-boot";
+ reg = <0x00000000 0x00100000>;
+ read-only;
+ };
+ partition@kernel {
+ label = "kernel";
+ reg = <0x00100000 0x00500000>;
+ read-only;
+ };
+ partition@dtb {
+ label = "dtb";
+ reg = <0x00600000 0x00100000>;
+ read-only;
+ };
+ partition@fs {
+ label = "file system";
+ reg = <0x00700000 0x00900000>;
+ };
+ };
+ };
+
+ i2c@118100 {
+ eeprom@51 {
+ compatible = "at24,24c256";
+ reg = <0x51>;
+ };
+ eeprom@52 {
+ compatible = "at24,24c256";
+ reg = <0x52>;
+ };
+ };
+
+ i2c@119100 {
+ rtc@68 {
+ compatible = "dallas,ds3232";
+ reg = <0x68>;
+ interrupts = <0x1 0x1 0 0>;
+ };
+ };
+ };
+
+ localbus@ffe124000 {
+ reg = <0xf 0xfe124000 0 0x1000>;
+ ranges = <0 0 0xf 0xe8000000 0x08000000
+ 2 0 0xf 0xffa00000 0x00040000
+ 3 0 0xf 0xffdf0000 0x00008000>;
+
+ flash@0,0 {
+ compatible = "cfi-flash";
+ reg = <0 0 0x08000000>;
+ bank-width = <2>;
+ device-width = <2>;
+ };
+
+ nand@2,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,elbc-fcm-nand";
+ reg = <0x2 0x0 0x40000>;
+
+ partition@0 {
+ label = "NAND U-Boot Image";
+ reg = <0x0 0x02000000>;
+ read-only;
+ };
+
+ partition@2000000 {
+ label = "NAND Root File System";
+ reg = <0x02000000 0x10000000>;
+ };
+
+ partition@12000000 {
+ label = "NAND Compressed RFS Image";
+ reg = <0x12000000 0x08000000>;
+ };
+
+ partition@1a000000 {
+ label = "NAND Linux Kernel Image";
+ reg = <0x1a000000 0x04000000>;
+ };
+
+ partition@1e000000 {
+ label = "NAND DTB Image";
+ reg = <0x1e000000 0x01000000>;
+ };
+
+ partition@1f000000 {
+ label = "NAND Writable User area";
+ reg = <0x1f000000 0x21000000>;
+ };
+ };
+
+ board-control@3,0 {
+ compatible = "fsl,p5020ds-pixis";
+ reg = <3 0 0x20>;
+ };
+ };
+
+ pci0: pcie@ffe200000 {
+ reg = <0xf 0xfe200000 0 0x1000>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
+ 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
+
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+
+ pci1: pcie@ffe201000 {
+ reg = <0xf 0xfe201000 0 0x1000>;
+ ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
+ 0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+
+ pci2: pcie@ffe202000 {
+ reg = <0xf 0xfe202000 0 0x1000>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
+ 0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+
+ pci3: pcie@ffe203000 {
+ reg = <0xf 0xfe203000 0 0x1000>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x60000000 0 0x20000000
+ 0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+};
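
The pcie nodes above describe each controller's outbound windows with seven-cell ranges entries: three PCI address cells, two parent (CPU) address cells, and two size cells. A minimal standalone C sketch (not from the kernel tree; the array contents are copied from the first window of pci0 above) showing how one such entry decodes:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        /* <0x02000000 0 0xe0000000  0xc 0x00000000  0x0 0x20000000>
         * = 3 PCI address cells + 2 CPU address cells + 2 size cells */
        uint32_t cells[7] = { 0x02000000, 0x0, 0xe0000000,
                              0xc, 0x00000000,
                              0x0, 0x20000000 };

        unsigned int space = (cells[0] >> 24) & 0x3;   /* 1 = I/O, 2 = 32-bit memory */
        uint64_t pci_addr  = ((uint64_t)cells[1] << 32) | cells[2];
        uint64_t cpu_addr  = ((uint64_t)cells[3] << 32) | cells[4];
        uint64_t size      = ((uint64_t)cells[5] << 32) | cells[6];

        /* Prints: space=2 pci=0xe0000000 cpu=0xc00000000 size=0x20000000,
         * i.e. a 512 MiB 32-bit memory window. */
        printf("space=%u pci=0x%" PRIx64 " cpu=0x%" PRIx64 " size=0x%" PRIx64 "\n",
               space, pci_addr, cpu_addr, size);
        return 0;
}
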
diff --git a/arch/powerpc/boot/dts/p5020si.dtsi b/arch/powerpc/boot/dts/p5020si.dtsi
new file mode 100644
index 00000000000..5e6048ec55b
--- /dev/null
+++ b/arch/powerpc/boot/dts/p5020si.dtsi
@@ -0,0 +1,652 @@
+/*
+ * P5020 Silicon Device Tree Source
+ *
+ * Copyright 2010-2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+/ {
+ compatible = "fsl,P5020";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ ccsr = &soc;
+
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ serial3 = &serial3;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ pci2 = &pci2;
+ pci3 = &pci3;
+ usb0 = &usb0;
+ usb1 = &usb1;
+ dma0 = &dma0;
+ dma1 = &dma1;
+ sdhc = &sdhc;
+ msi0 = &msi0;
+ msi1 = &msi1;
+ msi2 = &msi2;
+
+ crypto = &crypto;
+ sec_jr0 = &sec_jr0;
+ sec_jr1 = &sec_jr1;
+ sec_jr2 = &sec_jr2;
+ sec_jr3 = &sec_jr3;
+ rtic_a = &rtic_a;
+ rtic_b = &rtic_b;
+ rtic_c = &rtic_c;
+ rtic_d = &rtic_d;
+ sec_mon = &sec_mon;
+
+/*
+ rio0 = &rapidio0;
+ */
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: PowerPC,e5500@0 {
+ device_type = "cpu";
+ reg = <0>;
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu1: PowerPC,e5500@1 {
+ device_type = "cpu";
+ reg = <1>;
+ next-level-cache = <&L2_1>;
+ L2_1: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ };
+
+ soc: soc@ffe000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "simple-bus";
+ ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+ reg = <0xf 0xfe000000 0 0x00001000>;
+
+ soc-sram-error {
+ compatible = "fsl,soc-sram-error";
+ interrupts = <16 2 1 29>;
+ };
+
+ corenet-law@0 {
+ compatible = "fsl,corenet-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <32>;
+ };
+
+ memory-controller@8000 {
+ compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
+ reg = <0x8000 0x1000>;
+ interrupts = <16 2 1 23>;
+ };
+
+ memory-controller@9000 {
+ compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
+ reg = <0x9000 0x1000>;
+ interrupts = <16 2 1 22>;
+ };
+
+ cpc: l3-cache-controller@10000 {
+ compatible = "fsl,p5020-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache";
+ reg = <0x10000 0x1000
+ 0x11000 0x1000>;
+ interrupts = <16 2 1 27
+ 16 2 1 26>;
+ };
+
+ corenet-cf@18000 {
+ compatible = "fsl,corenet-cf";
+ reg = <0x18000 0x1000>;
+ interrupts = <16 2 1 31>;
+ fsl,ccf-num-csdids = <32>;
+ fsl,ccf-num-snoopids = <32>;
+ };
+
+ iommu@20000 {
+ compatible = "fsl,pamu-v1.0", "fsl,pamu";
+ reg = <0x20000 0x4000>;
+ interrupts = <
+ 24 2 0 0
+ 16 2 1 30>;
+ };
+
+ mpic: pic@40000 {
+ clock-frequency = <0>;
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <4>;
+ reg = <0x40000 0x40000>;
+ compatible = "fsl,mpic", "chrp,open-pic";
+ device_type = "open-pic";
+ };
+
+ msi0: msi@41600 {
+ compatible = "fsl,mpic-msi";
+ reg = <0x41600 0x200>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xe0 0 0 0
+ 0xe1 0 0 0
+ 0xe2 0 0 0
+ 0xe3 0 0 0
+ 0xe4 0 0 0
+ 0xe5 0 0 0
+ 0xe6 0 0 0
+ 0xe7 0 0 0>;
+ };
+
+ msi1: msi@41800 {
+ compatible = "fsl,mpic-msi";
+ reg = <0x41800 0x200>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xe8 0 0 0
+ 0xe9 0 0 0
+ 0xea 0 0 0
+ 0xeb 0 0 0
+ 0xec 0 0 0
+ 0xed 0 0 0
+ 0xee 0 0 0
+ 0xef 0 0 0>;
+ };
+
+ msi2: msi@41a00 {
+ compatible = "fsl,mpic-msi";
+ reg = <0x41a00 0x200>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xf0 0 0 0
+ 0xf1 0 0 0
+ 0xf2 0 0 0
+ 0xf3 0 0 0
+ 0xf4 0 0 0
+ 0xf5 0 0 0
+ 0xf6 0 0 0
+ 0xf7 0 0 0>;
+ };
+
+ guts: global-utilities@e0000 {
+ compatible = "fsl,qoriq-device-config-1.0";
+ reg = <0xe0000 0xe00>;
+ fsl,has-rstcr;
+ #sleep-cells = <1>;
+ fsl,liodn-bits = <12>;
+ };
+
+ pins: global-utilities@e0e00 {
+ compatible = "fsl,qoriq-pin-control-1.0";
+ reg = <0xe0e00 0x200>;
+ #sleep-cells = <2>;
+ };
+
+ clockgen: global-utilities@e1000 {
+ compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0";
+ reg = <0xe1000 0x1000>;
+ clock-frequency = <0>;
+ };
+
+ rcpm: global-utilities@e2000 {
+ compatible = "fsl,qoriq-rcpm-1.0";
+ reg = <0xe2000 0x1000>;
+ #sleep-cells = <1>;
+ };
+
+ sfp: sfp@e8000 {
+ compatible = "fsl,p5020-sfp", "fsl,qoriq-sfp-1.0";
+ reg = <0xe8000 0x1000>;
+ };
+
+ serdes: serdes@ea000 {
+ compatible = "fsl,p5020-serdes";
+ reg = <0xea000 0x1000>;
+ };
+
+ dma0: dma@100300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,p5020-dma", "fsl,eloplus-dma";
+ reg = <0x100300 0x4>;
+ ranges = <0x0 0x100100 0x200>;
+ cell-index = <0>;
+ dma-channel@0 {
+ compatible = "fsl,p5020-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupts = <28 2 0 0>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,p5020-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupts = <29 2 0 0>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,p5020-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupts = <30 2 0 0>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,p5020-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupts = <31 2 0 0>;
+ };
+ };
+
+ dma1: dma@101300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,p5020-dma", "fsl,eloplus-dma";
+ reg = <0x101300 0x4>;
+ ranges = <0x0 0x101100 0x200>;
+ cell-index = <1>;
+ dma-channel@0 {
+ compatible = "fsl,p5020-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupts = <32 2 0 0>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,p5020-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupts = <33 2 0 0>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,p5020-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupts = <34 2 0 0>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,p5020-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupts = <35 2 0 0>;
+ };
+ };
+
+ spi@110000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,p5020-espi", "fsl,mpc8536-espi";
+ reg = <0x110000 0x1000>;
+ interrupts = <53 0x2 0 0>;
+ fsl,espi-num-chipselects = <4>;
+ };
+
+ sdhc: sdhc@114000 {
+ compatible = "fsl,p5020-esdhc", "fsl,esdhc";
+ reg = <0x114000 0x1000>;
+ interrupts = <48 2 0 0>;
+ sdhci,auto-cmd12;
+ clock-frequency = <0>;
+ };
+
+ i2c@118000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ compatible = "fsl-i2c";
+ reg = <0x118000 0x100>;
+ interrupts = <38 2 0 0>;
+ dfsrr;
+ };
+
+ i2c@118100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ compatible = "fsl-i2c";
+ reg = <0x118100 0x100>;
+ interrupts = <38 2 0 0>;
+ dfsrr;
+ };
+
+ i2c@119000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <2>;
+ compatible = "fsl-i2c";
+ reg = <0x119000 0x100>;
+ interrupts = <39 2 0 0>;
+ dfsrr;
+ };
+
+ i2c@119100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <3>;
+ compatible = "fsl-i2c";
+ reg = <0x119100 0x100>;
+ interrupts = <39 2 0 0>;
+ dfsrr;
+ };
+
+ serial0: serial@11c500 {
+ cell-index = <0>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x11c500 0x100>;
+ clock-frequency = <0>;
+ interrupts = <36 2 0 0>;
+ };
+
+ serial1: serial@11c600 {
+ cell-index = <1>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x11c600 0x100>;
+ clock-frequency = <0>;
+ interrupts = <36 2 0 0>;
+ };
+
+ serial2: serial@11d500 {
+ cell-index = <2>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x11d500 0x100>;
+ clock-frequency = <0>;
+ interrupts = <37 2 0 0>;
+ };
+
+ serial3: serial@11d600 {
+ cell-index = <3>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x11d600 0x100>;
+ clock-frequency = <0>;
+ interrupts = <37 2 0 0>;
+ };
+
+ gpio0: gpio@130000 {
+ compatible = "fsl,p5020-gpio", "fsl,qoriq-gpio";
+ reg = <0x130000 0x1000>;
+ interrupts = <55 2 0 0>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
+
+ usb0: usb@210000 {
+ compatible = "fsl,p5020-usb2-mph",
+ "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
+ reg = <0x210000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <44 0x2 0 0>;
+ phy_type = "utmi";
+ port0;
+ };
+
+ usb1: usb@211000 {
+ compatible = "fsl,p5020-usb2-dr",
+ "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
+ reg = <0x211000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <45 0x2 0 0>;
+ dr_mode = "host";
+ phy_type = "utmi";
+ };
+
+ sata@220000 {
+ compatible = "fsl,p5020-sata", "fsl,pq-sata-v2";
+ reg = <0x220000 0x1000>;
+ interrupts = <68 0x2 0 0>;
+ };
+
+ sata@221000 {
+ compatible = "fsl,p5020-sata", "fsl,pq-sata-v2";
+ reg = <0x221000 0x1000>;
+ interrupts = <69 0x2 0 0>;
+ };
+
+ crypto: crypto@300000 {
+ compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x300000 0x10000>;
+ ranges = <0 0x300000 0x10000>;
+ interrupts = <92 2 0 0>;
+
+ sec_jr0: jr@1000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x1000 0x1000>;
+ interrupts = <88 2 0 0>;
+ };
+
+ sec_jr1: jr@2000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x2000 0x1000>;
+ interrupts = <89 2 0 0>;
+ };
+
+ sec_jr2: jr@3000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x3000 0x1000>;
+ interrupts = <90 2 0 0>;
+ };
+
+ sec_jr3: jr@4000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x4000 0x1000>;
+ interrupts = <91 2 0 0>;
+ };
+
+ rtic@6000 {
+ compatible = "fsl,sec-v4.2-rtic",
+ "fsl,sec-v4.0-rtic";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x6000 0x100>;
+ ranges = <0x0 0x6100 0xe00>;
+
+ rtic_a: rtic-a@0 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x00 0x20 0x100 0x80>;
+ };
+
+ rtic_b: rtic-b@20 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x20 0x20 0x200 0x80>;
+ };
+
+ rtic_c: rtic-c@40 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x40 0x20 0x300 0x80>;
+ };
+
+ rtic_d: rtic-d@60 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x60 0x20 0x500 0x80>;
+ };
+ };
+ };
+
+ sec_mon: sec_mon@314000 {
+ compatible = "fsl,sec-v4.2-mon", "fsl,sec-v4.0-mon";
+ reg = <0x314000 0x1000>;
+ interrupts = <93 2 0 0>;
+ };
+ };
+
+/*
+ rapidio0: rapidio@ffe0c0000
+*/
+
+ localbus@ffe124000 {
+ compatible = "fsl,p5020-elbc", "fsl,elbc", "simple-bus";
+ interrupts = <25 2 0 0>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+ };
+
+ pci0: pcie@ffe200000 {
+ compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <0x1fca055>;
+ fsl,msi = <&msi0>;
+ interrupts = <16 2 1 15>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 15>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 40 1 0 0
+ 0000 0 0 2 &mpic 1 1 0 0
+ 0000 0 0 3 &mpic 2 1 0 0
+ 0000 0 0 4 &mpic 3 1 0 0
+ >;
+ };
+ };
+
+ pci1: pcie@ffe201000 {
+ compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 0xff>;
+ clock-frequency = <0x1fca055>;
+ fsl,msi = <&msi1>;
+ interrupts = <16 2 1 14>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 14>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 41 1 0 0
+ 0000 0 0 2 &mpic 5 1 0 0
+ 0000 0 0 3 &mpic 6 1 0 0
+ 0000 0 0 4 &mpic 7 1 0 0
+ >;
+ };
+ };
+
+ pci2: pcie@ffe202000 {
+ compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <0x1fca055>;
+ fsl,msi = <&msi2>;
+ interrupts = <16 2 1 13>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 13>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 42 1 0 0
+ 0000 0 0 2 &mpic 9 1 0 0
+ 0000 0 0 3 &mpic 10 1 0 0
+ 0000 0 0 4 &mpic 11 1 0 0
+ >;
+ };
+ };
+
+ pci3: pcie@ffe203000 {
+ compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <0x1fca055>;
+ fsl,msi = <&msi2>;
+ interrupts = <16 2 1 12>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 12>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 43 1 0 0
+ 0000 0 0 2 &mpic 0 1 0 0
+ 0000 0 0 3 &mpic 4 1 0 0
+ 0000 0 0 4 &mpic 8 1 0 0
+ >;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/sequoia.dts b/arch/powerpc/boot/dts/sequoia.dts
index 739dd0da241..b1d329246b0 100644
--- a/arch/powerpc/boot/dts/sequoia.dts
+++ b/arch/powerpc/boot/dts/sequoia.dts
@@ -110,6 +110,18 @@
dcr-reg = <0x010 0x002>;
};
+ CRYPTO: crypto@e0100000 {
+ compatible = "amcc,ppc440epx-crypto","amcc,ppc4xx-crypto";
+ reg = <0 0xE0100000 0x80400>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <0x17 0x4>;
+ };
+
+ rng@e0120000 {
+ compatible = "amcc,ppc440epx-rng","amcc,ppc4xx-rng";
+ reg = <0 0xE0120000 0x150>;
+ };
+
DMA0: dma {
compatible = "ibm,dma-440epx", "ibm,dma-4xx";
dcr-reg = <0x100 0x027>;
diff --git a/arch/powerpc/boot/dts/socrates.dts b/arch/powerpc/boot/dts/socrates.dts
index feb4ef6bd14..38c35404bdc 100644
--- a/arch/powerpc/boot/dts/socrates.dts
+++ b/arch/powerpc/boot/dts/socrates.dts
@@ -240,6 +240,8 @@
#address-cells = <2>;
#size-cells = <1>;
reg = <0xe0005000 0x40>;
+ interrupt-parent = <&mpic>;
+ interrupts = <19 2>;
ranges = <0 0 0xfc000000 0x04000000
2 0 0xc8000000 0x04000000
diff --git a/arch/powerpc/boot/dts/taishan.dts b/arch/powerpc/boot/dts/taishan.dts
index 058438f9629..1657ad0bf8a 100644
--- a/arch/powerpc/boot/dts/taishan.dts
+++ b/arch/powerpc/boot/dts/taishan.dts
@@ -337,7 +337,7 @@
rx-fifo-size = <4096>;
tx-fifo-size = <2048>;
phy-mode = "rgmii";
- phy-map = <0x00000001>;
+ phy-address = <1>;
rgmii-device = <&RGMII0>;
rgmii-channel = <0>;
zmii-device = <&ZMII0>;
@@ -361,7 +361,7 @@
rx-fifo-size = <4096>;
tx-fifo-size = <2048>;
phy-mode = "rgmii";
- phy-map = <0x00000003>;
+ phy-address = <3>;
rgmii-device = <&RGMII0>;
rgmii-channel = <1>;
zmii-device = <&ZMII0>;
diff --git a/arch/powerpc/boot/dts/tqm8540.dts b/arch/powerpc/boot/dts/tqm8540.dts
index 15ca731bc24..0a4cedbdcb5 100644
--- a/arch/powerpc/boot/dts/tqm8540.dts
+++ b/arch/powerpc/boot/dts/tqm8540.dts
@@ -277,6 +277,48 @@
};
};
+ localbus@e0005000 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc8540-localbus", "fsl,pq3-localbus",
+ "simple-bus";
+ reg = <0xe0005000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <19 2>;
+
+ ranges = <0x0 0x0 0xfe000000 0x02000000>;
+
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x02000000>;
+ bank-width = <4>;
+ device-width = <2>;
+ partition@0 {
+ label = "kernel";
+ reg = <0x00000000 0x00180000>;
+ };
+ partition@180000 {
+ label = "root";
+ reg = <0x00180000 0x01dc0000>;
+ };
+ partition@1f40000 {
+ label = "env1";
+ reg = <0x01f40000 0x00040000>;
+ };
+ partition@1f80000 {
+ label = "env2";
+ reg = <0x01f80000 0x00040000>;
+ };
+ partition@1fc0000 {
+ label = "u-boot";
+ reg = <0x01fc0000 0x00040000>;
+ read-only;
+ };
+ };
+ };
+
pci0: pci@e0008000 {
#interrupt-cells = <1>;
#size-cells = <2>;
diff --git a/arch/powerpc/boot/dts/tqm8548-bigflash.dts b/arch/powerpc/boot/dts/tqm8548-bigflash.dts
index 5dbb36edb03..9452c3c0511 100644
--- a/arch/powerpc/boot/dts/tqm8548-bigflash.dts
+++ b/arch/powerpc/boot/dts/tqm8548-bigflash.dts
@@ -346,6 +346,8 @@
#address-cells = <2>;
#size-cells = <1>;
reg = <0xa0005000 0x100>; // BRx, ORx, etc.
+ interrupt-parent = <&mpic>;
+ interrupts = <19 2>;
ranges = <
0 0x0 0xfc000000 0x04000000 // NOR FLASH bank 1
diff --git a/arch/powerpc/boot/dts/tqm8548.dts b/arch/powerpc/boot/dts/tqm8548.dts
index a050ae42710..619776f72c9 100644
--- a/arch/powerpc/boot/dts/tqm8548.dts
+++ b/arch/powerpc/boot/dts/tqm8548.dts
@@ -346,6 +346,8 @@
#address-cells = <2>;
#size-cells = <1>;
reg = <0xe0005000 0x100>; // BRx, ORx, etc.
+ interrupt-parent = <&mpic>;
+ interrupts = <19 2>;
ranges = <
0 0x0 0xfc000000 0x04000000 // NOR FLASH bank 1
diff --git a/arch/powerpc/boot/dts/tqm8560.dts b/arch/powerpc/boot/dts/tqm8560.dts
index 22ec39b5bee..7665a16a8b9 100644
--- a/arch/powerpc/boot/dts/tqm8560.dts
+++ b/arch/powerpc/boot/dts/tqm8560.dts
@@ -312,6 +312,8 @@
#address-cells = <2>;
#size-cells = <1>;
reg = <0xe0005000 0x100>; // BRx, ORx, etc.
+ interrupt-parent = <&mpic>;
+ interrupts = <19 2>;
ranges = <
0 0x0 0xfc000000 0x04000000 // NOR FLASH bank 1
diff --git a/arch/powerpc/boot/dts/xpedite5200.dts b/arch/powerpc/boot/dts/xpedite5200.dts
index a0cf53fbd55..c41a80c55e4 100644
--- a/arch/powerpc/boot/dts/xpedite5200.dts
+++ b/arch/powerpc/boot/dts/xpedite5200.dts
@@ -374,6 +374,8 @@
#address-cells = <2>;
#size-cells = <1>;
reg = <0xef005000 0x100>; // BRx, ORx, etc.
+ interrupt-parent = <&mpic>;
+ interrupts = <19 2>;
ranges = <
0 0x0 0xfc000000 0x04000000 // NOR boot flash
diff --git a/arch/powerpc/boot/dts/xpedite5200_xmon.dts b/arch/powerpc/boot/dts/xpedite5200_xmon.dts
index c5b29752651..c0efcbb4513 100644
--- a/arch/powerpc/boot/dts/xpedite5200_xmon.dts
+++ b/arch/powerpc/boot/dts/xpedite5200_xmon.dts
@@ -378,6 +378,8 @@
#address-cells = <2>;
#size-cells = <1>;
reg = <0xef005000 0x100>; // BRx, ORx, etc.
+ interrupt-parent = <&mpic>;
+ interrupts = <19 2>;
ranges = <
0 0x0 0xf8000000 0x08000000 // NOR boot flash
diff --git a/arch/powerpc/boot/treeboot-iss4xx.c b/arch/powerpc/boot/treeboot-iss4xx.c
index fcc44952874..329e710feda 100644
--- a/arch/powerpc/boot/treeboot-iss4xx.c
+++ b/arch/powerpc/boot/treeboot-iss4xx.c
@@ -34,9 +34,29 @@
BSS_STACK(4096);
+static u32 ibm4xx_memstart;
+
static void iss_4xx_fixups(void)
{
- ibm4xx_sdram_fixup_memsize();
+ void *memory;
+ u32 reg[3];
+
+ memory = finddevice("/memory");
+ if (!memory)
+ fatal("Can't find memory node\n");
+	/* This assumes #address-cells = 2 and #size-cells = 1 */
+ getprop(memory, "reg", reg, sizeof(reg));
+ if (reg[2])
+ /* If the device tree specifies the memory range, use it */
+ ibm4xx_memstart = reg[1];
+ else
+	/* otherwise, read it from the SDRAM controller */
+ ibm4xx_sdram_fixup_memsize();
+}
+
+static void *iss_4xx_vmlinux_alloc(unsigned long size)
+{
+ return (void *)ibm4xx_memstart;
}
#define SPRN_PIR 0x11E /* Processor Identification Register */
@@ -48,6 +68,7 @@ void platform_init(void)
simple_alloc_init(_end, avail_ram, 128, 64);
platform_ops.fixups = iss_4xx_fixups;
+ platform_ops.vmlinux_alloc = iss_4xx_vmlinux_alloc;
platform_ops.exit = ibm44x_dbcr_reset;
pir_reg = mfspr(SPRN_PIR);
fdt_set_boot_cpuid_phys(_dtb_start, pir_reg);
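
For reference, the memsize fixup above treats the /memory node's reg property as three 32-bit cells (two address cells, one size cell) and only trusts it when the size cell is non-zero. A standalone behavioural sketch with made-up example values, not the wrapper code itself:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Example reg for #address-cells = 2, #size-cells = 1:
         * start address 0x0_00000000, size 0x08000000 (128 MiB). */
        uint32_t reg[3] = { 0x0, 0x00000000, 0x08000000 };
        uint32_t memstart;

        if (reg[2])                     /* size filled in: use the device tree value */
                memstart = reg[1];      /* low 32 bits of the start address */
        else
                memstart = 0;           /* the wrapper would probe the SDRAM controller instead */

        printf("memstart = 0x%08x\n", (unsigned int)memstart);
        return 0;
}
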
diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig
index 92f863ac844..a6eb6ad05b2 100644
--- a/arch/powerpc/configs/44x/iss476-smp_defconfig
+++ b/arch/powerpc/configs/44x/iss476-smp_defconfig
@@ -3,8 +3,8 @@ CONFIG_SMP=y
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_SPARSE_IRQ=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
@@ -21,10 +21,11 @@ CONFIG_ISS4xx=y
CONFIG_HZ_100=y
CONFIG_MATH_EMULATION=y
CONFIG_IRQ_ALL_CPUS=y
-CONFIG_SPARSE_IRQ=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="root=/dev/issblk0"
# CONFIG_PCI is not set
+CONFIG_ADVANCED_OPTIONS=y
+CONFIG_RELOCATABLE=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -67,7 +68,6 @@ CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
diff --git a/arch/powerpc/configs/85xx/p1023rds_defconfig b/arch/powerpc/configs/85xx/p1023rds_defconfig
new file mode 100644
index 00000000000..980ff8f61fd
--- /dev/null
+++ b/arch/powerpc/configs/85xx/p1023rds_defconfig
@@ -0,0 +1,173 @@
+CONFIG_PPC_85xx=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_AUDIT=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_EMBEDDED=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_P1023_RDS=y
+CONFIG_QUICC_ENGINE=y
+CONFIG_QE_GPIO=y
+CONFIG_CPM2=y
+CONFIG_MPC8xxx_GPIO=y
+CONFIG_HIGHMEM=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_MATH_EMULATION=y
+CONFIG_SWIOTLB=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+# CONFIG_PCIEAER is not set
+# CONFIG_PCIEASPM is not set
+CONFIG_PCI_MSI=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_ARPD=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6=y
+CONFIG_IP_SCTP=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_PROC_DEVICETREE=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
+CONFIG_EEPROM_LEGACY=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_ATA=y
+CONFIG_SATA_FSL=y
+CONFIG_SATA_SIL24=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+CONFIG_MARVELL_PHY=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_CICADA_PHY=y
+CONFIG_VITESSE_PHY=y
+CONFIG_FIXED_PHY=y
+CONFIG_NET_ETHERNET=y
+CONFIG_FS_ENET=y
+CONFIG_E1000E=y
+CONFIG_FSL_PQ_MDIO=y
+CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_QE=m
+CONFIG_HW_RANDOM=y
+CONFIG_NVRAM=y
+CONFIG_I2C=y
+CONFIG_I2C_CPM=m
+CONFIG_I2C_MPC=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_DMADEVICES=y
+CONFIG_FSL_DMA=y
+# CONFIG_NET_DMA is not set
+CONFIG_STAGING=y
+# CONFIG_STAGING_EXCLUDE_BUILD is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_ADFS_FS=m
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_CRAMFS=y
+CONFIG_VXFS_FS=m
+CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
+CONFIG_CRC_T10DIF=y
+CONFIG_FRAME_WARN=8092
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_VIRQ_DEBUG=y
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=y
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig b/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
index 036bfb2d18c..0db9ba0423f 100644
--- a/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
+++ b/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
@@ -89,6 +89,11 @@ CONFIG_I2C_MPC=y
CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB=y
CONFIG_FB_FSL_DIU=y
+CONFIG_VGACON_SOFT_SCROLLBACK=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_MIXER_OSS=y
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
new file mode 100644
index 00000000000..10562a5c65b
--- /dev/null
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -0,0 +1,187 @@
+CONFIG_PPC_85xx=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=8
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_AUDIT=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_RCU_TRACE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_P2040_RDB=y
+CONFIG_P3041_DS=y
+CONFIG_P4080_DS=y
+CONFIG_P5020_DS=y
+CONFIG_HIGHMEM=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_KEXEC=y
+CONFIG_FORCE_MAX_ZONEORDER=13
+CONFIG_FSL_LBC=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+# CONFIG_PCIEASPM is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_ARPD=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6=y
+CONFIG_IP_SCTP=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_M25P80=y
+CONFIG_PROC_DEVICETREE=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_FSL=y
+CONFIG_SATA_SIL24=y
+CONFIG_SATA_SIL=y
+CONFIG_PATA_SIL680=y
+CONFIG_NETDEVICES=y
+CONFIG_VITESSE_PHY=y
+CONFIG_FIXED_PHY=y
+CONFIG_NET_ETHERNET=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_FSL_PQ_MDIO=y
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_PPC_EPAPR_HV_BYTECHAN=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_HW_RANDOM=y
+CONFIG_NVRAM=y
+CONFIG_I2C=y
+CONFIG_I2C_MPC=y
+CONFIG_SPI=y
+CONFIG_SPI_GPIO=y
+CONFIG_SPI_FSL_SPI=y
+CONFIG_SPI_FSL_ESPI=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_USB_HID=m
+CONFIG_USB=y
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_MON=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_FSL=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
+CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_OF=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_EDAC_MPC85XX=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS3232=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_UIO=y
+CONFIG_STAGING=y
+# CONFIG_STAGING_EXCLUDE_BUILD is not set
+CONFIG_VIRT_DRIVERS=y
+CONFIG_FSL_HV_MANAGER=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=m
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_INFO=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=y
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/e55xx_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index d32283555b5..d32283555b5 100644
--- a/arch/powerpc/configs/e55xx_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 96b89df7752..fcd85d2c72d 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -5,6 +5,7 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
+CONFIG_SPARSE_IRQ=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -25,7 +26,9 @@ CONFIG_MPC85xx_MDS=y
CONFIG_MPC8536_DS=y
CONFIG_MPC85xx_DS=y
CONFIG_MPC85xx_RDB=y
+CONFIG_P1010_RDB=y
CONFIG_P1022_DS=y
+CONFIG_P1023_RDS=y
CONFIG_SOCRATES=y
CONFIG_KSI8560=y
CONFIG_XES_MPC85xx=y
@@ -44,7 +47,6 @@ CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=m
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
CONFIG_FORCE_MAX_ZONEORDER=12
CONFIG_PCI=y
CONFIG_PCI_MSI=y
@@ -65,8 +67,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=y
-CONFIG_NET_IPGRE=y
-CONFIG_NET_IPGRE_BROADCAST=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
@@ -128,6 +128,10 @@ CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB=y
CONFIG_FB_FSL_DIU=y
# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
CONFIG_SOUND=y
CONFIG_SND=y
# CONFIG_SND_SUPPORT_OLD_API is not set
@@ -170,7 +174,6 @@ CONFIG_FSL_DMA=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -205,7 +208,6 @@ CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_VIRQ_DEBUG=y
CONFIG_CRYPTO_PCBC=m
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index de65841aa04..908c941fc24 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -7,6 +7,7 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
+CONFIG_SPARSE_IRQ=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -28,6 +29,7 @@ CONFIG_MPC8536_DS=y
CONFIG_MPC85xx_DS=y
CONFIG_MPC85xx_RDB=y
CONFIG_P1022_DS=y
+CONFIG_P1023_RDS=y
CONFIG_SOCRATES=y
CONFIG_KSI8560=y
CONFIG_XES_MPC85xx=y
@@ -46,7 +48,6 @@ CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=m
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
CONFIG_FORCE_MAX_ZONEORDER=12
CONFIG_PCI=y
CONFIG_PCI_MSI=y
@@ -67,8 +68,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=y
-CONFIG_NET_IPGRE=y
-CONFIG_NET_IPGRE_BROADCAST=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
@@ -130,6 +129,10 @@ CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB=y
CONFIG_FB_FSL_DIU=y
# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
CONFIG_SOUND=y
CONFIG_SND=y
# CONFIG_SND_SUPPORT_OLD_API is not set
@@ -172,7 +175,6 @@ CONFIG_FSL_DMA=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 76736017cd3..84a685a505f 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -176,12 +176,19 @@ CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_FC_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
+CONFIG_SCSI_CXGB3_ISCSI=m
+CONFIG_SCSI_CXGB4_ISCSI=m
+CONFIG_SCSI_BNX2_ISCSI=m
+CONFIG_BE2ISCSI=m
+CONFIG_SCSI_MPT2SAS=m
CONFIG_SCSI_IBMVSCSI=y
CONFIG_SCSI_IBMVFC=m
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
CONFIG_SCSI_IPR=y
CONFIG_SCSI_QLA_FC=m
+CONFIG_SCSI_QLA_ISCSI=m
CONFIG_SCSI_LPFC=m
CONFIG_ATA=y
CONFIG_SATA_SIL24=y
@@ -235,11 +242,13 @@ CONFIG_ACENIC_OMIT_TIGON_I=y
CONFIG_E1000=y
CONFIG_E1000E=y
CONFIG_TIGON3=y
+CONFIG_BNX2=m
CONFIG_SPIDER_NET=m
CONFIG_GELIC_NET=m
CONFIG_GELIC_WIRELESS=y
CONFIG_CHELSIO_T1=m
CONFIG_CHELSIO_T3=m
+CONFIG_CHELSIO_T4=m
CONFIG_EHEA=m
CONFIG_IXGBE=m
CONFIG_IXGB=m
@@ -248,6 +257,8 @@ CONFIG_MYRI10GE=m
CONFIG_NETXEN_NIC=m
CONFIG_PASEMI_MAC=y
CONFIG_MLX4_EN=m
+CONFIG_QLGE=m
+CONFIG_BE2NET=m
CONFIG_ISERIES_VETH=m
CONFIG_PPP=m
CONFIG_PPP_ASYNC=m
@@ -330,6 +341,8 @@ CONFIG_INFINIBAND_USER_MAD=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_INFINIBAND_MTHCA=m
CONFIG_INFINIBAND_EHCA=m
+CONFIG_INFINIBAND_CXGB3=m
+CONFIG_INFINIBAND_CXGB4=m
CONFIG_MLX4_INFINIBAND=m
CONFIG_INFINIBAND_IPOIB=m
CONFIG_INFINIBAND_IPOIB_CM=y
@@ -430,11 +443,12 @@ CONFIG_NLS_KOI8_U=m
CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_MUTEXES=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_LATENCYTOP=y
CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_IRQSOFF_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_DEBUG_STACKOVERFLOW=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 80bc5de7ee1..96a58b70970 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -149,6 +149,7 @@ CONFIG_SCSI_CXGB3_ISCSI=m
CONFIG_SCSI_CXGB4_ISCSI=m
CONFIG_SCSI_BNX2_ISCSI=m
CONFIG_BE2ISCSI=m
+CONFIG_SCSI_MPT2SAS=m
CONFIG_SCSI_IBMVSCSI=y
CONFIG_SCSI_IBMVFC=m
CONFIG_SCSI_SYM53C8XX_2=y
@@ -320,6 +321,8 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_DETECT_HUNG_TASK=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_LATENCYTOP=y
CONFIG_SYSCTL_SYSCALL_CHECK=y
diff --git a/arch/powerpc/include/asm/8253pit.h b/arch/powerpc/include/asm/8253pit.h
deleted file mode 100644
index a71c9c1455a..00000000000
--- a/arch/powerpc/include/asm/8253pit.h
+++ /dev/null
@@ -1,3 +0,0 @@
-/*
- * 8253/8254 Programmable Interval Timer
- */
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index b8f152ece02..e2a4c26ad37 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -181,21 +181,21 @@ static __inline__ int atomic_dec_return(atomic_t *v)
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
*/
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int t;
__asm__ __volatile__ (
PPC_RELEASE_BARRIER
-"1: lwarx %0,0,%1 # atomic_add_unless\n\
+"1: lwarx %0,0,%1 # __atomic_add_unless\n\
cmpw 0,%0,%3 \n\
beq- 2f \n\
add %0,%2,%0 \n"
@@ -209,10 +209,9 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
: "r" (&v->counter), "r" (a), "r" (u)
: "cc", "memory");
- return t != u;
+ return t;
}
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
@@ -444,7 +443,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
*/
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
@@ -452,7 +451,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
__asm__ __volatile__ (
PPC_RELEASE_BARRIER
-"1: ldarx %0,0,%1 # atomic_add_unless\n\
+"1: ldarx %0,0,%1 # __atomic_add_unless\n\
cmpd 0,%0,%3 \n\
beq- 2f \n\
add %0,%2,%0 \n"
@@ -470,11 +469,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-#else /* __powerpc64__ */
-#include <asm-generic/atomic64.h>
-
#endif /* __powerpc64__ */
-#include <asm-generic/atomic-long.h>
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */
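
The __atomic_add_unless() change above alters the contract from "non-zero if the add happened" to "the value observed before the add", so callers compare the result against @u themselves. A plain-C behavioural model (illustrative only; the kernel code uses lwarx/stwcx. loops) plus the inc-not-zero pattern layered on top:

#include <stdio.h>

/* Behavioural model only, no atomicity. */
static int model_atomic_add_unless(int *v, int a, int u)
{
        int old = *v;

        if (old != u)
                *v = old + a;   /* add only when the current value is not u */
        return old;             /* return what was seen before the add */
}

/* Rebuilding the old boolean result from the new return value. */
static int model_atomic_inc_not_zero(int *v)
{
        return model_atomic_add_unless(v, 1, 0) != 0;
}

int main(void)
{
        int counter = 0;
        int ok;

        ok = model_atomic_inc_not_zero(&counter);
        printf("ok=%d counter=%d\n", ok, counter);      /* ok=0 counter=0 */

        counter = 3;
        ok = model_atomic_inc_not_zero(&counter);
        printf("ok=%d counter=%d\n", ok, counter);      /* ok=1 counter=4 */
        return 0;
}
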
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index f18c6d9b951..e137afcc10f 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -327,10 +327,7 @@ unsigned long find_next_bit_le(const void *addr,
unsigned long size, unsigned long offset);
/* Bitmap functions for the ext2 filesystem */
-#define ext2_set_bit_atomic(lock, nr, addr) \
- test_and_set_bit_le((nr), (unsigned long*)addr)
-#define ext2_clear_bit_atomic(lock, nr, addr) \
- test_and_clear_bit_le((nr), (unsigned long*)addr)
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
#include <asm-generic/bitops/sched.h>
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index c0d842cfd01..e30442c539c 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -179,8 +179,9 @@ extern const char *powerpc_base_platform;
#define LONG_ASM_CONST(x) 0
#endif
-
-#define CPU_FTR_HVMODE_206 LONG_ASM_CONST(0x0000000800000000)
+#define CPU_FTR_HVMODE LONG_ASM_CONST(0x0000000200000000)
+#define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000400000000)
+#define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000800000000)
#define CPU_FTR_CFAR LONG_ASM_CONST(0x0000001000000000)
#define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000)
#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000)
@@ -401,9 +402,10 @@ extern const char *powerpc_base_platform;
CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ | \
CPU_FTR_STCX_CHECKS_ADDRESS)
#define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
- CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS)
+ CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \
+ CPU_FTR_HVMODE)
#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -417,13 +419,13 @@ extern const char *powerpc_base_platform;
CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR)
#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_HVMODE_206 |\
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
- CPU_FTR_ICSWX | CPU_FTR_CFAR)
+ CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index 9c70d0ca96d..efa74ac44a3 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -18,7 +18,7 @@
#include <asm/ppc-opcode.h>
#define PPC_DBELL_MSG_BRDCAST (0x04000000)
-#define PPC_DBELL_TYPE(x) (((x) & 0xf) << 28)
+#define PPC_DBELL_TYPE(x) (((x) & 0xf) << (63-36))
enum ppc_dbell {
PPC_DBELL = 0, /* doorbell */
PPC_DBELL_CRIT = 1, /* critical doorbell */
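
The new shift amount is plain arithmetic written to expose the bit numbering: (63 - 36) is 27, one bit lower than the previous shift of 28, with the subtraction presumably spelling out the field position in the Power ISA's MSB-is-bit-0 numbering of the 64-bit doorbell message. A quick check with illustrative constants only:

#include <stdio.h>

#define PPC_DBELL_TYPE_OLD(x)   (((x) & 0xf) << 28)
#define PPC_DBELL_TYPE_NEW(x)   (((x) & 0xf) << (63 - 36))     /* shift by 27 */

int main(void)
{
        /* For type 1 (PPC_DBELL_CRIT): old 0x10000000, new 0x08000000. */
        printf("old=0x%08lx new=0x%08lx\n",
               (unsigned long)PPC_DBELL_TYPE_OLD(1),
               (unsigned long)PPC_DBELL_TYPE_NEW(1));
        return 0;
}
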
diff --git a/arch/powerpc/include/asm/ehv_pic.h b/arch/powerpc/include/asm/ehv_pic.h
new file mode 100644
index 00000000000..a9e1f4f796f
--- /dev/null
+++ b/arch/powerpc/include/asm/ehv_pic.h
@@ -0,0 +1,40 @@
+/*
+ * EHV_PIC private definitions and structure.
+ *
+ * Copyright 2008-2010 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+#ifndef __EHV_PIC_H__
+#define __EHV_PIC_H__
+
+#include <linux/irq.h>
+
+#define NR_EHV_PIC_INTS 1024
+
+#define EHV_PIC_INFO(name) EHV_PIC_##name
+
+#define EHV_PIC_VECPRI_POLARITY_NEGATIVE 0
+#define EHV_PIC_VECPRI_POLARITY_POSITIVE 1
+#define EHV_PIC_VECPRI_SENSE_EDGE 0
+#define EHV_PIC_VECPRI_SENSE_LEVEL 0x2
+#define EHV_PIC_VECPRI_POLARITY_MASK 0x1
+#define EHV_PIC_VECPRI_SENSE_MASK 0x2
+
+struct ehv_pic {
+ /* The remapper for this EHV_PIC */
+ struct irq_host *irqhost;
+
+ /* The "linux" controller struct */
+ struct irq_chip hc_irq;
+
+ /* core int flag */
+ int coreint_flag;
+};
+
+void ehv_pic_init(void);
+unsigned int ehv_pic_get_irq(void);
+
+#endif /* __EHV_PIC_H__ */
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 2b917c69ed1..3bf9cca3514 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -267,7 +267,7 @@ extern int ucache_bsize;
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
-#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b);
+#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b)
/* 1GB for 64bit, 8MB for 32bit */
#define STACK_RND_MASK (is_32bit_task() ? \
@@ -298,7 +298,7 @@ do { \
NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \
NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \
NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \
- VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base) \
+ VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base); \
} while (0)
/* PowerPC64 relocations defined by the ABIs */
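
Moving the semicolon out of VDSO_AUX_ENT() and onto its use site follows the usual statement-macro rule. A tiny illustration, with made-up macro names, of why a baked-in semicolon is fragile:

#include <stdio.h>

#define EMIT_GOOD(x)    printf("%d\n", (x))     /* caller writes the ';' */
/*
 * #define EMIT_BAD(x)  printf("%d\n", (x));    -- with the ';' baked in,
 *     if (cond) EMIT_BAD(1); else EMIT_BAD(2);
 * expands to "printf(...);;" and the stray empty statement orphans the
 * 'else', which no longer compiles.
 */

int main(int argc, char **argv)
{
        (void)argv;
        if (argc > 1)
                EMIT_GOOD(1);
        else
                EMIT_GOOD(2);
        return 0;
}
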
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h
index 45921672b97..63f2a22e995 100644
--- a/arch/powerpc/include/asm/emulated_ops.h
+++ b/arch/powerpc/include/asm/emulated_ops.h
@@ -18,7 +18,7 @@
#ifndef _ASM_POWERPC_EMULATED_OPS_H
#define _ASM_POWERPC_EMULATED_OPS_H
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/perf_event.h>
@@ -78,14 +78,14 @@ extern void ppc_warn_emulated_print(const char *type);
#define PPC_WARN_EMULATED(type, regs) \
do { \
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \
- 1, 0, regs, 0); \
+ 1, regs, 0); \
__PPC_WARN_EMULATED(type); \
} while (0)
#define PPC_WARN_ALIGNMENT(type, regs) \
do { \
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \
- 1, 0, regs, regs->dar); \
+ 1, regs, regs->dar); \
__PPC_WARN_EMULATED(type); \
} while (0)
diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h
new file mode 100644
index 00000000000..f3b0c2cc9fe
--- /dev/null
+++ b/arch/powerpc/include/asm/epapr_hcalls.h
@@ -0,0 +1,502 @@
+/*
+ * ePAPR hcall interface
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * This file is provided under a dual BSD/GPL license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* A "hypercall" is an "sc 1" instruction. This header file file provides C
+ * wrapper functions for the ePAPR hypervisor interface. It is inteded
+ * for use by Linux device drivers and other operating systems.
+ *
+ * The hypercalls are implemented as inline assembly, rather than assembly
+ * language functions in a .S file, for optimization. This lets
+ * the caller issue the hypercall instruction directly, improving both
+ * performance and memory footprint.
+ */
+
+#ifndef _EPAPR_HCALLS_H
+#define _EPAPR_HCALLS_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/byteorder.h>
+
+#define EV_BYTE_CHANNEL_SEND 1
+#define EV_BYTE_CHANNEL_RECEIVE 2
+#define EV_BYTE_CHANNEL_POLL 3
+#define EV_INT_SET_CONFIG 4
+#define EV_INT_GET_CONFIG 5
+#define EV_INT_SET_MASK 6
+#define EV_INT_GET_MASK 7
+#define EV_INT_IACK 9
+#define EV_INT_EOI 10
+#define EV_INT_SEND_IPI 11
+#define EV_INT_SET_TASK_PRIORITY 12
+#define EV_INT_GET_TASK_PRIORITY 13
+#define EV_DOORBELL_SEND 14
+#define EV_MSGSND 15
+#define EV_IDLE 16
+
+/* vendor ID: epapr */
+#define EV_LOCAL_VENDOR_ID 0 /* for private use */
+#define EV_EPAPR_VENDOR_ID 1
+#define EV_FSL_VENDOR_ID 2 /* Freescale Semiconductor */
+#define EV_IBM_VENDOR_ID 3 /* IBM */
+#define EV_GHS_VENDOR_ID 4 /* Green Hills Software */
+#define EV_ENEA_VENDOR_ID 5 /* Enea */
+#define EV_WR_VENDOR_ID 6 /* Wind River Systems */
+#define EV_AMCC_VENDOR_ID 7 /* Applied Micro Circuits */
+#define EV_KVM_VENDOR_ID 42 /* KVM */
+
+/* The max number of bytes that a byte channel can send or receive per call */
+#define EV_BYTE_CHANNEL_MAX_BYTES 16
+
+
+#define _EV_HCALL_TOKEN(id, num) (((id) << 16) | (num))
+#define EV_HCALL_TOKEN(hcall_num) _EV_HCALL_TOKEN(EV_EPAPR_VENDOR_ID, hcall_num)
+
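
Worked example of the token packing (standalone sketch, macros copied from above): the vendor ID occupies the upper 16 bits and the hcall number the lower 16, so EV_HCALL_TOKEN(EV_INT_SET_CONFIG) is (1 << 16) | 4 = 0x00010004.

#include <stdio.h>

#define EV_EPAPR_VENDOR_ID              1
#define EV_INT_SET_CONFIG               4
#define _EV_HCALL_TOKEN(id, num)        (((id) << 16) | (num))
#define EV_HCALL_TOKEN(hcall_num)       _EV_HCALL_TOKEN(EV_EPAPR_VENDOR_ID, hcall_num)

int main(void)
{
        printf("EV_HCALL_TOKEN(EV_INT_SET_CONFIG) = 0x%08x\n",
               EV_HCALL_TOKEN(EV_INT_SET_CONFIG));      /* 0x00010004 */
        return 0;
}
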
+/* epapr error codes */
+#define EV_EPERM 1 /* Operation not permitted */
+#define EV_ENOENT 2 /* Entry Not Found */
+#define EV_EIO 3 /* I/O error occurred */
+#define EV_EAGAIN 4 /* The operation had insufficient
+ * resources to complete and should be
+ * retried
+ */
+#define EV_ENOMEM 5 /* There was insufficient memory to
+ * complete the operation */
+#define EV_EFAULT 6 /* Bad guest address */
+#define EV_ENODEV 7 /* No such device */
+#define EV_EINVAL 8 /* An argument supplied to the hcall
+ was out of range or invalid */
+#define EV_INTERNAL 9 /* An internal error occurred */
+#define EV_CONFIG 10 /* A configuration error was detected */
+#define EV_INVALID_STATE 11 /* The object is in an invalid state */
+#define EV_UNIMPLEMENTED 12 /* Unimplemented hypercall */
+#define EV_BUFFER_OVERFLOW 13 /* Caller-supplied buffer too small */
+
+/*
+ * Hypercall register clobber list
+ *
+ * These macros are used to define the list of clobbered registers during a
+ * hypercall. Technically, registers r0 and r3-r12 are always clobbered,
+ * but the gcc inline assembly syntax does not allow us to specify registers
+ * on the clobber list that are also on the input/output list. Therefore,
+ * the list of clobbered registers depends on the number of register
+ * parameters ("+r" and "=r") passed to the hypercall.
+ *
+ * Each assembly block should use one of the HCALL_CLOBBERSx macros. As a
+ * general rule, 'x' is the number of parameters passed to the assembly
+ * block *except* for r11.
+ *
+ * If you're not sure, just use the smallest value of 'x' that does not
+ * generate a compilation error. Because these are static inline functions,
+ * the compiler will only check the clobber list for a function if you
+ * compile code that calls that function.
+ *
+ * r3 and r11 are not included in any clobber list because they are always
+ * listed as output registers.
+ *
+ * XER, CTR, and LR are currently listed as clobbers because it's uncertain
+ * whether they will be clobbered.
+ *
+ * Note that r11 can be used as an output parameter.
+*/
+
+/* List of common clobbered registers. Do not use this macro. */
+#define EV_HCALL_CLOBBERS "r0", "r12", "xer", "ctr", "lr", "cc"
+
+#define EV_HCALL_CLOBBERS8 EV_HCALL_CLOBBERS
+#define EV_HCALL_CLOBBERS7 EV_HCALL_CLOBBERS8, "r10"
+#define EV_HCALL_CLOBBERS6 EV_HCALL_CLOBBERS7, "r9"
+#define EV_HCALL_CLOBBERS5 EV_HCALL_CLOBBERS6, "r8"
+#define EV_HCALL_CLOBBERS4 EV_HCALL_CLOBBERS5, "r7"
+#define EV_HCALL_CLOBBERS3 EV_HCALL_CLOBBERS4, "r6"
+#define EV_HCALL_CLOBBERS2 EV_HCALL_CLOBBERS3, "r5"
+#define EV_HCALL_CLOBBERS1 EV_HCALL_CLOBBERS2, "r4"
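
To make the counting rule concrete, here is a minimal hypothetical wrapper (not part of the patch; the hcall number 99 is invented purely for illustration). It passes two registers besides r11 to the asm statement, so it pairs with EV_HCALL_CLOBBERS2:

static inline unsigned int ev_example_get_value(unsigned int handle,
						unsigned int *value)
{
	register uintptr_t r11 __asm__("r11");
	register uintptr_t r3 __asm__("r3");
	register uintptr_t r4 __asm__("r4");

	r11 = EV_HCALL_TOKEN(99);	/* hypothetical hcall number */
	r3 = handle;

	/* r3 and r4 are the two parameters (r11 is never counted),
	 * so EV_HCALL_CLOBBERS2 covers the remaining volatile registers.
	 */
	__asm__ __volatile__ ("sc 1"
		: "+r" (r11), "+r" (r3), "=r" (r4)
		: : EV_HCALL_CLOBBERS2
	);

	*value = r4;
	return r3;
}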
+
+
+/*
+ * We use "uintptr_t" to define a register because it's guaranteed to be a
+ * 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit
+ * platform.
+ *
+ * All registers are either input/output or output only. Registers that are
+ * initialized before making the hypercall are input/output. All
+ * input/output registers are represented with "+r". Output-only registers
+ * are represented with "=r". Do not specify any unused registers. The
+ * clobber list will tell the compiler that the hypercall modifies those
+ * registers, which is good enough.
+ */
+
+/**
+ * ev_int_set_config - configure the specified interrupt
+ * @interrupt: the interrupt number
+ * @config: configuration for this interrupt
+ * @priority: interrupt priority
+ * @destination: destination CPU number
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_set_config(unsigned int interrupt,
+ uint32_t config, unsigned int priority, uint32_t destination)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_SET_CONFIG);
+ r3 = interrupt;
+ r4 = config;
+ r5 = priority;
+ r6 = destination;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6)
+ : : EV_HCALL_CLOBBERS4
+ );
+
+ return r3;
+}
+
+/**
+ * ev_int_get_config - return the config of the specified interrupt
+ * @interrupt: the interrupt number
+ * @config: returned configuration for this interrupt
+ * @priority: returned interrupt priority
+ * @destination: returned destination CPU number
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_get_config(unsigned int interrupt,
+ uint32_t *config, unsigned int *priority, uint32_t *destination)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_GET_CONFIG);
+ r3 = interrupt;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5), "=r" (r6)
+ : : EV_HCALL_CLOBBERS4
+ );
+
+ *config = r4;
+ *priority = r5;
+ *destination = r6;
+
+ return r3;
+}
+
+/**
+ * ev_int_set_mask - sets the mask for the specified interrupt source
+ * @interrupt: the interrupt number
+ * @mask: 0=enable interrupts, 1=disable interrupts
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_set_mask(unsigned int interrupt,
+ unsigned int mask)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_SET_MASK);
+ r3 = interrupt;
+ r4 = mask;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "+r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ return r3;
+}
+
+/**
+ * ev_int_get_mask - returns the mask for the specified interrupt source
+ * @interrupt: the interrupt number
+ * @mask: returned mask for this interrupt (0=enabled, 1=disabled)
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_get_mask(unsigned int interrupt,
+ unsigned int *mask)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_GET_MASK);
+ r3 = interrupt;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "=r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ *mask = r4;
+
+ return r3;
+}
+
+/**
+ * ev_int_eoi - signal the end of interrupt processing
+ * @interrupt: the interrupt number
+ *
+ * This function signals the end of processing for the specified
+ * interrupt, which must be the interrupt currently in service. By
+ * definition, this is also the highest-priority interrupt.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_eoi(unsigned int interrupt)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_EOI);
+ r3 = interrupt;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/**
+ * ev_byte_channel_send - send characters to a byte stream
+ * @handle: byte stream handle
+ * @count: (input) num of chars to send, (output) num chars sent
+ * @buffer: pointer to a 16-byte buffer
+ *
+ * @buffer must be at least 16 bytes long, because all 16 bytes will be
+ * read from memory into registers, even if count < 16.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_byte_channel_send(unsigned int handle,
+ unsigned int *count, const char buffer[EV_BYTE_CHANNEL_MAX_BYTES])
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r7 __asm__("r7");
+ register uintptr_t r8 __asm__("r8");
+ const uint32_t *p = (const uint32_t *) buffer;
+
+ r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_SEND);
+ r3 = handle;
+ r4 = *count;
+ r5 = be32_to_cpu(p[0]);
+ r6 = be32_to_cpu(p[1]);
+ r7 = be32_to_cpu(p[2]);
+ r8 = be32_to_cpu(p[3]);
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3),
+ "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), "+r" (r8)
+ : : EV_HCALL_CLOBBERS6
+ );
+
+ *count = r4;
+
+ return r3;
+}
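
As a hedged usage sketch (not part of the patch), a caller that wants to send a short string must still hand the hypercall a full 16-byte buffer; a local EV_BYTE_CHANNEL_MAX_BYTES array takes care of that. The handle value and helper name are hypothetical, and <linux/kernel.h> and <linux/string.h> are assumed for min_t() and memcpy():

static unsigned int example_bc_puts(unsigned int handle,
				    const char *s, unsigned int len)
{
	/* full-sized buffer: the hcall always reads 16 bytes */
	char buf[EV_BYTE_CHANNEL_MAX_BYTES] = { 0 };
	unsigned int count = min_t(unsigned int, len, EV_BYTE_CHANNEL_MAX_BYTES);

	memcpy(buf, s, count);

	/* on return, count holds the number of bytes actually sent */
	return ev_byte_channel_send(handle, &count, buf);
}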
+
+/**
+ * ev_byte_channel_receive - fetch characters from a byte channel
+ * @handle: byte channel handle
+ * @count: (input) max num of chars to receive, (output) num chars received
+ * @buffer: pointer to a 16-byte buffer
+ *
+ * The size of @buffer must be at least 16 bytes, even if you request fewer
+ * than 16 characters, because we always write 16 bytes to @buffer. This is
+ * for performance reasons.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_byte_channel_receive(unsigned int handle,
+ unsigned int *count, char buffer[EV_BYTE_CHANNEL_MAX_BYTES])
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r7 __asm__("r7");
+ register uintptr_t r8 __asm__("r8");
+ uint32_t *p = (uint32_t *) buffer;
+
+ r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_RECEIVE);
+ r3 = handle;
+ r4 = *count;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "+r" (r4),
+ "=r" (r5), "=r" (r6), "=r" (r7), "=r" (r8)
+ : : EV_HCALL_CLOBBERS6
+ );
+
+ *count = r4;
+ p[0] = cpu_to_be32(r5);
+ p[1] = cpu_to_be32(r6);
+ p[2] = cpu_to_be32(r7);
+ p[3] = cpu_to_be32(r8);
+
+ return r3;
+}
+
+/**
+ * ev_byte_channel_poll - returns the status of the byte channel buffers
+ * @handle: byte channel handle
+ * @rx_count: returned count of bytes in receive queue
+ * @tx_count: returned count of free space in transmit queue
+ *
+ * This function reports the amount of data in the receive queue (i.e. the
+ * number of bytes you can read), and the amount of free space in the transmit
+ * queue (i.e. the number of bytes you can write).
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_byte_channel_poll(unsigned int handle,
+ unsigned int *rx_count, unsigned int *tx_count)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+
+ r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_POLL);
+ r3 = handle;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5)
+ : : EV_HCALL_CLOBBERS3
+ );
+
+ *rx_count = r4;
+ *tx_count = r5;
+
+ return r3;
+}
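
A possible way to combine the two calls above (illustration only, not part of the patch): poll for pending data and drain it 16 bytes at a time. Error handling is deliberately minimal and the helper name is invented:

static void example_bc_drain(unsigned int handle)
{
	char buf[EV_BYTE_CHANNEL_MAX_BYTES];
	unsigned int rx, tx, count;

	/* keep reading while the hypervisor reports pending bytes */
	while (!ev_byte_channel_poll(handle, &rx, &tx) && rx) {
		count = min_t(unsigned int, rx, EV_BYTE_CHANNEL_MAX_BYTES);
		if (ev_byte_channel_receive(handle, &count, buf))
			break;
		/* ... consume 'count' bytes from buf here ... */
	}
}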
+
+/**
+ * ev_int_iack - acknowledge an interrupt
+ * @handle: handle to the target interrupt controller
+ * @vector: returned interrupt vector
+ *
+ * If @handle is zero, the function returns the next interrupt source
+ * number to be handled, irrespective of the hierarchy or cascading
+ * of interrupt controllers. If non-zero, @handle specifies the
+ * interrupt controller that is the target of the acknowledge.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_iack(unsigned int handle,
+ unsigned int *vector)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_IACK);
+ r3 = handle;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "=r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ *vector = r4;
+
+ return r3;
+}
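
For illustration only (not part of the patch), a minimal acknowledge/EOI cycle might look like the sketch below. It assumes the vector returned by ev_int_iack() can be fed straight back to ev_int_eoi(); a real interrupt controller driver would translate vectors to Linux irq numbers in between:

static void example_handle_one_interrupt(void)
{
	unsigned int vector;

	/* handle 0: take whatever interrupt is pending */
	if (ev_int_iack(0, &vector))
		return;

	/* ... dispatch the handler registered for 'vector' ... */

	ev_int_eoi(vector);
}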
+
+/**
+ * ev_doorbell_send - send a doorbell to another partition
+ * @handle: doorbell send handle
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_doorbell_send(unsigned int handle)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = EV_HCALL_TOKEN(EV_DOORBELL_SEND);
+ r3 = handle;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/**
+ * ev_idle - wait for next interrupt on this core
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_idle(void)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = EV_HCALL_TOKEN(EV_IDLE);
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "=r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+#endif
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
index 6d53f311d94..ac13addb849 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -48,30 +48,33 @@
#define EX_R14 (4 * 8)
#define EX_R15 (5 * 8)
-/* The TLB miss exception uses different slots */
+/*
+ * The TLB miss exception uses different slots.
+ *
+ * The bolted variant uses only the first six fields,
+ * which in combination with pgd and kernel_pgd fits in
+ * one 64-byte cache line.
+ */
#define EX_TLB_R10 ( 0 * 8)
#define EX_TLB_R11 ( 1 * 8)
-#define EX_TLB_R12 ( 2 * 8)
-#define EX_TLB_R13 ( 3 * 8)
-#define EX_TLB_R14 ( 4 * 8)
-#define EX_TLB_R15 ( 5 * 8)
-#define EX_TLB_R16 ( 6 * 8)
-#define EX_TLB_CR ( 7 * 8)
+#define EX_TLB_R14 ( 2 * 8)
+#define EX_TLB_R15 ( 3 * 8)
+#define EX_TLB_R16 ( 4 * 8)
+#define EX_TLB_CR ( 5 * 8)
+#define EX_TLB_R12 ( 6 * 8)
+#define EX_TLB_R13 ( 7 * 8)
#define EX_TLB_DEAR ( 8 * 8) /* Level 0 and 2 only */
#define EX_TLB_ESR ( 9 * 8) /* Level 0 and 2 only */
#define EX_TLB_SRR0 (10 * 8)
#define EX_TLB_SRR1 (11 * 8)
-#define EX_TLB_MMUCR0 (12 * 8) /* Level 0 */
-#define EX_TLB_MAS1 (12 * 8) /* Level 0 */
-#define EX_TLB_MAS2 (13 * 8) /* Level 0 */
#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
-#define EX_TLB_R8 (14 * 8)
-#define EX_TLB_R9 (15 * 8)
-#define EX_TLB_LR (16 * 8)
-#define EX_TLB_SIZE (17 * 8)
+#define EX_TLB_R8 (12 * 8)
+#define EX_TLB_R9 (13 * 8)
+#define EX_TLB_LR (14 * 8)
+#define EX_TLB_SIZE (15 * 8)
#else
-#define EX_TLB_SIZE (14 * 8)
+#define EX_TLB_SIZE (12 * 8)
#endif
#define START_EXCEPTION(label) \
@@ -168,6 +171,16 @@ exc_##label##_book3e:
ld r9,EX_TLB_R9(r12); \
ld r8,EX_TLB_R8(r12); \
mtlr r16;
+#define TLB_MISS_PROLOG_STATS_BOLTED \
+ mflr r10; \
+ std r8,PACA_EXTLB+EX_TLB_R8(r13); \
+ std r9,PACA_EXTLB+EX_TLB_R9(r13); \
+ std r10,PACA_EXTLB+EX_TLB_LR(r13);
+#define TLB_MISS_RESTORE_STATS_BOLTED \
+ ld r16,PACA_EXTLB+EX_TLB_LR(r13); \
+ ld r9,PACA_EXTLB+EX_TLB_R9(r13); \
+ ld r8,PACA_EXTLB+EX_TLB_R8(r13); \
+ mtlr r16;
#define TLB_MISS_STATS_D(name) \
addi r9,r13,MMSTAT_DSTATS+name; \
bl .tlb_stat_inc;
@@ -183,17 +196,20 @@ exc_##label##_book3e:
61: addi r9,r13,MMSTAT_ISTATS+name; \
62: bl .tlb_stat_inc;
#define TLB_MISS_STATS_SAVE_INFO \
- std r14,EX_TLB_ESR(r12); /* save ESR */ \
-
-
+ std r14,EX_TLB_ESR(r12); /* save ESR */
+#define TLB_MISS_STATS_SAVE_INFO_BOLTED \
+ std r14,PACA_EXTLB+EX_TLB_ESR(r13); /* save ESR */
#else
#define TLB_MISS_PROLOG_STATS
#define TLB_MISS_RESTORE_STATS
+#define TLB_MISS_PROLOG_STATS_BOLTED
+#define TLB_MISS_RESTORE_STATS_BOLTED
#define TLB_MISS_STATS_D(name)
#define TLB_MISS_STATS_I(name)
#define TLB_MISS_STATS_X(name)
#define TLB_MISS_STATS_Y(name)
#define TLB_MISS_STATS_SAVE_INFO
+#define TLB_MISS_STATS_SAVE_INFO_BOLTED
#endif
#define SET_IVOR(vector_number, vector_offset) \
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index f5dfe3411f6..8057f4f6980 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -61,19 +61,22 @@
#define EXC_HV H
#define EXC_STD
-#define EXCEPTION_PROLOG_1(area) \
+#define __EXCEPTION_PROLOG_1(area, extra, vec) \
GET_PACA(r13); \
std r9,area+EX_R9(r13); /* save r9 - r12 */ \
std r10,area+EX_R10(r13); \
- std r11,area+EX_R11(r13); \
- std r12,area+EX_R12(r13); \
BEGIN_FTR_SECTION_NESTED(66); \
mfspr r10,SPRN_CFAR; \
std r10,area+EX_CFAR(r13); \
END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
- GET_SCRATCH0(r9); \
- std r9,area+EX_R13(r13); \
- mfcr r9
+ mfcr r9; \
+ extra(vec); \
+ std r11,area+EX_R11(r13); \
+ std r12,area+EX_R12(r13); \
+ GET_SCRATCH0(r10); \
+ std r10,area+EX_R13(r13)
+#define EXCEPTION_PROLOG_1(area, extra, vec) \
+ __EXCEPTION_PROLOG_1(area, extra, vec)
#define __EXCEPTION_PROLOG_PSERIES_1(label, h) \
ld r12,PACAKBASE(r13); /* get high part of &label */ \
@@ -85,13 +88,65 @@
mtspr SPRN_##h##SRR1,r10; \
h##rfid; \
b . /* prevent speculative execution */
-#define EXCEPTION_PROLOG_PSERIES_1(label, h) \
+#define EXCEPTION_PROLOG_PSERIES_1(label, h) \
__EXCEPTION_PROLOG_PSERIES_1(label, h)
-#define EXCEPTION_PROLOG_PSERIES(area, label, h) \
- EXCEPTION_PROLOG_1(area); \
+#define EXCEPTION_PROLOG_PSERIES(area, label, h, extra, vec) \
+ EXCEPTION_PROLOG_1(area, extra, vec); \
EXCEPTION_PROLOG_PSERIES_1(label, h);
+#define __KVMTEST(n) \
+ lbz r10,HSTATE_IN_GUEST(r13); \
+ cmpwi r10,0; \
+ bne do_kvm_##n
+
+#define __KVM_HANDLER(area, h, n) \
+do_kvm_##n: \
+ ld r10,area+EX_R10(r13); \
+ stw r9,HSTATE_SCRATCH1(r13); \
+ ld r9,area+EX_R9(r13); \
+ std r12,HSTATE_SCRATCH0(r13); \
+ li r12,n; \
+ b kvmppc_interrupt
+
+#define __KVM_HANDLER_SKIP(area, h, n) \
+do_kvm_##n: \
+ cmpwi r10,KVM_GUEST_MODE_SKIP; \
+ ld r10,area+EX_R10(r13); \
+ beq 89f; \
+ stw r9,HSTATE_SCRATCH1(r13); \
+ ld r9,area+EX_R9(r13); \
+ std r12,HSTATE_SCRATCH0(r13); \
+ li r12,n; \
+ b kvmppc_interrupt; \
+89: mtocrf 0x80,r9; \
+ ld r9,area+EX_R9(r13); \
+ b kvmppc_skip_##h##interrupt
+
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#define KVMTEST(n) __KVMTEST(n)
+#define KVM_HANDLER(area, h, n) __KVM_HANDLER(area, h, n)
+#define KVM_HANDLER_SKIP(area, h, n) __KVM_HANDLER_SKIP(area, h, n)
+
+#else
+#define KVMTEST(n)
+#define KVM_HANDLER(area, h, n)
+#define KVM_HANDLER_SKIP(area, h, n)
+#endif
+
+#ifdef CONFIG_KVM_BOOK3S_PR
+#define KVMTEST_PR(n) __KVMTEST(n)
+#define KVM_HANDLER_PR(area, h, n) __KVM_HANDLER(area, h, n)
+#define KVM_HANDLER_PR_SKIP(area, h, n) __KVM_HANDLER_SKIP(area, h, n)
+
+#else
+#define KVMTEST_PR(n)
+#define KVM_HANDLER_PR(area, h, n)
+#define KVM_HANDLER_PR_SKIP(area, h, n)
+#endif
+
+#define NOTEST(n)
+
/*
* The common exception prolog is used for all except a few exceptions
* such as a segment miss on a kernel address. We have to be prepared
@@ -164,57 +219,58 @@
.globl label##_pSeries; \
label##_pSeries: \
HMT_MEDIUM; \
- DO_KVM vec; \
SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, EXC_STD)
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
+ EXC_STD, KVMTEST_PR, vec)
#define STD_EXCEPTION_HV(loc, vec, label) \
. = loc; \
.globl label##_hv; \
label##_hv: \
HMT_MEDIUM; \
- DO_KVM vec; \
- SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, EXC_HV)
+ SET_SCRATCH0(r13); /* save r13 */ \
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
+ EXC_HV, KVMTEST, vec)
-#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h) \
- HMT_MEDIUM; \
- DO_KVM vec; \
- SET_SCRATCH0(r13); /* save r13 */ \
- GET_PACA(r13); \
- std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \
- std r10,PACA_EXGEN+EX_R10(r13); \
+#define __SOFTEN_TEST(h) \
lbz r10,PACASOFTIRQEN(r13); \
- mfcr r9; \
cmpwi r10,0; \
- beq masked_##h##interrupt; \
- GET_SCRATCH0(r10); \
- std r10,PACA_EXGEN+EX_R13(r13); \
- std r11,PACA_EXGEN+EX_R11(r13); \
- std r12,PACA_EXGEN+EX_R12(r13); \
- ld r12,PACAKBASE(r13); /* get high part of &label */ \
- ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
- mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
- LOAD_HANDLER(r12,label##_common) \
- mtspr SPRN_##h##SRR0,r12; \
- mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
- mtspr SPRN_##h##SRR1,r10; \
- h##rfid; \
- b . /* prevent speculative execution */
-#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h) \
- __MASKABLE_EXCEPTION_PSERIES(vec, label, h)
+ beq masked_##h##interrupt
+#define _SOFTEN_TEST(h) __SOFTEN_TEST(h)
+
+#define SOFTEN_TEST_PR(vec) \
+ KVMTEST_PR(vec); \
+ _SOFTEN_TEST(EXC_STD)
+
+#define SOFTEN_TEST_HV(vec) \
+ KVMTEST(vec); \
+ _SOFTEN_TEST(EXC_HV)
+
+#define SOFTEN_TEST_HV_201(vec) \
+ KVMTEST(vec); \
+ _SOFTEN_TEST(EXC_STD)
+
+#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
+ HMT_MEDIUM; \
+ SET_SCRATCH0(r13); /* save r13 */ \
+ __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \
+ EXCEPTION_PROLOG_PSERIES_1(label##_common, h);
+#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
+ __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra)
#define MASKABLE_EXCEPTION_PSERIES(loc, vec, label) \
. = loc; \
.globl label##_pSeries; \
label##_pSeries: \
- _MASKABLE_EXCEPTION_PSERIES(vec, label, EXC_STD)
+ _MASKABLE_EXCEPTION_PSERIES(vec, label, \
+ EXC_STD, SOFTEN_TEST_PR)
#define MASKABLE_EXCEPTION_HV(loc, vec, label) \
. = loc; \
.globl label##_hv; \
label##_hv: \
- _MASKABLE_EXCEPTION_PSERIES(vec, label, EXC_HV)
+ _MASKABLE_EXCEPTION_PSERIES(vec, label, \
+ EXC_HV, SOFTEN_TEST_HV)
#ifdef CONFIG_PPC_ISERIES
#define DISABLE_INTS \
diff --git a/arch/powerpc/include/asm/fsl_hcalls.h b/arch/powerpc/include/asm/fsl_hcalls.h
new file mode 100644
index 00000000000..922d9b5fe3d
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_hcalls.h
@@ -0,0 +1,655 @@
+/*
+ * Freescale hypervisor call interface
+ *
+ * Copyright 2008-2010 Freescale Semiconductor, Inc.
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * This file is provided under a dual BSD/GPL license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _FSL_HCALLS_H
+#define _FSL_HCALLS_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/byteorder.h>
+#include <asm/epapr_hcalls.h>
+
+#define FH_API_VERSION 1
+
+#define FH_ERR_GET_INFO 1
+#define FH_PARTITION_GET_DTPROP 2
+#define FH_PARTITION_SET_DTPROP 3
+#define FH_PARTITION_RESTART 4
+#define FH_PARTITION_GET_STATUS 5
+#define FH_PARTITION_START 6
+#define FH_PARTITION_STOP 7
+#define FH_PARTITION_MEMCPY 8
+#define FH_DMA_ENABLE 9
+#define FH_DMA_DISABLE 10
+#define FH_SEND_NMI 11
+#define FH_VMPIC_GET_MSIR 12
+#define FH_SYSTEM_RESET 13
+#define FH_GET_CORE_STATE 14
+#define FH_ENTER_NAP 15
+#define FH_EXIT_NAP 16
+#define FH_CLAIM_DEVICE 17
+#define FH_PARTITION_STOP_DMA 18
+
+/* vendor ID: Freescale Semiconductor */
+#define FH_HCALL_TOKEN(num) _EV_HCALL_TOKEN(EV_FSL_VENDOR_ID, num)
+
+/*
+ * We use "uintptr_t" to define a register because it's guaranteed to be a
+ * 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit
+ * platform.
+ *
+ * All registers are either input/output or output only. Registers that are
+ * initialized before making the hypercall are input/output. All
+ * input/output registers are represented with "+r". Output-only registers
+ * are represented with "=r". Do not specify any unused registers. The
+ * clobber list will tell the compiler that the hypercall modifies those
+ * registers, which is good enough.
+ */
+
+/**
+ * fh_send_nmi - send NMI to virtual cpu(s).
+ * @vcpu_mask: send NMI to virtual cpu(s) specified by this mask.
+ *
+ * Returns 0 for success, or EINVAL for invalid vcpu_mask.
+ */
+static inline unsigned int fh_send_nmi(unsigned int vcpu_mask)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_SEND_NMI);
+ r3 = vcpu_mask;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/* Arbitrary limits to avoid excessive memory allocation in hypervisor */
+#define FH_DTPROP_MAX_PATHLEN 4096
+#define FH_DTPROP_MAX_PROPLEN 32768
+
+/**
+ * fh_partition_get_dtprop - get a property from a guest device tree.
+ * @handle: handle of partition whose device tree is to be accessed
+ * @dtpath_addr: physical address of device tree path to access
+ * @propname_addr: physical address of name of property
+ * @propvalue_addr: physical address of property value buffer
+ * @propvalue_len: length of buffer on entry, length of property on return
+ *
+ * Returns zero on success, non-zero on error.
+ */
+static inline unsigned int fh_partition_get_dtprop(int handle,
+ uint64_t dtpath_addr,
+ uint64_t propname_addr,
+ uint64_t propvalue_addr,
+ uint32_t *propvalue_len)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r7 __asm__("r7");
+ register uintptr_t r8 __asm__("r8");
+ register uintptr_t r9 __asm__("r9");
+ register uintptr_t r10 __asm__("r10");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_GET_DTPROP);
+ r3 = handle;
+
+#ifdef CONFIG_PHYS_64BIT
+ r4 = dtpath_addr >> 32;
+ r6 = propname_addr >> 32;
+ r8 = propvalue_addr >> 32;
+#else
+ r4 = 0;
+ r6 = 0;
+ r8 = 0;
+#endif
+ r5 = (uint32_t)dtpath_addr;
+ r7 = (uint32_t)propname_addr;
+ r9 = (uint32_t)propvalue_addr;
+ r10 = *propvalue_len;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11),
+ "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7),
+ "+r" (r8), "+r" (r9), "+r" (r10)
+ : : EV_HCALL_CLOBBERS8
+ );
+
+ *propvalue_len = r4;
+ return r3;
+}
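
A hedged usage sketch (not part of the patch): the path, property name and value buffer must be supplied as guest physical addresses, which is why virt_to_phys() (from <asm/io.h>) appears below. The node and property names are hypothetical, and the on-stack strings are used purely for brevity:

static int example_read_dtprop(int handle, void *buf, uint32_t buflen)
{
	char path[] = "/cpus/cpu@0";		/* hypothetical node */
	char prop[] = "clock-frequency";	/* hypothetical property */
	uint32_t len = buflen;

	if (fh_partition_get_dtprop(handle, virt_to_phys(path),
				    virt_to_phys(prop), virt_to_phys(buf),
				    &len))
		return -EIO;

	return len;	/* actual length of the property */
}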
+
+/**
+ * fh_partition_set_dtprop - set a property in a guest device tree.
+ * @handle: handle of partition whose device tree is to be accessed
+ * @dtpath_addr: physical address of device tree path to access
+ * @propname_addr: physical address of name of property
+ * @propvalue_addr: physical address of property value
+ * @propvalue_len: length of property
+ *
+ * Returns zero on success, non-zero on error.
+ */
+static inline unsigned int fh_partition_set_dtprop(int handle,
+ uint64_t dtpath_addr,
+ uint64_t propname_addr,
+ uint64_t propvalue_addr,
+ uint32_t propvalue_len)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r8 __asm__("r8");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r7 __asm__("r7");
+ register uintptr_t r9 __asm__("r9");
+ register uintptr_t r10 __asm__("r10");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_SET_DTPROP);
+ r3 = handle;
+
+#ifdef CONFIG_PHYS_64BIT
+ r4 = dtpath_addr >> 32;
+ r6 = propname_addr >> 32;
+ r8 = propvalue_addr >> 32;
+#else
+ r4 = 0;
+ r6 = 0;
+ r8 = 0;
+#endif
+ r5 = (uint32_t)dtpath_addr;
+ r7 = (uint32_t)propname_addr;
+ r9 = (uint32_t)propvalue_addr;
+ r10 = propvalue_len;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11),
+ "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7),
+ "+r" (r8), "+r" (r9), "+r" (r10)
+ : : EV_HCALL_CLOBBERS8
+ );
+
+ return r3;
+}
+
+/**
+ * fh_partition_restart - reboot the current partition
+ * @partition: partition ID
+ *
+ * Returns an error code if reboot failed. Does not return if it succeeds.
+ */
+static inline unsigned int fh_partition_restart(unsigned int partition)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_RESTART);
+ r3 = partition;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+#define FH_PARTITION_STOPPED 0
+#define FH_PARTITION_RUNNING 1
+#define FH_PARTITION_STARTING 2
+#define FH_PARTITION_STOPPING 3
+#define FH_PARTITION_PAUSING 4
+#define FH_PARTITION_PAUSED 5
+#define FH_PARTITION_RESUMING 6
+
+/**
+ * fh_partition_get_status - gets the status of a partition
+ * @partition: partition ID
+ * @status: returned status code
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_get_status(unsigned int partition,
+ unsigned int *status)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_GET_STATUS);
+ r3 = partition;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "=r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ *status = r4;
+
+ return r3;
+}
+
+/**
+ * fh_partition_start - boots and starts execution of the specified partition
+ * @partition: partition ID
+ * @entry_point: guest physical address to start execution
+ *
+ * The hypervisor creates a 1-to-1 virtual/physical IMA mapping, so at boot
+ * time, guest physical addresses are the same as guest virtual addresses.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_start(unsigned int partition,
+ uint32_t entry_point, int load)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_START);
+ r3 = partition;
+ r4 = entry_point;
+ r5 = load;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5)
+ : : EV_HCALL_CLOBBERS3
+ );
+
+ return r3;
+}
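
One plausible caller (a sketch, not part of the patch): wait for the partition to reach FH_PARTITION_STOPPED and then start it again. The entry point of 0 and load flag of 1 are assumptions made for the example, and <linux/delay.h> is assumed for msleep():

static int example_restart_partition(unsigned int partition)
{
	unsigned int status;

	/* wait (politely) until the partition has fully stopped */
	do {
		if (fh_partition_get_status(partition, &status))
			return -EIO;
		if (status != FH_PARTITION_STOPPED)
			msleep(10);
	} while (status != FH_PARTITION_STOPPED);

	/* entry point 0, load flag 1: illustrative values only */
	return fh_partition_start(partition, 0, 1) ? -EIO : 0;
}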
+
+/**
+ * fh_partition_stop - stops another partition
+ * @partition: partition ID
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_stop(unsigned int partition)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_STOP);
+ r3 = partition;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/**
+ * struct fh_sg_list: definition of the fh_partition_memcpy S/G list
+ * @source: guest physical address to copy from
+ * @target: guest physical address to copy to
+ * @size: number of bytes to copy
+ * @reserved: reserved, must be zero
+ *
+ * The scatter/gather list for fh_partition_memcpy() is an array of these
+ * structures. The array must be guest physically contiguous.
+ *
+ * This structure must be aligned on a 32-byte boundary, so that no single
+ * structure can span two pages.
+ */
+struct fh_sg_list {
+ uint64_t source; /**< guest physical address to copy from */
+ uint64_t target; /**< guest physical address to copy to */
+ uint64_t size; /**< number of bytes to copy */
+ uint64_t reserved; /**< reserved, must be zero */
+} __attribute__ ((aligned(32)));
+
+/**
+ * fh_partition_memcpy - copies data from one guest to another
+ * @source: the ID of the partition to copy from
+ * @target: the ID of the partition to copy to
+ * @sg_list: guest physical address of an array of &fh_sg_list structures
+ * @count: the number of entries in @sg_list
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_memcpy(unsigned int source,
+ unsigned int target, phys_addr_t sg_list, unsigned int count)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r7 __asm__("r7");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_MEMCPY);
+ r3 = source;
+ r4 = target;
+ r5 = (uint32_t) sg_list;
+
+#ifdef CONFIG_PHYS_64BIT
+ r6 = sg_list >> 32;
+#else
+ r6 = 0;
+#endif
+ r7 = count;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11),
+ "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7)
+ : : EV_HCALL_CLOBBERS5
+ );
+
+ return r3;
+}
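
A minimal sketch of a caller (not part of the patch): build one fh_sg_list entry and pass its guest physical address to the hypervisor. The struct type already carries the required 32-byte alignment; a static instance keeps the example short, and virt_to_phys() from <asm/io.h> is assumed:

static unsigned int example_copy_block(unsigned int src_part,
				       unsigned int dst_part,
				       uint64_t src_gpa, uint64_t dst_gpa,
				       uint64_t size)
{
	static struct fh_sg_list sg;	/* 32-byte aligned via the type */

	sg.source = src_gpa;
	sg.target = dst_gpa;
	sg.size = size;
	sg.reserved = 0;

	return fh_partition_memcpy(src_part, dst_part, virt_to_phys(&sg), 1);
}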
+
+/**
+ * fh_dma_enable - enable DMA for the specified device
+ * @liodn: the LIODN of the I/O device for which to enable DMA
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_dma_enable(unsigned int liodn)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_DMA_ENABLE);
+ r3 = liodn;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/**
+ * fh_dma_disable - disable DMA for the specified device
+ * @liodn: the LIODN of the I/O device for which to disable DMA
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_dma_disable(unsigned int liodn)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_DMA_DISABLE);
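The packing described in the comment above allows a lock-free entry test; the sketch below (not taken from the patch, which may do this differently) shows the idea with a cmpxchg() loop: the entry count is only bumped while the exit count in the upper bits is still zero.

static inline bool example_try_enter_vcore(struct kvmppc_vcore *vc)
{
	int old, new;

	do {
		old = vc->entry_exit_count;
		if (old >> 8)		/* a thread has already exited */
			return false;
		new = old + 1;		/* bump the entry count (low 8 bits) */
	} while (cmpxchg(&vc->entry_exit_count, old, new) != old);

	return true;
}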
+ r3 = liodn;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+
+/**
+ * fh_vmpic_get_msir - returns the MPIC-MSI register value
+ * @interrupt: the interrupt number
+ * @msir_val: returned MPIC-MSI register value
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_vmpic_get_msir(unsigned int interrupt,
+ unsigned int *msir_val)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = FH_HCALL_TOKEN(FH_VMPIC_GET_MSIR);
+ r3 = interrupt;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "=r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ *msir_val = r4;
+
+ return r3;
+}
+
+/**
+ * fh_system_reset - reset the system
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_system_reset(void)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_SYSTEM_RESET);
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "=r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+
+/**
+ * fh_err_get_info - get platform error information
+ * @queue: 0 for the guest error event queue,
+ *         1 for the global error event queue
+ * @bufsize: (input) size of the caller's buffer,
+ *           (output) size of the returned error data
+ * @addr_hi: upper 32 bits of the physical address of the buffer
+ * @addr_lo: lower 32 bits of the physical address of the buffer
+ * @peek: peek flag
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_err_get_info(int queue, uint32_t *bufsize,
+ uint32_t addr_hi, uint32_t addr_lo, int peek)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r7 __asm__("r7");
+
+ r11 = FH_HCALL_TOKEN(FH_ERR_GET_INFO);
+ r3 = queue;
+ r4 = *bufsize;
+ r5 = addr_hi;
+ r6 = addr_lo;
+ r7 = peek;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6),
+ "+r" (r7)
+ : : EV_HCALL_CLOBBERS5
+ );
+
+ *bufsize = r4;
+
+ return r3;
+}
+
+
+#define FH_VCPU_RUN 0
+#define FH_VCPU_IDLE 1
+#define FH_VCPU_NAP 2
+
+/**
+ * fh_get_core_state - get the state of a vcpu
+ *
+ * @handle: handle of partition containing the vcpu
+ * @vcpu: vcpu number within the partition
+ * @state: the current state of the vcpu, see FH_VCPU_*
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_get_core_state(unsigned int handle,
+ unsigned int vcpu, unsigned int *state)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = FH_HCALL_TOKEN(FH_GET_CORE_STATE);
+ r3 = handle;
+ r4 = vcpu;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "+r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ *state = r4;
+ return r3;
+}
+
+/**
+ * fh_enter_nap - enter nap on a vcpu
+ *
+ * Note that although the API supports entering nap on a vcpu other
+ * than the caller, this may not be implemented and may return EINVAL.
+ *
+ * @handle: handle of partition containing the vcpu
+ * @vcpu: vcpu number within the partition
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_enter_nap(unsigned int handle, unsigned int vcpu)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = FH_HCALL_TOKEN(FH_ENTER_NAP);
+ r3 = handle;
+ r4 = vcpu;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "+r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ return r3;
+}
+
+/**
+ * fh_exit_nap - exit nap on a vcpu
+ * @handle: handle of partition containing the vcpu
+ * @vcpu: vcpu number within the partition
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_exit_nap(unsigned int handle, unsigned int vcpu)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = FH_HCALL_TOKEN(FH_EXIT_NAP);
+ r3 = handle;
+ r4 = vcpu;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "+r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ return r3;
+}
+
+/**
+ * fh_claim_device - claim a "claimable" shared device
+ * @handle: fsl,hv-device-handle of node to claim
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_claim_device(unsigned int handle)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_CLAIM_DEVICE);
+ r3 = handle;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/**
+ * fh_partition_stop_dma - run deferred DMA disabling on a partition's private devices
+ *
+ * This applies to devices which a partition owns either privately,
+ * or which are claimable and still actively owned by that partition,
+ * and which do not have the no-dma-disable property.
+ *
+ * @handle: partition (must be stopped) whose DMA is to be disabled
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_stop_dma(unsigned int handle)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_STOP_DMA);
+ r3 = handle;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+#endif
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index fd8201dddd4..1c324ff55ea 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -29,6 +29,10 @@
#define H_LONG_BUSY_ORDER_100_SEC 9905 /* Long busy, hint that 100sec \
is a good time to retry */
#define H_LONG_BUSY_END_RANGE 9905 /* End of long busy range */
+
+/* Internal value used in book3s_hv kvm support; not returned to guests */
+#define H_TOO_HARD 9999
+
#define H_HARDWARE -1 /* Hardware error */
#define H_FUNCTION -2 /* Function not supported */
#define H_PRIVILEGE -3 /* Caller not privileged */
@@ -100,6 +104,7 @@
#define H_PAGE_SET_ACTIVE H_PAGE_STATE_CHANGE
#define H_AVPN (1UL<<(63-32)) /* An avpn is provided as a sanity test */
#define H_ANDCOND (1UL<<(63-33))
+#define H_LOCAL (1UL<<(63-35))
#define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. (ignored for IO pages) */
#define H_ICACHE_SYNCHRONIZE (1UL<<(63-41)) /* dcbst, icbi, etc (ignored for IO pages */
#define H_COALESCE_CAND (1UL<<(63-42)) /* page is a good candidate for coalescing */
diff --git a/arch/powerpc/include/asm/hvsi.h b/arch/powerpc/include/asm/hvsi.h
new file mode 100644
index 00000000000..d3f64f36181
--- /dev/null
+++ b/arch/powerpc/include/asm/hvsi.h
@@ -0,0 +1,94 @@
+#ifndef _HVSI_H
+#define _HVSI_H
+
+#define VS_DATA_PACKET_HEADER 0xff
+#define VS_CONTROL_PACKET_HEADER 0xfe
+#define VS_QUERY_PACKET_HEADER 0xfd
+#define VS_QUERY_RESPONSE_PACKET_HEADER 0xfc
+
+/* control verbs */
+#define VSV_SET_MODEM_CTL 1 /* to service processor only */
+#define VSV_MODEM_CTL_UPDATE 2 /* from service processor only */
+#define VSV_CLOSE_PROTOCOL 3
+
+/* query verbs */
+#define VSV_SEND_VERSION_NUMBER 1
+#define VSV_SEND_MODEM_CTL_STATUS 2
+
+/* yes, these masks are not consecutive. */
+#define HVSI_TSDTR 0x01
+#define HVSI_TSCD 0x20
+
+#define HVSI_MAX_OUTGOING_DATA 12
+#define HVSI_VERSION 1
+
+struct hvsi_header {
+ uint8_t type;
+ uint8_t len;
+ uint16_t seqno;
+} __attribute__((packed));
+
+struct hvsi_data {
+ struct hvsi_header hdr;
+ uint8_t data[HVSI_MAX_OUTGOING_DATA];
+} __attribute__((packed));
+
+struct hvsi_control {
+ struct hvsi_header hdr;
+ uint16_t verb;
+ /* optional depending on verb: */
+ uint32_t word;
+ uint32_t mask;
+} __attribute__((packed));
+
+struct hvsi_query {
+ struct hvsi_header hdr;
+ uint16_t verb;
+} __attribute__((packed));
+
+struct hvsi_query_response {
+ struct hvsi_header hdr;
+ uint16_t verb;
+ uint16_t query_seqno;
+ union {
+ uint8_t version;
+ uint32_t mctrl_word;
+ } u;
+} __attribute__((packed));
+
+/* hvsi lib struct definitions */
+#define HVSI_INBUF_SIZE 255
+struct tty_struct;
+struct hvsi_priv {
+ unsigned int inbuf_len; /* data in input buffer */
+ unsigned char inbuf[HVSI_INBUF_SIZE];
+ unsigned int inbuf_cur; /* Cursor in input buffer */
+ unsigned int inbuf_pktlen; /* packet length from cursor */
+ atomic_t seqno; /* packet sequence number */
+ unsigned int opened:1; /* driver opened */
+ unsigned int established:1; /* protocol established */
+ unsigned int is_console:1; /* used as a kernel console device */
+ unsigned int mctrl_update:1; /* modem control updated */
+ unsigned short mctrl; /* modem control */
+ struct tty_struct *tty; /* tty structure */
+ int (*get_chars)(uint32_t termno, char *buf, int count);
+ int (*put_chars)(uint32_t termno, const char *buf, int count);
+ uint32_t termno;
+};
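
For illustration (not part of the patch), framing an outgoing data packet with these structures might look as follows. It assumes, as the existing hvsi console driver does, that hdr.len counts the whole packet including the header; the sequence number comes from the hvsi_priv seqno counter, and the usual kernel headers (<linux/string.h>, <linux/atomic.h>) are assumed:

static int example_build_data_packet(struct hvsi_priv *pv,
				     const char *buf, int count,
				     struct hvsi_data *packet)
{
	if (count > HVSI_MAX_OUTGOING_DATA)
		count = HVSI_MAX_OUTGOING_DATA;

	packet->hdr.type = VS_DATA_PACKET_HEADER;
	packet->hdr.len = count + sizeof(struct hvsi_header);
	packet->hdr.seqno = atomic_inc_return(&pv->seqno);
	memcpy(packet->data, buf, count);

	return count;	/* bytes actually framed */
}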
+
+/* hvsi lib functions */
+struct hvc_struct;
+extern void hvsilib_init(struct hvsi_priv *pv,
+ int (*get_chars)(uint32_t termno, char *buf, int count),
+ int (*put_chars)(uint32_t termno, const char *buf,
+ int count),
+ int termno, int is_console);
+extern int hvsilib_open(struct hvsi_priv *pv, struct hvc_struct *hp);
+extern void hvsilib_close(struct hvsi_priv *pv, struct hvc_struct *hp);
+extern int hvsilib_read_mctrl(struct hvsi_priv *pv);
+extern int hvsilib_write_mctrl(struct hvsi_priv *pv, int dtr);
+extern void hvsilib_establish(struct hvsi_priv *pv);
+extern int hvsilib_get_chars(struct hvsi_priv *pv, char *buf, int count);
+extern int hvsilib_put_chars(struct hvsi_priv *pv, const char *buf, int count);
+
+#endif /* _HVSI_H */
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 1c33ec17ca3..80fd4d2b4a6 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -57,7 +57,7 @@ void hw_breakpoint_pmu_read(struct perf_event *bp);
extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
extern struct pmu perf_ops_bp;
-extern void ptrace_triggered(struct perf_event *bp, int nmi,
+extern void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs);
static inline void hw_breakpoint_disable(void)
{
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 1bff591f7f7..c0e1bc319e3 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -14,7 +14,7 @@
#include <linux/radix-tree.h>
#include <asm/types.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
/* Define a way to iterate across irqs. */
@@ -330,5 +330,7 @@ extern int call_handle_irq(int irq, void *p1,
struct thread_info *tp, void *func);
extern void do_IRQ(struct pt_regs *regs);
+int irq_choose_cpu(const struct cpumask *mask);
+
#endif /* _ASM_IRQ_H */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
new file mode 100644
index 00000000000..1f780b95c0f
--- /dev/null
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -0,0 +1,47 @@
+#ifndef _ASM_POWERPC_JUMP_LABEL_H
+#define _ASM_POWERPC_JUMP_LABEL_H
+
+/*
+ * Copyright 2010 Michael Ellerman, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+
+#include <asm/feature-fixups.h>
+
+#define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG)
+#define JUMP_LABEL_NOP_SIZE 4
+
+static __always_inline bool arch_static_branch(struct jump_label_key *key)
+{
+ asm goto("1:\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".align 4\n\t"
+ JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
+ ".popsection \n\t"
+ : : "i" (key) : : l_yes);
+ return false;
+l_yes:
+ return true;
+}
+
+#ifdef CONFIG_PPC64
+typedef u64 jump_label_t;
+#else
+typedef u32 jump_label_t;
+#endif
+
+struct jump_entry {
+ jump_label_t code;
+ jump_label_t target;
+ jump_label_t key;
+ jump_label_t pad;
+};
+
+#endif /* _ASM_POWERPC_JUMP_LABEL_H */
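
A hedged usage sketch (not part of the patch), assuming the generic jump label API of this kernel generation, in which callers declare a struct jump_label_key and branch on it through static_branch() from <linux/jump_label.h>:

static struct jump_label_key example_key;	/* disabled by default */

static void example_fast_path(void)
{
	/* compiles to a single nop until the key is enabled at runtime */
	if (static_branch(&example_key))
		pr_info("slow path enabled\n");
}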
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index d2ca5ed3877..a4f6c85431f 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -22,6 +22,10 @@
#include <linux/types.h>
+/* Select powerpc specific features in <linux/kvm.h> */
+#define __KVM_HAVE_SPAPR_TCE
+#define __KVM_HAVE_PPC_SMT
+
struct kvm_regs {
__u64 pc;
__u64 cr;
@@ -272,4 +276,15 @@ struct kvm_guest_debug_arch {
#define KVM_INTERRUPT_UNSET -2U
#define KVM_INTERRUPT_SET_LEVEL -3U
+/* for KVM_CAP_SPAPR_TCE */
+struct kvm_create_spapr_tce {
+ __u64 liobn;
+ __u32 window_size;
+};
+
+/* for KVM_ALLOCATE_RMA */
+struct kvm_allocate_rma {
+ __u64 rma_size;
+};
+
#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 0951b17f4eb..7b1f0e0fc65 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -64,8 +64,12 @@
#define BOOK3S_INTERRUPT_PROGRAM 0x700
#define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800
#define BOOK3S_INTERRUPT_DECREMENTER 0x900
+#define BOOK3S_INTERRUPT_HV_DECREMENTER 0x980
#define BOOK3S_INTERRUPT_SYSCALL 0xc00
#define BOOK3S_INTERRUPT_TRACE 0xd00
+#define BOOK3S_INTERRUPT_H_DATA_STORAGE 0xe00
+#define BOOK3S_INTERRUPT_H_INST_STORAGE 0xe20
+#define BOOK3S_INTERRUPT_H_EMUL_ASSIST 0xe40
#define BOOK3S_INTERRUPT_PERFMON 0xf00
#define BOOK3S_INTERRUPT_ALTIVEC 0xf20
#define BOOK3S_INTERRUPT_VSX 0xf40
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index d62e703f121..98da010252a 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -24,20 +24,6 @@
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>
-struct kvmppc_slb {
- u64 esid;
- u64 vsid;
- u64 orige;
- u64 origv;
- bool valid : 1;
- bool Ks : 1;
- bool Kp : 1;
- bool nx : 1;
- bool large : 1; /* PTEs are 16MB */
- bool tb : 1; /* 1TB segment */
- bool class : 1;
-};
-
struct kvmppc_bat {
u64 raw;
u32 bepi;
@@ -67,11 +53,22 @@ struct kvmppc_sid_map {
#define VSID_POOL_SIZE (SID_CONTEXTS * 16)
#endif
+struct hpte_cache {
+ struct hlist_node list_pte;
+ struct hlist_node list_pte_long;
+ struct hlist_node list_vpte;
+ struct hlist_node list_vpte_long;
+ struct rcu_head rcu_head;
+ u64 host_va;
+ u64 pfn;
+ ulong slot;
+ struct kvmppc_pte pte;
+};
+
struct kvmppc_vcpu_book3s {
struct kvm_vcpu vcpu;
struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
struct kvmppc_sid_map sid_map[SID_MAP_NUM];
- struct kvmppc_slb slb[64];
struct {
u64 esid;
u64 vsid;
@@ -81,7 +78,6 @@ struct kvmppc_vcpu_book3s {
struct kvmppc_bat dbat[8];
u64 hid[6];
u64 gqr[8];
- int slb_nr;
u64 sdr1;
u64 hior;
u64 msr_mask;
@@ -93,7 +89,13 @@ struct kvmppc_vcpu_book3s {
u64 vsid_max;
#endif
int context_id[SID_CONTEXTS];
- ulong prog_flags; /* flags to inject when giving a 700 trap */
+
+ struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
+ struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
+ struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
+ struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
+ int hpte_cache_count;
+ spinlock_t mmu_lock;
};
#define CONTEXT_HOST 0
@@ -110,8 +112,10 @@ extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask)
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
+extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
@@ -123,19 +127,22 @@ extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
+extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
+extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
-extern ulong kvmppc_trampoline_lowmem;
-extern ulong kvmppc_trampoline_enter;
+extern void kvmppc_handler_lowmem_trampoline(void);
+extern void kvmppc_handler_trampoline_enter(void);
extern void kvmppc_rmcall(ulong srr0, ulong srr1);
+extern void kvmppc_hv_entry_trampoline(void);
extern void kvmppc_load_up_fpu(void);
extern void kvmppc_load_up_altivec(void);
extern void kvmppc_load_up_vsx(void);
@@ -147,15 +154,32 @@ static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
}
-static inline ulong dsisr(void)
+extern void kvm_return_point(void);
+
+/* Also add subarch specific defines */
+
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
+#include <asm/kvm_book3s_32.h>
+#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/kvm_book3s_64.h>
+#endif
+
+#ifdef CONFIG_KVM_BOOK3S_PR
+
+static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
- ulong r;
- asm ( "mfdsisr %0 " : "=r" (r) );
- return r;
+ return to_book3s(vcpu)->hior;
}
-extern void kvm_return_point(void);
-static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu);
+static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
+ unsigned long pending_now, unsigned long old_pending)
+{
+ if (pending_now)
+ vcpu->arch.shared->int_pending = 1;
+ else if (old_pending)
+ vcpu->arch.shared->int_pending = 0;
+}
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
@@ -244,6 +268,120 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
return to_svcpu(vcpu)->fault_dar;
}
+static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
+{
+ ulong crit_raw = vcpu->arch.shared->critical;
+ ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
+ bool crit;
+
+ /* Truncate crit indicators in 32 bit mode */
+ if (!(vcpu->arch.shared->msr & MSR_SF)) {
+ crit_raw &= 0xffffffff;
+ crit_r1 &= 0xffffffff;
+ }
+
+ /* Critical section when crit == r1 */
+ crit = (crit_raw == crit_r1);
+ /* ... and we're in supervisor mode */
+ crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
+
+ return crit;
+}
+#else /* CONFIG_KVM_BOOK3S_PR */
+
+static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
+ unsigned long pending_now, unsigned long old_pending)
+{
+}
+
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+ vcpu->arch.gpr[num] = val;
+}
+
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+ return vcpu->arch.gpr[num];
+}
+
+static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+{
+ vcpu->arch.cr = val;
+}
+
+static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.cr;
+}
+
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+{
+ vcpu->arch.xer = val;
+}
+
+static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.xer;
+}
+
+static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.ctr = val;
+}
+
+static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.ctr;
+}
+
+static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.lr = val;
+}
+
+static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.lr;
+}
+
+static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.pc = val;
+}
+
+static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.pc;
+}
+
+static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+{
+ ulong pc = kvmppc_get_pc(vcpu);
+
+ /* Load the instruction manually if the exit path failed
+ * to load it */
+ if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
+ kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
+
+ return vcpu->arch.last_inst;
+}
+
+static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault_dar;
+}
+
+static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+#endif
+
/* Magic register values loaded into r3 and r4 before the 'sc' assembly
* instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3 0x113724FA
@@ -251,12 +389,4 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
#define INS_DCBZ 0x7c0007ec
-/* Also add subarch specific defines */
-
-#ifdef CONFIG_PPC_BOOK3S_32
-#include <asm/kvm_book3s_32.h>
-#else
-#include <asm/kvm_book3s_64.h>
-#endif
-
#endif /* __ASM_KVM_BOOK3S_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 4cadd612d57..e43fe42b987 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -20,9 +20,13 @@
#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__
+#ifdef CONFIG_KVM_BOOK3S_PR
static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
{
return &get_paca()->shadow_vcpu;
}
+#endif
+
+#define SPAPR_TCE_SHIFT 12
#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index d5a8a386163..ef7b3688c3b 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -60,6 +60,36 @@ kvmppc_resume_\intno:
#else /*__ASSEMBLY__ */
+/*
+ * This struct goes in the PACA on 64-bit processors. It is used
+ * to store host state that needs to be saved when we enter a guest
+ * and restored when we exit, but isn't specific to any particular
+ * guest or vcpu. It also has some scratch fields used by the guest
+ * exit code.
+ */
+struct kvmppc_host_state {
+ ulong host_r1;
+ ulong host_r2;
+ ulong host_msr;
+ ulong vmhandler;
+ ulong scratch0;
+ ulong scratch1;
+ u8 in_guest;
+
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ struct kvm_vcpu *kvm_vcpu;
+ struct kvmppc_vcore *kvm_vcore;
+ unsigned long xics_phys;
+ u64 dabr;
+ u64 host_mmcr[3];
+ u32 host_pmc[8];
+ u64 host_purr;
+ u64 host_spurr;
+ u64 host_dscr;
+ u64 dec_expires;
+#endif
+};
+
struct kvmppc_book3s_shadow_vcpu {
ulong gpr[14];
u32 cr;
@@ -73,17 +103,12 @@ struct kvmppc_book3s_shadow_vcpu {
ulong shadow_srr1;
ulong fault_dar;
- ulong host_r1;
- ulong host_r2;
- ulong handler;
- ulong scratch0;
- ulong scratch1;
- ulong vmhandler;
- u8 in_guest;
-
#ifdef CONFIG_PPC_BOOK3S_32
u32 sr[16]; /* Guest SRs */
+
+ struct kvmppc_host_state hstate;
#endif
+
#ifdef CONFIG_PPC_BOOK3S_64
u8 slb_max; /* highest used guest slb entry */
struct {
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index 9c9ba3d59b1..a90e0918877 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -93,4 +93,8 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
return vcpu->arch.fault_dear;
}
+static inline ulong kvmppc_get_msr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.shared->msr;
+}
#endif /* __ASM_KVM_BOOKE_H__ */
diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h
index 7a2a565f88c..adbfca9dd10 100644
--- a/arch/powerpc/include/asm/kvm_e500.h
+++ b/arch/powerpc/include/asm/kvm_e500.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Yu Liu, <yu.liu@freescale.com>
*
@@ -29,17 +29,25 @@ struct tlbe{
u32 mas7;
};
+#define E500_TLB_VALID 1
+#define E500_TLB_DIRTY 2
+
+struct tlbe_priv {
+ pfn_t pfn;
+ unsigned int flags; /* E500_TLB_* */
+};
+
+struct vcpu_id_table;
+
struct kvmppc_vcpu_e500 {
/* Unmodified copy of the guest's TLB. */
- struct tlbe *guest_tlb[E500_TLB_NUM];
- /* TLB that's actually used when the guest is running. */
- struct tlbe *shadow_tlb[E500_TLB_NUM];
- /* Pages which are referenced in the shadow TLB. */
- struct page **shadow_pages[E500_TLB_NUM];
+ struct tlbe *gtlb_arch[E500_TLB_NUM];
- unsigned int guest_tlb_size[E500_TLB_NUM];
- unsigned int shadow_tlb_size[E500_TLB_NUM];
- unsigned int guest_tlb_nv[E500_TLB_NUM];
+ /* KVM internal information associated with each guest TLB entry */
+ struct tlbe_priv *gtlb_priv[E500_TLB_NUM];
+
+ unsigned int gtlb_size[E500_TLB_NUM];
+ unsigned int gtlb_nv[E500_TLB_NUM];
u32 host_pid[E500_PID_NUM];
u32 pid[E500_PID_NUM];
@@ -53,6 +61,10 @@ struct kvmppc_vcpu_e500 {
u32 mas5;
u32 mas6;
u32 mas7;
+
+ /* vcpu id table */
+ struct vcpu_id_table *idt;
+
u32 l1csr0;
u32 l1csr1;
u32 hid0;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 186f150b9b8..cc22b282d75 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -25,15 +25,23 @@
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
#include <linux/kvm_para.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
#include <asm/kvm_asm.h>
+#include <asm/processor.h>
-#define KVM_MAX_VCPUS 1
+#define KVM_MAX_VCPUS NR_CPUS
+#define KVM_MAX_VCORES NR_CPUS
#define KVM_MEMORY_SLOTS 32
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
+#ifdef CONFIG_KVM_MMIO
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#endif
/* We don't currently support large pages. */
#define KVM_HPAGE_GFN_SHIFT(x) 0
@@ -57,6 +65,10 @@ struct kvm;
struct kvm_run;
struct kvm_vcpu;
+struct lppaca;
+struct slb_shadow;
+struct dtl;
+
struct kvm_vm_stat {
u32 remote_tlb_flush;
};
@@ -133,9 +145,74 @@ struct kvmppc_exit_timing {
};
};
+struct kvmppc_pginfo {
+ unsigned long pfn;
+ atomic_t refcnt;
+};
+
+struct kvmppc_spapr_tce_table {
+ struct list_head list;
+ struct kvm *kvm;
+ u64 liobn;
+ u32 window_size;
+ struct page *pages[0];
+};
+
+struct kvmppc_rma_info {
+ void *base_virt;
+ unsigned long base_pfn;
+ unsigned long npages;
+ struct list_head list;
+ atomic_t use_count;
+};
+
struct kvm_arch {
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ unsigned long hpt_virt;
+ unsigned long ram_npages;
+ unsigned long ram_psize;
+ unsigned long ram_porder;
+ struct kvmppc_pginfo *ram_pginfo;
+ unsigned int lpid;
+ unsigned int host_lpid;
+ unsigned long host_lpcr;
+ unsigned long sdr1;
+ unsigned long host_sdr1;
+ int tlbie_lock;
+ int n_rma_pages;
+ unsigned long lpcr;
+ unsigned long rmor;
+ struct kvmppc_rma_info *rma;
+ struct list_head spapr_tce_tables;
+ unsigned short last_vcpu[NR_CPUS];
+ struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
};
+/*
+ * Struct for a virtual core.
+ * Note: entry_exit_count combines an entry count in the bottom 8 bits
+ * and an exit count in the next 8 bits. This is so that we can
+ * atomically increment the entry count iff the exit count is 0
+ * without taking the lock.
+ */
+struct kvmppc_vcore {
+ int n_runnable;
+ int n_blocked;
+ int num_threads;
+ int entry_exit_count;
+ int n_woken;
+ int nap_count;
+ u16 pcpu;
+ u8 vcore_running;
+ u8 in_guest;
+ struct list_head runnable_threads;
+ spinlock_t lock;
+};
+
+#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
+#define VCORE_EXIT_COUNT(vc) ((vc)->entry_exit_count >> 8)
+
struct kvmppc_pte {
ulong eaddr;
u64 vpage;
@@ -163,16 +240,18 @@ struct kvmppc_mmu {
bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
};
-struct hpte_cache {
- struct hlist_node list_pte;
- struct hlist_node list_pte_long;
- struct hlist_node list_vpte;
- struct hlist_node list_vpte_long;
- struct rcu_head rcu_head;
- u64 host_va;
- u64 pfn;
- ulong slot;
- struct kvmppc_pte pte;
+struct kvmppc_slb {
+ u64 esid;
+ u64 vsid;
+ u64 orige;
+ u64 origv;
+ bool valid : 1;
+ bool Ks : 1;
+ bool Kp : 1;
+ bool nx : 1;
+ bool large : 1; /* PTEs are 16MB */
+ bool tb : 1; /* 1TB segment */
+ bool class : 1;
};
struct kvm_vcpu_arch {
@@ -187,6 +266,9 @@ struct kvm_vcpu_arch {
ulong highmem_handler;
ulong rmcall;
ulong host_paca_phys;
+ struct kvmppc_slb slb[64];
+ int slb_max; /* 1 + index of last valid entry in slb[] */
+ int slb_nr; /* total number of entries in SLB */
struct kvmppc_mmu mmu;
#endif
@@ -195,13 +277,19 @@ struct kvm_vcpu_arch {
u64 fpr[32];
u64 fpscr;
+#ifdef CONFIG_SPE
+ ulong evr[32];
+ ulong spefscr;
+ ulong host_spefscr;
+ u64 acc;
+#endif
#ifdef CONFIG_ALTIVEC
vector128 vr[32];
vector128 vscr;
#endif
#ifdef CONFIG_VSX
- u64 vsr[32];
+ u64 vsr[64];
#endif
#ifdef CONFIG_PPC_BOOK3S
@@ -209,22 +297,27 @@ struct kvm_vcpu_arch {
u32 qpr[32];
#endif
-#ifdef CONFIG_BOOKE
ulong pc;
ulong ctr;
ulong lr;
ulong xer;
u32 cr;
-#endif
#ifdef CONFIG_PPC_BOOK3S
- ulong shadow_msr;
ulong hflags;
ulong guest_owned_ext;
+ ulong purr;
+ ulong spurr;
+ ulong dscr;
+ ulong amr;
+ ulong uamor;
+ u32 ctrl;
+ ulong dabr;
#endif
u32 vrsave; /* also USPRG0 */
u32 mmucr;
+ ulong shadow_msr;
ulong sprg4;
ulong sprg5;
ulong sprg6;
@@ -249,6 +342,7 @@ struct kvm_vcpu_arch {
u32 pvr;
u32 shadow_pid;
+ u32 shadow_pid1;
u32 pid;
u32 swap_pid;
@@ -258,6 +352,9 @@ struct kvm_vcpu_arch {
u32 dbcr1;
u32 dbsr;
+ u64 mmcr[3];
+ u32 pmc[8];
+
#ifdef CONFIG_KVM_EXIT_TIMING
struct mutex exit_timing_lock;
struct kvmppc_exit_timing timing_exit;
@@ -272,8 +369,12 @@ struct kvm_vcpu_arch {
struct dentry *debugfs_exit_timing;
#endif
+#ifdef CONFIG_PPC_BOOK3S
+ ulong fault_dar;
+ u32 fault_dsisr;
+#endif
+
#ifdef CONFIG_BOOKE
- u32 last_inst;
ulong fault_dear;
ulong fault_esr;
ulong queued_dear;
@@ -288,25 +389,47 @@ struct kvm_vcpu_arch {
u8 dcr_is_write;
u8 osi_needed;
u8 osi_enabled;
+ u8 hcall_needed;
u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
struct hrtimer dec_timer;
struct tasklet_struct tasklet;
u64 dec_jiffies;
+ u64 dec_expires;
unsigned long pending_exceptions;
+ u16 last_cpu;
+ u8 ceded;
+ u8 prodded;
+ u32 last_inst;
+
+ struct lppaca *vpa;
+ struct slb_shadow *slb_shadow;
+ struct dtl *dtl;
+ struct dtl *dtl_end;
+
+ struct kvmppc_vcore *vcore;
+ int ret;
+ int trap;
+ int state;
+ int ptid;
+ wait_queue_head_t cpu_run;
+
struct kvm_vcpu_arch_shared *shared;
unsigned long magic_page_pa; /* phys addr to map the magic page to */
unsigned long magic_page_ea; /* effect. addr to map the magic page to */
-#ifdef CONFIG_PPC_BOOK3S
- struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
- struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
- struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
- struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
- int hpte_cache_count;
- spinlock_t mmu_lock;
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ struct kvm_vcpu_arch_shared shregs;
+
+ struct list_head run_list;
+ struct task_struct *run_task;
+ struct kvm_run *kvm_run;
#endif
};
+#define KVMPPC_VCPU_BUSY_IN_HOST 0
+#define KVMPPC_VCPU_BLOCKED 1
+#define KVMPPC_VCPU_RUNNABLE 2
+
#endif /* __POWERPC_KVM_HOST_H__ */
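The entry_exit_count packing described in the kvmppc_vcore comment above is what lets a thread join a running virtual core without taking vc->lock: the entry count can be bumped atomically only while the exit count (the next 8 bits) is still zero. A sketch of that idea — try_enter_vcore() is illustrative and not part of this patch; the patch's own entry path does the equivalent atomic update itself:

/* Illustrative only: bump the entry count (low 8 bits) iff no thread has
 * begun exiting (next 8 bits still zero), without taking vc->lock. */
static int try_enter_vcore(struct kvmppc_vcore *vc)
{
	int old, new;

	do {
		old = vc->entry_exit_count;
		if (old >> 8)		/* VCORE_EXIT_COUNT != 0: too late to join */
			return 0;
		new = old + 1;		/* one more thread entering */
	} while (cmpxchg(&vc->entry_exit_count, old, new) != old);
	return 1;
}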
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 9345238edec..d121f49d62b 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -33,6 +33,9 @@
#else
#include <asm/kvm_booke.h>
#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/paca.h>
+#endif
enum emulation_result {
EMULATE_DONE, /* no further processing */
@@ -42,6 +45,7 @@ enum emulation_result {
EMULATE_AGAIN, /* something went wrong. go again */
};
+extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern char kvmppc_handlers_start[];
extern unsigned long kvmppc_handler_len;
@@ -109,6 +113,27 @@ extern void kvmppc_booke_exit(void);
extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
+extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
+
+extern long kvmppc_alloc_hpt(struct kvm *kvm);
+extern void kvmppc_free_hpt(struct kvm *kvm);
+extern long kvmppc_prepare_vrma(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem);
+extern void kvmppc_map_vrma(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem);
+extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
+extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
+ struct kvm_create_spapr_tce *args);
+extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
+ struct kvm_allocate_rma *rma);
+extern struct kvmppc_rma_info *kvm_alloc_rma(void);
+extern void kvm_release_rma(struct kvmppc_rma_info *ri);
+extern int kvmppc_core_init_vm(struct kvm *kvm);
+extern void kvmppc_core_destroy_vm(struct kvm *kvm);
+extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem);
+extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem);
/*
* Cuts out inst bits with ordering according to spec.
@@ -151,4 +176,20 @@ int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
+{
+ paca[cpu].kvm_hstate.xics_phys = addr;
+}
+
+extern void kvm_rma_init(void);
+
+#else
+static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
+{}
+
+static inline void kvm_rma_init(void)
+{}
+#endif
+
#endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
index c2410af6bfd..b8da9136386 100644
--- a/arch/powerpc/include/asm/local.h
+++ b/arch/powerpc/include/asm/local.h
@@ -2,7 +2,7 @@
#define _ARCH_POWERPC_LOCAL_H
#include <linux/percpu.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
typedef struct
{
diff --git a/arch/powerpc/include/asm/macio.h b/arch/powerpc/include/asm/macio.h
index 7ab82c825a0..27af7f8bbb8 100644
--- a/arch/powerpc/include/asm/macio.h
+++ b/arch/powerpc/include/asm/macio.h
@@ -76,7 +76,7 @@ static inline unsigned long macio_resource_len(struct macio_dev *dev, int resour
struct resource *res = &dev->resource[resource_no];
if (res->start == 0 || res->end == 0 || res->end < res->start)
return 0;
- return res->end - res->start + 1;
+ return resource_size(res);
}
extern int macio_enable_devres(struct macio_dev *dev);
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index d865bd909c7..b445e0af4c2 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -90,13 +90,19 @@ extern char initial_stab[];
#define HPTE_R_PP0 ASM_CONST(0x8000000000000000)
#define HPTE_R_TS ASM_CONST(0x4000000000000000)
+#define HPTE_R_KEY_HI ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT 12
-#define HPTE_R_RPN ASM_CONST(0x3ffffffffffff000)
-#define HPTE_R_FLAGS ASM_CONST(0x00000000000003ff)
+#define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP ASM_CONST(0x0000000000000003)
#define HPTE_R_N ASM_CONST(0x0000000000000004)
+#define HPTE_R_G ASM_CONST(0x0000000000000008)
+#define HPTE_R_M ASM_CONST(0x0000000000000010)
+#define HPTE_R_I ASM_CONST(0x0000000000000020)
+#define HPTE_R_W ASM_CONST(0x0000000000000040)
+#define HPTE_R_WIMG ASM_CONST(0x0000000000000078)
#define HPTE_R_C ASM_CONST(0x0000000000000080)
#define HPTE_R_R ASM_CONST(0x0000000000000100)
+#define HPTE_R_KEY_LO ASM_CONST(0x0000000000000e00)
#define HPTE_V_1TB_SEG ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000)
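With the widened HPTE_R_RPN and the new WIMG and key masks, the second HPTE doubleword can be decomposed field by field. A small sketch (the helper names are illustrative, not from this patch) of pulling those fields back out:

/* Sketch: decode fields of an HPTE's second doubleword using the masks above. */
static inline unsigned long hpte_r_rpn(unsigned long r)
{
	return (r & HPTE_R_RPN) >> HPTE_R_RPN_SHIFT;	/* real page number */
}

static inline unsigned long hpte_r_wimg(unsigned long r)
{
	return (r & HPTE_R_WIMG) >> 3;			/* W, I, M, G in bits 3..6 */
}

static inline unsigned long hpte_r_key(unsigned long r)
{
	/* 5-bit storage key: 2 high bits from KEY_HI, 3 low bits from KEY_LO */
	return ((r & HPTE_R_KEY_HI) >> 57) | ((r & HPTE_R_KEY_LO) >> 9);
}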
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 4138b21ae80..698b3063868 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -115,14 +115,24 @@
#ifndef __ASSEMBLY__
#include <asm/cputable.h>
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#include <asm/percpu.h>
+DECLARE_PER_CPU(int, next_tlbcam_idx);
+#endif
+
static inline int mmu_has_feature(unsigned long feature)
{
return (cur_cpu_spec->mmu_features & feature);
}
+static inline void mmu_clear_feature(unsigned long feature)
+{
+ cur_cpu_spec->mmu_features &= ~feature;
+}
+
extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
-/* MMU initialization (64-bit only fo now) */
+/* MMU initialization */
extern void early_init_mmu(void);
extern void early_init_mmu_secondary(void);
diff --git a/arch/powerpc/include/asm/pSeries_reconfig.h b/arch/powerpc/include/asm/pSeries_reconfig.h
index 89d2f99c1bf..23cd6cc30bc 100644
--- a/arch/powerpc/include/asm/pSeries_reconfig.h
+++ b/arch/powerpc/include/asm/pSeries_reconfig.h
@@ -17,7 +17,7 @@
#ifdef CONFIG_PPC_PSERIES
extern int pSeries_reconfig_notifier_register(struct notifier_block *);
extern void pSeries_reconfig_notifier_unregister(struct notifier_block *);
-extern struct blocking_notifier_head pSeries_reconfig_chain;
+extern int pSeries_reconfig_notify(unsigned long action, void *p);
/* Not the best place to put this, will be fixed when we move some
* of the rtas suspend-me stuff to pseries */
extern void pSeries_coalesce_init(void);
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 74126765106..516bfb3f47d 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -103,11 +103,12 @@ struct paca_struct {
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC_BOOK3E
- pgd_t *pgd; /* Current PGD */
- pgd_t *kernel_pgd; /* Kernel PGD */
u64 exgen[8] __attribute__((aligned(0x80)));
+ /* Keep pgd in the same cacheline as the start of extlb */
+ pgd_t *pgd __attribute__((aligned(0x80))); /* Current PGD */
+ pgd_t *kernel_pgd; /* Kernel PGD */
/* We can have up to 3 levels of reentrancy in the TLB miss handler */
- u64 extlb[3][EX_TLB_SIZE / sizeof(u64)] __attribute__((aligned(0x80)));
+ u64 extlb[3][EX_TLB_SIZE / sizeof(u64)];
u64 exmc[8]; /* used for machine checks */
u64 excrit[8]; /* used for crit interrupts */
u64 exdbg[8]; /* used for debug interrupts */
@@ -147,9 +148,12 @@ struct paca_struct {
struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */
#ifdef CONFIG_KVM_BOOK3S_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_PR
/* We use this to store guest state in */
struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
#endif
+ struct kvmppc_host_state kvm_hstate;
+#endif
};
extern struct paca_struct *paca;
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index b90dbf8e5cd..56b879ab3a4 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -10,58 +10,10 @@
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/ioport.h>
+#include <asm-generic/pci-bridge.h>
struct device_node;
-enum {
- /* Force re-assigning all resources (ignore firmware
- * setup completely)
- */
- PPC_PCI_REASSIGN_ALL_RSRC = 0x00000001,
-
- /* Re-assign all bus numbers */
- PPC_PCI_REASSIGN_ALL_BUS = 0x00000002,
-
- /* Do not try to assign, just use existing setup */
- PPC_PCI_PROBE_ONLY = 0x00000004,
-
- /* Don't bother with ISA alignment unless the bridge has
- * ISA forwarding enabled
- */
- PPC_PCI_CAN_SKIP_ISA_ALIGN = 0x00000008,
-
- /* Enable domain numbers in /proc */
- PPC_PCI_ENABLE_PROC_DOMAINS = 0x00000010,
- /* ... except for domain 0 */
- PPC_PCI_COMPAT_DOMAIN_0 = 0x00000020,
-};
-#ifdef CONFIG_PCI
-extern unsigned int ppc_pci_flags;
-
-static inline void ppc_pci_set_flags(int flags)
-{
- ppc_pci_flags = flags;
-}
-
-static inline void ppc_pci_add_flags(int flags)
-{
- ppc_pci_flags |= flags;
-}
-
-static inline int ppc_pci_has_flag(int flag)
-{
- return (ppc_pci_flags & flag);
-}
-#else
-static inline void ppc_pci_set_flags(int flags) { }
-static inline void ppc_pci_add_flags(int flags) { }
-static inline int ppc_pci_has_flag(int flag)
-{
- return 0;
-}
-#endif
-
-
/*
* Structure of a PCI controller (host bridge)
*/
@@ -171,15 +123,9 @@ static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
#ifndef CONFIG_PPC64
-static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
-{
- struct pci_controller *host;
-
- if (bus->self)
- return pci_device_to_OF_node(bus->self);
- host = pci_bus_to_host(bus);
- return host ? host->dn : NULL;
-}
+extern int pci_device_from_OF_node(struct device_node *node,
+ u8 *bus, u8 *devfn);
+extern void pci_create_OF_bus_map(void);
static inline int isa_vaddr_is_ioport(void __iomem *address)
{
@@ -223,17 +169,8 @@ struct pci_dn {
/* Get the pointer to a device_node's pci_dn */
#define PCI_DN(dn) ((struct pci_dn *) (dn)->data)
-extern struct device_node *fetch_dev_dn(struct pci_dev *dev);
extern void * update_dn_pci_info(struct device_node *dn, void *data);
-/* Get a device_node from a pci_dev. This code must be fast except
- * in the case where the sysdata is incorrect and needs to be fixed
- * up (this will only happen once). */
-static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev)
-{
- return dev->dev.of_node ? dev->dev.of_node : fetch_dev_dn(dev);
-}
-
static inline int pci_device_from_OF_node(struct device_node *np,
u8 *bus, u8 *devfn)
{
@@ -244,14 +181,6 @@ static inline int pci_device_from_OF_node(struct device_node *np,
return 0;
}
-static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
-{
- if (bus->self)
- return pci_device_to_OF_node(bus->self);
- else
- return bus->dev.of_node; /* Must be root bus (PHB) */
-}
-
/** Find the bus corresponding to the indicated device node */
extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn);
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 7d7790954e0..49c3de582be 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -44,7 +44,7 @@ struct pci_dev;
* bus numbers (don't do that on ppc64 yet !)
*/
#define pcibios_assign_all_busses() \
- (ppc_pci_has_flag(PPC_PCI_REASSIGN_ALL_BUS))
+ (pci_has_flag(PCI_REASSIGN_ALL_BUS))
static inline void pcibios_set_master(struct pci_dev *dev)
{
@@ -179,8 +179,7 @@ extern int remove_phb_dynamic(struct pci_controller *phb);
extern struct pci_dev *of_create_pci_dev(struct device_node *node,
struct pci_bus *bus, int devfn);
-extern void of_scan_pci_bridge(struct device_node *node,
- struct pci_dev *dev);
+extern void of_scan_pci_bridge(struct pci_dev *dev);
extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 81576ee0cfb..c4205616dfb 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -357,7 +357,8 @@ void pgtable_cache_init(void);
/*
* find_linux_pte returns the address of a linux pte for a given
* effective address and directory. If not found, it returns zero.
- */static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
+ */
+static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
{
pgd_t *pg;
pud_t *pu;
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index e472659d906..e980faae422 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -71,6 +71,42 @@
#define PPC_INST_ERATSX 0x7c000126
#define PPC_INST_ERATSX_DOT 0x7c000127
+/* Misc instructions for BPF compiler */
+#define PPC_INST_LD 0xe8000000
+#define PPC_INST_LHZ 0xa0000000
+#define PPC_INST_LWZ 0x80000000
+#define PPC_INST_STD 0xf8000000
+#define PPC_INST_STDU 0xf8000001
+#define PPC_INST_MFLR 0x7c0802a6
+#define PPC_INST_MTLR 0x7c0803a6
+#define PPC_INST_CMPWI 0x2c000000
+#define PPC_INST_CMPDI 0x2c200000
+#define PPC_INST_CMPLW 0x7c000040
+#define PPC_INST_CMPLWI 0x28000000
+#define PPC_INST_ADDI 0x38000000
+#define PPC_INST_ADDIS 0x3c000000
+#define PPC_INST_ADD 0x7c000214
+#define PPC_INST_SUB 0x7c000050
+#define PPC_INST_BLR 0x4e800020
+#define PPC_INST_BLRL 0x4e800021
+#define PPC_INST_MULLW 0x7c0001d6
+#define PPC_INST_MULHWU 0x7c000016
+#define PPC_INST_MULLI 0x1c000000
+#define PPC_INST_DIVWU 0x7c0003d6
+#define PPC_INST_RLWINM 0x54000000
+#define PPC_INST_RLDICR 0x78000004
+#define PPC_INST_SLW 0x7c000030
+#define PPC_INST_SRW 0x7c000430
+#define PPC_INST_AND 0x7c000038
+#define PPC_INST_ANDDOT 0x7c000039
+#define PPC_INST_OR 0x7c000378
+#define PPC_INST_ANDI 0x70000000
+#define PPC_INST_ORI 0x60000000
+#define PPC_INST_ORIS 0x64000000
+#define PPC_INST_NEG 0x7c0000d0
+#define PPC_INST_BRANCH 0x48000000
+#define PPC_INST_BRANCH_COND 0x40800000
+
/* macros to insert fields into opcodes */
#define __PPC_RA(a) (((a) & 0x1f) << 16)
#define __PPC_RB(b) (((b) & 0x1f) << 11)
@@ -83,6 +119,10 @@
#define __PPC_T_TLB(t) (((t) & 0x3) << 21)
#define __PPC_WC(w) (((w) & 0x3) << 21)
#define __PPC_WS(w) (((w) & 0x1f) << 11)
+#define __PPC_SH(s) __PPC_WS(s)
+#define __PPC_MB(s) (((s) & 0x1f) << 6)
+#define __PPC_ME(s) (((s) & 0x1f) << 1)
+#define __PPC_BI(s) (((s) & 0x1f) << 16)
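These opcode values and field shifts are meant to be OR-ed together into 32-bit instruction words by an emitter such as the BPF compiler mentioned above. A hedged sketch of that composition — __PPC_RS and ppc_inst_or() are defined here only for illustration, since this hunk adds just __PPC_RA/__PPC_RB and friends:

/* Sketch: build "or ra,rs,rb" as a raw instruction word.
 * __PPC_RS is local to this example (rS occupies bits 6-10, i.e. << 21). */
#define __PPC_RS(s)	(((s) & 0x1f) << 21)

static inline u32 ppc_inst_or(int ra, int rs, int rb)
{
	return PPC_INST_OR | __PPC_RS(rs) | __PPC_RA(ra) | __PPC_RB(rb);
}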
/*
* Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 1b422381fc1..368f72f7980 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -150,18 +150,22 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#define REST_16VSRSU(n,b,base) REST_8VSRSU(n,b,base); REST_8VSRSU(n+8,b,base)
#define REST_32VSRSU(n,b,base) REST_16VSRSU(n,b,base); REST_16VSRSU(n+16,b,base)
-#define SAVE_EVR(n,s,base) evmergehi s,s,n; stw s,THREAD_EVR0+4*(n)(base)
-#define SAVE_2EVRS(n,s,base) SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base)
-#define SAVE_4EVRS(n,s,base) SAVE_2EVRS(n,s,base); SAVE_2EVRS(n+2,s,base)
-#define SAVE_8EVRS(n,s,base) SAVE_4EVRS(n,s,base); SAVE_4EVRS(n+4,s,base)
-#define SAVE_16EVRS(n,s,base) SAVE_8EVRS(n,s,base); SAVE_8EVRS(n+8,s,base)
-#define SAVE_32EVRS(n,s,base) SAVE_16EVRS(n,s,base); SAVE_16EVRS(n+16,s,base)
-#define REST_EVR(n,s,base) lwz s,THREAD_EVR0+4*(n)(base); evmergelo n,s,n
-#define REST_2EVRS(n,s,base) REST_EVR(n,s,base); REST_EVR(n+1,s,base)
-#define REST_4EVRS(n,s,base) REST_2EVRS(n,s,base); REST_2EVRS(n+2,s,base)
-#define REST_8EVRS(n,s,base) REST_4EVRS(n,s,base); REST_4EVRS(n+4,s,base)
-#define REST_16EVRS(n,s,base) REST_8EVRS(n,s,base); REST_8EVRS(n+8,s,base)
-#define REST_32EVRS(n,s,base) REST_16EVRS(n,s,base); REST_16EVRS(n+16,s,base)
+/*
+ * b = base register for addressing, o = base offset from register of 1st EVR
+ * n = first EVR, s = scratch
+ */
+#define SAVE_EVR(n,s,b,o) evmergehi s,s,n; stw s,o+4*(n)(b)
+#define SAVE_2EVRS(n,s,b,o) SAVE_EVR(n,s,b,o); SAVE_EVR(n+1,s,b,o)
+#define SAVE_4EVRS(n,s,b,o) SAVE_2EVRS(n,s,b,o); SAVE_2EVRS(n+2,s,b,o)
+#define SAVE_8EVRS(n,s,b,o) SAVE_4EVRS(n,s,b,o); SAVE_4EVRS(n+4,s,b,o)
+#define SAVE_16EVRS(n,s,b,o) SAVE_8EVRS(n,s,b,o); SAVE_8EVRS(n+8,s,b,o)
+#define SAVE_32EVRS(n,s,b,o) SAVE_16EVRS(n,s,b,o); SAVE_16EVRS(n+16,s,b,o)
+#define REST_EVR(n,s,b,o) lwz s,o+4*(n)(b); evmergelo n,s,n
+#define REST_2EVRS(n,s,b,o) REST_EVR(n,s,b,o); REST_EVR(n+1,s,b,o)
+#define REST_4EVRS(n,s,b,o) REST_2EVRS(n,s,b,o); REST_2EVRS(n+2,s,b,o)
+#define REST_8EVRS(n,s,b,o) REST_4EVRS(n,s,b,o); REST_4EVRS(n+4,s,b,o)
+#define REST_16EVRS(n,s,b,o) REST_8EVRS(n,s,b,o); REST_8EVRS(n+8,s,b,o)
+#define REST_32EVRS(n,s,b,o) REST_16EVRS(n,s,b,o); REST_16EVRS(n+16,s,b,o)
/* Macros to adjust thread priority for hardware multithreading */
#define HMT_VERY_LOW or 31,31,31 # very low priority
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index d50c2b6d9bc..eb11a446720 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -20,6 +20,7 @@
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
+#include <linux/cache.h>
#include <asm/ptrace.h>
#include <asm/types.h>
@@ -156,6 +157,10 @@ struct thread_struct {
#endif
struct pt_regs *regs; /* Pointer to saved register state */
mm_segment_t fs; /* for get_fs() validation */
+#ifdef CONFIG_BOOKE
+ /* BookE base exception scratch space; align on cacheline */
+ unsigned long normsave[8] ____cacheline_aligned;
+#endif
#ifdef CONFIG_PPC32
void *pgdir; /* root of page-table tree */
#endif
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index c189aa5fe1f..b5c91901e38 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -18,24 +18,10 @@
*/
#include <linux/types.h>
#include <asm/irq.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define HAVE_ARCH_DEVTREE_FIXUPS
-#ifdef CONFIG_PPC32
-/*
- * PCI <-> OF matching functions
- * (XXX should these be here?)
- */
-struct pci_bus;
-struct pci_dev;
-extern int pci_device_from_OF_node(struct device_node *node,
- u8* bus, u8* devfn);
-extern struct device_node* pci_busdev_to_OF_node(struct pci_bus *, int);
-extern struct device_node* pci_device_to_OF_node(struct pci_dev *);
-extern void pci_create_OF_bus_map(void);
-#endif
-
/*
* OF address retrieval & translation
*/
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index c5cae0dd176..e8aaf6fce38 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -189,6 +189,9 @@
#define SPRN_CTR 0x009 /* Count Register */
#define SPRN_DSCR 0x11
#define SPRN_CFAR 0x1c /* Come From Address Register */
+#define SPRN_AMR 0x1d /* Authority Mask Register */
+#define SPRN_UAMOR 0x9d /* User Authority Mask Override Register */
+#define SPRN_AMOR 0x15d /* Authority Mask Override Register */
#define SPRN_ACOP 0x1F /* Available Coprocessor Register */
#define SPRN_CTRLF 0x088
#define SPRN_CTRLT 0x098
@@ -232,22 +235,28 @@
#define LPCR_VPM0 (1ul << (63-0))
#define LPCR_VPM1 (1ul << (63-1))
#define LPCR_ISL (1ul << (63-2))
+#define LPCR_VC_SH (63-2)
#define LPCR_DPFD_SH (63-11)
#define LPCR_VRMA_L (1ul << (63-12))
#define LPCR_VRMA_LP0 (1ul << (63-15))
#define LPCR_VRMA_LP1 (1ul << (63-16))
+#define LPCR_VRMASD_SH (63-16)
#define LPCR_RMLS 0x1C000000 /* impl dependent rmo limit sel */
+#define LPCR_RMLS_SH (63-37)
#define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */
#define LPCR_PECE 0x00007000 /* powersave exit cause enable */
#define LPCR_PECE0 0x00004000 /* ext. exceptions can cause exit */
#define LPCR_PECE1 0x00002000 /* decrementer can cause exit */
#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */
#define LPCR_MER 0x00000800 /* Mediated External Exception */
+#define LPCR_LPES 0x0000000c
#define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */
#define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */
+#define LPCR_LPES_SH 2
#define LPCR_RMI 0x00000002 /* real mode is cache inhibit */
#define LPCR_HDICE 0x00000001 /* Hyp Decr enable (HV,PR,EE) */
#define SPRN_LPID 0x13F /* Logical Partition Identifier */
+#define LPID_RSVD 0x3ff /* Reserved LPID for partn switching */
#define SPRN_HMER 0x150 /* Hardware m? error recovery */
#define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */
#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */
@@ -298,6 +307,7 @@
#define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */
#define SPRN_HASH2 0x3D3 /* Secondary Hash Address Resgister */
#define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */
+#define HID0_HDICE_SH (63 - 23) /* 970 HDEC interrupt enable */
#define HID0_EMCP (1<<31) /* Enable Machine Check pin */
#define HID0_EBA (1<<29) /* Enable Bus Address Parity */
#define HID0_EBD (1<<28) /* Enable Bus Data Parity */
@@ -353,6 +363,13 @@
#define SPRN_IABR2 0x3FA /* 83xx */
#define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */
#define SPRN_HID4 0x3F4 /* 970 HID4 */
+#define HID4_LPES0 (1ul << (63-0)) /* LPAR env. sel. bit 0 */
+#define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */
+#define HID4_LPID5_SH (63 - 6) /* partition ID bottom 4 bits */
+#define HID4_RMOR_SH (63 - 22) /* real mode offset (16 bits) */
+#define HID4_LPES1 (1 << (63-57)) /* LPAR env. sel. bit 1 */
+#define HID4_RMLS0_SH (63 - 58) /* Real mode limit top bit */
+#define HID4_LPID1_SH 0 /* partition ID top 2 bits */
#define SPRN_HID4_GEKKO 0x3F3 /* Gekko HID4 */
#define SPRN_HID5 0x3F6 /* 970 HID5 */
#define SPRN_HID6 0x3F9 /* BE HID 6 */
@@ -802,28 +819,28 @@
mfspr rX,SPRN_SPRG_PACA; \
FTR_SECTION_ELSE_NESTED(66); \
mfspr rX,SPRN_SPRG_HPACA; \
- ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66)
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
#define SET_PACA(rX) \
BEGIN_FTR_SECTION_NESTED(66); \
mtspr SPRN_SPRG_PACA,rX; \
FTR_SECTION_ELSE_NESTED(66); \
mtspr SPRN_SPRG_HPACA,rX; \
- ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66)
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
#define GET_SCRATCH0(rX) \
BEGIN_FTR_SECTION_NESTED(66); \
mfspr rX,SPRN_SPRG_SCRATCH0; \
FTR_SECTION_ELSE_NESTED(66); \
mfspr rX,SPRN_SPRG_HSCRATCH0; \
- ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66)
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
#define SET_SCRATCH0(rX) \
BEGIN_FTR_SECTION_NESTED(66); \
mtspr SPRN_SPRG_SCRATCH0,rX; \
FTR_SECTION_ELSE_NESTED(66); \
mtspr SPRN_SPRG_HSCRATCH0,rX; \
- ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66)
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
#else /* CONFIG_PPC_BOOK3S_64 */
#define GET_SCRATCH0(rX) mfspr rX,SPRN_SPRG_SCRATCH0
@@ -872,8 +889,8 @@
#define SPRN_SPRG_WSCRATCH2 SPRN_SPRG4W
#define SPRN_SPRG_RSCRATCH3 SPRN_SPRG5R
#define SPRN_SPRG_WSCRATCH3 SPRN_SPRG5W
-#define SPRN_SPRG_RSCRATCH_MC SPRN_SPRG6R
-#define SPRN_SPRG_WSCRATCH_MC SPRN_SPRG6W
+#define SPRN_SPRG_RSCRATCH_MC SPRN_SPRG1
+#define SPRN_SPRG_WSCRATCH_MC SPRN_SPRG1
#define SPRN_SPRG_RSCRATCH4 SPRN_SPRG7R
#define SPRN_SPRG_WSCRATCH4 SPRN_SPRG7W
#ifdef CONFIG_E200
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 0f0ad9fa01c..9ec0b39f9dd 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -318,6 +318,7 @@
#define ESR_ILK 0x00100000 /* Instr. Cache Locking */
#define ESR_PUO 0x00040000 /* Unimplemented Operation exception */
#define ESR_BO 0x00020000 /* Byte Ordering */
+#define ESR_SPV 0x00000080 /* Signal Processing operation */
/* Bit definitions related to the DBCR0. */
#if defined(CONFIG_40x)
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index dae19342f0b..186e0fb835b 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -3,4 +3,8 @@
#include <asm-generic/setup.h>
+#ifndef __ASSEMBLY__
+extern void ppc_printk_progress(char *s, unsigned short hex);
+#endif
+
#endif /* _ASM_POWERPC_SETUP_H */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 11eb404b560..15a70b7f638 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -30,7 +30,7 @@
#include <asm/percpu.h>
extern int boot_cpuid;
-extern int boot_cpu_count;
+extern int spinning_secondaries;
extern void cpu_die(void);
@@ -119,7 +119,6 @@ extern const char *smp_ipi_name[];
/* for irq controllers with only a single ipi */
extern void smp_muxed_ipi_set_data(int cpu, unsigned long data);
extern void smp_muxed_ipi_message_pass(int cpu, int msg);
-extern void smp_muxed_ipi_resend(void);
extern irqreturn_t smp_ipi_demux(void);
void smp_init_iSeries(void);
diff --git a/arch/powerpc/include/asm/smu.h b/arch/powerpc/include/asm/smu.h
index e3bdada8c54..ae20ce1af4c 100644
--- a/arch/powerpc/include/asm/smu.h
+++ b/arch/powerpc/include/asm/smu.h
@@ -547,7 +547,7 @@ struct smu_sdbp_header {
* (currently, afaik, this concerns only the FVT partition
* (0x12)
*/
-#define SMU_U16_MIX(x) le16_to_cpu(x);
+#define SMU_U16_MIX(x) le16_to_cpu(x)
#define SMU_U32_MIX(x) ((((x) & 0xff00ff00u) >> 8)|(((x) & 0x00ff00ffu) << 8))
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index 2dc595dda03..e30a13d1ee7 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -120,7 +120,6 @@ extern void do_dabr(struct pt_regs *regs, unsigned long address,
unsigned long error_code);
#endif
extern void print_backtrace(unsigned long *);
-extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
index 58580e94a2b..93e05d1b34b 100644
--- a/arch/powerpc/include/asm/udbg.h
+++ b/arch/powerpc/include/asm/udbg.h
@@ -40,6 +40,7 @@ extern void udbg_adb_init_early(void);
extern void __init udbg_early_init(void);
extern void __init udbg_init_debug_lpar(void);
+extern void __init udbg_init_debug_lpar_hvsi(void);
extern void __init udbg_init_pmac_realmode(void);
extern void __init udbg_init_maple_realmode(void);
extern void __init udbg_init_pas_realmode(void);
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index e8b981897d4..ce4f7f17911 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_MODULES) += module.o module_$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_44x) += cpu_setup_44x.o
obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o dbell.o
obj-$(CONFIG_PPC_BOOK3E_64) += dbell.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
extra-y := head_$(CONFIG_WORD_SIZE).o
extra-$(CONFIG_40x) := head_40x.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 36e1c8a29be..5f078bc2063 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -82,6 +82,9 @@ int main(void)
DEFINE(KSP, offsetof(struct thread_struct, ksp));
DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
+#ifdef CONFIG_BOOKE
+ DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));
+#endif
DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
@@ -128,6 +131,7 @@ int main(void)
DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
/* paca */
DEFINE(PACA_SIZE, sizeof(struct paca_struct));
+ DEFINE(PACA_LOCK_TOKEN, offsetof(struct paca_struct, lock_token));
DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
@@ -187,7 +191,9 @@ int main(void)
DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
+ DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use));
DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx));
+ DEFINE(LPPACA_YIELDCOUNT, offsetof(struct lppaca, yield_count));
DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx));
#endif /* CONFIG_PPC_STD_MMU_64 */
DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
@@ -198,11 +204,6 @@ int main(void)
DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
- DEFINE(PACA_KVM_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
- DEFINE(SVCPU_SLB, offsetof(struct kvmppc_book3s_shadow_vcpu, slb));
- DEFINE(SVCPU_SLB_MAX, offsetof(struct kvmppc_book3s_shadow_vcpu, slb_max));
-#endif
#endif /* CONFIG_PPC64 */
/* RTAS */
@@ -397,67 +398,160 @@ int main(void)
DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
+ DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
+ DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr));
+#ifdef CONFIG_ALTIVEC
+ DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr));
+ DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr));
+#endif
+#ifdef CONFIG_VSX
+ DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr));
+#endif
+ DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
+ DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
+ DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+ DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
+ DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr));
+ DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0));
+ DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1));
+ DEFINE(VCPU_SPRG0, offsetof(struct kvm_vcpu, arch.shregs.sprg0));
+ DEFINE(VCPU_SPRG1, offsetof(struct kvm_vcpu, arch.shregs.sprg1));
+ DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
+ DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
+#endif
DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
+ DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1));
DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
+ DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
/* book3s */
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
+ DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
+ DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
+ DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
+ DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
+ DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
+ DEFINE(KVM_ONLINE_CPUS, offsetof(struct kvm, online_vcpus.counter));
+ DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu));
+ DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
+ DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor));
+ DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
+ DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
+#endif
#ifdef CONFIG_PPC_BOOK3S
+ DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
+ DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
- DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
+ DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
+ DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
+ DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
+ DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
+ DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
+ DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
+ DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
+ DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
+ DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
+ DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions));
+ DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa));
+ DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
+ DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
+ DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
+ DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
+ DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
+ DEFINE(VCPU_LAST_CPU, offsetof(struct kvm_vcpu, arch.last_cpu));
+ DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
+ DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
+ DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
+ DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
+ DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
+ DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
+ DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
+ DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
offsetof(struct kvmppc_vcpu_book3s, vcpu));
- DEFINE(SVCPU_CR, offsetof(struct kvmppc_book3s_shadow_vcpu, cr));
- DEFINE(SVCPU_XER, offsetof(struct kvmppc_book3s_shadow_vcpu, xer));
- DEFINE(SVCPU_CTR, offsetof(struct kvmppc_book3s_shadow_vcpu, ctr));
- DEFINE(SVCPU_LR, offsetof(struct kvmppc_book3s_shadow_vcpu, lr));
- DEFINE(SVCPU_PC, offsetof(struct kvmppc_book3s_shadow_vcpu, pc));
- DEFINE(SVCPU_R0, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[0]));
- DEFINE(SVCPU_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[1]));
- DEFINE(SVCPU_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[2]));
- DEFINE(SVCPU_R3, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[3]));
- DEFINE(SVCPU_R4, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[4]));
- DEFINE(SVCPU_R5, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[5]));
- DEFINE(SVCPU_R6, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[6]));
- DEFINE(SVCPU_R7, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[7]));
- DEFINE(SVCPU_R8, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[8]));
- DEFINE(SVCPU_R9, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[9]));
- DEFINE(SVCPU_R10, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[10]));
- DEFINE(SVCPU_R11, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[11]));
- DEFINE(SVCPU_R12, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[12]));
- DEFINE(SVCPU_R13, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[13]));
- DEFINE(SVCPU_HOST_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r1));
- DEFINE(SVCPU_HOST_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r2));
- DEFINE(SVCPU_VMHANDLER, offsetof(struct kvmppc_book3s_shadow_vcpu,
- vmhandler));
- DEFINE(SVCPU_SCRATCH0, offsetof(struct kvmppc_book3s_shadow_vcpu,
- scratch0));
- DEFINE(SVCPU_SCRATCH1, offsetof(struct kvmppc_book3s_shadow_vcpu,
- scratch1));
- DEFINE(SVCPU_IN_GUEST, offsetof(struct kvmppc_book3s_shadow_vcpu,
- in_guest));
- DEFINE(SVCPU_FAULT_DSISR, offsetof(struct kvmppc_book3s_shadow_vcpu,
- fault_dsisr));
- DEFINE(SVCPU_FAULT_DAR, offsetof(struct kvmppc_book3s_shadow_vcpu,
- fault_dar));
- DEFINE(SVCPU_LAST_INST, offsetof(struct kvmppc_book3s_shadow_vcpu,
- last_inst));
- DEFINE(SVCPU_SHADOW_SRR1, offsetof(struct kvmppc_book3s_shadow_vcpu,
- shadow_srr1));
+ DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
+ DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
+ DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_KVM_BOOK3S_PR
+# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
+#else
+# define SVCPU_FIELD(x, f)
+#endif
+# define HSTATE_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f))
+#else /* 32-bit */
+# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f))
+# define HSTATE_FIELD(x, f) DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, hstate.f))
+#endif
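As a quick expansion sketch: with CONFIG_PPC_BOOK3S_64 and CONFIG_KVM_BOOK3S_PR both set, SVCPU_FIELD(SVCPU_CR, cr) produces

	DEFINE(SVCPU_CR, offsetof(struct paca_struct, shadow_vcpu.cr));

while the 32-bit variant produces

	DEFINE(SVCPU_CR, offsetof(struct kvmppc_book3s_shadow_vcpu, cr));

so the asm-visible SVCPU_* constants always track wherever the shadow vcpu actually lives, and HSTATE_* constants likewise follow the host-state struct.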
+
+ SVCPU_FIELD(SVCPU_CR, cr);
+ SVCPU_FIELD(SVCPU_XER, xer);
+ SVCPU_FIELD(SVCPU_CTR, ctr);
+ SVCPU_FIELD(SVCPU_LR, lr);
+ SVCPU_FIELD(SVCPU_PC, pc);
+ SVCPU_FIELD(SVCPU_R0, gpr[0]);
+ SVCPU_FIELD(SVCPU_R1, gpr[1]);
+ SVCPU_FIELD(SVCPU_R2, gpr[2]);
+ SVCPU_FIELD(SVCPU_R3, gpr[3]);
+ SVCPU_FIELD(SVCPU_R4, gpr[4]);
+ SVCPU_FIELD(SVCPU_R5, gpr[5]);
+ SVCPU_FIELD(SVCPU_R6, gpr[6]);
+ SVCPU_FIELD(SVCPU_R7, gpr[7]);
+ SVCPU_FIELD(SVCPU_R8, gpr[8]);
+ SVCPU_FIELD(SVCPU_R9, gpr[9]);
+ SVCPU_FIELD(SVCPU_R10, gpr[10]);
+ SVCPU_FIELD(SVCPU_R11, gpr[11]);
+ SVCPU_FIELD(SVCPU_R12, gpr[12]);
+ SVCPU_FIELD(SVCPU_R13, gpr[13]);
+ SVCPU_FIELD(SVCPU_FAULT_DSISR, fault_dsisr);
+ SVCPU_FIELD(SVCPU_FAULT_DAR, fault_dar);
+ SVCPU_FIELD(SVCPU_LAST_INST, last_inst);
+ SVCPU_FIELD(SVCPU_SHADOW_SRR1, shadow_srr1);
#ifdef CONFIG_PPC_BOOK3S_32
- DEFINE(SVCPU_SR, offsetof(struct kvmppc_book3s_shadow_vcpu, sr));
+ SVCPU_FIELD(SVCPU_SR, sr);
#endif
-#else
+#ifdef CONFIG_PPC64
+ SVCPU_FIELD(SVCPU_SLB, slb);
+ SVCPU_FIELD(SVCPU_SLB_MAX, slb_max);
+#endif
+
+ HSTATE_FIELD(HSTATE_HOST_R1, host_r1);
+ HSTATE_FIELD(HSTATE_HOST_R2, host_r2);
+ HSTATE_FIELD(HSTATE_HOST_MSR, host_msr);
+ HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
+ HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
+ HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
+ HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
+
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
+ HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
+ HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
+ HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
+ HSTATE_FIELD(HSTATE_PMC, host_pmc);
+ HSTATE_FIELD(HSTATE_PURR, host_purr);
+ HSTATE_FIELD(HSTATE_SPURR, host_spurr);
+ HSTATE_FIELD(HSTATE_DSCR, host_dscr);
+ HSTATE_FIELD(HSTATE_DABR, dabr);
+ HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
+
+#else /* CONFIG_PPC_BOOK3S */
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
@@ -467,7 +561,7 @@ int main(void)
DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
#endif /* CONFIG_PPC_BOOK3S */
-#endif
+#endif /* CONFIG_KVM */
#ifdef CONFIG_KVM_GUEST
DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared,
@@ -497,6 +591,13 @@ int main(void)
DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
#endif
+#if defined(CONFIG_KVM) && defined(CONFIG_SPE)
+ DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0]));
+ DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc));
+ DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr));
+ DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr));
+#endif
+
#ifdef CONFIG_KVM_EXIT_TIMING
DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
arch.timing_exit.tv32.tbu));
diff --git a/arch/powerpc/kernel/cpu_setup_power7.S b/arch/powerpc/kernel/cpu_setup_power7.S
index 4f9a93fcfe0..76797c5105d 100644
--- a/arch/powerpc/kernel/cpu_setup_power7.S
+++ b/arch/powerpc/kernel/cpu_setup_power7.S
@@ -45,12 +45,12 @@ _GLOBAL(__restore_cpu_power7)
blr
__init_hvmode_206:
- /* Disable CPU_FTR_HVMODE_206 and exit if MSR:HV is not set */
+ /* Disable CPU_FTR_HVMODE and exit if MSR:HV is not set */
mfmsr r3
rldicl. r0,r3,4,63
bnelr
ld r5,CPU_SPEC_FEATURES(r4)
- LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE_206)
+ LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE)
xor r5,r5,r6
std r5,CPU_SPEC_FEATURES(r4)
blr
@@ -61,19 +61,23 @@ __init_LPCR:
* LPES = 0b01 (HSRR0/1 used for 0x500)
* PECE = 0b111
* DPFD = 4
+ * HDICE = 0
+ * VC = 0b100 (VPM0=1, VPM1=0, ISL=0)
+ * VRMASD = 0b10000 (L=1, LP=00)
*
* Other bits untouched for now
*/
mfspr r3,SPRN_LPCR
- ori r3,r3,(LPCR_LPES0|LPCR_LPES1)
- xori r3,r3, LPCR_LPES0
+ li r5,1
+ rldimi r3,r5, LPCR_LPES_SH, 64-LPCR_LPES_SH-2
ori r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2)
- li r5,7
- sldi r5,r5,LPCR_DPFD_SH
- andc r3,r3,r5
li r5,4
- sldi r5,r5,LPCR_DPFD_SH
- or r3,r3,r5
+ rldimi r3,r5, LPCR_DPFD_SH, 64-LPCR_DPFD_SH-3
+ clrrdi r3,r3,1 /* clear HDICE */
+ li r5,4
+ rldimi r3,r5, LPCR_VC_SH, 0
+ li r5,0x10
+ rldimi r3,r5, LPCR_VRMASD_SH, 64-LPCR_VRMASD_SH-5
mtspr SPRN_LPCR,r3
isync
blr
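A rough C rendering of the __init_LPCR sequence above, using the LPCR_* shift constants this patch adds to reg.h. This is a sketch only: init_lpcr() is a hypothetical name, the real code must stay in assembly this early in boot, and mfspr()/mtspr() are assumed to be the usual reg.h accessor macros.

/* Sketch: same field settings as __init_LPCR (LPES=0b01, PECE=0b111,
 * DPFD=4, HDICE=0, VPM0=1/VPM1=0/ISL=0, VRMASD=0b10000). */
static void init_lpcr(void)
{
	unsigned long lpcr = mfspr(SPRN_LPCR);

	lpcr = (lpcr & ~(3ul << LPCR_LPES_SH)) | (1ul << LPCR_LPES_SH);	/* LPES = 0b01 */
	lpcr |= LPCR_PECE0 | LPCR_PECE1 | LPCR_PECE2;			/* PECE = 0b111 */
	lpcr = (lpcr & ~(7ul << LPCR_DPFD_SH)) | (4ul << LPCR_DPFD_SH);	/* DPFD = 4 */
	lpcr &= ~LPCR_HDICE;						/* HDICE = 0 */
	lpcr = (lpcr & ~(7ul << LPCR_VC_SH)) | (4ul << LPCR_VC_SH);	/* VPM0=1 */
	lpcr = (lpcr & ~(0x1ful << LPCR_VRMASD_SH))
		| (0x10ul << LPCR_VRMASD_SH);				/* L=1, LP=00 */
	mtspr(SPRN_LPCR, lpcr);
}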
diff --git a/arch/powerpc/kernel/cpu_setup_ppc970.S b/arch/powerpc/kernel/cpu_setup_ppc970.S
index 27f2507279d..12fac8df01c 100644
--- a/arch/powerpc/kernel/cpu_setup_ppc970.S
+++ b/arch/powerpc/kernel/cpu_setup_ppc970.S
@@ -76,7 +76,7 @@ _GLOBAL(__setup_cpu_ppc970)
/* Do nothing if not running in HV mode */
mfmsr r0
rldicl. r0,r0,4,63
- beqlr
+ beq no_hv_mode
mfspr r0,SPRN_HID0
li r11,5 /* clear DOZE and SLEEP */
@@ -90,7 +90,7 @@ _GLOBAL(__setup_cpu_ppc970MP)
/* Do nothing if not running in HV mode */
mfmsr r0
rldicl. r0,r0,4,63
- beqlr
+ beq no_hv_mode
mfspr r0,SPRN_HID0
li r11,0x15 /* clear DOZE and SLEEP */
@@ -109,6 +109,14 @@ load_hids:
sync
isync
+ /* Try to set LPES = 01 in HID4 */
+ mfspr r0,SPRN_HID4
+ clrldi r0,r0,1 /* clear LPES0 */
+ ori r0,r0,HID4_LPES1 /* set LPES1 */
+ sync
+ mtspr SPRN_HID4,r0
+ isync
+
/* Save away cpu state */
LOAD_REG_ADDR(r5,cpu_state_storage)
@@ -117,11 +125,21 @@ load_hids:
std r3,CS_HID0(r5)
mfspr r3,SPRN_HID1
std r3,CS_HID1(r5)
- mfspr r3,SPRN_HID4
- std r3,CS_HID4(r5)
+ mfspr r4,SPRN_HID4
+ std r4,CS_HID4(r5)
mfspr r3,SPRN_HID5
std r3,CS_HID5(r5)
+ /* See if we successfully set LPES1 to 1; if not we are in Apple mode */
+ andi. r4,r4,HID4_LPES1
+ bnelr
+
+no_hv_mode:
+ /* Disable CPU_FTR_HVMODE and exit, since we don't have HV mode */
+ ld r5,CPU_SPEC_FEATURES(r4)
+ LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE)
+ andc r5,r5,r6
+ std r5,CPU_SPEC_FEATURES(r4)
blr
/* Called with no MMU context (typically MSR:IR/DR off) to
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 4e6ee944495..cc6a9d5d69a 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -242,12 +242,8 @@ static void crash_kexec_wait_realmode(int cpu)
while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
barrier();
- if (!cpu_possible(i)) {
+ if (!cpu_possible(i) || !cpu_online(i) || (msecs <= 0))
break;
- }
- if (!cpu_online(i)) {
- break;
- }
msecs--;
mdelay(1);
}
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index d238c082c3c..4f0959fbfbe 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -161,9 +161,7 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
if (ppc_md.dma_set_mask)
return ppc_md.dma_set_mask(dev, dma_mask);
- if (unlikely(dma_ops == NULL))
- return -EIO;
- if (dma_ops->set_dma_mask != NULL)
+ if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
return dma_ops->set_dma_mask(dev, dma_mask);
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
return -EIO;
diff --git a/arch/powerpc/kernel/e500-pmu.c b/arch/powerpc/kernel/e500-pmu.c
index b150b510510..cb2e2949c8d 100644
--- a/arch/powerpc/kernel/e500-pmu.c
+++ b/arch/powerpc/kernel/e500-pmu.c
@@ -75,6 +75,11 @@ static int e500_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
+ [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { -1, -1 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
};
static int num_events = 128;
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index d24d4400cc7..429983c06f9 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -120,6 +120,12 @@
std r14,PACA_EXMC+EX_R14(r13); \
std r15,PACA_EXMC+EX_R15(r13)
+#define PROLOG_ADDITION_DOORBELL_GEN \
+ lbz r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \
+ cmpwi cr0,r11,0; /* yes -> go out of line */ \
+ beq masked_doorbell_book3e
+
+
/* Core exception code for all exceptions except TLB misses.
* XXX: Needs to make SPRN_SPRG_GEN depend on exception type
*/
@@ -522,7 +528,13 @@ kernel_dbg_exc:
MASKABLE_EXCEPTION(0x260, perfmon, .performance_monitor_exception, ACK_NONE)
/* Doorbell interrupt */
- MASKABLE_EXCEPTION(0x2070, doorbell, .doorbell_exception, ACK_NONE)
+ START_EXCEPTION(doorbell)
+ NORMAL_EXCEPTION_PROLOG(0x2070, PROLOG_ADDITION_DOORBELL)
+ EXCEPTION_COMMON(0x2070, PACA_EXGEN, INTS_DISABLE_ALL)
+ CHECK_NAPPING()
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .doorbell_exception
+ b .ret_from_except_lite
/* Doorbell critical Interrupt */
START_EXCEPTION(doorbell_crit);
@@ -545,8 +557,16 @@ kernel_dbg_exc:
* An interrupt came in while soft-disabled; clear EE in SRR1,
* clear paca->hard_enabled and return.
*/
+masked_doorbell_book3e:
+ mtcr r10
+ /* Resend the doorbell to fire again when ints enabled */
+ mfspr r10,SPRN_PIR
+ PPC_MSGSND(r10)
+ b masked_interrupt_book3e_common
+
masked_interrupt_book3e:
mtcr r10
+masked_interrupt_book3e_common:
stb r11,PACAHARDIRQEN(r13)
mfspr r10,SPRN_SRR1
rldicl r11,r10,48,1 /* clear MSR_EE */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a85f4874cba..41b02c792aa 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -40,7 +40,6 @@ __start_interrupts:
.globl system_reset_pSeries;
system_reset_pSeries:
HMT_MEDIUM;
- DO_KVM 0x100;
SET_SCRATCH0(r13)
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
@@ -50,82 +49,73 @@ BEGIN_FTR_SECTION
* state loss at this time.
*/
mfspr r13,SPRN_SRR1
- rlwinm r13,r13,47-31,30,31
- cmpwi cr0,r13,1
- bne 1f
- b .power7_wakeup_noloss
-1: cmpwi cr0,r13,2
- bne 1f
- b .power7_wakeup_loss
+ rlwinm. r13,r13,47-31,30,31
+ beq 9f
+
+ /* waking up from powersave (nap) state */
+ cmpwi cr1,r13,2
/* Total loss of HV state is fatal, we could try to use the
* PIR to locate a PACA, then use an emergency stack etc...
* but for now, let's just stay stuck here
*/
-1: cmpwi cr0,r13,3
- beq .
-END_FTR_SECTION_IFSET(CPU_FTR_HVMODE_206)
+ bgt cr1,.
+ GET_PACA(r13)
+
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ lbz r0,PACAPROCSTART(r13)
+ cmpwi r0,0x80
+ bne 1f
+ li r0,0
+ stb r0,PACAPROCSTART(r13)
+ b kvm_start_guest
+1:
+#endif
+
+ beq cr1,2f
+ b .power7_wakeup_noloss
+2: b .power7_wakeup_loss
+9:
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD)
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+ NOTEST, 0x100)
. = 0x200
-_machine_check_pSeries:
- HMT_MEDIUM
- DO_KVM 0x200
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD)
+machine_check_pSeries_1:
+ /* This is moved out of line as it can be patched by FW, but
+ * some code path might still want to branch into the original
+ * vector
+ */
+ b machine_check_pSeries
. = 0x300
.globl data_access_pSeries
data_access_pSeries:
HMT_MEDIUM
- DO_KVM 0x300
SET_SCRATCH0(r13)
+#ifndef CONFIG_POWER4_ONLY
BEGIN_FTR_SECTION
- GET_PACA(r13)
- std r9,PACA_EXSLB+EX_R9(r13)
- std r10,PACA_EXSLB+EX_R10(r13)
- mfspr r10,SPRN_DAR
- mfspr r9,SPRN_DSISR
- srdi r10,r10,60
- rlwimi r10,r9,16,0x20
- mfcr r9
- cmpwi r10,0x2c
- beq do_stab_bolted_pSeries
- ld r10,PACA_EXSLB+EX_R10(r13)
- std r11,PACA_EXGEN+EX_R11(r13)
- ld r11,PACA_EXSLB+EX_R9(r13)
- std r12,PACA_EXGEN+EX_R12(r13)
- GET_SCRATCH0(r12)
- std r10,PACA_EXGEN+EX_R10(r13)
- std r11,PACA_EXGEN+EX_R9(r13)
- std r12,PACA_EXGEN+EX_R13(r13)
- EXCEPTION_PROLOG_PSERIES_1(data_access_common, EXC_STD)
-FTR_SECTION_ELSE
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD)
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
+ b data_access_check_stab
+data_access_not_stab:
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
+#endif
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
+ KVMTEST_PR, 0x300)
. = 0x380
.globl data_access_slb_pSeries
data_access_slb_pSeries:
HMT_MEDIUM
- DO_KVM 0x380
SET_SCRATCH0(r13)
- GET_PACA(r13)
+ EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_DAR
- std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
- mfcr r9
#ifdef __DISABLED__
/* Keep that around for when we re-implement dynamic VSIDs */
cmpdi r3,0
bge slb_miss_user_pseries
#endif /* __DISABLED__ */
- std r10,PACA_EXSLB+EX_R10(r13)
- std r11,PACA_EXSLB+EX_R11(r13)
- std r12,PACA_EXSLB+EX_R12(r13)
- GET_SCRATCH0(r10)
- std r10,PACA_EXSLB+EX_R13(r13)
- mfspr r12,SPRN_SRR1 /* and SRR1 */
+ mfspr r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
b .slb_miss_realmode
#else
@@ -147,24 +137,16 @@ data_access_slb_pSeries:
.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
HMT_MEDIUM
- DO_KVM 0x480
SET_SCRATCH0(r13)
- GET_PACA(r13)
+ EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
- std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
- mfcr r9
#ifdef __DISABLED__
/* Keep that around for when we re-implement dynamic VSIDs */
cmpdi r3,0
bge slb_miss_user_pseries
#endif /* __DISABLED__ */
- std r10,PACA_EXSLB+EX_R10(r13)
- std r11,PACA_EXSLB+EX_R11(r13)
- std r12,PACA_EXSLB+EX_R12(r13)
- GET_SCRATCH0(r10)
- std r10,PACA_EXSLB+EX_R13(r13)
- mfspr r12,SPRN_SRR1 /* and SRR1 */
+ mfspr r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
b .slb_miss_realmode
#else
@@ -184,26 +166,46 @@ instruction_access_slb_pSeries:
hardware_interrupt_pSeries:
hardware_interrupt_hv:
BEGIN_FTR_SECTION
- _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD)
+ _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
+ EXC_HV, SOFTEN_TEST_HV)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
FTR_SECTION_ELSE
- _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV)
- ALT_FTR_SECTION_END_IFCLR(CPU_FTR_HVMODE_206)
+ _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
+ EXC_STD, SOFTEN_TEST_HV_201)
+ KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
+
STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
+
STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
- MASKABLE_EXCEPTION_HV(0x980, 0x980, decrementer)
+ MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)
STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
+
STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
. = 0xc00
.globl system_call_pSeries
system_call_pSeries:
HMT_MEDIUM
- DO_KVM 0xc00
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+ SET_SCRATCH0(r13)
+ GET_PACA(r13)
+ std r9,PACA_EXGEN+EX_R9(r13)
+ std r10,PACA_EXGEN+EX_R10(r13)
+ mfcr r9
+ KVMTEST(0xc00)
+ GET_SCRATCH0(r13)
+#endif
BEGIN_FTR_SECTION
cmpdi r0,0x1ebe
beq- 1f
@@ -220,6 +222,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
rfid
b . /* prevent speculative execution */
+ KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
+
/* Fast LE/BE switch system call */
1: mfspr r12,SPRN_SRR1
xori r12,r12,MSR_LE
@@ -228,6 +232,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
b .
STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
* out of line to handle them
@@ -262,30 +267,93 @@ vsx_unavailable_pSeries_1:
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
+ KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
#endif /* CONFIG_CBE_RAS */
+
STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
+ KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
+
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
+ KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
#endif /* CONFIG_CBE_RAS */
+
STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
+
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
+ KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
#endif /* CONFIG_CBE_RAS */
. = 0x3000
/*** Out of line interrupts support ***/
+ /* moved from 0x200 */
+machine_check_pSeries:
+ .globl machine_check_fwnmi
+machine_check_fwnmi:
+ HMT_MEDIUM
+ SET_SCRATCH0(r13) /* save r13 */
+ EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common,
+ EXC_STD, KVMTEST, 0x200)
+ KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
+
+#ifndef CONFIG_POWER4_ONLY
+ /* moved from 0x300 */
+data_access_check_stab:
+ GET_PACA(r13)
+ std r9,PACA_EXSLB+EX_R9(r13)
+ std r10,PACA_EXSLB+EX_R10(r13)
+ mfspr r10,SPRN_DAR
+ mfspr r9,SPRN_DSISR
+ srdi r10,r10,60
+ rlwimi r10,r9,16,0x20
+#ifdef CONFIG_KVM_BOOK3S_PR
+ lbz r9,HSTATE_IN_GUEST(r13)
+ rlwimi r10,r9,8,0x300
+#endif
+ mfcr r9
+ cmpwi r10,0x2c
+ beq do_stab_bolted_pSeries
+ mtcrf 0x80,r9
+ ld r9,PACA_EXSLB+EX_R9(r13)
+ ld r10,PACA_EXSLB+EX_R10(r13)
+ b data_access_not_stab
+do_stab_bolted_pSeries:
+ std r11,PACA_EXSLB+EX_R11(r13)
+ std r12,PACA_EXSLB+EX_R12(r13)
+ GET_SCRATCH0(r10)
+ std r10,PACA_EXSLB+EX_R13(r13)
+ EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
+#endif /* CONFIG_POWER4_ONLY */
+
+ KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x300)
+ KVM_HANDLER_PR_SKIP(PACA_EXSLB, EXC_STD, 0x380)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
+ KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
+
+ .align 7
/* moved from 0xe00 */
- STD_EXCEPTION_HV(., 0xe00, h_data_storage)
- STD_EXCEPTION_HV(., 0xe20, h_instr_storage)
- STD_EXCEPTION_HV(., 0xe40, emulation_assist)
- STD_EXCEPTION_HV(., 0xe60, hmi_exception) /* need to flush cache ? */
+ STD_EXCEPTION_HV(., 0xe02, h_data_storage)
+ KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
+ STD_EXCEPTION_HV(., 0xe22, h_instr_storage)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
+ STD_EXCEPTION_HV(., 0xe42, emulation_assist)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
+ STD_EXCEPTION_HV(., 0xe62, hmi_exception) /* need to flush cache ? */
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
/* moved from 0xf00 */
STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
/*
* An interrupt came in while soft-disabled; clear EE in SRR1,
@@ -317,14 +385,6 @@ masked_Hinterrupt:
hrfid
b .
- .align 7
-do_stab_bolted_pSeries:
- std r11,PACA_EXSLB+EX_R11(r13)
- std r12,PACA_EXSLB+EX_R12(r13)
- GET_SCRATCH0(r10)
- std r10,PACA_EXSLB+EX_R13(r13)
- EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
-
#ifdef CONFIG_PPC_PSERIES
/*
* Vectors for the FWNMI option. Share common code.
@@ -334,14 +394,8 @@ do_stab_bolted_pSeries:
system_reset_fwnmi:
HMT_MEDIUM
SET_SCRATCH0(r13) /* save r13 */
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD)
-
- .globl machine_check_fwnmi
- .align 7
-machine_check_fwnmi:
- HMT_MEDIUM
- SET_SCRATCH0(r13) /* save r13 */
- EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD)
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+ NOTEST, 0x100)
#endif /* CONFIG_PPC_PSERIES */
@@ -376,7 +430,11 @@ slb_miss_user_pseries:
/* KVM's trampoline code needs to be close to the interrupt handlers */
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_PR
#include "../kvm/book3s_rmhandlers.S"
+#else
+#include "../kvm/book3s_hv_rmhandlers.S"
+#endif
#endif
.align 7
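
Note on the relocated 0x300 fast path above: data_access_check_stab keeps the old trick of folding the interesting state into a single compare. srdi keeps the top nibble of DAR, rlwimi rotates DSISR and inserts one of its bits under mask 0x20 (and, when CONFIG_KVM_BOOK3S_PR is set, a second rlwimi folds in the HSTATE_IN_GUEST byte so a fault taken while a guest is running is not treated as a bolted-STAB miss), then cmpwi checks for 0x2c. A tiny user-space model of the rlwimi rotate-and-insert primitive used here, with made-up register values, is sketched below; it is illustrative only.

    #include <stdint.h>
    #include <stdio.h>

    /* Model of the PowerPC rlwimi rA,rS,SH,mask primitive:
     * rotate rS left by SH bits and insert it into rA under 'mask'. */
    static uint32_t rotl32(uint32_t v, unsigned int sh)
    {
            sh &= 31;
            return sh ? (v << sh) | (v >> (32 - sh)) : v;
    }

    static uint32_t rlwimi(uint32_t ra, uint32_t rs, unsigned int sh, uint32_t mask)
    {
            return (ra & ~mask) | (rotl32(rs, sh) & mask);
    }

    int main(void)
    {
            uint64_t dar = 0xc000000000123456ULL;   /* made-up kernel address */
            uint32_t dsisr = 0x00200000;            /* made-up DSISR with the tested bit set */
            uint32_t r10 = (uint32_t)(dar >> 60);   /* srdi r10,r10,60 */

            r10 = rlwimi(r10, dsisr, 16, 0x20);     /* rlwimi r10,r9,16,0x20 */
            printf("compare value: 0x%x (do_stab_bolted path taken when this is 0x2c)\n", r10);
            return 0;
    }
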
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 5e12b741ba5..f8e971ba94f 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -93,6 +93,30 @@ _ENTRY(_start);
bl early_init
+#ifdef CONFIG_RELOCATABLE
+ /*
+ * r25 will contain RPN/ERPN for the start address of memory
+ *
+ * Add the difference between KERNELBASE and PAGE_OFFSET to the
+ * start of physical memory to get kernstart_addr.
+ */
+ lis r3,kernstart_addr@ha
+ la r3,kernstart_addr@l(r3)
+
+ lis r4,KERNELBASE@h
+ ori r4,r4,KERNELBASE@l
+ lis r5,PAGE_OFFSET@h
+ ori r5,r5,PAGE_OFFSET@l
+ subf r4,r5,r4
+
+ rlwinm r6,r25,0,28,31 /* ERPN */
+ rlwinm r7,r25,0,0,3 /* RPN - assuming 256 MB page size */
+ add r7,r7,r4
+
+ stw r6,0(r3)
+ stw r7,4(r3)
+#endif
+
/*
* Decide what sort of machine this is and initialize the MMU.
*/
@@ -1001,9 +1025,6 @@ clear_utlb_entry:
lis r3,PAGE_OFFSET@h
ori r3,r3,PAGE_OFFSET@l
- /* Kernel is at the base of RAM */
- li r4, 0 /* Load the kernel physical address */
-
/* Load the kernel PID = 0 */
li r0,0
mtspr SPRN_PID,r0
@@ -1013,9 +1034,8 @@ clear_utlb_entry:
clrrwi r3,r3,12 /* Mask off the effective page number */
ori r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M
- /* Word 1 */
- clrrwi r4,r4,12 /* Mask off the real page number */
- /* ERPN is 0 for first 4GB page */
+ /* Word 1 - use r25. RPN is the same as the original entry */
+
/* Word 2 */
li r5,0
ori r5,r5,PPC47x_TLB2_S_RWX
@@ -1026,7 +1046,7 @@ clear_utlb_entry:
/* We write to way 0 and bolted 0 */
lis r0,0x8800
tlbwe r3,r0,0
- tlbwe r4,r0,1
+ tlbwe r25,r0,1
tlbwe r5,r0,2
/*
@@ -1124,7 +1144,13 @@ head_start_common:
lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */
mtspr SPRN_IVPR,r4
- addis r22,r22,KERNELBASE@h
+ /*
+ * If the kernel was loaded at a non-zero 256 MB page, we need to
+ * mask off the most significant 4 bits to get the relative address
+ * from the start of physical memory
+ */
+ rlwinm r22,r22,0,4,31
+ addis r22,r22,PAGE_OFFSET@h
mtlr r22
isync
blr
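
The relocatable 44x code above derives kernstart_addr (a 64-bit ERPN:RPN physical address) from r25, which holds the RPN/ERPN for the start of physical memory, plus the fixed KERNELBASE - PAGE_OFFSET delta. A rough user-space sketch of that arithmetic, with placeholder constants (the real values come from the kernel configuration and from the boot code that sets up r25):

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder values for illustration only. */
    #define KERNELBASE      0xc0000000u
    #define PAGE_OFFSET     0xc0000000u

    int main(void)
    {
            uint32_t r25 = 0x20000001;              /* made-up: RPN 0x20000000, ERPN 1 */
            uint32_t delta = KERNELBASE - PAGE_OFFSET;

            uint32_t erpn = r25 & 0x0000000f;       /* rlwinm r6,r25,0,28,31 */
            uint32_t rpn  = r25 & 0xf0000000;       /* rlwinm r7,r25,0,0,3 - 256 MB page */
            uint64_t kernstart_addr = ((uint64_t)erpn << 32) | (rpn + delta);

            printf("kernstart_addr = 0x%llx\n", (unsigned long long)kernstart_addr);
            return 0;
    }
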
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index ba504099844..3564c49c683 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -255,7 +255,7 @@ generic_secondary_common_init:
mtctr r23
bctrl
-3: LOAD_REG_ADDR(r3, boot_cpu_count) /* Decrement boot_cpu_count */
+3: LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */
lwarx r4,0,r3
subi r4,r4,1
stwcx. r4,0,r3
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index a0bf158c8b4..fc921bf62e1 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -20,33 +20,43 @@
addi reg,reg,val@l
#endif
+/*
+ * Macro used to get to thread save registers.
+ * Note that entries 0-3 are used for the prolog code, and the remaining
+ * entries are available for specific exception use in the event a handler
+ * requires more than 4 scratch registers.
+ */
+#define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4))
+
#define NORMAL_EXCEPTION_PROLOG \
- mtspr SPRN_SPRG_WSCRATCH0,r10;/* save two registers to work with */\
- mtspr SPRN_SPRG_WSCRATCH1,r11; \
- mtspr SPRN_SPRG_WSCRATCH2,r1; \
- mfcr r10; /* save CR in r10 for now */\
+ mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \
+ mfspr r10, SPRN_SPRG_THREAD; \
+ stw r11, THREAD_NORMSAVE(0)(r10); \
+ stw r13, THREAD_NORMSAVE(2)(r10); \
+ mfcr r13; /* save CR in r13 for now */\
mfspr r11,SPRN_SRR1; /* check whether user or kernel */\
andi. r11,r11,MSR_PR; \
+ mr r11, r1; \
beq 1f; \
- mfspr r1,SPRN_SPRG_THREAD; /* if from user, start at top of */\
- lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\
- ALLOC_STACK_FRAME(r1, THREAD_SIZE); \
-1: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\
- mr r11,r1; \
- stw r10,_CCR(r11); /* save various registers */\
+ /* if from user, start at top of this thread's kernel stack */ \
+ lwz r11, THREAD_INFO-THREAD(r10); \
+ ALLOC_STACK_FRAME(r11, THREAD_SIZE); \
+1 : subi r11, r11, INT_FRAME_SIZE; /* Allocate exception frame */ \
+ stw r13, _CCR(r11); /* save various registers */ \
stw r12,GPR12(r11); \
stw r9,GPR9(r11); \
- mfspr r10,SPRN_SPRG_RSCRATCH0; \
- stw r10,GPR10(r11); \
- mfspr r12,SPRN_SPRG_RSCRATCH1; \
+ mfspr r13, SPRN_SPRG_RSCRATCH0; \
+ stw r13, GPR10(r11); \
+ lwz r12, THREAD_NORMSAVE(0)(r10); \
stw r12,GPR11(r11); \
+ lwz r13, THREAD_NORMSAVE(2)(r10); /* restore r13 */ \
mflr r10; \
stw r10,_LINK(r11); \
- mfspr r10,SPRN_SPRG_RSCRATCH2; \
mfspr r12,SPRN_SRR0; \
- stw r10,GPR1(r11); \
+ stw r1, GPR1(r11); \
mfspr r9,SPRN_SRR1; \
- stw r10,0(r11); \
+ stw r1, 0(r11); \
+ mr r1, r11; \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
stw r0,GPR0(r11); \
lis r10, STACK_FRAME_REGS_MARKER@ha;/* exception frame marker */ \
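
The reworked prolog above stops using the extra SPRG scratch registers and instead spills into a small per-thread save area reached through SPRN_SPRG_THREAD. A hedged sketch of the offset arithmetic behind THREAD_NORMSAVE, assuming THREAD_NORMSAVES is an asm-offsets constant for that per-thread array (the 0x1a0 below is a made-up placeholder):

    #include <stdio.h>

    /* Illustration only: THREAD_NORMSAVES is generated by asm-offsets in the
     * real kernel; 0x1a0 is a placeholder. */
    #define THREAD_NORMSAVES        0x1a0
    #define THREAD_NORMSAVE(n)      (THREAD_NORMSAVES + ((n) * 4))

    int main(void)
    {
            /* Slots 0-3 are reserved for the prolog; the rest are for handlers
             * that need more than four scratch registers. */
            for (int n = 0; n < 4; n++)
                    printf("THREAD_NORMSAVE(%d) = 0x%x\n", n, THREAD_NORMSAVE(n));
            return 0;
    }
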
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 5ecf54cfa7d..50845924b7d 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -346,11 +346,12 @@ interrupt_base:
/* Data TLB Error Interrupt */
START_EXCEPTION(DataTLBError)
mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
- mtspr SPRN_SPRG_WSCRATCH1, r11
- mtspr SPRN_SPRG_WSCRATCH2, r12
- mtspr SPRN_SPRG_WSCRATCH3, r13
- mfcr r11
- mtspr SPRN_SPRG_WSCRATCH4, r11
+ mfspr r10, SPRN_SPRG_THREAD
+ stw r11, THREAD_NORMSAVE(0)(r10)
+ stw r12, THREAD_NORMSAVE(1)(r10)
+ stw r13, THREAD_NORMSAVE(2)(r10)
+ mfcr r13
+ stw r13, THREAD_NORMSAVE(3)(r10)
mfspr r10, SPRN_DEAR /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
@@ -416,11 +417,12 @@ interrupt_base:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
- mfspr r11, SPRN_SPRG_RSCRATCH4
+ mfspr r10, SPRN_SPRG_THREAD
+ lwz r11, THREAD_NORMSAVE(3)(r10)
mtcr r11
- mfspr r13, SPRN_SPRG_RSCRATCH3
- mfspr r12, SPRN_SPRG_RSCRATCH2
- mfspr r11, SPRN_SPRG_RSCRATCH1
+ lwz r13, THREAD_NORMSAVE(2)(r10)
+ lwz r12, THREAD_NORMSAVE(1)(r10)
+ lwz r11, THREAD_NORMSAVE(0)(r10)
mfspr r10, SPRN_SPRG_RSCRATCH0
b DataStorage
@@ -432,11 +434,12 @@ interrupt_base:
*/
START_EXCEPTION(InstructionTLBError)
mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
- mtspr SPRN_SPRG_WSCRATCH1, r11
- mtspr SPRN_SPRG_WSCRATCH2, r12
- mtspr SPRN_SPRG_WSCRATCH3, r13
- mfcr r11
- mtspr SPRN_SPRG_WSCRATCH4, r11
+ mfspr r10, SPRN_SPRG_THREAD
+ stw r11, THREAD_NORMSAVE(0)(r10)
+ stw r12, THREAD_NORMSAVE(1)(r10)
+ stw r13, THREAD_NORMSAVE(2)(r10)
+ mfcr r13
+ stw r13, THREAD_NORMSAVE(3)(r10)
mfspr r10, SPRN_SRR0 /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
@@ -496,11 +499,12 @@ interrupt_base:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
- mfspr r11, SPRN_SPRG_RSCRATCH4
+ mfspr r10, SPRN_SPRG_THREAD
+ lwz r11, THREAD_NORMSAVE(3)(r10)
mtcr r11
- mfspr r13, SPRN_SPRG_RSCRATCH3
- mfspr r12, SPRN_SPRG_RSCRATCH2
- mfspr r11, SPRN_SPRG_RSCRATCH1
+ lwz r13, THREAD_NORMSAVE(2)(r10)
+ lwz r12, THREAD_NORMSAVE(1)(r10)
+ lwz r11, THREAD_NORMSAVE(0)(r10)
mfspr r10, SPRN_SPRG_RSCRATCH0
b InstructionStorage
@@ -621,11 +625,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
tlbwe
/* Done...restore registers and get out of here. */
- mfspr r11, SPRN_SPRG_RSCRATCH4
+ mfspr r10, SPRN_SPRG_THREAD
+ lwz r11, THREAD_NORMSAVE(3)(r10)
mtcr r11
- mfspr r13, SPRN_SPRG_RSCRATCH3
- mfspr r12, SPRN_SPRG_RSCRATCH2
- mfspr r11, SPRN_SPRG_RSCRATCH1
+ lwz r13, THREAD_NORMSAVE(2)(r10)
+ lwz r12, THREAD_NORMSAVE(1)(r10)
+ lwz r11, THREAD_NORMSAVE(0)(r10)
mfspr r10, SPRN_SPRG_RSCRATCH0
rfi /* Force context change */
@@ -656,7 +661,7 @@ load_up_spe:
cmpi 0,r4,0
beq 1f
addi r4,r4,THREAD /* want THREAD of last_task_used_spe */
- SAVE_32EVRS(0,r10,r4)
+ SAVE_32EVRS(0,r10,r4,THREAD_EVR0)
evxor evr10, evr10, evr10 /* clear out evr10 */
evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */
li r5,THREAD_ACC
@@ -676,7 +681,7 @@ load_up_spe:
stw r4,THREAD_USED_SPE(r5)
evlddx evr4,r10,r5
evmra evr4,evr4
- REST_32EVRS(0,r10,r5)
+ REST_32EVRS(0,r10,r5,THREAD_EVR0)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
stw r4,last_task_used_spe@l(r3)
@@ -787,13 +792,11 @@ _GLOBAL(giveup_spe)
addi r3,r3,THREAD /* want THREAD of task */
lwz r5,PT_REGS(r3)
cmpi 0,r5,0
- SAVE_32EVRS(0, r4, r3)
+ SAVE_32EVRS(0, r4, r3, THREAD_EVR0)
evxor evr6, evr6, evr6 /* clear out evr6 */
evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */
li r4,THREAD_ACC
evstddx evr6, r4, r3 /* save off accumulator */
- mfspr r6,SPRN_SPEFSCR
- stw r6,THREAD_SPEFSCR(r3) /* save spefscr register value */
beq 1f
lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
lis r3,MSR_SPE@h
diff --git a/arch/powerpc/kernel/idle_e500.S b/arch/powerpc/kernel/idle_e500.S
index 47a1a983ff8..3e2b95c6ae6 100644
--- a/arch/powerpc/kernel/idle_e500.S
+++ b/arch/powerpc/kernel/idle_e500.S
@@ -26,6 +26,17 @@ _GLOBAL(e500_idle)
ori r4,r4,_TLF_NAPPING /* so when we take an exception */
stw r4,TI_LOCAL_FLAGS(r3) /* it will return to our caller */
+#ifdef CONFIG_E500MC
+ wrteei 1
+1: wait
+
+ /*
+ * Guard against spurious wakeups (e.g. from a hypervisor) --
+ * any real interrupt will cause us to return to LR due to
+ * _TLF_NAPPING.
+ */
+ b 1b
+#else
/* Check if we can nap or doze, put HID0 mask in r3 */
lis r3,0
BEGIN_FTR_SECTION
@@ -72,6 +83,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_L2CSR|CPU_FTR_CAN_NAP)
mtmsr r7
isync
2: b 2b
+#endif /* !E500MC */
/*
* Return from NAP/DOZE mode, restore some CPU specific registers,
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index f8f0bc7f1d4..3a70845a51c 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -73,7 +73,6 @@ _GLOBAL(power7_idle)
b .
_GLOBAL(power7_wakeup_loss)
- GET_PACA(r13)
ld r1,PACAR1(r13)
REST_NVGPRS(r1)
REST_GPR(2, r1)
@@ -87,7 +86,6 @@ _GLOBAL(power7_wakeup_loss)
rfid
_GLOBAL(power7_wakeup_noloss)
- GET_PACA(r13)
ld r1,PACAR1(r13)
ld r4,_MSR(r1)
ld r5,_NIP(r1)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5b428e30866..d281fb6f12f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -157,12 +157,6 @@ notrace void arch_local_irq_restore(unsigned long en)
if (get_hard_enabled())
return;
-#if defined(CONFIG_BOOKE) && defined(CONFIG_SMP)
- /* Check for pending doorbell interrupts and resend to ourself */
- if (cpu_has_feature(CPU_FTR_DBELL))
- smp_muxed_ipi_resend();
-#endif
-
/*
* Need to hard-enable interrupts here. Since currently disabled,
* no need to take further asm precautions against preemption; but
@@ -457,11 +451,18 @@ static inline void do_softirq_onstack(void)
curtp = current_thread_info();
irqtp = softirq_ctx[smp_processor_id()];
irqtp->task = curtp->task;
+ irqtp->flags = 0;
current->thread.ksp_limit = (unsigned long)irqtp +
_ALIGN_UP(sizeof(struct thread_info), 16);
call_do_softirq(irqtp);
current->thread.ksp_limit = saved_sp_limit;
irqtp->task = NULL;
+
+ /* Set any flag that may have been set on the
+ * alternate stack
+ */
+ if (irqtp->flags)
+ set_bits(irqtp->flags, &curtp->flags);
}
void do_softirq(void)
@@ -750,7 +751,7 @@ unsigned int irq_create_mapping(struct irq_host *host,
if (irq_setup_virq(host, virq, hwirq))
return NO_IRQ;
- printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n",
+ pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n",
hwirq, host->of_node ? host->of_node->full_name : "null", virq);
return virq;
@@ -882,6 +883,41 @@ unsigned int irq_find_mapping(struct irq_host *host,
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
+#ifdef CONFIG_SMP
+int irq_choose_cpu(const struct cpumask *mask)
+{
+ int cpuid;
+
+ if (cpumask_equal(mask, cpu_all_mask)) {
+ static int irq_rover;
+ static DEFINE_RAW_SPINLOCK(irq_rover_lock);
+ unsigned long flags;
+
+ /* Round-robin distribution... */
+do_round_robin:
+ raw_spin_lock_irqsave(&irq_rover_lock, flags);
+
+ irq_rover = cpumask_next(irq_rover, cpu_online_mask);
+ if (irq_rover >= nr_cpu_ids)
+ irq_rover = cpumask_first(cpu_online_mask);
+
+ cpuid = irq_rover;
+
+ raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
+ } else {
+ cpuid = cpumask_first_and(mask, cpu_online_mask);
+ if (cpuid >= nr_cpu_ids)
+ goto do_round_robin;
+ }
+
+ return get_hard_smp_processor_id(cpuid);
+}
+#else
+int irq_choose_cpu(const struct cpumask *mask)
+{
+ return hard_smp_processor_id();
+}
+#endif
unsigned int irq_radix_revmap_lookup(struct irq_host *host,
irq_hw_number_t hwirq)
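
The new irq_choose_cpu() above translates an affinity mask into a hard CPU id: for a specific mask it takes the first online CPU in it, and for cpu_all_mask (or a mask none of whose CPUs are online) it falls back to a spinlock-protected round-robin over the online CPUs. A self-contained toy model of that selection policy, using plain arrays instead of cpumasks and omitting the lock and the hard-id translation:

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 8

    /* 'online' marks online CPUs, 'mask' is the requested affinity,
     * and 'all' stands in for cpu_all_mask. */
    static int choose_cpu(const bool mask[NR_CPUS], bool all, const bool online[NR_CPUS])
    {
            static int rover = -1;

            if (!all) {
                    for (int cpu = 0; cpu < NR_CPUS; cpu++)
                            if (mask[cpu] && online[cpu])
                                    return cpu;     /* first online CPU in the mask */
            }
            /* cpu_all_mask, or nothing online in the mask: round-robin */
            for (int i = 0; i < NR_CPUS; i++) {
                    rover = (rover + 1) % NR_CPUS;
                    if (online[rover])
                            return rover;
            }
            return 0;
    }

    int main(void)
    {
            bool online[NR_CPUS] = { true, true, false, true };
            bool any[NR_CPUS] = { false };

            for (int i = 0; i < 5; i++)
                    printf("round-robin pick: %d\n", choose_cpu(any, true, online));
            return 0;
    }
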
diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c
new file mode 100644
index 00000000000..368d158d665
--- /dev/null
+++ b/arch/powerpc/kernel/jump_label.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2010 Michael Ellerman, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/jump_label.h>
+#include <asm/code-patching.h>
+
+void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ u32 *addr = (u32 *)(unsigned long)entry->code;
+
+ if (type == JUMP_LABEL_ENABLE)
+ patch_branch(addr, entry->target, 0);
+ else
+ patch_instruction(addr, PPC_INST_NOP);
+}
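
arch_jump_label_transform() above patches a single instruction word at the jump-label site: an unconditional relative branch when the key is enabled, PPC_INST_NOP otherwise. For reference, the I-form branch that patch_branch() conceptually emits can be modelled in a few lines (0x60000000 is the canonical PowerPC nop, ori 0,0,0; the 26-bit displacement must be word aligned and within roughly +/-32 MB):

    #include <stdint.h>
    #include <stdio.h>

    #define PPC_INST_NOP    0x60000000u     /* ori 0,0,0 */
    #define PPC_INST_BRANCH 0x48000000u     /* opcode 18, AA=0, LK=0 */

    /* Encode "b target" at address addr (relative, no link). */
    static uint32_t encode_branch(uint32_t addr, uint32_t target)
    {
            uint32_t offset = target - addr;

            return PPC_INST_BRANCH | (offset & 0x03fffffcu);
    }

    int main(void)
    {
            uint32_t site = 0x1000, target = 0x1100;        /* made-up addresses */

            printf("enabled:  0x%08x\n", encode_branch(site, target));
            printf("disabled: 0x%08x\n", PPC_INST_NOP);
            return 0;
    }
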
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 7ee50f0547c..6658a158995 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -126,7 +126,7 @@ void __init reserve_crashkernel(void)
/* We might have got these values via the command line or the
* device tree, either way sanitise them now. */
- crash_size = crashk_res.end - crashk_res.start + 1;
+ crash_size = resource_size(&crashk_res);
#ifndef CONFIG_RELOCATABLE
if (crashk_res.start != KDUMP_KERNELBASE)
@@ -222,7 +222,7 @@ static void __init export_crashk_values(struct device_node *node)
if (crashk_res.start != 0) {
prom_add_property(node, &crashk_base_prop);
- crashk_size = crashk_res.end - crashk_res.start + 1;
+ crashk_size = resource_size(&crashk_res);
prom_add_property(node, &crashk_size_prop);
}
}
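
The two crashkernel hunks above are a straight conversion to the generic resource_size() helper, which returns the inclusive length of a struct resource. The arithmetic it replaces, shown with made-up bounds:

    #include <stdio.h>

    /* resource_size(res) is end - start + 1; the +1 is because struct
     * resource ranges include both endpoints. */
    int main(void)
    {
            unsigned long long start = 0x02000000, end = 0x03ffffff;   /* made up */

            printf("crash_size = 0x%llx\n", end - start + 1);   /* 32 MB */
            return 0;
    }
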
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index e89df59cdc5..616921ef143 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -339,7 +339,7 @@ _GLOBAL(real_205_writeb)
#endif /* CONFIG_PPC_PASEMI */
-#ifdef CONFIG_CPU_FREQ_PMAC64
+#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE)
/*
* SCOM access functions for 970 (FX only for now)
*
@@ -408,7 +408,7 @@ _GLOBAL(scom970_write)
/* restore interrupts */
mtmsrd r5,1
blr
-#endif /* CONFIG_CPU_FREQ_PMAC64 */
+#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */
/*
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 49cee9df225..a1cd701b575 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -31,20 +31,6 @@
LIST_HEAD(module_bug_list);
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
-
- return vmalloc_exec(size);
-}
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
const char *name)
@@ -93,7 +79,3 @@ int module_finalize(const Elf_Ehdr *hdr,
return 0;
}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index f832773fc28..0b6d79617d7 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -174,17 +174,6 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
return 0;
}
-int apply_relocate(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *module)
-{
- printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n",
- module->name);
- return -ENOEXEC;
-}
-
static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
{
if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16)
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 8fbb12508bf..9f44a775a10 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -243,16 +243,6 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr,
return 0;
}
-int apply_relocate(Elf64_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n", me->name);
- return -ENOEXEC;
-}
-
/* r2 is the TOC pointer: it actually points 0x8000 into the TOC (this
gives the value maximum span in an instruction which uses a signed
offset) */
diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/kernel/mpc7450-pmu.c
index 2cc5e0301d0..fe21b515ca4 100644
--- a/arch/powerpc/kernel/mpc7450-pmu.c
+++ b/arch/powerpc/kernel/mpc7450-pmu.c
@@ -388,6 +388,11 @@ static int mpc7450_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
+ [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { -1, -1 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
};
struct power_pmu mpc7450_pmu = {
@@ -405,7 +410,7 @@ struct power_pmu mpc7450_pmu = {
.cache_events = &mpc7450_cache_events,
};
-static int init_mpc7450_pmu(void)
+static int __init init_mpc7450_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/7450"))
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index 24582181b6e..59dbf6abaaf 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -26,7 +26,7 @@
#include <asm/topology.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#ifdef CONFIG_PPC_OF_PLATFORM_PCI
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index efeb8818418..0a5a899846b 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -167,7 +167,7 @@ void setup_paca(struct paca_struct *new_paca)
* if we do a GET_PACA() before the feature fixups have been
* applied
*/
- if (cpu_has_feature(CPU_FTR_HVMODE_206))
+ if (cpu_has_feature(CPU_FTR_HVMODE))
mtspr(SPRN_SPRG_HPACA, local_paca);
#endif
mtspr(SPRN_SPRG_PACA, local_paca);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 893af2a9cd0..32656f10525 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -50,7 +50,7 @@ static int global_phb_number; /* Global phb counter */
resource_size_t isa_mem_base;
/* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */
-unsigned int ppc_pci_flags = 0;
+unsigned int pci_flags = 0;
static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
@@ -107,7 +107,7 @@ static resource_size_t pcibios_io_size(const struct pci_controller *hose)
#ifdef CONFIG_PPC64
return hose->pci_io_size;
#else
- return hose->io_resource.end - hose->io_resource.start + 1;
+ return resource_size(&hose->io_resource);
#endif
}
@@ -842,9 +842,9 @@ int pci_proc_domain(struct pci_bus *bus)
{
struct pci_controller *hose = pci_bus_to_host(bus);
- if (!(ppc_pci_flags & PPC_PCI_ENABLE_PROC_DOMAINS))
+ if (!pci_has_flag(PCI_ENABLE_PROC_DOMAINS))
return 0;
- if (ppc_pci_flags & PPC_PCI_COMPAT_DOMAIN_0)
+ if (pci_has_flag(PCI_COMPAT_DOMAIN_0))
return hose->global_number != 0;
return 1;
}
@@ -920,13 +920,13 @@ static void __devinit pcibios_fixup_resources(struct pci_dev *dev)
struct resource *res = dev->resource + i;
if (!res->flags)
continue;
- /* On platforms that have PPC_PCI_PROBE_ONLY set, we don't
+ /* On platforms that have PCI_PROBE_ONLY set, we don't
* consider 0 as an unassigned BAR value. It's technically
* a valid value, but linux doesn't like it... so when we can
* re-assign things, we do so, but if we can't, we keep it
* around and hope for the best...
*/
- if (res->start == 0 && !(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) {
+ if (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY)) {
pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] is unassigned\n",
pci_name(dev), i,
(unsigned long long)res->start,
@@ -973,7 +973,7 @@ static int __devinit pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
int i;
/* We don't do anything if PCI_PROBE_ONLY is set */
- if (ppc_pci_flags & PPC_PCI_PROBE_ONLY)
+ if (pci_has_flag(PCI_PROBE_ONLY))
return 0;
/* Job is a bit different between memory and IO */
@@ -1097,9 +1097,6 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
if (dev->is_added)
continue;
- /* Setup OF node pointer in the device */
- dev->dev.of_node = pci_device_to_OF_node(dev);
-
/* Fixup NUMA node as it may not be setup yet by the generic
* code and is needed by the DMA init
*/
@@ -1146,7 +1143,7 @@ void __devinit pci_fixup_cardbus(struct pci_bus *bus)
static int skip_isa_ioresource_align(struct pci_dev *dev)
{
- if ((ppc_pci_flags & PPC_PCI_CAN_SKIP_ISA_ALIGN) &&
+ if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) &&
!(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
return 1;
return 0;
@@ -1274,7 +1271,7 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus)
* and as such ensure proper re-allocation
* later.
*/
- if (ppc_pci_flags & PPC_PCI_REASSIGN_ALL_RSRC)
+ if (pci_has_flag(PCI_REASSIGN_ALL_RSRC))
goto clear_resource;
pr = pci_find_parent_resource(bus->self, res);
if (pr == res) {
@@ -1459,7 +1456,7 @@ void __init pcibios_resource_survey(void)
list_for_each_entry(b, &pci_root_buses, node)
pcibios_allocate_bus_resources(b);
- if (!(ppc_pci_flags & PPC_PCI_REASSIGN_ALL_RSRC)) {
+ if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
pcibios_allocate_resources(0);
pcibios_allocate_resources(1);
}
@@ -1468,7 +1465,7 @@ void __init pcibios_resource_survey(void)
* the low IO area and the VGA memory area if they intersect the
* bus available resources to avoid allocating things on top of them
*/
- if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) {
+ if (!pci_has_flag(PCI_PROBE_ONLY)) {
list_for_each_entry(b, &pci_root_buses, node)
pcibios_reserve_legacy_regions(b);
}
@@ -1476,7 +1473,7 @@ void __init pcibios_resource_survey(void)
/* Now, if the platform didn't decide to blindly trust the firmware,
* we proceed to assigning things that were left unassigned
*/
- if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) {
+ if (!pci_has_flag(PCI_PROBE_ONLY)) {
pr_debug("PCI: Assigning unassigned resources...\n");
pci_assign_unassigned_resources();
}
@@ -1685,6 +1682,13 @@ int early_find_capability(struct pci_controller *hose, int bus, int devfn,
return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}
+struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
+{
+ struct pci_controller *hose = bus->sysdata;
+
+ return of_node_get(hose->dn);
+}
+
/**
* pci_scan_phb - Given a pci_controller, setup and scan the PCI bus
* @hose: Pointer to the PCI host controller instance structure
@@ -1705,7 +1709,6 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose)
hose->global_number);
return;
}
- bus->dev.of_node = of_node_get(node);
bus->secondary = hose->first_busno;
hose->bus = bus;
@@ -1728,3 +1731,21 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose)
if (mode == PCI_PROBE_NORMAL)
hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
}
+
+static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
+{
+ int i, class = dev->class >> 8;
+
+ if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
+ class == PCI_CLASS_BRIDGE_OTHER) &&
+ (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
+ (dev->bus->parent == NULL)) {
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ dev->resource[i].start = 0;
+ dev->resource[i].end = 0;
+ dev->resource[i].flags = 0;
+ }
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
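
The ppc_pci_flags/PPC_PCI_* tests throughout this file become the generic pci_flags word tested with pci_add_flags()/pci_has_flag(), so the probe/reassignment policy bits can be shared rather than powerpc-private. The helpers amount to bit operations on a single global; a hedged sketch of what they boil down to (the real definitions live in a shared asm-generic header):

    /* Sketch only: mirrors the shape of the generic helpers used above. */
    static unsigned int pci_flags;

    static inline void pci_add_flags(unsigned int flags)
    {
            pci_flags |= flags;
    }

    static inline int pci_has_flag(unsigned int flag)
    {
            return pci_flags & flag;
    }
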
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index bedb370459f..bb154511db5 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -51,25 +51,6 @@ struct pci_dev *isa_bridge_pcidev;
EXPORT_SYMBOL_GPL(isa_bridge_pcidev);
static void
-fixup_hide_host_resource_fsl(struct pci_dev *dev)
-{
- int i, class = dev->class >> 8;
-
- if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
- class == PCI_CLASS_BRIDGE_OTHER) &&
- (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
- (dev->bus->parent == NULL)) {
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- dev->resource[i].start = 0;
- dev->resource[i].end = 0;
- dev->resource[i].flags = 0;
- }
- }
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
-
-static void
fixup_cpc710_pci64(struct pci_dev* dev)
{
/* Hide the PCI64 BARs from the kernel as their content doesn't
@@ -167,150 +148,26 @@ pcibios_make_OF_bus_map(void)
#endif
}
-typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);
-
-static struct device_node*
-scan_OF_pci_childs(struct device_node *parent, pci_OF_scan_iterator filter, void* data)
-{
- struct device_node *node;
- struct device_node* sub_node;
-
- for_each_child_of_node(parent, node) {
- const unsigned int *class_code;
-
- if (filter(node, data)) {
- of_node_put(node);
- return node;
- }
-
- /* For PCI<->PCI bridges or CardBus bridges, we go down
- * Note: some OFs create a parent node "multifunc-device" as
- * a fake root for all functions of a multi-function device,
- * we go down them as well.
- */
- class_code = of_get_property(node, "class-code", NULL);
- if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
- (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
- strcmp(node->name, "multifunc-device"))
- continue;
- sub_node = scan_OF_pci_childs(node, filter, data);
- if (sub_node) {
- of_node_put(node);
- return sub_node;
- }
- }
- return NULL;
-}
-
-static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
- unsigned int devfn)
-{
- struct device_node *np, *cnp;
- const u32 *reg;
- unsigned int psize;
-
- for_each_child_of_node(parent, np) {
- reg = of_get_property(np, "reg", &psize);
- if (reg && psize >= 4 && ((reg[0] >> 8) & 0xff) == devfn)
- return np;
-
- /* Note: some OFs create a parent node "multifunc-device" as
- * a fake root for all functions of a multi-function device,
- * we go down them as well. */
- if (!strcmp(np->name, "multifunc-device")) {
- cnp = scan_OF_for_pci_dev(np, devfn);
- if (cnp)
- return cnp;
- }
- }
- return NULL;
-}
-
-
-static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
-{
- struct device_node *parent, *np;
-
- /* Are we a root bus ? */
- if (bus->self == NULL || bus->parent == NULL) {
- struct pci_controller *hose = pci_bus_to_host(bus);
- if (hose == NULL)
- return NULL;
- return of_node_get(hose->dn);
- }
-
- /* not a root bus, we need to get our parent */
- parent = scan_OF_for_pci_bus(bus->parent);
- if (parent == NULL)
- return NULL;
-
- /* now iterate for children for a match */
- np = scan_OF_for_pci_dev(parent, bus->self->devfn);
- of_node_put(parent);
-
- return np;
-}
-
-/*
- * Scans the OF tree for a device node matching a PCI device
- */
-struct device_node *
-pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
-{
- struct device_node *parent, *np;
-
- pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
- parent = scan_OF_for_pci_bus(bus);
- if (parent == NULL)
- return NULL;
- pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>");
- np = scan_OF_for_pci_dev(parent, devfn);
- of_node_put(parent);
- pr_debug(" result is %s\n", np ? np->full_name : "<NULL>");
-
- /* XXX most callers don't release the returned node
- * mostly because ppc64 doesn't increase the refcount,
- * we need to fix that.
- */
- return np;
-}
-EXPORT_SYMBOL(pci_busdev_to_OF_node);
-
-struct device_node*
-pci_device_to_OF_node(struct pci_dev *dev)
-{
- return pci_busdev_to_OF_node(dev->bus, dev->devfn);
-}
-EXPORT_SYMBOL(pci_device_to_OF_node);
-
-static int
-find_OF_pci_device_filter(struct device_node* node, void* data)
-{
- return ((void *)node == data);
-}
/*
* Returns the PCI device matching a given OF node
*/
-int
-pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
+int pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn)
{
- const unsigned int *reg;
- struct pci_controller* hose;
- struct pci_dev* dev = NULL;
-
- /* Make sure it's really a PCI device */
- hose = pci_find_hose_for_OF_device(node);
- if (!hose || !hose->dn)
- return -ENODEV;
- if (!scan_OF_pci_childs(hose->dn,
- find_OF_pci_device_filter, (void *)node))
+ struct pci_dev *dev = NULL;
+ const __be32 *reg;
+ int size;
+
+ /* Check if it might have a chance to be a PCI device */
+ if (!pci_find_hose_for_OF_device(node))
return -ENODEV;
- reg = of_get_property(node, "reg", NULL);
- if (!reg)
+
+ reg = of_get_property(node, "reg", &size);
+ if (!reg || size < 5 * sizeof(u32))
return -ENODEV;
- *bus = (reg[0] >> 16) & 0xff;
- *devfn = ((reg[0] >> 8) & 0xff);
+
+ *bus = (be32_to_cpup(&reg[0]) >> 16) & 0xff;
+ *devfn = (be32_to_cpup(&reg[0]) >> 8) & 0xff;
/* Ok, here we need some tweak. If we have already renumbered
* all busses, we can't rely on the OF bus number any more.
@@ -373,7 +230,7 @@ static int __init pcibios_init(void)
printk(KERN_INFO "PCI: Probing PCI hardware\n");
- if (ppc_pci_flags & PPC_PCI_REASSIGN_ALL_BUS)
+ if (pci_has_flag(PCI_REASSIGN_ALL_BUS))
pci_assign_all_buses = 1;
/* Scan all of the recorded PCI controllers. */
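
The rewritten pci_device_from_OF_node() above no longer walks the OF tree looking for a matching node; it simply decodes the first cell (phys.hi) of the node's "reg" property, where the OF PCI bus binding packs the bus number in bits 23:16 and devfn in bits 15:8. A small worked example of that decoding:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Made-up phys.hi cell: bus 0x02, device 0x0b, function 0x3.
             * Layout: npt000ss bbbbbbbb dddddfff rrrrrrrr (the top byte's
             * space/relocation bits are not used here). */
            uint32_t phys_hi = (0x02u << 16) | (0x0bu << 11) | (0x3u << 8);

            uint8_t bus   = (phys_hi >> 16) & 0xff;
            uint8_t devfn = (phys_hi >> 8) & 0xff;  /* (dev << 3) | fn */

            printf("bus %u, device %u, function %u\n",
                   bus, devfn >> 3, devfn & 7);
            return 0;
    }
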
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index fc6452b6be9..ab34046752b 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -55,12 +55,12 @@ static int __init pcibios_init(void)
ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
if (pci_probe_only)
- ppc_pci_flags |= PPC_PCI_PROBE_ONLY;
+ pci_add_flags(PCI_PROBE_ONLY);
/* On ppc64, we always enable PCI domains and we keep domain 0
* backward compatible in /proc for video cards
*/
- ppc_pci_flags |= PPC_PCI_ENABLE_PROC_DOMAINS | PPC_PCI_COMPAT_DOMAIN_0;
+ pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);
/* Scan all of the recorded PCI controllers. */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 6baabc13306..478f8d78716 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -142,53 +142,6 @@ void __devinit pci_devs_phb_init_dynamic(struct pci_controller *phb)
traverse_pci_devices(dn, update_dn_pci_info, phb);
}
-/*
- * Traversal func that looks for a <busno,devfcn> value.
- * If found, the pci_dn is returned (thus terminating the traversal).
- */
-static void *is_devfn_node(struct device_node *dn, void *data)
-{
- int busno = ((unsigned long)data >> 8) & 0xff;
- int devfn = ((unsigned long)data) & 0xff;
- struct pci_dn *pci = dn->data;
-
- if (pci && (devfn == pci->devfn) && (busno == pci->busno))
- return dn;
- return NULL;
-}
-
-/*
- * This is the "slow" path for looking up a device_node from a
- * pci_dev. It will hunt for the device under its parent's
- * phb and then update of_node pointer.
- *
- * It may also do fixups on the actual device since this happens
- * on the first read/write.
- *
- * Note that it also must deal with devices that don't exist.
- * In this case it may probe for real hardware ("just in case")
- * and add a device_node to the device tree if necessary.
- *
- * Is this function necessary anymore now that dev->dev.of_node is
- * used to store the node pointer?
- *
- */
-struct device_node *fetch_dev_dn(struct pci_dev *dev)
-{
- struct pci_controller *phb = dev->sysdata;
- struct device_node *dn;
- unsigned long searchval = (dev->bus->number << 8) | dev->devfn;
-
- if (WARN_ON(!phb))
- return NULL;
-
- dn = traverse_pci_devices(phb->dn, is_devfn_node, (void *)searchval);
- if (dn)
- dev->dev.of_node = dn;
- return dn;
-}
-EXPORT_SYMBOL(fetch_dev_dn);
-
/**
* pci_devs_phb_init - Initialize phbs and pci devs under them.
*
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 1e89a72fd03..fe0a5ad6f73 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -202,9 +202,9 @@ EXPORT_SYMBOL(of_create_pci_dev);
 * devices.
 * this routine in turn calls of_scan_bus() recursively to scan for more child
*/
-void __devinit of_scan_pci_bridge(struct device_node *node,
- struct pci_dev *dev)
+void __devinit of_scan_pci_bridge(struct pci_dev *dev)
{
+ struct device_node *node = dev->dev.of_node;
struct pci_bus *bus;
const u32 *busrange, *ranges;
int len, i, mode;
@@ -238,7 +238,6 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
bus->primary = dev->bus->number;
bus->subordinate = busrange[1];
bus->bridge_ctl = 0;
- bus->dev.of_node = of_node_get(node);
/* parse ranges property */
/* PCI #address-cells == 3 and #size-cells == 2 always */
@@ -335,9 +334,7 @@ static void __devinit __of_scan_bus(struct device_node *node,
list_for_each_entry(dev, &bus->devices, bus_list) {
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
- struct device_node *child = pci_device_to_OF_node(dev);
- if (child)
- of_scan_pci_bridge(child, dev);
+ of_scan_pci_bridge(dev);
}
}
}
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 822f63008ae..10a140f82cb 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1207,7 +1207,7 @@ struct pmu power_pmu = {
* here so there is no possibility of being interrupted.
*/
static void record_and_restart(struct perf_event *event, unsigned long val,
- struct pt_regs *regs, int nmi)
+ struct pt_regs *regs)
{
u64 period = event->hw.sample_period;
s64 prev, delta, left;
@@ -1258,7 +1258,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
if (event->attr.sample_type & PERF_SAMPLE_ADDR)
perf_get_data_addr(regs, &data.addr);
- if (perf_event_overflow(event, nmi, &data, regs))
+ if (perf_event_overflow(event, &data, regs))
power_pmu_stop(event, 0);
}
}
@@ -1346,7 +1346,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
if ((int)val < 0) {
/* event has overflowed */
found = 1;
- record_and_restart(event, val, regs, nmi);
+ record_and_restart(event, val, regs);
}
}
@@ -1408,7 +1408,7 @@ power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu
return NOTIFY_OK;
}
-int register_power_pmu(struct power_pmu *pmu)
+int __cpuinit register_power_pmu(struct power_pmu *pmu)
{
if (ppmu)
return -EBUSY; /* something's already registered */
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index b0dc8f7069c..0a6d2a9d569 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -568,7 +568,7 @@ static struct pmu fsl_emb_pmu = {
* here so there is no possibility of being interrupted.
*/
static void record_and_restart(struct perf_event *event, unsigned long val,
- struct pt_regs *regs, int nmi)
+ struct pt_regs *regs)
{
u64 period = event->hw.sample_period;
s64 prev, delta, left;
@@ -616,7 +616,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
perf_sample_data_init(&data, 0);
data.period = event->hw.last_period;
- if (perf_event_overflow(event, nmi, &data, regs))
+ if (perf_event_overflow(event, &data, regs))
fsl_emb_pmu_stop(event, 0);
}
}
@@ -644,7 +644,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
if (event) {
/* event has overflowed */
found = 1;
- record_and_restart(event, val, regs, nmi);
+ record_and_restart(event, val, regs);
} else {
/*
* Disabled counter is negative,
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c
index ead8b3c2649..b4f1dda4d08 100644
--- a/arch/powerpc/kernel/power4-pmu.c
+++ b/arch/powerpc/kernel/power4-pmu.c
@@ -587,6 +587,11 @@ static int power4_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
+ [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { -1, -1 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
};
static struct power_pmu power4_pmu = {
@@ -604,7 +609,7 @@ static struct power_pmu power4_pmu = {
.cache_events = &power4_cache_events,
};
-static int init_power4_pmu(void)
+static int __init init_power4_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power4"))
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c
index eca0ac595cb..a8757baa28f 100644
--- a/arch/powerpc/kernel/power5+-pmu.c
+++ b/arch/powerpc/kernel/power5+-pmu.c
@@ -653,6 +653,11 @@ static int power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
+ [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { -1, -1 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
};
static struct power_pmu power5p_pmu = {
@@ -672,7 +677,7 @@ static struct power_pmu power5p_pmu = {
.cache_events = &power5p_cache_events,
};
-static int init_power5p_pmu(void)
+static int __init init_power5p_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
(strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5+")
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c
index d5ff0f64a5e..e7f06eb7a86 100644
--- a/arch/powerpc/kernel/power5-pmu.c
+++ b/arch/powerpc/kernel/power5-pmu.c
@@ -595,6 +595,11 @@ static int power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
+ [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { -1, -1 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
};
static struct power_pmu power5_pmu = {
@@ -612,7 +617,7 @@ static struct power_pmu power5_pmu = {
.cache_events = &power5_cache_events,
};
-static int init_power5_pmu(void)
+static int __init init_power5_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5"))
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c
index 31603927e37..03b95e2c6d6 100644
--- a/arch/powerpc/kernel/power6-pmu.c
+++ b/arch/powerpc/kernel/power6-pmu.c
@@ -516,6 +516,11 @@ static int power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
+ [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { -1, -1 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
};
static struct power_pmu power6_pmu = {
@@ -535,7 +540,7 @@ static struct power_pmu power6_pmu = {
.cache_events = &power6_cache_events,
};
-static int init_power6_pmu(void)
+static int __init init_power6_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power6"))
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c
index 593740fcb79..de83d6060dd 100644
--- a/arch/powerpc/kernel/power7-pmu.c
+++ b/arch/powerpc/kernel/power7-pmu.c
@@ -342,6 +342,11 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
+ [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { -1, -1 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
};
static struct power_pmu power7_pmu = {
@@ -360,7 +365,7 @@ static struct power_pmu power7_pmu = {
.cache_events = &power7_cache_events,
};
-static int init_power7_pmu(void)
+static int __init init_power7_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7"))
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
index 9a6e093858f..8c219020696 100644
--- a/arch/powerpc/kernel/ppc970-pmu.c
+++ b/arch/powerpc/kernel/ppc970-pmu.c
@@ -467,6 +467,11 @@ static int ppc970_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
+ [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { -1, -1 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
};
static struct power_pmu ppc970_pmu = {
@@ -484,7 +489,7 @@ static struct power_pmu ppc970_pmu = {
.cache_events = &ppc970_cache_events,
};
-static int init_ppc970_pmu(void)
+static int __init init_ppc970_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
(strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970")
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 7d28f540200..f5ae872a2ef 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -18,7 +18,7 @@
#include <asm/cacheflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/checksum.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 91e52df3d81..8f53954e75a 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -96,6 +96,7 @@ void flush_fp_to_thread(struct task_struct *tsk)
preempt_enable();
}
}
+EXPORT_SYMBOL_GPL(flush_fp_to_thread);
void enable_kernel_fp(void)
{
@@ -145,6 +146,7 @@ void flush_altivec_to_thread(struct task_struct *tsk)
preempt_enable();
}
}
+EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
@@ -186,6 +188,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
preempt_enable();
}
}
+EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
@@ -213,6 +216,7 @@ void flush_spe_to_thread(struct task_struct *tsk)
#ifdef CONFIG_SMP
BUG_ON(tsk != current);
#endif
+ tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
giveup_spe(tsk);
}
preempt_enable();
@@ -650,6 +654,8 @@ void show_regs(struct pt_regs * regs)
printbits(regs->msr, msr_bits);
printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
trap = TRAP(regs);
+ if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
+ printk("CFAR: "REG"\n", regs->orig_gpr3);
if (trap == 0x300 || trap == 0x600)
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
@@ -831,8 +837,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
#endif
- set_fs(USER_DS);
-
/*
* If we exec out of a kernel thread then thread.regs will not be
* set. Do it now.
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 8c3112a57cf..174e1e96175 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -69,6 +69,7 @@ unsigned long tce_alloc_start, tce_alloc_end;
u64 ppc64_rma_size;
#endif
static phys_addr_t first_memblock_size;
+static int __initdata boot_cpu_count;
static int __init early_parse_mem(char *p)
{
@@ -769,6 +770,13 @@ void __init early_init_devtree(void *params)
*/
of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
+#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
+ /* We'll later wait for secondaries to check in; there are
+ * NCPUS-1 non-boot CPUs :-)
+ */
+ spinning_secondaries = boot_cpu_count - 1;
+#endif
+
DBG(" <- early_init_devtree()\n");
}
@@ -862,16 +870,14 @@ static int prom_reconfig_notifier(struct notifier_block *nb,
switch (action) {
case PSERIES_RECONFIG_ADD:
err = of_finish_dynamic_node(node);
- if (err < 0) {
+ if (err < 0)
printk(KERN_ERR "finish_node returned %d\n", err);
- err = NOTIFY_BAD;
- }
break;
default:
- err = NOTIFY_DONE;
+ err = 0;
break;
}
- return err;
+ return notifier_from_errno(err);
}
static struct notifier_block prom_reconfig_nb = {
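
The reconfig notifier above now returns notifier_from_errno(err) instead of hand-rolled NOTIFY_* values, so a zero errno maps to NOTIFY_OK and a negative errno to a stop value the caller can convert back. A conceptual user-space model of that round trip (constants mirror the kernel's, but this is illustrative only):

    #include <stdio.h>

    #define NOTIFY_OK               0x0001
    #define NOTIFY_STOP_MASK        0x8000

    static int from_errno(int err)
    {
            return err ? NOTIFY_STOP_MASK | (NOTIFY_OK - err) : NOTIFY_OK;
    }

    static int to_errno(int ret)
    {
            return (ret & NOTIFY_STOP_MASK) ? NOTIFY_OK - (ret & ~NOTIFY_STOP_MASK) : 0;
    }

    int main(void)
    {
            printf("errno -22 -> 0x%x -> %d\n", from_errno(-22), to_errno(from_errno(-22)));
            return 0;
    }
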
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index cb22024f2b4..05b7dd217f6 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -882,7 +882,7 @@ void user_disable_single_step(struct task_struct *task)
}
#ifdef CONFIG_HAVE_HW_BREAKPOINT
-void ptrace_triggered(struct perf_event *bp, int nmi,
+void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs)
{
struct perf_event_attr attr;
@@ -973,7 +973,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
&attr.bp_type);
thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
- ptrace_triggered, task);
+ ptrace_triggered, NULL, task);
if (IS_ERR(bp)) {
thread->ptrace_bps[0] = NULL;
ptrace_put_breakpoints(task);
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 271ff6318ed..d5ca8236315 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -24,6 +24,7 @@
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/slab.h>
+#include <linux/reboot.h>
#include <asm/prom.h>
#include <asm/rtas.h>
@@ -38,7 +39,7 @@
#include <asm/udbg.h>
#include <asm/syscalls.h>
#include <asm/smp.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/time.h>
#include <asm/mmu.h>
#include <asm/topology.h>
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index bf5f5ce3a7b..e037c7494fd 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
+#include <linux/reboot.h>
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <asm/rtas.h>
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 54e66da8f74..6cd8f0196b6 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -291,7 +291,7 @@ void __init find_and_init_phbs(void)
prop = of_get_property(of_chosen,
"linux,pci-assign-all-buses", NULL);
if (prop && *prop)
- ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+ pci_add_flags(PCI_REASSIGN_ALL_BUS);
#endif /* CONFIG_PPC32 */
}
}
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 67f6c3b5135..481ef064c8f 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -27,7 +27,7 @@
#include <asm/rtas.h>
#include <asm/prom.h>
#include <asm/nvram.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 79fca2651b6..b1d738d1289 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -375,6 +375,9 @@ void __init check_for_initrd(void)
int threads_per_core, threads_shift;
cpumask_t threads_core_mask;
+EXPORT_SYMBOL_GPL(threads_per_core);
+EXPORT_SYMBOL_GPL(threads_shift);
+EXPORT_SYMBOL_GPL(threads_core_mask);
static void __init cpu_init_thread_core_maps(int tpc)
{
@@ -704,29 +707,14 @@ static int powerpc_debugfs_init(void)
arch_initcall(powerpc_debugfs_init);
#endif
-static int ppc_dflt_bus_notify(struct notifier_block *nb,
- unsigned long action, void *data)
+void ppc_printk_progress(char *s, unsigned short hex)
{
- struct device *dev = data;
-
- /* We are only intereted in device addition */
- if (action != BUS_NOTIFY_ADD_DEVICE)
- return 0;
-
- set_dma_ops(dev, &dma_direct_ops);
-
- return NOTIFY_DONE;
+ pr_info("%s\n", s);
}
-static struct notifier_block ppc_dflt_plat_bus_notifier = {
- .notifier_call = ppc_dflt_bus_notify,
- .priority = INT_MAX,
-};
-
-static int __init setup_bus_notifier(void)
+void arch_setup_pdev_archdata(struct platform_device *pdev)
{
- bus_register_notifier(&platform_bus_type, &ppc_dflt_plat_bus_notifier);
- return 0;
+ pdev->archdata.dma_mask = DMA_BIT_MASK(32);
+ pdev->dev.dma_mask = &pdev->archdata.dma_mask;
+ set_dma_ops(&pdev->dev, &dma_direct_ops);
}
-
-arch_initcall(setup_bus_notifier);
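
Instead of a platform-bus notifier that installed DMA ops only at device-add time, the new arch_setup_pdev_archdata() hook above gives every platform device a 32-bit DMA mask and the direct DMA ops as soon as it is created. Drivers are unaffected and can still widen the mask in the usual way; a hypothetical probe fragment, purely for illustration:

    #include <linux/platform_device.h>
    #include <linux/dma-mapping.h>

    /* Illustrative only: the platform core has already installed a 32-bit
     * mask via arch_setup_pdev_archdata(); a device capable of 36-bit DMA
     * asks for more in the standard way. */
    static int example_probe(struct platform_device *pdev)
    {
            if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(36)))
                    dev_warn(&pdev->dev, "falling back to 32-bit DMA\n");
            return 0;
    }
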
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 620d792b52e..209135af0a4 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -48,8 +48,8 @@ extern void bootx_init(unsigned long r4, unsigned long phys);
int boot_cpuid = -1;
EXPORT_SYMBOL_GPL(boot_cpuid);
-int __initdata boot_cpu_count;
int boot_cpuid_phys;
+EXPORT_SYMBOL_GPL(boot_cpuid_phys);
int smp_hw_index[NR_CPUS];
@@ -127,6 +127,8 @@ notrace void __init machine_init(unsigned long dt_ptr)
/* Do some early initialization based on the flat device tree */
early_init_devtree(__va(dt_ptr));
+ early_init_mmu();
+
probe_machine();
setup_kdump_trampoline();
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a88bf2713d4..aebef1320ed 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -63,6 +63,7 @@
#include <asm/kexec.h>
#include <asm/mmu_context.h>
#include <asm/code-patching.h>
+#include <asm/kvm_ppc.h>
#include "setup.h"
@@ -73,7 +74,7 @@
#endif
int boot_cpuid = 0;
-int __initdata boot_cpu_count;
+int __initdata spinning_secondaries;
u64 ppc64_pft_size;
/* Pick defaults since we might want to patch instructions
@@ -253,11 +254,11 @@ void smp_release_cpus(void)
for (i = 0; i < 100000; i++) {
mb();
HMT_low();
- if (boot_cpu_count == 0)
+ if (spinning_secondaries == 0)
break;
udelay(1);
}
- DBG("boot_cpu_count = %d\n", boot_cpu_count);
+ DBG("spinning_secondaries = %d\n", spinning_secondaries);
DBG(" <- smp_release_cpus()\n");
}
@@ -580,6 +581,8 @@ void __init setup_arch(char **cmdline_p)
/* Initialize the MMU context management stuff */
mmu_context_init();
+ kvm_rma_init();
+
ppc64_boot_msg(0x15, "Setup Done");
}
diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c
index 03e45c4a9ef..640de836e46 100644
--- a/arch/powerpc/kernel/smp-tbsync.c
+++ b/arch/powerpc/kernel/smp-tbsync.c
@@ -11,7 +11,7 @@
#include <linux/unistd.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/time.h>
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8ebc6700b98..7bf2187dfd9 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -33,7 +33,7 @@
#include <linux/topology.h>
#include <asm/ptrace.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -202,14 +202,6 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
smp_ops->cause_ipi(cpu, info->data);
}
-void smp_muxed_ipi_resend(void)
-{
- struct cpu_messages *info = &__get_cpu_var(ipi_message);
-
- if (info->messages)
- smp_ops->cause_ipi(smp_processor_id(), info->data);
-}
-
irqreturn_t smp_ipi_demux(void)
{
struct cpu_messages *info = &__get_cpu_var(ipi_message);
@@ -238,15 +230,26 @@ irqreturn_t smp_ipi_demux(void)
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */
+static inline void do_message_pass(int cpu, int msg)
+{
+ if (smp_ops->message_pass)
+ smp_ops->message_pass(cpu, msg);
+#ifdef CONFIG_PPC_SMP_MUXED_IPI
+ else
+ smp_muxed_ipi_message_pass(cpu, msg);
+#endif
+}
+
void smp_send_reschedule(int cpu)
{
if (likely(smp_ops))
- smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
+ do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
void arch_send_call_function_single_ipi(int cpu)
{
- smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
+ do_message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -254,7 +257,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
unsigned int cpu;
for_each_cpu(cpu, mask)
- smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
+ do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
@@ -268,7 +271,7 @@ void smp_send_debugger_break(void)
for_each_online_cpu(cpu)
if (cpu != me)
- smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
+ do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif
@@ -303,6 +306,10 @@ struct thread_info *current_set[NR_CPUS];
static void __devinit smp_store_cpu_info(int id)
{
per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
+#ifdef CONFIG_PPC_FSL_BOOK3E
+ per_cpu(next_tlbcam_idx, id)
+ = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
+#endif
}
void __init smp_prepare_cpus(unsigned int max_cpus)
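[Editor's note: the do_message_pass() helper added above lets a platform either supply a full message_pass hook or fall back to the generic muxed-IPI path. A hedged sketch of the latter case follows; the example_* names are illustrative only and the remaining smp_ops hooks are omitted.]

#include <asm/smp.h>

static void example_cause_ipi(int cpu, unsigned long data)
{
	/* ring the single hardware IPI/doorbell aimed at @cpu here */
}

static struct smp_ops_t example_smp_ops = {
	.message_pass	= NULL,			/* fall back to smp_muxed_ipi_message_pass() */
	.cause_ipi	= example_cause_ipi,	/* only present under CONFIG_PPC_SMP_MUXED_IPI */
	/* .probe, .kick_cpu, .setup_cpu, ... omitted from this sketch */
};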
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index f33acfd872a..03b29a6759a 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -544,7 +544,7 @@ DEFINE_PER_CPU(u8, irq_work_pending);
#endif /* 32 vs 64 bit */
-void set_irq_work_pending(void)
+void arch_irq_work_raise(void)
{
preempt_disable();
set_irq_work_pending_flag();
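[Editor's note: renaming set_irq_work_pending() to arch_irq_work_raise() plugs the decrementer-based self-interrupt into the generic irq_work API. A hedged sketch of a caller, with invented example_* names:]

#include <linux/irq_work.h>
#include <linux/printk.h>

static void example_irq_work_fn(struct irq_work *work)
{
	pr_info("ran from interrupt context\n");
}

static struct irq_work example_work = {
	.func = example_irq_work_fn,
};

static void example_trigger(void)
{
	/* ends up in arch_irq_work_raise(), which forces a decrementer tick */
	irq_work_queue(&example_work);
}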
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 1a0141426cd..f19d9777d3c 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1387,10 +1387,7 @@ void SPEFloatingPointException(struct pt_regs *regs)
int code = 0;
int err;
- preempt_disable();
- if (regs->msr & MSR_SPE)
- giveup_spe(current);
- preempt_enable();
+ flush_spe_to_thread(current);
spefscr = current->thread.spefscr;
fpexc_mode = current->thread.fpexc_mode;
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index 23d65abbedc..faa82c1f3f6 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -31,6 +31,9 @@ void __init udbg_early_init(void)
#if defined(CONFIG_PPC_EARLY_DEBUG_LPAR)
/* For LPAR machines that have an HVC console on vterm 0 */
udbg_init_debug_lpar();
+#elif defined(CONFIG_PPC_EARLY_DEBUG_LPAR_HVSI)
+ /* For LPAR machines that have an HVSI console on vterm 0 */
+ udbg_init_debug_lpar_hvsi();
#elif defined(CONFIG_PPC_EARLY_DEBUG_G5)
/* For use on Apple G5 machines */
udbg_init_pmac_realmode();
@@ -68,6 +71,8 @@ void __init udbg_early_init(void)
#ifdef CONFIG_PPC_EARLY_DEBUG
console_loglevel = 10;
+
+ register_early_udbg_console();
#endif
}
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 5f3cff83e08..33aa715dab2 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -387,8 +387,10 @@ static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu,
}
}
-void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
+void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
+ int usermode = vcpu->arch.shared->msr & MSR_PR;
+
vcpu->arch.shadow_pid = !usermode;
}
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index b7baff78f90..78133deb4b6 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -20,7 +20,6 @@ config KVM
bool
select PREEMPT_NOTIFIERS
select ANON_INODES
- select KVM_MMIO
config KVM_BOOK3S_HANDLER
bool
@@ -28,16 +27,22 @@ config KVM_BOOK3S_HANDLER
config KVM_BOOK3S_32_HANDLER
bool
select KVM_BOOK3S_HANDLER
+ select KVM_MMIO
config KVM_BOOK3S_64_HANDLER
bool
select KVM_BOOK3S_HANDLER
+config KVM_BOOK3S_PR
+ bool
+ select KVM_MMIO
+
config KVM_BOOK3S_32
tristate "KVM support for PowerPC book3s_32 processors"
depends on EXPERIMENTAL && PPC_BOOK3S_32 && !SMP && !PTE_64BIT
select KVM
select KVM_BOOK3S_32_HANDLER
+ select KVM_BOOK3S_PR
---help---
Support running unmodified book3s_32 guest kernels
in virtual machines on book3s_32 host processors.
@@ -50,8 +55,8 @@ config KVM_BOOK3S_32
config KVM_BOOK3S_64
tristate "KVM support for PowerPC book3s_64 processors"
depends on EXPERIMENTAL && PPC_BOOK3S_64
- select KVM
select KVM_BOOK3S_64_HANDLER
+ select KVM
---help---
Support running unmodified book3s_64 and book3s_32 guest kernels
in virtual machines on book3s_64 host processors.
@@ -61,10 +66,34 @@ config KVM_BOOK3S_64
If unsure, say N.
+config KVM_BOOK3S_64_HV
+ bool "KVM support for POWER7 and PPC970 using hypervisor mode in host"
+ depends on KVM_BOOK3S_64
+ ---help---
+ Support running unmodified book3s_64 guest kernels in
+ virtual machines on POWER7 and PPC970 processors that have
+ hypervisor mode available to the host.
+
+ If you say Y here, KVM will use the hardware virtualization
+ facilities of POWER7 (and later) processors, meaning that
+ guest operating systems will run at full hardware speed
+ using supervisor and user modes. However, this also means
+ that KVM is not usable under PowerVM (pHyp), is only usable
+ on POWER7 (or later) processors and PPC970-family processors,
+ and cannot emulate a different processor from the host processor.
+
+ If unsure, say N.
+
+config KVM_BOOK3S_64_PR
+ def_bool y
+ depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV
+ select KVM_BOOK3S_PR
+
config KVM_440
bool "KVM support for PowerPC 440 processors"
depends on EXPERIMENTAL && 44x
select KVM
+ select KVM_MMIO
---help---
Support running unmodified 440 guest kernels in virtual machines on
440 host processors.
@@ -89,6 +118,7 @@ config KVM_E500
bool "KVM support for PowerPC E500 processors"
depends on EXPERIMENTAL && E500
select KVM
+ select KVM_MMIO
---help---
Support running unmodified E500 guest kernels in virtual machines on
E500 host processors.
@@ -99,6 +129,5 @@ config KVM_E500
If unsure, say N.
source drivers/vhost/Kconfig
-source drivers/virtio/Kconfig
endif # VIRTUALIZATION
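[Editor's note: per the Kconfig above, KVM_BOOK3S_64_HV and KVM_BOOK3S_64_PR are mutually exclusive for a given build (PR is the default whenever HV is not selected). A minimal sketch of how source files can key off the new symbols, mirroring what book3s_exports.c does later in this patch:]

#ifdef CONFIG_KVM_BOOK3S_64_HV
/* hypervisor-mode (bare-metal POWER7/PPC970) implementation */
#else	/* CONFIG_KVM_BOOK3S_64_PR */
/* "PR" implementation that runs guests in user/supervisor mode */
#endif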
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 4d6863823f6..08428e2c188 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -38,24 +38,42 @@ kvm-e500-objs := \
e500_emulate.o
kvm-objs-$(CONFIG_KVM_E500) := $(kvm-e500-objs)
-kvm-book3s_64-objs := \
- $(common-objs-y) \
+kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
+ ../../../virt/kvm/coalesced_mmio.o \
fpu.o \
book3s_paired_singles.o \
- book3s.o \
+ book3s_pr.o \
book3s_emulate.o \
book3s_interrupts.o \
book3s_mmu_hpte.o \
book3s_64_mmu_host.o \
book3s_64_mmu.o \
book3s_32_mmu.o
-kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-objs)
+
+kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
+ book3s_hv.o \
+ book3s_hv_interrupts.o \
+ book3s_64_mmu_hv.o
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
+ book3s_hv_rm_mmu.o \
+ book3s_64_vio_hv.o \
+ book3s_hv_builtin.o
+
+kvm-book3s_64-module-objs := \
+ ../../../virt/kvm/kvm_main.o \
+ powerpc.o \
+ emulate.o \
+ book3s.o \
+ $(kvm-book3s_64-objs-y)
+
+kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-module-objs)
kvm-book3s_32-objs := \
$(common-objs-y) \
fpu.o \
book3s_paired_singles.o \
book3s.o \
+ book3s_pr.o \
book3s_emulate.o \
book3s_interrupts.o \
book3s_mmu_hpte.o \
@@ -70,3 +88,4 @@ obj-$(CONFIG_KVM_E500) += kvm.o
obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o
+obj-y += $(kvm-book3s_64-builtin-objs-y)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 0f95b5cce03..f68a34d1603 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -17,7 +17,6 @@
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
-#include "trace.h"
#include <asm/reg.h>
#include <asm/cputable.h>
@@ -28,25 +27,17 @@
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
+#include <asm/page.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
+#include "trace.h"
+
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
/* #define EXIT_DEBUG */
-/* #define DEBUG_EXT */
-
-static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
- ulong msr);
-
-/* Some compatibility defines */
-#ifdef CONFIG_PPC_BOOK3S_32
-#define MSR_USER32 MSR_USER
-#define MSR_USER64 MSR_USER
-#define HW_PAGE_SIZE PAGE_SIZE
-#endif
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "exits", VCPU_STAT(sum_exits) },
@@ -77,100 +68,11 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}
-void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
-#ifdef CONFIG_PPC_BOOK3S_64
- memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb));
- memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
- sizeof(get_paca()->shadow_vcpu));
- to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
-#endif
-
-#ifdef CONFIG_PPC_BOOK3S_32
- current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
-#endif
-}
-
-void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_PPC_BOOK3S_64
- memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb));
- memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
- sizeof(get_paca()->shadow_vcpu));
- to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
-#endif
-
- kvmppc_giveup_ext(vcpu, MSR_FP);
- kvmppc_giveup_ext(vcpu, MSR_VEC);
- kvmppc_giveup_ext(vcpu, MSR_VSX);
-}
-
-static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
-{
- ulong smsr = vcpu->arch.shared->msr;
-
- /* Guest MSR values */
- smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
- /* Process MSR values */
- smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
- /* External providers the guest reserved */
- smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
- /* 64-bit Process MSR values */
-#ifdef CONFIG_PPC_BOOK3S_64
- smsr |= MSR_ISF | MSR_HV;
-#endif
- vcpu->arch.shadow_msr = smsr;
-}
-
-void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
-{
- ulong old_msr = vcpu->arch.shared->msr;
-
-#ifdef EXIT_DEBUG
- printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
-#endif
-
- msr &= to_book3s(vcpu)->msr_mask;
- vcpu->arch.shared->msr = msr;
- kvmppc_recalc_shadow_msr(vcpu);
-
- if (msr & MSR_POW) {
- if (!vcpu->arch.pending_exceptions) {
- kvm_vcpu_block(vcpu);
- vcpu->stat.halt_wakeup++;
-
- /* Unset POW bit after we woke up */
- msr &= ~MSR_POW;
- vcpu->arch.shared->msr = msr;
- }
- }
-
- if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
- (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
- kvmppc_mmu_flush_segments(vcpu);
- kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
-
- /* Preload magic page segment when in kernel mode */
- if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
- struct kvm_vcpu_arch *a = &vcpu->arch;
-
- if (msr & MSR_DR)
- kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
- else
- kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
- }
- }
-
- /* Preload FPU if it's enabled */
- if (vcpu->arch.shared->msr & MSR_FP)
- kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
-}
-
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags;
- kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec);
+ kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
vcpu->arch.mmu.reset_msr(vcpu);
}
@@ -204,11 +106,13 @@ static int kvmppc_book3s_vec2irqprio(unsigned int vec)
static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
unsigned int vec)
{
+ unsigned long old_pending = vcpu->arch.pending_exceptions;
+
clear_bit(kvmppc_book3s_vec2irqprio(vec),
&vcpu->arch.pending_exceptions);
- if (!vcpu->arch.pending_exceptions)
- vcpu->arch.shared->int_pending = 0;
+ kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
+ old_pending);
}
void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
@@ -225,8 +129,8 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
- to_book3s(vcpu)->prog_flags = flags;
- kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM);
+ /* might as well deliver this straight away */
+ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
@@ -266,21 +170,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
int deliver = 1;
int vec = 0;
- ulong flags = 0ULL;
- ulong crit_raw = vcpu->arch.shared->critical;
- ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
- bool crit;
-
- /* Truncate crit indicators in 32 bit mode */
- if (!(vcpu->arch.shared->msr & MSR_SF)) {
- crit_raw &= 0xffffffff;
- crit_r1 &= 0xffffffff;
- }
-
- /* Critical section when crit == r1 */
- crit = (crit_raw == crit_r1);
- /* ... and we're in supervisor mode */
- crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
+ bool crit = kvmppc_critical_section(vcpu);
switch (priority) {
case BOOK3S_IRQPRIO_DECREMENTER:
@@ -315,7 +205,6 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
break;
case BOOK3S_IRQPRIO_PROGRAM:
vec = BOOK3S_INTERRUPT_PROGRAM;
- flags = to_book3s(vcpu)->prog_flags;
break;
case BOOK3S_IRQPRIO_VSX:
vec = BOOK3S_INTERRUPT_VSX;
@@ -346,7 +235,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
#endif
if (deliver)
- kvmppc_inject_interrupt(vcpu, vec, flags);
+ kvmppc_inject_interrupt(vcpu, vec, 0);
return deliver;
}
@@ -392,64 +281,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
}
/* Tell the guest about our interrupt status */
- if (*pending)
- vcpu->arch.shared->int_pending = 1;
- else if (old_pending)
- vcpu->arch.shared->int_pending = 0;
-}
-
-void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
-{
- u32 host_pvr;
-
- vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
- vcpu->arch.pvr = pvr;
-#ifdef CONFIG_PPC_BOOK3S_64
- if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
- kvmppc_mmu_book3s_64_init(vcpu);
- to_book3s(vcpu)->hior = 0xfff00000;
- to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
- } else
-#endif
- {
- kvmppc_mmu_book3s_32_init(vcpu);
- to_book3s(vcpu)->hior = 0;
- to_book3s(vcpu)->msr_mask = 0xffffffffULL;
- }
-
- /* If we are in hypervisor level on 970, we can tell the CPU to
- * treat DCBZ as 32 bytes store */
- vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
- if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
- !strcmp(cur_cpu_spec->platform, "ppc970"))
- vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
-
- /* Cell performs badly if MSR_FEx are set. So let's hope nobody
- really needs them in a VM on Cell and force disable them. */
- if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
- to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
-
-#ifdef CONFIG_PPC_BOOK3S_32
- /* 32 bit Book3S always has 32 byte dcbz */
- vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
-#endif
-
- /* On some CPUs we can execute paired single operations natively */
- asm ( "mfpvr %0" : "=r"(host_pvr));
- switch (host_pvr) {
- case 0x00080200: /* lonestar 2.0 */
- case 0x00088202: /* lonestar 2.2 */
- case 0x70000100: /* gekko 1.0 */
- case 0x00080100: /* gekko 2.0 */
- case 0x00083203: /* gekko 2.3a */
- case 0x00083213: /* gekko 2.3b */
- case 0x00083204: /* gekko 2.4 */
- case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */
- case 0x00087200: /* broadway */
- vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
- /* Enable HID2.PSE - in case we need it later */
- mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
- }
+ kvmppc_update_int_pending(vcpu, *pending, old_pending);
}
pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -471,44 +303,6 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
return gfn_to_pfn(vcpu->kvm, gfn);
}
-/* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To
- * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
- * emulate 32 bytes dcbz length.
- *
- * The Book3s_64 inventors also realized this case and implemented a special bit
- * in the HID5 register, which is a hypervisor ressource. Thus we can't use it.
- *
- * My approach here is to patch the dcbz instruction on executing pages.
- */
-static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
-{
- struct page *hpage;
- u64 hpage_offset;
- u32 *page;
- int i;
-
- hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
- if (is_error_page(hpage)) {
- kvm_release_page_clean(hpage);
- return;
- }
-
- hpage_offset = pte->raddr & ~PAGE_MASK;
- hpage_offset &= ~0xFFFULL;
- hpage_offset /= 4;
-
- get_page(hpage);
- page = kmap_atomic(hpage, KM_USER0);
-
- /* patch dcbz into reserved instruction, so we trap */
- for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
- if ((page[i] & 0xff0007ff) == INS_DCBZ)
- page[i] &= 0xfffffff7;
-
- kunmap_atomic(page, KM_USER0);
- put_page(hpage);
-}
-
static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
struct kvmppc_pte *pte)
{
@@ -606,519 +400,6 @@ mmio:
return EMULATE_DO_MMIO;
}
-static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
-{
- ulong mp_pa = vcpu->arch.magic_page_pa;
-
- if (unlikely(mp_pa) &&
- unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
- return 1;
- }
-
- return kvm_is_visible_gfn(vcpu->kvm, gfn);
-}
-
-int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
- ulong eaddr, int vec)
-{
- bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
- int r = RESUME_GUEST;
- int relocated;
- int page_found = 0;
- struct kvmppc_pte pte;
- bool is_mmio = false;
- bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
- bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
- u64 vsid;
-
- relocated = data ? dr : ir;
-
- /* Resolve real address if translation turned on */
- if (relocated) {
- page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
- } else {
- pte.may_execute = true;
- pte.may_read = true;
- pte.may_write = true;
- pte.raddr = eaddr & KVM_PAM;
- pte.eaddr = eaddr;
- pte.vpage = eaddr >> 12;
- }
-
- switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
- case 0:
- pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
- break;
- case MSR_DR:
- case MSR_IR:
- vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
-
- if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
- pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
- else
- pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
- pte.vpage |= vsid;
-
- if (vsid == -1)
- page_found = -EINVAL;
- break;
- }
-
- if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
- (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
- /*
- * If we do the dcbz hack, we have to NX on every execution,
- * so we can patch the executing code. This renders our guest
- * NX-less.
- */
- pte.may_execute = !data;
- }
-
- if (page_found == -ENOENT) {
- /* Page not found in guest PTE entries */
- vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
- vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
- vcpu->arch.shared->msr |=
- (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
- kvmppc_book3s_queue_irqprio(vcpu, vec);
- } else if (page_found == -EPERM) {
- /* Storage protection */
- vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
- vcpu->arch.shared->dsisr =
- to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
- vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
- vcpu->arch.shared->msr |=
- (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
- kvmppc_book3s_queue_irqprio(vcpu, vec);
- } else if (page_found == -EINVAL) {
- /* Page not found in guest SLB */
- vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
- kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
- } else if (!is_mmio &&
- kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
- /* The guest's PTE is not mapped yet. Map on the host */
- kvmppc_mmu_map_page(vcpu, &pte);
- if (data)
- vcpu->stat.sp_storage++;
- else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
- (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
- kvmppc_patch_dcbz(vcpu, &pte);
- } else {
- /* MMIO */
- vcpu->stat.mmio_exits++;
- vcpu->arch.paddr_accessed = pte.raddr;
- r = kvmppc_emulate_mmio(run, vcpu);
- if ( r == RESUME_HOST_NV )
- r = RESUME_HOST;
- }
-
- return r;
-}
-
-static inline int get_fpr_index(int i)
-{
-#ifdef CONFIG_VSX
- i *= 2;
-#endif
- return i;
-}
-
-/* Give up external provider (FPU, Altivec, VSX) */
-void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
-{
- struct thread_struct *t = &current->thread;
- u64 *vcpu_fpr = vcpu->arch.fpr;
-#ifdef CONFIG_VSX
- u64 *vcpu_vsx = vcpu->arch.vsr;
-#endif
- u64 *thread_fpr = (u64*)t->fpr;
- int i;
-
- if (!(vcpu->arch.guest_owned_ext & msr))
- return;
-
-#ifdef DEBUG_EXT
- printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
-#endif
-
- switch (msr) {
- case MSR_FP:
- giveup_fpu(current);
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
- vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
-
- vcpu->arch.fpscr = t->fpscr.val;
- break;
- case MSR_VEC:
-#ifdef CONFIG_ALTIVEC
- giveup_altivec(current);
- memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
- vcpu->arch.vscr = t->vscr;
-#endif
- break;
- case MSR_VSX:
-#ifdef CONFIG_VSX
- __giveup_vsx(current);
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
- vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
-#endif
- break;
- default:
- BUG();
- }
-
- vcpu->arch.guest_owned_ext &= ~msr;
- current->thread.regs->msr &= ~msr;
- kvmppc_recalc_shadow_msr(vcpu);
-}
-
-static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
-{
- ulong srr0 = kvmppc_get_pc(vcpu);
- u32 last_inst = kvmppc_get_last_inst(vcpu);
- int ret;
-
- ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
- if (ret == -ENOENT) {
- ulong msr = vcpu->arch.shared->msr;
-
- msr = kvmppc_set_field(msr, 33, 33, 1);
- msr = kvmppc_set_field(msr, 34, 36, 0);
- vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
- kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
- return EMULATE_AGAIN;
- }
-
- return EMULATE_DONE;
-}
-
-static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
-{
-
- /* Need to do paired single emulation? */
- if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
- return EMULATE_DONE;
-
- /* Read out the instruction */
- if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
- /* Need to emulate */
- return EMULATE_FAIL;
-
- return EMULATE_AGAIN;
-}
-
-/* Handle external providers (FPU, Altivec, VSX) */
-static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
- ulong msr)
-{
- struct thread_struct *t = &current->thread;
- u64 *vcpu_fpr = vcpu->arch.fpr;
-#ifdef CONFIG_VSX
- u64 *vcpu_vsx = vcpu->arch.vsr;
-#endif
- u64 *thread_fpr = (u64*)t->fpr;
- int i;
-
- /* When we have paired singles, we emulate in software */
- if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
- return RESUME_GUEST;
-
- if (!(vcpu->arch.shared->msr & msr)) {
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- return RESUME_GUEST;
- }
-
- /* We already own the ext */
- if (vcpu->arch.guest_owned_ext & msr) {
- return RESUME_GUEST;
- }
-
-#ifdef DEBUG_EXT
- printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
-#endif
-
- current->thread.regs->msr |= msr;
-
- switch (msr) {
- case MSR_FP:
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
- thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
-
- t->fpscr.val = vcpu->arch.fpscr;
- t->fpexc_mode = 0;
- kvmppc_load_up_fpu();
- break;
- case MSR_VEC:
-#ifdef CONFIG_ALTIVEC
- memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
- t->vscr = vcpu->arch.vscr;
- t->vrsave = -1;
- kvmppc_load_up_altivec();
-#endif
- break;
- case MSR_VSX:
-#ifdef CONFIG_VSX
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
- thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
- kvmppc_load_up_vsx();
-#endif
- break;
- default:
- BUG();
- }
-
- vcpu->arch.guest_owned_ext |= msr;
-
- kvmppc_recalc_shadow_msr(vcpu);
-
- return RESUME_GUEST;
-}
-
-int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int exit_nr)
-{
- int r = RESUME_HOST;
-
- vcpu->stat.sum_exits++;
-
- run->exit_reason = KVM_EXIT_UNKNOWN;
- run->ready_for_interrupt_injection = 1;
-
- trace_kvm_book3s_exit(exit_nr, vcpu);
- kvm_resched(vcpu);
- switch (exit_nr) {
- case BOOK3S_INTERRUPT_INST_STORAGE:
- vcpu->stat.pf_instruc++;
-
-#ifdef CONFIG_PPC_BOOK3S_32
- /* We set segments as unused segments when invalidating them. So
- * treat the respective fault as segment fault. */
- if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
- == SR_INVALID) {
- kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
- r = RESUME_GUEST;
- break;
- }
-#endif
-
- /* only care about PTEG not found errors, but leave NX alone */
- if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
- r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
- vcpu->stat.sp_instruc++;
- } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
- (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
- /*
- * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
- * so we can't use the NX bit inside the guest. Let's cross our fingers,
- * that no guest that needs the dcbz hack does NX.
- */
- kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
- r = RESUME_GUEST;
- } else {
- vcpu->arch.shared->msr |=
- to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- r = RESUME_GUEST;
- }
- break;
- case BOOK3S_INTERRUPT_DATA_STORAGE:
- {
- ulong dar = kvmppc_get_fault_dar(vcpu);
- vcpu->stat.pf_storage++;
-
-#ifdef CONFIG_PPC_BOOK3S_32
- /* We set segments as unused segments when invalidating them. So
- * treat the respective fault as segment fault. */
- if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
- kvmppc_mmu_map_segment(vcpu, dar);
- r = RESUME_GUEST;
- break;
- }
-#endif
-
- /* The only case we need to handle is missing shadow PTEs */
- if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
- r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
- } else {
- vcpu->arch.shared->dar = dar;
- vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- r = RESUME_GUEST;
- }
- break;
- }
- case BOOK3S_INTERRUPT_DATA_SEGMENT:
- if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
- vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
- kvmppc_book3s_queue_irqprio(vcpu,
- BOOK3S_INTERRUPT_DATA_SEGMENT);
- }
- r = RESUME_GUEST;
- break;
- case BOOK3S_INTERRUPT_INST_SEGMENT:
- if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
- kvmppc_book3s_queue_irqprio(vcpu,
- BOOK3S_INTERRUPT_INST_SEGMENT);
- }
- r = RESUME_GUEST;
- break;
- /* We're good on these - the host merely wanted to get our attention */
- case BOOK3S_INTERRUPT_DECREMENTER:
- vcpu->stat.dec_exits++;
- r = RESUME_GUEST;
- break;
- case BOOK3S_INTERRUPT_EXTERNAL:
- vcpu->stat.ext_intr_exits++;
- r = RESUME_GUEST;
- break;
- case BOOK3S_INTERRUPT_PERFMON:
- r = RESUME_GUEST;
- break;
- case BOOK3S_INTERRUPT_PROGRAM:
- {
- enum emulation_result er;
- ulong flags;
-
-program_interrupt:
- flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
-
- if (vcpu->arch.shared->msr & MSR_PR) {
-#ifdef EXIT_DEBUG
- printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
-#endif
- if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
- (INS_DCBZ & 0xfffffff7)) {
- kvmppc_core_queue_program(vcpu, flags);
- r = RESUME_GUEST;
- break;
- }
- }
-
- vcpu->stat.emulated_inst_exits++;
- er = kvmppc_emulate_instruction(run, vcpu);
- switch (er) {
- case EMULATE_DONE:
- r = RESUME_GUEST_NV;
- break;
- case EMULATE_AGAIN:
- r = RESUME_GUEST;
- break;
- case EMULATE_FAIL:
- printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
- __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
- kvmppc_core_queue_program(vcpu, flags);
- r = RESUME_GUEST;
- break;
- case EMULATE_DO_MMIO:
- run->exit_reason = KVM_EXIT_MMIO;
- r = RESUME_HOST_NV;
- break;
- default:
- BUG();
- }
- break;
- }
- case BOOK3S_INTERRUPT_SYSCALL:
- if (vcpu->arch.osi_enabled &&
- (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
- (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
- /* MOL hypercalls */
- u64 *gprs = run->osi.gprs;
- int i;
-
- run->exit_reason = KVM_EXIT_OSI;
- for (i = 0; i < 32; i++)
- gprs[i] = kvmppc_get_gpr(vcpu, i);
- vcpu->arch.osi_needed = 1;
- r = RESUME_HOST_NV;
- } else if (!(vcpu->arch.shared->msr & MSR_PR) &&
- (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
- /* KVM PV hypercalls */
- kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
- r = RESUME_GUEST;
- } else {
- /* Guest syscalls */
- vcpu->stat.syscall_exits++;
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- r = RESUME_GUEST;
- }
- break;
- case BOOK3S_INTERRUPT_FP_UNAVAIL:
- case BOOK3S_INTERRUPT_ALTIVEC:
- case BOOK3S_INTERRUPT_VSX:
- {
- int ext_msr = 0;
-
- switch (exit_nr) {
- case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP; break;
- case BOOK3S_INTERRUPT_ALTIVEC: ext_msr = MSR_VEC; break;
- case BOOK3S_INTERRUPT_VSX: ext_msr = MSR_VSX; break;
- }
-
- switch (kvmppc_check_ext(vcpu, exit_nr)) {
- case EMULATE_DONE:
- /* everything ok - let's enable the ext */
- r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
- break;
- case EMULATE_FAIL:
- /* we need to emulate this instruction */
- goto program_interrupt;
- break;
- default:
- /* nothing to worry about - go again */
- break;
- }
- break;
- }
- case BOOK3S_INTERRUPT_ALIGNMENT:
- if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
- vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
- kvmppc_get_last_inst(vcpu));
- vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
- kvmppc_get_last_inst(vcpu));
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- }
- r = RESUME_GUEST;
- break;
- case BOOK3S_INTERRUPT_MACHINE_CHECK:
- case BOOK3S_INTERRUPT_TRACE:
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- r = RESUME_GUEST;
- break;
- default:
- /* Ugh - bork here! What did we get? */
- printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
- exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
- r = RESUME_HOST;
- BUG();
- break;
- }
-
-
- if (!(r & RESUME_HOST)) {
- /* To avoid clobbering exit_reason, only check for signals if
- * we aren't already exiting to userspace for some other
- * reason. */
- if (signal_pending(current)) {
-#ifdef EXIT_DEBUG
- printk(KERN_EMERG "KVM: Going back to host\n");
-#endif
- vcpu->stat.signal_exits++;
- run->exit_reason = KVM_EXIT_INTR;
- r = -EINTR;
- } else {
- /* In case an interrupt came in that was triggered
- * from userspace (like DEC), we need to check what
- * to inject now! */
- kvmppc_core_deliver_interrupts(vcpu);
- }
- }
-
- trace_kvm_book3s_reenter(r, vcpu);
-
- return r;
-}
-
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
return 0;
@@ -1179,69 +460,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
return 0;
}
-int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
-{
- struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
- int i;
-
- sregs->pvr = vcpu->arch.pvr;
-
- sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
- if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
- for (i = 0; i < 64; i++) {
- sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i;
- sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv;
- }
- } else {
- for (i = 0; i < 16; i++)
- sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];
-
- for (i = 0; i < 8; i++) {
- sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
- sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
- }
- }
-
- return 0;
-}
-
-int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
-{
- struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
- int i;
-
- kvmppc_set_pvr(vcpu, sregs->pvr);
-
- vcpu3s->sdr1 = sregs->u.s.sdr1;
- if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
- for (i = 0; i < 64; i++) {
- vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
- sregs->u.s.ppc64.slb[i].slbe);
- }
- } else {
- for (i = 0; i < 16; i++) {
- vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
- }
- for (i = 0; i < 8; i++) {
- kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
- (u32)sregs->u.s.ppc32.ibat[i]);
- kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
- (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
- kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
- (u32)sregs->u.s.ppc32.dbat[i]);
- kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
- (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
- }
- }
-
- /* Flush the MMU after messing with the segments */
- kvmppc_mmu_pte_flush(vcpu, 0, 0);
-
- return 0;
-}
-
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -ENOTSUPP;
@@ -1296,202 +514,3 @@ out:
mutex_unlock(&kvm->slots_lock);
return r;
}
-
-int kvmppc_core_check_processor_compat(void)
-{
- return 0;
-}
-
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
-{
- struct kvmppc_vcpu_book3s *vcpu_book3s;
- struct kvm_vcpu *vcpu;
- int err = -ENOMEM;
- unsigned long p;
-
- vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
- if (!vcpu_book3s)
- goto out;
-
- vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
- kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
- if (!vcpu_book3s->shadow_vcpu)
- goto free_vcpu;
-
- vcpu = &vcpu_book3s->vcpu;
- err = kvm_vcpu_init(vcpu, kvm, id);
- if (err)
- goto free_shadow_vcpu;
-
- p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
- /* the real shared page fills the last 4k of our page */
- vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
- if (!p)
- goto uninit_vcpu;
-
- vcpu->arch.host_retip = kvm_return_point;
- vcpu->arch.host_msr = mfmsr();
-#ifdef CONFIG_PPC_BOOK3S_64
- /* default to book3s_64 (970fx) */
- vcpu->arch.pvr = 0x3C0301;
-#else
- /* default to book3s_32 (750) */
- vcpu->arch.pvr = 0x84202;
-#endif
- kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
- vcpu_book3s->slb_nr = 64;
-
- /* remember where some real-mode handlers are */
- vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
- vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
- vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
-#ifdef CONFIG_PPC_BOOK3S_64
- vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
-#else
- vcpu->arch.rmcall = (ulong)kvmppc_rmcall;
-#endif
-
- vcpu->arch.shadow_msr = MSR_USER64;
-
- err = kvmppc_mmu_init(vcpu);
- if (err < 0)
- goto uninit_vcpu;
-
- return vcpu;
-
-uninit_vcpu:
- kvm_vcpu_uninit(vcpu);
-free_shadow_vcpu:
- kfree(vcpu_book3s->shadow_vcpu);
-free_vcpu:
- vfree(vcpu_book3s);
-out:
- return ERR_PTR(err);
-}
-
-void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
-{
- struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
-
- free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
- kvm_vcpu_uninit(vcpu);
- kfree(vcpu_book3s->shadow_vcpu);
- vfree(vcpu_book3s);
-}
-
-extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
-int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
-{
- int ret;
- double fpr[32][TS_FPRWIDTH];
- unsigned int fpscr;
- int fpexc_mode;
-#ifdef CONFIG_ALTIVEC
- vector128 vr[32];
- vector128 vscr;
- unsigned long uninitialized_var(vrsave);
- int used_vr;
-#endif
-#ifdef CONFIG_VSX
- int used_vsr;
-#endif
- ulong ext_msr;
-
- /* No need to go into the guest when all we do is going out */
- if (signal_pending(current)) {
- kvm_run->exit_reason = KVM_EXIT_INTR;
- return -EINTR;
- }
-
- /* Save FPU state in stack */
- if (current->thread.regs->msr & MSR_FP)
- giveup_fpu(current);
- memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
- fpscr = current->thread.fpscr.val;
- fpexc_mode = current->thread.fpexc_mode;
-
-#ifdef CONFIG_ALTIVEC
- /* Save Altivec state in stack */
- used_vr = current->thread.used_vr;
- if (used_vr) {
- if (current->thread.regs->msr & MSR_VEC)
- giveup_altivec(current);
- memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
- vscr = current->thread.vscr;
- vrsave = current->thread.vrsave;
- }
-#endif
-
-#ifdef CONFIG_VSX
- /* Save VSX state in stack */
- used_vsr = current->thread.used_vsr;
- if (used_vsr && (current->thread.regs->msr & MSR_VSX))
- __giveup_vsx(current);
-#endif
-
- /* Remember the MSR with disabled extensions */
- ext_msr = current->thread.regs->msr;
-
- /* XXX we get called with irq disabled - change that! */
- local_irq_enable();
-
- /* Preload FPU if it's enabled */
- if (vcpu->arch.shared->msr & MSR_FP)
- kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
-
- ret = __kvmppc_vcpu_entry(kvm_run, vcpu);
-
- local_irq_disable();
-
- current->thread.regs->msr = ext_msr;
-
- /* Make sure we save the guest FPU/Altivec/VSX state */
- kvmppc_giveup_ext(vcpu, MSR_FP);
- kvmppc_giveup_ext(vcpu, MSR_VEC);
- kvmppc_giveup_ext(vcpu, MSR_VSX);
-
- /* Restore FPU state from stack */
- memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
- current->thread.fpscr.val = fpscr;
- current->thread.fpexc_mode = fpexc_mode;
-
-#ifdef CONFIG_ALTIVEC
- /* Restore Altivec state from stack */
- if (used_vr && current->thread.used_vr) {
- memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
- current->thread.vscr = vscr;
- current->thread.vrsave = vrsave;
- }
- current->thread.used_vr = used_vr;
-#endif
-
-#ifdef CONFIG_VSX
- current->thread.used_vsr = used_vsr;
-#endif
-
- return ret;
-}
-
-static int kvmppc_book3s_init(void)
-{
- int r;
-
- r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
- THIS_MODULE);
-
- if (r)
- return r;
-
- r = kvmppc_mmu_hpte_sysinit();
-
- return r;
-}
-
-static void kvmppc_book3s_exit(void)
-{
- kvmppc_mmu_hpte_sysexit();
- kvm_exit();
-}
-
-module_init(kvmppc_book3s_init);
-module_exit(kvmppc_book3s_exit);
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index d7889ef3211..c6d3e194b6b 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -41,36 +41,36 @@ static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
}
static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
- struct kvmppc_vcpu_book3s *vcpu_book3s,
+ struct kvm_vcpu *vcpu,
gva_t eaddr)
{
int i;
u64 esid = GET_ESID(eaddr);
u64 esid_1t = GET_ESID_1T(eaddr);
- for (i = 0; i < vcpu_book3s->slb_nr; i++) {
+ for (i = 0; i < vcpu->arch.slb_nr; i++) {
u64 cmp_esid = esid;
- if (!vcpu_book3s->slb[i].valid)
+ if (!vcpu->arch.slb[i].valid)
continue;
- if (vcpu_book3s->slb[i].tb)
+ if (vcpu->arch.slb[i].tb)
cmp_esid = esid_1t;
- if (vcpu_book3s->slb[i].esid == cmp_esid)
- return &vcpu_book3s->slb[i];
+ if (vcpu->arch.slb[i].esid == cmp_esid)
+ return &vcpu->arch.slb[i];
}
dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
eaddr, esid, esid_1t);
- for (i = 0; i < vcpu_book3s->slb_nr; i++) {
- if (vcpu_book3s->slb[i].vsid)
+ for (i = 0; i < vcpu->arch.slb_nr; i++) {
+ if (vcpu->arch.slb[i].vsid)
dprintk(" %d: %c%c%c %llx %llx\n", i,
- vcpu_book3s->slb[i].valid ? 'v' : ' ',
- vcpu_book3s->slb[i].large ? 'l' : ' ',
- vcpu_book3s->slb[i].tb ? 't' : ' ',
- vcpu_book3s->slb[i].esid,
- vcpu_book3s->slb[i].vsid);
+ vcpu->arch.slb[i].valid ? 'v' : ' ',
+ vcpu->arch.slb[i].large ? 'l' : ' ',
+ vcpu->arch.slb[i].tb ? 't' : ' ',
+ vcpu->arch.slb[i].esid,
+ vcpu->arch.slb[i].vsid);
}
return NULL;
@@ -81,7 +81,7 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
{
struct kvmppc_slb *slb;
- slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), eaddr);
+ slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
if (!slb)
return 0;
@@ -180,7 +180,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
return 0;
}
- slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr);
+ slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
if (!slbe)
goto no_seg_found;
@@ -320,10 +320,10 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
esid_1t = GET_ESID_1T(rb);
slb_nr = rb & 0xfff;
- if (slb_nr > vcpu_book3s->slb_nr)
+ if (slb_nr > vcpu->arch.slb_nr)
return;
- slbe = &vcpu_book3s->slb[slb_nr];
+ slbe = &vcpu->arch.slb[slb_nr];
slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0;
@@ -344,38 +344,35 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
{
- struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
struct kvmppc_slb *slbe;
- if (slb_nr > vcpu_book3s->slb_nr)
+ if (slb_nr > vcpu->arch.slb_nr)
return 0;
- slbe = &vcpu_book3s->slb[slb_nr];
+ slbe = &vcpu->arch.slb[slb_nr];
return slbe->orige;
}
static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
{
- struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
struct kvmppc_slb *slbe;
- if (slb_nr > vcpu_book3s->slb_nr)
+ if (slb_nr > vcpu->arch.slb_nr)
return 0;
- slbe = &vcpu_book3s->slb[slb_nr];
+ slbe = &vcpu->arch.slb[slb_nr];
return slbe->origv;
}
static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
{
- struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
struct kvmppc_slb *slbe;
dprintk("KVM MMU: slbie(0x%llx)\n", ea);
- slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, ea);
+ slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
if (!slbe)
return;
@@ -389,13 +386,12 @@ static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
{
- struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
int i;
dprintk("KVM MMU: slbia()\n");
- for (i = 1; i < vcpu_book3s->slb_nr; i++)
- vcpu_book3s->slb[i].valid = false;
+ for (i = 1; i < vcpu->arch.slb_nr; i++)
+ vcpu->arch.slb[i].valid = false;
if (vcpu->arch.shared->msr & MSR_IR) {
kvmppc_mmu_flush_segments(vcpu);
@@ -464,7 +460,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
ulong mp_ea = vcpu->arch.magic_page_ea;
if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
- slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
+ slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
if (slb)
gvsid = slb->vsid;
}
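[Editor's note: a hedged sketch of the ESID comparison that kvmppc_mmu_book3s_64_find_slbe() performs above. The real GET_ESID/GET_ESID_1T macros live in the kernel headers; the *_EX versions here are simplified stand-ins assuming 256MB (2^28) and 1TB (2^40) segments. Plain user-space C, for illustration only.]

#include <stdio.h>

#define GET_ESID_EX(ea)		((ea) >> 28)	/* 256MB segments */
#define GET_ESID_1T_EX(ea)	((ea) >> 40)	/* 1TB segments   */

int main(void)
{
	unsigned long long ea = 0xc000000001234567ULL;	/* example effective address */

	/* a 1TB SLB entry (tb set) is matched on esid_1t, otherwise on esid */
	printf("esid=%llx esid_1t=%llx\n", GET_ESID_EX(ea), GET_ESID_1T_EX(ea));
	return 0;
}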
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
new file mode 100644
index 00000000000..bc3a2ea9421
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -0,0 +1,180 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/highmem.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/hugetlb.h>
+
+#include <asm/tlbflush.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu-hash64.h>
+#include <asm/hvcall.h>
+#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
+#include <asm/cputable.h>
+
+/* For now use fixed-size 16MB page table */
+#define HPT_ORDER 24
+#define HPT_NPTEG (1ul << (HPT_ORDER - 7)) /* 128B per pteg */
+#define HPT_HASH_MASK (HPT_NPTEG - 1)
+
+/* Pages in the VRMA are 16MB pages */
+#define VRMA_PAGE_ORDER 24
+#define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */
+
+/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
+#define MAX_LPID_970 63
+#define NR_LPIDS (LPID_RSVD + 1)
+unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)];
+
+long kvmppc_alloc_hpt(struct kvm *kvm)
+{
+ unsigned long hpt;
+ unsigned long lpid;
+
+ hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|__GFP_NOWARN,
+ HPT_ORDER - PAGE_SHIFT);
+ if (!hpt) {
+ pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n");
+ return -ENOMEM;
+ }
+ kvm->arch.hpt_virt = hpt;
+
+ do {
+ lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS);
+ if (lpid >= NR_LPIDS) {
+ pr_err("kvm_alloc_hpt: No LPIDs free\n");
+ free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
+ return -ENOMEM;
+ }
+ } while (test_and_set_bit(lpid, lpid_inuse));
+
+ kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
+ kvm->arch.lpid = lpid;
+
+ pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
+ return 0;
+}
+
+void kvmppc_free_hpt(struct kvm *kvm)
+{
+ clear_bit(kvm->arch.lpid, lpid_inuse);
+ free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
+}
+
+void kvmppc_map_vrma(struct kvm *kvm, struct kvm_userspace_memory_region *mem)
+{
+ unsigned long i;
+ unsigned long npages = kvm->arch.ram_npages;
+ unsigned long pfn;
+ unsigned long *hpte;
+ unsigned long hash;
+ struct kvmppc_pginfo *pginfo = kvm->arch.ram_pginfo;
+
+ if (!pginfo)
+ return;
+
+ /* VRMA can't be > 1TB */
+ if (npages > 1ul << (40 - kvm->arch.ram_porder))
+ npages = 1ul << (40 - kvm->arch.ram_porder);
+ /* Can't use more than 1 HPTE per HPTEG */
+ if (npages > HPT_NPTEG)
+ npages = HPT_NPTEG;
+
+ for (i = 0; i < npages; ++i) {
+ pfn = pginfo[i].pfn;
+ if (!pfn)
+ break;
+ /* can't use hpt_hash since va > 64 bits */
+ hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK;
+ /*
+ * We assume that the hash table is empty and no
+ * vcpus are using it at this stage. Since we create
+ * at most one HPTE per HPTEG, we just assume entry 7
+ * is available and use it.
+ */
+ hpte = (unsigned long *) (kvm->arch.hpt_virt + (hash << 7));
+ hpte += 7 * 2;
+ /* HPTE low word - RPN, protection, etc. */
+ hpte[1] = (pfn << PAGE_SHIFT) | HPTE_R_R | HPTE_R_C |
+ HPTE_R_M | PP_RWXX;
+ wmb();
+ hpte[0] = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
+ (i << (VRMA_PAGE_ORDER - 16)) | HPTE_V_BOLTED |
+ HPTE_V_LARGE | HPTE_V_VALID;
+ }
+}
+
+int kvmppc_mmu_hv_init(void)
+{
+ unsigned long host_lpid, rsvd_lpid;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return -EINVAL;
+
+ memset(lpid_inuse, 0, sizeof(lpid_inuse));
+
+ if (cpu_has_feature(CPU_FTR_ARCH_206)) {
+ host_lpid = mfspr(SPRN_LPID); /* POWER7 */
+ rsvd_lpid = LPID_RSVD;
+ } else {
+ host_lpid = 0; /* PPC970 */
+ rsvd_lpid = MAX_LPID_970;
+ }
+
+ set_bit(host_lpid, lpid_inuse);
+ /* rsvd_lpid is reserved for use in partition switching */
+ set_bit(rsvd_lpid, lpid_inuse);
+
+ return 0;
+}
+
+void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+{
+}
+
+static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
+{
+ kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
+}
+
+static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *gpte, bool data)
+{
+ return -ENOENT;
+}
+
+void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_206))
+ vcpu->arch.slb_nr = 32; /* POWER7 */
+ else
+ vcpu->arch.slb_nr = 64;
+
+ mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
+ mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;
+
+ vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
+}
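[Editor's note: a hedged sketch of the HPT geometry implied by the constants in the new file above (HPT_ORDER = 24, 128-byte PTEGs, 8 HPTEs of 16 bytes per PTEG). Plain user-space C, for illustration only.]

#include <stdio.h>

int main(void)
{
	unsigned long hpt_bytes = 1UL << 24;		/* HPT_ORDER */
	unsigned long nptegs    = hpt_bytes >> 7;	/* 128B per PTEG -> 131072 */
	unsigned long nhptes    = nptegs * 8;		/* 8 HPTEs per PTEG        */

	printf("HPT: %lu MB, %lu PTEGs, %lu HPTEs\n",
	       hpt_bytes >> 20, nptegs, nhptes);

	/* kvmppc_map_vrma() installs at most one HPTE per PTEG (entry 7),
	 * so it can map at most nptegs of the 16MB VRMA pages this way. */
	return 0;
}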
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
new file mode 100644
index 00000000000..ea0f8c537c2
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -0,0 +1,73 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/highmem.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/hugetlb.h>
+#include <linux/list.h>
+
+#include <asm/tlbflush.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu-hash64.h>
+#include <asm/hvcall.h>
+#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
+#include <asm/kvm_host.h>
+#include <asm/udbg.h>
+
+#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
+
+long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ unsigned long ioba, unsigned long tce)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvmppc_spapr_tce_table *stt;
+
+ /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
+ /* liobn, ioba, tce); */
+
+ list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
+ if (stt->liobn == liobn) {
+ unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
+ struct page *page;
+ u64 *tbl;
+
+ /* udbg_printf("H_PUT_TCE: liobn 0x%lx => stt=%p window_size=0x%x\n", */
+ /* liobn, stt, stt->window_size); */
+ if (ioba >= stt->window_size)
+ return H_PARAMETER;
+
+ page = stt->pages[idx / TCES_PER_PAGE];
+ tbl = (u64 *)page_address(page);
+
+ /* FIXME: Need to validate the TCE itself */
+ /* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */
+ tbl[idx % TCES_PER_PAGE] = tce;
+ return H_SUCCESS;
+ }
+ }
+
+ /* Didn't find the liobn, punt it to userspace */
+ return H_TOO_HARD;
+}
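[Editor's note: a hedged sketch of the ioba-to-TCE-slot arithmetic used in kvmppc_h_put_tce() above. It assumes SPAPR_TCE_SHIFT == 12 (4KB TCE pages); the backing page size below is illustrative, not taken from this patch.]

#include <stdio.h>

#define SPAPR_TCE_SHIFT_EX	12
#define PAGE_SIZE_EX		4096UL
#define TCES_PER_PAGE_EX	(PAGE_SIZE_EX / sizeof(unsigned long long))

int main(void)
{
	unsigned long ioba = 0x00203000;			/* example I/O bus address */
	unsigned long idx  = ioba >> SPAPR_TCE_SHIFT_EX;	/* global TCE index        */

	printf("page %lu, slot %lu\n",
	       (unsigned long)(idx / TCES_PER_PAGE_EX),
	       (unsigned long)(idx % TCES_PER_PAGE_EX));
	return 0;
}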
diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c
index 1dd5a1ddfd0..88c8f26add0 100644
--- a/arch/powerpc/kvm/book3s_exports.c
+++ b/arch/powerpc/kvm/book3s_exports.c
@@ -20,8 +20,11 @@
#include <linux/module.h>
#include <asm/kvm_book3s.h>
-EXPORT_SYMBOL_GPL(kvmppc_trampoline_enter);
-EXPORT_SYMBOL_GPL(kvmppc_trampoline_lowmem);
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline);
+#else
+EXPORT_SYMBOL_GPL(kvmppc_handler_trampoline_enter);
+EXPORT_SYMBOL_GPL(kvmppc_handler_lowmem_trampoline);
EXPORT_SYMBOL_GPL(kvmppc_rmcall);
EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
#ifdef CONFIG_ALTIVEC
@@ -30,3 +33,5 @@ EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
#ifdef CONFIG_VSX
EXPORT_SYMBOL_GPL(kvmppc_load_up_vsx);
#endif
+#endif
+
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
new file mode 100644
index 00000000000..cc0d7f1b19a
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -0,0 +1,1269 @@
+/*
+ * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
+ *
+ * Authors:
+ * Paul Mackerras <paulus@au1.ibm.com>
+ * Alexander Graf <agraf@suse.de>
+ * Kevin Wolf <mail@kevin-wolf.de>
+ *
+ * Description: KVM functions specific to running on Book 3S
+ * processors in hypervisor mode (specifically POWER7 and later).
+ *
+ * This file is derived from arch/powerpc/kvm/book3s.c,
+ * by Alexander Graf <agraf@suse.de>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/preempt.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/cpumask.h>
+#include <linux/spinlock.h>
+#include <linux/page-flags.h>
+
+#include <asm/reg.h>
+#include <asm/cputable.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu_context.h>
+#include <asm/lppaca.h>
+#include <asm/processor.h>
+#include <asm/cputhreads.h>
+#include <asm/page.h>
+#include <linux/gfp.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+
+/*
+ * For now, limit memory to 64GB and require it to be large pages.
+ * This value is chosen because it makes the ram_pginfo array be
+ * 64kB in size, which is about as large as we want to be trying
+ * to allocate with kmalloc.
+ */
+#define MAX_MEM_ORDER 36
+
+#define LARGE_PAGE_ORDER 24 /* 16MB pages */
+
+/* #define EXIT_DEBUG */
+/* #define EXIT_DEBUG_SIMPLE */
+/* #define EXIT_DEBUG_INT */
+
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ local_paca->kvm_hstate.kvm_vcpu = vcpu;
+ local_paca->kvm_hstate.kvm_vcore = vcpu->arch.vcore;
+}
+
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+{
+}
+
+static void kvmppc_vcpu_blocked(struct kvm_vcpu *vcpu);
+static void kvmppc_vcpu_unblocked(struct kvm_vcpu *vcpu);
+
+void kvmppc_vcpu_block(struct kvm_vcpu *vcpu)
+{
+ u64 now;
+ unsigned long dec_nsec;
+
+ now = get_tb();
+ if (now >= vcpu->arch.dec_expires && !kvmppc_core_pending_dec(vcpu))
+ kvmppc_core_queue_dec(vcpu);
+ if (vcpu->arch.pending_exceptions)
+ return;
+ if (vcpu->arch.dec_expires != ~(u64)0) {
+ dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC /
+ tb_ticks_per_sec;
+ hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
+ HRTIMER_MODE_REL);
+ }
+
+ kvmppc_vcpu_blocked(vcpu);
+
+ kvm_vcpu_block(vcpu);
+ vcpu->stat.halt_wakeup++;
+
+ if (vcpu->arch.dec_expires != ~(u64)0)
+ hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+
+ kvmppc_vcpu_unblocked(vcpu);
+}
+
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
+{
+ vcpu->arch.shregs.msr = msr;
+}
+
+void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
+{
+ vcpu->arch.pvr = pvr;
+}
+
+void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
+{
+ int r;
+
+ pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
+ pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
+ vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
+ for (r = 0; r < 16; ++r)
+ pr_err("r%2d = %.16lx r%d = %.16lx\n",
+ r, kvmppc_get_gpr(vcpu, r),
+ r+16, kvmppc_get_gpr(vcpu, r+16));
+ pr_err("ctr = %.16lx lr = %.16lx\n",
+ vcpu->arch.ctr, vcpu->arch.lr);
+ pr_err("srr0 = %.16llx srr1 = %.16llx\n",
+ vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
+ pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
+ vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
+ pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
+ vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
+ pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
+ vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
+ pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
+ pr_err("fault dar = %.16lx dsisr = %.8x\n",
+ vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
+ pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
+ for (r = 0; r < vcpu->arch.slb_max; ++r)
+ pr_err(" ESID = %.16llx VSID = %.16llx\n",
+ vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
+ pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
+ vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
+ vcpu->arch.last_inst);
+}
+
+struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
+{
+ int r;
+ struct kvm_vcpu *v, *ret = NULL;
+
+ mutex_lock(&kvm->lock);
+ kvm_for_each_vcpu(r, v, kvm) {
+ if (v->vcpu_id == id) {
+ ret = v;
+ break;
+ }
+ }
+ mutex_unlock(&kvm->lock);
+ return ret;
+}
+
+static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
+{
+ vpa->shared_proc = 1;
+ vpa->yield_count = 1;
+}
+
+static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
+ unsigned long flags,
+ unsigned long vcpuid, unsigned long vpa)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long pg_index, ra, len;
+ unsigned long pg_offset;
+ void *va;
+ struct kvm_vcpu *tvcpu;
+
+ tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
+ if (!tvcpu)
+ return H_PARAMETER;
+
+ flags >>= 63 - 18;
+ flags &= 7;
+ if (flags == 0 || flags == 4)
+ return H_PARAMETER;
+ if (flags < 4) {
+ if (vpa & 0x7f)
+ return H_PARAMETER;
+ /* registering new area; convert logical addr to real */
+ pg_index = vpa >> kvm->arch.ram_porder;
+ pg_offset = vpa & (kvm->arch.ram_psize - 1);
+ if (pg_index >= kvm->arch.ram_npages)
+ return H_PARAMETER;
+ if (kvm->arch.ram_pginfo[pg_index].pfn == 0)
+ return H_PARAMETER;
+ ra = kvm->arch.ram_pginfo[pg_index].pfn << PAGE_SHIFT;
+ ra |= pg_offset;
+ va = __va(ra);
+ if (flags <= 1)
+ len = *(unsigned short *)(va + 4);
+ else
+ len = *(unsigned int *)(va + 4);
+ if (pg_offset + len > kvm->arch.ram_psize)
+ return H_PARAMETER;
+ switch (flags) {
+ case 1: /* register VPA */
+ if (len < 640)
+ return H_PARAMETER;
+ tvcpu->arch.vpa = va;
+ init_vpa(vcpu, va);
+ break;
+ case 2: /* register DTL */
+ if (len < 48)
+ return H_PARAMETER;
+ if (!tvcpu->arch.vpa)
+ return H_RESOURCE;
+ len -= len % 48;
+ tvcpu->arch.dtl = va;
+ tvcpu->arch.dtl_end = va + len;
+ break;
+ case 3: /* register SLB shadow buffer */
+ if (len < 8)
+ return H_PARAMETER;
+ if (!tvcpu->arch.vpa)
+ return H_RESOURCE;
+ tvcpu->arch.slb_shadow = va;
+ len = (len - 16) / 16;
+ break;
+ }
+ } else {
+ switch (flags) {
+ case 5: /* unregister VPA */
+ if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl)
+ return H_RESOURCE;
+ tvcpu->arch.vpa = NULL;
+ break;
+ case 6: /* unregister DTL */
+ tvcpu->arch.dtl = NULL;
+ break;
+ case 7: /* unregister SLB shadow buffer */
+ tvcpu->arch.slb_shadow = NULL;
+ break;
+ }
+ }
+ return H_SUCCESS;
+}
+
+int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
+{
+ unsigned long req = kvmppc_get_gpr(vcpu, 3);
+ unsigned long target, ret = H_SUCCESS;
+ struct kvm_vcpu *tvcpu;
+
+ switch (req) {
+ case H_CEDE:
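+ /*
+ * The vcpu is voluntarily giving up the processor: mark it as
+ * ceded and block unless another vcpu has already prodded it
+ * with H_PROD, in which case consume the prod instead.
+ */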
+ vcpu->arch.shregs.msr |= MSR_EE;
+ vcpu->arch.ceded = 1;
+ smp_mb();
+ if (!vcpu->arch.prodded)
+ kvmppc_vcpu_block(vcpu);
+ else
+ vcpu->arch.prodded = 0;
+ smp_mb();
+ vcpu->arch.ceded = 0;
+ break;
+ case H_PROD:
+ target = kvmppc_get_gpr(vcpu, 4);
+ tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+ if (!tvcpu) {
+ ret = H_PARAMETER;
+ break;
+ }
+ tvcpu->arch.prodded = 1;
+ smp_mb();
+ if (tvcpu->arch.ceded) {
+ if (waitqueue_active(&tvcpu->wq)) {
+ wake_up_interruptible(&tvcpu->wq);
+ tvcpu->stat.halt_wakeup++;
+ }
+ }
+ break;
+ case H_CONFER:
+ break;
+ case H_REGISTER_VPA:
+ ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6));
+ break;
+ default:
+ return RESUME_HOST;
+ }
+ kvmppc_set_gpr(vcpu, 3, ret);
+ vcpu->arch.hcall_needed = 0;
+ return RESUME_GUEST;
+}
+
+static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ struct task_struct *tsk)
+{
+ int r = RESUME_HOST;
+
+ vcpu->stat.sum_exits++;
+
+ run->exit_reason = KVM_EXIT_UNKNOWN;
+ run->ready_for_interrupt_injection = 1;
+ switch (vcpu->arch.trap) {
+ /* We're good on these - the host merely wanted to get our attention */
+ case BOOK3S_INTERRUPT_HV_DECREMENTER:
+ vcpu->stat.dec_exits++;
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_EXTERNAL:
+ vcpu->stat.ext_intr_exits++;
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_PERFMON:
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_PROGRAM:
+ {
+ ulong flags;
+ /*
+ * Normally program interrupts are delivered directly
+ * to the guest by the hardware, but we can get here
+ * as a result of a hypervisor emulation interrupt
+ * (e40) getting turned into a 700 by BML RTAS.
+ */
+ flags = vcpu->arch.shregs.msr & 0x1f0000ull;
+ kvmppc_core_queue_program(vcpu, flags);
+ r = RESUME_GUEST;
+ break;
+ }
+ case BOOK3S_INTERRUPT_SYSCALL:
+ {
+ /* hcall - punt to userspace */
+ int i;
+
+ if (vcpu->arch.shregs.msr & MSR_PR) {
+ /* sc 1 from userspace - reflect to guest syscall */
+ kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
+ r = RESUME_GUEST;
+ break;
+ }
+ run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
+ for (i = 0; i < 9; ++i)
+ run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
+ run->exit_reason = KVM_EXIT_PAPR_HCALL;
+ vcpu->arch.hcall_needed = 1;
+ r = RESUME_HOST;
+ break;
+ }
+ /*
+ * We get these next two if the guest does a bad real-mode access,
+ * as we have enabled VRMA (virtualized real mode area) mode in the
+ * LPCR. We just generate an appropriate DSI/ISI to the guest.
+ */
+ case BOOK3S_INTERRUPT_H_DATA_STORAGE:
+ vcpu->arch.shregs.dsisr = vcpu->arch.fault_dsisr;
+ vcpu->arch.shregs.dar = vcpu->arch.fault_dar;
+ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_H_INST_STORAGE:
+ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE,
+ 0x08000000);
+ r = RESUME_GUEST;
+ break;
+ /*
+ * This occurs if the guest executes an illegal instruction.
+ * We just generate a program interrupt to the guest, since
+ * we don't emulate any guest instructions at this stage.
+ */
+ case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
+ kvmppc_core_queue_program(vcpu, 0x80000);
+ r = RESUME_GUEST;
+ break;
+ default:
+ kvmppc_dump_regs(vcpu);
+ printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
+ vcpu->arch.trap, kvmppc_get_pc(vcpu),
+ vcpu->arch.shregs.msr);
+ r = RESUME_HOST;
+ BUG();
+ break;
+ }
+
+
+ if (!(r & RESUME_HOST)) {
+ /* To avoid clobbering exit_reason, only check for signals if
+ * we aren't already exiting to userspace for some other
+ * reason. */
+ if (signal_pending(tsk)) {
+ vcpu->stat.signal_exits++;
+ run->exit_reason = KVM_EXIT_INTR;
+ r = -EINTR;
+ } else {
+ kvmppc_core_deliver_interrupts(vcpu);
+ }
+ }
+
+ return r;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ int i;
+
+ memset(sregs, 0, sizeof(struct kvm_sregs));
+ sregs->pvr = vcpu->arch.pvr;
+
+ for (i = 0; i < vcpu->arch.slb_max; i++) {
+ sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
+ sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
+ }
+
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ int i, j;
+
+ kvmppc_set_pvr(vcpu, sregs->pvr);
+
+ j = 0;
+ for (i = 0; i < vcpu->arch.slb_nr; i++) {
+ if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
+ vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
+ vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
+ ++j;
+ }
+ }
+ vcpu->arch.slb_max = j;
+
+ return 0;
+}
+
+int kvmppc_core_check_processor_compat(void)
+{
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ return 0;
+ return -EIO;
+}
+
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+ struct kvm_vcpu *vcpu;
+ int err = -EINVAL;
+ int core;
+ struct kvmppc_vcore *vcore;
+
+ core = id / threads_per_core;
+ if (core >= KVM_MAX_VCORES)
+ goto out;
+
+ err = -ENOMEM;
+ vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+ if (!vcpu)
+ goto out;
+
+ err = kvm_vcpu_init(vcpu, kvm, id);
+ if (err)
+ goto free_vcpu;
+
+ vcpu->arch.shared = &vcpu->arch.shregs;
+ vcpu->arch.last_cpu = -1;
+ vcpu->arch.mmcr[0] = MMCR0_FC;
+ vcpu->arch.ctrl = CTRL_RUNLATCH;
+ /* default to host PVR, since we can't spoof it */
+ vcpu->arch.pvr = mfspr(SPRN_PVR);
+ kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
+
+ kvmppc_mmu_book3s_hv_init(vcpu);
+
+ /*
+ * Some vcpus may start out in stopped state. If we initialize
+ * them to busy-in-host state they will stop other vcpus in the
+ * vcore from running. Instead we initialize them to blocked
+ * state, effectively considering them to be stopped until we
+ * see the first run ioctl for them.
+ */
+ vcpu->arch.state = KVMPPC_VCPU_BLOCKED;
+
+ init_waitqueue_head(&vcpu->arch.cpu_run);
+
+ mutex_lock(&kvm->lock);
+ vcore = kvm->arch.vcores[core];
+ if (!vcore) {
+ vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
+ if (vcore) {
+ INIT_LIST_HEAD(&vcore->runnable_threads);
+ spin_lock_init(&vcore->lock);
+ }
+ kvm->arch.vcores[core] = vcore;
+ }
+ mutex_unlock(&kvm->lock);
+
+ if (!vcore) {
+ err = -ENOMEM;
+ goto free_vcpu;
+ }
+
+ spin_lock(&vcore->lock);
+ ++vcore->num_threads;
+ ++vcore->n_blocked;
+ spin_unlock(&vcore->lock);
+ vcpu->arch.vcore = vcore;
+
+ return vcpu;
+
+free_vcpu:
+ kfree(vcpu);
+out:
+ return ERR_PTR(err);
+}
+
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ kvm_vcpu_uninit(vcpu);
+ kfree(vcpu);
+}
+
+static void kvmppc_vcpu_blocked(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ spin_lock(&vc->lock);
+ vcpu->arch.state = KVMPPC_VCPU_BLOCKED;
+ ++vc->n_blocked;
+ if (vc->n_runnable > 0 &&
+ vc->n_runnable + vc->n_blocked == vc->num_threads) {
+ vcpu = list_first_entry(&vc->runnable_threads, struct kvm_vcpu,
+ arch.run_list);
+ wake_up(&vcpu->arch.cpu_run);
+ }
+ spin_unlock(&vc->lock);
+}
+
+static void kvmppc_vcpu_unblocked(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ spin_lock(&vc->lock);
+ vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
+ --vc->n_blocked;
+ spin_unlock(&vc->lock);
+}
+
+extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
+extern void xics_wake_cpu(int cpu);
+
+static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
+ struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu *v;
+
+ if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
+ return;
+ vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
+ --vc->n_runnable;
+ /* decrement the physical thread id of each following vcpu */
+ v = vcpu;
+ list_for_each_entry_continue(v, &vc->runnable_threads, arch.run_list)
+ --v->arch.ptid;
+ list_del(&vcpu->arch.run_list);
+}
+
+static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
+{
+ int cpu;
+ struct paca_struct *tpaca;
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ cpu = vc->pcpu + vcpu->arch.ptid;
+ tpaca = &paca[cpu];
+ tpaca->kvm_hstate.kvm_vcpu = vcpu;
+ tpaca->kvm_hstate.kvm_vcore = vc;
+ smp_wmb();
+#ifdef CONFIG_PPC_ICP_NATIVE
+ if (vcpu->arch.ptid) {
+ tpaca->cpu_start = 0x80;
+ tpaca->kvm_hstate.in_guest = KVM_GUEST_MODE_GUEST;
+ wmb();
+ xics_wake_cpu(cpu);
+ ++vc->n_woken;
+ }
+#endif
+}
+
+static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
+{
+ int i;
+
+ HMT_low();
+ i = 0;
+ while (vc->nap_count < vc->n_woken) {
+ if (++i >= 1000000) {
+ pr_err("kvmppc_wait_for_nap timeout %d %d\n",
+ vc->nap_count, vc->n_woken);
+ break;
+ }
+ cpu_relax();
+ }
+ HMT_medium();
+}
+
+/*
+ * Check that we are on thread 0 and that any other threads in
+ * this core are off-line.
+ */
+static int on_primary_thread(void)
+{
+ int cpu = smp_processor_id();
+ int thr = cpu_thread_in_core(cpu);
+
+ if (thr)
+ return 0;
+ while (++thr < threads_per_core)
+ if (cpu_online(cpu + thr))
+ return 0;
+ return 1;
+}
+
+/*
+ * Run a set of guest threads on a physical core.
+ * Called with vc->lock held.
+ */
+static int kvmppc_run_core(struct kvmppc_vcore *vc)
+{
+ struct kvm_vcpu *vcpu, *vnext;
+ long ret;
+ u64 now;
+
+ /* don't start if any threads have a signal pending */
+ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+ if (signal_pending(vcpu->arch.run_task))
+ return 0;
+
+ /*
+ * Make sure we are running on thread 0, and that
+ * secondary threads are offline.
+ * XXX we should also block attempts to bring any
+ * secondary threads online.
+ */
+ if (threads_per_core > 1 && !on_primary_thread()) {
+ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+ vcpu->arch.ret = -EBUSY;
+ goto out;
+ }
+
+ vc->n_woken = 0;
+ vc->nap_count = 0;
+ vc->entry_exit_count = 0;
+ vc->vcore_running = 1;
+ vc->in_guest = 0;
+ vc->pcpu = smp_processor_id();
+ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+ kvmppc_start_thread(vcpu);
+ vcpu = list_first_entry(&vc->runnable_threads, struct kvm_vcpu,
+ arch.run_list);
+
+ spin_unlock(&vc->lock);
+
+ preempt_disable();
+ kvm_guest_enter();
+ __kvmppc_vcore_entry(NULL, vcpu);
+
+ /* wait for secondary threads to finish writing their state to memory */
+ spin_lock(&vc->lock);
+ if (vc->nap_count < vc->n_woken)
+ kvmppc_wait_for_nap(vc);
+ /* prevent other vcpu threads from doing kvmppc_start_thread() now */
+ vc->vcore_running = 2;
+ spin_unlock(&vc->lock);
+
+ /* make sure updates to secondary vcpu structs are visible now */
+ smp_mb();
+ kvm_guest_exit();
+
+ preempt_enable();
+ kvm_resched(vcpu);
+
+ now = get_tb();
+ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
+ /* cancel pending dec exception if dec is positive */
+ if (now < vcpu->arch.dec_expires &&
+ kvmppc_core_pending_dec(vcpu))
+ kvmppc_core_dequeue_dec(vcpu);
+ if (!vcpu->arch.trap) {
+ if (signal_pending(vcpu->arch.run_task)) {
+ vcpu->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
+ vcpu->arch.ret = -EINTR;
+ }
+ continue; /* didn't get to run */
+ }
+ ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
+ vcpu->arch.run_task);
+ vcpu->arch.ret = ret;
+ vcpu->arch.trap = 0;
+ }
+
+ spin_lock(&vc->lock);
+ out:
+ vc->vcore_running = 0;
+ list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
+ arch.run_list) {
+ if (vcpu->arch.ret != RESUME_GUEST) {
+ kvmppc_remove_runnable(vc, vcpu);
+ wake_up(&vcpu->arch.cpu_run);
+ }
+ }
+
+ return 1;
+}
+
+static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+ int ptid;
+ int wait_state;
+ struct kvmppc_vcore *vc;
+ DEFINE_WAIT(wait);
+
+ /* No need to go into the guest if all we would do is come straight back out */
+ if (signal_pending(current)) {
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ return -EINTR;
+ }
+
+ /* On PPC970, check that we have an RMA region */
+ if (!vcpu->kvm->arch.rma && cpu_has_feature(CPU_FTR_ARCH_201))
+ return -EPERM;
+
+ kvm_run->exit_reason = 0;
+ vcpu->arch.ret = RESUME_GUEST;
+ vcpu->arch.trap = 0;
+
+ flush_fp_to_thread(current);
+ flush_altivec_to_thread(current);
+ flush_vsx_to_thread(current);
+
+ /*
+ * Synchronize with other threads in this virtual core
+ */
+ vc = vcpu->arch.vcore;
+ spin_lock(&vc->lock);
+ /* This happens the first time this is called for a vcpu */
+ if (vcpu->arch.state == KVMPPC_VCPU_BLOCKED)
+ --vc->n_blocked;
+ vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
+ ptid = vc->n_runnable;
+ vcpu->arch.run_task = current;
+ vcpu->arch.kvm_run = kvm_run;
+ vcpu->arch.ptid = ptid;
+ list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
+ ++vc->n_runnable;
+
+ wait_state = TASK_INTERRUPTIBLE;
+ while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
+ if (signal_pending(current)) {
+ if (!vc->vcore_running) {
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ vcpu->arch.ret = -EINTR;
+ break;
+ }
+ /* have to wait for vcore to stop executing guest */
+ wait_state = TASK_UNINTERRUPTIBLE;
+ smp_send_reschedule(vc->pcpu);
+ }
+
+ if (!vc->vcore_running &&
+ vc->n_runnable + vc->n_blocked == vc->num_threads) {
+ /* we can run now */
+ if (kvmppc_run_core(vc))
+ continue;
+ }
+
+ if (vc->vcore_running == 1 && VCORE_EXIT_COUNT(vc) == 0)
+ kvmppc_start_thread(vcpu);
+
+ /* wait for other threads to come in, or wait for vcore */
+ prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
+ spin_unlock(&vc->lock);
+ schedule();
+ finish_wait(&vcpu->arch.cpu_run, &wait);
+ spin_lock(&vc->lock);
+ }
+
+ if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
+ kvmppc_remove_runnable(vc, vcpu);
+ spin_unlock(&vc->lock);
+
+ return vcpu->arch.ret;
+}
+
+int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ int r;
+
+ do {
+ r = kvmppc_run_vcpu(run, vcpu);
+
+ if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
+ !(vcpu->arch.shregs.msr & MSR_PR)) {
+ r = kvmppc_pseries_do_hcall(vcpu);
+ kvmppc_core_deliver_interrupts(vcpu);
+ }
+ } while (r == RESUME_GUEST);
+ return r;
+}
+
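+/*
+ * Each TCE is a u64 covering one (1 << SPAPR_TCE_SHIFT) page of the DMA
+ * window, so, for example, a 256MB window with 4kB TCE pages needs 64k
+ * entries (512kB of table), rounded up to a whole number of pages.
+ */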
+static long kvmppc_stt_npages(unsigned long window_size)
+{
+ return ALIGN((window_size >> SPAPR_TCE_SHIFT)
+ * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
+}
+
+static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
+{
+ struct kvm *kvm = stt->kvm;
+ int i;
+
+ mutex_lock(&kvm->lock);
+ list_del(&stt->list);
+ for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
+ __free_page(stt->pages[i]);
+ kfree(stt);
+ mutex_unlock(&kvm->lock);
+
+ kvm_put_kvm(kvm);
+}
+
+static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
+ struct page *page;
+
+ if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
+ return VM_FAULT_SIGBUS;
+
+ page = stt->pages[vmf->pgoff];
+ get_page(page);
+ vmf->page = page;
+ return 0;
+}
+
+static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
+ .fault = kvm_spapr_tce_fault,
+};
+
+static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ vma->vm_ops = &kvm_spapr_tce_vm_ops;
+ return 0;
+}
+
+static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
+{
+ struct kvmppc_spapr_tce_table *stt = filp->private_data;
+
+ release_spapr_tce_table(stt);
+ return 0;
+}
+
+static struct file_operations kvm_spapr_tce_fops = {
+ .mmap = kvm_spapr_tce_mmap,
+ .release = kvm_spapr_tce_release,
+};
+
+long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
+ struct kvm_create_spapr_tce *args)
+{
+ struct kvmppc_spapr_tce_table *stt = NULL;
+ long npages;
+ int ret = -ENOMEM;
+ int i;
+
+ /* Check this LIOBN hasn't been previously allocated */
+ list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
+ if (stt->liobn == args->liobn)
+ return -EBUSY;
+ }
+
+ npages = kvmppc_stt_npages(args->window_size);
+
+ stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
+ GFP_KERNEL);
+ if (!stt)
+ goto fail;
+
+ stt->liobn = args->liobn;
+ stt->window_size = args->window_size;
+ stt->kvm = kvm;
+
+ for (i = 0; i < npages; i++) {
+ stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!stt->pages[i])
+ goto fail;
+ }
+
+ kvm_get_kvm(kvm);
+
+ mutex_lock(&kvm->lock);
+ list_add(&stt->list, &kvm->arch.spapr_tce_tables);
+
+ mutex_unlock(&kvm->lock);
+
+ return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
+ stt, O_RDWR);
+
+fail:
+ if (stt) {
+ for (i = 0; i < npages; i++)
+ if (stt->pages[i])
+ __free_page(stt->pages[i]);
+
+ kfree(stt);
+ }
+ return ret;
+}
+
+/* Work out RMLS (real mode limit selector) field value for a given RMA size.
+ Assumes POWER7 or PPC970. */
+static inline int lpcr_rmls(unsigned long rma_size)
+{
+ switch (rma_size) {
+ case 32ul << 20: /* 32 MB */
+ if (cpu_has_feature(CPU_FTR_ARCH_206))
+ return 8; /* only supported on POWER7 */
+ return -1;
+ case 64ul << 20: /* 64 MB */
+ return 3;
+ case 128ul << 20: /* 128 MB */
+ return 7;
+ case 256ul << 20: /* 256 MB */
+ return 4;
+ case 1ul << 30: /* 1 GB */
+ return 2;
+ case 16ul << 30: /* 16 GB */
+ return 1;
+ case 256ul << 30: /* 256 GB */
+ return 0;
+ default:
+ return -1;
+ }
+}
+
+static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct kvmppc_rma_info *ri = vma->vm_file->private_data;
+ struct page *page;
+
+ if (vmf->pgoff >= ri->npages)
+ return VM_FAULT_SIGBUS;
+
+ page = pfn_to_page(ri->base_pfn + vmf->pgoff);
+ get_page(page);
+ vmf->page = page;
+ return 0;
+}
+
+static const struct vm_operations_struct kvm_rma_vm_ops = {
+ .fault = kvm_rma_fault,
+};
+
+static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_ops = &kvm_rma_vm_ops;
+ return 0;
+}
+
+static int kvm_rma_release(struct inode *inode, struct file *filp)
+{
+ struct kvmppc_rma_info *ri = filp->private_data;
+
+ kvm_release_rma(ri);
+ return 0;
+}
+
+static struct file_operations kvm_rma_fops = {
+ .mmap = kvm_rma_mmap,
+ .release = kvm_rma_release,
+};
+
+long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
+{
+ struct kvmppc_rma_info *ri;
+ long fd;
+
+ ri = kvm_alloc_rma();
+ if (!ri)
+ return -ENOMEM;
+
+ fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR);
+ if (fd < 0)
+ kvm_release_rma(ri);
+
+ ret->rma_size = ri->npages << PAGE_SHIFT;
+ return fd;
+}
+
+static struct page *hva_to_page(unsigned long addr)
+{
+ struct page *page[1];
+ int npages;
+
+ might_sleep();
+
+ npages = get_user_pages_fast(addr, 1, 1, page);
+
+ if (unlikely(npages != 1))
+ return NULL;
+
+ return page[0];
+}
+
+int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem)
+{
+ unsigned long psize, porder;
+ unsigned long i, npages, totalpages;
+ unsigned long pg_ix;
+ struct kvmppc_pginfo *pginfo;
+ unsigned long hva;
+ struct kvmppc_rma_info *ri = NULL;
+ struct page *page;
+
+ /* For now, only allow 16MB pages */
+ porder = LARGE_PAGE_ORDER;
+ psize = 1ul << porder;
+ if ((mem->memory_size & (psize - 1)) ||
+ (mem->guest_phys_addr & (psize - 1))) {
+ pr_err("bad memory_size=%llx @ %llx\n",
+ mem->memory_size, mem->guest_phys_addr);
+ return -EINVAL;
+ }
+
+ npages = mem->memory_size >> porder;
+ totalpages = (mem->guest_phys_addr + mem->memory_size) >> porder;
+
+ /* More memory than we have space to track? */
+ if (totalpages > (1ul << (MAX_MEM_ORDER - LARGE_PAGE_ORDER)))
+ return -EINVAL;
+
+ /* Do we already have an RMA registered? */
+ if (mem->guest_phys_addr == 0 && kvm->arch.rma)
+ return -EINVAL;
+
+ if (totalpages > kvm->arch.ram_npages)
+ kvm->arch.ram_npages = totalpages;
+
+ /* Is this one of our preallocated RMAs? */
+ if (mem->guest_phys_addr == 0) {
+ struct vm_area_struct *vma;
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, mem->userspace_addr);
+ if (vma && vma->vm_file &&
+ vma->vm_file->f_op == &kvm_rma_fops &&
+ mem->userspace_addr == vma->vm_start)
+ ri = vma->vm_file->private_data;
+ up_read(&current->mm->mmap_sem);
+ if (!ri && cpu_has_feature(CPU_FTR_ARCH_201)) {
+ pr_err("CPU requires an RMO\n");
+ return -EINVAL;
+ }
+ }
+
+ if (ri) {
+ unsigned long rma_size;
+ unsigned long lpcr;
+ long rmls;
+
+ rma_size = ri->npages << PAGE_SHIFT;
+ if (rma_size > mem->memory_size)
+ rma_size = mem->memory_size;
+ rmls = lpcr_rmls(rma_size);
+ if (rmls < 0) {
+ pr_err("Can't use RMA of 0x%lx bytes\n", rma_size);
+ return -EINVAL;
+ }
+ atomic_inc(&ri->use_count);
+ kvm->arch.rma = ri;
+ kvm->arch.n_rma_pages = rma_size >> porder;
+
+ /* Update LPCR and RMOR */
+ lpcr = kvm->arch.lpcr;
+ if (cpu_has_feature(CPU_FTR_ARCH_201)) {
+ /* PPC970; insert RMLS value (split field) in HID4 */
+ lpcr &= ~((1ul << HID4_RMLS0_SH) |
+ (3ul << HID4_RMLS2_SH));
+ lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
+ ((rmls & 3) << HID4_RMLS2_SH);
+ /* RMOR is also in HID4 */
+ lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
+ << HID4_RMOR_SH;
+ } else {
+ /* POWER7 */
+ lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
+ lpcr |= rmls << LPCR_RMLS_SH;
+ kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
+ }
+ kvm->arch.lpcr = lpcr;
+ pr_info("Using RMO at %lx size %lx (LPCR = %lx)\n",
+ ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
+ }
+
+ pg_ix = mem->guest_phys_addr >> porder;
+ pginfo = kvm->arch.ram_pginfo + pg_ix;
+ for (i = 0; i < npages; ++i, ++pg_ix) {
+ if (ri && pg_ix < kvm->arch.n_rma_pages) {
+ pginfo[i].pfn = ri->base_pfn +
+ (pg_ix << (porder - PAGE_SHIFT));
+ continue;
+ }
+ hva = mem->userspace_addr + (i << porder);
+ page = hva_to_page(hva);
+ if (!page) {
+ pr_err("oops, no pfn for hva %lx\n", hva);
+ goto err;
+ }
+ /* Check it's a 16MB page */
+ if (!PageHead(page) ||
+ compound_order(page) != (LARGE_PAGE_ORDER - PAGE_SHIFT)) {
+ pr_err("page at %lx isn't 16MB (o=%d)\n",
+ hva, compound_order(page));
+ goto err;
+ }
+ pginfo[i].pfn = page_to_pfn(page);
+ }
+
+ return 0;
+
+ err:
+ return -EINVAL;
+}
+
+void kvmppc_core_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem)
+{
+ if (mem->guest_phys_addr == 0 && mem->memory_size != 0 &&
+ !kvm->arch.rma)
+ kvmppc_map_vrma(kvm, mem);
+}
+
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+ long r;
+ unsigned long npages = 1ul << (MAX_MEM_ORDER - LARGE_PAGE_ORDER);
+ long err = -ENOMEM;
+ unsigned long lpcr;
+
+ /* Allocate hashed page table */
+ r = kvmppc_alloc_hpt(kvm);
+ if (r)
+ return r;
+
+ INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+
+ kvm->arch.ram_pginfo = kzalloc(npages * sizeof(struct kvmppc_pginfo),
+ GFP_KERNEL);
+ if (!kvm->arch.ram_pginfo) {
+ pr_err("kvmppc_core_init_vm: couldn't alloc %lu bytes\n",
+ npages * sizeof(struct kvmppc_pginfo));
+ goto out_free;
+ }
+
+ kvm->arch.ram_npages = 0;
+ kvm->arch.ram_psize = 1ul << LARGE_PAGE_ORDER;
+ kvm->arch.ram_porder = LARGE_PAGE_ORDER;
+ kvm->arch.rma = NULL;
+ kvm->arch.n_rma_pages = 0;
+
+ kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_201)) {
+ /* PPC970; HID4 is effectively the LPCR */
+ unsigned long lpid = kvm->arch.lpid;
+ kvm->arch.host_lpid = 0;
+ kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
+ lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
+ lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
+ ((lpid & 0xf) << HID4_LPID5_SH);
+ } else {
+ /* POWER7; init LPCR for virtual RMA mode */
+ kvm->arch.host_lpid = mfspr(SPRN_LPID);
+ kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
+ lpcr &= LPCR_PECE | LPCR_LPES;
+ lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
+ LPCR_VPM0 | LPCR_VRMA_L;
+ }
+ kvm->arch.lpcr = lpcr;
+
+ return 0;
+
+ out_free:
+ kvmppc_free_hpt(kvm);
+ return err;
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+ struct kvmppc_pginfo *pginfo;
+ unsigned long i;
+
+ if (kvm->arch.ram_pginfo) {
+ pginfo = kvm->arch.ram_pginfo;
+ kvm->arch.ram_pginfo = NULL;
+ for (i = kvm->arch.n_rma_pages; i < kvm->arch.ram_npages; ++i)
+ if (pginfo[i].pfn)
+ put_page(pfn_to_page(pginfo[i].pfn));
+ kfree(pginfo);
+ }
+ if (kvm->arch.rma) {
+ kvm_release_rma(kvm->arch.rma);
+ kvm->arch.rma = NULL;
+ }
+
+ kvmppc_free_hpt(kvm);
+ WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
+}
+
+/* These are stubs for now */
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
+{
+}
+
+/* We don't need to emulate any privileged instructions or dcbz */
+int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int inst, int *advance)
+{
+ return EMULATE_FAIL;
+}
+
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+{
+ return EMULATE_FAIL;
+}
+
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+{
+ return EMULATE_FAIL;
+}
+
+static int kvmppc_book3s_hv_init(void)
+{
+ int r;
+
+ r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+
+ if (r)
+ return r;
+
+ r = kvmppc_mmu_hv_init();
+
+ return r;
+}
+
+static void kvmppc_book3s_hv_exit(void)
+{
+ kvm_exit();
+}
+
+module_init(kvmppc_book3s_hv_init);
+module_exit(kvmppc_book3s_hv_exit);
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
new file mode 100644
index 00000000000..d43120355ee
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/preempt.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/bootmem.h>
+#include <linux/init.h>
+
+#include <asm/cputable.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+
+/*
+ * This maintains a list of RMAs (real mode areas) for KVM guests to use.
+ * Each RMA has to be physically contiguous and of a size that the
+ * hardware supports. PPC970 and POWER7 support 64MB, 128MB and 256MB,
+ * and other larger sizes. Since we are unlikely to be able to allocate
+ * that much physically contiguous memory after the system is up and running,
+ * we preallocate a set of RMAs in early boot for KVM to use.
+ */
+static unsigned long kvm_rma_size = 64 << 20; /* 64MB */
+static unsigned long kvm_rma_count;
+
+static int __init early_parse_rma_size(char *p)
+{
+ if (!p)
+ return 1;
+
+ kvm_rma_size = memparse(p, &p);
+
+ return 0;
+}
+early_param("kvm_rma_size", early_parse_rma_size);
+
+static int __init early_parse_rma_count(char *p)
+{
+ if (!p)
+ return 1;
+
+ kvm_rma_count = simple_strtoul(p, NULL, 0);
+
+ return 0;
+}
+early_param("kvm_rma_count", early_parse_rma_count);
+
+static struct kvmppc_rma_info *rma_info;
+static LIST_HEAD(free_rmas);
+static DEFINE_SPINLOCK(rma_lock);
+
+/* Work out RMLS (real mode limit selector) field value for a given RMA size.
+ Assumes POWER7 or PPC970. */
+static inline int lpcr_rmls(unsigned long rma_size)
+{
+ switch (rma_size) {
+ case 32ul << 20: /* 32 MB */
+ if (cpu_has_feature(CPU_FTR_ARCH_206))
+ return 8; /* only supported on POWER7 */
+ return -1;
+ case 64ul << 20: /* 64 MB */
+ return 3;
+ case 128ul << 20: /* 128 MB */
+ return 7;
+ case 256ul << 20: /* 256 MB */
+ return 4;
+ case 1ul << 30: /* 1 GB */
+ return 2;
+ case 16ul << 30: /* 16 GB */
+ return 1;
+ case 256ul << 30: /* 256 GB */
+ return 0;
+ default:
+ return -1;
+ }
+}
+
+/*
+ * Called at boot time while the bootmem allocator is active,
+ * to allocate contiguous physical memory for the real memory
+ * areas for guests.
+ */
+void kvm_rma_init(void)
+{
+ unsigned long i;
+ unsigned long j, npages;
+ void *rma;
+ struct page *pg;
+
+ /* Only do this on PPC970 in HV mode */
+ if (!cpu_has_feature(CPU_FTR_HVMODE) ||
+ !cpu_has_feature(CPU_FTR_ARCH_201))
+ return;
+
+ if (!kvm_rma_size || !kvm_rma_count)
+ return;
+
+ /* Check that the requested size is one supported in hardware */
+ if (lpcr_rmls(kvm_rma_size) < 0) {
+ pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
+ return;
+ }
+
+ npages = kvm_rma_size >> PAGE_SHIFT;
+ rma_info = alloc_bootmem(kvm_rma_count * sizeof(struct kvmppc_rma_info));
+ for (i = 0; i < kvm_rma_count; ++i) {
+ rma = alloc_bootmem_align(kvm_rma_size, kvm_rma_size);
+ pr_info("Allocated KVM RMA at %p (%ld MB)\n", rma,
+ kvm_rma_size >> 20);
+ rma_info[i].base_virt = rma;
+ rma_info[i].base_pfn = __pa(rma) >> PAGE_SHIFT;
+ rma_info[i].npages = npages;
+ list_add_tail(&rma_info[i].list, &free_rmas);
+ atomic_set(&rma_info[i].use_count, 0);
+
+ pg = pfn_to_page(rma_info[i].base_pfn);
+ for (j = 0; j < npages; ++j) {
+ atomic_inc(&pg->_count);
+ ++pg;
+ }
+ }
+}
+
+struct kvmppc_rma_info *kvm_alloc_rma(void)
+{
+ struct kvmppc_rma_info *ri;
+
+ ri = NULL;
+ spin_lock(&rma_lock);
+ if (!list_empty(&free_rmas)) {
+ ri = list_first_entry(&free_rmas, struct kvmppc_rma_info, list);
+ list_del(&ri->list);
+ atomic_inc(&ri->use_count);
+ }
+ spin_unlock(&rma_lock);
+ return ri;
+}
+EXPORT_SYMBOL_GPL(kvm_alloc_rma);
+
+void kvm_release_rma(struct kvmppc_rma_info *ri)
+{
+ if (atomic_dec_and_test(&ri->use_count)) {
+ spin_lock(&rma_lock);
+ list_add_tail(&ri->list, &free_rmas);
+ spin_unlock(&rma_lock);
+
+ }
+}
+EXPORT_SYMBOL_GPL(kvm_release_rma);
+
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
new file mode 100644
index 00000000000..3f7b674dd4b
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -0,0 +1,166 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * Derived from book3s_interrupts.S, which is:
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/kvm_asm.h>
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/asm-offsets.h>
+#include <asm/exception-64s.h>
+#include <asm/ppc-opcode.h>
+
+/*****************************************************************************
+ * *
+ * Guest entry / exit code that is in kernel module memory (vmalloc) *
+ * *
+ ****************************************************************************/
+
+/* Registers:
+ * r4: vcpu pointer
+ */
+_GLOBAL(__kvmppc_vcore_entry)
+
+ /* Write correct stack frame */
+ mflr r0
+ std r0,PPC_LR_STKOFF(r1)
+
+ /* Save host state to the stack */
+ stdu r1, -SWITCH_FRAME_SIZE(r1)
+
+ /* Save non-volatile registers (r14 - r31) */
+ SAVE_NVGPRS(r1)
+
+ /* Save host DSCR */
+BEGIN_FTR_SECTION
+ mfspr r3, SPRN_DSCR
+ std r3, HSTATE_DSCR(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /* Save host DABR */
+ mfspr r3, SPRN_DABR
+ std r3, HSTATE_DABR(r13)
+
+ /* Hard-disable interrupts */
+ mfmsr r10
+ std r10, HSTATE_HOST_MSR(r13)
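+ /* rotate MSR_EE up to the top bit, clear it, then rotate back */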
+ rldicl r10,r10,48,1
+ rotldi r10,r10,16
+ mtmsrd r10,1
+
+ /* Save host PMU registers and load guest PMU registers */
+ /* R4 is live here (vcpu pointer) but not r3 or r5 */
+ li r3, 1
+ sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
+ mfspr r7, SPRN_MMCR0 /* save MMCR0 */
+ mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */
+ isync
+ ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
+ lbz r5, LPPACA_PMCINUSE(r3)
+ cmpwi r5, 0
+ beq 31f /* skip if not */
+ mfspr r5, SPRN_MMCR1
+ mfspr r6, SPRN_MMCRA
+ std r7, HSTATE_MMCR(r13)
+ std r5, HSTATE_MMCR + 8(r13)
+ std r6, HSTATE_MMCR + 16(r13)
+ mfspr r3, SPRN_PMC1
+ mfspr r5, SPRN_PMC2
+ mfspr r6, SPRN_PMC3
+ mfspr r7, SPRN_PMC4
+ mfspr r8, SPRN_PMC5
+ mfspr r9, SPRN_PMC6
+BEGIN_FTR_SECTION
+ mfspr r10, SPRN_PMC7
+ mfspr r11, SPRN_PMC8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ stw r3, HSTATE_PMC(r13)
+ stw r5, HSTATE_PMC + 4(r13)
+ stw r6, HSTATE_PMC + 8(r13)
+ stw r7, HSTATE_PMC + 12(r13)
+ stw r8, HSTATE_PMC + 16(r13)
+ stw r9, HSTATE_PMC + 20(r13)
+BEGIN_FTR_SECTION
+ stw r10, HSTATE_PMC + 24(r13)
+ stw r11, HSTATE_PMC + 28(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+31:
+
+ /*
+ * Put whatever is in the decrementer into the
+ * hypervisor decrementer.
+ */
+ mfspr r8,SPRN_DEC
+ mftb r7
+ mtspr SPRN_HDEC,r8
+ extsw r8,r8
+ add r8,r8,r7
+ std r8,HSTATE_DECEXP(r13)
+
+ /*
+ * On PPC970, if the guest vcpu has an external interrupt pending,
+ * send ourselves an IPI so as to interrupt the guest once it
+ * enables interrupts. (It must have interrupts disabled,
+ * otherwise we would already have delivered the interrupt.)
+ */
+BEGIN_FTR_SECTION
+ ld r0, VCPU_PENDING_EXC(r4)
+ li r7, (1 << BOOK3S_IRQPRIO_EXTERNAL)
+ oris r7, r7, (1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
+ and. r0, r0, r7
+ beq 32f
+ mr r31, r4
+ lhz r3, PACAPACAINDEX(r13)
+ bl smp_send_reschedule
+ nop
+ mr r4, r31
+32:
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+
+ /* Jump to partition switch code */
+ bl .kvmppc_hv_entry_trampoline
+ nop
+
+/*
+ * We return here in virtual mode after the guest exits
+ * with something that we can't handle in real mode.
+ * Interrupts are enabled again at this point.
+ */
+
+.global kvmppc_handler_highmem
+kvmppc_handler_highmem:
+
+ /*
+ * Register usage at this point:
+ *
+ * R1 = host R1
+ * R2 = host R2
+ * R12 = exit handler id
+ * R13 = PACA
+ */
+
+ /* Restore non-volatile host registers (r14 - r31) */
+ REST_NVGPRS(r1)
+
+ addi r1, r1, SWITCH_FRAME_SIZE
+ ld r0, PPC_LR_STKOFF(r1)
+ mtlr r0
+ blr
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
new file mode 100644
index 00000000000..fcfe6b05555
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -0,0 +1,370 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/hugetlb.h>
+
+#include <asm/tlbflush.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu-hash64.h>
+#include <asm/hvcall.h>
+#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
+
+/* For now use fixed-size 16MB page table */
+#define HPT_ORDER 24
+#define HPT_NPTEG (1ul << (HPT_ORDER - 7)) /* 128B per pteg */
+#define HPT_HASH_MASK (HPT_NPTEG - 1)
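+/*
+ * With HPT_ORDER = 24 the table is 2^24 bytes = 16MB; at 128 bytes
+ * (8 HPTEs of 16 bytes) per PTEG that is 2^17 PTEGs and 2^20 HPTEs.
+ */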
+
+#define HPTE_V_HVLOCK 0x40UL
+
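+/*
+ * Try once to set HPTE_V_HVLOCK in the first doubleword of an HPTE.
+ * Fails (returns 0) if any of the caller-supplied bits (e.g. HPTE_V_VALID)
+ * are already set, or if the store-conditional loses a race; returns
+ * non-zero on success.
+ */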
+static inline long lock_hpte(unsigned long *hpte, unsigned long bits)
+{
+ unsigned long tmp, old;
+
+ asm volatile(" ldarx %0,0,%2\n"
+ " and. %1,%0,%3\n"
+ " bne 2f\n"
+ " ori %0,%0,%4\n"
+ " stdcx. %0,0,%2\n"
+ " beq+ 2f\n"
+ " li %1,%3\n"
+ "2: isync"
+ : "=&r" (tmp), "=&r" (old)
+ : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
+ : "cc", "memory");
+ return old == 0;
+}
+
+long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
+ long pte_index, unsigned long pteh, unsigned long ptel)
+{
+ unsigned long porder;
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long i, lpn, pa;
+ unsigned long *hpte;
+
+ /* only handle 4k, 64k and 16M pages for now */
+ porder = 12;
+ if (pteh & HPTE_V_LARGE) {
+ if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+ (ptel & 0xf000) == 0x1000) {
+ /* 64k page */
+ porder = 16;
+ } else if ((ptel & 0xff000) == 0) {
+ /* 16M page */
+ porder = 24;
+ /* lowest AVA bit must be 0 for 16M pages */
+ if (pteh & 0x80)
+ return H_PARAMETER;
+ } else
+ return H_PARAMETER;
+ }
+ lpn = (ptel & HPTE_R_RPN) >> kvm->arch.ram_porder;
+ if (lpn >= kvm->arch.ram_npages || porder > kvm->arch.ram_porder)
+ return H_PARAMETER;
+ pa = kvm->arch.ram_pginfo[lpn].pfn << PAGE_SHIFT;
+ if (!pa)
+ return H_PARAMETER;
+ /* Check WIMG */
+ if ((ptel & HPTE_R_WIMG) != HPTE_R_M &&
+ (ptel & HPTE_R_WIMG) != (HPTE_R_W | HPTE_R_I | HPTE_R_M))
+ return H_PARAMETER;
+ pteh &= ~0x60UL;
+ ptel &= ~(HPTE_R_PP0 - kvm->arch.ram_psize);
+ ptel |= pa;
+ if (pte_index >= (HPT_NPTEG << 3))
+ return H_PARAMETER;
+ if (likely((flags & H_EXACT) == 0)) {
+ pte_index &= ~7UL;
+ hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ for (i = 0; ; ++i) {
+ if (i == 8)
+ return H_PTEG_FULL;
+ if ((*hpte & HPTE_V_VALID) == 0 &&
+ lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
+ break;
+ hpte += 2;
+ }
+ } else {
+ i = 0;
+ hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ if (!lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
+ return H_PTEG_FULL;
+ }
+ hpte[1] = ptel;
+ eieio();
+ hpte[0] = pteh;
+ asm volatile("ptesync" : : : "memory");
+ atomic_inc(&kvm->arch.ram_pginfo[lpn].refcnt);
+ vcpu->arch.gpr[4] = pte_index + i;
+ return H_SUCCESS;
+}
+
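+/*
+ * Build the RB operand for tlbie/tlbiel from an HPTE: the AVA comes from
+ * the first doubleword (v), the page size / LP encoding from the second
+ * (r), and the low-order VA bits that are not kept in the AVA are
+ * reconstructed from the PTE's position in its hash group (pte_index).
+ */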
+static unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
+ unsigned long pte_index)
+{
+ unsigned long rb, va_low;
+
+ rb = (v & ~0x7fUL) << 16; /* AVA field */
+ va_low = pte_index >> 3;
+ if (v & HPTE_V_SECONDARY)
+ va_low = ~va_low;
+ /* xor vsid from AVA */
+ if (!(v & HPTE_V_1TB_SEG))
+ va_low ^= v >> 12;
+ else
+ va_low ^= v >> 24;
+ va_low &= 0x7ff;
+ if (v & HPTE_V_LARGE) {
+ rb |= 1; /* L field */
+ if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+ (r & 0xff000)) {
+ /* non-16MB large page, must be 64k */
+ /* (masks depend on page size) */
+ rb |= 0x1000; /* page encoding in LP field */
+ rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
+ rb |= (va_low & 0xfe); /* AVAL field (P7 doesn't seem to care) */
+ }
+ } else {
+ /* 4kB page */
+ rb |= (va_low & 0x7ff) << 12; /* remaining 11b of VA */
+ }
+ rb |= (v >> 54) & 0x300; /* B field */
+ return rb;
+}
+
+#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
+
+static inline int try_lock_tlbie(unsigned int *lock)
+{
+ unsigned int tmp, old;
+ unsigned int token = LOCK_TOKEN;
+
+ asm volatile("1:lwarx %1,0,%2\n"
+ " cmpwi cr0,%1,0\n"
+ " bne 2f\n"
+ " stwcx. %3,0,%2\n"
+ " bne- 1b\n"
+ " isync\n"
+ "2:"
+ : "=&r" (tmp), "=&r" (old)
+ : "r" (lock), "r" (token)
+ : "cc", "memory");
+ return old == 0;
+}
+
+long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index, unsigned long avpn,
+ unsigned long va)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long *hpte;
+ unsigned long v, r, rb;
+
+ if (pte_index >= (HPT_NPTEG << 3))
+ return H_PARAMETER;
+ hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ while (!lock_hpte(hpte, HPTE_V_HVLOCK))
+ cpu_relax();
+ if ((hpte[0] & HPTE_V_VALID) == 0 ||
+ ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
+ ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
+ hpte[0] &= ~HPTE_V_HVLOCK;
+ return H_NOT_FOUND;
+ }
+ if (atomic_read(&kvm->online_vcpus) == 1)
+ flags |= H_LOCAL;
+ vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
+ vcpu->arch.gpr[5] = r = hpte[1];
+ rb = compute_tlbie_rb(v, r, pte_index);
+ hpte[0] = 0;
+ if (!(flags & H_LOCAL)) {
+ while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
+ cpu_relax();
+ asm volatile("ptesync" : : : "memory");
+ asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
+ : : "r" (rb), "r" (kvm->arch.lpid));
+ asm volatile("ptesync" : : : "memory");
+ kvm->arch.tlbie_lock = 0;
+ } else {
+ asm volatile("ptesync" : : : "memory");
+ asm volatile("tlbiel %0" : : "r" (rb));
+ asm volatile("ptesync" : : : "memory");
+ }
+ return H_SUCCESS;
+}
+
+long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long *args = &vcpu->arch.gpr[4];
+ unsigned long *hp, tlbrb[4];
+ long int i, found;
+ long int n_inval = 0;
+ unsigned long flags, req, pte_index;
+ long int local = 0;
+ long int ret = H_SUCCESS;
+
+ if (atomic_read(&kvm->online_vcpus) == 1)
+ local = 1;
+ for (i = 0; i < 4; ++i) {
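+ /*
+ * Each request doubleword carries a control byte in bits 56-63:
+ * bits 62-63 are the request type (1 = remove, 3 = end of list)
+ * and bits 56-57 the match flags; the PTE index occupies the low
+ * bits. The same byte is overwritten with the return code below.
+ */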
+ pte_index = args[i * 2];
+ flags = pte_index >> 56;
+ pte_index &= ((1ul << 56) - 1);
+ req = flags >> 6;
+ flags &= 3;
+ if (req == 3)
+ break;
+ if (req != 1 || flags == 3 ||
+ pte_index >= (HPT_NPTEG << 3)) {
+ /* parameter error */
+ args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
+ ret = H_PARAMETER;
+ break;
+ }
+ hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ while (!lock_hpte(hp, HPTE_V_HVLOCK))
+ cpu_relax();
+ found = 0;
+ if (hp[0] & HPTE_V_VALID) {
+ switch (flags & 3) {
+ case 0: /* absolute */
+ found = 1;
+ break;
+ case 1: /* andcond */
+ if (!(hp[0] & args[i * 2 + 1]))
+ found = 1;
+ break;
+ case 2: /* AVPN */
+ if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ hp[0] &= ~HPTE_V_HVLOCK;
+ args[i * 2] = ((0x90 | flags) << 56) + pte_index;
+ continue;
+ }
+ /* insert R and C bits from PTE */
+ flags |= (hp[1] >> 5) & 0x0c;
+ args[i * 2] = ((0x80 | flags) << 56) + pte_index;
+ tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
+ hp[0] = 0;
+ }
+ if (n_inval == 0)
+ return ret;
+
+ if (!local) {
+ while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
+ cpu_relax();
+ asm volatile("ptesync" : : : "memory");
+ for (i = 0; i < n_inval; ++i)
+ asm volatile(PPC_TLBIE(%1,%0)
+ : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
+ asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+ kvm->arch.tlbie_lock = 0;
+ } else {
+ asm volatile("ptesync" : : : "memory");
+ for (i = 0; i < n_inval; ++i)
+ asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
+ asm volatile("ptesync" : : : "memory");
+ }
+ return ret;
+}
+
+long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index, unsigned long avpn,
+ unsigned long va)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long *hpte;
+ unsigned long v, r, rb;
+
+ if (pte_index >= (HPT_NPTEG << 3))
+ return H_PARAMETER;
+ hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ while (!lock_hpte(hpte, HPTE_V_HVLOCK))
+ cpu_relax();
+ if ((hpte[0] & HPTE_V_VALID) == 0 ||
+ ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
+ hpte[0] &= ~HPTE_V_HVLOCK;
+ return H_NOT_FOUND;
+ }
+ if (atomic_read(&kvm->online_vcpus) == 1)
+ flags |= H_LOCAL;
+ v = hpte[0];
+ r = hpte[1] & ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
+ HPTE_R_KEY_HI | HPTE_R_KEY_LO);
+ r |= (flags << 55) & HPTE_R_PP0;
+ r |= (flags << 48) & HPTE_R_KEY_HI;
+ r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
+ rb = compute_tlbie_rb(v, r, pte_index);
+ hpte[0] = v & ~HPTE_V_VALID;
+ if (!(flags & H_LOCAL)) {
+ while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
+ cpu_relax();
+ asm volatile("ptesync" : : : "memory");
+ asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
+ : : "r" (rb), "r" (kvm->arch.lpid));
+ asm volatile("ptesync" : : : "memory");
+ kvm->arch.tlbie_lock = 0;
+ } else {
+ asm volatile("ptesync" : : : "memory");
+ asm volatile("tlbiel %0" : : "r" (rb));
+ asm volatile("ptesync" : : : "memory");
+ }
+ hpte[1] = r;
+ eieio();
+ hpte[0] = v & ~HPTE_V_HVLOCK;
+ asm volatile("ptesync" : : : "memory");
+ return H_SUCCESS;
+}
+
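+/*
+ * Translate a guest real address back into a guest logical address by
+ * searching the ram_pginfo array for the matching page frame; returns
+ * all 1s in the RPN field if the page isn't part of guest memory.
+ */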
+static unsigned long reverse_xlate(struct kvm *kvm, unsigned long realaddr)
+{
+ long int i;
+ unsigned long offset, rpn;
+
+ offset = realaddr & (kvm->arch.ram_psize - 1);
+ rpn = (realaddr - offset) >> PAGE_SHIFT;
+ for (i = 0; i < kvm->arch.ram_npages; ++i)
+ if (rpn == kvm->arch.ram_pginfo[i].pfn)
+ return (i << PAGE_SHIFT) + offset;
+ return HPTE_R_RPN; /* all 1s in the RPN field */
+}
+
+long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long *hpte, r;
+ int i, n = 1;
+
+ if (pte_index >= (HPT_NPTEG << 3))
+ return H_PARAMETER;
+ if (flags & H_READ_4) {
+ pte_index &= ~3;
+ n = 4;
+ }
+ for (i = 0; i < n; ++i, ++pte_index) {
+ hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ r = hpte[1];
+ if ((flags & H_R_XLATE) && (hpte[0] & HPTE_V_VALID))
+ r = reverse_xlate(kvm, r & HPTE_R_RPN) |
+ (r & ~HPTE_R_RPN);
+ vcpu->arch.gpr[4 + i * 2] = hpte[0];
+ vcpu->arch.gpr[5 + i * 2] = r;
+ }
+ return H_SUCCESS;
+}
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
new file mode 100644
index 00000000000..6dd33581a22
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -0,0 +1,1345 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * Derived from book3s_rmhandlers.S and other files, which are:
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/kvm_asm.h>
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/asm-offsets.h>
+#include <asm/exception-64s.h>
+
+/*****************************************************************************
+ * *
+ * Real Mode handlers that need to be in the linear mapping *
+ * *
+ ****************************************************************************/
+
+ .globl kvmppc_skip_interrupt
+kvmppc_skip_interrupt:
+ mfspr r13,SPRN_SRR0
+ addi r13,r13,4
+ mtspr SPRN_SRR0,r13
+ GET_SCRATCH0(r13)
+ rfid
+ b .
+
+ .globl kvmppc_skip_Hinterrupt
+kvmppc_skip_Hinterrupt:
+ mfspr r13,SPRN_HSRR0
+ addi r13,r13,4
+ mtspr SPRN_HSRR0,r13
+ GET_SCRATCH0(r13)
+ hrfid
+ b .
+
+/*
+ * Call kvmppc_hv_entry in real mode.
+ * Must be called with interrupts hard-disabled.
+ *
+ * Input Registers:
+ *
+ * LR = return address to continue at after eventually re-enabling MMU
+ */
+_GLOBAL(kvmppc_hv_entry_trampoline)
+ mfmsr r10
+ LOAD_REG_ADDR(r5, kvmppc_hv_entry)
+ li r0,MSR_RI
+ andc r0,r10,r0
+ li r6,MSR_IR | MSR_DR
+ andc r6,r10,r6
+ mtmsrd r0,1 /* clear RI in MSR */
+ mtsrr0 r5
+ mtsrr1 r6
+ RFI
+
+#define ULONG_SIZE 8
+#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
+
+/******************************************************************************
+ * *
+ * Entry code *
+ * *
+ *****************************************************************************/
+
+#define XICS_XIRR 4
+#define XICS_QIRR 0xc
+
+/*
+ * We come in here when woken from nap mode on a secondary hw thread.
+ * Relocation is off and most register values are lost.
+ * r13 points to the PACA.
+ */
+ .globl kvm_start_guest
+kvm_start_guest:
+ ld r1,PACAEMERGSP(r13)
+ subi r1,r1,STACK_FRAME_OVERHEAD
+
+ /* get vcpu pointer */
+ ld r4, HSTATE_KVM_VCPU(r13)
+
+ /* We got here with an IPI; clear it */
+ ld r5, HSTATE_XICS_PHYS(r13)
+ li r0, 0xff
+ li r6, XICS_QIRR
+ li r7, XICS_XIRR
+ lwzcix r8, r5, r7 /* ack the interrupt */
+ sync
+ stbcix r0, r5, r6 /* clear it */
+ stwcix r8, r5, r7 /* EOI it */
+
+.global kvmppc_hv_entry
+kvmppc_hv_entry:
+
+ /* Required state:
+ *
+ * R4 = vcpu pointer
+ * MSR = ~IR|DR
+ * R13 = PACA
+ * R1 = host R1
+ * all other volatile GPRS = free
+ */
+ mflr r0
+ std r0, HSTATE_VMHANDLER(r13)
+
+ ld r14, VCPU_GPR(r14)(r4)
+ ld r15, VCPU_GPR(r15)(r4)
+ ld r16, VCPU_GPR(r16)(r4)
+ ld r17, VCPU_GPR(r17)(r4)
+ ld r18, VCPU_GPR(r18)(r4)
+ ld r19, VCPU_GPR(r19)(r4)
+ ld r20, VCPU_GPR(r20)(r4)
+ ld r21, VCPU_GPR(r21)(r4)
+ ld r22, VCPU_GPR(r22)(r4)
+ ld r23, VCPU_GPR(r23)(r4)
+ ld r24, VCPU_GPR(r24)(r4)
+ ld r25, VCPU_GPR(r25)(r4)
+ ld r26, VCPU_GPR(r26)(r4)
+ ld r27, VCPU_GPR(r27)(r4)
+ ld r28, VCPU_GPR(r28)(r4)
+ ld r29, VCPU_GPR(r29)(r4)
+ ld r30, VCPU_GPR(r30)(r4)
+ ld r31, VCPU_GPR(r31)(r4)
+
+ /* Load guest PMU registers */
+ /* R4 is live here (vcpu pointer) */
+ li r3, 1
+ sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
+ mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
+ isync
+ lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
+ lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
+ lwz r6, VCPU_PMC + 8(r4)
+ lwz r7, VCPU_PMC + 12(r4)
+ lwz r8, VCPU_PMC + 16(r4)
+ lwz r9, VCPU_PMC + 20(r4)
+BEGIN_FTR_SECTION
+ lwz r10, VCPU_PMC + 24(r4)
+ lwz r11, VCPU_PMC + 28(r4)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ mtspr SPRN_PMC1, r3
+ mtspr SPRN_PMC2, r5
+ mtspr SPRN_PMC3, r6
+ mtspr SPRN_PMC4, r7
+ mtspr SPRN_PMC5, r8
+ mtspr SPRN_PMC6, r9
+BEGIN_FTR_SECTION
+ mtspr SPRN_PMC7, r10
+ mtspr SPRN_PMC8, r11
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ ld r3, VCPU_MMCR(r4)
+ ld r5, VCPU_MMCR + 8(r4)
+ ld r6, VCPU_MMCR + 16(r4)
+ mtspr SPRN_MMCR1, r5
+ mtspr SPRN_MMCRA, r6
+ mtspr SPRN_MMCR0, r3
+ isync
+
+ /* Load up FP, VMX and VSX registers */
+ bl kvmppc_load_fp
+
+BEGIN_FTR_SECTION
+ /* Switch DSCR to guest value */
+ ld r5, VCPU_DSCR(r4)
+ mtspr SPRN_DSCR, r5
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /*
+ * Set the decrementer to the guest decrementer.
+ */
+ ld r8,VCPU_DEC_EXPIRES(r4)
+ mftb r7
+ subf r3,r7,r8
+ mtspr SPRN_DEC,r3
+ stw r3,VCPU_DEC(r4)
+
+ ld r5, VCPU_SPRG0(r4)
+ ld r6, VCPU_SPRG1(r4)
+ ld r7, VCPU_SPRG2(r4)
+ ld r8, VCPU_SPRG3(r4)
+ mtspr SPRN_SPRG0, r5
+ mtspr SPRN_SPRG1, r6
+ mtspr SPRN_SPRG2, r7
+ mtspr SPRN_SPRG3, r8
+
+ /* Save R1 in the PACA */
+ std r1, HSTATE_HOST_R1(r13)
+
+ /* Increment yield count if they have a VPA */
+ ld r3, VCPU_VPA(r4)
+ cmpdi r3, 0
+ beq 25f
+ lwz r5, LPPACA_YIELDCOUNT(r3)
+ addi r5, r5, 1
+ stw r5, LPPACA_YIELDCOUNT(r3)
+25:
+ /* Load up DAR and DSISR */
+ ld r5, VCPU_DAR(r4)
+ lwz r6, VCPU_DSISR(r4)
+ mtspr SPRN_DAR, r5
+ mtspr SPRN_DSISR, r6
+
+ /* Set partition DABR */
+ li r5,3
+ ld r6,VCPU_DABR(r4)
+ mtspr SPRN_DABRX,r5
+ mtspr SPRN_DABR,r6
+
+BEGIN_FTR_SECTION
+ /* Restore AMR and UAMOR, set AMOR to all 1s */
+ ld r5,VCPU_AMR(r4)
+ ld r6,VCPU_UAMOR(r4)
+ li r7,-1
+ mtspr SPRN_AMR,r5
+ mtspr SPRN_UAMOR,r6
+ mtspr SPRN_AMOR,r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /* Clear out SLB */
+ li r6,0
+ slbmte r6,r6
+ slbia
+ ptesync
+
+BEGIN_FTR_SECTION
+ b 30f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ /*
+ * POWER7 host -> guest partition switch code.
+ * We don't have to lock against concurrent tlbies,
+ * but we do have to coordinate across hardware threads.
+ */
+ /* Increment entry count iff exit count is zero. */
+ ld r5,HSTATE_KVM_VCORE(r13)
+ addi r9,r5,VCORE_ENTRY_EXIT
+21: lwarx r3,0,r9
+ cmpwi r3,0x100 /* any threads starting to exit? */
+ bge secondary_too_late /* if so we're too late to the party */
+ addi r3,r3,1
+ stwcx. r3,0,r9
+ bne 21b
+
+ /* Primary thread switches to guest partition. */
+ ld r9,VCPU_KVM(r4) /* pointer to struct kvm */
+ lwz r6,VCPU_PTID(r4)
+ cmpwi r6,0
+ bne 20f
+ ld r6,KVM_SDR1(r9)
+ lwz r7,KVM_LPID(r9)
+ li r0,LPID_RSVD /* switch to reserved LPID */
+ mtspr SPRN_LPID,r0
+ ptesync
+ mtspr SPRN_SDR1,r6 /* switch to partition page table */
+ mtspr SPRN_LPID,r7
+ isync
+ li r0,1
+ stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
+ b 10f
+
+ /* Secondary threads wait for primary to have done partition switch */
+20: lbz r0,VCORE_IN_GUEST(r5)
+ cmpwi r0,0
+ beq 20b
+
+ /* Set LPCR. Set the MER bit if there is a pending external irq. */
+10: ld r8,KVM_LPCR(r9)
+ ld r0,VCPU_PENDING_EXC(r4)
+ li r7,(1 << BOOK3S_IRQPRIO_EXTERNAL)
+ oris r7,r7,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
+ and. r0,r0,r7
+ beq 11f
+ ori r8,r8,LPCR_MER
+11: mtspr SPRN_LPCR,r8
+ ld r8,KVM_RMOR(r9)
+ mtspr SPRN_RMOR,r8
+ isync
+
+ /* Check if HDEC expires soon */
+ mfspr r3,SPRN_HDEC
+ cmpwi r3,10
+ li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
+ mr r9,r4
+ blt hdec_soon
+
+ /*
+ * Invalidate the TLB if we could possibly have stale TLB
+ * entries for this partition on this core due to the use
+ * of tlbiel.
+ * XXX maybe only need this on primary thread?
+ */
+ ld r9,VCPU_KVM(r4) /* pointer to struct kvm */
+ lwz r5,VCPU_VCPUID(r4)
+ lhz r6,PACAPACAINDEX(r13)
+ rldimi r6,r5,0,62 /* XXX map as if threads 1:1 p:v */
+ lhz r8,VCPU_LAST_CPU(r4)
+ sldi r7,r6,1 /* see if this is the same vcpu */
+ add r7,r7,r9 /* as last ran on this pcpu */
+ lhz r0,KVM_LAST_VCPU(r7)
+ cmpw r6,r8 /* on the same cpu core as last time? */
+ bne 3f
+ cmpw r0,r5 /* same vcpu as this core last ran? */
+ beq 1f
+3: sth r6,VCPU_LAST_CPU(r4) /* if not, invalidate partition TLB */
+ sth r5,KVM_LAST_VCPU(r7)
+ li r6,128
+ mtctr r6
+ li r7,0x800 /* IS field = 0b10 */
+ ptesync
+2: tlbiel r7
+ addi r7,r7,0x1000
+ bdnz 2b
+ ptesync
+1:
+
+ /* Save purr/spurr */
+ mfspr r5,SPRN_PURR
+ mfspr r6,SPRN_SPURR
+ std r5,HSTATE_PURR(r13)
+ std r6,HSTATE_SPURR(r13)
+ ld r7,VCPU_PURR(r4)
+ ld r8,VCPU_SPURR(r4)
+ mtspr SPRN_PURR,r7
+ mtspr SPRN_SPURR,r8
+ b 31f
+
+ /*
+ * PPC970 host -> guest partition switch code.
+ * We have to lock against concurrent tlbies,
+ * using native_tlbie_lock to lock against host tlbies
+ * and kvm->arch.tlbie_lock to lock against guest tlbies.
+ * We also have to invalidate the TLB since its
+ * entries aren't tagged with the LPID.
+ */
+30: ld r9,VCPU_KVM(r4) /* pointer to struct kvm */
+
+ /* first take native_tlbie_lock */
+ .section ".toc","aw"
+toc_tlbie_lock:
+ .tc native_tlbie_lock[TC],native_tlbie_lock
+ .previous
+ ld r3,toc_tlbie_lock@toc(2)
+ lwz r8,PACA_LOCK_TOKEN(r13)
+24: lwarx r0,0,r3
+ cmpwi r0,0
+ bne 24b
+ stwcx. r8,0,r3
+ bne 24b
+ isync
+
+ ld r7,KVM_LPCR(r9) /* use kvm->arch.lpcr to store HID4 */
+ li r0,0x18f
+ rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
+ or r0,r7,r0
+ ptesync
+ sync
+ mtspr SPRN_HID4,r0 /* switch to reserved LPID */
+ isync
+ li r0,0
+ stw r0,0(r3) /* drop native_tlbie_lock */
+
+ /* invalidate the whole TLB */
+ li r0,256
+ mtctr r0
+ li r6,0
+25: tlbiel r6
+ addi r6,r6,0x1000
+ bdnz 25b
+ ptesync
+
+ /* Take the guest's tlbie_lock */
+ addi r3,r9,KVM_TLBIE_LOCK
+24: lwarx r0,0,r3
+ cmpwi r0,0
+ bne 24b
+ stwcx. r8,0,r3
+ bne 24b
+ isync
+ ld r6,KVM_SDR1(r9)
+ mtspr SPRN_SDR1,r6 /* switch to partition page table */
+
+ /* Set up HID4 with the guest's LPID etc. */
+ sync
+ mtspr SPRN_HID4,r7
+ isync
+
+ /* drop the guest's tlbie_lock */
+ li r0,0
+ stw r0,0(r3)
+
+ /* Check if HDEC expires soon */
+ mfspr r3,SPRN_HDEC
+ cmpwi r3,10
+ li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
+ mr r9,r4
+ blt hdec_soon
+
+ /* Enable HDEC interrupts */
+ mfspr r0,SPRN_HID0
+ li r3,1
+ rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
+ sync
+ mtspr SPRN_HID0,r0
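+ /* the repeated reads below appear to follow the 970's HID0 update sequence, letting the change take effect */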
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+
+ /* Load up guest SLB entries */
+31: lwz r5,VCPU_SLB_MAX(r4)
+ cmpwi r5,0
+ beq 9f
+ mtctr r5
+ addi r6,r4,VCPU_SLB
+1: ld r8,VCPU_SLB_E(r6)
+ ld r9,VCPU_SLB_V(r6)
+ slbmte r9,r8
+ addi r6,r6,VCPU_SLB_SIZE
+ bdnz 1b
+9:
+
+ /* Restore state of CTRL run bit; assume 1 on entry */
+ lwz r5,VCPU_CTRL(r4)
+ andi. r5,r5,1
+ bne 4f
+ mfspr r6,SPRN_CTRLF
+ clrrdi r6,r6,1
+ mtspr SPRN_CTRLT,r6
+4:
+ ld r6, VCPU_CTR(r4)
+ lwz r7, VCPU_XER(r4)
+
+ mtctr r6
+ mtxer r7
+
+ /* Move SRR0 and SRR1 into the respective regs */
+ ld r6, VCPU_SRR0(r4)
+ ld r7, VCPU_SRR1(r4)
+ mtspr SPRN_SRR0, r6
+ mtspr SPRN_SRR1, r7
+
+ ld r10, VCPU_PC(r4)
+
+ ld r11, VCPU_MSR(r4) /* r11 = vcpu->arch.msr & ~MSR_HV */
+ rldicl r11, r11, 63 - MSR_HV_LG, 1
+ rotldi r11, r11, 1 + MSR_HV_LG
+ ori r11, r11, MSR_ME
+
+fast_guest_return:
+ mtspr SPRN_HSRR0,r10
+ mtspr SPRN_HSRR1,r11
+
+ /* Activate guest mode, so faults get handled by KVM */
+ li r9, KVM_GUEST_MODE_GUEST
+ stb r9, HSTATE_IN_GUEST(r13)
+
+ /* Enter guest */
+
+ ld r5, VCPU_LR(r4)
+ lwz r6, VCPU_CR(r4)
+ mtlr r5
+ mtcr r6
+
+ ld r0, VCPU_GPR(r0)(r4)
+ ld r1, VCPU_GPR(r1)(r4)
+ ld r2, VCPU_GPR(r2)(r4)
+ ld r3, VCPU_GPR(r3)(r4)
+ ld r5, VCPU_GPR(r5)(r4)
+ ld r6, VCPU_GPR(r6)(r4)
+ ld r7, VCPU_GPR(r7)(r4)
+ ld r8, VCPU_GPR(r8)(r4)
+ ld r9, VCPU_GPR(r9)(r4)
+ ld r10, VCPU_GPR(r10)(r4)
+ ld r11, VCPU_GPR(r11)(r4)
+ ld r12, VCPU_GPR(r12)(r4)
+ ld r13, VCPU_GPR(r13)(r4)
+
+ ld r4, VCPU_GPR(r4)(r4)
+
+ hrfid
+ b .
+
+/******************************************************************************
+ * *
+ * Exit code *
+ * *
+ *****************************************************************************/
+
+/*
+ * We come here from the first-level interrupt handlers.
+ */
+ .globl kvmppc_interrupt
+kvmppc_interrupt:
+ /*
+ * Register contents:
+ * R12 = interrupt vector
+ * R13 = PACA
+ * guest CR, R12 saved in shadow VCPU SCRATCH1/0
+ * guest R13 saved in SPRN_SCRATCH0
+ */
+ /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
+ std r9, HSTATE_HOST_R2(r13)
+ ld r9, HSTATE_KVM_VCPU(r13)
+
+ /* Save registers */
+
+ std r0, VCPU_GPR(r0)(r9)
+ std r1, VCPU_GPR(r1)(r9)
+ std r2, VCPU_GPR(r2)(r9)
+ std r3, VCPU_GPR(r3)(r9)
+ std r4, VCPU_GPR(r4)(r9)
+ std r5, VCPU_GPR(r5)(r9)
+ std r6, VCPU_GPR(r6)(r9)
+ std r7, VCPU_GPR(r7)(r9)
+ std r8, VCPU_GPR(r8)(r9)
+ ld r0, HSTATE_HOST_R2(r13)
+ std r0, VCPU_GPR(r9)(r9)
+ std r10, VCPU_GPR(r10)(r9)
+ std r11, VCPU_GPR(r11)(r9)
+ ld r3, HSTATE_SCRATCH0(r13)
+ lwz r4, HSTATE_SCRATCH1(r13)
+ std r3, VCPU_GPR(r12)(r9)
+ stw r4, VCPU_CR(r9)
+
+ /* Restore R1/R2 so we can handle faults */
+ ld r1, HSTATE_HOST_R1(r13)
+ ld r2, PACATOC(r13)
+
+ mfspr r10, SPRN_SRR0
+ mfspr r11, SPRN_SRR1
+ std r10, VCPU_SRR0(r9)
+ std r11, VCPU_SRR1(r9)
+ andi. r0, r12, 2 /* need to read HSRR0/1? */
+ beq 1f
+ mfspr r10, SPRN_HSRR0
+ mfspr r11, SPRN_HSRR1
+ clrrdi r12, r12, 2
+1: std r10, VCPU_PC(r9)
+ std r11, VCPU_MSR(r9)
+
+ GET_SCRATCH0(r3)
+ mflr r4
+ std r3, VCPU_GPR(r13)(r9)
+ std r4, VCPU_LR(r9)
+
+ /* Unset guest mode */
+ li r0, KVM_GUEST_MODE_NONE
+ stb r0, HSTATE_IN_GUEST(r13)
+
+ stw r12,VCPU_TRAP(r9)
+
+ /* See if this is a leftover HDEC interrupt */
+ cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
+ bne 2f
+ mfspr r3,SPRN_HDEC
+ cmpwi r3,0
+ bge ignore_hdec
+2:
+ /* See if this is something we can handle in real mode */
+ cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
+ beq hcall_try_real_mode
+hcall_real_cont:
+
+ /* Check for mediated interrupts (could be done earlier really ...) */
+BEGIN_FTR_SECTION
+ cmpwi r12,BOOK3S_INTERRUPT_EXTERNAL
+ bne+ 1f
+ ld r5,VCPU_KVM(r9)
+ ld r5,KVM_LPCR(r5)
+ andi. r0,r11,MSR_EE
+ beq 1f
+ andi. r0,r5,LPCR_MER
+ bne bounce_ext_interrupt
+1:
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /* Save DEC */
+ mfspr r5,SPRN_DEC
+ mftb r6
+ extsw r5,r5
+ add r5,r5,r6
+ std r5,VCPU_DEC_EXPIRES(r9)
+
+ /* Save HEIR (HV emulation assist reg) in last_inst
+ if this is an HEI (HV emulation interrupt, e40) */
+ li r3,-1
+BEGIN_FTR_SECTION
+ cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
+ bne 11f
+ mfspr r3,SPRN_HEIR
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+11: stw r3,VCPU_LAST_INST(r9)
+
+ /* Save more register state */
+ mfxer r5
+ mfdar r6
+ mfdsisr r7
+ mfctr r8
+
+ stw r5, VCPU_XER(r9)
+ std r6, VCPU_DAR(r9)
+ stw r7, VCPU_DSISR(r9)
+ std r8, VCPU_CTR(r9)
+ /* grab HDAR & HDSISR if HV data storage interrupt (HDSI) */
+BEGIN_FTR_SECTION
+ cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
+ beq 6f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+7: std r6, VCPU_FAULT_DAR(r9)
+ stw r7, VCPU_FAULT_DSISR(r9)
+
+ /* Save guest CTRL register, set runlatch to 1 */
+ mfspr r6,SPRN_CTRLF
+ stw r6,VCPU_CTRL(r9)
+ andi. r0,r6,1
+ bne 4f
+ ori r6,r6,1
+ mtspr SPRN_CTRLT,r6
+4:
+ /* Read the guest SLB and save it away */
+ lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
+ mtctr r0
+ li r6,0
+ addi r7,r9,VCPU_SLB
+ li r5,0
+1: slbmfee r8,r6
+ andis. r0,r8,SLB_ESID_V@h
+ beq 2f
+ add r8,r8,r6 /* put index in */
+ slbmfev r3,r6
+ std r8,VCPU_SLB_E(r7)
+ std r3,VCPU_SLB_V(r7)
+ addi r7,r7,VCPU_SLB_SIZE
+ addi r5,r5,1
+2: addi r6,r6,1
+ bdnz 1b
+ stw r5,VCPU_SLB_MAX(r9)
+
+ /*
+ * Save the guest PURR/SPURR
+ */
+BEGIN_FTR_SECTION
+ mfspr r5,SPRN_PURR
+ mfspr r6,SPRN_SPURR
+ ld r7,VCPU_PURR(r9)
+ ld r8,VCPU_SPURR(r9)
+ std r5,VCPU_PURR(r9)
+ std r6,VCPU_SPURR(r9)
+ subf r5,r7,r5
+ subf r6,r8,r6
+
+ /*
+ * Restore host PURR/SPURR and add guest times
+ * so that the time in the guest gets accounted.
+ */
+ ld r3,HSTATE_PURR(r13)
+ ld r4,HSTATE_SPURR(r13)
+ add r3,r3,r5
+ add r4,r4,r6
+ mtspr SPRN_PURR,r3
+ mtspr SPRN_SPURR,r4
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
+
+ /* Clear out SLB */
+ li r5,0
+ slbmte r5,r5
+ slbia
+ ptesync
+
+hdec_soon:
+BEGIN_FTR_SECTION
+ b 32f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ /*
+ * POWER7 guest -> host partition switch code.
+ * We don't have to lock against tlbies but we do
+ * have to coordinate the hardware threads.
+ */
+ /* Increment the threads-exiting-guest count in the 0xff00
+ bits of vcore->entry_exit_count */
+ lwsync
+ ld r5,HSTATE_KVM_VCORE(r13)
+ addi r6,r5,VCORE_ENTRY_EXIT
+41: lwarx r3,0,r6
+ addi r0,r3,0x100
+ stwcx. r0,0,r6
+ bne 41b
+
+ /*
+ * At this point we have an interrupt that we have to pass
+ * up to the kernel or qemu; we can't handle it in real mode.
+ * Thus we have to do a partition switch, so we have to
+ * collect the other threads, if we are the first thread
+ * to take an interrupt. To do this, we set the HDEC to 0,
+ * which causes an HDEC interrupt in all threads within 2ns
+ * because the HDEC register is shared between all 4 threads.
+ * However, we don't need to bother if this is an HDEC
+ * interrupt, since the other threads will already be on their
+ * way here in that case.
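+ * (r3 still holds the old entry_exit_count from the lwarx above:
+ * the low byte counts threads that have entered the guest and the
+ * 0xff00 bits count threads exiting, so only the first exiting
+ * thread of a multi-threaded vcore needs to zero HDEC.)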
+ */
+ cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
+ beq 40f
+ cmpwi r3,0x100 /* Are we the first here? */
+ bge 40f
+ cmpwi r3,1
+ ble 40f
+ li r0,0
+ mtspr SPRN_HDEC,r0
+40:
+
+ /* Secondary threads wait for primary to do partition switch */
+ ld r4,VCPU_KVM(r9) /* pointer to struct kvm */
+ ld r5,HSTATE_KVM_VCORE(r13)
+ lwz r3,VCPU_PTID(r9)
+ cmpwi r3,0
+ beq 15f
+ HMT_LOW
+13: lbz r3,VCORE_IN_GUEST(r5)
+ cmpwi r3,0
+ bne 13b
+ HMT_MEDIUM
+ b 16f
+
+ /* Primary thread waits for all the secondaries to exit guest */
+15: lwz r3,VCORE_ENTRY_EXIT(r5)
+ srwi r0,r3,8
+ clrldi r3,r3,56
+ cmpw r3,r0
+ bne 15b
+ isync
+
+ /* Primary thread switches back to host partition */
+ ld r6,KVM_HOST_SDR1(r4)
+ lwz r7,KVM_HOST_LPID(r4)
+ li r8,LPID_RSVD /* switch to reserved LPID */
+ mtspr SPRN_LPID,r8
+ ptesync
+ mtspr SPRN_SDR1,r6 /* switch to partition page table */
+ mtspr SPRN_LPID,r7
+ isync
+ li r0,0
+ stb r0,VCORE_IN_GUEST(r5)
+ lis r8,0x7fff /* MAX_INT@h */
+ mtspr SPRN_HDEC,r8
+
+16: ld r8,KVM_HOST_LPCR(r4)
+ mtspr SPRN_LPCR,r8
+ isync
+ b 33f
+
+ /*
+ * PPC970 guest -> host partition switch code.
+ * We have to lock against concurrent tlbies, and
+ * we have to flush the whole TLB.
+ */
+32: ld r4,VCPU_KVM(r9) /* pointer to struct kvm */
+
+ /* Take the guest's tlbie_lock */
+ lwz r8,PACA_LOCK_TOKEN(r13)
+ addi r3,r4,KVM_TLBIE_LOCK
+24: lwarx r0,0,r3
+ cmpwi r0,0
+ bne 24b
+ stwcx. r8,0,r3
+ bne 24b
+ isync
+
+ ld r7,KVM_HOST_LPCR(r4) /* use kvm->arch.host_lpcr for HID4 */
+ li r0,0x18f
+ rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
+ or r0,r7,r0
+ ptesync
+ sync
+ mtspr SPRN_HID4,r0 /* switch to reserved LPID */
+ isync
+ li r0,0
+ stw r0,0(r3) /* drop guest tlbie_lock */
+
+ /* invalidate the whole TLB */
+ li r0,256
+ mtctr r0
+ li r6,0
+25: tlbiel r6
+ addi r6,r6,0x1000
+ bdnz 25b
+ ptesync
+
+ /* take native_tlbie_lock */
+ ld r3,toc_tlbie_lock@toc(2)
+24: lwarx r0,0,r3
+ cmpwi r0,0
+ bne 24b
+ stwcx. r8,0,r3
+ bne 24b
+ isync
+
+ ld r6,KVM_HOST_SDR1(r4)
+ mtspr SPRN_SDR1,r6 /* switch to host page table */
+
+ /* Set up host HID4 value */
+ sync
+ mtspr SPRN_HID4,r7
+ isync
+ li r0,0
+ stw r0,0(r3) /* drop native_tlbie_lock */
+
+ lis r8,0x7fff /* MAX_INT@h */
+ mtspr SPRN_HDEC,r8
+
+ /* Disable HDEC interrupts */
+ mfspr r0,SPRN_HID0
+ li r3,0
+ rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
+ sync
+ mtspr SPRN_HID0,r0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+
+ /* load host SLB entries */
+33: ld r8,PACA_SLBSHADOWPTR(r13)
+
+ .rept SLB_NUM_BOLTED
+ ld r5,SLBSHADOW_SAVEAREA(r8)
+ ld r6,SLBSHADOW_SAVEAREA+8(r8)
+ andis. r7,r5,SLB_ESID_V@h
+ beq 1f
+ slbmte r6,r5
+1: addi r8,r8,16
+ .endr
+
+ /* Save and reset AMR and UAMOR before turning on the MMU */
+BEGIN_FTR_SECTION
+ mfspr r5,SPRN_AMR
+ mfspr r6,SPRN_UAMOR
+ std r5,VCPU_AMR(r9)
+ std r6,VCPU_UAMOR(r9)
+ li r6,0
+ mtspr SPRN_AMR,r6
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /* Restore host DABR and DABRX */
+ ld r5,HSTATE_DABR(r13)
+ li r6,7
+ mtspr SPRN_DABR,r5
+ mtspr SPRN_DABRX,r6
+
+ /* Switch DSCR back to host value */
+BEGIN_FTR_SECTION
+ mfspr r8, SPRN_DSCR
+ ld r7, HSTATE_DSCR(r13)
+ std r8, VCPU_DSCR(r7)
+ mtspr SPRN_DSCR, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /* Save non-volatile GPRs */
+ std r14, VCPU_GPR(r14)(r9)
+ std r15, VCPU_GPR(r15)(r9)
+ std r16, VCPU_GPR(r16)(r9)
+ std r17, VCPU_GPR(r17)(r9)
+ std r18, VCPU_GPR(r18)(r9)
+ std r19, VCPU_GPR(r19)(r9)
+ std r20, VCPU_GPR(r20)(r9)
+ std r21, VCPU_GPR(r21)(r9)
+ std r22, VCPU_GPR(r22)(r9)
+ std r23, VCPU_GPR(r23)(r9)
+ std r24, VCPU_GPR(r24)(r9)
+ std r25, VCPU_GPR(r25)(r9)
+ std r26, VCPU_GPR(r26)(r9)
+ std r27, VCPU_GPR(r27)(r9)
+ std r28, VCPU_GPR(r28)(r9)
+ std r29, VCPU_GPR(r29)(r9)
+ std r30, VCPU_GPR(r30)(r9)
+ std r31, VCPU_GPR(r31)(r9)
+
+ /* Save SPRGs */
+ mfspr r3, SPRN_SPRG0
+ mfspr r4, SPRN_SPRG1
+ mfspr r5, SPRN_SPRG2
+ mfspr r6, SPRN_SPRG3
+ std r3, VCPU_SPRG0(r9)
+ std r4, VCPU_SPRG1(r9)
+ std r5, VCPU_SPRG2(r9)
+ std r6, VCPU_SPRG3(r9)
+
+ /* Increment yield count if they have a VPA */
+ ld r8, VCPU_VPA(r9) /* do they have a VPA? */
+ cmpdi r8, 0
+ beq 25f
+ lwz r3, LPPACA_YIELDCOUNT(r8)
+ addi r3, r3, 1
+ stw r3, LPPACA_YIELDCOUNT(r8)
+25:
+ /* Save PMU registers if requested */
+ /* r8 and cr0.eq are live here */
+ li r3, 1
+ sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
+ mfspr r4, SPRN_MMCR0 /* save MMCR0 */
+ mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
+ isync
+ beq 21f /* if no VPA, save PMU stuff anyway */
+ lbz r7, LPPACA_PMCINUSE(r8)
+ cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
+ bne 21f
+ std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
+ b 22f
+21: mfspr r5, SPRN_MMCR1
+ mfspr r6, SPRN_MMCRA
+ std r4, VCPU_MMCR(r9)
+ std r5, VCPU_MMCR + 8(r9)
+ std r6, VCPU_MMCR + 16(r9)
+ mfspr r3, SPRN_PMC1
+ mfspr r4, SPRN_PMC2
+ mfspr r5, SPRN_PMC3
+ mfspr r6, SPRN_PMC4
+ mfspr r7, SPRN_PMC5
+ mfspr r8, SPRN_PMC6
+BEGIN_FTR_SECTION
+ mfspr r10, SPRN_PMC7
+ mfspr r11, SPRN_PMC8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ stw r3, VCPU_PMC(r9)
+ stw r4, VCPU_PMC + 4(r9)
+ stw r5, VCPU_PMC + 8(r9)
+ stw r6, VCPU_PMC + 12(r9)
+ stw r7, VCPU_PMC + 16(r9)
+ stw r8, VCPU_PMC + 20(r9)
+BEGIN_FTR_SECTION
+ stw r10, VCPU_PMC + 24(r9)
+ stw r11, VCPU_PMC + 28(r9)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+22:
+ /* save FP state */
+ mr r3, r9
+ bl .kvmppc_save_fp
+
+ /* Secondary threads go off to take a nap on POWER7 */
+BEGIN_FTR_SECTION
+ lwz r0,VCPU_PTID(r3)
+ cmpwi r0,0
+ bne secondary_nap
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /*
+ * Reload DEC. HDEC interrupts were disabled when
+ * we reloaded the host's LPCR value.
+ */
+ ld r3, HSTATE_DECEXP(r13)
+ mftb r4
+ subf r4, r4, r3
+ mtspr SPRN_DEC, r4
+
+ /* Reload the host's PMU registers */
+ ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
+ lbz r4, LPPACA_PMCINUSE(r3)
+ cmpwi r4, 0
+ beq 23f /* skip if not */
+ lwz r3, HSTATE_PMC(r13)
+ lwz r4, HSTATE_PMC + 4(r13)
+ lwz r5, HSTATE_PMC + 8(r13)
+ lwz r6, HSTATE_PMC + 12(r13)
+ lwz r8, HSTATE_PMC + 16(r13)
+ lwz r9, HSTATE_PMC + 20(r13)
+BEGIN_FTR_SECTION
+ lwz r10, HSTATE_PMC + 24(r13)
+ lwz r11, HSTATE_PMC + 28(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ mtspr SPRN_PMC1, r3
+ mtspr SPRN_PMC2, r4
+ mtspr SPRN_PMC3, r5
+ mtspr SPRN_PMC4, r6
+ mtspr SPRN_PMC5, r8
+ mtspr SPRN_PMC6, r9
+BEGIN_FTR_SECTION
+ mtspr SPRN_PMC7, r10
+ mtspr SPRN_PMC8, r11
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ ld r3, HSTATE_MMCR(r13)
+ ld r4, HSTATE_MMCR + 8(r13)
+ ld r5, HSTATE_MMCR + 16(r13)
+ mtspr SPRN_MMCR1, r4
+ mtspr SPRN_MMCRA, r5
+ mtspr SPRN_MMCR0, r3
+ isync
+23:
+ /*
+ * For external and machine check interrupts, we need
+ * to call the Linux handler to process the interrupt.
+ * We do that by jumping to the interrupt vector address
+ * which we have in r12. The [h]rfid at the end of the
+ * handler will return to the book3s_hv_interrupts.S code.
+ * For other interrupts we do the rfid to get back
+ * to the book3s_interrupts.S code here.
+ */
+ ld r8, HSTATE_VMHANDLER(r13)
+ ld r7, HSTATE_HOST_MSR(r13)
+
+ cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
+ beq 11f
+ cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
+
+ /* RFI into the highmem handler, or branch to interrupt handler */
+12: mfmsr r6
+ mtctr r12
+ li r0, MSR_RI
+ andc r6, r6, r0
+ mtmsrd r6, 1 /* Clear RI in MSR */
+ mtsrr0 r8
+ mtsrr1 r7
+ beqctr
+ RFI
+
+11:
+BEGIN_FTR_SECTION
+ b 12b
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ mtspr SPRN_HSRR0, r8
+ mtspr SPRN_HSRR1, r7
+ ba 0x500
+
+6: mfspr r6,SPRN_HDAR
+ mfspr r7,SPRN_HDSISR
+ b 7b
+
+/*
+ * Try to handle an hcall in real mode.
+ * Returns to the guest if we handle it, or continues on up to
+ * the kernel if we can't (i.e. if we don't have a handler for
+ * it, or if the handler returns H_TOO_HARD).
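+ *
+ * hcall numbers are multiples of 4, so hcall_real_table is indexed by
+ * the hcall number in bytes; each 32-bit entry is either 0 (not
+ * handled in real mode) or the handler's offset from the table start.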
+ */
+ .globl hcall_try_real_mode
+hcall_try_real_mode:
+ ld r3,VCPU_GPR(r3)(r9)
+ andi. r0,r11,MSR_PR
+ bne hcall_real_cont
+ clrrdi r3,r3,2
+ cmpldi r3,hcall_real_table_end - hcall_real_table
+ bge hcall_real_cont
+ LOAD_REG_ADDR(r4, hcall_real_table)
+ lwzx r3,r3,r4
+ cmpwi r3,0
+ beq hcall_real_cont
+ add r3,r3,r4
+ mtctr r3
+ mr r3,r9 /* get vcpu pointer */
+ ld r4,VCPU_GPR(r4)(r9)
+ bctrl
+ cmpdi r3,H_TOO_HARD
+ beq hcall_real_fallback
+ ld r4,HSTATE_KVM_VCPU(r13)
+ std r3,VCPU_GPR(r3)(r4)
+ ld r10,VCPU_PC(r4)
+ ld r11,VCPU_MSR(r4)
+ b fast_guest_return
+
+ /* We've attempted a real mode hcall, but the handler punted it back
+ * to userspace. We need to restore some clobbered volatiles
+ * before resuming the pass-it-to-qemu path */
+hcall_real_fallback:
+ li r12,BOOK3S_INTERRUPT_SYSCALL
+ ld r9, HSTATE_KVM_VCPU(r13)
+ ld r11, VCPU_MSR(r9)
+
+ b hcall_real_cont
+
+ .globl hcall_real_table
+hcall_real_table:
+ .long 0 /* 0 - unused */
+ .long .kvmppc_h_remove - hcall_real_table
+ .long .kvmppc_h_enter - hcall_real_table
+ .long .kvmppc_h_read - hcall_real_table
+ .long 0 /* 0x10 - H_CLEAR_MOD */
+ .long 0 /* 0x14 - H_CLEAR_REF */
+ .long .kvmppc_h_protect - hcall_real_table
+ .long 0 /* 0x1c - H_GET_TCE */
+ .long .kvmppc_h_put_tce - hcall_real_table
+ .long 0 /* 0x24 - H_SET_SPRG0 */
+ .long .kvmppc_h_set_dabr - hcall_real_table
+ .long 0 /* 0x2c */
+ .long 0 /* 0x30 */
+ .long 0 /* 0x34 */
+ .long 0 /* 0x38 */
+ .long 0 /* 0x3c */
+ .long 0 /* 0x40 */
+ .long 0 /* 0x44 */
+ .long 0 /* 0x48 */
+ .long 0 /* 0x4c */
+ .long 0 /* 0x50 */
+ .long 0 /* 0x54 */
+ .long 0 /* 0x58 */
+ .long 0 /* 0x5c */
+ .long 0 /* 0x60 */
+ .long 0 /* 0x64 */
+ .long 0 /* 0x68 */
+ .long 0 /* 0x6c */
+ .long 0 /* 0x70 */
+ .long 0 /* 0x74 */
+ .long 0 /* 0x78 */
+ .long 0 /* 0x7c */
+ .long 0 /* 0x80 */
+ .long 0 /* 0x84 */
+ .long 0 /* 0x88 */
+ .long 0 /* 0x8c */
+ .long 0 /* 0x90 */
+ .long 0 /* 0x94 */
+ .long 0 /* 0x98 */
+ .long 0 /* 0x9c */
+ .long 0 /* 0xa0 */
+ .long 0 /* 0xa4 */
+ .long 0 /* 0xa8 */
+ .long 0 /* 0xac */
+ .long 0 /* 0xb0 */
+ .long 0 /* 0xb4 */
+ .long 0 /* 0xb8 */
+ .long 0 /* 0xbc */
+ .long 0 /* 0xc0 */
+ .long 0 /* 0xc4 */
+ .long 0 /* 0xc8 */
+ .long 0 /* 0xcc */
+ .long 0 /* 0xd0 */
+ .long 0 /* 0xd4 */
+ .long 0 /* 0xd8 */
+ .long 0 /* 0xdc */
+ .long 0 /* 0xe0 */
+ .long 0 /* 0xe4 */
+ .long 0 /* 0xe8 */
+ .long 0 /* 0xec */
+ .long 0 /* 0xf0 */
+ .long 0 /* 0xf4 */
+ .long 0 /* 0xf8 */
+ .long 0 /* 0xfc */
+ .long 0 /* 0x100 */
+ .long 0 /* 0x104 */
+ .long 0 /* 0x108 */
+ .long 0 /* 0x10c */
+ .long 0 /* 0x110 */
+ .long 0 /* 0x114 */
+ .long 0 /* 0x118 */
+ .long 0 /* 0x11c */
+ .long 0 /* 0x120 */
+ .long .kvmppc_h_bulk_remove - hcall_real_table
+hcall_real_table_end:
+
+ignore_hdec:
+ mr r4,r9
+ b fast_guest_return
+
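+/*
+ * Deliver a mediated external interrupt straight to the guest: stash the
+ * guest PC/MSR in SRR0/1 and re-enter the guest at its 0x500 vector with
+ * MSR_SF|MSR_ME.
+ */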
+bounce_ext_interrupt:
+ mr r4,r9
+ mtspr SPRN_SRR0,r10
+ mtspr SPRN_SRR1,r11
+ li r10,BOOK3S_INTERRUPT_EXTERNAL
+ LOAD_REG_IMMEDIATE(r11,MSR_SF | MSR_ME);
+ b fast_guest_return
+
+_GLOBAL(kvmppc_h_set_dabr)
+ std r4,VCPU_DABR(r3)
+ mtspr SPRN_DABR,r4
+ li r3,0
+ blr
+
+secondary_too_late:
+ ld r5,HSTATE_KVM_VCORE(r13)
+ HMT_LOW
+13: lbz r3,VCORE_IN_GUEST(r5)
+ cmpwi r3,0
+ bne 13b
+ HMT_MEDIUM
+ ld r11,PACA_SLBSHADOWPTR(r13)
+
+ .rept SLB_NUM_BOLTED
+ ld r5,SLBSHADOW_SAVEAREA(r11)
+ ld r6,SLBSHADOW_SAVEAREA+8(r11)
+ andis. r7,r5,SLB_ESID_V@h
+ beq 1f
+ slbmte r6,r5
+1: addi r11,r11,16
+ .endr
+ b 50f
+
+secondary_nap:
+ /* Clear any pending IPI */
+50: ld r5, HSTATE_XICS_PHYS(r13)
+ li r0, 0xff
+ li r6, XICS_QIRR
+ stbcix r0, r5, r6
+
+ /* increment the nap count and then go to nap mode */
+ ld r4, HSTATE_KVM_VCORE(r13)
+ addi r4, r4, VCORE_NAP_COUNT
+ lwsync /* make previous updates visible */
+51: lwarx r3, 0, r4
+ addi r3, r3, 1
+ stwcx. r3, 0, r4
+ bne 51b
+ isync
+
+ mfspr r4, SPRN_LPCR
+ li r0, LPCR_PECE
+ andc r4, r4, r0
+ ori r4, r4, LPCR_PECE0 /* exit nap on interrupt */
+ mtspr SPRN_LPCR, r4
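+ /*
+ * The store/ptesync/reload/compare sequence below appears to be the
+ * idiom for making sure all prior stores have completed before the
+ * thread executes nap.
+ */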
+ li r0, 0
+ std r0, HSTATE_SCRATCH0(r13)
+ ptesync
+ ld r0, HSTATE_SCRATCH0(r13)
+1: cmpd r0, r0
+ bne 1b
+ nap
+ b .
+
+/*
+ * Save away FP, VMX and VSX registers.
+ * r3 = vcpu pointer
+ */
+_GLOBAL(kvmppc_save_fp)
+ mfmsr r9
+ ori r8,r9,MSR_FP
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ oris r8,r8,MSR_VEC@h
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+ oris r8,r8,MSR_VSX@h
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
+ mtmsrd r8
+ isync
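+ /*
+ * The .rept blocks below are assembler-time loops: "reg" is an
+ * assembler symbol, so each block expands into 32 store instructions,
+ * one per VSR/FPR/VR register.
+ */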
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+ reg = 0
+ .rept 32
+ li r6,reg*16+VCPU_VSRS
+ stxvd2x reg,r6,r3
+ reg = reg + 1
+ .endr
+FTR_SECTION_ELSE
+#endif
+ reg = 0
+ .rept 32
+ stfd reg,reg*8+VCPU_FPRS(r3)
+ reg = reg + 1
+ .endr
+#ifdef CONFIG_VSX
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
+#endif
+ mffs fr0
+ stfd fr0,VCPU_FPSCR(r3)
+
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ reg = 0
+ .rept 32
+ li r6,reg*16+VCPU_VRS
+ stvx reg,r6,r3
+ reg = reg + 1
+ .endr
+ mfvscr vr0
+ li r6,VCPU_VSCR
+ stvx vr0,r6,r3
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif
+ mfspr r6,SPRN_VRSAVE
+ stw r6,VCPU_VRSAVE(r3)
+ mtmsrd r9
+ isync
+ blr
+
+/*
+ * Load up FP, VMX and VSX registers
+ * r4 = vcpu pointer
+ */
+ .globl kvmppc_load_fp
+kvmppc_load_fp:
+ mfmsr r9
+ ori r8,r9,MSR_FP
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ oris r8,r8,MSR_VEC@h
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+ oris r8,r8,MSR_VSX@h
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
+ mtmsrd r8
+ isync
+ lfd fr0,VCPU_FPSCR(r4)
+ MTFSF_L(fr0)
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+ reg = 0
+ .rept 32
+ li r7,reg*16+VCPU_VSRS
+ lxvd2x reg,r7,r4
+ reg = reg + 1
+ .endr
+FTR_SECTION_ELSE
+#endif
+ reg = 0
+ .rept 32
+ lfd reg,reg*8+VCPU_FPRS(r4)
+ reg = reg + 1
+ .endr
+#ifdef CONFIG_VSX
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
+#endif
+
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ li r7,VCPU_VSCR
+ lvx vr0,r7,r4
+ mtvscr vr0
+ reg = 0
+ .rept 32
+ li r7,reg*16+VCPU_VRS
+ lvx reg,r7,r4
+ reg = reg + 1
+ .endr
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif
+ lwz r7,VCPU_VRSAVE(r4)
+ mtspr SPRN_VRSAVE,r7
+ blr
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 2f0bc928b08..c54b0e30cf3 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -29,8 +29,7 @@
#define ULONG_SIZE 8
#define FUNC(name) GLUE(.,name)
-#define GET_SHADOW_VCPU(reg) \
- addi reg, r13, PACA_KVM_SVCPU
+#define GET_SHADOW_VCPU_R13
#define DISABLE_INTERRUPTS \
mfmsr r0; \
@@ -43,8 +42,8 @@
#define ULONG_SIZE 4
#define FUNC(name) name
-#define GET_SHADOW_VCPU(reg) \
- lwz reg, (THREAD + THREAD_KVM_SVCPU)(r2)
+#define GET_SHADOW_VCPU_R13 \
+ lwz r13, (THREAD + THREAD_KVM_SVCPU)(r2)
#define DISABLE_INTERRUPTS \
mfmsr r0; \
@@ -85,7 +84,7 @@
* r3: kvm_run pointer
* r4: vcpu pointer
*/
-_GLOBAL(__kvmppc_vcpu_entry)
+_GLOBAL(__kvmppc_vcpu_run)
kvm_start_entry:
/* Write correct stack frame */
@@ -107,17 +106,11 @@ kvm_start_entry:
/* Load non-volatile guest state from the vcpu */
VCPU_LOAD_NVGPRS(r4)
- GET_SHADOW_VCPU(r5)
-
- /* Save R1/R2 in the PACA */
- PPC_STL r1, SVCPU_HOST_R1(r5)
- PPC_STL r2, SVCPU_HOST_R2(r5)
+kvm_start_lightweight:
- /* XXX swap in/out on load? */
+ GET_SHADOW_VCPU_R13
PPC_LL r3, VCPU_HIGHMEM_HANDLER(r4)
- PPC_STL r3, SVCPU_VMHANDLER(r5)
-
-kvm_start_lightweight:
+ PPC_STL r3, HSTATE_VMHANDLER(r13)
PPC_LL r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index 79751d8dd13..41cb0017e75 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -21,7 +21,6 @@
#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>
-#include "trace.h"
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
@@ -29,6 +28,8 @@
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
+#include "trace.h"
+
#define PTE_SIZE 12
static struct kmem_cache *hpte_cache;
@@ -58,30 +59,31 @@ static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
u64 index;
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
trace_kvm_book3s_mmu_map(pte);
- spin_lock(&vcpu->arch.mmu_lock);
+ spin_lock(&vcpu3s->mmu_lock);
/* Add to ePTE list */
index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
- hlist_add_head_rcu(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);
+ hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);
/* Add to ePTE_long list */
index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
hlist_add_head_rcu(&pte->list_pte_long,
- &vcpu->arch.hpte_hash_pte_long[index]);
+ &vcpu3s->hpte_hash_pte_long[index]);
/* Add to vPTE list */
index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
- hlist_add_head_rcu(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);
+ hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);
/* Add to vPTE_long list */
index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
hlist_add_head_rcu(&pte->list_vpte_long,
- &vcpu->arch.hpte_hash_vpte_long[index]);
+ &vcpu3s->hpte_hash_vpte_long[index]);
- spin_unlock(&vcpu->arch.mmu_lock);
+ spin_unlock(&vcpu3s->mmu_lock);
}
static void free_pte_rcu(struct rcu_head *head)
@@ -92,16 +94,18 @@ static void free_pte_rcu(struct rcu_head *head)
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+
trace_kvm_book3s_mmu_invalidate(pte);
/* Different for 32 and 64 bit */
kvmppc_mmu_invalidate_pte(vcpu, pte);
- spin_lock(&vcpu->arch.mmu_lock);
+ spin_lock(&vcpu3s->mmu_lock);
/* pte already invalidated in between? */
if (hlist_unhashed(&pte->list_pte)) {
- spin_unlock(&vcpu->arch.mmu_lock);
+ spin_unlock(&vcpu3s->mmu_lock);
return;
}
@@ -115,14 +119,15 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
else
kvm_release_pfn_clean(pte->pfn);
- spin_unlock(&vcpu->arch.mmu_lock);
+ spin_unlock(&vcpu3s->mmu_lock);
- vcpu->arch.hpte_cache_count--;
+ vcpu3s->hpte_cache_count--;
call_rcu(&pte->rcu_head, free_pte_rcu);
}
static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hpte_cache *pte;
struct hlist_node *node;
int i;
@@ -130,7 +135,7 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
rcu_read_lock();
for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
- struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];
+ struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
invalidate_pte(vcpu, pte);
@@ -141,12 +146,13 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_head *list;
struct hlist_node *node;
struct hpte_cache *pte;
/* Find the list of entries in the map */
- list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];
+ list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];
rcu_read_lock();
@@ -160,12 +166,13 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_head *list;
struct hlist_node *node;
struct hpte_cache *pte;
/* Find the list of entries in the map */
- list = &vcpu->arch.hpte_hash_pte_long[
+ list = &vcpu3s->hpte_hash_pte_long[
kvmppc_mmu_hash_pte_long(guest_ea)];
rcu_read_lock();
@@ -203,12 +210,13 @@ void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_head *list;
struct hlist_node *node;
struct hpte_cache *pte;
u64 vp_mask = 0xfffffffffULL;
- list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];
+ list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];
rcu_read_lock();
@@ -223,12 +231,13 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_head *list;
struct hlist_node *node;
struct hpte_cache *pte;
u64 vp_mask = 0xffffff000ULL;
- list = &vcpu->arch.hpte_hash_vpte_long[
+ list = &vcpu3s->hpte_hash_vpte_long[
kvmppc_mmu_hash_vpte_long(guest_vp)];
rcu_read_lock();
@@ -261,6 +270,7 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_node *node;
struct hpte_cache *pte;
int i;
@@ -270,7 +280,7 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
rcu_read_lock();
for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
- struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];
+ struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
if ((pte->pte.raddr >= pa_start) &&
@@ -283,12 +293,13 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hpte_cache *pte;
pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
- vcpu->arch.hpte_cache_count++;
+ vcpu3s->hpte_cache_count++;
- if (vcpu->arch.hpte_cache_count == HPTEG_CACHE_NUM)
+ if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
kvmppc_mmu_pte_flush_all(vcpu);
return pte;
@@ -309,17 +320,19 @@ static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+
/* init hpte lookup hashes */
- kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte,
- ARRAY_SIZE(vcpu->arch.hpte_hash_pte));
- kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte_long,
- ARRAY_SIZE(vcpu->arch.hpte_hash_pte_long));
- kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte,
- ARRAY_SIZE(vcpu->arch.hpte_hash_vpte));
- kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long,
- ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long));
-
- spin_lock_init(&vcpu->arch.mmu_lock);
+ kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte,
+ ARRAY_SIZE(vcpu3s->hpte_hash_pte));
+ kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long,
+ ARRAY_SIZE(vcpu3s->hpte_hash_pte_long));
+ kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte,
+ ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
+ kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
+ ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
+
+ spin_lock_init(&vcpu3s->mmu_lock);
return 0;
}
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
new file mode 100644
index 00000000000..0c0d3f27443
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -0,0 +1,1029 @@
+/*
+ * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
+ *
+ * Authors:
+ * Alexander Graf <agraf@suse.de>
+ * Kevin Wolf <mail@kevin-wolf.de>
+ * Paul Mackerras <paulus@samba.org>
+ *
+ * Description:
+ * Functions relating to running KVM on Book 3S processors where
+ * we don't have access to hypervisor mode, and we run the guest
+ * in problem state (user mode).
+ *
+ * This file is derived from arch/powerpc/kvm/44x.c,
+ * by Hollis Blanchard <hollisb@us.ibm.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <asm/reg.h>
+#include <asm/cputable.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu_context.h>
+#include <linux/gfp.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+
+#include "trace.h"
+
+/* #define EXIT_DEBUG */
+/* #define DEBUG_EXT */
+
+static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
+ ulong msr);
+
+/* Some compatibility defines */
+#ifdef CONFIG_PPC_BOOK3S_32
+#define MSR_USER32 MSR_USER
+#define MSR_USER64 MSR_USER
+#define HW_PAGE_SIZE PAGE_SIZE
+#endif
+
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+ memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb));
+ memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
+ sizeof(get_paca()->shadow_vcpu));
+ to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
+#endif
+}
+
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+ memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb));
+ memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
+ sizeof(get_paca()->shadow_vcpu));
+ to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
+#endif
+
+ kvmppc_giveup_ext(vcpu, MSR_FP);
+ kvmppc_giveup_ext(vcpu, MSR_VEC);
+ kvmppc_giveup_ext(vcpu, MSR_VSX);
+}
+
+static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
+{
+ ulong smsr = vcpu->arch.shared->msr;
+
+ /* Guest MSR values */
+ smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
+ /* Process MSR values */
+ smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
+ /* External providers (FPU/Altivec/VSX) the guest currently owns */
+ smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
+ /* 64-bit Process MSR values */
+#ifdef CONFIG_PPC_BOOK3S_64
+ smsr |= MSR_ISF | MSR_HV;
+#endif
+ vcpu->arch.shadow_msr = smsr;
+}
+
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
+{
+ ulong old_msr = vcpu->arch.shared->msr;
+
+#ifdef EXIT_DEBUG
+ printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
+#endif
+
+ msr &= to_book3s(vcpu)->msr_mask;
+ vcpu->arch.shared->msr = msr;
+ kvmppc_recalc_shadow_msr(vcpu);
+
+ if (msr & MSR_POW) {
+ if (!vcpu->arch.pending_exceptions) {
+ kvm_vcpu_block(vcpu);
+ vcpu->stat.halt_wakeup++;
+
+ /* Unset POW bit after we woke up */
+ msr &= ~MSR_POW;
+ vcpu->arch.shared->msr = msr;
+ }
+ }
+
+ if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+ (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
+ kvmppc_mmu_flush_segments(vcpu);
+ kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
+
+ /* Preload magic page segment when in kernel mode */
+ if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
+ struct kvm_vcpu_arch *a = &vcpu->arch;
+
+ if (msr & MSR_DR)
+ kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
+ else
+ kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
+ }
+ }
+
+ /* Preload FPU if it's enabled */
+ if (vcpu->arch.shared->msr & MSR_FP)
+ kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
+}
+
+void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
+{
+ u32 host_pvr;
+
+ vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
+ vcpu->arch.pvr = pvr;
+#ifdef CONFIG_PPC_BOOK3S_64
+ if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
+ kvmppc_mmu_book3s_64_init(vcpu);
+ to_book3s(vcpu)->hior = 0xfff00000;
+ to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
+ } else
+#endif
+ {
+ kvmppc_mmu_book3s_32_init(vcpu);
+ to_book3s(vcpu)->hior = 0;
+ to_book3s(vcpu)->msr_mask = 0xffffffffULL;
+ }
+
+ /* If we are running at hypervisor level on a 970, we can tell the CPU to
+ * treat DCBZ as a 32-byte store */
+ vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
+ if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
+ !strcmp(cur_cpu_spec->platform, "ppc970"))
+ vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
+
+ /* Cell performs badly if MSR_FEx are set. So let's hope nobody
+ really needs them in a VM on Cell and force-disable them. */
+ if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
+ to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ /* 32 bit Book3S always has 32 byte dcbz */
+ vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
+#endif
+
+ /* On some CPUs we can execute paired single operations natively */
+ asm ( "mfpvr %0" : "=r"(host_pvr));
+ switch (host_pvr) {
+ case 0x00080200: /* lonestar 2.0 */
+ case 0x00088202: /* lonestar 2.2 */
+ case 0x70000100: /* gekko 1.0 */
+ case 0x00080100: /* gekko 2.0 */
+ case 0x00083203: /* gekko 2.3a */
+ case 0x00083213: /* gekko 2.3b */
+ case 0x00083204: /* gekko 2.4 */
+ case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */
+ case 0x00087200: /* broadway */
+ vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
+ /* Enable HID2.PSE - in case we need it later */
+ mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
+ }
+}
+
+/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes. To
+ * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
+ * emulate a 32-byte dcbz length.
+ *
+ * The Book3s_64 designers also realized this case and implemented a special bit
+ * in the HID5 register, which is a hypervisor resource. Thus we can't use it.
+ *
+ * My approach here is to patch the dcbz instruction on executing pages.
+ */
+static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
+{
+ struct page *hpage;
+ u64 hpage_offset;
+ u32 *page;
+ int i;
+
+ hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
+ if (is_error_page(hpage)) {
+ kvm_release_page_clean(hpage);
+ return;
+ }
+
+ hpage_offset = pte->raddr & ~PAGE_MASK;
+ hpage_offset &= ~0xFFFULL;
+ hpage_offset /= 4;
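+ /* hpage_offset is now the word index of the guest's 4K page within the host page */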
+
+ get_page(hpage);
+ page = kmap_atomic(hpage, KM_USER0);
+
+ /* patch dcbz into reserved instruction, so we trap */
+ for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
+ if ((page[i] & 0xff0007ff) == INS_DCBZ)
+ page[i] &= 0xfffffff7;
+
+ kunmap_atomic(page, KM_USER0);
+ put_page(hpage);
+}
+
+static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+ ulong mp_pa = vcpu->arch.magic_page_pa;
+
+ if (unlikely(mp_pa) &&
+ unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
+ return 1;
+ }
+
+ return kvm_is_visible_gfn(vcpu->kvm, gfn);
+}
+
+int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ ulong eaddr, int vec)
+{
+ bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
+ int r = RESUME_GUEST;
+ int relocated;
+ int page_found = 0;
+ struct kvmppc_pte pte;
+ bool is_mmio = false;
+ bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
+ bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
+ u64 vsid;
+
+ relocated = data ? dr : ir;
+
+ /* Resolve real address if translation turned on */
+ if (relocated) {
+ page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
+ } else {
+ pte.may_execute = true;
+ pte.may_read = true;
+ pte.may_write = true;
+ pte.raddr = eaddr & KVM_PAM;
+ pte.eaddr = eaddr;
+ pte.vpage = eaddr >> 12;
+ }
+
+ switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+ case 0:
+ pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
+ break;
+ case MSR_DR:
+ case MSR_IR:
+ vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
+
+ if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
+ pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
+ else
+ pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
+ pte.vpage |= vsid;
+
+ if (vsid == -1)
+ page_found = -EINVAL;
+ break;
+ }
+
+ if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
+ (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
+ /*
+ * If we do the dcbz hack, mappings taken for data accesses must be
+ * NX, so the first instruction fetch faults and we can patch the
+ * code before it runs. This renders our guest NX-less.
+ */
+ pte.may_execute = !data;
+ }
+
+ if (page_found == -ENOENT) {
+ /* Page not found in guest PTE entries */
+ vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+ vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
+ vcpu->arch.shared->msr |=
+ (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+ kvmppc_book3s_queue_irqprio(vcpu, vec);
+ } else if (page_found == -EPERM) {
+ /* Storage protection */
+ vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+ vcpu->arch.shared->dsisr =
+ to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
+ vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
+ vcpu->arch.shared->msr |=
+ (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+ kvmppc_book3s_queue_irqprio(vcpu, vec);
+ } else if (page_found == -EINVAL) {
+ /* Page not found in guest SLB */
+ vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+ kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
+ } else if (!is_mmio &&
+ kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
+ /* The guest's PTE is not mapped yet. Map on the host */
+ kvmppc_mmu_map_page(vcpu, &pte);
+ if (data)
+ vcpu->stat.sp_storage++;
+ else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
+ (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
+ kvmppc_patch_dcbz(vcpu, &pte);
+ } else {
+ /* MMIO */
+ vcpu->stat.mmio_exits++;
+ vcpu->arch.paddr_accessed = pte.raddr;
+ r = kvmppc_emulate_mmio(run, vcpu);
+ if ( r == RESUME_HOST_NV )
+ r = RESUME_HOST;
+ }
+
+ return r;
+}
+
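+/*
+ * With CONFIG_VSX the thread's FP save area holds two doublewords per
+ * register (the FPR plus the low half of the matching VSR), so FPR i
+ * lives at flat index 2*i; callers add 1 to reach the VSR halves.
+ */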
+static inline int get_fpr_index(int i)
+{
+#ifdef CONFIG_VSX
+ i *= 2;
+#endif
+ return i;
+}
+
+/* Give up external provider (FPU, Altivec, VSX) */
+void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
+{
+ struct thread_struct *t = &current->thread;
+ u64 *vcpu_fpr = vcpu->arch.fpr;
+#ifdef CONFIG_VSX
+ u64 *vcpu_vsx = vcpu->arch.vsr;
+#endif
+ u64 *thread_fpr = (u64*)t->fpr;
+ int i;
+
+ if (!(vcpu->arch.guest_owned_ext & msr))
+ return;
+
+#ifdef DEBUG_EXT
+ printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
+#endif
+
+ switch (msr) {
+ case MSR_FP:
+ giveup_fpu(current);
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
+ vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
+
+ vcpu->arch.fpscr = t->fpscr.val;
+ break;
+ case MSR_VEC:
+#ifdef CONFIG_ALTIVEC
+ giveup_altivec(current);
+ memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
+ vcpu->arch.vscr = t->vscr;
+#endif
+ break;
+ case MSR_VSX:
+#ifdef CONFIG_VSX
+ __giveup_vsx(current);
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
+ vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
+#endif
+ break;
+ default:
+ BUG();
+ }
+
+ vcpu->arch.guest_owned_ext &= ~msr;
+ current->thread.regs->msr &= ~msr;
+ kvmppc_recalc_shadow_msr(vcpu);
+}
+
+static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
+{
+ ulong srr0 = kvmppc_get_pc(vcpu);
+ u32 last_inst = kvmppc_get_last_inst(vcpu);
+ int ret;
+
+ ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
+ if (ret == -ENOENT) {
+ ulong msr = vcpu->arch.shared->msr;
+
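+ /*
+ * Loading the instruction itself faulted: fix up the MSR status
+ * bits that will be reported to the guest and queue an
+ * instruction storage interrupt.
+ */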
+ msr = kvmppc_set_field(msr, 33, 33, 1);
+ msr = kvmppc_set_field(msr, 34, 36, 0);
+ vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
+ kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
+ return EMULATE_AGAIN;
+ }
+
+ return EMULATE_DONE;
+}
+
+static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
+{
+
+ /* Need to do paired single emulation? */
+ if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
+ return EMULATE_DONE;
+
+ /* Read out the instruction */
+ if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
+ /* Need to emulate */
+ return EMULATE_FAIL;
+
+ return EMULATE_AGAIN;
+}
+
+/* Handle external providers (FPU, Altivec, VSX) */
+static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
+ ulong msr)
+{
+ struct thread_struct *t = &current->thread;
+ u64 *vcpu_fpr = vcpu->arch.fpr;
+#ifdef CONFIG_VSX
+ u64 *vcpu_vsx = vcpu->arch.vsr;
+#endif
+ u64 *thread_fpr = (u64*)t->fpr;
+ int i;
+
+ /* When we have paired singles, we emulate in software */
+ if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
+ return RESUME_GUEST;
+
+ if (!(vcpu->arch.shared->msr & msr)) {
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ return RESUME_GUEST;
+ }
+
+ /* We already own the ext */
+ if (vcpu->arch.guest_owned_ext & msr) {
+ return RESUME_GUEST;
+ }
+
+#ifdef DEBUG_EXT
+ printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
+#endif
+
+ current->thread.regs->msr |= msr;
+
+ switch (msr) {
+ case MSR_FP:
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
+ thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
+
+ t->fpscr.val = vcpu->arch.fpscr;
+ t->fpexc_mode = 0;
+ kvmppc_load_up_fpu();
+ break;
+ case MSR_VEC:
+#ifdef CONFIG_ALTIVEC
+ memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
+ t->vscr = vcpu->arch.vscr;
+ t->vrsave = -1;
+ kvmppc_load_up_altivec();
+#endif
+ break;
+ case MSR_VSX:
+#ifdef CONFIG_VSX
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
+ thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
+ kvmppc_load_up_vsx();
+#endif
+ break;
+ default:
+ BUG();
+ }
+
+ vcpu->arch.guest_owned_ext |= msr;
+
+ kvmppc_recalc_shadow_msr(vcpu);
+
+ return RESUME_GUEST;
+}
+
+int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int exit_nr)
+{
+ int r = RESUME_HOST;
+
+ vcpu->stat.sum_exits++;
+
+ run->exit_reason = KVM_EXIT_UNKNOWN;
+ run->ready_for_interrupt_injection = 1;
+
+ trace_kvm_book3s_exit(exit_nr, vcpu);
+ kvm_resched(vcpu);
+ switch (exit_nr) {
+ case BOOK3S_INTERRUPT_INST_STORAGE:
+ vcpu->stat.pf_instruc++;
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ /* We mark segments as unused when invalidating them, so
+ * treat the respective fault as a segment fault. */
+ if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
+ == SR_INVALID) {
+ kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
+ r = RESUME_GUEST;
+ break;
+ }
+#endif
+
+ /* only care about PTEG not found errors, but leave NX alone */
+ if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
+ r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
+ vcpu->stat.sp_instruc++;
+ } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
+ (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
+ /*
+ * XXX If we do the dcbz hack we use the NX bit to flush & patch the page,
+ * so we can't use the NX bit inside the guest. Let's cross our fingers
+ * that no guest that needs the dcbz hack also uses NX.
+ */
+ kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
+ r = RESUME_GUEST;
+ } else {
+ vcpu->arch.shared->msr |=
+ to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ r = RESUME_GUEST;
+ }
+ break;
+ case BOOK3S_INTERRUPT_DATA_STORAGE:
+ {
+ ulong dar = kvmppc_get_fault_dar(vcpu);
+ vcpu->stat.pf_storage++;
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ /* We mark segments as unused when invalidating them, so
+ * treat the respective fault as a segment fault. */
+ if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
+ kvmppc_mmu_map_segment(vcpu, dar);
+ r = RESUME_GUEST;
+ break;
+ }
+#endif
+
+ /* The only case we need to handle is missing shadow PTEs */
+ if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
+ r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
+ } else {
+ vcpu->arch.shared->dar = dar;
+ vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ r = RESUME_GUEST;
+ }
+ break;
+ }
+ case BOOK3S_INTERRUPT_DATA_SEGMENT:
+ if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
+ vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+ kvmppc_book3s_queue_irqprio(vcpu,
+ BOOK3S_INTERRUPT_DATA_SEGMENT);
+ }
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_INST_SEGMENT:
+ if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
+ kvmppc_book3s_queue_irqprio(vcpu,
+ BOOK3S_INTERRUPT_INST_SEGMENT);
+ }
+ r = RESUME_GUEST;
+ break;
+ /* We're good on these - the host merely wanted to get our attention */
+ case BOOK3S_INTERRUPT_DECREMENTER:
+ vcpu->stat.dec_exits++;
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_EXTERNAL:
+ vcpu->stat.ext_intr_exits++;
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_PERFMON:
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_PROGRAM:
+ {
+ enum emulation_result er;
+ ulong flags;
+
+program_interrupt:
+ flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
+
+ if (vcpu->arch.shared->msr & MSR_PR) {
+#ifdef EXIT_DEBUG
+ printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
+#endif
+ if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
+ (INS_DCBZ & 0xfffffff7)) {
+ kvmppc_core_queue_program(vcpu, flags);
+ r = RESUME_GUEST;
+ break;
+ }
+ }
+
+ vcpu->stat.emulated_inst_exits++;
+ er = kvmppc_emulate_instruction(run, vcpu);
+ switch (er) {
+ case EMULATE_DONE:
+ r = RESUME_GUEST_NV;
+ break;
+ case EMULATE_AGAIN:
+ r = RESUME_GUEST;
+ break;
+ case EMULATE_FAIL:
+ printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
+ __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
+ kvmppc_core_queue_program(vcpu, flags);
+ r = RESUME_GUEST;
+ break;
+ case EMULATE_DO_MMIO:
+ run->exit_reason = KVM_EXIT_MMIO;
+ r = RESUME_HOST_NV;
+ break;
+ default:
+ BUG();
+ }
+ break;
+ }
+ case BOOK3S_INTERRUPT_SYSCALL:
+ if (vcpu->arch.osi_enabled &&
+ (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
+ (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
+ /* MOL hypercalls */
+ u64 *gprs = run->osi.gprs;
+ int i;
+
+ run->exit_reason = KVM_EXIT_OSI;
+ for (i = 0; i < 32; i++)
+ gprs[i] = kvmppc_get_gpr(vcpu, i);
+ vcpu->arch.osi_needed = 1;
+ r = RESUME_HOST_NV;
+ } else if (!(vcpu->arch.shared->msr & MSR_PR) &&
+ (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
+ /* KVM PV hypercalls */
+ kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
+ r = RESUME_GUEST;
+ } else {
+ /* Guest syscalls */
+ vcpu->stat.syscall_exits++;
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ r = RESUME_GUEST;
+ }
+ break;
+ case BOOK3S_INTERRUPT_FP_UNAVAIL:
+ case BOOK3S_INTERRUPT_ALTIVEC:
+ case BOOK3S_INTERRUPT_VSX:
+ {
+ int ext_msr = 0;
+
+ switch (exit_nr) {
+ case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP; break;
+ case BOOK3S_INTERRUPT_ALTIVEC: ext_msr = MSR_VEC; break;
+ case BOOK3S_INTERRUPT_VSX: ext_msr = MSR_VSX; break;
+ }
+
+ switch (kvmppc_check_ext(vcpu, exit_nr)) {
+ case EMULATE_DONE:
+ /* everything ok - let's enable the ext */
+ r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
+ break;
+ case EMULATE_FAIL:
+ /* we need to emulate this instruction */
+ goto program_interrupt;
+ break;
+ default:
+ /* nothing to worry about - go again */
+ break;
+ }
+ break;
+ }
+ case BOOK3S_INTERRUPT_ALIGNMENT:
+ if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
+ vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
+ kvmppc_get_last_inst(vcpu));
+ vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
+ kvmppc_get_last_inst(vcpu));
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ }
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_MACHINE_CHECK:
+ case BOOK3S_INTERRUPT_TRACE:
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ r = RESUME_GUEST;
+ break;
+ default:
+ /* Ugh - bork here! What did we get? */
+ printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
+ exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
+ r = RESUME_HOST;
+ BUG();
+ break;
+ }
+
+
+ if (!(r & RESUME_HOST)) {
+ /* To avoid clobbering exit_reason, only check for signals if
+ * we aren't already exiting to userspace for some other
+ * reason. */
+ if (signal_pending(current)) {
+#ifdef EXIT_DEBUG
+ printk(KERN_EMERG "KVM: Going back to host\n");
+#endif
+ vcpu->stat.signal_exits++;
+ run->exit_reason = KVM_EXIT_INTR;
+ r = -EINTR;
+ } else {
+ /* In case an interrupt came in that was triggered
+ * from userspace (like DEC), we need to check what
+ * to inject now! */
+ kvmppc_core_deliver_interrupts(vcpu);
+ }
+ }
+
+ trace_kvm_book3s_reenter(r, vcpu);
+
+ return r;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+ int i;
+
+ sregs->pvr = vcpu->arch.pvr;
+
+ sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
+ if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
+ for (i = 0; i < 64; i++) {
+ sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
+ sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
+ }
+ } else {
+ for (i = 0; i < 16; i++)
+ sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];
+
+ for (i = 0; i < 8; i++) {
+ sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
+ sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
+ }
+ }
+
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+ int i;
+
+ kvmppc_set_pvr(vcpu, sregs->pvr);
+
+ vcpu3s->sdr1 = sregs->u.s.sdr1;
+ if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
+ for (i = 0; i < 64; i++) {
+ vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
+ sregs->u.s.ppc64.slb[i].slbe);
+ }
+ } else {
+ for (i = 0; i < 16; i++) {
+ vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
+ }
+ for (i = 0; i < 8; i++) {
+ kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
+ (u32)sregs->u.s.ppc32.ibat[i]);
+ kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
+ (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
+ kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
+ (u32)sregs->u.s.ppc32.dbat[i]);
+ kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
+ (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
+ }
+ }
+
+ /* Flush the MMU after messing with the segments */
+ kvmppc_mmu_pte_flush(vcpu, 0, 0);
+
+ return 0;
+}
+
+int kvmppc_core_check_processor_compat(void)
+{
+ return 0;
+}
+
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+ struct kvmppc_vcpu_book3s *vcpu_book3s;
+ struct kvm_vcpu *vcpu;
+ int err = -ENOMEM;
+ unsigned long p;
+
+ vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
+ if (!vcpu_book3s)
+ goto out;
+
+ vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
+ kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
+ if (!vcpu_book3s->shadow_vcpu)
+ goto free_vcpu;
+
+ vcpu = &vcpu_book3s->vcpu;
+ err = kvm_vcpu_init(vcpu, kvm, id);
+ if (err)
+ goto free_shadow_vcpu;
+
+ p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
+ /* the real shared page fills the last 4k of our page */
+ vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
+ if (!p)
+ goto uninit_vcpu;
+
+ vcpu->arch.host_retip = kvm_return_point;
+ vcpu->arch.host_msr = mfmsr();
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* default to book3s_64 (970fx) */
+ vcpu->arch.pvr = 0x3C0301;
+#else
+ /* default to book3s_32 (750) */
+ vcpu->arch.pvr = 0x84202;
+#endif
+ kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
+ vcpu->arch.slb_nr = 64;
+
+ /* remember where some real-mode handlers are */
+ vcpu->arch.trampoline_lowmem = __pa(kvmppc_handler_lowmem_trampoline);
+ vcpu->arch.trampoline_enter = __pa(kvmppc_handler_trampoline_enter);
+ vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
+#ifdef CONFIG_PPC_BOOK3S_64
+ vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
+#else
+ vcpu->arch.rmcall = (ulong)kvmppc_rmcall;
+#endif
+
+ vcpu->arch.shadow_msr = MSR_USER64;
+
+ err = kvmppc_mmu_init(vcpu);
+ if (err < 0)
+ goto uninit_vcpu;
+
+ return vcpu;
+
+uninit_vcpu:
+ kvm_vcpu_uninit(vcpu);
+free_shadow_vcpu:
+ kfree(vcpu_book3s->shadow_vcpu);
+free_vcpu:
+ vfree(vcpu_book3s);
+out:
+ return ERR_PTR(err);
+}
+
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
+
+ free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
+ kvm_vcpu_uninit(vcpu);
+ kfree(vcpu_book3s->shadow_vcpu);
+ vfree(vcpu_book3s);
+}
+
+int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+ int ret;
+ double fpr[32][TS_FPRWIDTH];
+ unsigned int fpscr;
+ int fpexc_mode;
+#ifdef CONFIG_ALTIVEC
+ vector128 vr[32];
+ vector128 vscr;
+ unsigned long uninitialized_var(vrsave);
+ int used_vr;
+#endif
+#ifdef CONFIG_VSX
+ int used_vsr;
+#endif
+ ulong ext_msr;
+
+ /* No need to go into the guest when all we would do is come straight back out */
+ if (signal_pending(current)) {
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ return -EINTR;
+ }
+
+ /* Save FPU state in stack */
+ if (current->thread.regs->msr & MSR_FP)
+ giveup_fpu(current);
+ memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
+ fpscr = current->thread.fpscr.val;
+ fpexc_mode = current->thread.fpexc_mode;
+
+#ifdef CONFIG_ALTIVEC
+ /* Save Altivec state in stack */
+ used_vr = current->thread.used_vr;
+ if (used_vr) {
+ if (current->thread.regs->msr & MSR_VEC)
+ giveup_altivec(current);
+ memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
+ vscr = current->thread.vscr;
+ vrsave = current->thread.vrsave;
+ }
+#endif
+
+#ifdef CONFIG_VSX
+ /* Save VSX state in stack */
+ used_vsr = current->thread.used_vsr;
+ if (used_vsr && (current->thread.regs->msr & MSR_VSX))
+ __giveup_vsx(current);
+#endif
+
+ /* Remember the MSR with disabled extensions */
+ ext_msr = current->thread.regs->msr;
+
+ /* Preload FPU if it's enabled */
+ if (vcpu->arch.shared->msr & MSR_FP)
+ kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
+
+ kvm_guest_enter();
+
+ ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+
+ kvm_guest_exit();
+
+ local_irq_disable();
+
+ current->thread.regs->msr = ext_msr;
+
+ /* Make sure we save the guest FPU/Altivec/VSX state */
+ kvmppc_giveup_ext(vcpu, MSR_FP);
+ kvmppc_giveup_ext(vcpu, MSR_VEC);
+ kvmppc_giveup_ext(vcpu, MSR_VSX);
+
+ /* Restore FPU state from stack */
+ memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
+ current->thread.fpscr.val = fpscr;
+ current->thread.fpexc_mode = fpexc_mode;
+
+#ifdef CONFIG_ALTIVEC
+ /* Restore Altivec state from stack */
+ if (used_vr && current->thread.used_vr) {
+ memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
+ current->thread.vscr = vscr;
+ current->thread.vrsave = vrsave;
+ }
+ current->thread.used_vr = used_vr;
+#endif
+
+#ifdef CONFIG_VSX
+ current->thread.used_vsr = used_vsr;
+#endif
+
+ return ret;
+}
+
+int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem)
+{
+ return 0;
+}
+
+void kvmppc_core_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem)
+{
+}
+
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+ return 0;
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+}
+
+static int kvmppc_book3s_init(void)
+{
+ int r;
+
+ r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
+ THIS_MODULE);
+
+ if (r)
+ return r;
+
+ r = kvmppc_mmu_hpte_sysinit();
+
+ return r;
+}
+
+static void kvmppc_book3s_exit(void)
+{
+ kvmppc_mmu_hpte_sysexit();
+ kvm_exit();
+}
+
+module_init(kvmppc_book3s_init);
+module_exit(kvmppc_book3s_exit);
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 1a1b34487e7..c1f877c4a88 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -36,41 +36,44 @@
#if defined(CONFIG_PPC_BOOK3S_64)
#define LOAD_SHADOW_VCPU(reg) GET_PACA(reg)
-#define SHADOW_VCPU_OFF PACA_KVM_SVCPU
#define MSR_NOIRQ MSR_KERNEL & ~(MSR_IR | MSR_DR)
#define FUNC(name) GLUE(.,name)
+kvmppc_skip_interrupt:
+ /*
+ * Here all GPRs are unchanged from when the interrupt happened
+ * except for r13, which is saved in SPRG_SCRATCH0.
+ */
+ mfspr r13, SPRN_SRR0
+ addi r13, r13, 4
+ mtspr SPRN_SRR0, r13
+ GET_SCRATCH0(r13)
+ rfid
+ b .
+
+kvmppc_skip_Hinterrupt:
+ /*
+ * Here all GPRs are unchanged from when the interrupt happened
+ * except for r13, which is saved in SPRG_SCRATCH0.
+ */
+ mfspr r13, SPRN_HSRR0
+ addi r13, r13, 4
+ mtspr SPRN_HSRR0, r13
+ GET_SCRATCH0(r13)
+ hrfid
+ b .
+
#elif defined(CONFIG_PPC_BOOK3S_32)
-#define LOAD_SHADOW_VCPU(reg) \
- mfspr reg, SPRN_SPRG_THREAD; \
- lwz reg, THREAD_KVM_SVCPU(reg); \
- /* PPC32 can have a NULL pointer - let's check for that */ \
- mtspr SPRN_SPRG_SCRATCH1, r12; /* Save r12 */ \
- mfcr r12; \
- cmpwi reg, 0; \
- bne 1f; \
- mfspr reg, SPRN_SPRG_SCRATCH0; \
- mtcr r12; \
- mfspr r12, SPRN_SPRG_SCRATCH1; \
- b kvmppc_resume_\intno; \
-1:; \
- mtcr r12; \
- mfspr r12, SPRN_SPRG_SCRATCH1; \
- tophys(reg, reg)
-
-#define SHADOW_VCPU_OFF 0
#define MSR_NOIRQ MSR_KERNEL
#define FUNC(name) name
-#endif
-
.macro INTERRUPT_TRAMPOLINE intno
.global kvmppc_trampoline_\intno
kvmppc_trampoline_\intno:
- SET_SCRATCH0(r13) /* Save r13 */
+ mtspr SPRN_SPRG_SCRATCH0, r13 /* Save r13 */
/*
* First thing to do is to find out if we're coming
@@ -78,19 +81,28 @@ kvmppc_trampoline_\intno:
*
* To distinguish, we check a magic byte in the PACA/current
*/
- LOAD_SHADOW_VCPU(r13)
- PPC_STL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
+ mfspr r13, SPRN_SPRG_THREAD
+ lwz r13, THREAD_KVM_SVCPU(r13)
+ /* PPC32 can have a NULL pointer - let's check for that */
+ mtspr SPRN_SPRG_SCRATCH1, r12 /* Save r12 */
mfcr r12
- stw r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
- lbz r12, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
+ cmpwi r13, 0
+ bne 1f
+2: mtcr r12
+ mfspr r12, SPRN_SPRG_SCRATCH1
+ mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */
+ b kvmppc_resume_\intno /* Get back original handler */
+
+1: tophys(r13, r13)
+ stw r12, HSTATE_SCRATCH1(r13)
+ mfspr r12, SPRN_SPRG_SCRATCH1
+ stw r12, HSTATE_SCRATCH0(r13)
+ lbz r12, HSTATE_IN_GUEST(r13)
cmpwi r12, KVM_GUEST_MODE_NONE
bne ..kvmppc_handler_hasmagic_\intno
/* No KVM guest? Then jump back to the Linux handler! */
- lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
- mtcr r12
- PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
- GET_SCRATCH0(r13) /* r13 = original r13 */
- b kvmppc_resume_\intno /* Get back original handler */
+ lwz r12, HSTATE_SCRATCH1(r13)
+ b 2b
/* Now we know we're handling a KVM guest */
..kvmppc_handler_hasmagic_\intno:
@@ -112,9 +124,6 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_MACHINE_CHECK
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL
-#ifdef CONFIG_PPC_BOOK3S_64
-INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL_HV
-#endif
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_FP_UNAVAIL
@@ -124,14 +133,6 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_TRACE
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PERFMON
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC
-/* Those are only available on 64 bit machines */
-
-#ifdef CONFIG_PPC_BOOK3S_64
-INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_SEGMENT
-INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_SEGMENT
-INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
-#endif
-
/*
* Bring us back to the faulting code, but skip the
* faulting instruction.
@@ -143,8 +144,8 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
*
* R12 = free
* R13 = Shadow VCPU (PACA)
- * SVCPU.SCRATCH0 = guest R12
- * SVCPU.SCRATCH1 = guest CR
+ * HSTATE.SCRATCH0 = guest R12
+ * HSTATE.SCRATCH1 = guest CR
* SPRG_SCRATCH0 = guest R13
*
*/
@@ -156,13 +157,14 @@ kvmppc_handler_skip_ins:
mtsrr0 r12
/* Clean up all state */
- lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
+ lwz r12, HSTATE_SCRATCH1(r13)
mtcr r12
- PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
+ PPC_LL r12, HSTATE_SCRATCH0(r13)
GET_SCRATCH0(r13)
/* And get back into the code */
RFI
+#endif
/*
* This trampoline brings us back to a real mode handler
@@ -251,12 +253,4 @@ define_load_up(altivec)
define_load_up(vsx)
#endif
-.global kvmppc_trampoline_lowmem
-kvmppc_trampoline_lowmem:
- PPC_LONG kvmppc_handler_lowmem_trampoline - CONFIG_KERNEL_START
-
-.global kvmppc_trampoline_enter
-kvmppc_trampoline_enter:
- PPC_LONG kvmppc_handler_trampoline_enter - CONFIG_KERNEL_START
-
#include "book3s_segment.S"
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 451264274b8..aed32e51721 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -22,7 +22,7 @@
#if defined(CONFIG_PPC_BOOK3S_64)
#define GET_SHADOW_VCPU(reg) \
- addi reg, r13, PACA_KVM_SVCPU
+ mr reg, r13
#elif defined(CONFIG_PPC_BOOK3S_32)
@@ -71,6 +71,10 @@ kvmppc_handler_trampoline_enter:
/* r3 = shadow vcpu */
GET_SHADOW_VCPU(r3)
+ /* Save R1/R2 in the PACA (64-bit) or shadow_vcpu (32-bit) */
+ PPC_STL r1, HSTATE_HOST_R1(r3)
+ PPC_STL r2, HSTATE_HOST_R2(r3)
+
/* Move SRR0 and SRR1 into the respective regs */
PPC_LL r9, SVCPU_PC(r3)
mtsrr0 r9
@@ -78,36 +82,36 @@ kvmppc_handler_trampoline_enter:
/* Activate guest mode, so faults get handled by KVM */
li r11, KVM_GUEST_MODE_GUEST
- stb r11, SVCPU_IN_GUEST(r3)
+ stb r11, HSTATE_IN_GUEST(r3)
/* Switch to guest segment. This is subarch specific. */
LOAD_GUEST_SEGMENTS
/* Enter guest */
- PPC_LL r4, (SVCPU_CTR)(r3)
- PPC_LL r5, (SVCPU_LR)(r3)
- lwz r6, (SVCPU_CR)(r3)
- lwz r7, (SVCPU_XER)(r3)
+ PPC_LL r4, SVCPU_CTR(r3)
+ PPC_LL r5, SVCPU_LR(r3)
+ lwz r6, SVCPU_CR(r3)
+ lwz r7, SVCPU_XER(r3)
mtctr r4
mtlr r5
mtcr r6
mtxer r7
- PPC_LL r0, (SVCPU_R0)(r3)
- PPC_LL r1, (SVCPU_R1)(r3)
- PPC_LL r2, (SVCPU_R2)(r3)
- PPC_LL r4, (SVCPU_R4)(r3)
- PPC_LL r5, (SVCPU_R5)(r3)
- PPC_LL r6, (SVCPU_R6)(r3)
- PPC_LL r7, (SVCPU_R7)(r3)
- PPC_LL r8, (SVCPU_R8)(r3)
- PPC_LL r9, (SVCPU_R9)(r3)
- PPC_LL r10, (SVCPU_R10)(r3)
- PPC_LL r11, (SVCPU_R11)(r3)
- PPC_LL r12, (SVCPU_R12)(r3)
- PPC_LL r13, (SVCPU_R13)(r3)
+ PPC_LL r0, SVCPU_R0(r3)
+ PPC_LL r1, SVCPU_R1(r3)
+ PPC_LL r2, SVCPU_R2(r3)
+ PPC_LL r4, SVCPU_R4(r3)
+ PPC_LL r5, SVCPU_R5(r3)
+ PPC_LL r6, SVCPU_R6(r3)
+ PPC_LL r7, SVCPU_R7(r3)
+ PPC_LL r8, SVCPU_R8(r3)
+ PPC_LL r9, SVCPU_R9(r3)
+ PPC_LL r10, SVCPU_R10(r3)
+ PPC_LL r11, SVCPU_R11(r3)
+ PPC_LL r12, SVCPU_R12(r3)
+ PPC_LL r13, SVCPU_R13(r3)
PPC_LL r3, (SVCPU_R3)(r3)
@@ -125,56 +129,63 @@ kvmppc_handler_trampoline_enter_end:
.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:
+.global kvmppc_interrupt
+kvmppc_interrupt:
+
/* Register usage at this point:
*
* SPRG_SCRATCH0 = guest R13
* R12 = exit handler id
- * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
- * SVCPU.SCRATCH0 = guest R12
- * SVCPU.SCRATCH1 = guest CR
+ * R13 = shadow vcpu (32-bit) or PACA (64-bit)
+ * HSTATE.SCRATCH0 = guest R12
+ * HSTATE.SCRATCH1 = guest CR
*
*/
/* Save registers */
- PPC_STL r0, (SHADOW_VCPU_OFF + SVCPU_R0)(r13)
- PPC_STL r1, (SHADOW_VCPU_OFF + SVCPU_R1)(r13)
- PPC_STL r2, (SHADOW_VCPU_OFF + SVCPU_R2)(r13)
- PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_R3)(r13)
- PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_R4)(r13)
- PPC_STL r5, (SHADOW_VCPU_OFF + SVCPU_R5)(r13)
- PPC_STL r6, (SHADOW_VCPU_OFF + SVCPU_R6)(r13)
- PPC_STL r7, (SHADOW_VCPU_OFF + SVCPU_R7)(r13)
- PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_R8)(r13)
- PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_R9)(r13)
- PPC_STL r10, (SHADOW_VCPU_OFF + SVCPU_R10)(r13)
- PPC_STL r11, (SHADOW_VCPU_OFF + SVCPU_R11)(r13)
+ PPC_STL r0, SVCPU_R0(r13)
+ PPC_STL r1, SVCPU_R1(r13)
+ PPC_STL r2, SVCPU_R2(r13)
+ PPC_STL r3, SVCPU_R3(r13)
+ PPC_STL r4, SVCPU_R4(r13)
+ PPC_STL r5, SVCPU_R5(r13)
+ PPC_STL r6, SVCPU_R6(r13)
+ PPC_STL r7, SVCPU_R7(r13)
+ PPC_STL r8, SVCPU_R8(r13)
+ PPC_STL r9, SVCPU_R9(r13)
+ PPC_STL r10, SVCPU_R10(r13)
+ PPC_STL r11, SVCPU_R11(r13)
/* Restore R1/R2 so we can handle faults */
- PPC_LL r1, (SHADOW_VCPU_OFF + SVCPU_HOST_R1)(r13)
- PPC_LL r2, (SHADOW_VCPU_OFF + SVCPU_HOST_R2)(r13)
+ PPC_LL r1, HSTATE_HOST_R1(r13)
+ PPC_LL r2, HSTATE_HOST_R2(r13)
/* Save guest PC and MSR */
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
andi. r0,r12,0x2
beq 1f
mfspr r3,SPRN_HSRR0
mfspr r4,SPRN_HSRR1
andi. r12,r12,0x3ffd
b 2f
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+#endif
1: mfsrr0 r3
mfsrr1 r4
2:
- PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_PC)(r13)
- PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_SHADOW_SRR1)(r13)
+ PPC_STL r3, SVCPU_PC(r13)
+ PPC_STL r4, SVCPU_SHADOW_SRR1(r13)
/* Get scratch'ed off registers */
GET_SCRATCH0(r9)
- PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
- lwz r7, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
+ PPC_LL r8, HSTATE_SCRATCH0(r13)
+ lwz r7, HSTATE_SCRATCH1(r13)
- PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_R13)(r13)
- PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_R12)(r13)
- stw r7, (SHADOW_VCPU_OFF + SVCPU_CR)(r13)
+ PPC_STL r9, SVCPU_R13(r13)
+ PPC_STL r8, SVCPU_R12(r13)
+ stw r7, SVCPU_CR(r13)
/* Save more register state */
@@ -184,11 +195,11 @@ kvmppc_handler_trampoline_exit:
mfctr r8
mflr r9
- stw r5, (SHADOW_VCPU_OFF + SVCPU_XER)(r13)
- PPC_STL r6, (SHADOW_VCPU_OFF + SVCPU_FAULT_DAR)(r13)
- stw r7, (SHADOW_VCPU_OFF + SVCPU_FAULT_DSISR)(r13)
- PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_CTR)(r13)
- PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_LR)(r13)
+ stw r5, SVCPU_XER(r13)
+ PPC_STL r6, SVCPU_FAULT_DAR(r13)
+ stw r7, SVCPU_FAULT_DSISR(r13)
+ PPC_STL r8, SVCPU_CTR(r13)
+ PPC_STL r9, SVCPU_LR(r13)
/*
* In order for us to easily get the last instruction,
@@ -218,7 +229,7 @@ ld_last_inst:
/* Set guest mode to 'jump over instruction' so if lwz faults
* we'll just continue at the next IP. */
li r9, KVM_GUEST_MODE_SKIP
- stb r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
+ stb r9, HSTATE_IN_GUEST(r13)
/* 1) enable paging for data */
mfmsr r9
@@ -232,13 +243,13 @@ ld_last_inst:
sync
#endif
- stw r0, (SHADOW_VCPU_OFF + SVCPU_LAST_INST)(r13)
+ stw r0, SVCPU_LAST_INST(r13)
no_ld_last_inst:
/* Unset guest mode */
li r9, KVM_GUEST_MODE_NONE
- stb r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
+ stb r9, HSTATE_IN_GUEST(r13)
/* Switch back to host MMU */
LOAD_HOST_SEGMENTS
@@ -248,7 +259,7 @@ no_ld_last_inst:
* R1 = host R1
* R2 = host R2
* R12 = exit handler id
- * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
+ * R13 = shadow vcpu (32-bit) or PACA (64-bit)
* SVCPU.* = guest *
*
*/
@@ -258,7 +269,7 @@ no_ld_last_inst:
ori r7, r7, MSR_IR|MSR_DR|MSR_RI|MSR_ME /* Enable paging */
mtsrr1 r7
/* Load highmem handler address */
- PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_VMHANDLER)(r13)
+ PPC_LL r8, HSTATE_VMHANDLER(r13)
mtsrr0 r8
RFI
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 8462b3a1c1c..ee45fa01220 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -13,6 +13,7 @@
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright IBM Corp. 2007
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
* Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
@@ -78,6 +79,60 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
}
}
+#ifdef CONFIG_SPE
+void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ enable_kernel_spe();
+ kvmppc_save_guest_spe(vcpu);
+ vcpu->arch.shadow_msr &= ~MSR_SPE;
+ preempt_enable();
+}
+
+static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ enable_kernel_spe();
+ kvmppc_load_guest_spe(vcpu);
+ vcpu->arch.shadow_msr |= MSR_SPE;
+ preempt_enable();
+}
+
+static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.shared->msr & MSR_SPE) {
+ if (!(vcpu->arch.shadow_msr & MSR_SPE))
+ kvmppc_vcpu_enable_spe(vcpu);
+ } else if (vcpu->arch.shadow_msr & MSR_SPE) {
+ kvmppc_vcpu_disable_spe(vcpu);
+ }
+}
+#else
+static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
+{
+}
+#endif
+
+/*
+ * Helper function for "full" MSR writes. No need to call this if only
+ * EE/CE/ME/DE/RI are changing.
+ */
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
+{
+ u32 old_msr = vcpu->arch.shared->msr;
+
+ vcpu->arch.shared->msr = new_msr;
+
+ kvmppc_mmu_msr_notify(vcpu, old_msr);
+
+ if (vcpu->arch.shared->msr & MSR_WE) {
+ kvm_vcpu_block(vcpu);
+ kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
+	}
+
+ kvmppc_vcpu_sync_spe(vcpu);
+}
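For context, a minimal sketch of the kind of caller the new out-of-line kvmppc_set_msr() is aimed at (illustrative only, not part of this patch; the emulation entry point is hypothetical, while kvmppc_get_gpr() and EMULATE_DONE are existing helpers):

/* Hypothetical emulation path for a full MSR write: routing it through
 * kvmppc_set_msr() keeps the shadow MMU state and the SPE enable bit
 * in sync with the new guest MSR value. */
static int example_emulate_mtmsr(struct kvm_vcpu *vcpu, int rs)
{
	kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
	return EMULATE_DONE;
}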
+
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
unsigned int priority)
{
@@ -257,6 +312,19 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
vcpu->arch.shared->int_pending = 0;
}
+int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ local_irq_disable();
+ kvm_guest_enter();
+ ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+ kvm_guest_exit();
+ local_irq_enable();
+
+ return ret;
+}
+
/**
* kvmppc_handle_exit
*
@@ -344,10 +412,16 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_GUEST;
break;
- case BOOKE_INTERRUPT_SPE_UNAVAIL:
- kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
+#ifdef CONFIG_SPE
+ case BOOKE_INTERRUPT_SPE_UNAVAIL: {
+ if (vcpu->arch.shared->msr & MSR_SPE)
+ kvmppc_vcpu_enable_spe(vcpu);
+ else
+ kvmppc_booke_queue_irqprio(vcpu,
+ BOOKE_IRQPRIO_SPE_UNAVAIL);
r = RESUME_GUEST;
break;
+ }
case BOOKE_INTERRUPT_SPE_FP_DATA:
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
@@ -358,6 +432,28 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
r = RESUME_GUEST;
break;
+#else
+ case BOOKE_INTERRUPT_SPE_UNAVAIL:
+ /*
+ * Guest wants SPE, but host kernel doesn't support it. Send
+ * an "unimplemented operation" program check to the guest.
+ */
+ kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
+ r = RESUME_GUEST;
+ break;
+
+ /*
+ * These really should never happen without CONFIG_SPE,
+ * as we should never enable the real MSR[SPE] in the guest.
+ */
+ case BOOKE_INTERRUPT_SPE_FP_DATA:
+ case BOOKE_INTERRUPT_SPE_FP_ROUND:
+ printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
+ __func__, exit_nr, vcpu->arch.pc);
+ run->hw.hardware_exit_reason = exit_nr;
+ r = RESUME_HOST;
+ break;
+#endif
case BOOKE_INTERRUPT_DATA_STORAGE:
kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
@@ -392,6 +488,17 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
gpa_t gpaddr;
gfn_t gfn;
+#ifdef CONFIG_KVM_E500
+ if (!(vcpu->arch.shared->msr & MSR_PR) &&
+ (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
+ kvmppc_map_magic(vcpu);
+ kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
+ r = RESUME_GUEST;
+
+ break;
+ }
+#endif
+
/* Check the guest TLB. */
gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
if (gtlb_index < 0) {
@@ -514,6 +621,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.pc = 0;
vcpu->arch.shared->msr = 0;
+ vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
vcpu->arch.shadow_pid = 1;
@@ -770,6 +878,26 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
return -ENOTSUPP;
}
+int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem)
+{
+ return 0;
+}
+
+void kvmppc_core_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem)
+{
+}
+
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+ return 0;
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+}
+
int __init kvmppc_booke_init(void)
{
unsigned long ivor[16];
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 492bb703035..8e1fe33d64e 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -52,24 +52,19 @@
extern unsigned long kvmppc_booke_handlers;
-/* Helper function for "full" MSR writes. No need to call this if only EE is
- * changing. */
-static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
-{
- if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR))
- kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
-
- vcpu->arch.shared->msr = new_msr;
-
- if (vcpu->arch.shared->msr & MSR_WE) {
- kvm_vcpu_block(vcpu);
- kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
- };
-}
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr);
+void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr);
int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
+/* low-level asm code to transfer guest state */
+void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
+void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu);
+
+/* high-level function that manages flags and host state */
+void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu);
+
#endif /* __KVM_BOOKE_H__ */
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index b58ccae9590..42f2fb1f66e 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -13,6 +13,7 @@
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright IBM Corp. 2007
+ * Copyright 2011 Freescale Semiconductor, Inc.
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
*/
@@ -24,8 +25,6 @@
#include <asm/page.h>
#include <asm/asm-offsets.h>
-#define KVMPPC_MSR_MASK (MSR_CE|MSR_EE|MSR_PR|MSR_DE|MSR_ME|MSR_IS|MSR_DS)
-
#define VCPU_GPR(n) (VCPU_GPRS + (n * 4))
/* The host stack layout: */
@@ -192,6 +191,12 @@ _GLOBAL(kvmppc_resume_host)
lwz r3, VCPU_HOST_PID(r4)
mtspr SPRN_PID, r3
+#ifdef CONFIG_FSL_BOOKE
+	/* We cheat and know that Linux doesn't use PID1, so it is always 0. */
+ lis r3, 0
+ mtspr SPRN_PID1, r3
+#endif
+
/* Restore host IVPR before re-enabling interrupts. We cheat and know
* that Linux IVPR is always 0xc0000000. */
lis r3, 0xc000
@@ -241,6 +246,14 @@ _GLOBAL(kvmppc_resume_host)
heavyweight_exit:
/* Not returning to guest. */
+#ifdef CONFIG_SPE
+ /* save guest SPEFSCR and load host SPEFSCR */
+ mfspr r9, SPRN_SPEFSCR
+ stw r9, VCPU_SPEFSCR(r4)
+ lwz r9, VCPU_HOST_SPEFSCR(r4)
+ mtspr SPRN_SPEFSCR, r9
+#endif
+
/* We already saved guest volatile register state; now save the
* non-volatiles. */
stw r15, VCPU_GPR(r15)(r4)
@@ -342,6 +355,14 @@ _GLOBAL(__kvmppc_vcpu_run)
lwz r30, VCPU_GPR(r30)(r4)
lwz r31, VCPU_GPR(r31)(r4)
+#ifdef CONFIG_SPE
+ /* save host SPEFSCR and load guest SPEFSCR */
+ mfspr r3, SPRN_SPEFSCR
+ stw r3, VCPU_HOST_SPEFSCR(r4)
+ lwz r3, VCPU_SPEFSCR(r4)
+ mtspr SPRN_SPEFSCR, r3
+#endif
+
lightweight_exit:
stw r2, HOST_R2(r1)
@@ -350,6 +371,11 @@ lightweight_exit:
lwz r3, VCPU_SHADOW_PID(r4)
mtspr SPRN_PID, r3
+#ifdef CONFIG_FSL_BOOKE
+ lwz r3, VCPU_SHADOW_PID1(r4)
+ mtspr SPRN_PID1, r3
+#endif
+
#ifdef CONFIG_44x
iccci 0, 0 /* XXX hack */
#endif
@@ -405,20 +431,17 @@ lightweight_exit:
/* Finish loading guest volatiles and jump to guest. */
lwz r3, VCPU_CTR(r4)
+ lwz r5, VCPU_CR(r4)
+ lwz r6, VCPU_PC(r4)
+ lwz r7, VCPU_SHADOW_MSR(r4)
mtctr r3
- lwz r3, VCPU_CR(r4)
- mtcr r3
+ mtcr r5
+ mtsrr0 r6
+ mtsrr1 r7
lwz r5, VCPU_GPR(r5)(r4)
lwz r6, VCPU_GPR(r6)(r4)
lwz r7, VCPU_GPR(r7)(r4)
lwz r8, VCPU_GPR(r8)(r4)
- lwz r3, VCPU_PC(r4)
- mtsrr0 r3
- lwz r3, VCPU_SHARED(r4)
- lwz r3, (VCPU_SHARED_MSR + 4)(r3)
- oris r3, r3, KVMPPC_MSR_MASK@h
- ori r3, r3, KVMPPC_MSR_MASK@l
- mtsrr1 r3
/* Clear any debug events which occurred since we disabled MSR[DE].
* XXX This gives us a 3-instruction window in which a breakpoint
@@ -430,3 +453,24 @@ lightweight_exit:
lwz r3, VCPU_GPR(r3)(r4)
lwz r4, VCPU_GPR(r4)(r4)
rfi
+
+#ifdef CONFIG_SPE
+_GLOBAL(kvmppc_save_guest_spe)
+ cmpi 0,r3,0
+ beqlr-
+ SAVE_32EVRS(0, r4, r3, VCPU_EVR)
+ evxor evr6, evr6, evr6
+ evmwumiaa evr6, evr6, evr6
+ li r4,VCPU_ACC
+ evstddx evr6, r4, r3 /* save acc */
+ blr
+
+_GLOBAL(kvmppc_load_guest_spe)
+ cmpi 0,r3,0
+ beqlr-
+ li r4,VCPU_ACC
+ evlddx evr6,r4,r3
+ evmra evr6,evr6 /* load acc */
+ REST_32EVRS(0, r4, r3, VCPU_EVR)
+ blr
+#endif
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 318dbc61ba4..797a7447c26 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Yu Liu, <yu.liu@freescale.com>
*
@@ -41,6 +41,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
kvmppc_e500_tlb_put(vcpu);
+
+#ifdef CONFIG_SPE
+ if (vcpu->arch.shadow_msr & MSR_SPE)
+ kvmppc_vcpu_disable_spe(vcpu);
+#endif
}
int kvmppc_core_check_processor_compat(void)
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 69cd665a0ca..d48ae396f41 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -81,8 +81,12 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
kvmppc_set_pid(vcpu, spr_val);
break;
case SPRN_PID1:
+ if (spr_val != 0)
+ return EMULATE_FAIL;
vcpu_e500->pid[1] = spr_val; break;
case SPRN_PID2:
+ if (spr_val != 0)
+ return EMULATE_FAIL;
vcpu_e500->pid[2] = spr_val; break;
case SPRN_MAS0:
vcpu_e500->mas0 = spr_val; break;
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index b18fe353397..13c432ea2fa 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -28,8 +28,196 @@
#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)
+struct id {
+ unsigned long val;
+ struct id **pentry;
+};
+
+#define NUM_TIDS 256
+
+/*
+ * This table provides mappings from:
+ * (guestAS,guestTID,guestPR) --> ID of physical cpu
+ * guestAS [0..1]
+ * guestTID [0..255]
+ * guestPR [0..1]
+ * ID [1..255]
+ * Each vcpu keeps one vcpu_id_table.
+ */
+struct vcpu_id_table {
+ struct id id[2][NUM_TIDS][2];
+};
+
+/*
+ * This table provides the reverse mapping of vcpu_id_table:
+ * ID --> address of vcpu_id_table item.
+ * Each physical core has one pcpu_id_table.
+ */
+struct pcpu_id_table {
+ struct id *entry[NUM_TIDS];
+};
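These two tables only constitute a valid mapping when they agree with each other; a worked example of that invariant (illustrative comment only, not part of the patch, with made-up numbers):

/*
 * Example: guest context (AS=0, TID=42, PR=1) was assigned shadow id 7
 * on this core by local_sid_setup_one().  Afterwards:
 *
 *   idt->id[0][42][1].val    == 7
 *   idt->id[0][42][1].pentry == &pcpu_sids.entry[7]   (on this core)
 *   pcpu_sids.entry[7]       == &idt->id[0][42][1]
 *
 * Clearing either side (a per-core sid flush, or a per-vcpu
 * invalidation) breaks the agreement, so local_sid_lookup() returns -1
 * and a fresh shadow id is allocated on the next use.
 */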
+
+static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);
+
+/* This variable keeps the last used shadow ID on the local core.
+ * The valid range of a shadow ID is [1..255]. */
+static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
+
static unsigned int tlb1_entry_num;
+/*
+ * Allocate a free shadow id and set up a valid sid mapping in the given entry.
+ * A mapping is only valid when the vcpu_id_table and pcpu_id_table entries match.
+ *
+ * The caller must have preemption disabled, and keep it that way until
+ * it has finished with the returned shadow id (either written into the
+ * TLB or arch.shadow_pid, or discarded).
+ */
+static inline int local_sid_setup_one(struct id *entry)
+{
+ unsigned long sid;
+ int ret = -1;
+
+ sid = ++(__get_cpu_var(pcpu_last_used_sid));
+ if (sid < NUM_TIDS) {
+ __get_cpu_var(pcpu_sids).entry[sid] = entry;
+ entry->val = sid;
+ entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
+ ret = sid;
+ }
+
+ /*
+ * If sid == NUM_TIDS, we've run out of sids. We return -1, and
+ * the caller will invalidate everything and start over.
+ *
+ * sid > NUM_TIDS indicates a race, which we disable preemption to
+ * avoid.
+ */
+ WARN_ON(sid > NUM_TIDS);
+
+ return ret;
+}
+
+/*
+ * Check whether the given entry contains a valid shadow id mapping.
+ * An ID mapping is considered valid only if
+ * both the vcpu and pcpu tables know about it.
+ *
+ * The caller must have preemption disabled, and keep it that way until
+ * it has finished with the returned shadow id (either written into the
+ * TLB or arch.shadow_pid, or discarded).
+ */
+static inline int local_sid_lookup(struct id *entry)
+{
+ if (entry && entry->val != 0 &&
+ __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
+ entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
+ return entry->val;
+ return -1;
+}
+
+/* Invalidate all id mappings on local core */
+static inline void local_sid_destroy_all(void)
+{
+ preempt_disable();
+ __get_cpu_var(pcpu_last_used_sid) = 0;
+ memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
+ preempt_enable();
+}
+
+static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
+ return vcpu_e500->idt;
+}
+
+static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ kfree(vcpu_e500->idt);
+}
+
+/* Invalidate all mappings on vcpu */
+static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));
+
+ /* Update shadow pid when mappings are changed */
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+}
+
+/* Invalidate one ID mapping on vcpu */
+static inline void kvmppc_e500_id_table_reset_one(
+ struct kvmppc_vcpu_e500 *vcpu_e500,
+ int as, int pid, int pr)
+{
+ struct vcpu_id_table *idt = vcpu_e500->idt;
+
+ BUG_ON(as >= 2);
+ BUG_ON(pid >= NUM_TIDS);
+ BUG_ON(pr >= 2);
+
+ idt->id[as][pid][pr].val = 0;
+ idt->id[as][pid][pr].pentry = NULL;
+
+ /* Update shadow pid when mappings are changed */
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+}
+
+/*
+ * Map guest (vcpu,AS,ID,PR) to physical core shadow id.
+ * This function first looks up whether a valid mapping exists;
+ * if not, it creates a new one.
+ *
+ * The caller must have preemption disabled, and keep it that way until
+ * it has finished with the returned shadow id (either written into the
+ * TLB or arch.shadow_pid, or discarded).
+ */
+static unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
+ unsigned int as, unsigned int gid,
+ unsigned int pr, int avoid_recursion)
+{
+ struct vcpu_id_table *idt = vcpu_e500->idt;
+ int sid;
+
+ BUG_ON(as >= 2);
+ BUG_ON(gid >= NUM_TIDS);
+ BUG_ON(pr >= 2);
+
+ sid = local_sid_lookup(&idt->id[as][gid][pr]);
+
+ while (sid <= 0) {
+ /* No mapping yet */
+ sid = local_sid_setup_one(&idt->id[as][gid][pr]);
+ if (sid <= 0) {
+ _tlbil_all();
+ local_sid_destroy_all();
+ }
+
+ /* Update shadow pid when mappings are changed */
+ if (!avoid_recursion)
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+ }
+
+ return sid;
+}
+
+/* Map the guest PID to a shadow ID.
+ * We use PID to hold the shadow of the current non-zero guest PID,
+ * and PID1 to hold the shadow of guest PID 0, so that a guest tlbe
+ * with TID=0 can be matched at any time. */
+void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ preempt_disable();
+ vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
+ get_cur_as(&vcpu_e500->vcpu),
+ get_cur_pid(&vcpu_e500->vcpu),
+ get_cur_pr(&vcpu_e500->vcpu), 1);
+ vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
+ get_cur_as(&vcpu_e500->vcpu), 0,
+ get_cur_pr(&vcpu_e500->vcpu), 1);
+ preempt_enable();
+}
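A minimal sketch of how the two shadow values computed here are consumed at guest entry; this mirrors the booke_interrupts.S hunk earlier in the patch and is shown as a C pseudo-helper purely for illustration (the helper name is made up):

/* Illustrative only: the low-level guest-entry path loads the shadow
 * ids into the hardware PID registers, so TLB entries tagged with the
 * current guest PID (via PID) and with TID=0 (via PID1) both match. */
static inline void example_load_shadow_pids(struct kvm_vcpu *vcpu)
{
	mtspr(SPRN_PID, vcpu->arch.shadow_pid);
#ifdef CONFIG_FSL_BOOKE
	mtspr(SPRN_PID1, vcpu->arch.shadow_pid1);
#endif
}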
+
void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -41,25 +229,14 @@ void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
for (tlbsel = 0; tlbsel < 2; tlbsel++) {
printk("Guest TLB%d:\n", tlbsel);
- for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
- tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
+ for (i = 0; i < vcpu_e500->gtlb_size[tlbsel]; i++) {
+ tlbe = &vcpu_e500->gtlb_arch[tlbsel][i];
if (tlbe->mas1 & MAS1_VALID)
printk(" G[%d][%3d] | %08X | %08X | %08X | %08X |\n",
tlbsel, i, tlbe->mas1, tlbe->mas2,
tlbe->mas3, tlbe->mas7);
}
}
-
- for (tlbsel = 0; tlbsel < 2; tlbsel++) {
- printk("Shadow TLB%d:\n", tlbsel);
- for (i = 0; i < vcpu_e500->shadow_tlb_size[tlbsel]; i++) {
- tlbe = &vcpu_e500->shadow_tlb[tlbsel][i];
- if (tlbe->mas1 & MAS1_VALID)
- printk(" S[%d][%3d] | %08X | %08X | %08X | %08X |\n",
- tlbsel, i, tlbe->mas1, tlbe->mas2,
- tlbe->mas3, tlbe->mas7);
- }
- }
}
static inline unsigned int tlb0_get_next_victim(
@@ -67,16 +244,17 @@ static inline unsigned int tlb0_get_next_victim(
{
unsigned int victim;
- victim = vcpu_e500->guest_tlb_nv[0]++;
- if (unlikely(vcpu_e500->guest_tlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
- vcpu_e500->guest_tlb_nv[0] = 0;
+ victim = vcpu_e500->gtlb_nv[0]++;
+ if (unlikely(vcpu_e500->gtlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
+ vcpu_e500->gtlb_nv[0] = 0;
return victim;
}
static inline unsigned int tlb1_max_shadow_size(void)
{
- return tlb1_entry_num - tlbcam_index;
+ /* reserve one entry for magic page */
+ return tlb1_entry_num - tlbcam_index - 1;
}
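A worked example of the TLB1 budget after this change (the concrete numbers are hypothetical):

/*
 * Example: with tlb1_entry_num == 16 hardware entries and
 * tlbcam_index == 4 host CAM entries, tlb1_max_shadow_size() is now
 * 16 - 4 - 1 = 11.  Shadow entries occupy host slots 15 down to 5 (via
 * to_htlb1_esel()), host slots 0..3 stay pinned for the kernel, and
 * the reserved slot (esel == tlbcam_index == 4) is where
 * kvmppc_map_magic() installs the magic-page mapping.
 */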
static inline int tlbe_is_writable(struct tlbe *tlbe)
@@ -112,72 +290,149 @@ static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
/*
* writing shadow tlb entry to host TLB
*/
-static inline void __write_host_tlbe(struct tlbe *stlbe)
+static inline void __write_host_tlbe(struct tlbe *stlbe, uint32_t mas0)
{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ mtspr(SPRN_MAS0, mas0);
mtspr(SPRN_MAS1, stlbe->mas1);
mtspr(SPRN_MAS2, stlbe->mas2);
mtspr(SPRN_MAS3, stlbe->mas3);
mtspr(SPRN_MAS7, stlbe->mas7);
- __asm__ __volatile__ ("tlbwe\n" : : );
+ asm volatile("isync; tlbwe" : : : "memory");
+ local_irq_restore(flags);
}
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
- int tlbsel, int esel)
+ int tlbsel, int esel, struct tlbe *stlbe)
{
- struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
-
- local_irq_disable();
if (tlbsel == 0) {
- __write_host_tlbe(stlbe);
+ __write_host_tlbe(stlbe,
+ MAS0_TLBSEL(0) |
+ MAS0_ESEL(esel & (KVM_E500_TLB0_WAY_NUM - 1)));
} else {
- unsigned register mas0;
-
- mas0 = mfspr(SPRN_MAS0);
-
- mtspr(SPRN_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(to_htlb1_esel(esel)));
- __write_host_tlbe(stlbe);
-
- mtspr(SPRN_MAS0, mas0);
+ __write_host_tlbe(stlbe,
+ MAS0_TLBSEL(1) |
+ MAS0_ESEL(to_htlb1_esel(esel)));
}
- local_irq_enable();
+ trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
+ stlbe->mas3, stlbe->mas7);
+}
+
+void kvmppc_map_magic(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ struct tlbe magic;
+ ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
+ unsigned int stid;
+ pfn_t pfn;
+
+ pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
+ get_page(pfn_to_page(pfn));
+
+ preempt_disable();
+ stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);
+
+ magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
+ MAS1_TSIZE(BOOK3E_PAGESZ_4K);
+ magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
+ magic.mas3 = (pfn << PAGE_SHIFT) |
+ MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
+ magic.mas7 = pfn >> (32 - PAGE_SHIFT);
+
+ __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
+ preempt_enable();
}
void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- int i;
- unsigned register mas0;
-
- /* Load all valid TLB1 entries to reduce guest tlb miss fault */
- local_irq_disable();
- mas0 = mfspr(SPRN_MAS0);
- for (i = 0; i < tlb1_max_shadow_size(); i++) {
- struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];
-
- if (get_tlb_v(stlbe)) {
- mtspr(SPRN_MAS0, MAS0_TLBSEL(1)
- | MAS0_ESEL(to_htlb1_esel(i)));
- __write_host_tlbe(stlbe);
- }
- }
- mtspr(SPRN_MAS0, mas0);
- local_irq_enable();
+
+	/* The shadow PID may have expired on the local core */
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}
void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
{
- _tlbil_all();
+}
+
+static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
+ int tlbsel, int esel)
+{
+ struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
+ struct vcpu_id_table *idt = vcpu_e500->idt;
+ unsigned int pr, tid, ts, pid;
+ u32 val, eaddr;
+ unsigned long flags;
+
+ ts = get_tlb_ts(gtlbe);
+ tid = get_tlb_tid(gtlbe);
+
+ preempt_disable();
+
+ /* One guest ID may be mapped to two shadow IDs */
+ for (pr = 0; pr < 2; pr++) {
+ /*
+ * The shadow PID can have a valid mapping on at most one
+ * host CPU. In the common case, it will be valid on this
+ * CPU, in which case (for TLB0) we do a local invalidation
+ * of the specific address.
+ *
+ * If the shadow PID is not valid on the current host CPU, or
+ * if we're invalidating a TLB1 entry, we invalidate the
+ * entire shadow PID.
+ */
+ if (tlbsel == 1 ||
+ (pid = local_sid_lookup(&idt->id[ts][tid][pr])) <= 0) {
+ kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
+ continue;
+ }
+
+ /*
+ * The guest is invalidating a TLB0 entry which is in a PID
+ * that has a valid shadow mapping on this host CPU. We
+		 * search host TLB0 to invalidate its shadow TLB entry,
+ * similar to __tlbil_va except that we need to look in AS1.
+ */
+ val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
+ eaddr = get_tlb_eaddr(gtlbe);
+
+ local_irq_save(flags);
+
+ mtspr(SPRN_MAS6, val);
+ asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
+ val = mfspr(SPRN_MAS1);
+ if (val & MAS1_VALID) {
+ mtspr(SPRN_MAS1, val & ~MAS1_VALID);
+ asm volatile("tlbwe");
+ }
+
+ local_irq_restore(flags);
+ }
+
+ preempt_enable();
}
/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
+ int size = vcpu_e500->gtlb_size[tlbsel];
+ int set_base;
int i;
- /* XXX Replace loop with fancy data structures. */
- for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
- struct tlbe *tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
+ if (tlbsel == 0) {
+ int mask = size / KVM_E500_TLB0_WAY_NUM - 1;
+ set_base = (eaddr >> PAGE_SHIFT) & mask;
+ set_base *= KVM_E500_TLB0_WAY_NUM;
+ size = KVM_E500_TLB0_WAY_NUM;
+ } else {
+ set_base = 0;
+ }
+
+ for (i = 0; i < size; i++) {
+ struct tlbe *tlbe = &vcpu_e500->gtlb_arch[tlbsel][set_base + i];
unsigned int tid;
if (eaddr < get_tlb_eaddr(tlbe))
@@ -196,66 +451,32 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
if (get_tlb_ts(tlbe) != as && as != -1)
continue;
- return i;
+ return set_base + i;
}
return -1;
}
-static void kvmppc_e500_shadow_release(struct kvmppc_vcpu_e500 *vcpu_e500,
- int tlbsel, int esel)
-{
- struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
- struct page *page = vcpu_e500->shadow_pages[tlbsel][esel];
-
- if (page) {
- vcpu_e500->shadow_pages[tlbsel][esel] = NULL;
-
- if (get_tlb_v(stlbe)) {
- if (tlbe_is_writable(stlbe))
- kvm_release_page_dirty(page);
- else
- kvm_release_page_clean(page);
- }
- }
-}
-
-static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
- int tlbsel, int esel)
+static inline void kvmppc_e500_priv_setup(struct tlbe_priv *priv,
+ struct tlbe *gtlbe,
+ pfn_t pfn)
{
- struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
+ priv->pfn = pfn;
+ priv->flags = E500_TLB_VALID;
- kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);
- stlbe->mas1 = 0;
- trace_kvm_stlb_inval(index_of(tlbsel, esel));
+ if (tlbe_is_writable(gtlbe))
+ priv->flags |= E500_TLB_DIRTY;
}
-static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
- gva_t eaddr, gva_t eend, u32 tid)
+static inline void kvmppc_e500_priv_release(struct tlbe_priv *priv)
{
- unsigned int pid = tid & 0xff;
- unsigned int i;
-
- /* XXX Replace loop with fancy data structures. */
- for (i = 0; i < vcpu_e500->guest_tlb_size[1]; i++) {
- struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];
- unsigned int tid;
-
- if (!get_tlb_v(stlbe))
- continue;
-
- if (eend < get_tlb_eaddr(stlbe))
- continue;
+ if (priv->flags & E500_TLB_VALID) {
+ if (priv->flags & E500_TLB_DIRTY)
+ kvm_release_pfn_dirty(priv->pfn);
+ else
+ kvm_release_pfn_clean(priv->pfn);
- if (eaddr > get_tlb_end(stlbe))
- continue;
-
- tid = get_tlb_tid(stlbe);
- if (tid && (tid != pid))
- continue;
-
- kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);
- write_host_tlbe(vcpu_e500, 1, i);
+ priv->flags = 0;
}
}
@@ -273,7 +494,7 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
tsized = (vcpu_e500->mas4 >> 7) & 0x1f;
vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
- | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
+ | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
| MAS1_TID(vcpu_e500->pid[pidsel])
| MAS1_TSIZE(tsized);
@@ -286,56 +507,154 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
vcpu_e500->mas7 = 0;
}
-static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel)
+static inline void kvmppc_e500_setup_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
+ struct tlbe *gtlbe, int tsize,
+ struct tlbe_priv *priv,
+ u64 gvaddr, struct tlbe *stlbe)
{
- struct page *new_page;
- struct tlbe *stlbe;
- hpa_t hpaddr;
-
- stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
-
- /* Get reference to new page. */
- new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn);
- if (is_error_page(new_page)) {
- printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n",
- (long)gfn);
- kvm_release_page_clean(new_page);
- return;
- }
- hpaddr = page_to_phys(new_page);
-
- /* Drop reference to old page. */
- kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);
+ pfn_t pfn = priv->pfn;
+ unsigned int stid;
- vcpu_e500->shadow_pages[tlbsel][esel] = new_page;
+ stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe),
+ get_tlb_tid(gtlbe),
+ get_cur_pr(&vcpu_e500->vcpu), 0);
- /* Force TS=1 IPROT=0 TSIZE=4KB for all guest mappings. */
- stlbe->mas1 = MAS1_TSIZE(BOOK3E_PAGESZ_4K)
- | MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
+ /* Force TS=1 IPROT=0 for all guest mappings. */
+ stlbe->mas1 = MAS1_TSIZE(tsize)
+ | MAS1_TID(stid) | MAS1_TS | MAS1_VALID;
stlbe->mas2 = (gvaddr & MAS2_EPN)
| e500_shadow_mas2_attrib(gtlbe->mas2,
vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
- stlbe->mas3 = (hpaddr & MAS3_RPN)
+ stlbe->mas3 = ((pfn << PAGE_SHIFT) & MAS3_RPN)
| e500_shadow_mas3_attrib(gtlbe->mas3,
vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
- stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN;
+ stlbe->mas7 = (pfn >> (32 - PAGE_SHIFT)) & MAS7_RPN;
+}
- trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
- stlbe->mas3, stlbe->mas7);
+
+static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+ u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel,
+ struct tlbe *stlbe)
+{
+ struct kvm_memory_slot *slot;
+ unsigned long pfn, hva;
+ int pfnmap = 0;
+ int tsize = BOOK3E_PAGESZ_4K;
+ struct tlbe_priv *priv;
+
+ /*
+ * Translate guest physical to true physical, acquiring
+ * a page reference if it is normal, non-reserved memory.
+ *
+ * gfn_to_memslot() must succeed because otherwise we wouldn't
+ * have gotten this far. Eventually we should just pass the slot
+ * pointer through from the first lookup.
+ */
+ slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
+ hva = gfn_to_hva_memslot(slot, gfn);
+
+ if (tlbsel == 1) {
+ struct vm_area_struct *vma;
+ down_read(&current->mm->mmap_sem);
+
+ vma = find_vma(current->mm, hva);
+ if (vma && hva >= vma->vm_start &&
+ (vma->vm_flags & VM_PFNMAP)) {
+ /*
+ * This VMA is a physically contiguous region (e.g.
+ * /dev/mem) that bypasses normal Linux page
+ * management. Find the overlap between the
+ * vma and the memslot.
+ */
+
+ unsigned long start, end;
+ unsigned long slot_start, slot_end;
+
+ pfnmap = 1;
+
+ start = vma->vm_pgoff;
+ end = start +
+ ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
+
+ pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);
+
+ slot_start = pfn - (gfn - slot->base_gfn);
+ slot_end = slot_start + slot->npages;
+
+ if (start < slot_start)
+ start = slot_start;
+ if (end > slot_end)
+ end = slot_end;
+
+ tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
+ MAS1_TSIZE_SHIFT;
+
+ /*
+ * e500 doesn't implement the lowest tsize bit,
+ * or 1K pages.
+ */
+ tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
+
+ /*
+ * Now find the largest tsize (up to what the guest
+ * requested) that will cover gfn, stay within the
+ * range, and for which gfn and pfn are mutually
+ * aligned.
+ */
+
+ for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
+ unsigned long gfn_start, gfn_end, tsize_pages;
+ tsize_pages = 1 << (tsize - 2);
+
+ gfn_start = gfn & ~(tsize_pages - 1);
+ gfn_end = gfn_start + tsize_pages;
+
+ if (gfn_start + pfn - gfn < start)
+ continue;
+ if (gfn_end + pfn - gfn > end)
+ continue;
+ if ((gfn & (tsize_pages - 1)) !=
+ (pfn & (tsize_pages - 1)))
+ continue;
+
+ gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
+ pfn &= ~(tsize_pages - 1);
+ break;
+ }
+ }
+
+ up_read(&current->mm->mmap_sem);
+ }
+
+ if (likely(!pfnmap)) {
+ pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
+ if (is_error_pfn(pfn)) {
+ printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
+ (long)gfn);
+ kvm_release_pfn_clean(pfn);
+ return;
+ }
+ }
+
+ /* Drop old priv and setup new one. */
+ priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
+ kvmppc_e500_priv_release(priv);
+ kvmppc_e500_priv_setup(priv, gtlbe, pfn);
+
+ kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, priv, gvaddr, stlbe);
}
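A worked example of the tsize/alignment search in kvmppc_e500_shadow_map() above (illustrative comment only; the numbers are made up):

/*
 * Example: the guest asks for a 64 KiB TLB1 mapping, i.e. tsize == 6,
 * so tsize_pages == 1 << (6 - 2) == 16 four-KiB pages.  The mapping is
 * kept at that size only if gfn and pfn agree in their low four bits
 * (both sit at the same offset inside a 16-page-aligned block) and the
 * whole block stays inside the vma/memslot overlap.  Otherwise the
 * loop retries at 16 KiB and finally falls back to a plain 4 KiB
 * mapping, which needs no extra alignment.
 */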
/* XXX only map the one-one case, for now use TLB0 */
-static int kvmppc_e500_stlbe_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- int tlbsel, int esel)
+static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+ int esel, struct tlbe *stlbe)
{
struct tlbe *gtlbe;
- gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
+ gtlbe = &vcpu_e500->gtlb_arch[0][esel];
kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
- gtlbe, tlbsel, esel);
+ gtlbe, 0, esel, stlbe);
return esel;
}
@@ -344,53 +663,37 @@ static int kvmppc_e500_stlbe_map(struct kvmppc_vcpu_e500 *vcpu_e500,
* the shadow TLB. */
/* XXX for both one-one and one-to-many , for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe)
+ u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, struct tlbe *stlbe)
{
unsigned int victim;
- victim = vcpu_e500->guest_tlb_nv[1]++;
+ victim = vcpu_e500->gtlb_nv[1]++;
- if (unlikely(vcpu_e500->guest_tlb_nv[1] >= tlb1_max_shadow_size()))
- vcpu_e500->guest_tlb_nv[1] = 0;
+ if (unlikely(vcpu_e500->gtlb_nv[1] >= tlb1_max_shadow_size()))
+ vcpu_e500->gtlb_nv[1] = 0;
- kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim);
+ kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim, stlbe);
return victim;
}
-/* Invalidate all guest kernel mappings when enter usermode,
- * so that when they fault back in they will get the
- * proper permission bits. */
-void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
+void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
- if (usermode) {
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- int i;
-
- /* XXX Replace loop with fancy data structures. */
- for (i = 0; i < tlb1_max_shadow_size(); i++)
- kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- _tlbil_all();
- }
+ /* Recalc shadow pid since MSR changes */
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}
-static int kvmppc_e500_gtlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
- int tlbsel, int esel)
+static inline int kvmppc_e500_gtlbe_invalidate(
+ struct kvmppc_vcpu_e500 *vcpu_e500,
+ int tlbsel, int esel)
{
- struct tlbe *gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
+ struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
if (unlikely(get_tlb_iprot(gtlbe)))
return -1;
- if (tlbsel == 1) {
- kvmppc_e500_tlb1_invalidate(vcpu_e500, get_tlb_eaddr(gtlbe),
- get_tlb_end(gtlbe),
- get_tlb_tid(gtlbe));
- } else {
- kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);
- }
-
gtlbe->mas1 = 0;
return 0;
@@ -401,13 +704,14 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
int esel;
if (value & MMUCSR0_TLB0FI)
- for (esel = 0; esel < vcpu_e500->guest_tlb_size[0]; esel++)
+ for (esel = 0; esel < vcpu_e500->gtlb_size[0]; esel++)
kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
if (value & MMUCSR0_TLB1FI)
- for (esel = 0; esel < vcpu_e500->guest_tlb_size[1]; esel++)
+ for (esel = 0; esel < vcpu_e500->gtlb_size[1]; esel++)
kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);
- _tlbil_all();
+ /* Invalidate all vcpu id mappings */
+ kvmppc_e500_id_table_reset_all(vcpu_e500);
return EMULATE_DONE;
}
@@ -428,7 +732,7 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
if (ia) {
/* invalidate all entries */
- for (esel = 0; esel < vcpu_e500->guest_tlb_size[tlbsel]; esel++)
+ for (esel = 0; esel < vcpu_e500->gtlb_size[tlbsel]; esel++)
kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
} else {
ea &= 0xfffff000;
@@ -438,7 +742,8 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
}
- _tlbil_all();
+ /* Invalidate all vcpu id mappings */
+ kvmppc_e500_id_table_reset_all(vcpu_e500);
return EMULATE_DONE;
}
@@ -452,9 +757,9 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
tlbsel = get_tlb_tlbsel(vcpu_e500);
esel = get_tlb_esel(vcpu_e500, tlbsel);
- gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
+ gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
vcpu_e500->mas0 &= ~MAS0_NV(~0);
- vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
+ vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
vcpu_e500->mas1 = gtlbe->mas1;
vcpu_e500->mas2 = gtlbe->mas2;
vcpu_e500->mas3 = gtlbe->mas3;
@@ -477,14 +782,14 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
for (tlbsel = 0; tlbsel < 2; tlbsel++) {
esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
if (esel >= 0) {
- gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
+ gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
break;
}
}
if (gtlbe) {
vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
- | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
+ | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
vcpu_e500->mas1 = gtlbe->mas1;
vcpu_e500->mas2 = gtlbe->mas2;
vcpu_e500->mas3 = gtlbe->mas3;
@@ -497,7 +802,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
- | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
+ | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
| (vcpu_e500->mas6 & (MAS6_SAS ? MAS1_TS : 0))
| (vcpu_e500->mas4 & MAS4_TSIZED(~0));
@@ -514,23 +819,16 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- u64 eaddr;
- u64 raddr;
- u32 tid;
struct tlbe *gtlbe;
- int tlbsel, esel, stlbsel, sesel;
+ int tlbsel, esel;
tlbsel = get_tlb_tlbsel(vcpu_e500);
esel = get_tlb_esel(vcpu_e500, tlbsel);
- gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
+ gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
- if (get_tlb_v(gtlbe) && tlbsel == 1) {
- eaddr = get_tlb_eaddr(gtlbe);
- tid = get_tlb_tid(gtlbe);
- kvmppc_e500_tlb1_invalidate(vcpu_e500, eaddr,
- get_tlb_end(gtlbe), tid);
- }
+ if (get_tlb_v(gtlbe))
+ kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);
gtlbe->mas1 = vcpu_e500->mas1;
gtlbe->mas2 = vcpu_e500->mas2;
@@ -542,6 +840,12 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
if (tlbe_is_host_safe(vcpu, gtlbe)) {
+ struct tlbe stlbe;
+ int stlbsel, sesel;
+ u64 eaddr;
+ u64 raddr;
+
+ preempt_disable();
switch (tlbsel) {
case 0:
/* TLB0 */
@@ -549,7 +853,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
stlbsel = 0;
- sesel = kvmppc_e500_stlbe_map(vcpu_e500, 0, esel);
+ sesel = kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
break;
@@ -564,13 +868,14 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
* are mapped on the fly. */
stlbsel = 1;
sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
- raddr >> PAGE_SHIFT, gtlbe);
+ raddr >> PAGE_SHIFT, gtlbe, &stlbe);
break;
default:
BUG();
}
- write_host_tlbe(vcpu_e500, stlbsel, sesel);
+ write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe);
+ preempt_enable();
}
kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
@@ -610,7 +915,7 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
struct tlbe *gtlbe =
- &vcpu_e500->guest_tlb[tlbsel_of(index)][esel_of(index)];
+ &vcpu_e500->gtlb_arch[tlbsel_of(index)][esel_of(index)];
u64 pgmask = get_tlb_bytes(gtlbe) - 1;
return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
@@ -618,38 +923,37 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- int tlbsel, i;
-
- for (tlbsel = 0; tlbsel < 2; tlbsel++)
- for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++)
- kvmppc_e500_shadow_release(vcpu_e500, tlbsel, i);
-
- /* discard all guest mapping */
- _tlbil_all();
}
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
unsigned int index)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ struct tlbe_priv *priv;
+ struct tlbe *gtlbe, stlbe;
int tlbsel = tlbsel_of(index);
int esel = esel_of(index);
int stlbsel, sesel;
+ gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
+
+ preempt_disable();
switch (tlbsel) {
case 0:
stlbsel = 0;
sesel = esel;
+ priv = &vcpu_e500->gtlb_priv[stlbsel][sesel];
+
+ kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
+ priv, eaddr, &stlbe);
break;
case 1: {
gfn_t gfn = gpaddr >> PAGE_SHIFT;
- struct tlbe *gtlbe
- = &vcpu_e500->guest_tlb[tlbsel][esel];
stlbsel = 1;
- sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe);
+ sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
+ gtlbe, &stlbe);
break;
}
@@ -657,7 +961,9 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
BUG();
break;
}
- write_host_tlbe(vcpu_e500, stlbsel, sesel);
+
+ write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe);
+ preempt_enable();
}
int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
@@ -679,8 +985,10 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- vcpu_e500->pid[0] = vcpu->arch.shadow_pid =
- vcpu->arch.pid = pid;
+ if (vcpu->arch.pid != pid) {
+ vcpu_e500->pid[0] = vcpu->arch.pid = pid;
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+ }
}
void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
@@ -688,14 +996,14 @@ void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
struct tlbe *tlbe;
/* Insert large initial mapping for guest. */
- tlbe = &vcpu_e500->guest_tlb[1][0];
+ tlbe = &vcpu_e500->gtlb_arch[1][0];
tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
tlbe->mas2 = 0;
tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
tlbe->mas7 = 0;
/* 4K map for serial output. Used by kernel wrapper. */
- tlbe = &vcpu_e500->guest_tlb[1][1];
+ tlbe = &vcpu_e500->gtlb_arch[1][1];
tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
@@ -706,68 +1014,64 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF;
- vcpu_e500->guest_tlb_size[0] = KVM_E500_TLB0_SIZE;
- vcpu_e500->guest_tlb[0] =
+ vcpu_e500->gtlb_size[0] = KVM_E500_TLB0_SIZE;
+ vcpu_e500->gtlb_arch[0] =
kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
- if (vcpu_e500->guest_tlb[0] == NULL)
+ if (vcpu_e500->gtlb_arch[0] == NULL)
goto err_out;
- vcpu_e500->shadow_tlb_size[0] = KVM_E500_TLB0_SIZE;
- vcpu_e500->shadow_tlb[0] =
- kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
- if (vcpu_e500->shadow_tlb[0] == NULL)
- goto err_out_guest0;
-
- vcpu_e500->guest_tlb_size[1] = KVM_E500_TLB1_SIZE;
- vcpu_e500->guest_tlb[1] =
+ vcpu_e500->gtlb_size[1] = KVM_E500_TLB1_SIZE;
+ vcpu_e500->gtlb_arch[1] =
kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
- if (vcpu_e500->guest_tlb[1] == NULL)
- goto err_out_shadow0;
+ if (vcpu_e500->gtlb_arch[1] == NULL)
+ goto err_out_guest0;
- vcpu_e500->shadow_tlb_size[1] = tlb1_entry_num;
- vcpu_e500->shadow_tlb[1] =
- kzalloc(sizeof(struct tlbe) * tlb1_entry_num, GFP_KERNEL);
- if (vcpu_e500->shadow_tlb[1] == NULL)
+ vcpu_e500->gtlb_priv[0] = (struct tlbe_priv *)
+ kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
+ if (vcpu_e500->gtlb_priv[0] == NULL)
goto err_out_guest1;
+ vcpu_e500->gtlb_priv[1] = (struct tlbe_priv *)
+ kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
- vcpu_e500->shadow_pages[0] = (struct page **)
- kzalloc(sizeof(struct page *) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
- if (vcpu_e500->shadow_pages[0] == NULL)
- goto err_out_shadow1;
+ if (vcpu_e500->gtlb_priv[1] == NULL)
+ goto err_out_priv0;
- vcpu_e500->shadow_pages[1] = (struct page **)
- kzalloc(sizeof(struct page *) * tlb1_entry_num, GFP_KERNEL);
- if (vcpu_e500->shadow_pages[1] == NULL)
- goto err_out_page0;
+ if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
+ goto err_out_priv1;
/* Init TLB configuration register */
vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
- vcpu_e500->tlb0cfg |= vcpu_e500->guest_tlb_size[0];
+ vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_size[0];
vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
- vcpu_e500->tlb1cfg |= vcpu_e500->guest_tlb_size[1];
+ vcpu_e500->tlb1cfg |= vcpu_e500->gtlb_size[1];
return 0;
-err_out_page0:
- kfree(vcpu_e500->shadow_pages[0]);
-err_out_shadow1:
- kfree(vcpu_e500->shadow_tlb[1]);
+err_out_priv1:
+ kfree(vcpu_e500->gtlb_priv[1]);
+err_out_priv0:
+ kfree(vcpu_e500->gtlb_priv[0]);
err_out_guest1:
- kfree(vcpu_e500->guest_tlb[1]);
-err_out_shadow0:
- kfree(vcpu_e500->shadow_tlb[0]);
+ kfree(vcpu_e500->gtlb_arch[1]);
err_out_guest0:
- kfree(vcpu_e500->guest_tlb[0]);
+ kfree(vcpu_e500->gtlb_arch[0]);
err_out:
return -1;
}
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
- kfree(vcpu_e500->shadow_pages[1]);
- kfree(vcpu_e500->shadow_pages[0]);
- kfree(vcpu_e500->shadow_tlb[1]);
- kfree(vcpu_e500->guest_tlb[1]);
- kfree(vcpu_e500->shadow_tlb[0]);
- kfree(vcpu_e500->guest_tlb[0]);
+ int stlbsel, i;
+
+ /* release all privs */
+ for (stlbsel = 0; stlbsel < 2; stlbsel++)
+ for (i = 0; i < vcpu_e500->gtlb_size[stlbsel]; i++) {
+ struct tlbe_priv *priv =
+ &vcpu_e500->gtlb_priv[stlbsel][i];
+ kvmppc_e500_priv_release(priv);
+ }
+
+ kvmppc_e500_id_table_free(vcpu_e500);
+ kfree(vcpu_e500->gtlb_arch[1]);
+ kfree(vcpu_e500->gtlb_arch[0]);
}
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h
index 458946b4775..59b88e99a23 100644
--- a/arch/powerpc/kvm/e500_tlb.h
+++ b/arch/powerpc/kvm/e500_tlb.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Yu Liu, yu.liu@freescale.com
*
@@ -55,6 +55,7 @@ extern void kvmppc_e500_tlb_load(struct kvm_vcpu *, int);
extern int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *);
extern void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *);
extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *);
+extern void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *);
/* TLB helper functions */
static inline unsigned int get_tlb_size(const struct tlbe *tlbe)
@@ -110,6 +111,16 @@ static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
return vcpu->arch.pid & 0xff;
}
+static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
+{
+ return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
+}
+
+static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
+{
+ return !!(vcpu->arch.shared->msr & MSR_PR);
+}
+
static inline unsigned int get_cur_spid(
const struct kvmppc_vcpu_e500 *vcpu_e500)
{
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 616dd516ca1..a107c9be0fb 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -30,6 +30,7 @@
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
+#include <asm/cputhreads.h>
#include "timing.h"
#include "../mm/mmu_decl.h"
@@ -38,8 +39,12 @@
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
+#ifndef CONFIG_KVM_BOOK3S_64_HV
return !(v->arch.shared->msr & MSR_WE) ||
!!(v->arch.pending_exceptions);
+#else
+ return !(v->arch.ceded) || !!(v->arch.pending_exceptions);
+#endif
}
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
@@ -73,7 +78,8 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
}
case HC_VENDOR_KVM | KVM_HC_FEATURES:
r = HC_EV_SUCCESS;
-#if defined(CONFIG_PPC_BOOK3S) /* XXX Missing magic page on BookE */
+#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500)
+ /* XXX Missing magic page on 44x */
r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif
@@ -147,7 +153,7 @@ void kvm_arch_check_processor_compat(void *rtn)
int kvm_arch_init_vm(struct kvm *kvm)
{
- return 0;
+ return kvmppc_core_init_vm(kvm);
}
void kvm_arch_destroy_vm(struct kvm *kvm)
@@ -163,6 +169,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm->vcpus[i] = NULL;
atomic_set(&kvm->online_vcpus, 0);
+
+ kvmppc_core_destroy_vm(kvm);
+
mutex_unlock(&kvm->lock);
}
@@ -180,10 +189,13 @@ int kvm_dev_ioctl_check_extension(long ext)
#else
case KVM_CAP_PPC_SEGSTATE:
#endif
- case KVM_CAP_PPC_PAIRED_SINGLES:
case KVM_CAP_PPC_UNSET_IRQ:
case KVM_CAP_PPC_IRQ_LEVEL:
case KVM_CAP_ENABLE_CAP:
+ r = 1;
+ break;
+#ifndef CONFIG_KVM_BOOK3S_64_HV
+ case KVM_CAP_PPC_PAIRED_SINGLES:
case KVM_CAP_PPC_OSI:
case KVM_CAP_PPC_GET_PVINFO:
r = 1;
@@ -191,6 +203,21 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
+#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ case KVM_CAP_SPAPR_TCE:
+ r = 1;
+ break;
+ case KVM_CAP_PPC_SMT:
+ r = threads_per_core;
+ break;
+ case KVM_CAP_PPC_RMA:
+ r = 1;
+ /* PPC970 requires an RMA */
+ if (cpu_has_feature(CPU_FTR_ARCH_201))
+ r = 2;
+ break;
+#endif
default:
r = 0;
break;
@@ -211,7 +238,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc)
{
- return 0;
+ return kvmppc_core_prepare_memory_region(kvm, mem);
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
@@ -219,7 +246,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_memory_slot old,
int user_alloc)
{
- return;
+ kvmppc_core_commit_memory_region(kvm, mem);
}
@@ -287,6 +314,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
+ vcpu->arch.dec_expires = ~(u64)0;
#ifdef CONFIG_KVM_EXIT_TIMING
mutex_init(&vcpu->arch.exit_timing_lock);
@@ -313,6 +341,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
kvmppc_core_vcpu_load(vcpu, cpu);
+ vcpu->cpu = smp_processor_id();
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -321,6 +350,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
#ifdef CONFIG_BOOKE
vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
+ vcpu->cpu = -1;
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
@@ -492,15 +522,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
for (i = 0; i < 32; i++)
kvmppc_set_gpr(vcpu, i, gprs[i]);
vcpu->arch.osi_needed = 0;
+ } else if (vcpu->arch.hcall_needed) {
+ int i;
+
+ kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
+ for (i = 0; i < 9; ++i)
+ kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
+ vcpu->arch.hcall_needed = 0;
}
kvmppc_core_deliver_interrupts(vcpu);
- local_irq_disable();
- kvm_guest_enter();
- r = __kvmppc_vcpu_run(run, vcpu);
- kvm_guest_exit();
- local_irq_enable();
+ r = kvmppc_vcpu_run(run, vcpu);
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &sigsaved, NULL);
@@ -518,6 +551,8 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
if (waitqueue_active(&vcpu->wq)) {
wake_up_interruptible(&vcpu->wq);
vcpu->stat.halt_wakeup++;
+ } else if (vcpu->cpu != -1) {
+ smp_send_reschedule(vcpu->cpu);
}
return 0;
@@ -633,6 +668,29 @@ long kvm_arch_vm_ioctl(struct file *filp,
break;
}
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ case KVM_CREATE_SPAPR_TCE: {
+ struct kvm_create_spapr_tce create_tce;
+ struct kvm *kvm = filp->private_data;
+
+ r = -EFAULT;
+ if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
+ goto out;
+ r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
+ goto out;
+ }
+
+ case KVM_ALLOCATE_RMA: {
+ struct kvm *kvm = filp->private_data;
+ struct kvm_allocate_rma rma;
+
+ r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
+ if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
+ r = -EFAULT;
+ break;
+ }
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
+
default:
r = -ENOTTY;
}
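
The hcall_needed path added above mirrors the existing OSI path: when the guest is re-entered, run->papr_hcall.ret lands in GPR3 and run->papr_hcall.args[0..8] land in GPR4-GPR12. A hedged sketch of the matching userspace side, assuming an exit reason named KVM_EXIT_PAPR_HCALL and an hcall-number field papr_hcall.nr (neither is shown in this hunk); do_hcall() is a hypothetical VMM helper:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    extern __u64 do_hcall(__u64 nr, __u64 *args);        /* hypothetical */

    static void handle_papr_exit(int vcpu_fd, struct kvm_run *run)
    {
        if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {        /* assumed name  */
            run->papr_hcall.ret = do_hcall(run->papr_hcall.nr,  /* assumed field */
                                           run->papr_hcall.args);
            /* On the next KVM_RUN the kernel copies ret into GPR3 and
             * args[0..8] into GPR4-GPR12, as in the hunk above. */
            ioctl(vcpu_fd, KVM_RUN, 0);
        }
    }
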
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index 319177df958..07b6110a4bb 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -56,15 +56,6 @@ static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
{
u64 old;
- do_div(duration, tb_ticks_per_usec);
- if (unlikely(duration > 0xFFFFFFFF)) {
- printk(KERN_ERR"%s - duration too big -> overflow"
- " duration %lld type %d exit #%d\n",
- __func__, duration, type,
- vcpu->arch.timing_count_type[type]);
- return;
- }
-
mutex_lock(&vcpu->arch.exit_timing_lock);
vcpu->arch.timing_count_type[type]++;
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index 3aca1b042b8..b135d3d397d 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -103,7 +103,7 @@ TRACE_EVENT(kvm_gtlb_write,
* Book3S trace points *
*************************************************************************/
-#ifdef CONFIG_PPC_BOOK3S
+#ifdef CONFIG_KVM_BOOK3S_PR
TRACE_EVENT(kvm_book3s_exit,
TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
@@ -252,7 +252,7 @@ TRACE_EVENT(kvm_book3s_mmu_flush,
),
TP_fast_assign(
- __entry->count = vcpu->arch.hpte_cache_count;
+ __entry->count = to_book3s(vcpu)->hpte_cache_count;
__entry->p1 = p1;
__entry->p2 = p2;
__entry->type = type;
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index 024acab588f..f60e006d90c 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -186,10 +186,11 @@ void __init MMU_init_hw(void)
unsigned long __init mmu_mapin_ram(unsigned long top)
{
unsigned long addr;
+ unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);
/* Pin in enough TLBs to cover any lowmem not covered by the
* initial 256M mapping established in head_44x.S */
- for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr;
+ for (addr = memstart + PPC_PIN_SIZE; addr < lowmem_end_addr;
addr += PPC_PIN_SIZE) {
if (mmu_has_feature(MMU_FTR_TYPE_47x))
ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
@@ -218,19 +219,25 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
{
+ u64 size;
+
+#ifndef CONFIG_RELOCATABLE
/* We don't currently support the first MEMBLOCK not mapping 0
* physical on those processors
*/
BUG_ON(first_memblock_base != 0);
+#endif
/* 44x has a 256M TLB entry pinned at boot */
- memblock_set_current_limit(min_t(u64, first_memblock_size, PPC_PIN_SIZE));
+ size = (min_t(u64, first_memblock_size, PPC_PIN_SIZE));
+ memblock_set_current_limit(first_memblock_base + size);
}
#ifdef CONFIG_SMP
void __cpuinit mmu_init_secondary(int cpu)
{
unsigned long addr;
+ unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);
/* Pin in enough TLBs to cover any lowmem not covered by the
* initial 256M mapping established in head_44x.S
@@ -241,7 +248,7 @@ void __cpuinit mmu_init_secondary(int cpu)
* stack. current (r2) isn't initialized, smp_processor_id()
* will not work, current thread info isn't accessible, ...
*/
- for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr;
+ for (addr = memstart + PPC_PIN_SIZE; addr < lowmem_end_addr;
addr += PPC_PIN_SIZE) {
if (mmu_has_feature(MMU_FTR_TYPE_47x))
ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
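
The new memstart calculation above simply rounds memstart_addr down to a PPC_PIN_SIZE boundary, so the pinning loop starts relative to the 256M entry that head_44x.S already bolted. A small worked example with an assumed non-zero start address:

    #include <assert.h>

    int main(void)
    {
        unsigned long pin_size = 0x10000000UL;          /* 256M, PPC_PIN_SIZE   */
        unsigned long memstart_addr = 0x18000000UL;     /* example: RAM at 384M */
        unsigned long memstart = memstart_addr & ~(pin_size - 1);

        assert(memstart == 0x10000000UL);
        /* The loop then pins 256M entries from memstart + PPC_PIN_SIZE
         * (0x20000000 here) up to lowmem_end_addr. */
        return 0;
    }
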
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index ad35f66c69e..5efe8c96d37 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -174,7 +174,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
die("Weird page fault", regs, SIGSEGV);
}
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
@@ -320,7 +320,7 @@ good_area:
}
if (ret & VM_FAULT_MAJOR) {
current->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address);
#ifdef CONFIG_PPC_SMLPAR
if (firmware_has_feature(FW_FEATURE_CMO)) {
@@ -331,7 +331,7 @@ good_area:
#endif
} else {
current->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address);
}
up_read(&mm->mmap_sem);
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index dfd764896db..90039bc6411 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -37,7 +37,7 @@
#define HPTE_LOCK_BIT 3
-static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
+DEFINE_RAW_SPINLOCK(native_tlbie_lock);
static inline void __tlbie(unsigned long va, int psize, int ssize)
{
@@ -51,7 +51,7 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
va &= ~0xffful;
va |= ssize << 8;
asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
- : : "r" (va), "r"(0), "i" (CPU_FTR_HVMODE_206)
+ : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
: "memory");
break;
default:
@@ -61,7 +61,7 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
va |= ssize << 8;
va |= 1; /* L */
asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
- : : "r" (va), "r"(0), "i" (CPU_FTR_HVMODE_206)
+ : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
: "memory");
break;
}
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 5de0f254dbb..c77fef56dad 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -191,38 +191,6 @@ void __init *early_get_page(void)
return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
}
-/* Free up now-unused memory */
-static void free_sec(unsigned long start, unsigned long end, const char *name)
-{
- unsigned long cnt = 0;
-
- while (start < end) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- cnt++;
- start += PAGE_SIZE;
- }
- if (cnt) {
- printk(" %ldk %s", cnt << (PAGE_SHIFT - 10), name);
- totalram_pages += cnt;
- }
-}
-
-void free_initmem(void)
-{
-#define FREESEC(TYPE) \
- free_sec((unsigned long)(&__ ## TYPE ## _begin), \
- (unsigned long)(&__ ## TYPE ## _end), \
- #TYPE);
-
- printk ("Freeing unused kernel memory:");
- FREESEC(init);
- printk("\n");
- ppc_md.progress = NULL;
-#undef FREESEC
-}
-
#ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index f6dbb4c20e6..e94b57fb79a 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -83,22 +83,6 @@ EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);
-void free_initmem(void)
-{
- unsigned long addr;
-
- addr = (unsigned long)__init_begin;
- for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
- memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- totalram_pages++;
- }
- printk ("Freeing unused kernel memory: %luk freed\n",
- ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
-}
-
static void pgd_ctor(void *addr)
{
memset(addr, 0, PGD_TABLE_SIZE);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 29d4dde65c4..c781bbcf733 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -249,7 +249,7 @@ static int __init mark_nonram_nosave(void)
*/
void __init paging_init(void)
{
- unsigned long total_ram = memblock_phys_mem_size();
+ unsigned long long total_ram = memblock_phys_mem_size();
phys_addr_t top_of_ram = memblock_end_of_DRAM();
unsigned long max_zone_pfns[MAX_NR_ZONES];
@@ -269,7 +269,7 @@ void __init paging_init(void)
kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */
- printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%lx\n",
+ printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
(unsigned long long)top_of_ram, total_ram);
printk(KERN_DEBUG "Memory hole size: %ldMB\n",
(long int)((top_of_ram - total_ram) >> 20));
@@ -337,8 +337,9 @@ void __init mem_init(void)
highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
+ phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
struct page *page = pfn_to_page(pfn);
- if (memblock_is_reserved(pfn << PAGE_SHIFT))
+ if (memblock_is_reserved(paddr))
continue;
ClearPageReserved(page);
init_page_count(page);
@@ -352,6 +353,15 @@ void __init mem_init(void)
}
#endif /* CONFIG_HIGHMEM */
+#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
+ /*
+ * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
+ * functions.... do it here for the non-smp case.
+ */
+ per_cpu(next_tlbcam_idx, smp_processor_id()) =
+ (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
+#endif
+
printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
"%luk reserved, %luk data, %luk bss, %luk init)\n",
nr_free_pages() << (PAGE_SHIFT-10),
@@ -382,6 +392,25 @@ void __init mem_init(void)
mem_init_done = 1;
}
+void free_initmem(void)
+{
+ unsigned long addr;
+
+ ppc_md.progress = ppc_printk_progress;
+
+ addr = (unsigned long)__init_begin;
+ for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
+ memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
+ ClearPageReserved(virt_to_page(addr));
+ init_page_count(virt_to_page(addr));
+ free_page(addr);
+ totalram_pages++;
+ }
+ pr_info("Freeing unused kernel memory: %luk freed\n",
+ ((unsigned long)__init_end -
+ (unsigned long)__init_begin) >> 10);
+}
+
#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
diff --git a/arch/powerpc/mm/tlb_hash32.c b/arch/powerpc/mm/tlb_hash32.c
index 27b863c1494..9a445f64acc 100644
--- a/arch/powerpc/mm/tlb_hash32.c
+++ b/arch/powerpc/mm/tlb_hash32.c
@@ -177,3 +177,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
flush_range(vma->vm_mm, start, end);
}
EXPORT_SYMBOL(flush_tlb_range);
+
+void __init early_init_mmu(void)
+{
+}
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index af089220941..4ebb34bc01d 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -30,6 +30,212 @@
#define VPTE_PGD_SHIFT (VPTE_PUD_SHIFT + PUD_INDEX_SIZE)
#define VPTE_INDEX_SIZE (VPTE_PGD_SHIFT + PGD_INDEX_SIZE)
+/**********************************************************************
+ * *
+ * TLB miss handling for Book3E with a bolted linear mapping *
+ * No virtual page table, no nested TLB misses *
+ * *
+ **********************************************************************/
+
+.macro tlb_prolog_bolted addr
+ mtspr SPRN_SPRG_TLB_SCRATCH,r13
+ mfspr r13,SPRN_SPRG_PACA
+ std r10,PACA_EXTLB+EX_TLB_R10(r13)
+ mfcr r10
+ std r11,PACA_EXTLB+EX_TLB_R11(r13)
+ std r16,PACA_EXTLB+EX_TLB_R16(r13)
+ mfspr r16,\addr /* get faulting address */
+ std r14,PACA_EXTLB+EX_TLB_R14(r13)
+ ld r14,PACAPGD(r13)
+ std r15,PACA_EXTLB+EX_TLB_R15(r13)
+ std r10,PACA_EXTLB+EX_TLB_CR(r13)
+ TLB_MISS_PROLOG_STATS_BOLTED
+.endm
+
+.macro tlb_epilog_bolted
+ ld r14,PACA_EXTLB+EX_TLB_CR(r13)
+ ld r10,PACA_EXTLB+EX_TLB_R10(r13)
+ ld r11,PACA_EXTLB+EX_TLB_R11(r13)
+ mtcr r14
+ ld r14,PACA_EXTLB+EX_TLB_R14(r13)
+ ld r15,PACA_EXTLB+EX_TLB_R15(r13)
+ TLB_MISS_RESTORE_STATS_BOLTED
+ ld r16,PACA_EXTLB+EX_TLB_R16(r13)
+ mfspr r13,SPRN_SPRG_TLB_SCRATCH
+.endm
+
+/* Data TLB miss */
+ START_EXCEPTION(data_tlb_miss_bolted)
+ tlb_prolog_bolted SPRN_DEAR
+
+ /* We need _PAGE_PRESENT and _PAGE_ACCESSED set */
+
+ /* We do the user/kernel test for the PID here along with the RW test
+ */
+ /* We pre-test some combination of permissions to avoid double
+ * faults:
+ *
+ * We move the ESR:ST bit into the position of _PAGE_BAP_SW in the PTE
+ * ESR_ST is 0x00800000
+ * _PAGE_BAP_SW is 0x00000010
+ * So the shift is >> 19. This tests for supervisor writeability.
+ * If the page happens to be supervisor writeable and not user
+ * writeable, we will take a new fault later, but that should be
+ * a rare enough case.
+ *
+ * We also move ESR_ST in _PAGE_DIRTY position
+ * _PAGE_DIRTY is 0x00001000 so the shift is >> 11
+ *
+ * MAS1 is preset for all we need except for TID that needs to
+ * be cleared for kernel translations
+ */
+
+ mfspr r11,SPRN_ESR
+
+ srdi r15,r16,60 /* get region */
+ rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
+ bne- dtlb_miss_fault_bolted
+
+ rlwinm r10,r11,32-19,27,27
+ rlwimi r10,r11,32-16,19,19
+ cmpwi r15,0
+ ori r10,r10,_PAGE_PRESENT
+ oris r11,r10,_PAGE_ACCESSED@h
+
+ TLB_MISS_STATS_SAVE_INFO_BOLTED
+ bne tlb_miss_kernel_bolted
+
+tlb_miss_common_bolted:
+/*
+ * This is the guts of the TLB miss handler for bolted-linear.
+ * We are entered with:
+ *
+ * r16 = faulting address
+ * r15 = crap (free to use)
+ * r14 = page table base
+ * r13 = PACA
+ * r11 = PTE permission mask
+ * r10 = crap (free to use)
+ */
+ rldicl r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
+ cmpldi cr0,r14,0
+ clrrdi r15,r15,3
+ beq tlb_miss_fault_bolted
+
+BEGIN_MMU_FTR_SECTION
+ /* Set the TLB reservation and search for existing entry. Then load
+ * the entry.
+ */
+ PPC_TLBSRX_DOT(0,r16)
+ ldx r14,r14,r15
+ beq normal_tlb_miss_done
+MMU_FTR_SECTION_ELSE
+ ldx r14,r14,r15
+ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
+
+#ifndef CONFIG_PPC_64K_PAGES
+ rldicl r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
+ clrrdi r15,r15,3
+
+ cmpldi cr0,r14,0
+ beq tlb_miss_fault_bolted
+
+ ldx r14,r14,r15
+#endif /* CONFIG_PPC_64K_PAGES */
+
+ rldicl r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
+ clrrdi r15,r15,3
+
+ cmpldi cr0,r14,0
+ beq tlb_miss_fault_bolted
+
+ ldx r14,r14,r15
+
+ rldicl r15,r16,64-PAGE_SHIFT+3,64-PTE_INDEX_SIZE-3
+ clrrdi r15,r15,3
+
+ cmpldi cr0,r14,0
+ beq tlb_miss_fault_bolted
+
+ ldx r14,r14,r15
+
+ /* Check if required permissions are met */
+ andc. r15,r11,r14
+ rldicr r15,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
+ bne- tlb_miss_fault_bolted
+
+ /* Now we build the MAS:
+ *
+ * MAS 0 : Fully setup with defaults in MAS4 and TLBnCFG
+ * MAS 1 : Almost fully setup
+ * - PID already updated by caller if necessary
+ * - TSIZE need change if !base page size, not
+ * yet implemented for now
+ * MAS 2 : Defaults not useful, need to be redone
+ * MAS 3+7 : Needs to be done
+ */
+ clrrdi r11,r16,12 /* Clear low crap in EA */
+ clrldi r15,r15,12 /* Clear crap at the top */
+ rlwimi r11,r14,32-19,27,31 /* Insert WIMGE */
+ rlwimi r15,r14,32-8,22,25 /* Move in U bits */
+ mtspr SPRN_MAS2,r11
+ andi. r11,r14,_PAGE_DIRTY
+ rlwimi r15,r14,32-2,26,31 /* Move in BAP bits */
+
+ /* Mask out SW and UW if !DIRTY (XXX optimize this !) */
+ bne 1f
+ li r11,MAS3_SW|MAS3_UW
+ andc r15,r15,r11
+1:
+ mtspr SPRN_MAS7_MAS3,r15
+ tlbwe
+
+ TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
+ tlb_epilog_bolted
+ rfi
+
+itlb_miss_kernel_bolted:
+ li r11,_PAGE_PRESENT|_PAGE_BAP_SX /* Base perm */
+ oris r11,r11,_PAGE_ACCESSED@h
+tlb_miss_kernel_bolted:
+ mfspr r10,SPRN_MAS1
+ ld r14,PACA_KERNELPGD(r13)
+ cmpldi cr0,r15,8 /* Check for vmalloc region */
+ rlwinm r10,r10,0,16,1 /* Clear TID */
+ mtspr SPRN_MAS1,r10
+ beq+ tlb_miss_common_bolted
+
+tlb_miss_fault_bolted:
+ /* We need to check if it was an instruction miss */
+ andi. r10,r11,_PAGE_EXEC|_PAGE_BAP_SX
+ bne itlb_miss_fault_bolted
+dtlb_miss_fault_bolted:
+ TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
+ tlb_epilog_bolted
+ b exc_data_storage_book3e
+itlb_miss_fault_bolted:
+ TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
+ tlb_epilog_bolted
+ b exc_instruction_storage_book3e
+
+/* Instruction TLB miss */
+ START_EXCEPTION(instruction_tlb_miss_bolted)
+ tlb_prolog_bolted SPRN_SRR0
+
+ rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
+ srdi r15,r16,60 /* get region */
+ TLB_MISS_STATS_SAVE_INFO_BOLTED
+ bne- itlb_miss_fault_bolted
+
+ li r11,_PAGE_PRESENT|_PAGE_EXEC /* Base perm */
+
+ /* We do the user/kernel test for the PID here along with the RW test
+ */
+
+ cmpldi cr0,r15,0 /* Check for user region */
+ oris r11,r11,_PAGE_ACCESSED@h
+ beq tlb_miss_common_bolted
+ b itlb_miss_kernel_bolted
/**********************************************************************
* *
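
The shift constants quoted in the bolted DTLB miss comment above hold up arithmetically; a two-line check of the ESR:ST moves into the _PAGE_BAP_SW and _PAGE_DIRTY positions described there:

    #include <assert.h>

    int main(void)
    {
        unsigned int esr_st = 0x00800000;           /* ESR:ST, per the comment */

        assert((esr_st >> 19) == 0x00000010);       /* _PAGE_BAP_SW position */
        assert((esr_st >> 11) == 0x00001000);       /* _PAGE_DIRTY position  */
        return 0;
    }
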
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 0bdad3aecc6..d32ec643c23 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -35,6 +35,7 @@
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
+#include <linux/of_fdt.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
@@ -102,6 +103,12 @@ unsigned long linear_map_top; /* Top of linear mapping */
#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
+DEFINE_PER_CPU(int, next_tlbcam_idx);
+EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
+#endif
+
/*
* Base TLB flushing operations:
*
@@ -266,6 +273,17 @@ EXPORT_SYMBOL(flush_tlb_page);
#endif /* CONFIG_SMP */
+#ifdef CONFIG_PPC_47x
+void __init early_init_mmu_47x(void)
+{
+#ifdef CONFIG_SMP
+ unsigned long root = of_get_flat_dt_root();
+ if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
+ mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
+#endif /* CONFIG_SMP */
+}
+#endif /* CONFIG_PPC_47x */
+
/*
* Flush kernel TLB entries in the given range
*/
@@ -443,14 +461,27 @@ static void setup_page_sizes(void)
}
}
-static void setup_mmu_htw(void)
+static void __patch_exception(int exc, unsigned long addr)
{
extern unsigned int interrupt_base_book3e;
- extern unsigned int exc_data_tlb_miss_htw_book3e;
- extern unsigned int exc_instruction_tlb_miss_htw_book3e;
+ unsigned int *ibase = &interrupt_base_book3e;
+
+ /* Our exceptions vectors start with a NOP and -then- a branch
+ * to deal with single stepping from userspace which stops on
+ * the second instruction. Thus we need to patch the second
+ * instruction of the exception, not the first one
+ */
- unsigned int *ibase = &interrupt_base_book3e;
+ patch_branch(ibase + (exc / 4) + 1, addr, 0);
+}
+
+#define patch_exception(exc, name) do { \
+ extern unsigned int name; \
+ __patch_exception((exc), (unsigned long)&name); \
+} while (0)
+static void setup_mmu_htw(void)
+{
/* Check if HW tablewalk is present, and if yes, enable it by:
*
* - patching the TLB miss handlers to branch to the
@@ -462,19 +493,12 @@ static void setup_mmu_htw(void)
if ((tlb0cfg & TLBnCFG_IND) &&
(tlb0cfg & TLBnCFG_PT)) {
- /* Our exceptions vectors start with a NOP and -then- a branch
- * to deal with single stepping from userspace which stops on
- * the second instruction. Thus we need to patch the second
- * instruction of the exception, not the first one
- */
- patch_branch(ibase + (0x1c0 / 4) + 1,
- (unsigned long)&exc_data_tlb_miss_htw_book3e, 0);
- patch_branch(ibase + (0x1e0 / 4) + 1,
- (unsigned long)&exc_instruction_tlb_miss_htw_book3e, 0);
+ patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
+ patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
book3e_htw_enabled = 1;
}
- pr_info("MMU: Book3E Page Tables %s\n",
- book3e_htw_enabled ? "Enabled" : "Disabled");
+ pr_info("MMU: Book3E HW tablewalk %s\n",
+ book3e_htw_enabled ? "enabled" : "not supported");
}
/*
@@ -549,6 +573,9 @@ static void __early_init_mmu(int boot_cpu)
/* limit memory so we dont have linear faults */
memblock_enforce_memory_limit(linear_map_top);
memblock_analyze();
+
+ patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
+ patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e);
}
#endif
@@ -584,4 +611,11 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
/* Finally limit subsequent allocations */
memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
+#else /* ! CONFIG_PPC64 */
+void __init early_init_mmu(void)
+{
+#ifdef CONFIG_PPC_47x
+ early_init_mmu_47x();
+#endif
+}
#endif /* CONFIG_PPC64 */
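
__patch_exception() above treats the exception vector area as an array of 32-bit words and skips the leading NOP, so the branch (the vector's second instruction) is the word that gets rewritten. For the 0x1c0 data TLB miss vector this works out as follows:

    #include <stdio.h>

    int main(void)
    {
        unsigned int exc = 0x1c0;   /* byte offset of the data TLB miss vector */

        /* word exc/4 is the NOP; word exc/4 + 1 holds the patched branch */
        printf("patched word index %u, byte offset 0x%x\n",
               exc / 4 + 1, (exc / 4 + 1) * 4);     /* -> 113, 0x1c4 */
        return 0;
    }
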
diff --git a/arch/powerpc/net/Makefile b/arch/powerpc/net/Makefile
new file mode 100644
index 00000000000..266b3950c3a
--- /dev/null
+++ b/arch/powerpc/net/Makefile
@@ -0,0 +1,4 @@
+#
+# Arch-specific network modules
+#
+obj-$(CONFIG_BPF_JIT) += bpf_jit_64.o bpf_jit_comp.o
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
new file mode 100644
index 00000000000..af1ab5e9a69
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit.h
@@ -0,0 +1,227 @@
+/* bpf_jit.h: BPF JIT compiler for PPC64
+ *
+ * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _BPF_JIT_H
+#define _BPF_JIT_H
+
+#define BPF_PPC_STACK_LOCALS 32
+#define BPF_PPC_STACK_BASIC (48+64)
+#define BPF_PPC_STACK_SAVE (18*8)
+#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
+ BPF_PPC_STACK_SAVE)
+#define BPF_PPC_SLOWPATH_FRAME (48+64)
+
+/*
+ * Generated code register usage:
+ *
+ * As normal PPC C ABI (e.g. r1=sp, r2=TOC), with:
+ *
+ * skb r3 (Entry parameter)
+ * A register r4
+ * X register r5
+ * addr param r6
+ * r7-r10 scratch
+ * skb->data r14
+ * skb headlen r15 (skb->len - skb->data_len)
+ * m[0] r16
+ * m[...] ...
+ * m[15] r31
+ */
+#define r_skb 3
+#define r_ret 3
+#define r_A 4
+#define r_X 5
+#define r_addr 6
+#define r_scratch1 7
+#define r_D 14
+#define r_HL 15
+#define r_M 16
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Assembly helpers from arch/powerpc/net/bpf_jit.S:
+ */
+extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
+
+#define FUNCTION_DESCR_SIZE 24
+
+/*
+ * 16-bit immediate helper macros: HA() is for use with sign-extending instrs
+ * (e.g. LD, ADDI). If the bottom 16 bits is "-ve", add another bit into the
+ * top half to negate the effect (i.e. 0xffff + 1 = 0x(1)0000).
+ */
+#define IMM_H(i) ((uintptr_t)(i)>>16)
+#define IMM_HA(i) (((uintptr_t)(i)>>16) + \
+ (((uintptr_t)(i) & 0x8000) >> 15))
+#define IMM_L(i) ((uintptr_t)(i) & 0xffff)
+
+#define PLANT_INSTR(d, idx, instr) \
+ do { if (d) { (d)[idx] = instr; } idx++; } while (0)
+#define EMIT(instr) PLANT_INSTR(image, ctx->idx, instr)
+
+#define PPC_NOP() EMIT(PPC_INST_NOP)
+#define PPC_BLR() EMIT(PPC_INST_BLR)
+#define PPC_BLRL() EMIT(PPC_INST_BLRL)
+#define PPC_MTLR(r) EMIT(PPC_INST_MTLR | __PPC_RT(r))
+#define PPC_ADDI(d, a, i) EMIT(PPC_INST_ADDI | __PPC_RT(d) | \
+ __PPC_RA(a) | IMM_L(i))
+#define PPC_MR(d, a) PPC_OR(d, a, a)
+#define PPC_LI(r, i) PPC_ADDI(r, 0, i)
+#define PPC_ADDIS(d, a, i) EMIT(PPC_INST_ADDIS | \
+ __PPC_RS(d) | __PPC_RA(a) | IMM_L(i))
+#define PPC_LIS(r, i) PPC_ADDIS(r, 0, i)
+#define PPC_STD(r, base, i) EMIT(PPC_INST_STD | __PPC_RS(r) | \
+ __PPC_RA(base) | ((i) & 0xfffc))
+
+#define PPC_LD(r, base, i) EMIT(PPC_INST_LD | __PPC_RT(r) | \
+ __PPC_RA(base) | IMM_L(i))
+#define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | __PPC_RT(r) | \
+ __PPC_RA(base) | IMM_L(i))
+#define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | __PPC_RT(r) | \
+ __PPC_RA(base) | IMM_L(i))
+/* Convenience helpers for the above with 'far' offsets: */
+#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \
+ else { PPC_ADDIS(r, base, IMM_HA(i)); \
+ PPC_LD(r, r, IMM_L(i)); } } while(0)
+
+#define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LWZ(r, base, i); \
+ else { PPC_ADDIS(r, base, IMM_HA(i)); \
+ PPC_LWZ(r, r, IMM_L(i)); } } while(0)
+
+#define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LHZ(r, base, i); \
+ else { PPC_ADDIS(r, base, IMM_HA(i)); \
+ PPC_LHZ(r, r, IMM_L(i)); } } while(0)
+
+#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | __PPC_RA(a) | IMM_L(i))
+#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | __PPC_RA(a) | IMM_L(i))
+#define PPC_CMPLWI(a, i) EMIT(PPC_INST_CMPLWI | __PPC_RA(a) | IMM_L(i))
+#define PPC_CMPLW(a, b) EMIT(PPC_INST_CMPLW | __PPC_RA(a) | __PPC_RB(b))
+
+#define PPC_SUB(d, a, b) EMIT(PPC_INST_SUB | __PPC_RT(d) | \
+ __PPC_RB(a) | __PPC_RA(b))
+#define PPC_ADD(d, a, b) EMIT(PPC_INST_ADD | __PPC_RT(d) | \
+ __PPC_RA(a) | __PPC_RB(b))
+#define PPC_MUL(d, a, b) EMIT(PPC_INST_MULLW | __PPC_RT(d) | \
+ __PPC_RA(a) | __PPC_RB(b))
+#define PPC_MULHWU(d, a, b) EMIT(PPC_INST_MULHWU | __PPC_RT(d) | \
+ __PPC_RA(a) | __PPC_RB(b))
+#define PPC_MULI(d, a, i) EMIT(PPC_INST_MULLI | __PPC_RT(d) | \
+ __PPC_RA(a) | IMM_L(i))
+#define PPC_DIVWU(d, a, b) EMIT(PPC_INST_DIVWU | __PPC_RT(d) | \
+ __PPC_RA(a) | __PPC_RB(b))
+#define PPC_AND(d, a, b) EMIT(PPC_INST_AND | __PPC_RA(d) | \
+ __PPC_RS(a) | __PPC_RB(b))
+#define PPC_ANDI(d, a, i) EMIT(PPC_INST_ANDI | __PPC_RA(d) | \
+ __PPC_RS(a) | IMM_L(i))
+#define PPC_AND_DOT(d, a, b) EMIT(PPC_INST_ANDDOT | __PPC_RA(d) | \
+ __PPC_RS(a) | __PPC_RB(b))
+#define PPC_OR(d, a, b) EMIT(PPC_INST_OR | __PPC_RA(d) | \
+ __PPC_RS(a) | __PPC_RB(b))
+#define PPC_ORI(d, a, i) EMIT(PPC_INST_ORI | __PPC_RA(d) | \
+ __PPC_RS(a) | IMM_L(i))
+#define PPC_ORIS(d, a, i) EMIT(PPC_INST_ORIS | __PPC_RA(d) | \
+ __PPC_RS(a) | IMM_L(i))
+#define PPC_SLW(d, a, s) EMIT(PPC_INST_SLW | __PPC_RA(d) | \
+ __PPC_RS(a) | __PPC_RB(s))
+#define PPC_SRW(d, a, s) EMIT(PPC_INST_SRW | __PPC_RA(d) | \
+ __PPC_RS(a) | __PPC_RB(s))
+/* slwi = rlwinm Rx, Ry, n, 0, 31-n */
+#define PPC_SLWI(d, a, i) EMIT(PPC_INST_RLWINM | __PPC_RA(d) | \
+ __PPC_RS(a) | __PPC_SH(i) | \
+ __PPC_MB(0) | __PPC_ME(31-(i)))
+/* srwi = rlwinm Rx, Ry, 32-n, n, 31 */
+#define PPC_SRWI(d, a, i) EMIT(PPC_INST_RLWINM | __PPC_RA(d) | \
+ __PPC_RS(a) | __PPC_SH(32-(i)) | \
+ __PPC_MB(i) | __PPC_ME(31))
+/* sldi = rldicr Rx, Ry, n, 63-n */
+#define PPC_SLDI(d, a, i) EMIT(PPC_INST_RLDICR | __PPC_RA(d) | \
+ __PPC_RS(a) | __PPC_SH(i) | \
+ __PPC_MB(63-(i)) | (((i) & 0x20) >> 4))
+#define PPC_NEG(d, a) EMIT(PPC_INST_NEG | __PPC_RT(d) | __PPC_RA(a))
+
+/* Long jump; (unconditional 'branch') */
+#define PPC_JMP(dest) EMIT(PPC_INST_BRANCH | \
+ (((dest) - (ctx->idx * 4)) & 0x03fffffc))
+/* "cond" here covers BO:BI fields. */
+#define PPC_BCC_SHORT(cond, dest) EMIT(PPC_INST_BRANCH_COND | \
+ (((cond) & 0x3ff) << 16) | \
+ (((dest) - (ctx->idx * 4)) & \
+ 0xfffc))
+#define PPC_LI32(d, i) do { PPC_LI(d, IMM_L(i)); \
+ if ((u32)(uintptr_t)(i) >= 32768) { \
+ PPC_ADDIS(d, d, IMM_HA(i)); \
+ } } while(0)
+#define PPC_LI64(d, i) do { \
+ if (!((uintptr_t)(i) & 0xffffffff00000000ULL)) \
+ PPC_LI32(d, i); \
+ else { \
+ PPC_LIS(d, ((uintptr_t)(i) >> 48)); \
+ if ((uintptr_t)(i) & 0x0000ffff00000000ULL) \
+ PPC_ORI(d, d, \
+ ((uintptr_t)(i) >> 32) & 0xffff); \
+ PPC_SLDI(d, d, 32); \
+ if ((uintptr_t)(i) & 0x00000000ffff0000ULL) \
+ PPC_ORIS(d, d, \
+ ((uintptr_t)(i) >> 16) & 0xffff); \
+ if ((uintptr_t)(i) & 0x000000000000ffffULL) \
+ PPC_ORI(d, d, (uintptr_t)(i) & 0xffff); \
+ } } while (0);
+
+static inline bool is_nearbranch(int offset)
+{
+ return (offset < 32768) && (offset >= -32768);
+}
+
+/*
+ * The fly in the ointment of code size changing from pass to pass is
+ * avoided by padding the short branch case with a NOP. If code size differs
+ * with different branch reaches we will have the issue of code moving from
+ * one pass to the next and will need a few passes to converge on a stable
+ * state.
+ */
+#define PPC_BCC(cond, dest) do { \
+ if (is_nearbranch((dest) - (ctx->idx * 4))) { \
+ PPC_BCC_SHORT(cond, dest); \
+ PPC_NOP(); \
+ } else { \
+ /* Flip the 'T or F' bit to invert comparison */ \
+ PPC_BCC_SHORT(cond ^ COND_CMP_TRUE, (ctx->idx+2)*4); \
+ PPC_JMP(dest); \
+ } } while(0)
+
+/* To create a branch condition, select a bit of cr0... */
+#define CR0_LT 0
+#define CR0_GT 1
+#define CR0_EQ 2
+/* ...and modify BO[3] */
+#define COND_CMP_TRUE 0x100
+#define COND_CMP_FALSE 0x000
+/* Together, they make all required comparisons: */
+#define COND_GT (CR0_GT | COND_CMP_TRUE)
+#define COND_GE (CR0_LT | COND_CMP_FALSE)
+#define COND_EQ (CR0_EQ | COND_CMP_TRUE)
+#define COND_NE (CR0_EQ | COND_CMP_FALSE)
+#define COND_LT (CR0_LT | COND_CMP_TRUE)
+
+#define SEEN_DATAREF 0x10000 /* might call external helpers */
+#define SEEN_XREG 0x20000 /* X reg is used */
+#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
+ * storage */
+#define SEEN_MEM_MSK 0x0ffff
+
+struct codegen_context {
+ unsigned int seen;
+ unsigned int idx;
+ int pc_ret0; /* bpf index of first RET #0 instruction (if any) */
+};
+
+#endif
+
+#endif
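
The IMM_HA() macro above compensates for the sign extension that addi/addis apply to their 16-bit immediates: if the low half of a constant has its top bit set, li sign-extends it downwards, and the extra +1 in the high half cancels that out. A self-contained check of the PPC_LI32 pattern (li then addis), using an arbitrary example constant:

    #include <assert.h>
    #include <stdint.h>

    #define IMM_HA(i)  (((uintptr_t)(i) >> 16) + (((uintptr_t)(i) & 0x8000) >> 15))
    #define IMM_L(i)   ((uintptr_t)(i) & 0xffff)

    int main(void)
    {
        uint32_t k = 0x00018765;                    /* example constant */

        /* "li d, IMM_L(k)" sign-extends the low half... */
        int64_t d = (int64_t)(int16_t)IMM_L(k);
        /* ...and "addis d, d, IMM_HA(k)" adds the corrected high half. */
        d += (int64_t)(int16_t)IMM_HA(k) << 16;

        /* 0x...ffff8765 + 0x20000 wraps back to exactly 0x18765. */
        assert((uint64_t)d == k);
        return 0;
    }
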
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
new file mode 100644
index 00000000000..ff4506e85cc
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -0,0 +1,138 @@
+/* bpf_jit.S: Packet/header access helper functions
+ * for PPC64 BPF compiler.
+ *
+ * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <asm/ppc_asm.h>
+#include "bpf_jit.h"
+
+/*
+ * All of these routines are called directly from generated code,
+ * whose register usage is:
+ *
+ * r3 skb
+ * r4,r5 A,X
+ * r6 *** address parameter to helper ***
+ * r7-r10 scratch
+ * r14 skb->data
+ * r15 skb headlen
+ * r16-31 M[]
+ */
+
+/*
+ * To consider: These helpers are so small it could be better to just
+ * generate them inline. Inline code can do the simple headlen check
+ * then branch directly to slow_path_XXX if required. (In fact, could
+ * load a spare GPR with the address of slow_path_generic and pass size
+ * as an argument, making the call site a mtlr, li and bllr.)
+ *
+ * Technically, the "is addr < 0" check is unnecessary & slowing down
+ * the ABS path, as it's statically checked on generation.
+ */
+ .globl sk_load_word
+sk_load_word:
+ cmpdi r_addr, 0
+ blt bpf_error
+ /* Are we accessing past headlen? */
+ subi r_scratch1, r_HL, 4
+ cmpd r_scratch1, r_addr
+ blt bpf_slow_path_word
+ /* Nope, just hitting the header. cr0 here is eq or gt! */
+ lwzx r_A, r_D, r_addr
+ /* When big endian we don't need to byteswap. */
+ blr /* Return success, cr0 != LT */
+
+ .globl sk_load_half
+sk_load_half:
+ cmpdi r_addr, 0
+ blt bpf_error
+ subi r_scratch1, r_HL, 2
+ cmpd r_scratch1, r_addr
+ blt bpf_slow_path_half
+ lhzx r_A, r_D, r_addr
+ blr
+
+ .globl sk_load_byte
+sk_load_byte:
+ cmpdi r_addr, 0
+ blt bpf_error
+ cmpd r_HL, r_addr
+ ble bpf_slow_path_byte
+ lbzx r_A, r_D, r_addr
+ blr
+
+/*
+ * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf)
+ * r_addr is the offset value, already known positive
+ */
+ .globl sk_load_byte_msh
+sk_load_byte_msh:
+ cmpd r_HL, r_addr
+ ble bpf_slow_path_byte_msh
+ lbzx r_X, r_D, r_addr
+ rlwinm r_X, r_X, 2, 32-4-2, 31-2
+ blr
+
+bpf_error:
+ /* Entered with cr0 = lt */
+ li r3, 0
+ /* Generated code will 'blt epilogue', returning 0. */
+ blr
+
+/* Call out to skb_copy_bits:
+ * We'll need to back up our volatile regs first; we have
+ * local variable space at r1+(BPF_PPC_STACK_BASIC).
+ * Allocate a new stack frame here to remain ABI-compliant in
+ * stashing LR.
+ */
+#define bpf_slow_path_common(SIZE) \
+ mflr r0; \
+ std r0, 16(r1); \
+ /* R3 goes in parameter space of caller's frame */ \
+ std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
+ std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
+ std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
+ addi r5, r1, BPF_PPC_STACK_BASIC+(2*8); \
+ stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
+ /* R3 = r_skb, as passed */ \
+ mr r4, r_addr; \
+ li r6, SIZE; \
+ bl skb_copy_bits; \
+ /* R3 = 0 on success */ \
+ addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
+ ld r0, 16(r1); \
+ ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
+ ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
+ mtlr r0; \
+ cmpdi r3, 0; \
+ blt bpf_error; /* cr0 = LT */ \
+ ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
+ /* Great success! */
+
+bpf_slow_path_word:
+ bpf_slow_path_common(4)
+ /* Data value is on stack, and cr0 != LT */
+ lwz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
+ blr
+
+bpf_slow_path_half:
+ bpf_slow_path_common(2)
+ lhz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
+ blr
+
+bpf_slow_path_byte:
+ bpf_slow_path_common(1)
+ lbz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
+ blr
+
+bpf_slow_path_byte_msh:
+ bpf_slow_path_common(1)
+ lbz r_X, BPF_PPC_STACK_BASIC+(2*8)(r1)
+ rlwinm r_X, r_X, 2, 32-4-2, 31-2
+ blr
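
The rlwinm in sk_load_byte_msh above is the whole of the classic BPF "ldxb 4*([k]&0xf)" operation: rotate the loaded byte left by two and mask so only (byte & 0xf) << 2 survives. Its C equivalent, with the usual IPv4 header-length use case as a sanity check:

    #include <assert.h>
    #include <stdint.h>

    /* What "rlwinm r_X, r_X, 2, 32-4-2, 31-2" leaves in X. */
    static uint32_t msh(uint8_t pkt_byte)
    {
        return (uint32_t)(pkt_byte & 0xf) << 2;     /* 4 * (byte & 0xf) */
    }

    int main(void)
    {
        assert(msh(0x45) == 20);    /* IPv4 version/IHL byte -> 20-byte header */
        assert(msh(0xff) == 60);    /* maximum IHL -> 60 bytes */
        return 0;
    }
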
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
new file mode 100644
index 00000000000..73619d3aeb6
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -0,0 +1,694 @@
+/* bpf_jit_comp.c: BPF JIT compiler for PPC64
+ *
+ * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
+ *
+ * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/moduleloader.h>
+#include <asm/cacheflush.h>
+#include <linux/netdevice.h>
+#include <linux/filter.h>
+#include "bpf_jit.h"
+
+#ifndef __BIG_ENDIAN
+/* There are endianness assumptions herein. */
+#error "Little-endian PPC not supported in BPF compiler"
+#endif
+
+int bpf_jit_enable __read_mostly;
+
+
+static inline void bpf_flush_icache(void *start, void *end)
+{
+ smp_wmb();
+ flush_icache_range((unsigned long)start, (unsigned long)end);
+}
+
+static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
+ struct codegen_context *ctx)
+{
+ int i;
+ const struct sock_filter *filter = fp->insns;
+
+ if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
+ /* Make stackframe */
+ if (ctx->seen & SEEN_DATAREF) {
+ /* If we call any helpers (for loads), save LR */
+ EMIT(PPC_INST_MFLR | __PPC_RT(0));
+ PPC_STD(0, 1, 16);
+
+ /* Back up non-volatile regs. */
+ PPC_STD(r_D, 1, -(8*(32-r_D)));
+ PPC_STD(r_HL, 1, -(8*(32-r_HL)));
+ }
+ if (ctx->seen & SEEN_MEM) {
+ /*
+ * Conditionally save regs r15-r31 as some will be used
+ * for M[] data.
+ */
+ for (i = r_M; i < (r_M+16); i++) {
+ if (ctx->seen & (1 << (i-r_M)))
+ PPC_STD(i, 1, -(8*(32-i)));
+ }
+ }
+ EMIT(PPC_INST_STDU | __PPC_RS(1) | __PPC_RA(1) |
+ (-BPF_PPC_STACKFRAME & 0xfffc));
+ }
+
+ if (ctx->seen & SEEN_DATAREF) {
+ /*
+ * If this filter needs to access skb data,
+ * prepare r_D and r_HL:
+ * r_HL = skb->len - skb->data_len
+ * r_D = skb->data
+ */
+ PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
+ data_len));
+ PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
+ PPC_SUB(r_HL, r_HL, r_scratch1);
+ PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
+ }
+
+ if (ctx->seen & SEEN_XREG) {
+ /*
+ * TODO: Could also detect whether first instr. sets X and
+ * avoid this (as below, with A).
+ */
+ PPC_LI(r_X, 0);
+ }
+
+ switch (filter[0].code) {
+ case BPF_S_RET_K:
+ case BPF_S_LD_W_LEN:
+ case BPF_S_ANC_PROTOCOL:
+ case BPF_S_ANC_IFINDEX:
+ case BPF_S_ANC_MARK:
+ case BPF_S_ANC_RXHASH:
+ case BPF_S_ANC_CPU:
+ case BPF_S_ANC_QUEUE:
+ case BPF_S_LD_W_ABS:
+ case BPF_S_LD_H_ABS:
+ case BPF_S_LD_B_ABS:
+ /* first instruction sets A register (or is RET 'constant') */
+ break;
+ default:
+ /* make sure we dont leak kernel information to user */
+ PPC_LI(r_A, 0);
+ }
+}
+
+static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
+{
+ int i;
+
+ if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
+ PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
+ if (ctx->seen & SEEN_DATAREF) {
+ PPC_LD(0, 1, 16);
+ PPC_MTLR(0);
+ PPC_LD(r_D, 1, -(8*(32-r_D)));
+ PPC_LD(r_HL, 1, -(8*(32-r_HL)));
+ }
+ if (ctx->seen & SEEN_MEM) {
+ /* Restore any saved non-vol registers */
+ for (i = r_M; i < (r_M+16); i++) {
+ if (ctx->seen & (1 << (i-r_M)))
+ PPC_LD(i, 1, -(8*(32-i)));
+ }
+ }
+ }
+ /* The RETs have left a return value in R3. */
+
+ PPC_BLR();
+}
+
+/* Assemble the body code between the prologue & epilogue. */
+static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
+ struct codegen_context *ctx,
+ unsigned int *addrs)
+{
+ const struct sock_filter *filter = fp->insns;
+ int flen = fp->len;
+ u8 *func;
+ unsigned int true_cond;
+ int i;
+
+ /* Start of epilogue code */
+ unsigned int exit_addr = addrs[flen];
+
+ for (i = 0; i < flen; i++) {
+ unsigned int K = filter[i].k;
+
+ /*
+ * addrs[] maps a BPF bytecode address into a real offset from
+ * the start of the body code.
+ */
+ addrs[i] = ctx->idx * 4;
+
+ switch (filter[i].code) {
+ /*** ALU ops ***/
+ case BPF_S_ALU_ADD_X: /* A += X; */
+ ctx->seen |= SEEN_XREG;
+ PPC_ADD(r_A, r_A, r_X);
+ break;
+ case BPF_S_ALU_ADD_K: /* A += K; */
+ if (!K)
+ break;
+ PPC_ADDI(r_A, r_A, IMM_L(K));
+ if (K >= 32768)
+ PPC_ADDIS(r_A, r_A, IMM_HA(K));
+ break;
+ case BPF_S_ALU_SUB_X: /* A -= X; */
+ ctx->seen |= SEEN_XREG;
+ PPC_SUB(r_A, r_A, r_X);
+ break;
+ case BPF_S_ALU_SUB_K: /* A -= K */
+ if (!K)
+ break;
+ PPC_ADDI(r_A, r_A, IMM_L(-K));
+ if (K >= 32768)
+ PPC_ADDIS(r_A, r_A, IMM_HA(-K));
+ break;
+ case BPF_S_ALU_MUL_X: /* A *= X; */
+ ctx->seen |= SEEN_XREG;
+ PPC_MUL(r_A, r_A, r_X);
+ break;
+ case BPF_S_ALU_MUL_K: /* A *= K */
+ if (K < 32768)
+ PPC_MULI(r_A, r_A, K);
+ else {
+ PPC_LI32(r_scratch1, K);
+ PPC_MUL(r_A, r_A, r_scratch1);
+ }
+ break;
+ case BPF_S_ALU_DIV_X: /* A /= X; */
+ ctx->seen |= SEEN_XREG;
+ PPC_CMPWI(r_X, 0);
+ if (ctx->pc_ret0 != -1) {
+ PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
+ } else {
+ /*
+ * Exit, returning 0; first pass hits here
+ * (longer worst-case code size).
+ */
+ PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
+ PPC_LI(r_ret, 0);
+ PPC_JMP(exit_addr);
+ }
+ PPC_DIVWU(r_A, r_A, r_X);
+ break;
+ case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
+ PPC_LI32(r_scratch1, K);
+ /* Top 32 bits of 64bit result -> A */
+ PPC_MULHWU(r_A, r_A, r_scratch1);
+ break;
+ case BPF_S_ALU_AND_X:
+ ctx->seen |= SEEN_XREG;
+ PPC_AND(r_A, r_A, r_X);
+ break;
+ case BPF_S_ALU_AND_K:
+ if (!IMM_H(K))
+ PPC_ANDI(r_A, r_A, K);
+ else {
+ PPC_LI32(r_scratch1, K);
+ PPC_AND(r_A, r_A, r_scratch1);
+ }
+ break;
+ case BPF_S_ALU_OR_X:
+ ctx->seen |= SEEN_XREG;
+ PPC_OR(r_A, r_A, r_X);
+ break;
+ case BPF_S_ALU_OR_K:
+ if (IMM_L(K))
+ PPC_ORI(r_A, r_A, IMM_L(K));
+ if (K >= 65536)
+ PPC_ORIS(r_A, r_A, IMM_H(K));
+ break;
+ case BPF_S_ALU_LSH_X: /* A <<= X; */
+ ctx->seen |= SEEN_XREG;
+ PPC_SLW(r_A, r_A, r_X);
+ break;
+ case BPF_S_ALU_LSH_K:
+ if (K == 0)
+ break;
+ else
+ PPC_SLWI(r_A, r_A, K);
+ break;
+ case BPF_S_ALU_RSH_X: /* A >>= X; */
+ ctx->seen |= SEEN_XREG;
+ PPC_SRW(r_A, r_A, r_X);
+ break;
+ case BPF_S_ALU_RSH_K: /* A >>= K; */
+ if (K == 0)
+ break;
+ else
+ PPC_SRWI(r_A, r_A, K);
+ break;
+ case BPF_S_ALU_NEG:
+ PPC_NEG(r_A, r_A);
+ break;
+ case BPF_S_RET_K:
+ PPC_LI32(r_ret, K);
+ if (!K) {
+ if (ctx->pc_ret0 == -1)
+ ctx->pc_ret0 = i;
+ }
+ /*
+ * If this isn't the very last instruction, branch to
+ * the epilogue if we've stuff to clean up. Otherwise,
+ * if there's nothing to tidy, just return. If we /are/
+ * the last instruction, we're about to fall through to
+ * the epilogue to return.
+ */
+ if (i != flen - 1) {
+ /*
+ * Note: 'seen' is properly valid only on pass
+ * #2. Both parts of this conditional are the
+ * same instruction size though, meaning the
+ * first pass will still correctly determine the
+ * code size/addresses.
+ */
+ if (ctx->seen)
+ PPC_JMP(exit_addr);
+ else
+ PPC_BLR();
+ }
+ break;
+ case BPF_S_RET_A:
+ PPC_MR(r_ret, r_A);
+ if (i != flen - 1) {
+ if (ctx->seen)
+ PPC_JMP(exit_addr);
+ else
+ PPC_BLR();
+ }
+ break;
+ case BPF_S_MISC_TAX: /* X = A */
+ PPC_MR(r_X, r_A);
+ break;
+ case BPF_S_MISC_TXA: /* A = X */
+ ctx->seen |= SEEN_XREG;
+ PPC_MR(r_A, r_X);
+ break;
+
+ /*** Constant loads/M[] access ***/
+ case BPF_S_LD_IMM: /* A = K */
+ PPC_LI32(r_A, K);
+ break;
+ case BPF_S_LDX_IMM: /* X = K */
+ PPC_LI32(r_X, K);
+ break;
+ case BPF_S_LD_MEM: /* A = mem[K] */
+ PPC_MR(r_A, r_M + (K & 0xf));
+ ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
+ break;
+ case BPF_S_LDX_MEM: /* X = mem[K] */
+ PPC_MR(r_X, r_M + (K & 0xf));
+ ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
+ break;
+ case BPF_S_ST: /* mem[K] = A */
+ PPC_MR(r_M + (K & 0xf), r_A);
+ ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
+ break;
+ case BPF_S_STX: /* mem[K] = X */
+ PPC_MR(r_M + (K & 0xf), r_X);
+ ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
+ break;
+ case BPF_S_LD_W_LEN: /* A = skb->len; */
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
+ PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
+ break;
+ case BPF_S_LDX_W_LEN: /* X = skb->len; */
+ PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
+ break;
+
+ /*** Ancillary info loads ***/
+
+ /* None of the BPF_S_ANC* codes appear to be passed by
+ * sk_chk_filter(). The interpreter and the x86 BPF
+ * compiler implement them so we do too -- they may be
+ * planted in future.
+ */
+ case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+ protocol) != 2);
+ PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
+ protocol));
+ /* ntohs is a NOP with BE loads. */
+ break;
+ case BPF_S_ANC_IFINDEX:
+ PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
+ dev));
+ PPC_CMPDI(r_scratch1, 0);
+ if (ctx->pc_ret0 != -1) {
+ PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
+ } else {
+ /* Exit, returning 0; first pass hits here. */
+ PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
+ PPC_LI(r_ret, 0);
+ PPC_JMP(exit_addr);
+ }
+ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
+ ifindex) != 4);
+ PPC_LWZ_OFFS(r_A, r_scratch1,
+ offsetof(struct net_device, ifindex));
+ break;
+ case BPF_S_ANC_MARK:
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
+ PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
+ mark));
+ break;
+ case BPF_S_ANC_RXHASH:
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
+ PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
+ rxhash));
+ break;
+ case BPF_S_ANC_QUEUE:
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+ queue_mapping) != 2);
+ PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
+ queue_mapping));
+ break;
+ case BPF_S_ANC_CPU:
+#ifdef CONFIG_SMP
+ /*
+ * PACA ptr is r13:
+ * raw_smp_processor_id() = local_paca->paca_index
+ */
+ BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
+ paca_index) != 2);
+ PPC_LHZ_OFFS(r_A, 13,
+ offsetof(struct paca_struct, paca_index));
+#else
+ PPC_LI(r_A, 0);
+#endif
+ break;
+
+ /*** Absolute loads from packet header/data ***/
+ case BPF_S_LD_W_ABS:
+ func = sk_load_word;
+ goto common_load;
+ case BPF_S_LD_H_ABS:
+ func = sk_load_half;
+ goto common_load;
+ case BPF_S_LD_B_ABS:
+ func = sk_load_byte;
+ common_load:
+ /*
+ * Load from [K]. Reference with the (negative)
+ * SKF_NET_OFF/SKF_LL_OFF offsets is unsupported.
+ */
+ ctx->seen |= SEEN_DATAREF;
+ if ((int)K < 0)
+ return -ENOTSUPP;
+ PPC_LI64(r_scratch1, func);
+ PPC_MTLR(r_scratch1);
+ PPC_LI32(r_addr, K);
+ PPC_BLRL();
+ /*
+ * Helper returns 'lt' condition on error, and an
+ * appropriate return value in r3
+ */
+ PPC_BCC(COND_LT, exit_addr);
+ break;
+
+ /*** Indirect loads from packet header/data ***/
+ case BPF_S_LD_W_IND:
+ func = sk_load_word;
+ goto common_load_ind;
+ case BPF_S_LD_H_IND:
+ func = sk_load_half;
+ goto common_load_ind;
+ case BPF_S_LD_B_IND:
+ func = sk_load_byte;
+ common_load_ind:
+ /*
+ * Load from [X + K]. Negative offsets are tested for
+ * in the helper functions, and result in a 'ret 0'.
+ */
+ ctx->seen |= SEEN_DATAREF | SEEN_XREG;
+ PPC_LI64(r_scratch1, func);
+ PPC_MTLR(r_scratch1);
+ PPC_ADDI(r_addr, r_X, IMM_L(K));
+ if (K >= 32768)
+ PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
+ PPC_BLRL();
+ /* If error, cr0.LT set */
+ PPC_BCC(COND_LT, exit_addr);
+ break;
+
+ case BPF_S_LDX_B_MSH:
+ /*
+ * x86 version drops packet (RET 0) when K<0, whereas
+ * interpreter does allow K<0 (__load_pointer, special
+ * ancillary data). common_load returns ENOTSUPP if K<0,
+ * so we fall back to interpreter & filter works.
+ */
+ func = sk_load_byte_msh;
+ goto common_load;
+ break;
+
+ /*** Jump and branches ***/
+ case BPF_S_JMP_JA:
+ if (K != 0)
+ PPC_JMP(addrs[i + 1 + K]);
+ break;
+
+ case BPF_S_JMP_JGT_K:
+ case BPF_S_JMP_JGT_X:
+ true_cond = COND_GT;
+ goto cond_branch;
+ case BPF_S_JMP_JGE_K:
+ case BPF_S_JMP_JGE_X:
+ true_cond = COND_GE;
+ goto cond_branch;
+ case BPF_S_JMP_JEQ_K:
+ case BPF_S_JMP_JEQ_X:
+ true_cond = COND_EQ;
+ goto cond_branch;
+ case BPF_S_JMP_JSET_K:
+ case BPF_S_JMP_JSET_X:
+ true_cond = COND_NE;
+ /* Fall through */
+ cond_branch:
+ /* same targets, can avoid doing the test :) */
+ if (filter[i].jt == filter[i].jf) {
+ if (filter[i].jt > 0)
+ PPC_JMP(addrs[i + 1 + filter[i].jt]);
+ break;
+ }
+
+ switch (filter[i].code) {
+ case BPF_S_JMP_JGT_X:
+ case BPF_S_JMP_JGE_X:
+ case BPF_S_JMP_JEQ_X:
+ ctx->seen |= SEEN_XREG;
+ PPC_CMPLW(r_A, r_X);
+ break;
+ case BPF_S_JMP_JSET_X:
+ ctx->seen |= SEEN_XREG;
+ PPC_AND_DOT(r_scratch1, r_A, r_X);
+ break;
+ case BPF_S_JMP_JEQ_K:
+ case BPF_S_JMP_JGT_K:
+ case BPF_S_JMP_JGE_K:
+ if (K < 32768)
+ PPC_CMPLWI(r_A, K);
+ else {
+ PPC_LI32(r_scratch1, K);
+ PPC_CMPLW(r_A, r_scratch1);
+ }
+ break;
+ case BPF_S_JMP_JSET_K:
+ if (K < 32768)
+ /* PPC_ANDI is /only/ dot-form */
+ PPC_ANDI(r_scratch1, r_A, K);
+ else {
+ PPC_LI32(r_scratch1, K);
+ PPC_AND_DOT(r_scratch1, r_A,
+ r_scratch1);
+ }
+ break;
+ }
+ /* Sometimes branches are constructed "backward", with
+ * the false path being the branch and true path being
+ * a fallthrough to the next instruction.
+ */
+ if (filter[i].jt == 0)
+ /* Swap the sense of the branch */
+ PPC_BCC(true_cond ^ COND_CMP_TRUE,
+ addrs[i + 1 + filter[i].jf]);
+ else {
+ PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
+ if (filter[i].jf != 0)
+ PPC_JMP(addrs[i + 1 + filter[i].jf]);
+ }
+ break;
+ default:
+ /* The filter contains something cruel & unusual.
+ * We don't handle it, but also there shouldn't be
+ * anything missing from our list.
+ */
+ if (printk_ratelimit())
+ pr_err("BPF filter opcode %04x (@%d) unsupported\n",
+ filter[i].code, i);
+ return -ENOTSUPP;
+ }
+
+ }
+ /* Set end-of-body-code address for exit. */
+ addrs[i] = ctx->idx * 4;
+
+ return 0;
+}
+
+void bpf_jit_compile(struct sk_filter *fp)
+{
+ unsigned int proglen;
+ unsigned int alloclen;
+ u32 *image = NULL;
+ u32 *code_base;
+ unsigned int *addrs;
+ struct codegen_context cgctx;
+ int pass;
+ int flen = fp->len;
+
+ if (!bpf_jit_enable)
+ return;
+
+ addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
+ if (addrs == NULL)
+ return;
+
+ /*
+ * There are multiple assembly passes as the generated code will change
+ * size as it settles down, figuring out the max branch offsets/exit
+ * paths required.
+ *
+ * The range of standard conditional branches is +/- 32Kbytes. Since
+ * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
+ * finish with 8 bytes/instruction. Not feasible, so long jumps are
+ * used, distinct from short branches.
+ *
+ * Current:
+ *
+ * For now, both branch types assemble to 2 words (short branches padded
+ * with a NOP); this is less efficient, but assembly will always complete
+ * after exactly 3 passes:
+ *
+ * First pass: No code buffer; Program is "faux-generated" -- no code
+ * emitted but maximum size of output determined (and addrs[] filled
+ * in). Also, we note whether we use M[], whether we use skb data, etc.
+ * All generation choices assumed to be 'worst-case', e.g. branches all
+ * far (2 instructions), return path code reduction not available, etc.
+ *
+ * Second pass: Code buffer allocated with size determined previously.
+ * Prologue generated to support features we have seen used. Exit paths
+ * determined and addrs[] is filled in again, as code may be slightly
+ * smaller as a result.
+ *
+ * Third pass: Code generated 'for real', and branch destinations
+ * determined from now-accurate addrs[] map.
+ *
+ * Ideal:
+ *
+ * If we optimise this, near branches will be shorter. On the
+ * first assembly pass, we should err on the side of caution and
+ * generate the biggest code. On subsequent passes, branches will be
+ * generated short or long and code size will reduce. With smaller
+ * code, more branches may fall into the short category, and code will
+ * reduce more.
+ *
+ * Finally, if we see one pass generate code the same size as the
+ * previous pass we have converged and should now generate code for
+ * real. Allocating at the end will also save the memory that would
+ * otherwise be wasted by the (small) current code shrinkage.
+ * Preferably, we should do a small number of passes (e.g. 5) and if we
+ * haven't converged by then, get impatient and force code to generate
+ * as-is, even if the odd branch would be left long. The chances of a
+ * long jump are tiny with all but the most enormous of BPF filter
+ * inputs, so we should usually converge on the third pass.
+ */
+
+ cgctx.idx = 0;
+ cgctx.seen = 0;
+ cgctx.pc_ret0 = -1;
+ /* Scouting faux-generate pass 0 */
+ if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
+ /* We hit something illegal or unsupported. */
+ goto out;
+
+ /*
+ * Pretend to build prologue, given the features we've seen. This will
+ * update ctgtx.idx as it pretends to output instructions, then we can
+ * calculate total size from idx.
+ */
+ bpf_jit_build_prologue(fp, 0, &cgctx);
+ bpf_jit_build_epilogue(0, &cgctx);
+
+ proglen = cgctx.idx * 4;
+ alloclen = proglen + FUNCTION_DESCR_SIZE;
+ image = module_alloc(max_t(unsigned int, alloclen,
+ sizeof(struct work_struct)));
+ if (!image)
+ goto out;
+
+ code_base = image + (FUNCTION_DESCR_SIZE/4);
+
+ /* Code generation passes 1-2 */
+ for (pass = 1; pass < 3; pass++) {
+ /* Now build the prologue, body code & epilogue for real. */
+ cgctx.idx = 0;
+ bpf_jit_build_prologue(fp, code_base, &cgctx);
+ bpf_jit_build_body(fp, code_base, &cgctx, addrs);
+ bpf_jit_build_epilogue(code_base, &cgctx);
+
+ if (bpf_jit_enable > 1)
+ pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
+ proglen - (cgctx.idx * 4), cgctx.seen);
+ }
+
+ if (bpf_jit_enable > 1)
+ pr_info("flen=%d proglen=%u pass=%d image=%p\n",
+ flen, proglen, pass, image);
+
+ if (image) {
+ if (bpf_jit_enable > 1)
+ print_hex_dump(KERN_ERR, "JIT code: ",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1, code_base,
+ proglen, false);
+
+ bpf_flush_icache(code_base, code_base + (proglen/4));
+ /* Function descriptor nastiness: Address + TOC */
+ ((u64 *)image)[0] = (u64)code_base;
+ ((u64 *)image)[1] = local_paca->kernel_toc;
+ fp->bpf_func = (void *)image;
+ }
+out:
+ kfree(addrs);
+ return;
+}
+
+static void jit_free_defer(struct work_struct *arg)
+{
+ module_free(NULL, arg);
+}
+
+/* run from softirq, we must use a work_struct to call
+ * module_free() from process context
+ */
+void bpf_jit_free(struct sk_filter *fp)
+{
+ if (fp->bpf_func != sk_run_filter) {
+ struct work_struct *work = (struct work_struct *)fp->bpf_func;
+
+ INIT_WORK(work, jit_free_defer);
+ schedule_work(work);
+ }
+}
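
Once CONFIG_BPF_JIT is enabled and the sysctl net.core.bpf_jit_enable is switched on (a value of 2 additionally dumps the generated code, as the pr_info/print_hex_dump calls above show), a classic socket filter attached with SO_ATTACH_FILTER is handed to bpf_jit_compile(). A minimal usage sketch; the one-instruction accept-all program is only an example:

    /* sysctl: echo 1 > /proc/sys/net/core/bpf_jit_enable */
    #include <sys/socket.h>
    #include <linux/filter.h>
    #include <stdio.h>

    int main(void)
    {
        /* Single instruction: return 0xffff, i.e. accept the whole packet. */
        struct sock_filter insns[] = {
            { BPF_RET | BPF_K, 0, 0, 0xffff },
        };
        struct sock_fprog prog = { .len = 1, .filter = insns };

        int s = socket(AF_INET, SOCK_DGRAM, 0);
        if (s < 0 || setsockopt(s, SOL_SOCKET, SO_ATTACH_FILTER,
                                &prog, sizeof(prog)) < 0) {
            perror("attach");
            return 1;
        }
        /* The kernel now runs the (JITed) filter on every packet for 's'. */
        return 0;
    }
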
diff --git a/arch/powerpc/platforms/40x/ep405.c b/arch/powerpc/platforms/40x/ep405.c
index 4058fd1e7fc..b0389bbe4f9 100644
--- a/arch/powerpc/platforms/40x/ep405.c
+++ b/arch/powerpc/platforms/40x/ep405.c
@@ -100,7 +100,7 @@ static void __init ep405_setup_arch(void)
/* Find & init the BCSR CPLD */
ep405_init_bcsr();
- ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
+ pci_set_flags(PCI_REASSIGN_ALL_RSRC);
}
static int __init ep405_probe(void)
diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
index 2521d93ef13..e8dd5c5df7d 100644
--- a/arch/powerpc/platforms/40x/ppc40x_simple.c
+++ b/arch/powerpc/platforms/40x/ppc40x_simple.c
@@ -61,7 +61,7 @@ static const char *board[] __initdata = {
static int __init ppc40x_probe(void)
{
if (of_flat_dt_match(of_get_flat_dt_root(), board)) {
- ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
+ pci_set_flags(PCI_REASSIGN_ALL_RSRC);
return 1;
}
diff --git a/arch/powerpc/platforms/40x/walnut.c b/arch/powerpc/platforms/40x/walnut.c
index 335df91fbee..8b691df72f7 100644
--- a/arch/powerpc/platforms/40x/walnut.c
+++ b/arch/powerpc/platforms/40x/walnut.c
@@ -51,7 +51,7 @@ static int __init walnut_probe(void)
if (!of_flat_dt_is_compatible(root, "ibm,walnut"))
return 0;
- ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
+ pci_set_flags(PCI_REASSIGN_ALL_RSRC);
return 1;
}
diff --git a/arch/powerpc/platforms/44x/canyonlands.c b/arch/powerpc/platforms/44x/canyonlands.c
index afc5e8ea377..e300dd4c89b 100644
--- a/arch/powerpc/platforms/44x/canyonlands.c
+++ b/arch/powerpc/platforms/44x/canyonlands.c
@@ -55,7 +55,7 @@ static int __init ppc460ex_probe(void)
{
unsigned long root = of_get_flat_dt_root();
if (of_flat_dt_is_compatible(root, "amcc,canyonlands")) {
- ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
+ pci_set_flags(PCI_REASSIGN_ALL_RSRC);
return 1;
}
return 0;
diff --git a/arch/powerpc/platforms/44x/ebony.c b/arch/powerpc/platforms/44x/ebony.c
index 88b9117fa69..6a4232bbdf8 100644
--- a/arch/powerpc/platforms/44x/ebony.c
+++ b/arch/powerpc/platforms/44x/ebony.c
@@ -54,7 +54,7 @@ static int __init ebony_probe(void)
if (!of_flat_dt_is_compatible(root, "ibm,ebony"))
return 0;
- ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
+ pci_set_flags(PCI_REASSIGN_ALL_RSRC);
return 1;
}
diff --git a/arch/powerpc/platforms/44x/ppc44x_simple.c b/arch/powerpc/platforms/44x/ppc44x_simple.c
index c81c19c0b3d..8d220276341 100644
--- a/arch/powerpc/platforms/44x/ppc44x_simple.c
+++ b/arch/powerpc/platforms/44x/ppc44x_simple.c
@@ -72,7 +72,7 @@ static int __init ppc44x_probe(void)
for (i = 0; i < ARRAY_SIZE(board); i++) {
if (of_flat_dt_is_compatible(root, board[i])) {
- ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
+ pci_set_flags(PCI_REASSIGN_ALL_RSRC);
return 1;
}
}
diff --git a/arch/powerpc/platforms/44x/sam440ep.c b/arch/powerpc/platforms/44x/sam440ep.c
index a78e8eb6da4..9e09b835758 100644
--- a/arch/powerpc/platforms/44x/sam440ep.c
+++ b/arch/powerpc/platforms/44x/sam440ep.c
@@ -51,7 +51,7 @@ static int __init sam440ep_probe(void)
if (!of_flat_dt_is_compatible(root, "acube,sam440ep"))
return 0;
- ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
+ pci_set_flags(PCI_REASSIGN_ALL_RSRC);
return 1;
}
diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig
index 47ea1be1481..90f4496017e 100644
--- a/arch/powerpc/platforms/52xx/Kconfig
+++ b/arch/powerpc/platforms/52xx/Kconfig
@@ -55,14 +55,6 @@ config PPC_MPC5200_BUGFIX
It is safe to say 'Y' here
-config PPC_MPC5200_GPIO
- bool "MPC5200 GPIO support"
- depends on PPC_MPC52xx
- select ARCH_REQUIRE_GPIOLIB
- select GENERIC_GPIO
- help
- Enable gpiolib support for mpc5200 based boards
-
config PPC_MPC5200_LPBFIFO
tristate "MPC5200 LocalPlus bus FIFO driver"
depends on PPC_MPC52xx
diff --git a/arch/powerpc/platforms/52xx/Makefile b/arch/powerpc/platforms/52xx/Makefile
index 2bc8cd0c5cf..4e62486791e 100644
--- a/arch/powerpc/platforms/52xx/Makefile
+++ b/arch/powerpc/platforms/52xx/Makefile
@@ -14,5 +14,4 @@ ifeq ($(CONFIG_PPC_LITE5200),y)
obj-$(CONFIG_PM) += lite5200_sleep.o lite5200_pm.o
endif
-obj-$(CONFIG_PPC_MPC5200_GPIO) += mpc52xx_gpio.o
obj-$(CONFIG_PPC_MPC5200_LPBFIFO) += mpc52xx_lpbfifo.o
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c
deleted file mode 100644
index 1757d1db4b5..00000000000
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * MPC52xx gpio driver
- *
- * Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/of.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/of_gpio.h>
-#include <linux/io.h>
-#include <linux/of_platform.h>
-
-#include <asm/gpio.h>
-#include <asm/mpc52xx.h>
-#include <sysdev/fsl_soc.h>
-
-static DEFINE_SPINLOCK(gpio_lock);
-
-struct mpc52xx_gpiochip {
- struct of_mm_gpio_chip mmchip;
- unsigned int shadow_dvo;
- unsigned int shadow_gpioe;
- unsigned int shadow_ddr;
-};
-
-/*
- * GPIO LIB API implementation for wakeup GPIOs.
- *
- * There's a maximum of 8 wakeup GPIOs. Which of these are available
- * for use depends on your board setup.
- *
- * 0 -> GPIO_WKUP_7
- * 1 -> GPIO_WKUP_6
- * 2 -> PSC6_1
- * 3 -> PSC6_0
- * 4 -> ETH_17
- * 5 -> PSC3_9
- * 6 -> PSC2_4
- * 7 -> PSC1_4
- *
- */
-static int mpc52xx_wkup_gpio_get(struct gpio_chip *gc, unsigned int gpio)
-{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
- unsigned int ret;
-
- ret = (in_8(&regs->wkup_ival) >> (7 - gpio)) & 1;
-
- pr_debug("%s: gpio: %d ret: %d\n", __func__, gpio, ret);
-
- return ret;
-}
-
-static inline void
-__mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpiochip *chip = container_of(mm_gc,
- struct mpc52xx_gpiochip, mmchip);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
-
- if (val)
- chip->shadow_dvo |= 1 << (7 - gpio);
- else
- chip->shadow_dvo &= ~(1 << (7 - gpio));
-
- out_8(&regs->wkup_dvo, chip->shadow_dvo);
-}
-
-static void
-mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&gpio_lock, flags);
-
- __mpc52xx_wkup_gpio_set(gc, gpio, val);
-
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
-}
-
-static int mpc52xx_wkup_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
-{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpiochip *chip = container_of(mm_gc,
- struct mpc52xx_gpiochip, mmchip);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
- unsigned long flags;
-
- spin_lock_irqsave(&gpio_lock, flags);
-
- /* set the direction */
- chip->shadow_ddr &= ~(1 << (7 - gpio));
- out_8(&regs->wkup_ddr, chip->shadow_ddr);
-
- /* and enable the pin */
- chip->shadow_gpioe |= 1 << (7 - gpio);
- out_8(&regs->wkup_gpioe, chip->shadow_gpioe);
-
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- return 0;
-}
-
-static int
-mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
- struct mpc52xx_gpiochip *chip = container_of(mm_gc,
- struct mpc52xx_gpiochip, mmchip);
- unsigned long flags;
-
- spin_lock_irqsave(&gpio_lock, flags);
-
- __mpc52xx_wkup_gpio_set(gc, gpio, val);
-
- /* Then set direction */
- chip->shadow_ddr |= 1 << (7 - gpio);
- out_8(&regs->wkup_ddr, chip->shadow_ddr);
-
- /* Finally enable the pin */
- chip->shadow_gpioe |= 1 << (7 - gpio);
- out_8(&regs->wkup_gpioe, chip->shadow_gpioe);
-
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
-
- return 0;
-}
-
-static int __devinit mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
-{
- struct mpc52xx_gpiochip *chip;
- struct mpc52xx_gpio_wkup __iomem *regs;
- struct gpio_chip *gc;
- int ret;
-
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
- gc = &chip->mmchip.gc;
-
- gc->ngpio = 8;
- gc->direction_input = mpc52xx_wkup_gpio_dir_in;
- gc->direction_output = mpc52xx_wkup_gpio_dir_out;
- gc->get = mpc52xx_wkup_gpio_get;
- gc->set = mpc52xx_wkup_gpio_set;
-
- ret = of_mm_gpiochip_add(ofdev->dev.of_node, &chip->mmchip);
- if (ret)
- return ret;
-
- regs = chip->mmchip.regs;
- chip->shadow_gpioe = in_8(&regs->wkup_gpioe);
- chip->shadow_ddr = in_8(&regs->wkup_ddr);
- chip->shadow_dvo = in_8(&regs->wkup_dvo);
-
- return 0;
-}
-
-static int mpc52xx_gpiochip_remove(struct platform_device *ofdev)
-{
- return -EBUSY;
-}
-
-static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = {
- {
- .compatible = "fsl,mpc5200-gpio-wkup",
- },
- {}
-};
-
-static struct platform_driver mpc52xx_wkup_gpiochip_driver = {
- .driver = {
- .name = "gpio_wkup",
- .owner = THIS_MODULE,
- .of_match_table = mpc52xx_wkup_gpiochip_match,
- },
- .probe = mpc52xx_wkup_gpiochip_probe,
- .remove = mpc52xx_gpiochip_remove,
-};
-
-/*
- * GPIO LIB API implementation for simple GPIOs
- *
- * There's a maximum of 32 simple GPIOs. Which of these are available
- * for use depends on your board setup.
- * The numbering reflects the bit numbering in the port registers:
- *
- * 0..1 > reserved
- * 2..3 > IRDA
- * 4..7 > ETHR
- * 8..11 > reserved
- * 12..15 > USB
- * 16..17 > reserved
- * 18..23 > PSC3
- * 24..27 > PSC2
- * 28..31 > PSC1
- */
-static int mpc52xx_simple_gpio_get(struct gpio_chip *gc, unsigned int gpio)
-{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
- unsigned int ret;
-
- ret = (in_be32(&regs->simple_ival) >> (31 - gpio)) & 1;
-
- return ret;
-}
-
-static inline void
-__mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpiochip *chip = container_of(mm_gc,
- struct mpc52xx_gpiochip, mmchip);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
-
- if (val)
- chip->shadow_dvo |= 1 << (31 - gpio);
- else
- chip->shadow_dvo &= ~(1 << (31 - gpio));
- out_be32(&regs->simple_dvo, chip->shadow_dvo);
-}
-
-static void
-mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&gpio_lock, flags);
-
- __mpc52xx_simple_gpio_set(gc, gpio, val);
-
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
-}
-
-static int mpc52xx_simple_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
-{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpiochip *chip = container_of(mm_gc,
- struct mpc52xx_gpiochip, mmchip);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
- unsigned long flags;
-
- spin_lock_irqsave(&gpio_lock, flags);
-
- /* set the direction */
- chip->shadow_ddr &= ~(1 << (31 - gpio));
- out_be32(&regs->simple_ddr, chip->shadow_ddr);
-
- /* and enable the pin */
- chip->shadow_gpioe |= 1 << (31 - gpio);
- out_be32(&regs->simple_gpioe, chip->shadow_gpioe);
-
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- return 0;
-}
-
-static int
-mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpiochip *chip = container_of(mm_gc,
- struct mpc52xx_gpiochip, mmchip);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
- unsigned long flags;
-
- spin_lock_irqsave(&gpio_lock, flags);
-
- /* First set initial value */
- __mpc52xx_simple_gpio_set(gc, gpio, val);
-
- /* Then set direction */
- chip->shadow_ddr |= 1 << (31 - gpio);
- out_be32(&regs->simple_ddr, chip->shadow_ddr);
-
- /* Finally enable the pin */
- chip->shadow_gpioe |= 1 << (31 - gpio);
- out_be32(&regs->simple_gpioe, chip->shadow_gpioe);
-
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
-
- return 0;
-}
-
-static int __devinit mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev)
-{
- struct mpc52xx_gpiochip *chip;
- struct gpio_chip *gc;
- struct mpc52xx_gpio __iomem *regs;
- int ret;
-
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
- gc = &chip->mmchip.gc;
-
- gc->ngpio = 32;
- gc->direction_input = mpc52xx_simple_gpio_dir_in;
- gc->direction_output = mpc52xx_simple_gpio_dir_out;
- gc->get = mpc52xx_simple_gpio_get;
- gc->set = mpc52xx_simple_gpio_set;
-
- ret = of_mm_gpiochip_add(ofdev->dev.of_node, &chip->mmchip);
- if (ret)
- return ret;
-
- regs = chip->mmchip.regs;
- chip->shadow_gpioe = in_be32(&regs->simple_gpioe);
- chip->shadow_ddr = in_be32(&regs->simple_ddr);
- chip->shadow_dvo = in_be32(&regs->simple_dvo);
-
- return 0;
-}
-
-static const struct of_device_id mpc52xx_simple_gpiochip_match[] = {
- {
- .compatible = "fsl,mpc5200-gpio",
- },
- {}
-};
-
-static struct platform_driver mpc52xx_simple_gpiochip_driver = {
- .driver = {
- .name = "gpio",
- .owner = THIS_MODULE,
- .of_match_table = mpc52xx_simple_gpiochip_match,
- },
- .probe = mpc52xx_simple_gpiochip_probe,
- .remove = mpc52xx_gpiochip_remove,
-};
-
-static int __init mpc52xx_gpio_init(void)
-{
- if (platform_driver_register(&mpc52xx_wkup_gpiochip_driver))
- printk(KERN_ERR "Unable to register wakeup GPIO driver\n");
-
- if (platform_driver_register(&mpc52xx_simple_gpiochip_driver))
- printk(KERN_ERR "Unable to register simple GPIO driver\n");
-
- return 0;
-}
-
-
-/* Make sure we get initialised before anyone else tries to use us */
-subsys_initcall(mpc52xx_gpio_init);
-
-/* No exit call at the moment as we cannot unregister of gpio chips */
-
-MODULE_DESCRIPTION("Freescale MPC52xx gpio driver");
-MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
-MODULE_LICENSE("GPL v2");
-
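
The removed driver's set paths above all follow the pattern the comments describe: update a shadow copy of the data-value register with the bit for the requested pin (MSB-first, so pin n is bit 7 - n in the 8-bit wakeup bank and bit 31 - n in the 32-bit simple bank), then write the whole shadow back so the hardware register never needs a read-modify-write. A minimal user-space sketch of just that bit arithmetic follows; the variable names and the two helper functions are stand-ins for the driver's shadow fields and out_8()/out_be32() writes, not code from the patch.

/* Sketch of the MSB-first shadow-register update used by the mpc52xx
 * GPIO driver removed above.  wkup_shadow/simple_shadow stand in for
 * chip->shadow_dvo; the register write-back is only noted in comments.
 */
#include <stdio.h>
#include <stdint.h>

static uint8_t wkup_shadow;     /* 8-bit wakeup bank shadow */
static uint32_t simple_shadow;  /* 32-bit simple bank shadow */

static void wkup_set(unsigned int gpio, int val)
{
	if (val)
		wkup_shadow |= 1u << (7 - gpio);
	else
		wkup_shadow &= ~(1u << (7 - gpio));
	/* out_8(&regs->wkup_dvo, wkup_shadow) would follow in the driver */
}

static void simple_set(unsigned int gpio, int val)
{
	if (val)
		simple_shadow |= 1u << (31 - gpio);
	else
		simple_shadow &= ~(1u << (31 - gpio));
	/* out_be32(&regs->simple_dvo, simple_shadow) in the driver */
}

int main(void)
{
	wkup_set(0, 1);    /* wakeup pin 0 -> bit 7 */
	simple_set(28, 1); /* simple pin 28 -> bit 3 */
	printf("wkup shadow:   0x%02x\n", wkup_shadow);            /* 0x80 */
	printf("simple shadow: 0x%08x\n", (unsigned)simple_shadow); /* 0x00000008 */
	return 0;
}
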
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pci.c b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
index da110bd8834..bfb11e01133 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pci.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
@@ -264,7 +264,7 @@ mpc52xx_pci_setup(struct pci_controller *hose,
(unsigned long long)res->flags);
out_be32(&pci_regs->iw0btar,
MPC52xx_PCI_IWBTAR_TRANSLATION(res->start, res->start,
- res->end - res->start + 1));
+ resource_size(res)));
iwcr0 = MPC52xx_PCI_IWCR_ENABLE | MPC52xx_PCI_IWCR_MEM;
if (res->flags & IORESOURCE_PREFETCH)
iwcr0 |= MPC52xx_PCI_IWCR_READ_MULTI;
@@ -278,7 +278,7 @@ mpc52xx_pci_setup(struct pci_controller *hose,
res->start, res->end, res->flags);
out_be32(&pci_regs->iw1btar,
MPC52xx_PCI_IWBTAR_TRANSLATION(res->start, res->start,
- res->end - res->start + 1));
+ resource_size(res)));
iwcr1 = MPC52xx_PCI_IWCR_ENABLE | MPC52xx_PCI_IWCR_MEM;
if (res->flags & IORESOURCE_PREFETCH)
iwcr1 |= MPC52xx_PCI_IWCR_READ_MULTI;
@@ -300,7 +300,7 @@ mpc52xx_pci_setup(struct pci_controller *hose,
out_be32(&pci_regs->iw2btar,
MPC52xx_PCI_IWBTAR_TRANSLATION(hose->io_base_phys,
res->start,
- res->end - res->start + 1));
+ resource_size(res)));
iwcr2 = MPC52xx_PCI_IWCR_ENABLE | MPC52xx_PCI_IWCR_IO;
/* Set all the IWCR fields at once; they're in the same reg */
@@ -371,7 +371,7 @@ mpc52xx_add_bridge(struct device_node *node)
pr_debug("Adding MPC52xx PCI host bridge %s\n", node->full_name);
- ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
+ pci_add_flags(PCI_REASSIGN_ALL_BUS);
if (of_address_to_resource(node, 0, &rsrc) != 0) {
printk(KERN_ERR "Can't get %s resources\n", node->full_name);
@@ -402,7 +402,7 @@ mpc52xx_add_bridge(struct device_node *node)
hose->ops = &mpc52xx_pci_ops;
- pci_regs = ioremap(rsrc.start, rsrc.end - rsrc.start + 1);
+ pci_regs = ioremap(rsrc.start, resource_size(&rsrc));
if (!pci_regs)
return -ENOMEM;
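
For reference, the resource_size() conversions in this hunk (and the many similar ones below) are purely mechanical: struct resource ranges are inclusive, so the size of [start, end] is end - start + 1, which is exactly what the kernel helper returns. A small stand-alone sketch, using a simplified stand-in for the kernel's struct resource rather than the real <linux/ioport.h> definition:

/* Demonstrates that the open-coded size and resource_size() agree. */
#include <stdio.h>
#include <stdint.h>

struct resource {
	uint64_t start;
	uint64_t end;	/* inclusive, as in the kernel */
};

static uint64_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}

int main(void)
{
	struct resource r = { .start = 0xf0000000, .end = 0xf0000fff };

	/* Both expressions yield 0x1000 (4 KiB). */
	printf("open-coded: 0x%llx\n",
	       (unsigned long long)(r.end - r.start + 1));
	printf("helper:     0x%llx\n",
	       (unsigned long long)resource_size(&r));
	return 0;
}
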
diff --git a/arch/powerpc/platforms/82xx/pq2.c b/arch/powerpc/platforms/82xx/pq2.c
index 9761a59f175..d111b024eaf 100644
--- a/arch/powerpc/platforms/82xx/pq2.c
+++ b/arch/powerpc/platforms/82xx/pq2.c
@@ -53,7 +53,7 @@ static void __init pq2_pci_add_bridge(struct device_node *np)
if (of_address_to_resource(np, 0, &r) || r.end - r.start < 0x10b)
goto err;
- ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
+ pci_add_flags(PCI_REASSIGN_ALL_BUS);
hose = pcibios_alloc_controller(np);
if (!hose)
diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c
index a2b9b9ef124..c55129f5760 100644
--- a/arch/powerpc/platforms/83xx/km83xx.c
+++ b/arch/powerpc/platforms/83xx/km83xx.c
@@ -28,7 +28,7 @@
#include <linux/of_device.h>
#include <asm/system.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/time.h>
#include <asm/io.h>
#include <asm/machdep.h>
@@ -101,7 +101,7 @@ static void __init mpc83xx_km_setup_arch(void)
__func__);
return;
}
- base = ioremap(res.start, res.end - res.start + 1);
+ base = ioremap(res.start, resource_size(&res));
/*
* IMMR + 0x14A8[4:5] = 11 (clk delay for UCC 2)
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index ec0b401bc9c..32a52896822 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -27,7 +27,7 @@
#include <linux/of_device.h>
#include <asm/system.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/time.h>
#include <asm/io.h>
#include <asm/machdep.h>
@@ -68,7 +68,7 @@ static void __init mpc832x_sys_setup_arch(void)
struct resource res;
of_address_to_resource(np, 0, &res);
- bcsr_regs = ioremap(res.start, res.end - res.start +1);
+ bcsr_regs = ioremap(res.start, resource_size(&res));
of_node_put(np);
}
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
index 81e44fa1c64..6b45969567d 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_itx.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -26,7 +26,7 @@
#include <linux/of_platform.h>
#include <asm/system.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/time.h>
#include <asm/io.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/83xx/mpc834x_mds.c b/arch/powerpc/platforms/83xx/mpc834x_mds.c
index d0a634b056c..041c5177e73 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_mds.c
@@ -26,7 +26,7 @@
#include <linux/of_platform.h>
#include <asm/system.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/time.h>
#include <asm/io.h>
#include <asm/machdep.h>
@@ -53,7 +53,7 @@ static int mpc834xemds_usb_cfg(void)
struct resource res;
of_address_to_resource(np, 0, &res);
- bcsr_regs = ioremap(res.start, res.end - res.start + 1);
+ bcsr_regs = ioremap(res.start, resource_size(&res));
of_node_put(np);
}
if (!bcsr_regs)
diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
index 09e9d6fb741..934cc8c46bb 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
@@ -34,7 +34,7 @@
#include <linux/of_device.h>
#include <asm/system.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/time.h>
#include <asm/io.h>
#include <asm/machdep.h>
@@ -76,7 +76,7 @@ static void __init mpc836x_mds_setup_arch(void)
struct resource res;
of_address_to_resource(np, 0, &res);
- bcsr_regs = ioremap(res.start, res.end - res.start +1);
+ bcsr_regs = ioremap(res.start, resource_size(&res));
of_node_put(np);
}
diff --git a/arch/powerpc/platforms/83xx/sbc834x.c b/arch/powerpc/platforms/83xx/sbc834x.c
index 49023dbe157..af41d8c810a 100644
--- a/arch/powerpc/platforms/83xx/sbc834x.c
+++ b/arch/powerpc/platforms/83xx/sbc834x.c
@@ -28,7 +28,7 @@
#include <linux/of_platform.h>
#include <asm/system.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/time.h>
#include <asm/io.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c
index 2c64164722d..1ad748bb39b 100644
--- a/arch/powerpc/platforms/83xx/usb.c
+++ b/arch/powerpc/platforms/83xx/usb.c
@@ -171,7 +171,7 @@ int mpc831x_usb_cfg(void)
of_node_put(np);
return ret;
}
- usb_regs = ioremap(res.start, res.end - res.start + 1);
+ usb_regs = ioremap(res.start, resource_size(&res));
/* Using on-chip PHY */
if (prop && (!strcmp(prop, "utmi_wide") ||
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index b6976e1726e..498534cd526 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -67,6 +67,16 @@ config MPC85xx_RDB
help
This option enables support for the MPC85xx RDB (P2020 RDB) board
+config P1010_RDB
+ bool "Freescale P1010RDB"
+ select DEFAULT_UIMAGE
+ help
+ This option enables support for the MPC85xx RDB (P1010 RDB) board
+
+ P1010RDB contains P1010Si, which provides CPU performance up to 800
+ MHz and 1600 DMIPS, additional functionality and faster interfaces
+ (DDR3/3L, SATA II, and PCI Express).
+
config P1022_DS
bool "Freescale P1022 DS"
select DEFAULT_UIMAGE
@@ -75,6 +85,12 @@ config P1022_DS
help
This option enables support for the Freescale P1022DS reference board.
+config P1023_RDS
+ bool "Freescale P1023 RDS"
+ select DEFAULT_UIMAGE
+ help
+ This option enables support for the P1023 RDS board
+
config SOCRATES
bool "Socrates"
select DEFAULT_UIMAGE
@@ -155,6 +171,18 @@ config SBC8560
help
This option enables support for the Wind River SBC8560 board
+config P2040_RDB
+ bool "Freescale P2040 RDB"
+ select DEFAULT_UIMAGE
+ select PPC_E500MC
+ select PHYS_64BIT
+ select SWIOTLB
+ select MPC8xxx_GPIO
+ select HAS_RAPIDIO
+ select PPC_EPAPR_HV_PIC
+ help
+ This option enables support for the P2040 RDB board
+
config P3041_DS
bool "Freescale P3041 DS"
select DEFAULT_UIMAGE
@@ -163,6 +191,7 @@ config P3041_DS
select SWIOTLB
select MPC8xxx_GPIO
select HAS_RAPIDIO
+ select PPC_EPAPR_HV_PIC
help
This option enables support for the P3041 DS board
@@ -174,6 +203,7 @@ config P4080_DS
select SWIOTLB
select MPC8xxx_GPIO
select HAS_RAPIDIO
+ select PPC_EPAPR_HV_PIC
help
This option enables support for the P4080 DS board
@@ -188,6 +218,7 @@ config P5020_DS
select SWIOTLB
select MPC8xxx_GPIO
select HAS_RAPIDIO
+ select PPC_EPAPR_HV_PIC
help
This option enables support for the P5020 DS board
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index dd70db77d63..a971b32c5c0 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -10,7 +10,10 @@ obj-$(CONFIG_MPC8536_DS) += mpc8536_ds.o
obj-$(CONFIG_MPC85xx_DS) += mpc85xx_ds.o
obj-$(CONFIG_MPC85xx_MDS) += mpc85xx_mds.o
obj-$(CONFIG_MPC85xx_RDB) += mpc85xx_rdb.o
+obj-$(CONFIG_P1010_RDB) += p1010rdb.o
obj-$(CONFIG_P1022_DS) += p1022_ds.o
+obj-$(CONFIG_P1023_RDS) += p1023_rds.o
+obj-$(CONFIG_P2040_RDB) += p2040_rdb.o corenet_ds.o
obj-$(CONFIG_P3041_DS) += p3041_ds.o corenet_ds.o
obj-$(CONFIG_P4080_DS) += p4080_ds.o corenet_ds.o
obj-$(CONFIG_P5020_DS) += p5020_ds.o corenet_ds.o
diff --git a/arch/powerpc/platforms/85xx/corenet_ds.c b/arch/powerpc/platforms/85xx/corenet_ds.c
index 2ab338c9ac3..802ad110b75 100644
--- a/arch/powerpc/platforms/85xx/corenet_ds.c
+++ b/arch/powerpc/platforms/85xx/corenet_ds.c
@@ -3,7 +3,7 @@
*
* Maintained by Kumar Gala (see MAINTAINERS for contact information)
*
- * Copyright 2009 Freescale Semiconductor Inc.
+ * Copyright 2009-2011 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -22,6 +22,7 @@
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
+#include <asm/ppc-pci.h>
#include <mm/mmu_decl.h>
#include <asm/prom.h>
#include <asm/udbg.h>
@@ -61,10 +62,6 @@ void __init corenet_ds_pic_init(void)
mpic_init(mpic);
}
-#ifdef CONFIG_PCI
-static int primary_phb_addr;
-#endif
-
/*
* Setup the architecture
*/
@@ -85,18 +82,19 @@ void __init corenet_ds_setup_arch(void)
#endif
#ifdef CONFIG_PCI
- for_each_compatible_node(np, "pci", "fsl,p4080-pcie") {
- struct resource rsrc;
- of_address_to_resource(np, 0, &rsrc);
- if ((rsrc.start & 0xfffff) == primary_phb_addr)
- fsl_add_bridge(np, 1);
- else
+ for_each_node_by_type(np, "pci") {
+ if (of_device_is_compatible(np, "fsl,p4080-pcie") ||
+ of_device_is_compatible(np, "fsl,qoriq-pcie-v2.2")) {
fsl_add_bridge(np, 0);
-
- hose = pci_find_hose_for_OF_device(np);
- max = min(max, hose->dma_window_base_cur +
- hose->dma_window_size);
+ hose = pci_find_hose_for_OF_device(np);
+ max = min(max, hose->dma_window_base_cur +
+ hose->dma_window_size);
+ }
}
+
+#ifdef CONFIG_PPC64
+ pci_devs_phb_init();
+#endif
#endif
#ifdef CONFIG_SWIOTLB
@@ -116,6 +114,19 @@ static const struct of_device_id of_device_ids[] __devinitconst = {
{
.compatible = "fsl,rapidio-delta",
},
+ {
+ .compatible = "fsl,p4080-pcie",
+ },
+ {
+ .compatible = "fsl,qoriq-pcie-v2.2",
+ },
+ /* The following two are for the Freescale hypervisor */
+ {
+ .name = "hypervisor",
+ },
+ {
+ .name = "handles",
+ },
{}
};
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 6299a2a51ae..2bf99786d24 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -31,7 +31,7 @@
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/page.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/time.h>
#include <asm/io.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index c7b97f70312..1b9a8cf1873 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -83,7 +83,8 @@ void __init mpc85xx_ds_pic_init(void)
if (of_flat_dt_is_compatible(root, "fsl,MPC8572DS-CAMP")) {
mpic = mpic_alloc(np, r.start,
MPIC_PRIMARY |
- MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS,
+ MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
+ MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
} else {
mpic = mpic_alloc(np, r.start,
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 747d1ee661f..973b3f4a4b4 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -36,7 +36,7 @@
#include <linux/memblock.h>
#include <asm/system.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/time.h>
#include <asm/io.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index 088f30b0c08..f5ff9110c97 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -58,10 +58,11 @@ void __init mpc85xx_rdb_pic_init(void)
return;
}
- if (of_flat_dt_is_compatible(root, "fsl,85XXRDB-CAMP")) {
+ if (of_flat_dt_is_compatible(root, "fsl,MPC85XXRDB-CAMP")) {
mpic = mpic_alloc(np, r.start,
MPIC_PRIMARY |
- MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS,
+ MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
+ MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
} else {
mpic = mpic_alloc(np, r.start,
diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c
new file mode 100644
index 00000000000..d7387fa7f53
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/p1010rdb.c
@@ -0,0 +1,122 @@
+/*
+ * P1010RDB Board Setup
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/of_platform.h>
+
+#include <asm/system.h>
+#include <asm/time.h>
+#include <asm/machdep.h>
+#include <asm/pci-bridge.h>
+#include <mm/mmu_decl.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <asm/mpic.h>
+
+#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
+
+void __init p1010_rdb_pic_init(void)
+{
+ struct mpic *mpic;
+ struct resource r;
+ struct device_node *np;
+
+ np = of_find_node_by_type(NULL, "open-pic");
+ if (np == NULL) {
+ printk(KERN_ERR "Could not find open-pic node\n");
+ return;
+ }
+
+ if (of_address_to_resource(np, 0, &r)) {
+ printk(KERN_ERR "Failed to map mpic register space\n");
+ of_node_put(np);
+ return;
+ }
+
+ mpic = mpic_alloc(np, r.start, MPIC_PRIMARY | MPIC_WANTS_RESET |
+ MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
+ 0, 256, " OpenPIC ");
+
+ BUG_ON(mpic == NULL);
+ of_node_put(np);
+
+ mpic_init(mpic);
+
+}
+
+
+/*
+ * Setup the architecture
+ */
+static void __init p1010_rdb_setup_arch(void)
+{
+#ifdef CONFIG_PCI
+ struct device_node *np;
+#endif
+
+ if (ppc_md.progress)
+ ppc_md.progress("p1010_rdb_setup_arch()", 0);
+
+#ifdef CONFIG_PCI
+ for_each_node_by_type(np, "pci") {
+ if (of_device_is_compatible(np, "fsl,p1010-pcie"))
+ fsl_add_bridge(np, 0);
+ }
+
+#endif
+
+ printk(KERN_INFO "P1010 RDB board from Freescale Semiconductor\n");
+}
+
+static struct of_device_id __initdata p1010rdb_ids[] = {
+ { .type = "soc", },
+ { .compatible = "soc", },
+ { .compatible = "simple-bus", },
+ {},
+};
+
+static int __init p1010rdb_publish_devices(void)
+{
+ return of_platform_bus_probe(NULL, p1010rdb_ids, NULL);
+}
+machine_device_initcall(p1010_rdb, p1010rdb_publish_devices);
+machine_arch_initcall(p1010_rdb, swiotlb_setup_bus_notifier);
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init p1010_rdb_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ if (of_flat_dt_is_compatible(root, "fsl,P1010RDB"))
+ return 1;
+ return 0;
+}
+
+define_machine(p1010_rdb) {
+ .name = "P1010 RDB",
+ .probe = p1010_rdb_probe,
+ .setup_arch = p1010_rdb_setup_arch,
+ .init_IRQ = p1010_rdb_pic_init,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+ .get_irq = mpic_get_irq,
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index 7eb5c40c069..266b3aadfe5 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -129,6 +129,7 @@ static void p1022ds_set_gamma_table(int monitor_port, char *gamma_table_base)
static void p1022ds_set_monitor_port(int monitor_port)
{
struct device_node *pixis_node;
+ void __iomem *pixis;
u8 __iomem *brdcfg1;
pixis_node = of_find_compatible_node(NULL, NULL, "fsl,p1022ds-pixis");
@@ -137,12 +138,12 @@ static void p1022ds_set_monitor_port(int monitor_port)
return;
}
- brdcfg1 = of_iomap(pixis_node, 0);
- if (!brdcfg1) {
+ pixis = of_iomap(pixis_node, 0);
+ if (!pixis) {
pr_err("p1022ds: could not map ngPIXIS registers\n");
return;
}
- brdcfg1 += 9; /* BRDCFG1 is at offset 9 in the ngPIXIS */
+ brdcfg1 = pixis + 9; /* BRDCFG1 is at offset 9 in the ngPIXIS */
switch (monitor_port) {
case 0: /* DVI */
@@ -158,6 +159,8 @@ static void p1022ds_set_monitor_port(int monitor_port)
default:
pr_err("p1022ds: unsupported monitor port %i\n", monitor_port);
}
+
+ iounmap(pixis);
}
/**
@@ -192,8 +195,13 @@ void p1022ds_set_pixel_clock(unsigned int pixclock)
do_div(temp, pixclock);
freq = temp;
- /* pixclk is the ratio of the platform clock to the pixel clock */
+ /*
+ * 'pxclk' is the ratio of the platform clock to the pixel clock.
+ * This number is programmed into the CLKDVDR register, and the valid
+ * range of values is 2-255.
+ */
pxclk = DIV_ROUND_CLOSEST(fsl_get_sys_freq(), freq);
+ pxclk = clamp_t(u32, pxclk, 2, 255);
/* Disable the pixel clock, and set it to non-inverted and no delay */
clrbits32(&guts->clkdvdr,
@@ -201,6 +209,8 @@ void p1022ds_set_pixel_clock(unsigned int pixclock)
/* Enable the clock and set the pxclk */
setbits32(&guts->clkdvdr, CLKDVDR_PXCKEN | (pxclk << 16));
+
+ iounmap(guts);
}
/**
diff --git a/arch/powerpc/platforms/85xx/p1023_rds.c b/arch/powerpc/platforms/85xx/p1023_rds.c
new file mode 100644
index 00000000000..835e0b335bf
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/p1023_rds.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
+ *
+ * Author: Roy Zang <tie-fei.zang@freescale.com>
+ *
+ * Description:
+ * P1023 RDS Board Setup
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/fsl_devices.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+
+#include <asm/system.h>
+#include <asm/time.h>
+#include <asm/machdep.h>
+#include <asm/pci-bridge.h>
+#include <mm/mmu_decl.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <asm/mpic.h>
+
+#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
+
+/* ************************************************************************
+ *
+ * Setup the architecture
+ *
+ */
+#ifdef CONFIG_SMP
+void __init mpc85xx_smp_init(void);
+#endif
+
+static void __init mpc85xx_rds_setup_arch(void)
+{
+ struct device_node *np;
+
+ if (ppc_md.progress)
+ ppc_md.progress("p1023_rds_setup_arch()", 0);
+
+ /* Map BCSR area */
+ np = of_find_node_by_name(NULL, "bcsr");
+ if (np != NULL) {
+ static u8 __iomem *bcsr_regs;
+
+ bcsr_regs = of_iomap(np, 0);
+ of_node_put(np);
+
+ if (!bcsr_regs) {
+ printk(KERN_ERR
+ "BCSR: Failed to map bcsr register space\n");
+ return;
+ } else {
+#define BCSR15_I2C_BUS0_SEG_CLR 0x07
+#define BCSR15_I2C_BUS0_SEG2 0x02
+/*
+ * Note: this block only touches i2c bus segment selection.
+ *
+ * U-Boot initially points the i2c controller at the ID EEPROM segment;
+ * if RTC support is enabled in the kernel configuration, switch the
+ * controller over to the segment with the RTC chip instead.
+ */
+#ifdef CONFIG_RTC_CLASS
+ /* Enable RTC chip on the segment #2 of i2c */
+ clrbits8(&bcsr_regs[15], BCSR15_I2C_BUS0_SEG_CLR);
+ setbits8(&bcsr_regs[15], BCSR15_I2C_BUS0_SEG2);
+#endif
+
+ iounmap(bcsr_regs);
+ }
+ }
+
+#ifdef CONFIG_PCI
+ for_each_compatible_node(np, "pci", "fsl,p1023-pcie")
+ fsl_add_bridge(np, 0);
+#endif
+
+#ifdef CONFIG_SMP
+ mpc85xx_smp_init();
+#endif
+}
+
+static struct of_device_id p1023_ids[] = {
+ { .type = "soc", },
+ { .compatible = "soc", },
+ { .compatible = "simple-bus", },
+ {},
+};
+
+
+static int __init p1023_publish_devices(void)
+{
+ of_platform_bus_probe(NULL, p1023_ids, NULL);
+
+ return 0;
+}
+
+machine_device_initcall(p1023_rds, p1023_publish_devices);
+
+static void __init mpc85xx_rds_pic_init(void)
+{
+ struct mpic *mpic;
+ struct resource r;
+ struct device_node *np = NULL;
+
+ np = of_find_node_by_type(NULL, "open-pic");
+ if (!np) {
+ printk(KERN_ERR "Could not find open-pic node\n");
+ return;
+ }
+
+ if (of_address_to_resource(np, 0, &r)) {
+ printk(KERN_ERR "Failed to map mpic register space\n");
+ of_node_put(np);
+ return;
+ }
+
+ mpic = mpic_alloc(np, r.start,
+ MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
+ MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
+ 0, 256, " OpenPIC ");
+
+ BUG_ON(mpic == NULL);
+ of_node_put(np);
+
+ mpic_init(mpic);
+}
+
+static int __init p1023_rds_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ return of_flat_dt_is_compatible(root, "fsl,P1023RDS");
+
+}
+
+define_machine(p1023_rds) {
+ .name = "P1023 RDS",
+ .probe = p1023_rds_probe,
+ .setup_arch = mpc85xx_rds_setup_arch,
+ .init_IRQ = mpc85xx_rds_pic_init,
+ .get_irq = mpic_get_irq,
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+};
+
diff --git a/arch/powerpc/platforms/85xx/p2040_rdb.c b/arch/powerpc/platforms/85xx/p2040_rdb.c
new file mode 100644
index 00000000000..32b56ac73df
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/p2040_rdb.c
@@ -0,0 +1,88 @@
+/*
+ * P2040 RDB Setup
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/phy.h>
+
+#include <asm/system.h>
+#include <asm/time.h>
+#include <asm/machdep.h>
+#include <asm/pci-bridge.h>
+#include <mm/mmu_decl.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <asm/mpic.h>
+
+#include <linux/of_platform.h>
+#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
+#include <asm/ehv_pic.h>
+
+#include "corenet_ds.h"
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init p2040_rdb_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+#ifdef CONFIG_SMP
+ extern struct smp_ops_t smp_85xx_ops;
+#endif
+
+ if (of_flat_dt_is_compatible(root, "fsl,P2040RDB"))
+ return 1;
+
+ /* Check if we're running under the Freescale hypervisor */
+ if (of_flat_dt_is_compatible(root, "fsl,P2040RDB-hv")) {
+ ppc_md.init_IRQ = ehv_pic_init;
+ ppc_md.get_irq = ehv_pic_get_irq;
+ ppc_md.restart = fsl_hv_restart;
+ ppc_md.power_off = fsl_hv_halt;
+ ppc_md.halt = fsl_hv_halt;
+#ifdef CONFIG_SMP
+ /*
+ * Disable the timebase sync operations because we can't write
+ * to the timebase registers under the hypervisor.
+ */
+ smp_85xx_ops.give_timebase = NULL;
+ smp_85xx_ops.take_timebase = NULL;
+#endif
+ return 1;
+ }
+
+ return 0;
+}
+
+define_machine(p2040_rdb) {
+ .name = "P2040 RDB",
+ .probe = p2040_rdb_probe,
+ .setup_arch = corenet_ds_setup_arch,
+ .init_IRQ = corenet_ds_pic_init,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+ .get_irq = mpic_get_coreint_irq,
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+ .power_save = e500_idle,
+};
+
+machine_device_initcall(p2040_rdb, corenet_ds_publish_devices);
+
+#ifdef CONFIG_SWIOTLB
+machine_arch_initcall(p2040_rdb, swiotlb_setup_bus_notifier);
+#endif
diff --git a/arch/powerpc/platforms/85xx/p3041_ds.c b/arch/powerpc/platforms/85xx/p3041_ds.c
index 0ed52e18298..96d99a374dc 100644
--- a/arch/powerpc/platforms/85xx/p3041_ds.c
+++ b/arch/powerpc/platforms/85xx/p3041_ds.c
@@ -30,6 +30,7 @@
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
+#include <asm/ehv_pic.h>
#include "corenet_ds.h"
@@ -39,8 +40,32 @@
static int __init p3041_ds_probe(void)
{
unsigned long root = of_get_flat_dt_root();
+#ifdef CONFIG_SMP
+ extern struct smp_ops_t smp_85xx_ops;
+#endif
+
+ if (of_flat_dt_is_compatible(root, "fsl,P3041DS"))
+ return 1;
+
+ /* Check if we're running under the Freescale hypervisor */
+ if (of_flat_dt_is_compatible(root, "fsl,P3041DS-hv")) {
+ ppc_md.init_IRQ = ehv_pic_init;
+ ppc_md.get_irq = ehv_pic_get_irq;
+ ppc_md.restart = fsl_hv_restart;
+ ppc_md.power_off = fsl_hv_halt;
+ ppc_md.halt = fsl_hv_halt;
+#ifdef CONFIG_SMP
+ /*
+ * Disable the timebase sync operations because we can't write
+ * to the timebase registers under the hypervisor.
+ */
+ smp_85xx_ops.give_timebase = NULL;
+ smp_85xx_ops.take_timebase = NULL;
+#endif
+ return 1;
+ }
- return of_flat_dt_is_compatible(root, "fsl,P3041DS");
+ return 0;
}
define_machine(p3041_ds) {
@@ -55,6 +80,7 @@ define_machine(p3041_ds) {
.restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
+ .power_save = e500_idle,
};
machine_device_initcall(p3041_ds, corenet_ds_publish_devices);
diff --git a/arch/powerpc/platforms/85xx/p4080_ds.c b/arch/powerpc/platforms/85xx/p4080_ds.c
index 84170460497..d1b21d7663e 100644
--- a/arch/powerpc/platforms/85xx/p4080_ds.c
+++ b/arch/powerpc/platforms/85xx/p4080_ds.c
@@ -29,31 +29,42 @@
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
+#include <asm/ehv_pic.h>
#include "corenet_ds.h"
-#ifdef CONFIG_PCI
-static int primary_phb_addr;
-#endif
-
/*
* Called very early, device-tree isn't unflattened
*/
static int __init p4080_ds_probe(void)
{
unsigned long root = of_get_flat_dt_root();
+#ifdef CONFIG_SMP
+ extern struct smp_ops_t smp_85xx_ops;
+#endif
- if (of_flat_dt_is_compatible(root, "fsl,P4080DS")) {
-#ifdef CONFIG_PCI
- /* treat PCIe1 as primary,
- * shouldn't matter as we have no ISA on the board
- */
- primary_phb_addr = 0x0000;
+ if (of_flat_dt_is_compatible(root, "fsl,P4080DS"))
+ return 1;
+
+ /* Check if we're running under the Freescale hypervisor */
+ if (of_flat_dt_is_compatible(root, "fsl,P4080DS-hv")) {
+ ppc_md.init_IRQ = ehv_pic_init;
+ ppc_md.get_irq = ehv_pic_get_irq;
+ ppc_md.restart = fsl_hv_restart;
+ ppc_md.power_off = fsl_hv_halt;
+ ppc_md.halt = fsl_hv_halt;
+#ifdef CONFIG_SMP
+ /*
+ * Disable the timebase sync operations because we can't write
+ * to the timebase registers under the hypervisor.
+ */
+ smp_85xx_ops.give_timebase = NULL;
+ smp_85xx_ops.take_timebase = NULL;
#endif
return 1;
- } else {
- return 0;
}
+
+ return 0;
}
define_machine(p4080_ds) {
@@ -68,7 +79,10 @@ define_machine(p4080_ds) {
.restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
+ .power_save = e500_idle,
};
machine_device_initcall(p4080_ds, corenet_ds_publish_devices);
+#ifdef CONFIG_SWIOTLB
machine_arch_initcall(p4080_ds, swiotlb_setup_bus_notifier);
+#endif
diff --git a/arch/powerpc/platforms/85xx/p5020_ds.c b/arch/powerpc/platforms/85xx/p5020_ds.c
index 7467b712ee0..e8cba5004fd 100644
--- a/arch/powerpc/platforms/85xx/p5020_ds.c
+++ b/arch/powerpc/platforms/85xx/p5020_ds.c
@@ -30,6 +30,7 @@
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
+#include <asm/ehv_pic.h>
#include "corenet_ds.h"
@@ -39,8 +40,32 @@
static int __init p5020_ds_probe(void)
{
unsigned long root = of_get_flat_dt_root();
+#ifdef CONFIG_SMP
+ extern struct smp_ops_t smp_85xx_ops;
+#endif
+
+ if (of_flat_dt_is_compatible(root, "fsl,P5020DS"))
+ return 1;
+
+ /* Check if we're running under the Freescale hypervisor */
+ if (of_flat_dt_is_compatible(root, "fsl,P5020DS-hv")) {
+ ppc_md.init_IRQ = ehv_pic_init;
+ ppc_md.get_irq = ehv_pic_get_irq;
+ ppc_md.restart = fsl_hv_restart;
+ ppc_md.power_off = fsl_hv_halt;
+ ppc_md.halt = fsl_hv_halt;
+#ifdef CONFIG_SMP
+ /*
+ * Disable the timebase sync operations because we can't write
+ * to the timebase registers under the hypervisor.
+ */
+ smp_85xx_ops.give_timebase = NULL;
+ smp_85xx_ops.take_timebase = NULL;
+#endif
+ return 1;
+ }
- return of_flat_dt_is_compatible(root, "fsl,P5020DS");
+ return 0;
}
define_machine(p5020_ds) {
@@ -60,6 +85,11 @@ define_machine(p5020_ds) {
.restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
+#ifdef CONFIG_PPC64
+ .power_save = book3e_idle,
+#else
+ .power_save = e500_idle,
+#endif
};
machine_device_initcall(p5020_ds, corenet_ds_publish_devices);
diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c
index ecdd8c09e4e..d07dcb7f4ee 100644
--- a/arch/powerpc/platforms/85xx/sbc8548.c
+++ b/arch/powerpc/platforms/85xx/sbc8548.c
@@ -34,7 +34,7 @@
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/page.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/time.h>
#include <asm/io.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/85xx/sbc8560.c b/arch/powerpc/platforms/85xx/sbc8560.c
index d2dfd465fbf..09ced722175 100644
--- a/arch/powerpc/platforms/85xx/sbc8560.c
+++ b/arch/powerpc/platforms/85xx/sbc8560.c
@@ -285,7 +285,7 @@ static int __init sbc8560_bdrstcr_init(void)
printk(KERN_INFO "sbc8560: Found BRSTCR at i/o 0x%x\n", res.start);
- brstcr = ioremap(res.start, res.end - res.start);
+ brstcr = ioremap(res.start, resource_size(&res));
if(!brstcr)
printk(KERN_WARNING "sbc8560: ioremap of brstcr failed.\n");
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index d6a93a10c0f..5b9b901f644 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -2,7 +2,7 @@
* Author: Andy Fleming <afleming@freescale.com>
* Kumar Gala <galak@kernel.crashing.org>
*
- * Copyright 2006-2008 Freescale Semiconductor Inc.
+ * Copyright 2006-2008, 2011 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -111,14 +111,6 @@ smp_85xx_kick_cpu(int nr)
return 0;
}
-static void __init
-smp_85xx_setup_cpu(int cpu_nr)
-{
- mpic_setup_this_cpu();
- if (cpu_has_feature(CPU_FTR_DBELL))
- doorbell_setup_this_cpu();
-}
-
struct smp_ops_t smp_85xx_ops = {
.kick_cpu = smp_85xx_kick_cpu,
#ifdef CONFIG_KEXEC
@@ -224,24 +216,36 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
}
#endif /* CONFIG_KEXEC */
+static void __init
+smp_85xx_setup_cpu(int cpu_nr)
+{
+ if (smp_85xx_ops.probe == smp_mpic_probe)
+ mpic_setup_this_cpu();
+
+ if (cpu_has_feature(CPU_FTR_DBELL))
+ doorbell_setup_this_cpu();
+}
+
void __init mpc85xx_smp_init(void)
{
struct device_node *np;
+ smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;
+
np = of_find_node_by_type(NULL, "open-pic");
if (np) {
smp_85xx_ops.probe = smp_mpic_probe;
- smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;
smp_85xx_ops.message_pass = smp_mpic_message_pass;
}
if (cpu_has_feature(CPU_FTR_DBELL)) {
- smp_85xx_ops.message_pass = smp_muxed_ipi_message_pass;
+ /*
+ * If left NULL, .message_pass defaults to
+ * smp_muxed_ipi_message_pass
+ */
smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
}
- BUG_ON(!smp_85xx_ops.message_pass);
-
smp_ops = &smp_85xx_ops;
#ifdef CONFIG_KEXEC
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index 0125604d096..a9dc5e79512 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -123,7 +123,7 @@ static void xes_mpc85xx_fixups(void)
continue;
}
- l2_base = ioremap(r[0].start, r[0].end - r[0].start + 1);
+ l2_base = ioremap(r[0].start, resource_size(&r[0]));
xes_mpc85xx_configure_l2(l2_base);
}
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index a896511690c..74e018ef724 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -39,12 +39,19 @@
#include <sysdev/fsl_pci.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/simple_gpio.h>
+#include <asm/fsl_guts.h>
#include "mpc86xx.h"
static struct device_node *pixis_node;
static unsigned char *pixis_bdcfg0, *pixis_arch;
+/* DIU Pixel Clock bits of the CLKDVDR Global Utilities register */
+#define CLKDVDR_PXCKEN 0x80000000
+#define CLKDVDR_PXCKINV 0x10000000
+#define CLKDVDR_PXCKDLY 0x06000000
+#define CLKDVDR_PXCLK_MASK 0x001F0000
+
#ifdef CONFIG_SUSPEND
static irqreturn_t mpc8610_sw9_irq(int irq, void *data)
{
@@ -205,72 +212,54 @@ void mpc8610hpcd_set_monitor_port(int monitor_port)
bdcfg[monitor_port]);
}
+/**
+ * mpc8610hpcd_set_pixel_clock: program the DIU's clock
+ *
+ * @pixclock: the period, in picoseconds, of the clock
+ */
void mpc8610hpcd_set_pixel_clock(unsigned int pixclock)
{
- u32 __iomem *clkdvdr;
- u32 temp;
- /* variables for pixel clock calcs */
- ulong bestval, bestfreq, speed_ccb, minpixclock, maxpixclock;
- ulong pixval;
- long err;
- int i;
-
- clkdvdr = ioremap(get_immrbase() + 0xe0800, sizeof(u32));
- if (!clkdvdr) {
- printk(KERN_ERR "Err: can't map clock divider register!\n");
+ struct device_node *guts_np = NULL;
+ struct ccsr_guts_86xx __iomem *guts;
+ unsigned long freq;
+ u64 temp;
+ u32 pxclk;
+
+ /* Map the global utilities registers. */
+ guts_np = of_find_compatible_node(NULL, NULL, "fsl,mpc8610-guts");
+ if (!guts_np) {
+ pr_err("mpc8610hpcd: missing global utilties device node\n");
return;
}
- /* Pixel Clock configuration */
- speed_ccb = fsl_get_sys_freq();
-
- /* Calculate the pixel clock with the smallest error */
- /* calculate the following in steps to avoid overflow */
- pr_debug("DIU pixclock in ps - %d\n", pixclock);
- temp = 1000000000/pixclock;
- temp *= 1000;
- pixclock = temp;
- pr_debug("DIU pixclock freq - %u\n", pixclock);
-
- temp = pixclock * 5 / 100;
- pr_debug("deviation = %d\n", temp);
- minpixclock = pixclock - temp;
- maxpixclock = pixclock + temp;
- pr_debug("DIU minpixclock - %lu\n", minpixclock);
- pr_debug("DIU maxpixclock - %lu\n", maxpixclock);
- pixval = speed_ccb/pixclock;
- pr_debug("DIU pixval = %lu\n", pixval);
-
- err = 100000000;
- bestval = pixval;
- pr_debug("DIU bestval = %lu\n", bestval);
-
- bestfreq = 0;
- for (i = -1; i <= 1; i++) {
- temp = speed_ccb / ((pixval+i) + 1);
- pr_debug("DIU test pixval i= %d, pixval=%lu, temp freq. = %u\n",
- i, pixval, temp);
- if ((temp < minpixclock) || (temp > maxpixclock))
- pr_debug("DIU exceeds monitor range (%lu to %lu)\n",
- minpixclock, maxpixclock);
- else if (abs(temp - pixclock) < err) {
- pr_debug("Entered the else if block %d\n", i);
- err = abs(temp - pixclock);
- bestval = pixval+i;
- bestfreq = temp;
- }
+ guts = of_iomap(guts_np, 0);
+ of_node_put(guts_np);
+ if (!guts) {
+ pr_err("mpc8610hpcd: could not map global utilties device\n");
+ return;
}
- pr_debug("DIU chose = %lx\n", bestval);
- pr_debug("DIU error = %ld\n NomPixClk ", err);
- pr_debug("DIU: Best Freq = %lx\n", bestfreq);
- /* Modify PXCLK in GUTS CLKDVDR */
- pr_debug("DIU: Current value of CLKDVDR = 0x%08x\n", (*clkdvdr));
- temp = (*clkdvdr) & 0x2000FFFF;
- *clkdvdr = temp; /* turn off clock */
- *clkdvdr = temp | 0x80000000 | (((bestval) & 0x1F) << 16);
- pr_debug("DIU: Modified value of CLKDVDR = 0x%08x\n", (*clkdvdr));
- iounmap(clkdvdr);
+ /* Convert pixclock from a period to a frequency */
+ temp = 1000000000000ULL;
+ do_div(temp, pixclock);
+ freq = temp;
+
+ /*
+ * 'pxclk' is the ratio of the platform clock to the pixel clock.
+ * On the MPC8610, the value programmed into CLKDVDR is the ratio
+ * minus one. The valid range of values is 2-31.
+ */
+ pxclk = DIV_ROUND_CLOSEST(fsl_get_sys_freq(), freq) - 1;
+ pxclk = clamp_t(u32, pxclk, 2, 31);
+
+ /* Disable the pixel clock, and set it to non-inverted and no delay */
+ clrbits32(&guts->clkdvdr,
+ CLKDVDR_PXCKEN | CLKDVDR_PXCKDLY | CLKDVDR_PXCLK_MASK);
+
+ /* Enable the clock and set the pxclk */
+ setbits32(&guts->clkdvdr, CLKDVDR_PXCKEN | (pxclk << 16));
+
+ iounmap(guts);
}
ssize_t mpc8610hpcd_show_monitor_port(int monitor_port, char *buf)
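
Both set_pixel_clock() rewrites (p1022ds above and mpc8610_hpcd here) share the same arithmetic: the framebuffer layer supplies the pixel clock as a period in picoseconds, the code converts it to a frequency and derives the platform-clock/pixel-clock divider for CLKDVDR. The encoding differs: the P1022 field takes the divider directly (valid range 2-255), while the MPC8610 field is five bits wide and holds the divider minus one (valid range 2-31). A stand-alone sketch of that calculation; the 533 MHz platform clock, the local DIV_ROUND_CLOSEST, and the clamp_u32 helper are illustrative stand-ins, not values or macros taken from the patch.

#include <stdio.h>
#include <stdint.h>

/* Simplified local stand-in for the kernel's DIV_ROUND_CLOSEST() */
#define DIV_ROUND_CLOSEST(x, d)  (((x) + (d) / 2) / (d))
#define CLKDVDR_PXCKEN           0x80000000u

static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	uint64_t sys_freq = 533333333;  /* example platform clock, Hz */
	uint32_t pixclock_ps = 25000;   /* 25000 ps period -> 40 MHz pixel clock */
	uint64_t freq = 1000000000000ULL / pixclock_ps;

	/* P1022-style: divider programmed as-is into bits 16-23 */
	uint32_t pxclk = clamp_u32(DIV_ROUND_CLOSEST(sys_freq, freq), 2, 255);
	printf("P1022   CLKDVDR |= 0x%08x (divider %u)\n",
	       (unsigned)(CLKDVDR_PXCKEN | (pxclk << 16)), (unsigned)pxclk);

	/* MPC8610-style: 5-bit field holds the divider minus one */
	uint32_t pxclk8610 = clamp_u32(DIV_ROUND_CLOSEST(sys_freq, freq) - 1, 2, 31);
	printf("MPC8610 CLKDVDR |= 0x%08x (field %u)\n",
	       (unsigned)(CLKDVDR_PXCKEN | (pxclk8610 << 16)), (unsigned)pxclk8610);
	return 0;
}
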
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index f970ca2b180..d0af7fb2f34 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -78,6 +78,10 @@ config MPIC
bool
default n
+config PPC_EPAPR_HV_PIC
+ bool
+ default n
+
config MPIC_WEIRD
bool
default n
@@ -266,7 +270,7 @@ config TAU_AVERAGE
config QUICC_ENGINE
bool "Freescale QUICC Engine (QE) Support"
- depends on FSL_SOC
+ depends on FSL_SOC && PPC32
select PPC_LIB_RHEAP
select CRC32
help
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 2165b65876f..e06e39589a0 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -1,7 +1,6 @@
config PPC64
bool "64-bit kernel"
default n
- select PPC_HAVE_PMU_SUPPORT
help
This option selects whether a 32-bit or a 64-bit kernel
will be built.
@@ -69,6 +68,7 @@ choice
config PPC_BOOK3S_64
bool "Server processors"
select PPC_FPU
+ select PPC_HAVE_PMU_SUPPORT
config PPC_BOOK3E_64
bool "Embedded processors"
diff --git a/arch/powerpc/platforms/amigaone/Kconfig b/arch/powerpc/platforms/amigaone/Kconfig
index 02247671771..128de25cc28 100644
--- a/arch/powerpc/platforms/amigaone/Kconfig
+++ b/arch/powerpc/platforms/amigaone/Kconfig
@@ -8,7 +8,7 @@ config AMIGAONE
select NOT_COHERENT_CACHE
select CHECK_CACHE_COHERENCY
select DEFAULT_UIMAGE
- select PCSPKR_PLATFORM
+ select HAVE_PCSPKR_PLATFORM
help
Select AmigaOne for the following machines:
- AmigaOne SE/Teron CX (G3 only)
diff --git a/arch/powerpc/platforms/cell/celleb_scc_epci.c b/arch/powerpc/platforms/cell/celleb_scc_epci.c
index 05b0db3ef63..844c0facb4f 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_epci.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_epci.c
@@ -393,19 +393,19 @@ static int __init celleb_setup_epci(struct device_node *node,
if (of_address_to_resource(node, 0, &r))
goto error;
- hose->cfg_addr = ioremap(r.start, (r.end - r.start + 1));
+ hose->cfg_addr = ioremap(r.start, resource_size(&r));
if (!hose->cfg_addr)
goto error;
pr_debug("EPCI: cfg_addr map 0x%016llx->0x%016lx + 0x%016llx\n",
- r.start, (unsigned long)hose->cfg_addr, (r.end - r.start + 1));
+ r.start, (unsigned long)hose->cfg_addr, resource_size(&r));
if (of_address_to_resource(node, 2, &r))
goto error;
- hose->cfg_data = ioremap(r.start, (r.end - r.start + 1));
+ hose->cfg_data = ioremap(r.start, resource_size(&r));
if (!hose->cfg_data)
goto error;
pr_debug("EPCI: cfg_data map 0x%016llx->0x%016lx + 0x%016llx\n",
- r.start, (unsigned long)hose->cfg_data, (r.end - r.start + 1));
+ r.start, (unsigned long)hose->cfg_data, resource_size(&r));
hose->ops = &celleb_epci_ops;
celleb_epci_init(hose);
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
index a881bbee8de..ae790ac4a58 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
@@ -494,7 +494,7 @@ static __init int celleb_setup_pciex(struct device_node *node,
pr_err("PCIEXC:Failed to get config resource.\n");
return 1;
}
- phb->cfg_addr = ioremap(r.start, r.end - r.start + 1);
+ phb->cfg_addr = ioremap(r.start, resource_size(&r));
if (!phb->cfg_addr) {
pr_err("PCIEXC:Failed to remap SMMIO region.\n");
return 1;
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
index d809836bcf5..7f92096fe96 100644
--- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c
+++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
@@ -24,7 +24,7 @@
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/machdep.h>
#include <asm/spu.h>
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index dbb641ea90d..f2e1dfe4bf3 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -28,7 +28,7 @@
#include <linux/cpu.h>
#include <asm/ptrace.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index f465d474ad9..4e5c91489c0 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -222,7 +222,7 @@ static int spu_map_resource(struct spu *spu, int nr,
return ret;
if (phys)
*phys = resource.start;
- len = resource.end - resource.start + 1;
+ len = resource_size(&resource);
*virt = ioremap(resource.start, len);
if (!*virt)
return -EINVAL;
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 0c87bcd2452..bf4d41d8fa1 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -24,7 +24,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include "spufs.h"
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 3c7c3f82d84..fb59c46e9e9 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -1850,9 +1850,16 @@ out:
return ret;
}
-static int spufs_mfc_fsync(struct file *file, int datasync)
+static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
- return spufs_mfc_flush(file, NULL);
+ struct inode *inode = file->f_path.dentry->d_inode;
+ int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ if (!err) {
+ mutex_lock(&inode->i_mutex);
+ err = spufs_mfc_flush(file, NULL);
+ mutex_unlock(&inode->i_mutex);
+ }
+ return err;
}
static int spufs_mfc_fasync(int fd, struct file *file, int on)
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 856e9c39806..e481f6b9a78 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -611,15 +611,14 @@ out:
static struct file_system_type spufs_type;
-long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode,
- struct file *filp)
+long spufs_create(struct path *path, struct dentry *dentry,
+ unsigned int flags, mode_t mode, struct file *filp)
{
- struct dentry *dentry;
int ret;
ret = -EINVAL;
/* check if we are on spufs */
- if (nd->path.dentry->d_sb->s_type != &spufs_type)
+ if (path->dentry->d_sb->s_type != &spufs_type)
goto out;
/* don't accept undefined flags */
@@ -627,33 +626,27 @@ long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode,
goto out;
/* only threads can be underneath a gang */
- if (nd->path.dentry != nd->path.dentry->d_sb->s_root) {
+ if (path->dentry != path->dentry->d_sb->s_root) {
if ((flags & SPU_CREATE_GANG) ||
- !SPUFS_I(nd->path.dentry->d_inode)->i_gang)
+ !SPUFS_I(path->dentry->d_inode)->i_gang)
goto out;
}
- dentry = lookup_create(nd, 1);
- ret = PTR_ERR(dentry);
- if (IS_ERR(dentry))
- goto out_dir;
-
mode &= ~current_umask();
if (flags & SPU_CREATE_GANG)
- ret = spufs_create_gang(nd->path.dentry->d_inode,
- dentry, nd->path.mnt, mode);
+ ret = spufs_create_gang(path->dentry->d_inode,
+ dentry, path->mnt, mode);
else
- ret = spufs_create_context(nd->path.dentry->d_inode,
- dentry, nd->path.mnt, flags, mode,
+ ret = spufs_create_context(path->dentry->d_inode,
+ dentry, path->mnt, flags, mode,
filp);
if (ret >= 0)
- fsnotify_mkdir(nd->path.dentry->d_inode, dentry);
+ fsnotify_mkdir(path->dentry->d_inode, dentry);
return ret;
-out_dir:
- mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
out:
+ mutex_unlock(&path->dentry->d_inode->i_mutex);
return ret;
}
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index c448bac6551..099245f230b 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -248,7 +248,7 @@ extern const struct spufs_tree_descr spufs_dir_debug_contents[];
/* system call implementation */
extern struct spufs_calls spufs_calls;
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status);
-long spufs_create(struct nameidata *nd, unsigned int flags,
+long spufs_create(struct path *nd, struct dentry *dentry, unsigned int flags,
mode_t mode, struct file *filp);
/* ELF coredump callbacks for writing SPU ELF notes */
extern int spufs_coredump_extra_notes_size(void);
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index a3d2ce54ea2..609e016e92d 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -62,21 +62,17 @@ out:
static long do_spu_create(const char __user *pathname, unsigned int flags,
mode_t mode, struct file *neighbor)
{
- char *tmp;
+ struct path path;
+ struct dentry *dentry;
int ret;
- tmp = getname(pathname);
- ret = PTR_ERR(tmp);
- if (!IS_ERR(tmp)) {
- struct nameidata nd;
-
- ret = kern_path_parent(tmp, &nd);
- if (!ret) {
- nd.flags |= LOOKUP_OPEN | LOOKUP_CREATE;
- ret = spufs_create(&nd, flags, mode, neighbor);
- path_put(&nd.path);
- }
- putname(tmp);
+ dentry = user_path_create(AT_FDCWD, pathname, &path, 1);
+ ret = PTR_ERR(dentry);
+ if (!IS_ERR(dentry)) {
+ ret = spufs_create(&path, dentry, flags, mode, neighbor);
+ mutex_unlock(&path.dentry->d_inode->i_mutex);
+ dput(dentry);
+ path_put(&path);
}
return ret;
diff --git a/arch/powerpc/platforms/chrp/Kconfig b/arch/powerpc/platforms/chrp/Kconfig
index bc0b0efdc5f..d3cdab582c5 100644
--- a/arch/powerpc/platforms/chrp/Kconfig
+++ b/arch/powerpc/platforms/chrp/Kconfig
@@ -1,6 +1,7 @@
config PPC_CHRP
bool "Common Hardware Reference Platform (CHRP) based machines"
depends on 6xx
+ select HAVE_PCSPKR_PLATFORM
select MPIC
select PPC_I8259
select PPC_INDIRECT_PCI
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index 8f67a394b2d..83285c5a204 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -142,7 +142,7 @@ hydra_init(void)
return 0;
}
of_node_put(np);
- Hydra = ioremap(r.start, r.end-r.start);
+ Hydra = ioremap(r.start, resource_size(&r));
printk("Hydra Mac I/O at %llx\n", (unsigned long long)r.start);
printk("Hydra Feature_Control was %x",
in_le32(&Hydra->Feature_Control));
@@ -199,7 +199,7 @@ static void __init setup_peg2(struct pci_controller *hose, struct device_node *d
printk ("RTAS supporting Pegasos OF not found, please upgrade"
" your firmware\n");
}
- ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
+ pci_add_flags(PCI_REASSIGN_ALL_BUS);
/* keep the reference to the root node */
}
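
Both hunks in this file are part of the tree-wide resource_size() cleanup; note that the old Hydra mapping even dropped the final byte by omitting the "+ 1". For illustration, the helper simply wraps the inclusive-end arithmetic from <linux/ioport.h>:

#include <linux/ioport.h>
#include <linux/io.h>

/* resource_size(r) == r->end - r->start + 1, so the off-by-one in the
 * old "r.end - r.start" form cannot recur.
 */
static void __iomem *example_map_whole(struct resource *r)
{
        return ioremap(r->start, resource_size(r));
}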
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c
index a800122e4dd..feab30bbae2 100644
--- a/arch/powerpc/platforms/chrp/smp.c
+++ b/arch/powerpc/platforms/chrp/smp.c
@@ -18,7 +18,7 @@
#include <linux/spinlock.h>
#include <asm/ptrace.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
diff --git a/arch/powerpc/platforms/iseries/exception.S b/arch/powerpc/platforms/iseries/exception.S
index 29c02f36b32..f519ee17ff7 100644
--- a/arch/powerpc/platforms/iseries/exception.S
+++ b/arch/powerpc/platforms/iseries/exception.S
@@ -167,7 +167,7 @@ BEGIN_FTR_SECTION
std r12,PACA_EXGEN+EX_R13(r13)
EXCEPTION_PROLOG_ISERIES_1
FTR_SECTION_ELSE
- EXCEPTION_PROLOG_1(PACA_EXGEN)
+ EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0)
EXCEPTION_PROLOG_ISERIES_1
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
b data_access_common
diff --git a/arch/powerpc/platforms/iseries/exception.h b/arch/powerpc/platforms/iseries/exception.h
index bae3fba5ad8..50271b550a9 100644
--- a/arch/powerpc/platforms/iseries/exception.h
+++ b/arch/powerpc/platforms/iseries/exception.h
@@ -39,7 +39,7 @@
label##_iSeries: \
HMT_MEDIUM; \
mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
- EXCEPTION_PROLOG_1(area); \
+ EXCEPTION_PROLOG_1(area, NOTEST, 0); \
EXCEPTION_PROLOG_ISERIES_1; \
b label##_common
@@ -48,7 +48,7 @@ label##_iSeries: \
label##_iSeries: \
HMT_MEDIUM; \
mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
- EXCEPTION_PROLOG_1(PACA_EXGEN); \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0); \
lbz r10,PACASOFTIRQEN(r13); \
cmpwi 0,r10,0; \
beq- label##_iSeries_masked; \
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
index e3265adde5d..8bda9be06fa 100644
--- a/arch/powerpc/platforms/iseries/smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -29,7 +29,7 @@
#include <linux/cpu.h>
#include <asm/ptrace.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -75,7 +75,7 @@ static void __devinit smp_iSeries_setup_cpu(int nr)
}
static struct smp_ops_t iSeries_smp_ops = {
- .message_pass = smp_muxed_ipi_message_pass,
+ .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
.cause_ipi = smp_iSeries_cause_ipi,
.probe = smp_iSeries_probe,
.kick_cpu = smp_iSeries_kick_cpu,
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index fe34c3d9bb7..5b3388b9f91 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -338,35 +338,16 @@ define_machine(maple) {
#ifdef CONFIG_EDAC
/*
* Register a platform device for CPC925 memory controller on
- * Motorola ATCA-6101 blade.
+ * all boards with U3H (CPC925) bridge.
*/
-#define MAPLE_CPC925_MODEL "Motorola,ATCA-6101"
static int __init maple_cpc925_edac_setup(void)
{
struct platform_device *pdev;
struct device_node *np = NULL;
struct resource r;
- const unsigned char *model;
int ret;
-
- np = of_find_node_by_path("/");
- if (!np) {
- printk(KERN_ERR "%s: Unable to get root node\n", __func__);
- return -ENODEV;
- }
-
- model = (const unsigned char *)of_get_property(np, "model", NULL);
- if (!model) {
- printk(KERN_ERR "%s: Unabel to get model info\n", __func__);
- of_node_put(np);
- return -ENODEV;
- }
-
- ret = strcmp(model, MAPLE_CPC925_MODEL);
- of_node_put(np);
-
- if (ret != 0)
- return 0;
+ volatile void __iomem *mem;
+ u32 rev;
np = of_find_node_by_type(NULL, "memory-controller");
if (!np) {
@@ -384,6 +365,22 @@ static int __init maple_cpc925_edac_setup(void)
return -ENODEV;
}
+ mem = ioremap(r.start, resource_size(&r));
+ if (!mem) {
+ printk(KERN_ERR "%s: Unable to map memory-controller memory\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ rev = __raw_readl(mem);
+ iounmap(mem);
+
+ if (rev < 0x34 || rev > 0x3f) { /* U3H */
+ printk(KERN_ERR "%s: Non-CPC925(U3H) bridge revision: %02x\n",
+ __func__, rev);
+ return 0;
+ }
+
pdev = platform_device_register_simple("cpc925_edac", 0, &r, 1);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c
index 321a9b3a2d0..756123bf06a 100644
--- a/arch/powerpc/platforms/pasemi/dma_lib.c
+++ b/arch/powerpc/platforms/pasemi/dma_lib.c
@@ -576,7 +576,7 @@ int pasemi_dma_init(void)
res.start = 0xfd800000;
res.end = res.start + 0x1000;
}
- dma_status = __ioremap(res.start, res.end-res.start, 0);
+ dma_status = __ioremap(res.start, resource_size(&res), 0);
pci_dev_put(iob_pdev);
for (i = 0; i < MAX_TXCH; i++)
diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c
index d679964ae2a..c2f3e861f5e 100644
--- a/arch/powerpc/platforms/powermac/backlight.c
+++ b/arch/powerpc/platforms/powermac/backlight.c
@@ -12,7 +12,7 @@
#include <linux/backlight.h>
#include <linux/adb.h>
#include <linux/pmu.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/prom.h>
#include <asm/backlight.h>
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index b1cdcf94aa8..695443bfdb0 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -580,10 +580,10 @@ int __init pmac_nvram_init(void)
/* Try to obtain an address */
if (of_address_to_resource(dp, 0, &r1) == 0) {
nvram_naddrs = 1;
- s1 = (r1.end - r1.start) + 1;
+ s1 = resource_size(&r1);
if (of_address_to_resource(dp, 1, &r2) == 0) {
nvram_naddrs = 2;
- s2 = (r2.end - r2.start) + 1;
+ s2 = resource_size(&r2);
}
}
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index f33e08d573c..5cc83851ad0 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
+#include <linux/of_pci.h>
#include <asm/sections.h>
#include <asm/io.h>
@@ -235,7 +236,7 @@ static int chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
if (offset >= 0x100)
return PCIBIOS_BAD_REGISTER_NUMBER;
- np = pci_busdev_to_OF_node(bus, devfn);
+ np = of_pci_find_child_device(bus->dev.of_node, devfn);
if (np == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -731,7 +732,7 @@ static void __init setup_bandit(struct pci_controller *hose,
static int __init setup_uninorth(struct pci_controller *hose,
struct resource *addr)
{
- ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
+ pci_add_flags(PCI_REASSIGN_ALL_BUS);
has_uninorth = 1;
hose->ops = &macrisc_pci_ops;
hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000);
@@ -838,8 +839,7 @@ static void __init setup_u3_ht(struct pci_controller* hose)
* into cfg_addr
*/
hose->cfg_data = ioremap(cfg_res.start, 0x02000000);
- hose->cfg_addr = ioremap(self_res.start,
- self_res.end - self_res.start + 1);
+ hose->cfg_addr = ioremap(self_res.start, resource_size(&self_res));
/*
* /ht node doesn't expose a "ranges" property, we read the register
@@ -998,7 +998,7 @@ void __init pmac_pci_init(void)
struct device_node *np, *root;
struct device_node *ht = NULL;
- ppc_pci_set_flags(PPC_PCI_CAN_SKIP_ISA_ALIGN);
+ pci_set_flags(PCI_CAN_SKIP_ISA_ALIGN);
root = of_find_node_by_path("/");
if (root == NULL) {
@@ -1057,7 +1057,7 @@ void __init pmac_pci_init(void)
* some offset between bus number and domains for now when we
* assign all busses should help for now
*/
- if (ppc_pci_has_flag(PPC_PCI_REASSIGN_ALL_BUS))
+ if (pci_has_flag(PCI_REASSIGN_ALL_BUS))
pcibios_assign_bus_offset = 0x10;
#endif
}
@@ -1323,8 +1323,7 @@ static void fixup_u4_pcie(struct pci_dev* dev)
*/
if (r->start >= 0xf0000000 && r->start < 0xf3000000)
continue;
- if (!region || (r->end - r->start) >
- (region->end - region->start))
+ if (!region || resource_size(r) > resource_size(region))
region = r;
}
/* Nothing found, bail */
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index aa45281bd29..a028f08309d 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -355,9 +355,6 @@ static int initializing = 1;
static int pmac_late_init(void)
{
initializing = 0;
- /* this is udbg (which is __init) and we can later use it during
- * cpu hotplug (in smp_core99_kick_cpu) */
- ppc_md.progress = NULL;
return 0;
}
machine_late_initcall(powermac, pmac_late_init);
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index db092d7c4c5..9a521dc8e48 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -35,7 +35,7 @@
#include <linux/compiler.h>
#include <asm/ptrace.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/code-patching.h>
#include <asm/irq.h>
#include <asm/page.h>
@@ -447,7 +447,7 @@ void __init smp_psurge_give_timebase(void)
/* PowerSurge-style Macs */
struct smp_ops_t psurge_smp_ops = {
- .message_pass = smp_muxed_ipi_message_pass,
+ .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
.cause_ipi = smp_psurge_cause_ipi,
.probe = smp_psurge_probe,
.kick_cpu = smp_psurge_kick_cpu,
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
index 48211ca134c..11c9fce43b5 100644
--- a/arch/powerpc/platforms/powermac/time.c
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -274,7 +274,7 @@ int __init via_calibrate_decr(void)
return 0;
}
of_node_put(vias);
- via = ioremap(rsrc.start, rsrc.end - rsrc.start + 1);
+ via = ioremap(rsrc.start, resource_size(&rsrc));
if (via == NULL) {
printk(KERN_ERR "Failed to map VIA for timer calibration !\n");
return 0;
diff --git a/arch/powerpc/platforms/prep/Kconfig b/arch/powerpc/platforms/prep/Kconfig
index bf8330ef2e7..f0536c7cda9 100644
--- a/arch/powerpc/platforms/prep/Kconfig
+++ b/arch/powerpc/platforms/prep/Kconfig
@@ -1,6 +1,7 @@
config PPC_PREP
bool "PowerPC Reference Platform (PReP) based machines"
depends on 6xx && BROKEN
+ select HAVE_PCSPKR_PLATFORM
select MPIC
select PPC_I8259
select PPC_INDIRECT_PCI
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 71af4c5d6c0..05cf4769b88 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -1,6 +1,7 @@
config PPC_PSERIES
depends on PPC64 && PPC_BOOK3S
bool "IBM pSeries & new (POWER5-based) iSeries"
+ select HAVE_PCSPKR_PLATFORM
select MPIC
select PCI_MSI
select PPC_XICS
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 57ceb92b228..e9be25bc571 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -262,12 +262,11 @@ int dlpar_attach_node(struct device_node *dn)
if (!dn->parent)
return -ENOMEM;
- rc = blocking_notifier_call_chain(&pSeries_reconfig_chain,
- PSERIES_RECONFIG_ADD, dn);
- if (rc == NOTIFY_BAD) {
+ rc = pSeries_reconfig_notify(PSERIES_RECONFIG_ADD, dn);
+ if (rc) {
printk(KERN_ERR "Failed to add device node %s\n",
dn->full_name);
- return -ENOMEM; /* For now, safe to assume kmalloc failure */
+ return rc;
}
of_attach_node(dn);
@@ -297,8 +296,7 @@ int dlpar_detach_node(struct device_node *dn)
remove_proc_entry(dn->pde->name, parent->pde);
#endif
- blocking_notifier_call_chain(&pSeries_reconfig_chain,
- PSERIES_RECONFIG_REMOVE, dn);
+ pSeries_reconfig_notify(PSERIES_RECONFIG_REMOVE, dn);
of_detach_node(dn);
of_node_put(dn); /* Must decrement the refcount */
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 46b55cf563e..ada6e07532e 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -31,7 +31,7 @@
#include <linux/spinlock.h>
#include <linux/of.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
index 8ed0d2d0e1b..fc5ae767989 100644
--- a/arch/powerpc/platforms/pseries/eeh_cache.c
+++ b/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -25,7 +25,7 @@
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 46f13a3c5d0..bc0288501f1 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -330,21 +330,17 @@ static void pseries_remove_processor(struct device_node *np)
static int pseries_smp_notifier(struct notifier_block *nb,
unsigned long action, void *node)
{
- int err = NOTIFY_OK;
+ int err = 0;
switch (action) {
case PSERIES_RECONFIG_ADD:
- if (pseries_add_processor(node))
- err = NOTIFY_BAD;
+ err = pseries_add_processor(node);
break;
case PSERIES_RECONFIG_REMOVE:
pseries_remove_processor(node);
break;
- default:
- err = NOTIFY_DONE;
- break;
}
- return err;
+ return notifier_from_errno(err);
}
static struct notifier_block pseries_smp_nb = {
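
The handler now returns notifier_from_errno(), so callers of the reconfig chain (via the pSeries_reconfig_notify() wrapper added later in this series) can recover the real errno with notifier_to_errno(). A hedged sketch of the callback shape, with an artificial failure purely for illustration:

#include <linux/notifier.h>
#include <linux/errno.h>
#include <asm/pSeries_reconfig.h>

/* Return plain errnos from the work functions and let notifier_from_errno()
 * encode them (0 -> NOTIFY_OK, -Exxx -> stop flag plus encoded error).
 */
static int example_reconfig_notifier(struct notifier_block *nb,
                                     unsigned long action, void *node)
{
        int err = 0;

        switch (action) {
        case PSERIES_RECONFIG_ADD:
                err = -ENOMEM;          /* pretend the add failed */
                break;
        }

        return notifier_from_errno(err);
}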
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 33867ec4a23..11d8e0544ac 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -12,6 +12,8 @@
#include <linux/of.h>
#include <linux/memblock.h>
#include <linux/vmalloc.h>
+#include <linux/memory.h>
+
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/pSeries_reconfig.h>
@@ -20,24 +22,25 @@
static unsigned long get_memblock_size(void)
{
struct device_node *np;
- unsigned int memblock_size = 0;
+ unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
+ struct resource r;
np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
if (np) {
- const unsigned long *size;
+ const __be64 *size;
size = of_get_property(np, "ibm,lmb-size", NULL);
- memblock_size = size ? *size : 0;
-
+ if (size)
+ memblock_size = be64_to_cpup(size);
of_node_put(np);
- } else {
+ } else if (machine_is(pseries)) {
+ /* This fallback really only applies to pseries */
unsigned int memzero_size = 0;
- const unsigned int *regs;
np = of_find_node_by_path("/memory@0");
if (np) {
- regs = of_get_property(np, "reg", NULL);
- memzero_size = regs ? regs[3] : 0;
+ if (!of_address_to_resource(np, 0, &r))
+ memzero_size = resource_size(&r);
of_node_put(np);
}
@@ -50,16 +53,21 @@ static unsigned long get_memblock_size(void)
sprintf(buf, "/memory@%x", memzero_size);
np = of_find_node_by_path(buf);
if (np) {
- regs = of_get_property(np, "reg", NULL);
- memblock_size = regs ? regs[3] : 0;
+ if (!of_address_to_resource(np, 0, &r))
+ memblock_size = resource_size(&r);
of_node_put(np);
}
}
}
-
return memblock_size;
}
+/* WARNING: This is going to override the generic definition whenever
+ * pseries is built-in regardless of what platform is active at boot
+ * time. This is fine for now as this is the only "option" and it
+ * should work everywhere. If not, we'll have to turn this into a
+ * ppc_md. callback
+ */
unsigned long memory_block_size_bytes(void)
{
return get_memblock_size();
@@ -197,27 +205,21 @@ static int pseries_drconf_memory(unsigned long *base, unsigned int action)
static int pseries_memory_notifier(struct notifier_block *nb,
unsigned long action, void *node)
{
- int err = NOTIFY_OK;
+ int err = 0;
switch (action) {
case PSERIES_RECONFIG_ADD:
- if (pseries_add_memory(node))
- err = NOTIFY_BAD;
+ err = pseries_add_memory(node);
break;
case PSERIES_RECONFIG_REMOVE:
- if (pseries_remove_memory(node))
- err = NOTIFY_BAD;
+ err = pseries_remove_memory(node);
break;
case PSERIES_DRCONF_MEM_ADD:
case PSERIES_DRCONF_MEM_REMOVE:
- if (pseries_drconf_memory(node, action))
- err = NOTIFY_BAD;
- break;
- default:
- err = NOTIFY_DONE;
+ err = pseries_drconf_memory(node, action);
break;
}
- return err;
+ return notifier_from_errno(err);
}
static struct notifier_block pseries_mem_nb = {
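
The property read above now treats "ibm,lmb-size" as big-endian 64-bit data instead of dereferencing it as an unsigned long. A minimal sketch of that endian-safe read (the helper name is illustrative):

#include <linux/of.h>
#include <asm/byteorder.h>

/* Device-tree cells are big-endian; convert explicitly rather than
 * relying on the host's unsigned long layout.
 */
static u64 example_read_lmb_size(struct device_node *np, u64 fallback)
{
        const __be64 *prop = of_get_property(np, "ibm,lmb-size", NULL);

        return prop ? be64_to_cpup(prop) : fallback;
}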
diff --git a/arch/powerpc/platforms/pseries/hvconsole.c b/arch/powerpc/platforms/pseries/hvconsole.c
index 3f6a89b0981..041e87ca189 100644
--- a/arch/powerpc/platforms/pseries/hvconsole.c
+++ b/arch/powerpc/platforms/pseries/hvconsole.c
@@ -73,7 +73,7 @@ int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
if (ret == H_SUCCESS)
return count;
if (ret == H_BUSY)
- return 0;
+ return -EAGAIN;
return -EIO;
}
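
Returning -EAGAIN instead of 0 on H_BUSY lets callers tell "hypervisor busy, retry" apart from a hard failure. An illustrative (not in-tree) retry loop built on that contract:

#include <asm/hvconsole.h>
#include <asm/processor.h>

static int example_put_all(uint32_t vtermno, const char *buf, int count)
{
        int sent = 0, rc;

        while (sent < count) {
                rc = hvc_put_chars(vtermno, buf + sent, count - sent);
                if (rc == -EAGAIN) {
                        cpu_relax();    /* hypervisor busy, try again */
                        continue;
                }
                if (rc < 0)
                        return rc;      /* e.g. -EIO: give up */
                sent += rc;
        }
        return sent;
}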
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 39e6e0a7b2f..f7205d344ef 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -52,197 +52,6 @@ EXPORT_SYMBOL(plpar_hcall_norets);
extern void pSeries_find_serial_port(void);
-
-static int vtermno; /* virtual terminal# for udbg */
-
-#define __ALIGNED__ __attribute__((__aligned__(sizeof(long))))
-static void udbg_hvsi_putc(char c)
-{
- /* packet's seqno isn't used anyways */
- uint8_t packet[] __ALIGNED__ = { 0xff, 5, 0, 0, c };
- int rc;
-
- if (c == '\n')
- udbg_hvsi_putc('\r');
-
- do {
- rc = plpar_put_term_char(vtermno, sizeof(packet), packet);
- } while (rc == H_BUSY);
-}
-
-static long hvsi_udbg_buf_len;
-static uint8_t hvsi_udbg_buf[256];
-
-static int udbg_hvsi_getc_poll(void)
-{
- unsigned char ch;
- int rc, i;
-
- if (hvsi_udbg_buf_len == 0) {
- rc = plpar_get_term_char(vtermno, &hvsi_udbg_buf_len, hvsi_udbg_buf);
- if (rc != H_SUCCESS || hvsi_udbg_buf[0] != 0xff) {
- /* bad read or non-data packet */
- hvsi_udbg_buf_len = 0;
- } else {
- /* remove the packet header */
- for (i = 4; i < hvsi_udbg_buf_len; i++)
- hvsi_udbg_buf[i-4] = hvsi_udbg_buf[i];
- hvsi_udbg_buf_len -= 4;
- }
- }
-
- if (hvsi_udbg_buf_len <= 0 || hvsi_udbg_buf_len > 256) {
- /* no data ready */
- hvsi_udbg_buf_len = 0;
- return -1;
- }
-
- ch = hvsi_udbg_buf[0];
- /* shift remaining data down */
- for (i = 1; i < hvsi_udbg_buf_len; i++) {
- hvsi_udbg_buf[i-1] = hvsi_udbg_buf[i];
- }
- hvsi_udbg_buf_len--;
-
- return ch;
-}
-
-static int udbg_hvsi_getc(void)
-{
- int ch;
- for (;;) {
- ch = udbg_hvsi_getc_poll();
- if (ch == -1) {
- /* This shouldn't be needed...but... */
- volatile unsigned long delay;
- for (delay=0; delay < 2000000; delay++)
- ;
- } else {
- return ch;
- }
- }
-}
-
-static void udbg_putcLP(char c)
-{
- char buf[16];
- unsigned long rc;
-
- if (c == '\n')
- udbg_putcLP('\r');
-
- buf[0] = c;
- do {
- rc = plpar_put_term_char(vtermno, 1, buf);
- } while(rc == H_BUSY);
-}
-
-/* Buffered chars getc */
-static long inbuflen;
-static long inbuf[2]; /* must be 2 longs */
-
-static int udbg_getc_pollLP(void)
-{
- /* The interface is tricky because it may return up to 16 chars.
- * We save them statically for future calls to udbg_getc().
- */
- char ch, *buf = (char *)inbuf;
- int i;
- long rc;
- if (inbuflen == 0) {
- /* get some more chars. */
- inbuflen = 0;
- rc = plpar_get_term_char(vtermno, &inbuflen, buf);
- if (rc != H_SUCCESS)
- inbuflen = 0; /* otherwise inbuflen is garbage */
- }
- if (inbuflen <= 0 || inbuflen > 16) {
- /* Catch error case as well as other oddities (corruption) */
- inbuflen = 0;
- return -1;
- }
- ch = buf[0];
- for (i = 1; i < inbuflen; i++) /* shuffle them down. */
- buf[i-1] = buf[i];
- inbuflen--;
- return ch;
-}
-
-static int udbg_getcLP(void)
-{
- int ch;
- for (;;) {
- ch = udbg_getc_pollLP();
- if (ch == -1) {
- /* This shouldn't be needed...but... */
- volatile unsigned long delay;
- for (delay=0; delay < 2000000; delay++)
- ;
- } else {
- return ch;
- }
- }
-}
-
-/* call this from early_init() for a working debug console on
- * vterm capable LPAR machines
- */
-void __init udbg_init_debug_lpar(void)
-{
- vtermno = 0;
- udbg_putc = udbg_putcLP;
- udbg_getc = udbg_getcLP;
- udbg_getc_poll = udbg_getc_pollLP;
-
- register_early_udbg_console();
-}
-
-/* returns 0 if couldn't find or use /chosen/stdout as console */
-void __init find_udbg_vterm(void)
-{
- struct device_node *stdout_node;
- const u32 *termno;
- const char *name;
-
- /* find the boot console from /chosen/stdout */
- if (!of_chosen)
- return;
- name = of_get_property(of_chosen, "linux,stdout-path", NULL);
- if (name == NULL)
- return;
- stdout_node = of_find_node_by_path(name);
- if (!stdout_node)
- return;
- name = of_get_property(stdout_node, "name", NULL);
- if (!name) {
- printk(KERN_WARNING "stdout node missing 'name' property!\n");
- goto out;
- }
-
- /* Check if it's a virtual terminal */
- if (strncmp(name, "vty", 3) != 0)
- goto out;
- termno = of_get_property(stdout_node, "reg", NULL);
- if (termno == NULL)
- goto out;
- vtermno = termno[0];
-
- if (of_device_is_compatible(stdout_node, "hvterm1")) {
- udbg_putc = udbg_putcLP;
- udbg_getc = udbg_getcLP;
- udbg_getc_poll = udbg_getc_pollLP;
- add_preferred_console("hvc", termno[0] & 0xff, NULL);
- } else if (of_device_is_compatible(stdout_node, "hvterm-protocol")) {
- vtermno = termno[0];
- udbg_putc = udbg_hvsi_putc;
- udbg_getc = udbg_hvsi_getc;
- udbg_getc_poll = udbg_hvsi_getc_poll;
- add_preferred_console("hvsi", termno[0] & 0xff, NULL);
- }
-out:
- of_node_put(stdout_node);
-}
-
void vpa_init(int cpu)
{
int hwcpu = get_hard_smp_processor_id(cpu);
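
All of the LPAR/HVSI udbg plumbing removed here migrates to the HVC VIO driver (see the pseries.h and setup.c hunks below). For reference, a minimal sketch of what a udbg backend registration looks like; the function names are illustrative, not the hvc_vio code:

#include <asm/udbg.h>

static void example_udbg_putc(char c)
{
        /* emit one character to the low-level console here */
}

static void __init example_udbg_init(void)
{
        udbg_putc = example_udbg_putc;
        register_early_udbg_console();
}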
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index e9f6d2859c3..24c7162f11d 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -47,7 +47,8 @@ extern void pSeries_final_fixup(void);
/* Poweron flag used for enabling auto ups restart */
extern unsigned long rtas_poweron_auto;
-extern void find_udbg_vterm(void);
+/* Provided by HVC VIO */
+extern void hvc_vio_init_early(void);
/* Dynamic logical Partitioning/Mobility */
extern void dlpar_free_cc_nodes(struct device_node *);
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 1de2cbb9230..168651acdd8 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -97,7 +97,7 @@ static struct device_node *derive_parent(const char *path)
return parent;
}
-BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain);
+static BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain);
int pSeries_reconfig_notifier_register(struct notifier_block *nb)
{
@@ -109,6 +109,14 @@ void pSeries_reconfig_notifier_unregister(struct notifier_block *nb)
blocking_notifier_chain_unregister(&pSeries_reconfig_chain, nb);
}
+int pSeries_reconfig_notify(unsigned long action, void *p)
+{
+ int err = blocking_notifier_call_chain(&pSeries_reconfig_chain,
+ action, p);
+
+ return notifier_to_errno(err);
+}
+
static int pSeries_reconfig_add_node(const char *path, struct property *proplist)
{
struct device_node *np;
@@ -132,11 +140,9 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
goto out_err;
}
- err = blocking_notifier_call_chain(&pSeries_reconfig_chain,
- PSERIES_RECONFIG_ADD, np);
- if (err == NOTIFY_BAD) {
+ err = pSeries_reconfig_notify(PSERIES_RECONFIG_ADD, np);
+ if (err) {
printk(KERN_ERR "Failed to add device node %s\n", path);
- err = -ENOMEM; /* For now, safe to assume kmalloc failure */
goto out_err;
}
@@ -173,8 +179,7 @@ static int pSeries_reconfig_remove_node(struct device_node *np)
remove_node_proc_entries(np);
- blocking_notifier_call_chain(&pSeries_reconfig_chain,
- PSERIES_RECONFIG_REMOVE, np);
+ pSeries_reconfig_notify(PSERIES_RECONFIG_REMOVE, np);
of_detach_node(np);
of_node_put(parent);
@@ -472,11 +477,10 @@ static int do_update_property(char *buf, size_t bufsize)
else
action = PSERIES_DRCONF_MEM_REMOVE;
- rc = blocking_notifier_call_chain(&pSeries_reconfig_chain,
- action, value);
- if (rc == NOTIFY_BAD) {
- rc = prom_update_property(np, oldprop, newprop);
- return -ENOMEM;
+ rc = pSeries_reconfig_notify(action, value);
+ if (rc) {
+ prom_update_property(np, oldprop, newprop);
+ return rc;
}
}
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 593acceeff9..d00e52926b7 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -512,9 +512,10 @@ static void __init pSeries_init_early(void)
{
pr_debug(" -> pSeries_init_early()\n");
+#ifdef CONFIG_HVC_CONSOLE
if (firmware_has_feature(FW_FEATURE_LPAR))
- find_udbg_vterm();
-
+ hvc_vio_init_early();
+#endif
if (firmware_has_feature(FW_FEATURE_DABR))
ppc_md.set_dabr = pseries_set_dabr;
else if (firmware_has_feature(FW_FEATURE_XDABR))
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index fbffd7e47ab..4e44c4dcd11 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -27,7 +27,7 @@
#include <linux/cpu.h>
#include <asm/ptrace.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -44,7 +44,6 @@
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#include <asm/cputhreads.h>
-#include <asm/mpic.h>
#include <asm/xics.h>
#include "plpar_wrappers.h"
@@ -207,7 +206,7 @@ static struct smp_ops_t pSeries_mpic_smp_ops = {
};
static struct smp_ops_t pSeries_xics_smp_ops = {
- .message_pass = smp_muxed_ipi_message_pass,
+ .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
.cause_ipi = NULL, /* Filled at runtime by xics_smp_probe() */
.probe = xics_smp_probe,
.kick_cpu = smp_pSeries_kick_cpu,
diff --git a/arch/powerpc/platforms/wsp/smp.c b/arch/powerpc/platforms/wsp/smp.c
index 9d20fa9d371..71bd105f386 100644
--- a/arch/powerpc/platforms/wsp/smp.c
+++ b/arch/powerpc/platforms/wsp/smp.c
@@ -75,7 +75,7 @@ static int __init smp_a2_probe(void)
}
static struct smp_ops_t a2_smp_ops = {
- .message_pass = smp_muxed_ipi_message_pass,
+ .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
.cause_ipi = doorbell_cause_ipi,
.probe = smp_a2_probe,
.kick_cpu = smp_a2_kick_cpu,
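
Same convention as the iSeries, PowerMac and pSeries hunks earlier: a NULL .message_pass makes the generic powerpc SMP code fall back to smp_muxed_ipi_message_pass(), leaving the platform to supply only the low-level IPI trigger. A hedged sketch (names are illustrative):

#include <asm/smp.h>

static void example_cause_ipi(int cpu, unsigned long data)
{
        /* ring the doorbell / write the IPI register for 'cpu' here */
}

static struct smp_ops_t example_smp_ops = {
        .message_pass   = NULL,                 /* use muxed IPI messaging */
        .cause_ipi      = example_cause_ipi,
        /* .probe, .kick_cpu, ... as the platform requires */
};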
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 0efa990e334..cf736ca0cf0 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -4,6 +4,7 @@ ccflags-$(CONFIG_PPC64) := -mno-minimal-toc
mpic-msi-obj-$(CONFIG_PCI_MSI) += mpic_msi.o mpic_u3msi.o mpic_pasemi_msi.o
obj-$(CONFIG_MPIC) += mpic.o $(mpic-msi-obj-y)
+obj-$(CONFIG_PPC_EPAPR_HV_PIC) += ehv_pic.o
fsl-msi-obj-$(CONFIG_PCI_MSI) += fsl_msi.o
obj-$(CONFIG_PPC_MSI_BITMAP) += msi_bitmap.o
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index bd0d54060b9..265f0f09395 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -203,7 +203,7 @@ static int axon_ram_probe(struct platform_device *device)
goto failed;
}
- bank->size = resource.end - resource.start + 1;
+ bank->size = resource_size(&resource);
if (bank->size == 0) {
dev_err(&device->dev, "No DDR2 memory found for %s%d\n",
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 350787c83e2..5d7d59a43c4 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -148,7 +148,7 @@ unsigned int cpm_pic_init(void)
if (ret)
goto end;
- cpic_reg = ioremap(res.start, res.end - res.start + 1);
+ cpic_reg = ioremap(res.start, resource_size(&res));
if (cpic_reg == NULL)
goto end;
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 2b69aa0315b..d55d0ad0dea 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -115,7 +115,7 @@ int cpm_muram_init(void)
max = r.end;
rh_attach_region(&cpm_muram_info, r.start - muram_pbase,
- r.end - r.start + 1);
+ resource_size(&r));
}
muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 8e9e06a7ca5..4f2680f431b 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -239,7 +239,7 @@ static int __init dart_init(struct device_node *dart_node)
DARTMAP_RPNMASK);
/* Map in DART registers */
- dart = ioremap(r.start, r.end - r.start + 1);
+ dart = ioremap(r.start, resource_size(&r));
if (dart == NULL)
panic("DART: Cannot map registers!");
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
new file mode 100644
index 00000000000..af1a5df46b3
--- /dev/null
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -0,0 +1,302 @@
+/*
+ * Driver for ePAPR Embedded Hypervisor PIC
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Author: Ashish Kalra <ashish.kalra@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/ehv_pic.h>
+#include <asm/fsl_hcalls.h>
+
+#include "../../../kernel/irq/settings.h"
+
+static struct ehv_pic *global_ehv_pic;
+static DEFINE_SPINLOCK(ehv_pic_lock);
+
+static u32 hwirq_intspec[NR_EHV_PIC_INTS];
+static u32 __iomem *mpic_percpu_base_vaddr;
+
+#define IRQ_TYPE_MPIC_DIRECT 4
+#define MPIC_EOI 0x00B0
+
+/*
+ * Linux descriptor level callbacks
+ */
+
+void ehv_pic_unmask_irq(struct irq_data *d)
+{
+ unsigned int src = virq_to_hw(d->irq);
+
+ ev_int_set_mask(src, 0);
+}
+
+void ehv_pic_mask_irq(struct irq_data *d)
+{
+ unsigned int src = virq_to_hw(d->irq);
+
+ ev_int_set_mask(src, 1);
+}
+
+void ehv_pic_end_irq(struct irq_data *d)
+{
+ unsigned int src = virq_to_hw(d->irq);
+
+ ev_int_eoi(src);
+}
+
+void ehv_pic_direct_end_irq(struct irq_data *d)
+{
+ out_be32(mpic_percpu_base_vaddr + MPIC_EOI / 4, 0);
+}
+
+int ehv_pic_set_affinity(struct irq_data *d, const struct cpumask *dest,
+ bool force)
+{
+ unsigned int src = virq_to_hw(d->irq);
+ unsigned int config, prio, cpu_dest;
+ int cpuid = irq_choose_cpu(dest);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ehv_pic_lock, flags);
+ ev_int_get_config(src, &config, &prio, &cpu_dest);
+ ev_int_set_config(src, config, prio, cpuid);
+ spin_unlock_irqrestore(&ehv_pic_lock, flags);
+
+ return 0;
+}
+
+static unsigned int ehv_pic_type_to_vecpri(unsigned int type)
+{
+ /* Now convert sense value */
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_RISING:
+ return EHV_PIC_INFO(VECPRI_SENSE_EDGE) |
+ EHV_PIC_INFO(VECPRI_POLARITY_POSITIVE);
+
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_EDGE_BOTH:
+ return EHV_PIC_INFO(VECPRI_SENSE_EDGE) |
+ EHV_PIC_INFO(VECPRI_POLARITY_NEGATIVE);
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ return EHV_PIC_INFO(VECPRI_SENSE_LEVEL) |
+ EHV_PIC_INFO(VECPRI_POLARITY_POSITIVE);
+
+ case IRQ_TYPE_LEVEL_LOW:
+ default:
+ return EHV_PIC_INFO(VECPRI_SENSE_LEVEL) |
+ EHV_PIC_INFO(VECPRI_POLARITY_NEGATIVE);
+ }
+}
+
+int ehv_pic_set_irq_type(struct irq_data *d, unsigned int flow_type)
+{
+ unsigned int src = virq_to_hw(d->irq);
+ struct irq_desc *desc = irq_to_desc(d->irq);
+ unsigned int vecpri, vold, vnew, prio, cpu_dest;
+ unsigned long flags;
+
+ if (flow_type == IRQ_TYPE_NONE)
+ flow_type = IRQ_TYPE_LEVEL_LOW;
+
+ irq_settings_clr_level(desc);
+ irq_settings_set_trigger_mask(desc, flow_type);
+ if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
+ irq_settings_set_level(desc);
+
+ vecpri = ehv_pic_type_to_vecpri(flow_type);
+
+ spin_lock_irqsave(&ehv_pic_lock, flags);
+ ev_int_get_config(src, &vold, &prio, &cpu_dest);
+ vnew = vold & ~(EHV_PIC_INFO(VECPRI_POLARITY_MASK) |
+ EHV_PIC_INFO(VECPRI_SENSE_MASK));
+ vnew |= vecpri;
+
+ /*
+ * TODO : Add specific interface call for platform to set
+ * individual interrupt priorities.
+ * platform currently using static/default priority for all ints
+ */
+
+ prio = 8;
+
+ ev_int_set_config(src, vecpri, prio, cpu_dest);
+
+ spin_unlock_irqrestore(&ehv_pic_lock, flags);
+ return 0;
+}
+
+static struct irq_chip ehv_pic_irq_chip = {
+ .irq_mask = ehv_pic_mask_irq,
+ .irq_unmask = ehv_pic_unmask_irq,
+ .irq_eoi = ehv_pic_end_irq,
+ .irq_set_type = ehv_pic_set_irq_type,
+};
+
+static struct irq_chip ehv_pic_direct_eoi_irq_chip = {
+ .irq_mask = ehv_pic_mask_irq,
+ .irq_unmask = ehv_pic_unmask_irq,
+ .irq_eoi = ehv_pic_direct_end_irq,
+ .irq_set_type = ehv_pic_set_irq_type,
+};
+
+/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
+unsigned int ehv_pic_get_irq(void)
+{
+ int irq;
+
+ BUG_ON(global_ehv_pic == NULL);
+
+ if (global_ehv_pic->coreint_flag)
+ irq = mfspr(SPRN_EPR); /* if core int mode */
+ else
+ ev_int_iack(0, &irq); /* legacy mode */
+
+ if (irq == 0xFFFF) /* 0xFFFF --> no irq is pending */
+ return NO_IRQ;
+
+ /*
+ * this will also setup revmap[] in the slow path for the first
+ * time, next calls will always use fast path by indexing revmap
+ */
+ return irq_linear_revmap(global_ehv_pic->irqhost, irq);
+}
+
+static int ehv_pic_host_match(struct irq_host *h, struct device_node *node)
+{
+ /* Exact match, unless ehv_pic node is NULL */
+ return h->of_node == NULL || h->of_node == node;
+}
+
+static int ehv_pic_host_map(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct ehv_pic *ehv_pic = h->host_data;
+ struct irq_chip *chip;
+
+ /* Default chip */
+ chip = &ehv_pic->hc_irq;
+
+ if (mpic_percpu_base_vaddr)
+ if (hwirq_intspec[hw] & IRQ_TYPE_MPIC_DIRECT)
+ chip = &ehv_pic_direct_eoi_irq_chip;
+
+ irq_set_chip_data(virq, chip);
+ /*
+ * using handle_fasteoi_irq as our irq handler, this will
+ * only call the eoi callback and suitable for the MPIC
+ * controller which set ISR/IPR automatically and clear the
+ * highest priority active interrupt in ISR/IPR when we do
+ * a specific eoi
+ */
+ irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq);
+
+ /* Set default irq type */
+ irq_set_irq_type(virq, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static int ehv_pic_host_xlate(struct irq_host *h, struct device_node *ct,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_flags)
+
+{
+ /*
+ * interrupt sense values coming from the guest device tree
+ * interrupt specifiers can have four possible sense and
+ * level encoding information and they need to
+ * be translated between firmware type & linux type.
+ */
+
+ static unsigned char map_of_senses_to_linux_irqtype[4] = {
+ IRQ_TYPE_EDGE_FALLING,
+ IRQ_TYPE_EDGE_RISING,
+ IRQ_TYPE_LEVEL_LOW,
+ IRQ_TYPE_LEVEL_HIGH,
+ };
+
+ *out_hwirq = intspec[0];
+ if (intsize > 1) {
+ hwirq_intspec[intspec[0]] = intspec[1];
+ *out_flags = map_of_senses_to_linux_irqtype[intspec[1] &
+ ~IRQ_TYPE_MPIC_DIRECT];
+ } else {
+ *out_flags = IRQ_TYPE_NONE;
+ }
+
+ return 0;
+}
+
+static struct irq_host_ops ehv_pic_host_ops = {
+ .match = ehv_pic_host_match,
+ .map = ehv_pic_host_map,
+ .xlate = ehv_pic_host_xlate,
+};
+
+void __init ehv_pic_init(void)
+{
+ struct device_node *np, *np2;
+ struct ehv_pic *ehv_pic;
+ int coreint_flag = 1;
+
+ np = of_find_compatible_node(NULL, NULL, "epapr,hv-pic");
+ if (!np) {
+ pr_err("ehv_pic_init: could not find epapr,hv-pic node\n");
+ return;
+ }
+
+ if (!of_find_property(np, "has-external-proxy", NULL))
+ coreint_flag = 0;
+
+ ehv_pic = kzalloc(sizeof(struct ehv_pic), GFP_KERNEL);
+ if (!ehv_pic) {
+ of_node_put(np);
+ return;
+ }
+
+ ehv_pic->irqhost = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
+ NR_EHV_PIC_INTS, &ehv_pic_host_ops, 0);
+
+ if (!ehv_pic->irqhost) {
+ of_node_put(np);
+ return;
+ }
+
+ np2 = of_find_compatible_node(NULL, NULL, "fsl,hv-mpic-per-cpu");
+ if (np2) {
+ mpic_percpu_base_vaddr = of_iomap(np2, 0);
+ if (!mpic_percpu_base_vaddr)
+ pr_err("ehv_pic_init: of_iomap failed\n");
+
+ of_node_put(np2);
+ }
+
+ ehv_pic->irqhost->host_data = ehv_pic;
+ ehv_pic->hc_irq = ehv_pic_irq_chip;
+ ehv_pic->hc_irq.irq_set_affinity = ehv_pic_set_affinity;
+ ehv_pic->coreint_flag = coreint_flag;
+
+ global_ehv_pic = ehv_pic;
+ irq_set_default_host(global_ehv_pic->irqhost);
+}
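
A hedged sketch of how a guest platform would typically consume the new driver (the board code is not part of this hunk): probe the PIC once at init time and route the machine's interrupt fetch hook to the exported ehv_pic_get_irq().

#include <asm/machdep.h>
#include <asm/ehv_pic.h>

static void __init example_guest_pic_init(void)
{
        ehv_pic_init();                         /* parse epapr,hv-pic node */
        ppc_md.get_irq = ehv_pic_get_irq;       /* fetch pending interrupts */
}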
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 92e78333c47..419a77239bd 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -349,7 +349,7 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
goto error_out;
}
- msi->msi_regs = ioremap(res.start, res.end - res.start + 1);
+ msi->msi_regs = ioremap(res.start, resource_size(&res));
if (!msi->msi_regs) {
dev_err(&dev->dev, "ioremap problem failed\n");
goto error_out;
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 68ca9290df9..4ce547e0047 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -38,10 +38,17 @@ static int fsl_pcie_bus_fixup, is_mpc83xx_pci;
static void __init quirk_fsl_pcie_header(struct pci_dev *dev)
{
+ u8 progif;
+
/* if we aren't a PCIe don't bother */
if (!pci_find_capability(dev, PCI_CAP_ID_EXP))
return;
+ /* if we aren't in host mode don't bother */
+ pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
+ if (progif & 0x1)
+ return;
+
dev->class = PCI_CLASS_BRIDGE_PCI << 8;
fsl_pcie_bus_fixup = 1;
return;
@@ -64,7 +71,7 @@ static int __init setup_one_atmu(struct ccsr_pci __iomem *pci,
{
resource_size_t pci_addr = res->start - offset;
resource_size_t phys_addr = res->start;
- resource_size_t size = res->end - res->start + 1;
+ resource_size_t size = resource_size(res);
u32 flags = 0x80044000; /* enable & mem R/W */
unsigned int i;
@@ -108,7 +115,7 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
char *name = hose->dn->full_name;
pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
- (u64)rsrc->start, (u64)rsrc->end - (u64)rsrc->start + 1);
+ (u64)rsrc->start, (u64)resource_size(rsrc));
if (of_device_is_compatible(hose->dn, "fsl,qoriq-pcie-v2.2")) {
win_idx = 2;
@@ -116,7 +123,7 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
end_idx = 3;
}
- pci = ioremap(rsrc->start, rsrc->end - rsrc->start + 1);
+ pci = ioremap(rsrc->start, resource_size(rsrc));
if (!pci) {
dev_err(hose->parent, "Unable to map ATMU registers\n");
return;
@@ -153,9 +160,9 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
} else {
pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, "
"phy base 0x%016llx.\n",
- (u64)hose->io_resource.start,
- (u64)hose->io_resource.end - (u64)hose->io_resource.start + 1,
- (u64)hose->io_base_phys);
+ (u64)hose->io_resource.start,
+ (u64)resource_size(&hose->io_resource),
+ (u64)hose->io_base_phys);
out_be32(&pci->pow[j].potar, (hose->io_resource.start >> 12));
out_be32(&pci->pow[j].potear, 0);
out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12));
@@ -323,6 +330,7 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
struct pci_controller *hose;
struct resource rsrc;
const int *bus_range;
+ u8 progif;
if (!of_device_is_available(dev)) {
pr_warning("%s: disabled\n", dev->full_name);
@@ -343,7 +351,7 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
printk(KERN_WARNING "Can't get bus-range for %s, assume"
" bus 0\n", dev->full_name);
- ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
+ pci_add_flags(PCI_REASSIGN_ALL_BUS);
hose = pcibios_alloc_controller(dev);
if (!hose)
return -ENOMEM;
@@ -353,6 +361,18 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
PPC_INDIRECT_TYPE_BIG_ENDIAN);
+
+ early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif);
+ if ((progif & 1) == 1) {
+ /* unmap cfg_data & cfg_addr separately if not on same page */
+ if (((unsigned long)hose->cfg_data & PAGE_MASK) !=
+ ((unsigned long)hose->cfg_addr & PAGE_MASK))
+ iounmap(hose->cfg_data);
+ iounmap(hose->cfg_addr);
+ pcibios_free_controller(hose);
+ return 0;
+ }
+
setup_pci_cmd(hose);
/* check PCI express link status */
@@ -380,70 +400,11 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
return 0;
}
-
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8548E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8548, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8543E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8543, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8547E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8545E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8545, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8569E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8569, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8568E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8568, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8567E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8567, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8533E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8533, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8544E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8544, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8572E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8572, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8536E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8536, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641D, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8610, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1011E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1011, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1021E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1021, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2040E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2040, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P3041E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P3041, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5010E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5010, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5020E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5020, quirk_fsl_pcie_header);
#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */
-#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8308, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8314E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8314, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8315E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8315, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8377E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8377, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8378E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8378, quirk_fsl_pcie_header);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_pcie_header);
+#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
struct mpc83xx_pcie_priv {
void __iomem *cfg_type0;
void __iomem *cfg_type1;
@@ -679,7 +640,7 @@ int __init mpc83xx_add_bridge(struct device_node *dev)
" bus 0\n", dev->full_name);
}
- ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
+ pci_add_flags(PCI_REASSIGN_ALL_BUS);
hose = pcibios_alloc_controller(dev);
if (!hose)
return -ENOMEM;
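
The per-device fixup table collapses into a single vendor-wide registration, with the quirk body doing the filtering: a PCIe capability must be present, and the controller must be in host mode (low bit of PCI_CLASS_PROG clear) rather than agent/endpoint mode. The shape of such a vendor-wide fixup, as a sketch mirroring the hunk above:

#include <linux/pci.h>

static void example_vendor_quirk(struct pci_dev *dev)
{
        u8 progif;

        if (!pci_find_capability(dev, PCI_CAP_ID_EXP))
                return;                         /* not a PCIe function */

        pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
        if (progif & 0x1)
                return;                         /* agent/endpoint mode */

        dev->class = PCI_CLASS_BRIDGE_PCI << 8; /* present as a bridge */
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, example_vendor_quirk);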
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index b3fd081d56f..2de8551df40 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -1524,7 +1524,7 @@ int fsl_rio_setup(struct platform_device *dev)
port->priv = priv;
port->phys_efptr = 0x100;
- priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1);
+ priv->regs_win = ioremap(regs.start, resource_size(&regs));
rio_regs_win = priv->regs_win;
/* Probe the master port phy type */
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 19e5015e039..2d66275e489 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -32,7 +32,7 @@
#include <linux/fs_uart_pd.h>
#include <asm/system.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/time.h>
@@ -41,6 +41,7 @@
#include <sysdev/fsl_soc.h>
#include <mm/mmu_decl.h>
#include <asm/cpm2.h>
+#include <asm/fsl_hcalls.h> /* For the Freescale hypervisor */
extern void init_fcc_ioports(struct fs_platform_info*);
extern void init_fec_ioports(struct fs_platform_info*);
@@ -252,3 +253,29 @@ void fsl_rstcr_restart(char *cmd)
struct platform_diu_data_ops diu_ops;
EXPORT_SYMBOL(diu_ops);
#endif
+
+/*
+ * Restart the current partition
+ *
+ * This function should be assigned to the ppc_md.restart function pointer,
+ * to initiate a partition restart when we're running under the Freescale
+ * hypervisor.
+ */
+void fsl_hv_restart(char *cmd)
+{
+ pr_info("hv restart\n");
+ fh_partition_restart(-1);
+}
+
+/*
+ * Halt the current partition
+ *
+ * This function should be assigned to the ppc_md.power_off and ppc_md.halt
+ * function pointers, to shut down the partition when we're running under
+ * the Freescale hypervisor.
+ */
+void fsl_hv_halt(void)
+{
+ pr_info("hv exit\n");
+ fh_partition_stop(-1);
+}
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
index 53609489a62..2ece02beb8f 100644
--- a/arch/powerpc/sysdev/fsl_soc.h
+++ b/arch/powerpc/sysdev/fsl_soc.h
@@ -36,5 +36,8 @@ struct platform_diu_data_ops {
extern struct platform_diu_data_ops diu_ops;
#endif
+void fsl_hv_restart(char *cmd);
+void fsl_hv_halt(void);
+
#endif
#endif
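
With these declarations exported, a board file running as a Freescale hypervisor guest can route the machine hooks to the new helpers. A hedged sketch of that wiring (it is not part of this diff):

#include <asm/machdep.h>
#include <sysdev/fsl_soc.h>

static void __init example_wire_hv_hooks(void)
{
        ppc_md.restart   = fsl_hv_restart;      /* reboot -> partition restart */
        ppc_md.power_off = fsl_hv_halt;         /* power off -> partition stop */
        ppc_md.halt      = fsl_hv_halt;
}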
diff --git a/arch/powerpc/sysdev/grackle.c b/arch/powerpc/sysdev/grackle.c
index cf27df6e508..08abe91ae79 100644
--- a/arch/powerpc/sysdev/grackle.c
+++ b/arch/powerpc/sysdev/grackle.c
@@ -57,7 +57,7 @@ void __init setup_grackle(struct pci_controller *hose)
{
setup_indirect_pci(hose, 0xfec00000, 0xfee00000, 0);
if (of_machine_is_compatible("PowerMac1,1"))
- ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
+ pci_add_flags(PCI_REASSIGN_ALL_BUS);
if (of_machine_is_compatible("AAPL,PowerBook1998"))
grackle_set_loop_snoop(hose, 1);
#if 0 /* Disabled for now, HW problems ??? */
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index 7367d17364c..95da897f05a 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -736,7 +736,7 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
return NULL;
}
- ipic->regs = ioremap(res.start, res.end - res.start + 1);
+ ipic->regs = ioremap(res.start, resource_size(&res));
ipic->irqhost->host_data = ipic;
diff --git a/arch/powerpc/sysdev/mmio_nvram.c b/arch/powerpc/sysdev/mmio_nvram.c
index ddc877a3a23..69f5814ae6d 100644
--- a/arch/powerpc/sysdev/mmio_nvram.c
+++ b/arch/powerpc/sysdev/mmio_nvram.c
@@ -129,7 +129,7 @@ int __init mmio_nvram_init(void)
goto out;
}
nvram_addr = r.start;
- mmio_nvram_len = r.end - r.start + 1;
+ mmio_nvram_len = resource_size(&r);
if ( (!mmio_nvram_len) || (!nvram_addr) ) {
printk(KERN_WARNING "nvram: address or length is 0\n");
ret = -EIO;
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index 20924f2246f..22e48e2d71f 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -166,7 +166,7 @@ int mpc8xx_pic_init(void)
if (ret)
goto out;
- siu_reg = ioremap(res.start, res.end - res.start + 1);
+ siu_reg = ioremap(res.start, resource_size(&res));
if (siu_reg == NULL) {
ret = -EINVAL;
goto out;
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 58d7a534f87..d5d3ff3d757 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -598,42 +598,6 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
#endif /* CONFIG_MPIC_U3_HT_IRQS */
-#ifdef CONFIG_SMP
-static int irq_choose_cpu(const struct cpumask *mask)
-{
- int cpuid;
-
- if (cpumask_equal(mask, cpu_all_mask)) {
- static int irq_rover = 0;
- static DEFINE_RAW_SPINLOCK(irq_rover_lock);
- unsigned long flags;
-
- /* Round-robin distribution... */
- do_round_robin:
- raw_spin_lock_irqsave(&irq_rover_lock, flags);
-
- irq_rover = cpumask_next(irq_rover, cpu_online_mask);
- if (irq_rover >= nr_cpu_ids)
- irq_rover = cpumask_first(cpu_online_mask);
-
- cpuid = irq_rover;
-
- raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
- } else {
- cpuid = cpumask_first_and(mask, cpu_online_mask);
- if (cpuid >= nr_cpu_ids)
- goto do_round_robin;
- }
-
- return get_hard_smp_processor_id(cpuid);
-}
-#else
-static int irq_choose_cpu(const struct cpumask *mask)
-{
- return hard_smp_processor_id();
-}
-#endif
-
/* Find an mpic associated with a given linux interrupt */
static struct mpic *mpic_find(unsigned int irq)
{
@@ -849,7 +813,7 @@ static void mpic_unmask_tm(struct irq_data *d)
struct mpic *mpic = mpic_from_irq_data(d);
unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0];
- DBG("%s: enable_tm: %d (tm %d)\n", mpic->name, irq, src);
+ DBG("%s: enable_tm: %d (tm %d)\n", mpic->name, d->irq, src);
mpic_tm_write(src, mpic_tm_read(src) & ~MPIC_VECPRI_MASK);
mpic_tm_read(src);
}
diff --git a/arch/powerpc/sysdev/mv64x60_udbg.c b/arch/powerpc/sysdev/mv64x60_udbg.c
index 2792dc8b038..50a81387e9b 100644
--- a/arch/powerpc/sysdev/mv64x60_udbg.c
+++ b/arch/powerpc/sysdev/mv64x60_udbg.c
@@ -125,11 +125,11 @@ static void mv64x60_udbg_init(void)
of_node_put(np);
- mpsc_base = ioremap(r[0].start, r[0].end - r[0].start + 1);
+ mpsc_base = ioremap(r[0].start, resource_size(&r[0]));
if (!mpsc_base)
return;
- mpsc_intr_cause = ioremap(r[1].start, r[1].end - r[1].start + 1);
+ mpsc_intr_cause = ioremap(r[1].start, resource_size(&r[1]));
if (!mpsc_intr_cause) {
iounmap(mpsc_base);
return;
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
index 156aa7d3625..a59ba96d2c2 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/sysdev/ppc4xx_pci.c
@@ -265,7 +265,7 @@ static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
if (ppc4xx_setup_one_pci_PMM(hose, reg,
res->start,
res->start - hose->pci_mem_offset,
- res->end + 1 - res->start,
+ resource_size(res),
res->flags,
j) == 0) {
j++;
@@ -290,7 +290,7 @@ static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose,
void __iomem *reg,
const struct resource *res)
{
- resource_size_t size = res->end - res->start + 1;
+ resource_size_t size = resource_size(res);
u32 sa;
/* Calculate window size */
@@ -349,7 +349,7 @@ static void __init ppc4xx_probe_pci_bridge(struct device_node *np)
bus_range = of_get_property(np, "bus-range", NULL);
/* Map registers */
- reg = ioremap(rsrc_reg.start, rsrc_reg.end + 1 - rsrc_reg.start);
+ reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
if (reg == NULL) {
printk(KERN_ERR "%s: Can't map registers !", np->full_name);
goto fail;
@@ -465,7 +465,7 @@ static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
if (ppc4xx_setup_one_pcix_POM(hose, reg,
res->start,
res->start - hose->pci_mem_offset,
- res->end + 1 - res->start,
+ resource_size(res),
res->flags,
j) == 0) {
j++;
@@ -492,7 +492,7 @@ static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose,
int big_pim,
int enable_msi_hole)
{
- resource_size_t size = res->end - res->start + 1;
+ resource_size_t size = resource_size(res);
u32 sa;
/* RAM is always at 0 */
@@ -555,7 +555,7 @@ static void __init ppc4xx_probe_pcix_bridge(struct device_node *np)
bus_range = of_get_property(np, "bus-range", NULL);
/* Map registers */
- reg = ioremap(rsrc_reg.start, rsrc_reg.end + 1 - rsrc_reg.start);
+ reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
if (reg == NULL) {
printk(KERN_ERR "%s: Can't map registers !", np->full_name);
goto fail;
@@ -650,12 +650,74 @@ struct ppc4xx_pciex_hwops
int (*core_init)(struct device_node *np);
int (*port_init_hw)(struct ppc4xx_pciex_port *port);
int (*setup_utl)(struct ppc4xx_pciex_port *port);
+ void (*check_link)(struct ppc4xx_pciex_port *port);
};
static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops;
#ifdef CONFIG_44x
+static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port,
+ unsigned int sdr_offset,
+ unsigned int mask,
+ unsigned int value,
+ int timeout_ms)
+{
+ u32 val;
+
+ while(timeout_ms--) {
+ val = mfdcri(SDR0, port->sdr_base + sdr_offset);
+ if ((val & mask) == value) {
+ pr_debug("PCIE%d: Wait on SDR %x success with tm %d (%08x)\n",
+ port->index, sdr_offset, timeout_ms, val);
+ return 0;
+ }
+ msleep(1);
+ }
+ return -1;
+}
+
+static int __init ppc4xx_pciex_port_reset_sdr(struct ppc4xx_pciex_port *port)
+{
+ /* Wait for reset to complete */
+ if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS, 1 << 20, 0, 10)) {
+ printk(KERN_WARNING "PCIE%d: PGRST failed\n",
+ port->index);
+ return -1;
+ }
+ return 0;
+}
+
+static void __init ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port *port)
+{
+ printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);
+
+ /* Check for card presence detect if supported, if not, just wait for
+ * link unconditionally.
+ *
+ * note that we don't fail if there is no link, we just filter out
+ * config space accesses. That way, it will be easier to implement
+ * hotplug later on.
+ */
+ if (!port->has_ibpre ||
+ !ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
+ 1 << 28, 1 << 28, 100)) {
+ printk(KERN_INFO
+ "PCIE%d: Device detected, waiting for link...\n",
+ port->index);
+ if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
+ 0x1000, 0x1000, 2000))
+ printk(KERN_WARNING
+ "PCIE%d: Link up failed\n", port->index);
+ else {
+ printk(KERN_INFO
+ "PCIE%d: link is up !\n", port->index);
+ port->link = 1;
+ }
+ } else
+ printk(KERN_INFO "PCIE%d: No device detected.\n", port->index);
+}
+
/* Check various reset bits of the 440SPe PCIe core */
static int __init ppc440spe_pciex_check_reset(struct device_node *np)
{
@@ -806,7 +868,7 @@ static int ppc440spe_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
(1 << 24) | (1 << 16), 1 << 12);
- return 0;
+ return ppc4xx_pciex_port_reset_sdr(port);
}
static int ppc440speA_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
@@ -856,6 +918,7 @@ static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata =
.core_init = ppc440spe_pciex_core_init,
.port_init_hw = ppc440speA_pciex_init_port_hw,
.setup_utl = ppc440speA_pciex_init_utl,
+ .check_link = ppc4xx_pciex_check_link_sdr,
};
static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata =
@@ -863,6 +926,7 @@ static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata =
.core_init = ppc440spe_pciex_core_init,
.port_init_hw = ppc440speB_pciex_init_port_hw,
.setup_utl = ppc440speB_pciex_init_utl,
+ .check_link = ppc4xx_pciex_check_link_sdr,
};
static int __init ppc460ex_pciex_core_init(struct device_node *np)
@@ -944,7 +1008,7 @@ static int ppc460ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
port->has_ibpre = 1;
- return 0;
+ return ppc4xx_pciex_port_reset_sdr(port);
}
static int ppc460ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
@@ -972,6 +1036,7 @@ static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata =
.core_init = ppc460ex_pciex_core_init,
.port_init_hw = ppc460ex_pciex_init_port_hw,
.setup_utl = ppc460ex_pciex_init_utl,
+ .check_link = ppc4xx_pciex_check_link_sdr,
};
static int __init ppc460sx_pciex_core_init(struct device_node *np)
@@ -1075,7 +1140,7 @@ static int ppc460sx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
port->has_ibpre = 1;
- return 0;
+ return ppc4xx_pciex_port_reset_sdr(port);
}
static int ppc460sx_pciex_init_utl(struct ppc4xx_pciex_port *port)
@@ -1089,6 +1154,7 @@ static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = {
.core_init = ppc460sx_pciex_core_init,
.port_init_hw = ppc460sx_pciex_init_port_hw,
.setup_utl = ppc460sx_pciex_init_utl,
+ .check_link = ppc4xx_pciex_check_link_sdr,
};
#endif /* CONFIG_44x */
@@ -1154,7 +1220,7 @@ static int ppc405ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
port->has_ibpre = 1;
- return 0;
+ return ppc4xx_pciex_port_reset_sdr(port);
}
static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
@@ -1183,11 +1249,11 @@ static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata =
.core_init = ppc405ex_pciex_core_init,
.port_init_hw = ppc405ex_pciex_init_port_hw,
.setup_utl = ppc405ex_pciex_init_utl,
+ .check_link = ppc4xx_pciex_check_link_sdr,
};
#endif /* CONFIG_40x */
-
/* Check that the core has been initialized and if not, do it */
static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
{
@@ -1261,26 +1327,6 @@ static void __init ppc4xx_pciex_port_init_mapping(struct ppc4xx_pciex_port *port
dcr_write(port->dcrs, DCRO_PEGPL_MSGMSK, 0);
}
-static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port,
- unsigned int sdr_offset,
- unsigned int mask,
- unsigned int value,
- int timeout_ms)
-{
- u32 val;
-
- while(timeout_ms--) {
- val = mfdcri(SDR0, port->sdr_base + sdr_offset);
- if ((val & mask) == value) {
- pr_debug("PCIE%d: Wait on SDR %x success with tm %d (%08x)\n",
- port->index, sdr_offset, timeout_ms, val);
- return 0;
- }
- msleep(1);
- }
- return -1;
-}
-
static int __init ppc4xx_pciex_port_init(struct ppc4xx_pciex_port *port)
{
int rc = 0;
@@ -1291,40 +1337,8 @@ static int __init ppc4xx_pciex_port_init(struct ppc4xx_pciex_port *port)
if (rc != 0)
return rc;
- printk(KERN_INFO "PCIE%d: Checking link...\n",
- port->index);
-
- /* Wait for reset to complete */
- if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS, 1 << 20, 0, 10)) {
- printk(KERN_WARNING "PCIE%d: PGRST failed\n",
- port->index);
- return -1;
- }
-
- /* Check for card presence detect if supported, if not, just wait for
- * link unconditionally.
- *
- * note that we don't fail if there is no link, we just filter out
- * config space accesses. That way, it will be easier to implement
- * hotplug later on.
- */
- if (!port->has_ibpre ||
- !ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
- 1 << 28, 1 << 28, 100)) {
- printk(KERN_INFO
- "PCIE%d: Device detected, waiting for link...\n",
- port->index);
- if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
- 0x1000, 0x1000, 2000))
- printk(KERN_WARNING
- "PCIE%d: Link up failed\n", port->index);
- else {
- printk(KERN_INFO
- "PCIE%d: link is up !\n", port->index);
- port->link = 1;
- }
- } else
- printk(KERN_INFO "PCIE%d: No device detected.\n", port->index);
+ if (ppc4xx_pciex_hwops->check_link)
+ ppc4xx_pciex_hwops->check_link(port);
/*
* Initialize mapping: disable all regions and configure
@@ -1347,14 +1361,17 @@ static int __init ppc4xx_pciex_port_init(struct ppc4xx_pciex_port *port)
/*
* Check for VC0 active and assert RDY.
*/
- if (port->link &&
- ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS,
- 1 << 16, 1 << 16, 5000)) {
- printk(KERN_INFO "PCIE%d: VC0 not active\n", port->index);
- port->link = 0;
+ if (port->sdr_base) {
+ if (port->link &&
+ ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS,
+ 1 << 16, 1 << 16, 5000)) {
+ printk(KERN_INFO "PCIE%d: VC0 not active\n", port->index);
+ port->link = 0;
+ }
+
+ dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, 0, 1 << 20);
}
- dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, 0, 1 << 20);
msleep(100);
return 0;
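The hunk above wraps both the VC0-active wait and the final RDY assertion (bit 20 of PESDRn_RCSSET) in an if (port->sdr_base) check, so ports on cores whose PCIe bridge exposes no SDR register block, and which therefore never set sdr_base, skip the SDR0 accesses entirely; behaviour on the existing 40x/44x parts with sdr_base set is unchanged.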
@@ -1604,7 +1621,7 @@ static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
res->start,
res->start - hose->pci_mem_offset,
- res->end + 1 - res->start,
+ resource_size(res),
res->flags,
j) == 0) {
j++;
@@ -1639,7 +1656,7 @@ static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
void __iomem *mbase,
struct resource *res)
{
- resource_size_t size = res->end - res->start + 1;
+ resource_size_t size = resource_size(res);
u64 sa;
if (port->endpoint) {
@@ -1960,7 +1977,7 @@ static int __init ppc4xx_pci_find_bridges(void)
{
struct device_node *np;
- ppc_pci_flags |= PPC_PCI_ENABLE_PROC_DOMAINS | PPC_PCI_COMPAT_DOMAIN_0;
+ pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);
#ifdef CONFIG_PPC4xx_PCI_EXPRESS
for_each_compatible_node(np, NULL, "ibm,plb-pciex")
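The last ppc4xx_pci.c hunk replaces the powerpc-private ppc_pci_flags bits with the generic pci_add_flags() interface and the PCI_ENABLE_PROC_DOMAINS / PCI_COMPAT_DOMAIN_0 flags, which other code can then query with pci_has_flag(). A stand-alone model of that accumulate-and-test pattern follows; the flag values are made up for illustration rather than taken from the kernel headers:

#include <stdio.h>

/* Stand-alone model of the pci_add_flags()/pci_has_flag() style used above;
 * the flag values below are illustrative, not the kernel's definitions. */
#define PCI_ENABLE_PROC_DOMAINS	0x0010
#define PCI_COMPAT_DOMAIN_0	0x0020

static unsigned int pci_flags;

static void pci_add_flags(unsigned int flags) { pci_flags |= flags; }
static int  pci_has_flag(unsigned int flag)   { return (pci_flags & flag) != 0; }

int main(void)
{
	pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);

	printf("proc domains enabled: %d\n", pci_has_flag(PCI_ENABLE_PROC_DOMAINS));
	printf("compat domain 0:      %d\n", pci_has_flag(PCI_COMPAT_DOMAIN_0));
	return 0;
}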
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index b2acda07220..18e75ca19fe 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -347,7 +347,7 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags,
return;
}
- qe_ic->regs = ioremap(res.start, res.end - res.start + 1);
+ qe_ic->regs = ioremap(res.start, resource_size(&res));
qe_ic->irqhost->host_data = qe_ic;
qe_ic->hc_irq = qe_ic_irq_chip;
diff --git a/arch/powerpc/sysdev/qe_lib/qe_io.c b/arch/powerpc/sysdev/qe_lib/qe_io.c
index 77e4934b88c..fd1a6c3b172 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_io.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_io.c
@@ -41,7 +41,7 @@ int par_io_init(struct device_node *np)
ret = of_address_to_resource(np, 0, &res);
if (ret)
return ret;
- par_io = ioremap(res.start, res.end - res.start + 1);
+ par_io = ioremap(res.start, resource_size(&res));
num_ports = of_get_property(np, "num-ports", NULL);
if (num_ports)
diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c
index ee056807b52..9f51f97abb5 100644
--- a/arch/powerpc/sysdev/tsi108_dev.c
+++ b/arch/powerpc/sysdev/tsi108_dev.c
@@ -23,7 +23,7 @@
#include <asm/tsi108.h>
#include <asm/system.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/prom.h>
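The asm/atomic.h to linux/atomic.h switch here is part of the tree-wide conversion to including atomic operations via <linux/atomic.h>; no behavioural change is intended.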
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index 1f15ad43614..50e32afe392 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -17,6 +17,7 @@
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/spinlock.h>
+#include <linux/module.h>
#include <asm/prom.h>
#include <asm/io.h>
@@ -24,6 +25,7 @@
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xics.h>
+#include <asm/kvm_ppc.h>
struct icp_ipl {
union {
@@ -139,6 +141,12 @@ static void icp_native_cause_ipi(int cpu, unsigned long data)
icp_native_set_qirr(cpu, IPI_PRIORITY);
}
+void xics_wake_cpu(int cpu)
+{
+ icp_native_set_qirr(cpu, IPI_PRIORITY);
+}
+EXPORT_SYMBOL_GPL(xics_wake_cpu);
+
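xics_wake_cpu() reuses the same qirr write that icp_native_cause_ipi() performs (IPI_PRIORITY into the target CPU's queued IRR) and is exported with EXPORT_SYMBOL_GPL, evidently for the KVM code pulled in via the new asm/kvm_ppc.h include. Together with the kvmppc_set_xics_phys() call added further down, which records each CPU's physical ICP address at map time, this presumably lets the Book3S-HV KVM code wake sleeping secondary threads and reach the ICP without going through the ioremap'd virtual mapping.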
static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
{
int cpu = smp_processor_id();
@@ -185,6 +193,7 @@ static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr,
}
icp_native_regs[cpu] = ioremap(addr, size);
+ kvmppc_set_xics_phys(cpu, addr);
if (!icp_native_regs[cpu]) {
pr_warning("icp_native: Failed ioremap for CPU %d, "
"interrupt server #0x%x, addr %#lx\n",
@@ -247,7 +256,7 @@ static int __init icp_native_init_one_node(struct device_node *np,
return -1;
}
- if (icp_native_map_one_cpu(*indx, r.start, r.end - r.start))
+ if (icp_native_map_one_cpu(*indx, r.start, resource_size(&r)))
return -1;
(*indx)++;