Diffstat (limited to 'arch/powerpc')
399 files changed, 12676 insertions, 8492 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index a902a5c1c76..80821512e9c 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -87,9 +87,6 @@ config GENERIC_GPIO help Generic GPIO API support -config ARCH_NO_VIRT_TO_BUS - def_bool PPC64 - config PPC bool default y @@ -101,6 +98,7 @@ config PPC select HAVE_FUNCTION_GRAPH_TRACER select SYSCTL_EXCEPTION_TRACE select ARCH_WANT_OPTIONAL_GPIOLIB + select VIRT_TO_BUS if !PPC64 select HAVE_IDE select HAVE_IOREMAP_PROT select HAVE_EFFICIENT_UNALIGNED_ACCESS @@ -118,14 +116,12 @@ config PPC select HAVE_SYSCALL_WRAPPERS if PPC64 select GENERIC_ATOMIC64 if PPC32 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE - select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64 select HAVE_GENERIC_HARDIRQS select ARCH_WANT_IPC_PARSE_VERSION select SPARSE_IRQ - select IRQ_PER_CPU select IRQ_DOMAIN select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW_LEVEL @@ -141,9 +137,12 @@ config PPC select GENERIC_CLOCKEVENTS select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER - select GENERIC_KERNEL_THREAD select HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_RELA + select CLONE_BACKWARDS + select ARCH_USE_BUILTIN_BSWAP + select OLD_SIGSUSPEND + select OLD_SIGACTION if PPC32 config EARLY_PRINTK bool @@ -154,6 +153,7 @@ config COMPAT default y if PPC64 select COMPAT_BINFMT_ELF select ARCH_WANT_OLD_COMPAT_IPC + select COMPAT_OLD_SIGACTION config SYSVIPC_COMPAT bool @@ -275,6 +275,10 @@ config PPC_ADV_DEBUG_DAC_RANGE depends on PPC_ADV_DEBUG_REGS && 44x default y +config PPC_EMULATE_SSTEP + bool + default y if KPROBES || UPROBES || XMON || HAVE_HW_BREAKPOINT + source "init/Kconfig" source "kernel/Kconfig.freezer" @@ -308,6 +312,14 @@ config MATH_EMULATION unit, which will allow programs that use floating-point instructions to run. +config PPC_TRANSACTIONAL_MEM + bool "Transactional Memory support for POWERPC" + depends on PPC_BOOK3S_64 + depends on SMP + default n + ---help--- + Support user-mode Transactional Memory on POWERPC. + config 8XX_MINIMAL_FPEMU bool "Minimal math emulation for 8xx" depends on 8xx && !MATH_EMULATION @@ -334,7 +346,7 @@ config SWIOTLB config HOTPLUG_CPU bool "Support for enabling/disabling CPUs" - depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || \ + depends on SMP && HOTPLUG && (PPC_PSERIES || \ PPC_PMAC || PPC_POWERNV || (PPC_85xx && !PPC_E500MC)) ---help--- Say Y here to be able to disable and re-enable individual @@ -356,8 +368,8 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE def_bool y config KEXEC - bool "kexec system call (EXPERIMENTAL)" - depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) && EXPERIMENTAL + bool "kexec system call" + depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. 
It is like a reboot @@ -852,8 +864,8 @@ config LOWMEM_CAM_NUM default 3 config DYNAMIC_MEMSTART - bool "Enable page aligned dynamic load address for kernel (EXPERIMENTAL)" - depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM && (FSL_BOOKE || 44x) + bool "Enable page aligned dynamic load address for kernel" + depends on ADVANCED_OPTIONS && FLATMEM && (FSL_BOOKE || 44x) select NONSTATIC_KERNEL help This option enables the kernel to be loaded at any page aligned @@ -870,8 +882,8 @@ config DYNAMIC_MEMSTART This option is overridden by CONFIG_RELOCATABLE config RELOCATABLE - bool "Build a relocatable kernel (EXPERIMENTAL)" - depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM && 44x + bool "Build a relocatable kernel" + depends on ADVANCED_OPTIONS && FLATMEM && 44x select NONSTATIC_KERNEL help This builds a kernel image that is capable of running at the diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 159e94f4b22..967fd23ace7 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -67,7 +67,25 @@ LDFLAGS_vmlinux-y := -Bstatic LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-y) -CFLAGS-$(CONFIG_PPC64) := -mminimal-toc -mtraceback=no -mcall-aixdesc +ifeq ($(CONFIG_PPC64),y) +ifeq ($(call cc-option-yn,-mcmodel=medium),y) + # -mcmodel=medium breaks modules because it uses 32bit offsets from + # the TOC pointer to create pointers where possible. Pointers into the + # percpu data area are created by this method. + # + # The kernel module loader relocates the percpu data section from the + # original location (starting with 0xd...) to somewhere in the base + # kernel percpu data space (starting with 0xc...). We need a full + # 64bit relocation for this to work, hence -mcmodel=large. + KBUILD_CFLAGS_MODULE += -mcmodel=large +else + export NO_MINIMAL_TOC := -mno-minimal-toc +endif +endif + +CFLAGS-$(CONFIG_PPC64) := -mtraceback=no -mcall-aixdesc +CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,-mminimal-toc) +CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions) CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,-mtune=power4) @@ -136,6 +154,7 @@ head-$(CONFIG_FSL_BOOKE) := arch/powerpc/kernel/head_fsl_booke.o head-$(CONFIG_PPC64) += arch/powerpc/kernel/entry_64.o head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o head-$(CONFIG_ALTIVEC) += arch/powerpc/kernel/vector.o +head-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += arch/powerpc/kernel/prom_init.o core-y += arch/powerpc/kernel/ \ arch/powerpc/mm/ \ @@ -143,6 +162,7 @@ core-y += arch/powerpc/kernel/ \ arch/powerpc/sysdev/ \ arch/powerpc/platforms/ \ arch/powerpc/math-emu/ \ + arch/powerpc/crypto/ \ arch/powerpc/net/ core-$(CONFIG_XMON) += arch/powerpc/xmon/ core-$(CONFIG_KVM) += arch/powerpc/kvm/ @@ -181,7 +201,7 @@ $(BOOT_TARGETS2): vmlinux bootwrapper_install: $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@) -%.dtb: +%.dtb: scripts $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@) define archhelp diff --git a/arch/powerpc/boot/dts/a3m071.dts b/arch/powerpc/boot/dts/a3m071.dts new file mode 100644 index 00000000000..bf81b8f9704 --- /dev/null +++ b/arch/powerpc/boot/dts/a3m071.dts @@ -0,0 +1,142 @@ +/* + * a3m071 board Device Tree Source + * + * Copyright 2012 Stefan Roese <sr@denx.de> + * + * Copyright (C) 2011 DENX Software Engineering GmbH + * Heiko Schocher <hs@denx.de> + * + * Copyright (C) 2007 Semihalf + * Marian Balakowicz <m8@semihalf.com> + * + * This 
program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +/include/ "mpc5200b.dtsi" + +&gpt0 { fsl,has-wdt; }; + +/ { + model = "anonymous,a3m071"; + compatible = "anonymous,a3m071"; + + soc5200@f0000000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,mpc5200b-immr"; + ranges = <0 0xf0000000 0x0000c000>; + reg = <0xf0000000 0x00000100>; + bus-frequency = <0>; /* From boot loader */ + system-frequency = <0>; /* From boot loader */ + + spi@f00 { + status = "disabled"; + }; + + usb: usb@1000 { + status = "disabled"; + }; + + psc@2000 { + compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; + reg = <0x2000 0x100>; + interrupts = <2 1 0>; + }; + + psc@2200 { + status = "disabled"; + }; + + psc@2400 { + status = "disabled"; + }; + + psc@2600 { + status = "disabled"; + }; + + psc@2800 { + status = "disabled"; + }; + + psc@2c00 { // PSC6 + compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; + reg = <0x2c00 0x100>; + interrupts = <2 4 0>; + }; + + ethernet@3000 { + phy-handle = <&phy0>; + }; + + mdio@3000 { + phy0: ethernet-phy@3 { + reg = <0x03>; + }; + }; + + ata@3a00 { + status = "disabled"; + }; + + i2c@3d00 { + status = "disabled"; + }; + + i2c@3d40 { + status = "disabled"; + }; + }; + + localbus { + compatible = "fsl,mpc5200b-lpb","simple-bus"; + #address-cells = <2>; + #size-cells = <1>; + ranges = <0 0 0xfc000000 0x02000000 + 3 0 0xe9000000 0x00080000 + 5 0 0xe8000000 0x00010000>; + + flash@0,0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0 0x0 0x02000000>; + compatible = "cfi-flash"; + bank-width = <2>; + partition@0x0 { + label = "u-boot"; + reg = <0x00000000 0x00040000>; + read-only; + }; + partition@0x00040000 { + label = "env"; + reg = <0x00040000 0x00020000>; + }; + partition@0x00060000 { + label = "dtb"; + reg = <0x00060000 0x00020000>; + }; + partition@0x00080000 { + label = "kernel"; + reg = <0x00080000 0x00500000>; + }; + partition@0x00580000 { + label = "root"; + reg = <0x00580000 0x00A80000>; + }; + }; + + fpga@3,0 { + compatible = "anonymous,a3m071-fpga"; + reg = <3 0x0 0x00080000 + 5 0x0 0x00010000>; + interrupts = <0 0 3>; /* level low */ + }; + }; + + pci@f0000d00 { + status = "disabled"; + }; +}; diff --git a/arch/powerpc/boot/dts/a4m072.dts b/arch/powerpc/boot/dts/a4m072.dts index fabe7b7d5f1..1f02034c7e9 100644 --- a/arch/powerpc/boot/dts/a4m072.dts +++ b/arch/powerpc/boot/dts/a4m072.dts @@ -15,6 +15,11 @@ /include/ "mpc5200b.dtsi" +&gpt0 { fsl,has-wdt; }; +&gpt3 { gpio-controller; }; +&gpt4 { gpio-controller; }; +&gpt5 { gpio-controller; }; + / { model = "anonymous,a4m072"; compatible = "anonymous,a4m072"; @@ -34,28 +39,6 @@ fsl,init-fd-counters = <0x3333>; }; - timer@600 { - fsl,has-wdt; - }; - - gpt3: timer@630 { /* General Purpose Timer in GPIO mode */ - compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio"; - gpio-controller; - #gpio-cells = <2>; - }; - - gpt4: timer@640 { /* General Purpose Timer in GPIO mode */ - compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio"; - gpio-controller; - #gpio-cells = <2>; - }; - - gpt5: timer@650 { /* General Purpose Timer in GPIO mode */ - compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio"; - gpio-controller; - #gpio-cells = <2>; - }; - spi@f00 { status = "disabled"; }; diff --git a/arch/powerpc/boot/dts/bluestone.dts b/arch/powerpc/boot/dts/bluestone.dts index 
9d4917aebe6..7daaca324c0 100644 --- a/arch/powerpc/boot/dts/bluestone.dts +++ b/arch/powerpc/boot/dts/bluestone.dts @@ -107,6 +107,14 @@ interrupt-parent = <&UIC0>; }; + OCM: ocm@400040000 { + compatible = "ibm,ocm"; + status = "ok"; + cell-index = <1>; + /* configured in U-Boot */ + reg = <4 0x00040000 0x8000>; /* 32K */ + }; + SDR0: sdr { compatible = "ibm,sdr-apm821xx"; dcr-reg = <0x00e 0x002>; diff --git a/arch/powerpc/boot/dts/bsc9131rdb.dtsi b/arch/powerpc/boot/dts/bsc9131rdb.dtsi index 638adda2c21..9e6c01339cc 100644 --- a/arch/powerpc/boot/dts/bsc9131rdb.dtsi +++ b/arch/powerpc/boot/dts/bsc9131rdb.dtsi @@ -126,7 +126,7 @@ }; }; - sdhci@2e000 { + sdhc@2e000 { status = "disabled"; }; diff --git a/arch/powerpc/boot/dts/cm5200.dts b/arch/powerpc/boot/dts/cm5200.dts index ad3a4f4a2b0..fb580dd84dd 100644 --- a/arch/powerpc/boot/dts/cm5200.dts +++ b/arch/powerpc/boot/dts/cm5200.dts @@ -12,15 +12,13 @@ /include/ "mpc5200b.dtsi" +&gpt0 { fsl,has-wdt; }; + / { model = "schindler,cm5200"; compatible = "schindler,cm5200"; soc5200@f0000000 { - timer@600 { // General Purpose Timer - fsl,has-wdt; - }; - can@900 { status = "disabled"; }; diff --git a/arch/powerpc/boot/dts/digsy_mtc.dts b/arch/powerpc/boot/dts/digsy_mtc.dts index a7511f2d844..955bff629df 100644 --- a/arch/powerpc/boot/dts/digsy_mtc.dts +++ b/arch/powerpc/boot/dts/digsy_mtc.dts @@ -13,6 +13,9 @@ /include/ "mpc5200b.dtsi" +&gpt0 { gpio-controller; fsl,has-wdt; }; +&gpt1 { gpio-controller; }; + / { model = "intercontrol,digsy-mtc"; compatible = "intercontrol,digsy-mtc"; @@ -22,17 +25,6 @@ }; soc5200@f0000000 { - timer@600 { // General Purpose Timer - #gpio-cells = <2>; - fsl,has-wdt; - gpio-controller; - }; - - timer@610 { - #gpio-cells = <2>; - gpio-controller; - }; - rtc@800 { status = "disabled"; }; diff --git a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi index 0bde9ee8afa..af12ead88c5 100644 --- a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi @@ -41,7 +41,7 @@ /* controller at 0x9000 */ &pci0 { - compatible = "fsl,p1010-pcie", "fsl,qoriq-pcie-v2.3", "fsl,qoriq-pcie-v2.2"; + compatible = "fsl,p1010-pcie", "fsl,qoriq-pcie-v2.3"; device_type = "pci"; #size-cells = <2>; #address-cells = <3>; @@ -69,7 +69,7 @@ /* controller at 0xa000 */ &pci1 { - compatible = "fsl,p1010-pcie", "fsl,qoriq-pcie-v2.3", "fsl,qoriq-pcie-v2.2"; + compatible = "fsl,p1010-pcie", "fsl,qoriq-pcie-v2.3"; device_type = "pci"; #size-cells = <2>; #address-cells = <3>; diff --git a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi index 06216b8c0af..e179803a81e 100644 --- a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi @@ -45,7 +45,7 @@ /* controller at 0x9000 */ &pci0 { - compatible = "fsl,p1022-pcie"; + compatible = "fsl,mpc8548-pcie"; device_type = "pci"; #size-cells = <2>; #address-cells = <3>; @@ -73,7 +73,7 @@ /* controller at 0xa000 */ &pci1 { - compatible = "fsl,p1022-pcie"; + compatible = "fsl,mpc8548-pcie"; device_type = "pci"; #size-cells = <2>; #address-cells = <3>; @@ -102,7 +102,7 @@ /* controller at 0xb000 */ &pci2 { - compatible = "fsl,p1022-pcie"; + compatible = "fsl,mpc8548-pcie"; device_type = "pci"; #size-cells = <2>; #address-cells = <3>; diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi index 531eab82c6c..69ac1acd434 100644 --- a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi +++ 
b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi @@ -48,6 +48,8 @@ bus-range = <0x0 0xff>; clock-frequency = <33333333>; interrupts = <16 2 1 15>; + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x500>; /* PEX1LIODNR */ pcie@0 { reg = <0 0 0 0 0>; #interrupt-cells = <1>; @@ -75,6 +77,8 @@ bus-range = <0 0xff>; clock-frequency = <33333333>; interrupts = <16 2 1 14>; + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x504>; /* PEX2LIODNR */ pcie@0 { reg = <0 0 0 0 0>; #interrupt-cells = <1>; @@ -102,6 +106,8 @@ bus-range = <0x0 0xff>; clock-frequency = <33333333>; interrupts = <16 2 1 13>; + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x508>; /* PEX3LIODNR */ pcie@0 { reg = <0 0 0 0 0>; #interrupt-cells = <1>; @@ -125,18 +131,21 @@ interrupts = <16 2 1 11>; #address-cells = <2>; #size-cells = <2>; + fsl,iommu-parent = <&pamu0>; ranges; port1 { #address-cells = <2>; #size-cells = <2>; cell-index = <1>; + fsl,liodn-reg = <&guts 0x510>; /* RIO1LIODNR */ }; port2 { #address-cells = <2>; #size-cells = <2>; cell-index = <2>; + fsl,liodn-reg = <&guts 0x514>; /* RIO2LIODNR */ }; }; @@ -246,10 +255,37 @@ iommu@20000 { compatible = "fsl,pamu-v1.0", "fsl,pamu"; - reg = <0x20000 0x4000>; + reg = <0x20000 0x4000>; /* for compatibility with older PAMU drivers */ + ranges = <0 0x20000 0x4000>; + #address-cells = <1>; + #size-cells = <1>; interrupts = < 24 2 0 0 16 2 1 30>; + + pamu0: pamu@0 { + reg = <0 0x1000>; + fsl,primary-cache-geometry = <32 1>; + fsl,secondary-cache-geometry = <128 2>; + }; + + pamu1: pamu@1000 { + reg = <0x1000 0x1000>; + fsl,primary-cache-geometry = <32 1>; + fsl,secondary-cache-geometry = <128 2>; + }; + + pamu2: pamu@2000 { + reg = <0x2000 0x1000>; + fsl,primary-cache-geometry = <32 1>; + fsl,secondary-cache-geometry = <128 2>; + }; + + pamu3: pamu@3000 { + reg = <0x3000 0x1000>; + fsl,primary-cache-geometry = <32 1>; + fsl,secondary-cache-geometry = <128 2>; + }; }; /include/ "qoriq-mpic.dtsi" @@ -291,7 +327,17 @@ }; /include/ "qoriq-dma-0.dtsi" + dma@100300 { + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x580>; /* DMA1LIODNR */ + }; + /include/ "qoriq-dma-1.dtsi" + dma@101300 { + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x584>; /* DMA2LIODNR */ + }; + /include/ "qoriq-espi-0.dtsi" spi@110000 { fsl,espi-num-chipselects = <4>; @@ -299,6 +345,8 @@ /include/ "qoriq-esdhc-0.dtsi" sdhc@114000 { + fsl,iommu-parent = <&pamu1>; + fsl,liodn-reg = <&guts 0x530>; /* eSDHCLIODNR */ sdhci,auto-cmd12; }; @@ -308,20 +356,37 @@ /include/ "qoriq-duart-1.dtsi" /include/ "qoriq-gpio-0.dtsi" /include/ "qoriq-usb2-mph-0.dtsi" - usb0: usb@210000 { - compatible = "fsl-usb2-mph-v1.6", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph"; - phy_type = "utmi"; - port0; - }; + usb0: usb@210000 { + compatible = "fsl-usb2-mph-v1.6", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph"; + phy_type = "utmi"; + fsl,iommu-parent = <&pamu1>; + fsl,liodn-reg = <&guts 0x520>; /* USB1LIODNR */ + port0; + }; /include/ "qoriq-usb2-dr-0.dtsi" - usb1: usb@211000 { - compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr"; - dr_mode = "host"; - phy_type = "utmi"; - }; + usb1: usb@211000 { + compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr"; + fsl,iommu-parent = <&pamu1>; + fsl,liodn-reg = <&guts 0x524>; /* USB2LIODNR */ + dr_mode = "host"; + phy_type = "utmi"; + }; /include/ "qoriq-sata2-0.dtsi" + sata@220000 { + fsl,iommu-parent = <&pamu1>; + fsl,liodn-reg = <&guts 0x550>; /* SATA1LIODNR */ + }; + /include/ "qoriq-sata2-1.dtsi" + sata@221000 { + fsl,iommu-parent = 
<&pamu1>; + fsl,liodn-reg = <&guts 0x554>; /* SATA2LIODNR */ + }; + /include/ "qoriq-sec4.2-0.dtsi" +crypto: crypto@300000 { + fsl,iommu-parent = <&pamu1>; + }; }; diff --git a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi index af4ebc8009e..9b5a81a4529 100644 --- a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi @@ -48,6 +48,8 @@ bus-range = <0x0 0xff>; clock-frequency = <33333333>; interrupts = <16 2 1 15>; + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x500>; /* PEX1LIODNR */ pcie@0 { reg = <0 0 0 0 0>; #interrupt-cells = <1>; @@ -75,6 +77,8 @@ bus-range = <0 0xff>; clock-frequency = <33333333>; interrupts = <16 2 1 14>; + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x504>; /* PEX2LIODNR */ pcie@0 { reg = <0 0 0 0 0>; #interrupt-cells = <1>; @@ -102,6 +106,8 @@ bus-range = <0x0 0xff>; clock-frequency = <33333333>; interrupts = <16 2 1 13>; + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x508>; /* PEX3LIODNR */ pcie@0 { reg = <0 0 0 0 0>; #interrupt-cells = <1>; @@ -152,18 +158,21 @@ interrupts = <16 2 1 11>; #address-cells = <2>; #size-cells = <2>; + fsl,iommu-parent = <&pamu0>; ranges; port1 { #address-cells = <2>; #size-cells = <2>; cell-index = <1>; + fsl,liodn-reg = <&guts 0x510>; /* RIO1LIODNR */ }; port2 { #address-cells = <2>; #size-cells = <2>; cell-index = <2>; + fsl,liodn-reg = <&guts 0x514>; /* RIO2LIODNR */ }; }; @@ -273,10 +282,37 @@ iommu@20000 { compatible = "fsl,pamu-v1.0", "fsl,pamu"; - reg = <0x20000 0x4000>; + reg = <0x20000 0x4000>; /* for compatibility with older PAMU drivers */ + ranges = <0 0x20000 0x4000>; + #address-cells = <1>; + #size-cells = <1>; interrupts = < 24 2 0 0 16 2 1 30>; + + pamu0: pamu@0 { + reg = <0 0x1000>; + fsl,primary-cache-geometry = <32 1>; + fsl,secondary-cache-geometry = <128 2>; + }; + + pamu1: pamu@1000 { + reg = <0x1000 0x1000>; + fsl,primary-cache-geometry = <32 1>; + fsl,secondary-cache-geometry = <128 2>; + }; + + pamu2: pamu@2000 { + reg = <0x2000 0x1000>; + fsl,primary-cache-geometry = <32 1>; + fsl,secondary-cache-geometry = <128 2>; + }; + + pamu3: pamu@3000 { + reg = <0x3000 0x1000>; + fsl,primary-cache-geometry = <32 1>; + fsl,secondary-cache-geometry = <128 2>; + }; }; /include/ "qoriq-mpic.dtsi" @@ -318,7 +354,17 @@ }; /include/ "qoriq-dma-0.dtsi" + dma@100300 { + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x580>; /* DMA1LIODNR */ + }; + /include/ "qoriq-dma-1.dtsi" + dma@101300 { + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x584>; /* DMA2LIODNR */ + }; + /include/ "qoriq-espi-0.dtsi" spi@110000 { fsl,espi-num-chipselects = <4>; @@ -326,6 +372,8 @@ /include/ "qoriq-esdhc-0.dtsi" sdhc@114000 { + fsl,iommu-parent = <&pamu1>; + fsl,liodn-reg = <&guts 0x530>; /* eSDHCLIODNR */ sdhci,auto-cmd12; }; @@ -335,20 +383,37 @@ /include/ "qoriq-duart-1.dtsi" /include/ "qoriq-gpio-0.dtsi" /include/ "qoriq-usb2-mph-0.dtsi" - usb0: usb@210000 { - compatible = "fsl-usb2-mph-v1.6", "fsl-usb2-mph"; - phy_type = "utmi"; - port0; - }; + usb0: usb@210000 { + compatible = "fsl-usb2-mph-v1.6", "fsl-usb2-mph"; + phy_type = "utmi"; + fsl,iommu-parent = <&pamu1>; + fsl,liodn-reg = <&guts 0x520>; /* USB1LIODNR */ + port0; + }; /include/ "qoriq-usb2-dr-0.dtsi" - usb1: usb@211000 { - compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr"; - dr_mode = "host"; - phy_type = "utmi"; - }; + usb1: usb@211000 { + compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr"; + fsl,iommu-parent = 
<&pamu1>; + fsl,liodn-reg = <&guts 0x524>; /* USB2LIODNR */ + dr_mode = "host"; + phy_type = "utmi"; + }; /include/ "qoriq-sata2-0.dtsi" + sata@220000 { + fsl,iommu-parent = <&pamu1>; + fsl,liodn-reg = <&guts 0x550>; /* SATA1LIODNR */ + }; + /include/ "qoriq-sata2-1.dtsi" + sata@221000 { + fsl,iommu-parent = <&pamu1>; + fsl,liodn-reg = <&guts 0x554>; /* SATA2LIODNR */ + }; + /include/ "qoriq-sec4.2-0.dtsi" +crypto: crypto@300000 { + fsl,iommu-parent = <&pamu1>; + }; }; diff --git a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi index 4f9c9f682ec..19859ad851e 100644 --- a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi @@ -41,13 +41,15 @@ /* controller at 0x200000 */ &pci0 { - compatible = "fsl,p4080-pcie"; + compatible = "fsl,p4080-pcie", "fsl,qoriq-pcie-v2.1"; device_type = "pci"; #size-cells = <2>; #address-cells = <3>; bus-range = <0x0 0xff>; clock-frequency = <33333333>; interrupts = <16 2 1 15>; + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x500>; /* PEX1LIODNR */ pcie@0 { reg = <0 0 0 0 0>; #interrupt-cells = <1>; @@ -68,13 +70,15 @@ /* controller at 0x201000 */ &pci1 { - compatible = "fsl,p4080-pcie"; + compatible = "fsl,p4080-pcie", "fsl,qoriq-pcie-v2.1"; device_type = "pci"; #size-cells = <2>; #address-cells = <3>; bus-range = <0 0xff>; clock-frequency = <33333333>; interrupts = <16 2 1 14>; + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x504>; /* PEX2LIODNR */ pcie@0 { reg = <0 0 0 0 0>; #interrupt-cells = <1>; @@ -95,13 +99,15 @@ /* controller at 0x202000 */ &pci2 { - compatible = "fsl,p4080-pcie"; + compatible = "fsl,p4080-pcie", "fsl,qoriq-pcie-v2.1"; device_type = "pci"; #size-cells = <2>; #address-cells = <3>; bus-range = <0x0 0xff>; clock-frequency = <33333333>; interrupts = <16 2 1 13>; + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x508>; /* PEX3LIODNR */ pcie@0 { reg = <0 0 0 0 0>; #interrupt-cells = <1>; @@ -126,18 +132,21 @@ #address-cells = <2>; #size-cells = <2>; fsl,srio-rmu-handle = <&rmu>; + fsl,iommu-parent = <&pamu0>; ranges; port1 { #address-cells = <2>; #size-cells = <2>; cell-index = <1>; + fsl,liodn-reg = <&guts 0x510>; /* RIO1LIODNR */ }; port2 { #address-cells = <2>; #size-cells = <2>; cell-index = <2>; + fsl,liodn-reg = <&guts 0x514>; /* RIO2LIODNR */ }; }; @@ -281,13 +290,51 @@ iommu@20000 { compatible = "fsl,pamu-v1.0", "fsl,pamu"; - reg = <0x20000 0x5000>; + reg = <0x20000 0x5000>; /* for compatibility with older PAMU drivers */ + ranges = <0 0x20000 0x5000>; + #address-cells = <1>; + #size-cells = <1>; interrupts = < 24 2 0 0 16 2 1 30>; + + pamu0: pamu@0 { + reg = <0 0x1000>; + fsl,primary-cache-geometry = <32 1>; + fsl,secondary-cache-geometry = <128 2>; + }; + + pamu1: pamu@1000 { + reg = <0x1000 0x1000>; + fsl,primary-cache-geometry = <32 1>; + fsl,secondary-cache-geometry = <128 2>; + }; + + pamu2: pamu@2000 { + reg = <0x2000 0x1000>; + fsl,primary-cache-geometry = <32 1>; + fsl,secondary-cache-geometry = <128 2>; + }; + + pamu3: pamu@3000 { + reg = <0x3000 0x1000>; + fsl,primary-cache-geometry = <32 1>; + fsl,secondary-cache-geometry = <128 2>; + }; + + pamu4: pamu@4000 { + reg = <0x4000 0x1000>; + fsl,primary-cache-geometry = <32 1>; + fsl,secondary-cache-geometry = <128 2>; + }; }; /include/ "qoriq-rmu-0.dtsi" + rmu@d3000 { + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x540>; /* RMULIODNR */ + }; + /include/ "qoriq-mpic.dtsi" guts: global-utilities@e0000 { @@ -327,7 +374,17 @@ }; /include/ "qoriq-dma-0.dtsi" + 
dma@100300 { + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x580>; /* DMA1LIODNR */ + }; + /include/ "qoriq-dma-1.dtsi" + dma@101300 { + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x584>; /* DMA2LIODNR */ + }; + /include/ "qoriq-espi-0.dtsi" spi@110000 { fsl,espi-num-chipselects = <4>; @@ -335,6 +392,8 @@ /include/ "qoriq-esdhc-0.dtsi" sdhc@114000 { + fsl,iommu-parent = <&pamu1>; + fsl,liodn-reg = <&guts 0x530>; /* eSDHCLIODNR */ voltage-ranges = <3300 3300>; sdhci,auto-cmd12; }; @@ -347,11 +406,18 @@ /include/ "qoriq-usb2-mph-0.dtsi" usb@210000 { compatible = "fsl-usb2-mph-v1.6", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph"; + fsl,iommu-parent = <&pamu1>; + fsl,liodn-reg = <&guts 0x520>; /* USB1LIODNR */ port0; }; /include/ "qoriq-usb2-dr-0.dtsi" usb@211000 { compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr"; + fsl,iommu-parent = <&pamu1>; + fsl,liodn-reg = <&guts 0x524>; /* USB2LIODNR */ }; /include/ "qoriq-sec4.0-0.dtsi" +crypto: crypto@300000 { + fsl,iommu-parent = <&pamu1>; + }; }; diff --git a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi index 64b6abea846..9ea77c3513f 100644 --- a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi @@ -48,6 +48,8 @@ bus-range = <0x0 0xff>; clock-frequency = <33333333>; interrupts = <16 2 1 15>; + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x500>; /* PEX1LIODNR */ pcie@0 { reg = <0 0 0 0 0>; #interrupt-cells = <1>; @@ -75,6 +77,8 @@ bus-range = <0 0xff>; clock-frequency = <33333333>; interrupts = <16 2 1 14>; + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x504>; /* PEX2LIODNR */ pcie@0 { reg = <0 0 0 0 0>; #interrupt-cells = <1>; @@ -102,6 +106,8 @@ bus-range = <0x0 0xff>; clock-frequency = <33333333>; interrupts = <16 2 1 13>; + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x508>; /* PEX3LIODNR */ pcie@0 { reg = <0 0 0 0 0>; #interrupt-cells = <1>; @@ -129,6 +135,8 @@ bus-range = <0x0 0xff>; clock-frequency = <33333333>; interrupts = <16 2 1 12>; + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x50c>; /* PEX4LIODNR */ pcie@0 { reg = <0 0 0 0 0>; #interrupt-cells = <1>; @@ -152,18 +160,21 @@ interrupts = <16 2 1 11>; #address-cells = <2>; #size-cells = <2>; + fsl,iommu-parent = <&pamu0>; ranges; port1 { #address-cells = <2>; #size-cells = <2>; cell-index = <1>; + fsl,liodn-reg = <&guts 0x510>; /* RIO1LIODNR */ }; port2 { #address-cells = <2>; #size-cells = <2>; cell-index = <2>; + fsl,liodn-reg = <&guts 0x514>; /* RIO2LIODNR */ }; }; @@ -276,10 +287,37 @@ iommu@20000 { compatible = "fsl,pamu-v1.0", "fsl,pamu"; - reg = <0x20000 0x4000>; + reg = <0x20000 0x4000>; /* for compatibility with older PAMU drivers */ + ranges = <0 0x20000 0x4000>; + #address-cells = <1>; + #size-cells = <1>; interrupts = < 24 2 0 0 16 2 1 30>; + + pamu0: pamu@0 { + reg = <0 0x1000>; + fsl,primary-cache-geometry = <32 1>; + fsl,secondary-cache-geometry = <128 2>; + }; + + pamu1: pamu@1000 { + reg = <0x1000 0x1000>; + fsl,primary-cache-geometry = <32 1>; + fsl,secondary-cache-geometry = <128 2>; + }; + + pamu2: pamu@2000 { + reg = <0x2000 0x1000>; + fsl,primary-cache-geometry = <32 1>; + fsl,secondary-cache-geometry = <128 2>; + }; + + pamu3: pamu@3000 { + reg = <0x3000 0x1000>; + fsl,primary-cache-geometry = <32 1>; + fsl,secondary-cache-geometry = <128 2>; + }; }; /include/ "qoriq-mpic.dtsi" @@ -321,7 +359,17 @@ }; /include/ "qoriq-dma-0.dtsi" + dma@100300 { + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 
0x580>; /* DMA1LIODNR */ + }; + /include/ "qoriq-dma-1.dtsi" + dma@101300 { + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x584>; /* DMA2LIODNR */ + }; + /include/ "qoriq-espi-0.dtsi" spi@110000 { fsl,espi-num-chipselects = <4>; @@ -329,6 +377,8 @@ /include/ "qoriq-esdhc-0.dtsi" sdhc@114000 { + fsl,iommu-parent = <&pamu1>; + fsl,liodn-reg = <&guts 0x530>; /* eSDHCLIODNR */ sdhci,auto-cmd12; }; @@ -338,20 +388,41 @@ /include/ "qoriq-duart-1.dtsi" /include/ "qoriq-gpio-0.dtsi" /include/ "qoriq-usb2-mph-0.dtsi" - usb0: usb@210000 { - compatible = "fsl-usb2-mph-v1.6", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph"; - phy_type = "utmi"; - port0; - }; + usb0: usb@210000 { + compatible = "fsl-usb2-mph-v1.6", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph"; + fsl,iommu-parent = <&pamu1>; + fsl,liodn-reg = <&guts 0x520>; /* USB1LIODNR */ + phy_type = "utmi"; + port0; + }; /include/ "qoriq-usb2-dr-0.dtsi" - usb1: usb@211000 { - compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr"; - dr_mode = "host"; - phy_type = "utmi"; - }; + usb1: usb@211000 { + compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr"; + fsl,iommu-parent = <&pamu1>; + fsl,liodn-reg = <&guts 0x524>; /* USB2LIODNR */ + dr_mode = "host"; + phy_type = "utmi"; + }; /include/ "qoriq-sata2-0.dtsi" + sata@220000 { + fsl,iommu-parent = <&pamu1>; + fsl,liodn-reg = <&guts 0x550>; /* SATA1LIODNR */ + }; + /include/ "qoriq-sata2-1.dtsi" + sata@221000 { + fsl,iommu-parent = <&pamu1>; + fsl,liodn-reg = <&guts 0x554>; /* SATA2LIODNR */ + }; /include/ "qoriq-sec4.2-0.dtsi" + crypto@300000 { + fsl,iommu-parent = <&pamu1>; + }; + +/include/ "qoriq-raid1.0-0.dtsi" + raideng@320000 { + fsl,iommu-parent = <&pamu1>; + }; }; diff --git a/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi index 0a198b0a77e..8df47fc45ab 100644 --- a/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi +++ b/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi @@ -73,6 +73,12 @@ rtic_c = &rtic_c; rtic_d = &rtic_d; sec_mon = &sec_mon; + + raideng = &raideng; + raideng_jr0 = &raideng_jr0; + raideng_jr1 = &raideng_jr1; + raideng_jr2 = &raideng_jr2; + raideng_jr3 = &raideng_jr3; }; cpus { diff --git a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi index db2c9a7b3a0..97f8c26f970 100644 --- a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi @@ -48,6 +48,7 @@ bus-range = <0x0 0xff>; clock-frequency = <33333333>; interrupts = <16 2 1 15>; + fsl,iommu-parent = <&pamu0>; pcie@0 { reg = <0 0 0 0 0>; #interrupt-cells = <1>; @@ -75,6 +76,7 @@ bus-range = <0 0xff>; clock-frequency = <33333333>; interrupts = <16 2 1 14>; + fsl,iommu-parent = <&pamu0>; pcie@0 { reg = <0 0 0 0 0>; #interrupt-cells = <1>; @@ -102,6 +104,7 @@ bus-range = <0x0 0xff>; clock-frequency = <33333333>; interrupts = <16 2 1 13>; + fsl,iommu-parent = <&pamu0>; pcie@0 { reg = <0 0 0 0 0>; #interrupt-cells = <1>; @@ -239,10 +242,42 @@ iommu@20000 { compatible = "fsl,pamu-v1.0", "fsl,pamu"; - reg = <0x20000 0x5000>; - interrupts = < - 24 2 0 0 - 16 2 1 30>; + reg = <0x20000 0x5000>; /* for compatibility with older PAMU drivers */ + ranges = <0 0x20000 0x5000>; + #address-cells = <1>; + #size-cells = <1>; + interrupts = <24 2 0 0 + 16 2 1 30>; + + pamu0: pamu@0 { + reg = <0 0x1000>; + fsl,primary-cache-geometry = <32 1>; + fsl,secondary-cache-geometry = <128 2>; + }; + + pamu1: pamu@1000 { + reg = <0x1000 0x1000>; + fsl,primary-cache-geometry = <32 1>; + fsl,secondary-cache-geometry = <128 2>; + 
}; + + pamu2: pamu@2000 { + reg = <0x2000 0x1000>; + fsl,primary-cache-geometry = <32 1>; + fsl,secondary-cache-geometry = <128 2>; + }; + + pamu3: pamu@3000 { + reg = <0x3000 0x1000>; + fsl,primary-cache-geometry = <32 1>; + fsl,secondary-cache-geometry = <128 2>; + }; + + pamu4: pamu@4000 { + reg = <0x4000 0x1000>; + fsl,primary-cache-geometry = <32 1>; + fsl,secondary-cache-geometry = <128 2>; + }; }; /include/ "qoriq-mpic.dtsi" @@ -284,7 +319,17 @@ }; /include/ "qoriq-dma-0.dtsi" + dma@100300 { + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x580>; /* DMA1LIODNR */ + }; + /include/ "qoriq-dma-1.dtsi" + dma@101300 { + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x584>; /* DMA2LIODNR */ + }; + /include/ "qoriq-espi-0.dtsi" spi@110000 { fsl,espi-num-chipselects = <4>; @@ -292,6 +337,8 @@ /include/ "qoriq-esdhc-0.dtsi" sdhc@114000 { + fsl,iommu-parent = <&pamu2>; + fsl,liodn-reg = <&guts 0x530>; /* eSDHCLIODNR */ sdhci,auto-cmd12; }; @@ -301,20 +348,37 @@ /include/ "qoriq-duart-1.dtsi" /include/ "qoriq-gpio-0.dtsi" /include/ "qoriq-usb2-mph-0.dtsi" - usb0: usb@210000 { - compatible = "fsl-usb2-mph-v1.6", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph"; - phy_type = "utmi"; - port0; - }; + usb0: usb@210000 { + compatible = "fsl-usb2-mph-v1.6", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph"; + fsl,iommu-parent = <&pamu4>; + fsl,liodn-reg = <&guts 0x520>; /* USB1LIODNR */ + phy_type = "utmi"; + port0; + }; /include/ "qoriq-usb2-dr-0.dtsi" - usb1: usb@211000 { - compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr"; - dr_mode = "host"; - phy_type = "utmi"; - }; + usb1: usb@211000 { + compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr"; + fsl,iommu-parent = <&pamu4>; + fsl,liodn-reg = <&guts 0x524>; /* USB2LIODNR */ + dr_mode = "host"; + phy_type = "utmi"; + }; /include/ "qoriq-sata2-0.dtsi" + sata@220000 { + fsl,iommu-parent = <&pamu4>; + fsl,liodn-reg = <&guts 0x550>; /* SATA1LIODNR */ + }; + /include/ "qoriq-sata2-1.dtsi" + sata@221000 { + fsl,iommu-parent = <&pamu4>; + fsl,liodn-reg = <&guts 0x554>; /* SATA2LIODNR */ + }; + /include/ "qoriq-sec5.2-0.dtsi" + crypto@300000 { + fsl,iommu-parent = <&pamu4>; + }; }; diff --git a/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi index d4c9d5daab2..ffadcb563ad 100644 --- a/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi +++ b/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi @@ -36,6 +36,7 @@ crypto@30000 { compatible = "fsl,sec-v4.4", "fsl,sec-v4.0"; #address-cells = <1>; #size-cells = <1>; + ranges = <0x0 0x30000 0x10000>; reg = <0x30000 0x10000>; interrupts = <58 2 0 0>; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-raid1.0-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-raid1.0-0.dtsi new file mode 100644 index 00000000000..8d2e8aa6cf8 --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/qoriq-raid1.0-0.dtsi @@ -0,0 +1,85 @@ +/* + * QorIQ RAID 1.0 device tree stub [ controller @ offset 0x320000 ] + * + * Copyright 2012 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +raideng: raideng@320000 { + compatible = "fsl,raideng-v1.0"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x320000 0x10000>; + ranges = <0 0x320000 0x10000>; + + raideng_jq0@1000 { + compatible = "fsl,raideng-v1.0-job-queue"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x1000 0x1000>; + ranges = <0x0 0x1000 0x1000>; + + raideng_jr0: jr@0 { + compatible = "fsl,raideng-v1.0-job-ring", "fsl,raideng-v1.0-hp-ring"; + reg = <0x0 0x400>; + interrupts = <139 2 0 0>; + interrupt-parent = <&mpic>; + }; + + raideng_jr1: jr@400 { + compatible = "fsl,raideng-v1.0-job-ring", "fsl,raideng-v1.0-lp-ring"; + reg = <0x400 0x400>; + interrupts = <140 2 0 0>; + interrupt-parent = <&mpic>; + }; + }; + + raideng_jq1@2000 { + compatible = "fsl,raideng-v1.0-job-queue"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x2000 0x1000>; + ranges = <0x0 0x2000 0x1000>; + + raideng_jr2: jr@0 { + compatible = "fsl,raideng-v1.0-job-ring", "fsl,raideng-v1.0-hp-ring"; + reg = <0x0 0x400>; + interrupts = <141 2 0 0>; + interrupt-parent = <&mpic>; + }; + + raideng_jr3: jr@400 { + compatible = "fsl,raideng-v1.0-job-ring", "fsl,raideng-v1.0-lp-ring"; + reg = <0x400 0x400>; + interrupts = <142 2 0 0>; + interrupt-parent = <&mpic>; + }; + }; +}; diff --git a/arch/powerpc/boot/dts/lite5200b.dts b/arch/powerpc/boot/dts/lite5200b.dts index fb288bb882b..5abb46c5cc9 100644 --- a/arch/powerpc/boot/dts/lite5200b.dts +++ b/arch/powerpc/boot/dts/lite5200b.dts @@ -12,19 +12,34 @@ /include/ "mpc5200b.dtsi" +&gpt0 { fsl,has-wdt; }; +&gpt2 { gpio-controller; }; +&gpt3 { gpio-controller; }; + / { model = "fsl,lite5200b"; compatible = "fsl,lite5200b"; + leds { + compatible = "gpio-leds"; + tmr2 { + gpios = <&gpt2 0 1>; + }; + tmr3 { + gpios = <&gpt3 0 1>; + linux,default-trigger = "heartbeat"; + }; + led1 { gpios = <&gpio_wkup 2 1>; }; + led2 { gpios = <&gpio_simple 3 1>; }; + led3 { gpios = <&gpio_wkup 3 1>; }; + led4 { gpios = <&gpio_simple 2 1>; }; + }; + memory { reg = <0x00000000 0x10000000>; // 256MB }; soc5200@f0000000 { - timer@600 { // General Purpose Timer - fsl,has-wdt; - }; - psc@2000 { // PSC1 compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; cell-index = <0>; diff --git a/arch/powerpc/boot/dts/media5200.dts b/arch/powerpc/boot/dts/media5200.dts index 48d72f38e5e..b5413cb85f1 100644 --- 
a/arch/powerpc/boot/dts/media5200.dts +++ b/arch/powerpc/boot/dts/media5200.dts @@ -13,6 +13,8 @@ /include/ "mpc5200b.dtsi" +&gpt0 { fsl,has-wdt; }; + / { model = "fsl,media5200"; compatible = "fsl,media5200"; @@ -41,10 +43,6 @@ soc5200@f0000000 { bus-frequency = <132000000>;// 132 MHz - timer@600 { // General Purpose Timer - fsl,has-wdt; - }; - psc@2000 { // PSC1 status = "disabled"; }; diff --git a/arch/powerpc/boot/dts/motionpro.dts b/arch/powerpc/boot/dts/motionpro.dts index 0b78e89ac69..bbabd97492a 100644 --- a/arch/powerpc/boot/dts/motionpro.dts +++ b/arch/powerpc/boot/dts/motionpro.dts @@ -12,26 +12,22 @@ /include/ "mpc5200b.dtsi" +&gpt0 { fsl,has-wdt; }; +&gpt6 { // Motion-PRO status LED + compatible = "promess,motionpro-led"; + label = "motionpro-statusled"; + blink-delay = <100>; // 100 msec +}; +&gpt7 { // Motion-PRO ready LED + compatible = "promess,motionpro-led"; + label = "motionpro-readyled"; +}; + / { model = "promess,motionpro"; compatible = "promess,motionpro"; soc5200@f0000000 { - timer@600 { // General Purpose Timer - fsl,has-wdt; - }; - - timer@660 { // Motion-PRO status LED - compatible = "promess,motionpro-led"; - label = "motionpro-statusled"; - blink-delay = <100>; // 100 msec - }; - - timer@670 { // Motion-PRO ready LED - compatible = "promess,motionpro-led"; - label = "motionpro-readyled"; - }; - can@900 { status = "disabled"; }; diff --git a/arch/powerpc/boot/dts/mpc5121.dtsi b/arch/powerpc/boot/dts/mpc5121.dtsi new file mode 100644 index 00000000000..723e292b6b4 --- /dev/null +++ b/arch/powerpc/boot/dts/mpc5121.dtsi @@ -0,0 +1,410 @@ +/* + * base MPC5121 Device Tree Source + * + * Copyright 2007-2008 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +/dts-v1/; + +/ { + model = "mpc5121"; + compatible = "fsl,mpc5121"; + #address-cells = <1>; + #size-cells = <1>; + interrupt-parent = <&ipic>; + + aliases { + ethernet0 = &eth0; + pci = &pci; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + PowerPC,5121@0 { + device_type = "cpu"; + reg = <0>; + d-cache-line-size = <0x20>; /* 32 bytes */ + i-cache-line-size = <0x20>; /* 32 bytes */ + d-cache-size = <0x8000>; /* L1, 32K */ + i-cache-size = <0x8000>; /* L1, 32K */ + timebase-frequency = <49500000>;/* 49.5 MHz (csb/4) */ + bus-frequency = <198000000>; /* 198 MHz csb bus */ + clock-frequency = <396000000>; /* 396 MHz ppc core */ + }; + }; + + memory { + device_type = "memory"; + reg = <0x00000000 0x10000000>; /* 256MB at 0 */ + }; + + mbx@20000000 { + compatible = "fsl,mpc5121-mbx"; + reg = <0x20000000 0x4000>; + interrupts = <66 0x8>; + }; + + sram@30000000 { + compatible = "fsl,mpc5121-sram"; + reg = <0x30000000 0x20000>; /* 128K at 0x30000000 */ + }; + + nfc@40000000 { + compatible = "fsl,mpc5121-nfc"; + reg = <0x40000000 0x100000>; /* 1M at 0x40000000 */ + interrupts = <6 8>; + #address-cells = <1>; + #size-cells = <1>; + }; + + localbus@80000020 { + compatible = "fsl,mpc5121-localbus"; + #address-cells = <2>; + #size-cells = <1>; + reg = <0x80000020 0x40>; + interrupts = <7 0x8>; + ranges = <0x0 0x0 0xfc000000 0x04000000>; + }; + + soc@80000000 { + compatible = "fsl,mpc5121-immr"; + #address-cells = <1>; + #size-cells = <1>; + #interrupt-cells = <2>; + ranges = <0x0 0x80000000 0x400000>; + reg = <0x80000000 0x400000>; + bus-frequency = <66000000>; /* 66 MHz ips bus */ + + + /* + * IPIC + * interrupts cell = <intr #, sense> + * sense values match linux IORESOURCE_IRQ_* defines: + * sense == 8: Level, low assertion + * sense == 2: Edge, high-to-low change + */ + ipic: interrupt-controller@c00 { + compatible = "fsl,mpc5121-ipic", "fsl,ipic"; + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <2>; + reg = <0xc00 0x100>; + }; + + /* Watchdog timer */ + wdt@900 { + compatible = "fsl,mpc5121-wdt"; + reg = <0x900 0x100>; + }; + + /* Real time clock */ + rtc@a00 { + compatible = "fsl,mpc5121-rtc"; + reg = <0xa00 0x100>; + interrupts = <79 0x8 80 0x8>; + }; + + /* Reset module */ + reset@e00 { + compatible = "fsl,mpc5121-reset"; + reg = <0xe00 0x100>; + }; + + /* Clock control */ + clock@f00 { + compatible = "fsl,mpc5121-clock"; + reg = <0xf00 0x100>; + }; + + /* Power Management Controller */ + pmc@1000{ + compatible = "fsl,mpc5121-pmc"; + reg = <0x1000 0x100>; + interrupts = <83 0x8>; + }; + + gpio@1100 { + compatible = "fsl,mpc5121-gpio"; + reg = <0x1100 0x100>; + interrupts = <78 0x8>; + }; + + can@1300 { + compatible = "fsl,mpc5121-mscan"; + reg = <0x1300 0x80>; + interrupts = <12 0x8>; + }; + + can@1380 { + compatible = "fsl,mpc5121-mscan"; + reg = <0x1380 0x80>; + interrupts = <13 0x8>; + }; + + sdhc@1500 { + compatible = "fsl,mpc5121-sdhc"; + reg = <0x1500 0x100>; + interrupts = <8 0x8>; + }; + + i2c@1700 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,mpc5121-i2c", "fsl-i2c"; + reg = <0x1700 0x20>; + interrupts = <9 0x8>; + }; + + i2c@1720 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,mpc5121-i2c", "fsl-i2c"; + reg = <0x1720 0x20>; + interrupts = <10 0x8>; + }; + + i2c@1740 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,mpc5121-i2c", "fsl-i2c"; + reg = <0x1740 0x20>; + interrupts = <11 0x8>; + }; + + i2ccontrol@1760 { + compatible = "fsl,mpc5121-i2c-ctrl"; + reg = <0x1760 0x8>; + }; + +
axe@2000 { + compatible = "fsl,mpc5121-axe"; + reg = <0x2000 0x100>; + interrupts = <42 0x8>; + }; + + display@2100 { + compatible = "fsl,mpc5121-diu"; + reg = <0x2100 0x100>; + interrupts = <64 0x8>; + }; + + can@2300 { + compatible = "fsl,mpc5121-mscan"; + reg = <0x2300 0x80>; + interrupts = <90 0x8>; + }; + + can@2380 { + compatible = "fsl,mpc5121-mscan"; + reg = <0x2380 0x80>; + interrupts = <91 0x8>; + }; + + viu@2400 { + compatible = "fsl,mpc5121-viu"; + reg = <0x2400 0x400>; + interrupts = <67 0x8>; + }; + + mdio@2800 { + compatible = "fsl,mpc5121-fec-mdio"; + reg = <0x2800 0x800>; + #address-cells = <1>; + #size-cells = <0>; + }; + + eth0: ethernet@2800 { + device_type = "network"; + compatible = "fsl,mpc5121-fec"; + reg = <0x2800 0x800>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <4 0x8>; + }; + + /* USB1 using external ULPI PHY */ + usb@3000 { + compatible = "fsl,mpc5121-usb2-dr"; + reg = <0x3000 0x600>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <43 0x8>; + dr_mode = "otg"; + phy_type = "ulpi"; + }; + + /* USB0 using internal UTMI PHY */ + usb@4000 { + compatible = "fsl,mpc5121-usb2-dr"; + reg = <0x4000 0x600>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <44 0x8>; + dr_mode = "otg"; + phy_type = "utmi_wide"; + }; + + /* IO control */ + ioctl@a000 { + compatible = "fsl,mpc5121-ioctl"; + reg = <0xA000 0x1000>; + }; + + /* LocalPlus controller */ + lpc@10000 { + compatible = "fsl,mpc5121-lpc"; + reg = <0x10000 0x200>; + }; + + pata@10200 { + compatible = "fsl,mpc5121-pata"; + reg = <0x10200 0x100>; + interrupts = <5 0x8>; + }; + + /* 512x PSCs are not 52xx PSC compatible */ + + /* PSC0 */ + psc@11000 { + compatible = "fsl,mpc5121-psc"; + reg = <0x11000 0x100>; + interrupts = <40 0x8>; + fsl,rx-fifo-size = <16>; + fsl,tx-fifo-size = <16>; + }; + + /* PSC1 */ + psc@11100 { + compatible = "fsl,mpc5121-psc"; + reg = <0x11100 0x100>; + interrupts = <40 0x8>; + fsl,rx-fifo-size = <16>; + fsl,tx-fifo-size = <16>; + }; + + /* PSC2 */ + psc@11200 { + compatible = "fsl,mpc5121-psc"; + reg = <0x11200 0x100>; + interrupts = <40 0x8>; + fsl,rx-fifo-size = <16>; + fsl,tx-fifo-size = <16>; + }; + + /* PSC3 */ + psc@11300 { + compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc"; + reg = <0x11300 0x100>; + interrupts = <40 0x8>; + fsl,rx-fifo-size = <16>; + fsl,tx-fifo-size = <16>; + }; + + /* PSC4 */ + psc@11400 { + compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc"; + reg = <0x11400 0x100>; + interrupts = <40 0x8>; + fsl,rx-fifo-size = <16>; + fsl,tx-fifo-size = <16>; + }; + + /* PSC5 */ + psc@11500 { + compatible = "fsl,mpc5121-psc"; + reg = <0x11500 0x100>; + interrupts = <40 0x8>; + fsl,rx-fifo-size = <16>; + fsl,tx-fifo-size = <16>; + }; + + /* PSC6 */ + psc@11600 { + compatible = "fsl,mpc5121-psc"; + reg = <0x11600 0x100>; + interrupts = <40 0x8>; + fsl,rx-fifo-size = <16>; + fsl,tx-fifo-size = <16>; + }; + + /* PSC7 */ + psc@11700 { + compatible = "fsl,mpc5121-psc"; + reg = <0x11700 0x100>; + interrupts = <40 0x8>; + fsl,rx-fifo-size = <16>; + fsl,tx-fifo-size = <16>; + }; + + /* PSC8 */ + psc@11800 { + compatible = "fsl,mpc5121-psc"; + reg = <0x11800 0x100>; + interrupts = <40 0x8>; + fsl,rx-fifo-size = <16>; + fsl,tx-fifo-size = <16>; + }; + + /* PSC9 */ + psc@11900 { + compatible = "fsl,mpc5121-psc"; + reg = <0x11900 0x100>; + interrupts = <40 0x8>; + fsl,rx-fifo-size = <16>; + fsl,tx-fifo-size = <16>; + }; + + /* PSC10 */ + psc@11a00 { + compatible = "fsl,mpc5121-psc"; + reg = <0x11a00 0x100>; + interrupts = <40 0x8>; + 
fsl,rx-fifo-size = <16>; + fsl,tx-fifo-size = <16>; + }; + + /* PSC11 */ + psc@11b00 { + compatible = "fsl,mpc5121-psc"; + reg = <0x11b00 0x100>; + interrupts = <40 0x8>; + fsl,rx-fifo-size = <16>; + fsl,tx-fifo-size = <16>; + }; + + pscfifo@11f00 { + compatible = "fsl,mpc5121-psc-fifo"; + reg = <0x11f00 0x100>; + interrupts = <40 0x8>; + }; + + dma@14000 { + compatible = "fsl,mpc5121-dma"; + reg = <0x14000 0x1800>; + interrupts = <65 0x8>; + }; + }; + + pci: pci@80008500 { + compatible = "fsl,mpc5121-pci"; + device_type = "pci"; + interrupts = <1 0x8>; + clock-frequency = <0>; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + + reg = <0x80008500 0x100 /* internal registers */ + 0x80008300 0x8>; /* config space access registers */ + bus-range = <0x0 0x0>; + ranges = <0x42000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000 + 0x02000000 0x0 0xb0000000 0xb0000000 0x0 0x10000000 + 0x01000000 0x0 0x00000000 0x84000000 0x0 0x01000000>; + }; +}; diff --git a/arch/powerpc/boot/dts/mpc5121ads.dts b/arch/powerpc/boot/dts/mpc5121ads.dts index c9ef6bbe26c..f269b1382ef 100644 --- a/arch/powerpc/boot/dts/mpc5121ads.dts +++ b/arch/powerpc/boot/dts/mpc5121ads.dts @@ -1,7 +1,7 @@ /* * MPC5121E ADS Device Tree Source * - * Copyright 2007,2008 Freescale Semiconductor Inc. + * Copyright 2007-2008 Freescale Semiconductor Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -9,74 +9,26 @@ * option) any later version. */ -/dts-v1/; +/include/ "mpc5121.dtsi" / { model = "mpc5121ads"; compatible = "fsl,mpc5121ads"; - #address-cells = <1>; - #size-cells = <1>; - - aliases { - pci = &pci; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - PowerPC,5121@0 { - device_type = "cpu"; - reg = <0>; - d-cache-line-size = <0x20>; // 32 bytes - i-cache-line-size = <0x20>; // 32 bytes - d-cache-size = <0x8000>; // L1, 32K - i-cache-size = <0x8000>; // L1, 32K - timebase-frequency = <49500000>;// 49.5 MHz (csb/4) - bus-frequency = <198000000>; // 198 MHz csb bus - clock-frequency = <396000000>; // 396 MHz ppc core - }; - }; - - memory { - device_type = "memory"; - reg = <0x00000000 0x10000000>; // 256MB at 0 - }; - - mbx@20000000 { - compatible = "fsl,mpc5121-mbx"; - reg = <0x20000000 0x4000>; - interrupts = <66 0x8>; - interrupt-parent = < &ipic >; - }; - - sram@30000000 { - compatible = "fsl,mpc5121-sram"; - reg = <0x30000000 0x20000>; // 128K at 0x30000000 - }; nfc@40000000 { - compatible = "fsl,mpc5121-nfc"; - reg = <0x40000000 0x100000>; // 1M at 0x40000000 - interrupts = <6 8>; - interrupt-parent = < &ipic >; - #address-cells = <1>; - #size-cells = <1>; - // ADS has two Hynix 512MB Nand flash chips in a single - // stacked package. + /* + * ADS has two Hynix 512MB Nand flash chips in a single + * stacked package. 
+ */ chips = <2>; + nand@0 { label = "nand"; - reg = <0x00000000 0x40000000>; // 512MB + 512MB + reg = <0x00000000 0x40000000>; /* 512MB + 512MB */ }; }; localbus@80000020 { - compatible = "fsl,mpc5121-localbus"; - #address-cells = <2>; - #size-cells = <1>; - reg = <0x80000020 0x40>; - ranges = <0x0 0x0 0xfc000000 0x04000000 0x2 0x0 0x82000000 0x00008000>; @@ -87,6 +39,7 @@ #size-cells = <1>; bank-width = <4>; device-width = <2>; + protected@0 { label = "protected"; reg = <0x00000000 0x00040000>; // first sector is protected @@ -121,91 +74,18 @@ interrupt-controller; #interrupt-cells = <2>; reg = <0x2 0xa 0x5>; - interrupt-parent = < &ipic >; - // irq routing - // all irqs but touch screen are routed to irq0 (ipic 48) - // touch screen is statically routed to irq1 (ipic 17) - // so don't use it here + /* irq routing: + * all irqs but touch screen are routed to irq0 (ipic 48) + * touch screen is statically routed to irq1 (ipic 17) + * so don't use it here + */ interrupts = <48 0x8>; }; }; soc@80000000 { - compatible = "fsl,mpc5121-immr"; - #address-cells = <1>; - #size-cells = <1>; - #interrupt-cells = <2>; - ranges = <0x0 0x80000000 0x400000>; - reg = <0x80000000 0x400000>; - bus-frequency = <66000000>; // 66 MHz ips bus - - - // IPIC - // interrupts cell = <intr #, sense> - // sense values match linux IORESOURCE_IRQ_* defines: - // sense == 8: Level, low assertion - // sense == 2: Edge, high-to-low change - // - ipic: interrupt-controller@c00 { - compatible = "fsl,mpc5121-ipic", "fsl,ipic"; - interrupt-controller; - #address-cells = <0>; - #interrupt-cells = <2>; - reg = <0xc00 0x100>; - }; - - rtc@a00 { // Real time clock - compatible = "fsl,mpc5121-rtc"; - reg = <0xa00 0x100>; - interrupts = <79 0x8 80 0x8>; - interrupt-parent = < &ipic >; - }; - - reset@e00 { // Reset module - compatible = "fsl,mpc5121-reset"; - reg = <0xe00 0x100>; - }; - - clock@f00 { // Clock control - compatible = "fsl,mpc5121-clock"; - reg = <0xf00 0x100>; - }; - - pmc@1000{ //Power Management Controller - compatible = "fsl,mpc5121-pmc"; - reg = <0x1000 0x100>; - interrupts = <83 0x2>; - interrupt-parent = < &ipic >; - }; - - gpio@1100 { - compatible = "fsl,mpc5121-gpio"; - reg = <0x1100 0x100>; - interrupts = <78 0x8>; - interrupt-parent = < &ipic >; - }; - - can@1300 { - compatible = "fsl,mpc5121-mscan"; - interrupts = <12 0x8>; - interrupt-parent = < &ipic >; - reg = <0x1300 0x80>; - }; - - can@1380 { - compatible = "fsl,mpc5121-mscan"; - interrupts = <13 0x8>; - interrupt-parent = < &ipic >; - reg = <0x1380 0x80>; - }; i2c@1700 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,mpc5121-i2c", "fsl-i2c"; - reg = <0x1700 0x20>; - interrupts = <9 0x8>; - interrupt-parent = < &ipic >; fsl,preserve-clocking; hwmon@4a { @@ -224,196 +104,75 @@ }; }; - i2c@1720 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,mpc5121-i2c", "fsl-i2c"; - reg = <0x1720 0x20>; - interrupts = <10 0x8>; - interrupt-parent = < &ipic >; - }; - - i2c@1740 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,mpc5121-i2c", "fsl-i2c"; - reg = <0x1740 0x20>; - interrupts = <11 0x8>; - interrupt-parent = < &ipic >; + eth0: ethernet@2800 { + phy-handle = <&phy0>; }; - i2ccontrol@1760 { - compatible = "fsl,mpc5121-i2c-ctrl"; - reg = <0x1760 0x8>; + can@2300 { + status = "disabled"; }; - axe@2000 { - compatible = "fsl,mpc5121-axe"; - reg = <0x2000 0x100>; - interrupts = <42 0x8>; - interrupt-parent = < &ipic >; + can@2380 { + status = "disabled"; }; - display@2100 { - compatible = "fsl,mpc5121-diu"; - reg 
= <0x2100 0x100>; - interrupts = <64 0x8>; - interrupt-parent = < &ipic >; + viu@2400 { + status = "disabled"; }; mdio@2800 { - compatible = "fsl,mpc5121-fec-mdio"; - reg = <0x2800 0x800>; - #address-cells = <1>; - #size-cells = <0>; - phy: ethernet-phy@0 { + phy0: ethernet-phy@0 { reg = <1>; - device_type = "ethernet-phy"; }; }; - ethernet@2800 { - device_type = "network"; - compatible = "fsl,mpc5121-fec"; - reg = <0x2800 0x800>; - local-mac-address = [ 00 00 00 00 00 00 ]; - interrupts = <4 0x8>; - interrupt-parent = < &ipic >; - phy-handle = < &phy >; - fsl,align-tx-packets = <4>; + /* mpc5121ads only uses USB0 */ + usb@3000 { + status = "disabled"; }; - // 5121e has two dr usb modules - // mpc5121_ads only uses USB0 - - // USB1 using external ULPI PHY - //usb@3000 { - // compatible = "fsl,mpc5121-usb2-dr"; - // reg = <0x3000 0x1000>; - // #address-cells = <1>; - // #size-cells = <0>; - // interrupt-parent = < &ipic >; - // interrupts = <43 0x8>; - // dr_mode = "otg"; - // phy_type = "ulpi"; - //}; - - // USB0 using internal UTMI PHY + /* USB0 using internal UTMI PHY */ usb@4000 { - compatible = "fsl,mpc5121-usb2-dr"; - reg = <0x4000 0x1000>; - #address-cells = <1>; - #size-cells = <0>; - interrupt-parent = < &ipic >; - interrupts = <44 0x8>; - dr_mode = "otg"; - phy_type = "utmi_wide"; + dr_mode = "host"; fsl,invert-drvvbus; fsl,invert-pwr-fault; }; - // IO control - ioctl@a000 { - compatible = "fsl,mpc5121-ioctl"; - reg = <0xA000 0x1000>; - }; - - pata@10200 { - compatible = "fsl,mpc5121-pata"; - reg = <0x10200 0x100>; - interrupts = <5 0x8>; - interrupt-parent = < &ipic >; - }; - - // 512x PSCs are not 52xx PSC compatible - // PSC3 serial port A aka ttyPSC0 - serial@11300 { - device_type = "serial"; + /* PSC3 serial port A aka ttyPSC0 */ + psc@11300 { compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc"; - // Logical port assignment needed until driver - // learns to use aliases - port-number = <0>; - cell-index = <3>; - reg = <0x11300 0x100>; - interrupts = <40 0x8>; - interrupt-parent = < &ipic >; - rx-fifo-size = <16>; - tx-fifo-size = <16>; }; - // PSC4 serial port B aka ttyPSC1 - serial@11400 { - device_type = "serial"; + /* PSC4 serial port B aka ttyPSC1 */ + psc@11400 { compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc"; - // Logical port assignment needed until driver - // learns to use aliases - port-number = <1>; - cell-index = <4>; - reg = <0x11400 0x100>; - interrupts = <40 0x8>; - interrupt-parent = < &ipic >; - rx-fifo-size = <16>; - tx-fifo-size = <16>; }; - // PSC5 in ac97 mode - ac97@11500 { + /* PSC5 in ac97 mode */ + ac97: psc@11500 { compatible = "fsl,mpc5121-psc-ac97", "fsl,mpc5121-psc"; - cell-index = <5>; - reg = <0x11500 0x100>; - interrupts = <40 0x8>; - interrupt-parent = < &ipic >; fsl,mode = "ac97-slave"; - rx-fifo-size = <384>; - tx-fifo-size = <384>; - }; - - pscfifo@11f00 { - compatible = "fsl,mpc5121-psc-fifo"; - reg = <0x11f00 0x100>; - interrupts = <40 0x8>; - interrupt-parent = < &ipic >; + fsl,rx-fifo-size = <384>; + fsl,tx-fifo-size = <384>; }; - - dma@14000 { - compatible = "fsl,mpc5121-dma"; - reg = <0x14000 0x1800>; - interrupts = <65 0x8>; - interrupt-parent = < &ipic >; - }; - }; pci: pci@80008500 { interrupt-map-mask = <0xf800 0x0 0x0 0x7>; interrupt-map = < - // IDSEL 0x15 - Slot 1 PCI + /* IDSEL 0x15 - Slot 1 PCI */ 0xa800 0x0 0x0 0x1 &cpld_pic 0x0 0x8 0xa800 0x0 0x0 0x2 &cpld_pic 0x1 0x8 0xa800 0x0 0x0 0x3 &cpld_pic 0x2 0x8 0xa800 0x0 0x0 0x4 &cpld_pic 0x3 0x8 - // IDSEL 0x16 - Slot 2 MiniPCI + /* IDSEL 0x16 - Slot 2 MiniPCI */ 
0xb000 0x0 0x0 0x1 &cpld_pic 0x4 0x8 0xb000 0x0 0x0 0x2 &cpld_pic 0x5 0x8 - // IDSEL 0x17 - Slot 3 MiniPCI + /* IDSEL 0x17 - Slot 3 MiniPCI */ 0xb800 0x0 0x0 0x1 &cpld_pic 0x6 0x8 0xb800 0x0 0x0 0x2 &cpld_pic 0x7 0x8 >; - interrupt-parent = < &ipic >; - interrupts = <1 0x8>; - bus-range = <0 0>; - ranges = <0x42000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000 - 0x02000000 0x0 0xb0000000 0xb0000000 0x0 0x10000000 - 0x01000000 0x0 0x00000000 0x84000000 0x0 0x01000000>; - clock-frequency = <0>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - reg = <0x80008500 0x100 /* internal registers */ - 0x80008300 0x8>; /* config space access registers */ - compatible = "fsl,mpc5121-pci"; - device_type = "pci"; }; }; diff --git a/arch/powerpc/boot/dts/mpc5200b.dtsi b/arch/powerpc/boot/dts/mpc5200b.dtsi index 7ab286ab530..969b2200b2f 100644 --- a/arch/powerpc/boot/dts/mpc5200b.dtsi +++ b/arch/powerpc/boot/dts/mpc5200b.dtsi @@ -64,50 +64,59 @@ reg = <0x500 0x80>; }; - timer@600 { // General Purpose Timer + gpt0: timer@600 { // General Purpose Timer compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; + #gpio-cells = <2>; // Add 'gpio-controller;' to enable gpio mode reg = <0x600 0x10>; interrupts = <1 9 0>; + // add 'fsl,has-wdt' to enable watchdog }; - timer@610 { // General Purpose Timer + gpt1: timer@610 { // General Purpose Timer compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; + #gpio-cells = <2>; // Add 'gpio-controller;' to enable gpio mode reg = <0x610 0x10>; interrupts = <1 10 0>; }; - timer@620 { // General Purpose Timer + gpt2: timer@620 { // General Purpose Timer compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; + #gpio-cells = <2>; // Add 'gpio-controller;' to enable gpio mode reg = <0x620 0x10>; interrupts = <1 11 0>; }; - timer@630 { // General Purpose Timer + gpt3: timer@630 { // General Purpose Timer compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; + #gpio-cells = <2>; // Add 'gpio-controller;' to enable gpio mode reg = <0x630 0x10>; interrupts = <1 12 0>; }; - timer@640 { // General Purpose Timer + gpt4: timer@640 { // General Purpose Timer compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; + #gpio-cells = <2>; // Add 'gpio-controller;' to enable gpio mode reg = <0x640 0x10>; interrupts = <1 13 0>; }; - timer@650 { // General Purpose Timer + gpt5: timer@650 { // General Purpose Timer compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; + #gpio-cells = <2>; // Add 'gpio-controller;' to enable gpio mode reg = <0x650 0x10>; interrupts = <1 14 0>; }; - timer@660 { // General Purpose Timer + gpt6: timer@660 { // General Purpose Timer compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; + #gpio-cells = <2>; // Add 'gpio-controller;' to enable gpio mode reg = <0x660 0x10>; interrupts = <1 15 0>; }; - timer@670 { // General Purpose Timer + gpt7: timer@670 { // General Purpose Timer compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; + #gpio-cells = <2>; // Add 'gpio-controller;' to enable gpio mode reg = <0x670 0x10>; interrupts = <1 16 0>; }; @@ -231,6 +240,12 @@ interrupts = <2 7 0>; }; + sclpc@3c00 { + compatible = "fsl,mpc5200-lpbfifo"; + reg = <0x3c00 0x60>; + interrupts = <2 23 0>; + }; + i2c@3d00 { #address-cells = <1>; #size-cells = <0>; diff --git a/arch/powerpc/boot/dts/mucmc52.dts b/arch/powerpc/boot/dts/mucmc52.dts index 21d34720fcc..d3a792bb5c1 100644 --- a/arch/powerpc/boot/dts/mucmc52.dts +++ b/arch/powerpc/boot/dts/mucmc52.dts @@ -13,47 +13,23 @@ /include/ "mpc5200b.dtsi" +/* Timer pins that need to be in GPIO mode */ +&gpt0 { gpio-controller; }; +&gpt1 { 
gpio-controller; }; +&gpt2 { gpio-controller; }; +&gpt3 { gpio-controller; }; + +/* Disabled timers */ +&gpt4 { status = "disabled"; }; +&gpt5 { status = "disabled"; }; +&gpt6 { status = "disabled"; }; +&gpt7 { status = "disabled"; }; + / { model = "manroland,mucmc52"; compatible = "manroland,mucmc52"; soc5200@f0000000 { - gpt0: timer@600 { // GPT 0 in GPIO mode - gpio-controller; - #gpio-cells = <2>; - }; - - gpt1: timer@610 { // General Purpose Timer in GPIO mode - gpio-controller; - #gpio-cells = <2>; - }; - - gpt2: timer@620 { // General Purpose Timer in GPIO mode - gpio-controller; - #gpio-cells = <2>; - }; - - gpt3: timer@630 { // General Purpose Timer in GPIO mode - gpio-controller; - #gpio-cells = <2>; - }; - - timer@640 { - status = "disabled"; - }; - - timer@650 { - status = "disabled"; - }; - - timer@660 { - status = "disabled"; - }; - - timer@670 { - status = "disabled"; - }; - rtc@800 { status = "disabled"; }; diff --git a/arch/powerpc/boot/dts/o2d.dtsi b/arch/powerpc/boot/dts/o2d.dtsi index 3444eb8f0ad..cf073e693f2 100644 --- a/arch/powerpc/boot/dts/o2d.dtsi +++ b/arch/powerpc/boot/dts/o2d.dtsi @@ -12,6 +12,13 @@ /include/ "mpc5200b.dtsi" +&gpt0 { + gpio-controller; + fsl,has-wdt; + fsl,wdt-on-boot = <0>; +}; +&gpt1 { gpio-controller; }; + / { model = "ifm,o2d"; compatible = "ifm,o2d"; @@ -22,24 +29,6 @@ soc5200@f0000000 { - gpio_simple: gpio@b00 { - }; - - timer@600 { // General Purpose Timer - #gpio-cells = <2>; - gpio-controller; - fsl,has-wdt; - fsl,wdt-on-boot = <0>; - }; - - timer@610 { - #gpio-cells = <2>; - gpio-controller; - }; - - timer7: timer@670 { - }; - rtc@800 { status = "disabled"; }; @@ -86,12 +75,6 @@ reg = <0>; }; }; - - sclpc@3c00 { - compatible = "fsl,mpc5200-lpbfifo"; - reg = <0x3c00 0x60>; - interrupts = <3 23 0>; - }; }; localbus { @@ -124,7 +107,7 @@ csi@3,0 { compatible = "ifm,o2d-csi"; reg = <3 0 0x00100000>; - ifm,csi-clk-handle = <&timer7>; + ifm,csi-clk-handle = <&gpt7>; gpios = <&gpio_simple 23 0 /* imag_capture */ &gpio_simple 26 0 /* imag_reset */ &gpio_simple 29 0>; /* imag_master_en */ diff --git a/arch/powerpc/boot/dts/pcm030.dts b/arch/powerpc/boot/dts/pcm030.dts index 9e354997eb7..192e66af000 100644 --- a/arch/powerpc/boot/dts/pcm030.dts +++ b/arch/powerpc/boot/dts/pcm030.dts @@ -14,52 +14,20 @@ /include/ "mpc5200b.dtsi" +&gpt0 { fsl,has-wdt; }; +&gpt2 { gpio-controller; }; +&gpt3 { gpio-controller; }; +&gpt4 { gpio-controller; }; +&gpt5 { gpio-controller; }; +&gpt6 { gpio-controller; }; +&gpt7 { gpio-controller; }; + / { model = "phytec,pcm030"; compatible = "phytec,pcm030"; soc5200@f0000000 { - timer@600 { // General Purpose Timer - fsl,has-wdt; - }; - - gpt2: timer@620 { // General Purpose Timer in GPIO mode - compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio"; - gpio-controller; - #gpio-cells = <2>; - }; - - gpt3: timer@630 { // General Purpose Timer in GPIO mode - compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio"; - gpio-controller; - #gpio-cells = <2>; - }; - - gpt4: timer@640 { // General Purpose Timer in GPIO mode - compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio"; - gpio-controller; - #gpio-cells = <2>; - }; - - gpt5: timer@650 { // General Purpose Timer in GPIO mode - compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio"; - gpio-controller; - #gpio-cells = <2>; - }; - - gpt6: timer@660 { // General Purpose Timer in GPIO mode - compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio"; - gpio-controller; - #gpio-cells = <2>; - }; - - gpt7: timer@670 { // General Purpose Timer in GPIO mode - 
compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio"; - gpio-controller; - #gpio-cells = <2>; - }; - - psc@2000 { /* PSC1 in ac97 mode */ + audioplatform: psc@2000 { /* PSC1 in ac97 mode */ compatible = "mpc5200b-psc-ac97","fsl,mpc5200b-psc-ac97"; cell-index = <0>; }; @@ -134,4 +102,9 @@ localbus { status = "disabled"; }; + + sound { + compatible = "phytec,pcm030-audio-fabric"; + asoc-platform = <&audioplatform>; + }; }; diff --git a/arch/powerpc/boot/dts/pcm032.dts b/arch/powerpc/boot/dts/pcm032.dts index 1dd478bfff9..96b139bf50e 100644 --- a/arch/powerpc/boot/dts/pcm032.dts +++ b/arch/powerpc/boot/dts/pcm032.dts @@ -14,6 +14,14 @@ /include/ "mpc5200b.dtsi" +&gpt0 { fsl,has-wdt; }; +&gpt2 { gpio-controller; }; +&gpt3 { gpio-controller; }; +&gpt4 { gpio-controller; }; +&gpt5 { gpio-controller; }; +&gpt6 { gpio-controller; }; +&gpt7 { gpio-controller; }; + / { model = "phytec,pcm032"; compatible = "phytec,pcm032"; @@ -23,43 +31,6 @@ }; soc5200@f0000000 { - timer@600 { // General Purpose Timer - fsl,has-wdt; - }; - - gpt2: timer@620 { // General Purpose Timer in GPIO mode - gpio-controller; - #gpio-cells = <2>; - }; - - gpt3: timer@630 { // General Purpose Timer in GPIO mode - gpio-controller; - #gpio-cells = <2>; - }; - - gpt4: timer@640 { // General Purpose Timer in GPIO mode - gpio-controller; - #gpio-cells = <2>; - }; - - gpt5: timer@650 { // General Purpose Timer in GPIO mode - gpio-controller; - #gpio-cells = <2>; - }; - - gpt6: timer@660 { // General Purpose Timer in GPIO mode - compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; - reg = <0x660 0x10>; - interrupts = <1 15 0>; - gpio-controller; - #gpio-cells = <2>; - }; - - gpt7: timer@670 { // General Purpose Timer in GPIO mode - gpio-controller; - #gpio-cells = <2>; - }; - psc@2000 { /* PSC1 is ac97 */ compatible = "fsl,mpc5200b-psc-ac97","fsl,mpc5200-psc-ac97"; cell-index = <0>; diff --git a/arch/powerpc/boot/dts/pdm360ng.dts b/arch/powerpc/boot/dts/pdm360ng.dts index 94dfa5c9a7f..0b069477838 100644 --- a/arch/powerpc/boot/dts/pdm360ng.dts +++ b/arch/powerpc/boot/dts/pdm360ng.dts @@ -13,7 +13,7 @@ * option) any later version. 
*/ -/dts-v1/; +/include/ "mpc5121.dtsi" / { model = "pdm360ng"; @@ -22,38 +22,12 @@ #size-cells = <1>; interrupt-parent = <&ipic>; - aliases { - ethernet0 = ð0; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - PowerPC,5121@0 { - device_type = "cpu"; - reg = <0>; - d-cache-line-size = <0x20>; // 32 bytes - i-cache-line-size = <0x20>; // 32 bytes - d-cache-size = <0x8000>; // L1, 32K - i-cache-size = <0x8000>; // L1, 32K - timebase-frequency = <49500000>;// 49.5 MHz (csb/4) - bus-frequency = <198000000>; // 198 MHz csb bus - clock-frequency = <396000000>; // 396 MHz ppc core - }; - }; - memory { device_type = "memory"; reg = <0x00000000 0x20000000>; // 512MB at 0 }; nfc@40000000 { - compatible = "fsl,mpc5121-nfc"; - reg = <0x40000000 0x100000>; - interrupts = <0x6 0x8>; - #address-cells = <0x1>; - #size-cells = <0x1>; bank-width = <0x1>; chips = <0x1>; @@ -63,17 +37,7 @@ }; }; - sram@50000000 { - compatible = "fsl,mpc5121-sram"; - reg = <0x50000000 0x20000>; // 128K at 0x50000000 - }; - localbus@80000020 { - compatible = "fsl,mpc5121-localbus"; - #address-cells = <2>; - #size-cells = <1>; - reg = <0x80000020 0x40>; - ranges = <0x0 0x0 0xf0000000 0x10000000 /* Flash */ 0x2 0x0 0x50040000 0x00020000>; /* CS2: MRAM */ @@ -129,74 +93,8 @@ }; soc@80000000 { - compatible = "fsl,mpc5121-immr"; - #address-cells = <1>; - #size-cells = <1>; - #interrupt-cells = <2>; - ranges = <0x0 0x80000000 0x400000>; - reg = <0x80000000 0x400000>; - bus-frequency = <66000000>; // 66 MHz ips bus - - // IPIC - // interrupts cell = <intr #, sense> - // sense values match linux IORESOURCE_IRQ_* defines: - // sense == 8: Level, low assertion - // sense == 2: Edge, high-to-low change - // - ipic: interrupt-controller@c00 { - compatible = "fsl,mpc5121-ipic", "fsl,ipic"; - interrupt-controller; - #address-cells = <0>; - #interrupt-cells = <2>; - reg = <0xc00 0x100>; - }; - - rtc@a00 { // Real time clock - compatible = "fsl,mpc5121-rtc"; - reg = <0xa00 0x100>; - interrupts = <79 0x8 80 0x8>; - }; - - reset@e00 { // Reset module - compatible = "fsl,mpc5121-reset"; - reg = <0xe00 0x100>; - }; - - clock@f00 { // Clock control - compatible = "fsl,mpc5121-clock"; - reg = <0xf00 0x100>; - }; - - pmc@1000{ //Power Management Controller - compatible = "fsl,mpc5121-pmc"; - reg = <0x1000 0x100>; - interrupts = <83 0x2>; - }; - - gpio@1100 { - compatible = "fsl,mpc5121-gpio"; - reg = <0x1100 0x100>; - interrupts = <78 0x8>; - }; - - can@1300 { - compatible = "fsl,mpc5121-mscan"; - interrupts = <12 0x8>; - reg = <0x1300 0x80>; - }; - - can@1380 { - compatible = "fsl,mpc5121-mscan"; - interrupts = <13 0x8>; - reg = <0x1380 0x80>; - }; i2c@1700 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,mpc5121-i2c"; - reg = <0x1700 0x20>; - interrupts = <0x9 0x8>; fsl,preserve-clocking; eeprom@50 { @@ -210,201 +108,92 @@ }; }; - i2c@1740 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,mpc5121-i2c"; - reg = <0x1740 0x20>; - interrupts = <0xb 0x8>; - fsl,preserve-clocking; - }; - - i2ccontrol@1760 { - compatible = "fsl,mpc5121-i2c-ctrl"; - reg = <0x1760 0x8>; - }; - - axe@2000 { - compatible = "fsl,mpc5121-axe"; - reg = <0x2000 0x100>; - interrupts = <42 0x8>; - }; - - display@2100 { - compatible = "fsl,mpc5121-diu"; - reg = <0x2100 0x100>; - interrupts = <64 0x8>; + i2c@1720 { + status = "disabled"; }; - can@2300 { - compatible = "fsl,mpc5121-mscan"; - interrupts = <90 0x8>; - reg = <0x2300 0x80>; - }; - - can@2380 { - compatible = "fsl,mpc5121-mscan"; - interrupts = <91 0x8>; - reg = <0x2380 
0x80>; + i2c@1740 { + fsl,preserve-clocking; }; - viu@2400 { - compatible = "fsl,mpc5121-viu"; - reg = <0x2400 0x400>; - interrupts = <67 0x8>; + ethernet@2800 { + phy-handle = <&phy0>; }; mdio@2800 { - compatible = "fsl,mpc5121-fec-mdio"; - reg = <0x2800 0x200>; - #address-cells = <1>; - #size-cells = <0>; - phy: ethernet-phy@0 { + phy0: ethernet-phy@1f { compatible = "smsc,lan8700"; reg = <0x1f>; }; }; - eth0: ethernet@2800 { - compatible = "fsl,mpc5121-fec"; - reg = <0x2800 0x200>; - local-mac-address = [ 00 00 00 00 00 00 ]; - interrupts = <4 0x8>; - phy-handle = < &phy >; - }; - - // USB1 using external ULPI PHY + /* USB1 using external ULPI PHY */ usb@3000 { - compatible = "fsl,mpc5121-usb2-dr"; - reg = <0x3000 0x600>; - #address-cells = <1>; - #size-cells = <0>; - interrupts = <43 0x8>; dr_mode = "host"; - phy_type = "ulpi"; }; - // USB0 using internal UTMI PHY + /* USB0 using internal UTMI PHY */ usb@4000 { - compatible = "fsl,mpc5121-usb2-dr"; - reg = <0x4000 0x600>; - #address-cells = <1>; - #size-cells = <0>; - interrupts = <44 0x8>; - dr_mode = "otg"; - phy_type = "utmi_wide"; fsl,invert-pwr-fault; }; - // IO control - ioctl@a000 { - compatible = "fsl,mpc5121-ioctl"; - reg = <0xA000 0x1000>; - }; - - // 512x PSCs are not 52xx PSCs compatible - serial@11000 { + psc@11000 { compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc"; - cell-index = <0>; - reg = <0x11000 0x100>; - interrupts = <40 0x8>; - fsl,rx-fifo-size = <16>; - fsl,tx-fifo-size = <16>; }; - serial@11100 { + psc@11100 { compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc"; - cell-index = <1>; - reg = <0x11100 0x100>; - interrupts = <40 0x8>; - fsl,rx-fifo-size = <16>; - fsl,tx-fifo-size = <16>; }; - serial@11200 { + psc@11200 { compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc"; - cell-index = <2>; - reg = <0x11200 0x100>; - interrupts = <40 0x8>; - fsl,rx-fifo-size = <16>; - fsl,tx-fifo-size = <16>; }; - serial@11300 { + psc@11300 { compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc"; - cell-index = <3>; - reg = <0x11300 0x100>; - interrupts = <40 0x8>; - fsl,rx-fifo-size = <16>; - fsl,tx-fifo-size = <16>; }; - serial@11400 { + psc@11400 { compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc"; - cell-index = <4>; - reg = <0x11400 0x100>; - interrupts = <40 0x8>; - fsl,rx-fifo-size = <16>; - fsl,tx-fifo-size = <16>; }; - serial@11600 { - compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc"; - cell-index = <6>; - reg = <0x11600 0x100>; - interrupts = <40 0x8>; - fsl,rx-fifo-size = <16>; - fsl,tx-fifo-size = <16>; + psc@11500 { + status = "disabled"; }; - serial@11800 { + psc@11600 { compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc"; - cell-index = <8>; - reg = <0x11800 0x100>; - interrupts = <40 0x8>; - fsl,rx-fifo-size = <16>; - fsl,tx-fifo-size = <16>; }; - serial@11B00 { - compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc"; - cell-index = <11>; - reg = <0x11B00 0x100>; - interrupts = <40 0x8>; - fsl,rx-fifo-size = <16>; - fsl,tx-fifo-size = <16>; + psc@11700 { + status = "disabled"; }; - pscfifo@11f00 { - compatible = "fsl,mpc5121-psc-fifo"; - reg = <0x11f00 0x100>; - interrupts = <40 0x8>; + psc@11800 { + compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc"; }; - spi@11900 { + psc@11900 { compatible = "fsl,mpc5121-psc-spi", "fsl,mpc5121-psc"; - cell-index = <9>; #address-cells = <1>; #size-cells = <0>; - reg = <0x11900 0x100>; - interrupts = <40 0x8>; - fsl,rx-fifo-size = <16>; - fsl,tx-fifo-size = <16>; - // 7845 touch screen controller + /* ADS7845 touch screen controller */ ts@0 { 
compatible = "ti,ads7846"; reg = <0x0>; spi-max-frequency = <3000000>; - // pen irq is GPIO25 + /* pen irq is GPIO25 */ interrupts = <78 0x8>; }; }; - dma@14000 { - compatible = "fsl,mpc5121-dma"; - reg = <0x14000 0x1800>; - interrupts = <65 0x8>; + psc@11a00 { + status = "disabled"; + }; + + psc@11b00 { + compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc"; }; }; }; diff --git a/arch/powerpc/boot/dts/ppa8548.dts b/arch/powerpc/boot/dts/ppa8548.dts new file mode 100644 index 00000000000..f97eceed610 --- /dev/null +++ b/arch/powerpc/boot/dts/ppa8548.dts @@ -0,0 +1,166 @@ +/* + * PPA8548 Device Tree Source (36-bit address map) + * Copyright 2013 Prodrive B.V. + * + * Based on: + * MPC8548 CDS Device Tree Source (36-bit address map) + * Copyright 2012 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +/include/ "fsl/mpc8548si-pre.dtsi" + +/ { + model = "ppa8548"; + compatible = "ppa8548"; + #address-cells = <2>; + #size-cells = <2>; + interrupt-parent = <&mpic>; + + memory { + device_type = "memory"; + reg = <0 0 0x0 0x40000000>; + }; + + lbc: localbus@fe0005000 { + reg = <0xf 0xe0005000 0 0x1000>; + ranges = <0x0 0x0 0xf 0xff800000 0x00800000>; + }; + + soc: soc8548@fe0000000 { + ranges = <0 0xf 0xe0000000 0x100000>; + }; + + pci0: pci@fe0008000 { + /* ppa8548 board doesn't support PCI */ + status = "disabled"; + }; + + pci1: pci@fe0009000 { + /* ppa8548 board doesn't support PCI */ + status = "disabled"; + }; + + pci2: pcie@fe000a000 { + /* ppa8548 board doesn't support PCI */ + status = "disabled"; + }; + + rio: rapidio@fe00c0000 { + reg = <0xf 0xe00c0000 0x0 0x11000>; + port1 { + ranges = <0x0 0x0 0x0 0x80000000 0x0 0x40000000>; + }; + }; +}; + +&lbc { + nor@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "cfi-flash"; + reg = <0x0 0x0 0x00800000>; + bank-width = <2>; + device-width = <2>; + + partition@0 { + reg = <0x0 0x7A0000>; + label = "user"; + }; + + partition@7A0000 { + reg = <0x7A0000 0x20000>; + label = "env"; + read-only; + }; + + partition@7C0000 { + reg = <0x7C0000 0x40000>; + label = "u-boot"; + read-only; + }; + }; +}; + +&soc { + i2c@3000 { + rtc@6f { + compatible = "intersil,isl1208"; + reg = <0x6f>; + }; + }; + + i2c@3100 { + }; + + /* + * Only ethernet controller @25000 and @26000 are used. + * Use alias enet2 and enet3 for the remainig controllers, + * to stay compatible with mpc8548si-pre.dtsi. 
+ */ + enet2: ethernet@24000 { + status = "disabled"; + }; + + mdio@24520 { + phy0: ethernet-phy@0 { + interrupts = <7 1 0 0>; + reg = <0x0>; + device_type = "ethernet-phy"; + }; + phy1: ethernet-phy@1 { + interrupts = <8 1 0 0>; + reg = <0x1>; + device_type = "ethernet-phy"; + }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + enet0: ethernet@25000 { + tbi-handle = <&tbi1>; + phy-handle = <&phy0>; + }; + + mdio@25520 { + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + enet1: ethernet@26000 { + tbi-handle = <&tbi2>; + phy-handle = <&phy1>; + }; + + mdio@26520 { + tbi2: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + enet3: ethernet@27000 { + status = "disabled"; + }; + + mdio@27520 { + tbi3: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + crypto@30000 { + status = "disabled"; + }; +}; + +/include/ "fsl/mpc8548si-post.dtsi" diff --git a/arch/powerpc/boot/dts/sbc8548-altflash.dts b/arch/powerpc/boot/dts/sbc8548-altflash.dts new file mode 100644 index 00000000000..0b38a0defd2 --- /dev/null +++ b/arch/powerpc/boot/dts/sbc8548-altflash.dts @@ -0,0 +1,115 @@ +/* + * SBC8548 Device Tree Source + * + * Configured for booting off the alternate (64MB SODIMM) flash. + * Requires switching JP12 jumpers and changing SW2.8 setting. + * + * Copyright 2013 Wind River Systems Inc. + * + * Paul Gortmaker (see MAINTAINERS for contact information) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + + +/dts-v1/; + +/include/ "sbc8548-pre.dtsi" + +/{ + localbus@e0000000 { + #address-cells = <2>; + #size-cells = <1>; + compatible = "simple-bus"; + reg = <0xe0000000 0x5000>; + interrupt-parent = <&mpic>; + + ranges = <0x0 0x0 0xfc000000 0x04000000 /*64MB Flash*/ + 0x3 0x0 0xf0000000 0x04000000 /*64MB SDRAM*/ + 0x4 0x0 0xf4000000 0x04000000 /*64MB SDRAM*/ + 0x5 0x0 0xf8000000 0x00b10000 /* EPLD */ + 0x6 0x0 0xef800000 0x00800000>; /*8MB Flash*/ + + flash@0,0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0x0 0x0 0x04000000>; + compatible = "intel,JS28F128", "cfi-flash"; + bank-width = <4>; + device-width = <1>; + partition@0x0 { + label = "space"; + /* FC000000 -> FFEFFFFF */ + reg = <0x00000000 0x03f00000>; + }; + partition@0x03f00000 { + label = "bootloader"; + /* FFF00000 -> FFFFFFFF */ + reg = <0x03f00000 0x00100000>; + read-only; + }; + }; + + + epld@5,0 { + compatible = "wrs,epld-localbus"; + #address-cells = <2>; + #size-cells = <1>; + reg = <0x5 0x0 0x00b10000>; + ranges = < + 0x0 0x0 0x5 0x000000 0x1fff /* LED */ + 0x1 0x0 0x5 0x100000 0x1fff /* Switches */ + 0x3 0x0 0x5 0x300000 0x1fff /* HW Rev. 
*/ + 0xb 0x0 0x5 0xb00000 0x1fff /* EEPROM */ + >; + + led@0,0 { + compatible = "led"; + reg = <0x0 0x0 0x1fff>; + }; + + switches@1,0 { + compatible = "switches"; + reg = <0x1 0x0 0x1fff>; + }; + + hw-rev@3,0 { + compatible = "hw-rev"; + reg = <0x3 0x0 0x1fff>; + }; + + eeprom@b,0 { + compatible = "eeprom"; + reg = <0xb 0 0x1fff>; + }; + + }; + + alt-flash@6,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "intel,JS28F640", "cfi-flash"; + reg = <0x6 0x0 0x800000>; + bank-width = <1>; + device-width = <1>; + partition@0x0 { + label = "space"; + /* EF800000 -> EFF9FFFF */ + reg = <0x00000000 0x007a0000>; + }; + partition@0x7a0000 { + label = "bootloader"; + /* EFFA0000 -> EFFFFFFF */ + reg = <0x007a0000 0x00060000>; + read-only; + }; + }; + + + }; +}; + +/include/ "sbc8548-post.dtsi" diff --git a/arch/powerpc/boot/dts/sbc8548-post.dtsi b/arch/powerpc/boot/dts/sbc8548-post.dtsi new file mode 100644 index 00000000000..33a47e27a11 --- /dev/null +++ b/arch/powerpc/boot/dts/sbc8548-post.dtsi @@ -0,0 +1,295 @@ +/* + * SBC8548 Device Tree Source + * + * Copyright 2007 Wind River Systems Inc. + * + * Paul Gortmaker (see MAINTAINERS for contact information) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +/{ + soc8548@e0000000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + ranges = <0x00000000 0xe0000000 0x00100000>; + bus-frequency = <0>; + compatible = "simple-bus"; + + ecm-law@0 { + compatible = "fsl,ecm-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <10>; + }; + + ecm@1000 { + compatible = "fsl,mpc8548-ecm", "fsl,ecm"; + reg = <0x1000 0x1000>; + interrupts = <17 2>; + interrupt-parent = <&mpic>; + }; + + memory-controller@2000 { + compatible = "fsl,mpc8548-memory-controller"; + reg = <0x2000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <0x12 0x2>; + }; + + L2: l2-cache-controller@20000 { + compatible = "fsl,mpc8548-l2-cache-controller"; + reg = <0x20000 0x1000>; + cache-line-size = <0x20>; // 32 bytes + cache-size = <0x80000>; // L2, 512K + interrupt-parent = <&mpic>; + interrupts = <0x10 0x2>; + }; + + i2c@3000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x3000 0x100>; + interrupts = <0x2b 0x2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + i2c@3100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x3100 0x100>; + interrupts = <0x2b 0x2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + dma@21300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,mpc8548-dma", "fsl,eloplus-dma"; + reg = <0x21300 0x4>; + ranges = <0x0 0x21100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,mpc8548-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <20 2>; + }; + dma-channel@80 { + compatible = "fsl,mpc8548-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <21 2>; + }; + dma-channel@100 { + compatible = "fsl,mpc8548-dma-channel", + "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <22 2>; + }; + dma-channel@180 { + compatible = "fsl,mpc8548-dma-channel", + "fsl,eloplus-dma-channel"; + reg = 
<0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <23 2>; + }; + }; + + enet0: ethernet@24000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <0>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x24000 0x1000>; + ranges = <0x0 0x24000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <0x1d 0x2 0x1e 0x2 0x22 0x2>; + interrupt-parent = <&mpic>; + tbi-handle = <&tbi0>; + phy-handle = <&phy0>; + + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-mdio"; + reg = <0x520 0x20>; + + phy0: ethernet-phy@19 { + interrupt-parent = <&mpic>; + interrupts = <0x6 0x1>; + reg = <0x19>; + device_type = "ethernet-phy"; + }; + phy1: ethernet-phy@1a { + interrupt-parent = <&mpic>; + interrupts = <0x7 0x1>; + reg = <0x1a>; + device_type = "ethernet-phy"; + }; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + }; + + enet1: ethernet@25000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <1>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x25000 0x1000>; + ranges = <0x0 0x25000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <0x23 0x2 0x24 0x2 0x28 0x2>; + interrupt-parent = <&mpic>; + tbi-handle = <&tbi1>; + phy-handle = <&phy1>; + + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x520 0x20>; + + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + }; + + serial0: serial@4500 { + cell-index = <0>; + device_type = "serial"; + compatible = "fsl,ns16550", "ns16550"; + reg = <0x4500 0x100>; // reg base, size + clock-frequency = <0>; // should we fill in in uboot? + interrupts = <0x2a 0x2>; + interrupt-parent = <&mpic>; + }; + + serial1: serial@4600 { + cell-index = <1>; + device_type = "serial"; + compatible = "fsl,ns16550", "ns16550"; + reg = <0x4600 0x100>; // reg base, size + clock-frequency = <0>; // should we fill in in uboot? 
+ interrupts = <0x2a 0x2>; + interrupt-parent = <&mpic>; + }; + + global-utilities@e0000 { //global utilities reg + compatible = "fsl,mpc8548-guts"; + reg = <0xe0000 0x1000>; + fsl,has-rstcr; + }; + + crypto@30000 { + compatible = "fsl,sec2.1", "fsl,sec2.0"; + reg = <0x30000 0x10000>; + interrupts = <45 2>; + interrupt-parent = <&mpic>; + fsl,num-channels = <4>; + fsl,channel-fifo-len = <24>; + fsl,exec-units-mask = <0xfe>; + fsl,descriptor-types-mask = <0x12b0ebf>; + }; + + mpic: pic@40000 { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <2>; + reg = <0x40000 0x40000>; + compatible = "chrp,open-pic"; + device_type = "open-pic"; + }; + }; + + pci0: pci@e0008000 { + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x01 (PCI-X slot) @66MHz */ + 0x0800 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x0800 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x0800 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x0800 0x0 0x0 0x4 &mpic 0x1 0x1 + + /* IDSEL 0x11 (PCI, 3.3V 32bit) @33MHz */ + 0x8800 0x0 0x0 0x1 &mpic 0x2 0x1 + 0x8800 0x0 0x0 0x2 &mpic 0x3 0x1 + 0x8800 0x0 0x0 0x3 &mpic 0x4 0x1 + 0x8800 0x0 0x0 0x4 &mpic 0x1 0x1>; + + interrupt-parent = <&mpic>; + interrupts = <0x18 0x2>; + bus-range = <0 0>; + ranges = <0x02000000 0x0 0x80000000 0x80000000 0x0 0x10000000 + 0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00800000>; + clock-frequency = <66000000>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0xe0008000 0x1000>; + compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci"; + device_type = "pci"; + }; + + pci1: pcie@e000a000 { + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + + /* IDSEL 0x0 (PEX) */ + 0x0000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0x0000 0x0 0x0 0x2 &mpic 0x1 0x1 + 0x0000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0x0000 0x0 0x0 0x4 &mpic 0x3 0x1>; + + interrupt-parent = <&mpic>; + interrupts = <0x1a 0x2>; + bus-range = <0x0 0xff>; + ranges = <0x02000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000 + 0x01000000 0x0 0x00000000 0xe2800000 0x0 0x08000000>; + clock-frequency = <33000000>; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0xe000a000 0x1000>; + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + ranges = <0x02000000 0x0 0xa0000000 + 0x02000000 0x0 0xa0000000 + 0x0 0x10000000 + + 0x01000000 0x0 0x00000000 + 0x01000000 0x0 0x00000000 + 0x0 0x00800000>; + }; + }; +}; diff --git a/arch/powerpc/boot/dts/sbc8548-pre.dtsi b/arch/powerpc/boot/dts/sbc8548-pre.dtsi new file mode 100644 index 00000000000..d8c66290c5b --- /dev/null +++ b/arch/powerpc/boot/dts/sbc8548-pre.dtsi @@ -0,0 +1,52 @@ +/* + * SBC8548 Device Tree Source + * + * Copyright 2007 Wind River Systems Inc. + * + * Paul Gortmaker (see MAINTAINERS for contact information) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +/{ + model = "SBC8548"; + compatible = "SBC8548"; + #address-cells = <1>; + #size-cells = <1>; + + aliases { + ethernet0 = &enet0; + ethernet1 = &enet1; + serial0 = &serial0; + serial1 = &serial1; + pci0 = &pci0; + pci1 = &pci1; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + PowerPC,8548@0 { + device_type = "cpu"; + reg = <0>; + d-cache-line-size = <0x20>; // 32 bytes + i-cache-line-size = <0x20>; // 32 bytes + d-cache-size = <0x8000>; // L1, 32K + i-cache-size = <0x8000>; // L1, 32K + timebase-frequency = <0>; // From uboot + bus-frequency = <0>; + clock-frequency = <0>; + next-level-cache = <&L2>; + }; + }; + + memory { + device_type = "memory"; + reg = <0x00000000 0x10000000>; + }; + +}; diff --git a/arch/powerpc/boot/dts/sbc8548.dts b/arch/powerpc/boot/dts/sbc8548.dts index 77be77116c2..1df2a095566 100644 --- a/arch/powerpc/boot/dts/sbc8548.dts +++ b/arch/powerpc/boot/dts/sbc8548.dts @@ -14,44 +14,9 @@ /dts-v1/; -/ { - model = "SBC8548"; - compatible = "SBC8548"; - #address-cells = <1>; - #size-cells = <1>; - - aliases { - ethernet0 = &enet0; - ethernet1 = &enet1; - serial0 = &serial0; - serial1 = &serial1; - pci0 = &pci0; - pci1 = &pci1; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - PowerPC,8548@0 { - device_type = "cpu"; - reg = <0>; - d-cache-line-size = <0x20>; // 32 bytes - i-cache-line-size = <0x20>; // 32 bytes - d-cache-size = <0x8000>; // L1, 32K - i-cache-size = <0x8000>; // L1, 32K - timebase-frequency = <0>; // From uboot - bus-frequency = <0>; - clock-frequency = <0>; - next-level-cache = <&L2>; - }; - }; - - memory { - device_type = "memory"; - reg = <0x00000000 0x10000000>; - }; +/include/ "sbc8548-pre.dtsi" +/{ localbus@e0000000 { #address-cells = <2>; #size-cells = <1>; @@ -63,23 +28,25 @@ 0x3 0x0 0xf0000000 0x04000000 /*64MB SDRAM*/ 0x4 0x0 0xf4000000 0x04000000 /*64MB SDRAM*/ 0x5 0x0 0xf8000000 0x00b10000 /* EPLD */ - 0x6 0x0 0xfb800000 0x04000000>; /*64MB Flash*/ + 0x6 0x0 0xec000000 0x04000000>; /*64MB Flash*/ flash@0,0 { #address-cells = <1>; #size-cells = <1>; - compatible = "cfi-flash"; + compatible = "intel,JS28F640", "cfi-flash"; reg = <0x0 0x0 0x800000>; bank-width = <1>; device-width = <1>; partition@0x0 { label = "space"; - reg = <0x00000000 0x00100000>; + /* FF800000 -> FFF9FFFF */ + reg = <0x00000000 0x007a0000>; }; - partition@0x100000 { + partition@0x7a0000 { label = "bootloader"; - reg = <0x00100000 0x00700000>; + /* FFFA0000 -> FFFFFFFF */ + reg = <0x007a0000 0x00060000>; read-only; }; }; @@ -122,307 +89,22 @@ #address-cells = <1>; #size-cells = <1>; reg = <0x6 0x0 0x04000000>; - compatible = "cfi-flash"; + compatible = "intel,JS28F128", "cfi-flash"; bank-width = <4>; device-width = <1>; partition@0x0 { + label = "space"; + /* EC000000 -> EFEFFFFF */ + reg = <0x00000000 0x03f00000>; + }; + partition@0x03f00000 { label = "bootloader"; - reg = <0x00000000 0x00100000>; + /* EFF00000 -> EFFFFFFF */ + reg = <0x03f00000 0x00100000>; read-only; }; - partition@0x00100000 { - label = "file-system"; - reg = <0x00100000 0x01f00000>; - }; - partition@0x02000000 { - label = "boot-config"; - reg = <0x02000000 0x00100000>; - }; - partition@0x02100000 { - label = "space"; - reg = <0x02100000 0x01f00000>; - }; }; }; - - soc8548@e0000000 { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - ranges = <0x00000000 0xe0000000 0x00100000>; - bus-frequency = <0>; - compatible = "simple-bus"; - - ecm-law@0 { - compatible = "fsl,ecm-law"; - reg = <0x0 0x1000>; - fsl,num-laws = <10>; - }; - - ecm@1000 { - compatible 
= "fsl,mpc8548-ecm", "fsl,ecm"; - reg = <0x1000 0x1000>; - interrupts = <17 2>; - interrupt-parent = <&mpic>; - }; - - memory-controller@2000 { - compatible = "fsl,mpc8548-memory-controller"; - reg = <0x2000 0x1000>; - interrupt-parent = <&mpic>; - interrupts = <0x12 0x2>; - }; - - L2: l2-cache-controller@20000 { - compatible = "fsl,mpc8548-l2-cache-controller"; - reg = <0x20000 0x1000>; - cache-line-size = <0x20>; // 32 bytes - cache-size = <0x80000>; // L2, 512K - interrupt-parent = <&mpic>; - interrupts = <0x10 0x2>; - }; - - i2c@3000 { - #address-cells = <1>; - #size-cells = <0>; - cell-index = <0>; - compatible = "fsl-i2c"; - reg = <0x3000 0x100>; - interrupts = <0x2b 0x2>; - interrupt-parent = <&mpic>; - dfsrr; - }; - - i2c@3100 { - #address-cells = <1>; - #size-cells = <0>; - cell-index = <1>; - compatible = "fsl-i2c"; - reg = <0x3100 0x100>; - interrupts = <0x2b 0x2>; - interrupt-parent = <&mpic>; - dfsrr; - }; - - dma@21300 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,mpc8548-dma", "fsl,eloplus-dma"; - reg = <0x21300 0x4>; - ranges = <0x0 0x21100 0x200>; - cell-index = <0>; - dma-channel@0 { - compatible = "fsl,mpc8548-dma-channel", - "fsl,eloplus-dma-channel"; - reg = <0x0 0x80>; - cell-index = <0>; - interrupt-parent = <&mpic>; - interrupts = <20 2>; - }; - dma-channel@80 { - compatible = "fsl,mpc8548-dma-channel", - "fsl,eloplus-dma-channel"; - reg = <0x80 0x80>; - cell-index = <1>; - interrupt-parent = <&mpic>; - interrupts = <21 2>; - }; - dma-channel@100 { - compatible = "fsl,mpc8548-dma-channel", - "fsl,eloplus-dma-channel"; - reg = <0x100 0x80>; - cell-index = <2>; - interrupt-parent = <&mpic>; - interrupts = <22 2>; - }; - dma-channel@180 { - compatible = "fsl,mpc8548-dma-channel", - "fsl,eloplus-dma-channel"; - reg = <0x180 0x80>; - cell-index = <3>; - interrupt-parent = <&mpic>; - interrupts = <23 2>; - }; - }; - - enet0: ethernet@24000 { - #address-cells = <1>; - #size-cells = <1>; - cell-index = <0>; - device_type = "network"; - model = "eTSEC"; - compatible = "gianfar"; - reg = <0x24000 0x1000>; - ranges = <0x0 0x24000 0x1000>; - local-mac-address = [ 00 00 00 00 00 00 ]; - interrupts = <0x1d 0x2 0x1e 0x2 0x22 0x2>; - interrupt-parent = <&mpic>; - tbi-handle = <&tbi0>; - phy-handle = <&phy0>; - - mdio@520 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,gianfar-mdio"; - reg = <0x520 0x20>; - - phy0: ethernet-phy@19 { - interrupt-parent = <&mpic>; - interrupts = <0x6 0x1>; - reg = <0x19>; - device_type = "ethernet-phy"; - }; - phy1: ethernet-phy@1a { - interrupt-parent = <&mpic>; - interrupts = <0x7 0x1>; - reg = <0x1a>; - device_type = "ethernet-phy"; - }; - tbi0: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; - }; - }; - }; - - enet1: ethernet@25000 { - #address-cells = <1>; - #size-cells = <1>; - cell-index = <1>; - device_type = "network"; - model = "eTSEC"; - compatible = "gianfar"; - reg = <0x25000 0x1000>; - ranges = <0x0 0x25000 0x1000>; - local-mac-address = [ 00 00 00 00 00 00 ]; - interrupts = <0x23 0x2 0x24 0x2 0x28 0x2>; - interrupt-parent = <&mpic>; - tbi-handle = <&tbi1>; - phy-handle = <&phy1>; - - mdio@520 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,gianfar-tbi"; - reg = <0x520 0x20>; - - tbi1: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; - }; - }; - }; - - serial0: serial@4500 { - cell-index = <0>; - device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; - reg = <0x4500 0x100>; // reg base, size - clock-frequency = <0>; // should we fill in in uboot? 
- interrupts = <0x2a 0x2>; - interrupt-parent = <&mpic>; - }; - - serial1: serial@4600 { - cell-index = <1>; - device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; - reg = <0x4600 0x100>; // reg base, size - clock-frequency = <0>; // should we fill in in uboot? - interrupts = <0x2a 0x2>; - interrupt-parent = <&mpic>; - }; - - global-utilities@e0000 { //global utilities reg - compatible = "fsl,mpc8548-guts"; - reg = <0xe0000 0x1000>; - fsl,has-rstcr; - }; - - crypto@30000 { - compatible = "fsl,sec2.1", "fsl,sec2.0"; - reg = <0x30000 0x10000>; - interrupts = <45 2>; - interrupt-parent = <&mpic>; - fsl,num-channels = <4>; - fsl,channel-fifo-len = <24>; - fsl,exec-units-mask = <0xfe>; - fsl,descriptor-types-mask = <0x12b0ebf>; - }; - - mpic: pic@40000 { - interrupt-controller; - #address-cells = <0>; - #interrupt-cells = <2>; - reg = <0x40000 0x40000>; - compatible = "chrp,open-pic"; - device_type = "open-pic"; - }; - }; - - pci0: pci@e0008000 { - interrupt-map-mask = <0xf800 0x0 0x0 0x7>; - interrupt-map = < - /* IDSEL 0x01 (PCI-X slot) @66MHz */ - 0x0800 0x0 0x0 0x1 &mpic 0x2 0x1 - 0x0800 0x0 0x0 0x2 &mpic 0x3 0x1 - 0x0800 0x0 0x0 0x3 &mpic 0x4 0x1 - 0x0800 0x0 0x0 0x4 &mpic 0x1 0x1 - - /* IDSEL 0x11 (PCI, 3.3V 32bit) @33MHz */ - 0x8800 0x0 0x0 0x1 &mpic 0x2 0x1 - 0x8800 0x0 0x0 0x2 &mpic 0x3 0x1 - 0x8800 0x0 0x0 0x3 &mpic 0x4 0x1 - 0x8800 0x0 0x0 0x4 &mpic 0x1 0x1>; - - interrupt-parent = <&mpic>; - interrupts = <0x18 0x2>; - bus-range = <0 0>; - ranges = <0x02000000 0x0 0x80000000 0x80000000 0x0 0x10000000 - 0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00800000>; - clock-frequency = <66000000>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - reg = <0xe0008000 0x1000>; - compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci"; - device_type = "pci"; - }; - - pci1: pcie@e000a000 { - interrupt-map-mask = <0xf800 0x0 0x0 0x7>; - interrupt-map = < - - /* IDSEL 0x0 (PEX) */ - 0x0000 0x0 0x0 0x1 &mpic 0x0 0x1 - 0x0000 0x0 0x0 0x2 &mpic 0x1 0x1 - 0x0000 0x0 0x0 0x3 &mpic 0x2 0x1 - 0x0000 0x0 0x0 0x4 &mpic 0x3 0x1>; - - interrupt-parent = <&mpic>; - interrupts = <0x1a 0x2>; - bus-range = <0x0 0xff>; - ranges = <0x02000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000 - 0x01000000 0x0 0x00000000 0xe2800000 0x0 0x08000000>; - clock-frequency = <33000000>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - reg = <0xe000a000 0x1000>; - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - pcie@0 { - reg = <0x0 0x0 0x0 0x0 0x0>; - #size-cells = <2>; - #address-cells = <3>; - device_type = "pci"; - ranges = <0x02000000 0x0 0xa0000000 - 0x02000000 0x0 0xa0000000 - 0x0 0x10000000 - - 0x01000000 0x0 0x00000000 - 0x01000000 0x0 0x00000000 - 0x0 0x00800000>; - }; - }; }; + +/include/ "sbc8548-post.dtsi" diff --git a/arch/powerpc/boot/dts/uc101.dts b/arch/powerpc/boot/dts/uc101.dts index ba83d5488ec..5c462194ef0 100644 --- a/arch/powerpc/boot/dts/uc101.dts +++ b/arch/powerpc/boot/dts/uc101.dts @@ -13,54 +13,20 @@ /include/ "mpc5200b.dtsi" +&gpt0 { gpio-controller; }; +&gpt1 { gpio-controller; }; +&gpt2 { gpio-controller; }; +&gpt3 { gpio-controller; }; +&gpt4 { gpio-controller; }; +&gpt5 { gpio-controller; }; +&gpt6 { gpio-controller; }; +&gpt7 { gpio-controller; }; + / { model = "manroland,uc101"; compatible = "manroland,uc101"; soc5200@f0000000 { - gpt0: timer@600 { // General Purpose Timer in GPIO mode - gpio-controller; - #gpio-cells = <2>; - }; - - gpt1: timer@610 { // General Purpose Timer in GPIO mode - gpio-controller; - #gpio-cells = <2>; - }; - - 
gpt2: timer@620 { // General Purpose Timer in GPIO mode - gpio-controller; - #gpio-cells = <2>; - }; - - gpt3: timer@630 { // General Purpose Timer in GPIO mode - compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; - reg = <0x630 0x10>; - interrupts = <1 12 0>; - gpio-controller; - #gpio-cells = <2>; - }; - - gpt4: timer@640 { // General Purpose Timer in GPIO mode - gpio-controller; - #gpio-cells = <2>; - }; - - gpt5: timer@650 { // General Purpose Timer in GPIO mode - gpio-controller; - #gpio-cells = <2>; - }; - - gpt6: timer@660 { // General Purpose Timer in GPIO mode - gpio-controller; - #gpio-cells = <2>; - }; - - gpt7: timer@670 { // General Purpose Timer in GPIO mode - gpio-controller; - #gpio-cells = <2>; - }; - rtc@800 { status = "disabled"; }; diff --git a/arch/powerpc/boot/dts/virtex440-ml507.dts b/arch/powerpc/boot/dts/virtex440-ml507.dts index 52d8c1ad26a..fc7073bc547 100644 --- a/arch/powerpc/boot/dts/virtex440-ml507.dts +++ b/arch/powerpc/boot/dts/virtex440-ml507.dts @@ -272,6 +272,12 @@ xlnx,temac-type = <0>; xlnx,txcsum = <1>; xlnx,txfifo = <0x1000>; + phy-handle = <&phy7>; + clock-frequency = <100000000>; + phy7: phy@7 { + compatible = "marvell,88e1111"; + reg = <7>; + } ; } ; } ; IIC_EEPROM: i2c@81600000 { diff --git a/arch/powerpc/configs/83xx/kmeter1_defconfig b/arch/powerpc/configs/83xx/kmeter1_defconfig index a0dfef1fcdb..e12e60c3b9a 100644 --- a/arch/powerpc/configs/83xx/kmeter1_defconfig +++ b/arch/powerpc/configs/83xx/kmeter1_defconfig @@ -2,6 +2,8 @@ CONFIG_EXPERIMENTAL=y # CONFIG_SWAP is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_EXPERT=y CONFIG_SLAB=y @@ -16,8 +18,6 @@ CONFIG_PARTITION_ADVANCED=y # CONFIG_PPC_PMAC is not set CONFIG_PPC_83xx=y CONFIG_KMETER1=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y CONFIG_PREEMPT=y # CONFIG_SECCOMP is not set CONFIG_NET=y @@ -45,7 +45,6 @@ CONFIG_MTD_PHYSMAP_OF=y CONFIG_MTD_PHRAM=y CONFIG_MTD_UBI=y CONFIG_MTD_UBI_GLUEBI=y -CONFIG_MTD_UBI_DEBUG=y CONFIG_PROC_DEVICETREE=y CONFIG_NETDEVICES=y CONFIG_DUMMY=y @@ -76,5 +75,4 @@ CONFIG_TMPFS=y CONFIG_JFFS2_FS=y CONFIG_UBIFS_FS=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig index f8c51a4ab99..c9765b54dd1 100644 --- a/arch/powerpc/configs/85xx/ge_imp3a_defconfig +++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig @@ -34,7 +34,6 @@ CONFIG_PREEMPT=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m CONFIG_MATH_EMULATION=y -CONFIG_IRQ_ALL_CPUS=y CONFIG_FORCE_MAX_ZONEORDER=17 CONFIG_PCI=y CONFIG_PCIEPORTBUS=y diff --git a/arch/powerpc/configs/85xx/ppa8548_defconfig b/arch/powerpc/configs/85xx/ppa8548_defconfig new file mode 100644 index 00000000000..a11337de8aa --- /dev/null +++ b/arch/powerpc/configs/85xx/ppa8548_defconfig @@ -0,0 +1,65 @@ +CONFIG_PPC_85xx=y +CONFIG_PPA8548=y +CONFIG_DTC=y +CONFIG_DEFAULT_UIMAGE=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_PCI is not set +# CONFIG_USB_SUPPORT is not set +CONFIG_ADVANCED_OPTIONS=y +CONFIG_LOWMEM_SIZE_BOOL=y +CONFIG_LOWMEM_SIZE=0x40000000 +CONFIG_LOWMEM_CAM_NUM_BOOL=y +CONFIG_LOWMEM_CAM_NUM=4 +CONFIG_PAGE_OFFSET_BOOL=y +CONFIG_PAGE_OFFSET=0xb0000000 +CONFIG_KERNEL_START_BOOL=y +CONFIG_KERNEL_START=0xb0000000 +# CONFIG_PHYSICAL_START_BOOL is not set +CONFIG_PHYSICAL_START=0x00000000 +CONFIG_PHYSICAL_ALIGN=0x04000000 +CONFIG_TASK_SIZE_BOOL=y +CONFIG_TASK_SIZE=0xb0000000 + +CONFIG_FSL_LBC=y +CONFIG_FSL_DMA=y +CONFIG_FSL_RIO=y + 
+CONFIG_RAPIDIO=y +CONFIG_RAPIDIO_DMA_ENGINE=y +CONFIG_RAPIDIO_TSI57X=y +CONFIG_RAPIDIO_TSI568=y +CONFIG_RAPIDIO_CPS_XX=y +CONFIG_RAPIDIO_CPS_GEN2=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_PROC_DEVICETREE=y + +CONFIG_MTD=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CHAR=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_CONCAT=y +CONFIG_MTD_PARTITIONS=y +CONFIG_MTD_PHYSMAP_OF=y + +CONFIG_I2C=y +CONFIG_I2C_MPC=y +CONFIG_I2C_CHARDEV +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_DRV_ISL1208=y + +CONFIG_NET=y +CONFIG_INET=y +CONFIG_IP_PNP=y +CONFIG_NETDEVICES=y +CONFIG_MII=y +CONFIG_GIANFAR=y +CONFIG_MARVELL_PHY=y +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y diff --git a/arch/powerpc/configs/85xx/sbc8548_defconfig b/arch/powerpc/configs/85xx/sbc8548_defconfig index 5b2b651dfb9..008a7a47b89 100644 --- a/arch/powerpc/configs/85xx/sbc8548_defconfig +++ b/arch/powerpc/configs/85xx/sbc8548_defconfig @@ -55,3 +55,22 @@ CONFIG_ROOT_NFS=y # CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_SYSCTL_SYSCALL_CHECK=y # CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_MTD=y +CONFIG_MTD_OF_PARTS=y +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_GEN_PROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_NOSWAP=y +CONFIG_MTD_CFI_GEOMETRY=y +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +CONFIG_MTD_CFI_I4=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_UTIL=y +CONFIG_MTD_PHYSMAP_OF=y diff --git a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig index da731c2fe98..f2f6734d5f7 100644 --- a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig +++ b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig @@ -25,7 +25,6 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_HZ_1000=y CONFIG_PREEMPT=y CONFIG_BINFMT_MISC=m -CONFIG_IRQ_ALL_CPUS=y CONFIG_SPARSE_IRQ=y CONFIG_PCI=y CONFIG_PCIEPORTBUS=y diff --git a/arch/powerpc/configs/86xx/gef_sbc310_defconfig b/arch/powerpc/configs/86xx/gef_sbc310_defconfig index 2149360a1e6..be73219212b 100644 --- a/arch/powerpc/configs/86xx/gef_sbc310_defconfig +++ b/arch/powerpc/configs/86xx/gef_sbc310_defconfig @@ -25,7 +25,6 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_HZ_1000=y CONFIG_PREEMPT=y CONFIG_BINFMT_MISC=y -CONFIG_IRQ_ALL_CPUS=y CONFIG_SPARSE_IRQ=y CONFIG_PCI=y CONFIG_PCIEPORTBUS=y diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig index af2e8e1edba..b3e2b1058f2 100644 --- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig +++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig @@ -25,7 +25,6 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_HZ_1000=y CONFIG_PREEMPT=y CONFIG_BINFMT_MISC=m -CONFIG_IRQ_ALL_CPUS=y CONFIG_SPARSE_IRQ=y CONFIG_PCI=y CONFIG_PCIEPORTBUS=y diff --git a/arch/powerpc/configs/86xx/sbc8641d_defconfig b/arch/powerpc/configs/86xx/sbc8641d_defconfig index 0a92ca04564..1a62baf855e 100644 --- a/arch/powerpc/configs/86xx/sbc8641d_defconfig +++ b/arch/powerpc/configs/86xx/sbc8641d_defconfig @@ -23,7 +23,6 @@ CONFIG_SBC8641D=y CONFIG_HIGH_RES_TIMERS=y CONFIG_PREEMPT=y CONFIG_BINFMT_MISC=m -CONFIG_IRQ_ALL_CPUS=y CONFIG_SPARSE_IRQ=y CONFIG_PCI=y CONFIG_PCIEPORTBUS=y diff --git a/arch/powerpc/configs/chroma_defconfig b/arch/powerpc/configs/chroma_defconfig index 29bb11ec6c6..4f35fc46238 100644 --- a/arch/powerpc/configs/chroma_defconfig +++ b/arch/powerpc/configs/chroma_defconfig @@ -1,6 +1,6 @@ CONFIG_PPC64=y 
CONFIG_PPC_BOOK3E_64=y -# CONFIG_VIRT_CPU_ACCOUNTING is not set +# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set CONFIG_SMP=y CONFIG_NR_CPUS=256 CONFIG_EXPERIMENTAL=y diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig index 1c0f2432ecd..60027c2a703 100644 --- a/arch/powerpc/configs/corenet32_smp_defconfig +++ b/arch/powerpc/configs/corenet32_smp_defconfig @@ -32,7 +32,6 @@ CONFIG_HIGHMEM=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m CONFIG_KEXEC=y -CONFIG_IRQ_ALL_CPUS=y CONFIG_FORCE_MAX_ZONEORDER=13 CONFIG_PCI=y CONFIG_PCIEPORTBUS=y diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig index 88fa5c46f66..3d139fa0405 100644 --- a/arch/powerpc/configs/corenet64_smp_defconfig +++ b/arch/powerpc/configs/corenet64_smp_defconfig @@ -1,6 +1,6 @@ CONFIG_PPC64=y CONFIG_PPC_BOOK3E_64=y -# CONFIG_VIRT_CPU_ACCOUNTING is not set +# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set CONFIG_SMP=y CONFIG_NR_CPUS=2 CONFIG_EXPERIMENTAL=y @@ -26,7 +26,6 @@ CONFIG_P5020_DS=y CONFIG_P5040_DS=y # CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set CONFIG_BINFMT_MISC=m -CONFIG_IRQ_ALL_CPUS=y CONFIG_PCIEPORTBUS=y CONFIG_PCI_MSI=y CONFIG_RAPIDIO=y diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig index 502cd9e027e..8d00ea5b8a9 100644 --- a/arch/powerpc/configs/mpc85xx_smp_defconfig +++ b/arch/powerpc/configs/mpc85xx_smp_defconfig @@ -49,7 +49,6 @@ CONFIG_QE_GPIO=y CONFIG_HIGHMEM=y CONFIG_BINFMT_MISC=m CONFIG_MATH_EMULATION=y -CONFIG_IRQ_ALL_CPUS=y CONFIG_FORCE_MAX_ZONEORDER=12 CONFIG_PCI=y CONFIG_PCI_MSI=y diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index 840a2c2d043..bd8a6f71944 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig @@ -1,28 +1,27 @@ CONFIG_PPC64=y CONFIG_ALTIVEC=y -# CONFIG_VIRT_CPU_ACCOUNTING is not set CONFIG_SMP=y CONFIG_NR_CPUS=2 CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_BLK_DEV_INITRD=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_PROFILING=y CONFIG_OPROFILE=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +CONFIG_MAC_PARTITION=y # CONFIG_PPC_PSERIES is not set # CONFIG_PPC_PMAC is not set CONFIG_PPC_PASEMI=y CONFIG_PPC_PASEMI_IOMMU=y CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_DEBUG=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y CONFIG_HZ_1000=y CONFIG_PPC_64K_PAGES=y # CONFIG_SECCOMP is not set @@ -47,7 +46,6 @@ CONFIG_INET_ESP=y # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_MTD=y -CONFIG_MTD_CONCAT=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_SLRAM=y @@ -58,7 +56,6 @@ CONFIG_PROC_DEVICETREE=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=16384 -CONFIG_MISC_DEVICES=y CONFIG_EEPROM_LEGACY=y CONFIG_IDE=y CONFIG_BLK_DEV_IDECD=y @@ -91,21 +88,19 @@ CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y CONFIG_NETDEVICES=y CONFIG_DUMMY=y -CONFIG_MARVELL_PHY=y -CONFIG_NET_ETHERNET=y CONFIG_MII=y -CONFIG_NET_PCI=y -CONFIG_E1000=y CONFIG_TIGON3=y +CONFIG_E1000=y CONFIG_PASEMI_MAC=y +CONFIG_MARVELL_PHY=y CONFIG_INPUT_JOYDEV=y CONFIG_INPUT_EVDEV=y # CONFIG_KEYBOARD_ATKBD is not set # CONFIG_MOUSE_PS2 is not set # CONFIG_SERIO is not set +CONFIG_LEGACY_PTY_COUNT=4 CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y 
-CONFIG_LEGACY_PTY_COUNT=4 CONFIG_HW_RANDOM=y CONFIG_RAW_DRIVER=y CONFIG_I2C_CHARDEV=y @@ -146,14 +141,11 @@ CONFIG_HID_TOPSEED=y CONFIG_HID_THRUSTMASTER=y CONFIG_HID_ZEROPLUS=y CONFIG_USB=y -CONFIG_USB_DEVICEFS=y -# CONFIG_USB_DEVICE_CLASS is not set CONFIG_USB_EHCI_HCD=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_UHCI_HCD=y CONFIG_USB_SL811_HCD=y CONFIG_USB_STORAGE=y -CONFIG_USB_LIBUSUAL=y CONFIG_EDAC=y CONFIG_EDAC_MM_EDAC=y CONFIG_EDAC_PASEMI=y @@ -164,8 +156,6 @@ CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -CONFIG_INOTIFY=y -CONFIG_AUTOFS_FS=y CONFIG_AUTOFS4_FS=y CONFIG_ISO9660_FS=y CONFIG_UDF_FS=y @@ -177,27 +167,22 @@ CONFIG_HUGETLBFS=y CONFIG_CONFIGFS_FS=y CONFIG_JFFS2_FS=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y CONFIG_NFSD=y CONFIG_NFSD_V4=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_MAC_PARTITION=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_CRC_CCITT=y +CONFIG_PRINTK_TIME=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y # CONFIG_SCHED_DEBUG is not set -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_XMON=y CONFIG_XMON_DEFAULT=y CONFIG_CRYPTO_MD4=y CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=y -CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_BLOWFISH=y # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index 6d03530b750..aef3f71de5a 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -5,6 +5,9 @@ CONFIG_SMP=y CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_IRQ_DOMAIN_DEBUG=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_TASKSTATS=y CONFIG_TASK_DELAY_ACCT=y CONFIG_IKCONFIG=y @@ -21,6 +24,8 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_EFI_PARTITION=y CONFIG_PPC_SPLPAR=y CONFIG_SCANLOG=m CONFIG_PPC_SMLPAR=y @@ -42,10 +47,9 @@ CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_PMAC64=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y CONFIG_HZ_100=y CONFIG_BINFMT_MISC=m +CONFIG_PPC_TRANSACTIONAL_MEM=y CONFIG_HOTPLUG_CPU=y CONFIG_KEXEC=y CONFIG_IRQ_ALL_CPUS=y @@ -73,7 +77,6 @@ CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m # CONFIG_IPV6 is not set CONFIG_NETFILTER=y -CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_EVENTS=y CONFIG_NF_CT_PROTO_SCTP=m @@ -130,19 +133,12 @@ CONFIG_NETFILTER_XT_MATCH_U32=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_NF_NAT_SNMP_BASIC=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -151,7 +147,10 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_BPF_JIT=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y CONFIG_PROC_DEVICETREE=y CONFIG_BLK_DEV_FD=y CONFIG_BLK_DEV_LOOP=y @@ -173,7 +172,6 @@ CONFIG_CHR_DEV_SG=y CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_FC_ATTRS=y -CONFIG_SCSI_SAS_ATTRS=m CONFIG_SCSI_CXGB3_ISCSI=m CONFIG_SCSI_CXGB4_ISCSI=m 
CONFIG_SCSI_BNX2_ISCSI=m @@ -205,13 +203,6 @@ CONFIG_DM_SNAPSHOT=m CONFIG_DM_MIRROR=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m -CONFIG_IEEE1394=y -CONFIG_IEEE1394_OHCI1394=y -CONFIG_IEEE1394_SBP2=m -CONFIG_IEEE1394_ETH1394=m -CONFIG_IEEE1394_RAWIO=y -CONFIG_IEEE1394_VIDEO1394=m -CONFIG_IEEE1394_DV1394=m CONFIG_ADB_PMU=y CONFIG_PMAC_SMU=y CONFIG_THERM_PM72=y @@ -220,50 +211,43 @@ CONFIG_WINDFARM_PM81=y CONFIG_WINDFARM_PM91=y CONFIG_WINDFARM_PM112=y CONFIG_WINDFARM_PM121=y -CONFIG_NETDEVICES=y -CONFIG_DUMMY=m CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_NETCONSOLE=y +CONFIG_NETPOLL_TRAP=y CONFIG_TUN=m -CONFIG_MARVELL_PHY=y -CONFIG_BROADCOM_PHY=m -CONFIG_NET_ETHERNET=y -CONFIG_SUNGEM=y -CONFIG_NET_VENDOR_3COM=y CONFIG_VORTEX=y -CONFIG_IBMVETH=m -CONFIG_NET_PCI=y -CONFIG_PCNET32=y -CONFIG_E100=y CONFIG_ACENIC=m CONFIG_ACENIC_OMIT_TIGON_I=y -CONFIG_E1000=y -CONFIG_E1000E=y +CONFIG_PCNET32=y CONFIG_TIGON3=y -CONFIG_BNX2=m -CONFIG_SPIDER_NET=m -CONFIG_GELIC_NET=m -CONFIG_GELIC_WIRELESS=y CONFIG_CHELSIO_T1=m -CONFIG_CHELSIO_T3=m -CONFIG_CHELSIO_T4=m +CONFIG_BE2NET=m +CONFIG_S2IO=m +CONFIG_IBMVETH=m CONFIG_EHEA=m -CONFIG_IXGBE=m +CONFIG_E100=y +CONFIG_E1000=y +CONFIG_E1000E=y CONFIG_IXGB=m -CONFIG_S2IO=m +CONFIG_IXGBE=m +CONFIG_MLX4_EN=m CONFIG_MYRI10GE=m -CONFIG_NETXEN_NIC=m CONFIG_PASEMI_MAC=y -CONFIG_MLX4_EN=m CONFIG_QLGE=m -CONFIG_BE2NET=m +CONFIG_NETXEN_NIC=m +CONFIG_SUNGEM=y +CONFIG_GELIC_NET=m +CONFIG_GELIC_WIRELESS=y +CONFIG_SPIDER_NET=m +CONFIG_MARVELL_PHY=y +CONFIG_BROADCOM_PHY=m CONFIG_PPP=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -CONFIG_PPP_DEFLATE=m CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m CONFIG_PPPOE=m -CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m # CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_EVDEV=m CONFIG_INPUT_MISC=y @@ -279,13 +263,10 @@ CONFIG_HVC_RTAS=y CONFIG_HVC_BEAT=y CONFIG_HVCS=m CONFIG_IBM_BSR=m -CONFIG_HW_RANDOM=m -CONFIG_HW_RANDOM_PSERIES=m CONFIG_RAW_DRIVER=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_AMD8111=y CONFIG_I2C_PASEMI=y -# CONFIG_HWMON is not set CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_FB=y CONFIG_FIRMWARE_EDID=y @@ -300,7 +281,6 @@ CONFIG_FB_RADEON=y CONFIG_FB_IBM_GXT4500=y CONFIG_FB_PS3=m CONFIG_LCD_CLASS_DEVICE=y -CONFIG_DISPLAY_SUPPORT=y # CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y @@ -317,18 +297,16 @@ CONFIG_SND_AOA_FABRIC_LAYOUT=m CONFIG_SND_AOA_ONYX=m CONFIG_SND_AOA_TAS=m CONFIG_SND_AOA_TOONIE=m -CONFIG_USB_HIDDEV=y CONFIG_HID_GYRATION=y CONFIG_HID_PANTHERLORD=y CONFIG_HID_PETALYNX=y CONFIG_HID_SAMSUNG=y CONFIG_HID_SONY=y CONFIG_HID_SUNPLUS=y +CONFIG_USB_HIDDEV=y CONFIG_USB=y -CONFIG_USB_DEVICEFS=y CONFIG_USB_MON=m CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_TT_NEWSCHED=y # CONFIG_USB_EHCI_HCD_PPC_OF is not set CONFIG_USB_OHCI_HCD=y CONFIG_USB_STORAGE=m @@ -370,11 +348,9 @@ CONFIG_JFS_POSIX_ACL=y CONFIG_JFS_SECURITY=y CONFIG_XFS_FS=m CONFIG_XFS_POSIX_ACL=y -CONFIG_OCFS2_FS=m CONFIG_BTRFS_FS=m CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_NILFS2_FS=m -CONFIG_INOTIFY=y CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=m CONFIG_ISO9660_FS=y @@ -383,100 +359,53 @@ CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y CONFIG_HUGETLBFS=y CONFIG_HFS_FS=m CONFIG_HFSPLUS_FS=m CONFIG_CRAMFS=m CONFIG_SQUASHFS=m CONFIG_SQUASHFS_XATTR=y -CONFIG_SQUASHFS_ZLIB=y CONFIG_SQUASHFS_LZO=y CONFIG_SQUASHFS_XZ=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V3_ACL=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y -CONFIG_RPCSEC_GSS_SPKM3=m CONFIG_CIFS=m 
CONFIG_CIFS_XATTR=y CONFIG_CIFS_POSIX=y -CONFIG_PARTITION_ADVANCED=y +CONFIG_NLS_DEFAULT="utf8" CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=m -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ASCII=m +CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m +CONFIG_NLS_UTF8=y CONFIG_CRC_T10DIF=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_LOCKUP_DETECTOR=y -CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_MUTEXES=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set +CONFIG_DEBUG_STACK_USAGE=y CONFIG_LATENCYTOP=y -CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_SCHED_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_DEBUG_STACKOVERFLOW=y -CONFIG_DEBUG_STACK_USAGE=y CONFIG_CODE_PATCHING_SELFTEST=y CONFIG_FTR_FIXUP_SELFTEST=y CONFIG_MSI_BITMAP_SELFTEST=y CONFIG_XMON=y -CONFIG_IRQ_DOMAIN_DEBUG=y CONFIG_BOOTX_TEXT=y CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_CCM=m -CONFIG_CRYPTO_GCM=m -CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SHA256=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_AES=m CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_ARC4=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_KHAZAD=m @@ -486,11 +415,9 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_HW=y CONFIG_CRYPTO_DEV_NX=y CONFIG_CRYPTO_DEV_NX_ENCRYPT=m CONFIG_VIRTUALIZATION=y CONFIG_KVM_BOOK3S_64=m CONFIG_KVM_BOOK3S_64_HV=y CONFIG_VHOST_NET=m -CONFIG_BPF_JIT=y diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig index f55c27609fc..4b20f76172e 100644 --- a/arch/powerpc/configs/ppc64e_defconfig +++ b/arch/powerpc/configs/ppc64e_defconfig @@ -4,6 +4,8 @@ CONFIG_SMP=y CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_TASKSTATS=y CONFIG_TASK_DELAY_ACCT=y CONFIG_IKCONFIG=y @@ -18,12 +20,13 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_MAC_PARTITION=y +CONFIG_EFI_PARTITION=y CONFIG_P5020_DS=y CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y CONFIG_BINFMT_MISC=m CONFIG_IRQ_ALL_CPUS=y CONFIG_SPARSEMEM_MANUAL=y @@ -46,7 +49,6 @@ CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m # CONFIG_IPV6 is not set CONFIG_NETFILTER=y -CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_EVENTS=y CONFIG_NF_CT_PROTO_SCTP=m @@ -103,19 +105,12 @@ CONFIG_NETFILTER_XT_MATCH_U32=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m 
CONFIG_IP_NF_MATCH_ECN=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_NF_NAT_SNMP_BASIC=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -125,6 +120,8 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y CONFIG_PROC_DEVICETREE=y CONFIG_BLK_DEV_FD=y CONFIG_BLK_DEV_LOOP=y @@ -167,41 +164,31 @@ CONFIG_DM_SNAPSHOT=m CONFIG_DM_MIRROR=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m -CONFIG_IEEE1394=y -CONFIG_IEEE1394_OHCI1394=y -CONFIG_IEEE1394_SBP2=m -CONFIG_IEEE1394_ETH1394=m -CONFIG_IEEE1394_RAWIO=y -CONFIG_IEEE1394_VIDEO1394=m -CONFIG_IEEE1394_DV1394=m CONFIG_MACINTOSH_DRIVERS=y CONFIG_WINDFARM=y CONFIG_NETDEVICES=y -CONFIG_DUMMY=m CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_NETCONSOLE=y +CONFIG_NETPOLL_TRAP=y CONFIG_TUN=m -CONFIG_MARVELL_PHY=y -CONFIG_BROADCOM_PHY=m -CONFIG_NET_ETHERNET=y -CONFIG_SUNGEM=y -CONFIG_NET_VENDOR_3COM=y CONFIG_VORTEX=y -CONFIG_NET_PCI=y -CONFIG_PCNET32=y -CONFIG_E100=y CONFIG_ACENIC=y CONFIG_ACENIC_OMIT_TIGON_I=y -CONFIG_E1000=y +CONFIG_PCNET32=y CONFIG_TIGON3=y +CONFIG_E100=y +CONFIG_E1000=y CONFIG_IXGB=m +CONFIG_SUNGEM=y +CONFIG_MARVELL_PHY=y +CONFIG_BROADCOM_PHY=m CONFIG_PPP=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -CONFIG_PPP_DEFLATE=m CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m CONFIG_PPPOE=m -CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m # CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_EVDEV=m CONFIG_INPUT_MISC=y @@ -213,7 +200,6 @@ CONFIG_SERIAL_8250_CONSOLE=y CONFIG_RAW_DRIVER=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_AMD8111=y -# CONFIG_HWMON is not set CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_FB=y CONFIG_FIRMWARE_EDID=y @@ -227,7 +213,6 @@ CONFIG_FB_MATROX_MAVEN=m CONFIG_FB_RADEON=y CONFIG_FB_IBM_GXT4500=y CONFIG_LCD_CLASS_DEVICE=y -CONFIG_DISPLAY_SUPPORT=y # CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y @@ -238,7 +223,6 @@ CONFIG_SND_SEQ_DUMMY=m CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m CONFIG_SND_SEQUENCER_OSS=y -CONFIG_USB_HIDDEV=y CONFIG_HID_DRAGONRISE=y CONFIG_HID_GYRATION=y CONFIG_HID_TWINHAN=y @@ -253,8 +237,8 @@ CONFIG_HID_SMARTJOYPLUS=y CONFIG_HID_TOPSEED=y CONFIG_HID_THRUSTMASTER=y CONFIG_HID_ZEROPLUS=y +CONFIG_USB_HIDDEV=y CONFIG_USB=y -CONFIG_USB_DEVICEFS=y CONFIG_USB_EHCI_HCD=y # CONFIG_USB_EHCI_HCD_PPC_OF is not set CONFIG_USB_OHCI_HCD=y @@ -296,73 +280,36 @@ CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y CONFIG_HFS_FS=m CONFIG_HFSPLUS_FS=m CONFIG_CRAMFS=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V3_ACL=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y -CONFIG_RPCSEC_GSS_SPKM3=m CONFIG_CIFS=m CONFIG_CIFS_XATTR=y CONFIG_CIFS_POSIX=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_MAC_PARTITION=y +CONFIG_NLS_DEFAULT="utf8" CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m 
-CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=m -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ASCII=m +CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m +CONFIG_NLS_UTF8=y CONFIG_CRC_T10DIF=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_MUTEXES=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set +CONFIG_DEBUG_STACK_USAGE=y CONFIG_LATENCYTOP=y -CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_IRQSOFF_TRACER=y CONFIG_SCHED_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_DEBUG_STACKOVERFLOW=y -CONFIG_DEBUG_STACK_USAGE=y CONFIG_CODE_PATCHING_SELFTEST=y CONFIG_FTR_FIXUP_SELFTEST=y CONFIG_MSI_BITMAP_SELFTEST=y @@ -371,16 +318,12 @@ CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m -CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_AES=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST6=m diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig index c2f4b4a86ec..7a5c15fcc7c 100644 --- a/arch/powerpc/configs/ps3_defconfig +++ b/arch/powerpc/configs/ps3_defconfig @@ -6,6 +6,7 @@ CONFIG_NR_CPUS=2 CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_EMBEDDED=y @@ -24,12 +25,13 @@ CONFIG_PS3_DISK=y CONFIG_PS3_ROM=y CONFIG_PS3_FLASH=y CONFIG_PS3_VRAM=m +CONFIG_PS3_LPM=m # CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set -CONFIG_HIGH_RES_TIMERS=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=y CONFIG_KEXEC=y # CONFIG_SPARSEMEM_VMEMMAP is not set +# CONFIG_COMPACTION is not set CONFIG_SCHED_SMT=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="" @@ -59,6 +61,7 @@ CONFIG_BT_BNEP_PROTO_FILTER=y CONFIG_BT_HIDP=m CONFIG_BT_HCIBTUSB=m CONFIG_CFG80211=m +CONFIG_CFG80211_WEXT=y CONFIG_MAC80211=m CONFIG_MAC80211_RC_PID=y # CONFIG_MAC80211_RC_MINSTREL is not set @@ -78,7 +81,6 @@ CONFIG_MD=y CONFIG_BLK_DEV_DM=m CONFIG_NETDEVICES=y # CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CHELSIO is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set @@ -119,21 +121,21 @@ CONFIG_SND=m # CONFIG_SND_DRIVERS is not set CONFIG_SND_USB_AUDIO=m CONFIG_HIDRAW=y -CONFIG_USB_HIDDEV=y CONFIG_HID_APPLE=m CONFIG_HID_BELKIN=m CONFIG_HID_CHERRY=m CONFIG_HID_EZKEY=m CONFIG_HID_TWINHAN=m CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m CONFIG_HID_MICROSOFT=m +CONFIG_HID_PS3REMOTE=m CONFIG_HID_SONY=m CONFIG_HID_SUNPLUS=m CONFIG_HID_SMARTJOYPLUS=m +CONFIG_USB_HIDDEV=y CONFIG_USB=m CONFIG_USB_ANNOUNCE_NEW_DEVICES=y -CONFIG_USB_DEVICEFS=y -# CONFIG_USB_DEVICE_CLASS is not set CONFIG_USB_SUSPEND=y CONFIG_USB_MON=m CONFIG_USB_EHCI_HCD=m @@ -158,8 +160,8 @@ CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_HUGETLBFS=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y CONFIG_ROOT_NFS=y CONFIG_CIFS=m CONFIG_NLS=y @@ -176,6 +178,7 @@ CONFIG_DEBUG_INFO=y CONFIG_DEBUG_WRITECOUNT=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_DEBUG_LIST=y 
+CONFIG_RCU_CPU_STALL_TIMEOUT=60 # CONFIG_FTRACE is not set CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_CRYPTO_CCM=m diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index 1f710a32ffa..c4dfbaf8b19 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -2,16 +2,19 @@ CONFIG_PPC64=y CONFIG_ALTIVEC=y CONFIG_VSX=y CONFIG_SMP=y -CONFIG_NR_CPUS=1024 +CONFIG_NR_CPUS=2048 CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_AUDITSYSCALL=y +CONFIG_IRQ_DOMAIN_DEBUG=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_TASKSTATS=y CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_AUDIT=y -CONFIG_AUDITSYSCALL=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_CGROUPS=y @@ -29,6 +32,8 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_EFI_PARTITION=y CONFIG_PPC_SPLPAR=y CONFIG_SCANLOG=m CONFIG_PPC_SMLPAR=y @@ -36,10 +41,9 @@ CONFIG_DTL=y # CONFIG_PPC_PMAC is not set CONFIG_RTAS_FLASH=m CONFIG_IBMEBUS=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y CONFIG_HZ_100=y CONFIG_BINFMT_MISC=m +CONFIG_PPC_TRANSACTIONAL_MEM=y CONFIG_HOTPLUG_CPU=y CONFIG_KEXEC=y CONFIG_IRQ_ALL_CPUS=y @@ -65,7 +69,6 @@ CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m # CONFIG_IPV6 is not set CONFIG_NETFILTER=y -CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_EVENTS=y CONFIG_NF_CT_PROTO_UDPLITE=m @@ -112,20 +115,15 @@ CONFIG_NETFILTER_XT_MATCH_U32=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_NF_NAT_SNMP_BASIC=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y CONFIG_PROC_DEVICETREE=y CONFIG_PARPORT=m CONFIG_PARPORT_PC=m @@ -146,7 +144,6 @@ CONFIG_CHR_DEV_SG=y CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_FC_ATTRS=y -CONFIG_SCSI_SAS_ATTRS=m CONFIG_SCSI_CXGB3_ISCSI=m CONFIG_SCSI_CXGB4_ISCSI=m CONFIG_SCSI_BNX2_ISCSI=m @@ -177,43 +174,36 @@ CONFIG_DM_SNAPSHOT=m CONFIG_DM_MIRROR=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m -CONFIG_NETDEVICES=y -CONFIG_DUMMY=m CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_NETCONSOLE=y +CONFIG_NETPOLL_TRAP=y CONFIG_TUN=m -CONFIG_NET_ETHERNET=y -CONFIG_NET_VENDOR_3COM=y CONFIG_VORTEX=y -CONFIG_IBMVETH=y -CONFIG_NET_PCI=y -CONFIG_PCNET32=y -CONFIG_E100=y CONFIG_ACENIC=m CONFIG_ACENIC_OMIT_TIGON_I=y -CONFIG_E1000=y -CONFIG_E1000E=y +CONFIG_PCNET32=y CONFIG_TIGON3=y -CONFIG_BNX2=m CONFIG_CHELSIO_T1=m -CONFIG_CHELSIO_T3=m -CONFIG_CHELSIO_T4=m +CONFIG_BE2NET=m +CONFIG_S2IO=m +CONFIG_IBMVETH=y CONFIG_EHEA=y -CONFIG_IXGBE=m +CONFIG_E100=y +CONFIG_E1000=y +CONFIG_E1000E=y CONFIG_IXGB=m -CONFIG_S2IO=m -CONFIG_MYRI10GE=m -CONFIG_NETXEN_NIC=m +CONFIG_IXGBE=m CONFIG_MLX4_EN=m +CONFIG_MYRI10GE=m CONFIG_QLGE=m -CONFIG_BE2NET=m +CONFIG_NETXEN_NIC=m CONFIG_PPP=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -CONFIG_PPP_DEFLATE=m CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m CONFIG_PPPOE=m -CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m # CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_EVDEV=m CONFIG_INPUT_MISC=y @@ -227,12 +217,9 @@ CONFIG_HVC_CONSOLE=y 
CONFIG_HVC_RTAS=y CONFIG_HVCS=m CONFIG_IBM_BSR=m -CONFIG_HW_RANDOM=m -CONFIG_HW_RANDOM_PSERIES=m CONFIG_GEN_RTC=y CONFIG_RAW_DRIVER=y CONFIG_MAX_RAW_DEVS=1024 -# CONFIG_HWMON is not set CONFIG_FB=y CONFIG_FIRMWARE_EDID=y CONFIG_FB_OF=y @@ -243,19 +230,17 @@ CONFIG_FB_MATROX_G=y CONFIG_FB_RADEON=y CONFIG_FB_IBM_GXT4500=y CONFIG_LCD_PLATFORM=m -CONFIG_DISPLAY_SUPPORT=y # CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y -CONFIG_USB_HIDDEV=y CONFIG_HID_GYRATION=y CONFIG_HID_PANTHERLORD=y CONFIG_HID_PETALYNX=y CONFIG_HID_SAMSUNG=y CONFIG_HID_SONY=y CONFIG_HID_SUNPLUS=y +CONFIG_USB_HIDDEV=y CONFIG_USB=y -CONFIG_USB_DEVICEFS=y CONFIG_USB_MON=m CONFIG_USB_EHCI_HCD=y # CONFIG_USB_EHCI_HCD_PPC_OF is not set @@ -293,7 +278,6 @@ CONFIG_JFS_POSIX_ACL=y CONFIG_JFS_SECURITY=y CONFIG_XFS_FS=m CONFIG_XFS_POSIX_ACL=y -CONFIG_OCFS2_FS=m CONFIG_BTRFS_FS=m CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_NILFS2_FS=m @@ -305,61 +289,49 @@ CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y CONFIG_HUGETLBFS=y CONFIG_CRAMFS=m CONFIG_SQUASHFS=m CONFIG_SQUASHFS_XATTR=y -CONFIG_SQUASHFS_ZLIB=y CONFIG_SQUASHFS_LZO=y CONFIG_SQUASHFS_XZ=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V3_ACL=y CONFIG_NFS_V4=y CONFIG_NFSD=m CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y -CONFIG_RPCSEC_GSS_SPKM3=m CONFIG_CIFS=m CONFIG_CIFS_XATTR=y CONFIG_CIFS_POSIX=y +CONFIG_NLS_DEFAULT="utf8" CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_UTF8=y CONFIG_CRC_T10DIF=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_LOCKUP_DETECTOR=y -CONFIG_DETECT_HUNG_TASK=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set +CONFIG_DEBUG_STACK_USAGE=y CONFIG_LATENCYTOP=y -CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_SCHED_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_DEBUG_STACKOVERFLOW=y -CONFIG_DEBUG_STACK_USAGE=y CONFIG_CODE_PATCHING_SELFTEST=y CONFIG_FTR_FIXUP_SELFTEST=y CONFIG_MSI_BITMAP_SELFTEST=y CONFIG_XMON=y CONFIG_XMON_DEFAULT=y -CONFIG_IRQ_DOMAIN_DEBUG=y CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_CCM=m -CONFIG_CRYPTO_GCM=m -CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SHA256=m -CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_AES=m CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_ARC4=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_KHAZAD=m @@ -369,7 +341,6 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_HW=y CONFIG_CRYPTO_DEV_NX=y CONFIG_CRYPTO_DEV_NX_ENCRYPT=m CONFIG_VIRTUALIZATION=y diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile new file mode 100644 index 00000000000..2926fb9c570 --- /dev/null +++ b/arch/powerpc/crypto/Makefile @@ -0,0 +1,9 @@ +# +# powerpc/crypto/Makefile +# +# Arch-specific CryptoAPI modules. +# + +obj-$(CONFIG_CRYPTO_SHA1_PPC) += sha1-powerpc.o + +sha1-powerpc-y := sha1-powerpc-asm.o sha1.o diff --git a/arch/powerpc/crypto/sha1-powerpc-asm.S b/arch/powerpc/crypto/sha1-powerpc-asm.S new file mode 100644 index 00000000000..125e1652006 --- /dev/null +++ b/arch/powerpc/crypto/sha1-powerpc-asm.S @@ -0,0 +1,179 @@ +/* + * SHA-1 implementation for PowerPC. + * + * Copyright (C) 2005 Paul Mackerras <paulus@samba.org> + */ + +#include <asm/ppc_asm.h> +#include <asm/asm-offsets.h> + +/* + * We roll the registers for T, A, B, C, D, E around on each + * iteration; T on iteration t is A on iteration t+1, and so on. 
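+ * The rolling is done arithmetically: RT(t) and RA(t+1) below expand
+ * to the same register number, so advancing from one iteration to the
+ * next costs no register-to-register moves.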
+ * We use registers 7 - 12 for this. + */ +#define RT(t) ((((t)+5)%6)+7) +#define RA(t) ((((t)+4)%6)+7) +#define RB(t) ((((t)+3)%6)+7) +#define RC(t) ((((t)+2)%6)+7) +#define RD(t) ((((t)+1)%6)+7) +#define RE(t) ((((t)+0)%6)+7) + +/* We use registers 16 - 31 for the W values */ +#define W(t) (((t)%16)+16) + +#define LOADW(t) \ + lwz W(t),(t)*4(r4) + +#define STEPD0_LOAD(t) \ + andc r0,RD(t),RB(t); \ + and r6,RB(t),RC(t); \ + rotlwi RT(t),RA(t),5; \ + or r6,r6,r0; \ + add r0,RE(t),r15; \ + add RT(t),RT(t),r6; \ + add r14,r0,W(t); \ + lwz W((t)+4),((t)+4)*4(r4); \ + rotlwi RB(t),RB(t),30; \ + add RT(t),RT(t),r14 + +#define STEPD0_UPDATE(t) \ + and r6,RB(t),RC(t); \ + andc r0,RD(t),RB(t); \ + rotlwi RT(t),RA(t),5; \ + rotlwi RB(t),RB(t),30; \ + or r6,r6,r0; \ + add r0,RE(t),r15; \ + xor r5,W((t)+4-3),W((t)+4-8); \ + add RT(t),RT(t),r6; \ + xor W((t)+4),W((t)+4-16),W((t)+4-14); \ + add r0,r0,W(t); \ + xor W((t)+4),W((t)+4),r5; \ + add RT(t),RT(t),r0; \ + rotlwi W((t)+4),W((t)+4),1 + +#define STEPD1(t) \ + xor r6,RB(t),RC(t); \ + rotlwi RT(t),RA(t),5; \ + rotlwi RB(t),RB(t),30; \ + xor r6,r6,RD(t); \ + add r0,RE(t),r15; \ + add RT(t),RT(t),r6; \ + add r0,r0,W(t); \ + add RT(t),RT(t),r0 + +#define STEPD1_UPDATE(t) \ + xor r6,RB(t),RC(t); \ + rotlwi RT(t),RA(t),5; \ + rotlwi RB(t),RB(t),30; \ + xor r6,r6,RD(t); \ + add r0,RE(t),r15; \ + xor r5,W((t)+4-3),W((t)+4-8); \ + add RT(t),RT(t),r6; \ + xor W((t)+4),W((t)+4-16),W((t)+4-14); \ + add r0,r0,W(t); \ + xor W((t)+4),W((t)+4),r5; \ + add RT(t),RT(t),r0; \ + rotlwi W((t)+4),W((t)+4),1 + +#define STEPD2_UPDATE(t) \ + and r6,RB(t),RC(t); \ + and r0,RB(t),RD(t); \ + rotlwi RT(t),RA(t),5; \ + or r6,r6,r0; \ + rotlwi RB(t),RB(t),30; \ + and r0,RC(t),RD(t); \ + xor r5,W((t)+4-3),W((t)+4-8); \ + or r6,r6,r0; \ + xor W((t)+4),W((t)+4-16),W((t)+4-14); \ + add r0,RE(t),r15; \ + add RT(t),RT(t),r6; \ + add r0,r0,W(t); \ + xor W((t)+4),W((t)+4),r5; \ + add RT(t),RT(t),r0; \ + rotlwi W((t)+4),W((t)+4),1 + +#define STEP0LD4(t) \ + STEPD0_LOAD(t); \ + STEPD0_LOAD((t)+1); \ + STEPD0_LOAD((t)+2); \ + STEPD0_LOAD((t)+3) + +#define STEPUP4(t, fn) \ + STEP##fn##_UPDATE(t); \ + STEP##fn##_UPDATE((t)+1); \ + STEP##fn##_UPDATE((t)+2); \ + STEP##fn##_UPDATE((t)+3) + +#define STEPUP20(t, fn) \ + STEPUP4(t, fn); \ + STEPUP4((t)+4, fn); \ + STEPUP4((t)+8, fn); \ + STEPUP4((t)+12, fn); \ + STEPUP4((t)+16, fn) + +_GLOBAL(powerpc_sha_transform) + PPC_STLU r1,-INT_FRAME_SIZE(r1) + SAVE_8GPRS(14, r1) + SAVE_10GPRS(22, r1) + + /* Load up A - E */ + lwz RA(0),0(r3) /* A */ + lwz RB(0),4(r3) /* B */ + lwz RC(0),8(r3) /* C */ + lwz RD(0),12(r3) /* D */ + lwz RE(0),16(r3) /* E */ + + LOADW(0) + LOADW(1) + LOADW(2) + LOADW(3) + + lis r15,0x5a82 /* K0-19 */ + ori r15,r15,0x7999 + STEP0LD4(0) + STEP0LD4(4) + STEP0LD4(8) + STEPUP4(12, D0) + STEPUP4(16, D0) + + lis r15,0x6ed9 /* K20-39 */ + ori r15,r15,0xeba1 + STEPUP20(20, D1) + + lis r15,0x8f1b /* K40-59 */ + ori r15,r15,0xbcdc + STEPUP20(40, D2) + + lis r15,0xca62 /* K60-79 */ + ori r15,r15,0xc1d6 + STEPUP4(60, D1) + STEPUP4(64, D1) + STEPUP4(68, D1) + STEPUP4(72, D1) + lwz r20,16(r3) + STEPD1(76) + lwz r19,12(r3) + STEPD1(77) + lwz r18,8(r3) + STEPD1(78) + lwz r17,4(r3) + STEPD1(79) + + lwz r16,0(r3) + add r20,RE(80),r20 + add RD(0),RD(80),r19 + add RC(0),RC(80),r18 + add RB(0),RB(80),r17 + add RA(0),RA(80),r16 + mr RE(0),r20 + stw RA(0),0(r3) + stw RB(0),4(r3) + stw RC(0),8(r3) + stw RD(0),12(r3) + stw RE(0),16(r3) + + REST_8GPRS(14, r1) + REST_10GPRS(22, r1) + addi r1,r1,INT_FRAME_SIZE + blr diff --git a/arch/powerpc/crypto/sha1.c 
b/arch/powerpc/crypto/sha1.c new file mode 100644 index 00000000000..f9e8b9491ef --- /dev/null +++ b/arch/powerpc/crypto/sha1.c @@ -0,0 +1,157 @@ +/* + * Cryptographic API. + * + * powerpc implementation of the SHA1 Secure Hash Algorithm. + * + * Derived from cryptoapi implementation, adapted for in-place + * scatterlist interface. + * + * Derived from "crypto/sha1.c" + * Copyright (c) Alan Smithee. + * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> + * Copyright (c) Jean-Francois Dive <jef@linuxbe.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#include <crypto/internal/hash.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mm.h> +#include <linux/cryptohash.h> +#include <linux/types.h> +#include <crypto/sha.h> +#include <asm/byteorder.h> + +extern void powerpc_sha_transform(u32 *state, const u8 *src, u32 *temp); + +static int sha1_init(struct shash_desc *desc) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + + *sctx = (struct sha1_state){ + .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, + }; + + return 0; +} + +static int sha1_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + unsigned int partial, done; + const u8 *src; + + partial = sctx->count & 0x3f; + sctx->count += len; + done = 0; + src = data; + + if ((partial + len) > 63) { + u32 temp[SHA_WORKSPACE_WORDS]; + + if (partial) { + done = -partial; + memcpy(sctx->buffer + partial, data, done + 64); + src = sctx->buffer; + } + + do { + powerpc_sha_transform(sctx->state, src, temp); + done += 64; + src = data + done; + } while (done + 63 < len); + + memset(temp, 0, sizeof(temp)); + partial = 0; + } + memcpy(sctx->buffer + partial, src, len - done); + + return 0; +} + + +/* Add padding and return the message digest. */ +static int sha1_final(struct shash_desc *desc, u8 *out) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + __be32 *dst = (__be32 *)out; + u32 i, index, padlen; + __be64 bits; + static const u8 padding[64] = { 0x80, }; + + bits = cpu_to_be64(sctx->count << 3); + + /* Pad out to 56 mod 64 */ + index = sctx->count & 0x3f; + padlen = (index < 56) ? 
(56 - index) : ((64+56) - index); + sha1_update(desc, padding, padlen); + + /* Append length */ + sha1_update(desc, (const u8 *)&bits, sizeof(bits)); + + /* Store state in digest */ + for (i = 0; i < 5; i++) + dst[i] = cpu_to_be32(sctx->state[i]); + + /* Wipe context */ + memset(sctx, 0, sizeof *sctx); + + return 0; +} + +static int sha1_export(struct shash_desc *desc, void *out) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + + memcpy(out, sctx, sizeof(*sctx)); + return 0; +} + +static int sha1_import(struct shash_desc *desc, const void *in) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + + memcpy(sctx, in, sizeof(*sctx)); + return 0; +} + +static struct shash_alg alg = { + .digestsize = SHA1_DIGEST_SIZE, + .init = sha1_init, + .update = sha1_update, + .final = sha1_final, + .export = sha1_export, + .import = sha1_import, + .descsize = sizeof(struct sha1_state), + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "sha1", + .cra_driver_name= "sha1-powerpc", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static int __init sha1_powerpc_mod_init(void) +{ + return crypto_register_shash(&alg); +} + +static void __exit sha1_powerpc_mod_fini(void) +{ + crypto_unregister_shash(&alg); +} + +module_init(sha1_powerpc_mod_init); +module_exit(sha1_powerpc_mod_fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); + +MODULE_ALIAS("sha1-powerpc"); diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index a4fe15e33c6..650757c300d 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -1,4 +1,4 @@ - generic-y += clkdev.h generic-y += rwsem.h +generic-y += trace_clock.h diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h index dc2cf9c6d9e..08bd299c75b 100644 --- a/arch/powerpc/include/asm/bitops.h +++ b/arch/powerpc/include/asm/bitops.h @@ -52,10 +52,6 @@ #define smp_mb__before_clear_bit() smp_mb() #define smp_mb__after_clear_bit() smp_mb() -#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) -#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) -#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) - /* Macro for generating the ***_bits() functions */ #define DEFINE_BITOP(fn, op, prefix, postfix) \ static __inline__ void fn(unsigned long mask, \ @@ -83,22 +79,22 @@ DEFINE_BITOP(change_bits, xor, "", "") static __inline__ void set_bit(int nr, volatile unsigned long *addr) { - set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)); + set_bits(BIT_MASK(nr), addr + BIT_WORD(nr)); } static __inline__ void clear_bit(int nr, volatile unsigned long *addr) { - clear_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)); + clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr)); } static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr) { - clear_bits_unlock(BITOP_MASK(nr), addr + BITOP_WORD(nr)); + clear_bits_unlock(BIT_MASK(nr), addr + BIT_WORD(nr)); } static __inline__ void change_bit(int nr, volatile unsigned long *addr) { - change_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)); + change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)); } /* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output @@ -136,26 +132,26 @@ DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER, static __inline__ int test_and_set_bit(unsigned long nr, volatile unsigned long *addr) { - return test_and_set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0; + return test_and_set_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 
0; } static __inline__ int test_and_set_bit_lock(unsigned long nr, volatile unsigned long *addr) { - return test_and_set_bits_lock(BITOP_MASK(nr), - addr + BITOP_WORD(nr)) != 0; + return test_and_set_bits_lock(BIT_MASK(nr), + addr + BIT_WORD(nr)) != 0; } static __inline__ int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) { - return test_and_clear_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0; + return test_and_clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0; } static __inline__ int test_and_change_bit(unsigned long nr, volatile unsigned long *addr) { - return test_and_change_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0; + return test_and_change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0; } #include <asm-generic/bitops/non-atomic.h> @@ -280,61 +276,8 @@ unsigned long __arch_hweight64(__u64 w); #include <asm-generic/bitops/find.h> /* Little-endian versions */ +#include <asm-generic/bitops/le.h> -static __inline__ int test_bit_le(unsigned long nr, - __const__ void *addr) -{ - __const__ unsigned char *tmp = (__const__ unsigned char *) addr; - return (tmp[nr >> 3] >> (nr & 7)) & 1; -} - -static inline void set_bit_le(int nr, void *addr) -{ - set_bit(nr ^ BITOP_LE_SWIZZLE, addr); -} - -static inline void clear_bit_le(int nr, void *addr) -{ - clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); -} - -static inline void __set_bit_le(int nr, void *addr) -{ - __set_bit(nr ^ BITOP_LE_SWIZZLE, addr); -} - -static inline void __clear_bit_le(int nr, void *addr) -{ - __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); -} - -static inline int test_and_set_bit_le(int nr, void *addr) -{ - return test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr); -} - -static inline int test_and_clear_bit_le(int nr, void *addr) -{ - return test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); -} - -static inline int __test_and_set_bit_le(int nr, void *addr) -{ - return __test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr); -} - -static inline int __test_and_clear_bit_le(int nr, void *addr) -{ - return __test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); -} - -#define find_first_zero_bit_le(addr, size) \ - find_next_zero_bit_le((addr), (size), 0) -unsigned long find_next_zero_bit_le(const void *addr, - unsigned long size, unsigned long offset); - -unsigned long find_next_bit_le(const void *addr, - unsigned long size, unsigned long offset); /* Bitmap functions for the ext2 filesystem */ #include <asm-generic/bitops/ext2-atomic-setbit.h> diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 21a0687b8c4..fb3245e928e 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -106,37 +106,37 @@ extern const char *powerpc_base_platform; /* CPU kernel features */ /* Retain the 32b definitions all use bottom half of word */ -#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0000000000000001) -#define CPU_FTR_L2CR ASM_CONST(0x0000000000000002) -#define CPU_FTR_SPEC7450 ASM_CONST(0x0000000000000004) -#define CPU_FTR_ALTIVEC ASM_CONST(0x0000000000000008) -#define CPU_FTR_TAU ASM_CONST(0x0000000000000010) -#define CPU_FTR_CAN_DOZE ASM_CONST(0x0000000000000020) -#define CPU_FTR_USE_TB ASM_CONST(0x0000000000000040) -#define CPU_FTR_L2CSR ASM_CONST(0x0000000000000080) -#define CPU_FTR_601 ASM_CONST(0x0000000000000100) -#define CPU_FTR_DBELL ASM_CONST(0x0000000000000200) -#define CPU_FTR_CAN_NAP ASM_CONST(0x0000000000000400) -#define CPU_FTR_L3CR ASM_CONST(0x0000000000000800) -#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x0000000000001000) -#define CPU_FTR_NAP_DISABLE_L2_PR 
ASM_CONST(0x0000000000002000) -#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x0000000000004000) -#define CPU_FTR_NO_DPM ASM_CONST(0x0000000000008000) -#define CPU_FTR_476_DD2 ASM_CONST(0x0000000000010000) -#define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000) -#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000) -#define CPU_FTR_DEBUG_LVL_EXC ASM_CONST(0x0000000000080000) -#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000) -#define CPU_FTR_PPC_LE ASM_CONST(0x0000000000200000) -#define CPU_FTR_REAL_LE ASM_CONST(0x0000000000400000) -#define CPU_FTR_FPU_UNAVAILABLE ASM_CONST(0x0000000000800000) -#define CPU_FTR_UNIFIED_ID_CACHE ASM_CONST(0x0000000001000000) -#define CPU_FTR_SPE ASM_CONST(0x0000000002000000) -#define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x0000000004000000) -#define CPU_FTR_LWSYNC ASM_CONST(0x0000000008000000) -#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000010000000) -#define CPU_FTR_INDEXED_DCR ASM_CONST(0x0000000020000000) -#define CPU_FTR_EMB_HV ASM_CONST(0x0000000040000000) +#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x00000001) +#define CPU_FTR_L2CR ASM_CONST(0x00000002) +#define CPU_FTR_SPEC7450 ASM_CONST(0x00000004) +#define CPU_FTR_ALTIVEC ASM_CONST(0x00000008) +#define CPU_FTR_TAU ASM_CONST(0x00000010) +#define CPU_FTR_CAN_DOZE ASM_CONST(0x00000020) +#define CPU_FTR_USE_TB ASM_CONST(0x00000040) +#define CPU_FTR_L2CSR ASM_CONST(0x00000080) +#define CPU_FTR_601 ASM_CONST(0x00000100) +#define CPU_FTR_DBELL ASM_CONST(0x00000200) +#define CPU_FTR_CAN_NAP ASM_CONST(0x00000400) +#define CPU_FTR_L3CR ASM_CONST(0x00000800) +#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x00001000) +#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x00002000) +#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x00004000) +#define CPU_FTR_NO_DPM ASM_CONST(0x00008000) +#define CPU_FTR_476_DD2 ASM_CONST(0x00010000) +#define CPU_FTR_NEED_COHERENT ASM_CONST(0x00020000) +#define CPU_FTR_NO_BTIC ASM_CONST(0x00040000) +#define CPU_FTR_DEBUG_LVL_EXC ASM_CONST(0x00080000) +#define CPU_FTR_NODSISRALIGN ASM_CONST(0x00100000) +#define CPU_FTR_PPC_LE ASM_CONST(0x00200000) +#define CPU_FTR_REAL_LE ASM_CONST(0x00400000) +#define CPU_FTR_FPU_UNAVAILABLE ASM_CONST(0x00800000) +#define CPU_FTR_UNIFIED_ID_CACHE ASM_CONST(0x01000000) +#define CPU_FTR_SPE ASM_CONST(0x02000000) +#define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x04000000) +#define CPU_FTR_LWSYNC ASM_CONST(0x08000000) +#define CPU_FTR_NOEXECUTE ASM_CONST(0x10000000) +#define CPU_FTR_INDEXED_DCR ASM_CONST(0x20000000) +#define CPU_FTR_EMB_HV ASM_CONST(0x40000000) /* * Add the 64-bit processor unique features in the top half of the word; @@ -148,29 +148,33 @@ extern const char *powerpc_base_platform; #define LONG_ASM_CONST(x) 0 #endif -#define CPU_FTR_HVMODE LONG_ASM_CONST(0x0000000200000000) -#define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000400000000) -#define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000800000000) -#define CPU_FTR_CFAR LONG_ASM_CONST(0x0000001000000000) -#define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000) -#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000) -#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000008000000000) -#define CPU_FTR_SMT LONG_ASM_CONST(0x0000010000000000) -#define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000200000000000) -#define CPU_FTR_PURR LONG_ASM_CONST(0x0000400000000000) -#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000800000000000) -#define CPU_FTR_SPURR LONG_ASM_CONST(0x0001000000000000) -#define CPU_FTR_DSCR LONG_ASM_CONST(0x0002000000000000) -#define CPU_FTR_VSX LONG_ASM_CONST(0x0010000000000000) -#define CPU_FTR_SAO 
LONG_ASM_CONST(0x0020000000000000) -#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0040000000000000) -#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0080000000000000) -#define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0100000000000000) -#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0200000000000000) -#define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0400000000000000) -#define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0800000000000000) -#define CPU_FTR_ICSWX LONG_ASM_CONST(0x1000000000000000) -#define CPU_FTR_VMX_COPY LONG_ASM_CONST(0x2000000000000000) +#define CPU_FTR_HVMODE LONG_ASM_CONST(0x0000000100000000) +#define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000200000000) +#define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000400000000) +#define CPU_FTR_CFAR LONG_ASM_CONST(0x0000000800000000) +#define CPU_FTR_IABR LONG_ASM_CONST(0x0000001000000000) +#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000002000000000) +#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000004000000000) +#define CPU_FTR_SMT LONG_ASM_CONST(0x0000008000000000) +#define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000010000000000) +#define CPU_FTR_PURR LONG_ASM_CONST(0x0000020000000000) +#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000040000000000) +#define CPU_FTR_SPURR LONG_ASM_CONST(0x0000080000000000) +#define CPU_FTR_DSCR LONG_ASM_CONST(0x0000100000000000) +#define CPU_FTR_VSX LONG_ASM_CONST(0x0000200000000000) +#define CPU_FTR_SAO LONG_ASM_CONST(0x0000400000000000) +#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0000800000000000) +#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0001000000000000) +#define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0002000000000000) +#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0004000000000000) +#define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0008000000000000) +#define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0010000000000000) +#define CPU_FTR_ICSWX LONG_ASM_CONST(0x0020000000000000) +#define CPU_FTR_VMX_COPY LONG_ASM_CONST(0x0040000000000000) +#define CPU_FTR_TM LONG_ASM_CONST(0x0080000000000000) +#define CPU_FTR_BCTAR LONG_ASM_CONST(0x0100000000000000) +#define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0200000000000000) +#define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000) #ifndef __ASSEMBLY__ @@ -216,6 +220,13 @@ extern const char *powerpc_base_platform; #define PPC_FEATURE_HAS_EFP_DOUBLE_COMP 0 #endif +/* We only set the TM feature if the kernel was compiled with TM supprt */ +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +#define CPU_FTR_TM_COMP CPU_FTR_TM +#else +#define CPU_FTR_TM_COMP 0 +#endif + /* We need to mark all pages as being coherent if we're SMP or we have a * 74[45]x and an MPC107 host bridge. Also 83xx and PowerQUICC II * require it for PCI "streaming/prefetch" to work properly. 
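The CPU_FTR_TM_COMP definition added above is the usual compile-time gating idiom: when the kernel is built without CONFIG_PPC_TRANSACTIONAL_MEM the _COMP alias collapses to 0, so the TM bit drops out of any CPU_FTRS_* mask that includes it and cpu_has_feature(CPU_FTR_TM) stays false on such a build. A minimal stand-alone sketch of the same pattern follows; the feature names and bit positions are illustrative only, not the kernel's real values.

#include <stdio.h>

/* Illustrative feature bits -- not the kernel's real values. */
#define FTR_SMT		(1ULL << 0)
#define FTR_VSX		(1ULL << 1)
#define FTR_TM		(1ULL << 2)

/*
 * The _COMP alias folds to 0 when support isn't compiled in, so the
 * bit vanishes from any mask built from it.
 */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
#define FTR_TM_COMP	FTR_TM
#else
#define FTR_TM_COMP	0
#endif

/* Per-CPU mask picks up TM only when the kernel was built with it. */
#define FTRS_POWER8	(FTR_SMT | FTR_VSX | FTR_TM_COMP)

static unsigned long long cur_cpu_ftrs = FTRS_POWER8;

static int has_feature(unsigned long long ftr)
{
	return (cur_cpu_ftrs & ftr) != 0;
}

int main(void)
{
	/* Prints 1 only when built with -DCONFIG_PPC_TRANSACTIONAL_MEM. */
	printf("TM available: %d\n", has_feature(FTR_TM));
	return 0;
}

Built with -DCONFIG_PPC_TRANSACTIONAL_MEM the sketch reports the feature as present; without it the bit is compiled away entirely, which is cheaper than testing the config option at run time.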
@@ -400,7 +411,18 @@ extern const char *powerpc_base_platform; CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ - CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY) + CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | \ + CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR) +#define CPU_FTRS_POWER8 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ + CPU_FTR_MMCRA | CPU_FTR_SMT | \ + CPU_FTR_COHERENT_ICACHE | \ + CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ + CPU_FTR_DSCR | CPU_FTR_SAO | \ + CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ + CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ + CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | CPU_FTR_BCTAR | \ + CPU_FTR_TM_COMP) #define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ @@ -421,8 +443,8 @@ extern const char *powerpc_base_platform; #define CPU_FTRS_POSSIBLE \ (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \ CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \ - CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \ - CPU_FTR_VSX) + CPU_FTRS_POWER7 | CPU_FTRS_POWER8 | CPU_FTRS_CELL | \ + CPU_FTRS_PA6T | CPU_FTR_VSX) #endif #else enum { diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index 487d46ff68a..607559ab271 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h @@ -8,7 +8,7 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * - * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in + * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in * the same units as the timebase. Otherwise we measure cpu time * in jiffies using the generic definitions. */ @@ -16,7 +16,7 @@ #ifndef __POWERPC_CPUTIME_H #define __POWERPC_CPUTIME_H -#ifndef CONFIG_VIRT_CPU_ACCOUNTING +#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE #include <asm-generic/cputime.h> #ifdef __KERNEL__ static inline void setup_cputime_one_jiffy(void) { } @@ -228,6 +228,8 @@ static inline cputime_t clock_t_to_cputime(const unsigned long clk) #define cputime64_to_clock_t(ct) cputime_to_clock_t((cputime_t)(ct)) +static inline void arch_vtime_task_switch(struct task_struct *tsk) { } + #endif /* __KERNEL__ */ -#endif /* CONFIG_VIRT_CPU_ACCOUNTING */ +#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ #endif /* __POWERPC_CPUTIME_H */ diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h index 154c067761b..5fa6b20eba1 100644 --- a/arch/powerpc/include/asm/dbell.h +++ b/arch/powerpc/include/asm/dbell.h @@ -1,5 +1,5 @@ /* - * Copyright 2009 Freescale Semicondutor, Inc. + * Copyright 2009 Freescale Semiconductor, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -28,8 +28,36 @@ enum ppc_dbell { PPC_G_DBELL = 2, /* guest doorbell */ PPC_G_DBELL_CRIT = 3, /* guest critical doorbell */ PPC_G_DBELL_MC = 4, /* guest mcheck doorbell */ + PPC_DBELL_SERVER = 5, /* doorbell on server */ }; +#ifdef CONFIG_PPC_BOOK3S + +#define PPC_DBELL_MSGTYPE PPC_DBELL_SERVER +#define SPRN_DOORBELL_CPUTAG SPRN_TIR +#define PPC_DBELL_TAG_MASK 0x7f + +static inline void _ppc_msgsnd(u32 msg) +{ + if (cpu_has_feature(CPU_FTR_HVMODE)) + __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg)); + else + __asm__ __volatile__ (PPC_MSGSNDP(%0) : : "r" (msg)); +} + +#else /* CONFIG_PPC_BOOK3S */ + +#define PPC_DBELL_MSGTYPE PPC_DBELL +#define SPRN_DOORBELL_CPUTAG SPRN_PIR +#define PPC_DBELL_TAG_MASK 0x3fff + +static inline void _ppc_msgsnd(u32 msg) +{ + __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg)); +} + +#endif /* CONFIG_PPC_BOOK3S */ + extern void doorbell_cause_ipi(int cpu, unsigned long data); extern void doorbell_exception(struct pt_regs *regs); extern void doorbell_setup_this_cpu(void); @@ -39,7 +67,7 @@ static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag) u32 msg = PPC_DBELL_TYPE(type) | (flags & PPC_DBELL_MSG_BRDCAST) | (tag & 0x07ffffff); - __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg)); + _ppc_msgsnd(msg); } #endif /* _ASM_POWERPC_DBELL_H */ diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h index 32de2577bb6..d2516308ed1 100644 --- a/arch/powerpc/include/asm/debug.h +++ b/arch/powerpc/include/asm/debug.h @@ -4,6 +4,8 @@ #ifndef _ASM_POWERPC_DEBUG_H #define _ASM_POWERPC_DEBUG_H +#include <asm/hw_breakpoint.h> + struct pt_regs; extern struct dentry *powerpc_debugfs_root; @@ -15,7 +17,7 @@ extern int (*__debugger_ipi)(struct pt_regs *regs); extern int (*__debugger_bpt)(struct pt_regs *regs); extern int (*__debugger_sstep)(struct pt_regs *regs); extern int (*__debugger_iabr_match)(struct pt_regs *regs); -extern int (*__debugger_dabr_match)(struct pt_regs *regs); +extern int (*__debugger_break_match)(struct pt_regs *regs); extern int (*__debugger_fault_handler)(struct pt_regs *regs); #define DEBUGGER_BOILERPLATE(__NAME) \ @@ -31,7 +33,7 @@ DEBUGGER_BOILERPLATE(debugger_ipi) DEBUGGER_BOILERPLATE(debugger_bpt) DEBUGGER_BOILERPLATE(debugger_sstep) DEBUGGER_BOILERPLATE(debugger_iabr_match) -DEBUGGER_BOILERPLATE(debugger_dabr_match) +DEBUGGER_BOILERPLATE(debugger_break_match) DEBUGGER_BOILERPLATE(debugger_fault_handler) #else @@ -40,17 +42,18 @@ static inline int debugger_ipi(struct pt_regs *regs) { return 0; } static inline int debugger_bpt(struct pt_regs *regs) { return 0; } static inline int debugger_sstep(struct pt_regs *regs) { return 0; } static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; } -static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; } +static inline int debugger_break_match(struct pt_regs *regs) { return 0; } static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; } #endif -extern int set_dabr(unsigned long dabr, unsigned long dabrx); +int set_breakpoint(struct arch_hw_breakpoint *brk); #ifdef CONFIG_PPC_ADV_DEBUG_REGS extern void do_send_trap(struct pt_regs *regs, unsigned long address, unsigned long error_code, int signal_code, int brkpt); #else -extern void do_dabr(struct pt_regs *regs, unsigned long address, - unsigned long error_code); + +extern void do_break(struct pt_regs *regs, unsigned long address, + unsigned long 
error_code); #endif #endif /* _ASM_POWERPC_DEBUG_H */ diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 78160874809..e27e9ad6818 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -172,6 +172,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { struct dma_map_ops *dma_ops = get_dma_ops(dev); + debug_dma_mapping_error(dev, dma_addr); if (dma_ops->mapping_error) return dma_ops->mapping_error(dev, dma_addr); diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index b0ef73882b3..a80e32b46c1 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -183,7 +183,7 @@ static inline void eeh_unlock(void) #define EEH_MAX_ALLOWED_FREEZES 5 typedef void *(*eeh_traverse_func)(void *data, void *flag); -int __devinit eeh_phb_pe_create(struct pci_controller *phb); +int eeh_phb_pe_create(struct pci_controller *phb); int eeh_add_to_parent_pe(struct eeh_dev *edev); int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe); void *eeh_pe_dev_traverse(struct eeh_pe *root, @@ -191,8 +191,8 @@ void *eeh_pe_dev_traverse(struct eeh_pe *root, void eeh_pe_restore_bars(struct eeh_pe *pe); struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe); -void * __devinit eeh_dev_init(struct device_node *dn, void *data); -void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb); +void *eeh_dev_init(struct device_node *dn, void *data); +void eeh_dev_phb_init_dynamic(struct pci_controller *phb); int __init eeh_ops_register(struct eeh_ops *ops); int __exit eeh_ops_unregister(const char *name); unsigned long eeh_check_failure(const volatile void __iomem *token, @@ -201,6 +201,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev); void __init eeh_addr_cache_build(void); void eeh_add_device_tree_early(struct device_node *); void eeh_add_device_tree_late(struct pci_bus *); +void eeh_add_sysfs_files(struct pci_bus *); void eeh_remove_bus_device(struct pci_dev *, int); /** @@ -240,6 +241,8 @@ static inline void eeh_add_device_tree_early(struct device_node *dn) { } static inline void eeh_add_device_tree_late(struct pci_bus *bus) { } +static inline void eeh_add_sysfs_files(struct pci_bus *bus) { } + static inline void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe) { } static inline void eeh_lock(void) { } diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index 6abf0a16323..ac9790fc383 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -103,8 +103,6 @@ do { \ # define elf_read_implies_exec(ex, exec_stk) (is_32bit_task() ? 
\ (exec_stk == EXSTACK_DEFAULT) : 0) #else -# define SET_PERSONALITY(ex) \ - set_personality(PER_LINUX | (current->personality & (~PER_MASK))) # define elf_read_implies_exec(ex, exec_stk) (exec_stk == EXSTACK_DEFAULT) #endif /* __powerpc64__ */ diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h index bf2c06c3387..d3d634274d2 100644 --- a/arch/powerpc/include/asm/epapr_hcalls.h +++ b/arch/powerpc/include/asm/epapr_hcalls.h @@ -50,64 +50,13 @@ #ifndef _EPAPR_HCALLS_H #define _EPAPR_HCALLS_H +#include <uapi/asm/epapr_hcalls.h> + +#ifndef __ASSEMBLY__ #include <linux/types.h> #include <linux/errno.h> #include <asm/byteorder.h> -#define EV_BYTE_CHANNEL_SEND 1 -#define EV_BYTE_CHANNEL_RECEIVE 2 -#define EV_BYTE_CHANNEL_POLL 3 -#define EV_INT_SET_CONFIG 4 -#define EV_INT_GET_CONFIG 5 -#define EV_INT_SET_MASK 6 -#define EV_INT_GET_MASK 7 -#define EV_INT_IACK 9 -#define EV_INT_EOI 10 -#define EV_INT_SEND_IPI 11 -#define EV_INT_SET_TASK_PRIORITY 12 -#define EV_INT_GET_TASK_PRIORITY 13 -#define EV_DOORBELL_SEND 14 -#define EV_MSGSND 15 -#define EV_IDLE 16 - -/* vendor ID: epapr */ -#define EV_LOCAL_VENDOR_ID 0 /* for private use */ -#define EV_EPAPR_VENDOR_ID 1 -#define EV_FSL_VENDOR_ID 2 /* Freescale Semiconductor */ -#define EV_IBM_VENDOR_ID 3 /* IBM */ -#define EV_GHS_VENDOR_ID 4 /* Green Hills Software */ -#define EV_ENEA_VENDOR_ID 5 /* Enea */ -#define EV_WR_VENDOR_ID 6 /* Wind River Systems */ -#define EV_AMCC_VENDOR_ID 7 /* Applied Micro Circuits */ -#define EV_KVM_VENDOR_ID 42 /* KVM */ - -/* The max number of bytes that a byte channel can send or receive per call */ -#define EV_BYTE_CHANNEL_MAX_BYTES 16 - - -#define _EV_HCALL_TOKEN(id, num) (((id) << 16) | (num)) -#define EV_HCALL_TOKEN(hcall_num) _EV_HCALL_TOKEN(EV_EPAPR_VENDOR_ID, hcall_num) - -/* epapr error codes */ -#define EV_EPERM 1 /* Operation not permitted */ -#define EV_ENOENT 2 /* Entry Not Found */ -#define EV_EIO 3 /* I/O error occured */ -#define EV_EAGAIN 4 /* The operation had insufficient - * resources to complete and should be - * retried - */ -#define EV_ENOMEM 5 /* There was insufficient memory to - * complete the operation */ -#define EV_EFAULT 6 /* Bad guest address */ -#define EV_ENODEV 7 /* No such device */ -#define EV_EINVAL 8 /* An argument supplied to the hcall - was out of range or invalid */ -#define EV_INTERNAL 9 /* An internal error occured */ -#define EV_CONFIG 10 /* A configuration error was detected */ -#define EV_INVALID_STATE 11 /* The object is in an invalid state */ -#define EV_UNIMPLEMENTED 12 /* Unimplemented hypercall */ -#define EV_BUFFER_OVERFLOW 13 /* Caller-supplied buffer too small */ - /* * Hypercall register clobber list * @@ -193,7 +142,7 @@ static inline unsigned int ev_int_set_config(unsigned int interrupt, r5 = priority; r6 = destination; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6) : : EV_HCALL_CLOBBERS4 ); @@ -222,7 +171,7 @@ static inline unsigned int ev_int_get_config(unsigned int interrupt, r11 = EV_HCALL_TOKEN(EV_INT_GET_CONFIG); r3 = interrupt; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5), "=r" (r6) : : EV_HCALL_CLOBBERS4 ); @@ -252,7 +201,7 @@ static inline unsigned int ev_int_set_mask(unsigned int interrupt, r3 = interrupt; r4 = mask; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3), "+r" (r4) : : EV_HCALL_CLOBBERS2 ); @@ -277,7 
+226,7 @@ static inline unsigned int ev_int_get_mask(unsigned int interrupt, r11 = EV_HCALL_TOKEN(EV_INT_GET_MASK); r3 = interrupt; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3), "=r" (r4) : : EV_HCALL_CLOBBERS2 ); @@ -305,7 +254,7 @@ static inline unsigned int ev_int_eoi(unsigned int interrupt) r11 = EV_HCALL_TOKEN(EV_INT_EOI); r3 = interrupt; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3) : : EV_HCALL_CLOBBERS1 ); @@ -344,7 +293,7 @@ static inline unsigned int ev_byte_channel_send(unsigned int handle, r7 = be32_to_cpu(p[2]); r8 = be32_to_cpu(p[3]); - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), "+r" (r8) : : EV_HCALL_CLOBBERS6 @@ -383,7 +332,7 @@ static inline unsigned int ev_byte_channel_receive(unsigned int handle, r3 = handle; r4 = *count; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3), "+r" (r4), "=r" (r5), "=r" (r6), "=r" (r7), "=r" (r8) : : EV_HCALL_CLOBBERS6 @@ -421,7 +370,7 @@ static inline unsigned int ev_byte_channel_poll(unsigned int handle, r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_POLL); r3 = handle; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5) : : EV_HCALL_CLOBBERS3 ); @@ -454,7 +403,7 @@ static inline unsigned int ev_int_iack(unsigned int handle, r11 = EV_HCALL_TOKEN(EV_INT_IACK); r3 = handle; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3), "=r" (r4) : : EV_HCALL_CLOBBERS2 ); @@ -478,7 +427,7 @@ static inline unsigned int ev_doorbell_send(unsigned int handle) r11 = EV_HCALL_TOKEN(EV_DOORBELL_SEND); r3 = handle; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3) : : EV_HCALL_CLOBBERS1 ); @@ -498,12 +447,12 @@ static inline unsigned int ev_idle(void) r11 = EV_HCALL_TOKEN(EV_IDLE); - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "=r" (r3) : : EV_HCALL_CLOBBERS1 ); return r3; } - -#endif +#endif /* !__ASSEMBLY__ */ +#endif /* _EPAPR_HCALLS_H */ diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index a43c1473915..05e6d2ee1db 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -47,6 +47,39 @@ #define EX_R3 64 #define EX_LR 72 #define EX_CFAR 80 +#define EX_PPR 88 /* SMT thread status register (priority) */ + +#ifdef CONFIG_RELOCATABLE +#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \ + ld r12,PACAKBASE(r13); /* get high part of &label */ \ + mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \ + LOAD_HANDLER(r12,label); \ + mtlr r12; \ + mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ + li r10,MSR_RI; \ + mtmsrd r10,1; /* Set RI (EE=0) */ \ + blr; +#else +/* If not relocatable, we can jump directly -- and save messing with LR */ +#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \ + mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \ + mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ + li r10,MSR_RI; \ + mtmsrd r10,1; /* Set RI (EE=0) */ \ + b label; +#endif +#define EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \ + __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \ + +/* + * As EXCEPTION_PROLOG_PSERIES(), except we've already got relocation on + * so no need to rfid. 
Save lr in case we're CONFIG_RELOCATABLE, in which + * case EXCEPTION_RELON_PROLOG_PSERIES_1 will be using lr. + */ +#define EXCEPTION_RELON_PROLOG_PSERIES(area, label, h, extra, vec) \ + EXCEPTION_PROLOG_0(area); \ + EXCEPTION_PROLOG_1(area, extra, vec); \ + EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) /* * We're short on space and time in the exception prolog, so we can't @@ -55,20 +88,83 @@ * word. */ #define LOAD_HANDLER(reg, label) \ - addi reg,reg,(label)-_stext; /* virt addr of handler ... */ + /* Handlers must be within 64K of kbase, which must be 64k aligned */ \ + ori reg,reg,(label)-_stext; /* virt addr of handler ... */ /* Exception register prefixes */ #define EXC_HV H #define EXC_STD -#define __EXCEPTION_PROLOG_1(area, extra, vec) \ +#if defined(CONFIG_RELOCATABLE) +/* + * If we support interrupts with relocation on AND we're a relocatable + * kernel, we need to use LR to get to the 2nd level handler. So, save/restore + * it when required. + */ +#define SAVE_LR(reg, area) mflr reg ; std reg,area+EX_LR(r13) +#define GET_LR(reg, area) ld reg,area+EX_LR(r13) +#define RESTORE_LR(reg, area) ld reg,area+EX_LR(r13) ; mtlr reg +#else +/* ...else LR is unused and in register. */ +#define SAVE_LR(reg, area) +#define GET_LR(reg, area) mflr reg +#define RESTORE_LR(reg, area) +#endif + +/* + * PPR save/restore macros used in exceptions_64s.S + * Used for P7 or later processors + */ +#define SAVE_PPR(area, ra, rb) \ +BEGIN_FTR_SECTION_NESTED(940) \ + ld ra,PACACURRENT(r13); \ + ld rb,area+EX_PPR(r13); /* Read PPR from paca */ \ + std rb,TASKTHREADPPR(ra); \ +END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940) + +#define RESTORE_PPR_PACA(area, ra) \ +BEGIN_FTR_SECTION_NESTED(941) \ + ld ra,area+EX_PPR(r13); \ + mtspr SPRN_PPR,ra; \ +END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941) + +/* + * Increase the priority on systems where PPR save/restore is not + * implemented/ supported. 
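+ * On CPUs that do have CPU_FTR_HAS_PPR the HMT_MEDIUM below is nop'ed
+ * out by the feature fixup at boot, since thread priority is handled
+ * there through the SAVE_PPR/RESTORE_PPR_PACA macros above.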
+ */ +#define HMT_MEDIUM_PPR_DISCARD \ +BEGIN_FTR_SECTION_NESTED(942) \ + HMT_MEDIUM; \ +END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,0,942) /*non P7*/ + +/* + * Get an SPR into a register if the CPU has the given feature + */ +#define OPT_GET_SPR(ra, spr, ftr) \ +BEGIN_FTR_SECTION_NESTED(943) \ + mfspr ra,spr; \ +END_FTR_SECTION_NESTED(ftr,ftr,943) + +/* + * Save a register to the PACA if the CPU has the given feature + */ +#define OPT_SAVE_REG_TO_PACA(offset, ra, ftr) \ +BEGIN_FTR_SECTION_NESTED(943) \ + std ra,offset(r13); \ +END_FTR_SECTION_NESTED(ftr,ftr,943) + +#define EXCEPTION_PROLOG_0(area) \ GET_PACA(r13); \ - std r9,area+EX_R9(r13); /* save r9 - r12 */ \ - std r10,area+EX_R10(r13); \ - BEGIN_FTR_SECTION_NESTED(66); \ - mfspr r10,SPRN_CFAR; \ - std r10,area+EX_CFAR(r13); \ - END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \ + std r9,area+EX_R9(r13); /* save r9 */ \ + OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR); \ + HMT_MEDIUM; \ + std r10,area+EX_R10(r13); /* save r10 - r12 */ \ + OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR) + +#define __EXCEPTION_PROLOG_1(area, extra, vec) \ + OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \ + OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \ + SAVE_LR(r10, area); \ mfcr r9; \ extra(vec); \ std r11,area+EX_R11(r13); \ @@ -92,6 +188,7 @@ __EXCEPTION_PROLOG_PSERIES_1(label, h) #define EXCEPTION_PROLOG_PSERIES(area, label, h, extra, vec) \ + EXCEPTION_PROLOG_0(area); \ EXCEPTION_PROLOG_1(area, extra, vec); \ EXCEPTION_PROLOG_PSERIES_1(label, h); @@ -102,10 +199,14 @@ #define __KVM_HANDLER(area, h, n) \ do_kvm_##n: \ + BEGIN_FTR_SECTION_NESTED(947) \ + ld r10,area+EX_CFAR(r13); \ + std r10,HSTATE_CFAR(r13); \ + END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947); \ ld r10,area+EX_R10(r13); \ - stw r9,HSTATE_SCRATCH1(r13); \ + stw r9,HSTATE_SCRATCH1(r13); \ ld r9,area+EX_R9(r13); \ - std r12,HSTATE_SCRATCH0(r13); \ + std r12,HSTATE_SCRATCH0(r13); \ li r12,n; \ b kvmppc_interrupt @@ -169,6 +270,7 @@ do_kvm_##n: \ sth r1,PACA_TRAP_SAVE(r13); \ std r3,area+EX_R3(r13); \ addi r3,r13,area; /* r3 -> where regs are saved*/ \ + RESTORE_LR(r1, area); \ b bad_stack; \ 3: std r9,_CCR(r1); /* save CR in stackframe */ \ std r11,_NIP(r1); /* save SRR0 in stackframe */ \ @@ -176,8 +278,10 @@ do_kvm_##n: \ std r10,0(r1); /* make stack chain pointer */ \ std r0,GPR0(r1); /* save r0 in stackframe */ \ std r10,GPR1(r1); /* save r1 in stackframe */ \ + beq 4f; /* if from kernel mode */ \ ACCOUNT_CPU_USER_ENTRY(r9, r10); \ - std r2,GPR2(r1); /* save r2 in stackframe */ \ + SAVE_PPR(area, r9, r10); \ +4: std r2,GPR2(r1); /* save r2 in stackframe */ \ SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \ @@ -194,8 +298,8 @@ do_kvm_##n: \ ld r10,area+EX_CFAR(r13); \ std r10,ORIG_GPR3(r1); \ END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \ + GET_LR(r9,area); /* Get LR, later save to stack */ \ ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ - mflr r9; /* save LR in stackframe */ \ std r9,_LINK(r1); \ mfctr r10; /* save CTR in stackframe */ \ std r10,_CTR(r1); \ @@ -218,25 +322,74 @@ do_kvm_##n: \ . 
= loc; \ .globl label##_pSeries; \ label##_pSeries: \ - HMT_MEDIUM; \ + HMT_MEDIUM_PPR_DISCARD; \ SET_SCRATCH0(r13); /* save r13 */ \ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ EXC_STD, KVMTEST_PR, vec) +/* Version of above for when we have to branch out-of-line */ +#define STD_EXCEPTION_PSERIES_OOL(vec, label) \ + .globl label##_pSeries; \ +label##_pSeries: \ + EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec); \ + EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_STD) + #define STD_EXCEPTION_HV(loc, vec, label) \ . = loc; \ .globl label##_hv; \ label##_hv: \ - HMT_MEDIUM; \ + HMT_MEDIUM_PPR_DISCARD; \ SET_SCRATCH0(r13); /* save r13 */ \ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ EXC_HV, KVMTEST, vec) +/* Version of above for when we have to branch out-of-line */ +#define STD_EXCEPTION_HV_OOL(vec, label) \ + .globl label##_hv; \ +label##_hv: \ + EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec); \ + EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV) + +#define STD_RELON_EXCEPTION_PSERIES(loc, vec, label) \ + . = loc; \ + .globl label##_relon_pSeries; \ +label##_relon_pSeries: \ + HMT_MEDIUM_PPR_DISCARD; \ + /* No guest interrupts come through here */ \ + SET_SCRATCH0(r13); /* save r13 */ \ + EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ + EXC_STD, KVMTEST_PR, vec) + +#define STD_RELON_EXCEPTION_PSERIES_OOL(vec, label) \ + .globl label##_relon_pSeries; \ +label##_relon_pSeries: \ + EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec); \ + EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_STD) + +#define STD_RELON_EXCEPTION_HV(loc, vec, label) \ + . = loc; \ + .globl label##_relon_hv; \ +label##_relon_hv: \ + HMT_MEDIUM_PPR_DISCARD; \ + /* No guest interrupts come through here */ \ + SET_SCRATCH0(r13); /* save r13 */ \ + EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ + EXC_HV, KVMTEST, vec) + +#define STD_RELON_EXCEPTION_HV_OOL(vec, label) \ + .globl label##_relon_hv; \ +label##_relon_hv: \ + EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec); \ + EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_HV) + /* This associate vector numbers with bits in paca->irq_happened */ #define SOFTEN_VALUE_0x500 PACA_IRQ_EE #define SOFTEN_VALUE_0x502 PACA_IRQ_EE #define SOFTEN_VALUE_0x900 PACA_IRQ_DEC #define SOFTEN_VALUE_0x982 PACA_IRQ_DEC +#define SOFTEN_VALUE_0xa00 PACA_IRQ_DBELL +#define SOFTEN_VALUE_0xe80 PACA_IRQ_DBELL +#define SOFTEN_VALUE_0xe82 PACA_IRQ_DBELL #define __SOFTEN_TEST(h, vec) \ lbz r10,PACASOFTIRQEN(r13); \ @@ -257,11 +410,16 @@ label##_hv: \ KVMTEST(vec); \ _SOFTEN_TEST(EXC_STD, vec) +#define SOFTEN_NOTEST_PR(vec) _SOFTEN_TEST(EXC_STD, vec) +#define SOFTEN_NOTEST_HV(vec) _SOFTEN_TEST(EXC_HV, vec) + #define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \ - HMT_MEDIUM; \ + HMT_MEDIUM_PPR_DISCARD; \ SET_SCRATCH0(r13); /* save r13 */ \ - __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \ + EXCEPTION_PROLOG_0(PACA_EXGEN); \ + __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \ EXCEPTION_PROLOG_PSERIES_1(label##_common, h); + #define _MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \ __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) @@ -279,6 +437,41 @@ label##_hv: \ _MASKABLE_EXCEPTION_PSERIES(vec, label, \ EXC_HV, SOFTEN_TEST_HV) +#define MASKABLE_EXCEPTION_HV_OOL(vec, label) \ + .globl label##_hv; \ +label##_hv: \ + EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec); \ + EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV); + +#define __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra) \ + HMT_MEDIUM_PPR_DISCARD; \ + SET_SCRATCH0(r13); /* save r13 
*/ \ + EXCEPTION_PROLOG_0(PACA_EXGEN); \ + __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \ + EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, h); +#define _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra) \ + __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra) + +#define MASKABLE_RELON_EXCEPTION_PSERIES(loc, vec, label) \ + . = loc; \ + .globl label##_relon_pSeries; \ +label##_relon_pSeries: \ + _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, \ + EXC_STD, SOFTEN_NOTEST_PR) + +#define MASKABLE_RELON_EXCEPTION_HV(loc, vec, label) \ + . = loc; \ + .globl label##_relon_hv; \ +label##_relon_hv: \ + _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, \ + EXC_HV, SOFTEN_NOTEST_HV) + +#define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label) \ + .globl label##_relon_hv; \ +label##_relon_hv: \ + EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_HV, vec); \ + EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV); + /* * Our exception common code can be passed various "additions" * to specify the behaviour of interrupts, whether to kick the diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h index ad0b751b0d7..097dee57a7a 100644 --- a/arch/powerpc/include/asm/firmware.h +++ b/arch/powerpc/include/asm/firmware.h @@ -49,6 +49,8 @@ #define FW_FEATURE_XCMO ASM_CONST(0x0000000008000000) #define FW_FEATURE_OPAL ASM_CONST(0x0000000010000000) #define FW_FEATURE_OPALv2 ASM_CONST(0x0000000020000000) +#define FW_FEATURE_SET_MODE ASM_CONST(0x0000000040000000) +#define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000) #ifndef __ASSEMBLY__ @@ -62,7 +64,8 @@ enum { FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN | FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR | FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR | - FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO, + FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO | + FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY, FW_FEATURE_PSERIES_ALWAYS = 0, FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2, FW_FEATURE_POWERNV_ALWAYS = 0, diff --git a/arch/powerpc/include/asm/fsl_gtm.h b/arch/powerpc/include/asm/fsl_gtm.h index 8e8c9b5032d..3b05808f9ca 100644 --- a/arch/powerpc/include/asm/fsl_gtm.h +++ b/arch/powerpc/include/asm/fsl_gtm.h @@ -1,7 +1,7 @@ /* * Freescale General-purpose Timers Module * - * Copyright (c) Freescale Semicondutor, Inc. 2006. + * Copyright 2006 Freescale Semiconductor, Inc. * Shlomi Gridish <gridish@freescale.com> * Jerry Huang <Chang-Ming.Huang@freescale.com> * Copyright (c) MontaVista Software, Inc. 2008. 
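The firmware.h hunk above adds FW_FEATURE_SET_MODE and FW_FEATURE_BEST_ENERGY to the set of features a pSeries guest may negotiate at boot. New hcalls are conventionally gated on firmware_has_feature() before being issued; the sketch below shows that pattern for H_SET_MODE. The function name and exact argument layout here are illustrative only, not the definitive wrapper added elsewhere in this series.

#include <linux/errno.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>

static long example_set_mode(unsigned long mflags, unsigned long resource,
			     unsigned long value1, unsigned long value2)
{
	/* Only issue the hcall if the platform advertised it at boot */
	if (!firmware_has_feature(FW_FEATURE_SET_MODE))
		return -ENODEV;

	return plpar_hcall_norets(H_SET_MODE, mflags, resource,
				  value1, value2);
}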
diff --git a/arch/powerpc/include/asm/fsl_guts.h b/arch/powerpc/include/asm/fsl_guts.h index dd5ba2c2277..77ced0b3d81 100644 --- a/arch/powerpc/include/asm/fsl_guts.h +++ b/arch/powerpc/include/asm/fsl_guts.h @@ -71,7 +71,9 @@ struct ccsr_guts { u8 res0c4[0x224 - 0xc4]; __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */ __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */ - u8 res22c[0x800 - 0x22c]; + u8 res22c[0x604 - 0x22c]; + __be32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ + u8 res608[0x800 - 0x608]; __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */ u8 res804[0x900 - 0x804]; __be32 ircr; /* 0x.0900 - Infrared Control Register */ diff --git a/arch/powerpc/include/asm/fsl_hcalls.h b/arch/powerpc/include/asm/fsl_hcalls.h index 922d9b5fe3d..3abb58394da 100644 --- a/arch/powerpc/include/asm/fsl_hcalls.h +++ b/arch/powerpc/include/asm/fsl_hcalls.h @@ -96,7 +96,7 @@ static inline unsigned int fh_send_nmi(unsigned int vcpu_mask) r11 = FH_HCALL_TOKEN(FH_SEND_NMI); r3 = vcpu_mask; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3) : : EV_HCALL_CLOBBERS1 ); @@ -151,7 +151,7 @@ static inline unsigned int fh_partition_get_dtprop(int handle, r9 = (uint32_t)propvalue_addr; r10 = *propvalue_len; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), "+r" (r8), "+r" (r9), "+r" (r10) @@ -205,7 +205,7 @@ static inline unsigned int fh_partition_set_dtprop(int handle, r9 = (uint32_t)propvalue_addr; r10 = propvalue_len; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), "+r" (r8), "+r" (r9), "+r" (r10) @@ -229,7 +229,7 @@ static inline unsigned int fh_partition_restart(unsigned int partition) r11 = FH_HCALL_TOKEN(FH_PARTITION_RESTART); r3 = partition; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3) : : EV_HCALL_CLOBBERS1 ); @@ -262,7 +262,7 @@ static inline unsigned int fh_partition_get_status(unsigned int partition, r11 = FH_HCALL_TOKEN(FH_PARTITION_GET_STATUS); r3 = partition; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3), "=r" (r4) : : EV_HCALL_CLOBBERS2 ); @@ -295,7 +295,7 @@ static inline unsigned int fh_partition_start(unsigned int partition, r4 = entry_point; r5 = load; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5) : : EV_HCALL_CLOBBERS3 ); @@ -317,7 +317,7 @@ static inline unsigned int fh_partition_stop(unsigned int partition) r11 = FH_HCALL_TOKEN(FH_PARTITION_STOP); r3 = partition; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3) : : EV_HCALL_CLOBBERS1 ); @@ -376,7 +376,7 @@ static inline unsigned int fh_partition_memcpy(unsigned int source, #endif r7 = count; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7) : : EV_HCALL_CLOBBERS5 @@ -399,7 +399,7 @@ static inline unsigned int fh_dma_enable(unsigned int liodn) r11 = FH_HCALL_TOKEN(FH_DMA_ENABLE); r3 = liodn; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3) : : EV_HCALL_CLOBBERS1 ); @@ -421,7 +421,7 @@ static inline unsigned int fh_dma_disable(unsigned int liodn) r11 = FH_HCALL_TOKEN(FH_DMA_DISABLE); r3 = liodn; - __asm__ __volatile__ ("sc 
1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3) : : EV_HCALL_CLOBBERS1 ); @@ -447,7 +447,7 @@ static inline unsigned int fh_vmpic_get_msir(unsigned int interrupt, r11 = FH_HCALL_TOKEN(FH_VMPIC_GET_MSIR); r3 = interrupt; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3), "=r" (r4) : : EV_HCALL_CLOBBERS2 ); @@ -469,7 +469,7 @@ static inline unsigned int fh_system_reset(void) r11 = FH_HCALL_TOKEN(FH_SYSTEM_RESET); - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "=r" (r3) : : EV_HCALL_CLOBBERS1 ); @@ -506,7 +506,7 @@ static inline unsigned int fh_err_get_info(int queue, uint32_t *bufsize, r6 = addr_lo; r7 = peek; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7) : : EV_HCALL_CLOBBERS5 @@ -542,7 +542,7 @@ static inline unsigned int fh_get_core_state(unsigned int handle, r3 = handle; r4 = vcpu; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3), "+r" (r4) : : EV_HCALL_CLOBBERS2 ); @@ -572,7 +572,7 @@ static inline unsigned int fh_enter_nap(unsigned int handle, unsigned int vcpu) r3 = handle; r4 = vcpu; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3), "+r" (r4) : : EV_HCALL_CLOBBERS2 ); @@ -597,7 +597,7 @@ static inline unsigned int fh_exit_nap(unsigned int handle, unsigned int vcpu) r3 = handle; r4 = vcpu; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3), "+r" (r4) : : EV_HCALL_CLOBBERS2 ); @@ -618,7 +618,7 @@ static inline unsigned int fh_claim_device(unsigned int handle) r11 = FH_HCALL_TOKEN(FH_CLAIM_DEVICE); r3 = handle; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3) : : EV_HCALL_CLOBBERS1 ); @@ -645,7 +645,7 @@ static inline unsigned int fh_partition_stop_dma(unsigned int handle) r11 = FH_HCALL_TOKEN(FH_PARTITION_STOP_DMA); r3 = handle; - __asm__ __volatile__ ("sc 1" + asm volatile("bl epapr_hypercall_start" : "+r" (r11), "+r" (r3) : : EV_HCALL_CLOBBERS1 ); diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 7a867065db7..4bc2c3dad6a 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -267,7 +267,8 @@ #define H_RANDOM 0x300 #define H_COP 0x304 #define H_GET_MPP_X 0x314 -#define MAX_HCALL_OPCODE H_GET_MPP_X +#define H_SET_MODE 0x31C +#define MAX_HCALL_OPCODE H_SET_MODE #ifndef __ASSEMBLY__ @@ -355,6 +356,26 @@ struct hvcall_mpp_x_data { int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data); +static inline unsigned int get_longbusy_msecs(int longbusy_rc) +{ + switch (longbusy_rc) { + case H_LONG_BUSY_ORDER_1_MSEC: + return 1; + case H_LONG_BUSY_ORDER_10_MSEC: + return 10; + case H_LONG_BUSY_ORDER_100_MSEC: + return 100; + case H_LONG_BUSY_ORDER_1_SEC: + return 1000; + case H_LONG_BUSY_ORDER_10_SEC: + return 10000; + case H_LONG_BUSY_ORDER_100_SEC: + return 100000; + default: + return 1; + } +} + #ifdef CONFIG_PPC_PSERIES extern int CMO_PrPSP; extern int CMO_SecPSP; @@ -374,6 +395,15 @@ static inline unsigned long cmo_get_page_size(void) { return CMO_PageSize; } + +extern long pSeries_enable_reloc_on_exc(void); +extern long pSeries_disable_reloc_on_exc(void); + +#else + +#define pSeries_enable_reloc_on_exc() do {} while (0) +#define pSeries_disable_reloc_on_exc() do {} while (0) + #endif /* CONFIG_PPC_PSERIES */ #endif /* __ASSEMBLY__ */ 
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index 423424599da..eb0f4ac75c4 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -24,16 +24,30 @@ #define _PPC_BOOK3S_64_HW_BREAKPOINT_H #ifdef __KERNEL__ -#ifdef CONFIG_HAVE_HW_BREAKPOINT - struct arch_hw_breakpoint { unsigned long address; - unsigned long dabrx; - int type; - u8 len; /* length of the target data symbol */ - bool extraneous_interrupt; + u16 type; + u16 len; /* length of the target data symbol */ }; +/* Note: Don't change the the first 6 bits below as they are in the same order + * as the dabr and dabrx. + */ +#define HW_BRK_TYPE_READ 0x01 +#define HW_BRK_TYPE_WRITE 0x02 +#define HW_BRK_TYPE_TRANSLATE 0x04 +#define HW_BRK_TYPE_USER 0x08 +#define HW_BRK_TYPE_KERNEL 0x10 +#define HW_BRK_TYPE_HYP 0x20 +#define HW_BRK_TYPE_EXTRANEOUS_IRQ 0x80 + +/* bits that overlap with the bottom 3 bits of the dabr */ +#define HW_BRK_TYPE_RDWR (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE) +#define HW_BRK_TYPE_DABR (HW_BRK_TYPE_RDWR | HW_BRK_TYPE_TRANSLATE) +#define HW_BRK_TYPE_PRIV_ALL (HW_BRK_TYPE_USER | HW_BRK_TYPE_KERNEL | \ + HW_BRK_TYPE_HYP) + +#ifdef CONFIG_HAVE_HW_BREAKPOINT #include <linux/kdebug.h> #include <asm/reg.h> #include <asm/debug.h> @@ -43,8 +57,6 @@ struct pmu; struct perf_sample_data; #define HW_BREAKPOINT_ALIGN 0x7 -/* Maximum permissible length of any HW Breakpoint */ -#define HW_BREAKPOINT_LEN 0x8 extern int hw_breakpoint_slots(int type); extern int arch_bp_generic_fields(int type, int *gen_bp_type); @@ -62,7 +74,12 @@ extern void ptrace_triggered(struct perf_event *bp, struct perf_sample_data *data, struct pt_regs *regs); static inline void hw_breakpoint_disable(void) { - set_dabr(0, 0); + struct arch_hw_breakpoint brk; + + brk.address = 0; + brk.type = 0; + brk.len = 0; + set_breakpoint(&brk); } extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs); diff --git a/arch/powerpc/include/asm/immap_qe.h b/arch/powerpc/include/asm/immap_qe.h index 61e8490786b..bedbff89142 100644 --- a/arch/powerpc/include/asm/immap_qe.h +++ b/arch/powerpc/include/asm/immap_qe.h @@ -3,7 +3,7 @@ * The Internal Memory Map for devices with QE on them. This * is the superset of all QE devices (8360, etc.). - * Copyright (C) 2006. Freescale Semicondutor, Inc. All rights reserved. + * Copyright (C) 2006. Freescale Semiconductor, Inc. All rights reserved. * * Authors: Shlomi Gridish <gridish@freescale.com> * Li Yang <leoli@freescale.com> diff --git a/arch/powerpc/include/asm/io-workarounds.h b/arch/powerpc/include/asm/io-workarounds.h index fbae4928692..f96dd096ff4 100644 --- a/arch/powerpc/include/asm/io-workarounds.h +++ b/arch/powerpc/include/asm/io-workarounds.h @@ -31,8 +31,8 @@ struct iowa_bus { void *private; }; -void __devinit iowa_register_bus(struct pci_controller *, struct ppc_pci_io *, - int (*)(struct iowa_bus *, void *), void *); +void iowa_register_bus(struct pci_controller *, struct ppc_pci_io *, + int (*)(struct iowa_bus *, void *), void *); struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR); struct iowa_bus *iowa_pio_find_bus(unsigned long); diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index 76fdcfef088..aabcdba8f6b 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h @@ -118,6 +118,7 @@ #define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */ #define RESUME_FLAG_HOST (1<<1) /* Resume host? 
*/ +#define RESUME_FLAG_ARCH1 (1<<2) #define RESUME_GUEST 0 #define RESUME_GUEST_NV RESUME_FLAG_NV diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 7aefdb3e1ce..5a56e1c5f85 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -81,6 +81,8 @@ struct kvmppc_vcpu_book3s { u64 sdr1; u64 hior; u64 msr_mask; + u64 purr_offset; + u64 spurr_offset; #ifdef CONFIG_PPC_BOOK3S_32 u32 vsid_pool[VSID_POOL_SIZE]; u32 vsid_next; @@ -157,10 +159,14 @@ extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr, extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr); extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, long pte_index, unsigned long pteh, unsigned long ptel); -extern long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, - long pte_index, unsigned long pteh, unsigned long ptel); +extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, + long pte_index, unsigned long pteh, unsigned long ptel, + pgd_t *pgdir, bool realmode, unsigned long *idx_ret); +extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags, + unsigned long pte_index, unsigned long avpn, + unsigned long *hpret); extern long kvmppc_hv_get_dirty_log(struct kvm *kvm, - struct kvm_memory_slot *memslot); + struct kvm_memory_slot *memslot, unsigned long *map); extern void kvmppc_entry_trampoline(void); extern void kvmppc_hv_entry_trampoline(void); diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index 0dd1d86d3e3..38bec1dc992 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -50,6 +50,15 @@ extern int kvm_hpt_order; /* order of preallocated HPTs */ #define HPTE_V_HVLOCK 0x40UL #define HPTE_V_ABSENT 0x20UL +/* + * We use this bit in the guest_rpte field of the revmap entry + * to indicate a modified HPTE. + */ +#define HPTE_GR_MODIFIED (1ul << 62) + +/* These bits are reserved in the guest view of the HPTE */ +#define HPTE_GR_RESERVED HPTE_GR_MODIFIED + static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits) { unsigned long tmp, old; @@ -60,7 +69,7 @@ static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits) " ori %0,%0,%4\n" " stdcx. %0,0,%2\n" " beq+ 2f\n" - " li %1,%3\n" + " mr %1,%3\n" "2: isync" : "=&r" (tmp), "=&r" (old) : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK) @@ -237,4 +246,26 @@ static inline bool slot_is_aligned(struct kvm_memory_slot *memslot, return !(memslot->base_gfn & mask) && !(memslot->npages & mask); } +/* + * This works for 4k, 64k and 16M pages on POWER7, + * and 4k and 16M pages on PPC970. 
+ */ +static inline unsigned long slb_pgsize_encoding(unsigned long psize) +{ + unsigned long senc = 0; + + if (psize > 0x1000) { + senc = SLB_VSID_L; + if (psize == 0x10000) + senc |= SLB_VSID_LP_01; + } + return senc; +} + +static inline int is_vrma_hpte(unsigned long hpte_v) +{ + return (hpte_v & ~0xffffffUL) == + (HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16))); +} + #endif /* __ASM_KVM_BOOK3S_64_H__ */ diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 88609b23b77..cdc3d2717cc 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -93,6 +93,9 @@ struct kvmppc_host_state { u64 host_dscr; u64 dec_expires; #endif +#ifdef CONFIG_PPC_BOOK3S_64 + u64 cfar; +#endif }; struct kvmppc_book3s_shadow_vcpu { diff --git a/arch/powerpc/include/asm/kvm_booke_hv_asm.h b/arch/powerpc/include/asm/kvm_booke_hv_asm.h index 30a600fa1b6..3a79f532571 100644 --- a/arch/powerpc/include/asm/kvm_booke_hv_asm.h +++ b/arch/powerpc/include/asm/kvm_booke_hv_asm.h @@ -17,6 +17,7 @@ * there are no exceptions for which we fall through directly to * the normal host handler. * + * 32-bit host * Expected inputs (normal exceptions): * SCRATCH0 = saved r10 * r10 = thread struct @@ -33,14 +34,38 @@ * *(r8 + GPR9) = saved r9 * *(r8 + GPR10) = saved r10 (r10 not yet clobbered) * *(r8 + GPR11) = saved r11 + * + * 64-bit host + * Expected inputs (GEN/GDBELL/DBG/MC exception types): + * r10 = saved CR + * r13 = PACA_POINTER + * *(r13 + PACA_EX##type + EX_R10) = saved r10 + * *(r13 + PACA_EX##type + EX_R11) = saved r11 + * SPRN_SPRG_##type##_SCRATCH = saved r13 + * + * Expected inputs (CRIT exception type): + * r10 = saved CR + * r13 = PACA_POINTER + * *(r13 + PACA_EX##type + EX_R10) = saved r10 + * *(r13 + PACA_EX##type + EX_R11) = saved r11 + * *(r13 + PACA_EX##type + EX_R13) = saved r13 + * + * Expected inputs (TLB exception type): + * r10 = saved CR + * r13 = PACA_POINTER + * *(r13 + PACA_EX##type + EX_TLB_R10) = saved r10 + * *(r13 + PACA_EX##type + EX_TLB_R11) = saved r11 + * SPRN_SPRG_GEN_SCRATCH = saved r13 + * + * Only the bolted version of TLB miss exception handlers is supported now. */ .macro DO_KVM intno srr1 #ifdef CONFIG_KVM_BOOKE_HV BEGIN_FTR_SECTION mtocrf 0x80, r11 /* check MSR[GS] without clobbering reg */ - bf 3, kvmppc_resume_\intno\()_\srr1 + bf 3, 1975f b kvmppc_handler_\intno\()_\srr1 -kvmppc_resume_\intno\()_\srr1: +1975: END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) #endif .endm diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 28e8f5e5c63..d1bb8607472 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -37,16 +37,14 @@ #define KVM_MAX_VCPUS NR_CPUS #define KVM_MAX_VCORES NR_CPUS -#define KVM_MEMORY_SLOTS 32 -/* memory slots that does not exposed to userspace */ -#define KVM_PRIVATE_MEM_SLOTS 4 -#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS) +#define KVM_USER_MEM_SLOTS 32 +#define KVM_MEM_SLOTS_NUM KVM_USER_MEM_SLOTS #ifdef CONFIG_KVM_MMIO #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 #endif -#ifdef CONFIG_KVM_BOOK3S_64_HV +#if !defined(CONFIG_KVM_440) #include <linux/mmu_notifier.h> #define KVM_ARCH_WANT_MMU_NOTIFIER @@ -204,7 +202,7 @@ struct revmap_entry { }; /* - * We use the top bit of each memslot->rmap entry as a lock bit, + * We use the top bit of each memslot->arch.rmap entry as a lock bit, * and bit 32 as a present flag. 
The bottom 32 bits are the * index in the guest HPT of a HPTE that points to the page. */ @@ -215,14 +213,17 @@ struct revmap_entry { #define KVMPPC_RMAP_PRESENT 0x100000000ul #define KVMPPC_RMAP_INDEX 0xfffffffful -/* Low-order bits in kvm->arch.slot_phys[][] */ +/* Low-order bits in memslot->arch.slot_phys[] */ #define KVMPPC_PAGE_ORDER_MASK 0x1f #define KVMPPC_PAGE_NO_CACHE HPTE_R_I /* 0x20 */ #define KVMPPC_PAGE_WRITETHRU HPTE_R_W /* 0x40 */ #define KVMPPC_GOT_PAGE 0x80 struct kvm_arch_memory_slot { +#ifdef CONFIG_KVM_BOOK3S_64_HV unsigned long *rmap; + unsigned long *slot_phys; +#endif /* CONFIG_KVM_BOOK3S_64_HV */ }; struct kvm_arch { @@ -243,12 +244,12 @@ struct kvm_arch { int using_mmu_notifiers; u32 hpt_order; atomic_t vcpus_running; + u32 online_vcores; unsigned long hpt_npte; unsigned long hpt_mask; + atomic_t hpte_mod_interest; spinlock_t slot_phys_lock; - unsigned long *slot_phys[KVM_MEM_SLOTS_NUM]; - int slot_npages[KVM_MEM_SLOTS_NUM]; - unsigned short last_vcpu[NR_CPUS]; + cpumask_t need_tlb_flush; struct kvmppc_vcore *vcores[KVM_MAX_VCORES]; struct kvmppc_linear_info *hpt_li; #endif /* CONFIG_KVM_BOOK3S_64_HV */ @@ -273,6 +274,7 @@ struct kvmppc_vcore { int nap_count; int napping_threads; u16 pcpu; + u16 last_cpu; u8 vcore_state; u8 in_guest; struct list_head runnable_threads; @@ -288,9 +290,10 @@ struct kvmppc_vcore { /* Values for vcore_state */ #define VCORE_INACTIVE 0 -#define VCORE_RUNNING 1 -#define VCORE_EXITING 2 -#define VCORE_SLEEPING 3 +#define VCORE_SLEEPING 1 +#define VCORE_STARTING 2 +#define VCORE_RUNNING 3 +#define VCORE_EXITING 4 /* * Struct used to manage memory for a virtual processor area @@ -346,6 +349,27 @@ struct kvmppc_slb { bool class : 1; }; +# ifdef CONFIG_PPC_FSL_BOOK3E +#define KVMPPC_BOOKE_IAC_NUM 2 +#define KVMPPC_BOOKE_DAC_NUM 2 +# else +#define KVMPPC_BOOKE_IAC_NUM 4 +#define KVMPPC_BOOKE_DAC_NUM 2 +# endif +#define KVMPPC_BOOKE_MAX_IAC 4 +#define KVMPPC_BOOKE_MAX_DAC 2 + +struct kvmppc_booke_debug_reg { + u32 dbcr0; + u32 dbcr1; + u32 dbcr2; +#ifdef CONFIG_KVM_E500MC + u32 dbcr4; +#endif + u64 iac[KVMPPC_BOOKE_MAX_IAC]; + u64 dac[KVMPPC_BOOKE_MAX_DAC]; +}; + struct kvm_vcpu_arch { ulong host_stack; u32 host_pid; @@ -380,13 +404,18 @@ struct kvm_vcpu_arch { u32 host_mas4; u32 host_mas6; u32 shadow_epcr; - u32 epcr; u32 shadow_msrp; u32 eplc; u32 epsc; u32 oldpir; #endif +#if defined(CONFIG_BOOKE) +#if defined(CONFIG_KVM_BOOKE_HV) || defined(CONFIG_64BIT) + u32 epcr; +#endif +#endif + #ifdef CONFIG_PPC_BOOK3S /* For Gekko paired singles */ u32 qpr[32]; @@ -409,6 +438,7 @@ struct kvm_vcpu_arch { ulong uamor; u32 ctrl; ulong dabr; + ulong cfar; #endif u32 vrsave; /* also USPRG0 */ u32 mmucr; @@ -440,8 +470,6 @@ struct kvm_vcpu_arch { u32 ccr0; u32 ccr1; - u32 dbcr0; - u32 dbcr1; u32 dbsr; u64 mmcr[3]; @@ -471,9 +499,12 @@ struct kvm_vcpu_arch { ulong fault_esr; ulong queued_dear; ulong queued_esr; + spinlock_t wdt_lock; + struct timer_list wdt_timer; u32 tlbcfg[4]; u32 mmucfg; u32 epr; + struct kvmppc_booke_debug_reg dbg_reg; #endif gpa_t paddr_accessed; gva_t vaddr_accessed; @@ -486,9 +517,12 @@ struct kvm_vcpu_arch { u8 osi_needed; u8 osi_enabled; u8 papr_enabled; + u8 watchdog_enabled; u8 sane; u8 cpu_type; u8 hcall_needed; + u8 epr_enabled; + u8 epr_needed; u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */ @@ -497,7 +531,6 @@ struct kvm_vcpu_arch { u64 dec_jiffies; u64 dec_expires; unsigned long pending_exceptions; - u16 last_cpu; u8 ceded; u8 prodded; u32 last_inst; @@ -534,13 +567,17 @@ struct kvm_vcpu_arch { unsigned long 
dtl_index; u64 stolen_logged; struct kvmppc_vpa slb_shadow; + + spinlock_t tbacct_lock; + u64 busy_stolen; + u64 busy_preempt; #endif }; /* Values for vcpu->arch.state */ -#define KVMPPC_VCPU_STOPPED 0 -#define KVMPPC_VCPU_BUSY_IN_HOST 1 -#define KVMPPC_VCPU_RUNNABLE 2 +#define KVMPPC_VCPU_NOTREADY 0 +#define KVMPPC_VCPU_RUNNABLE 1 +#define KVMPPC_VCPU_BUSY_IN_HOST 2 /* Values for vcpu->arch.io_gpr */ #define KVM_MMIO_REG_MASK 0x001f diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h index 9365860fb7f..2b119654b4c 100644 --- a/arch/powerpc/include/asm/kvm_para.h +++ b/arch/powerpc/include/asm/kvm_para.h @@ -21,7 +21,6 @@ #include <uapi/asm/kvm_para.h> - #ifdef CONFIG_KVM_GUEST #include <linux/of.h> @@ -55,7 +54,7 @@ static unsigned long kvm_hypercall(unsigned long *in, unsigned long *out, unsigned long nr) { - return HC_EV_UNIMPLEMENTED; + return EV_UNIMPLEMENTED; } #endif @@ -66,7 +65,7 @@ static inline long kvm_hypercall0_1(unsigned int nr, unsigned long *r2) unsigned long out[8]; unsigned long r; - r = kvm_hypercall(in, out, nr | HC_VENDOR_KVM); + r = kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); *r2 = out[0]; return r; @@ -77,7 +76,7 @@ static inline long kvm_hypercall0(unsigned int nr) unsigned long in[8]; unsigned long out[8]; - return kvm_hypercall(in, out, nr | HC_VENDOR_KVM); + return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); } static inline long kvm_hypercall1(unsigned int nr, unsigned long p1) @@ -86,7 +85,7 @@ static inline long kvm_hypercall1(unsigned int nr, unsigned long p1) unsigned long out[8]; in[0] = p1; - return kvm_hypercall(in, out, nr | HC_VENDOR_KVM); + return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); } static inline long kvm_hypercall2(unsigned int nr, unsigned long p1, @@ -97,7 +96,7 @@ static inline long kvm_hypercall2(unsigned int nr, unsigned long p1, in[0] = p1; in[1] = p2; - return kvm_hypercall(in, out, nr | HC_VENDOR_KVM); + return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); } static inline long kvm_hypercall3(unsigned int nr, unsigned long p1, @@ -109,7 +108,7 @@ static inline long kvm_hypercall3(unsigned int nr, unsigned long p1, in[0] = p1; in[1] = p2; in[2] = p3; - return kvm_hypercall(in, out, nr | HC_VENDOR_KVM); + return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); } static inline long kvm_hypercall4(unsigned int nr, unsigned long p1, @@ -123,7 +122,7 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1, in[1] = p2; in[2] = p3; in[3] = p4; - return kvm_hypercall(in, out, nr | HC_VENDOR_KVM); + return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); } diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index e006f0bdea9..44a657adf41 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -28,6 +28,7 @@ #include <linux/types.h> #include <linux/kvm_types.h> #include <linux/kvm_host.h> +#include <linux/bug.h> #ifdef CONFIG_PPC_BOOK3S #include <asm/kvm_book3s.h> #else @@ -43,12 +44,11 @@ enum emulation_result { EMULATE_DO_DCR, /* kvm_run filled with DCR request */ EMULATE_FAIL, /* can't emulate this instruction */ EMULATE_AGAIN, /* something went wrong. 
go again */ + EMULATE_DO_PAPR, /* kvm_run filled with PAPR request */ }; extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); -extern char kvmppc_handlers_start[]; -extern unsigned long kvmppc_handler_len; extern void kvmppc_handler_highmem(void); extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu); @@ -68,6 +68,8 @@ extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu); extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb); extern void kvmppc_decrementer_func(unsigned long data); extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu); +extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu); +extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu); /* Core-specific hooks */ @@ -104,6 +106,7 @@ extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq); extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq); +extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu); extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int op, int *advance); @@ -111,6 +114,7 @@ extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong val); extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *val); +extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu); extern int kvmppc_booke_init(void); extern void kvmppc_booke_exit(void); @@ -139,16 +143,28 @@ extern struct kvmppc_linear_info *kvm_alloc_hpt(void); extern void kvm_release_hpt(struct kvmppc_linear_info *li); extern int kvmppc_core_init_vm(struct kvm *kvm); extern void kvmppc_core_destroy_vm(struct kvm *kvm); +extern void kvmppc_core_free_memslot(struct kvm_memory_slot *free, + struct kvm_memory_slot *dont); +extern int kvmppc_core_create_memslot(struct kvm_memory_slot *slot, + unsigned long npages); extern int kvmppc_core_prepare_memory_region(struct kvm *kvm, + struct kvm_memory_slot *memslot, struct kvm_userspace_memory_region *mem); extern void kvmppc_core_commit_memory_region(struct kvm *kvm, - struct kvm_userspace_memory_region *mem); + struct kvm_userspace_memory_region *mem, + struct kvm_memory_slot old); extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info); +extern void kvmppc_core_flush_memslot(struct kvm *kvm, + struct kvm_memory_slot *memslot); extern int kvmppc_bookehv_init(void); extern void kvmppc_bookehv_exit(void); +extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu); + +extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *); + /* * Cuts out inst bits with ordering according to spec. * That means the leftmost bit is zero. All given bits are included. 
@@ -182,6 +198,41 @@ static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value) return r; } +union kvmppc_one_reg { + u32 wval; + u64 dval; + vector128 vval; + u64 vsxval[2]; + struct { + u64 addr; + u64 length; + } vpaval; +}; + +#define one_reg_size(id) \ + (1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) + +#define get_reg_val(id, reg) ({ \ + union kvmppc_one_reg __u; \ + switch (one_reg_size(id)) { \ + case 4: __u.wval = (reg); break; \ + case 8: __u.dval = (reg); break; \ + default: BUG(); \ + } \ + __u; \ +}) + + +#define set_reg_val(id, val) ({ \ + u64 __v; \ + switch (one_reg_size(id)) { \ + case 4: __v = (val).wval; break; \ + case 8: __v = (val).dval; break; \ + default: BUG(); \ + } \ + __v; \ +}) + void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); @@ -190,6 +241,8 @@ int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg); int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg); +int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *); +int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *); void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid); @@ -209,6 +262,15 @@ static inline void kvm_linear_init(void) {} #endif +static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr) +{ +#ifdef CONFIG_KVM_BOOKE_HV + mtspr(SPRN_GEPR, epr); +#elif defined(CONFIG_BOOKE) + vcpu->arch.epr = epr; +#endif +} + int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, struct kvm_config_tlb *cfg); int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu, @@ -230,5 +292,36 @@ static inline void kvmppc_mmu_flush_icache(pfn_t pfn) } } +/* Please call after prepare_to_enter. This function puts the lazy ee state + back to normal mode, without actually enabling interrupts. */ +static inline void kvmppc_lazy_ee_enable(void) +{ +#ifdef CONFIG_PPC64 + /* Only need to enable IRQs by hard enabling them after this */ + local_paca->irq_happened = 0; + local_paca->soft_enabled = 1; +#endif +} + +static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb) +{ + ulong ea; + ulong msr_64bit = 0; + + ea = kvmppc_get_gpr(vcpu, rb); + if (ra) + ea += kvmppc_get_gpr(vcpu, ra); + +#if defined(CONFIG_PPC_BOOK3E_64) + msr_64bit = MSR_CM; +#elif defined(CONFIG_PPC_BOOK3S_64) + msr_64bit = MSR_SF; +#endif + + if (!(vcpu->arch.shared->msr & msr_64bit)) + ea = (uint32_t)ea; + + return ea; +} #endif /* __POWERPC_KVM_PPC_H__ */ diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index 531fe0c3108..b1e7f2af101 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h @@ -145,7 +145,7 @@ struct dtl_entry { extern struct kmem_cache *dtl_cache; /* - * When CONFIG_VIRT_CPU_ACCOUNTING = y, the cpu accounting code controls + * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls * reading from the dispatch trace log. If other code wants to consume * DTL entries, it can set this pointer to a function that will get * called once for each DTL entry that gets processed. 
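The kvmppc_one_reg union and the get_reg_val()/set_reg_val() macros above give the ONE_REG ioctl path a single, width-checked way to marshal register values; an id whose encoded size is neither 4 nor 8 bytes hits BUG(). A sketch of how a backend's get/set handlers might use them follows; KVM_REG_PPC_HIOR is a real 64-bit id, but the function names and the set of registers handled are illustrative.

/* Sketch only: assumes the usual KVM headers (linux/errno.h,
 * asm/kvm_ppc.h, asm/kvm_book3s.h) are already included. */
static int example_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
			       union kvmppc_one_reg *val)
{
	switch (id) {
	case KVM_REG_PPC_HIOR:
		/* 8-byte id, so get_reg_val() fills the dval member */
		*val = get_reg_val(id, to_book3s(vcpu)->hior);
		return 0;
	default:
		return -EINVAL;
	}
}

static int example_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
			       union kvmppc_one_reg *val)
{
	switch (id) {
	case KVM_REG_PPC_HIOR:
		/* set_reg_val() extracts the value at the width encoded in id */
		to_book3s(vcpu)->hior = set_reg_val(id, *val);
		return 0;
	default:
		return -EINVAL;
	}
}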
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index c4231973edd..3d6b4100dac 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -166,9 +166,6 @@ struct machdep_calls { unsigned long size, pgprot_t vma_prot); - /* Idle loop for this platform, leave empty for default idle loop */ - void (*idle_loop)(void); - /* * Function for waiting for work with reduced power in idle loop; * called with interrupts disabled. @@ -183,6 +180,10 @@ struct machdep_calls { int (*set_dabr)(unsigned long dabr, unsigned long dabrx); + /* Set DAWR for this platform, leave empty for default implemenation */ + int (*set_dawr)(unsigned long dawr, + unsigned long dawrx); + #ifdef CONFIG_PPC32 /* XXX for now */ /* A general init function, called by ppc_init in init/main.c. May be NULL. */ @@ -320,28 +321,28 @@ static inline void log_error(char *buf, unsigned int err_type, int fatal) ppc_md.log_error(buf, err_type, fatal); } -#define __define_machine_initcall(mach,level,fn,id) \ +#define __define_machine_initcall(mach, fn, id) \ static int __init __machine_initcall_##mach##_##fn(void) { \ if (machine_is(mach)) return fn(); \ return 0; \ } \ - __define_initcall(level,__machine_initcall_##mach##_##fn,id); - -#define machine_core_initcall(mach,fn) __define_machine_initcall(mach,"1",fn,1) -#define machine_core_initcall_sync(mach,fn) __define_machine_initcall(mach,"1s",fn,1s) -#define machine_postcore_initcall(mach,fn) __define_machine_initcall(mach,"2",fn,2) -#define machine_postcore_initcall_sync(mach,fn) __define_machine_initcall(mach,"2s",fn,2s) -#define machine_arch_initcall(mach,fn) __define_machine_initcall(mach,"3",fn,3) -#define machine_arch_initcall_sync(mach,fn) __define_machine_initcall(mach,"3s",fn,3s) -#define machine_subsys_initcall(mach,fn) __define_machine_initcall(mach,"4",fn,4) -#define machine_subsys_initcall_sync(mach,fn) __define_machine_initcall(mach,"4s",fn,4s) -#define machine_fs_initcall(mach,fn) __define_machine_initcall(mach,"5",fn,5) -#define machine_fs_initcall_sync(mach,fn) __define_machine_initcall(mach,"5s",fn,5s) -#define machine_rootfs_initcall(mach,fn) __define_machine_initcall(mach,"rootfs",fn,rootfs) -#define machine_device_initcall(mach,fn) __define_machine_initcall(mach,"6",fn,6) -#define machine_device_initcall_sync(mach,fn) __define_machine_initcall(mach,"6s",fn,6s) -#define machine_late_initcall(mach,fn) __define_machine_initcall(mach,"7",fn,7) -#define machine_late_initcall_sync(mach,fn) __define_machine_initcall(mach,"7s",fn,7s) + __define_initcall(__machine_initcall_##mach##_##fn, id); + +#define machine_core_initcall(mach, fn) __define_machine_initcall(mach, fn, 1) +#define machine_core_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 1s) +#define machine_postcore_initcall(mach, fn) __define_machine_initcall(mach, fn, 2) +#define machine_postcore_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 2s) +#define machine_arch_initcall(mach, fn) __define_machine_initcall(mach, fn, 3) +#define machine_arch_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 3s) +#define machine_subsys_initcall(mach, fn) __define_machine_initcall(mach, fn, 4) +#define machine_subsys_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 4s) +#define machine_fs_initcall(mach, fn) __define_machine_initcall(mach, fn, 5) +#define machine_fs_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 5s) +#define machine_rootfs_initcall(mach, fn) __define_machine_initcall(mach, fn, rootfs) +#define 
machine_device_initcall(mach, fn) __define_machine_initcall(mach, fn, 6) +#define machine_device_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 6s) +#define machine_late_initcall(mach, fn) __define_machine_initcall(mach, fn, 7) +#define machine_late_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 7s) #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_MACHDEP_H */ diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h index eeabcdbc30f..99d43e0c1e4 100644 --- a/arch/powerpc/include/asm/mmu-book3e.h +++ b/arch/powerpc/include/asm/mmu-book3e.h @@ -59,7 +59,7 @@ #define MAS1_TSIZE_SHIFT 7 #define MAS1_TSIZE(x) (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK) -#define MAS2_EPN 0xFFFFF000 +#define MAS2_EPN (~0xFFFUL) #define MAS2_X0 0x00000040 #define MAS2_X1 0x00000020 #define MAS2_W 0x00000010 diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index 9673f73eb8d..2fdb47a19ef 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h @@ -121,6 +121,16 @@ extern char initial_stab[]; #define PP_RXRX 3 /* Supervisor read, User read */ #define PP_RXXX (HPTE_R_PP0 | 2) /* Supervisor read, user none */ +/* Fields for tlbiel instruction in architecture 2.06 */ +#define TLBIEL_INVAL_SEL_MASK 0xc00 /* invalidation selector */ +#define TLBIEL_INVAL_PAGE 0x000 /* invalidate a single page */ +#define TLBIEL_INVAL_SET_LPID 0x800 /* invalidate a set for current LPID */ +#define TLBIEL_INVAL_SET 0xc00 /* invalidate a set for all LPIDs */ +#define TLBIEL_INVAL_SET_MASK 0xfff000 /* set number to inval. */ +#define TLBIEL_INVAL_SET_SHIFT 12 + +#define POWER7_TLB_SETS 128 /* # sets in POWER7 TLB */ + #ifndef __ASSEMBLY__ struct hash_pte { diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 5e38eedea21..691fd8aca93 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -101,6 +101,7 @@ #define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE #define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE #define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE +#define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE #define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \ MMU_FTR_CI_LARGE_PAGE #define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \ diff --git a/arch/powerpc/include/asm/mpc5121.h b/arch/powerpc/include/asm/mpc5121.h index 8c0ab2ca689..885c040d619 100644 --- a/arch/powerpc/include/asm/mpc5121.h +++ b/arch/powerpc/include/asm/mpc5121.h @@ -53,4 +53,21 @@ struct mpc512x_ccm { u32 m4ccr; /* MSCAN4 CCR */ u8 res[0x98]; /* Reserved */ }; + +/* + * LPC Module + */ +struct mpc512x_lpc { + u32 cs_cfg[8]; /* CS config */ + u32 cs_ctrl; /* CS Control Register */ + u32 cs_status; /* CS Status Register */ + u32 burst_ctrl; /* CS Burst Control Register */ + u32 deadcycle_ctrl; /* CS Deadcycle Control Register */ + u32 holdcycle_ctrl; /* CS Holdcycle Control Register */ + u32 alt; /* Address Latch Timing Register */ +}; + +int mpc512x_cs_config(unsigned int cs, u32 val); +int __init mpc5121_clk_init(void); + #endif /* __ASM_POWERPC_MPC5121_H__ */ diff --git a/arch/powerpc/include/asm/oprofile_impl.h b/arch/powerpc/include/asm/oprofile_impl.h index 639dc96077a..d697b08994c 100644 --- a/arch/powerpc/include/asm/oprofile_impl.h +++ b/arch/powerpc/include/asm/oprofile_impl.h @@ -34,7 +34,7 @@ struct op_system_config { unsigned long mmcra; #ifdef CONFIG_OPROFILE_CELL /* Register for oprofile user tool to 
check cell kernel profiling - * suport. + * support. */ unsigned long cell_support; #endif diff --git a/arch/powerpc/include/asm/pSeries_reconfig.h b/arch/powerpc/include/asm/pSeries_reconfig.h deleted file mode 100644 index c07edfe98b9..00000000000 --- a/arch/powerpc/include/asm/pSeries_reconfig.h +++ /dev/null @@ -1,47 +0,0 @@ -#ifndef _PPC64_PSERIES_RECONFIG_H -#define _PPC64_PSERIES_RECONFIG_H -#ifdef __KERNEL__ - -#include <linux/notifier.h> - -/* - * Use this API if your code needs to know about OF device nodes being - * added or removed on pSeries systems. - */ - -#define PSERIES_RECONFIG_ADD 0x0001 -#define PSERIES_RECONFIG_REMOVE 0x0002 -#define PSERIES_DRCONF_MEM_ADD 0x0003 -#define PSERIES_DRCONF_MEM_REMOVE 0x0004 -#define PSERIES_UPDATE_PROPERTY 0x0005 - -/** - * pSeries_reconfig_notify - Notifier value structure for OFDT property updates - * - * @node: Device tree node which owns the property being updated - * @property: Updated property - */ -struct pSeries_reconfig_prop_update { - struct device_node *node; - struct property *property; -}; - -#ifdef CONFIG_PPC_PSERIES -extern int pSeries_reconfig_notifier_register(struct notifier_block *); -extern void pSeries_reconfig_notifier_unregister(struct notifier_block *); -extern int pSeries_reconfig_notify(unsigned long action, void *p); -/* Not the best place to put this, will be fixed when we move some - * of the rtas suspend-me stuff to pseries */ -extern void pSeries_coalesce_init(void); -#else /* !CONFIG_PPC_PSERIES */ -static inline int pSeries_reconfig_notifier_register(struct notifier_block *nb) -{ - return 0; -} -static inline void pSeries_reconfig_notifier_unregister(struct notifier_block *nb) { } -static inline void pSeries_coalesce_init(void) { } -#endif /* CONFIG_PPC_PSERIES */ - - -#endif /* __KERNEL__ */ -#endif /* _PPC64_PSERIES_RECONFIG_H */ diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index e9e7a6999bb..77c91e74b61 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -93,9 +93,9 @@ struct paca_struct { * Now, starting in cacheline 2, the exception save areas */ /* used for most interrupts/exceptions */ - u64 exgen[11] __attribute__((aligned(0x80))); - u64 exmc[11]; /* used for machine checks */ - u64 exslb[11]; /* used for SLB/segment table misses + u64 exgen[12] __attribute__((aligned(0x80))); + u64 exmc[12]; /* used for machine checks */ + u64 exslb[12]; /* used for SLB/segment table misses * on the linear mapping */ /* SLB related definitions */ u16 vmalloc_sllp; @@ -137,6 +137,9 @@ struct paca_struct { u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */ u8 nap_state_lost; /* NV GPR values lost in power7_idle */ u64 sprg3; /* Saved user-visible sprg */ +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + u64 tm_scratch; /* TM scratch area for reclaim */ +#endif #ifdef CONFIG_PPC_POWERNV /* Pointer to OPAL machine check event structure set by the @@ -167,7 +170,6 @@ struct paca_struct { }; extern struct paca_struct *paca; -extern __initdata struct paca_struct boot_paca; extern void initialise_paca(struct paca_struct *new_paca, int cpu); extern void setup_paca(struct paca_struct *new_paca); extern void allocate_pacas(void); diff --git a/arch/powerpc/include/asm/parport.h b/arch/powerpc/include/asm/parport.h index 1ca1102b4a2..6dc2577932b 100644 --- a/arch/powerpc/include/asm/parport.h +++ b/arch/powerpc/include/asm/parport.h @@ -12,7 +12,7 @@ #include <asm/prom.h> -static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) +static 
int parport_pc_find_nonpci_ports (int autoirq, int autodma) { struct device_node *np; const u32 *prop; diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index 9710be3a2d1..d0aec72722e 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -11,6 +11,7 @@ #include <linux/types.h> #include <asm/hw_irq.h> +#include <linux/device.h> #define MAX_HWEVENTS 8 #define MAX_EVENT_ALTERNATIVES 8 @@ -35,6 +36,7 @@ struct power_pmu { void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]); int (*limited_pmc_event)(u64 event_id); u32 flags; + const struct attribute_group **attr_groups; int n_generic; int *generic_events; int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] @@ -45,11 +47,11 @@ struct power_pmu { /* * Values for power_pmu.flags */ -#define PPMU_LIMITED_PMC5_6 1 /* PMC5/6 have limited function */ -#define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */ -#define PPMU_NO_SIPR 4 /* no SIPR/HV in MMCRA at all */ -#define PPMU_NO_CONT_SAMPLING 8 /* no continuous sampling */ -#define PPMU_SIAR_VALID 16 /* Processor has SIAR Valid bit */ +#define PPMU_LIMITED_PMC5_6 0x00000001 /* PMC5/6 have limited function */ +#define PPMU_ALT_SIPR 0x00000002 /* uses alternate posn for SIPR/HV */ +#define PPMU_NO_SIPR 0x00000004 /* no SIPR/HV in MMCRA at all */ +#define PPMU_NO_CONT_SAMPLING 0x00000008 /* no continuous sampling */ +#define PPMU_SIAR_VALID 0x00000010 /* Processor has SIAR Valid bit */ /* * Values for flags to get_alternatives() @@ -109,3 +111,27 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs); * If an event_id is not subject to the constraint expressed by a particular * field, then it will have 0 in both the mask and value for that field. */ + +extern ssize_t power_events_sysfs_show(struct device *dev, + struct device_attribute *attr, char *page); + +/* + * EVENT_VAR() is same as PMU_EVENT_VAR with a suffix. + * + * Having a suffix allows us to have aliases in sysfs - eg: the generic + * event 'cpu-cycles' can have two entries in sysfs: 'cpu-cycles' and + * 'PM_CYC' where the latter is the name by which the event is known in + * POWER CPU specification. + */ +#define EVENT_VAR(_id, _suffix) event_attr_##_id##_suffix +#define EVENT_PTR(_id, _suffix) &EVENT_VAR(_id, _suffix).attr.attr + +#define EVENT_ATTR(_name, _id, _suffix) \ + PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_PM_##_id, \ + power_events_sysfs_show) + +#define GENERIC_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _g) +#define GENERIC_EVENT_PTR(_id) EVENT_PTR(_id, _g) + +#define POWER_EVENT_ATTR(_name, _id) EVENT_ATTR(PM_##_name, _id, _p) +#define POWER_EVENT_PTR(_id) EVENT_PTR(_id, _p) diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 5f73ce63fca..8752bc8e34a 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -1,5 +1,5 @@ /* - * Copyright 2009 Freescale Semicondutor, Inc. + * Copyright 2009 Freescale Semiconductor, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -86,6 +86,7 @@ #define PPC_INST_DCBA_MASK 0xfc0007fe #define PPC_INST_DCBAL 0x7c2005ec #define PPC_INST_DCBZL 0x7c2007ec +#define PPC_INST_ICBT 0x7c00002c #define PPC_INST_ISEL 0x7c00001e #define PPC_INST_ISEL_MASK 0xfc00003e #define PPC_INST_LDARX 0x7c0000a8 @@ -99,6 +100,7 @@ #define PPC_INST_MFSPR_PVR 0x7c1f42a6 #define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff #define PPC_INST_MSGSND 0x7c00019c +#define PPC_INST_MSGSNDP 0x7c00011c #define PPC_INST_NOP 0x60000000 #define PPC_INST_POPCNTB 0x7c0000f4 #define PPC_INST_POPCNTB_MASK 0xfc0007fe @@ -127,6 +129,9 @@ #define PPC_INST_TLBSRX_DOT 0x7c0006a5 #define PPC_INST_XXLOR 0xf0000510 #define PPC_INST_XVCPSGNDP 0xf0000780 +#define PPC_INST_TRECHKPT 0x7c0007dd +#define PPC_INST_TRECLAIM 0x7c00075d +#define PPC_INST_TABORT 0x7c00071d #define PPC_INST_NAP 0x4c000364 #define PPC_INST_SLEEP 0x4c0003a4 @@ -168,9 +173,12 @@ #define PPC_INST_AND 0x7c000038 #define PPC_INST_ANDDOT 0x7c000039 #define PPC_INST_OR 0x7c000378 +#define PPC_INST_XOR 0x7c000278 #define PPC_INST_ANDI 0x70000000 #define PPC_INST_ORI 0x60000000 #define PPC_INST_ORIS 0x64000000 +#define PPC_INST_XORI 0x68000000 +#define PPC_INST_XORIS 0x6c000000 #define PPC_INST_NEG 0x7c0000d0 #define PPC_INST_BRANCH 0x48000000 #define PPC_INST_BRANCH_COND 0x40800000 @@ -198,6 +206,7 @@ #define __PPC_MB(s) (((s) & 0x1f) << 6) #define __PPC_ME(s) (((s) & 0x1f) << 1) #define __PPC_BI(s) (((s) & 0x1f) << 16) +#define __PPC_CT(t) (((t) & 0x0f) << 21) /* * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a @@ -222,6 +231,8 @@ ___PPC_RB(b) | __PPC_EH(eh)) #define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \ ___PPC_RB(b)) +#define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \ + ___PPC_RB(b)) #define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \ __PPC_RA(a) | __PPC_RS(s)) #define PPC_POPCNTD(a, s) stringify_in_c(.long PPC_INST_POPCNTD | \ @@ -260,6 +271,8 @@ __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b)) #define PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \ __PPC_RT(t) | __PPC_RB(b)) +#define PPC_ICBT(c,a,b) stringify_in_c(.long PPC_INST_ICBT | \ + __PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b)) /* PASemi instructions */ #define LBZCIX(t,a,b) stringify_in_c(.long PPC_INST_LBZCIX | \ __PPC_RT(t) | __PPC_RA(a) | __PPC_RB(b)) @@ -284,4 +297,11 @@ #define PPC_NAP stringify_in_c(.long PPC_INST_NAP) #define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP) +/* Transactional memory instructions */ +#define TRECHKPT stringify_in_c(.long PPC_INST_TRECHKPT) +#define TRECLAIM(r) stringify_in_c(.long PPC_INST_TRECLAIM \ + | __PPC_RA(r)) +#define TABORT(r) stringify_in_c(.long PPC_INST_TABORT \ + | __PPC_RA(r)) + #endif /* _ASM_POWERPC_PPC_OPCODE_H */ diff --git a/arch/powerpc/include/asm/ppc4xx_ocm.h b/arch/powerpc/include/asm/ppc4xx_ocm.h new file mode 100644 index 00000000000..6ce90460553 --- /dev/null +++ b/arch/powerpc/include/asm/ppc4xx_ocm.h @@ -0,0 +1,45 @@ +/* + * PowerPC 4xx OCM memory allocation support + * + * (C) Copyright 2009, Applied Micro Circuits Corporation + * Victor Gallardo (vgallardo@amcc.com) + * + * See file CREDITS for list of people who contributed to this + * project. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#ifndef __ASM_POWERPC_PPC4XX_OCM_H__ +#define __ASM_POWERPC_PPC4XX_OCM_H__ + +#define PPC4XX_OCM_NON_CACHED 0 +#define PPC4XX_OCM_CACHED 1 + +#if defined(CONFIG_PPC4xx_OCM) + +void *ppc4xx_ocm_alloc(phys_addr_t *phys, int size, int align, + int flags, const char *owner); +void ppc4xx_ocm_free(const void *virt); + +#else + +#define ppc4xx_ocm_alloc(phys, size, align, flags, owner) NULL +#define ppc4xx_ocm_free(addr) ((void)0) + +#endif /* CONFIG_PPC4xx_OCM */ + +#endif /* __ASM_POWERPC_PPC4XX_OCM_H__ */ diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index ea2a86e8ff9..cea8496091f 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -24,13 +24,12 @@ * user_time and system_time fields in the paca. */ -#ifndef CONFIG_VIRT_CPU_ACCOUNTING +#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE #define ACCOUNT_CPU_USER_ENTRY(ra, rb) #define ACCOUNT_CPU_USER_EXIT(ra, rb) #define ACCOUNT_STOLEN_TIME #else #define ACCOUNT_CPU_USER_ENTRY(ra, rb) \ - beq 2f; /* if from kernel mode */ \ MFTB(ra); /* get timebase */ \ ld rb,PACA_STARTTIME_USER(r13); \ std ra,PACA_STARTTIME(r13); \ @@ -38,7 +37,6 @@ ld ra,PACA_USER_TIME(r13); \ add ra,ra,rb; /* add on to user time */ \ std ra,PACA_USER_TIME(r13); \ -2: #define ACCOUNT_CPU_USER_EXIT(ra, rb) \ MFTB(ra); /* get timebase */ \ @@ -70,7 +68,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) #endif /* CONFIG_PPC_SPLPAR */ -#endif /* CONFIG_VIRT_CPU_ACCOUNTING */ +#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ /* * Macros for storing registers into and loading registers from @@ -125,6 +123,89 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) #define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base) #define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base) +/* Save/restore FPRs, VRs and VSRs from their checkpointed backups in + * thread_struct: + */ +#define SAVE_FPR_TRANSACT(n, base) stfd n,THREAD_TRANSACT_FPR0+ \ + 8*TS_FPRWIDTH*(n)(base) +#define SAVE_2FPRS_TRANSACT(n, base) SAVE_FPR_TRANSACT(n, base); \ + SAVE_FPR_TRANSACT(n+1, base) +#define SAVE_4FPRS_TRANSACT(n, base) SAVE_2FPRS_TRANSACT(n, base); \ + SAVE_2FPRS_TRANSACT(n+2, base) +#define SAVE_8FPRS_TRANSACT(n, base) SAVE_4FPRS_TRANSACT(n, base); \ + SAVE_4FPRS_TRANSACT(n+4, base) +#define SAVE_16FPRS_TRANSACT(n, base) SAVE_8FPRS_TRANSACT(n, base); \ + SAVE_8FPRS_TRANSACT(n+8, base) +#define SAVE_32FPRS_TRANSACT(n, base) SAVE_16FPRS_TRANSACT(n, base); \ + SAVE_16FPRS_TRANSACT(n+16, base) + +#define REST_FPR_TRANSACT(n, base) lfd n,THREAD_TRANSACT_FPR0+ \ + 8*TS_FPRWIDTH*(n)(base) +#define REST_2FPRS_TRANSACT(n, base) REST_FPR_TRANSACT(n, base); \ + REST_FPR_TRANSACT(n+1, base) +#define REST_4FPRS_TRANSACT(n, base) REST_2FPRS_TRANSACT(n, base); \ + REST_2FPRS_TRANSACT(n+2, base) +#define 
REST_8FPRS_TRANSACT(n, base) REST_4FPRS_TRANSACT(n, base); \ + REST_4FPRS_TRANSACT(n+4, base) +#define REST_16FPRS_TRANSACT(n, base) REST_8FPRS_TRANSACT(n, base); \ + REST_8FPRS_TRANSACT(n+8, base) +#define REST_32FPRS_TRANSACT(n, base) REST_16FPRS_TRANSACT(n, base); \ + REST_16FPRS_TRANSACT(n+16, base) + + +#define SAVE_VR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VR0+(16*(n)); \ + stvx n,b,base +#define SAVE_2VRS_TRANSACT(n,b,base) SAVE_VR_TRANSACT(n,b,base); \ + SAVE_VR_TRANSACT(n+1,b,base) +#define SAVE_4VRS_TRANSACT(n,b,base) SAVE_2VRS_TRANSACT(n,b,base); \ + SAVE_2VRS_TRANSACT(n+2,b,base) +#define SAVE_8VRS_TRANSACT(n,b,base) SAVE_4VRS_TRANSACT(n,b,base); \ + SAVE_4VRS_TRANSACT(n+4,b,base) +#define SAVE_16VRS_TRANSACT(n,b,base) SAVE_8VRS_TRANSACT(n,b,base); \ + SAVE_8VRS_TRANSACT(n+8,b,base) +#define SAVE_32VRS_TRANSACT(n,b,base) SAVE_16VRS_TRANSACT(n,b,base); \ + SAVE_16VRS_TRANSACT(n+16,b,base) + +#define REST_VR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VR0+(16*(n)); \ + lvx n,b,base +#define REST_2VRS_TRANSACT(n,b,base) REST_VR_TRANSACT(n,b,base); \ + REST_VR_TRANSACT(n+1,b,base) +#define REST_4VRS_TRANSACT(n,b,base) REST_2VRS_TRANSACT(n,b,base); \ + REST_2VRS_TRANSACT(n+2,b,base) +#define REST_8VRS_TRANSACT(n,b,base) REST_4VRS_TRANSACT(n,b,base); \ + REST_4VRS_TRANSACT(n+4,b,base) +#define REST_16VRS_TRANSACT(n,b,base) REST_8VRS_TRANSACT(n,b,base); \ + REST_8VRS_TRANSACT(n+8,b,base) +#define REST_32VRS_TRANSACT(n,b,base) REST_16VRS_TRANSACT(n,b,base); \ + REST_16VRS_TRANSACT(n+16,b,base) + + +#define SAVE_VSR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VSR0+(16*(n)); \ + STXVD2X(n,R##base,R##b) +#define SAVE_2VSRS_TRANSACT(n,b,base) SAVE_VSR_TRANSACT(n,b,base); \ + SAVE_VSR_TRANSACT(n+1,b,base) +#define SAVE_4VSRS_TRANSACT(n,b,base) SAVE_2VSRS_TRANSACT(n,b,base); \ + SAVE_2VSRS_TRANSACT(n+2,b,base) +#define SAVE_8VSRS_TRANSACT(n,b,base) SAVE_4VSRS_TRANSACT(n,b,base); \ + SAVE_4VSRS_TRANSACT(n+4,b,base) +#define SAVE_16VSRS_TRANSACT(n,b,base) SAVE_8VSRS_TRANSACT(n,b,base); \ + SAVE_8VSRS_TRANSACT(n+8,b,base) +#define SAVE_32VSRS_TRANSACT(n,b,base) SAVE_16VSRS_TRANSACT(n,b,base); \ + SAVE_16VSRS_TRANSACT(n+16,b,base) + +#define REST_VSR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VSR0+(16*(n)); \ + LXVD2X(n,R##base,R##b) +#define REST_2VSRS_TRANSACT(n,b,base) REST_VSR_TRANSACT(n,b,base); \ + REST_VSR_TRANSACT(n+1,b,base) +#define REST_4VSRS_TRANSACT(n,b,base) REST_2VSRS_TRANSACT(n,b,base); \ + REST_2VSRS_TRANSACT(n+2,b,base) +#define REST_8VSRS_TRANSACT(n,b,base) REST_4VSRS_TRANSACT(n,b,base); \ + REST_4VSRS_TRANSACT(n+4,b,base) +#define REST_16VSRS_TRANSACT(n,b,base) REST_8VSRS_TRANSACT(n,b,base); \ + REST_8VSRS_TRANSACT(n+8,b,base) +#define REST_32VSRS_TRANSACT(n,b,base) REST_16VSRS_TRANSACT(n,b,base); \ + REST_16VSRS_TRANSACT(n+16,b,base) + /* Save the lower 32 VSRs in the thread VSR region */ #define SAVE_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); STXVD2X(n,R##base,R##b) #define SAVE_2VSRS(n,b,base) SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base) @@ -391,6 +472,31 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) FTR_SECTION_ELSE_NESTED(848); \ mtocrf (FXM), RS; \ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848) + +/* + * PPR restore macros used in entry_64.S + * Used for P7 or later processors + */ +#define HMT_MEDIUM_LOW_HAS_PPR \ +BEGIN_FTR_SECTION_NESTED(944) \ + HMT_MEDIUM_LOW; \ +END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,944) + +#define SET_DEFAULT_THREAD_PPR(ra, rb) \ +BEGIN_FTR_SECTION_NESTED(945) \ + lis ra,INIT_PPR@highest; /* default ppr=3 */ \ + ld rb,PACACURRENT(r13); \ + 
sldi ra,ra,32; /* 11- 13 bits are used for ppr */ \ + std ra,TASKTHREADPPR(rb); \ +END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945) + +#define RESTORE_PPR(ra, rb) \ +BEGIN_FTR_SECTION_NESTED(946) \ + ld ra,PACACURRENT(r13); \ + ld rb,TASKTHREADPPR(ra); \ + mtspr SPRN_PPR,rb; /* Restore PPR */ \ +END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946) + #endif /* diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 87502046c0d..7ff9eaa3ea6 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -18,11 +18,22 @@ #define TS_FPRWIDTH 1 #endif +#ifdef CONFIG_PPC64 +/* Default SMT priority is set to 3. Use 11- 13bits to save priority. */ +#define PPR_PRIORITY 3 +#ifdef __ASSEMBLY__ +#define INIT_PPR (PPR_PRIORITY << 50) +#else +#define INIT_PPR ((u64)PPR_PRIORITY << 50) +#endif /* __ASSEMBLY__ */ +#endif /* CONFIG_PPC64 */ + #ifndef __ASSEMBLY__ #include <linux/compiler.h> #include <linux/cache.h> #include <asm/ptrace.h> #include <asm/types.h> +#include <asm/hw_breakpoint.h> /* We do _not_ want to define new machine types at all, those must die * in favor of using the device-tree @@ -141,6 +152,7 @@ typedef struct { #define TS_FPROFFSET 0 #define TS_VSRLOWOFFSET 1 #define TS_FPR(i) fpr[i][TS_FPROFFSET] +#define TS_TRANS_FPR(i) transact_fpr[i][TS_FPROFFSET] struct thread_struct { unsigned long ksp; /* Kernel stack pointer */ @@ -215,8 +227,7 @@ struct thread_struct { struct perf_event *last_hit_ubp; #endif /* CONFIG_HAVE_HW_BREAKPOINT */ #endif - unsigned long dabr; /* Data address breakpoint register */ - unsigned long dabrx; /* ... extension */ + struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */ unsigned long trap_nr; /* last trap # on this thread */ #ifdef CONFIG_ALTIVEC /* Complete AltiVec register set */ @@ -236,6 +247,34 @@ struct thread_struct { unsigned long spefscr; /* SPE & eFP status */ int used_spe; /* set if process has used spe */ #endif /* CONFIG_SPE */ +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + u64 tm_tfhar; /* Transaction fail handler addr */ + u64 tm_texasr; /* Transaction exception & summary */ + u64 tm_tfiar; /* Transaction fail instr address reg */ + unsigned long tm_orig_msr; /* Thread's MSR on ctx switch */ + struct pt_regs ckpt_regs; /* Checkpointed registers */ + + /* + * Transactional FP and VSX 0-31 register set. + * NOTE: the sense of these is the opposite of the integer ckpt_regs! + * + * When a transaction is active/signalled/scheduled etc., *regs is the + * most recent set of/speculated GPRs with ckpt_regs being the older + * checkpointed regs to which we roll back if transaction aborts. + * + * However, fpr[] is the checkpointed 'base state' of FP regs, and + * transact_fpr[] is the new set of transactional values. + * VRs work the same way. 
+ */ + double transact_fpr[32][TS_FPRWIDTH]; + struct { + unsigned int pad; + unsigned int val; /* Floating point status */ + } transact_fpscr; + vector128 transact_vr[32] __attribute__((aligned(16))); + vector128 transact_vscr __attribute__((aligned(16))); + unsigned long transact_vrsave; +#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ #ifdef CONFIG_KVM_BOOK3S_32_HANDLER void* kvm_shadow_vcpu; /* KVM internal data */ #endif /* CONFIG_KVM_BOOK3S_32_HANDLER */ @@ -245,6 +284,10 @@ struct thread_struct { #ifdef CONFIG_PPC64 unsigned long dscr; int dscr_inherit; + unsigned long ppr; /* used to save/restore SMT priority */ +#endif +#ifdef CONFIG_PPC_BOOK3S_64 + unsigned long tar; #endif }; @@ -278,6 +321,7 @@ struct thread_struct { .fpr = {{0}}, \ .fpscr = { .val = 0, }, \ .fpexc_mode = 0, \ + .ppr = INIT_PPR, \ } #endif diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index b5c91901e38..99c92d5363e 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h @@ -58,6 +58,22 @@ static inline int of_node_to_nid(struct device_node *device) { return 0; } extern void of_instantiate_rtc(void); +/* The of_drconf_cell struct defines the layout of the LMB array + * specified in the device tree property + * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory + */ +struct of_drconf_cell { + u64 base_addr; + u32 drc_index; + u32 reserved; + u32 aa_index; + u32 flags; +}; + +#define DRCONF_MEM_ASSIGNED 0x00000008 +#define DRCONF_MEM_AI_INVALID 0x00000040 +#define DRCONF_MEM_RESERVED 0x00000080 + /* These includes are put at the bottom because they may contain things * that are overridden by this file. Ideally they shouldn't be included * by this file, but there are a bunch of .c files that currently depend diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h index 0e15db4d703..678a7c1d9cb 100644 --- a/arch/powerpc/include/asm/ps3.h +++ b/arch/powerpc/include/asm/ps3.h @@ -245,7 +245,7 @@ enum lv1_result { static inline const char* ps3_result(int result) { -#if defined(DEBUG) +#if defined(DEBUG) || defined(PS3_VERBOSE_RESULT) switch (result) { case LV1_SUCCESS: return "LV1_SUCCESS (0)"; diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h index eedf427c912..3e13e23e4fd 100644 --- a/arch/powerpc/include/asm/pte-hash64-64k.h +++ b/arch/powerpc/include/asm/pte-hash64-64k.h @@ -23,7 +23,7 @@ /* Note the full page bits must be in the same location as for normal * 4k pages as the same assembly will be used to insert 64K pages - * wether the kernel has CONFIG_PPC_64K_PAGES or not + * whether the kernel has CONFIG_PPC_64K_PAGES or not */ #define _PAGE_F_SECOND 0x00008000 /* full page: hidx bits */ #define _PAGE_F_GIX 0x00007000 /* full page: hidx bits */ diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h index 229571a4939..32b9bfa0c9b 100644 --- a/arch/powerpc/include/asm/qe.h +++ b/arch/powerpc/include/asm/qe.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved. + * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. * * Authors: Shlomi Gridish <gridish@freescale.com> * Li Yang <leoli@freescale.com> diff --git a/arch/powerpc/include/asm/qe_ic.h b/arch/powerpc/include/asm/qe_ic.h index f706164b0bd..25784cc959a 100644 --- a/arch/powerpc/include/asm/qe_ic.h +++ b/arch/powerpc/include/asm/qe_ic.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved. 
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. * * Authors: Shlomi Gridish <gridish@freescale.com> * Li Yang <leoli@freescale.com> diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index d24c1416396..c9c67fc888c 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -29,6 +29,10 @@ #define MSR_SF_LG 63 /* Enable 64 bit mode */ #define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */ #define MSR_HV_LG 60 /* Hypervisor state */ +#define MSR_TS_T_LG 34 /* Trans Mem state: Transactional */ +#define MSR_TS_S_LG 33 /* Trans Mem state: Suspended */ +#define MSR_TS_LG 33 /* Trans Mem state (2 bits) */ +#define MSR_TM_LG 32 /* Trans Mem Available */ #define MSR_VEC_LG 25 /* Enable AltiVec */ #define MSR_VSX_LG 23 /* Enable VSX */ #define MSR_POW_LG 18 /* Enable Power Management */ @@ -98,6 +102,26 @@ #define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */ #define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */ +#define MSR_TM __MASK(MSR_TM_LG) /* Transactional Mem Available */ +#define MSR_TS_N 0 /* Non-transactional */ +#define MSR_TS_S __MASK(MSR_TS_S_LG) /* Transaction Suspended */ +#define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */ +#define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */ +#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */ +#define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T) +#define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S) + +/* Reason codes describing kernel causes for transaction aborts. By + convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if + the failure is persistent. +*/ +#define TM_CAUSE_RESCHED 0xfe +#define TM_CAUSE_TLBI 0xfc +#define TM_CAUSE_FAC_UNAV 0xfa +#define TM_CAUSE_SYSCALL 0xf9 /* Persistent */ +#define TM_CAUSE_MISC 0xf6 +#define TM_CAUSE_SIGNAL 0xf4 + #if defined(CONFIG_PPC_BOOK3S_64) #define MSR_64BIT MSR_SF @@ -193,6 +217,10 @@ #define SPRN_UAMOR 0x9d /* User Authority Mask Override Register */ #define SPRN_AMOR 0x15d /* Authority Mask Override Register */ #define SPRN_ACOP 0x1F /* Available Coprocessor Register */ +#define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */ +#define SPRN_TEXASR 0x82 /* Transaction EXception & Summary */ +#define SPRN_TEXASRU 0x83 /* '' '' '' Upper 32 */ +#define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */ #define SPRN_CTRLF 0x088 #define SPRN_CTRLT 0x098 #define CTRL_CT 0xc0000000 /* current thread */ @@ -200,10 +228,12 @@ #define CTRL_CT1 0x40000000 /* thread 1 */ #define CTRL_TE 0x00c00000 /* thread enable */ #define CTRL_RUNLATCH 0x1 +#define SPRN_DAWR 0xB4 +#define SPRN_DAWRX 0xBC +#define DAWRX_USER (1UL << 0) +#define DAWRX_KERNEL (1UL << 1) +#define DAWRX_HYP (1UL << 2) #define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */ -#define DABR_TRANSLATION (1UL << 2) -#define DABR_DATA_WRITE (1UL << 1) -#define DABR_DATA_READ (1UL << 0) #define SPRN_DABR2 0x13D /* e300 */ #define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */ #define DABRX_USER (1UL << 0) @@ -235,6 +265,10 @@ #define SPRN_HRMOR 0x139 /* Real mode offset register */ #define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */ #define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */ +#define SPRN_FSCR 0x099 /* Facility Status & Control Register */ +#define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */ +#define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */ +#define SPRN_TAR 0x32f /* Target Address 
Register */ #define SPRN_LPCR 0x13E /* LPAR Control Register */ #define LPCR_VPM0 (1ul << (63-0)) #define LPCR_VPM1 (1ul << (63-1)) @@ -249,6 +283,8 @@ #define LPCR_RMLS 0x1C000000 /* impl dependent rmo limit sel */ #define LPCR_RMLS_SH (63-37) #define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */ +#define LPCR_AIL_0 0x00000000 /* MMU off exception offset 0x0 */ +#define LPCR_AIL_3 0x01800000 /* MMU on exception offset 0xc00...4xxx */ #define LPCR_PECE 0x00007000 /* powersave exit cause enable */ #define LPCR_PECE0 0x00004000 /* ext. exceptions can cause exit */ #define LPCR_PECE1 0x00002000 /* decrementer can cause exit */ @@ -287,6 +323,7 @@ #define SPRN_DBAT6U 0x23C /* Data BAT 6 Upper Register */ #define SPRN_DBAT7L 0x23F /* Data BAT 7 Lower Register */ #define SPRN_DBAT7U 0x23E /* Data BAT 7 Upper Register */ +#define SPRN_PPR 0x380 /* SMT Thread status Register */ #define SPRN_DEC 0x016 /* Decrement Register */ #define SPRN_DER 0x095 /* Debug Enable Regsiter */ @@ -481,6 +518,7 @@ #ifndef SPRN_PIR #define SPRN_PIR 0x3FF /* Processor Identification Register */ #endif +#define SPRN_TIR 0x1BE /* Thread Identification Register */ #define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */ #define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */ #define SPRN_PURR 0x135 /* Processor Utilization of Resources Reg */ @@ -518,6 +556,7 @@ #define SRR1_WS_DEEPER 0x00020000 /* Some resources not maintained */ #define SRR1_WS_DEEP 0x00010000 /* All resources maintained */ #define SRR1_PROGFPE 0x00100000 /* Floating Point Enabled */ +#define SRR1_PROGILL 0x00080000 /* Illegal instruction */ #define SRR1_PROGPRIV 0x00040000 /* Privileged instruction */ #define SRR1_PROGTRAP 0x00020000 /* Trap */ #define SRR1_PROGADDR 0x00010000 /* SRR0 contains subsequent addr */ @@ -760,7 +799,7 @@ * HV mode in which case it is HSPRG0 * * 64-bit server: - * - SPRG0 unused (reserved for HV on Power4) + * - SPRG0 scratch for TM recheckpoint/reclaim (reserved for HV on Power4) * - SPRG2 scratch for exception vectors * - SPRG3 CPU and NUMA node for VDSO getcpu (user visible) * - HSPRG0 stores PACA in HV mode @@ -918,8 +957,6 @@ #define SPRN_SPRG_RSCRATCH_DBG SPRN_SPRG9 #define SPRN_SPRG_WSCRATCH_DBG SPRN_SPRG9 #endif -#define SPRN_SPRG_RVCPU SPRN_SPRG1 -#define SPRN_SPRG_WVCPU SPRN_SPRG1 #endif #ifdef CONFIG_8xx @@ -1029,6 +1066,7 @@ #define PVR_970MP 0x0044 #define PVR_970GX 0x0045 #define PVR_POWER7p 0x004A +#define PVR_POWER8 0x004B #define PVR_BE 0x0070 #define PVR_PA6T 0x0090 diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index 2d916c4982c..b417de3cc2c 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -56,6 +56,7 @@ #define SPRN_SPRG7W 0x117 /* Special Purpose Register General 7 Write */ #define SPRN_EPCR 0x133 /* Embedded Processor Control Register */ #define SPRN_DBCR2 0x136 /* Debug Control Register 2 */ +#define SPRN_DBCR4 0x233 /* Debug Control Register 4 */ #define SPRN_MSRP 0x137 /* MSR Protect Register */ #define SPRN_IAC3 0x13A /* Instruction Address Compare 3 */ #define SPRN_IAC4 0x13B /* Instruction Address Compare 4 */ @@ -539,6 +540,13 @@ #define TCR_FIE 0x00800000 /* FIT Interrupt Enable */ #define TCR_ARE 0x00400000 /* Auto Reload Enable */ +#ifdef CONFIG_E500 +#define TCR_GET_WP(tcr) ((((tcr) & 0xC0000000) >> 30) | \ + (((tcr) & 0x1E0000) >> 15)) +#else +#define TCR_GET_WP(tcr) (((tcr) & 0xC0000000) >> 30) +#endif + /* Bit definitions for the TSR. 
*/ #define TSR_ENW 0x80000000 /* Enable Next Watchdog */ #define TSR_WIS 0x40000000 /* WDT Interrupt Status */ diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 557cff845de..aef00c67590 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -353,8 +353,13 @@ static inline int page_is_rtas_user_buf(unsigned long pfn) return 1; return 0; } + +/* Not the best place to put pSeries_coalesce_init, will be fixed when we + * move some of the rtas suspend-me stuff to pseries */ +extern void pSeries_coalesce_init(void); #else static inline int page_is_rtas_user_buf(unsigned long pfn) { return 0;} +static inline void pSeries_coalesce_init(void) { } #endif extern int call_rtas(const char *, int, int, unsigned long *, ...); diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index a0f358d4a00..4ee06fe15de 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h @@ -10,6 +10,9 @@ extern char __end_interrupts[]; +extern char __prom_init_toc_start[]; +extern char __prom_init_toc_end[]; + static inline int in_kernel_text(unsigned long addr) { if (addr >= (unsigned long)_stext && addr < (unsigned long)__init_end) diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h new file mode 100644 index 00000000000..d3ca85529b8 --- /dev/null +++ b/arch/powerpc/include/asm/setup.h @@ -0,0 +1,29 @@ +#ifndef _ASM_POWERPC_SETUP_H +#define _ASM_POWERPC_SETUP_H + +#include <uapi/asm/setup.h> + +#ifndef __ASSEMBLY__ +extern void ppc_printk_progress(char *s, unsigned short hex); + +extern unsigned int rtas_data; +extern int mem_init_done; /* set on boot once kmalloc can be called */ +extern int init_bootmem_done; /* set once bootmem is available */ +extern unsigned long long memory_limit; +extern unsigned long klimit; +extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); + +struct device_node; +extern void note_scsi_host(struct device_node *, void *); + +/* Used in very early kernel initialization. 
*/ +extern unsigned long reloc_offset(void); +extern unsigned long add_reloc_offset(unsigned long); +extern void reloc_got2(unsigned long); + +#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_POWERPC_SETUP_H */ + diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h index 189998bb61c..fbe66c46389 100644 --- a/arch/powerpc/include/asm/signal.h +++ b/arch/powerpc/include/asm/signal.h @@ -1,8 +1,7 @@ #ifndef _ASM_POWERPC_SIGNAL_H #define _ASM_POWERPC_SIGNAL_H +#define __ARCH_HAS_SA_RESTORER #include <uapi/asm/signal.h> -struct pt_regs; -#define ptrace_signal_deliver(regs, cookie) do { } while (0) #endif /* _ASM_POWERPC_SIGNAL_H */ diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index e807e9d8e3f..195ce2ac569 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -54,8 +54,8 @@ struct smp_ops_t { extern void smp_send_debugger_break(void); extern void start_secondary_resume(void); -extern void __devinit smp_generic_give_timebase(void); -extern void __devinit smp_generic_take_timebase(void); +extern void smp_generic_give_timebase(void); +extern void smp_generic_take_timebase(void); DECLARE_PER_CPU(unsigned int, cpu_pvr); @@ -67,6 +67,14 @@ void generic_mach_cpu_die(void); void generic_set_cpu_dead(unsigned int cpu); void generic_set_cpu_up(unsigned int cpu); int generic_check_cpu_restart(unsigned int cpu); + +extern void inhibit_secondary_onlining(void); +extern void uninhibit_secondary_onlining(void); + +#else /* HOTPLUG_CPU */ +static inline void inhibit_secondary_onlining(void) {} +static inline void uninhibit_secondary_onlining(void) {} + #endif #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/include/asm/smu.h b/arch/powerpc/include/asm/smu.h index ae20ce1af4c..6e909f3e6a4 100644 --- a/arch/powerpc/include/asm/smu.h +++ b/arch/powerpc/include/asm/smu.h @@ -132,7 +132,7 @@ * * At this point, the OF driver seems to have a limitation on transfer * sizes of 0xd bytes on reads and 0x5 bytes on writes. I do not know - * wether this is just an OF limit due to some temporary buffer size + * whether this is just an OF limit due to some temporary buffer size * or if this is an SMU imposed limit. This driver has the same limitation * for now as I use a 0x10 bytes temporary buffer as well * @@ -236,7 +236,7 @@ * 3 (optional): enable nmi? [0x00 or 0x01] * * Returns: - * If parameter 2 is 0x00 and parameter 3 is not specified, returns wether + * If parameter 2 is 0x00 and parameter 3 is not specified, returns whether * NMI is enabled. Otherwise unknown. 
*/ #define SMU_CMD_MISC_df_NMI_OPTION 0x04 diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 7124fc06ad4..5b23f910ee5 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -96,7 +96,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) #if defined(CONFIG_PPC_SPLPAR) /* We only yield to the hypervisor if we are in shared processor mode */ -#define SHARED_PROCESSOR (get_lppaca()->shared_proc) +#define SHARED_PROCESSOR (local_paca->lppaca_ptr->shared_proc) extern void __spin_yield(arch_spinlock_t *lock); extern void __rw_yield(arch_rwlock_t *lock); #else /* SPLPAR */ diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h index 329db4ec12c..23be8f1e7e6 100644 --- a/arch/powerpc/include/asm/syscalls.h +++ b/arch/powerpc/include/asm/syscalls.h @@ -5,11 +5,8 @@ #include <linux/compiler.h> #include <linux/linkage.h> #include <linux/types.h> -#include <asm/signal.h> -struct pt_regs; struct rtas_args; -struct sigaction; asmlinkage unsigned long sys_mmap(unsigned long addr, size_t len, unsigned long prot, unsigned long flags, @@ -17,29 +14,8 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, size_t len, asmlinkage unsigned long sys_mmap2(unsigned long addr, size_t len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff); -asmlinkage int sys_clone(unsigned long clone_flags, unsigned long usp, - int __user *parent_tidp, void __user *child_threadptr, - int __user *child_tidp, int p6, struct pt_regs *regs); -asmlinkage int sys_fork(unsigned long p1, unsigned long p2, - unsigned long p3, unsigned long p4, unsigned long p5, - unsigned long p6, struct pt_regs *regs); -asmlinkage int sys_vfork(unsigned long p1, unsigned long p2, - unsigned long p3, unsigned long p4, unsigned long p5, - unsigned long p6, struct pt_regs *regs); -asmlinkage long sys_pipe(int __user *fildes); -asmlinkage long sys_pipe2(int __user *fildes, int flags); -asmlinkage long sys_rt_sigaction(int sig, - const struct sigaction __user *act, - struct sigaction __user *oact, size_t sigsetsize); asmlinkage long ppc64_personality(unsigned long personality); asmlinkage int ppc_rtas(struct rtas_args __user *uargs); -asmlinkage time_t sys64_time(time_t __user * tloc); - -asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, - size_t sigsetsize); -asmlinkage long sys_sigaltstack(const stack_t __user *uss, - stack_t __user *uoss, unsigned long r5, unsigned long r6, - unsigned long r7, unsigned long r8, struct pt_regs *regs); #endif /* __KERNEL__ */ #endif /* __ASM_POWERPC_SYSCALLS_H */ diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 84083876985..ebbec52d21b 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -10,8 +10,8 @@ SYSCALL_SPU(read) SYSCALL_SPU(write) COMPAT_SYS_SPU(open) SYSCALL_SPU(close) -COMPAT_SYS_SPU(waitpid) -COMPAT_SYS_SPU(creat) +SYSCALL_SPU(waitpid) +SYSCALL_SPU(creat) SYSCALL_SPU(link) SYSCALL_SPU(unlink) COMPAT_SYS(execve) @@ -22,7 +22,7 @@ SYSCALL_SPU(chmod) SYSCALL_SPU(lchown) SYSCALL(ni_syscall) OLDSYS(stat) -SYSX_SPU(sys_lseek,ppc32_lseek,sys_lseek) +COMPAT_SYS_SPU(lseek) SYSCALL_SPU(getpid) COMPAT_SYS(mount) SYSX(sys_ni_syscall,sys_oldumount,sys_oldumount) @@ -36,13 +36,13 @@ SYSCALL(pause) COMPAT_SYS(utime) SYSCALL(ni_syscall) SYSCALL(ni_syscall) -COMPAT_SYS_SPU(access) -COMPAT_SYS_SPU(nice) +SYSCALL_SPU(access) +SYSCALL_SPU(nice) SYSCALL(ni_syscall) SYSCALL_SPU(sync) 
-COMPAT_SYS_SPU(kill) +SYSCALL_SPU(kill) SYSCALL_SPU(rename) -COMPAT_SYS_SPU(mkdir) +SYSCALL_SPU(mkdir) SYSCALL_SPU(rmdir) SYSCALL_SPU(dup) SYSCALL_SPU(pipe) @@ -60,10 +60,10 @@ SYSCALL(ni_syscall) COMPAT_SYS_SPU(ioctl) COMPAT_SYS_SPU(fcntl) SYSCALL(ni_syscall) -COMPAT_SYS_SPU(setpgid) +SYSCALL_SPU(setpgid) SYSCALL(ni_syscall) SYSX(sys_ni_syscall,sys_olduname, sys_olduname) -COMPAT_SYS_SPU(umask) +SYSCALL_SPU(umask) SYSCALL_SPU(chroot) COMPAT_SYS(ustat) SYSCALL_SPU(dup2) @@ -72,23 +72,24 @@ SYSCALL_SPU(getpgrp) SYSCALL_SPU(setsid) SYS32ONLY(sigaction) SYSCALL_SPU(sgetmask) -COMPAT_SYS_SPU(ssetmask) +SYSCALL_SPU(ssetmask) SYSCALL_SPU(setreuid) SYSCALL_SPU(setregid) +#define compat_sys_sigsuspend sys_sigsuspend SYS32ONLY(sigsuspend) COMPAT_SYS(sigpending) -COMPAT_SYS_SPU(sethostname) +SYSCALL_SPU(sethostname) COMPAT_SYS_SPU(setrlimit) COMPAT_SYS(old_getrlimit) COMPAT_SYS_SPU(getrusage) COMPAT_SYS_SPU(gettimeofday) COMPAT_SYS_SPU(settimeofday) -COMPAT_SYS_SPU(getgroups) -COMPAT_SYS_SPU(setgroups) +SYSCALL_SPU(getgroups) +SYSCALL_SPU(setgroups) SYSX(sys_ni_syscall,sys_ni_syscall,ppc_select) SYSCALL_SPU(symlink) OLDSYS(lstat) -COMPAT_SYS_SPU(readlink) +SYSCALL_SPU(readlink) SYSCALL(uselib) SYSCALL(swapon) SYSCALL(reboot) @@ -99,14 +100,14 @@ COMPAT_SYS_SPU(truncate) COMPAT_SYS_SPU(ftruncate) SYSCALL_SPU(fchmod) SYSCALL_SPU(fchown) -COMPAT_SYS_SPU(getpriority) -COMPAT_SYS_SPU(setpriority) +SYSCALL_SPU(getpriority) +SYSCALL_SPU(setpriority) SYSCALL(ni_syscall) COMPAT_SYS(statfs) COMPAT_SYS(fstatfs) SYSCALL(ni_syscall) COMPAT_SYS_SPU(socketcall) -COMPAT_SYS_SPU(syslog) +SYSCALL_SPU(syslog) COMPAT_SYS_SPU(setitimer) COMPAT_SYS_SPU(getitimer) COMPAT_SYS_SPU(newstat) @@ -124,7 +125,7 @@ COMPAT_SYS(ipc) SYSCALL_SPU(fsync) SYS32ONLY(sigreturn) PPC_SYS(clone) -COMPAT_SYS_SPU(setdomainname) +SYSCALL_SPU(setdomainname) SYSCALL_SPU(newuname) SYSCALL(ni_syscall) COMPAT_SYS_SPU(adjtimex) @@ -135,10 +136,10 @@ SYSCALL(init_module) SYSCALL(delete_module) SYSCALL(ni_syscall) SYSCALL(quotactl) -COMPAT_SYS_SPU(getpgid) +SYSCALL_SPU(getpgid) SYSCALL_SPU(fchdir) SYSCALL_SPU(bdflush) -COMPAT_SYS(sysfs) +SYSCALL_SPU(sysfs) SYSX_SPU(ppc64_personality,ppc64_personality,sys_personality) SYSCALL(ni_syscall) SYSCALL_SPU(setfsuid) @@ -150,20 +151,20 @@ SYSCALL_SPU(flock) SYSCALL_SPU(msync) COMPAT_SYS_SPU(readv) COMPAT_SYS_SPU(writev) -COMPAT_SYS_SPU(getsid) +SYSCALL_SPU(getsid) SYSCALL_SPU(fdatasync) COMPAT_SYS(sysctl) SYSCALL_SPU(mlock) SYSCALL_SPU(munlock) SYSCALL_SPU(mlockall) SYSCALL_SPU(munlockall) -COMPAT_SYS_SPU(sched_setparam) -COMPAT_SYS_SPU(sched_getparam) -COMPAT_SYS_SPU(sched_setscheduler) -COMPAT_SYS_SPU(sched_getscheduler) +SYSCALL_SPU(sched_setparam) +SYSCALL_SPU(sched_getparam) +SYSCALL_SPU(sched_setscheduler) +SYSCALL_SPU(sched_getscheduler) SYSCALL_SPU(sched_yield) -COMPAT_SYS_SPU(sched_get_priority_max) -COMPAT_SYS_SPU(sched_get_priority_min) +SYSCALL_SPU(sched_get_priority_max) +SYSCALL_SPU(sched_get_priority_min) COMPAT_SYS_SPU(sched_rr_get_interval) COMPAT_SYS_SPU(nanosleep) SYSCALL_SPU(mremap) @@ -174,7 +175,7 @@ SYSCALL_SPU(poll) SYSCALL(ni_syscall) SYSCALL_SPU(setresgid) SYSCALL_SPU(getresgid) -COMPAT_SYS_SPU(prctl) +SYSCALL_SPU(prctl) COMPAT_SYS(rt_sigreturn) COMPAT_SYS(rt_sigaction) COMPAT_SYS(rt_sigprocmask) @@ -253,7 +254,7 @@ COMPAT_SYS_SPU(clock_gettime) COMPAT_SYS_SPU(clock_getres) COMPAT_SYS_SPU(clock_nanosleep) SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext) -COMPAT_SYS_SPU(tgkill) +SYSCALL_SPU(tgkill) COMPAT_SYS_SPU(utimes) COMPAT_SYS_SPU(statfs64) COMPAT_SYS_SPU(fstatfs64) @@ 
-276,8 +277,8 @@ COMPAT_SYS(add_key) COMPAT_SYS(request_key) COMPAT_SYS(keyctl) COMPAT_SYS(waitid) -COMPAT_SYS(ioprio_set) -COMPAT_SYS(ioprio_get) +SYSCALL(ioprio_set) +SYSCALL(ioprio_get) SYSCALL(inotify_init) SYSCALL(inotify_add_watch) SYSCALL(inotify_rm_watch) @@ -356,3 +357,5 @@ COMPAT_SYS_SPU(sendmmsg) SYSCALL_SPU(setns) COMPAT_SYS(process_vm_readv) COMPAT_SYS(process_vm_writev) +SYSCALL(finit_module) +SYSCALL(ni_syscall) /* sys_kcmp */ diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h new file mode 100644 index 00000000000..4b4449abf3f --- /dev/null +++ b/arch/powerpc/include/asm/tm.h @@ -0,0 +1,20 @@ +/* + * Transactional memory support routines to reclaim and recheckpoint + * transactional process state. + * + * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation. + */ + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +extern void do_load_up_transact_fpu(struct thread_struct *thread); +extern void do_load_up_transact_altivec(struct thread_struct *thread); +#endif + +extern void tm_enable(void); +extern void tm_reclaim(struct thread_struct *thread, + unsigned long orig_msr, uint8_t cause); +extern void tm_recheckpoint(struct thread_struct *thread, + unsigned long orig_msr); +extern void tm_abort(uint8_t cause); +extern void tm_save_sprs(struct thread_struct *thread); +extern void tm_restore_sprs(struct thread_struct *thread); diff --git a/arch/powerpc/include/asm/ucc.h b/arch/powerpc/include/asm/ucc.h index 46b09ba6bea..6927ac26516 100644 --- a/arch/powerpc/include/asm/ucc.h +++ b/arch/powerpc/include/asm/ucc.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved. + * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. * * Authors: Shlomi Gridish <gridish@freescale.com> * Li Yang <leoli@freescale.com> diff --git a/arch/powerpc/include/asm/ucc_fast.h b/arch/powerpc/include/asm/ucc_fast.h index 4644c840e2f..72ea9bab07d 100644 --- a/arch/powerpc/include/asm/ucc_fast.h +++ b/arch/powerpc/include/asm/ucc_fast.h @@ -1,7 +1,7 @@ /* * Internal header file for UCC FAST unit routines. * - * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved. + * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. * * Authors: Shlomi Gridish <gridish@freescale.com> * Li Yang <leoli@freescale.com> diff --git a/arch/powerpc/include/asm/ucc_slow.h b/arch/powerpc/include/asm/ucc_slow.h index cf131ffdb8d..c44131e68e1 100644 --- a/arch/powerpc/include/asm/ucc_slow.h +++ b/arch/powerpc/include/asm/ucc_slow.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved. + * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. * * Authors: Shlomi Gridish <gridish@freescale.com> * Li Yang <leoli@freescale.com> diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h index b3038817b8d..5a7510e9d09 100644 --- a/arch/powerpc/include/asm/udbg.h +++ b/arch/powerpc/include/asm/udbg.h @@ -21,7 +21,6 @@ extern int (*udbg_getc_poll)(void); extern void udbg_puts(const char *s); extern int udbg_write(const char *s, int n); -extern int udbg_read(char *buf, int buflen); extern void register_early_udbg_console(void); extern void udbg_printf(const char *fmt, ...) 
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 921dce6d844..1487f0f1229 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -12,7 +12,7 @@ #include <uapi/asm/unistd.h> -#define __NR_syscalls 353 +#define __NR_syscalls 355 #define __NR__exit __NR_exit #define NR_syscalls __NR_syscalls @@ -44,19 +44,17 @@ #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK -#define __ARCH_WANT_SYS_RT_SIGACTION -#define __ARCH_WANT_SYS_RT_SIGSUSPEND #ifdef CONFIG_PPC32 #define __ARCH_WANT_OLD_STAT #endif #ifdef CONFIG_PPC64 #define __ARCH_WANT_COMPAT_SYS_TIME -#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND #define __ARCH_WANT_SYS_NEWFSTATAT #define __ARCH_WANT_COMPAT_SYS_SENDFILE #endif -#define __ARCH_WANT_SYS_EXECVE -#define __ARCH_WANT_KERNEL_EXECVE +#define __ARCH_WANT_SYS_FORK +#define __ARCH_WANT_SYS_VFORK +#define __ARCH_WANT_SYS_CLONE /* * "Conditional" syscalls diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h index df81cb72d1e..68d0cc998b1 100644 --- a/arch/powerpc/include/asm/vio.h +++ b/arch/powerpc/include/asm/vio.h @@ -139,7 +139,7 @@ extern void vio_unregister_driver(struct vio_driver *drv); extern int vio_cmo_entitlement_update(size_t); extern void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired); -extern void __devinit vio_unregister_device(struct vio_dev *dev); +extern void vio_unregister_device(struct vio_dev *dev); extern int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op); diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild index a33c3c03bb2..f7bca637074 100644 --- a/arch/powerpc/include/uapi/asm/Kbuild +++ b/arch/powerpc/include/uapi/asm/Kbuild @@ -7,6 +7,7 @@ header-y += bootx.h header-y += byteorder.h header-y += cputable.h header-y += elf.h +header-y += epapr_hcalls.h header-y += errno.h header-y += fcntl.h header-y += ioctl.h diff --git a/arch/powerpc/include/uapi/asm/epapr_hcalls.h b/arch/powerpc/include/uapi/asm/epapr_hcalls.h new file mode 100644 index 00000000000..7f9c74b4670 --- /dev/null +++ b/arch/powerpc/include/uapi/asm/epapr_hcalls.h @@ -0,0 +1,98 @@ +/* + * ePAPR hcall interface + * + * Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Author: Timur Tabi <timur@freescale.com> + * + * This file is provided under a dual BSD/GPL license. When using or + * redistributing this file, you may do so under either license. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _UAPI_ASM_POWERPC_EPAPR_HCALLS_H +#define _UAPI_ASM_POWERPC_EPAPR_HCALLS_H + +#define EV_BYTE_CHANNEL_SEND 1 +#define EV_BYTE_CHANNEL_RECEIVE 2 +#define EV_BYTE_CHANNEL_POLL 3 +#define EV_INT_SET_CONFIG 4 +#define EV_INT_GET_CONFIG 5 +#define EV_INT_SET_MASK 6 +#define EV_INT_GET_MASK 7 +#define EV_INT_IACK 9 +#define EV_INT_EOI 10 +#define EV_INT_SEND_IPI 11 +#define EV_INT_SET_TASK_PRIORITY 12 +#define EV_INT_GET_TASK_PRIORITY 13 +#define EV_DOORBELL_SEND 14 +#define EV_MSGSND 15 +#define EV_IDLE 16 + +/* vendor ID: epapr */ +#define EV_LOCAL_VENDOR_ID 0 /* for private use */ +#define EV_EPAPR_VENDOR_ID 1 +#define EV_FSL_VENDOR_ID 2 /* Freescale Semiconductor */ +#define EV_IBM_VENDOR_ID 3 /* IBM */ +#define EV_GHS_VENDOR_ID 4 /* Green Hills Software */ +#define EV_ENEA_VENDOR_ID 5 /* Enea */ +#define EV_WR_VENDOR_ID 6 /* Wind River Systems */ +#define EV_AMCC_VENDOR_ID 7 /* Applied Micro Circuits */ +#define EV_KVM_VENDOR_ID 42 /* KVM */ + +/* The max number of bytes that a byte channel can send or receive per call */ +#define EV_BYTE_CHANNEL_MAX_BYTES 16 + + +#define _EV_HCALL_TOKEN(id, num) (((id) << 16) | (num)) +#define EV_HCALL_TOKEN(hcall_num) _EV_HCALL_TOKEN(EV_EPAPR_VENDOR_ID, hcall_num) + +/* epapr return codes */ +#define EV_SUCCESS 0 +#define EV_EPERM 1 /* Operation not permitted */ +#define EV_ENOENT 2 /* Entry Not Found */ +#define EV_EIO 3 /* I/O error occured */ +#define EV_EAGAIN 4 /* The operation had insufficient + * resources to complete and should be + * retried + */ +#define EV_ENOMEM 5 /* There was insufficient memory to + * complete the operation */ +#define EV_EFAULT 6 /* Bad guest address */ +#define EV_ENODEV 7 /* No such device */ +#define EV_EINVAL 8 /* An argument supplied to the hcall + was out of range or invalid */ +#define EV_INTERNAL 9 /* An internal error occured */ +#define EV_CONFIG 10 /* A configuration error was detected */ +#define EV_INVALID_STATE 11 /* The object is in an invalid state */ +#define EV_UNIMPLEMENTED 12 /* Unimplemented hypercall */ +#define EV_BUFFER_OVERFLOW 13 /* Caller-supplied buffer too small */ + +#endif /* _UAPI_ASM_POWERPC_EPAPR_HCALLS_H */ diff --git a/arch/powerpc/include/uapi/asm/ioctls.h b/arch/powerpc/include/uapi/asm/ioctls.h index e9b78870aaa..49a25796a61 100644 --- a/arch/powerpc/include/uapi/asm/ioctls.h +++ b/arch/powerpc/include/uapi/asm/ioctls.h @@ -97,6 +97,9 @@ #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */ #define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ #define TIOCVHANGUP 0x5437 +#define TIOCGPKT _IOR('T', 0x38, int) /* Get packet mode state */ +#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */ +#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive 
mode state */ #define TIOCSERCONFIG 0x5453 #define TIOCSERGWILD 0x5454 diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index 1bea4d8ea6f..16064d00adb 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -114,7 +114,10 @@ struct kvm_regs { /* Embedded Floating Point (SPE) -- IVOR32-34 if KVM_SREGS_E_IVOR */ #define KVM_SREGS_E_SPE (1 << 9) -/* External Proxy (EXP) -- EPR */ +/* + * DEPRECATED! USE ONE_REG FOR THIS ONE! + * External Proxy (EXP) -- EPR + */ #define KVM_SREGS_EXP (1 << 10) /* External PID (E.PD) -- EPSC/EPLC */ @@ -221,6 +224,12 @@ struct kvm_sregs { __u32 dbsr; /* KVM_SREGS_E_UPDATE_DBSR */ __u32 dbcr[3]; + /* + * iac/dac registers are 64bit wide, while this API + * interface provides only lower 32 bits on 64 bit + * processors. ONE_REG interface is added for 64bit + * iac/dac registers. + */ __u32 iac[4]; __u32 dac[2]; __u32 dvc[2]; @@ -325,6 +334,87 @@ struct kvm_book3e_206_tlb_params { __u32 reserved[8]; }; +/* For KVM_PPC_GET_HTAB_FD */ +struct kvm_get_htab_fd { + __u64 flags; + __u64 start_index; + __u64 reserved[2]; +}; + +/* Values for kvm_get_htab_fd.flags */ +#define KVM_GET_HTAB_BOLTED_ONLY ((__u64)0x1) +#define KVM_GET_HTAB_WRITE ((__u64)0x2) + +/* + * Data read on the file descriptor is formatted as a series of + * records, each consisting of a header followed by a series of + * `n_valid' HPTEs (16 bytes each), which are all valid. Following + * those valid HPTEs there are `n_invalid' invalid HPTEs, which + * are not represented explicitly in the stream. The same format + * is used for writing. + */ +struct kvm_get_htab_header { + __u32 index; + __u16 n_valid; + __u16 n_invalid; +}; + #define KVM_REG_PPC_HIOR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x1) +#define KVM_REG_PPC_IAC1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x2) +#define KVM_REG_PPC_IAC2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x3) +#define KVM_REG_PPC_IAC3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x4) +#define KVM_REG_PPC_IAC4 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x5) +#define KVM_REG_PPC_DAC1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x6) +#define KVM_REG_PPC_DAC2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x7) +#define KVM_REG_PPC_DABR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8) +#define KVM_REG_PPC_DSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9) +#define KVM_REG_PPC_PURR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa) +#define KVM_REG_PPC_SPURR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb) +#define KVM_REG_PPC_DAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc) +#define KVM_REG_PPC_DSISR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd) +#define KVM_REG_PPC_AMR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xe) +#define KVM_REG_PPC_UAMOR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xf) + +#define KVM_REG_PPC_MMCR0 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10) +#define KVM_REG_PPC_MMCR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11) +#define KVM_REG_PPC_MMCRA (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12) + +#define KVM_REG_PPC_PMC1 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18) +#define KVM_REG_PPC_PMC2 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19) +#define KVM_REG_PPC_PMC3 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1a) +#define KVM_REG_PPC_PMC4 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1b) +#define KVM_REG_PPC_PMC5 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1c) +#define KVM_REG_PPC_PMC6 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1d) +#define KVM_REG_PPC_PMC7 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1e) +#define KVM_REG_PPC_PMC8 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1f) + +/* 32 floating-point registers */ +#define KVM_REG_PPC_FPR0 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x20) +#define 
KVM_REG_PPC_FPR(n) (KVM_REG_PPC_FPR0 + (n)) +#define KVM_REG_PPC_FPR31 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x3f) + +/* 32 VMX/Altivec vector registers */ +#define KVM_REG_PPC_VR0 (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x40) +#define KVM_REG_PPC_VR(n) (KVM_REG_PPC_VR0 + (n)) +#define KVM_REG_PPC_VR31 (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x5f) + +/* 32 double-width FP registers for VSX */ +/* High-order halves overlap with FP regs */ +#define KVM_REG_PPC_VSR0 (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x60) +#define KVM_REG_PPC_VSR(n) (KVM_REG_PPC_VSR0 + (n)) +#define KVM_REG_PPC_VSR31 (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x7f) + +/* FP and vector status/control registers */ +#define KVM_REG_PPC_FPSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x80) +#define KVM_REG_PPC_VSCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x81) + +/* Virtual processor areas */ +/* For SLB & DTL, address in high (first) half, length in low half */ +#define KVM_REG_PPC_VPA_ADDR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x82) +#define KVM_REG_PPC_VPA_SLB (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x83) +#define KVM_REG_PPC_VPA_DTL (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x84) + +#define KVM_REG_PPC_EPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x85) +#define KVM_REG_PPC_EPR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x86) #endif /* __LINUX_KVM_POWERPC_H */ diff --git a/arch/powerpc/include/uapi/asm/kvm_para.h b/arch/powerpc/include/uapi/asm/kvm_para.h index 5e04383a1db..e3af3286a06 100644 --- a/arch/powerpc/include/uapi/asm/kvm_para.h +++ b/arch/powerpc/include/uapi/asm/kvm_para.h @@ -75,9 +75,10 @@ struct kvm_vcpu_arch_shared { }; #define KVM_SC_MAGIC_R0 0x4b564d21 /* "KVM!" */ -#define HC_VENDOR_KVM (42 << 16) -#define HC_EV_SUCCESS 0 -#define HC_EV_UNIMPLEMENTED 12 + +#define KVM_HCALL_TOKEN(num) _EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, num) + +#include <asm/epapr_hcalls.h> #define KVM_FEATURE_MAGIC_PAGE 1 diff --git a/arch/powerpc/include/uapi/asm/ptrace.h b/arch/powerpc/include/uapi/asm/ptrace.h index ee67a2bc91b..66b9ca4ee94 100644 --- a/arch/powerpc/include/uapi/asm/ptrace.h +++ b/arch/powerpc/include/uapi/asm/ptrace.h @@ -108,6 +108,7 @@ struct pt_regs { #define PT_DAR 41 #define PT_DSISR 42 #define PT_RESULT 43 +#define PT_DSCR 44 #define PT_REGS_COUNT 44 #define PT_FPR0 48 /* each FP reg occupies 2 slots in this space */ @@ -146,34 +147,34 @@ struct pt_regs { * structures. This also simplifies the implementation of a bi-arch * (combined (32- and 64-bit) gdb. */ -#define PTRACE_GETVRREGS 18 -#define PTRACE_SETVRREGS 19 +#define PTRACE_GETVRREGS 0x12 +#define PTRACE_SETVRREGS 0x13 /* Get/set all the upper 32-bits of the SPE registers, accumulator, and * spefscr, in one go */ -#define PTRACE_GETEVRREGS 20 -#define PTRACE_SETEVRREGS 21 +#define PTRACE_GETEVRREGS 0x14 +#define PTRACE_SETEVRREGS 0x15 /* Get the first 32 128bit VSX registers */ -#define PTRACE_GETVSRREGS 27 -#define PTRACE_SETVSRREGS 28 +#define PTRACE_GETVSRREGS 0x1b +#define PTRACE_SETVSRREGS 0x1c /* * Get or set a debug register. The first 16 are DABR registers and the * second 16 are IABR registers. */ -#define PTRACE_GET_DEBUGREG 25 -#define PTRACE_SET_DEBUGREG 26 +#define PTRACE_GET_DEBUGREG 0x19 +#define PTRACE_SET_DEBUGREG 0x1a /* (new) PTRACE requests using the same numbers as x86 and the same * argument ordering. 
Additionally, they support more registers too */ -#define PTRACE_GETREGS 12 -#define PTRACE_SETREGS 13 -#define PTRACE_GETFPREGS 14 -#define PTRACE_SETFPREGS 15 -#define PTRACE_GETREGS64 22 -#define PTRACE_SETREGS64 23 +#define PTRACE_GETREGS 0xc +#define PTRACE_SETREGS 0xd +#define PTRACE_GETFPREGS 0xe +#define PTRACE_SETFPREGS 0xf +#define PTRACE_GETREGS64 0x16 +#define PTRACE_SETREGS64 0x17 /* Calls to trace a 64bit program from a 32bit program */ #define PPC_PTRACE_PEEKTEXT_3264 0x95 diff --git a/arch/powerpc/include/uapi/asm/setup.h b/arch/powerpc/include/uapi/asm/setup.h index 8b9a306260b..552df83f1a4 100644 --- a/arch/powerpc/include/uapi/asm/setup.h +++ b/arch/powerpc/include/uapi/asm/setup.h @@ -1,32 +1 @@ -#ifndef _ASM_POWERPC_SETUP_H -#define _ASM_POWERPC_SETUP_H - #include <asm-generic/setup.h> - -#ifndef __ASSEMBLY__ -extern void ppc_printk_progress(char *s, unsigned short hex); - -extern unsigned int rtas_data; -extern int mem_init_done; /* set on boot once kmalloc can be called */ -extern int init_bootmem_done; /* set once bootmem is available */ -extern unsigned long long memory_limit; -extern unsigned long klimit; -extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); - -extern void via_cuda_init(void); -extern void read_rtc_time(void); -extern void pmac_find_display(void); - -struct device_node; -extern void note_scsi_host(struct device_node *, void *); - -/* Used in very early kernel initialization. */ -extern unsigned long reloc_offset(void); -extern unsigned long add_reloc_offset(unsigned long); -extern void reloc_got2(unsigned long); - -#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) - -#endif /* !__ASSEMBLY__ */ - -#endif /* _ASM_POWERPC_SETUP_H */ diff --git a/arch/powerpc/include/uapi/asm/signal.h b/arch/powerpc/include/uapi/asm/signal.h index 48fa8d3f2f9..6c69ee94fd8 100644 --- a/arch/powerpc/include/uapi/asm/signal.h +++ b/arch/powerpc/include/uapi/asm/signal.h @@ -85,17 +85,12 @@ typedef struct { #define SA_RESTORER 0x04000000U -/* - * sigaltstack controls - */ -#define SS_ONSTACK 1 -#define SS_DISABLE 2 - #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 #include <asm-generic/signal-defs.h> +#ifndef __KERNEL__ struct old_sigaction { __sighandler_t sa_handler; old_sigset_t sa_mask; @@ -109,10 +104,7 @@ struct sigaction { __sigrestore_t sa_restorer; sigset_t sa_mask; /* mask last for extensibility */ }; - -struct k_sigaction { - struct sigaction sa; -}; +#endif typedef struct sigaltstack { void __user *ss_sp; diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h index 3d5179bb122..a26dcaece50 100644 --- a/arch/powerpc/include/uapi/asm/socket.h +++ b/arch/powerpc/include/uapi/asm/socket.h @@ -29,7 +29,7 @@ #define SO_PRIORITY 12 #define SO_LINGER 13 #define SO_BSDCOMPAT 14 -/* To add :#define SO_REUSEPORT 15 */ +#define SO_REUSEPORT 15 #define SO_RCVLOWAT 16 #define SO_SNDLOWAT 17 #define SO_RCVTIMEO 18 @@ -47,6 +47,7 @@ /* Socket filtering */ #define SO_ATTACH_FILTER 26 #define SO_DETACH_FILTER 27 +#define SO_GET_FILTER SO_ATTACH_FILTER #define SO_PEERNAME 28 #define SO_TIMESTAMP 29 @@ -76,4 +77,6 @@ /* Instruct lower device to use last 4-bytes of skb data as FCS */ #define SO_NOFCS 43 +#define SO_LOCK_FILTER 44 + #endif /* _ASM_POWERPC_SOCKET_H */ diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index 380b5d37a90..74cb4d72d67 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h @@ -375,6 +375,8 @@ #define 
__NR_setns 350 #define __NR_process_vm_readv 351 #define __NR_process_vm_writev 352 +#define __NR_finit_module 353 +#define __NR_kcmp 354 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index cde12f8a4eb..f960a794455 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -7,7 +7,7 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror ifeq ($(CONFIG_PPC64),y) -CFLAGS_prom_init.o += -mno-minimal-toc +CFLAGS_prom_init.o += $(NO_MINIMAL_TOC) endif ifeq ($(CONFIG_PPC32),y) CFLAGS_prom_init.o += -fPIC @@ -38,7 +38,7 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ paca.o nvram_64.o firmware.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o -obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power7.o +obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o obj64-$(CONFIG_RELOCATABLE) += reloc_64.o obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o obj-$(CONFIG_PPC_A2) += cpu_setup_a2.o @@ -75,8 +75,8 @@ endif obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o obj-$(CONFIG_MODULES) += module.o module_$(CONFIG_WORD_SIZE).o obj-$(CONFIG_44x) += cpu_setup_44x.o -obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o dbell.o -obj-$(CONFIG_PPC_BOOK3E_64) += dbell.o +obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o +obj-$(CONFIG_PPC_DOORBELL) += dbell.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o extra-y := head_$(CONFIG_WORD_SIZE).o @@ -91,7 +91,6 @@ obj-$(CONFIG_RELOCATABLE_PPC32) += reloc_32.o obj-$(CONFIG_PPC32) += entry_32.o setup_32.o obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o obj-$(CONFIG_KGDB) += kgdb.o -obj-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_init.o obj-$(CONFIG_MODULES) += ppc_ksyms.o obj-$(CONFIG_BOOTX_TEXT) += btext.o obj-$(CONFIG_SMP) += smp.o @@ -122,6 +121,8 @@ ifneq ($(CONFIG_PPC_INDIRECT_IO),y) obj-y += iomap.o endif +obj64-$(CONFIG_PPC_TRANSACTIONAL_MEM) += tm.o + obj-$(CONFIG_PPC64) += $(obj64-y) obj-$(CONFIG_PPC32) += $(obj32-y) @@ -142,6 +143,7 @@ GCOV_PROFILE_kprobes.o := n extra-$(CONFIG_PPC_FPU) += fpu.o extra-$(CONFIG_ALTIVEC) += vector.o extra-$(CONFIG_PPC64) += entry_64.o +extra-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_init.o extra-y += systbl_chk.i $(obj)/systbl.o: systbl_chk diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 7523539cfe9..b6c17ec9b16 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -77,6 +77,7 @@ int main(void) DEFINE(NMI_MASK, NMI_MASK); DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr)); DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit)); + DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr)); #else DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); #endif /* CONFIG_PPC64 */ @@ -117,10 +118,38 @@ int main(void) #ifdef CONFIG_KVM_BOOK3S_32_HANDLER DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu)); #endif -#ifdef CONFIG_KVM_BOOKE_HV +#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE) DEFINE(THREAD_KVM_VCPU, offsetof(struct thread_struct, kvm_vcpu)); #endif +#ifdef CONFIG_PPC_BOOK3S_64 + DEFINE(THREAD_TAR, offsetof(struct thread_struct, tar)); +#endif +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + DEFINE(PACATMSCRATCH, offsetof(struct paca_struct, tm_scratch)); + DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar)); + DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr)); + 
DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar)); + DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs)); + DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct, + transact_vr[0])); + DEFINE(THREAD_TRANSACT_VSCR, offsetof(struct thread_struct, + transact_vscr)); + DEFINE(THREAD_TRANSACT_VRSAVE, offsetof(struct thread_struct, + transact_vrsave)); + DEFINE(THREAD_TRANSACT_FPR0, offsetof(struct thread_struct, + transact_fpr[0])); + DEFINE(THREAD_TRANSACT_FPSCR, offsetof(struct thread_struct, + transact_fpscr)); +#ifdef CONFIG_VSX + DEFINE(THREAD_TRANSACT_VSR0, offsetof(struct thread_struct, + transact_fpr[0])); +#endif + /* Local pt_regs on stack for Transactional Memory funcs. */ + DEFINE(TM_FRAME_SIZE, STACK_FRAME_OVERHEAD + + sizeof(struct pt_regs) + 16); +#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ + DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); @@ -441,8 +470,7 @@ int main(void) DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr)); DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1)); DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock)); - DEFINE(KVM_ONLINE_CPUS, offsetof(struct kvm, online_vcpus.counter)); - DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu)); + DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits)); DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr)); DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor)); DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v)); @@ -470,12 +498,12 @@ int main(void) DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb)); DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max)); DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr)); - DEFINE(VCPU_LAST_CPU, offsetof(struct kvm_vcpu, arch.last_cpu)); DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr)); DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar)); DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap)); DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid)); + DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar)); DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count)); DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count)); DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest)); @@ -555,6 +583,10 @@ int main(void) DEFINE(IPI_PRIORITY, IPI_PRIORITY); #endif /* CONFIG_KVM_BOOK3S_64_HV */ +#ifdef CONFIG_PPC_BOOK3S_64 + HSTATE_FIELD(HSTATE_CFAR, cfar); +#endif /* CONFIG_PPC_BOOK3S_64 */ + #else /* CONFIG_PPC_BOOK3S */ DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); diff --git a/arch/powerpc/kernel/cpu_setup_power7.S b/arch/powerpc/kernel/cpu_setup_power.S index 76797c5105d..ea847abb0d0 100644 --- a/arch/powerpc/kernel/cpu_setup_power7.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -27,6 +27,7 @@ _GLOBAL(__setup_cpu_power7) beqlr li r0,0 mtspr SPRN_LPID,r0 + mfspr r3,SPRN_LPCR bl __init_LPCR bl __init_TLB mtlr r11 @@ -39,6 +40,37 @@ _GLOBAL(__restore_cpu_power7) beqlr li r0,0 mtspr SPRN_LPID,r0 + mfspr r3,SPRN_LPCR + bl __init_LPCR + bl __init_TLB + mtlr r11 + blr + +_GLOBAL(__setup_cpu_power8) + mflr r11 + bl __init_FSCR + bl __init_hvmode_206 + mtlr r11 + beqlr + li r0,0 + mtspr SPRN_LPID,r0 + mfspr r3,SPRN_LPCR + oris r3, r3, LPCR_AIL_3@h + bl __init_LPCR + bl 
__init_TLB + mtlr r11 + blr + +_GLOBAL(__restore_cpu_power8) + mflr r11 + bl __init_FSCR + mfmsr r3 + rldicl. r0,r3,4,63 + beqlr + li r0,0 + mtspr SPRN_LPID,r0 + mfspr r3,SPRN_LPCR + oris r3, r3, LPCR_AIL_3@h bl __init_LPCR bl __init_TLB mtlr r11 @@ -57,6 +89,7 @@ __init_hvmode_206: __init_LPCR: /* Setup a sane LPCR: + * Called with initial LPCR in R3 * * LPES = 0b01 (HSRR0/1 used for 0x500) * PECE = 0b111 @@ -67,7 +100,6 @@ __init_LPCR: * * Other bits untouched for now */ - mfspr r3,SPRN_LPCR li r5,1 rldimi r3,r5, LPCR_LPES_SH, 64-LPCR_LPES_SH-2 ori r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2) @@ -82,6 +114,12 @@ __init_LPCR: isync blr +__init_FSCR: + mfspr r3,SPRN_FSCR + ori r3,r3,FSCR_TAR|FSCR_DSCR + mtspr SPRN_FSCR,r3 + blr + __init_TLB: /* Clear the TLB */ li r6,128 diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 0514c21f138..75a3d71b895 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -68,6 +68,8 @@ extern void __restore_cpu_pa6t(void); extern void __restore_cpu_ppc970(void); extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec); extern void __restore_cpu_power7(void); +extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec); +extern void __restore_cpu_power8(void); extern void __restore_cpu_a2(void); #endif /* CONFIG_PPC64 */ #if defined(CONFIG_E500) @@ -94,6 +96,10 @@ extern void __restore_cpu_e5500(void); PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \ PPC_FEATURE_TRUE_LE | \ PPC_FEATURE_PSERIES_PERFMON_COMPAT) +#define COMMON_USER_POWER8 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_06 |\ + PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \ + PPC_FEATURE_TRUE_LE | \ + PPC_FEATURE_PSERIES_PERFMON_COMPAT) #define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\ PPC_FEATURE_TRUE_LE | \ PPC_FEATURE_HAS_ALTIVEC_COMP) @@ -429,6 +435,21 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_restore = __restore_cpu_power7, .platform = "power7", }, + { /* 2.07-compliant processor, i.e. 
Power8 "architected" mode */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x0f000004, + .cpu_name = "POWER8 (architected)", + .cpu_features = CPU_FTRS_POWER8, + .cpu_user_features = COMMON_USER_POWER8, + .mmu_features = MMU_FTRS_POWER8, + .icache_bsize = 128, + .dcache_bsize = 128, + .oprofile_type = PPC_OPROFILE_POWER4, + .oprofile_cpu_type = "ppc64/ibm-compat-v1", + .cpu_setup = __setup_cpu_power8, + .cpu_restore = __restore_cpu_power8, + .platform = "power8", + }, { /* Power7 */ .pvr_mask = 0xffff0000, .pvr_value = 0x003f0000, @@ -463,6 +484,23 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_restore = __restore_cpu_power7, .platform = "power7+", }, + { /* Power8 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x004b0000, + .cpu_name = "POWER8 (raw)", + .cpu_features = CPU_FTRS_POWER8, + .cpu_user_features = COMMON_USER_POWER8, + .mmu_features = MMU_FTRS_POWER8, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .pmc_type = PPC_PMC_IBM, + .oprofile_cpu_type = "ppc64/power8", + .oprofile_type = PPC_OPROFILE_POWER4, + .cpu_setup = __setup_cpu_power8, + .cpu_restore = __restore_cpu_power8, + .platform = "power8", + }, { /* Cell Broadband Engine */ .pvr_mask = 0xffff0000, .pvr_value = 0x00700000, diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c index a892680668d..9ebbc24bb23 100644 --- a/arch/powerpc/kernel/dbell.c +++ b/arch/powerpc/kernel/dbell.c @@ -21,7 +21,7 @@ #ifdef CONFIG_SMP void doorbell_setup_this_cpu(void) { - unsigned long tag = mfspr(SPRN_PIR) & 0x3fff; + unsigned long tag = mfspr(SPRN_DOORBELL_CPUTAG) & PPC_DBELL_TAG_MASK; smp_muxed_ipi_set_data(smp_processor_id(), tag); } @@ -30,7 +30,7 @@ void doorbell_cause_ipi(int cpu, unsigned long data) { /* Order previous accesses vs. msgsnd, which is treated as a store */ mb(); - ppc_msgsnd(PPC_DBELL, 0, data); + ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, data); } void doorbell_exception(struct pt_regs *regs) diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 9499385676e..e514de57a12 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -439,16 +439,13 @@ ret_from_fork: ret_from_kernel_thread: REST_NVGPRS(r1) bl schedule_tail + li r3,0 + stw r3,0(r1) mtlr r14 mr r3,r15 PPC440EP_ERR42 blrl li r3,0 - b do_exit # no return - - .globl __ret_from_kernel_execve -__ret_from_kernel_execve: - addi r1,r3,-STACK_FRAME_OVERHEAD b ret_from_syscall /* Traced system call support */ diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 56e0ff0878b..256c5bf0adb 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -62,8 +62,9 @@ system_call_common: std r12,_MSR(r1) std r0,GPR0(r1) std r10,GPR1(r1) + beq 2f /* if from kernel mode */ ACCOUNT_CPU_USER_ENTRY(r10, r11) - std r2,GPR2(r1) +2: std r2,GPR2(r1) std r3,GPR3(r1) mfcr r2 std r4,GPR4(r1) @@ -94,7 +95,7 @@ system_call_common: addi r9,r1,STACK_FRAME_OVERHEAD ld r11,exception_marker@toc(r2) std r11,-16(r9) /* "regshere" marker */ -#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR) +#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR) BEGIN_FW_FTR_SECTION beq 33f /* if from user, see if there are any DTL entries to process */ @@ -110,7 +111,7 @@ BEGIN_FW_FTR_SECTION addi r9,r1,STACK_FRAME_OVERHEAD 33: END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) -#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */ +#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */ /* * A syscall should always be called 
with interrupts enabled @@ -226,6 +227,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) beq- 1f ACCOUNT_CPU_USER_EXIT(r11, r12) + HMT_MEDIUM_LOW_HAS_PPR ld r13,GPR13(r1) /* only restore r13 if returning to usermode */ 1: ld r2,GPR2(r1) ld r1,GPR1(r1) @@ -302,6 +304,7 @@ syscall_exit_work: subi r12,r12,TI_FLAGS 4: /* Anything else left to do? */ + SET_DEFAULT_THREAD_PPR(r3, r9) /* Set thread.ppr = 3 */ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) beq .ret_from_except_lite @@ -373,17 +376,13 @@ _GLOBAL(ret_from_fork) _GLOBAL(ret_from_kernel_thread) bl .schedule_tail REST_NVGPRS(r1) - REST_GPR(2,r1) + li r3,0 + std r3,0(r1) + ld r14, 0(r14) mtlr r14 mr r3,r15 blrl li r3,0 - b .do_exit # no return - -_GLOBAL(__ret_from_kernel_execve) - addi r1,r3,-STACK_FRAME_OVERHEAD - li r10,1 - std r10,SOFTE(r1) b syscall_exit .section ".toc","aw" @@ -449,6 +448,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR) std r23,_CCR(r1) std r1,KSP(r3) /* Set old stack pointer */ +#ifdef CONFIG_PPC_BOOK3S_64 +BEGIN_FTR_SECTION + /* + * Back up the TAR across context switches. Note that the TAR is not + * available for use in the kernel. (To provide this, the TAR should + * be backed up/restored on exception entry/exit instead, and be in + * pt_regs. FIXME, this should be in pt_regs anyway (for debug).) + */ + mfspr r0,SPRN_TAR + std r0,THREAD_TAR(r3) +END_FTR_SECTION_IFSET(CPU_FTR_BCTAR) +#endif + #ifdef CONFIG_SMP /* We need a sync somewhere here to make sure that if the * previous task gets rescheduled on another CPU, it sees all @@ -531,6 +543,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) mr r1,r8 /* start using new stack pointer */ std r7,PACAKSAVE(r13) +#ifdef CONFIG_PPC_BOOK3S_64 +BEGIN_FTR_SECTION + ld r0,THREAD_TAR(r4) + mtspr SPRN_TAR,r0 +END_FTR_SECTION_IFSET(CPU_FTR_BCTAR) +#endif + #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION ld r0,THREAD_VRSAVE(r4) @@ -668,6 +687,19 @@ resume_kernel: ld r4,TI_FLAGS(r9) andi. r0,r4,_TIF_NEED_RESCHED bne 1b + + /* + * arch_local_irq_restore() from preempt_schedule_irq above may + * enable hard interrupt but we really should disable interrupts + * when we return from the interrupt, and so that we don't get + * interrupted after loading SRR0/1. + */ +#ifdef CONFIG_PPC_BOOK3E + wrteei 0 +#else + ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */ + mtmsrd r10,1 /* Update machine state */ +#endif /* CONFIG_PPC_BOOK3E */ #endif /* CONFIG_PREEMPT */ .globl fast_exc_return_irq @@ -753,6 +785,10 @@ fast_exception_return: andc r4,r4,r0 /* r0 contains MSR_RI here */ mtmsrd r4,1 +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* TM debug */ + std r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */ +#endif /* * r13 is our per cpu area, only restore it if we are returning to * userspace the value stored in the stack frame may belong to @@ -761,6 +797,7 @@ fast_exception_return: andi. r0,r3,MSR_PR beq 1f ACCOUNT_CPU_USER_EXIT(r2, r4) + RESTORE_PPR(r2, r4) REST_GPR(13, r1) 1: mtspr SPRN_SRR1,r3 @@ -840,13 +877,22 @@ restore_check_irq_replay: addi r3,r1,STACK_FRAME_OVERHEAD; bl .timer_interrupt b .ret_from_except +#ifdef CONFIG_PPC_DOORBELL +1: #ifdef CONFIG_PPC_BOOK3E -1: cmpwi cr0,r3,0x280 + cmpwi cr0,r3,0x280 +#else + BEGIN_FTR_SECTION + cmpwi cr0,r3,0xe80 + FTR_SECTION_ELSE + cmpwi cr0,r3,0xa00 + ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE) +#endif /* CONFIG_PPC_BOOK3E */ bne 1f addi r3,r1,STACK_FRAME_OVERHEAD; bl .doorbell_exception b .ret_from_except -#endif /* CONFIG_PPC_BOOK3E */ +#endif /* CONFIG_PPC_DOORBELL */ 1: b .ret_from_except /* What else to do here ? 
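The _switch() hunks above explain why the TAR now has to travel with the thread across context switches. As a rough, hedged C model of what the added assembly does (the SPR stub, helper names and the reduced structure below are invented for illustration, not kernel code):

/* Illustrative model only: the shape of the TAR save/restore added to
 * _switch() above. The SPR is modelled by a plain variable; mfspr/mtspr
 * stand in for the real instructions, and only the THREAD_TAR slot is kept.
 */
static unsigned long fake_tar_spr;

static unsigned long mfspr_tar(void)            { return fake_tar_spr; }
static void          mtspr_tar(unsigned long v) { fake_tar_spr = v; }

struct thread_state { unsigned long tar; };

static void switch_tar(struct thread_state *prev, struct thread_state *next,
                       int cpu_has_bctar)        /* gate mirrors CPU_FTR_BCTAR */
{
        if (!cpu_has_bctar)
                return;
        prev->tar = mfspr_tar();                 /* back up outgoing task's TAR */
        mtspr_tar(next->tar);                    /* reload incoming task's TAR  */
}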
*/ unrecov_restore: diff --git a/arch/powerpc/kernel/epapr_hcalls.S b/arch/powerpc/kernel/epapr_hcalls.S index 697b390ebfd..62c0dc23782 100644 --- a/arch/powerpc/kernel/epapr_hcalls.S +++ b/arch/powerpc/kernel/epapr_hcalls.S @@ -8,13 +8,41 @@ */ #include <linux/threads.h> +#include <asm/epapr_hcalls.h> #include <asm/reg.h> #include <asm/page.h> #include <asm/cputable.h> #include <asm/thread_info.h> #include <asm/ppc_asm.h> +#include <asm/asm-compat.h> #include <asm/asm-offsets.h> +/* epapr_ev_idle() was derived from e500_idle() */ +_GLOBAL(epapr_ev_idle) + CURRENT_THREAD_INFO(r3, r1) + PPC_LL r4, TI_LOCAL_FLAGS(r3) /* set napping bit */ + ori r4, r4,_TLF_NAPPING /* so when we take an exception */ + PPC_STL r4, TI_LOCAL_FLAGS(r3) /* it will return to our caller */ + + wrteei 1 + +idle_loop: + LOAD_REG_IMMEDIATE(r11, EV_HCALL_TOKEN(EV_IDLE)) + +.global epapr_ev_idle_start +epapr_ev_idle_start: + li r3, -1 + nop + nop + nop + + /* + * Guard against spurious wakeups from a hypervisor -- + * only interrupt will cause us to return to LR due to + * _TLF_NAPPING. + */ + b idle_loop + /* Hypercall entry point. Will be patched with device tree instructions. */ .global epapr_hypercall_start epapr_hypercall_start: diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c index 028aeae370b..f3eab8594d9 100644 --- a/arch/powerpc/kernel/epapr_paravirt.c +++ b/arch/powerpc/kernel/epapr_paravirt.c @@ -21,6 +21,10 @@ #include <asm/epapr_hcalls.h> #include <asm/cacheflush.h> #include <asm/code-patching.h> +#include <asm/machdep.h> + +extern void epapr_ev_idle(void); +extern u32 epapr_ev_idle_start[]; bool epapr_paravirt_enabled; @@ -41,8 +45,13 @@ static int __init epapr_paravirt_init(void) if (len % 4 || len > (4 * 4)) return -ENODEV; - for (i = 0; i < (len / 4); i++) + for (i = 0; i < (len / 4); i++) { patch_instruction(epapr_hypercall_start + i, insts[i]); + patch_instruction(epapr_ev_idle_start + i, insts[i]); + } + + if (of_get_property(hyper_node, "has-idle", NULL)) + ppc_md.power_save = epapr_ev_idle; epapr_paravirt_enabled = true; diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 4684e33a26c..ae54553eacd 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -159,8 +159,9 @@ exc_##n##_common: \ std r9,GPR9(r1); /* save r9 in stackframe */ \ std r10,_NIP(r1); /* save SRR0 to stackframe */ \ std r11,_MSR(r1); /* save SRR1 to stackframe */ \ + beq 2f; /* if from kernel mode */ \ ACCOUNT_CPU_USER_ENTRY(r10,r11);/* accounting (uses cr0+eq) */ \ - ld r3,excf+EX_R10(r13); /* get back r10 */ \ +2: ld r3,excf+EX_R10(r13); /* get back r10 */ \ ld r4,excf+EX_R11(r13); /* get back r11 */ \ mfspr r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 */ \ std r12,GPR12(r1); /* save r12 in stackframe */ \ diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 10b658ad65e..87ef8f5ee5b 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -19,12 +19,76 @@ /* * We layout physical memory as follows: * 0x0000 - 0x00ff : Secondary processor spin code - * 0x0100 - 0x2fff : pSeries Interrupt prologs - * 0x3000 - 0x5fff : interrupt support common interrupt prologs - * 0x6000 - 0x6fff : Initial (CPU0) segment table + * 0x0100 - 0x17ff : pSeries Interrupt prologs + * 0x1800 - 0x4000 : interrupt support common interrupt prologs + * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1 + * 0x6000 - 0x6fff : more interrupt support including for 
IR=1,DR=1 * 0x7000 - 0x7fff : FWNMI data area - * 0x8000 - : Early init and support code + * 0x8000 - 0x8fff : Initial (CPU0) segment table + * 0x9000 - : Early init and support code */ + /* Syscall routine is used twice, in reloc-off and reloc-on paths */ +#define SYSCALL_PSERIES_1 \ +BEGIN_FTR_SECTION \ + cmpdi r0,0x1ebe ; \ + beq- 1f ; \ +END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \ + mr r9,r13 ; \ + GET_PACA(r13) ; \ + mfspr r11,SPRN_SRR0 ; \ +0: + +#define SYSCALL_PSERIES_2_RFID \ + mfspr r12,SPRN_SRR1 ; \ + ld r10,PACAKBASE(r13) ; \ + LOAD_HANDLER(r10, system_call_entry) ; \ + mtspr SPRN_SRR0,r10 ; \ + ld r10,PACAKMSR(r13) ; \ + mtspr SPRN_SRR1,r10 ; \ + rfid ; \ + b . ; /* prevent speculative execution */ + +#define SYSCALL_PSERIES_3 \ + /* Fast LE/BE switch system call */ \ +1: mfspr r12,SPRN_SRR1 ; \ + xori r12,r12,MSR_LE ; \ + mtspr SPRN_SRR1,r12 ; \ + rfid ; /* return to userspace */ \ + b . ; \ +2: mfspr r12,SPRN_SRR1 ; \ + andi. r12,r12,MSR_PR ; \ + bne 0b ; \ + mtspr SPRN_SRR0,r3 ; \ + mtspr SPRN_SRR1,r4 ; \ + mtspr SPRN_SDR1,r5 ; \ + rfid ; \ + b . ; /* prevent speculative execution */ + +#if defined(CONFIG_RELOCATABLE) + /* + * We can't branch directly; in the direct case we use LR + * and system_call_entry restores LR. (We thus need to move + * LR to r10 in the RFID case too.) + */ +#define SYSCALL_PSERIES_2_DIRECT \ + mflr r10 ; \ + ld r12,PACAKBASE(r13) ; \ + LOAD_HANDLER(r12, system_call_entry_direct) ; \ + mtctr r12 ; \ + mfspr r12,SPRN_SRR1 ; \ + /* Re-use of r13... No spare regs to do this */ \ + li r13,MSR_RI ; \ + mtmsrd r13,1 ; \ + GET_PACA(r13) ; /* get r13 back */ \ + bctr ; +#else + /* We can branch directly */ +#define SYSCALL_PSERIES_2_DIRECT \ + mfspr r12,SPRN_SRR1 ; \ + li r10,MSR_RI ; \ + mtmsrd r10,1 ; /* Set RI (EE=0) */ \ + b system_call_entry_direct ; +#endif /* * This is the start of the interrupt handlers for pSeries @@ -40,7 +104,7 @@ __start_interrupts: .globl system_reset_pSeries; system_reset_pSeries: - HMT_MEDIUM; + HMT_MEDIUM_PPR_DISCARD SET_SCRATCH0(r13) #ifdef CONFIG_PPC_P7_NAP BEGIN_FTR_SECTION @@ -89,12 +153,15 @@ machine_check_pSeries_1: * some code path might still want to branch into the original * vector */ - b machine_check_pSeries + HMT_MEDIUM_PPR_DISCARD + SET_SCRATCH0(r13) /* save r13 */ + EXCEPTION_PROLOG_0(PACA_EXMC) + b machine_check_pSeries_0 . = 0x300 .globl data_access_pSeries data_access_pSeries: - HMT_MEDIUM + HMT_MEDIUM_PPR_DISCARD SET_SCRATCH0(r13) BEGIN_FTR_SECTION b data_access_check_stab @@ -106,8 +173,9 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB) . = 0x380 .globl data_access_slb_pSeries data_access_slb_pSeries: - HMT_MEDIUM + HMT_MEDIUM_PPR_DISCARD SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXSLB) EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380) std r3,PACA_EXSLB+EX_R3(r13) mfspr r3,SPRN_DAR @@ -137,8 +205,9 @@ data_access_slb_pSeries: . 
= 0x480 .globl instruction_access_slb_pSeries instruction_access_slb_pSeries: - HMT_MEDIUM + HMT_MEDIUM_PPR_DISCARD SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXSLB) EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480) std r3,PACA_EXSLB+EX_R3(r13) mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ @@ -188,7 +257,7 @@ hardware_interrupt_hv: MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer) STD_EXCEPTION_HV(0x980, 0x982, hdecrementer) - STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a) + MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super) KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00) STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b) @@ -207,31 +276,11 @@ system_call_pSeries: KVMTEST(0xc00) GET_SCRATCH0(r13) #endif -BEGIN_FTR_SECTION - cmpdi r0,0x1ebe - beq- 1f -END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) - mr r9,r13 - GET_PACA(r13) - mfspr r11,SPRN_SRR0 - mfspr r12,SPRN_SRR1 - ld r10,PACAKBASE(r13) - LOAD_HANDLER(r10, system_call_entry) - mtspr SPRN_SRR0,r10 - ld r10,PACAKMSR(r13) - mtspr SPRN_SRR1,r10 - rfid - b . /* prevent speculative execution */ - + SYSCALL_PSERIES_1 + SYSCALL_PSERIES_2_RFID + SYSCALL_PSERIES_3 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00) -/* Fast LE/BE switch system call */ -1: mfspr r12,SPRN_SRR1 - xori r12,r12,MSR_LE - mtspr SPRN_SRR1,r12 - rfid /* return to userspace */ - b . - STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step) KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00) @@ -240,16 +289,30 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) */ . = 0xe00 hv_exception_trampoline: + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXGEN) b h_data_storage_hv + . = 0xe20 + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXGEN) b h_instr_storage_hv + . = 0xe40 + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXGEN) b emulation_assist_hv - . = 0xe50 - b hmi_exception_hv + . = 0xe60 + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXGEN) b hmi_exception_hv + . = 0xe80 + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXGEN) + b h_doorbell_hv + /* We need to deal with the Altivec unavailable exception * here which is at 0xf20, thus in the middle of the * prolog code of the PerformanceMonitor one. A little @@ -257,16 +320,27 @@ hv_exception_trampoline: */ performance_monitor_pSeries_1: . = 0xf00 + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXGEN) b performance_monitor_pSeries altivec_unavailable_pSeries_1: . = 0xf20 + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXGEN) b altivec_unavailable_pSeries vsx_unavailable_pSeries_1: . = 0xf40 + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXGEN) b vsx_unavailable_pSeries + . = 0xf60 + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXGEN) + b tm_unavailable_pSeries + #ifdef CONFIG_CBE_RAS STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error) KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202) @@ -276,13 +350,11 @@ vsx_unavailable_pSeries_1: KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300) . = 0x1500 - .global denorm_Hypervisor + .global denorm_exception_hv denorm_exception_hv: - HMT_MEDIUM + HMT_MEDIUM_PPR_DISCARD mtspr SPRN_SPRG_HSCRATCH0,r13 - mfspr r13,SPRN_SPRG_HPACA - std r9,PACA_EXGEN+EX_R9(r13) - std r10,PACA_EXGEN+EX_R10(r13) + EXCEPTION_PROLOG_0(PACA_EXGEN) std r11,PACA_EXGEN+EX_R11(r13) std r12,PACA_EXGEN+EX_R12(r13) mfspr r9,SPRN_SPRG_HSCRATCH0 @@ -311,20 +383,24 @@ denorm_exception_hv: #ifdef CONFIG_CBE_RAS STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal) KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802) +#else + . = 0x1800 #endif /* CONFIG_CBE_RAS */ - . 
= 0x3000 /*** Out of line interrupts support ***/ + .align 7 /* moved from 0x200 */ machine_check_pSeries: .globl machine_check_fwnmi machine_check_fwnmi: - HMT_MEDIUM + HMT_MEDIUM_PPR_DISCARD SET_SCRATCH0(r13) /* save r13 */ - EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, - EXC_STD, KVMTEST, 0x200) + EXCEPTION_PROLOG_0(PACA_EXMC) +machine_check_pSeries_0: + EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200) + EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD) KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200) /* moved from 0x300 */ @@ -450,6 +526,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206) mtspr SPRN_HSRR0,r11 mtcrf 0x80,r9 ld r9,PACA_EXGEN+EX_R9(r13) + RESTORE_PPR_PACA(PACA_EXGEN, r10) ld r10,PACA_EXGEN+EX_R10(r13) ld r11,PACA_EXGEN+EX_R11(r13) ld r12,PACA_EXGEN+EX_R12(r13) @@ -460,28 +537,34 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206) .align 7 /* moved from 0xe00 */ - STD_EXCEPTION_HV(., 0xe02, h_data_storage) + STD_EXCEPTION_HV_OOL(0xe02, h_data_storage) KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02) - STD_EXCEPTION_HV(., 0xe22, h_instr_storage) + STD_EXCEPTION_HV_OOL(0xe22, h_instr_storage) KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22) - STD_EXCEPTION_HV(., 0xe42, emulation_assist) + STD_EXCEPTION_HV_OOL(0xe42, emulation_assist) KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42) - STD_EXCEPTION_HV(., 0xe62, hmi_exception) /* need to flush cache ? */ + STD_EXCEPTION_HV_OOL(0xe62, hmi_exception) /* need to flush cache ? */ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62) + MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell) + KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82) /* moved from 0xf00 */ - STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor) + STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor) KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00) - STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable) + STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable) KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20) - STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable) + STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable) KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40) + STD_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable) + KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60) /* - * An interrupt came in while soft-disabled. We set paca->irq_happened, - * then, if it was a decrementer interrupt, we bump the dec to max and - * and return, else we hard disable and return. This is called with - * r10 containing the value to OR to the paca field. + * An interrupt came in while soft-disabled. We set paca->irq_happened, then: + * - If it was a decrementer interrupt, we bump the dec to max and and return. + * - If it was a doorbell we return immediately since doorbells are edge + * triggered and won't automatically refire. + * - else we hard disable and return. + * This is called with r10 containing the value to OR to the paca field. */ #define MASKED_INTERRUPT(_H) \ masked_##_H##interrupt: \ @@ -489,13 +572,15 @@ masked_##_H##interrupt: \ lbz r11,PACAIRQHAPPENED(r13); \ or r11,r11,r10; \ stb r11,PACAIRQHAPPENED(r13); \ - andi. r10,r10,PACA_IRQ_DEC; \ - beq 1f; \ + cmpwi r10,PACA_IRQ_DEC; \ + bne 1f; \ lis r10,0x7fff; \ ori r10,r10,0xffff; \ mtspr SPRN_DEC,r10; \ b 2f; \ -1: mfspr r10,SPRN_##_H##SRR1; \ +1: cmpwi r10,PACA_IRQ_DBELL; \ + beq 2f; \ + mfspr r10,SPRN_##_H##SRR1; \ rldicl r10,r10,48,1; /* clear MSR_EE */ \ rotldi r10,r10,16; \ mtspr SPRN_##_H##SRR1,r10; \ @@ -512,8 +597,8 @@ masked_##_H##interrupt: \ /* * Called from arch_local_irq_enable when an interrupt needs - * to be resent. r3 contains 0x500 or 0x900 to indicate which - * kind of interrupt. 
MSR:EE is already off. We generate a + * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate + * which kind of interrupt. MSR:EE is already off. We generate a * stackframe like if a real interrupt had happened. * * Note: While MSR:EE is off, we need to make sure that _MSR @@ -529,9 +614,18 @@ _GLOBAL(__replay_interrupt) mflr r11 mfcr r9 ori r12,r12,MSR_EE - andi. r3,r3,0x0800 - bne decrementer_common - b hardware_interrupt_common + cmpwi r3,0x900 + beq decrementer_common + cmpwi r3,0x500 + beq hardware_interrupt_common +BEGIN_FTR_SECTION + cmpwi r3,0xe80 + beq h_doorbell_common +FTR_SECTION_ELSE + cmpwi r3,0xa00 + beq doorbell_super_common +ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE) + blr #ifdef CONFIG_PPC_PSERIES /* @@ -540,7 +634,7 @@ _GLOBAL(__replay_interrupt) .globl system_reset_fwnmi .align 7 system_reset_fwnmi: - HMT_MEDIUM + HMT_MEDIUM_PPR_DISCARD SET_SCRATCH0(r13) /* save r13 */ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD, NOTEST, 0x100) @@ -575,16 +669,12 @@ slb_miss_user_pseries: b . /* prevent spec. execution */ #endif /* __DISABLED__ */ - .align 7 - .globl __end_interrupts -__end_interrupts: - /* * Code from here down to __end_handlers is invoked from the * exception prologs above. Because the prologs assemble the * addresses of these handlers using the LOAD_HANDLER macro, - * which uses an addi instruction, these handlers must be in - * the first 32k of the kernel image. + * which uses an ori instruction, these handlers must be in + * the first 64k of the kernel image. */ /*** Common interrupt handlers ***/ @@ -609,12 +699,21 @@ machine_check_common: STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ) STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt) STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt) - STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception) +#ifdef CONFIG_PPC_DOORBELL + STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .doorbell_exception) +#else + STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .unknown_exception) +#endif STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) - STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception) - STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception) + STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception) + STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception) +#ifdef CONFIG_PPC_DOORBELL + STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception) +#else + STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .unknown_exception) +#endif STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception) STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception) @@ -629,7 +728,167 @@ machine_check_common: STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception) #endif /* CONFIG_CBE_RAS */ + /* + * Relocation-on interrupts: A subset of the interrupts can be delivered + * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering + * it. Addresses are the same as the original interrupt addresses, but + * offset by 0xc000000000004000. + * It's impossible to receive interrupts below 0x300 via this mechanism. + * KVM: None of these traps are from the guest ; anything that escalated + * to HV=1 from HV=0 is delivered via real mode handlers. 
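The comment above describes where the new relocation-on vectors live. A tiny standalone illustration of that address mapping (not kernel code; just the arithmetic stated in the comment):

#include <stdint.h>

/* A relocation-on vector is the original vector number offset by
 * 0xc000000000004000, e.g. 0x300 is delivered at 0xc000000000004300. */
static uint64_t relon_vector_addr(uint32_t vec)
{
        return 0xc000000000004000ULL + vec;
}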
+ */ + + /* + * This uses the standard macro, since the original 0x300 vector + * only has extra guff for STAB-based processors -- which never + * come here. + */ + STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access) + . = 0x4380 + .globl data_access_slb_relon_pSeries +data_access_slb_relon_pSeries: + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXSLB) + EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380) + std r3,PACA_EXSLB+EX_R3(r13) + mfspr r3,SPRN_DAR + mfspr r12,SPRN_SRR1 +#ifndef CONFIG_RELOCATABLE + b .slb_miss_realmode +#else + /* + * We can't just use a direct branch to .slb_miss_realmode + * because the distance from here to there depends on where + * the kernel ends up being put. + */ + mfctr r11 + ld r10,PACAKBASE(r13) + LOAD_HANDLER(r10, .slb_miss_realmode) + mtctr r10 + bctr +#endif + + STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access) + . = 0x4480 + .globl instruction_access_slb_relon_pSeries +instruction_access_slb_relon_pSeries: + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXSLB) + EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480) + std r3,PACA_EXSLB+EX_R3(r13) + mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ + mfspr r12,SPRN_SRR1 +#ifndef CONFIG_RELOCATABLE + b .slb_miss_realmode +#else + mfctr r11 + ld r10,PACAKBASE(r13) + LOAD_HANDLER(r10, .slb_miss_realmode) + mtctr r10 + bctr +#endif + + . = 0x4500 + .globl hardware_interrupt_relon_pSeries; + .globl hardware_interrupt_relon_hv; +hardware_interrupt_relon_pSeries: +hardware_interrupt_relon_hv: + BEGIN_FTR_SECTION + _MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV) + FTR_SECTION_ELSE + _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR) + ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_206) + STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment) + STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check) + STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable) + MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer) + STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer) + MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super) + STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b) + + . = 0x4c00 + .globl system_call_relon_pSeries +system_call_relon_pSeries: + HMT_MEDIUM + SYSCALL_PSERIES_1 + SYSCALL_PSERIES_2_DIRECT + SYSCALL_PSERIES_3 + + STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step) + + . = 0x4e00 + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXGEN) + b h_data_storage_relon_hv + + . = 0x4e20 + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXGEN) + b h_instr_storage_relon_hv + + . = 0x4e40 + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXGEN) + b emulation_assist_relon_hv + + . = 0x4e60 + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXGEN) + b hmi_exception_relon_hv + + . = 0x4e80 + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXGEN) + b h_doorbell_relon_hv + +performance_monitor_relon_pSeries_1: + . = 0x4f00 + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXGEN) + b performance_monitor_relon_pSeries + +altivec_unavailable_relon_pSeries_1: + . = 0x4f20 + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXGEN) + b altivec_unavailable_relon_pSeries + +vsx_unavailable_relon_pSeries_1: + . = 0x4f40 + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXGEN) + b vsx_unavailable_relon_pSeries + +tm_unavailable_relon_pSeries_1: + . = 0x4f60 + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXGEN) + b tm_unavailable_relon_pSeries + + STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint) +#ifdef CONFIG_PPC_DENORMALISATION + . 
= 0x5500 + b denorm_exception_hv +#endif +#ifdef CONFIG_HVC_SCOM + STD_RELON_EXCEPTION_HV(0x5600, 0x1600, maintence_interrupt) + KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1600) +#endif /* CONFIG_HVC_SCOM */ + STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist) + + /* Other future vectors */ + .align 7 + .globl __end_interrupts +__end_interrupts: + .align 7 +system_call_entry_direct: +#if defined(CONFIG_RELOCATABLE) + /* The first level prologue may have used LR to get here, saving + * orig in r10. To save hacking/ifdeffing common code, restore here. + */ + mtlr r10 +#endif system_call_entry: b system_call_common @@ -714,21 +973,21 @@ data_access_common: ld r3,PACA_EXGEN+EX_DAR(r13) lwz r4,PACA_EXGEN+EX_DSISR(r13) li r5,0x300 - b .do_hash_page /* Try to handle as hpte fault */ + b .do_hash_page /* Try to handle as hpte fault */ .align 7 - .globl h_data_storage_common + .globl h_data_storage_common h_data_storage_common: - mfspr r10,SPRN_HDAR - std r10,PACA_EXGEN+EX_DAR(r13) - mfspr r10,SPRN_HDSISR - stw r10,PACA_EXGEN+EX_DSISR(r13) - EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN) - bl .save_nvgprs + mfspr r10,SPRN_HDAR + std r10,PACA_EXGEN+EX_DAR(r13) + mfspr r10,SPRN_HDSISR + stw r10,PACA_EXGEN+EX_DSISR(r13) + EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN) + bl .save_nvgprs DISABLE_INTS - addi r3,r1,STACK_FRAME_OVERHEAD - bl .unknown_exception - b .ret_from_except + addi r3,r1,STACK_FRAME_OVERHEAD + bl .unknown_exception + b .ret_from_except .align 7 .globl instruction_access_common @@ -741,7 +1000,7 @@ instruction_access_common: li r5,0x400 b .do_hash_page /* Try to handle as hpte fault */ - STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception) + STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception) /* * Here is the common SLB miss user that is used when going to virtual @@ -843,6 +1102,7 @@ _GLOBAL(slb_miss_realmode) mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ .machine pop + RESTORE_PPR_PACA(PACA_EXSLB, r9) ld r9,PACA_EXSLB+EX_R9(r13) ld r10,PACA_EXSLB+EX_R10(r13) ld r11,PACA_EXSLB+EX_R11(r13) @@ -916,9 +1176,26 @@ fp_unavailable_common: addi r3,r1,STACK_FRAME_OVERHEAD bl .kernel_fp_unavailable_exception BUG_OPCODE -1: bl .load_up_fpu +1: +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +BEGIN_FTR_SECTION + /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in + * transaction), go do TM stuff + */ + rldicl. r0, r12, (64-MSR_TS_LG), (64-2) + bne- 2f +END_FTR_SECTION_IFSET(CPU_FTR_TM) +#endif + bl .load_up_fpu b fast_exception_return - +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +2: /* User process was in a transaction */ + bl .save_nvgprs + DISABLE_INTS + addi r3,r1,STACK_FRAME_OVERHEAD + bl .fp_unavailable_tm + b .ret_from_except +#endif .align 7 .globl altivec_unavailable_common altivec_unavailable_common: @@ -926,8 +1203,25 @@ altivec_unavailable_common: #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION beq 1f +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + BEGIN_FTR_SECTION_NESTED(69) + /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in + * transaction), go do TM stuff + */ + rldicl. 
r0, r12, (64-MSR_TS_LG), (64-2) + bne- 2f + END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) +#endif bl .load_up_altivec b fast_exception_return +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +2: /* User process was in a transaction */ + bl .save_nvgprs + DISABLE_INTS + addi r3,r1,STACK_FRAME_OVERHEAD + bl .altivec_unavailable_tm + b .ret_from_except +#endif 1: END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif @@ -944,7 +1238,24 @@ vsx_unavailable_common: #ifdef CONFIG_VSX BEGIN_FTR_SECTION beq 1f +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + BEGIN_FTR_SECTION_NESTED(69) + /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in + * transaction), go do TM stuff + */ + rldicl. r0, r12, (64-MSR_TS_LG), (64-2) + bne- 2f + END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) +#endif b .load_up_vsx +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +2: /* User process was in a transaction */ + bl .save_nvgprs + DISABLE_INTS + addi r3,r1,STACK_FRAME_OVERHEAD + bl .vsx_unavailable_tm + b .ret_from_except +#endif 1: END_FTR_SECTION_IFSET(CPU_FTR_VSX) #endif @@ -955,9 +1266,75 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) b .ret_from_except .align 7 + .globl tm_unavailable_common +tm_unavailable_common: + EXCEPTION_PROLOG_COMMON(0xf60, PACA_EXGEN) + bl .save_nvgprs + DISABLE_INTS + addi r3,r1,STACK_FRAME_OVERHEAD + bl .tm_unavailable_exception + b .ret_from_except + + .align 7 .globl __end_handlers __end_handlers: + /* Equivalents to the above handlers for relocation-on interrupt vectors */ + STD_RELON_EXCEPTION_HV_OOL(0xe00, h_data_storage) + KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe00) + STD_RELON_EXCEPTION_HV_OOL(0xe20, h_instr_storage) + KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe20) + STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist) + KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe40) + STD_RELON_EXCEPTION_HV_OOL(0xe60, hmi_exception) + KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe60) + MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell) + KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe80) + + STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor) + STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable) + STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable) + STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable) + +#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) +/* + * Data area reserved for FWNMI option. + * This address (0x7000) is fixed by the RPA. + */ + .= 0x7000 + .globl fwnmi_data_area +fwnmi_data_area: + + /* pseries and powernv need to keep the whole page from + * 0x7000 to 0x8000 free for use by the firmware + */ + . = 0x8000 +#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */ + +/* Space for CPU0's segment table */ + .balign 4096 + .globl initial_stab +initial_stab: + .space 4096 + +#ifdef CONFIG_PPC_POWERNV +_GLOBAL(opal_mc_secondary_handler) + HMT_MEDIUM_PPR_DISCARD + SET_SCRATCH0(r13) + GET_PACA(r13) + clrldi r3,r3,2 + tovirt(r3,r3) + std r3,PACA_OPAL_MC_EVT(r13) + ld r13,OPAL_MC_SRR0(r3) + mtspr SPRN_SRR0,r13 + ld r13,OPAL_MC_SRR1(r3) + mtspr SPRN_SRR1,r13 + ld r3,OPAL_MC_GPR3(r3) + GET_SCRATCH0(r13) + b machine_check_pSeries +#endif /* CONFIG_PPC_POWERNV */ + + /* * Hash table stuff */ @@ -1029,7 +1406,7 @@ handle_dabr_fault: ld r4,_DAR(r1) ld r5,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD - bl .do_dabr + bl .do_break 12: b .ret_from_except_lite @@ -1151,41 +1528,3 @@ _GLOBAL(do_stab_bolted) ld r13,PACA_EXSLB+EX_R13(r13) rfid b . /* prevent speculative execution */ - -#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) -/* - * Data area reserved for FWNMI option. 
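The fp/altivec/vsx unavailable handlers above all use the same rldicl test on the two MSR transaction-state bits. In C that reduces to a shift and mask; the sketch below is illustrative only, and MSR_TS_LG is a stand-in value (the real definition lives in asm/reg.h):

#define MSR_TS_LG 33    /* assumed bit position, for illustration only */

static int msr_in_transaction(unsigned long msr)
{
        /* rldicl. r0,r12,(64-MSR_TS_LG),(64-2)  ~  (msr >> MSR_TS_LG) & 0x3 */
        return ((msr >> MSR_TS_LG) & 0x3UL) != 0;
}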
- * This address (0x7000) is fixed by the RPA. - */ - .= 0x7000 - .globl fwnmi_data_area -fwnmi_data_area: - - /* pseries and powernv need to keep the whole page from - * 0x7000 to 0x8000 free for use by the firmware - */ - . = 0x8000 -#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */ - -/* Space for CPU0's segment table */ - .balign 4096 - .globl initial_stab -initial_stab: - .space 4096 - -#ifdef CONFIG_PPC_POWERNV -_GLOBAL(opal_mc_secondary_handler) - HMT_MEDIUM - SET_SCRATCH0(r13) - GET_PACA(r13) - clrldi r3,r3,2 - tovirt(r3,r3) - std r3,PACA_OPAL_MC_EVT(r13) - ld r13,OPAL_MC_SRR0(r3) - mtspr SPRN_SRR0,r13 - ld r13,OPAL_MC_SRR1(r3) - mtspr SPRN_SRR1,r13 - ld r3,OPAL_MC_GPR3(r3) - GET_SCRATCH0(r13) - b machine_check_pSeries -#endif /* CONFIG_PPC_POWERNV */ diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S index e0ada05f2df..caeaabf11a2 100644 --- a/arch/powerpc/kernel/fpu.S +++ b/arch/powerpc/kernel/fpu.S @@ -35,6 +35,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ 2: REST_32VSRS(n,c,base); \ 3: +#define __REST_32FPVSRS_TRANSACT(n,c,base) \ +BEGIN_FTR_SECTION \ + b 2f; \ +END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ + REST_32FPRS_TRANSACT(n,base); \ + b 3f; \ +2: REST_32VSRS_TRANSACT(n,c,base); \ +3: + #define __SAVE_32FPVSRS(n,c,base) \ BEGIN_FTR_SECTION \ b 2f; \ @@ -45,11 +54,68 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ 3: #else #define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base) +#define __REST_32FPVSRS_TRANSACT(n,b,base) REST_32FPRS(n, base) #define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base) #endif #define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base) +#define REST_32FPVSRS_TRANSACT(n,c,base) \ + __REST_32FPVSRS_TRANSACT(n,__REG_##c,__REG_##base) #define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base) +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +/* + * Wrapper to call load_up_fpu from C. + * void do_load_up_fpu(struct pt_regs *regs); + */ +_GLOBAL(do_load_up_fpu) + mflr r0 + std r0, 16(r1) + stdu r1, -112(r1) + + subi r6, r3, STACK_FRAME_OVERHEAD + /* load_up_fpu expects r12=MSR, r13=PACA, and returns + * with r12 = new MSR. + */ + ld r12,_MSR(r6) + GET_PACA(r13) + + bl load_up_fpu + std r12,_MSR(r6) + + ld r0, 112+16(r1) + addi r1, r1, 112 + mtlr r0 + blr + + +/* void do_load_up_transact_fpu(struct thread_struct *thread) + * + * This is similar to load_up_fpu but for the transactional version of the FP + * register set. It doesn't mess with the task MSR or valid flags. + * Furthermore, we don't do lazy FP with TM currently. + */ +_GLOBAL(do_load_up_transact_fpu) + mfmsr r6 + ori r5,r6,MSR_FP +#ifdef CONFIG_VSX +BEGIN_FTR_SECTION + oris r5,r5,MSR_VSX@h +END_FTR_SECTION_IFSET(CPU_FTR_VSX) +#endif + SYNC + MTMSRD(r5) + + lfd fr0,THREAD_TRANSACT_FPSCR(r3) + MTFSF_L(fr0) + REST_32FPVSRS_TRANSACT(0, R4, R3) + + /* FP/VSX off again */ + MTMSRD(r6) + SYNC + + blr +#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ + /* * This task wants to use the FPU now. 
* On UP, disable FP for the task which had the FPU previously, diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index 4989661b710..8a9b6f59822 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -430,30 +430,18 @@ label: EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_EE) /* 0x1000 - Programmable Interval Timer (PIT) Exception */ - START_EXCEPTION(0x1000, Decrementer) - NORMAL_EXCEPTION_PROLOG - lis r0,TSR_PIS@h - mtspr SPRN_TSR,r0 /* Clear the PIT exception */ - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_LITE(0x1000, timer_interrupt) - -#if 0 -/* NOTE: - * FIT and WDT handlers are not implemented yet. - */ + . = 0x1000 + b Decrementer /* 0x1010 - Fixed Interval Timer (FIT) Exception */ - STND_EXCEPTION(0x1010, FITException, unknown_exception) + . = 0x1010 + b FITException /* 0x1020 - Watchdog Timer (WDT) Exception */ -#ifdef CONFIG_BOOKE_WDT - CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException) -#else - CRITICAL_EXCEPTION(0x1020, WDTException, unknown_exception) -#endif -#endif + . = 0x1020 + b WDTException /* 0x1100 - Data TLB Miss Exception * As the name implies, translation is not in the MMU, so search the @@ -738,6 +726,29 @@ label: (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ NOCOPY, crit_transfer_to_handler, ret_from_crit_exc) + /* Programmable Interval Timer (PIT) Exception. (from 0x1000) */ +Decrementer: + NORMAL_EXCEPTION_PROLOG + lis r0,TSR_PIS@h + mtspr SPRN_TSR,r0 /* Clear the PIT exception */ + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_LITE(0x1000, timer_interrupt) + + /* Fixed Interval Timer (FIT) Exception. (from 0x1010) */ +FITException: + NORMAL_EXCEPTION_PROLOG + addi r3,r1,STACK_FRAME_OVERHEAD; + EXC_XFER_EE(0x1010, unknown_exception) + + /* Watchdog Timer (WDT) Exception. (from 0x1020) */ +WDTException: + CRITICAL_EXCEPTION_PROLOG; + addi r3,r1,STACK_FRAME_OVERHEAD; + EXC_XFER_TEMPLATE(WatchdogException, 0x1020+2, + (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), + NOCOPY, crit_transfer_to_handler, + ret_from_crit_exc) + /* * The other Data TLB exceptions bail out to this point * if they can't resolve the lightweight TLB fault. diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 58bddee8e1e..0886ae6dd5b 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -122,6 +122,8 @@ __secondary_hold: #endif /* Grab our physical cpu number */ mr r24,r3 + /* stash r4 for book3e */ + mr r25,r4 /* Tell the master cpu we're here */ /* Relocation is off & we are located at an address less */ @@ -129,16 +131,31 @@ __secondary_hold: std r24,__secondary_hold_acknowledge-_stext(0) sync + li r26,0 +#ifdef CONFIG_PPC_BOOK3E + tovirt(r26,r26) +#endif /* All secondary cpus wait here until told to start. 
*/ -100: ld r4,__secondary_hold_spinloop-_stext(0) +100: ld r4,__secondary_hold_spinloop-_stext(r26) cmpdi 0,r4,0 beq 100b #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) +#ifdef CONFIG_PPC_BOOK3E + tovirt(r4,r4) +#endif ld r4,0(r4) /* deref function descriptor */ mtctr r4 mr r3,r24 + /* + * it may be the case that other platforms have r4 right to + * begin with, this gives us some safety in case it is not + */ +#ifdef CONFIG_PPC_BOOK3E + mr r4,r25 +#else li r4,0 +#endif /* Make sure that patched code is visible */ isync bctr @@ -169,6 +186,7 @@ _GLOBAL(generic_secondary_thread_init) /* get a valid TOC pointer, wherever we're mapped at */ bl .relative_toc + tovirt(r2,r2) #ifdef CONFIG_PPC_BOOK3E /* Book3E initialization */ @@ -195,6 +213,7 @@ _GLOBAL(generic_secondary_smp_init) /* get a valid TOC pointer, wherever we're mapped at */ bl .relative_toc + tovirt(r2,r2) #ifdef CONFIG_PPC_BOOK3E /* Book3E initialization */ @@ -422,7 +441,7 @@ _STATIC(__after_prom_start) tovirt(r6,r6) /* on booke, we already run at PAGE_OFFSET */ #endif -#ifdef CONFIG_CRASH_DUMP +#ifdef CONFIG_RELOCATABLE /* * Check if the kernel has to be running as relocatable kernel based on the * variable __run_at_load, if it is set the kernel is treated as relocatable @@ -432,7 +451,8 @@ _STATIC(__after_prom_start) cmplwi cr0,r7,1 bne 3f - li r5,__end_interrupts - _stext /* just copy interrupts */ + /* just copy interrupts */ + LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext) b 5f 3: #endif @@ -530,6 +550,7 @@ _GLOBAL(pmac_secondary_start) /* get TOC pointer (real address) */ bl .relative_toc + tovirt(r2,r2) /* Copy some CPU settings from CPU 0 */ bl .__restore_cpu_ppc970 @@ -664,6 +685,13 @@ _GLOBAL(enable_64b_mode) * This puts the TOC pointer into r2, offset by 0x8000 (as expected * by the toolchain). It computes the correct value for wherever we * are running at the moment, using position-independent code. + * + * Note: The compiler constructs pointers using offsets from the + * TOC in -mcmodel=medium mode. After we relocate to 0 but before + * the MMU is on we need our TOC to be a virtual address otherwise + * these pointers will be real addresses which may get stored and + * accessed later with the MMU on. We use tovirt() at the call + * sites to handle this. */ _GLOBAL(relative_toc) mflr r0 @@ -680,8 +708,9 @@ p_toc: .llong __toc_start + 0x8000 - 0b * This is where the main kernel code starts. */ _INIT_STATIC(start_here_multiplatform) - /* set up the TOC (real address) */ - bl .relative_toc + /* set up the TOC */ + bl .relative_toc + tovirt(r2,r2) /* Clear out the BSS. It may have been done in prom_init, * already but that's irrelevant since prom_init will soon @@ -703,6 +732,7 @@ _INIT_STATIC(start_here_multiplatform) #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL /* Setup OPAL entry */ + LOAD_REG_ADDR(r11, opal) std r28,0(r11); std r29,8(r11); #endif diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index a89cae481b0..a949bdfc962 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -73,7 +73,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) * If so, DABR will be populated in single_step_dabr_instruction(). 
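The __secondary_hold loop above is a simple publish-and-spin handshake. A rough C model of the protocol (illustrative only; real-mode addressing, the function-descriptor dereference and the Book3E r4 handling are elided, and the names below are placeholders):

#include <stdatomic.h>

typedef void (*secondary_entry_fn)(unsigned long cpu);

static _Atomic(secondary_entry_fn) secondary_hold_spinloop;   /* set by master */
static _Atomic unsigned long       secondary_hold_acknowledge;

static void secondary_hold(unsigned long cpu)
{
        secondary_entry_fn entry;

        atomic_store(&secondary_hold_acknowledge, cpu);       /* "we're here"  */

        do {                                                   /* 100: spin     */
                entry = atomic_load(&secondary_hold_spinloop);
        } while (!entry);

        entry(cpu);                                            /* mtctr; bctr   */
}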
*/ if (current->thread.last_hit_ubp != bp) - set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx); + set_breakpoint(info); return 0; } @@ -97,7 +97,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) } *slot = NULL; - set_dabr(0, 0); + hw_breakpoint_disable(); } /* @@ -127,19 +127,13 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp) int arch_bp_generic_fields(int type, int *gen_bp_type) { - switch (type) { - case DABR_DATA_READ: - *gen_bp_type = HW_BREAKPOINT_R; - break; - case DABR_DATA_WRITE: - *gen_bp_type = HW_BREAKPOINT_W; - break; - case (DABR_DATA_WRITE | DABR_DATA_READ): - *gen_bp_type = (HW_BREAKPOINT_W | HW_BREAKPOINT_R); - break; - default: + *gen_bp_type = 0; + if (type & HW_BRK_TYPE_READ) + *gen_bp_type |= HW_BREAKPOINT_R; + if (type & HW_BRK_TYPE_WRITE) + *gen_bp_type |= HW_BREAKPOINT_W; + if (*gen_bp_type == 0) return -EINVAL; - } return 0; } @@ -148,35 +142,28 @@ int arch_bp_generic_fields(int type, int *gen_bp_type) */ int arch_validate_hwbkpt_settings(struct perf_event *bp) { - int ret = -EINVAL; + int ret = -EINVAL, length_max; struct arch_hw_breakpoint *info = counter_arch_bp(bp); if (!bp) return ret; - switch (bp->attr.bp_type) { - case HW_BREAKPOINT_R: - info->type = DABR_DATA_READ; - break; - case HW_BREAKPOINT_W: - info->type = DABR_DATA_WRITE; - break; - case HW_BREAKPOINT_R | HW_BREAKPOINT_W: - info->type = (DABR_DATA_READ | DABR_DATA_WRITE); - break; - default: + info->type = HW_BRK_TYPE_TRANSLATE; + if (bp->attr.bp_type & HW_BREAKPOINT_R) + info->type |= HW_BRK_TYPE_READ; + if (bp->attr.bp_type & HW_BREAKPOINT_W) + info->type |= HW_BRK_TYPE_WRITE; + if (info->type == HW_BRK_TYPE_TRANSLATE) + /* must set alteast read or write */ return ret; - } - + if (!(bp->attr.exclude_user)) + info->type |= HW_BRK_TYPE_USER; + if (!(bp->attr.exclude_kernel)) + info->type |= HW_BRK_TYPE_KERNEL; + if (!(bp->attr.exclude_hv)) + info->type |= HW_BRK_TYPE_HYP; info->address = bp->attr.bp_addr; info->len = bp->attr.bp_len; - info->dabrx = DABRX_ALL; - if (bp->attr.exclude_user) - info->dabrx &= ~DABRX_USER; - if (bp->attr.exclude_kernel) - info->dabrx &= ~DABRX_KERNEL; - if (bp->attr.exclude_hv) - info->dabrx &= ~DABRX_HYP; /* * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8) @@ -184,8 +171,16 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) * HW_BREAKPOINT_ALIGN by rounding off to the lower address, the * 'symbolsize' should satisfy the check below. 
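The hw_breakpoint.c hunks above replace the old DABR_DATA_READ/WRITE switch with a HW_BRK_TYPE_* bitmask built from the perf attributes. A hedged, self-contained model of that translation (the flag values are local stand-ins, not the real asm/hw_breakpoint.h constants):

#define BRK_TYPE_TRANSLATE  0x01
#define BRK_TYPE_READ       0x02
#define BRK_TYPE_WRITE      0x04
#define BRK_TYPE_USER       0x08
#define BRK_TYPE_KERNEL     0x10
#define BRK_TYPE_HYP        0x20

struct bp_request {
        int want_read, want_write;
        int exclude_user, exclude_kernel, exclude_hv;
};

static int build_brk_type(const struct bp_request *req, unsigned int *type)
{
        *type = BRK_TYPE_TRANSLATE;
        if (req->want_read)
                *type |= BRK_TYPE_READ;
        if (req->want_write)
                *type |= BRK_TYPE_WRITE;
        if (*type == BRK_TYPE_TRANSLATE)
                return -1;                /* must request at least read or write */
        if (!req->exclude_user)
                *type |= BRK_TYPE_USER;
        if (!req->exclude_kernel)
                *type |= BRK_TYPE_KERNEL;
        if (!req->exclude_hv)
                *type |= BRK_TYPE_HYP;
        return 0;
}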
*/ + length_max = 8; /* DABR */ + if (cpu_has_feature(CPU_FTR_DAWR)) { + length_max = 512 ; /* 64 doublewords */ + /* DAWR region can't cross 512 boundary */ + if ((bp->attr.bp_addr >> 10) != + ((bp->attr.bp_addr + bp->attr.bp_len) >> 10)) + return -EINVAL; + } if (info->len > - (HW_BREAKPOINT_LEN - (info->address & HW_BREAKPOINT_ALIGN))) + (length_max - (info->address & HW_BREAKPOINT_ALIGN))) return -EINVAL; return 0; } @@ -204,7 +199,7 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs) info = counter_arch_bp(tsk->thread.last_hit_ubp); regs->msr &= ~MSR_SE; - set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx); + set_breakpoint(info); tsk->thread.last_hit_ubp = NULL; } @@ -222,7 +217,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args) unsigned long dar = regs->dar; /* Disable breakpoints during exception handling */ - set_dabr(0, 0); + hw_breakpoint_disable(); /* * The counter may be concurrently released but that can only @@ -255,8 +250,9 @@ int __kprobes hw_breakpoint_handler(struct die_args *args) * we still need to single-step the instruction, but we don't * generate an event. */ - info->extraneous_interrupt = !((bp->attr.bp_addr <= dar) && - (dar - bp->attr.bp_addr < bp->attr.bp_len)); + if (!((bp->attr.bp_addr <= dar) && + (dar - bp->attr.bp_addr < bp->attr.bp_len))) + info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; /* Do not emulate user-space instructions, instead single-step them */ if (user_mode(regs)) { @@ -285,10 +281,10 @@ int __kprobes hw_breakpoint_handler(struct die_args *args) * As a policy, the callback is invoked in a 'trigger-after-execute' * fashion */ - if (!info->extraneous_interrupt) + if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ)) perf_bp_event(bp, regs); - set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx); + set_breakpoint(info); out: rcu_read_unlock(); return rc; @@ -317,10 +313,10 @@ int __kprobes single_step_dabr_instruction(struct die_args *args) * We shall invoke the user-defined callback function in the single * stepping handler to confirm to 'trigger-after-execute' semantics */ - if (!info->extraneous_interrupt) + if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ)) perf_bp_event(bp, regs); - set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx); + set_breakpoint(info); current->thread.last_hit_ubp = NULL; /* diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index 2099d9a879e..ea78761aa16 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -55,9 +55,6 @@ __setup("powersave=off", powersave_off); */ void cpu_idle(void) { - if (ppc_md.idle_loop) - ppc_md.idle_loop(); /* doesn't return */ - set_thread_flag(TIF_POLLING_NRFLAG); while (1) { tick_nohz_idle_enter(); diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c index 12d329bcbb9..50e90b7e713 100644 --- a/arch/powerpc/kernel/io-workarounds.c +++ b/arch/powerpc/kernel/io-workarounds.c @@ -118,7 +118,7 @@ static void iowa_##name at \ #undef DEF_PCI_AC_RET #undef DEF_PCI_AC_NORET -static const struct ppc_pci_io __devinitconst iowa_pci_io = { +static const struct ppc_pci_io iowa_pci_io = { #define DEF_PCI_AC_RET(name, ret, at, al, space, aa) .name = iowa_##name, #define DEF_PCI_AC_NORET(name, at, al, space, aa) .name = iowa_##name, @@ -146,7 +146,7 @@ static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size, } /* Enable IO workaround */ -static void __devinit io_workaround_init(void) +static void io_workaround_init(void) { static int 
io_workaround_inited; @@ -158,9 +158,8 @@ static void __devinit io_workaround_init(void) } /* Register new bus to support workaround */ -void __devinit iowa_register_bus(struct pci_controller *phb, - struct ppc_pci_io *ops, - int (*initfunc)(struct iowa_bus *, void *), void *data) +void iowa_register_bus(struct pci_controller *phb, struct ppc_pci_io *ops, + int (*initfunc)(struct iowa_bus *, void *), void *data) { struct iowa_bus *bus; struct device_node *np = phb->dn; diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 8226c6cb348..31c4fdc6859 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -656,7 +656,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid) struct iommu_pool *p; /* number of bytes needed for the bitmap */ - sz = (tbl->it_size + 7) >> 3; + sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz)); if (!page) @@ -708,7 +708,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid) void iommu_free_table(struct iommu_table *tbl, const char *node_name) { - unsigned long bitmap_sz, i; + unsigned long bitmap_sz; unsigned int order; if (!tbl || !tbl->it_map) { @@ -717,18 +717,19 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name) return; } + /* + * In case we have reserved the first bit, we should not emit + * the warning below. + */ + if (tbl->it_offset == 0) + clear_bit(0, tbl->it_map); + /* verify that table contains no entries */ - /* it_size is in entries, and we're examining 64 at a time */ - for (i = 0; i < (tbl->it_size/64); i++) { - if (tbl->it_map[i] != 0) { - printk(KERN_WARNING "%s: Unexpected TCEs for %s\n", - __func__, node_name); - break; - } - } + if (!bitmap_empty(tbl->it_map, tbl->it_size)) + pr_warn("%s: Unexpected TCEs for %s\n", __func__, node_name); /* calculate bitmap size in bytes */ - bitmap_sz = (tbl->it_size + 7) / 8; + bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); /* free bitmap */ order = get_order(bitmap_sz); diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 71413f41278..4f97fe34552 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -122,8 +122,8 @@ static inline notrace int decrementer_check_overflow(void) } /* This is called whenever we are re-enabling interrupts - * and returns either 0 (nothing to do) or 500/900 if there's - * either an EE or a DEC to generate. + * and returns either 0 (nothing to do) or 500/900/280/a00/e80 if + * there's an EE, DEC or DBELL to generate. * * This is called in two contexts: From arch_local_irq_restore() * before soft-enabling interrupts, and from the exception exit @@ -182,6 +182,13 @@ notrace unsigned int __check_irq_replay(void) local_paca->irq_happened &= ~PACA_IRQ_DBELL; if (happened & PACA_IRQ_DBELL) return 0x280; +#else + local_paca->irq_happened &= ~PACA_IRQ_DBELL; + if (happened & PACA_IRQ_DBELL) { + if (cpu_has_feature(CPU_FTR_HVMODE)) + return 0xe80; + return 0xa00; + } #endif /* CONFIG_PPC_BOOK3E */ /* There should be nothing left ! 
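The irq.c hunk above extends the lazy-interrupt bookkeeping so a pending doorbell is replayed as vector 0xe80 in HV mode or 0xa00 otherwise, alongside the existing 0x900/0x500 cases. A simplified, hedged model of that decision (the flag values are placeholders, not the real PACA_IRQ_* bits):

#define IRQ_HAPPENED_EE    0x1
#define IRQ_HAPPENED_DEC   0x2
#define IRQ_HAPPENED_DBELL 0x4

static unsigned int check_irq_replay(unsigned char *irq_happened, int hv_mode)
{
        if (*irq_happened & IRQ_HAPPENED_DEC) {
                *irq_happened &= ~IRQ_HAPPENED_DEC;
                return 0x900;                    /* decrementer                */
        }
        if (*irq_happened & IRQ_HAPPENED_EE) {
                *irq_happened &= ~IRQ_HAPPENED_EE;
                return 0x500;                    /* external interrupt         */
        }
        if (*irq_happened & IRQ_HAPPENED_DBELL) {
                *irq_happened &= ~IRQ_HAPPENED_DBELL;
                return hv_mode ? 0xe80 : 0xa00;  /* hv vs. server doorbell     */
        }
        return 0;                                /* nothing left to replay     */
}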
*/ diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c index d45ec58703c..0f199709796 100644 --- a/arch/powerpc/kernel/isa-bridge.c +++ b/arch/powerpc/kernel/isa-bridge.c @@ -41,8 +41,8 @@ EXPORT_SYMBOL_GPL(isa_bridge_pcidev); #define ISA_SPACE_MASK 0x1 #define ISA_SPACE_IO 0x1 -static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node, - unsigned long phb_io_base_phys) +static void pci_process_ISA_OF_ranges(struct device_node *isa_node, + unsigned long phb_io_base_phys) { /* We should get some saner parsing here and remove these structs */ struct pci_address { @@ -170,8 +170,8 @@ void __init isa_bridge_find_early(struct pci_controller *hose) * isa_bridge_find_late - Find and map the ISA IO space upon discovery of * a new ISA bridge */ -static void __devinit isa_bridge_find_late(struct pci_dev *pdev, - struct device_node *devnode) +static void isa_bridge_find_late(struct pci_dev *pdev, + struct device_node *devnode) { struct pci_controller *hose = pci_bus_to_host(pdev->bus); @@ -215,8 +215,8 @@ static void isa_bridge_remove(void) /** * isa_bridge_notify - Get notified of PCI devices addition/removal */ -static int __devinit isa_bridge_notify(struct notifier_block *nb, - unsigned long action, void *data) +static int isa_bridge_notify(struct notifier_block *nb, unsigned long action, + void *data) { struct device *dev = data; struct pci_dev *pdev = to_pci_dev(dev); diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index c470a40b29f..5ca82cd4a37 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c @@ -154,12 +154,12 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs) static int kgdb_singlestep(struct pt_regs *regs) { struct thread_info *thread_info, *exception_thread_info; - struct thread_info *backup_current_thread_info = \ - (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL); + struct thread_info *backup_current_thread_info; if (user_mode(regs)) return 0; + backup_current_thread_info = (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL); /* * On Book E and perhaps other processors, singlestep is handled on * the critical exception stack. This causes current_thread_info() @@ -185,6 +185,7 @@ static int kgdb_singlestep(struct pt_regs *regs) /* Restore current_thread_info lastly. 
*/ memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info); + kfree(backup_current_thread_info); return 1; } @@ -198,7 +199,7 @@ static int kgdb_iabr_match(struct pt_regs *regs) return 1; } -static int kgdb_dabr_match(struct pt_regs *regs) +static int kgdb_break_match(struct pt_regs *regs) { if (user_mode(regs)) return 0; @@ -458,7 +459,7 @@ static void *old__debugger; static void *old__debugger_bpt; static void *old__debugger_sstep; static void *old__debugger_iabr_match; -static void *old__debugger_dabr_match; +static void *old__debugger_break_match; static void *old__debugger_fault_handler; int kgdb_arch_init(void) @@ -468,7 +469,7 @@ int kgdb_arch_init(void) old__debugger_bpt = __debugger_bpt; old__debugger_sstep = __debugger_sstep; old__debugger_iabr_match = __debugger_iabr_match; - old__debugger_dabr_match = __debugger_dabr_match; + old__debugger_break_match = __debugger_break_match; old__debugger_fault_handler = __debugger_fault_handler; __debugger_ipi = kgdb_call_nmi_hook; @@ -476,7 +477,7 @@ int kgdb_arch_init(void) __debugger_bpt = kgdb_handle_breakpoint; __debugger_sstep = kgdb_singlestep; __debugger_iabr_match = kgdb_iabr_match; - __debugger_dabr_match = kgdb_dabr_match; + __debugger_break_match = kgdb_break_match; __debugger_fault_handler = kgdb_not_implemented; return 0; @@ -489,6 +490,6 @@ void kgdb_arch_exit(void) __debugger_bpt = old__debugger_bpt; __debugger_sstep = old__debugger_sstep; __debugger_iabr_match = old__debugger_iabr_match; - __debugger_dabr_match = old__debugger_dabr_match; + __debugger_break_match = old__debugger_break_match; __debugger_fault_handler = old__debugger_fault_handler; } diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index e88c6433181..11f5b03a0b0 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -310,7 +310,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, { struct kretprobe_instance *ri = NULL; struct hlist_head *head, empty_rp; - struct hlist_node *node, *tmp; + struct hlist_node *tmp; unsigned long flags, orig_ret_address = 0; unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; @@ -330,7 +330,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, * real return address, and all the rest will point to * kretprobe_trampoline */ - hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { + hlist_for_each_entry_safe(ri, tmp, head, hlist) { if (ri->task != current) /* another task is sharing our hash bucket */ continue; @@ -357,7 +357,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, kretprobe_hash_unlock(current, &flags); preempt_enable_no_resched(); - hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { + hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); kfree(ri); } diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index 867db1de894..a61b133c4f9 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c @@ -419,7 +419,7 @@ static void kvm_map_magic_page(void *data) in[0] = KVM_MAGIC_PAGE; in[1] = KVM_MAGIC_PAGE; - kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE); + kvm_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE)); *features = out[0]; } diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index bedd12e1cfb..0733b05eb85 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c @@ -387,7 +387,7 @@ void __init 
find_legacy_serial_ports(void) of_node_put(parent); continue; } - /* Check for known pciclass, and also check wether we have + /* Check for known pciclass, and also check whether we have * a device with child nodes for ports or not */ if (of_device_is_compatible(np, "pciclass,0700") || diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index fa9f6c72f55..e1ec57e87b3 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -218,23 +218,23 @@ static void __init export_crashk_values(struct device_node *node) * be sure what's in them, so remove them. */ prop = of_find_property(node, "linux,crashkernel-base", NULL); if (prop) - prom_remove_property(node, prop); + of_remove_property(node, prop); prop = of_find_property(node, "linux,crashkernel-size", NULL); if (prop) - prom_remove_property(node, prop); + of_remove_property(node, prop); if (crashk_res.start != 0) { - prom_add_property(node, &crashk_base_prop); + of_add_property(node, &crashk_base_prop); crashk_size = resource_size(&crashk_res); - prom_add_property(node, &crashk_size_prop); + of_add_property(node, &crashk_size_prop); } /* * memory_limit is required by the kexec-tools to limit the * crash regions to the actual memory used. */ - prom_update_property(node, &memory_limit_prop); + of_update_property(node, &memory_limit_prop); } static int __init kexec_setup(void) @@ -249,11 +249,11 @@ static int __init kexec_setup(void) /* remove any stale properties so ours can be found */ prop = of_find_property(node, kernel_end_prop.name, NULL); if (prop) - prom_remove_property(node, prop); + of_remove_property(node, prop); /* information needed by userspace when using default_machine_kexec */ kernel_end = __pa(_end); - prom_add_property(node, &kernel_end_prop); + of_add_property(node, &kernel_end_prop); export_crashk_values(node); diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index d7f609086a9..466a2908bb6 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -162,6 +162,8 @@ static int kexec_all_irq_disabled = 0; static void kexec_smp_down(void *arg) { local_irq_disable(); + hard_irq_disable(); + mb(); /* make sure our irqs are disabled before we say they are */ get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF; while(kexec_all_irq_disabled == 0) @@ -244,6 +246,8 @@ static void kexec_prepare_cpus(void) wake_offline_cpus(); smp_call_function(kexec_smp_down, NULL, /* wait */0); local_irq_disable(); + hard_irq_disable(); + mb(); /* make sure IRQs are disabled before we say they are */ get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF; @@ -281,6 +285,7 @@ static void kexec_prepare_cpus(void) if (ppc_md.kexec_cpu_down) ppc_md.kexec_cpu_down(0, 0); local_irq_disable(); + hard_irq_disable(); } #endif /* SMP */ @@ -389,14 +394,14 @@ static int __init export_htab_values(void) /* remove any stale propertys so ours can be found */ prop = of_find_property(node, htab_base_prop.name, NULL); if (prop) - prom_remove_property(node, prop); + of_remove_property(node, prop); prop = of_find_property(node, htab_size_prop.name, NULL); if (prop) - prom_remove_property(node, prop); + of_remove_property(node, prop); htab_base = __pa(htab_address); - prom_add_property(node, &htab_base_prop); - prom_add_property(node, &htab_size_prop); + of_add_property(node, &htab_base_prop); + of_add_property(node, &htab_size_prop); of_node_put(node); return 0; diff --git a/arch/powerpc/kernel/module_64.c 
b/arch/powerpc/kernel/module_64.c index 9f44a775a10..6ee59a0eb26 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -386,6 +386,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, | (value & 0xffff); break; + case R_PPC64_TOC16_LO: + /* Subtract TOC pointer */ + value -= my_r2(sechdrs, me); + *((uint16_t *) location) + = (*((uint16_t *) location) & ~0xffff) + | (value & 0xffff); + break; + case R_PPC64_TOC16_DS: /* Subtract TOC pointer */ value -= my_r2(sechdrs, me); @@ -399,6 +407,28 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, | (value & 0xfffc); break; + case R_PPC64_TOC16_LO_DS: + /* Subtract TOC pointer */ + value -= my_r2(sechdrs, me); + if ((value & 3) != 0) { + printk("%s: bad TOC16_LO_DS relocation (%lu)\n", + me->name, value); + return -ENOEXEC; + } + *((uint16_t *) location) + = (*((uint16_t *) location) & ~0xfffc) + | (value & 0xfffc); + break; + + case R_PPC64_TOC16_HA: + /* Subtract TOC pointer */ + value -= my_r2(sechdrs, me); + value = ((value + 0x8000) >> 16); + *((uint16_t *) location) + = (*((uint16_t *) location) & ~0xffff) + | (value & 0xffff); + break; + case R_PPC_REL24: /* FIXME: Handle weak symbols here --RR */ if (sym->st_shndx == SHN_UNDEF) { diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c index 2049f2d00ff..a7b74307672 100644 --- a/arch/powerpc/kernel/of_platform.c +++ b/arch/powerpc/kernel/of_platform.c @@ -37,7 +37,7 @@ * lacking some bits needed here. */ -static int __devinit of_pci_phb_probe(struct platform_device *dev) +static int of_pci_phb_probe(struct platform_device *dev) { struct pci_controller *phb; @@ -71,10 +71,8 @@ static int __devinit of_pci_phb_probe(struct platform_device *dev) eeh_dev_phb_init_dynamic(phb); /* Register devices with EEH */ -#ifdef CONFIG_EEH if (dev->dev.of_node->child) eeh_add_device_tree_early(dev->dev.of_node); -#endif /* CONFIG_EEH */ /* Scan the bus */ pcibios_scan_phb(phb); @@ -82,19 +80,20 @@ static int __devinit of_pci_phb_probe(struct platform_device *dev) return -ENXIO; /* Claim resources. This might need some rework as well depending - * wether we are doing probe-only or not, like assigning unassigned + * whether we are doing probe-only or not, like assigning unassigned * resources etc... 
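Note on the R_PPC64_TOC16_HA case added to apply_relocate_add() above: the low 16 bits of a TOC offset are consumed by instructions that sign-extend their immediate, so the high half has to be pre-biased by 0x8000 before the shift. A small sketch of the arithmetic (the helper name is invented for illustration):

	/* "high adjusted" half of a 32-bit offset */
	static inline uint16_t ha16(uint32_t off)
	{
		return (off + 0x8000) >> 16;
	}

	/*
	 * off = 0x1234abcd: the low half 0xabcd sign-extends to -0x5433,
	 *   so ha16(off) = 0x1235 and (0x1235 << 16) + (-0x5433) = 0x1234abcd.
	 * off = 0x12345678: the low half is positive, ha16(off) = 0x1234.
	 */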
*/ pcibios_claim_one_bus(phb->bus); /* Finish EEH setup */ -#ifdef CONFIG_EEH eeh_add_device_tree_late(phb->bus); -#endif /* Add probed PCI devices to the device model */ pci_bus_add_devices(phb->bus); + /* sysfs files should only be added after devices are added */ + eeh_add_sysfs_files(phb->bus); + return 0; } diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index cd6da855090..f8f24685f10 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -120,8 +120,6 @@ struct slb_shadow slb_shadow[] __cacheline_aligned = { struct paca_struct *paca; EXPORT_SYMBOL(paca); -struct paca_struct boot_paca; - void __init initialise_paca(struct paca_struct *new_paca, int cpu) { /* The TOC register (GPR2) points 32kB into the TOC, so that 64kB diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 7f94f760dd0..fa12ae42d98 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -673,9 +673,8 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar, * - Some 32 bits platforms such as 4xx can have physical space larger than * 32 bits so we need to use 64 bits values for the parsing */ -void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose, - struct device_node *dev, - int primary) +void pci_process_bridge_OF_ranges(struct pci_controller *hose, + struct device_node *dev, int primary) { const u32 *ranges; int rlen; @@ -848,7 +847,7 @@ int pci_proc_domain(struct pci_bus *bus) /* This header fixup will do the resource fixup for all devices as they are * probed, but not for bridge ranges */ -static void __devinit pcibios_fixup_resources(struct pci_dev *dev) +static void pcibios_fixup_resources(struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); int i; @@ -902,8 +901,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources); * things go more smoothly when it gets it right. It should covers cases such * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges */ -static int __devinit pcibios_uninitialized_bridge_resource(struct pci_bus *bus, - struct resource *res) +static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus, + struct resource *res) { struct pci_controller *hose = pci_bus_to_host(bus); struct pci_dev *dev = bus->self; @@ -967,7 +966,7 @@ static int __devinit pcibios_uninitialized_bridge_resource(struct pci_bus *bus, } /* Fixup resources of a PCI<->PCI bridge */ -static void __devinit pcibios_fixup_bridge(struct pci_bus *bus) +static void pcibios_fixup_bridge(struct pci_bus *bus) { struct resource *res; int i; @@ -1007,7 +1006,7 @@ static void __devinit pcibios_fixup_bridge(struct pci_bus *bus) } } -void __devinit pcibios_setup_bus_self(struct pci_bus *bus) +void pcibios_setup_bus_self(struct pci_bus *bus) { /* Fix up the bus resources for P2P bridges */ if (bus->self != NULL) @@ -1024,7 +1023,7 @@ void __devinit pcibios_setup_bus_self(struct pci_bus *bus) ppc_md.pci_dma_bus_setup(bus); } -void __devinit pcibios_setup_bus_devices(struct pci_bus *bus) +void pcibios_setup_bus_devices(struct pci_bus *bus) { struct pci_dev *dev; @@ -1063,7 +1062,7 @@ void pcibios_set_master(struct pci_dev *dev) /* No special bus mastering setup handling */ } -void __devinit pcibios_fixup_bus(struct pci_bus *bus) +void pcibios_fixup_bus(struct pci_bus *bus) { /* When called from the generic PCI probe, read PCI<->PCI bridge * bases. 
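Note on the EEH reordering in of_pci_phb_probe() above (the same reorder is applied to pcibios_finish_adding_to_bus() further down): EEH setup for the device tree must run before the devices are registered with the driver core, while the EEH sysfs attributes can only be created once the devices exist in sysfs. In outline, condensed from the hunks:

	/* pcibios_claim_one_bus(phb->bus);       claim resources            */
	/* eeh_add_device_tree_late(phb->bus);    finish EEH before binding  */
	/* pci_bus_add_devices(phb->bus);         register with driver core  */
	/* eeh_add_sysfs_files(phb->bus);         sysfs only after devices   */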
This is -not- called when generating the PCI tree from @@ -1080,7 +1079,7 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus) } EXPORT_SYMBOL(pcibios_fixup_bus); -void __devinit pci_fixup_cardbus(struct pci_bus *bus) +void pci_fixup_cardbus(struct pci_bus *bus) { /* Now fixup devices on that bus */ pcibios_setup_bus_devices(bus); @@ -1264,7 +1263,7 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus) pcibios_allocate_bus_resources(b); } -static inline void __devinit alloc_resource(struct pci_dev *dev, int idx) +static inline void alloc_resource(struct pci_dev *dev, int idx) { struct resource *pr, *r = &dev->resource[idx]; @@ -1428,8 +1427,6 @@ void __init pcibios_resource_survey(void) ppc_md.pcibios_fixup(); } -#ifdef CONFIG_HOTPLUG - /* This is used by the PCI hotplug driver to allocate resource * of newly plugged busses. We can try to consolidate with the * rest of the code later, for now, keep it as-is as our main @@ -1480,16 +1477,17 @@ void pcibios_finish_adding_to_bus(struct pci_bus *bus) pcibios_allocate_bus_resources(bus); pcibios_claim_one_bus(bus); + /* Fixup EEH */ + eeh_add_device_tree_late(bus); + /* Add new devices to global lists. Register in proc, sysfs. */ pci_bus_add_devices(bus); - /* Fixup EEH */ - eeh_add_device_tree_late(bus); + /* sysfs files should only be added after devices are added */ + eeh_add_sysfs_files(bus); } EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus); -#endif /* CONFIG_HOTPLUG */ - int pcibios_enable_device(struct pci_dev *dev, int mask) { if (ppc_md.pcibios_enable_device_hook) @@ -1504,7 +1502,8 @@ resource_size_t pcibios_io_space_offset(struct pci_controller *hose) return (unsigned long) hose->io_base_virt - _IO_BASE; } -static void __devinit pcibios_setup_phb_resources(struct pci_controller *hose, struct list_head *resources) +static void pcibios_setup_phb_resources(struct pci_controller *hose, + struct list_head *resources) { struct resource *res; int i; @@ -1643,7 +1642,7 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus) * pci_scan_phb - Given a pci_controller, setup and scan the PCI bus * @hose: Pointer to the PCI host controller instance structure */ -void __devinit pcibios_scan_phb(struct pci_controller *hose) +void pcibios_scan_phb(struct pci_controller *hose) { LIST_HEAD(resources); struct pci_bus *bus; diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 4b06ec5a502..e37c2152acf 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -208,12 +208,12 @@ pci_create_OF_bus_map(void) of_prop->name = "pci-OF-bus-map"; of_prop->length = 256; of_prop->value = &of_prop[1]; - prom_add_property(dn, of_prop); + of_add_property(dn, of_prop); of_node_put(dn); } } -void __devinit pcibios_setup_phb_io_space(struct pci_controller *hose) +void pcibios_setup_phb_io_space(struct pci_controller *hose) { unsigned long io_offset; struct resource *res = &hose->io_resource; diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 4ff190ff24a..51a133a78a0 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -74,8 +74,6 @@ static int __init pcibios_init(void) subsys_initcall(pcibios_init); -#ifdef CONFIG_HOTPLUG - int pcibios_unmap_io_space(struct pci_bus *bus) { struct pci_controller *hose; @@ -124,9 +122,7 @@ int pcibios_unmap_io_space(struct pci_bus *bus) } EXPORT_SYMBOL_GPL(pcibios_unmap_io_space); -#endif /* CONFIG_HOTPLUG */ - -static int __devinit pcibios_map_phb_io_space(struct pci_controller *hose) +static int 
pcibios_map_phb_io_space(struct pci_controller *hose) { struct vm_struct *area; unsigned long phys_page; @@ -177,7 +173,7 @@ static int __devinit pcibios_map_phb_io_space(struct pci_controller *hose) return 0; } -int __devinit pcibios_map_io_space(struct pci_bus *bus) +int pcibios_map_io_space(struct pci_bus *bus) { WARN_ON(bus == NULL); @@ -197,7 +193,7 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus) } EXPORT_SYMBOL_GPL(pcibios_map_io_space); -void __devinit pcibios_setup_phb_io_space(struct pci_controller *hose) +void pcibios_setup_phb_io_space(struct pci_controller *hose) { pcibios_map_phb_io_space(hose); } diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index dd9e4a04bf7..e7af165f8b9 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c @@ -36,7 +36,7 @@ * Traverse_func that inits the PCI fields of the device node. * NOTE: this *must* be done before read/write config to the device. */ -void * __devinit update_dn_pci_info(struct device_node *dn, void *data) +void *update_dn_pci_info(struct device_node *dn, void *data) { struct pci_controller *phb = data; const int *type = @@ -129,7 +129,7 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre, * subsystem is set up, before kmalloc is valid) and during the * dynamic lpar operation of adding a PHB to a running system. */ -void __devinit pci_devs_phb_init_dynamic(struct pci_controller *phb) +void pci_devs_phb_init_dynamic(struct pci_controller *phb) { struct device_node *dn = phb->dn; struct pci_dn *pdn; diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index 30378a19f65..2a67e9baa59 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c @@ -204,7 +204,7 @@ EXPORT_SYMBOL(of_create_pci_dev); * this routine in turn call of_scan_bus() recusively to scan for more child * devices. */ -void __devinit of_scan_pci_bridge(struct pci_dev *dev) +void of_scan_pci_bridge(struct pci_dev *dev) { struct device_node *node = dev->dev.of_node; struct pci_bus *bus; @@ -299,8 +299,8 @@ EXPORT_SYMBOL(of_scan_pci_bridge); * @bus: pci_bus structure for the PCI bus * @rescan_existing: Flag indicating bus has already been set up */ -static void __devinit __of_scan_bus(struct device_node *node, - struct pci_bus *bus, int rescan_existing) +static void __of_scan_bus(struct device_node *node, struct pci_bus *bus, + int rescan_existing) { struct device_node *child; const u32 *reg; @@ -348,8 +348,7 @@ static void __devinit __of_scan_bus(struct device_node *node, * @node: device tree node for the PCI bus * @bus: pci_bus structure for the PCI bus */ -void __devinit of_scan_bus(struct device_node *node, - struct pci_bus *bus) +void of_scan_bus(struct device_node *node, struct pci_bus *bus) { __of_scan_bus(node, bus, 0); } @@ -363,8 +362,7 @@ EXPORT_SYMBOL_GPL(of_scan_bus); * Same as of_scan_bus, but for a pci_bus structure that has already been * setup. */ -void __devinit of_rescan_bus(struct device_node *node, - struct pci_bus *bus) +void of_rescan_bus(struct device_node *node, struct pci_bus *bus) { __of_scan_bus(node, bus, 1); } diff --git a/arch/powerpc/kernel/ppc32.h b/arch/powerpc/kernel/ppc32.h index 02fb0ee2609..a27c914d580 100644 --- a/arch/powerpc/kernel/ppc32.h +++ b/arch/powerpc/kernel/ppc32.h @@ -16,30 +16,6 @@ /* These are here to support 32-bit syscalls on a 64-bit kernel. 
*/ -#define __old_sigaction32 old_sigaction32 - -struct __old_sigaction32 { - compat_uptr_t sa_handler; - compat_old_sigset_t sa_mask; - unsigned int sa_flags; - compat_uptr_t sa_restorer; /* not used by Linux/SPARC yet */ -}; - - - -struct sigaction32 { - compat_uptr_t sa_handler; /* Really a pointer, but need to deal with 32 bits */ - unsigned int sa_flags; - compat_uptr_t sa_restorer; /* Another 32 bit pointer */ - compat_sigset_t sa_mask; /* A 32 bit mask */ -}; - -typedef struct sigaltstack_32 { - unsigned int ss_sp; - int ss_flags; - compat_size_t ss_size; -} stack_32_t; - struct pt_regs32 { unsigned int gpr[32]; unsigned int nip; @@ -75,7 +51,7 @@ struct mcontext32 { struct ucontext32 { unsigned int uc_flags; unsigned int uc_link; - stack_32_t uc_stack; + compat_stack_t uc_stack; int uc_pad[7]; compat_uptr_t uc_regs; /* points to uc_mcontext field */ compat_sigset_t uc_sigmask; /* mask last for extensibility */ diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index 19e4288d848..78b8766fd79 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -43,6 +43,7 @@ #include <asm/dcr.h> #include <asm/ftrace.h> #include <asm/switch_to.h> +#include <asm/epapr_hcalls.h> #ifdef CONFIG_PPC32 extern void transfer_to_handler(void); @@ -191,3 +192,7 @@ EXPORT_SYMBOL(__arch_hweight64); #ifdef CONFIG_PPC_BOOK3S_64 EXPORT_SYMBOL_GPL(mmu_psize_defs); #endif + +#ifdef CONFIG_EPAPR_PARAVIRT +EXPORT_SYMBOL(epapr_hypercall_start); +#endif diff --git a/arch/powerpc/kernel/proc_powerpc.c b/arch/powerpc/kernel/proc_powerpc.c index c8ae3714e79..f19d0bdc324 100644 --- a/arch/powerpc/kernel/proc_powerpc.c +++ b/arch/powerpc/kernel/proc_powerpc.c @@ -32,7 +32,7 @@ static loff_t page_map_seek( struct file *file, loff_t off, int whence) { loff_t new; - struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode); + struct proc_dir_entry *dp = PDE(file_inode(file)); switch(whence) { case 0: @@ -55,13 +55,13 @@ static loff_t page_map_seek( struct file *file, loff_t off, int whence) static ssize_t page_map_read( struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { - struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode); + struct proc_dir_entry *dp = PDE(file_inode(file)); return simple_read_from_buffer(buf, nbytes, ppos, dp->data, dp->size); } static int page_map_mmap( struct file *file, struct vm_area_struct *vma ) { - struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode); + struct proc_dir_entry *dp = PDE(file_inode(file)); if ((vma->vm_end - vma->vm_start) > dp->size) return -EINVAL; diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index ba48233500f..59dd545fdde 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -50,6 +50,7 @@ #include <asm/runlatch.h> #include <asm/syscalls.h> #include <asm/switch_to.h> +#include <asm/tm.h> #include <asm/debug.h> #ifdef CONFIG_PPC64 #include <asm/firmware.h> @@ -57,6 +58,13 @@ #include <linux/kprobes.h> #include <linux/kdebug.h> +/* Transactional Memory debug */ +#ifdef TM_DEBUG_SW +#define TM_DEBUG(x...) printk(KERN_INFO x) +#else +#define TM_DEBUG(x...) 
do { } while(0) +#endif + extern unsigned long _get_SP(void); #ifndef CONFIG_SMP @@ -271,7 +279,7 @@ void do_send_trap(struct pt_regs *regs, unsigned long address, force_sig_info(SIGTRAP, &info, current); } #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ -void do_dabr(struct pt_regs *regs, unsigned long address, +void do_break (struct pt_regs *regs, unsigned long address, unsigned long error_code) { siginfo_t info; @@ -281,11 +289,11 @@ void do_dabr(struct pt_regs *regs, unsigned long address, 11, SIGSEGV) == NOTIFY_STOP) return; - if (debugger_dabr_match(regs)) + if (debugger_break_match(regs)) return; - /* Clear the DABR */ - set_dabr(0, 0); + /* Clear the breakpoint */ + hw_breakpoint_disable(); /* Deliver the signal to userspace */ info.si_signo = SIGTRAP; @@ -296,7 +304,7 @@ void do_dabr(struct pt_regs *regs, unsigned long address, } #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ -static DEFINE_PER_CPU(unsigned long, current_dabr); +static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk); #ifdef CONFIG_PPC_ADV_DEBUG_REGS /* @@ -364,39 +372,214 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread) #ifndef CONFIG_HAVE_HW_BREAKPOINT static void set_debug_reg_defaults(struct thread_struct *thread) { - if (thread->dabr) { - thread->dabr = 0; - thread->dabrx = 0; - set_dabr(0, 0); - } + thread->hw_brk.address = 0; + thread->hw_brk.type = 0; + set_breakpoint(&thread->hw_brk); } #endif /* !CONFIG_HAVE_HW_BREAKPOINT */ #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ -int set_dabr(unsigned long dabr, unsigned long dabrx) -{ - __get_cpu_var(current_dabr) = dabr; - - if (ppc_md.set_dabr) - return ppc_md.set_dabr(dabr, dabrx); - - /* XXX should we have a CPU_FTR_HAS_DABR ? */ #ifdef CONFIG_PPC_ADV_DEBUG_REGS +static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) +{ mtspr(SPRN_DAC1, dabr); #ifdef CONFIG_PPC_47x isync(); #endif + return 0; +} #elif defined(CONFIG_PPC_BOOK3S) +static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) +{ mtspr(SPRN_DABR, dabr); mtspr(SPRN_DABRX, dabrx); + return 0; +} +#else +static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) +{ + return -EINVAL; +} #endif + +static inline int set_dabr(struct arch_hw_breakpoint *brk) +{ + unsigned long dabr, dabrx; + + dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR); + dabrx = ((brk->type >> 3) & 0x7); + + if (ppc_md.set_dabr) + return ppc_md.set_dabr(dabr, dabrx); + + return __set_dabr(dabr, dabrx); +} + +static inline int set_dawr(struct arch_hw_breakpoint *brk) +{ + unsigned long dawr, dawrx, mrd; + + dawr = brk->address; + + dawrx = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \ + << (63 - 58); //* read/write bits */ + dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \ + << (63 - 59); //* translate */ + dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \ + >> 3; //* PRIM bits */ + /* dawr length is stored in field MDR bits 48:53. Matches range in + doublewords (64 bits) baised by -1 eg. 0b000000=1DW and + 0b111111=64DW. + brk->len is in bytes. + This aligns up to double word size, shifts and does the bias. 
+ */ + mrd = ((brk->len + 7) >> 3) - 1; + dawrx |= (mrd & 0x3f) << (63 - 53); + + if (ppc_md.set_dawr) + return ppc_md.set_dawr(dawr, dawrx); + mtspr(SPRN_DAWR, dawr); + mtspr(SPRN_DAWRX, dawrx); return 0; } +int set_breakpoint(struct arch_hw_breakpoint *brk) +{ + __get_cpu_var(current_brk) = *brk; + + if (cpu_has_feature(CPU_FTR_DAWR)) + return set_dawr(brk); + + return set_dabr(brk); +} + #ifdef CONFIG_PPC64 DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array); #endif +static inline bool hw_brk_match(struct arch_hw_breakpoint *a, + struct arch_hw_breakpoint *b) +{ + if (a->address != b->address) + return false; + if (a->type != b->type) + return false; + if (a->len != b->len) + return false; + return true; +} +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +static inline void tm_reclaim_task(struct task_struct *tsk) +{ + /* We have to work out if we're switching from/to a task that's in the + * middle of a transaction. + * + * In switching we need to maintain a 2nd register state as + * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the + * checkpointed (tbegin) state in ckpt_regs and saves the transactional + * (current) FPRs into oldtask->thread.transact_fpr[]. + * + * We also context switch (save) TFHAR/TEXASR/TFIAR in here. + */ + struct thread_struct *thr = &tsk->thread; + + if (!thr->regs) + return; + + if (!MSR_TM_ACTIVE(thr->regs->msr)) + goto out_and_saveregs; + + /* Stash the original thread MSR, as giveup_fpu et al will + * modify it. We hold onto it to see whether the task used + * FP & vector regs. + */ + thr->tm_orig_msr = thr->regs->msr; + + TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, " + "ccr=%lx, msr=%lx, trap=%lx)\n", + tsk->pid, thr->regs->nip, + thr->regs->ccr, thr->regs->msr, + thr->regs->trap); + + tm_reclaim(thr, thr->regs->msr, TM_CAUSE_RESCHED); + + TM_DEBUG("--- tm_reclaim on pid %d complete\n", + tsk->pid); + +out_and_saveregs: + /* Always save the regs here, even if a transaction's not active. + * This context-switches a thread's TM info SPRs. We do it here to + * be consistent with the restore path (in recheckpoint) which + * cannot happen later in _switch(). + */ + tm_save_sprs(thr); +} + +static inline void tm_recheckpoint_new_task(struct task_struct *new) +{ + unsigned long msr; + + if (!cpu_has_feature(CPU_FTR_TM)) + return; + + /* Recheckpoint the registers of the thread we're about to switch to. + * + * If the task was using FP, we non-lazily reload both the original and + * the speculative FP register states. This is because the kernel + * doesn't see if/when a TM rollback occurs, so if we take an FP + * unavoidable later, we are unable to determine which set of FP regs + * need to be restored. + */ + if (!new->thread.regs) + return; + + /* The TM SPRs are restored here, so that TEXASR.FS can be set + * before the trecheckpoint and no explosion occurs. + */ + tm_restore_sprs(&new->thread); + + if (!MSR_TM_ACTIVE(new->thread.regs->msr)) + return; + msr = new->thread.tm_orig_msr; + /* Recheckpoint to restore original checkpointed register state. 
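A worked example for the breakpoint encodings introduced above, using made-up lengths; the field layout is the one described in the patch comments:

	/* DABR: dabr  = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	 *       dabrx = (brk->type >> 3) & 0x7;
	 *
	 * DAWR length field (bits 48:53): doublewords, biased by -1.
	 *   mrd = ((brk->len + 7) >> 3) - 1;
	 *   len =  1..8  bytes -> mrd = 0   (1 doubleword)
	 *   len =  9..16 bytes -> mrd = 1   (2 doublewords)
	 *   len = 512    bytes -> mrd = 63  (64 doublewords, the maximum)
	 */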
*/ + TM_DEBUG("*** tm_recheckpoint of pid %d " + "(new->msr 0x%lx, new->origmsr 0x%lx)\n", + new->pid, new->thread.regs->msr, msr); + + /* This loads the checkpointed FP/VEC state, if used */ + tm_recheckpoint(&new->thread, msr); + + /* This loads the speculative FP/VEC state, if used */ + if (msr & MSR_FP) { + do_load_up_transact_fpu(&new->thread); + new->thread.regs->msr |= + (MSR_FP | new->thread.fpexc_mode); + } + if (msr & MSR_VEC) { + do_load_up_transact_altivec(&new->thread); + new->thread.regs->msr |= MSR_VEC; + } + /* We may as well turn on VSX too since all the state is restored now */ + if (msr & MSR_VSX) + new->thread.regs->msr |= MSR_VSX; + + TM_DEBUG("*** tm_recheckpoint of pid %d complete " + "(kernel msr 0x%lx)\n", + new->pid, mfmsr()); +} + +static inline void __switch_to_tm(struct task_struct *prev) +{ + if (cpu_has_feature(CPU_FTR_TM)) { + tm_enable(); + tm_reclaim_task(prev); + } +} +#else +#define tm_recheckpoint_new_task(new) +#define __switch_to_tm(prev) +#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ + struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *new) { @@ -407,6 +590,8 @@ struct task_struct *__switch_to(struct task_struct *prev, struct ppc64_tlb_batch *batch; #endif + __switch_to_tm(prev); + #ifdef CONFIG_SMP /* avoid complexity of lazy save/restore of fpu * by just saving it every time we switch out if @@ -481,8 +666,8 @@ struct task_struct *__switch_to(struct task_struct *prev, * schedule DABR */ #ifndef CONFIG_HAVE_HW_BREAKPOINT - if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) - set_dabr(new->thread.dabr, new->thread.dabrx); + if (unlikely(hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk))) + set_breakpoint(&new->thread.hw_brk); #endif /* CONFIG_HAVE_HW_BREAKPOINT */ #endif @@ -522,6 +707,9 @@ struct task_struct *__switch_to(struct task_struct *prev, * of sync. Hard disable here. 
*/ hard_irq_disable(); + + tm_recheckpoint_new_task(new); + last = _switch(old_thread, new_thread); #ifdef CONFIG_PPC_BOOK3S_64 @@ -683,6 +871,9 @@ void show_regs(struct pt_regs * regs) printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); #endif +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch); +#endif show_stack(current, (unsigned long *) regs->gpr[1]); if (!user_mode(regs)) show_instructions(regs); @@ -733,8 +924,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */ int copy_thread(unsigned long clone_flags, unsigned long usp, - unsigned long arg, struct task_struct *p, - struct pt_regs *regs) + unsigned long arg, struct task_struct *p) { struct pt_regs *childregs, *kregs; extern void ret_from_fork(void); @@ -745,25 +935,25 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, /* Copy registers */ sp -= sizeof(struct pt_regs); childregs = (struct pt_regs *) sp; - if (!regs) { - /* for kernel thread, set `current' and stackptr in new task */ + if (unlikely(p->flags & PF_KTHREAD)) { + struct thread_info *ti = (void *)task_stack_page(p); memset(childregs, 0, sizeof(struct pt_regs)); childregs->gpr[1] = sp + sizeof(struct pt_regs); + childregs->gpr[14] = usp; /* function */ #ifdef CONFIG_PPC64 - childregs->gpr[14] = *(unsigned long *)usp; - childregs->gpr[2] = ((unsigned long *)usp)[1], clear_tsk_thread_flag(p, TIF_32BIT); -#else - childregs->gpr[14] = usp; /* function */ - childregs->gpr[2] = (unsigned long) p; + childregs->softe = 1; #endif childregs->gpr[15] = arg; p->thread.regs = NULL; /* no user register state */ + ti->flags |= _TIF_RESTOREALL; f = ret_from_kernel_thread; } else { + struct pt_regs *regs = current_pt_regs(); CHECK_FULL_REGS(regs); *childregs = *regs; - childregs->gpr[1] = usp; + if (usp) + childregs->gpr[1] = usp; p->thread.regs = childregs; childregs->gpr[3] = 0; /* Result from fork() */ if (clone_flags & CLONE_SETTLS) { @@ -814,6 +1004,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, p->thread.dscr_inherit = current->thread.dscr_inherit; p->thread.dscr = current->thread.dscr; } + if (cpu_has_feature(CPU_FTR_HAS_PPR)) + p->thread.ppr = INIT_PPR; #endif /* * The PPC64 ABI makes use of a TOC to contain function @@ -893,7 +1085,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) regs->msr = MSR_USER32; } #endif - discard_lazy_cpu_state(); #ifdef CONFIG_VSX current->thread.used_vsr = 0; @@ -913,6 +1104,13 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) current->thread.spefscr = 0; current->thread.used_spe = 0; #endif /* CONFIG_SPE */ +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (cpu_has_feature(CPU_FTR_TM)) + regs->msr |= MSR_TM; + current->thread.tm_tfhar = 0; + current->thread.tm_texasr = 0; + current->thread.tm_tfiar = 0; +#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ } #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \ @@ -1027,51 +1225,6 @@ int get_unalign_ctl(struct task_struct *tsk, unsigned long adr) return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr); } -#define TRUNC_PTR(x) ((typeof(x))(((unsigned long)(x)) & 0xffffffff)) - -int sys_clone(unsigned long clone_flags, unsigned long usp, - int __user *parent_tidp, void __user *child_threadptr, - int __user *child_tidp, int p6, - struct pt_regs *regs) -{ - CHECK_FULL_REGS(regs); 
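The transactional memory hooks above attach to __switch_to() at two points; an illustrative outline of the resulting flow, condensed from the hunks (not literal code):

	/*
	 * __switch_to(prev, new)
	 *   __switch_to_tm(prev)
	 *     tm_enable(); tm_reclaim_task(prev)
	 *       - if prev is mid-transaction, tm_reclaim() stores the
	 *         checkpointed state in ckpt_regs and the speculative FP
	 *         state in transact_fpr[]; the TM SPRs are saved either way
	 *   ... FP/VMX/VSX giveup, breakpoint switch, etc ...
	 *   tm_recheckpoint_new_task(new)
	 *     tm_restore_sprs(); tm_recheckpoint(); reload the transactional
	 *     FP/VMX/VSX state if the transaction used it
	 *   last = _switch(old_thread, new_thread)
	 */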
- if (usp == 0) - usp = regs->gpr[1]; /* stack pointer for child */ -#ifdef CONFIG_PPC64 - if (is_32bit_task()) { - parent_tidp = TRUNC_PTR(parent_tidp); - child_tidp = TRUNC_PTR(child_tidp); - } -#endif - return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp); -} - -int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3, - unsigned long p4, unsigned long p5, unsigned long p6, - struct pt_regs *regs) -{ - CHECK_FULL_REGS(regs); - return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL); -} - -int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3, - unsigned long p4, unsigned long p5, unsigned long p6, - struct pt_regs *regs) -{ - CHECK_FULL_REGS(regs); - return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], - regs, 0, NULL, NULL); -} - -void __ret_from_kernel_execve(struct pt_regs *normal) -__noreturn; - -void ret_from_kernel_execve(struct pt_regs *normal) -{ - set_thread_flag(TIF_RESTOREALL); - __ret_from_kernel_execve(normal); -} - static inline int valid_irq_stack(unsigned long sp, struct task_struct *p, unsigned long nbytes) { diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 37725e86651..8b6f7a99cce 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -32,6 +32,7 @@ #include <linux/debugfs.h> #include <linux/irq.h> #include <linux/memblock.h> +#include <linux/of.h> #include <asm/prom.h> #include <asm/rtas.h> @@ -49,11 +50,11 @@ #include <asm/btext.h> #include <asm/sections.h> #include <asm/machdep.h> -#include <asm/pSeries_reconfig.h> #include <asm/pci-bridge.h> #include <asm/kexec.h> #include <asm/opal.h> #include <asm/fadump.h> +#include <asm/debug.h> #include <mm/mmu_decl.h> @@ -802,7 +803,7 @@ static int prom_reconfig_notifier(struct notifier_block *nb, int err; switch (action) { - case PSERIES_RECONFIG_ADD: + case OF_RECONFIG_ATTACH_NODE: err = of_finish_dynamic_node(node); if (err < 0) printk(KERN_ERR "finish_node returned %d\n", err); @@ -821,7 +822,7 @@ static struct notifier_block prom_reconfig_nb = { static int __init prom_reconfig_setup(void) { - return pSeries_reconfig_notifier_register(&prom_reconfig_nb); + return of_reconfig_notifier_register(&prom_reconfig_nb); } __initcall(prom_reconfig_setup); #endif diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index cb6c123722a..7f7fb7fd991 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -66,8 +66,8 @@ * is running at whatever address it has been loaded at. * On ppc32 we compile with -mrelocatable, which means that references * to extern and static variables get relocated automatically. - * On ppc64 we have to relocate the references explicitly with - * RELOC. (Note that strings count as static variables.) + * ppc64 objects are always relocatable, we just need to relocate the + * TOC. * * Because OF may have mapped I/O devices into the area starting at * KERNELBASE, particularly on CHRP machines, we can't safely call @@ -79,13 +79,11 @@ * On ppc64, 64 bit values are truncated to 32 bits (and * fortunately don't get interpreted as two arguments). 
*/ +#define ADDR(x) (u32)(unsigned long)(x) + #ifdef CONFIG_PPC64 -#define RELOC(x) (*PTRRELOC(&(x))) -#define ADDR(x) (u32) add_reloc_offset((unsigned long)(x)) #define OF_WORKAROUNDS 0 #else -#define RELOC(x) (x) -#define ADDR(x) (u32) (x) #define OF_WORKAROUNDS of_workarounds int of_workarounds; #endif @@ -95,7 +93,7 @@ int of_workarounds; #define PROM_BUG() do { \ prom_printf("kernel BUG at %s line 0x%x!\n", \ - RELOC(__FILE__), __LINE__); \ + __FILE__, __LINE__); \ __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \ } while (0) @@ -233,7 +231,7 @@ static int __init call_prom(const char *service, int nargs, int nret, ...) for (i = 0; i < nret; i++) args.args[nargs+i] = 0; - if (enter_prom(&args, RELOC(prom_entry)) < 0) + if (enter_prom(&args, prom_entry) < 0) return PROM_ERROR; return (nret > 0) ? args.args[nargs] : 0; @@ -258,7 +256,7 @@ static int __init call_prom_ret(const char *service, int nargs, int nret, for (i = 0; i < nret; i++) args.args[nargs+i] = 0; - if (enter_prom(&args, RELOC(prom_entry)) < 0) + if (enter_prom(&args, prom_entry) < 0) return PROM_ERROR; if (rets != NULL) @@ -272,20 +270,19 @@ static int __init call_prom_ret(const char *service, int nargs, int nret, static void __init prom_print(const char *msg) { const char *p, *q; - struct prom_t *_prom = &RELOC(prom); - if (_prom->stdout == 0) + if (prom.stdout == 0) return; for (p = msg; *p != 0; p = q) { for (q = p; *q != 0 && *q != '\n'; ++q) ; if (q > p) - call_prom("write", 3, 1, _prom->stdout, p, q - p); + call_prom("write", 3, 1, prom.stdout, p, q - p); if (*q == 0) break; ++q; - call_prom("write", 3, 1, _prom->stdout, ADDR("\r\n"), 2); + call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2); } } @@ -294,7 +291,6 @@ static void __init prom_print_hex(unsigned long val) { int i, nibbles = sizeof(val)*2; char buf[sizeof(val)*2+1]; - struct prom_t *_prom = &RELOC(prom); for (i = nibbles-1; i >= 0; i--) { buf[i] = (val & 0xf) + '0'; @@ -303,7 +299,7 @@ static void __init prom_print_hex(unsigned long val) val >>= 4; } buf[nibbles] = '\0'; - call_prom("write", 3, 1, _prom->stdout, buf, nibbles); + call_prom("write", 3, 1, prom.stdout, buf, nibbles); } /* max number of decimal digits in an unsigned long */ @@ -312,7 +308,6 @@ static void __init prom_print_dec(unsigned long val) { int i, size; char buf[UL_DIGITS+1]; - struct prom_t *_prom = &RELOC(prom); for (i = UL_DIGITS-1; i >= 0; i--) { buf[i] = (val % 10) + '0'; @@ -322,7 +317,7 @@ static void __init prom_print_dec(unsigned long val) } /* shift stuff down */ size = UL_DIGITS - i; - call_prom("write", 3, 1, _prom->stdout, buf+i, size); + call_prom("write", 3, 1, prom.stdout, buf+i, size); } static void __init prom_printf(const char *format, ...) @@ -331,22 +326,18 @@ static void __init prom_printf(const char *format, ...) va_list args; unsigned long v; long vs; - struct prom_t *_prom = &RELOC(prom); va_start(args, format); -#ifdef CONFIG_PPC64 - format = PTRRELOC(format); -#endif for (p = format; *p != 0; p = q) { for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q) ; if (q > p) - call_prom("write", 3, 1, _prom->stdout, p, q - p); + call_prom("write", 3, 1, prom.stdout, p, q - p); if (*q == 0) break; if (*q == '\n') { ++q; - call_prom("write", 3, 1, _prom->stdout, + call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2); continue; } @@ -368,7 +359,7 @@ static void __init prom_printf(const char *format, ...) 
++q; vs = va_arg(args, int); if (vs < 0) { - prom_print(RELOC("-")); + prom_print("-"); vs = -vs; } prom_print_dec(vs); @@ -389,7 +380,7 @@ static void __init prom_printf(const char *format, ...) ++q; vs = va_arg(args, long); if (vs < 0) { - prom_print(RELOC("-")); + prom_print("-"); vs = -vs; } prom_print_dec(vs); @@ -403,7 +394,6 @@ static void __init prom_printf(const char *format, ...) static unsigned int __init prom_claim(unsigned long virt, unsigned long size, unsigned long align) { - struct prom_t *_prom = &RELOC(prom); if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) { /* @@ -414,21 +404,21 @@ static unsigned int __init prom_claim(unsigned long virt, unsigned long size, prom_arg_t result; ret = call_prom_ret("call-method", 5, 2, &result, - ADDR("claim"), _prom->memory, + ADDR("claim"), prom.memory, align, size, virt); if (ret != 0 || result == -1) return -1; ret = call_prom_ret("call-method", 5, 2, &result, - ADDR("claim"), _prom->mmumap, + ADDR("claim"), prom.mmumap, align, size, virt); if (ret != 0) { call_prom("call-method", 4, 1, ADDR("release"), - _prom->memory, size, virt); + prom.memory, size, virt); return -1; } /* the 0x12 is M (coherence) + PP == read/write */ call_prom("call-method", 6, 1, - ADDR("map"), _prom->mmumap, 0x12, size, virt, virt); + ADDR("map"), prom.mmumap, 0x12, size, virt, virt); return virt; } return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size, @@ -437,13 +427,10 @@ static unsigned int __init prom_claim(unsigned long virt, unsigned long size, static void __init __attribute__((noreturn)) prom_panic(const char *reason) { -#ifdef CONFIG_PPC64 - reason = PTRRELOC(reason); -#endif prom_print(reason); /* Do not call exit because it clears the screen on pmac * it also causes some sort of double-fault on early pmacs */ - if (RELOC(of_platform) == PLATFORM_POWERMAC) + if (of_platform == PLATFORM_POWERMAC) asm("trap\n"); /* ToDo: should put up an SRC here on pSeries */ @@ -525,13 +512,13 @@ static int __init prom_setprop(phandle node, const char *nodename, add_string(&p, tohex((u32)(unsigned long) value)); add_string(&p, tohex(valuelen)); add_string(&p, tohex(ADDR(pname))); - add_string(&p, tohex(strlen(RELOC(pname)))); + add_string(&p, tohex(strlen(pname))); add_string(&p, "property"); *p = 0; return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd); } -/* We can't use the standard versions because of RELOC headaches. */ +/* We can't use the standard versions because of relocation headaches. 
*/ #define isxdigit(c) (('0' <= (c) && (c) <= '9') \ || ('a' <= (c) && (c) <= 'f') \ || ('A' <= (c) && (c) <= 'F')) @@ -598,43 +585,42 @@ unsigned long prom_memparse(const char *ptr, const char **retptr) */ static void __init early_cmdline_parse(void) { - struct prom_t *_prom = &RELOC(prom); const char *opt; char *p; int l = 0; - RELOC(prom_cmd_line[0]) = 0; - p = RELOC(prom_cmd_line); - if ((long)_prom->chosen > 0) - l = prom_getprop(_prom->chosen, "bootargs", p, COMMAND_LINE_SIZE-1); + prom_cmd_line[0] = 0; + p = prom_cmd_line; + if ((long)prom.chosen > 0) + l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1); #ifdef CONFIG_CMDLINE if (l <= 0 || p[0] == '\0') /* dbl check */ - strlcpy(RELOC(prom_cmd_line), - RELOC(CONFIG_CMDLINE), sizeof(prom_cmd_line)); + strlcpy(prom_cmd_line, + CONFIG_CMDLINE, sizeof(prom_cmd_line)); #endif /* CONFIG_CMDLINE */ - prom_printf("command line: %s\n", RELOC(prom_cmd_line)); + prom_printf("command line: %s\n", prom_cmd_line); #ifdef CONFIG_PPC64 - opt = strstr(RELOC(prom_cmd_line), RELOC("iommu=")); + opt = strstr(prom_cmd_line, "iommu="); if (opt) { prom_printf("iommu opt is: %s\n", opt); opt += 6; while (*opt && *opt == ' ') opt++; - if (!strncmp(opt, RELOC("off"), 3)) - RELOC(prom_iommu_off) = 1; - else if (!strncmp(opt, RELOC("force"), 5)) - RELOC(prom_iommu_force_on) = 1; + if (!strncmp(opt, "off", 3)) + prom_iommu_off = 1; + else if (!strncmp(opt, "force", 5)) + prom_iommu_force_on = 1; } #endif - opt = strstr(RELOC(prom_cmd_line), RELOC("mem=")); + opt = strstr(prom_cmd_line, "mem="); if (opt) { opt += 4; - RELOC(prom_memory_limit) = prom_memparse(opt, (const char **)&opt); + prom_memory_limit = prom_memparse(opt, (const char **)&opt); #ifdef CONFIG_PPC64 /* Align to 16 MB == size of ppc64 large page */ - RELOC(prom_memory_limit) = ALIGN(RELOC(prom_memory_limit), 0x1000000); + prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000); #endif } } @@ -671,6 +657,7 @@ static void __init early_cmdline_parse(void) #define OV1_PPC_2_04 0x08 /* set if we support PowerPC 2.04 */ #define OV1_PPC_2_05 0x04 /* set if we support PowerPC 2.05 */ #define OV1_PPC_2_06 0x02 /* set if we support PowerPC 2.06 */ +#define OV1_PPC_2_07 0x01 /* set if we support PowerPC 2.07 */ /* Option vector 2: Open Firmware options supported */ #define OV2_REAL_MODE 0x20 /* set if we want OF in real mode */ @@ -707,6 +694,7 @@ static void __init early_cmdline_parse(void) #define OV5_PFO_HW_RNG 0x80 /* PFO Random Number Generator */ #define OV5_PFO_HW_842 0x40 /* PFO Compression Accelerator */ #define OV5_PFO_HW_ENCR 0x20 /* PFO Encryption Accelerator */ +#define OV5_SUB_PROCESSORS 0x01 /* 1,2,or 4 Sub-Processors supported */ /* Option Vector 6: IBM PAPR hints */ #define OV6_LINUX 0x02 /* Linux is our OS */ @@ -719,6 +707,8 @@ static unsigned char ibm_architecture_vec[] = { W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */ W(0xffff0000), W(0x003e0000), /* POWER6 */ W(0xffff0000), W(0x003f0000), /* POWER7 */ + W(0xffff0000), W(0x004b0000), /* POWER8 */ + W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */ W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */ W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */ W(0xfffffffe), W(0x0f000001), /* all 2.04-compliant and earlier */ @@ -728,7 +718,7 @@ static unsigned char ibm_architecture_vec[] = { 3 - 2, /* length */ 0, /* don't ignore, don't halt */ OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 | - OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06, + OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07, /* 
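Note on the new PVR entries in ibm_architecture_vec above: each entry is a (mask, value) pair matched against the processor version register, as sketched here from the labels in the patch:

	/* (PVR & 0xffff0000) == 0x004b0000   native POWER8
	 * (PVR & 0xffffffff) == 0x0f000004   architected "all 2.07-compliant"
	 *                                    logical PVR (compatibility mode)
	 */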
option vector 2: Open Firmware options supported */ 34 - 2, /* length */ @@ -755,7 +745,7 @@ static unsigned char ibm_architecture_vec[] = { OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */ /* option vector 5: PAPR/OF options */ - 18 - 2, /* length */ + 19 - 2, /* length */ 0, /* don't ignore, don't halt */ OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY | OV5_DONATE_DEDICATE_CPU | OV5_MSI, @@ -769,13 +759,14 @@ static unsigned char ibm_architecture_vec[] = { * must match by the macro below. Update the definition if * the structure layout changes. */ -#define IBM_ARCH_VEC_NRCORES_OFFSET 101 +#define IBM_ARCH_VEC_NRCORES_OFFSET 117 W(NR_CPUS), /* number of cores supported */ 0, 0, 0, 0, OV5_PFO_HW_RNG | OV5_PFO_HW_ENCR | OV5_PFO_HW_842, + OV5_SUB_PROCESSORS, /* option vector 6: IBM PAPR hints */ 4 - 2, /* length */ 0, @@ -882,7 +873,7 @@ static int __init prom_count_smt_threads(void) type[0] = 0; prom_getprop(node, "device_type", type, sizeof(type)); - if (strcmp(type, RELOC("cpu"))) + if (strcmp(type, "cpu")) continue; /* * There is an entry for each smt thread, each entry being @@ -924,7 +915,7 @@ static void __init prom_send_capabilities(void) * (we assume this is the same for all cores) and use it to * divide NR_CPUS. */ - cores = (u32 *)PTRRELOC(&ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET]); + cores = (u32 *)&ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET]; if (*cores != NR_CPUS) { prom_printf("WARNING ! " "ibm_architecture_vec structure inconsistent: %lu!\n", @@ -1000,21 +991,21 @@ static void __init prom_send_capabilities(void) */ static unsigned long __init alloc_up(unsigned long size, unsigned long align) { - unsigned long base = RELOC(alloc_bottom); + unsigned long base = alloc_bottom; unsigned long addr = 0; if (align) base = _ALIGN_UP(base, align); prom_debug("alloc_up(%x, %x)\n", size, align); - if (RELOC(ram_top) == 0) + if (ram_top == 0) prom_panic("alloc_up() called with mem not initialized\n"); if (align) - base = _ALIGN_UP(RELOC(alloc_bottom), align); + base = _ALIGN_UP(alloc_bottom, align); else - base = RELOC(alloc_bottom); + base = alloc_bottom; - for(; (base + size) <= RELOC(alloc_top); + for(; (base + size) <= alloc_top; base = _ALIGN_UP(base + 0x100000, align)) { prom_debug(" trying: 0x%x\n\r", base); addr = (unsigned long)prom_claim(base, size, 0); @@ -1026,14 +1017,14 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align) } if (addr == 0) return 0; - RELOC(alloc_bottom) = addr + size; + alloc_bottom = addr + size; prom_debug(" -> %x\n", addr); - prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom)); - prom_debug(" alloc_top : %x\n", RELOC(alloc_top)); - prom_debug(" alloc_top_hi : %x\n", RELOC(alloc_top_high)); - prom_debug(" rmo_top : %x\n", RELOC(rmo_top)); - prom_debug(" ram_top : %x\n", RELOC(ram_top)); + prom_debug(" alloc_bottom : %x\n", alloc_bottom); + prom_debug(" alloc_top : %x\n", alloc_top); + prom_debug(" alloc_top_hi : %x\n", alloc_top_high); + prom_debug(" rmo_top : %x\n", rmo_top); + prom_debug(" ram_top : %x\n", ram_top); return addr; } @@ -1049,32 +1040,32 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align, unsigned long base, addr = 0; prom_debug("alloc_down(%x, %x, %s)\n", size, align, - highmem ? RELOC("(high)") : RELOC("(low)")); - if (RELOC(ram_top) == 0) + highmem ? "(high)" : "(low)"); + if (ram_top == 0) prom_panic("alloc_down() called with mem not initialized\n"); if (highmem) { /* Carve out storage for the TCE table. 
*/ - addr = _ALIGN_DOWN(RELOC(alloc_top_high) - size, align); - if (addr <= RELOC(alloc_bottom)) + addr = _ALIGN_DOWN(alloc_top_high - size, align); + if (addr <= alloc_bottom) return 0; /* Will we bump into the RMO ? If yes, check out that we * didn't overlap existing allocations there, if we did, * we are dead, we must be the first in town ! */ - if (addr < RELOC(rmo_top)) { + if (addr < rmo_top) { /* Good, we are first */ - if (RELOC(alloc_top) == RELOC(rmo_top)) - RELOC(alloc_top) = RELOC(rmo_top) = addr; + if (alloc_top == rmo_top) + alloc_top = rmo_top = addr; else return 0; } - RELOC(alloc_top_high) = addr; + alloc_top_high = addr; goto bail; } - base = _ALIGN_DOWN(RELOC(alloc_top) - size, align); - for (; base > RELOC(alloc_bottom); + base = _ALIGN_DOWN(alloc_top - size, align); + for (; base > alloc_bottom; base = _ALIGN_DOWN(base - 0x100000, align)) { prom_debug(" trying: 0x%x\n\r", base); addr = (unsigned long)prom_claim(base, size, 0); @@ -1084,15 +1075,15 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align, } if (addr == 0) return 0; - RELOC(alloc_top) = addr; + alloc_top = addr; bail: prom_debug(" -> %x\n", addr); - prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom)); - prom_debug(" alloc_top : %x\n", RELOC(alloc_top)); - prom_debug(" alloc_top_hi : %x\n", RELOC(alloc_top_high)); - prom_debug(" rmo_top : %x\n", RELOC(rmo_top)); - prom_debug(" ram_top : %x\n", RELOC(ram_top)); + prom_debug(" alloc_bottom : %x\n", alloc_bottom); + prom_debug(" alloc_top : %x\n", alloc_top); + prom_debug(" alloc_top_hi : %x\n", alloc_top_high); + prom_debug(" rmo_top : %x\n", rmo_top); + prom_debug(" ram_top : %x\n", ram_top); return addr; } @@ -1132,7 +1123,7 @@ static unsigned long __init prom_next_cell(int s, cell_t **cellp) static void __init reserve_mem(u64 base, u64 size) { u64 top = base + size; - unsigned long cnt = RELOC(mem_reserve_cnt); + unsigned long cnt = mem_reserve_cnt; if (size == 0) return; @@ -1147,9 +1138,9 @@ static void __init reserve_mem(u64 base, u64 size) if (cnt >= (MEM_RESERVE_MAP_SIZE - 1)) prom_panic("Memory reserve map exhausted !\n"); - RELOC(mem_reserve_map)[cnt].base = base; - RELOC(mem_reserve_map)[cnt].size = size; - RELOC(mem_reserve_cnt) = cnt + 1; + mem_reserve_map[cnt].base = base; + mem_reserve_map[cnt].size = size; + mem_reserve_cnt = cnt + 1; } /* @@ -1162,7 +1153,6 @@ static void __init prom_init_mem(void) char *path, type[64]; unsigned int plen; cell_t *p, *endp; - struct prom_t *_prom = &RELOC(prom); u32 rac, rsc; /* @@ -1171,14 +1161,14 @@ static void __init prom_init_mem(void) * 2) top of memory */ rac = 2; - prom_getprop(_prom->root, "#address-cells", &rac, sizeof(rac)); + prom_getprop(prom.root, "#address-cells", &rac, sizeof(rac)); rsc = 1; - prom_getprop(_prom->root, "#size-cells", &rsc, sizeof(rsc)); + prom_getprop(prom.root, "#size-cells", &rsc, sizeof(rsc)); prom_debug("root_addr_cells: %x\n", (unsigned long) rac); prom_debug("root_size_cells: %x\n", (unsigned long) rsc); prom_debug("scanning memory:\n"); - path = RELOC(prom_scratch); + path = prom_scratch; for (node = 0; prom_next_node(&node); ) { type[0] = 0; @@ -1191,15 +1181,15 @@ static void __init prom_init_mem(void) */ prom_getprop(node, "name", type, sizeof(type)); } - if (strcmp(type, RELOC("memory"))) + if (strcmp(type, "memory")) continue; - plen = prom_getprop(node, "reg", RELOC(regbuf), sizeof(regbuf)); + plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf)); if (plen > sizeof(regbuf)) { prom_printf("memory node too large for buffer !\n"); 
plen = sizeof(regbuf); } - p = RELOC(regbuf); + p = regbuf; endp = p + (plen / sizeof(cell_t)); #ifdef DEBUG_PROM @@ -1217,14 +1207,14 @@ static void __init prom_init_mem(void) if (size == 0) continue; prom_debug(" %x %x\n", base, size); - if (base == 0 && (RELOC(of_platform) & PLATFORM_LPAR)) - RELOC(rmo_top) = size; - if ((base + size) > RELOC(ram_top)) - RELOC(ram_top) = base + size; + if (base == 0 && (of_platform & PLATFORM_LPAR)) + rmo_top = size; + if ((base + size) > ram_top) + ram_top = base + size; } } - RELOC(alloc_bottom) = PAGE_ALIGN((unsigned long)&RELOC(_end) + 0x4000); + alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000); /* * If prom_memory_limit is set we reduce the upper limits *except* for @@ -1232,20 +1222,20 @@ static void __init prom_init_mem(void) * TCE's up there. */ - RELOC(alloc_top_high) = RELOC(ram_top); + alloc_top_high = ram_top; - if (RELOC(prom_memory_limit)) { - if (RELOC(prom_memory_limit) <= RELOC(alloc_bottom)) { + if (prom_memory_limit) { + if (prom_memory_limit <= alloc_bottom) { prom_printf("Ignoring mem=%x <= alloc_bottom.\n", - RELOC(prom_memory_limit)); - RELOC(prom_memory_limit) = 0; - } else if (RELOC(prom_memory_limit) >= RELOC(ram_top)) { + prom_memory_limit); + prom_memory_limit = 0; + } else if (prom_memory_limit >= ram_top) { prom_printf("Ignoring mem=%x >= ram_top.\n", - RELOC(prom_memory_limit)); - RELOC(prom_memory_limit) = 0; + prom_memory_limit); + prom_memory_limit = 0; } else { - RELOC(ram_top) = RELOC(prom_memory_limit); - RELOC(rmo_top) = min(RELOC(rmo_top), RELOC(prom_memory_limit)); + ram_top = prom_memory_limit; + rmo_top = min(rmo_top, prom_memory_limit); } } @@ -1257,36 +1247,35 @@ static void __init prom_init_mem(void) * Since 768MB is plenty of room, and we need to cap to something * reasonable on 32-bit, cap at 768MB on all machines. */ - if (!RELOC(rmo_top)) - RELOC(rmo_top) = RELOC(ram_top); - RELOC(rmo_top) = min(0x30000000ul, RELOC(rmo_top)); - RELOC(alloc_top) = RELOC(rmo_top); - RELOC(alloc_top_high) = RELOC(ram_top); + if (!rmo_top) + rmo_top = ram_top; + rmo_top = min(0x30000000ul, rmo_top); + alloc_top = rmo_top; + alloc_top_high = ram_top; /* * Check if we have an initrd after the kernel but still inside * the RMO. If we do move our bottom point to after it. 
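For reference, the cell parsing in prom_init_mem() above decodes each memory node's "reg" property using the root node's #address-cells/#size-cells; a small example with hypothetical values:

	/* With #address-cells = 2 and #size-cells = 1 (the defaults the code
	 * falls back to), a property of
	 *   reg = <0x0 0x0  0x80000000>;
	 * decodes as base = 0x0, size = 0x80000000 (2 GB), after which the
	 * loop updates rmo_top (LPAR, base 0) and ram_top from base + size.
	 */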
*/ - if (RELOC(prom_initrd_start) && - RELOC(prom_initrd_start) < RELOC(rmo_top) && - RELOC(prom_initrd_end) > RELOC(alloc_bottom)) - RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(prom_initrd_end)); + if (prom_initrd_start && + prom_initrd_start < rmo_top && + prom_initrd_end > alloc_bottom) + alloc_bottom = PAGE_ALIGN(prom_initrd_end); prom_printf("memory layout at init:\n"); - prom_printf(" memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit)); - prom_printf(" alloc_bottom : %x\n", RELOC(alloc_bottom)); - prom_printf(" alloc_top : %x\n", RELOC(alloc_top)); - prom_printf(" alloc_top_hi : %x\n", RELOC(alloc_top_high)); - prom_printf(" rmo_top : %x\n", RELOC(rmo_top)); - prom_printf(" ram_top : %x\n", RELOC(ram_top)); + prom_printf(" memory_limit : %x (16 MB aligned)\n", prom_memory_limit); + prom_printf(" alloc_bottom : %x\n", alloc_bottom); + prom_printf(" alloc_top : %x\n", alloc_top); + prom_printf(" alloc_top_hi : %x\n", alloc_top_high); + prom_printf(" rmo_top : %x\n", rmo_top); + prom_printf(" ram_top : %x\n", ram_top); } static void __init prom_close_stdin(void) { - struct prom_t *_prom = &RELOC(prom); ihandle val; - if (prom_getprop(_prom->chosen, "stdin", &val, sizeof(val)) > 0) + if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) call_prom("close", 1, 0, val); } @@ -1327,19 +1316,19 @@ static void __init prom_query_opal(void) } prom_printf("Querying for OPAL presence... "); - rc = opal_query_takeover(&RELOC(prom_opal_size), - &RELOC(prom_opal_align)); + rc = opal_query_takeover(&prom_opal_size, + &prom_opal_align); prom_debug("(rc = %ld) ", rc); if (rc != 0) { prom_printf("not there.\n"); return; } - RELOC(of_platform) = PLATFORM_OPAL; + of_platform = PLATFORM_OPAL; prom_printf(" there !\n"); - prom_debug(" opal_size = 0x%lx\n", RELOC(prom_opal_size)); - prom_debug(" opal_align = 0x%lx\n", RELOC(prom_opal_align)); - if (RELOC(prom_opal_align) < 0x10000) - RELOC(prom_opal_align) = 0x10000; + prom_debug(" opal_size = 0x%lx\n", prom_opal_size); + prom_debug(" opal_align = 0x%lx\n", prom_opal_align); + if (prom_opal_align < 0x10000) + prom_opal_align = 0x10000; } static int prom_rtas_call(int token, int nargs, int nret, int *outputs, ...) @@ -1360,8 +1349,8 @@ static int prom_rtas_call(int token, int nargs, int nret, int *outputs, ...) for (i = 0; i < nret; ++i) rtas_args.rets[i] = 0; - opal_enter_rtas(&rtas_args, RELOC(prom_rtas_data), - RELOC(prom_rtas_entry)); + opal_enter_rtas(&rtas_args, prom_rtas_data, + prom_rtas_entry); if (nret > 1 && outputs != NULL) for (i = 0; i < nret-1; ++i) @@ -1376,9 +1365,8 @@ static void __init prom_opal_hold_cpus(void) phandle node; char type[64]; u32 servers[8]; - struct prom_t *_prom = &RELOC(prom); - void *entry = (unsigned long *)&RELOC(opal_secondary_entry); - struct opal_secondary_data *data = &RELOC(opal_secondary_data); + void *entry = (unsigned long *)&opal_secondary_entry; + struct opal_secondary_data *data = &opal_secondary_data; prom_debug("prom_opal_hold_cpus: start...\n"); prom_debug(" - entry = 0x%x\n", entry); @@ -1391,12 +1379,12 @@ static void __init prom_opal_hold_cpus(void) for (node = 0; prom_next_node(&node); ) { type[0] = 0; prom_getprop(node, "device_type", type, sizeof(type)); - if (strcmp(type, RELOC("cpu")) != 0) + if (strcmp(type, "cpu") != 0) continue; /* Skip non-configured cpus. 
*/ if (prom_getprop(node, "status", type, sizeof(type)) > 0) - if (strcmp(type, RELOC("okay")) != 0) + if (strcmp(type, "okay") != 0) continue; cnt = prom_getprop(node, "ibm,ppc-interrupt-server#s", servers, @@ -1407,7 +1395,7 @@ static void __init prom_opal_hold_cpus(void) for (i = 0; i < cnt; i++) { cpu = servers[i]; prom_debug("CPU %d ... ", cpu); - if (cpu == _prom->cpu) { + if (cpu == prom.cpu) { prom_debug("booted !\n"); continue; } @@ -1418,7 +1406,7 @@ static void __init prom_opal_hold_cpus(void) * spinloop. */ data->ack = -1; - rc = prom_rtas_call(RELOC(prom_rtas_start_cpu), 3, 1, + rc = prom_rtas_call(prom_rtas_start_cpu, 3, 1, NULL, cpu, entry, data); prom_debug("rtas rc=%d ...", rc); @@ -1438,21 +1426,21 @@ static void __init prom_opal_hold_cpus(void) static void __init prom_opal_takeover(void) { - struct opal_secondary_data *data = &RELOC(opal_secondary_data); + struct opal_secondary_data *data = &opal_secondary_data; struct opal_takeover_args *args = &data->args; - u64 align = RELOC(prom_opal_align); + u64 align = prom_opal_align; u64 top_addr, opal_addr; - args->k_image = (u64)RELOC(_stext); + args->k_image = (u64)_stext; args->k_size = _end - _stext; args->k_entry = 0; args->k_entry2 = 0x60; top_addr = _ALIGN_UP(args->k_size, align); - if (RELOC(prom_initrd_start) != 0) { - args->rd_image = RELOC(prom_initrd_start); - args->rd_size = RELOC(prom_initrd_end) - args->rd_image; + if (prom_initrd_start != 0) { + args->rd_image = prom_initrd_start; + args->rd_size = prom_initrd_end - args->rd_image; args->rd_loc = top_addr; top_addr = _ALIGN_UP(args->rd_loc + args->rd_size, align); } @@ -1464,13 +1452,13 @@ static void __init prom_opal_takeover(void) * has plenty of memory, and we ask for the HAL for now to * be just below the 1G point, or above the initrd */ - opal_addr = _ALIGN_DOWN(0x40000000 - RELOC(prom_opal_size), align); + opal_addr = _ALIGN_DOWN(0x40000000 - prom_opal_size, align); if (opal_addr < top_addr) opal_addr = top_addr; args->hal_addr = opal_addr; /* Copy the command line to the kernel image */ - strlcpy(RELOC(boot_command_line), RELOC(prom_cmd_line), + strlcpy(boot_command_line, prom_cmd_line, COMMAND_LINE_SIZE); prom_debug(" k_image = 0x%lx\n", args->k_image); @@ -1552,8 +1540,8 @@ static void __init prom_instantiate_opal(void) &entry, sizeof(entry)); #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL - RELOC(prom_opal_base) = base; - RELOC(prom_opal_entry) = entry; + prom_opal_base = base; + prom_opal_entry = entry; #endif prom_debug("prom_instantiate_opal: end...\n"); } @@ -1611,9 +1599,9 @@ static void __init prom_instantiate_rtas(void) #ifdef CONFIG_PPC_POWERNV /* PowerVN takeover hack */ - RELOC(prom_rtas_data) = base; - RELOC(prom_rtas_entry) = entry; - prom_getprop(rtas_node, "start-cpu", &RELOC(prom_rtas_start_cpu), 4); + prom_rtas_data = base; + prom_rtas_entry = entry; + prom_getprop(rtas_node, "start-cpu", &prom_rtas_start_cpu, 4); #endif prom_debug("rtas base = 0x%x\n", base); prom_debug("rtas entry = 0x%x\n", entry); @@ -1688,20 +1676,20 @@ static void __init prom_initialize_tce_table(void) phandle node; ihandle phb_node; char compatible[64], type[64], model[64]; - char *path = RELOC(prom_scratch); + char *path = prom_scratch; u64 base, align; u32 minalign, minsize; u64 tce_entry, *tce_entryp; u64 local_alloc_top, local_alloc_bottom; u64 i; - if (RELOC(prom_iommu_off)) + if (prom_iommu_off) return; prom_debug("starting prom_initialize_tce_table\n"); /* Cache current top of allocs so we reserve a single block */ - local_alloc_top = RELOC(alloc_top_high); + 
local_alloc_top = alloc_top_high; local_alloc_bottom = local_alloc_top; /* Search all nodes looking for PHBs. */ @@ -1714,19 +1702,19 @@ static void __init prom_initialize_tce_table(void) prom_getprop(node, "device_type", type, sizeof(type)); prom_getprop(node, "model", model, sizeof(model)); - if ((type[0] == 0) || (strstr(type, RELOC("pci")) == NULL)) + if ((type[0] == 0) || (strstr(type, "pci") == NULL)) continue; /* Keep the old logic intact to avoid regression. */ if (compatible[0] != 0) { - if ((strstr(compatible, RELOC("python")) == NULL) && - (strstr(compatible, RELOC("Speedwagon")) == NULL) && - (strstr(compatible, RELOC("Winnipeg")) == NULL)) + if ((strstr(compatible, "python") == NULL) && + (strstr(compatible, "Speedwagon") == NULL) && + (strstr(compatible, "Winnipeg") == NULL)) continue; } else if (model[0] != 0) { - if ((strstr(model, RELOC("ython")) == NULL) && - (strstr(model, RELOC("peedwagon")) == NULL) && - (strstr(model, RELOC("innipeg")) == NULL)) + if ((strstr(model, "ython") == NULL) && + (strstr(model, "peedwagon") == NULL) && + (strstr(model, "innipeg") == NULL)) continue; } @@ -1805,8 +1793,8 @@ static void __init prom_initialize_tce_table(void) /* These are only really needed if there is a memory limit in * effect, but we don't know so export them always. */ - RELOC(prom_tce_alloc_start) = local_alloc_bottom; - RELOC(prom_tce_alloc_end) = local_alloc_top; + prom_tce_alloc_start = local_alloc_bottom; + prom_tce_alloc_end = local_alloc_top; /* Flag the first invalid entry */ prom_debug("ending prom_initialize_tce_table\n"); @@ -1843,7 +1831,6 @@ static void __init prom_hold_cpus(void) unsigned int reg; phandle node; char type[64]; - struct prom_t *_prom = &RELOC(prom); unsigned long *spinloop = (void *) LOW_ADDR(__secondary_hold_spinloop); unsigned long *acknowledge @@ -1869,12 +1856,12 @@ static void __init prom_hold_cpus(void) for (node = 0; prom_next_node(&node); ) { type[0] = 0; prom_getprop(node, "device_type", type, sizeof(type)); - if (strcmp(type, RELOC("cpu")) != 0) + if (strcmp(type, "cpu") != 0) continue; /* Skip non-configured cpus. */ if (prom_getprop(node, "status", type, sizeof(type)) > 0) - if (strcmp(type, RELOC("okay")) != 0) + if (strcmp(type, "okay") != 0) continue; reg = -1; @@ -1888,7 +1875,7 @@ static void __init prom_hold_cpus(void) */ *acknowledge = (unsigned long)-1; - if (reg != _prom->cpu) { + if (reg != prom.cpu) { /* Primary Thread of non-boot cpu or any thread */ prom_printf("starting cpu hw idx %lu... 
", reg); call_prom("start-cpu", 3, 0, node, @@ -1915,22 +1902,20 @@ static void __init prom_hold_cpus(void) static void __init prom_init_client_services(unsigned long pp) { - struct prom_t *_prom = &RELOC(prom); - /* Get a handle to the prom entry point before anything else */ - RELOC(prom_entry) = pp; + prom_entry = pp; /* get a handle for the stdout device */ - _prom->chosen = call_prom("finddevice", 1, 1, ADDR("/chosen")); - if (!PHANDLE_VALID(_prom->chosen)) + prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen")); + if (!PHANDLE_VALID(prom.chosen)) prom_panic("cannot find chosen"); /* msg won't be printed :( */ /* get device tree root */ - _prom->root = call_prom("finddevice", 1, 1, ADDR("/")); - if (!PHANDLE_VALID(_prom->root)) + prom.root = call_prom("finddevice", 1, 1, ADDR("/")); + if (!PHANDLE_VALID(prom.root)) prom_panic("cannot find device tree root"); /* msg won't be printed :( */ - _prom->mmumap = 0; + prom.mmumap = 0; } #ifdef CONFIG_PPC32 @@ -1941,7 +1926,6 @@ static void __init prom_init_client_services(unsigned long pp) */ static void __init prom_find_mmu(void) { - struct prom_t *_prom = &RELOC(prom); phandle oprom; char version[64]; @@ -1959,10 +1943,10 @@ static void __init prom_find_mmu(void) call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim"); } else return; - _prom->memory = call_prom("open", 1, 1, ADDR("/memory")); - prom_getprop(_prom->chosen, "mmu", &_prom->mmumap, - sizeof(_prom->mmumap)); - if (!IHANDLE_VALID(_prom->memory) || !IHANDLE_VALID(_prom->mmumap)) + prom.memory = call_prom("open", 1, 1, ADDR("/memory")); + prom_getprop(prom.chosen, "mmu", &prom.mmumap, + sizeof(prom.mmumap)); + if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap)) of_workarounds &= ~OF_WA_CLAIM; /* hmmm */ } #else @@ -1971,36 +1955,34 @@ static void __init prom_find_mmu(void) static void __init prom_init_stdout(void) { - struct prom_t *_prom = &RELOC(prom); - char *path = RELOC(of_stdout_device); + char *path = of_stdout_device; char type[16]; u32 val; - if (prom_getprop(_prom->chosen, "stdout", &val, sizeof(val)) <= 0) + if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0) prom_panic("cannot find stdout"); - _prom->stdout = val; + prom.stdout = val; /* Get the full OF pathname of the stdout device */ memset(path, 0, 256); - call_prom("instance-to-path", 3, 1, _prom->stdout, path, 255); - val = call_prom("instance-to-package", 1, 1, _prom->stdout); - prom_setprop(_prom->chosen, "/chosen", "linux,stdout-package", + call_prom("instance-to-path", 3, 1, prom.stdout, path, 255); + val = call_prom("instance-to-package", 1, 1, prom.stdout); + prom_setprop(prom.chosen, "/chosen", "linux,stdout-package", &val, sizeof(val)); - prom_printf("OF stdout device is: %s\n", RELOC(of_stdout_device)); - prom_setprop(_prom->chosen, "/chosen", "linux,stdout-path", + prom_printf("OF stdout device is: %s\n", of_stdout_device); + prom_setprop(prom.chosen, "/chosen", "linux,stdout-path", path, strlen(path) + 1); /* If it's a display, note it */ memset(type, 0, sizeof(type)); prom_getprop(val, "device_type", type, sizeof(type)); - if (strcmp(type, RELOC("display")) == 0) + if (strcmp(type, "display") == 0) prom_setprop(val, path, "linux,boot-display", NULL, 0); } static int __init prom_find_machine_type(void) { - struct prom_t *_prom = &RELOC(prom); char compat[256]; int len, i = 0; #ifdef CONFIG_PPC64 @@ -2009,7 +1991,7 @@ static int __init prom_find_machine_type(void) #endif /* Look for a PowerMac or a Cell */ - len = prom_getprop(_prom->root, "compatible", + len = 
prom_getprop(prom.root, "compatible", compat, sizeof(compat)-1); if (len > 0) { compat[len] = 0; @@ -2018,16 +2000,16 @@ static int __init prom_find_machine_type(void) int sl = strlen(p); if (sl == 0) break; - if (strstr(p, RELOC("Power Macintosh")) || - strstr(p, RELOC("MacRISC"))) + if (strstr(p, "Power Macintosh") || + strstr(p, "MacRISC")) return PLATFORM_POWERMAC; #ifdef CONFIG_PPC64 /* We must make sure we don't detect the IBM Cell * blades as pSeries due to some firmware issues, * so we do it here. */ - if (strstr(p, RELOC("IBM,CBEA")) || - strstr(p, RELOC("IBM,CPBW-1.0"))) + if (strstr(p, "IBM,CBEA") || + strstr(p, "IBM,CPBW-1.0")) return PLATFORM_GENERIC; #endif /* CONFIG_PPC64 */ i += sl + 1; @@ -2044,11 +2026,11 @@ static int __init prom_find_machine_type(void) * non-IBM designs ! * - it has /rtas */ - len = prom_getprop(_prom->root, "device_type", + len = prom_getprop(prom.root, "device_type", compat, sizeof(compat)-1); if (len <= 0) return PLATFORM_GENERIC; - if (strcmp(compat, RELOC("chrp"))) + if (strcmp(compat, "chrp")) return PLATFORM_GENERIC; /* Default to pSeries. We need to know if we are running LPAR */ @@ -2110,11 +2092,11 @@ static void __init prom_check_displays(void) for (node = 0; prom_next_node(&node); ) { memset(type, 0, sizeof(type)); prom_getprop(node, "device_type", type, sizeof(type)); - if (strcmp(type, RELOC("display")) != 0) + if (strcmp(type, "display") != 0) continue; /* It seems OF doesn't null-terminate the path :-( */ - path = RELOC(prom_scratch); + path = prom_scratch; memset(path, 0, PROM_SCRATCH_SIZE); /* @@ -2138,15 +2120,15 @@ static void __init prom_check_displays(void) /* Setup a usable color table when the appropriate * method is available. Should update this to set-colors */ - clut = RELOC(default_colors); + clut = default_colors; for (i = 0; i < 16; i++, clut += 3) if (prom_set_color(ih, i, clut[0], clut[1], clut[2]) != 0) break; #ifdef CONFIG_LOGO_LINUX_CLUT224 - clut = PTRRELOC(RELOC(logo_linux_clut224.clut)); - for (i = 0; i < RELOC(logo_linux_clut224.clutsize); i++, clut += 3) + clut = PTRRELOC(logo_linux_clut224.clut); + for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3) if (prom_set_color(ih, i + 32, clut[0], clut[1], clut[2]) != 0) break; @@ -2166,8 +2148,8 @@ static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end, unsigned long room, chunk; prom_debug("Chunk exhausted, claiming more at %x...\n", - RELOC(alloc_bottom)); - room = RELOC(alloc_top) - RELOC(alloc_bottom); + alloc_bottom); + room = alloc_top - alloc_bottom; if (room > DEVTREE_CHUNK_SIZE) room = DEVTREE_CHUNK_SIZE; if (room < PAGE_SIZE) @@ -2193,9 +2175,9 @@ static unsigned long __init dt_find_string(char *str) { char *s, *os; - s = os = (char *)RELOC(dt_string_start); + s = os = (char *)dt_string_start; s += 4; - while (s < (char *)RELOC(dt_string_end)) { + while (s < (char *)dt_string_end) { if (strcmp(s, str) == 0) return s - os; s += strlen(s) + 1; @@ -2217,10 +2199,10 @@ static void __init scan_dt_build_strings(phandle node, unsigned long soff; phandle child; - sstart = (char *)RELOC(dt_string_start); + sstart = (char *)dt_string_start; /* get and store all property names */ - prev_name = RELOC(""); + prev_name = ""; for (;;) { /* 64 is max len of name including nul. 
*/ namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1); @@ -2231,9 +2213,9 @@ static void __init scan_dt_build_strings(phandle node, } /* skip "name" */ - if (strcmp(namep, RELOC("name")) == 0) { + if (strcmp(namep, "name") == 0) { *mem_start = (unsigned long)namep; - prev_name = RELOC("name"); + prev_name = "name"; continue; } /* get/create string entry */ @@ -2244,7 +2226,7 @@ static void __init scan_dt_build_strings(phandle node, } else { /* Trim off some if we can */ *mem_start = (unsigned long)namep + strlen(namep) + 1; - RELOC(dt_string_end) = *mem_start; + dt_string_end = *mem_start; } prev_name = namep; } @@ -2299,35 +2281,35 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, } /* get it again for debugging */ - path = RELOC(prom_scratch); + path = prom_scratch; memset(path, 0, PROM_SCRATCH_SIZE); call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1); /* get and store all properties */ - prev_name = RELOC(""); - sstart = (char *)RELOC(dt_string_start); + prev_name = ""; + sstart = (char *)dt_string_start; for (;;) { if (call_prom("nextprop", 3, 1, node, prev_name, - RELOC(pname)) != 1) + pname) != 1) break; /* skip "name" */ - if (strcmp(RELOC(pname), RELOC("name")) == 0) { - prev_name = RELOC("name"); + if (strcmp(pname, "name") == 0) { + prev_name = "name"; continue; } /* find string offset */ - soff = dt_find_string(RELOC(pname)); + soff = dt_find_string(pname); if (soff == 0) { prom_printf("WARNING: Can't find string index for" - " <%s>, node %s\n", RELOC(pname), path); + " <%s>, node %s\n", pname, path); break; } prev_name = sstart + soff; /* get length */ - l = call_prom("getproplen", 2, 1, node, RELOC(pname)); + l = call_prom("getproplen", 2, 1, node, pname); /* sanity checks */ if (l == PROM_ERROR) @@ -2340,10 +2322,10 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, /* push property content */ valp = make_room(mem_start, mem_end, l, 4); - call_prom("getprop", 4, 1, node, RELOC(pname), valp, l); + call_prom("getprop", 4, 1, node, pname, valp, l); *mem_start = _ALIGN(*mem_start, 4); - if (!strcmp(RELOC(pname), RELOC("phandle"))) + if (!strcmp(pname, "phandle")) has_phandle = 1; } @@ -2351,7 +2333,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, * existed (can happen with OPAL) */ if (!has_phandle) { - soff = dt_find_string(RELOC("linux,phandle")); + soff = dt_find_string("linux,phandle"); if (soff == 0) prom_printf("WARNING: Can't find string index for" " <linux-phandle> node %s\n", path); @@ -2379,7 +2361,6 @@ static void __init flatten_device_tree(void) phandle root; unsigned long mem_start, mem_end, room; struct boot_param_header *hdr; - struct prom_t *_prom = &RELOC(prom); char *namep; u64 *rsvmap; @@ -2387,10 +2368,10 @@ static void __init flatten_device_tree(void) * Check how much room we have between alloc top & bottom (+/- a * few pages), crop to 1MB, as this is our "chunk" size */ - room = RELOC(alloc_top) - RELOC(alloc_bottom) - 0x4000; + room = alloc_top - alloc_bottom - 0x4000; if (room > DEVTREE_CHUNK_SIZE) room = DEVTREE_CHUNK_SIZE; - prom_debug("starting device tree allocs at %x\n", RELOC(alloc_bottom)); + prom_debug("starting device tree allocs at %x\n", alloc_bottom); /* Now try to claim that */ mem_start = (unsigned long)alloc_up(room, PAGE_SIZE); @@ -2407,66 +2388,66 @@ static void __init flatten_device_tree(void) mem_start = _ALIGN(mem_start, 4); hdr = make_room(&mem_start, &mem_end, sizeof(struct boot_param_header), 4); - 
RELOC(dt_header_start) = (unsigned long)hdr; + dt_header_start = (unsigned long)hdr; rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8); /* Start of strings */ mem_start = PAGE_ALIGN(mem_start); - RELOC(dt_string_start) = mem_start; + dt_string_start = mem_start; mem_start += 4; /* hole */ /* Add "linux,phandle" in there, we'll need it */ namep = make_room(&mem_start, &mem_end, 16, 1); - strcpy(namep, RELOC("linux,phandle")); + strcpy(namep, "linux,phandle"); mem_start = (unsigned long)namep + strlen(namep) + 1; /* Build string array */ prom_printf("Building dt strings...\n"); scan_dt_build_strings(root, &mem_start, &mem_end); - RELOC(dt_string_end) = mem_start; + dt_string_end = mem_start; /* Build structure */ mem_start = PAGE_ALIGN(mem_start); - RELOC(dt_struct_start) = mem_start; + dt_struct_start = mem_start; prom_printf("Building dt structure...\n"); scan_dt_build_struct(root, &mem_start, &mem_end); dt_push_token(OF_DT_END, &mem_start, &mem_end); - RELOC(dt_struct_end) = PAGE_ALIGN(mem_start); + dt_struct_end = PAGE_ALIGN(mem_start); /* Finish header */ - hdr->boot_cpuid_phys = _prom->cpu; + hdr->boot_cpuid_phys = prom.cpu; hdr->magic = OF_DT_HEADER; - hdr->totalsize = RELOC(dt_struct_end) - RELOC(dt_header_start); - hdr->off_dt_struct = RELOC(dt_struct_start) - RELOC(dt_header_start); - hdr->off_dt_strings = RELOC(dt_string_start) - RELOC(dt_header_start); - hdr->dt_strings_size = RELOC(dt_string_end) - RELOC(dt_string_start); - hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - RELOC(dt_header_start); + hdr->totalsize = dt_struct_end - dt_header_start; + hdr->off_dt_struct = dt_struct_start - dt_header_start; + hdr->off_dt_strings = dt_string_start - dt_header_start; + hdr->dt_strings_size = dt_string_end - dt_string_start; + hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - dt_header_start; hdr->version = OF_DT_VERSION; /* Version 16 is not backward compatible */ hdr->last_comp_version = 0x10; /* Copy the reserve map in */ - memcpy(rsvmap, RELOC(mem_reserve_map), sizeof(mem_reserve_map)); + memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map)); #ifdef DEBUG_PROM { int i; prom_printf("reserved memory map:\n"); - for (i = 0; i < RELOC(mem_reserve_cnt); i++) + for (i = 0; i < mem_reserve_cnt; i++) prom_printf(" %x - %x\n", - RELOC(mem_reserve_map)[i].base, - RELOC(mem_reserve_map)[i].size); + mem_reserve_map[i].base, + mem_reserve_map[i].size); } #endif /* Bump mem_reserve_cnt to cause further reservations to fail * since it's too late. 
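For orientation, the header fields filled in above follow the flattened device tree layout: every offset is relative to dt_header_start, and the strings and structure blocks must both land inside totalsize. A minimal consistency check over those fields, using the struct boot_param_header and OF_DT_HEADER names from the kernel headers of this era (an illustration only, not something prom_init itself runs):

        /* Illustrative sanity check of a freshly built flattened-tree header. */
        static int fdt_header_sane(const struct boot_param_header *hdr)
        {
                return hdr->magic == OF_DT_HEADER &&
                       hdr->last_comp_version <= hdr->version &&
                       hdr->off_dt_struct < hdr->totalsize &&
                       hdr->off_dt_strings + hdr->dt_strings_size <= hdr->totalsize;
        }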
*/ - RELOC(mem_reserve_cnt) = MEM_RESERVE_MAP_SIZE; + mem_reserve_cnt = MEM_RESERVE_MAP_SIZE; prom_printf("Device tree strings 0x%x -> 0x%x\n", - RELOC(dt_string_start), RELOC(dt_string_end)); + dt_string_start, dt_string_end); prom_printf("Device tree struct 0x%x -> 0x%x\n", - RELOC(dt_struct_start), RELOC(dt_struct_end)); + dt_struct_start, dt_struct_end); } @@ -2521,7 +2502,6 @@ static void __init fixup_device_tree_maple_memory_controller(void) phandle mc; u32 mc_reg[4]; char *name = "/hostbridge@f8000000"; - struct prom_t *_prom = &RELOC(prom); u32 ac, sc; mc = call_prom("finddevice", 1, 1, ADDR(name)); @@ -2531,8 +2511,8 @@ static void __init fixup_device_tree_maple_memory_controller(void) if (prom_getproplen(mc, "reg") != 8) return; - prom_getprop(_prom->root, "#address-cells", &ac, sizeof(ac)); - prom_getprop(_prom->root, "#size-cells", &sc, sizeof(sc)); + prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac)); + prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc)); if ((ac != 2) || (sc != 2)) return; @@ -2801,50 +2781,94 @@ static void __init fixup_device_tree(void) static void __init prom_find_boot_cpu(void) { - struct prom_t *_prom = &RELOC(prom); u32 getprop_rval; ihandle prom_cpu; phandle cpu_pkg; - _prom->cpu = 0; - if (prom_getprop(_prom->chosen, "cpu", &prom_cpu, sizeof(prom_cpu)) <= 0) + prom.cpu = 0; + if (prom_getprop(prom.chosen, "cpu", &prom_cpu, sizeof(prom_cpu)) <= 0) return; cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu); prom_getprop(cpu_pkg, "reg", &getprop_rval, sizeof(getprop_rval)); - _prom->cpu = getprop_rval; + prom.cpu = getprop_rval; - prom_debug("Booting CPU hw index = %lu\n", _prom->cpu); + prom_debug("Booting CPU hw index = %lu\n", prom.cpu); } static void __init prom_check_initrd(unsigned long r3, unsigned long r4) { #ifdef CONFIG_BLK_DEV_INITRD - struct prom_t *_prom = &RELOC(prom); - if (r3 && r4 && r4 != 0xdeadbeef) { unsigned long val; - RELOC(prom_initrd_start) = is_kernel_addr(r3) ? __pa(r3) : r3; - RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4; + prom_initrd_start = is_kernel_addr(r3) ? 
__pa(r3) : r3; + prom_initrd_end = prom_initrd_start + r4; - val = RELOC(prom_initrd_start); - prom_setprop(_prom->chosen, "/chosen", "linux,initrd-start", + val = prom_initrd_start; + prom_setprop(prom.chosen, "/chosen", "linux,initrd-start", &val, sizeof(val)); - val = RELOC(prom_initrd_end); - prom_setprop(_prom->chosen, "/chosen", "linux,initrd-end", + val = prom_initrd_end; + prom_setprop(prom.chosen, "/chosen", "linux,initrd-end", &val, sizeof(val)); - reserve_mem(RELOC(prom_initrd_start), - RELOC(prom_initrd_end) - RELOC(prom_initrd_start)); + reserve_mem(prom_initrd_start, + prom_initrd_end - prom_initrd_start); - prom_debug("initrd_start=0x%x\n", RELOC(prom_initrd_start)); - prom_debug("initrd_end=0x%x\n", RELOC(prom_initrd_end)); + prom_debug("initrd_start=0x%x\n", prom_initrd_start); + prom_debug("initrd_end=0x%x\n", prom_initrd_end); } #endif /* CONFIG_BLK_DEV_INITRD */ } +#ifdef CONFIG_PPC64 +#ifdef CONFIG_RELOCATABLE +static void reloc_toc(void) +{ +} + +static void unreloc_toc(void) +{ +} +#else +static void __reloc_toc(void *tocstart, unsigned long offset, + unsigned long nr_entries) +{ + unsigned long i; + unsigned long *toc_entry = (unsigned long *)tocstart; + + for (i = 0; i < nr_entries; i++) { + *toc_entry = *toc_entry + offset; + toc_entry++; + } +} + +static void reloc_toc(void) +{ + unsigned long offset = reloc_offset(); + unsigned long nr_entries = + (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); + + /* Need to add offset to get at __prom_init_toc_start */ + __reloc_toc(__prom_init_toc_start + offset, offset, nr_entries); + + mb(); +} + +static void unreloc_toc(void) +{ + unsigned long offset = reloc_offset(); + unsigned long nr_entries = + (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); + + mb(); + + /* __prom_init_toc_start has been relocated, no need to add offset */ + __reloc_toc(__prom_init_toc_start, -offset, nr_entries); +} +#endif +#endif /* * We enter here early on, when the Open Firmware prom is still @@ -2856,20 +2880,19 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, unsigned long r6, unsigned long r7, unsigned long kbase) { - struct prom_t *_prom; unsigned long hdr; #ifdef CONFIG_PPC32 unsigned long offset = reloc_offset(); reloc_got2(offset); +#else + reloc_toc(); #endif - _prom = &RELOC(prom); - /* * First zero the BSS */ - memset(&RELOC(__bss_start), 0, __bss_stop - __bss_start); + memset(&__bss_start, 0, __bss_stop - __bss_start); /* * Init interface to Open Firmware, get some node references, @@ -2888,14 +2911,14 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, */ prom_init_stdout(); - prom_printf("Preparing to boot %s", RELOC(linux_banner)); + prom_printf("Preparing to boot %s", linux_banner); /* * Get default machine type. At this point, we do not differentiate * between pSeries SMP and pSeries LPAR */ - RELOC(of_platform) = prom_find_machine_type(); - prom_printf("Detected machine type: %x\n", RELOC(of_platform)); + of_platform = prom_find_machine_type(); + prom_printf("Detected machine type: %x\n", of_platform); #ifndef CONFIG_NONSTATIC_KERNEL /* Bail if this is a kdump kernel. 
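To close the loop on prom_check_initrd() above: the linux,initrd-start and linux,initrd-end properties it writes into /chosen are read back later by the generic flat-device-tree code. A rough sketch of that consumer side, with signatures approximated for kernels of this vintage (cf. early_init_dt_check_for_initrd() in drivers/of/fdt.c):

        /* Approximate consumer of the /chosen initrd properties; treat the
         * of_get_flat_dt_prop()/of_read_ulong() signatures as era-approximate. */
        static void __init check_initrd_props(unsigned long chosen_node)
        {
                unsigned long len;
                void *prop;

                prop = of_get_flat_dt_prop(chosen_node, "linux,initrd-start", &len);
                if (!prop)
                        return;
                initrd_start = (unsigned long)__va(of_read_ulong(prop, len / 4));

                prop = of_get_flat_dt_prop(chosen_node, "linux,initrd-end", &len);
                if (!prop)
                        return;
                initrd_end = (unsigned long)__va(of_read_ulong(prop, len / 4));
        }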
*/ @@ -2912,15 +2935,15 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, /* * On pSeries, inform the firmware about our capabilities */ - if (RELOC(of_platform) == PLATFORM_PSERIES || - RELOC(of_platform) == PLATFORM_PSERIES_LPAR) + if (of_platform == PLATFORM_PSERIES || + of_platform == PLATFORM_PSERIES_LPAR) prom_send_capabilities(); #endif /* * Copy the CPU hold code */ - if (RELOC(of_platform) != PLATFORM_POWERMAC) + if (of_platform != PLATFORM_POWERMAC) copy_and_flush(0, kbase, 0x100, 0); /* @@ -2949,7 +2972,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, * that uses the allocator, we need to make sure we get the top of memory * available for us here... */ - if (RELOC(of_platform) == PLATFORM_PSERIES) + if (of_platform == PLATFORM_PSERIES) prom_initialize_tce_table(); #endif @@ -2957,19 +2980,19 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, * On non-powermacs, try to instantiate RTAS. PowerMacs don't * have a usable RTAS implementation. */ - if (RELOC(of_platform) != PLATFORM_POWERMAC && - RELOC(of_platform) != PLATFORM_OPAL) + if (of_platform != PLATFORM_POWERMAC && + of_platform != PLATFORM_OPAL) prom_instantiate_rtas(); #ifdef CONFIG_PPC_POWERNV /* Detect HAL and try instanciating it & doing takeover */ - if (RELOC(of_platform) == PLATFORM_PSERIES_LPAR) { + if (of_platform == PLATFORM_PSERIES_LPAR) { prom_query_opal(); - if (RELOC(of_platform) == PLATFORM_OPAL) { + if (of_platform == PLATFORM_OPAL) { prom_opal_hold_cpus(); prom_opal_takeover(); } - } else if (RELOC(of_platform) == PLATFORM_OPAL) + } else if (of_platform == PLATFORM_OPAL) prom_instantiate_opal(); #endif @@ -2983,32 +3006,32 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, * * PowerMacs use a different mechanism to spin CPUs */ - if (RELOC(of_platform) != PLATFORM_POWERMAC && - RELOC(of_platform) != PLATFORM_OPAL) + if (of_platform != PLATFORM_POWERMAC && + of_platform != PLATFORM_OPAL) prom_hold_cpus(); /* * Fill in some infos for use by the kernel later on */ - if (RELOC(prom_memory_limit)) - prom_setprop(_prom->chosen, "/chosen", "linux,memory-limit", - &RELOC(prom_memory_limit), + if (prom_memory_limit) + prom_setprop(prom.chosen, "/chosen", "linux,memory-limit", + &prom_memory_limit, sizeof(prom_memory_limit)); #ifdef CONFIG_PPC64 - if (RELOC(prom_iommu_off)) - prom_setprop(_prom->chosen, "/chosen", "linux,iommu-off", + if (prom_iommu_off) + prom_setprop(prom.chosen, "/chosen", "linux,iommu-off", NULL, 0); - if (RELOC(prom_iommu_force_on)) - prom_setprop(_prom->chosen, "/chosen", "linux,iommu-force-on", + if (prom_iommu_force_on) + prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on", NULL, 0); - if (RELOC(prom_tce_alloc_start)) { - prom_setprop(_prom->chosen, "/chosen", "linux,tce-alloc-start", - &RELOC(prom_tce_alloc_start), + if (prom_tce_alloc_start) { + prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start", + &prom_tce_alloc_start, sizeof(prom_tce_alloc_start)); - prom_setprop(_prom->chosen, "/chosen", "linux,tce-alloc-end", - &RELOC(prom_tce_alloc_end), + prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end", + &prom_tce_alloc_end, sizeof(prom_tce_alloc_end)); } #endif @@ -3030,8 +3053,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, * closed stdin already (in particular the powerbook 101). It * appears that the OPAL version of OFW doesn't like it either. 
*/ - if (RELOC(of_platform) != PLATFORM_POWERMAC && - RELOC(of_platform) != PLATFORM_OPAL) + if (of_platform != PLATFORM_POWERMAC && + of_platform != PLATFORM_OPAL) prom_close_stdin(); /* @@ -3046,22 +3069,24 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, * tree and NULL as r5, thus triggering the new entry point which * is common to us and kexec */ - hdr = RELOC(dt_header_start); + hdr = dt_header_start; /* Don't print anything after quiesce under OPAL, it crashes OFW */ - if (RELOC(of_platform) != PLATFORM_OPAL) { + if (of_platform != PLATFORM_OPAL) { prom_printf("returning from prom_init\n"); prom_debug("->dt_header_start=0x%x\n", hdr); } #ifdef CONFIG_PPC32 reloc_got2(-offset); +#else + unreloc_toc(); #endif #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL /* OPAL early debug gets the OPAL base & entry in r8 and r9 */ __start(hdr, kbase, 0, 0, 0, - RELOC(prom_opal_base), RELOC(prom_opal_entry)); + prom_opal_base, prom_opal_entry); #else __start(hdr, kbase, 0, 0, 0, 0, 0); #endif diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh index 70f4286eaa7..3765da6be4f 100644 --- a/arch/powerpc/kernel/prom_init_check.sh +++ b/arch/powerpc/kernel/prom_init_check.sh @@ -22,7 +22,7 @@ __secondary_hold_acknowledge __secondary_hold_spinloop __start strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224 reloc_got2 kernstart_addr memstart_addr linux_banner _stext opal_query_takeover opal_do_takeover opal_enter_rtas opal_secondary_entry -boot_command_line" +boot_command_line __prom_init_toc_start __prom_init_toc_end" NM="$1" OBJ="$2" diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 79d8e56470d..245c1b6a085 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -179,6 +179,30 @@ static int set_user_msr(struct task_struct *task, unsigned long msr) return 0; } +#ifdef CONFIG_PPC64 +static unsigned long get_user_dscr(struct task_struct *task) +{ + return task->thread.dscr; +} + +static int set_user_dscr(struct task_struct *task, unsigned long dscr) +{ + task->thread.dscr = dscr; + task->thread.dscr_inherit = 1; + return 0; +} +#else +static unsigned long get_user_dscr(struct task_struct *task) +{ + return -EIO; +} + +static int set_user_dscr(struct task_struct *task, unsigned long dscr) +{ + return -EIO; +} +#endif + /* * We prevent mucking around with the reserved area of trap * which are used internally by the kernel. @@ -200,6 +224,9 @@ unsigned long ptrace_get_reg(struct task_struct *task, int regno) if (regno == PT_MSR) return get_user_msr(task); + if (regno == PT_DSCR) + return get_user_dscr(task); + if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) return ((unsigned long *)task->thread.regs)[regno]; @@ -218,6 +245,8 @@ int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data) return set_user_msr(task, data); if (regno == PT_TRAP) return set_user_trap(task, data); + if (regno == PT_DSCR) + return set_user_dscr(task, data); if (regno <= PT_MAX_PUT_REG) { ((unsigned long *)task->thread.regs)[regno] = data; @@ -905,6 +934,9 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, struct perf_event *bp; struct perf_event_attr attr; #endif /* CONFIG_HAVE_HW_BREAKPOINT */ +#ifndef CONFIG_PPC_ADV_DEBUG_REGS + struct arch_hw_breakpoint hw_brk; +#endif /* For ppc64 we support one DABR and no IABR's at the moment (ppc64). 
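The DSCR hooks added above make the Data Stream Control Register visible as one more pseudo-register in the PEEKUSER/POKEUSER address space, with writes routed through set_user_dscr(), which also sets dscr_inherit. A minimal tracer-side sketch, assuming a ppc64 target whose <asm/ptrace.h> provides PT_DSCR and a tracee that is already stopped under ptrace (the 32-bit stubs above return -EIO instead):

        #include <errno.h>
        #include <stdio.h>
        #include <sys/ptrace.h>
        #include <sys/types.h>
        #include <asm/ptrace.h>         /* PT_DSCR on ppc64 */

        /* PEEKUSER/POKEUSER addresses are <register index> * sizeof(long). */
        static int dump_and_set_dscr(pid_t pid, unsigned long new_dscr)
        {
                long dscr;

                errno = 0;
                dscr = ptrace(PTRACE_PEEKUSER, pid,
                              (void *)(PT_DSCR * sizeof(long)), NULL);
                if (dscr == -1 && errno)
                        return -1;
                printf("DSCR = 0x%lx\n", dscr);

                return ptrace(PTRACE_POKEUSER, pid,
                              (void *)(PT_DSCR * sizeof(long)), (void *)new_dscr);
        }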
* For embedded processors we support one DAC and no IAC's at the @@ -931,14 +963,17 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, */ /* Ensure breakpoint translation bit is set */ - if (data && !(data & DABR_TRANSLATION)) + if (data && !(data & HW_BRK_TYPE_TRANSLATE)) return -EIO; + hw_brk.address = data & (~HW_BRK_TYPE_DABR); + hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL; + hw_brk.len = 8; #ifdef CONFIG_HAVE_HW_BREAKPOINT if (ptrace_get_breakpoints(task) < 0) return -ESRCH; bp = thread->ptrace_bps[0]; - if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) { + if ((!data) || !(hw_brk.type & HW_BRK_TYPE_RDWR)) { if (bp) { unregister_hw_breakpoint(bp); thread->ptrace_bps[0] = NULL; @@ -948,10 +983,12 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, } if (bp) { attr = bp->attr; - attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN; - arch_bp_generic_fields(data & - (DABR_DATA_WRITE | DABR_DATA_READ), - &attr.bp_type); + attr.bp_addr = hw_brk.address; + arch_bp_generic_fields(hw_brk.type, &attr.bp_type); + + /* Enable breakpoint */ + attr.disabled = false; + ret = modify_user_hw_breakpoint(bp, &attr); if (ret) { ptrace_put_breakpoints(task); @@ -959,16 +996,15 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, } thread->ptrace_bps[0] = bp; ptrace_put_breakpoints(task); - thread->dabr = data; - thread->dabrx = DABRX_ALL; + thread->hw_brk = hw_brk; return 0; } /* Create a new breakpoint request if one doesn't exist already */ hw_breakpoint_init(&attr); - attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN; - arch_bp_generic_fields(data & (DABR_DATA_WRITE | DABR_DATA_READ), - &attr.bp_type); + attr.bp_addr = hw_brk.address; + arch_bp_generic_fields(hw_brk.type, + &attr.bp_type); thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr, ptrace_triggered, NULL, task); @@ -981,10 +1017,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, ptrace_put_breakpoints(task); #endif /* CONFIG_HAVE_HW_BREAKPOINT */ - - /* Move contents to the DABR register */ - task->thread.dabr = data; - task->thread.dabrx = DABRX_ALL; + task->thread.hw_brk = hw_brk; #else /* CONFIG_PPC_ADV_DEBUG_REGS */ /* As described above, it was assumed 3 bits were passed with the data * address, but we will assume only the mode bits will be passed @@ -1037,7 +1070,7 @@ void ptrace_disable(struct task_struct *child) } #ifdef CONFIG_PPC_ADV_DEBUG_REGS -static long set_intruction_bp(struct task_struct *child, +static long set_instruction_bp(struct task_struct *child, struct ppc_hw_breakpoint *bp_info) { int slot; @@ -1338,8 +1371,14 @@ static int set_dac_range(struct task_struct *child, static long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_info) { +#ifdef CONFIG_HAVE_HW_BREAKPOINT + int len = 0; + struct thread_struct *thread = &(child->thread); + struct perf_event *bp; + struct perf_event_attr attr; +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ #ifndef CONFIG_PPC_ADV_DEBUG_REGS - unsigned long dabr; + struct arch_hw_breakpoint brk; #endif if (bp_info->version != 1) @@ -1365,7 +1404,7 @@ static long ppc_set_hwdebug(struct task_struct *child, if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) || (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)) return -EINVAL; - return set_intruction_bp(child, bp_info); + return set_instruction_bp(child, bp_info); } if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT) return set_dac(child, bp_info); @@ -1381,32 +1420,75 @@ static long 
ppc_set_hwdebug(struct task_struct *child, */ if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 || (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 || - bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT || bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE) return -EINVAL; - if (child->thread.dabr) - return -ENOSPC; - if ((unsigned long)bp_info->addr >= TASK_SIZE) return -EIO; - dabr = (unsigned long)bp_info->addr & ~7UL; - dabr |= DABR_TRANSLATION; + brk.address = bp_info->addr & ~7UL; + brk.type = HW_BRK_TYPE_TRANSLATE; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) - dabr |= DABR_DATA_READ; + brk.type |= HW_BRK_TYPE_READ; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) - dabr |= DABR_DATA_WRITE; + brk.type |= HW_BRK_TYPE_WRITE; +#ifdef CONFIG_HAVE_HW_BREAKPOINT + if (ptrace_get_breakpoints(child) < 0) + return -ESRCH; + + /* + * Check if the request is for 'range' breakpoints. We can + * support it if range < 8 bytes. + */ + if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE) { + len = bp_info->addr2 - bp_info->addr; + } else if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) { + ptrace_put_breakpoints(child); + return -EINVAL; + } + bp = thread->ptrace_bps[0]; + if (bp) { + ptrace_put_breakpoints(child); + return -ENOSPC; + } + + /* Create a new breakpoint request if one doesn't exist already */ + hw_breakpoint_init(&attr); + attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN; + attr.bp_len = len; + arch_bp_generic_fields(brk.type, &attr.bp_type); + + thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr, + ptrace_triggered, NULL, child); + if (IS_ERR(bp)) { + thread->ptrace_bps[0] = NULL; + ptrace_put_breakpoints(child); + return PTR_ERR(bp); + } + + ptrace_put_breakpoints(child); + return 1; +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ + + if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) + return -EINVAL; + + if (child->thread.hw_brk.address) + return -ENOSPC; - child->thread.dabr = dabr; - child->thread.dabrx = DABRX_ALL; + child->thread.hw_brk = brk; return 1; #endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */ } -static long ppc_del_hwdebug(struct task_struct *child, long addr, long data) +static long ppc_del_hwdebug(struct task_struct *child, long data) { +#ifdef CONFIG_HAVE_HW_BREAKPOINT + int ret = 0; + struct thread_struct *thread = &(child->thread); + struct perf_event *bp; +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ #ifdef CONFIG_PPC_ADV_DEBUG_REGS int rc; @@ -1426,10 +1508,26 @@ static long ppc_del_hwdebug(struct task_struct *child, long addr, long data) #else if (data != 1) return -EINVAL; - if (child->thread.dabr == 0) + +#ifdef CONFIG_HAVE_HW_BREAKPOINT + if (ptrace_get_breakpoints(child) < 0) + return -ESRCH; + + bp = thread->ptrace_bps[0]; + if (bp) { + unregister_hw_breakpoint(bp); + thread->ptrace_bps[0] = NULL; + } else + ret = -ENOENT; + ptrace_put_breakpoints(child); + return ret; +#else /* CONFIG_HAVE_HW_BREAKPOINT */ + if (child->thread.hw_brk.address == 0) return -ENOENT; - child->thread.dabr = 0; + child->thread.hw_brk.address = 0; + child->thread.hw_brk.type = 0; +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ return 0; #endif @@ -1536,7 +1634,11 @@ long arch_ptrace(struct task_struct *child, long request, dbginfo.data_bp_alignment = 4; #endif dbginfo.sizeof_condition = 0; +#ifdef CONFIG_HAVE_HW_BREAKPOINT + dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE; +#else dbginfo.features = 0; +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ if (!access_ok(VERIFY_WRITE, datavp, 
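With the perf-backed path above, PPC_PTRACE_SETHWDEBUG can accept small inclusive address ranges in addition to exact addresses, and PPC_PTRACE_GETHWDBGINFO now advertises this through PPC_DEBUG_FEATURE_DATA_BP_RANGE. A hedged tracer-side sketch using the struct ppc_hw_breakpoint layout from <asm/ptrace.h>, error handling elided; the matching teardown is ptrace(PPC_PTRACE_DELHWDEBUG, pid, 0, slot):

        #include <string.h>
        #include <sys/ptrace.h>
        #include <sys/types.h>
        #include <asm/ptrace.h>         /* struct ppc_hw_breakpoint, PPC_BREAKPOINT_* */

        /* Ask for a write watchpoint covering [start, end] on a stopped tracee.
         * Returns the slot number (1) on success, -1 with errno set on failure. */
        static long set_range_watchpoint(pid_t pid, unsigned long start,
                                         unsigned long end)
        {
                struct ppc_hw_breakpoint bp;

                memset(&bp, 0, sizeof(bp));
                bp.version = 1;
                bp.trigger_type = PPC_BREAKPOINT_TRIGGER_WRITE;
                bp.addr_mode = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE;
                bp.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
                bp.addr = start;
                bp.addr2 = end;         /* the kernel computes the length as addr2 - addr */

                return ptrace(PPC_PTRACE_SETHWDEBUG, pid, NULL, &bp);
        }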
@@ -1563,11 +1665,14 @@ long arch_ptrace(struct task_struct *child, long request, } case PPC_PTRACE_DELHWDEBUG: { - ret = ppc_del_hwdebug(child, addr, data); + ret = ppc_del_hwdebug(child, data); break; } case PTRACE_GET_DEBUGREG: { +#ifndef CONFIG_PPC_ADV_DEBUG_REGS + unsigned long dabr_fake; +#endif ret = -EINVAL; /* We only support one DABR and no IABRS at the moment */ if (addr > 0) @@ -1575,7 +1680,9 @@ long arch_ptrace(struct task_struct *child, long request, #ifdef CONFIG_PPC_ADV_DEBUG_REGS ret = put_user(child->thread.dac1, datalp); #else - ret = put_user(child->thread.dabr, datalp); + dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) | + (child->thread.hw_brk.type & HW_BRK_TYPE_DABR)); + ret = put_user(dabr_fake, datalp); #endif break; } diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c index 8c21658719d..c0244e76683 100644 --- a/arch/powerpc/kernel/ptrace32.c +++ b/arch/powerpc/kernel/ptrace32.c @@ -252,6 +252,9 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, } case PTRACE_GET_DEBUGREG: { +#ifndef CONFIG_PPC_ADV_DEBUG_REGS + unsigned long dabr_fake; +#endif ret = -EINVAL; /* We only support one DABR and no IABRS at the moment */ if (addr > 0) @@ -259,7 +262,10 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, #ifdef CONFIG_PPC_ADV_DEBUG_REGS ret = put_user(child->thread.dac1, (u32 __user *)data); #else - ret = put_user(child->thread.dabr, (u32 __user *)data); + dabr_fake = ( + (child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) | + (child->thread.hw_brk.type & HW_BRK_TYPE_DABR)); + ret = put_user(dabr_fake, (u32 __user *)data); #endif break; } diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index fcec38241f7..1fd6e7b2f39 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -42,7 +42,6 @@ #include <asm/time.h> #include <asm/mmu.h> #include <asm/topology.h> -#include <asm/pSeries_reconfig.h> struct rtas_t rtas = { .lock = __ARCH_SPIN_LOCK_UNLOCKED diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index 20b0120db0c..c642f013298 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c @@ -191,7 +191,7 @@ static void free_flash_list(struct flash_block_list *f) static int rtas_flash_release(struct inode *inode, struct file *file) { - struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode); + struct proc_dir_entry *dp = PDE(file_inode(file)); struct rtas_update_flash_t *uf; uf = (struct rtas_update_flash_t *) dp->data; @@ -253,7 +253,7 @@ static void get_flash_status_msg(int status, char *buf) static ssize_t rtas_flash_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode); + struct proc_dir_entry *dp = PDE(file_inode(file)); struct rtas_update_flash_t *uf; char msg[RTAS_MSG_MAXLEN]; @@ -282,7 +282,7 @@ void rtas_block_ctor(void *ptr) static ssize_t rtas_flash_write(struct file *file, const char __user *buffer, size_t count, loff_t *off) { - struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode); + struct proc_dir_entry *dp = PDE(file_inode(file)); struct rtas_update_flash_t *uf; char *p; int next_free; @@ -374,7 +374,7 @@ static void manage_flash(struct rtas_manage_flash_t *args_buf) static ssize_t manage_flash_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode); + struct proc_dir_entry *dp = 
PDE(file_inode(file)); struct rtas_manage_flash_t *args_buf; char msg[RTAS_MSG_MAXLEN]; int msglen; @@ -391,7 +391,7 @@ static ssize_t manage_flash_read(struct file *file, char __user *buf, static ssize_t manage_flash_write(struct file *file, const char __user *buf, size_t count, loff_t *off) { - struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode); + struct proc_dir_entry *dp = PDE(file_inode(file)); struct rtas_manage_flash_t *args_buf; const char reject_str[] = "0"; const char commit_str[] = "1"; @@ -462,7 +462,7 @@ static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf, static ssize_t validate_flash_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode); + struct proc_dir_entry *dp = PDE(file_inode(file)); struct rtas_validate_flash_t *args_buf; char msg[RTAS_MSG_MAXLEN]; int msglen; @@ -477,7 +477,7 @@ static ssize_t validate_flash_read(struct file *file, char __user *buf, static ssize_t validate_flash_write(struct file *file, const char __user *buf, size_t count, loff_t *off) { - struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode); + struct proc_dir_entry *dp = PDE(file_inode(file)); struct rtas_validate_flash_t *args_buf; int rc; @@ -526,7 +526,7 @@ done: static int validate_flash_release(struct inode *inode, struct file *file) { - struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode); + struct proc_dir_entry *dp = PDE(file_inode(file)); struct rtas_validate_flash_t *args_buf; args_buf = (struct rtas_validate_flash_t *) dp->data; @@ -650,10 +650,8 @@ static int initialize_flash_pde_data(const char *rtas_call_name, int token; dp->data = kzalloc(buf_size, GFP_KERNEL); - if (dp->data == NULL) { - remove_flash_pde(dp); + if (dp->data == NULL) return -ENOMEM; - } /* * This code assumes that the status int is the first member of the diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c index 6de63e3250b..71cb20d6ec6 100644 --- a/arch/powerpc/kernel/rtas_pci.c +++ b/arch/powerpc/kernel/rtas_pci.c @@ -209,7 +209,7 @@ void __init init_pci_config_tokens (void) ibm_write_pci_config = rtas_token("ibm,write-pci-config"); } -unsigned long __devinit get_phb_buid (struct device_node *phb) +unsigned long get_phb_buid (struct device_node *phb) { struct resource r; @@ -237,7 +237,7 @@ static int phb_set_bus_ranges(struct device_node *dev, return 0; } -int __devinit rtas_setup_phb(struct pci_controller *phb) +int rtas_setup_phb(struct pci_controller *phb) { struct device_node *dev = phb->dn; diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index efb6a41b313..75fbaceb5c8 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -156,6 +156,15 @@ early_param("smt-enabled", early_smt_enabled); #define check_smt_enabled() #endif /* CONFIG_SMP */ +/** Fix up paca fields required for the boot cpu */ +static void fixup_boot_paca(void) +{ + /* The boot cpu is started */ + get_paca()->cpu_start = 1; + /* Allow percpu accesses to work until we setup percpu data */ + get_paca()->data_offset = 0; +} + /* * Early initialization entry point. This is called by head.S * with MMU translation disabled. We rely on the "feature" of @@ -177,6 +186,8 @@ early_param("smt-enabled", early_smt_enabled); void __init early_setup(unsigned long dt_ptr) { + static __initdata struct paca_struct boot_paca; + /* -------- printk is _NOT_ safe to use here ! 
------- */ /* Identify CPU type */ @@ -185,6 +196,7 @@ void __init early_setup(unsigned long dt_ptr) /* Assume we're on cpu 0 for now. Don't write to the paca yet! */ initialise_paca(&boot_paca, 0); setup_paca(&boot_paca); + fixup_boot_paca(); /* Initialize lockdep early or else spinlocks will blow */ lockdep_init(); @@ -205,11 +217,7 @@ void __init early_setup(unsigned long dt_ptr) /* Now we know the logical id of our boot cpu, setup the paca. */ setup_paca(&paca[boot_cpuid]); - - /* Fix up paca fields required for the boot cpu */ - get_paca()->cpu_start = 1; - /* Allow percpu accesses to "work" until we setup percpu data */ - get_paca()->data_offset = 0; + fixup_boot_paca(); /* Probe the machine type */ probe_machine(); @@ -601,6 +609,11 @@ void __init setup_arch(char **cmdline_p) kvm_linear_init(); + /* Interrupt code needs to be 64K-aligned */ + if ((unsigned long)_stext & 0xffff) + panic("Kernelbase not 64K-aligned (0x%lx)!\n", + (unsigned long)_stext); + ppc64_boot_msg(0x15, "Setup Done"); } diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index a2dc75793bd..cf12eae02de 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -130,8 +130,9 @@ static int do_signal(struct pt_regs *regs) * user space. The DABR will have been cleared if it * triggered inside the kernel. */ - if (current->thread.dabr) - set_dabr(current->thread.dabr, current->thread.dabrx); + if (current->thread.hw_brk.address && + current->thread.hw_brk.type) + set_breakpoint(&current->thread.hw_brk); #endif /* Re-enable the breakpoints for the signal stack */ thread_change_pc(current, regs); @@ -158,10 +159,8 @@ static int do_signal(struct pt_regs *regs) void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) { - if (thread_info_flags & _TIF_UPROBE) { - clear_thread_flag(TIF_UPROBE); + if (thread_info_flags & _TIF_UPROBE) uprobe_notify_resume(regs); - } if (thread_info_flags & _TIF_SIGPENDING) do_signal(regs); @@ -171,10 +170,3 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) tracehook_notify_resume(regs); } } - -long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, - unsigned long r5, unsigned long r6, unsigned long r7, - unsigned long r8, struct pt_regs *regs) -{ - return do_sigaltstack(uss, uoss, regs->gpr[1]); -} diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h index e00acb41393..ec84c901cea 100644 --- a/arch/powerpc/kernel/signal.h +++ b/arch/powerpc/kernel/signal.h @@ -25,13 +25,21 @@ extern int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, extern unsigned long copy_fpr_to_user(void __user *to, struct task_struct *task); +extern unsigned long copy_transact_fpr_to_user(void __user *to, + struct task_struct *task); extern unsigned long copy_fpr_from_user(struct task_struct *task, void __user *from); +extern unsigned long copy_transact_fpr_from_user(struct task_struct *task, + void __user *from); #ifdef CONFIG_VSX extern unsigned long copy_vsx_to_user(void __user *to, struct task_struct *task); +extern unsigned long copy_transact_vsx_to_user(void __user *to, + struct task_struct *task); extern unsigned long copy_vsx_from_user(struct task_struct *task, void __user *from); +extern unsigned long copy_transact_vsx_from_user(struct task_struct *task, + void __user *from); #endif #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 804e323c139..3acb28e245b 100644 --- a/arch/powerpc/kernel/signal_32.c +++ 
b/arch/powerpc/kernel/signal_32.c @@ -43,6 +43,7 @@ #include <asm/sigcontext.h> #include <asm/vdso.h> #include <asm/switch_to.h> +#include <asm/tm.h> #ifdef CONFIG_PPC64 #include "ppc32.h" #include <asm/unistd.h> @@ -56,10 +57,7 @@ #undef DEBUG_SIG #ifdef CONFIG_PPC64 -#define sys_sigsuspend compat_sys_sigsuspend -#define sys_rt_sigsuspend compat_sys_rt_sigsuspend #define sys_rt_sigreturn compat_sys_rt_sigreturn -#define sys_sigaction compat_sys_sigaction #define sys_swapcontext compat_sys_swapcontext #define sys_sigreturn compat_sys_sigreturn @@ -68,6 +66,8 @@ #define mcontext mcontext32 #define ucontext ucontext32 +#define __save_altstack __compat_save_altstack + /* * Userspace code may pass a ucontext which doesn't include VSX added * at the end. We need to check for this case. @@ -130,23 +130,6 @@ static inline int get_sigset_t(sigset_t *set, return 0; } -static inline int get_old_sigaction(struct k_sigaction *new_ka, - struct old_sigaction __user *act) -{ - compat_old_sigset_t mask; - compat_uptr_t handler, restorer; - - if (get_user(handler, &act->sa_handler) || - __get_user(restorer, &act->sa_restorer) || - __get_user(new_ka->sa.sa_flags, &act->sa_flags) || - __get_user(mask, &act->sa_mask)) - return -EFAULT; - new_ka->sa.sa_handler = compat_ptr(handler); - new_ka->sa.sa_restorer = compat_ptr(restorer); - siginitset(&new_ka->sa.sa_mask, mask); - return 0; -} - #define to_user_ptr(p) ptr_to_compat(p) #define from_user_ptr(p) compat_ptr(p) @@ -196,21 +179,6 @@ static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset) return copy_from_user(set, uset, sizeof(*uset)); } -static inline int get_old_sigaction(struct k_sigaction *new_ka, - struct old_sigaction __user *act) -{ - old_sigset_t mask; - - if (!access_ok(VERIFY_READ, act, sizeof(*act)) || - __get_user(new_ka->sa.sa_handler, &act->sa_handler) || - __get_user(new_ka->sa.sa_restorer, &act->sa_restorer) || - __get_user(new_ka->sa.sa_flags, &act->sa_flags) || - __get_user(mask, &act->sa_mask)) - return -EFAULT; - siginitset(&new_ka->sa.sa_mask, mask); - return 0; -} - #define to_user_ptr(p) ((unsigned long)(p)) #define from_user_ptr(p) ((void __user *)(p)) @@ -234,50 +202,8 @@ static inline int restore_general_regs(struct pt_regs *regs, return -EFAULT; return 0; } - -#endif /* CONFIG_PPC64 */ - -/* - * Atomically swap in the new signal mask, and wait for a signal. - */ -long sys_sigsuspend(old_sigset_t mask) -{ - sigset_t blocked; - siginitset(&blocked, mask); - return sigsuspend(&blocked); -} - -long sys_sigaction(int sig, struct old_sigaction __user *act, - struct old_sigaction __user *oact) -{ - struct k_sigaction new_ka, old_ka; - int ret; - -#ifdef CONFIG_PPC64 - if (sig < 0) - sig = -sig; #endif - if (act) { - if (get_old_sigaction(&new_ka, act)) - return -EFAULT; - } - - ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); - if (!ret && oact) { - if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || - __put_user(to_user_ptr(old_ka.sa.sa_handler), - &oact->sa_handler) || - __put_user(to_user_ptr(old_ka.sa.sa_restorer), - &oact->sa_restorer) || - __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || - __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) - return -EFAULT; - } - - return ret; -} - /* * When we have signals to deliver, we set up on the * user stack, going down from the original stack pointer: @@ -293,6 +219,10 @@ long sys_sigaction(int sig, struct old_sigaction __user *act, struct sigframe { struct sigcontext sctx; /* the sigcontext */ struct mcontext mctx; /* all the register values */ +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + struct sigcontext sctx_transact; + struct mcontext mctx_transact; +#endif /* * Programs using the rs6000/xcoff abi can save up to 19 gp * regs and 18 fp regs below sp before decrementing it. @@ -321,6 +251,9 @@ struct rt_sigframe { struct siginfo info; #endif struct ucontext uc; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + struct ucontext uc_transact; +#endif /* * Programs using the rs6000/xcoff abi can save up to 19 gp * regs and 18 fp regs below sp before decrementing it. @@ -381,6 +314,61 @@ unsigned long copy_vsx_from_user(struct task_struct *task, task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i]; return 0; } + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +unsigned long copy_transact_fpr_to_user(void __user *to, + struct task_struct *task) +{ + double buf[ELF_NFPREG]; + int i; + + /* save FPR copy to local buffer then write to the thread_struct */ + for (i = 0; i < (ELF_NFPREG - 1) ; i++) + buf[i] = task->thread.TS_TRANS_FPR(i); + memcpy(&buf[i], &task->thread.transact_fpscr, sizeof(double)); + return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double)); +} + +unsigned long copy_transact_fpr_from_user(struct task_struct *task, + void __user *from) +{ + double buf[ELF_NFPREG]; + int i; + + if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double))) + return 1; + for (i = 0; i < (ELF_NFPREG - 1) ; i++) + task->thread.TS_TRANS_FPR(i) = buf[i]; + memcpy(&task->thread.transact_fpscr, &buf[i], sizeof(double)); + + return 0; +} + +unsigned long copy_transact_vsx_to_user(void __user *to, + struct task_struct *task) +{ + double buf[ELF_NVSRHALFREG]; + int i; + + /* save FPR copy to local buffer then write to the thread_struct */ + for (i = 0; i < ELF_NVSRHALFREG; i++) + buf[i] = task->thread.transact_fpr[i][TS_VSRLOWOFFSET]; + return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double)); +} + +unsigned long copy_transact_vsx_from_user(struct task_struct *task, + void __user *from) +{ + double buf[ELF_NVSRHALFREG]; + int i; + + if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double))) + return 1; + for (i = 0; i < ELF_NVSRHALFREG ; i++) + task->thread.transact_fpr[i][TS_VSRLOWOFFSET] = buf[i]; + return 0; +} +#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ #else inline unsigned long copy_fpr_to_user(void __user *to, struct task_struct *task) @@ -395,6 +383,22 @@ inline unsigned long copy_fpr_from_user(struct task_struct *task, return __copy_from_user(task->thread.fpr, from, ELF_NFPREG * sizeof(double)); } + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +inline unsigned long copy_transact_fpr_to_user(void __user *to, + struct task_struct *task) +{ + return __copy_to_user(to, task->thread.transact_fpr, + ELF_NFPREG * sizeof(double)); +} + +inline unsigned long copy_transact_fpr_from_user(struct task_struct *task, + void __user *from) +{ + return 
__copy_from_user(task->thread.transact_fpr, from, + ELF_NFPREG * sizeof(double)); +} +#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ #endif /* @@ -483,6 +487,156 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, return 0; } +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +/* + * Save the current user registers on the user stack. + * We only save the altivec/spe registers if the process has used + * altivec/spe instructions at some point. + * We also save the transactional registers to a second ucontext in the + * frame. + * + * See save_user_regs() and signal_64.c:setup_tm_sigcontexts(). + */ +static int save_tm_user_regs(struct pt_regs *regs, + struct mcontext __user *frame, + struct mcontext __user *tm_frame, int sigret) +{ + unsigned long msr = regs->msr; + + /* tm_reclaim rolls back all reg states, updating thread.ckpt_regs, + * thread.transact_fpr[], thread.transact_vr[], etc. + */ + tm_enable(); + tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL); + + /* Make sure floating point registers are stored in regs */ + flush_fp_to_thread(current); + + /* Save both sets of general registers */ + if (save_general_regs(&current->thread.ckpt_regs, frame) + || save_general_regs(regs, tm_frame)) + return 1; + + /* Stash the top half of the 64bit MSR into the 32bit MSR word + * of the transactional mcontext. This way we have a backward-compatible + * MSR in the 'normal' (checkpointed) mcontext and additionally one can + * also look at what type of transaction (T or S) was active at the + * time of the signal. + */ + if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR])) + return 1; + +#ifdef CONFIG_ALTIVEC + /* save altivec registers */ + if (current->thread.used_vr) { + flush_altivec_to_thread(current); + if (__copy_to_user(&frame->mc_vregs, current->thread.vr, + ELF_NVRREG * sizeof(vector128))) + return 1; + if (msr & MSR_VEC) { + if (__copy_to_user(&tm_frame->mc_vregs, + current->thread.transact_vr, + ELF_NVRREG * sizeof(vector128))) + return 1; + } else { + if (__copy_to_user(&tm_frame->mc_vregs, + current->thread.vr, + ELF_NVRREG * sizeof(vector128))) + return 1; + } + + /* set MSR_VEC in the saved MSR value to indicate that + * frame->mc_vregs contains valid data + */ + msr |= MSR_VEC; + } + + /* We always copy to/from vrsave, it's 0 if we don't have or don't + * use altivec. Since VSCR only contains 32 bits saved in the least + * significant bits of a vector, we "cheat" and stuff VRSAVE in the + * most significant bits of that same vector. --BenH + */ + if (__put_user(current->thread.vrsave, + (u32 __user *)&frame->mc_vregs[32])) + return 1; + if (msr & MSR_VEC) { + if (__put_user(current->thread.transact_vrsave, + (u32 __user *)&tm_frame->mc_vregs[32])) + return 1; + } else { + if (__put_user(current->thread.vrsave, + (u32 __user *)&tm_frame->mc_vregs[32])) + return 1; + } +#endif /* CONFIG_ALTIVEC */ + + if (copy_fpr_to_user(&frame->mc_fregs, current)) + return 1; + if (msr & MSR_FP) { + if (copy_transact_fpr_to_user(&tm_frame->mc_fregs, current)) + return 1; + } else { + if (copy_fpr_to_user(&tm_frame->mc_fregs, current)) + return 1; + } + +#ifdef CONFIG_VSX + /* + * Copy VSR 0-31 upper half from thread_struct to local + * buffer, then write that to userspace. 
Also set MSR_VSX in + * the saved MSR value to indicate that frame->mc_vregs + * contains valid data + */ + if (current->thread.used_vsr) { + __giveup_vsx(current); + if (copy_vsx_to_user(&frame->mc_vsregs, current)) + return 1; + if (msr & MSR_VSX) { + if (copy_transact_vsx_to_user(&tm_frame->mc_vsregs, + current)) + return 1; + } else { + if (copy_vsx_to_user(&tm_frame->mc_vsregs, current)) + return 1; + } + + msr |= MSR_VSX; + } +#endif /* CONFIG_VSX */ +#ifdef CONFIG_SPE + /* SPE regs are not checkpointed with TM, so this section is + * simply the same as in save_user_regs(). + */ + if (current->thread.used_spe) { + flush_spe_to_thread(current); + if (__copy_to_user(&frame->mc_vregs, current->thread.evr, + ELF_NEVRREG * sizeof(u32))) + return 1; + /* set MSR_SPE in the saved MSR value to indicate that + * frame->mc_vregs contains valid data */ + msr |= MSR_SPE; + } + + /* We always copy to/from spefscr */ + if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG)) + return 1; +#endif /* CONFIG_SPE */ + + if (__put_user(msr, &frame->mc_gregs[PT_MSR])) + return 1; + if (sigret) { + /* Set up the sigreturn trampoline: li r0,sigret; sc */ + if (__put_user(0x38000000UL + sigret, &frame->tramp[0]) + || __put_user(0x44000002UL, &frame->tramp[1])) + return 1; + flush_icache_range((unsigned long) &frame->tramp[0], + (unsigned long) &frame->tramp[2]); + } + + return 0; +} +#endif + /* * Restore the current user register values from the user stack, * (except for MSR). @@ -588,90 +742,140 @@ static long restore_user_regs(struct pt_regs *regs, return 0; } -#ifdef CONFIG_PPC64 -long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act, - struct sigaction32 __user *oact, size_t sigsetsize) +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +/* + * Restore the current user register values from the user stack, except for + * MSR, and recheckpoint the original checkpointed register state for processes + * in transactions. + */ +static long restore_tm_user_regs(struct pt_regs *regs, + struct mcontext __user *sr, + struct mcontext __user *tm_sr) { - struct k_sigaction new_ka, old_ka; - int ret; + long err; + unsigned long msr; +#ifdef CONFIG_VSX + int i; +#endif - /* XXX: Don't preclude handling different sized sigset_t's. */ - if (sigsetsize != sizeof(compat_sigset_t)) - return -EINVAL; + /* + * restore general registers but not including MSR or SOFTE. Also + * take care of keeping r2 (TLS) intact if not a signal. + * See comment in signal_64.c:restore_tm_sigcontexts(); + * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR + * were set by the signal delivery. + */ + err = restore_general_regs(regs, tm_sr); + err |= restore_general_regs(&current->thread.ckpt_regs, sr); - if (act) { - compat_uptr_t handler; + err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]); - ret = get_user(handler, &act->sa_handler); - new_ka.sa.sa_handler = compat_ptr(handler); - ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask); - ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); - if (ret) - return -EFAULT; - } + err |= __get_user(msr, &sr->mc_gregs[PT_MSR]); + if (err) + return 1; - ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
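Because the checkpointed mcontext keeps a backward-compatible 32-bit MSR while the transactional mcontext's MSR slot carries the top half (see the stash in save_tm_user_regs() above), a 32-bit handler that wants the whole 64-bit MSR can stitch it back together from the two frames. A sketch under the frame layout added by this patch, assuming the signal arrived while a transaction was active so that uc_link points at the transactional ucontext, as set up in handle_rt_signal32(); the type and field names follow the kernel's 32-bit ucontext/mcontext definitions, not glibc's:

        /* Handler-side illustration only. */
        static unsigned long long tm_full_msr(struct ucontext *uc)
        {
                struct ucontext *uc_tm = uc->uc_link;   /* transactional ucontext */
                struct mcontext *ck = uc->uc_regs;      /* checkpointed registers */
                struct mcontext *tm = uc_tm->uc_regs;   /* transactional registers */

                return ((unsigned long long)tm->mc_gregs[PT_MSR] << 32) |
                       (unsigned int)ck->mc_gregs[PT_MSR];
        }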
&old_ka : NULL); - if (!ret && oact) { - ret = put_user(to_user_ptr(old_ka.sa.sa_handler), &oact->sa_handler); - ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask); - ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); - } - return ret; -} + /* Restore the previous little-endian mode */ + regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); -/* - * Note: it is necessary to treat how as an unsigned int, with the - * corresponding cast to a signed int to insure that the proper - * conversion (sign extension) between the register representation - * of a signed int (msr in 32-bit mode) and the register representation - * of a signed int (msr in 64-bit mode) is performed. - */ -long compat_sys_rt_sigprocmask(u32 how, compat_sigset_t __user *set, - compat_sigset_t __user *oset, size_t sigsetsize) -{ - sigset_t s; - sigset_t __user *up; - int ret; - mm_segment_t old_fs = get_fs(); + /* + * Do this before updating the thread state in + * current->thread.fpr/vr/evr. That way, if we get preempted + * and another task grabs the FPU/Altivec/SPE, it won't be + * tempted to save the current CPU state into the thread_struct + * and corrupt what we are writing there. + */ + discard_lazy_cpu_state(); - if (set) { - if (get_sigset_t(&s, set)) - return -EFAULT; +#ifdef CONFIG_ALTIVEC + regs->msr &= ~MSR_VEC; + if (msr & MSR_VEC) { + /* restore altivec registers from the stack */ + if (__copy_from_user(current->thread.vr, &sr->mc_vregs, + sizeof(sr->mc_vregs)) || + __copy_from_user(current->thread.transact_vr, + &tm_sr->mc_vregs, + sizeof(sr->mc_vregs))) + return 1; + } else if (current->thread.used_vr) { + memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128)); + memset(current->thread.transact_vr, 0, + ELF_NVRREG * sizeof(vector128)); } - set_fs(KERNEL_DS); - /* This is valid because of the set_fs() */ - up = (sigset_t __user *) &s; - ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL, - sigsetsize); - set_fs(old_fs); - if (ret) - return ret; - if (oset) { - if (put_sigset_t(oset, &s)) - return -EFAULT; - } - return 0; -} + /* Always get VRSAVE back */ + if (__get_user(current->thread.vrsave, + (u32 __user *)&sr->mc_vregs[32]) || + __get_user(current->thread.transact_vrsave, + (u32 __user *)&tm_sr->mc_vregs[32])) + return 1; +#endif /* CONFIG_ALTIVEC */ -long compat_sys_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize) -{ - sigset_t s; - int ret; - mm_segment_t old_fs = get_fs(); - - set_fs(KERNEL_DS); - /* The __user pointer cast is valid because of the set_fs() */ - ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize); - set_fs(old_fs); - if (!ret) { - if (put_sigset_t(set, &s)) - return -EFAULT; + regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1); + + if (copy_fpr_from_user(current, &sr->mc_fregs) || + copy_transact_fpr_from_user(current, &tm_sr->mc_fregs)) + return 1; + +#ifdef CONFIG_VSX + regs->msr &= ~MSR_VSX; + if (msr & MSR_VSX) { + /* + * Restore altivec registers from the stack to a local + * buffer, then write this out to the thread_struct + */ + if (copy_vsx_from_user(current, &sr->mc_vsregs) || + copy_transact_vsx_from_user(current, &tm_sr->mc_vsregs)) + return 1; + } else if (current->thread.used_vsr) + for (i = 0; i < 32 ; i++) { + current->thread.fpr[i][TS_VSRLOWOFFSET] = 0; + current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0; + } +#endif /* CONFIG_VSX */ + +#ifdef CONFIG_SPE + /* SPE regs are not checkpointed with TM, so this section is + * simply the same as in restore_user_regs(). 
+ */ + regs->msr &= ~MSR_SPE; + if (msr & MSR_SPE) { + if (__copy_from_user(current->thread.evr, &sr->mc_vregs, + ELF_NEVRREG * sizeof(u32))) + return 1; + } else if (current->thread.used_spe) + memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32)); + + /* Always get SPEFSCR back */ + if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + + ELF_NEVRREG)) + return 1; +#endif /* CONFIG_SPE */ + + /* Now, recheckpoint. This loads up all of the checkpointed (older) + * registers, including FP and V[S]Rs. After recheckpointing, the + * transactional versions should be loaded. + */ + tm_enable(); + /* This loads the checkpointed FP/VEC state, if used */ + tm_recheckpoint(¤t->thread, msr); + /* The task has moved into TM state S, so ensure MSR reflects this */ + regs->msr = (regs->msr & ~MSR_TS_MASK) | MSR_TS_S; + + /* This loads the speculative FP/VEC state, if used */ + if (msr & MSR_FP) { + do_load_up_transact_fpu(¤t->thread); + regs->msr |= (MSR_FP | current->thread.fpexc_mode); + } + if (msr & MSR_VEC) { + do_load_up_transact_altivec(¤t->thread); + regs->msr |= MSR_VEC; } - return ret; -} + return 0; +} +#endif +#ifdef CONFIG_PPC64 int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s) { int err; @@ -740,79 +944,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from) return 0; } - -/* - * Note: it is necessary to treat pid and sig as unsigned ints, with the - * corresponding cast to a signed int to insure that the proper conversion - * (sign extension) between the register representation of a signed int - * (msr in 32-bit mode) and the register representation of a signed int - * (msr in 64-bit mode) is performed. - */ -long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo) -{ - siginfo_t info; - int ret; - mm_segment_t old_fs = get_fs(); - - ret = copy_siginfo_from_user32(&info, uinfo); - if (unlikely(ret)) - return ret; - - set_fs (KERNEL_DS); - /* The __user pointer cast is valid becasuse of the set_fs() */ - ret = sys_rt_sigqueueinfo((int)pid, (int)sig, (siginfo_t __user *) &info); - set_fs (old_fs); - return ret; -} -/* - * Start Alternate signal stack support - * - * System Calls - * sigaltatck compat_sys_sigaltstack - */ - -int compat_sys_sigaltstack(u32 __new, u32 __old, int r5, - int r6, int r7, int r8, struct pt_regs *regs) -{ - stack_32_t __user * newstack = compat_ptr(__new); - stack_32_t __user * oldstack = compat_ptr(__old); - stack_t uss, uoss; - int ret; - mm_segment_t old_fs; - unsigned long sp; - compat_uptr_t ss_sp; - - /* - * set sp to the user stack on entry to the system call - * the system call router sets R9 to the saved registers - */ - sp = regs->gpr[1]; - - /* Put new stack info in local 64 bit stack struct */ - if (newstack) { - if (get_user(ss_sp, &newstack->ss_sp) || - __get_user(uss.ss_flags, &newstack->ss_flags) || - __get_user(uss.ss_size, &newstack->ss_size)) - return -EFAULT; - uss.ss_sp = compat_ptr(ss_sp); - } - - old_fs = get_fs(); - set_fs(KERNEL_DS); - /* The __user pointer casts are valid because of the set_fs() */ - ret = do_sigaltstack( - newstack ? (stack_t __user *) &uss : NULL, - oldstack ? 
(stack_t __user *) &uoss : NULL, - sp); - set_fs(old_fs); - /* Copy the stack information to the user output buffer */ - if (!ret && oldstack && - (put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) || - __put_user(uoss.ss_flags, &oldstack->ss_flags) || - __put_user(uoss.ss_size, &oldstack->ss_size))) - return -EFAULT; - return ret; -} #endif /* CONFIG_PPC64 */ /* @@ -827,6 +958,8 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, struct mcontext __user *frame; void __user *addr; unsigned long newsp = 0; + int sigret; + unsigned long tramp; /* Set up Signal Frame */ /* Put a Real Time Context onto stack */ @@ -838,11 +971,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, /* Put the siginfo & fill in most of the ucontext */ if (copy_siginfo_to_user(&rt_sf->info, info) || __put_user(0, &rt_sf->uc.uc_flags) - || __put_user(0, &rt_sf->uc.uc_link) - || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp) - || __put_user(sas_ss_flags(regs->gpr[1]), - &rt_sf->uc.uc_stack.ss_flags) - || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size) + || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1]) || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext), &rt_sf->uc.uc_regs) || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset)) @@ -852,14 +981,37 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, frame = &rt_sf->uc.uc_mcontext; addr = frame; if (vdso32_rt_sigtramp && current->mm->context.vdso_base) { - if (save_user_regs(regs, frame, 0, 1)) - goto badframe; - regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp; + sigret = 0; + tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp; } else { - if (save_user_regs(regs, frame, __NR_rt_sigreturn, 1)) + sigret = __NR_rt_sigreturn; + tramp = (unsigned long) frame->tramp; + } + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (MSR_TM_ACTIVE(regs->msr)) { + if (save_tm_user_regs(regs, &rt_sf->uc.uc_mcontext, + &rt_sf->uc_transact.uc_mcontext, sigret)) + goto badframe; + } + else +#endif + if (save_user_regs(regs, frame, sigret, 1)) + goto badframe; + regs->link = tramp; + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (MSR_TM_ACTIVE(regs->msr)) { + if (__put_user((unsigned long)&rt_sf->uc_transact, + &rt_sf->uc.uc_link) + || __put_user(to_user_ptr(&rt_sf->uc_transact.uc_mcontext), + &rt_sf->uc_transact.uc_regs)) goto badframe; - regs->link = (unsigned long) frame->tramp; } + else +#endif + if (__put_user(0, &rt_sf->uc.uc_link)) + goto badframe; current->thread.fpscr.val = 0; /* turn off all fp exceptions */ @@ -878,6 +1030,13 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, regs->nip = (unsigned long) ka->sa.sa_handler; /* enter the signal handler in big-endian mode */ regs->msr &= ~MSR_LE; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* Remove TM bits from thread's MSR. 
The MSR in the sigcontext + * just indicates to userland that we were doing a transaction, but we + * don't want to return in transactional state: + */ + regs->msr &= ~MSR_TS_MASK; +#endif return 1; badframe: @@ -925,6 +1084,35 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int return 0; } +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +static int do_setcontext_tm(struct ucontext __user *ucp, + struct ucontext __user *tm_ucp, + struct pt_regs *regs) +{ + sigset_t set; + struct mcontext __user *mcp; + struct mcontext __user *tm_mcp; + u32 cmcp; + u32 tm_cmcp; + + if (get_sigset_t(&set, &ucp->uc_sigmask)) + return -EFAULT; + + if (__get_user(cmcp, &ucp->uc_regs) || + __get_user(tm_cmcp, &tm_ucp->uc_regs)) + return -EFAULT; + mcp = (struct mcontext __user *)(u64)cmcp; + tm_mcp = (struct mcontext __user *)(u64)tm_cmcp; + /* no need to check access_ok(mcp), since mcp < 4GB */ + + set_current_blocked(&set); + if (restore_tm_user_regs(regs, mcp, tm_mcp)) + return -EFAULT; + + return 0; +} +#endif + long sys_swapcontext(struct ucontext __user *old_ctx, struct ucontext __user *new_ctx, int ctx_size, int r6, int r7, int r8, struct pt_regs *regs) @@ -1020,7 +1208,12 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, struct pt_regs *regs) { struct rt_sigframe __user *rt_sf; - +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + struct ucontext __user *uc_transact; + unsigned long msr_hi; + unsigned long tmp; + int tm_restore = 0; +#endif /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; @@ -1028,6 +1221,34 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16); if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf))) goto bad; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (__get_user(tmp, &rt_sf->uc.uc_link)) + goto bad; + uc_transact = (struct ucontext __user *)(uintptr_t)tmp; + if (uc_transact) { + u32 cmcp; + struct mcontext __user *mcp; + + if (__get_user(cmcp, &uc_transact->uc_regs)) + return -EFAULT; + mcp = (struct mcontext __user *)(u64)cmcp; + /* The top 32 bits of the MSR are stashed in the transactional + * ucontext. */ + if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR])) + goto bad; + + if (MSR_TM_SUSPENDED(msr_hi<<32)) { + /* We only recheckpoint on return if we're + * transaction. + */ + tm_restore = 1; + if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs)) + goto bad; + } + } + if (!tm_restore) + /* Fall through, for non-TM restore */ +#endif if (do_setcontext(&rt_sf->uc, regs, 1)) goto bad; @@ -1039,14 +1260,11 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, * change it. -- paulus */ #ifdef CONFIG_PPC64 - /* - * We use the compat_sys_ version that does the 32/64 bits conversion - * and takes userland pointer directly. What about error checking ? - * nobody does any... - */ - compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs); + if (compat_restore_altstack(&rt_sf->uc.uc_stack)) + goto bad; #else - do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]); + if (restore_altstack(&rt_sf->uc.uc_stack)) + goto bad; #endif set_thread_flag(TIF_RESTOREALL); return 0; @@ -1162,7 +1380,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx, * always done it up until now so it is probably better not to * change it. 
-- paulus */ - do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]); + restore_altstack(&ctx->uc_stack); set_thread_flag(TIF_RESTOREALL); out: @@ -1179,6 +1397,8 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka, struct sigcontext __user *sc; struct sigframe __user *frame; unsigned long newsp = 0; + int sigret; + unsigned long tramp; /* Set up Signal Frame */ frame = get_sigframe(ka, regs, sizeof(*frame), 1); @@ -1201,14 +1421,25 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka, goto badframe; if (vdso32_sigtramp && current->mm->context.vdso_base) { - if (save_user_regs(regs, &frame->mctx, 0, 1)) - goto badframe; - regs->link = current->mm->context.vdso_base + vdso32_sigtramp; + sigret = 0; + tramp = current->mm->context.vdso_base + vdso32_sigtramp; } else { - if (save_user_regs(regs, &frame->mctx, __NR_sigreturn, 1)) + sigret = __NR_sigreturn; + tramp = (unsigned long) frame->mctx.tramp; + } + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (MSR_TM_ACTIVE(regs->msr)) { + if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact, + sigret)) goto badframe; - regs->link = (unsigned long) frame->mctx.tramp; } + else +#endif + if (save_user_regs(regs, &frame->mctx, sigret, 1)) + goto badframe; + + regs->link = tramp; current->thread.fpscr.val = 0; /* turn off all fp exceptions */ @@ -1223,7 +1454,13 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka, regs->nip = (unsigned long) ka->sa.sa_handler; /* enter the signal handler in big-endian mode */ regs->msr &= ~MSR_LE; - +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* Remove TM bits from thread's MSR. The MSR in the sigcontext + * just indicates to userland that we were doing a transaction, but we + * don't want to return in transactional state: + */ + regs->msr &= ~MSR_TS_MASK; +#endif return 1; badframe: diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index d183f8719a5..995f8543cb5 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -34,6 +34,7 @@ #include <asm/syscalls.h> #include <asm/vdso.h> #include <asm/switch_to.h> +#include <asm/tm.h> #include "signal.h" @@ -56,6 +57,9 @@ struct rt_sigframe { /* sys_rt_sigreturn requires the ucontext be the first field */ struct ucontext uc; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + struct ucontext uc_transact; +#endif unsigned long _unused[2]; unsigned int tramp[TRAMP_SIZE]; struct siginfo __user *pinfo; @@ -83,7 +87,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, * the context). This is very important because we must ensure we * don't lose the VRSAVE content that may have been set prior to * the process doing its first vector operation - * Userland shall check AT_HWCAP to know wether it can rely on the + * Userland shall check AT_HWCAP to know whether it can rely on the * v_regs pointer or not */ #ifdef CONFIG_ALTIVEC @@ -145,6 +149,145 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, return err; } +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +/* + * As above, but Transactional Memory is in use, so deliver sigcontexts + * containing checkpointed and transactional register states. + * + * To do this, we treclaim to gather both sets of registers and set up the + * 'normal' sigcontext registers with rolled-back register values such that a + * simple signal handler sees a correct checkpointed register state. 
+ * If interested, a TM-aware sighandler can examine the transactional registers + * in the 2nd sigcontext to determine the real origin of the signal. + */ +static long setup_tm_sigcontexts(struct sigcontext __user *sc, + struct sigcontext __user *tm_sc, + struct pt_regs *regs, + int signr, sigset_t *set, unsigned long handler) +{ + /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the + * process never used altivec yet (MSR_VEC is zero in pt_regs of + * the context). This is very important because we must ensure we + * don't lose the VRSAVE content that may have been set prior to + * the process doing its first vector operation + * Userland shall check AT_HWCAP to know wether it can rely on the + * v_regs pointer or not. + */ +#ifdef CONFIG_ALTIVEC + elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *) + (((unsigned long)sc->vmx_reserve + 15) & ~0xful); + elf_vrreg_t __user *tm_v_regs = (elf_vrreg_t __user *) + (((unsigned long)tm_sc->vmx_reserve + 15) & ~0xful); +#endif + unsigned long msr = regs->msr; + long err = 0; + + BUG_ON(!MSR_TM_ACTIVE(regs->msr)); + + /* tm_reclaim rolls back all reg states, saving checkpointed (older) + * GPRs to thread.ckpt_regs and (if used) FPRs to (newer) + * thread.transact_fp and/or VRs to (newer) thread.transact_vr. + * THEN we save out FP/VRs, if necessary, to the checkpointed (older) + * thread.fr[]/vr[]s. The transactional (newer) GPRs are on the + * stack, in *regs. + */ + tm_enable(); + tm_reclaim(¤t->thread, msr, TM_CAUSE_SIGNAL); + + flush_fp_to_thread(current); + +#ifdef CONFIG_ALTIVEC + err |= __put_user(v_regs, &sc->v_regs); + err |= __put_user(tm_v_regs, &tm_sc->v_regs); + + /* save altivec registers */ + if (current->thread.used_vr) { + flush_altivec_to_thread(current); + /* Copy 33 vec registers (vr0..31 and vscr) to the stack */ + err |= __copy_to_user(v_regs, current->thread.vr, + 33 * sizeof(vector128)); + /* If VEC was enabled there are transactional VRs valid too, + * else they're a copy of the checkpointed VRs. + */ + if (msr & MSR_VEC) + err |= __copy_to_user(tm_v_regs, + current->thread.transact_vr, + 33 * sizeof(vector128)); + else + err |= __copy_to_user(tm_v_regs, + current->thread.vr, + 33 * sizeof(vector128)); + + /* set MSR_VEC in the MSR value in the frame to indicate + * that sc->v_reg contains valid data. + */ + msr |= MSR_VEC; + } + /* We always copy to/from vrsave, it's 0 if we don't have or don't + * use altivec. + */ + err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]); + if (msr & MSR_VEC) + err |= __put_user(current->thread.transact_vrsave, + (u32 __user *)&tm_v_regs[33]); + else + err |= __put_user(current->thread.vrsave, + (u32 __user *)&tm_v_regs[33]); + +#else /* CONFIG_ALTIVEC */ + err |= __put_user(0, &sc->v_regs); + err |= __put_user(0, &tm_sc->v_regs); +#endif /* CONFIG_ALTIVEC */ + + /* copy fpr regs and fpscr */ + err |= copy_fpr_to_user(&sc->fp_regs, current); + if (msr & MSR_FP) + err |= copy_transact_fpr_to_user(&tm_sc->fp_regs, current); + else + err |= copy_fpr_to_user(&tm_sc->fp_regs, current); + +#ifdef CONFIG_VSX + /* + * Copy VSX low doubleword to local buffer for formatting, + * then out to userspace. Update v_regs to point after the + * VMX data. 
+ */ + if (current->thread.used_vsr) { + __giveup_vsx(current); + v_regs += ELF_NVRREG; + tm_v_regs += ELF_NVRREG; + + err |= copy_vsx_to_user(v_regs, current); + + if (msr & MSR_VSX) + err |= copy_transact_vsx_to_user(tm_v_regs, current); + else + err |= copy_vsx_to_user(tm_v_regs, current); + + /* set MSR_VSX in the MSR value in the frame to + * indicate that sc->vs_reg) contains valid data. + */ + msr |= MSR_VSX; + } +#endif /* CONFIG_VSX */ + + err |= __put_user(&sc->gp_regs, &sc->regs); + err |= __put_user(&tm_sc->gp_regs, &tm_sc->regs); + WARN_ON(!FULL_REGS(regs)); + err |= __copy_to_user(&tm_sc->gp_regs, regs, GP_REGS_SIZE); + err |= __copy_to_user(&sc->gp_regs, + ¤t->thread.ckpt_regs, GP_REGS_SIZE); + err |= __put_user(msr, &tm_sc->gp_regs[PT_MSR]); + err |= __put_user(msr, &sc->gp_regs[PT_MSR]); + err |= __put_user(signr, &sc->signal); + err |= __put_user(handler, &sc->handler); + if (set != NULL) + err |= __put_user(set->sig[0], &sc->oldmask); + + return err; +} +#endif + /* * Restore the sigcontext from the signal frame. */ @@ -241,6 +384,153 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig, return err; } +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +/* + * Restore the two sigcontexts from the frame of a transactional processes. + */ + +static long restore_tm_sigcontexts(struct pt_regs *regs, + struct sigcontext __user *sc, + struct sigcontext __user *tm_sc) +{ +#ifdef CONFIG_ALTIVEC + elf_vrreg_t __user *v_regs, *tm_v_regs; +#endif + unsigned long err = 0; + unsigned long msr; +#ifdef CONFIG_VSX + int i; +#endif + /* copy the GPRs */ + err |= __copy_from_user(regs->gpr, tm_sc->gp_regs, sizeof(regs->gpr)); + err |= __copy_from_user(¤t->thread.ckpt_regs, sc->gp_regs, + sizeof(regs->gpr)); + + /* + * TFHAR is restored from the checkpointed 'wound-back' ucontext's NIP. + * TEXASR was set by the signal delivery reclaim, as was TFIAR. + * Users doing anything abhorrent like thread-switching w/ signals for + * TM-Suspended code will have to back TEXASR/TFIAR up themselves. + * For the case of getting a signal and simply returning from it, + * we don't need to re-copy them here. + */ + err |= __get_user(regs->nip, &tm_sc->gp_regs[PT_NIP]); + err |= __get_user(current->thread.tm_tfhar, &sc->gp_regs[PT_NIP]); + + /* get MSR separately, transfer the LE bit if doing signal return */ + err |= __get_user(msr, &sc->gp_regs[PT_MSR]); + regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); + + /* The following non-GPR non-FPR non-VR state is also checkpointed: */ + err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]); + err |= __get_user(regs->link, &tm_sc->gp_regs[PT_LNK]); + err |= __get_user(regs->xer, &tm_sc->gp_regs[PT_XER]); + err |= __get_user(regs->ccr, &tm_sc->gp_regs[PT_CCR]); + err |= __get_user(current->thread.ckpt_regs.ctr, + &sc->gp_regs[PT_CTR]); + err |= __get_user(current->thread.ckpt_regs.link, + &sc->gp_regs[PT_LNK]); + err |= __get_user(current->thread.ckpt_regs.xer, + &sc->gp_regs[PT_XER]); + err |= __get_user(current->thread.ckpt_regs.ccr, + &sc->gp_regs[PT_CCR]); + + /* These regs are not checkpointed; they can go in 'regs'. */ + err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]); + err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]); + err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]); + err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]); + + /* + * Do this before updating the thread state in + * current->thread.fpr/vr. 
That way, if we get preempted + * and another task grabs the FPU/Altivec, it won't be + * tempted to save the current CPU state into the thread_struct + * and corrupt what we are writing there. + */ + discard_lazy_cpu_state(); + + /* + * Force reload of FP/VEC. + * This has to be done before copying stuff into current->thread.fpr/vr + * for the reasons explained in the previous comment. + */ + regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX); + +#ifdef CONFIG_ALTIVEC + err |= __get_user(v_regs, &sc->v_regs); + err |= __get_user(tm_v_regs, &tm_sc->v_regs); + if (err) + return err; + if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128))) + return -EFAULT; + if (tm_v_regs && !access_ok(VERIFY_READ, + tm_v_regs, 34 * sizeof(vector128))) + return -EFAULT; + /* Copy 33 vec registers (vr0..31 and vscr) from the stack */ + if (v_regs != 0 && tm_v_regs != 0 && (msr & MSR_VEC) != 0) { + err |= __copy_from_user(current->thread.vr, v_regs, + 33 * sizeof(vector128)); + err |= __copy_from_user(current->thread.transact_vr, tm_v_regs, + 33 * sizeof(vector128)); + } + else if (current->thread.used_vr) { + memset(current->thread.vr, 0, 33 * sizeof(vector128)); + memset(current->thread.transact_vr, 0, 33 * sizeof(vector128)); + } + /* Always get VRSAVE back */ + if (v_regs != 0 && tm_v_regs != 0) { + err |= __get_user(current->thread.vrsave, + (u32 __user *)&v_regs[33]); + err |= __get_user(current->thread.transact_vrsave, + (u32 __user *)&tm_v_regs[33]); + } + else { + current->thread.vrsave = 0; + current->thread.transact_vrsave = 0; + } +#endif /* CONFIG_ALTIVEC */ + /* restore floating point */ + err |= copy_fpr_from_user(current, &sc->fp_regs); + err |= copy_transact_fpr_from_user(current, &tm_sc->fp_regs); +#ifdef CONFIG_VSX + /* + * Get additional VSX data. Update v_regs to point after the + * VMX data. Copy VSX low doubleword from userspace to local + * buffer for formatting, then into the taskstruct. 
+ */ + if (v_regs && ((msr & MSR_VSX) != 0)) { + v_regs += ELF_NVRREG; + tm_v_regs += ELF_NVRREG; + err |= copy_vsx_from_user(current, v_regs); + err |= copy_transact_vsx_from_user(current, tm_v_regs); + } else { + for (i = 0; i < 32 ; i++) { + current->thread.fpr[i][TS_VSRLOWOFFSET] = 0; + current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0; + } + } +#endif + tm_enable(); + /* This loads the checkpointed FP/VEC state, if used */ + tm_recheckpoint(¤t->thread, msr); + /* The task has moved into TM state S, so ensure MSR reflects this: */ + regs->msr = (regs->msr & ~MSR_TS_MASK) | __MASK(33); + + /* This loads the speculative FP/VEC state, if used */ + if (msr & MSR_FP) { + do_load_up_transact_fpu(¤t->thread); + regs->msr |= (MSR_FP | current->thread.fpexc_mode); + } + if (msr & MSR_VEC) { + do_load_up_transact_altivec(¤t->thread); + regs->msr |= MSR_VEC; + } + + return err; +} +#endif + /* * Setup the trampoline code on the stack */ @@ -355,6 +645,9 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5, { struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1]; sigset_t set; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + unsigned long msr; +#endif /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; @@ -365,13 +658,26 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5, if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set))) goto badframe; set_current_blocked(&set); +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR])) + goto badframe; + if (MSR_TM_SUSPENDED(msr)) { + /* We recheckpoint on return. */ + struct ucontext __user *uc_transact; + if (__get_user(uc_transact, &uc->uc_link)) + goto badframe; + if (restore_tm_sigcontexts(regs, &uc->uc_mcontext, + &uc_transact->uc_mcontext)) + goto badframe; + } + else + /* Fall through, for non-TM restore */ +#endif if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext)) goto badframe; - /* do_sigaltstack expects a __user pointer and won't modify - * what's in there anyway - */ - do_sigaltstack(&uc->uc_stack, NULL, regs->gpr[1]); + if (restore_altstack(&uc->uc_stack)) + goto badframe; set_thread_flag(TIF_RESTOREALL); return 0; @@ -415,19 +721,39 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); - err |= __put_user(0, &frame->uc.uc_link); - err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); - err |= __put_user(sas_ss_flags(regs->gpr[1]), - &frame->uc.uc_stack.ss_flags); - err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); - err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, signr, NULL, - (unsigned long)ka->sa.sa_handler, 1); + err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]); +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (MSR_TM_ACTIVE(regs->msr)) { + /* The ucontext_t passed to userland points to the second + * ucontext_t (for transactional state) with its uc_link ptr. 
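[Illustrative aside, not from this patch: a minimal sketch of how a TM-aware userspace handler could consume the frame layout described above. The checkpointed ("rolled back") state is in the ucontext passed to the handler; when uc_link is non-NULL it points at the second ucontext carrying the transactional registers. The handler name is hypothetical; PT_NIP/PT_MSR are the usual asm/ptrace.h register indices.]

	#include <signal.h>
	#include <ucontext.h>
	#include <asm/ptrace.h>		/* PT_NIP, PT_MSR */

	static void tm_aware_handler(int sig, siginfo_t *info, void *ctx)
	{
		ucontext_t *uc = ctx;			/* checkpointed (rolled-back) state */
		ucontext_t *uc_t = uc->uc_link;		/* transactional state, if signalled in a transaction */

		if (uc_t) {
			/* Real origin of the signal: the transactional NIP/MSR */
			unsigned long tnip = uc_t->uc_mcontext.gp_regs[PT_NIP];
			unsigned long tmsr = uc_t->uc_mcontext.gp_regs[PT_MSR];
			(void)tnip; (void)tmsr;		/* e.g. record before returning */
		}
		/* Returning normally goes back through sys_rt_sigreturn, which
		 * recheckpoints the transaction as implemented earlier in this file. */
	}

[Registered with sigaction() and SA_SIGINFO; a handler that is not TM-aware simply ignores uc_link and only ever sees the checkpointed registers, which is the intent of the design.]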
+ */ + err |= __put_user(&frame->uc_transact, &frame->uc.uc_link); + err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext, + &frame->uc_transact.uc_mcontext, + regs, signr, + NULL, + (unsigned long)ka->sa.sa_handler); + } else +#endif + { + err |= __put_user(0, &frame->uc.uc_link); + err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, signr, + NULL, (unsigned long)ka->sa.sa_handler, + 1); + } err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) goto badframe; /* Make sure signal handler doesn't get spurious FP exceptions */ current->thread.fpscr.val = 0; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* Remove TM bits from thread's MSR. The MSR in the sigcontext + * just indicates to userland that we were doing a transaction, but we + * don't want to return in transactional state: + */ + regs->msr &= ~MSR_TS_MASK; +#endif /* Set up to return from userspace. */ if (vdso64_rt_sigtramp && current->mm->context.vdso_base) { diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c index 640de836e46..e68fd1ae727 100644 --- a/arch/powerpc/kernel/smp-tbsync.c +++ b/arch/powerpc/kernel/smp-tbsync.c @@ -36,13 +36,13 @@ static struct { static volatile int running; -static void __devinit enter_contest(u64 mark, long add) +static void enter_contest(u64 mark, long add) { while (get_tb() < mark) tbsync->race_result = add; } -void __devinit smp_generic_take_timebase(void) +void smp_generic_take_timebase(void) { int cmd; u64 tb; @@ -75,7 +75,7 @@ void __devinit smp_generic_take_timebase(void) local_irq_restore(flags); } -static int __devinit start_contest(int cmd, long offset, int num) +static int start_contest(int cmd, long offset, int num) { int i, score=0; u64 tb; @@ -110,7 +110,7 @@ static int __devinit start_contest(int cmd, long offset, int num) return score; } -void __devinit smp_generic_give_timebase(void) +void smp_generic_give_timebase(void) { int i, score, score2, old, min=0, max=5000, offset=1000; diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 2b952b5386f..76bd9da8cb7 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -82,7 +82,7 @@ int smt_enabled_at_boot = 1; static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL; #ifdef CONFIG_PPC64 -int __devinit smp_generic_kick_cpu(int nr) +int smp_generic_kick_cpu(int nr) { BUG_ON(nr < 0 || nr >= NR_CPUS); @@ -311,7 +311,7 @@ void smp_send_stop(void) struct thread_info *current_set[NR_CPUS]; -static void __devinit smp_store_cpu_info(int id) +static void smp_store_cpu_info(int id) { per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR); #ifdef CONFIG_PPC_FSL_BOOK3E @@ -355,7 +355,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) max_cpus = 1; } -void __devinit smp_prepare_boot_cpu(void) +void smp_prepare_boot_cpu(void) { BUG_ON(smp_processor_id() != boot_cpuid); #ifdef CONFIG_PPC64 @@ -427,6 +427,45 @@ int generic_check_cpu_restart(unsigned int cpu) { return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE; } + +static atomic_t secondary_inhibit_count; + +/* + * Don't allow secondary CPU threads to come online + */ +void inhibit_secondary_onlining(void) +{ + /* + * This makes secondary_inhibit_count stable during cpu + * online/offline operations. 
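[Illustrative aside, not from this patch: the intended bracketing of the two helpers above by a hypothetical caller that needs secondary threads kept offline for a while. The get_online_cpus()/put_online_cpus() pairs inside the helpers are what keep the count stable against concurrent hotplug.]

	inhibit_secondary_onlining();
	/* ... __cpu_up() now refuses non-primary threads (see the check added below) ... */
	uninhibit_secondary_onlining();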
+ */ + get_online_cpus(); + + atomic_inc(&secondary_inhibit_count); + put_online_cpus(); +} +EXPORT_SYMBOL_GPL(inhibit_secondary_onlining); + +/* + * Allow secondary CPU threads to come online again + */ +void uninhibit_secondary_onlining(void) +{ + get_online_cpus(); + atomic_dec(&secondary_inhibit_count); + put_online_cpus(); +} +EXPORT_SYMBOL_GPL(uninhibit_secondary_onlining); + +static int secondaries_inhibited(void) +{ + return atomic_read(&secondary_inhibit_count); +} + +#else /* HOTPLUG_CPU */ + +#define secondaries_inhibited() 0 + #endif static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle) @@ -445,6 +484,13 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) { int rc, c; + /* + * Don't allow secondary threads to come online if inhibited + */ + if (threads_per_core > 1 && secondaries_inhibited() && + cpu % threads_per_core != 0) + return -EBUSY; + if (smp_ops == NULL || (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) return -EINVAL; @@ -564,7 +610,7 @@ static struct device_node *cpu_to_l2cache(int cpu) } /* Activate a secondary processor. */ -void __devinit start_secondary(void *unused) +__cpuinit void start_secondary(void *unused) { unsigned int cpu = smp_processor_id(); struct device_node *l2_cache; diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index 9c2ed90ece8..d0bafc0cdf0 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -61,16 +61,6 @@ asmlinkage long ppc32_select(u32 n, compat_ulong_t __user *inp, return compat_sys_select((int)n, inp, outp, exp, compat_ptr(tvp_x)); } -/* Note: it is necessary to treat option as an unsigned int, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. - */ -asmlinkage long compat_sys_sysfs(u32 option, u32 arg1, u32 arg2) -{ - return sys_sysfs((int)option, arg1, arg2); -} - #ifdef CONFIG_SYSVIPC long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr, u32 fifth) @@ -156,318 +146,6 @@ asmlinkage long compat_sys_sendfile64_wrapper(u32 out_fd, u32 in_fd, (off_t __user *)offset, count); } -/* Note: it is necessary to treat option as an unsigned int, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. - */ -asmlinkage long compat_sys_prctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 arg5) -{ - return sys_prctl((int)option, - (unsigned long) arg2, - (unsigned long) arg3, - (unsigned long) arg4, - (unsigned long) arg5); -} - -/* Note: it is necessary to treat pid as an unsigned int, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. 
- */ -asmlinkage long compat_sys_sched_rr_get_interval(u32 pid, struct compat_timespec __user *interval) -{ - struct timespec t; - int ret; - mm_segment_t old_fs = get_fs (); - - /* The __user pointer cast is valid because of the set_fs() */ - set_fs (KERNEL_DS); - ret = sys_sched_rr_get_interval((int)pid, (struct timespec __user *) &t); - set_fs (old_fs); - if (put_compat_timespec(&t, interval)) - return -EFAULT; - return ret; -} - -/* Note: it is necessary to treat mode as an unsigned int, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. - */ -asmlinkage long compat_sys_access(const char __user * filename, u32 mode) -{ - return sys_access(filename, (int)mode); -} - - -/* Note: it is necessary to treat mode as an unsigned int, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. - */ -asmlinkage long compat_sys_creat(const char __user * pathname, u32 mode) -{ - return sys_creat(pathname, (int)mode); -} - - -/* Note: it is necessary to treat pid and options as unsigned ints, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. - */ -asmlinkage long compat_sys_waitpid(u32 pid, unsigned int __user * stat_addr, u32 options) -{ - return sys_waitpid((int)pid, stat_addr, (int)options); -} - - -/* Note: it is necessary to treat gidsetsize as an unsigned int, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. - */ -asmlinkage long compat_sys_getgroups(u32 gidsetsize, gid_t __user *grouplist) -{ - return sys_getgroups((int)gidsetsize, grouplist); -} - - -/* Note: it is necessary to treat pid as an unsigned int, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. - */ -asmlinkage long compat_sys_getpgid(u32 pid) -{ - return sys_getpgid((int)pid); -} - - - -/* Note: it is necessary to treat pid as an unsigned int, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. - */ -asmlinkage long compat_sys_getsid(u32 pid) -{ - return sys_getsid((int)pid); -} - - -/* Note: it is necessary to treat pid and sig as unsigned ints, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. 
- */ -asmlinkage long compat_sys_kill(u32 pid, u32 sig) -{ - return sys_kill((int)pid, (int)sig); -} - - -/* Note: it is necessary to treat mode as an unsigned int, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. - */ -asmlinkage long compat_sys_mkdir(const char __user * pathname, u32 mode) -{ - return sys_mkdir(pathname, (int)mode); -} - -long compat_sys_nice(u32 increment) -{ - /* sign extend increment */ - return sys_nice((int)increment); -} - -off_t ppc32_lseek(unsigned int fd, u32 offset, unsigned int origin) -{ - /* sign extend n */ - return sys_lseek(fd, (int)offset, origin); -} - -long compat_sys_truncate(const char __user * path, u32 length) -{ - /* sign extend length */ - return sys_truncate(path, (int)length); -} - -long compat_sys_ftruncate(int fd, u32 length) -{ - /* sign extend length */ - return sys_ftruncate(fd, (int)length); -} - -/* Note: it is necessary to treat bufsiz as an unsigned int, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. - */ -asmlinkage long compat_sys_readlink(const char __user * path, char __user * buf, u32 bufsiz) -{ - return sys_readlink(path, buf, (int)bufsiz); -} - -/* Note: it is necessary to treat option as an unsigned int, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. - */ -asmlinkage long compat_sys_sched_get_priority_max(u32 policy) -{ - return sys_sched_get_priority_max((int)policy); -} - - -/* Note: it is necessary to treat policy as an unsigned int, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. - */ -asmlinkage long compat_sys_sched_get_priority_min(u32 policy) -{ - return sys_sched_get_priority_min((int)policy); -} - - -/* Note: it is necessary to treat pid as an unsigned int, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. - */ -asmlinkage long compat_sys_sched_getparam(u32 pid, struct sched_param __user *param) -{ - return sys_sched_getparam((int)pid, param); -} - - -/* Note: it is necessary to treat pid as an unsigned int, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. 
- */ -asmlinkage long compat_sys_sched_getscheduler(u32 pid) -{ - return sys_sched_getscheduler((int)pid); -} - - -/* Note: it is necessary to treat pid as an unsigned int, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. - */ -asmlinkage long compat_sys_sched_setparam(u32 pid, struct sched_param __user *param) -{ - return sys_sched_setparam((int)pid, param); -} - - -/* Note: it is necessary to treat pid and policy as unsigned ints, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. - */ -asmlinkage long compat_sys_sched_setscheduler(u32 pid, u32 policy, struct sched_param __user *param) -{ - return sys_sched_setscheduler((int)pid, (int)policy, param); -} - - -/* Note: it is necessary to treat len as an unsigned int, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. - */ -asmlinkage long compat_sys_setdomainname(char __user *name, u32 len) -{ - return sys_setdomainname(name, (int)len); -} - - -/* Note: it is necessary to treat gidsetsize as an unsigned int, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. - */ -asmlinkage long compat_sys_setgroups(u32 gidsetsize, gid_t __user *grouplist) -{ - return sys_setgroups((int)gidsetsize, grouplist); -} - - -asmlinkage long compat_sys_sethostname(char __user *name, u32 len) -{ - /* sign extend len */ - return sys_sethostname(name, (int)len); -} - - -/* Note: it is necessary to treat pid and pgid as unsigned ints, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. 
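[Illustrative aside, not from this patch: the sign-extension idiom these removed wrappers existed to preserve, shown with a hypothetical parameter. A 32-bit task passing -1 presents it as 0xffffffff in a 64-bit register; casting through int before widening restores the intended negative value.]

	static long compat_example(u32 pid)
	{
		int spid = (int)pid;	/* 0xffffffffu becomes -1 */
		long wide = spid;	/* sign-extends to -1L, as a native 64-bit caller would pass */
		return wide;
	}

[Without the (int) cast, wide would be 4294967295 and a negative pid/length-style argument would be misinterpreted by the 64-bit syscall.]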
- */ -asmlinkage long compat_sys_setpgid(u32 pid, u32 pgid) -{ - return sys_setpgid((int)pid, (int)pgid); -} - -long compat_sys_getpriority(u32 which, u32 who) -{ - /* sign extend which and who */ - return sys_getpriority((int)which, (int)who); -} - -long compat_sys_setpriority(u32 which, u32 who, u32 niceval) -{ - /* sign extend which, who and niceval */ - return sys_setpriority((int)which, (int)who, (int)niceval); -} - -long compat_sys_ioprio_get(u32 which, u32 who) -{ - /* sign extend which and who */ - return sys_ioprio_get((int)which, (int)who); -} - -long compat_sys_ioprio_set(u32 which, u32 who, u32 ioprio) -{ - /* sign extend which, who and ioprio */ - return sys_ioprio_set((int)which, (int)who, (int)ioprio); -} - -/* Note: it is necessary to treat newmask as an unsigned int, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. - */ -asmlinkage long compat_sys_ssetmask(u32 newmask) -{ - return sys_ssetmask((int) newmask); -} - -asmlinkage long compat_sys_syslog(u32 type, char __user * buf, u32 len) -{ - /* sign extend len */ - return sys_syslog(type, buf, (int)len); -} - - -/* Note: it is necessary to treat mask as an unsigned int, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. - */ -asmlinkage long compat_sys_umask(u32 mask) -{ - return sys_umask((int)mask); -} - unsigned long compat_sys_mmap2(unsigned long addr, size_t len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) @@ -476,12 +154,6 @@ unsigned long compat_sys_mmap2(unsigned long addr, size_t len, return sys_mmap(addr, len, prot, flags, fd, pgoff << 12); } -long compat_sys_tgkill(u32 tgid, u32 pid, int sig) -{ - /* sign extend tgid, pid */ - return sys_tgkill((int)tgid, (int)pid, sig); -} - /* * long long munging: * The 32 bit ABI passes long longs in an odd even register pair. diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index cf357a059dd..3ce1f864c2d 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -607,7 +607,7 @@ static void register_nodes(void) int sysfs_add_device_to_node(struct device *dev, int nid) { - struct node *node = &node_devices[nid]; + struct node *node = node_devices[nid]; return sysfs_create_link(&node->dev.kobj, &dev->kobj, kobject_name(&dev->kobj)); } @@ -615,7 +615,7 @@ EXPORT_SYMBOL_GPL(sysfs_add_device_to_node); void sysfs_remove_device_from_node(struct device *dev, int nid) { - struct node *node = &node_devices[nid]; + struct node *node = node_devices[nid]; sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj)); } EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node); diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index ce4cb772dc7..f77fa22754b 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -143,7 +143,7 @@ EXPORT_SYMBOL_GPL(ppc_proc_freq); unsigned long ppc_tb_freq; EXPORT_SYMBOL_GPL(ppc_tb_freq); -#ifdef CONFIG_VIRT_CPU_ACCOUNTING +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE /* * Factors for converting from cputime_t (timebase ticks) to * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds). 
@@ -297,6 +297,8 @@ static u64 vtime_delta(struct task_struct *tsk, u64 now, nowscaled, deltascaled; u64 udelta, delta, user_scaled; + WARN_ON_ONCE(!irqs_disabled()); + now = mftb(); nowscaled = read_spurr(now); get_paca()->system_time += now - get_paca()->starttime; @@ -345,6 +347,7 @@ void vtime_account_system(struct task_struct *tsk) if (stolen) account_steal_time(stolen); } +EXPORT_SYMBOL_GPL(vtime_account_system); void vtime_account_idle(struct task_struct *tsk) { @@ -355,15 +358,15 @@ void vtime_account_idle(struct task_struct *tsk) } /* - * Transfer the user and system times accumulated in the paca - * by the exception entry and exit code to the generic process - * user and system time records. + * Transfer the user time accumulated in the paca + * by the exception entry and exit code to the generic + * process user time records. * Must be called with interrupts disabled. - * Assumes that vtime_account() has been called recently - * (i.e. since the last entry from usermode) so that + * Assumes that vtime_account_system/idle() has been called + * recently (i.e. since the last entry from usermode) so that * get_paca()->user_time_scaled is up to date. */ -void account_process_tick(struct task_struct *tsk, int user_tick) +void vtime_account_user(struct task_struct *tsk) { cputime_t utime, utimescaled; @@ -375,13 +378,7 @@ void account_process_tick(struct task_struct *tsk, int user_tick) account_user_time(tsk, utime, utimescaled); } -void vtime_task_switch(struct task_struct *prev) -{ - vtime_account(prev); - account_process_tick(prev, 0); -} - -#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */ +#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ #define calc_cputime_factors() #endif @@ -498,10 +495,15 @@ void timer_interrupt(struct pt_regs * regs) set_dec(DECREMENTER_MAX); /* Some implementations of hotplug will get timer interrupts while - * offline, just ignore these + * offline, just ignore these and we also need to set + * decrementers_next_tb as MAX to make sure __check_irq_replay + * don't replay timer interrupt when return, otherwise we'll trap + * here infinitely :( */ - if (!cpu_online(smp_processor_id())) + if (!cpu_online(smp_processor_id())) { + *next_tb = ~(u64)0; return; + } /* Conditionally hard-enable interrupts now that the DEC has been * bumped to its maximum value @@ -667,7 +669,7 @@ int update_persistent_clock(struct timespec now) struct rtc_time tm; if (!ppc_md.set_rtc_time) - return 0; + return -ENODEV; to_tm(now.tv_sec + 1 + timezone_offset, &tm); tm.tm_year -= 1900; @@ -774,13 +776,8 @@ void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm, void update_vsyscall_tz(void) { - /* Make userspace gettimeofday spin until we're done. */ - ++vdso_data->tb_update_count; - smp_mb(); vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; vdso_data->tz_dsttime = sys_tz.tz_dsttime; - smp_mb(); - ++vdso_data->tb_update_count; } static void __init clocksource_init(void) diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S new file mode 100644 index 00000000000..84dbace657c --- /dev/null +++ b/arch/powerpc/kernel/tm.S @@ -0,0 +1,388 @@ +/* + * Transactional memory support routines to reclaim and recheckpoint + * transactional process state. + * + * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation. 
+ */ + +#include <asm/asm-offsets.h> +#include <asm/ppc_asm.h> +#include <asm/ppc-opcode.h> +#include <asm/ptrace.h> +#include <asm/reg.h> + +#ifdef CONFIG_VSX +/* See fpu.S, this is very similar but to save/restore checkpointed FPRs/VSRs */ +#define __SAVE_32FPRS_VSRS_TRANSACT(n,c,base) \ +BEGIN_FTR_SECTION \ + b 2f; \ +END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ + SAVE_32FPRS_TRANSACT(n,base); \ + b 3f; \ +2: SAVE_32VSRS_TRANSACT(n,c,base); \ +3: +/* ...and this is just plain borrowed from there. */ +#define __REST_32FPRS_VSRS(n,c,base) \ +BEGIN_FTR_SECTION \ + b 2f; \ +END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ + REST_32FPRS(n,base); \ + b 3f; \ +2: REST_32VSRS(n,c,base); \ +3: +#else +#define __SAVE_32FPRS_VSRS_TRANSACT(n,c,base) SAVE_32FPRS_TRANSACT(n, base) +#define __REST_32FPRS_VSRS(n,c,base) REST_32FPRS(n, base) +#endif +#define SAVE_32FPRS_VSRS_TRANSACT(n,c,base) \ + __SAVE_32FPRS_VSRS_TRANSACT(n,__REG_##c,__REG_##base) +#define REST_32FPRS_VSRS(n,c,base) \ + __REST_32FPRS_VSRS(n,__REG_##c,__REG_##base) + +/* Stack frame offsets for local variables. */ +#define TM_FRAME_L0 TM_FRAME_SIZE-16 +#define TM_FRAME_L1 TM_FRAME_SIZE-8 +#define STACK_PARAM(x) (48+((x)*8)) + + +/* In order to access the TM SPRs, TM must be enabled. So, do so: */ +_GLOBAL(tm_enable) + mfmsr r4 + li r3, MSR_TM >> 32 + sldi r3, r3, 32 + and. r0, r4, r3 + bne 1f + or r4, r4, r3 + mtmsrd r4 +1: blr + +_GLOBAL(tm_save_sprs) + mfspr r0, SPRN_TFHAR + std r0, THREAD_TM_TFHAR(r3) + mfspr r0, SPRN_TEXASR + std r0, THREAD_TM_TEXASR(r3) + mfspr r0, SPRN_TFIAR + std r0, THREAD_TM_TFIAR(r3) + blr + +_GLOBAL(tm_restore_sprs) + ld r0, THREAD_TM_TFHAR(r3) + mtspr SPRN_TFHAR, r0 + ld r0, THREAD_TM_TEXASR(r3) + mtspr SPRN_TEXASR, r0 + ld r0, THREAD_TM_TFIAR(r3) + mtspr SPRN_TFIAR, r0 + blr + + /* Passed an 8-bit failure cause as first argument. */ +_GLOBAL(tm_abort) + TABORT(R3) + blr + + +/* void tm_reclaim(struct thread_struct *thread, + * unsigned long orig_msr, + * uint8_t cause) + * + * - Performs a full reclaim. This destroys outstanding + * transactions and updates thread->regs.tm_ckpt_* with the + * original checkpointed state. Note that thread->regs is + * unchanged. + * - FP regs are written back to thread->transact_fpr before + * reclaiming. These are the transactional (current) versions. + * + * Purpose is to both abort transactions of, and preserve the state of, + * a transactions at a context switch. We preserve/restore both sets of process + * state to restore them when the thread's scheduled again. We continue in + * userland as though nothing happened, but when the transaction is resumed + * they will abort back to the checkpointed state we save out here. + * + * Call with IRQs off, stacks get all out of sync for some periods in here! + */ +_GLOBAL(tm_reclaim) + mfcr r6 + mflr r0 + std r6, 8(r1) + std r0, 16(r1) + std r2, 40(r1) + stdu r1, -TM_FRAME_SIZE(r1) + + /* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. */ + + std r3, STACK_PARAM(0)(r1) + SAVE_NVGPRS(r1) + + mfmsr r14 + mr r15, r14 + ori r15, r15, MSR_FP + oris r15, r15, MSR_VEC@h +#ifdef CONFIG_VSX + BEGIN_FTR_SECTION + oris r15,r15, MSR_VSX@h + END_FTR_SECTION_IFSET(CPU_FTR_VSX) +#endif + mtmsrd r15 + std r14, TM_FRAME_L0(r1) + + /* Stash the stack pointer away for use after reclaim */ + std r1, PACAR1(r13) + + /* ******************** FPR/VR/VSRs ************ + * Before reclaiming, capture the current/transactional FPR/VR + * versions /if used/. + * + * (If VSX used, FP and VMX are implied. 
Or, we don't need to look + * at MSR.VSX as copying FP regs if .FP, vector regs if .VMX covers it.) + * + * We're passed the thread's MSR as parameter 2. + * + * We enabled VEC/FP/VSX in the msr above, so we can execute these + * instructions! + */ + andis. r0, r4, MSR_VEC@h + beq dont_backup_vec + + SAVE_32VRS_TRANSACT(0, r6, r3) /* r6 scratch, r3 thread */ + mfvscr vr0 + li r6, THREAD_TRANSACT_VSCR + stvx vr0, r3, r6 + mfspr r0, SPRN_VRSAVE + std r0, THREAD_TRANSACT_VRSAVE(r3) + +dont_backup_vec: + andi. r0, r4, MSR_FP + beq dont_backup_fp + + SAVE_32FPRS_VSRS_TRANSACT(0, R6, R3) /* r6 scratch, r3 thread */ + + mffs fr0 + stfd fr0,THREAD_TRANSACT_FPSCR(r3) + +dont_backup_fp: + /* The moment we treclaim, ALL of our GPRs will switch + * to user register state. (FPRs, CCR etc. also!) + * Use an sprg and a tm_scratch in the PACA to shuffle. + */ + TRECLAIM(R5) /* Cause in r5 */ + + /* ******************** GPRs ******************** */ + /* Stash the checkpointed r13 away in the scratch SPR and get the real + * paca + */ + SET_SCRATCH0(r13) + GET_PACA(r13) + + /* Stash the checkpointed r1 away in paca tm_scratch and get the real + * stack pointer back + */ + std r1, PACATMSCRATCH(r13) + ld r1, PACAR1(r13) + + /* Now get some more GPRS free */ + std r7, GPR7(r1) /* Temporary stash */ + std r12, GPR12(r1) /* '' '' '' */ + ld r12, STACK_PARAM(0)(r1) /* Param 0, thread_struct * */ + + addi r7, r12, PT_CKPT_REGS /* Thread's ckpt_regs */ + + /* Make r7 look like an exception frame so that we + * can use the neat GPRx(n) macros. r7 is NOT a pt_regs ptr! + */ + subi r7, r7, STACK_FRAME_OVERHEAD + + /* Sync the userland GPRs 2-12, 14-31 to thread->regs: */ + SAVE_GPR(0, r7) /* user r0 */ + SAVE_GPR(2, r7) /* user r2 */ + SAVE_4GPRS(3, r7) /* user r3-r6 */ + SAVE_4GPRS(8, r7) /* user r8-r11 */ + ld r3, PACATMSCRATCH(r13) /* user r1 */ + ld r4, GPR7(r1) /* user r7 */ + ld r5, GPR12(r1) /* user r12 */ + GET_SCRATCH0(6) /* user r13 */ + std r3, GPR1(r7) + std r4, GPR7(r7) + std r5, GPR12(r7) + std r6, GPR13(r7) + + SAVE_NVGPRS(r7) /* user r14-r31 */ + + /* ******************** NIP ******************** */ + mfspr r3, SPRN_TFHAR + std r3, _NIP(r7) /* Returns to failhandler */ + /* The checkpointed NIP is ignored when rescheduling/rechkpting, + * but is used in signal return to 'wind back' to the abort handler. + */ + + /* ******************** CR,LR,CCR,MSR ********** */ + mfctr r3 + mflr r4 + mfcr r5 + mfxer r6 + + std r3, _CTR(r7) + std r4, _LINK(r7) + std r5, _CCR(r7) + std r6, _XER(r7) + + /* MSR and flags: We don't change CRs, and we don't need to alter + * MSR. + */ + + /* TM regs, incl TEXASR -- these live in thread_struct. Note they've + * been updated by the treclaim, to explain to userland the failure + * cause (aborted). + */ + mfspr r0, SPRN_TEXASR + mfspr r3, SPRN_TFHAR + mfspr r4, SPRN_TFIAR + std r0, THREAD_TM_TEXASR(r12) + std r3, THREAD_TM_TFHAR(r12) + std r4, THREAD_TM_TFIAR(r12) + + /* AMR and PPR are checkpointed too, but are unsupported by Linux. */ + + /* Restore original MSR/IRQ state & clear TM mode */ + ld r14, TM_FRAME_L0(r1) /* Orig MSR */ + li r15, 0 + rldimi r14, r15, MSR_TS_LG, (63-MSR_TS_LG)-1 + mtmsrd r14 + + REST_NVGPRS(r1) + + addi r1, r1, TM_FRAME_SIZE + ld r4, 8(r1) + ld r0, 16(r1) + mtcr r4 + mtlr r0 + ld r2, 40(r1) + blr + + + /* void tm_recheckpoint(struct thread_struct *thread, + * unsigned long orig_msr) + * - Restore the checkpointed register state saved by tm_reclaim + * when we switch_to a process. 
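[Illustrative aside, not from this patch: the reclaim/recheckpoint pairing the comments above describe, sketched in pseudo-kernel C. Only tm_reclaim()'s signature and MSR_TM_ACTIVE() are taken from this series; the variable names, the cause value and the exact call sites in the context-switch path are assumptions for illustration.]

	/* Switching away from a task that is mid-transaction: abort it and
	 * save the checkpointed state into prev->thread (ckpt_regs etc.). */
	if (MSR_TM_ACTIVE(prev->thread.regs->msr))
		tm_reclaim(&prev->thread, prev->thread.regs->msr, cause);	/* cause: abort reason */

	/* Later, when switching back in: reload the checkpointed state so the
	 * resumed transaction aborts back to it.  orig_msr is the MSR captured
	 * when the state was reclaimed. */
	tm_recheckpoint(&next->thread, orig_msr);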
+ * + * Call with IRQs off, stacks get all out of sync for + * some periods in here! + */ +_GLOBAL(tm_recheckpoint) + mfcr r5 + mflr r0 + std r5, 8(r1) + std r0, 16(r1) + std r2, 40(r1) + stdu r1, -TM_FRAME_SIZE(r1) + + /* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. + * This is used for backing up the NVGPRs: + */ + SAVE_NVGPRS(r1) + + std r1, PACAR1(r13) + + /* Load complete register state from ts_ckpt* registers */ + + addi r7, r3, PT_CKPT_REGS /* Thread's ckpt_regs */ + + /* Make r7 look like an exception frame so that we + * can use the neat GPRx(n) macros. r7 is now NOT a pt_regs ptr! + */ + subi r7, r7, STACK_FRAME_OVERHEAD + + SET_SCRATCH0(r1) + + mfmsr r6 + /* R4 = original MSR to indicate whether thread used FP/Vector etc. */ + + /* Enable FP/vec in MSR if necessary! */ + lis r5, MSR_VEC@h + ori r5, r5, MSR_FP + and. r5, r4, r5 + beq restore_gprs /* if neither, skip both */ + +#ifdef CONFIG_VSX + BEGIN_FTR_SECTION + oris r5, r5, MSR_VSX@h + END_FTR_SECTION_IFSET(CPU_FTR_VSX) +#endif + or r5, r6, r5 /* Set MSR.FP+.VSX/.VEC */ + mtmsr r5 + + /* FP and VEC registers: These are recheckpointed from thread.fpr[] + * and thread.vr[] respectively. The thread.transact_fpr[] version + * is more modern, and will be loaded subsequently by any FPUnavailable + * trap. + */ + andis. r0, r4, MSR_VEC@h + beq dont_restore_vec + + li r5, THREAD_VSCR + lvx vr0, r3, r5 + mtvscr vr0 + REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */ + ld r5, THREAD_VRSAVE(r3) + mtspr SPRN_VRSAVE, r5 + +dont_restore_vec: + andi. r0, r4, MSR_FP + beq dont_restore_fp + + lfd fr0, THREAD_FPSCR(r3) + MTFSF_L(fr0) + REST_32FPRS_VSRS(0, R4, R3) + +dont_restore_fp: + mtmsr r6 /* FP/Vec off again! */ + +restore_gprs: + /* ******************** CR,LR,CCR,MSR ********** */ + ld r3, _CTR(r7) + ld r4, _LINK(r7) + ld r5, _CCR(r7) + ld r6, _XER(r7) + + mtctr r3 + mtlr r4 + mtcr r5 + mtxer r6 + + /* MSR and flags: We don't change CRs, and we don't need to alter + * MSR. + */ + + REST_4GPRS(0, r7) /* GPR0-3 */ + REST_GPR(4, r7) /* GPR4-6 */ + REST_GPR(5, r7) + REST_GPR(6, r7) + REST_4GPRS(8, r7) /* GPR8-11 */ + REST_2GPRS(12, r7) /* GPR12-13 */ + + REST_NVGPRS(r7) /* GPR14-31 */ + + ld r7, GPR7(r7) /* GPR7 */ + + /* Commit register state as checkpointed state: */ + TRECHKPT + + /* Our transactional state has now changed. + * + * Now just get out of here. Transactional (current) state will be + * updated once restore is called on the return path in the _switch-ed + * -to process. 
+ */ + + GET_PACA(r13) + GET_SCRATCH0(r1) + + REST_NVGPRS(r1) + + addi r1, r1, TM_FRAME_SIZE + ld r4, 8(r1) + ld r0, 16(r1) + mtcr r4 + mtlr r0 + ld r2, 40(r1) + blr + + /* ****************************************************************** */ diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 32518401af6..37cc40ef504 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -58,6 +58,7 @@ #include <asm/rio.h> #include <asm/fadump.h> #include <asm/switch_to.h> +#include <asm/tm.h> #include <asm/debug.h> #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) @@ -66,7 +67,7 @@ int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly; int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly; int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly; int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly; -int (*__debugger_dabr_match)(struct pt_regs *regs) __read_mostly; +int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly; int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly; EXPORT_SYMBOL(__debugger); @@ -74,10 +75,17 @@ EXPORT_SYMBOL(__debugger_ipi); EXPORT_SYMBOL(__debugger_bpt); EXPORT_SYMBOL(__debugger_sstep); EXPORT_SYMBOL(__debugger_iabr_match); -EXPORT_SYMBOL(__debugger_dabr_match); +EXPORT_SYMBOL(__debugger_break_match); EXPORT_SYMBOL(__debugger_fault_handler); #endif +/* Transactional Memory trap debug */ +#ifdef TM_DEBUG_SW +#define TM_DEBUG(x...) printk(KERN_INFO x) +#else +#define TM_DEBUG(x...) do { } while(0) +#endif + /* * Trap & Exception support */ @@ -138,7 +146,7 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, { bust_spinlocks(0); die_owner = -1; - add_taint(TAINT_DIE); + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); die_nest_count--; oops_exit(); printk("\n"); @@ -350,6 +358,7 @@ static inline int check_io_access(struct pt_regs *regs) exception is in the MSR. */ #define get_reason(regs) ((regs)->msr) #define get_mc_reason(regs) ((regs)->msr) +#define REASON_TM 0x200000 #define REASON_FP 0x100000 #define REASON_ILLEGAL 0x80000 #define REASON_PRIVILEGED 0x40000 @@ -1020,6 +1029,38 @@ void __kprobes program_check_exception(struct pt_regs *regs) _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); return; } +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (reason & REASON_TM) { + /* This is a TM "Bad Thing Exception" program check. + * This occurs when: + * - An rfid/hrfid/mtmsrd attempts to cause an illegal + * transition in TM states. + * - A trechkpt is attempted when transactional. + * - A treclaim is attempted when non transactional. + * - A tend is illegally attempted. + * - writing a TM SPR when transactional. + */ + if (!user_mode(regs) && + report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { + regs->nip += 4; + return; + } + /* If usermode caused this, it's done something illegal and + * gets a SIGILL slap on the wrist. We call it an illegal + * operand to distinguish from the instruction just being bad + * (e.g. executing a 'tend' on a CPU without TM!); it's an + * illegal /placement/ of a valid instruction. 
+ */ + if (user_mode(regs)) { + _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); + return; + } else { + printk(KERN_EMERG "Unexpected TM Bad Thing exception " + "at %lx (msr 0x%x)\n", regs->nip, reason); + die("Unrecoverable exception", regs, SIGABRT); + } + } +#endif /* We restore the interrupt state now */ if (!arch_irq_disabled_regs(regs)) @@ -1160,6 +1201,109 @@ void vsx_unavailable_exception(struct pt_regs *regs) die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); } +void tm_unavailable_exception(struct pt_regs *regs) +{ + /* We restore the interrupt state now */ + if (!arch_irq_disabled_regs(regs)) + local_irq_enable(); + + /* Currently we never expect a TMU exception. Catch + * this and kill the process! + */ + printk(KERN_EMERG "Unexpected TM unavailable exception at %lx " + "(msr %lx)\n", + regs->nip, regs->msr); + + if (user_mode(regs)) { + _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); + return; + } + + die("Unexpected TM unavailable exception", regs, SIGABRT); +} + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + +extern void do_load_up_fpu(struct pt_regs *regs); + +void fp_unavailable_tm(struct pt_regs *regs) +{ + /* Note: This does not handle any kind of FP laziness. */ + + TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n", + regs->nip, regs->msr); + tm_enable(); + + /* We can only have got here if the task started using FP after + * beginning the transaction. So, the transactional regs are just a + * copy of the checkpointed ones. But, we still need to recheckpoint + * as we're enabling FP for the process; it will return, abort the + * transaction, and probably retry but now with FP enabled. So the + * checkpointed FP registers need to be loaded. + */ + tm_reclaim(¤t->thread, current->thread.regs->msr, + TM_CAUSE_FAC_UNAV); + /* Reclaim didn't save out any FPRs to transact_fprs. */ + + /* Enable FP for the task: */ + regs->msr |= (MSR_FP | current->thread.fpexc_mode); + + /* This loads and recheckpoints the FP registers from + * thread.fpr[]. They will remain in registers after the + * checkpoint so we don't need to reload them after. + */ + tm_recheckpoint(¤t->thread, regs->msr); +} + +#ifdef CONFIG_ALTIVEC +extern void do_load_up_altivec(struct pt_regs *regs); + +void altivec_unavailable_tm(struct pt_regs *regs) +{ + /* See the comments in fp_unavailable_tm(). This function operates + * the same way. + */ + + TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx," + "MSR=%lx\n", + regs->nip, regs->msr); + tm_enable(); + tm_reclaim(¤t->thread, current->thread.regs->msr, + TM_CAUSE_FAC_UNAV); + regs->msr |= MSR_VEC; + tm_recheckpoint(¤t->thread, regs->msr); + current->thread.used_vr = 1; +} +#endif + +#ifdef CONFIG_VSX +void vsx_unavailable_tm(struct pt_regs *regs) +{ + /* See the comments in fp_unavailable_tm(). This works similarly, + * though we're loading both FP and VEC registers in here. + * + * If FP isn't in use, load FP regs. If VEC isn't in use, load VEC + * regs. Either way, set MSR_VSX. + */ + + TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx," + "MSR=%lx\n", + regs->nip, regs->msr); + + tm_enable(); + /* This reclaims FP and/or VR regs if they're already enabled */ + tm_reclaim(¤t->thread, current->thread.regs->msr, + TM_CAUSE_FAC_UNAV); + + regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode | + MSR_VSX; + /* This loads & recheckpoints FP and VRs. 
*/ + tm_recheckpoint(¤t->thread, regs->msr); + current->thread.used_vsr = 1; +} +#endif +#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ + void performance_monitor_exception(struct pt_regs *regs) { __get_cpu_var(irq_stat).pmu_irqs++; @@ -1515,7 +1659,7 @@ void unrecoverable_exception(struct pt_regs *regs) die("Unrecoverable exception", regs, SIGABRT); } -#ifdef CONFIG_BOOKE_WDT +#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x) /* * Default handler for a Watchdog exception, * spins until a reboot occurs diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c index c39c1ca77f4..f9748498fe5 100644 --- a/arch/powerpc/kernel/udbg.c +++ b/arch/powerpc/kernel/udbg.c @@ -122,29 +122,6 @@ int udbg_write(const char *s, int n) return n - remain; } -int udbg_read(char *buf, int buflen) -{ - char *p = buf; - int i, c; - - if (!udbg_getc) - return 0; - - for (i = 0; i < buflen; ++i) { - do { - c = udbg_getc(); - if (c == -1 && i == 0) - return -1; - - } while (c == 0x11 || c == 0x13); - if (c == 0 || c == -1) - break; - *p++ = c; - } - - return i; -} - #define UDBG_BUFSIZE 256 void udbg_printf(const char *fmt, ...) { diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c index d2d46d1014f..bc77834dbf4 100644 --- a/arch/powerpc/kernel/uprobes.c +++ b/arch/powerpc/kernel/uprobes.c @@ -64,6 +64,8 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) autask->saved_trap_nr = current->thread.trap_nr; current->thread.trap_nr = UPROBE_TRAP_NR; regs->nip = current->utask->xol_vaddr; + + user_enable_single_step(current); return 0; } @@ -119,6 +121,8 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) * to be executed. */ regs->nip = utask->vaddr + MAX_UINSN_BYTES; + + user_disable_single_step(current); return 0; } @@ -162,6 +166,8 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) current->thread.trap_nr = utask->autask.saved_trap_nr; instruction_pointer_set(regs, utask->vaddr); + + user_disable_single_step(current); } /* diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S index e830289d2e4..9e20999aaef 100644 --- a/arch/powerpc/kernel/vector.S +++ b/arch/powerpc/kernel/vector.S @@ -7,6 +7,57 @@ #include <asm/page.h> #include <asm/ptrace.h> +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +/* + * Wrapper to call load_up_altivec from C. + * void do_load_up_altivec(struct pt_regs *regs); + */ +_GLOBAL(do_load_up_altivec) + mflr r0 + std r0, 16(r1) + stdu r1, -112(r1) + + subi r6, r3, STACK_FRAME_OVERHEAD + /* load_up_altivec expects r12=MSR, r13=PACA, and returns + * with r12 = new MSR. + */ + ld r12,_MSR(r6) + GET_PACA(r13) + bl load_up_altivec + std r12,_MSR(r6) + + ld r0, 112+16(r1) + addi r1, r1, 112 + mtlr r0 + blr + +/* void do_load_up_transact_altivec(struct thread_struct *thread) + * + * This is similar to load_up_altivec but for the transactional version of the + * vector regs. It doesn't mess with the task MSR or valid flags. + * Furthermore, VEC laziness is not supported with TM currently. + */ +_GLOBAL(do_load_up_transact_altivec) + mfmsr r6 + oris r5,r6,MSR_VEC@h + MTMSRD(r5) + isync + + li r4,1 + stw r4,THREAD_USED_VR(r3) + + li r10,THREAD_TRANSACT_VSCR + lvx vr0,r10,r3 + mtvscr vr0 + REST_32VRS_TRANSACT(0,r4,r3) + + /* Disable VEC again. 
*/ + MTMSRD(r6) + isync + + blr +#endif + /* * load_up_altivec(unused, unused, tsk) * Disable VMX for the task which had it previously, diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index 201ba59738b..536016d792b 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -1289,7 +1289,7 @@ void vio_unregister_driver(struct vio_driver *viodrv) EXPORT_SYMBOL(vio_unregister_driver); /* vio_dev refcount hit 0 */ -static void __devinit vio_dev_release(struct device *dev) +static void vio_dev_release(struct device *dev) { struct iommu_table *tbl = get_iommu_table_base(dev); @@ -1545,7 +1545,7 @@ static struct device_attribute vio_dev_attrs[] = { __ATTR_NULL }; -void __devinit vio_unregister_device(struct vio_dev *viodev) +void vio_unregister_device(struct vio_dev *viodev) { device_unregister(&viodev->dev); } diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 65d1c08cf09..654e479802f 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -218,6 +218,11 @@ SECTIONS .got : AT(ADDR(.got) - LOAD_OFFSET) { __toc_start = .; +#ifndef CONFIG_RELOCATABLE + __prom_init_toc_start = .; + arch/powerpc/kernel/prom_init.o*(.toc .got) + __prom_init_toc_end = .; +#endif *(.got) *(.toc) } diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c index 50e7dbc7356..3d7fd21c65f 100644 --- a/arch/powerpc/kvm/44x.c +++ b/arch/powerpc/kvm/44x.c @@ -83,6 +83,7 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) vcpu_44x->shadow_refs[i].gtlb_index = -1; vcpu->arch.cpu_type = KVM_CPU_440; + vcpu->arch.pvr = mfspr(SPRN_PVR); return 0; } diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c index c8c61578fdf..35ec0a8547d 100644 --- a/arch/powerpc/kvm/44x_emulate.c +++ b/arch/powerpc/kvm/44x_emulate.c @@ -27,12 +27,70 @@ #include "booke.h" #include "44x_tlb.h" +#define XOP_MFDCRX 259 #define XOP_MFDCR 323 +#define XOP_MTDCRX 387 #define XOP_MTDCR 451 #define XOP_TLBSX 914 #define XOP_ICCCI 966 #define XOP_TLBWE 978 +static int emulate_mtdcr(struct kvm_vcpu *vcpu, int rs, int dcrn) +{ + /* emulate some access in kernel */ + switch (dcrn) { + case DCRN_CPR0_CONFIG_ADDR: + vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs); + return EMULATE_DONE; + default: + vcpu->run->dcr.dcrn = dcrn; + vcpu->run->dcr.data = kvmppc_get_gpr(vcpu, rs); + vcpu->run->dcr.is_write = 1; + vcpu->arch.dcr_is_write = 1; + vcpu->arch.dcr_needed = 1; + kvmppc_account_exit(vcpu, DCR_EXITS); + return EMULATE_DO_DCR; + } +} + +static int emulate_mfdcr(struct kvm_vcpu *vcpu, int rt, int dcrn) +{ + /* The guest may access CPR0 registers to determine the timebase + * frequency, and it must know the real host frequency because it + * can directly access the timebase registers. + * + * It would be possible to emulate those accesses in userspace, + * but userspace can really only figure out the end frequency. + * We could decompose that into the factors that compute it, but + * that's tricky math, and it's easier to just report the real + * CPR0 values. 
+ */ + switch (dcrn) { + case DCRN_CPR0_CONFIG_ADDR: + kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr); + break; + case DCRN_CPR0_CONFIG_DATA: + local_irq_disable(); + mtdcr(DCRN_CPR0_CONFIG_ADDR, + vcpu->arch.cpr0_cfgaddr); + kvmppc_set_gpr(vcpu, rt, + mfdcr(DCRN_CPR0_CONFIG_DATA)); + local_irq_enable(); + break; + default: + vcpu->run->dcr.dcrn = dcrn; + vcpu->run->dcr.data = 0; + vcpu->run->dcr.is_write = 0; + vcpu->arch.dcr_is_write = 0; + vcpu->arch.io_gpr = rt; + vcpu->arch.dcr_needed = 1; + kvmppc_account_exit(vcpu, DCR_EXITS); + return EMULATE_DO_DCR; + } + + return EMULATE_DONE; +} + int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int inst, int *advance) { @@ -50,55 +108,21 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, switch (get_xop(inst)) { case XOP_MFDCR: - /* The guest may access CPR0 registers to determine the timebase - * frequency, and it must know the real host frequency because it - * can directly access the timebase registers. - * - * It would be possible to emulate those accesses in userspace, - * but userspace can really only figure out the end frequency. - * We could decompose that into the factors that compute it, but - * that's tricky math, and it's easier to just report the real - * CPR0 values. - */ - switch (dcrn) { - case DCRN_CPR0_CONFIG_ADDR: - kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr); - break; - case DCRN_CPR0_CONFIG_DATA: - local_irq_disable(); - mtdcr(DCRN_CPR0_CONFIG_ADDR, - vcpu->arch.cpr0_cfgaddr); - kvmppc_set_gpr(vcpu, rt, - mfdcr(DCRN_CPR0_CONFIG_DATA)); - local_irq_enable(); - break; - default: - run->dcr.dcrn = dcrn; - run->dcr.data = 0; - run->dcr.is_write = 0; - vcpu->arch.io_gpr = rt; - vcpu->arch.dcr_needed = 1; - kvmppc_account_exit(vcpu, DCR_EXITS); - emulated = EMULATE_DO_DCR; - } + emulated = emulate_mfdcr(vcpu, rt, dcrn); + break; + case XOP_MFDCRX: + emulated = emulate_mfdcr(vcpu, rt, + kvmppc_get_gpr(vcpu, ra)); break; case XOP_MTDCR: - /* emulate some access in kernel */ - switch (dcrn) { - case DCRN_CPR0_CONFIG_ADDR: - vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs); - break; - default: - run->dcr.dcrn = dcrn; - run->dcr.data = kvmppc_get_gpr(vcpu, rs); - run->dcr.is_write = 1; - vcpu->arch.dcr_needed = 1; - kvmppc_account_exit(vcpu, DCR_EXITS); - emulated = EMULATE_DO_DCR; - } + emulated = emulate_mtdcr(vcpu, rs, dcrn); + break; + case XOP_MTDCRX: + emulated = emulate_mtdcr(vcpu, rs, + kvmppc_get_gpr(vcpu, ra)); break; case XOP_TLBWE: diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index f4dacb9c57f..63c67ec72e4 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -20,6 +20,7 @@ config KVM bool select PREEMPT_NOTIFIERS select ANON_INODES + select HAVE_KVM_EVENTFD config KVM_BOOK3S_HANDLER bool @@ -36,10 +37,11 @@ config KVM_BOOK3S_64_HANDLER config KVM_BOOK3S_PR bool select KVM_MMIO + select MMU_NOTIFIER config KVM_BOOK3S_32 tristate "KVM support for PowerPC book3s_32 processors" - depends on EXPERIMENTAL && PPC_BOOK3S_32 && !SMP && !PTE_64BIT + depends on PPC_BOOK3S_32 && !SMP && !PTE_64BIT select KVM select KVM_BOOK3S_32_HANDLER select KVM_BOOK3S_PR @@ -54,7 +56,7 @@ config KVM_BOOK3S_32 config KVM_BOOK3S_64 tristate "KVM support for PowerPC book3s_64 processors" - depends on EXPERIMENTAL && PPC_BOOK3S_64 + depends on PPC_BOOK3S_64 select KVM_BOOK3S_64_HANDLER select KVM ---help--- @@ -95,7 +97,7 @@ config KVM_BOOKE_HV config KVM_440 bool "KVM support for PowerPC 440 processors" - depends on EXPERIMENTAL && 44x + depends on 
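Anything not on the kernel's short list still takes the pre-existing deferred path: the access is described in the run structure and the vcpu exits to user space with a DCR exit. A rough sketch of the user-space half of that handshake; the exit_reason and dcr field names follow the uapi kvm_run layout of this era, and the board-model helpers are hypothetical:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Hypothetical device models supplied by the VMM. */
    static unsigned int model_dcr_read(unsigned int dcrn) { return 0; }
    static void model_dcr_write(unsigned int dcrn, unsigned int val) { }

    /* Complete a deferred DCR access, then re-enter the guest.  For a
     * read, the value left in run->dcr.data is loaded into the guest
     * GPR when KVM_RUN resumes the vcpu. */
    static int handle_dcr_exit(int vcpu_fd, struct kvm_run *run)
    {
        if (run->exit_reason != KVM_EXIT_DCR)
            return 0;

        if (run->dcr.is_write)
            model_dcr_write(run->dcr.dcrn, run->dcr.data);
        else
            run->dcr.data = model_dcr_read(run->dcr.dcrn);

        return ioctl(vcpu_fd, KVM_RUN, 0);
    }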
44x select KVM select KVM_MMIO ---help--- @@ -120,9 +122,10 @@ config KVM_EXIT_TIMING config KVM_E500V2 bool "KVM support for PowerPC E500v2 processors" - depends on EXPERIMENTAL && E500 && !PPC_E500MC + depends on E500 && !PPC_E500MC select KVM select KVM_MMIO + select MMU_NOTIFIER ---help--- Support running unmodified E500 guest kernels in virtual machines on E500v2 host processors. @@ -134,10 +137,11 @@ config KVM_E500V2 config KVM_E500MC bool "KVM support for PowerPC E500MC/E5500 processors" - depends on EXPERIMENTAL && PPC_E500MC + depends on PPC_E500MC select KVM select KVM_MMIO select KVM_BOOKE_HV + select MMU_NOTIFIER ---help--- Support running unmodified E500MC/E5500 (32-bit) guest kernels in virtual machines on E500MC/E5500 host processors. diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index c2a08636e6d..b772eded8c2 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile @@ -6,10 +6,12 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm -common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) +common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o \ + eventfd.o) CFLAGS_44x_tlb.o := -I. -CFLAGS_e500_tlb.o := -I. +CFLAGS_e500_mmu.o := -I. +CFLAGS_e500_mmu_host.o := -I. CFLAGS_emulate.o := -I. common-objs-y += powerpc.o emulate.o @@ -34,7 +36,8 @@ kvm-e500-objs := \ booke_emulate.o \ booke_interrupts.o \ e500.o \ - e500_tlb.o \ + e500_mmu.o \ + e500_mmu_host.o \ e500_emulate.o kvm-objs-$(CONFIG_KVM_E500V2) := $(kvm-e500-objs) @@ -44,7 +47,8 @@ kvm-e500mc-objs := \ booke_emulate.o \ bookehv_interrupts.o \ e500mc.o \ - e500_tlb.o \ + e500_mmu.o \ + e500_mmu_host.o \ e500_emulate.o kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs) @@ -72,10 +76,12 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \ book3s_hv_rmhandlers.o \ book3s_hv_rm_mmu.o \ book3s_64_vio_hv.o \ + book3s_hv_ras.o \ book3s_hv_builtin.o kvm-book3s_64-module-objs := \ ../../../virt/kvm/kvm_main.o \ + ../../../virt/kvm/eventfd.o \ powerpc.o \ emulate.o \ book3s.o \ diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 3f2a8360c85..a4b64528524 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -411,6 +411,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) return 0; } +int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu) +{ + return 0; +} + +void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu) +{ +} + int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; @@ -476,6 +485,122 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) return -ENOTSUPP; } +int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) +{ + int r; + union kvmppc_one_reg val; + int size; + long int i; + + size = one_reg_size(reg->id); + if (size > sizeof(val)) + return -EINVAL; + + r = kvmppc_get_one_reg(vcpu, reg->id, &val); + + if (r == -EINVAL) { + r = 0; + switch (reg->id) { + case KVM_REG_PPC_DAR: + val = get_reg_val(reg->id, vcpu->arch.shared->dar); + break; + case KVM_REG_PPC_DSISR: + val = get_reg_val(reg->id, vcpu->arch.shared->dsisr); + break; + case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: + i = reg->id - KVM_REG_PPC_FPR0; + val = get_reg_val(reg->id, vcpu->arch.fpr[i]); + break; + case KVM_REG_PPC_FPSCR: + val = get_reg_val(reg->id, vcpu->arch.fpscr); + break; +#ifdef CONFIG_ALTIVEC + case KVM_REG_PPC_VR0 ... 
KVM_REG_PPC_VR31: + if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { + r = -ENXIO; + break; + } + val.vval = vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0]; + break; + case KVM_REG_PPC_VSCR: + if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { + r = -ENXIO; + break; + } + val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]); + break; +#endif /* CONFIG_ALTIVEC */ + default: + r = -EINVAL; + break; + } + } + if (r) + return r; + + if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size)) + r = -EFAULT; + + return r; +} + +int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) +{ + int r; + union kvmppc_one_reg val; + int size; + long int i; + + size = one_reg_size(reg->id); + if (size > sizeof(val)) + return -EINVAL; + + if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size)) + return -EFAULT; + + r = kvmppc_set_one_reg(vcpu, reg->id, &val); + + if (r == -EINVAL) { + r = 0; + switch (reg->id) { + case KVM_REG_PPC_DAR: + vcpu->arch.shared->dar = set_reg_val(reg->id, val); + break; + case KVM_REG_PPC_DSISR: + vcpu->arch.shared->dsisr = set_reg_val(reg->id, val); + break; + case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: + i = reg->id - KVM_REG_PPC_FPR0; + vcpu->arch.fpr[i] = set_reg_val(reg->id, val); + break; + case KVM_REG_PPC_FPSCR: + vcpu->arch.fpscr = set_reg_val(reg->id, val); + break; +#ifdef CONFIG_ALTIVEC + case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31: + if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { + r = -ENXIO; + break; + } + vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; + break; + case KVM_REG_PPC_VSCR: + if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { + r = -ENXIO; + break; + } + vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val); + break; +#endif /* CONFIG_ALTIVEC */ + default: + r = -EINVAL; + break; + } + } + + return r; +} + int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c index b0f625a3334..00e619bf608 100644 --- a/arch/powerpc/kvm/book3s_32_mmu_host.c +++ b/arch/powerpc/kvm/book3s_32_mmu_host.c @@ -155,7 +155,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) /* Get host physical address for gpa */ hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT); - if (is_error_pfn(hpaddr)) { + if (is_error_noslot_pfn(hpaddr)) { printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr); r = -EINVAL; @@ -254,6 +254,7 @@ next_pteg: kvmppc_mmu_hpte_cache_map(vcpu, pte); + kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT); out: return r; } diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index 4d72f9ebc55..ead58e31729 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -93,7 +93,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) /* Get host physical address for gpa */ hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT); - if (is_error_pfn(hpaddr)) { + if (is_error_noslot_pfn(hpaddr)) { printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr); r = -EINVAL; goto out; @@ -171,6 +171,7 @@ map_again: kvmppc_mmu_hpte_cache_map(vcpu, pte); } + kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT); out: return r; diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index d95d11322a1..8cc18abd6dd 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -24,6 +24,9 @@ #include <linux/slab.h> #include 
<linux/hugetlb.h> #include <linux/vmalloc.h> +#include <linux/srcu.h> +#include <linux/anon_inodes.h> +#include <linux/file.h> #include <asm/tlbflush.h> #include <asm/kvm_ppc.h> @@ -40,6 +43,11 @@ /* Power architecture requires HPT is at least 256kB */ #define PPC_MIN_HPT_ORDER 18 +static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags, + long pte_index, unsigned long pteh, + unsigned long ptel, unsigned long *pte_idx_ret); +static void kvmppc_rmap_reset(struct kvm *kvm); + long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp) { unsigned long hpt; @@ -137,10 +145,11 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp) /* Set the entire HPT to 0, i.e. invalid HPTEs */ memset((void *)kvm->arch.hpt_virt, 0, 1ul << order); /* - * Set the whole last_vcpu array to an invalid vcpu number. - * This ensures that each vcpu will flush its TLB on next entry. + * Reset all the reverse-mapping chains for all memslots */ - memset(kvm->arch.last_vcpu, 0xff, sizeof(kvm->arch.last_vcpu)); + kvmppc_rmap_reset(kvm); + /* Ensure that each vcpu will flush its TLB on next entry. */ + cpumask_setall(&kvm->arch.need_tlb_flush); *htab_orderp = order; err = 0; } else { @@ -184,6 +193,7 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, unsigned long addr, hash; unsigned long psize; unsigned long hp0, hp1; + unsigned long idx_ret; long ret; struct kvm *kvm = vcpu->kvm; @@ -215,7 +225,8 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, hash = (hash << 3) + 7; hp_v = hp0 | ((addr >> 16) & ~0x7fUL); hp_r = hp1 | addr; - ret = kvmppc_virtmode_h_enter(vcpu, H_EXACT, hash, hp_v, hp_r); + ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r, + &idx_ret); if (ret != H_SUCCESS) { pr_err("KVM: map_vrma at %lx failed, ret=%ld\n", addr, ret); @@ -260,7 +271,7 @@ static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu) /* * This is called to get a reference to a guest page if there isn't - * one already in the kvm->arch.slot_phys[][] arrays. + * one already in the memslot->arch.slot_phys[] array. */ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn, struct kvm_memory_slot *memslot, @@ -275,7 +286,7 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn, struct vm_area_struct *vma; unsigned long pfn, i, npages; - physp = kvm->arch.slot_phys[memslot->id]; + physp = memslot->arch.slot_phys; if (!physp) return -EINVAL; if (physp[gfn - memslot->base_gfn]) @@ -353,15 +364,10 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn, return err; } -/* - * We come here on a H_ENTER call from the guest when we are not - * using mmu notifiers and we don't have the requested page pinned - * already. 
- */ -long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, - long pte_index, unsigned long pteh, unsigned long ptel) +long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags, + long pte_index, unsigned long pteh, + unsigned long ptel, unsigned long *pte_idx_ret) { - struct kvm *kvm = vcpu->kvm; unsigned long psize, gpa, gfn; struct kvm_memory_slot *memslot; long ret; @@ -389,8 +395,8 @@ long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, do_insert: /* Protect linux PTE lookup from page table destruction */ rcu_read_lock_sched(); /* this disables preemption too */ - vcpu->arch.pgdir = current->mm->pgd; - ret = kvmppc_h_enter(vcpu, flags, pte_index, pteh, ptel); + ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel, + current->mm->pgd, false, pte_idx_ret); rcu_read_unlock_sched(); if (ret == H_TOO_HARD) { /* this can't happen */ @@ -401,6 +407,19 @@ long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, } +/* + * We come here on a H_ENTER call from the guest when we are not + * using mmu notifiers and we don't have the requested page pinned + * already. + */ +long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, + long pte_index, unsigned long pteh, + unsigned long ptel) +{ + return kvmppc_virtmode_do_h_enter(vcpu->kvm, flags, pte_index, + pteh, ptel, &vcpu->arch.gpr[4]); +} + static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu, gva_t eaddr) { @@ -570,7 +589,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, struct kvm *kvm = vcpu->kvm; unsigned long *hptep, hpte[3], r; unsigned long mmu_seq, psize, pte_size; - unsigned long gfn, hva, pfn; + unsigned long gpa, gfn, hva, pfn; struct kvm_memory_slot *memslot; unsigned long *rmap; struct revmap_entry *rev; @@ -608,15 +627,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, /* Translate the logical address and get the page */ psize = hpte_page_size(hpte[0], r); - gfn = hpte_rpn(r, psize); + gpa = (r & HPTE_R_RPN & ~(psize - 1)) | (ea & (psize - 1)); + gfn = gpa >> PAGE_SHIFT; memslot = gfn_to_memslot(kvm, gfn); /* No memslot means it's an emulated MMIO region */ - if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) { - unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1)); + if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, dsisr & DSISR_ISSTORE); - } if (!kvm->arch.using_mmu_notifiers) return -EFAULT; /* should never get here */ @@ -710,7 +728,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, /* Check if we might have been invalidated; let the guest retry if so */ ret = RESUME_GUEST; - if (mmu_notifier_retry(vcpu, mmu_seq)) { + if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) { unlock_rmap(rmap); goto out_unlock; } @@ -756,6 +774,25 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, goto out_put; } +static void kvmppc_rmap_reset(struct kvm *kvm) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + int srcu_idx; + + srcu_idx = srcu_read_lock(&kvm->srcu); + slots = kvm->memslots; + kvm_for_each_memslot(memslot, slots) { + /* + * This assumes it is acceptable to lose reference and + * change bits across a reset. 
+ */ + memset(memslot->arch.rmap, 0, + memslot->npages * sizeof(*memslot->arch.rmap)); + } + srcu_read_unlock(&kvm->srcu, srcu_idx); +} + static int kvm_handle_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, @@ -850,7 +887,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, psize = hpte_page_size(hptep[0], ptel); if ((hptep[0] & HPTE_V_VALID) && hpte_rpn(ptel, psize) == gfn) { - hptep[0] |= HPTE_V_ABSENT; + if (kvm->arch.using_mmu_notifiers) + hptep[0] |= HPTE_V_ABSENT; kvmppc_invalidate_hpte(kvm, hptep, i); /* Harvest R and C */ rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C); @@ -877,6 +915,28 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) return 0; } +void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) +{ + unsigned long *rmapp; + unsigned long gfn; + unsigned long n; + + rmapp = memslot->arch.rmap; + gfn = memslot->base_gfn; + for (n = memslot->npages; n; --n) { + /* + * Testing the present bit without locking is OK because + * the memslot has been marked invalid already, and hence + * no new HPTEs referencing this page can be created, + * thus the present bit can't go from 0 to 1. + */ + if (*rmapp & KVMPPC_RMAP_PRESENT) + kvm_unmap_rmapp(kvm, rmapp, gfn); + ++rmapp; + ++gfn; + } +} + static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, unsigned long gfn) { @@ -1030,16 +1090,16 @@ static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp) return ret; } -long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) +long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot, + unsigned long *map) { unsigned long i; - unsigned long *rmapp, *map; + unsigned long *rmapp; preempt_disable(); rmapp = memslot->arch.rmap; - map = memslot->dirty_bitmap; for (i = 0; i < memslot->npages; ++i) { - if (kvm_test_clear_dirty(kvm, rmapp)) + if (kvm_test_clear_dirty(kvm, rmapp) && map) __set_bit_le(i, map); ++rmapp; } @@ -1057,20 +1117,22 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa, unsigned long hva, psize, offset; unsigned long pa; unsigned long *physp; + int srcu_idx; + srcu_idx = srcu_read_lock(&kvm->srcu); memslot = gfn_to_memslot(kvm, gfn); if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) - return NULL; + goto err; if (!kvm->arch.using_mmu_notifiers) { - physp = kvm->arch.slot_phys[memslot->id]; + physp = memslot->arch.slot_phys; if (!physp) - return NULL; + goto err; physp += gfn - memslot->base_gfn; pa = *physp; if (!pa) { if (kvmppc_get_guest_page(kvm, gfn, memslot, PAGE_SIZE) < 0) - return NULL; + goto err; pa = *physp; } page = pfn_to_page(pa >> PAGE_SHIFT); @@ -1079,9 +1141,11 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa, hva = gfn_to_hva_memslot(memslot, gfn); npages = get_user_pages_fast(hva, 1, 1, pages); if (npages < 1) - return NULL; + goto err; page = pages[0]; } + srcu_read_unlock(&kvm->srcu, srcu_idx); + psize = PAGE_SIZE; if (PageHuge(page)) { page = compound_head(page); @@ -1091,6 +1155,10 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa, if (nb_ret) *nb_ret = psize - offset; return page_address(page) + offset; + + err: + srcu_read_unlock(&kvm->srcu, srcu_idx); + return NULL; } void kvmppc_unpin_guest_page(struct kvm *kvm, void *va) @@ -1100,6 +1168,348 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va) put_page(page); } +/* + * Functions for reading and writing the hash table via reads and + * writes on a file descriptor. 
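The stream format this implements is consumed from user space, typically by migration code. A sketch of a minimal reader, assuming the descriptor came from the KVM_PPC_GET_HTAB_FD vm ioctl and mirroring the uapi header layout locally for illustration:

    #include <stdint.h>
    #include <unistd.h>

    /* Local mirror of the uapi record header: each record is this header
     * followed by n_valid HPTEs of two 64-bit doublewords each, then
     * n_invalid entries that are implied and carry no data. */
    struct kvm_get_htab_header {
        uint32_t index;
        uint16_t n_valid;
        uint16_t n_invalid;
    };

    /* Hand every valid HPTE from one read pass to a callback. */
    static int drain_htab(int htab_fd,
                          void (*cb)(uint32_t index, uint64_t v, uint64_t r))
    {
        char buf[65536];
        ssize_t n;

        while ((n = read(htab_fd, buf, sizeof(buf))) > 0) {
            char *p = buf;

            while (p + sizeof(struct kvm_get_htab_header) <= buf + n) {
                struct kvm_get_htab_header *hdr = (void *)p;
                uint64_t *hpte = (uint64_t *)(hdr + 1);
                uint16_t i;

                p += sizeof(*hdr) + (size_t)hdr->n_valid * 16;
                if (p > buf + n)
                    return -1;          /* truncated record */
                for (i = 0; i < hdr->n_valid; i++)
                    cb(hdr->index + i, hpte[2 * i], hpte[2 * i + 1]);
            }
        }
        return n < 0 ? -1 : 0;
    }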
+ * + * Reads return the guest view of the hash table, which has to be + * pieced together from the real hash table and the guest_rpte + * values in the revmap array. + * + * On writes, each HPTE written is considered in turn, and if it + * is valid, it is written to the HPT as if an H_ENTER with the + * exact flag set was done. When the invalid count is non-zero + * in the header written to the stream, the kernel will make + * sure that that many HPTEs are invalid, and invalidate them + * if not. + */ + +struct kvm_htab_ctx { + unsigned long index; + unsigned long flags; + struct kvm *kvm; + int first_pass; +}; + +#define HPTE_SIZE (2 * sizeof(unsigned long)) + +static long record_hpte(unsigned long flags, unsigned long *hptp, + unsigned long *hpte, struct revmap_entry *revp, + int want_valid, int first_pass) +{ + unsigned long v, r; + int ok = 1; + int valid, dirty; + + /* Unmodified entries are uninteresting except on the first pass */ + dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED); + if (!first_pass && !dirty) + return 0; + + valid = 0; + if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) { + valid = 1; + if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && + !(hptp[0] & HPTE_V_BOLTED)) + valid = 0; + } + if (valid != want_valid) + return 0; + + v = r = 0; + if (valid || dirty) { + /* lock the HPTE so it's stable and read it */ + preempt_disable(); + while (!try_lock_hpte(hptp, HPTE_V_HVLOCK)) + cpu_relax(); + v = hptp[0]; + if (v & HPTE_V_ABSENT) { + v &= ~HPTE_V_ABSENT; + v |= HPTE_V_VALID; + } + /* re-evaluate valid and dirty from synchronized HPTE value */ + valid = !!(v & HPTE_V_VALID); + if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && !(v & HPTE_V_BOLTED)) + valid = 0; + r = revp->guest_rpte | (hptp[1] & (HPTE_R_R | HPTE_R_C)); + dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED); + /* only clear modified if this is the right sort of entry */ + if (valid == want_valid && dirty) { + r &= ~HPTE_GR_MODIFIED; + revp->guest_rpte = r; + } + asm volatile(PPC_RELEASE_BARRIER "" : : : "memory"); + hptp[0] &= ~HPTE_V_HVLOCK; + preempt_enable(); + if (!(valid == want_valid && (first_pass || dirty))) + ok = 0; + } + hpte[0] = v; + hpte[1] = r; + return ok; +} + +static ssize_t kvm_htab_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct kvm_htab_ctx *ctx = file->private_data; + struct kvm *kvm = ctx->kvm; + struct kvm_get_htab_header hdr; + unsigned long *hptp; + struct revmap_entry *revp; + unsigned long i, nb, nw; + unsigned long __user *lbuf; + struct kvm_get_htab_header __user *hptr; + unsigned long flags; + int first_pass; + unsigned long hpte[2]; + + if (!access_ok(VERIFY_WRITE, buf, count)) + return -EFAULT; + + first_pass = ctx->first_pass; + flags = ctx->flags; + + i = ctx->index; + hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE)); + revp = kvm->arch.revmap + i; + lbuf = (unsigned long __user *)buf; + + nb = 0; + while (nb + sizeof(hdr) + HPTE_SIZE < count) { + /* Initialize header */ + hptr = (struct kvm_get_htab_header __user *)buf; + hdr.n_valid = 0; + hdr.n_invalid = 0; + nw = nb; + nb += sizeof(hdr); + lbuf = (unsigned long __user *)(buf + sizeof(hdr)); + + /* Skip uninteresting entries, i.e. 
clean on not-first pass */ + if (!first_pass) { + while (i < kvm->arch.hpt_npte && + !(revp->guest_rpte & HPTE_GR_MODIFIED)) { + ++i; + hptp += 2; + ++revp; + } + } + hdr.index = i; + + /* Grab a series of valid entries */ + while (i < kvm->arch.hpt_npte && + hdr.n_valid < 0xffff && + nb + HPTE_SIZE < count && + record_hpte(flags, hptp, hpte, revp, 1, first_pass)) { + /* valid entry, write it out */ + ++hdr.n_valid; + if (__put_user(hpte[0], lbuf) || + __put_user(hpte[1], lbuf + 1)) + return -EFAULT; + nb += HPTE_SIZE; + lbuf += 2; + ++i; + hptp += 2; + ++revp; + } + /* Now skip invalid entries while we can */ + while (i < kvm->arch.hpt_npte && + hdr.n_invalid < 0xffff && + record_hpte(flags, hptp, hpte, revp, 0, first_pass)) { + /* found an invalid entry */ + ++hdr.n_invalid; + ++i; + hptp += 2; + ++revp; + } + + if (hdr.n_valid || hdr.n_invalid) { + /* write back the header */ + if (__copy_to_user(hptr, &hdr, sizeof(hdr))) + return -EFAULT; + nw = nb; + buf = (char __user *)lbuf; + } else { + nb = nw; + } + + /* Check if we've wrapped around the hash table */ + if (i >= kvm->arch.hpt_npte) { + i = 0; + ctx->first_pass = 0; + break; + } + } + + ctx->index = i; + + return nb; +} + +static ssize_t kvm_htab_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct kvm_htab_ctx *ctx = file->private_data; + struct kvm *kvm = ctx->kvm; + struct kvm_get_htab_header hdr; + unsigned long i, j; + unsigned long v, r; + unsigned long __user *lbuf; + unsigned long *hptp; + unsigned long tmp[2]; + ssize_t nb; + long int err, ret; + int rma_setup; + + if (!access_ok(VERIFY_READ, buf, count)) + return -EFAULT; + + /* lock out vcpus from running while we're doing this */ + mutex_lock(&kvm->lock); + rma_setup = kvm->arch.rma_setup_done; + if (rma_setup) { + kvm->arch.rma_setup_done = 0; /* temporarily */ + /* order rma_setup_done vs. 
vcpus_running */ + smp_mb(); + if (atomic_read(&kvm->arch.vcpus_running)) { + kvm->arch.rma_setup_done = 1; + mutex_unlock(&kvm->lock); + return -EBUSY; + } + } + + err = 0; + for (nb = 0; nb + sizeof(hdr) <= count; ) { + err = -EFAULT; + if (__copy_from_user(&hdr, buf, sizeof(hdr))) + break; + + err = 0; + if (nb + hdr.n_valid * HPTE_SIZE > count) + break; + + nb += sizeof(hdr); + buf += sizeof(hdr); + + err = -EINVAL; + i = hdr.index; + if (i >= kvm->arch.hpt_npte || + i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte) + break; + + hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE)); + lbuf = (unsigned long __user *)buf; + for (j = 0; j < hdr.n_valid; ++j) { + err = -EFAULT; + if (__get_user(v, lbuf) || __get_user(r, lbuf + 1)) + goto out; + err = -EINVAL; + if (!(v & HPTE_V_VALID)) + goto out; + lbuf += 2; + nb += HPTE_SIZE; + + if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) + kvmppc_do_h_remove(kvm, 0, i, 0, tmp); + err = -EIO; + ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r, + tmp); + if (ret != H_SUCCESS) { + pr_err("kvm_htab_write ret %ld i=%ld v=%lx " + "r=%lx\n", ret, i, v, r); + goto out; + } + if (!rma_setup && is_vrma_hpte(v)) { + unsigned long psize = hpte_page_size(v, r); + unsigned long senc = slb_pgsize_encoding(psize); + unsigned long lpcr; + + kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | + (VRMA_VSID << SLB_VSID_SHIFT_1T); + lpcr = kvm->arch.lpcr & ~LPCR_VRMASD; + lpcr |= senc << (LPCR_VRMASD_SH - 4); + kvm->arch.lpcr = lpcr; + rma_setup = 1; + } + ++i; + hptp += 2; + } + + for (j = 0; j < hdr.n_invalid; ++j) { + if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) + kvmppc_do_h_remove(kvm, 0, i, 0, tmp); + ++i; + hptp += 2; + } + err = 0; + } + + out: + /* Order HPTE updates vs. rma_setup_done */ + smp_wmb(); + kvm->arch.rma_setup_done = rma_setup; + mutex_unlock(&kvm->lock); + + if (err) + return err; + return nb; +} + +static int kvm_htab_release(struct inode *inode, struct file *filp) +{ + struct kvm_htab_ctx *ctx = filp->private_data; + + filp->private_data = NULL; + if (!(ctx->flags & KVM_GET_HTAB_WRITE)) + atomic_dec(&ctx->kvm->arch.hpte_mod_interest); + kvm_put_kvm(ctx->kvm); + kfree(ctx); + return 0; +} + +static struct file_operations kvm_htab_fops = { + .read = kvm_htab_read, + .write = kvm_htab_write, + .llseek = default_llseek, + .release = kvm_htab_release, +}; + +int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf) +{ + int ret; + struct kvm_htab_ctx *ctx; + int rwflag; + + /* reject flags we don't recognize */ + if (ghf->flags & ~(KVM_GET_HTAB_BOLTED_ONLY | KVM_GET_HTAB_WRITE)) + return -EINVAL; + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + kvm_get_kvm(kvm); + ctx->kvm = kvm; + ctx->index = ghf->start_index; + ctx->flags = ghf->flags; + ctx->first_pass = 1; + + rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY; + ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag); + if (ret < 0) { + kvm_put_kvm(kvm); + return ret; + } + + if (rwflag == O_RDONLY) { + mutex_lock(&kvm->slots_lock); + atomic_inc(&kvm->arch.hpte_mod_interest); + /* make sure kvmppc_do_h_enter etc. 
see the increment */ + synchronize_srcu_expedited(&kvm->srcu); + mutex_unlock(&kvm->slots_lock); + } + + return ret; +} + void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu) { struct kvmppc_mmu *mmu = &vcpu->arch.mmu; diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index b9a989dc76c..836c56975e2 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -22,6 +22,7 @@ #include <asm/kvm_book3s.h> #include <asm/reg.h> #include <asm/switch_to.h> +#include <asm/time.h> #define OP_19_XOP_RFID 18 #define OP_19_XOP_RFI 50 @@ -33,6 +34,8 @@ #define OP_31_XOP_MTSRIN 242 #define OP_31_XOP_TLBIEL 274 #define OP_31_XOP_TLBIE 306 +/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */ +#define OP_31_XOP_FAKE_SC1 308 #define OP_31_XOP_SLBMTE 402 #define OP_31_XOP_SLBIE 434 #define OP_31_XOP_SLBIA 498 @@ -169,6 +172,32 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, vcpu->arch.mmu.tlbie(vcpu, addr, large); break; } +#ifdef CONFIG_KVM_BOOK3S_64_PR + case OP_31_XOP_FAKE_SC1: + { + /* SC 1 papr hypercalls */ + ulong cmd = kvmppc_get_gpr(vcpu, 3); + int i; + + if ((vcpu->arch.shared->msr & MSR_PR) || + !vcpu->arch.papr_enabled) { + emulated = EMULATE_FAIL; + break; + } + + if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) + break; + + run->papr_hcall.nr = cmd; + for (i = 0; i < 9; ++i) { + ulong gpr = kvmppc_get_gpr(vcpu, 4 + i); + run->papr_hcall.args[i] = gpr; + } + + emulated = EMULATE_DO_PAPR; + break; + } +#endif case OP_31_XOP_EIOIO: break; case OP_31_XOP_SLBMTE: @@ -395,6 +424,12 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) (mfmsr() & MSR_HV)) vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; break; + case SPRN_PURR: + to_book3s(vcpu)->purr_offset = spr_val - get_tb(); + break; + case SPRN_SPURR: + to_book3s(vcpu)->spurr_offset = spr_val - get_tb(); + break; case SPRN_GQR0: case SPRN_GQR1: case SPRN_GQR2: @@ -412,6 +447,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) case SPRN_CTRLF: case SPRN_CTRLT: case SPRN_L2CR: + case SPRN_DSCR: case SPRN_MMCR0_GEKKO: case SPRN_MMCR1_GEKKO: case SPRN_PMC1_GEKKO: @@ -419,6 +455,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) case SPRN_PMC3_GEKKO: case SPRN_PMC4_GEKKO: case SPRN_WPAR_GEKKO: + case SPRN_MSSSR0: break; unprivileged: default: @@ -483,9 +520,15 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) *spr_val = to_book3s(vcpu)->hid[5]; break; case SPRN_CFAR: - case SPRN_PURR: + case SPRN_DSCR: *spr_val = 0; break; + case SPRN_PURR: + *spr_val = get_tb() + to_book3s(vcpu)->purr_offset; + break; + case SPRN_SPURR: + *spr_val = get_tb() + to_book3s(vcpu)->purr_offset; + break; case SPRN_GQR0: case SPRN_GQR1: case SPRN_GQR2: @@ -509,6 +552,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) case SPRN_PMC3_GEKKO: case SPRN_PMC4_GEKKO: case SPRN_WPAR_GEKKO: + case SPRN_MSSSR0: *spr_val = 0; break; default: diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c index a150817d6d4..7057a02f090 100644 --- a/arch/powerpc/kvm/book3s_exports.c +++ b/arch/powerpc/kvm/book3s_exports.c @@ -28,8 +28,5 @@ EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu); #ifdef CONFIG_ALTIVEC EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec); #endif -#ifdef CONFIG_VSX -EXPORT_SYMBOL_GPL(kvmppc_load_up_vsx); -#endif #endif diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 
721d4603a23..80dcc53a1ab 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -30,6 +30,7 @@ #include <linux/cpumask.h> #include <linux/spinlock.h> #include <linux/page-flags.h> +#include <linux/srcu.h> #include <asm/reg.h> #include <asm/cputable.h> @@ -46,6 +47,7 @@ #include <asm/page.h> #include <asm/hvcall.h> #include <asm/switch_to.h> +#include <asm/smp.h> #include <linux/gfp.h> #include <linux/vmalloc.h> #include <linux/highmem.h> @@ -55,25 +57,77 @@ /* #define EXIT_DEBUG_SIMPLE */ /* #define EXIT_DEBUG_INT */ +/* Used to indicate that a guest page fault needs to be handled */ +#define RESUME_PAGE_FAULT (RESUME_GUEST | RESUME_FLAG_ARCH1) + +/* Used as a "null" value for timebase values */ +#define TB_NIL (~(u64)0) + static void kvmppc_end_cede(struct kvm_vcpu *vcpu); static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); +/* + * We use the vcpu_load/put functions to measure stolen time. + * Stolen time is counted as time when either the vcpu is able to + * run as part of a virtual core, but the task running the vcore + * is preempted or sleeping, or when the vcpu needs something done + * in the kernel by the task running the vcpu, but that task is + * preempted or sleeping. Those two things have to be counted + * separately, since one of the vcpu tasks will take on the job + * of running the core, and the other vcpu tasks in the vcore will + * sleep waiting for it to do that, but that sleep shouldn't count + * as stolen time. + * + * Hence we accumulate stolen time when the vcpu can run as part of + * a vcore using vc->stolen_tb, and the stolen time when the vcpu + * needs its task to do other things in the kernel (for example, + * service a page fault) in busy_stolen. We don't accumulate + * stolen time for a vcore when it is inactive, or for a vcpu + * when it is in state RUNNING or NOTREADY. NOTREADY is a bit of + * a misnomer; it means that the vcpu task is not executing in + * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in + * the kernel. We don't have any way of dividing up that time + * between time that the vcpu is genuinely stopped, time that + * the task is actively working on behalf of the vcpu, and time + * that the task is preempted, so we don't count any of it as + * stolen. + * + * Updates to busy_stolen are protected by arch.tbacct_lock; + * updates to vc->stolen_tb are protected by the arch.tbacct_lock + * of the vcpu that has taken responsibility for running the vcore + * (i.e. vc->runner). The stolen times are measured in units of + * timebase ticks. (Note that the != TB_NIL checks below are + * purely defensive; they should never fail.) 
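A compressed model of the bookkeeping described above, with the locking stripped out; this illustrates the two buckets only and is not the kernel code:

    typedef unsigned long long u64;

    /* Two stolen-time buckets, in timebase ticks: one per vcore for time
     * the runner task was preempted, one per vcpu for time its task was
     * busy in the host.  TB_NIL means "not currently accumulating". */
    #define TB_NIL (~(u64)0)

    struct vcore_model { u64 stolen_tb, preempt_tb; };
    struct vcpu_model  { u64 busy_stolen, busy_preempt; };

    static void vcore_preempted(struct vcore_model *vc, u64 now)
    {
        vc->preempt_tb = now;
    }

    static void vcore_resumed(struct vcore_model *vc, u64 now)
    {
        if (vc->preempt_tb != TB_NIL) {
            vc->stolen_tb += now - vc->preempt_tb;
            vc->preempt_tb = TB_NIL;
        }
    }

    static void vcpu_blocked_in_host(struct vcpu_model *vp, u64 now)
    {
        vp->busy_preempt = now;
    }

    static void vcpu_back_in_guest(struct vcpu_model *vp, u64 now)
    {
        if (vp->busy_preempt != TB_NIL) {
            vp->busy_stolen += now - vp->busy_preempt;
            vp->busy_preempt = TB_NIL;
        }
    }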
+ */ + void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { struct kvmppc_vcore *vc = vcpu->arch.vcore; - local_paca->kvm_hstate.kvm_vcpu = vcpu; - local_paca->kvm_hstate.kvm_vcore = vc; - if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) + spin_lock(&vcpu->arch.tbacct_lock); + if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE && + vc->preempt_tb != TB_NIL) { vc->stolen_tb += mftb() - vc->preempt_tb; + vc->preempt_tb = TB_NIL; + } + if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST && + vcpu->arch.busy_preempt != TB_NIL) { + vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; + vcpu->arch.busy_preempt = TB_NIL; + } + spin_unlock(&vcpu->arch.tbacct_lock); } void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) { struct kvmppc_vcore *vc = vcpu->arch.vcore; + spin_lock(&vcpu->arch.tbacct_lock); if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) vc->preempt_tb = mftb(); + if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) + vcpu->arch.busy_preempt = mftb(); + spin_unlock(&vcpu->arch.tbacct_lock); } void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) @@ -142,6 +196,22 @@ static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa) vpa->yield_count = 1; } +static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v, + unsigned long addr, unsigned long len) +{ + /* check address is cacheline aligned */ + if (addr & (L1_CACHE_BYTES - 1)) + return -EINVAL; + spin_lock(&vcpu->arch.vpa_update_lock); + if (v->next_gpa != addr || v->len != len) { + v->next_gpa = addr; + v->len = addr ? len : 0; + v->update_pending = 1; + } + spin_unlock(&vcpu->arch.vpa_update_lock); + return 0; +} + /* Length for a per-processor buffer is passed in at offset 4 in the buffer */ struct reg_vpa { u32 dummy; @@ -317,10 +387,16 @@ static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap) static void kvmppc_update_vpas(struct kvm_vcpu *vcpu) { + if (!(vcpu->arch.vpa.update_pending || + vcpu->arch.slb_shadow.update_pending || + vcpu->arch.dtl.update_pending)) + return; + spin_lock(&vcpu->arch.vpa_update_lock); if (vcpu->arch.vpa.update_pending) { kvmppc_update_vpa(vcpu, &vcpu->arch.vpa); - init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); + if (vcpu->arch.vpa.pinned_addr) + init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); } if (vcpu->arch.dtl.update_pending) { kvmppc_update_vpa(vcpu, &vcpu->arch.dtl); @@ -332,24 +408,61 @@ static void kvmppc_update_vpas(struct kvm_vcpu *vcpu) spin_unlock(&vcpu->arch.vpa_update_lock); } +/* + * Return the accumulated stolen time for the vcore up until `now'. + * The caller should hold the vcore lock. + */ +static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now) +{ + u64 p; + + /* + * If we are the task running the vcore, then since we hold + * the vcore lock, we can't be preempted, so stolen_tb/preempt_tb + * can't be updated, so we don't need the tbacct_lock. + * If the vcore is inactive, it can't become active (since we + * hold the vcore lock), so the vcpu load/put functions won't + * update stolen_tb/preempt_tb, and we don't need tbacct_lock. 
+ */ + if (vc->vcore_state != VCORE_INACTIVE && + vc->runner->arch.run_task != current) { + spin_lock(&vc->runner->arch.tbacct_lock); + p = vc->stolen_tb; + if (vc->preempt_tb != TB_NIL) + p += now - vc->preempt_tb; + spin_unlock(&vc->runner->arch.tbacct_lock); + } else { + p = vc->stolen_tb; + } + return p; +} + static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc) { struct dtl_entry *dt; struct lppaca *vpa; - unsigned long old_stolen; + unsigned long stolen; + unsigned long core_stolen; + u64 now; dt = vcpu->arch.dtl_ptr; vpa = vcpu->arch.vpa.pinned_addr; - old_stolen = vcpu->arch.stolen_logged; - vcpu->arch.stolen_logged = vc->stolen_tb; + now = mftb(); + core_stolen = vcore_stolen_time(vc, now); + stolen = core_stolen - vcpu->arch.stolen_logged; + vcpu->arch.stolen_logged = core_stolen; + spin_lock(&vcpu->arch.tbacct_lock); + stolen += vcpu->arch.busy_stolen; + vcpu->arch.busy_stolen = 0; + spin_unlock(&vcpu->arch.tbacct_lock); if (!dt || !vpa) return; memset(dt, 0, sizeof(struct dtl_entry)); dt->dispatch_reason = 7; dt->processor_id = vc->pcpu + vcpu->arch.ptid; - dt->timebase = mftb(); - dt->enqueue_to_dispatch_time = vc->stolen_tb - old_stolen; + dt->timebase = now; + dt->enqueue_to_dispatch_time = stolen; dt->srr0 = kvmppc_get_pc(vcpu); dt->srr1 = vcpu->arch.shregs.msr; ++dt; @@ -366,13 +479,16 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) unsigned long req = kvmppc_get_gpr(vcpu, 3); unsigned long target, ret = H_SUCCESS; struct kvm_vcpu *tvcpu; + int idx; switch (req) { case H_ENTER: + idx = srcu_read_lock(&vcpu->kvm->srcu); ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6), kvmppc_get_gpr(vcpu, 7)); + srcu_read_unlock(&vcpu->kvm->srcu, idx); break; case H_CEDE: break; @@ -429,6 +545,17 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, case BOOK3S_INTERRUPT_PERFMON: r = RESUME_GUEST; break; + case BOOK3S_INTERRUPT_MACHINE_CHECK: + /* + * Deliver a machine check interrupt to the guest. + * We have to do this, even if the host has handled the + * machine check, because machine checks use SRR0/1 and + * the interrupt might have trashed guest state in them. + */ + kvmppc_book3s_queue_irqprio(vcpu, + BOOK3S_INTERRUPT_MACHINE_CHECK); + r = RESUME_GUEST; + break; case BOOK3S_INTERRUPT_PROGRAM: { ulong flags; @@ -470,12 +597,12 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, * have been handled already. */ case BOOK3S_INTERRUPT_H_DATA_STORAGE: - r = kvmppc_book3s_hv_page_fault(run, vcpu, - vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); + r = RESUME_PAGE_FAULT; break; case BOOK3S_INTERRUPT_H_INST_STORAGE: - r = kvmppc_book3s_hv_page_fault(run, vcpu, - kvmppc_get_pc(vcpu), 0); + vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); + vcpu->arch.fault_dsisr = 0; + r = RESUME_PAGE_FAULT; break; /* * This occurs if the guest executes an illegal instruction. 
@@ -535,36 +662,174 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, return 0; } -int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) +int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) { - int r = -EINVAL; + int r = 0; + long int i; - switch (reg->id) { + switch (id) { case KVM_REG_PPC_HIOR: - r = put_user(0, (u64 __user *)reg->addr); + *val = get_reg_val(id, 0); + break; + case KVM_REG_PPC_DABR: + *val = get_reg_val(id, vcpu->arch.dabr); + break; + case KVM_REG_PPC_DSCR: + *val = get_reg_val(id, vcpu->arch.dscr); + break; + case KVM_REG_PPC_PURR: + *val = get_reg_val(id, vcpu->arch.purr); + break; + case KVM_REG_PPC_SPURR: + *val = get_reg_val(id, vcpu->arch.spurr); + break; + case KVM_REG_PPC_AMR: + *val = get_reg_val(id, vcpu->arch.amr); + break; + case KVM_REG_PPC_UAMOR: + *val = get_reg_val(id, vcpu->arch.uamor); + break; + case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA: + i = id - KVM_REG_PPC_MMCR0; + *val = get_reg_val(id, vcpu->arch.mmcr[i]); + break; + case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8: + i = id - KVM_REG_PPC_PMC1; + *val = get_reg_val(id, vcpu->arch.pmc[i]); + break; +#ifdef CONFIG_VSX + case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: + if (cpu_has_feature(CPU_FTR_VSX)) { + /* VSX => FP reg i is stored in arch.vsr[2*i] */ + long int i = id - KVM_REG_PPC_FPR0; + *val = get_reg_val(id, vcpu->arch.vsr[2 * i]); + } else { + /* let generic code handle it */ + r = -EINVAL; + } + break; + case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: + if (cpu_has_feature(CPU_FTR_VSX)) { + long int i = id - KVM_REG_PPC_VSR0; + val->vsxval[0] = vcpu->arch.vsr[2 * i]; + val->vsxval[1] = vcpu->arch.vsr[2 * i + 1]; + } else { + r = -ENXIO; + } + break; +#endif /* CONFIG_VSX */ + case KVM_REG_PPC_VPA_ADDR: + spin_lock(&vcpu->arch.vpa_update_lock); + *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); + spin_unlock(&vcpu->arch.vpa_update_lock); + break; + case KVM_REG_PPC_VPA_SLB: + spin_lock(&vcpu->arch.vpa_update_lock); + val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa; + val->vpaval.length = vcpu->arch.slb_shadow.len; + spin_unlock(&vcpu->arch.vpa_update_lock); + break; + case KVM_REG_PPC_VPA_DTL: + spin_lock(&vcpu->arch.vpa_update_lock); + val->vpaval.addr = vcpu->arch.dtl.next_gpa; + val->vpaval.length = vcpu->arch.dtl.len; + spin_unlock(&vcpu->arch.vpa_update_lock); break; default: + r = -EINVAL; break; } return r; } -int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) +int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) { - int r = -EINVAL; + int r = 0; + long int i; + unsigned long addr, len; - switch (reg->id) { + switch (id) { case KVM_REG_PPC_HIOR: - { - u64 hior; /* Only allow this to be set to zero */ - r = get_user(hior, (u64 __user *)reg->addr); - if (!r && (hior != 0)) + if (set_reg_val(id, *val)) r = -EINVAL; break; - } + case KVM_REG_PPC_DABR: + vcpu->arch.dabr = set_reg_val(id, *val); + break; + case KVM_REG_PPC_DSCR: + vcpu->arch.dscr = set_reg_val(id, *val); + break; + case KVM_REG_PPC_PURR: + vcpu->arch.purr = set_reg_val(id, *val); + break; + case KVM_REG_PPC_SPURR: + vcpu->arch.spurr = set_reg_val(id, *val); + break; + case KVM_REG_PPC_AMR: + vcpu->arch.amr = set_reg_val(id, *val); + break; + case KVM_REG_PPC_UAMOR: + vcpu->arch.uamor = set_reg_val(id, *val); + break; + case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA: + i = id - KVM_REG_PPC_MMCR0; + vcpu->arch.mmcr[i] = set_reg_val(id, *val); + break; + case KVM_REG_PPC_PMC1 ... 
KVM_REG_PPC_PMC8: + i = id - KVM_REG_PPC_PMC1; + vcpu->arch.pmc[i] = set_reg_val(id, *val); + break; +#ifdef CONFIG_VSX + case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: + if (cpu_has_feature(CPU_FTR_VSX)) { + /* VSX => FP reg i is stored in arch.vsr[2*i] */ + long int i = id - KVM_REG_PPC_FPR0; + vcpu->arch.vsr[2 * i] = set_reg_val(id, *val); + } else { + /* let generic code handle it */ + r = -EINVAL; + } + break; + case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: + if (cpu_has_feature(CPU_FTR_VSX)) { + long int i = id - KVM_REG_PPC_VSR0; + vcpu->arch.vsr[2 * i] = val->vsxval[0]; + vcpu->arch.vsr[2 * i + 1] = val->vsxval[1]; + } else { + r = -ENXIO; + } + break; +#endif /* CONFIG_VSX */ + case KVM_REG_PPC_VPA_ADDR: + addr = set_reg_val(id, *val); + r = -EINVAL; + if (!addr && (vcpu->arch.slb_shadow.next_gpa || + vcpu->arch.dtl.next_gpa)) + break; + r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); + break; + case KVM_REG_PPC_VPA_SLB: + addr = val->vpaval.addr; + len = val->vpaval.length; + r = -EINVAL; + if (addr && !vcpu->arch.vpa.next_gpa) + break; + r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len); + break; + case KVM_REG_PPC_VPA_DTL: + addr = val->vpaval.addr; + len = val->vpaval.length; + r = -EINVAL; + if (addr && (len < sizeof(struct dtl_entry) || + !vcpu->arch.vpa.next_gpa)) + break; + len -= len % sizeof(struct dtl_entry); + r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); + break; default: + r = -EINVAL; break; } @@ -599,20 +864,18 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) goto free_vcpu; vcpu->arch.shared = &vcpu->arch.shregs; - vcpu->arch.last_cpu = -1; vcpu->arch.mmcr[0] = MMCR0_FC; vcpu->arch.ctrl = CTRL_RUNLATCH; /* default to host PVR, since we can't spoof it */ vcpu->arch.pvr = mfspr(SPRN_PVR); kvmppc_set_pvr(vcpu, vcpu->arch.pvr); spin_lock_init(&vcpu->arch.vpa_update_lock); + spin_lock_init(&vcpu->arch.tbacct_lock); + vcpu->arch.busy_preempt = TB_NIL; kvmppc_mmu_book3s_hv_init(vcpu); - /* - * We consider the vcpu stopped until we see the first run ioctl for it. 
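The one_reg VPA handlers above encode an ordering contract for user space: the VPA itself has to be registered before the SLB shadow buffer or the dispatch trace log, it cannot be unregistered while either of those is still set, and the DTL length is truncated to a whole number of dtl_entry records. A rough user-space sketch of honouring that order, assuming guest-physical buffers already set aside by the VMM and the KVM_REG_PPC_VPA_* ids from the uapi headers:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int set_one_reg(int vcpu_fd, uint64_t id, const void *val)
    {
        struct kvm_one_reg reg = { .id = id, .addr = (uintptr_t)val };

        return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }

    /* vpa_gpa/dtl_gpa/dtl_len are hypothetical guest-physical addresses
     * and sizes chosen by the VMM; the VPA must be cacheline aligned and
     * has to be registered before the DTL. */
    static int register_lppaca(int vcpu_fd, uint64_t vpa_gpa,
                               uint64_t dtl_gpa, uint64_t dtl_len)
    {
        uint64_t vpa = vpa_gpa;
        struct { uint64_t addr, length; } dtl = { dtl_gpa, dtl_len };

        if (set_one_reg(vcpu_fd, KVM_REG_PPC_VPA_ADDR, &vpa))
            return -1;
        return set_one_reg(vcpu_fd, KVM_REG_PPC_VPA_DTL, &dtl);
    }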
- */ - vcpu->arch.state = KVMPPC_VCPU_STOPPED; + vcpu->arch.state = KVMPPC_VCPU_NOTREADY; init_waitqueue_head(&vcpu->arch.cpu_run); @@ -624,9 +887,10 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) INIT_LIST_HEAD(&vcore->runnable_threads); spin_lock_init(&vcore->lock); init_waitqueue_head(&vcore->wq); - vcore->preempt_tb = mftb(); + vcore->preempt_tb = TB_NIL; } kvm->arch.vcores[core] = vcore; + kvm->arch.online_vcores++; } mutex_unlock(&kvm->lock); @@ -637,7 +901,6 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) ++vcore->num_threads; spin_unlock(&vcore->lock); vcpu->arch.vcore = vcore; - vcpu->arch.stolen_logged = vcore->stolen_tb; vcpu->arch.cpu_type = KVM_CPU_3S_64; kvmppc_sanity_check(vcpu); @@ -697,17 +960,18 @@ extern void xics_wake_cpu(int cpu); static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, struct kvm_vcpu *vcpu) { - struct kvm_vcpu *v; + u64 now; if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) return; + spin_lock(&vcpu->arch.tbacct_lock); + now = mftb(); + vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - + vcpu->arch.stolen_logged; + vcpu->arch.busy_preempt = now; vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; + spin_unlock(&vcpu->arch.tbacct_lock); --vc->n_runnable; - ++vc->n_busy; - /* decrement the physical thread id of each following vcpu */ - v = vcpu; - list_for_each_entry_continue(v, &vc->runnable_threads, arch.run_list) - --v->arch.ptid; list_del(&vcpu->arch.run_list); } @@ -720,6 +984,7 @@ static int kvmppc_grab_hwthread(int cpu) /* Ensure the thread won't go into the kernel if it wakes */ tpaca->kvm_hstate.hwthread_req = 1; + tpaca->kvm_hstate.kvm_vcpu = NULL; /* * If the thread is already executing in the kernel (e.g. handling @@ -769,7 +1034,6 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu) smp_wmb(); #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) if (vcpu->arch.ptid) { - kvmppc_grab_hwthread(cpu); xics_wake_cpu(cpu); ++vc->n_woken; } @@ -795,7 +1059,8 @@ static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc) /* * Check that we are on thread 0 and that any other threads in - * this core are off-line. + * this core are off-line. Then grab the threads so they can't + * enter the kernel. */ static int on_primary_thread(void) { @@ -807,6 +1072,17 @@ static int on_primary_thread(void) while (++thr < threads_per_core) if (cpu_online(cpu + thr)) return 0; + + /* Grab all hw threads so they can't go into the kernel */ + for (thr = 1; thr < threads_per_core; ++thr) { + if (kvmppc_grab_hwthread(cpu + thr)) { + /* Couldn't grab one; let the others go */ + do { + kvmppc_release_hwthread(cpu + thr); + } while (--thr > 0); + return 0; + } + } return 1; } @@ -814,21 +1090,24 @@ static int on_primary_thread(void) * Run a set of guest threads on a physical core. * Called with vc->lock held. 
*/ -static int kvmppc_run_core(struct kvmppc_vcore *vc) +static void kvmppc_run_core(struct kvmppc_vcore *vc) { struct kvm_vcpu *vcpu, *vcpu0, *vnext; long ret; u64 now; int ptid, i, need_vpa_update; + int srcu_idx; + struct kvm_vcpu *vcpus_to_update[threads_per_core]; /* don't start if any threads have a signal pending */ need_vpa_update = 0; list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { if (signal_pending(vcpu->arch.run_task)) - return 0; - need_vpa_update |= vcpu->arch.vpa.update_pending | - vcpu->arch.slb_shadow.update_pending | - vcpu->arch.dtl.update_pending; + return; + if (vcpu->arch.vpa.update_pending || + vcpu->arch.slb_shadow.update_pending || + vcpu->arch.dtl.update_pending) + vcpus_to_update[need_vpa_update++] = vcpu; } /* @@ -838,7 +1117,7 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc) vc->n_woken = 0; vc->nap_count = 0; vc->entry_exit_count = 0; - vc->vcore_state = VCORE_RUNNING; + vc->vcore_state = VCORE_STARTING; vc->in_guest = 0; vc->napping_threads = 0; @@ -848,24 +1127,12 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc) */ if (need_vpa_update) { spin_unlock(&vc->lock); - list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) - kvmppc_update_vpas(vcpu); + for (i = 0; i < need_vpa_update; ++i) + kvmppc_update_vpas(vcpus_to_update[i]); spin_lock(&vc->lock); } /* - * Make sure we are running on thread 0, and that - * secondary threads are offline. - * XXX we should also block attempts to bring any - * secondary threads online. - */ - if (threads_per_core > 1 && !on_primary_thread()) { - list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) - vcpu->arch.ret = -EBUSY; - goto out; - } - - /* * Assign physical thread IDs, first to non-ceded vcpus * and then to ceded ones. */ @@ -879,28 +1146,36 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc) } } if (!vcpu0) - return 0; /* nothing to run */ + goto out; /* nothing to run; should never happen */ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) if (vcpu->arch.ceded) vcpu->arch.ptid = ptid++; - vc->stolen_tb += mftb() - vc->preempt_tb; + /* + * Make sure we are running on thread 0, and that + * secondary threads are offline. 
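Note: on_primary_thread() above now grabs every secondary hardware thread up front and, if one cannot be grabbed, backs out the ones already taken. A small stand-alone sketch of that grab-or-roll-back pattern; grab_one()/release_one() are stand-ins for kvmppc_grab_hwthread()/kvmppc_release_hwthread(), and the real code additionally releases the thread that failed so the request flag it had set gets cleared.

#include <stdbool.h>
#include <stdio.h>

#define THREADS_PER_CORE 4

static bool taken[THREADS_PER_CORE];

static bool grab_one(int thr)
{
        if (taken[thr])
                return false;
        taken[thr] = true;
        return true;
}

static void release_one(int thr)
{
        taken[thr] = false;
}

/* Returns 1 if all secondary threads were grabbed, 0 otherwise. */
static int grab_secondaries(void)
{
        int thr;

        for (thr = 1; thr < THREADS_PER_CORE; ++thr) {
                if (!grab_one(thr)) {
                        while (--thr > 0)      /* roll back what we hold */
                                release_one(thr);
                        return 0;
                }
        }
        return 1;
}

int main(void)
{
        taken[2] = true;                       /* simulate a busy secondary */
        printf("grabbed all: %d\n", grab_secondaries());
        return 0;
}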
+ */ + if (threads_per_core > 1 && !on_primary_thread()) { + list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) + vcpu->arch.ret = -EBUSY; + goto out; + } + vc->pcpu = smp_processor_id(); list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { kvmppc_start_thread(vcpu); kvmppc_create_dtl_entry(vcpu, vc); } - /* Grab any remaining hw threads so they can't go into the kernel */ - for (i = ptid; i < threads_per_core; ++i) - kvmppc_grab_hwthread(vc->pcpu + i); + vc->vcore_state = VCORE_RUNNING; preempt_disable(); spin_unlock(&vc->lock); kvm_guest_enter(); + + srcu_idx = srcu_read_lock(&vcpu0->kvm->srcu); + __kvmppc_vcore_entry(NULL, vcpu0); - for (i = 0; i < threads_per_core; ++i) - kvmppc_release_hwthread(vc->pcpu + i); spin_lock(&vc->lock); /* disable sending of IPIs on virtual external irqs */ @@ -909,10 +1184,14 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc) /* wait for secondary threads to finish writing their state to memory */ if (vc->nap_count < vc->n_woken) kvmppc_wait_for_nap(vc); + for (i = 0; i < threads_per_core; ++i) + kvmppc_release_hwthread(vc->pcpu + i); /* prevent other vcpu threads from doing kvmppc_start_thread() now */ vc->vcore_state = VCORE_EXITING; spin_unlock(&vc->lock); + srcu_read_unlock(&vcpu0->kvm->srcu, srcu_idx); + /* make sure updates to secondary vcpu structs are visible now */ smp_mb(); kvm_guest_exit(); @@ -920,6 +1199,7 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc) preempt_enable(); kvm_resched(vcpu); + spin_lock(&vc->lock); now = get_tb(); list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { /* cancel pending dec exception if dec is positive */ @@ -943,10 +1223,8 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc) } } - spin_lock(&vc->lock); out: vc->vcore_state = VCORE_INACTIVE; - vc->preempt_tb = mftb(); list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads, arch.run_list) { if (vcpu->arch.ret != RESUME_GUEST) { @@ -954,8 +1232,6 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc) wake_up(&vcpu->arch.cpu_run); } } - - return 1; } /* @@ -979,20 +1255,11 @@ static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state) static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc) { DEFINE_WAIT(wait); - struct kvm_vcpu *v; - int all_idle = 1; prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE); vc->vcore_state = VCORE_SLEEPING; spin_unlock(&vc->lock); - list_for_each_entry(v, &vc->runnable_threads, arch.run_list) { - if (!v->arch.ceded || v->arch.pending_exceptions) { - all_idle = 0; - break; - } - } - if (all_idle) - schedule(); + schedule(); finish_wait(&vc->wq, &wait); spin_lock(&vc->lock); vc->vcore_state = VCORE_INACTIVE; @@ -1001,13 +1268,13 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc) static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) { int n_ceded; - int prev_state; struct kvmppc_vcore *vc; struct kvm_vcpu *v, *vn; kvm_run->exit_reason = 0; vcpu->arch.ret = RESUME_GUEST; vcpu->arch.trap = 0; + kvmppc_update_vpas(vcpu); /* * Synchronize with other threads in this virtual core @@ -1017,8 +1284,9 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) vcpu->arch.ceded = 0; vcpu->arch.run_task = current; vcpu->arch.kvm_run = kvm_run; - prev_state = vcpu->arch.state; + vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; + vcpu->arch.busy_preempt = TB_NIL; list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads); ++vc->n_runnable; @@ -1027,33 +1295,26 @@ static int 
kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) * If the vcore is already running, we may be able to start * this thread straight away and have it join in. */ - if (prev_state == KVMPPC_VCPU_STOPPED) { + if (!signal_pending(current)) { if (vc->vcore_state == VCORE_RUNNING && VCORE_EXIT_COUNT(vc) == 0) { vcpu->arch.ptid = vc->n_runnable - 1; + kvmppc_create_dtl_entry(vcpu, vc); kvmppc_start_thread(vcpu); + } else if (vc->vcore_state == VCORE_SLEEPING) { + wake_up(&vc->wq); } - } else if (prev_state == KVMPPC_VCPU_BUSY_IN_HOST) - --vc->n_busy; + } while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && !signal_pending(current)) { - if (vc->n_busy || vc->vcore_state != VCORE_INACTIVE) { + if (vc->vcore_state != VCORE_INACTIVE) { spin_unlock(&vc->lock); kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE); spin_lock(&vc->lock); continue; } - vc->runner = vcpu; - n_ceded = 0; - list_for_each_entry(v, &vc->runnable_threads, arch.run_list) - n_ceded += v->arch.ceded; - if (n_ceded == vc->n_runnable) - kvmppc_vcore_blocked(vc); - else - kvmppc_run_core(vc); - list_for_each_entry_safe(v, vn, &vc->runnable_threads, arch.run_list) { kvmppc_core_prepare_to_enter(v); @@ -1065,22 +1326,40 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) wake_up(&v->arch.cpu_run); } } + if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) + break; + vc->runner = vcpu; + n_ceded = 0; + list_for_each_entry(v, &vc->runnable_threads, arch.run_list) + if (!v->arch.pending_exceptions) + n_ceded += v->arch.ceded; + if (n_ceded == vc->n_runnable) + kvmppc_vcore_blocked(vc); + else + kvmppc_run_core(vc); vc->runner = NULL; } - if (signal_pending(current)) { - if (vc->vcore_state == VCORE_RUNNING || - vc->vcore_state == VCORE_EXITING) { - spin_unlock(&vc->lock); - kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE); - spin_lock(&vc->lock); - } - if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { - kvmppc_remove_runnable(vc, vcpu); - vcpu->stat.signal_exits++; - kvm_run->exit_reason = KVM_EXIT_INTR; - vcpu->arch.ret = -EINTR; - } + while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && + (vc->vcore_state == VCORE_RUNNING || + vc->vcore_state == VCORE_EXITING)) { + spin_unlock(&vc->lock); + kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE); + spin_lock(&vc->lock); + } + + if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { + kvmppc_remove_runnable(vc, vcpu); + vcpu->stat.signal_exits++; + kvm_run->exit_reason = KVM_EXIT_INTR; + vcpu->arch.ret = -EINTR; + } + + if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) { + /* Wake up some vcpu to run the core */ + v = list_first_entry(&vc->runnable_threads, + struct kvm_vcpu, arch.run_list); + wake_up(&v->arch.cpu_run); } spin_unlock(&vc->lock); @@ -1090,6 +1369,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) { int r; + int srcu_idx; if (!vcpu->arch.sane) { run->exit_reason = KVM_EXIT_INTERNAL_ERROR; @@ -1120,6 +1400,7 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) flush_vsx_to_thread(current); vcpu->arch.wqp = &vcpu->arch.vcore->wq; vcpu->arch.pgdir = current->mm->pgd; + vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; do { r = kvmppc_run_vcpu(run, vcpu); @@ -1128,10 +1409,16 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) !(vcpu->arch.shregs.msr & MSR_PR)) { r = kvmppc_pseries_do_hcall(vcpu); kvmppc_core_prepare_to_enter(vcpu); + } else if (r == RESUME_PAGE_FAULT) { + srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + r = 
kvmppc_book3s_hv_page_fault(run, vcpu, + vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); + srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); } } while (r == RESUME_GUEST); out: + vcpu->arch.state = KVMPPC_VCPU_NOTREADY; atomic_dec(&vcpu->kvm->arch.vcpus_running); return r; } @@ -1262,7 +1549,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) mutex_lock(&kvm->slots_lock); r = -EINVAL; - if (log->slot >= KVM_MEMORY_SLOTS) + if (log->slot >= KVM_USER_MEM_SLOTS) goto out; memslot = id_to_memslot(kvm->memslots, log->slot); @@ -1273,7 +1560,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) n = kvm_dirty_bitmap_bytes(memslot); memset(memslot->dirty_bitmap, 0, n); - r = kvmppc_hv_get_dirty_log(kvm, memslot); + r = kvmppc_hv_get_dirty_log(kvm, memslot, memslot->dirty_bitmap); if (r) goto out; @@ -1287,67 +1574,88 @@ out: return r; } -static unsigned long slb_pgsize_encoding(unsigned long psize) +static void unpin_slot(struct kvm_memory_slot *memslot) { - unsigned long senc = 0; + unsigned long *physp; + unsigned long j, npages, pfn; + struct page *page; - if (psize > 0x1000) { - senc = SLB_VSID_L; - if (psize == 0x10000) - senc |= SLB_VSID_LP_01; + physp = memslot->arch.slot_phys; + npages = memslot->npages; + if (!physp) + return; + for (j = 0; j < npages; j++) { + if (!(physp[j] & KVMPPC_GOT_PAGE)) + continue; + pfn = physp[j] >> PAGE_SHIFT; + page = pfn_to_page(pfn); + SetPageDirty(page); + put_page(page); + } +} + +void kvmppc_core_free_memslot(struct kvm_memory_slot *free, + struct kvm_memory_slot *dont) +{ + if (!dont || free->arch.rmap != dont->arch.rmap) { + vfree(free->arch.rmap); + free->arch.rmap = NULL; + } + if (!dont || free->arch.slot_phys != dont->arch.slot_phys) { + unpin_slot(free); + vfree(free->arch.slot_phys); + free->arch.slot_phys = NULL; } - return senc; +} + +int kvmppc_core_create_memslot(struct kvm_memory_slot *slot, + unsigned long npages) +{ + slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap)); + if (!slot->arch.rmap) + return -ENOMEM; + slot->arch.slot_phys = NULL; + + return 0; } int kvmppc_core_prepare_memory_region(struct kvm *kvm, - struct kvm_userspace_memory_region *mem) + struct kvm_memory_slot *memslot, + struct kvm_userspace_memory_region *mem) { - unsigned long npages; unsigned long *phys; - /* Allocate a slot_phys array */ - phys = kvm->arch.slot_phys[mem->slot]; - if (!kvm->arch.using_mmu_notifiers && !phys) { - npages = mem->memory_size >> PAGE_SHIFT; - phys = vzalloc(npages * sizeof(unsigned long)); + /* Allocate a slot_phys array if needed */ + phys = memslot->arch.slot_phys; + if (!kvm->arch.using_mmu_notifiers && !phys && memslot->npages) { + phys = vzalloc(memslot->npages * sizeof(unsigned long)); if (!phys) return -ENOMEM; - kvm->arch.slot_phys[mem->slot] = phys; - kvm->arch.slot_npages[mem->slot] = npages; + memslot->arch.slot_phys = phys; } return 0; } -static void unpin_slot(struct kvm *kvm, int slot_id) +void kvmppc_core_commit_memory_region(struct kvm *kvm, + struct kvm_userspace_memory_region *mem, + struct kvm_memory_slot old) { - unsigned long *physp; - unsigned long j, npages, pfn; - struct page *page; + unsigned long npages = mem->memory_size >> PAGE_SHIFT; + struct kvm_memory_slot *memslot; - physp = kvm->arch.slot_phys[slot_id]; - npages = kvm->arch.slot_npages[slot_id]; - if (physp) { - spin_lock(&kvm->arch.slot_phys_lock); - for (j = 0; j < npages; j++) { - if (!(physp[j] & KVMPPC_GOT_PAGE)) - continue; - pfn = physp[j] >> PAGE_SHIFT; - page = pfn_to_page(pfn); - 
SetPageDirty(page); - put_page(page); - } - kvm->arch.slot_phys[slot_id] = NULL; - spin_unlock(&kvm->arch.slot_phys_lock); - vfree(physp); + if (npages && old.npages) { + /* + * If modifying a memslot, reset all the rmap dirty bits. + * If this is a new memslot, we don't need to do anything + * since the rmap array starts out as all zeroes, + * i.e. no pages are dirty. + */ + memslot = id_to_memslot(kvm->memslots, mem->slot); + kvmppc_hv_get_dirty_log(kvm, memslot, NULL); } } -void kvmppc_core_commit_memory_region(struct kvm *kvm, - struct kvm_userspace_memory_region *mem) -{ -} - static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) { int err = 0; @@ -1362,6 +1670,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) unsigned long rmls; unsigned long *physp; unsigned long i, npages; + int srcu_idx; mutex_lock(&kvm->lock); if (kvm->arch.rma_setup_done) @@ -1377,12 +1686,13 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) } /* Look up the memslot for guest physical address 0 */ + srcu_idx = srcu_read_lock(&kvm->srcu); memslot = gfn_to_memslot(kvm, 0); /* We must have some memory at 0 by now */ err = -EINVAL; if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) - goto out; + goto out_srcu; /* Look up the VMA for the start of this memory slot */ hva = memslot->userspace_addr; @@ -1406,14 +1716,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) err = -EPERM; if (cpu_has_feature(CPU_FTR_ARCH_201)) { pr_err("KVM: CPU requires an RMO\n"); - goto out; + goto out_srcu; } /* We can handle 4k, 64k or 16M pages in the VRMA */ err = -EINVAL; if (!(psize == 0x1000 || psize == 0x10000 || psize == 0x1000000)) - goto out; + goto out_srcu; /* Update VRMASD field in the LPCR */ senc = slb_pgsize_encoding(psize); @@ -1436,7 +1746,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) err = -EINVAL; if (rmls < 0) { pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size); - goto out; + goto out_srcu; } atomic_inc(&ri->use_count); kvm->arch.rma = ri; @@ -1465,17 +1775,24 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) /* Initialize phys addrs of pages in RMO */ npages = ri->npages; porder = __ilog2(npages); - physp = kvm->arch.slot_phys[memslot->id]; - spin_lock(&kvm->arch.slot_phys_lock); - for (i = 0; i < npages; ++i) - physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) + porder; - spin_unlock(&kvm->arch.slot_phys_lock); + physp = memslot->arch.slot_phys; + if (physp) { + if (npages > memslot->npages) + npages = memslot->npages; + spin_lock(&kvm->arch.slot_phys_lock); + for (i = 0; i < npages; ++i) + physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) + + porder; + spin_unlock(&kvm->arch.slot_phys_lock); + } } /* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */ smp_wmb(); kvm->arch.rma_setup_done = 1; err = 0; + out_srcu: + srcu_read_unlock(&kvm->srcu, srcu_idx); out: mutex_unlock(&kvm->lock); return err; @@ -1496,6 +1813,13 @@ int kvmppc_core_init_vm(struct kvm *kvm) return -ENOMEM; kvm->arch.lpid = lpid; + /* + * Since we don't flush the TLB when tearing down a VM, + * and this lpid might have previously been used, + * make sure we flush on each core before running the new VM. 
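Note: the comment above explains why kvmppc_core_init_vm() starts with every core marked in need_tlb_flush; the consumer is the new tlbiel loop added to book3s_hv_rmhandlers.S further down, which clears the core's bit and flushes all TLB sets before switching to the guest. A userspace sketch of the same idea, with a 64-bit mask standing in for the cpumask and the tlbiel loop reduced to a counter:

#include <stdint.h>
#include <stdio.h>

#define TLB_SETS 128            /* POWER7 congruence classes, as in the patch */

static uint64_t need_tlb_flush;

static void vm_init(void)
{
        need_tlb_flush = ~0ULL; /* this lpid may have stale entries anywhere */
}

static void enter_guest(int core)
{
        if (need_tlb_flush & (1ULL << core)) {
                int set, flushed = 0;

                need_tlb_flush &= ~(1ULL << core);
                for (set = 0; set < TLB_SETS; ++set)
                        flushed++;      /* one tlbiel per set in the real code */
                printf("core %d: flushed %d sets\n", core, flushed);
        } else {
                printf("core %d: no flush needed\n", core);
        }
}

int main(void)
{
        vm_init();
        enter_guest(5);
        enter_guest(5);         /* second entry: bit already clear, no flush */
        return 0;
}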
+ */ + cpumask_setall(&kvm->arch.need_tlb_flush); + INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables); kvm->arch.rma = NULL; @@ -1523,16 +1847,19 @@ int kvmppc_core_init_vm(struct kvm *kvm) kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206); spin_lock_init(&kvm->arch.slot_phys_lock); + + /* + * Don't allow secondary CPU threads to come online + * while any KVM VMs exist. + */ + inhibit_secondary_onlining(); + return 0; } void kvmppc_core_destroy_vm(struct kvm *kvm) { - unsigned long i; - - if (!kvm->arch.using_mmu_notifiers) - for (i = 0; i < KVM_MEM_SLOTS_NUM; i++) - unpin_slot(kvm, i); + uninhibit_secondary_onlining(); if (kvm->arch.rma) { kvm_release_rma(kvm->arch.rma); diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index fb4eac290fe..ec0a9e5de10 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -157,8 +157,8 @@ static void __init kvm_linear_init_one(ulong size, int count, int type) linear_info = alloc_bootmem(count * sizeof(struct kvmppc_linear_info)); for (i = 0; i < count; ++i) { linear = alloc_bootmem_align(size, size); - pr_info("Allocated KVM %s at %p (%ld MB)\n", typestr, linear, - size >> 20); + pr_debug("Allocated KVM %s at %p (%ld MB)\n", typestr, linear, + size >> 20); linear_info[i].base_virt = linear; linear_info[i].base_pfn = __pa(linear) >> PAGE_SHIFT; linear_info[i].npages = npages; diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c new file mode 100644 index 00000000000..a353c485808 --- /dev/null +++ b/arch/powerpc/kvm/book3s_hv_ras.c @@ -0,0 +1,148 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * Copyright 2012 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> + */ + +#include <linux/types.h> +#include <linux/string.h> +#include <linux/kvm.h> +#include <linux/kvm_host.h> +#include <linux/kernel.h> +#include <asm/opal.h> + +/* SRR1 bits for machine check on POWER7 */ +#define SRR1_MC_LDSTERR (1ul << (63-42)) +#define SRR1_MC_IFETCH_SH (63-45) +#define SRR1_MC_IFETCH_MASK 0x7 +#define SRR1_MC_IFETCH_SLBPAR 2 /* SLB parity error */ +#define SRR1_MC_IFETCH_SLBMULTI 3 /* SLB multi-hit */ +#define SRR1_MC_IFETCH_SLBPARMULTI 4 /* SLB parity + multi-hit */ +#define SRR1_MC_IFETCH_TLBMULTI 5 /* I-TLB multi-hit */ + +/* DSISR bits for machine check on POWER7 */ +#define DSISR_MC_DERAT_MULTI 0x800 /* D-ERAT multi-hit */ +#define DSISR_MC_TLB_MULTI 0x400 /* D-TLB multi-hit */ +#define DSISR_MC_SLB_PARITY 0x100 /* SLB parity error */ +#define DSISR_MC_SLB_MULTI 0x080 /* SLB multi-hit */ +#define DSISR_MC_SLB_PARMULTI 0x040 /* SLB parity + multi-hit */ + +/* POWER7 SLB flush and reload */ +static void reload_slb(struct kvm_vcpu *vcpu) +{ + struct slb_shadow *slb; + unsigned long i, n; + + /* First clear out SLB */ + asm volatile("slbmte %0,%0; slbia" : : "r" (0)); + + /* Do they have an SLB shadow buffer registered? 
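Note: the SLB reload loop that follows rebuilds the SLB from the guest's shadow buffer; each slbmte needs the entry number folded into the low 12 bits of the ESID doubleword. A tiny illustration of that encoding, reusing only the mask from the hunk (the input value is an arbitrary example):

#include <stdint.h>
#include <stdio.h>

/* Fold the SLB entry number into the low bits of the ESID word, as the
 * reload loop does before issuing slbmte. */
static uint64_t slb_rb(uint64_t esid_word, unsigned int index)
{
        return (esid_word & ~0xfffULL) | index;
}

int main(void)
{
        uint64_t rb = slb_rb(0xc000000008000400ULL, 5);

        /* low 12 bits replaced by the entry index */
        printf("rb = %#llx\n", (unsigned long long)rb);
        return 0;
}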
*/ + slb = vcpu->arch.slb_shadow.pinned_addr; + if (!slb) + return; + + /* Sanity check */ + n = min_t(u32, slb->persistent, SLB_MIN_SIZE); + if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end) + return; + + /* Load up the SLB from that */ + for (i = 0; i < n; ++i) { + unsigned long rb = slb->save_area[i].esid; + unsigned long rs = slb->save_area[i].vsid; + + rb = (rb & ~0xFFFul) | i; /* insert entry number */ + asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb)); + } +} + +/* POWER7 TLB flush */ +static void flush_tlb_power7(struct kvm_vcpu *vcpu) +{ + unsigned long i, rb; + + rb = TLBIEL_INVAL_SET_LPID; + for (i = 0; i < POWER7_TLB_SETS; ++i) { + asm volatile("tlbiel %0" : : "r" (rb)); + rb += 1 << TLBIEL_INVAL_SET_SHIFT; + } +} + +/* + * On POWER7, see if we can handle a machine check that occurred inside + * the guest in real mode, without switching to the host partition. + * + * Returns: 0 => exit guest, 1 => deliver machine check to guest + */ +static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu) +{ + unsigned long srr1 = vcpu->arch.shregs.msr; +#ifdef CONFIG_PPC_POWERNV + struct opal_machine_check_event *opal_evt; +#endif + long handled = 1; + + if (srr1 & SRR1_MC_LDSTERR) { + /* error on load/store */ + unsigned long dsisr = vcpu->arch.shregs.dsisr; + + if (dsisr & (DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI | + DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI)) { + /* flush and reload SLB; flushes D-ERAT too */ + reload_slb(vcpu); + dsisr &= ~(DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI | + DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI); + } + if (dsisr & DSISR_MC_TLB_MULTI) { + flush_tlb_power7(vcpu); + dsisr &= ~DSISR_MC_TLB_MULTI; + } + /* Any other errors we don't understand? */ + if (dsisr & 0xffffffffUL) + handled = 0; + } + + switch ((srr1 >> SRR1_MC_IFETCH_SH) & SRR1_MC_IFETCH_MASK) { + case 0: + break; + case SRR1_MC_IFETCH_SLBPAR: + case SRR1_MC_IFETCH_SLBMULTI: + case SRR1_MC_IFETCH_SLBPARMULTI: + reload_slb(vcpu); + break; + case SRR1_MC_IFETCH_TLBMULTI: + flush_tlb_power7(vcpu); + break; + default: + handled = 0; + } + +#ifdef CONFIG_PPC_POWERNV + /* + * See if OPAL has already handled the condition. + * We assume that if the condition is recovered then OPAL + * will have generated an error log event that we will pick + * up and log later. + */ + opal_evt = local_paca->opal_mc_evt; + if (opal_evt->version == OpalMCE_V1 && + (opal_evt->severity == OpalMCE_SEV_NO_ERROR || + opal_evt->disposition == OpalMCE_DISPOSITION_RECOVERED)) + handled = 1; + + if (handled) + opal_evt->in_use = 0; +#endif + + return handled; +} + +long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu) +{ + if (cpu_has_feature(CPU_FTR_ARCH_206)) + return kvmppc_realmode_mc_power7(vcpu); + + return 0; +} diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index fb0e821622d..19c93bae1ae 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -35,6 +35,37 @@ static void *real_vmalloc_addr(void *x) return __va(addr); } +/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */ +static int global_invalidates(struct kvm *kvm, unsigned long flags) +{ + int global; + + /* + * If there is only one vcore, and it's currently running, + * we can use tlbiel as long as we mark all other physical + * cores as potentially having stale TLB entries for this lpid. + * If we're not using MMU notifiers, we never take pages away + * from the guest, so we can use tlbiel if requested. + * Otherwise, don't use tlbiel. 
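Note: the decision described in this comment is implemented just below in global_invalidates(). Roughly, with a 64-bit mask standing in for the cpumask and booleans standing in for the vcore and MMU-notifier state (the H_LOCAL bit value here is illustrative, not the real flag):

#include <stdbool.h>
#include <stdint.h>

#define H_LOCAL 0x1UL           /* illustrative flag bit */

struct vm_state {
        int online_vcores;
        bool using_mmu_notifiers;
        bool running_here;      /* the single vcore is running on this core */
        uint64_t need_tlb_flush;
        int this_core;
};

/* Returns true when a broadcast tlbie is required, false when a local
 * tlbiel is enough; mirrors the checks in global_invalidates(). */
static bool needs_global(struct vm_state *vm, unsigned long flags)
{
        bool global;

        if (vm->online_vcores == 1 && vm->running_here)
                global = false;
        else if (vm->using_mmu_notifiers)
                global = true;
        else
                global = !(flags & H_LOCAL);

        if (!global) {
                /* every other core may now hold stale entries for this lpid */
                vm->need_tlb_flush = ~0ULL;
                vm->need_tlb_flush &= ~(1ULL << vm->this_core);
        }
        return global;
}

int main(void)
{
        struct vm_state vm = { .online_vcores = 1, .running_here = true };

        return needs_global(&vm, 0) ? 1 : 0;   /* local tlbiel is enough here */
}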
+ */ + if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcore) + global = 0; + else if (kvm->arch.using_mmu_notifiers) + global = 1; + else + global = !(flags & H_LOCAL); + + if (!global) { + /* any other core might now have stale TLB entries... */ + smp_wmb(); + cpumask_setall(&kvm->arch.need_tlb_flush); + cpumask_clear_cpu(local_paca->kvm_hstate.kvm_vcore->pcpu, + &kvm->arch.need_tlb_flush); + } + + return global; +} + /* * Add this HPTE into the chain for the real page. * Must be called with the chain locked; it unlocks the chain. @@ -59,13 +90,24 @@ void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev, head->back = pte_index; } else { rev->forw = rev->back = pte_index; - i = pte_index; + *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | + pte_index | KVMPPC_RMAP_PRESENT; } - smp_wmb(); - *rmap = i | KVMPPC_RMAP_REFERENCED | KVMPPC_RMAP_PRESENT; /* unlock */ + unlock_rmap(rmap); } EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain); +/* + * Note modification of an HPTE; set the HPTE modified bit + * if anyone is interested. + */ +static inline void note_hpte_modification(struct kvm *kvm, + struct revmap_entry *rev) +{ + if (atomic_read(&kvm->arch.hpte_mod_interest)) + rev->guest_rpte |= HPTE_GR_MODIFIED; +} + /* Remove this HPTE from the chain for a real page */ static void remove_revmap_chain(struct kvm *kvm, long pte_index, struct revmap_entry *rev, @@ -81,7 +123,7 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index, ptel = rev->guest_rpte |= rcbits; gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel)); memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn); - if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) + if (!memslot) return; rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]); @@ -103,14 +145,14 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index, unlock_rmap(rmap); } -static pte_t lookup_linux_pte(struct kvm_vcpu *vcpu, unsigned long hva, +static pte_t lookup_linux_pte(pgd_t *pgdir, unsigned long hva, int writing, unsigned long *pte_sizep) { pte_t *ptep; unsigned long ps = *pte_sizep; unsigned int shift; - ptep = find_linux_pte_or_hugepte(vcpu->arch.pgdir, hva, &shift); + ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift); if (!ptep) return __pte(0); if (shift) @@ -130,15 +172,15 @@ static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v) hpte[0] = hpte_v; } -long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, - long pte_index, unsigned long pteh, unsigned long ptel) +long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, + long pte_index, unsigned long pteh, unsigned long ptel, + pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret) { - struct kvm *kvm = vcpu->kvm; unsigned long i, pa, gpa, gfn, psize; unsigned long slot_fn, hva; unsigned long *hpte; struct revmap_entry *rev; - unsigned long g_ptel = ptel; + unsigned long g_ptel; struct kvm_memory_slot *memslot; unsigned long *physp, pte_size; unsigned long is_io; @@ -147,13 +189,14 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, unsigned int writing; unsigned long mmu_seq; unsigned long rcbits; - bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING; psize = hpte_page_size(pteh, ptel); if (!psize) return H_PARAMETER; writing = hpte_is_writable(ptel); pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID); + ptel &= ~HPTE_GR_RESERVED; + g_ptel = ptel; /* used later to detect if we might have been invalidated */ mmu_seq = kvm->mmu_notifier_seq; @@ -183,7 +226,7 @@ long kvmppc_h_enter(struct 
kvm_vcpu *vcpu, unsigned long flags, rmap = &memslot->arch.rmap[slot_fn]; if (!kvm->arch.using_mmu_notifiers) { - physp = kvm->arch.slot_phys[memslot->id]; + physp = memslot->arch.slot_phys; if (!physp) return H_PARAMETER; physp += slot_fn; @@ -201,7 +244,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, /* Look up the Linux PTE for the backing page */ pte_size = psize; - pte = lookup_linux_pte(vcpu, hva, writing, &pte_size); + pte = lookup_linux_pte(pgdir, hva, writing, &pte_size); if (pte_present(pte)) { if (writing && !pte_write(pte)) /* make the actual HPTE be read-only */ @@ -210,6 +253,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, pa = pte_pfn(pte) << PAGE_SHIFT; } } + if (pte_size < psize) return H_PARAMETER; if (pa && pte_size > psize) @@ -287,8 +331,10 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, rev = &kvm->arch.revmap[pte_index]; if (realmode) rev = real_vmalloc_addr(rev); - if (rev) + if (rev) { rev->guest_rpte = g_ptel; + note_hpte_modification(kvm, rev); + } /* Link HPTE into reverse-map chain */ if (pteh & HPTE_V_VALID) { @@ -297,7 +343,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, lock_rmap(rmap); /* Check for pending invalidations under the rmap chain lock */ if (kvm->arch.using_mmu_notifiers && - mmu_notifier_retry(vcpu, mmu_seq)) { + mmu_notifier_retry(kvm, mmu_seq)) { /* inval in progress, write a non-present HPTE */ pteh |= HPTE_V_ABSENT; pteh &= ~HPTE_V_VALID; @@ -318,10 +364,17 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, hpte[0] = pteh; asm volatile("ptesync" : : : "memory"); - vcpu->arch.gpr[4] = pte_index; + *pte_idx_ret = pte_index; return H_SUCCESS; } -EXPORT_SYMBOL_GPL(kvmppc_h_enter); +EXPORT_SYMBOL_GPL(kvmppc_do_h_enter); + +long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, + long pte_index, unsigned long pteh, unsigned long ptel) +{ + return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel, + vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]); +} #define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token)) @@ -343,11 +396,10 @@ static inline int try_lock_tlbie(unsigned int *lock) return old == 0; } -long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags, - unsigned long pte_index, unsigned long avpn, - unsigned long va) +long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags, + unsigned long pte_index, unsigned long avpn, + unsigned long *hpret) { - struct kvm *kvm = vcpu->kvm; unsigned long *hpte; unsigned long v, r, rb; struct revmap_entry *rev; @@ -369,7 +421,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags, if (v & HPTE_V_VALID) { hpte[0] &= ~HPTE_V_VALID; rb = compute_tlbie_rb(v, hpte[1], pte_index); - if (!(flags & H_LOCAL) && atomic_read(&kvm->online_vcpus) > 1) { + if (global_invalidates(kvm, flags)) { while (!try_lock_tlbie(&kvm->arch.tlbie_lock)) cpu_relax(); asm volatile("ptesync" : : : "memory"); @@ -385,13 +437,22 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags, /* Read PTE low word after tlbie to get final R/C values */ remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]); } - r = rev->guest_rpte; + r = rev->guest_rpte & ~HPTE_GR_RESERVED; + note_hpte_modification(kvm, rev); unlock_hpte(hpte, 0); - vcpu->arch.gpr[4] = v; - vcpu->arch.gpr[5] = r; + hpret[0] = v; + hpret[1] = r; return H_SUCCESS; } +EXPORT_SYMBOL_GPL(kvmppc_do_h_remove); + +long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags, + unsigned long pte_index, unsigned long avpn) +{ + return 
kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn, + &vcpu->arch.gpr[4]); +} long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) { @@ -459,6 +520,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) args[j] = ((0x80 | flags) << 56) + pte_index; rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); + note_hpte_modification(kvm, rev); if (!(hp[0] & HPTE_V_VALID)) { /* insert R and C bits from PTE */ @@ -534,8 +596,6 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags, return H_NOT_FOUND; } - if (atomic_read(&kvm->online_vcpus) == 1) - flags |= H_LOCAL; v = hpte[0]; bits = (flags << 55) & HPTE_R_PP0; bits |= (flags << 48) & HPTE_R_KEY_HI; @@ -548,6 +608,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags, if (rev) { r = (rev->guest_rpte & ~mask) | bits; rev->guest_rpte = r; + note_hpte_modification(kvm, rev); } r = (hpte[1] & ~mask) | bits; @@ -555,7 +616,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags, if (v & HPTE_V_VALID) { rb = compute_tlbie_rb(v, r, pte_index); hpte[0] = v & ~HPTE_V_VALID; - if (!(flags & H_LOCAL)) { + if (global_invalidates(kvm, flags)) { while(!try_lock_tlbie(&kvm->arch.tlbie_lock)) cpu_relax(); asm volatile("ptesync" : : : "memory"); @@ -568,6 +629,28 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags, asm volatile("tlbiel %0" : : "r" (rb)); asm volatile("ptesync" : : : "memory"); } + /* + * If the host has this page as readonly but the guest + * wants to make it read/write, reduce the permissions. + * Checking the host permissions involves finding the + * memslot and then the Linux PTE for the page. + */ + if (hpte_is_writable(r) && kvm->arch.using_mmu_notifiers) { + unsigned long psize, gfn, hva; + struct kvm_memory_slot *memslot; + pgd_t *pgdir = vcpu->arch.pgdir; + pte_t pte; + + psize = hpte_page_size(v, r); + gfn = ((r & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT; + memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn); + if (memslot) { + hva = __gfn_to_hva_memslot(memslot, gfn); + pte = lookup_linux_pte(pgdir, hva, 1, &psize); + if (pte_present(pte) && !pte_write(pte)) + r = hpte_make_readonly(r); + } + } } hpte[1] = r; eieio(); @@ -599,8 +682,10 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags, v &= ~HPTE_V_ABSENT; v |= HPTE_V_VALID; } - if (v & HPTE_V_VALID) + if (v & HPTE_V_VALID) { r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C)); + r &= ~HPTE_GR_RESERVED; + } vcpu->arch.gpr[4 + i * 2] = v; vcpu->arch.gpr[5 + i * 2] = r; } diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 74a24bbb963..e33d11f1b97 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -27,6 +27,7 @@ #include <asm/asm-offsets.h> #include <asm/exception-64s.h> #include <asm/kvm_book3s_asm.h> +#include <asm/mmu-hash64.h> /***************************************************************************** * * @@ -134,8 +135,11 @@ kvm_start_guest: 27: /* XXX should handle hypervisor maintenance interrupts etc. here */ + /* reload vcpu pointer after clearing the IPI */ + ld r4,HSTATE_KVM_VCPU(r13) + cmpdi r4,0 /* if we have no vcpu to run, go back to sleep */ - beq cr1,kvm_no_guest + beq kvm_no_guest /* were we napping due to cede? 
*/ lbz r0,HSTATE_NAPPING(r13) @@ -310,7 +314,33 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) mtspr SPRN_SDR1,r6 /* switch to partition page table */ mtspr SPRN_LPID,r7 isync + + /* See if we need to flush the TLB */ + lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */ + clrldi r7,r6,64-6 /* extract bit number (6 bits) */ + srdi r6,r6,6 /* doubleword number */ + sldi r6,r6,3 /* address offset */ + add r6,r6,r9 + addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */ li r0,1 + sld r0,r0,r7 + ld r7,0(r6) + and. r7,r7,r0 + beq 22f +23: ldarx r7,0,r6 /* if set, clear the bit */ + andc r7,r7,r0 + stdcx. r7,0,r6 + bne 23b + li r6,128 /* and flush the TLB */ + mtctr r6 + li r7,0x800 /* IS field = 0b10 */ + ptesync +28: tlbiel r7 + addi r7,r7,0x1000 + bdnz 28b + ptesync + +22: li r0,1 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */ b 10f @@ -333,36 +363,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) mr r9,r4 blt hdec_soon - /* - * Invalidate the TLB if we could possibly have stale TLB - * entries for this partition on this core due to the use - * of tlbiel. - * XXX maybe only need this on primary thread? - */ - ld r9,VCPU_KVM(r4) /* pointer to struct kvm */ - lwz r5,VCPU_VCPUID(r4) - lhz r6,PACAPACAINDEX(r13) - rldimi r6,r5,0,62 /* XXX map as if threads 1:1 p:v */ - lhz r8,VCPU_LAST_CPU(r4) - sldi r7,r6,1 /* see if this is the same vcpu */ - add r7,r7,r9 /* as last ran on this pcpu */ - lhz r0,KVM_LAST_VCPU(r7) - cmpw r6,r8 /* on the same cpu core as last time? */ - bne 3f - cmpw r0,r5 /* same vcpu as this core last ran? */ - beq 1f -3: sth r6,VCPU_LAST_CPU(r4) /* if not, invalidate partition TLB */ - sth r5,KVM_LAST_VCPU(r7) - li r6,128 - mtctr r6 - li r7,0x800 /* IS field = 0b10 */ - ptesync -2: tlbiel r7 - addi r7,r7,0x1000 - bdnz 2b - ptesync -1: - /* Save purr/spurr */ mfspr r5,SPRN_PURR mfspr r6,SPRN_SPURR @@ -539,6 +539,11 @@ fast_guest_return: /* Enter guest */ +BEGIN_FTR_SECTION + ld r5, VCPU_CFAR(r4) + mtspr SPRN_CFAR, r5 +END_FTR_SECTION_IFSET(CPU_FTR_CFAR) + ld r5, VCPU_LR(r4) lwz r6, VCPU_CR(r4) mtlr r5 @@ -604,6 +609,10 @@ kvmppc_interrupt: lwz r4, HSTATE_SCRATCH1(r13) std r3, VCPU_GPR(R12)(r9) stw r4, VCPU_CR(r9) +BEGIN_FTR_SECTION + ld r3, HSTATE_CFAR(r13) + std r3, VCPU_CFAR(r9) +END_FTR_SECTION_IFSET(CPU_FTR_CFAR) /* Restore R1/R2 so we can handle faults */ ld r1, HSTATE_HOST_R1(r13) @@ -679,8 +688,7 @@ BEGIN_FTR_SECTION 1: END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) -nohpte_cont: -hcall_real_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ +guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ /* Save DEC */ mfspr r5,SPRN_DEC mftb r6 @@ -701,6 +709,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) std r6, VCPU_FAULT_DAR(r9) stw r7, VCPU_FAULT_DSISR(r9) + /* See if it is a machine check */ + cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK + beq machine_check_realmode +mc_cont: + /* Save guest CTRL register, set runlatch to 1 */ 6: mfspr r6,SPRN_CTRLF stw r6,VCPU_CTRL(r9) @@ -1113,38 +1126,41 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) /* * For external and machine check interrupts, we need * to call the Linux handler to process the interrupt. - * We do that by jumping to the interrupt vector address - * which we have in r12. The [h]rfid at the end of the + * We do that by jumping to absolute address 0x500 for + * external interrupts, or the machine_check_fwnmi label + * for machine checks (since firmware might have patched + * the vector area at 0x200). The [h]rfid at the end of the * handler will return to the book3s_hv_interrupts.S code. 
* For other interrupts we do the rfid to get back - * to the book3s_interrupts.S code here. + * to the book3s_hv_interrupts.S code here. */ ld r8, HSTATE_VMHANDLER(r13) ld r7, HSTATE_HOST_MSR(r13) + cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL +BEGIN_FTR_SECTION beq 11f - cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) /* RFI into the highmem handler, or branch to interrupt handler */ -12: mfmsr r6 - mtctr r12 + mfmsr r6 li r0, MSR_RI andc r6, r6, r0 mtmsrd r6, 1 /* Clear RI in MSR */ mtsrr0 r8 mtsrr1 r7 - beqctr + beqa 0x500 /* external interrupt (PPC970) */ + beq cr1, 13f /* machine check */ RFI -11: -BEGIN_FTR_SECTION - b 12b -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) - mtspr SPRN_HSRR0, r8 + /* On POWER7, we have external interrupts set to use HSRR0/1 */ +11: mtspr SPRN_HSRR0, r8 mtspr SPRN_HSRR1, r7 ba 0x500 +13: b machine_check_fwnmi + /* * Check whether an HDSI is an HPTE not found fault or something else. * If it is an HPTE not found fault that is due to the guest accessing @@ -1177,7 +1193,7 @@ kvmppc_hdsi: cmpdi r3, 0 /* retry the instruction */ beq 6f cmpdi r3, -1 /* handle in kernel mode */ - beq nohpte_cont + beq guest_exit_cont cmpdi r3, -2 /* MMIO emulation; need instr word */ beq 2f @@ -1191,6 +1207,7 @@ kvmppc_hdsi: li r10, BOOK3S_INTERRUPT_DATA_STORAGE li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ rotldi r11, r11, 63 +fast_interrupt_c_return: 6: ld r7, VCPU_CTR(r9) lwz r8, VCPU_XER(r9) mtctr r7 @@ -1223,7 +1240,7 @@ kvmppc_hdsi: /* Unset guest mode. */ li r0, KVM_GUEST_MODE_NONE stb r0, HSTATE_IN_GUEST(r13) - b nohpte_cont + b guest_exit_cont /* * Similarly for an HISI, reflect it to the guest as an ISI unless @@ -1249,9 +1266,9 @@ kvmppc_hisi: ld r11, VCPU_MSR(r9) li r12, BOOK3S_INTERRUPT_H_INST_STORAGE cmpdi r3, 0 /* retry the instruction */ - beq 6f + beq fast_interrupt_c_return cmpdi r3, -1 /* handle in kernel mode */ - beq nohpte_cont + beq guest_exit_cont /* Synthesize an ISI for the guest */ mr r11, r3 @@ -1260,12 +1277,7 @@ kvmppc_hisi: li r10, BOOK3S_INTERRUPT_INST_STORAGE li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ rotldi r11, r11, 63 -6: ld r7, VCPU_CTR(r9) - lwz r8, VCPU_XER(r9) - mtctr r7 - mtxer r8 - mr r4, r9 - b fast_guest_return + b fast_interrupt_c_return 3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */ ld r5, KVM_VRMA_SLB_V(r6) @@ -1281,14 +1293,14 @@ kvmppc_hisi: hcall_try_real_mode: ld r3,VCPU_GPR(R3)(r9) andi. r0,r11,MSR_PR - bne hcall_real_cont + bne guest_exit_cont clrrdi r3,r3,2 cmpldi r3,hcall_real_table_end - hcall_real_table - bge hcall_real_cont + bge guest_exit_cont LOAD_REG_ADDR(r4, hcall_real_table) lwzx r3,r3,r4 cmpwi r3,0 - beq hcall_real_cont + beq guest_exit_cont add r3,r3,r4 mtctr r3 mr r3,r9 /* get vcpu pointer */ @@ -1309,7 +1321,7 @@ hcall_real_fallback: li r12,BOOK3S_INTERRUPT_SYSCALL ld r9, HSTATE_KVM_VCPU(r13) - b hcall_real_cont + b guest_exit_cont .globl hcall_real_table hcall_real_table: @@ -1568,6 +1580,21 @@ kvm_cede_exit: li r3,H_TOO_HARD blr + /* Try to handle a machine check in real mode */ +machine_check_realmode: + mr r3, r9 /* get vcpu pointer */ + bl .kvmppc_realmode_machine_check + nop + cmpdi r3, 0 /* continue exiting from guest? */ + ld r9, HSTATE_KVM_VCPU(r13) + li r12, BOOK3S_INTERRUPT_MACHINE_CHECK + beq mc_cont + /* If not, deliver a machine check. 
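Note: machine_check_realmode above hands the vcpu to kvmppc_realmode_machine_check(), whose POWER7 path (added as book3s_hv_ras.c earlier in this diff) keys off a 3-bit instruction-fetch cause field in SRR1. A stand-alone sketch of that decode, reusing the field position and case values from the patch; the test value in main() is fabricated.

#include <stdint.h>
#include <stdio.h>

/* Field position and width copied from the patch (POWER7 SRR1 layout). */
#define SRR1_MC_IFETCH_SH    (63 - 45)
#define SRR1_MC_IFETCH_MASK  0x7

static const char *ifetch_cause(uint64_t srr1)
{
        switch ((srr1 >> SRR1_MC_IFETCH_SH) & SRR1_MC_IFETCH_MASK) {
        case 2: return "SLB parity error";
        case 3: return "SLB multi-hit";
        case 4: return "SLB parity + multi-hit";
        case 5: return "I-TLB multi-hit";
        default: return "not an instruction-fetch SLB/TLB error";
        }
}

int main(void)
{
        uint64_t srr1 = 3ULL << SRR1_MC_IFETCH_SH;   /* fabricate a multi-hit */

        printf("%s\n", ifetch_cause(srr1));
        return 0;
}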
SRR0/1 are already set */ + li r10, BOOK3S_INTERRUPT_MACHINE_CHECK + li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ + rotldi r11, r11, 63 + b fast_interrupt_c_return + secondary_too_late: ld r5,HSTATE_KVM_VCORE(r13) HMT_LOW @@ -1587,6 +1614,10 @@ secondary_too_late: .endr secondary_nap: + /* Clear our vcpu pointer so we don't come back in early */ + li r0, 0 + std r0, HSTATE_KVM_VCPU(r13) + lwsync /* Clear any pending IPI - assume we're a secondary thread */ ld r5, HSTATE_XICS_PHYS(r13) li r7, XICS_XIRR @@ -1612,8 +1643,6 @@ secondary_nap: kvm_no_guest: li r0, KVM_HWTHREAD_IN_NAP stb r0, HSTATE_HWTHREAD_STATE(r13) - li r0, 0 - std r0, HSTATE_KVM_VCPU(r13) li r3, LPCR_PECE0 mfspr r4, SPRN_LPCR diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c index 41cb0017e75..da8b13c4b77 100644 --- a/arch/powerpc/kvm/book3s_mmu_hpte.c +++ b/arch/powerpc/kvm/book3s_mmu_hpte.c @@ -114,11 +114,6 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) hlist_del_init_rcu(&pte->list_vpte); hlist_del_init_rcu(&pte->list_vpte_long); - if (pte->pte.may_write) - kvm_release_pfn_dirty(pte->pfn); - else - kvm_release_pfn_clean(pte->pfn); - spin_unlock(&vcpu3s->mmu_lock); vcpu3s->hpte_cache_count--; @@ -129,7 +124,6 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); struct hpte_cache *pte; - struct hlist_node *node; int i; rcu_read_lock(); @@ -137,7 +131,7 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu) for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i]; - hlist_for_each_entry_rcu(pte, node, list, list_vpte_long) + hlist_for_each_entry_rcu(pte, list, list_vpte_long) invalidate_pte(vcpu, pte); } @@ -148,7 +142,6 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); struct hlist_head *list; - struct hlist_node *node; struct hpte_cache *pte; /* Find the list of entries in the map */ @@ -157,7 +150,7 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea) rcu_read_lock(); /* Check the list for matching entries and invalidate */ - hlist_for_each_entry_rcu(pte, node, list, list_pte) + hlist_for_each_entry_rcu(pte, list, list_pte) if ((pte->pte.eaddr & ~0xfffUL) == guest_ea) invalidate_pte(vcpu, pte); @@ -168,7 +161,6 @@ static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); struct hlist_head *list; - struct hlist_node *node; struct hpte_cache *pte; /* Find the list of entries in the map */ @@ -178,7 +170,7 @@ static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea) rcu_read_lock(); /* Check the list for matching entries and invalidate */ - hlist_for_each_entry_rcu(pte, node, list, list_pte_long) + hlist_for_each_entry_rcu(pte, list, list_pte_long) if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea) invalidate_pte(vcpu, pte); @@ -212,7 +204,6 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); struct hlist_head *list; - struct hlist_node *node; struct hpte_cache *pte; u64 vp_mask = 0xfffffffffULL; @@ -221,7 +212,7 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp) rcu_read_lock(); /* Check the list for matching entries and invalidate */ - hlist_for_each_entry_rcu(pte, node, list, list_vpte) + 
hlist_for_each_entry_rcu(pte, list, list_vpte) if ((pte->pte.vpage & vp_mask) == guest_vp) invalidate_pte(vcpu, pte); @@ -233,7 +224,6 @@ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); struct hlist_head *list; - struct hlist_node *node; struct hpte_cache *pte; u64 vp_mask = 0xffffff000ULL; @@ -243,7 +233,7 @@ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp) rcu_read_lock(); /* Check the list for matching entries and invalidate */ - hlist_for_each_entry_rcu(pte, node, list, list_vpte_long) + hlist_for_each_entry_rcu(pte, list, list_vpte_long) if ((pte->pte.vpage & vp_mask) == guest_vp) invalidate_pte(vcpu, pte); @@ -271,7 +261,6 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask) void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); - struct hlist_node *node; struct hpte_cache *pte; int i; @@ -282,7 +271,7 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end) for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i]; - hlist_for_each_entry_rcu(pte, node, list, list_vpte_long) + hlist_for_each_entry_rcu(pte, list, list_vpte_long) if ((pte->pte.raddr >= pa_start) && (pte->pte.raddr < pa_end)) invalidate_pte(vcpu, pte); diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 05c28f59f77..5e93438afb0 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -34,6 +34,8 @@ #include <asm/kvm_book3s.h> #include <asm/mmu_context.h> #include <asm/switch_to.h> +#include <asm/firmware.h> +#include <asm/hvcall.h> #include <linux/gfp.h> #include <linux/sched.h> #include <linux/vmalloc.h> @@ -52,8 +54,6 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, #define MSR_USER32 MSR_USER #define MSR_USER64 MSR_USER #define HW_PAGE_SIZE PAGE_SIZE -#define __hard_irq_disable local_irq_disable -#define __hard_irq_enable local_irq_enable #endif void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) @@ -66,7 +66,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; svcpu_put(svcpu); #endif - + vcpu->cpu = smp_processor_id(); #ifdef CONFIG_PPC_BOOK3S_32 current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu; #endif @@ -83,17 +83,71 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) svcpu_put(svcpu); #endif - kvmppc_giveup_ext(vcpu, MSR_FP); - kvmppc_giveup_ext(vcpu, MSR_VEC); - kvmppc_giveup_ext(vcpu, MSR_VSX); + kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); + vcpu->cpu = -1; +} + +int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) +{ + int r = 1; /* Indicate we want to get back into the guest */ + + /* We misuse TLB_FLUSH to indicate that we want to clear + all shadow cache entries */ + if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) + kvmppc_mmu_pte_flush(vcpu, 0, 0); + + return r; +} + +/************* MMU Notifiers *************/ + +int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) +{ + trace_kvm_unmap_hva(hva); + + /* + * Flush all shadow tlb entries everywhere. 
This is slow, but + * we are 100% sure that we catch the to be unmapped page + */ + kvm_flush_remote_tlbs(kvm); + + return 0; } +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +{ + /* kvm_unmap_hva flushes everything anyways */ + kvm_unmap_hva(kvm, start); + + return 0; +} + +int kvm_age_hva(struct kvm *kvm, unsigned long hva) +{ + /* XXX could be more clever ;) */ + return 0; +} + +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) +{ + /* XXX could be more clever ;) */ + return 0; +} + +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) +{ + /* The page will get remapped properly on its next fault */ + kvm_unmap_hva(kvm, hva); +} + +/*****************************************/ + static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) { ulong smsr = vcpu->arch.shared->msr; /* Guest MSR values */ - smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE; + smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE; /* Process MSR values */ smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE; /* External providers the guest reserved */ @@ -379,10 +433,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, static inline int get_fpr_index(int i) { -#ifdef CONFIG_VSX - i *= 2; -#endif - return i; + return i * TS_FPRWIDTH; } /* Give up external provider (FPU, Altivec, VSX) */ @@ -396,41 +447,49 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) u64 *thread_fpr = (u64*)t->fpr; int i; - if (!(vcpu->arch.guest_owned_ext & msr)) + /* + * VSX instructions can access FP and vector registers, so if + * we are giving up VSX, make sure we give up FP and VMX as well. + */ + if (msr & MSR_VSX) + msr |= MSR_FP | MSR_VEC; + + msr &= vcpu->arch.guest_owned_ext; + if (!msr) return; #ifdef DEBUG_EXT printk(KERN_INFO "Giving up ext 0x%lx\n", msr); #endif - switch (msr) { - case MSR_FP: + if (msr & MSR_FP) { + /* + * Note that on CPUs with VSX, giveup_fpu stores + * both the traditional FP registers and the added VSX + * registers into thread.fpr[]. + */ giveup_fpu(current); for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) vcpu_fpr[i] = thread_fpr[get_fpr_index(i)]; vcpu->arch.fpscr = t->fpscr.val; - break; - case MSR_VEC: + +#ifdef CONFIG_VSX + if (cpu_has_feature(CPU_FTR_VSX)) + for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++) + vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1]; +#endif + } + #ifdef CONFIG_ALTIVEC + if (msr & MSR_VEC) { giveup_altivec(current); memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr)); vcpu->arch.vscr = t->vscr; -#endif - break; - case MSR_VSX: -#ifdef CONFIG_VSX - __giveup_vsx(current); - for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++) - vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1]; -#endif - break; - default: - BUG(); } +#endif - vcpu->arch.guest_owned_ext &= ~msr; - current->thread.regs->msr &= ~msr; + vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX); kvmppc_recalc_shadow_msr(vcpu); } @@ -490,47 +549,56 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, return RESUME_GUEST; } - /* We already own the ext */ - if (vcpu->arch.guest_owned_ext & msr) { - return RESUME_GUEST; + if (msr == MSR_VSX) { + /* No VSX? Give an illegal instruction interrupt */ +#ifdef CONFIG_VSX + if (!cpu_has_feature(CPU_FTR_VSX)) +#endif + { + kvmppc_core_queue_program(vcpu, SRR1_PROGILL); + return RESUME_GUEST; + } + + /* + * We have to load up all the FP and VMX registers before + * we can let the guest use VSX instructions. 
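Note: the comment above, together with the matching change to kvmppc_giveup_ext() earlier in this hunk, encodes the rule that VSX cannot be handled on its own: using VSX implies owning FP and VEC too. A small sketch of the load-up direction, where a VSX request is widened and then reduced to the facilities not yet owned; the MSR bit values are the usual PPC ones, included only so the example compiles and runs.

#include <stdio.h>

#define MSR_FP   0x00002000UL
#define MSR_VEC  0x02000000UL
#define MSR_VSX  0x00800000UL

static unsigned long facilities_to_load(unsigned long msr,
                                        unsigned long guest_owned)
{
        /* VSX instructions touch the FP and VMX register files as well,
         * so a request for VSX pulls in FP and VEC ... */
        if (msr & MSR_VSX)
                msr |= MSR_FP | MSR_VEC;
        /* ... and we only need to load what the guest does not own yet. */
        return msr & ~guest_owned;
}

int main(void)
{
        unsigned long need = facilities_to_load(MSR_VSX, MSR_FP);

        printf("still need to load: %#lx\n", need);   /* VEC | VSX */
        return 0;
}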
+ */ + msr = MSR_FP | MSR_VEC | MSR_VSX; } + /* See if we already own all the ext(s) needed */ + msr &= ~vcpu->arch.guest_owned_ext; + if (!msr) + return RESUME_GUEST; + #ifdef DEBUG_EXT printk(KERN_INFO "Loading up ext 0x%lx\n", msr); #endif current->thread.regs->msr |= msr; - switch (msr) { - case MSR_FP: + if (msr & MSR_FP) { for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) thread_fpr[get_fpr_index(i)] = vcpu_fpr[i]; - +#ifdef CONFIG_VSX + for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++) + thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i]; +#endif t->fpscr.val = vcpu->arch.fpscr; t->fpexc_mode = 0; kvmppc_load_up_fpu(); - break; - case MSR_VEC: + } + + if (msr & MSR_VEC) { #ifdef CONFIG_ALTIVEC memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr)); t->vscr = vcpu->arch.vscr; t->vrsave = -1; kvmppc_load_up_altivec(); #endif - break; - case MSR_VSX: -#ifdef CONFIG_VSX - for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++) - thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i]; - kvmppc_load_up_vsx(); -#endif - break; - default: - BUG(); } vcpu->arch.guest_owned_ext |= msr; - kvmppc_recalc_shadow_msr(vcpu); return RESUME_GUEST; @@ -540,18 +608,18 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int exit_nr) { int r = RESUME_HOST; + int s; vcpu->stat.sum_exits++; run->exit_reason = KVM_EXIT_UNKNOWN; run->ready_for_interrupt_injection = 1; - /* We get here with MSR.EE=0, so enable it to be a nice citizen */ - __hard_irq_enable(); + /* We get here with MSR.EE=1 */ + + trace_kvm_exit(exit_nr, vcpu); + kvm_guest_exit(); - trace_kvm_book3s_exit(exit_nr, vcpu); - preempt_enable(); - kvm_resched(vcpu); switch (exit_nr) { case BOOK3S_INTERRUPT_INST_STORAGE: { @@ -694,6 +762,11 @@ program_interrupt: run->exit_reason = KVM_EXIT_MMIO; r = RESUME_HOST_NV; break; + case EMULATE_DO_PAPR: + run->exit_reason = KVM_EXIT_PAPR_HCALL; + vcpu->arch.hcall_needed = 1; + r = RESUME_HOST_NV; + break; default: BUG(); } @@ -802,7 +875,6 @@ program_interrupt: } } - preempt_disable(); if (!(r & RESUME_HOST)) { /* To avoid clobbering exit_reason, only check for signals if * we aren't already exiting to userspace for some other @@ -814,20 +886,13 @@ program_interrupt: * and if we really did time things so badly, then we just exit * again due to a host external interrupt. */ - __hard_irq_disable(); - if (signal_pending(current)) { - __hard_irq_enable(); -#ifdef EXIT_DEBUG - printk(KERN_EMERG "KVM: Going back to host\n"); -#endif - vcpu->stat.signal_exits++; - run->exit_reason = KVM_EXIT_INTR; - r = -EINTR; + local_irq_disable(); + s = kvmppc_prepare_to_enter(vcpu); + if (s <= 0) { + local_irq_enable(); + r = s; } else { - /* In case an interrupt came in that was triggered - * from userspace (like DEC), we need to check what - * to inject now! */ - kvmppc_core_prepare_to_enter(vcpu); + kvmppc_lazy_ee_enable(); } } @@ -899,34 +964,59 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, return 0; } -int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) +int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) { - int r = -EINVAL; + int r = 0; - switch (reg->id) { + switch (id) { case KVM_REG_PPC_HIOR: - r = copy_to_user((u64 __user *)(long)reg->addr, - &to_book3s(vcpu)->hior, sizeof(u64)); + *val = get_reg_val(id, to_book3s(vcpu)->hior); + break; +#ifdef CONFIG_VSX + case KVM_REG_PPC_VSR0 ... 
KVM_REG_PPC_VSR31: { + long int i = id - KVM_REG_PPC_VSR0; + + if (!cpu_has_feature(CPU_FTR_VSX)) { + r = -ENXIO; + break; + } + val->vsxval[0] = vcpu->arch.fpr[i]; + val->vsxval[1] = vcpu->arch.vsr[i]; break; + } +#endif /* CONFIG_VSX */ default: + r = -EINVAL; break; } return r; } -int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) +int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) { - int r = -EINVAL; + int r = 0; - switch (reg->id) { + switch (id) { case KVM_REG_PPC_HIOR: - r = copy_from_user(&to_book3s(vcpu)->hior, - (u64 __user *)(long)reg->addr, sizeof(u64)); - if (!r) - to_book3s(vcpu)->hior_explicit = true; + to_book3s(vcpu)->hior = set_reg_val(id, *val); + to_book3s(vcpu)->hior_explicit = true; + break; +#ifdef CONFIG_VSX + case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: { + long int i = id - KVM_REG_PPC_VSR0; + + if (!cpu_has_feature(CPU_FTR_VSX)) { + r = -ENXIO; + break; + } + vcpu->arch.fpr[i] = val->vsxval[0]; + vcpu->arch.vsr[i] = val->vsxval[1]; break; + } +#endif /* CONFIG_VSX */ default: + r = -EINVAL; break; } @@ -1020,8 +1110,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) #endif ulong ext_msr; - preempt_disable(); - /* Check if we can run the vcpu at all */ if (!vcpu->arch.sane) { kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; @@ -1029,21 +1117,16 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) goto out; } - kvmppc_core_prepare_to_enter(vcpu); - /* * Interrupts could be timers for the guest which we have to inject * again, so let's postpone them until we're in the guest and if we * really did time things so badly, then we just exit again due to * a host external interrupt. */ - __hard_irq_disable(); - - /* No need to go into the guest when all we do is going out */ - if (signal_pending(current)) { - __hard_irq_enable(); - kvm_run->exit_reason = KVM_EXIT_INTR; - ret = -EINTR; + local_irq_disable(); + ret = kvmppc_prepare_to_enter(vcpu); + if (ret <= 0) { + local_irq_enable(); goto out; } @@ -1070,7 +1153,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) /* Save VSX state in stack */ used_vsr = current->thread.used_vsr; if (used_vsr && (current->thread.regs->msr & MSR_VSX)) - __giveup_vsx(current); + __giveup_vsx(current); #endif /* Remember the MSR with disabled extensions */ @@ -1080,20 +1163,19 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) if (vcpu->arch.shared->msr & MSR_FP) kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); - kvm_guest_enter(); + kvmppc_lazy_ee_enable(); ret = __kvmppc_vcpu_run(kvm_run, vcpu); - kvm_guest_exit(); - - current->thread.regs->msr = ext_msr; + /* No need for kvm_guest_exit. It's done in handle_exit. + We also get here with interrupts enabled. 
*/ /* Make sure we save the guest FPU/Altivec/VSX state */ - kvmppc_giveup_ext(vcpu, MSR_FP); - kvmppc_giveup_ext(vcpu, MSR_VEC); - kvmppc_giveup_ext(vcpu, MSR_VSX); + kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); + + current->thread.regs->msr = ext_msr; - /* Restore FPU state from stack */ + /* Restore FPU/VSX state from stack */ memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr)); current->thread.fpscr.val = fpscr; current->thread.fpexc_mode = fpexc_mode; @@ -1113,7 +1195,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) #endif out: - preempt_enable(); + vcpu->mode = OUTSIDE_GUEST_MODE; return ret; } @@ -1181,23 +1263,49 @@ int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info) } #endif /* CONFIG_PPC64 */ +void kvmppc_core_free_memslot(struct kvm_memory_slot *free, + struct kvm_memory_slot *dont) +{ +} + +int kvmppc_core_create_memslot(struct kvm_memory_slot *slot, + unsigned long npages) +{ + return 0; +} + int kvmppc_core_prepare_memory_region(struct kvm *kvm, + struct kvm_memory_slot *memslot, struct kvm_userspace_memory_region *mem) { return 0; } void kvmppc_core_commit_memory_region(struct kvm *kvm, - struct kvm_userspace_memory_region *mem) + struct kvm_userspace_memory_region *mem, + struct kvm_memory_slot old) { } +void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) +{ +} + +static unsigned int kvm_global_user_count = 0; +static DEFINE_SPINLOCK(kvm_global_user_count_lock); + int kvmppc_core_init_vm(struct kvm *kvm) { #ifdef CONFIG_PPC64 INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables); #endif + if (firmware_has_feature(FW_FEATURE_SET_MODE)) { + spin_lock(&kvm_global_user_count_lock); + if (++kvm_global_user_count == 1) + pSeries_disable_reloc_on_exc(); + spin_unlock(&kvm_global_user_count_lock); + } return 0; } @@ -1206,6 +1314,14 @@ void kvmppc_core_destroy_vm(struct kvm *kvm) #ifdef CONFIG_PPC64 WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); #endif + + if (firmware_has_feature(FW_FEATURE_SET_MODE)) { + spin_lock(&kvm_global_user_count_lock); + BUG_ON(kvm_global_user_count == 0); + if (--kvm_global_user_count == 0) + pSeries_enable_reloc_on_exc(); + spin_unlock(&kvm_global_user_count_lock); + } } static int kvmppc_book3s_init(void) diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index 9ecf6e35cd8..8f7633e3afb 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S @@ -170,20 +170,21 @@ kvmppc_handler_skip_ins: * Call kvmppc_handler_trampoline_enter in real mode * * On entry, r4 contains the guest shadow MSR + * MSR.EE has to be 0 when calling this function */ _GLOBAL(kvmppc_entry_trampoline) mfmsr r5 LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter) toreal(r7) - li r9, MSR_RI - ori r9, r9, MSR_EE - andc r9, r5, r9 /* Clear EE and RI in MSR value */ li r6, MSR_IR | MSR_DR - ori r6, r6, MSR_EE - andc r6, r5, r6 /* Clear EE, DR and IR in MSR value */ - MTMSR_EERI(r9) /* Clear EE and RI in MSR */ - mtsrr0 r7 /* before we set srr0/1 */ + andc r6, r5, r6 /* Clear DR and IR in MSR value */ + /* + * Set EE in HOST_MSR so that it's enabled when we get into our + * C exit handler function + */ + ori r5, r5, MSR_EE + mtsrr0 r7 mtsrr1 r6 RFI @@ -233,8 +234,5 @@ define_load_up(fpu) #ifdef CONFIG_ALTIVEC define_load_up(altivec) #endif -#ifdef CONFIG_VSX -define_load_up(vsx) -#endif #include "book3s_segment.S" diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index d25a097c852..020923e4313 100644 --- 
a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -36,9 +36,11 @@ #include <asm/dbell.h> #include <asm/hw_irq.h> #include <asm/irq.h> +#include <asm/time.h> #include "timing.h" #include "booke.h" +#include "trace.h" unsigned long kvmppc_booke_handlers; @@ -62,6 +64,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "halt_wakeup", VCPU_STAT(halt_wakeup) }, { "doorbell", VCPU_STAT(dbell_exits) }, { "guest doorbell", VCPU_STAT(gdbell_exits) }, + { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, { NULL } }; @@ -120,6 +123,16 @@ static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu) } #endif +static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu) +{ +#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV) + /* We always treat the FP bit as enabled from the host + perspective, so only need to adjust the shadow MSR */ + vcpu->arch.shadow_msr &= ~MSR_FP; + vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP; +#endif +} + /* * Helper function for "full" MSR writes. No need to call this if only * EE/CE/ME/DE/RI are changing. @@ -136,11 +149,13 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) kvmppc_mmu_msr_notify(vcpu, old_msr); kvmppc_vcpu_sync_spe(vcpu); + kvmppc_vcpu_sync_fpu(vcpu); } static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int priority) { + trace_kvm_booke_queue_irqprio(vcpu, priority); set_bit(priority, &vcpu->arch.pending_exceptions); } @@ -167,6 +182,14 @@ static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); } +static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags, + ulong esr_flags) +{ + vcpu->arch.queued_dear = dear_flags; + vcpu->arch.queued_esr = esr_flags; + kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT); +} + void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags) { vcpu->arch.queued_esr = esr_flags; @@ -206,6 +229,16 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions); } +static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu) +{ + kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG); +} + +static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu) +{ + clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions); +} + static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) { #ifdef CONFIG_KVM_BOOKE_HV @@ -275,18 +308,28 @@ static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr) #endif } +static unsigned long get_guest_epr(struct kvm_vcpu *vcpu) +{ +#ifdef CONFIG_KVM_BOOKE_HV + return mfspr(SPRN_GEPR); +#else + return vcpu->arch.epr; +#endif +} + /* Deliver the interrupt of the corresponding priority, if possible. 
*/ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) { int allowed = 0; ulong msr_mask = 0; - bool update_esr = false, update_dear = false; + bool update_esr = false, update_dear = false, update_epr = false; ulong crit_raw = vcpu->arch.shared->critical; ulong crit_r1 = kvmppc_get_gpr(vcpu, 1); bool crit; bool keep_irq = false; enum int_class int_class; + ulong new_msr = vcpu->arch.shared->msr; /* Truncate crit indicators in 32 bit mode */ if (!(vcpu->arch.shared->msr & MSR_SF)) { @@ -304,9 +347,13 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, keep_irq = true; } + if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_enabled) + update_epr = true; + switch (priority) { case BOOKE_IRQPRIO_DTLB_MISS: case BOOKE_IRQPRIO_DATA_STORAGE: + case BOOKE_IRQPRIO_ALIGNMENT: update_dear = true; /* fall through */ case BOOKE_IRQPRIO_INST_STORAGE: @@ -320,11 +367,11 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, case BOOKE_IRQPRIO_SPE_FP_DATA: case BOOKE_IRQPRIO_SPE_FP_ROUND: case BOOKE_IRQPRIO_AP_UNAVAIL: - case BOOKE_IRQPRIO_ALIGNMENT: allowed = 1; msr_mask = MSR_CE | MSR_ME | MSR_DE; int_class = INT_CLASS_NONCRIT; break; + case BOOKE_IRQPRIO_WATCHDOG: case BOOKE_IRQPRIO_CRITICAL: case BOOKE_IRQPRIO_DBELL_CRIT: allowed = vcpu->arch.shared->msr & MSR_CE; @@ -381,7 +428,15 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, set_guest_esr(vcpu, vcpu->arch.queued_esr); if (update_dear == true) set_guest_dear(vcpu, vcpu->arch.queued_dear); - kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask); + if (update_epr == true) + kvm_make_request(KVM_REQ_EPR_EXIT, vcpu); + + new_msr &= msr_mask; +#if defined(CONFIG_64BIT) + if (vcpu->arch.epcr & SPRN_EPCR_ICM) + new_msr |= MSR_CM; +#endif + kvmppc_set_msr(vcpu, new_msr); if (!keep_irq) clear_bit(priority, &vcpu->arch.pending_exceptions); @@ -404,12 +459,121 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, return allowed; } +/* + * Return the number of jiffies until the next timeout. If the timeout is + * longer than the NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA + * because the larger value can break the timer APIs. + */ +static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu) +{ + u64 tb, wdt_tb, wdt_ticks = 0; + u64 nr_jiffies = 0; + u32 period = TCR_GET_WP(vcpu->arch.tcr); + + wdt_tb = 1ULL << (63 - period); + tb = get_tb(); + /* + * The watchdog timeout will hapeen when TB bit corresponding + * to watchdog will toggle from 0 to 1. + */ + if (tb & wdt_tb) + wdt_ticks = wdt_tb; + + wdt_ticks += wdt_tb - (tb & (wdt_tb - 1)); + + /* Convert timebase ticks to jiffies */ + nr_jiffies = wdt_ticks; + + if (do_div(nr_jiffies, tb_ticks_per_jiffy)) + nr_jiffies++; + + return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA); +} + +static void arm_next_watchdog(struct kvm_vcpu *vcpu) +{ + unsigned long nr_jiffies; + unsigned long flags; + + /* + * If TSR_ENW and TSR_WIS are not set then no need to exit to + * userspace, so clear the KVM_REQ_WATCHDOG request. + */ + if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS)) + clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests); + + spin_lock_irqsave(&vcpu->arch.wdt_lock, flags); + nr_jiffies = watchdog_next_timeout(vcpu); + /* + * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA + * then do not run the watchdog timer as this can break timer APIs. 
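The arithmetic in watchdog_next_timeout() above selects the timebase bit named by the watchdog period, counts the ticks until that bit next rises from 0 to 1, and converts the result to jiffies, clamped at NEXT_TIMER_MAX_DELTA. A minimal standalone sketch of that conversion, with period, tb_now and ticks_per_jiffy as hypothetical stand-ins for TCR_GET_WP(vcpu->arch.tcr), get_tb() and tb_ticks_per_jiffy (illustrative only, not part of the patch):

	/* Sketch only: mirrors the conversion done by watchdog_next_timeout(). */
	static unsigned long sketch_watchdog_jiffies(u32 period, u64 tb_now,
						     u64 ticks_per_jiffy)
	{
		u64 wdt_tb = 1ULL << (63 - period); /* timebase bit chosen by the period */
		u64 wdt_ticks = 0;
		u64 nr_jiffies;

		if (tb_now & wdt_tb)		/* bit is currently 1: wait for it to fall... */
			wdt_ticks = wdt_tb;
		wdt_ticks += wdt_tb - (tb_now & (wdt_tb - 1)); /* ...then rise from 0 to 1 */

		nr_jiffies = wdt_ticks;
		if (do_div(nr_jiffies, ticks_per_jiffy))	/* round up on a remainder */
			nr_jiffies++;

		return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
	}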
+ */ + if (nr_jiffies < NEXT_TIMER_MAX_DELTA) + mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies); + else + del_timer(&vcpu->arch.wdt_timer); + spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags); +} + +void kvmppc_watchdog_func(unsigned long data) +{ + struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; + u32 tsr, new_tsr; + int final; + + do { + new_tsr = tsr = vcpu->arch.tsr; + final = 0; + + /* Time out event */ + if (tsr & TSR_ENW) { + if (tsr & TSR_WIS) + final = 1; + else + new_tsr = tsr | TSR_WIS; + } else { + new_tsr = tsr | TSR_ENW; + } + } while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr); + + if (new_tsr & TSR_WIS) { + smp_wmb(); + kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); + kvm_vcpu_kick(vcpu); + } + + /* + * If this is final watchdog expiry and some action is required + * then exit to userspace. + */ + if (final && (vcpu->arch.tcr & TCR_WRC_MASK) && + vcpu->arch.watchdog_enabled) { + smp_wmb(); + kvm_make_request(KVM_REQ_WATCHDOG, vcpu); + kvm_vcpu_kick(vcpu); + } + + /* + * Stop running the watchdog timer after final expiration to + * prevent the host from being flooded with timers if the + * guest sets a short period. + * Timers will resume when TSR/TCR is updated next time. + */ + if (!final) + arm_next_watchdog(vcpu); +} + static void update_timer_ints(struct kvm_vcpu *vcpu) { if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS)) kvmppc_core_queue_dec(vcpu); else kvmppc_core_dequeue_dec(vcpu); + + if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS)) + kvmppc_core_queue_watchdog(vcpu); + else + kvmppc_core_dequeue_watchdog(vcpu); } static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu) @@ -417,13 +581,6 @@ static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu) unsigned long *pending = &vcpu->arch.pending_exceptions; unsigned int priority; - if (vcpu->requests) { - if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) { - smp_mb(); - update_timer_ints(vcpu); - } - } - priority = __ffs(*pending); while (priority < BOOKE_IRQPRIO_MAX) { if (kvmppc_booke_irqprio_deliver(vcpu, priority)) @@ -446,6 +603,11 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) kvmppc_core_check_exceptions(vcpu); + if (vcpu->requests) { + /* Exception delivery raised request; start over */ + return 1; + } + if (vcpu->arch.shared->msr & MSR_WE) { local_irq_enable(); kvm_vcpu_block(vcpu); @@ -459,37 +621,27 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) return r; } -/* - * Common checks before entering the guest world. Call with interrupts - * disabled. 
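kvmppc_watchdog_func() above updates vcpu->arch.tsr without taking a lock by retrying a cmpxchg until the snapshot it computed from is still the current value. A standalone sketch of that retry idiom, with word as a hypothetical stand-in for &vcpu->arch.tsr (illustrative only, not part of the patch):

	/* Sketch of the lock-free read-modify-write retry used above. */
	static u32 sketch_set_bits(u32 *word, u32 bits)
	{
		u32 old, new;

		do {
			old = *word;		/* snapshot the current value */
			new = old | bits;	/* derive the desired new value */
			/* retry if *word changed between the snapshot and the cmpxchg */
		} while (cmpxchg(word, old, new) != old);

		return new;
	}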
- * - * returns !0 if a signal is pending and check_signal is true - */ -static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) +int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) { - int r = 0; + int r = 1; /* Indicate we want to get back into the guest */ - WARN_ON_ONCE(!irqs_disabled()); - while (true) { - if (need_resched()) { - local_irq_enable(); - cond_resched(); - local_irq_disable(); - continue; - } - - if (signal_pending(current)) { - r = 1; - break; - } + if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) + update_timer_ints(vcpu); +#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) + if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) + kvmppc_core_flush_tlb(vcpu); +#endif - if (kvmppc_core_prepare_to_enter(vcpu)) { - /* interrupts got enabled in between, so we - are back at square 1 */ - continue; - } + if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) { + vcpu->run->exit_reason = KVM_EXIT_WATCHDOG; + r = 0; + } - break; + if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) { + vcpu->run->epr.epr = 0; + vcpu->arch.epr_needed = true; + vcpu->run->exit_reason = KVM_EXIT_EPR; + r = 0; } return r; @@ -497,7 +649,7 @@ static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) { - int ret; + int ret, s; #ifdef CONFIG_PPC_FPU unsigned int fpscr; int fpexc_mode; @@ -510,11 +662,13 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) } local_irq_disable(); - if (kvmppc_prepare_to_enter(vcpu)) { - kvm_run->exit_reason = KVM_EXIT_INTR; - ret = -EINTR; + s = kvmppc_prepare_to_enter(vcpu); + if (s <= 0) { + local_irq_enable(); + ret = s; goto out; } + kvmppc_lazy_ee_enable(); kvm_guest_enter(); @@ -542,6 +696,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) ret = __kvmppc_vcpu_run(kvm_run, vcpu); + /* No need for kvm_guest_exit. It's done in handle_exit. + We also get here with interrupts enabled. 
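Both kvmppc_vcpu_run() variants touched by this patch now share the same entry sequence: interrupts are disabled, kvmppc_prepare_to_enter() decides whether the guest may be entered, a result of <= 0 is handed straight back to userspace, and otherwise lazy EE is enabled before __kvmppc_vcpu_run(). Condensed here purely for illustration (FP/AltiVec handling and labels elided; this is not the literal function body):

	local_irq_disable();
	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		local_irq_enable();
		ret = s;		/* <= 0: bail out, e.g. a pending signal */
		goto out;
	}
	kvmppc_lazy_ee_enable();	/* guest runs with lazy EE state */
	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
	/* kvm_guest_exit() and re-enabling interrupts happen in the exit handler */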
*/ + #ifdef CONFIG_PPC_FPU kvmppc_save_guest_fp(vcpu); @@ -557,10 +714,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) current->thread.fpexc_mode = fpexc_mode; #endif - kvm_guest_exit(); - out: - local_irq_enable(); + vcpu->mode = OUTSIDE_GUEST_MODE; return ret; } @@ -668,6 +823,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int exit_nr) { int r = RESUME_HOST; + int s; /* update before a new last_exit_type is rewritten */ kvmppc_update_timing_stats(vcpu); @@ -677,6 +833,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, local_irq_enable(); + trace_kvm_exit(exit_nr, vcpu); + kvm_guest_exit(); + run->exit_reason = KVM_EXIT_UNKNOWN; run->ready_for_interrupt_injection = 1; @@ -820,6 +979,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, r = RESUME_GUEST; break; + case BOOKE_INTERRUPT_ALIGNMENT: + kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear, + vcpu->arch.fault_esr); + r = RESUME_GUEST; + break; + #ifdef CONFIG_KVM_BOOKE_HV case BOOKE_INTERRUPT_HV_SYSCALL: if (!(vcpu->arch.shared->msr & MSR_PR)) { @@ -971,10 +1136,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, */ if (!(r & RESUME_HOST)) { local_irq_disable(); - if (kvmppc_prepare_to_enter(vcpu)) { - run->exit_reason = KVM_EXIT_INTR; - r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); - kvmppc_account_exit(vcpu, SIGNAL_EXITS); + s = kvmppc_prepare_to_enter(vcpu); + if (s <= 0) { + local_irq_enable(); + r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); + } else { + kvmppc_lazy_ee_enable(); } } @@ -1011,6 +1178,21 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) return r; } +int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu) +{ + /* setup watchdog timer once */ + spin_lock_init(&vcpu->arch.wdt_lock); + setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, + (unsigned long)vcpu); + + return 0; +} + +void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu) +{ + del_timer_sync(&vcpu->arch.wdt_timer); +} + int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; @@ -1106,7 +1288,13 @@ static int set_sregs_base(struct kvm_vcpu *vcpu, } if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) { + u32 old_tsr = vcpu->arch.tsr; + vcpu->arch.tsr = sregs->u.e.tsr; + + if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS)) + arm_next_watchdog(vcpu); + update_timer_ints(vcpu); } @@ -1221,12 +1409,82 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) { - return -EINVAL; + int r = -EINVAL; + + switch (reg->id) { + case KVM_REG_PPC_IAC1: + case KVM_REG_PPC_IAC2: + case KVM_REG_PPC_IAC3: + case KVM_REG_PPC_IAC4: { + int iac = reg->id - KVM_REG_PPC_IAC1; + r = copy_to_user((u64 __user *)(long)reg->addr, + &vcpu->arch.dbg_reg.iac[iac], sizeof(u64)); + break; + } + case KVM_REG_PPC_DAC1: + case KVM_REG_PPC_DAC2: { + int dac = reg->id - KVM_REG_PPC_DAC1; + r = copy_to_user((u64 __user *)(long)reg->addr, + &vcpu->arch.dbg_reg.dac[dac], sizeof(u64)); + break; + } + case KVM_REG_PPC_EPR: { + u32 epr = get_guest_epr(vcpu); + r = put_user(epr, (u32 __user *)(long)reg->addr); + break; + } +#if defined(CONFIG_64BIT) + case KVM_REG_PPC_EPCR: + r = put_user(vcpu->arch.epcr, (u32 __user *)(long)reg->addr); + break; +#endif + default: + break; + } + return r; } int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) { - return -EINVAL; + int r = -EINVAL; + + switch (reg->id) { + case 
KVM_REG_PPC_IAC1: + case KVM_REG_PPC_IAC2: + case KVM_REG_PPC_IAC3: + case KVM_REG_PPC_IAC4: { + int iac = reg->id - KVM_REG_PPC_IAC1; + r = copy_from_user(&vcpu->arch.dbg_reg.iac[iac], + (u64 __user *)(long)reg->addr, sizeof(u64)); + break; + } + case KVM_REG_PPC_DAC1: + case KVM_REG_PPC_DAC2: { + int dac = reg->id - KVM_REG_PPC_DAC1; + r = copy_from_user(&vcpu->arch.dbg_reg.dac[dac], + (u64 __user *)(long)reg->addr, sizeof(u64)); + break; + } + case KVM_REG_PPC_EPR: { + u32 new_epr; + r = get_user(new_epr, (u32 __user *)(long)reg->addr); + if (!r) + kvmppc_set_epr(vcpu, new_epr); + break; + } +#if defined(CONFIG_64BIT) + case KVM_REG_PPC_EPCR: { + u32 new_epcr; + r = get_user(new_epcr, (u32 __user *)(long)reg->addr); + if (r == 0) + kvmppc_set_epcr(vcpu, new_epcr); + break; + } +#endif + default: + break; + } + return r; } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) @@ -1253,20 +1511,50 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) return -ENOTSUPP; } +void kvmppc_core_free_memslot(struct kvm_memory_slot *free, + struct kvm_memory_slot *dont) +{ +} + +int kvmppc_core_create_memslot(struct kvm_memory_slot *slot, + unsigned long npages) +{ + return 0; +} + int kvmppc_core_prepare_memory_region(struct kvm *kvm, + struct kvm_memory_slot *memslot, struct kvm_userspace_memory_region *mem) { return 0; } void kvmppc_core_commit_memory_region(struct kvm *kvm, - struct kvm_userspace_memory_region *mem) + struct kvm_userspace_memory_region *mem, + struct kvm_memory_slot old) { } +void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) +{ +} + +void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr) +{ +#if defined(CONFIG_64BIT) + vcpu->arch.epcr = new_epcr; +#ifdef CONFIG_KVM_BOOKE_HV + vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM; + if (vcpu->arch.epcr & SPRN_EPCR_ICM) + vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM; +#endif +#endif +} + void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr) { vcpu->arch.tcr = new_tcr; + arm_next_watchdog(vcpu); update_timer_ints(vcpu); } @@ -1281,6 +1569,14 @@ void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits) void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits) { clear_bits(tsr_bits, &vcpu->arch.tsr); + + /* + * We may have stopped the watchdog due to + * being stuck on final expiration. + */ + if (tsr_bits & (TSR_ENW | TSR_WIS)) + arm_next_watchdog(vcpu); + update_timer_ints(vcpu); } @@ -1298,19 +1594,23 @@ void kvmppc_decrementer_func(unsigned long data) void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { + vcpu->cpu = smp_processor_id(); current->thread.kvm_vcpu = vcpu; } void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu) { current->thread.kvm_vcpu = NULL; + vcpu->cpu = -1; } int __init kvmppc_booke_init(void) { #ifndef CONFIG_KVM_BOOKE_HV unsigned long ivor[16]; + unsigned long *handler = kvmppc_booke_handler_addr; unsigned long max_ivor = 0; + unsigned long handler_len; int i; /* We install our own exception handlers by hijacking IVPR. 
IVPR must @@ -1343,14 +1643,16 @@ int __init kvmppc_booke_init(void) for (i = 0; i < 16; i++) { if (ivor[i] > max_ivor) - max_ivor = ivor[i]; + max_ivor = i; + handler_len = handler[i + 1] - handler[i]; memcpy((void *)kvmppc_booke_handlers + ivor[i], - kvmppc_handlers_start + i * kvmppc_handler_len, - kvmppc_handler_len); + (void *)handler[i], handler_len); } - flush_icache_range(kvmppc_booke_handlers, - kvmppc_booke_handlers + max_ivor + kvmppc_handler_len); + + handler_len = handler[max_ivor + 1] - handler[max_ivor]; + flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers + + ivor[max_ivor] + handler_len); #endif /* !BOOKE_HV */ return 0; } diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h index ba61974c1e2..5fd1ba69357 100644 --- a/arch/powerpc/kvm/booke.h +++ b/arch/powerpc/kvm/booke.h @@ -65,10 +65,12 @@ (1 << BOOKE_IRQPRIO_CRITICAL)) extern unsigned long kvmppc_booke_handlers; +extern unsigned long kvmppc_booke_handler_addr[]; void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr); void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr); +void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr); void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr); void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits); void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits); diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c index 12834bb608a..27a4b2877c1 100644 --- a/arch/powerpc/kvm/booke_emulate.c +++ b/arch/powerpc/kvm/booke_emulate.c @@ -133,10 +133,10 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) vcpu->arch.csrr1 = spr_val; break; case SPRN_DBCR0: - vcpu->arch.dbcr0 = spr_val; + vcpu->arch.dbg_reg.dbcr0 = spr_val; break; case SPRN_DBCR1: - vcpu->arch.dbcr1 = spr_val; + vcpu->arch.dbg_reg.dbcr1 = spr_val; break; case SPRN_DBSR: vcpu->arch.dbsr &= ~spr_val; @@ -145,6 +145,14 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) kvmppc_clr_tsr_bits(vcpu, spr_val); break; case SPRN_TCR: + /* + * WRC is a 2-bit field that is supposed to preserve its + * value once written to non-zero. 
+ */ + if (vcpu->arch.tcr & TCR_WRC_MASK) { + spr_val &= ~TCR_WRC_MASK; + spr_val |= vcpu->arch.tcr & TCR_WRC_MASK; + } kvmppc_set_tcr(vcpu, spr_val); break; @@ -229,7 +237,17 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) case SPRN_IVOR15: vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = spr_val; break; - + case SPRN_MCSR: + vcpu->arch.mcsr &= ~spr_val; + break; +#if defined(CONFIG_64BIT) + case SPRN_EPCR: + kvmppc_set_epcr(vcpu, spr_val); +#ifdef CONFIG_KVM_BOOKE_HV + mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr); +#endif + break; +#endif default: emulated = EMULATE_FAIL; } @@ -251,6 +269,9 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) case SPRN_ESR: *spr_val = vcpu->arch.shared->esr; break; + case SPRN_EPR: + *spr_val = vcpu->arch.epr; + break; case SPRN_CSRR0: *spr_val = vcpu->arch.csrr0; break; @@ -258,10 +279,10 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) *spr_val = vcpu->arch.csrr1; break; case SPRN_DBCR0: - *spr_val = vcpu->arch.dbcr0; + *spr_val = vcpu->arch.dbg_reg.dbcr0; break; case SPRN_DBCR1: - *spr_val = vcpu->arch.dbcr1; + *spr_val = vcpu->arch.dbg_reg.dbcr1; break; case SPRN_DBSR: *spr_val = vcpu->arch.dbsr; @@ -321,6 +342,14 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) case SPRN_IVOR15: *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; break; + case SPRN_MCSR: + *spr_val = vcpu->arch.mcsr; + break; +#if defined(CONFIG_64BIT) + case SPRN_EPCR: + *spr_val = vcpu->arch.epcr; + break; +#endif default: emulated = EMULATE_FAIL; diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index bb46b32f981..f4bb55c9651 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S @@ -45,18 +45,21 @@ (1<<BOOKE_INTERRUPT_DEBUG)) #define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \ - (1<<BOOKE_INTERRUPT_DTLB_MISS)) + (1<<BOOKE_INTERRUPT_DTLB_MISS) | \ + (1<<BOOKE_INTERRUPT_ALIGNMENT)) #define NEED_ESR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \ (1<<BOOKE_INTERRUPT_INST_STORAGE) | \ (1<<BOOKE_INTERRUPT_PROGRAM) | \ - (1<<BOOKE_INTERRUPT_DTLB_MISS)) + (1<<BOOKE_INTERRUPT_DTLB_MISS) | \ + (1<<BOOKE_INTERRUPT_ALIGNMENT)) .macro KVM_HANDLER ivor_nr scratch srr0 _GLOBAL(kvmppc_handler_\ivor_nr) /* Get pointer to vcpu and record exit number. */ mtspr \scratch , r4 - mfspr r4, SPRN_SPRG_RVCPU + mfspr r4, SPRN_SPRG_THREAD + lwz r4, THREAD_KVM_VCPU(r4) stw r3, VCPU_GPR(R3)(r4) stw r5, VCPU_GPR(R5)(r4) stw r6, VCPU_GPR(R6)(r4) @@ -73,6 +76,14 @@ _GLOBAL(kvmppc_handler_\ivor_nr) bctr .endm +.macro KVM_HANDLER_ADDR ivor_nr + .long kvmppc_handler_\ivor_nr +.endm + +.macro KVM_HANDLER_END + .long kvmppc_handlers_end +.endm + _GLOBAL(kvmppc_handlers_start) KVM_HANDLER BOOKE_INTERRUPT_CRITICAL SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0 KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK SPRN_SPRG_RSCRATCH_MC SPRN_MCSRR0 @@ -93,9 +104,7 @@ KVM_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0 KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0 KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA SPRN_SPRG_RSCRATCH0 SPRN_SRR0 KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND SPRN_SPRG_RSCRATCH0 SPRN_SRR0 - -_GLOBAL(kvmppc_handler_len) - .long kvmppc_handler_1 - kvmppc_handler_0 +_GLOBAL(kvmppc_handlers_end) /* Registers: * SPRG_SCRATCH0: guest r4 @@ -402,9 +411,6 @@ lightweight_exit: lwz r8, kvmppc_booke_handlers@l(r8) mtspr SPRN_IVPR, r8 - /* Save vcpu pointer for the exception handlers. 
*/ - mtspr SPRN_SPRG_WVCPU, r4 - lwz r5, VCPU_SHARED(r4) /* Can't switch the stack pointer until after IVPR is switched, @@ -463,6 +469,31 @@ lightweight_exit: lwz r4, VCPU_GPR(R4)(r4) rfi + .data + .align 4 + .globl kvmppc_booke_handler_addr +kvmppc_booke_handler_addr: +KVM_HANDLER_ADDR BOOKE_INTERRUPT_CRITICAL +KVM_HANDLER_ADDR BOOKE_INTERRUPT_MACHINE_CHECK +KVM_HANDLER_ADDR BOOKE_INTERRUPT_DATA_STORAGE +KVM_HANDLER_ADDR BOOKE_INTERRUPT_INST_STORAGE +KVM_HANDLER_ADDR BOOKE_INTERRUPT_EXTERNAL +KVM_HANDLER_ADDR BOOKE_INTERRUPT_ALIGNMENT +KVM_HANDLER_ADDR BOOKE_INTERRUPT_PROGRAM +KVM_HANDLER_ADDR BOOKE_INTERRUPT_FP_UNAVAIL +KVM_HANDLER_ADDR BOOKE_INTERRUPT_SYSCALL +KVM_HANDLER_ADDR BOOKE_INTERRUPT_AP_UNAVAIL +KVM_HANDLER_ADDR BOOKE_INTERRUPT_DECREMENTER +KVM_HANDLER_ADDR BOOKE_INTERRUPT_FIT +KVM_HANDLER_ADDR BOOKE_INTERRUPT_WATCHDOG +KVM_HANDLER_ADDR BOOKE_INTERRUPT_DTLB_MISS +KVM_HANDLER_ADDR BOOKE_INTERRUPT_ITLB_MISS +KVM_HANDLER_ADDR BOOKE_INTERRUPT_DEBUG +KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_UNAVAIL +KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_DATA +KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_ROUND +KVM_HANDLER_END /*Always keep this in end*/ + #ifdef CONFIG_SPE _GLOBAL(kvmppc_save_guest_spe) cmpi 0,r3,0 diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S index 099fe8272b5..e8ed7d659c5 100644 --- a/arch/powerpc/kvm/bookehv_interrupts.S +++ b/arch/powerpc/kvm/bookehv_interrupts.S @@ -16,6 +16,7 @@ * * Author: Varun Sethi <varun.sethi@freescale.com> * Author: Scott Wood <scotwood@freescale.com> + * Author: Mihai Caraman <mihai.caraman@freescale.com> * * This file is derived from arch/powerpc/kvm/booke_interrupts.S */ @@ -30,31 +31,33 @@ #include <asm/bitsperlong.h> #include <asm/thread_info.h> +#ifdef CONFIG_64BIT +#include <asm/exception-64e.h> +#else #include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */ - -#define GET_VCPU(vcpu, thread) \ - PPC_LL vcpu, THREAD_KVM_VCPU(thread) +#endif #define LONGBYTES (BITS_PER_LONG / 8) #define VCPU_GUEST_SPRG(n) (VCPU_GUEST_SPRGS + (n * LONGBYTES)) /* The host stack layout: */ -#define HOST_R1 (0 * LONGBYTES) /* Implied by stwu. */ -#define HOST_CALLEE_LR (1 * LONGBYTES) -#define HOST_RUN (2 * LONGBYTES) /* struct kvm_run */ +#define HOST_R1 0 /* Implied by stwu. */ +#define HOST_CALLEE_LR PPC_LR_STKOFF +#define HOST_RUN (HOST_CALLEE_LR + LONGBYTES) /* * r2 is special: it holds 'current', and it made nonvolatile in the * kernel with the -ffixed-r2 gcc option. */ -#define HOST_R2 (3 * LONGBYTES) -#define HOST_CR (4 * LONGBYTES) -#define HOST_NV_GPRS (5 * LONGBYTES) +#define HOST_R2 (HOST_RUN + LONGBYTES) +#define HOST_CR (HOST_R2 + LONGBYTES) +#define HOST_NV_GPRS (HOST_CR + LONGBYTES) #define __HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * LONGBYTES)) #define HOST_NV_GPR(n) __HOST_NV_GPR(__REG_##n) #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + LONGBYTES) #define HOST_STACK_SIZE ((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */ -#define HOST_STACK_LR (HOST_STACK_SIZE + LONGBYTES) /* In caller stack frame. */ +/* LR in caller stack frame. 
*/ +#define HOST_STACK_LR (HOST_STACK_SIZE + PPC_LR_STKOFF) #define NEED_EMU 0x00000001 /* emulation -- save nv regs */ #define NEED_DEAR 0x00000002 /* save faulting DEAR */ @@ -201,12 +204,128 @@ b kvmppc_resume_host .endm +#ifdef CONFIG_64BIT +/* Exception types */ +#define EX_GEN 1 +#define EX_GDBELL 2 +#define EX_DBG 3 +#define EX_MC 4 +#define EX_CRIT 5 +#define EX_TLB 6 + +/* + * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h + */ +.macro kvm_handler intno type scratch, paca_ex, ex_r10, ex_r11, srr0, srr1, flags + _GLOBAL(kvmppc_handler_\intno\()_\srr1) + mr r11, r4 + /* + * Get vcpu from Paca: paca->__current.thread->kvm_vcpu + */ + PPC_LL r4, PACACURRENT(r13) + PPC_LL r4, (THREAD + THREAD_KVM_VCPU)(r4) + stw r10, VCPU_CR(r4) + PPC_STL r11, VCPU_GPR(R4)(r4) + PPC_STL r5, VCPU_GPR(R5)(r4) + .if \type == EX_CRIT + PPC_LL r5, (\paca_ex + EX_R13)(r13) + .else + mfspr r5, \scratch + .endif + PPC_STL r6, VCPU_GPR(R6)(r4) + PPC_STL r8, VCPU_GPR(R8)(r4) + PPC_STL r9, VCPU_GPR(R9)(r4) + PPC_STL r5, VCPU_GPR(R13)(r4) + PPC_LL r6, (\paca_ex + \ex_r10)(r13) + PPC_LL r8, (\paca_ex + \ex_r11)(r13) + PPC_STL r3, VCPU_GPR(R3)(r4) + PPC_STL r7, VCPU_GPR(R7)(r4) + PPC_STL r12, VCPU_GPR(R12)(r4) + PPC_STL r6, VCPU_GPR(R10)(r4) + PPC_STL r8, VCPU_GPR(R11)(r4) + mfctr r5 + PPC_STL r5, VCPU_CTR(r4) + mfspr r5, \srr0 + mfspr r6, \srr1 + kvm_handler_common \intno, \srr0, \flags +.endm + +#define EX_PARAMS(type) \ + EX_##type, \ + SPRN_SPRG_##type##_SCRATCH, \ + PACA_EX##type, \ + EX_R10, \ + EX_R11 + +#define EX_PARAMS_TLB \ + EX_TLB, \ + SPRN_SPRG_GEN_SCRATCH, \ + PACA_EXTLB, \ + EX_TLB_R10, \ + EX_TLB_R11 + +kvm_handler BOOKE_INTERRUPT_CRITICAL, EX_PARAMS(CRIT), \ + SPRN_CSRR0, SPRN_CSRR1, 0 +kvm_handler BOOKE_INTERRUPT_MACHINE_CHECK, EX_PARAMS(MC), \ + SPRN_MCSRR0, SPRN_MCSRR1, 0 +kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, EX_PARAMS(GEN), \ + SPRN_SRR0, SPRN_SRR1,(NEED_EMU | NEED_DEAR | NEED_ESR) +kvm_handler BOOKE_INTERRUPT_INST_STORAGE, EX_PARAMS(GEN), \ + SPRN_SRR0, SPRN_SRR1, NEED_ESR +kvm_handler BOOKE_INTERRUPT_EXTERNAL, EX_PARAMS(GEN), \ + SPRN_SRR0, SPRN_SRR1, 0 +kvm_handler BOOKE_INTERRUPT_ALIGNMENT, EX_PARAMS(GEN), \ + SPRN_SRR0, SPRN_SRR1,(NEED_DEAR | NEED_ESR) +kvm_handler BOOKE_INTERRUPT_PROGRAM, EX_PARAMS(GEN), \ + SPRN_SRR0, SPRN_SRR1,NEED_ESR +kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, EX_PARAMS(GEN), \ + SPRN_SRR0, SPRN_SRR1, 0 +kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, EX_PARAMS(GEN), \ + SPRN_SRR0, SPRN_SRR1, 0 +kvm_handler BOOKE_INTERRUPT_DECREMENTER, EX_PARAMS(GEN), \ + SPRN_SRR0, SPRN_SRR1, 0 +kvm_handler BOOKE_INTERRUPT_FIT, EX_PARAMS(GEN), \ + SPRN_SRR0, SPRN_SRR1, 0 +kvm_handler BOOKE_INTERRUPT_WATCHDOG, EX_PARAMS(CRIT),\ + SPRN_CSRR0, SPRN_CSRR1, 0 +/* + * Only bolted TLB miss exception handlers are supported for now + */ +kvm_handler BOOKE_INTERRUPT_DTLB_MISS, EX_PARAMS_TLB, \ + SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR) +kvm_handler BOOKE_INTERRUPT_ITLB_MISS, EX_PARAMS_TLB, \ + SPRN_SRR0, SPRN_SRR1, 0 +kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, EX_PARAMS(GEN), \ + SPRN_SRR0, SPRN_SRR1, 0 +kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, EX_PARAMS(GEN), \ + SPRN_SRR0, SPRN_SRR1, 0 +kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, EX_PARAMS(GEN), \ + SPRN_SRR0, SPRN_SRR1, 0 +kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, EX_PARAMS(GEN), \ + SPRN_SRR0, SPRN_SRR1, 0 +kvm_handler BOOKE_INTERRUPT_DOORBELL, EX_PARAMS(GEN), \ + SPRN_SRR0, SPRN_SRR1, 0 +kvm_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, EX_PARAMS(CRIT), \ + SPRN_CSRR0, SPRN_CSRR1, 0 +kvm_handler 
BOOKE_INTERRUPT_HV_PRIV, EX_PARAMS(GEN), \ + SPRN_SRR0, SPRN_SRR1, NEED_EMU +kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, EX_PARAMS(GEN), \ + SPRN_SRR0, SPRN_SRR1, 0 +kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, EX_PARAMS(GDBELL), \ + SPRN_GSRR0, SPRN_GSRR1, 0 +kvm_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, EX_PARAMS(CRIT), \ + SPRN_CSRR0, SPRN_CSRR1, 0 +kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(DBG), \ + SPRN_DSRR0, SPRN_DSRR1, 0 +kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(CRIT), \ + SPRN_CSRR0, SPRN_CSRR1, 0 +#else /* * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h */ .macro kvm_handler intno srr0, srr1, flags _GLOBAL(kvmppc_handler_\intno\()_\srr1) - GET_VCPU(r11, r10) + PPC_LL r11, THREAD_KVM_VCPU(r10) PPC_STL r3, VCPU_GPR(R3)(r11) mfspr r3, SPRN_SPRG_RSCRATCH0 PPC_STL r4, VCPU_GPR(R4)(r11) @@ -233,7 +352,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1) .macro kvm_lvl_handler intno scratch srr0, srr1, flags _GLOBAL(kvmppc_handler_\intno\()_\srr1) mfspr r10, SPRN_SPRG_THREAD - GET_VCPU(r11, r10) + PPC_LL r11, THREAD_KVM_VCPU(r10) PPC_STL r3, VCPU_GPR(R3)(r11) mfspr r3, \scratch PPC_STL r4, VCPU_GPR(R4)(r11) @@ -295,7 +414,7 @@ kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \ SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \ SPRN_SPRG_RSCRATCH_DBG, SPRN_DSRR0, SPRN_DSRR1, 0 - +#endif /* Registers: * SPRG_SCRATCH0: guest r10 diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c index b479ed77c51..6dd4de7802b 100644 --- a/arch/powerpc/kvm/e500.c +++ b/arch/powerpc/kvm/e500.c @@ -491,6 +491,9 @@ static int __init kvmppc_e500_init(void) { int r, i; unsigned long ivor[3]; + /* Process remaining handlers above the generic first 16 */ + unsigned long *handler = &kvmppc_booke_handler_addr[16]; + unsigned long handler_len; unsigned long max_ivor = 0; r = kvmppc_core_check_processor_compat(); @@ -506,15 +509,16 @@ static int __init kvmppc_e500_init(void) ivor[1] = mfspr(SPRN_IVOR33); ivor[2] = mfspr(SPRN_IVOR34); for (i = 0; i < 3; i++) { - if (ivor[i] > max_ivor) - max_ivor = ivor[i]; + if (ivor[i] > ivor[max_ivor]) + max_ivor = i; + handler_len = handler[i + 1] - handler[i]; memcpy((void *)kvmppc_booke_handlers + ivor[i], - kvmppc_handlers_start + (i + 16) * kvmppc_handler_len, - kvmppc_handler_len); + (void *)handler[i], handler_len); } - flush_icache_range(kvmppc_booke_handlers, - kvmppc_booke_handlers + max_ivor + kvmppc_handler_len); + handler_len = handler[max_ivor + 1] - handler[max_ivor]; + flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers + + ivor[max_ivor] + handler_len); return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); } diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h index aa8b81428bf..41cefd43655 100644 --- a/arch/powerpc/kvm/e500.h +++ b/arch/powerpc/kvm/e500.h @@ -27,8 +27,8 @@ #define E500_TLB_NUM 2 #define E500_TLB_VALID 1 -#define E500_TLB_DIRTY 2 -#define E500_TLB_BITMAP 4 +#define E500_TLB_BITMAP 2 +#define E500_TLB_TLB0 (1 << 2) struct tlbe_ref { pfn_t pfn; @@ -130,9 +130,9 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value); int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu); int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu); -int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb); -int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int rt, int ra, int rb); -int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb); +int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea); +int kvmppc_e500_emul_tlbilx(struct 
kvm_vcpu *vcpu, int type, gva_t ea); +int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea); int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500); void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500); @@ -155,7 +155,7 @@ get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe) static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe) { - return tlbe->mas2 & 0xfffff000; + return tlbe->mas2 & MAS2_EPN; } static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe) diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index e04b0ef55ce..e78f353a836 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c @@ -89,6 +89,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, int ra = get_ra(inst); int rb = get_rb(inst); int rt = get_rt(inst); + gva_t ea; switch (get_op(inst)) { case 31: @@ -113,15 +114,20 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, break; case XOP_TLBSX: - emulated = kvmppc_e500_emul_tlbsx(vcpu,rb); + ea = kvmppc_get_ea_indexed(vcpu, ra, rb); + emulated = kvmppc_e500_emul_tlbsx(vcpu, ea); break; - case XOP_TLBILX: - emulated = kvmppc_e500_emul_tlbilx(vcpu, rt, ra, rb); + case XOP_TLBILX: { + int type = rt & 0x3; + ea = kvmppc_get_ea_indexed(vcpu, ra, rb); + emulated = kvmppc_e500_emul_tlbilx(vcpu, type, ea); break; + } case XOP_TLBIVAX: - emulated = kvmppc_e500_emul_tlbivax(vcpu, ra, rb); + ea = kvmppc_get_ea_indexed(vcpu, ra, rb); + emulated = kvmppc_e500_emul_tlbivax(vcpu, ea); break; default: diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_mmu.c index ff38b664195..5c4475983f7 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_mmu.c @@ -1,10 +1,11 @@ /* - * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved. + * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved. * * Author: Yu Liu, yu.liu@freescale.com * Scott Wood, scottwood@freescale.com * Ashish Kalra, ashish.kalra@freescale.com * Varun Sethi, varun.sethi@freescale.com + * Alexander Graf, agraf@suse.de * * Description: * This file is based on arch/powerpc/kvm/44x_tlb.c, @@ -33,10 +34,7 @@ #include "e500.h" #include "trace.h" #include "timing.h" - -#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1) - -static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM]; +#include "e500_mmu_host.h" static inline unsigned int gtlb0_get_next_victim( struct kvmppc_vcpu_e500 *vcpu_e500) @@ -50,174 +48,6 @@ static inline unsigned int gtlb0_get_next_victim( return victim; } -static inline unsigned int tlb1_max_shadow_size(void) -{ - /* reserve one entry for magic page */ - return host_tlb_params[1].entries - tlbcam_index - 1; -} - -static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe) -{ - return tlbe->mas7_3 & (MAS3_SW|MAS3_UW); -} - -static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode) -{ - /* Mask off reserved bits. */ - mas3 &= MAS3_ATTRIB_MASK; - -#ifndef CONFIG_KVM_BOOKE_HV - if (!usermode) { - /* Guest is in supervisor mode, - * so we need to translate guest - * supervisor permissions into user permissions. 
*/ - mas3 &= ~E500_TLB_USER_PERM_MASK; - mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1; - } - mas3 |= E500_TLB_SUPER_PERM_MASK; -#endif - return mas3; -} - -static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode) -{ -#ifdef CONFIG_SMP - return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M; -#else - return mas2 & MAS2_ATTRIB_MASK; -#endif -} - -/* - * writing shadow tlb entry to host TLB - */ -static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe, - uint32_t mas0) -{ - unsigned long flags; - - local_irq_save(flags); - mtspr(SPRN_MAS0, mas0); - mtspr(SPRN_MAS1, stlbe->mas1); - mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2); - mtspr(SPRN_MAS3, (u32)stlbe->mas7_3); - mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32)); -#ifdef CONFIG_KVM_BOOKE_HV - mtspr(SPRN_MAS8, stlbe->mas8); -#endif - asm volatile("isync; tlbwe" : : : "memory"); - -#ifdef CONFIG_KVM_BOOKE_HV - /* Must clear mas8 for other host tlbwe's */ - mtspr(SPRN_MAS8, 0); - isync(); -#endif - local_irq_restore(flags); - - trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1, - stlbe->mas2, stlbe->mas7_3); -} - -/* - * Acquire a mas0 with victim hint, as if we just took a TLB miss. - * - * We don't care about the address we're searching for, other than that it's - * in the right set and is not present in the TLB. Using a zero PID and a - * userspace address means we don't have to set and then restore MAS5, or - * calculate a proper MAS6 value. - */ -static u32 get_host_mas0(unsigned long eaddr) -{ - unsigned long flags; - u32 mas0; - - local_irq_save(flags); - mtspr(SPRN_MAS6, 0); - asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET)); - mas0 = mfspr(SPRN_MAS0); - local_irq_restore(flags); - - return mas0; -} - -/* sesel is for tlb1 only */ -static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500, - int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe) -{ - u32 mas0; - - if (tlbsel == 0) { - mas0 = get_host_mas0(stlbe->mas2); - __write_host_tlbe(stlbe, mas0); - } else { - __write_host_tlbe(stlbe, - MAS0_TLBSEL(1) | - MAS0_ESEL(to_htlb1_esel(sesel))); - } -} - -#ifdef CONFIG_KVM_E500V2 -void kvmppc_map_magic(struct kvm_vcpu *vcpu) -{ - struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - struct kvm_book3e_206_tlb_entry magic; - ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; - unsigned int stid; - pfn_t pfn; - - pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT; - get_page(pfn_to_page(pfn)); - - preempt_disable(); - stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0); - - magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) | - MAS1_TSIZE(BOOK3E_PAGESZ_4K); - magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M; - magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) | - MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR; - magic.mas8 = 0; - - __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index)); - preempt_enable(); -} -#endif - -static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, - int tlbsel, int esel) -{ - struct kvm_book3e_206_tlb_entry *gtlbe = - get_entry(vcpu_e500, tlbsel, esel); - - if (tlbsel == 1 && - vcpu_e500->gtlb_priv[1][esel].ref.flags & E500_TLB_BITMAP) { - u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; - int hw_tlb_indx; - unsigned long flags; - - local_irq_save(flags); - while (tmp) { - hw_tlb_indx = __ilog2_u64(tmp & -tmp); - mtspr(SPRN_MAS0, - MAS0_TLBSEL(1) | - MAS0_ESEL(to_htlb1_esel(hw_tlb_indx))); - mtspr(SPRN_MAS1, 0); - asm volatile("tlbwe"); - vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0; - tmp &= tmp - 1; - } - mb(); - 
vcpu_e500->g2h_tlb1_map[esel] = 0; - vcpu_e500->gtlb_priv[1][esel].ref.flags &= ~E500_TLB_BITMAP; - local_irq_restore(flags); - - return; - } - - /* Guest tlbe is backed by at most one host tlbe per shadow pid. */ - kvmppc_e500_tlbil_one(vcpu_e500, gtlbe); -} - static int tlb0_set_base(gva_t addr, int sets, int ways) { int set_base; @@ -296,67 +126,6 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500, return -1; } -static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, - struct kvm_book3e_206_tlb_entry *gtlbe, - pfn_t pfn) -{ - ref->pfn = pfn; - ref->flags = E500_TLB_VALID; - - if (tlbe_is_writable(gtlbe)) - ref->flags |= E500_TLB_DIRTY; -} - -static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) -{ - if (ref->flags & E500_TLB_VALID) { - if (ref->flags & E500_TLB_DIRTY) - kvm_release_pfn_dirty(ref->pfn); - else - kvm_release_pfn_clean(ref->pfn); - - ref->flags = 0; - } -} - -static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500) -{ - if (vcpu_e500->g2h_tlb1_map) - memset(vcpu_e500->g2h_tlb1_map, 0, - sizeof(u64) * vcpu_e500->gtlb_params[1].entries); - if (vcpu_e500->h2g_tlb1_rmap) - memset(vcpu_e500->h2g_tlb1_rmap, 0, - sizeof(unsigned int) * host_tlb_params[1].entries); -} - -static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) -{ - int tlbsel = 0; - int i; - - for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { - struct tlbe_ref *ref = - &vcpu_e500->gtlb_priv[tlbsel][i].ref; - kvmppc_e500_ref_release(ref); - } -} - -static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500) -{ - int stlbsel = 1; - int i; - - kvmppc_e500_tlbil_all(vcpu_e500); - - for (i = 0; i < host_tlb_params[stlbsel].entries; i++) { - struct tlbe_ref *ref = - &vcpu_e500->tlb_refs[stlbsel][i]; - kvmppc_e500_ref_release(ref); - } - - clear_tlb_privs(vcpu_e500); -} - static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, unsigned int eaddr, int as) { @@ -382,212 +151,6 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, | (as ? MAS6_SAS : 0); } -/* TID must be supplied by the caller */ -static inline void kvmppc_e500_setup_stlbe( - struct kvm_vcpu *vcpu, - struct kvm_book3e_206_tlb_entry *gtlbe, - int tsize, struct tlbe_ref *ref, u64 gvaddr, - struct kvm_book3e_206_tlb_entry *stlbe) -{ - pfn_t pfn = ref->pfn; - u32 pr = vcpu->arch.shared->msr & MSR_PR; - - BUG_ON(!(ref->flags & E500_TLB_VALID)); - - /* Force IPROT=0 for all guest mappings. */ - stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID; - stlbe->mas2 = (gvaddr & MAS2_EPN) | - e500_shadow_mas2_attrib(gtlbe->mas2, pr); - stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | - e500_shadow_mas3_attrib(gtlbe->mas7_3, pr); - -#ifdef CONFIG_KVM_BOOKE_HV - stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid; -#endif -} - -static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, - u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, - int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe, - struct tlbe_ref *ref) -{ - struct kvm_memory_slot *slot; - unsigned long pfn, hva; - int pfnmap = 0; - int tsize = BOOK3E_PAGESZ_4K; - - /* - * Translate guest physical to true physical, acquiring - * a page reference if it is normal, non-reserved memory. - * - * gfn_to_memslot() must succeed because otherwise we wouldn't - * have gotten this far. Eventually we should just pass the slot - * pointer through from the first lookup. 
- */ - slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn); - hva = gfn_to_hva_memslot(slot, gfn); - - if (tlbsel == 1) { - struct vm_area_struct *vma; - down_read(¤t->mm->mmap_sem); - - vma = find_vma(current->mm, hva); - if (vma && hva >= vma->vm_start && - (vma->vm_flags & VM_PFNMAP)) { - /* - * This VMA is a physically contiguous region (e.g. - * /dev/mem) that bypasses normal Linux page - * management. Find the overlap between the - * vma and the memslot. - */ - - unsigned long start, end; - unsigned long slot_start, slot_end; - - pfnmap = 1; - - start = vma->vm_pgoff; - end = start + - ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); - - pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT); - - slot_start = pfn - (gfn - slot->base_gfn); - slot_end = slot_start + slot->npages; - - if (start < slot_start) - start = slot_start; - if (end > slot_end) - end = slot_end; - - tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> - MAS1_TSIZE_SHIFT; - - /* - * e500 doesn't implement the lowest tsize bit, - * or 1K pages. - */ - tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); - - /* - * Now find the largest tsize (up to what the guest - * requested) that will cover gfn, stay within the - * range, and for which gfn and pfn are mutually - * aligned. - */ - - for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) { - unsigned long gfn_start, gfn_end, tsize_pages; - tsize_pages = 1 << (tsize - 2); - - gfn_start = gfn & ~(tsize_pages - 1); - gfn_end = gfn_start + tsize_pages; - - if (gfn_start + pfn - gfn < start) - continue; - if (gfn_end + pfn - gfn > end) - continue; - if ((gfn & (tsize_pages - 1)) != - (pfn & (tsize_pages - 1))) - continue; - - gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); - pfn &= ~(tsize_pages - 1); - break; - } - } else if (vma && hva >= vma->vm_start && - (vma->vm_flags & VM_HUGETLB)) { - unsigned long psize = vma_kernel_pagesize(vma); - - tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> - MAS1_TSIZE_SHIFT; - - /* - * Take the largest page size that satisfies both host - * and guest mapping - */ - tsize = min(__ilog2(psize) - 10, tsize); - - /* - * e500 doesn't implement the lowest tsize bit, - * or 1K pages. - */ - tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); - } - - up_read(¤t->mm->mmap_sem); - } - - if (likely(!pfnmap)) { - unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT); - pfn = gfn_to_pfn_memslot(slot, gfn); - if (is_error_pfn(pfn)) { - printk(KERN_ERR "Couldn't get real page for gfn %lx!\n", - (long)gfn); - return; - } - - /* Align guest and physical address to page map boundaries */ - pfn &= ~(tsize_pages - 1); - gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); - } - - /* Drop old ref and setup new one. */ - kvmppc_e500_ref_release(ref); - kvmppc_e500_ref_setup(ref, gtlbe, pfn); - - kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, - ref, gvaddr, stlbe); - - /* Clear i-cache for new pages */ - kvmppc_mmu_flush_icache(pfn); -} - -/* XXX only map the one-one case, for now use TLB0 */ -static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, - int esel, - struct kvm_book3e_206_tlb_entry *stlbe) -{ - struct kvm_book3e_206_tlb_entry *gtlbe; - struct tlbe_ref *ref; - - gtlbe = get_entry(vcpu_e500, 0, esel); - ref = &vcpu_e500->gtlb_priv[0][esel].ref; - - kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe), - get_tlb_raddr(gtlbe) >> PAGE_SHIFT, - gtlbe, 0, stlbe, ref); -} - -/* Caller must ensure that the specified guest TLB entry is safe to insert into - * the shadow TLB. 
*/ -/* XXX for both one-one and one-to-many , for now use TLB1 */ -static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, - u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, - struct kvm_book3e_206_tlb_entry *stlbe, int esel) -{ - struct tlbe_ref *ref; - unsigned int victim; - - victim = vcpu_e500->host_tlb1_nv++; - - if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size())) - vcpu_e500->host_tlb1_nv = 0; - - ref = &vcpu_e500->tlb_refs[1][victim]; - kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref); - - vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << victim; - vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; - if (vcpu_e500->h2g_tlb1_rmap[victim]) { - unsigned int idx = vcpu_e500->h2g_tlb1_rmap[victim]; - vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << victim); - } - vcpu_e500->h2g_tlb1_rmap[victim] = esel; - - return victim; -} - static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500) { int size = vcpu_e500->gtlb_params[1].entries; @@ -676,20 +239,17 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value) for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++) kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel); - /* Invalidate all vcpu id mappings */ - kvmppc_e500_tlbil_all(vcpu_e500); + /* Invalidate all host shadow mappings */ + kvmppc_core_flush_tlb(&vcpu_e500->vcpu); return EMULATE_DONE; } -int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb) +int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); unsigned int ia; int esel, tlbsel; - gva_t ea; - - ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb); ia = (ea >> 2) & 0x1; @@ -709,14 +269,14 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb) kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); } - /* Invalidate all vcpu id mappings */ - kvmppc_e500_tlbil_all(vcpu_e500); + /* Invalidate all host shadow mappings */ + kvmppc_core_flush_tlb(&vcpu_e500->vcpu); return EMULATE_DONE; } static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, - int pid, int rt) + int pid, int type) { struct kvm_book3e_206_tlb_entry *tlbe; int tid, esel; @@ -725,7 +285,7 @@ static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) { tlbe = get_entry(vcpu_e500, tlbsel, esel); tid = get_tlb_tid(tlbe); - if (rt == 0 || tid == pid) { + if (type == 0 || tid == pid) { inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); } @@ -733,14 +293,9 @@ static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, } static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid, - int ra, int rb) + gva_t ea) { int tlbsel, esel; - gva_t ea; - - ea = kvmppc_get_gpr(&vcpu_e500->vcpu, rb); - if (ra) - ea += kvmppc_get_gpr(&vcpu_e500->vcpu, ra); for (tlbsel = 0; tlbsel < 2; tlbsel++) { esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1); @@ -752,16 +307,16 @@ static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid, } } -int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int rt, int ra, int rb) +int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int pid = get_cur_spid(vcpu); - if (rt == 0 || rt == 1) { - tlbilx_all(vcpu_e500, 0, pid, rt); - tlbilx_all(vcpu_e500, 1, pid, rt); - } else if (rt == 3) { - 
tlbilx_one(vcpu_e500, pid, ra, rb); + if (type == 0 || type == 1) { + tlbilx_all(vcpu_e500, 0, pid, type); + tlbilx_all(vcpu_e500, 1, pid, type); + } else if (type == 3) { + tlbilx_one(vcpu_e500, pid, ea); } return EMULATE_DONE; @@ -786,16 +341,13 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu) return EMULATE_DONE; } -int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb) +int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int as = !!get_cur_sas(vcpu); unsigned int pid = get_cur_spid(vcpu); int esel, tlbsel; struct kvm_book3e_206_tlb_entry *gtlbe = NULL; - gva_t ea; - - ea = kvmppc_get_gpr(vcpu, rb); for (tlbsel = 0; tlbsel < 2; tlbsel++) { esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as); @@ -838,27 +390,11 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb) return EMULATE_DONE; } -/* sesel is for tlb1 only */ -static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500, - struct kvm_book3e_206_tlb_entry *gtlbe, - struct kvm_book3e_206_tlb_entry *stlbe, - int stlbsel, int sesel) -{ - int stid; - - preempt_disable(); - stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe); - - stlbe->mas1 |= MAS1_TID(stid); - write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe); - preempt_enable(); -} - int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - struct kvm_book3e_206_tlb_entry *gtlbe, stlbe; - int tlbsel, esel, stlbsel, sesel; + struct kvm_book3e_206_tlb_entry *gtlbe; + int tlbsel, esel; int recal = 0; tlbsel = get_tlb_tlbsel(vcpu); @@ -875,6 +411,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) gtlbe->mas1 = vcpu->arch.shared->mas1; gtlbe->mas2 = vcpu->arch.shared->mas2; + if (!(vcpu->arch.shared->msr & MSR_CM)) + gtlbe->mas2 &= 0xffffffffUL; gtlbe->mas7_3 = vcpu->arch.shared->mas7_3; trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, @@ -894,40 +432,16 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ if (tlbe_is_host_safe(vcpu, gtlbe)) { - u64 eaddr; - u64 raddr; + u64 eaddr = get_tlb_eaddr(gtlbe); + u64 raddr = get_tlb_raddr(gtlbe); - switch (tlbsel) { - case 0: - /* TLB0 */ + if (tlbsel == 0) { gtlbe->mas1 &= ~MAS1_TSIZE(~0); gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K); - - stlbsel = 0; - kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe); - sesel = 0; /* unused */ - - break; - - case 1: - /* TLB1 */ - eaddr = get_tlb_eaddr(gtlbe); - raddr = get_tlb_raddr(gtlbe); - - /* Create a 4KB mapping on the host. - * If the guest wanted a large page, - * only the first 4KB is mapped here and the rest - * are mapped on the fly. 
*/ - stlbsel = 1; - sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, - raddr >> PAGE_SHIFT, gtlbe, &stlbe, esel); - break; - - default: - BUG(); } - write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel); + /* Premap the faulting page */ + kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel)); } kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); @@ -1021,53 +535,14 @@ void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) { } -void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, - unsigned int index) -{ - struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - struct tlbe_priv *priv; - struct kvm_book3e_206_tlb_entry *gtlbe, stlbe; - int tlbsel = tlbsel_of(index); - int esel = esel_of(index); - int stlbsel, sesel; - - gtlbe = get_entry(vcpu_e500, tlbsel, esel); - - switch (tlbsel) { - case 0: - stlbsel = 0; - sesel = 0; /* unused */ - priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; - - kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K, - &priv->ref, eaddr, &stlbe); - break; - - case 1: { - gfn_t gfn = gpaddr >> PAGE_SHIFT; - - stlbsel = 1; - sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, - gtlbe, &stlbe, esel); - break; - } - - default: - BUG(); - break; - } - - write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel); -} +/*****************************************/ static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500) { int i; - clear_tlb1_bitmap(vcpu_e500); + kvmppc_core_flush_tlb(&vcpu_e500->vcpu); kfree(vcpu_e500->g2h_tlb1_map); - - clear_tlb_refs(vcpu_e500); kfree(vcpu_e500->gtlb_priv[0]); kfree(vcpu_e500->gtlb_priv[1]); @@ -1081,6 +556,8 @@ static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500) } vcpu_e500->num_shared_tlb_pages = 0; + + kfree(vcpu_e500->shared_tlb_pages); vcpu_e500->shared_tlb_pages = NULL; } else { kfree(vcpu_e500->gtlb_arch); @@ -1178,21 +655,27 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, } virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL); - if (!virt) + if (!virt) { + ret = -ENOMEM; goto err_put_page; + } privs[0] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[0], GFP_KERNEL); privs[1] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[1], GFP_KERNEL); - if (!privs[0] || !privs[1]) - goto err_put_page; + if (!privs[0] || !privs[1]) { + ret = -ENOMEM; + goto err_privs; + } g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1], GFP_KERNEL); - if (!g2h_bitmap) - goto err_put_page; + if (!g2h_bitmap) { + ret = -ENOMEM; + goto err_privs; + } free_gtlb(vcpu_e500); @@ -1232,10 +715,11 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, kvmppc_recalc_tlb1map_range(vcpu_e500); return 0; -err_put_page: +err_privs: kfree(privs[0]); kfree(privs[1]); +err_put_page: for (i = 0; i < num_pages; i++) put_page(pages[i]); @@ -1249,7 +733,7 @@ int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu, { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); kvmppc_recalc_tlb1map_range(vcpu_e500); - clear_tlb_refs(vcpu_e500); + kvmppc_core_flush_tlb(vcpu); return 0; } @@ -1259,37 +743,8 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) int entry_size = sizeof(struct kvm_book3e_206_tlb_entry); int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE; - host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY; - host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; - - /* - * This should never happen on real e500 hardware, but is - * architecturally possible -- e.g. in some weird nested - * virtualization case. 
- */ - if (host_tlb_params[0].entries == 0 || - host_tlb_params[1].entries == 0) { - pr_err("%s: need to know host tlb size\n", __func__); - return -ENODEV; - } - - host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >> - TLBnCFG_ASSOC_SHIFT; - host_tlb_params[1].ways = host_tlb_params[1].entries; - - if (!is_power_of_2(host_tlb_params[0].entries) || - !is_power_of_2(host_tlb_params[0].ways) || - host_tlb_params[0].entries < host_tlb_params[0].ways || - host_tlb_params[0].ways == 0) { - pr_err("%s: bad tlb0 host config: %u entries %u ways\n", - __func__, host_tlb_params[0].entries, - host_tlb_params[0].ways); - return -ENODEV; - } - - host_tlb_params[0].sets = - host_tlb_params[0].entries / host_tlb_params[0].ways; - host_tlb_params[1].sets = 1; + if (e500_mmu_host_init(vcpu_e500)) + goto err; vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE; vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE; @@ -1308,18 +763,6 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) vcpu_e500->gtlb_offset[0] = 0; vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE; - vcpu_e500->tlb_refs[0] = - kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries, - GFP_KERNEL); - if (!vcpu_e500->tlb_refs[0]) - goto err; - - vcpu_e500->tlb_refs[1] = - kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries, - GFP_KERNEL); - if (!vcpu_e500->tlb_refs[1]) - goto err; - vcpu_e500->gtlb_priv[0] = kzalloc(sizeof(struct tlbe_ref) * vcpu_e500->gtlb_params[0].entries, GFP_KERNEL); @@ -1332,18 +775,12 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) if (!vcpu_e500->gtlb_priv[1]) goto err; - vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(unsigned int) * + vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) * vcpu_e500->gtlb_params[1].entries, GFP_KERNEL); if (!vcpu_e500->g2h_tlb1_map) goto err; - vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) * - host_tlb_params[1].entries, - GFP_KERNEL); - if (!vcpu_e500->h2g_tlb1_rmap) - goto err; - /* Init TLB configuration register */ vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) & ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); @@ -1362,15 +799,11 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) err: free_gtlb(vcpu_e500); - kfree(vcpu_e500->tlb_refs[0]); - kfree(vcpu_e500->tlb_refs[1]); return -1; } void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) { free_gtlb(vcpu_e500); - kfree(vcpu_e500->h2g_tlb1_rmap); - kfree(vcpu_e500->tlb_refs[0]); - kfree(vcpu_e500->tlb_refs[1]); + e500_mmu_host_uninit(vcpu_e500); } diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c new file mode 100644 index 00000000000..a222edfb9a9 --- /dev/null +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -0,0 +1,699 @@ +/* + * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved. + * + * Author: Yu Liu, yu.liu@freescale.com + * Scott Wood, scottwood@freescale.com + * Ashish Kalra, ashish.kalra@freescale.com + * Varun Sethi, varun.sethi@freescale.com + * Alexander Graf, agraf@suse.de + * + * Description: + * This file is based on arch/powerpc/kvm/44x_tlb.c, + * by Hollis Blanchard <hollisb@us.ibm.com>. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. 
+ */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/kvm.h> +#include <linux/kvm_host.h> +#include <linux/highmem.h> +#include <linux/log2.h> +#include <linux/uaccess.h> +#include <linux/sched.h> +#include <linux/rwsem.h> +#include <linux/vmalloc.h> +#include <linux/hugetlb.h> +#include <asm/kvm_ppc.h> + +#include "e500.h" +#include "trace.h" +#include "timing.h" +#include "e500_mmu_host.h" + +#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1) + +static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM]; + +static inline unsigned int tlb1_max_shadow_size(void) +{ + /* reserve one entry for magic page */ + return host_tlb_params[1].entries - tlbcam_index - 1; +} + +static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode) +{ + /* Mask off reserved bits. */ + mas3 &= MAS3_ATTRIB_MASK; + +#ifndef CONFIG_KVM_BOOKE_HV + if (!usermode) { + /* Guest is in supervisor mode, + * so we need to translate guest + * supervisor permissions into user permissions. */ + mas3 &= ~E500_TLB_USER_PERM_MASK; + mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1; + } + mas3 |= E500_TLB_SUPER_PERM_MASK; +#endif + return mas3; +} + +static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode) +{ +#ifdef CONFIG_SMP + return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M; +#else + return mas2 & MAS2_ATTRIB_MASK; +#endif +} + +/* + * writing shadow tlb entry to host TLB + */ +static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe, + uint32_t mas0) +{ + unsigned long flags; + + local_irq_save(flags); + mtspr(SPRN_MAS0, mas0); + mtspr(SPRN_MAS1, stlbe->mas1); + mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2); + mtspr(SPRN_MAS3, (u32)stlbe->mas7_3); + mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32)); +#ifdef CONFIG_KVM_BOOKE_HV + mtspr(SPRN_MAS8, stlbe->mas8); +#endif + asm volatile("isync; tlbwe" : : : "memory"); + +#ifdef CONFIG_KVM_BOOKE_HV + /* Must clear mas8 for other host tlbwe's */ + mtspr(SPRN_MAS8, 0); + isync(); +#endif + local_irq_restore(flags); + + trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1, + stlbe->mas2, stlbe->mas7_3); +} + +/* + * Acquire a mas0 with victim hint, as if we just took a TLB miss. + * + * We don't care about the address we're searching for, other than that it's + * in the right set and is not present in the TLB. Using a zero PID and a + * userspace address means we don't have to set and then restore MAS5, or + * calculate a proper MAS6 value. 
+ */ +static u32 get_host_mas0(unsigned long eaddr) +{ + unsigned long flags; + u32 mas0; + + local_irq_save(flags); + mtspr(SPRN_MAS6, 0); + asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET)); + mas0 = mfspr(SPRN_MAS0); + local_irq_restore(flags); + + return mas0; +} + +/* sesel is for tlb1 only */ +static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500, + int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe) +{ + u32 mas0; + + if (tlbsel == 0) { + mas0 = get_host_mas0(stlbe->mas2); + __write_host_tlbe(stlbe, mas0); + } else { + __write_host_tlbe(stlbe, + MAS0_TLBSEL(1) | + MAS0_ESEL(to_htlb1_esel(sesel))); + } +} + +/* sesel is for tlb1 only */ +static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500, + struct kvm_book3e_206_tlb_entry *gtlbe, + struct kvm_book3e_206_tlb_entry *stlbe, + int stlbsel, int sesel) +{ + int stid; + + preempt_disable(); + stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe); + + stlbe->mas1 |= MAS1_TID(stid); + write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe); + preempt_enable(); +} + +#ifdef CONFIG_KVM_E500V2 +/* XXX should be a hook in the gva2hpa translation */ +void kvmppc_map_magic(struct kvm_vcpu *vcpu) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + struct kvm_book3e_206_tlb_entry magic; + ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; + unsigned int stid; + pfn_t pfn; + + pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT; + get_page(pfn_to_page(pfn)); + + preempt_disable(); + stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0); + + magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) | + MAS1_TSIZE(BOOK3E_PAGESZ_4K); + magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M; + magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) | + MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR; + magic.mas8 = 0; + + __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index)); + preempt_enable(); +} +#endif + +void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, + int esel) +{ + struct kvm_book3e_206_tlb_entry *gtlbe = + get_entry(vcpu_e500, tlbsel, esel); + struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref; + + /* Don't bother with unmapped entries */ + if (!(ref->flags & E500_TLB_VALID)) + return; + + if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) { + u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; + int hw_tlb_indx; + unsigned long flags; + + local_irq_save(flags); + while (tmp) { + hw_tlb_indx = __ilog2_u64(tmp & -tmp); + mtspr(SPRN_MAS0, + MAS0_TLBSEL(1) | + MAS0_ESEL(to_htlb1_esel(hw_tlb_indx))); + mtspr(SPRN_MAS1, 0); + asm volatile("tlbwe"); + vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0; + tmp &= tmp - 1; + } + mb(); + vcpu_e500->g2h_tlb1_map[esel] = 0; + ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID); + local_irq_restore(flags); + } + + if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) { + /* + * TLB1 entry is backed by 4k pages. This should happen + * rarely and is not worth optimizing. Invalidate everything. + */ + kvmppc_e500_tlbil_all(vcpu_e500); + ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID); + } + + /* Already invalidated in between */ + if (!(ref->flags & E500_TLB_VALID)) + return; + + /* Guest tlbe is backed by at most one host tlbe per shadow pid. 
*/ + kvmppc_e500_tlbil_one(vcpu_e500, gtlbe); + + /* Mark the TLB as not backed by the host anymore */ + ref->flags &= ~E500_TLB_VALID; +} + +static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe) +{ + return tlbe->mas7_3 & (MAS3_SW|MAS3_UW); +} + +static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, + struct kvm_book3e_206_tlb_entry *gtlbe, + pfn_t pfn) +{ + ref->pfn = pfn; + ref->flags = E500_TLB_VALID; + + if (tlbe_is_writable(gtlbe)) + kvm_set_pfn_dirty(pfn); +} + +static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) +{ + if (ref->flags & E500_TLB_VALID) { + trace_kvm_booke206_ref_release(ref->pfn, ref->flags); + ref->flags = 0; + } +} + +static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500) +{ + if (vcpu_e500->g2h_tlb1_map) + memset(vcpu_e500->g2h_tlb1_map, 0, + sizeof(u64) * vcpu_e500->gtlb_params[1].entries); + if (vcpu_e500->h2g_tlb1_rmap) + memset(vcpu_e500->h2g_tlb1_rmap, 0, + sizeof(unsigned int) * host_tlb_params[1].entries); +} + +static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) +{ + int tlbsel = 0; + int i; + + for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { + struct tlbe_ref *ref = + &vcpu_e500->gtlb_priv[tlbsel][i].ref; + kvmppc_e500_ref_release(ref); + } +} + +static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500) +{ + int stlbsel = 1; + int i; + + kvmppc_e500_tlbil_all(vcpu_e500); + + for (i = 0; i < host_tlb_params[stlbsel].entries; i++) { + struct tlbe_ref *ref = + &vcpu_e500->tlb_refs[stlbsel][i]; + kvmppc_e500_ref_release(ref); + } + + clear_tlb_privs(vcpu_e500); +} + +void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + clear_tlb_refs(vcpu_e500); + clear_tlb1_bitmap(vcpu_e500); +} + +/* TID must be supplied by the caller */ +static void kvmppc_e500_setup_stlbe( + struct kvm_vcpu *vcpu, + struct kvm_book3e_206_tlb_entry *gtlbe, + int tsize, struct tlbe_ref *ref, u64 gvaddr, + struct kvm_book3e_206_tlb_entry *stlbe) +{ + pfn_t pfn = ref->pfn; + u32 pr = vcpu->arch.shared->msr & MSR_PR; + + BUG_ON(!(ref->flags & E500_TLB_VALID)); + + /* Force IPROT=0 for all guest mappings. */ + stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID; + stlbe->mas2 = (gvaddr & MAS2_EPN) | + e500_shadow_mas2_attrib(gtlbe->mas2, pr); + stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | + e500_shadow_mas3_attrib(gtlbe->mas7_3, pr); + +#ifdef CONFIG_KVM_BOOKE_HV + stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid; +#endif +} + +static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, + u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, + int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe, + struct tlbe_ref *ref) +{ + struct kvm_memory_slot *slot; + unsigned long pfn = 0; /* silence GCC warning */ + unsigned long hva; + int pfnmap = 0; + int tsize = BOOK3E_PAGESZ_4K; + + /* + * Translate guest physical to true physical, acquiring + * a page reference if it is normal, non-reserved memory. + * + * gfn_to_memslot() must succeed because otherwise we wouldn't + * have gotten this far. Eventually we should just pass the slot + * pointer through from the first lookup. + */ + slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn); + hva = gfn_to_hva_memslot(slot, gfn); + + if (tlbsel == 1) { + struct vm_area_struct *vma; + down_read(¤t->mm->mmap_sem); + + vma = find_vma(current->mm, hva); + if (vma && hva >= vma->vm_start && + (vma->vm_flags & VM_PFNMAP)) { + /* + * This VMA is a physically contiguous region (e.g. 
+ * /dev/mem) that bypasses normal Linux page + * management. Find the overlap between the + * vma and the memslot. + */ + + unsigned long start, end; + unsigned long slot_start, slot_end; + + pfnmap = 1; + + start = vma->vm_pgoff; + end = start + + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); + + pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT); + + slot_start = pfn - (gfn - slot->base_gfn); + slot_end = slot_start + slot->npages; + + if (start < slot_start) + start = slot_start; + if (end > slot_end) + end = slot_end; + + tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> + MAS1_TSIZE_SHIFT; + + /* + * e500 doesn't implement the lowest tsize bit, + * or 1K pages. + */ + tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); + + /* + * Now find the largest tsize (up to what the guest + * requested) that will cover gfn, stay within the + * range, and for which gfn and pfn are mutually + * aligned. + */ + + for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) { + unsigned long gfn_start, gfn_end, tsize_pages; + tsize_pages = 1 << (tsize - 2); + + gfn_start = gfn & ~(tsize_pages - 1); + gfn_end = gfn_start + tsize_pages; + + if (gfn_start + pfn - gfn < start) + continue; + if (gfn_end + pfn - gfn > end) + continue; + if ((gfn & (tsize_pages - 1)) != + (pfn & (tsize_pages - 1))) + continue; + + gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); + pfn &= ~(tsize_pages - 1); + break; + } + } else if (vma && hva >= vma->vm_start && + (vma->vm_flags & VM_HUGETLB)) { + unsigned long psize = vma_kernel_pagesize(vma); + + tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> + MAS1_TSIZE_SHIFT; + + /* + * Take the largest page size that satisfies both host + * and guest mapping + */ + tsize = min(__ilog2(psize) - 10, tsize); + + /* + * e500 doesn't implement the lowest tsize bit, + * or 1K pages. + */ + tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); + } + + up_read(¤t->mm->mmap_sem); + } + + if (likely(!pfnmap)) { + unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT); + pfn = gfn_to_pfn_memslot(slot, gfn); + if (is_error_noslot_pfn(pfn)) { + printk(KERN_ERR "Couldn't get real page for gfn %lx!\n", + (long)gfn); + return -EINVAL; + } + + /* Align guest and physical address to page map boundaries */ + pfn &= ~(tsize_pages - 1); + gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); + } + + /* Drop old ref and setup new one. 
*/ + kvmppc_e500_ref_release(ref); + kvmppc_e500_ref_setup(ref, gtlbe, pfn); + + kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, + ref, gvaddr, stlbe); + + /* Clear i-cache for new pages */ + kvmppc_mmu_flush_icache(pfn); + + /* Drop refcount on page, so that mmu notifiers can clear it */ + kvm_release_pfn_clean(pfn); + + return 0; +} + +/* XXX only map the one-one case, for now use TLB0 */ +static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel, + struct kvm_book3e_206_tlb_entry *stlbe) +{ + struct kvm_book3e_206_tlb_entry *gtlbe; + struct tlbe_ref *ref; + int stlbsel = 0; + int sesel = 0; + int r; + + gtlbe = get_entry(vcpu_e500, 0, esel); + ref = &vcpu_e500->gtlb_priv[0][esel].ref; + + r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe), + get_tlb_raddr(gtlbe) >> PAGE_SHIFT, + gtlbe, 0, stlbe, ref); + if (r) + return r; + + write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel); + + return 0; +} + +static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500, + struct tlbe_ref *ref, + int esel) +{ + unsigned int sesel = vcpu_e500->host_tlb1_nv++; + + if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size())) + vcpu_e500->host_tlb1_nv = 0; + + vcpu_e500->tlb_refs[1][sesel] = *ref; + vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; + vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; + if (vcpu_e500->h2g_tlb1_rmap[sesel]) { + unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel]; + vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); + } + vcpu_e500->h2g_tlb1_rmap[sesel] = esel; + + return sesel; +} + +/* Caller must ensure that the specified guest TLB entry is safe to insert into + * the shadow TLB. */ +/* For both one-one and one-to-many */ +static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, + u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, + struct kvm_book3e_206_tlb_entry *stlbe, int esel) +{ + struct tlbe_ref ref; + int sesel; + int r; + + ref.flags = 0; + r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, + &ref); + if (r) + return r; + + /* Use TLB0 when we can only map a page with 4k */ + if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) { + vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0; + write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0); + return 0; + } + + /* Otherwise map into TLB1 */ + sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel); + write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel); + + return 0; +} + +void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, + unsigned int index) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + struct tlbe_priv *priv; + struct kvm_book3e_206_tlb_entry *gtlbe, stlbe; + int tlbsel = tlbsel_of(index); + int esel = esel_of(index); + + gtlbe = get_entry(vcpu_e500, tlbsel, esel); + + switch (tlbsel) { + case 0: + priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; + + /* Triggers after clear_tlb_refs or on initial mapping */ + if (!(priv->ref.flags & E500_TLB_VALID)) { + kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe); + } else { + kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K, + &priv->ref, eaddr, &stlbe); + write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0); + } + break; + + case 1: { + gfn_t gfn = gpaddr >> PAGE_SHIFT; + kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe, + esel); + break; + } + + default: + BUG(); + break; + } +} + +/************* MMU Notifiers *************/ + +int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) +{ + trace_kvm_unmap_hva(hva); + + /* + * Flush all shadow tlb entries everywhere. 
This is slow, but + * we are 100% sure that we catch the to be unmapped page + */ + kvm_flush_remote_tlbs(kvm); + + return 0; +} + +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +{ + /* kvm_unmap_hva flushes everything anyways */ + kvm_unmap_hva(kvm, start); + + return 0; +} + +int kvm_age_hva(struct kvm *kvm, unsigned long hva) +{ + /* XXX could be more clever ;) */ + return 0; +} + +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) +{ + /* XXX could be more clever ;) */ + return 0; +} + +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) +{ + /* The page will get remapped properly on its next fault */ + kvm_unmap_hva(kvm, hva); +} + +/*****************************************/ + +int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500) +{ + host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY; + host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; + + /* + * This should never happen on real e500 hardware, but is + * architecturally possible -- e.g. in some weird nested + * virtualization case. + */ + if (host_tlb_params[0].entries == 0 || + host_tlb_params[1].entries == 0) { + pr_err("%s: need to know host tlb size\n", __func__); + return -ENODEV; + } + + host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >> + TLBnCFG_ASSOC_SHIFT; + host_tlb_params[1].ways = host_tlb_params[1].entries; + + if (!is_power_of_2(host_tlb_params[0].entries) || + !is_power_of_2(host_tlb_params[0].ways) || + host_tlb_params[0].entries < host_tlb_params[0].ways || + host_tlb_params[0].ways == 0) { + pr_err("%s: bad tlb0 host config: %u entries %u ways\n", + __func__, host_tlb_params[0].entries, + host_tlb_params[0].ways); + return -ENODEV; + } + + host_tlb_params[0].sets = + host_tlb_params[0].entries / host_tlb_params[0].ways; + host_tlb_params[1].sets = 1; + + vcpu_e500->tlb_refs[0] = + kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries, + GFP_KERNEL); + if (!vcpu_e500->tlb_refs[0]) + goto err; + + vcpu_e500->tlb_refs[1] = + kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries, + GFP_KERNEL); + if (!vcpu_e500->tlb_refs[1]) + goto err; + + vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) * + host_tlb_params[1].entries, + GFP_KERNEL); + if (!vcpu_e500->h2g_tlb1_rmap) + goto err; + + return 0; + +err: + kfree(vcpu_e500->tlb_refs[0]); + kfree(vcpu_e500->tlb_refs[1]); + return -EINVAL; +} + +void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) +{ + kfree(vcpu_e500->h2g_tlb1_rmap); + kfree(vcpu_e500->tlb_refs[0]); + kfree(vcpu_e500->tlb_refs[1]); +} diff --git a/arch/powerpc/kvm/e500_mmu_host.h b/arch/powerpc/kvm/e500_mmu_host.h new file mode 100644 index 00000000000..7624835b76c --- /dev/null +++ b/arch/powerpc/kvm/e500_mmu_host.h @@ -0,0 +1,18 @@ +/* + * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. 
+ */ + +#ifndef KVM_E500_MMU_HOST_H +#define KVM_E500_MMU_HOST_H + +void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, + int esel); + +int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500); +void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500); + +#endif /* KVM_E500_MMU_HOST_H */ diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index ee04abaefe2..7a73b6f72a8 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c @@ -39,6 +39,7 @@ #define OP_31_XOP_TRAP 4 #define OP_31_XOP_LWZX 23 #define OP_31_XOP_TRAP_64 68 +#define OP_31_XOP_DCBF 86 #define OP_31_XOP_LBZX 87 #define OP_31_XOP_STWX 151 #define OP_31_XOP_STBX 215 @@ -131,6 +132,120 @@ u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb) return vcpu->arch.dec - jd; } +static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) +{ + enum emulation_result emulated = EMULATE_DONE; + ulong spr_val = kvmppc_get_gpr(vcpu, rs); + + switch (sprn) { + case SPRN_SRR0: + vcpu->arch.shared->srr0 = spr_val; + break; + case SPRN_SRR1: + vcpu->arch.shared->srr1 = spr_val; + break; + + /* XXX We need to context-switch the timebase for + * watchdog and FIT. */ + case SPRN_TBWL: break; + case SPRN_TBWU: break; + + case SPRN_DEC: + vcpu->arch.dec = spr_val; + kvmppc_emulate_dec(vcpu); + break; + + case SPRN_SPRG0: + vcpu->arch.shared->sprg0 = spr_val; + break; + case SPRN_SPRG1: + vcpu->arch.shared->sprg1 = spr_val; + break; + case SPRN_SPRG2: + vcpu->arch.shared->sprg2 = spr_val; + break; + case SPRN_SPRG3: + vcpu->arch.shared->sprg3 = spr_val; + break; + + default: + emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, + spr_val); + if (emulated == EMULATE_FAIL) + printk(KERN_INFO "mtspr: unknown spr " + "0x%x\n", sprn); + break; + } + + kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS); + + return emulated; +} + +static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) +{ + enum emulation_result emulated = EMULATE_DONE; + ulong spr_val = 0; + + switch (sprn) { + case SPRN_SRR0: + spr_val = vcpu->arch.shared->srr0; + break; + case SPRN_SRR1: + spr_val = vcpu->arch.shared->srr1; + break; + case SPRN_PVR: + spr_val = vcpu->arch.pvr; + break; + case SPRN_PIR: + spr_val = vcpu->vcpu_id; + break; + + /* Note: mftb and TBRL/TBWL are user-accessible, so + * the guest can always access the real TB anyways. + * In fact, we probably will never see these traps. */ + case SPRN_TBWL: + spr_val = get_tb() >> 32; + break; + case SPRN_TBWU: + spr_val = get_tb(); + break; + + case SPRN_SPRG0: + spr_val = vcpu->arch.shared->sprg0; + break; + case SPRN_SPRG1: + spr_val = vcpu->arch.shared->sprg1; + break; + case SPRN_SPRG2: + spr_val = vcpu->arch.shared->sprg2; + break; + case SPRN_SPRG3: + spr_val = vcpu->arch.shared->sprg3; + break; + /* Note: SPRG4-7 are user-readable, so we don't get + * a trap. 
*/ + + case SPRN_DEC: + spr_val = kvmppc_get_dec(vcpu, get_tb()); + break; + default: + emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, + &spr_val); + if (unlikely(emulated == EMULATE_FAIL)) { + printk(KERN_INFO "mfspr: unknown spr " + "0x%x\n", sprn); + } + break; + } + + if (emulated == EMULATE_DONE) + kvmppc_set_gpr(vcpu, rt, spr_val); + kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); + + return emulated; +} + /* XXX to do: * lhax * lhaux @@ -156,7 +271,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) int sprn = get_sprn(inst); enum emulation_result emulated = EMULATE_DONE; int advance = 1; - ulong spr_val = 0; /* this default type might be overwritten by subcategories */ kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); @@ -236,62 +350,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) break; case OP_31_XOP_MFSPR: - switch (sprn) { - case SPRN_SRR0: - spr_val = vcpu->arch.shared->srr0; - break; - case SPRN_SRR1: - spr_val = vcpu->arch.shared->srr1; - break; - case SPRN_PVR: - spr_val = vcpu->arch.pvr; - break; - case SPRN_PIR: - spr_val = vcpu->vcpu_id; - break; - case SPRN_MSSSR0: - spr_val = 0; - break; - - /* Note: mftb and TBRL/TBWL are user-accessible, so - * the guest can always access the real TB anyways. - * In fact, we probably will never see these traps. */ - case SPRN_TBWL: - spr_val = get_tb() >> 32; - break; - case SPRN_TBWU: - spr_val = get_tb(); - break; - - case SPRN_SPRG0: - spr_val = vcpu->arch.shared->sprg0; - break; - case SPRN_SPRG1: - spr_val = vcpu->arch.shared->sprg1; - break; - case SPRN_SPRG2: - spr_val = vcpu->arch.shared->sprg2; - break; - case SPRN_SPRG3: - spr_val = vcpu->arch.shared->sprg3; - break; - /* Note: SPRG4-7 are user-readable, so we don't get - * a trap. */ - - case SPRN_DEC: - spr_val = kvmppc_get_dec(vcpu, get_tb()); - break; - default: - emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, - &spr_val); - if (unlikely(emulated == EMULATE_FAIL)) { - printk(KERN_INFO "mfspr: unknown spr " - "0x%x\n", sprn); - } - break; - } - kvmppc_set_gpr(vcpu, rt, spr_val); - kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); + emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt); break; case OP_31_XOP_STHX: @@ -308,51 +367,10 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) break; case OP_31_XOP_MTSPR: - spr_val = kvmppc_get_gpr(vcpu, rs); - switch (sprn) { - case SPRN_SRR0: - vcpu->arch.shared->srr0 = spr_val; - break; - case SPRN_SRR1: - vcpu->arch.shared->srr1 = spr_val; - break; - - /* XXX We need to context-switch the timebase for - * watchdog and FIT. */ - case SPRN_TBWL: break; - case SPRN_TBWU: break; - - case SPRN_MSSSR0: break; - - case SPRN_DEC: - vcpu->arch.dec = spr_val; - kvmppc_emulate_dec(vcpu); - break; - - case SPRN_SPRG0: - vcpu->arch.shared->sprg0 = spr_val; - break; - case SPRN_SPRG1: - vcpu->arch.shared->sprg1 = spr_val; - break; - case SPRN_SPRG2: - vcpu->arch.shared->sprg2 = spr_val; - break; - case SPRN_SPRG3: - vcpu->arch.shared->sprg3 = spr_val; - break; - - default: - emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, - spr_val); - if (emulated == EMULATE_FAIL) - printk(KERN_INFO "mtspr: unknown spr " - "0x%x\n", sprn); - break; - } - kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS); + emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs); break; + case OP_31_XOP_DCBF: case OP_31_XOP_DCBI: /* Do nothing. 
The guest is performing dcbi because * hardware DMA is not snooped by the dcache, but diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 4d213b8b0fb..934413cd3a1 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -30,6 +30,7 @@ #include <asm/kvm_ppc.h> #include <asm/tlbflush.h> #include <asm/cputhreads.h> +#include <asm/irqflags.h> #include "timing.h" #include "../mm/mmu_decl.h" @@ -38,8 +39,7 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) { - return !(v->arch.shared->msr & MSR_WE) || - !!(v->arch.pending_exceptions) || + return !!(v->arch.pending_exceptions) || v->requests; } @@ -48,6 +48,85 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) return 1; } +#ifndef CONFIG_KVM_BOOK3S_64_HV +/* + * Common checks before entering the guest world. Call with interrupts + * disabled. + * + * returns: + * + * == 1 if we're ready to go into guest state + * <= 0 if we need to go back to the host with return value + */ +int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) +{ + int r = 1; + + WARN_ON_ONCE(!irqs_disabled()); + while (true) { + if (need_resched()) { + local_irq_enable(); + cond_resched(); + local_irq_disable(); + continue; + } + + if (signal_pending(current)) { + kvmppc_account_exit(vcpu, SIGNAL_EXITS); + vcpu->run->exit_reason = KVM_EXIT_INTR; + r = -EINTR; + break; + } + + vcpu->mode = IN_GUEST_MODE; + + /* + * Reading vcpu->requests must happen after setting vcpu->mode, + * so we don't miss a request because the requester sees + * OUTSIDE_GUEST_MODE and assumes we'll be checking requests + * before next entering the guest (and thus doesn't IPI). + */ + smp_mb(); + + if (vcpu->requests) { + /* Make sure we process requests preemptable */ + local_irq_enable(); + trace_kvm_check_requests(vcpu); + r = kvmppc_core_check_requests(vcpu); + local_irq_disable(); + if (r > 0) + continue; + break; + } + + if (kvmppc_core_prepare_to_enter(vcpu)) { + /* interrupts got enabled in between, so we + are back at square 1 */ + continue; + } + +#ifdef CONFIG_PPC64 + /* lazy EE magic */ + hard_irq_disable(); + if (lazy_irq_pending()) { + /* Got an interrupt in between, try again */ + local_irq_enable(); + local_irq_disable(); + kvm_guest_exit(); + continue; + } + + trace_hardirqs_on(); +#endif + + kvm_guest_enter(); + break; + } + + return r; +} +#endif /* CONFIG_KVM_BOOK3S_64_HV */ + int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) { int nr = kvmppc_get_gpr(vcpu, 11); @@ -67,18 +146,18 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) } switch (nr) { - case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE: + case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE): { vcpu->arch.magic_page_pa = param1; vcpu->arch.magic_page_ea = param2; r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7; - r = HC_EV_SUCCESS; + r = EV_SUCCESS; break; } - case HC_VENDOR_KVM | KVM_HC_FEATURES: - r = HC_EV_SUCCESS; + case KVM_HCALL_TOKEN(KVM_HC_FEATURES): + r = EV_SUCCESS; #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2) /* XXX Missing magic page on 44x */ r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); @@ -86,8 +165,13 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) /* Second return value is in r4 */ break; + case EV_HCALL_TOKEN(EV_IDLE): + r = EV_SUCCESS; + kvm_vcpu_block(vcpu); + clear_bit(KVM_REQ_UNHALT, &vcpu->requests); + break; default: - r = HC_EV_UNIMPLEMENTED; + r = EV_UNIMPLEMENTED; break; } @@ -153,7 +237,8 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) r = RESUME_HOST; break; default: - BUG(); + WARN_ON(1); + r = RESUME_GUEST; } return r; @@ -220,6 +305,8 @@ int 
kvm_dev_ioctl_check_extension(long ext) switch (ext) { #ifdef CONFIG_BOOKE case KVM_CAP_PPC_BOOKE_SREGS: + case KVM_CAP_PPC_BOOKE_WATCHDOG: + case KVM_CAP_PPC_EPR: #else case KVM_CAP_PPC_SEGSTATE: case KVM_CAP_PPC_HIOR: @@ -229,6 +316,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_PPC_IRQ_LEVEL: case KVM_CAP_ENABLE_CAP: case KVM_CAP_ONE_REG: + case KVM_CAP_IOEVENTFD: r = 1; break; #ifndef CONFIG_KVM_BOOK3S_64_HV @@ -260,10 +348,22 @@ int kvm_dev_ioctl_check_extension(long ext) if (cpu_has_feature(CPU_FTR_ARCH_201)) r = 2; break; +#endif case KVM_CAP_SYNC_MMU: +#ifdef CONFIG_KVM_BOOK3S_64_HV r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0; +#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER) + r = 1; +#else + r = 0; + break; +#endif +#ifdef CONFIG_KVM_BOOK3S_64_HV + case KVM_CAP_PPC_HTAB_FD: + r = 1; break; #endif + break; case KVM_CAP_NR_VCPUS: /* * Recommending a number of CPUs is somewhat arbitrary; we @@ -302,36 +402,29 @@ long kvm_arch_dev_ioctl(struct file *filp, void kvm_arch_free_memslot(struct kvm_memory_slot *free, struct kvm_memory_slot *dont) { - if (!dont || free->arch.rmap != dont->arch.rmap) { - vfree(free->arch.rmap); - free->arch.rmap = NULL; - } + kvmppc_core_free_memslot(free, dont); } int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) { - slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap)); - if (!slot->arch.rmap) - return -ENOMEM; - - return 0; + return kvmppc_core_create_memslot(slot, npages); } int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_memory_slot old, struct kvm_userspace_memory_region *mem, - int user_alloc) + bool user_alloc) { - return kvmppc_core_prepare_memory_region(kvm, mem); + return kvmppc_core_prepare_memory_region(kvm, memslot, mem); } void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, struct kvm_memory_slot old, - int user_alloc) + bool user_alloc) { - kvmppc_core_commit_memory_region(kvm, mem); + kvmppc_core_commit_memory_region(kvm, mem, old); } void kvm_arch_flush_shadow_all(struct kvm *kvm) @@ -341,6 +434,7 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm) void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) { + kvmppc_core_flush_memslot(kvm, slot); } struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) @@ -354,6 +448,11 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) return vcpu; } +int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) +{ + return 0; +} + void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) { /* Make sure we're not using the vcpu anymore */ @@ -390,6 +489,8 @@ enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer) int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { + int ret; + hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu); vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; @@ -398,13 +499,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) #ifdef CONFIG_KVM_EXIT_TIMING mutex_init(&vcpu->arch.exit_timing_lock); #endif - - return 0; + ret = kvmppc_subarch_vcpu_init(vcpu); + return ret; } void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) { kvmppc_mmu_destroy(vcpu); + kvmppc_subarch_vcpu_uninit(vcpu); } void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) @@ -420,7 +522,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); #endif kvmppc_core_vcpu_load(vcpu, cpu); - vcpu->cpu 
= smp_processor_id(); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) @@ -429,7 +530,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) #ifdef CONFIG_BOOKE vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); #endif - vcpu->cpu = -1; } int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, @@ -527,6 +627,13 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, vcpu->mmio_is_write = 0; vcpu->arch.mmio_sign_extend = 0; + if (!kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr, + bytes, &run->mmio.data)) { + kvmppc_complete_mmio_load(vcpu, run); + vcpu->mmio_needed = 0; + return EMULATE_DONE; + } + return EMULATE_DO_MMIO; } @@ -536,8 +643,8 @@ int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, { int r; - r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian); vcpu->arch.mmio_sign_extend = 1; + r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian); return r; } @@ -575,6 +682,13 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, } } + if (!kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr, + bytes, &run->mmio.data)) { + kvmppc_complete_mmio_load(vcpu, run); + vcpu->mmio_needed = 0; + return EMULATE_DONE; + } + return EMULATE_DO_MMIO; } @@ -608,6 +722,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) for (i = 0; i < 9; ++i) kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); vcpu->arch.hcall_needed = 0; +#ifdef CONFIG_BOOKE + } else if (vcpu->arch.epr_needed) { + kvmppc_set_epr(vcpu, run->epr.epr); + vcpu->arch.epr_needed = 0; +#endif } r = kvmppc_vcpu_run(run, vcpu); @@ -649,6 +768,16 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, r = 0; vcpu->arch.papr_enabled = true; break; + case KVM_CAP_PPC_EPR: + r = 0; + vcpu->arch.epr_enabled = cap->args[0]; + break; +#ifdef CONFIG_BOOKE + case KVM_CAP_PPC_BOOKE_WATCHDOG: + r = 0; + vcpu->arch.watchdog_enabled = true; + break; +#endif #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) case KVM_CAP_SW_TLB: { struct kvm_config_tlb cfg; @@ -751,9 +880,16 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) { + u32 inst_nop = 0x60000000; +#ifdef CONFIG_KVM_BOOKE_HV + u32 inst_sc1 = 0x44000022; + pvinfo->hcall[0] = inst_sc1; + pvinfo->hcall[1] = inst_nop; + pvinfo->hcall[2] = inst_nop; + pvinfo->hcall[3] = inst_nop; +#else u32 inst_lis = 0x3c000000; u32 inst_ori = 0x60000000; - u32 inst_nop = 0x60000000; u32 inst_sc = 0x44000002; u32 inst_imm_mask = 0xffff; @@ -770,6 +906,9 @@ static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask); pvinfo->hcall[2] = inst_sc; pvinfo->hcall[3] = inst_nop; +#endif + + pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE; return 0; } @@ -832,6 +971,17 @@ long kvm_arch_vm_ioctl(struct file *filp, r = 0; break; } + + case KVM_PPC_GET_HTAB_FD: { + struct kvm *kvm = filp->private_data; + struct kvm_get_htab_fd ghf; + + r = -EFAULT; + if (copy_from_user(&ghf, argp, sizeof(ghf))) + break; + r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf); + break; + } #endif /* CONFIG_KVM_BOOK3S_64_HV */ #ifdef CONFIG_PPC_BOOK3S_64 diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h index ddb6a2149d4..e326489a542 100644 --- a/arch/powerpc/kvm/trace.h +++ b/arch/powerpc/kvm/trace.h @@ -31,6 +31,126 @@ TRACE_EVENT(kvm_ppc_instr, __entry->inst, __entry->pc, __entry->emulate) ); +#ifdef CONFIG_PPC_BOOK3S +#define kvm_trace_symbol_exit \ + {0x100, "SYSTEM_RESET"}, \ + 
{0x200, "MACHINE_CHECK"}, \ + {0x300, "DATA_STORAGE"}, \ + {0x380, "DATA_SEGMENT"}, \ + {0x400, "INST_STORAGE"}, \ + {0x480, "INST_SEGMENT"}, \ + {0x500, "EXTERNAL"}, \ + {0x501, "EXTERNAL_LEVEL"}, \ + {0x502, "EXTERNAL_HV"}, \ + {0x600, "ALIGNMENT"}, \ + {0x700, "PROGRAM"}, \ + {0x800, "FP_UNAVAIL"}, \ + {0x900, "DECREMENTER"}, \ + {0x980, "HV_DECREMENTER"}, \ + {0xc00, "SYSCALL"}, \ + {0xd00, "TRACE"}, \ + {0xe00, "H_DATA_STORAGE"}, \ + {0xe20, "H_INST_STORAGE"}, \ + {0xe40, "H_EMUL_ASSIST"}, \ + {0xf00, "PERFMON"}, \ + {0xf20, "ALTIVEC"}, \ + {0xf40, "VSX"} +#else +#define kvm_trace_symbol_exit \ + {0, "CRITICAL"}, \ + {1, "MACHINE_CHECK"}, \ + {2, "DATA_STORAGE"}, \ + {3, "INST_STORAGE"}, \ + {4, "EXTERNAL"}, \ + {5, "ALIGNMENT"}, \ + {6, "PROGRAM"}, \ + {7, "FP_UNAVAIL"}, \ + {8, "SYSCALL"}, \ + {9, "AP_UNAVAIL"}, \ + {10, "DECREMENTER"}, \ + {11, "FIT"}, \ + {12, "WATCHDOG"}, \ + {13, "DTLB_MISS"}, \ + {14, "ITLB_MISS"}, \ + {15, "DEBUG"}, \ + {32, "SPE_UNAVAIL"}, \ + {33, "SPE_FP_DATA"}, \ + {34, "SPE_FP_ROUND"}, \ + {35, "PERFORMANCE_MONITOR"}, \ + {36, "DOORBELL"}, \ + {37, "DOORBELL_CRITICAL"}, \ + {38, "GUEST_DBELL"}, \ + {39, "GUEST_DBELL_CRIT"}, \ + {40, "HV_SYSCALL"}, \ + {41, "HV_PRIV"} +#endif + +TRACE_EVENT(kvm_exit, + TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu), + TP_ARGS(exit_nr, vcpu), + + TP_STRUCT__entry( + __field( unsigned int, exit_nr ) + __field( unsigned long, pc ) + __field( unsigned long, msr ) + __field( unsigned long, dar ) +#ifdef CONFIG_KVM_BOOK3S_PR + __field( unsigned long, srr1 ) +#endif + __field( unsigned long, last_inst ) + ), + + TP_fast_assign( +#ifdef CONFIG_KVM_BOOK3S_PR + struct kvmppc_book3s_shadow_vcpu *svcpu; +#endif + __entry->exit_nr = exit_nr; + __entry->pc = kvmppc_get_pc(vcpu); + __entry->dar = kvmppc_get_fault_dar(vcpu); + __entry->msr = vcpu->arch.shared->msr; +#ifdef CONFIG_KVM_BOOK3S_PR + svcpu = svcpu_get(vcpu); + __entry->srr1 = svcpu->shadow_srr1; + svcpu_put(svcpu); +#endif + __entry->last_inst = vcpu->arch.last_inst; + ), + + TP_printk("exit=%s" + " | pc=0x%lx" + " | msr=0x%lx" + " | dar=0x%lx" +#ifdef CONFIG_KVM_BOOK3S_PR + " | srr1=0x%lx" +#endif + " | last_inst=0x%lx" + , + __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit), + __entry->pc, + __entry->msr, + __entry->dar, +#ifdef CONFIG_KVM_BOOK3S_PR + __entry->srr1, +#endif + __entry->last_inst + ) +); + +TRACE_EVENT(kvm_unmap_hva, + TP_PROTO(unsigned long hva), + TP_ARGS(hva), + + TP_STRUCT__entry( + __field( unsigned long, hva ) + ), + + TP_fast_assign( + __entry->hva = hva; + ), + + TP_printk("unmap hva 0x%lx\n", __entry->hva) +); + TRACE_EVENT(kvm_stlb_inval, TP_PROTO(unsigned int stlb_index), TP_ARGS(stlb_index), @@ -98,41 +218,31 @@ TRACE_EVENT(kvm_gtlb_write, __entry->word1, __entry->word2) ); - -/************************************************************************* - * Book3S trace points * - *************************************************************************/ - -#ifdef CONFIG_KVM_BOOK3S_PR - -TRACE_EVENT(kvm_book3s_exit, - TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu), - TP_ARGS(exit_nr, vcpu), +TRACE_EVENT(kvm_check_requests, + TP_PROTO(struct kvm_vcpu *vcpu), + TP_ARGS(vcpu), TP_STRUCT__entry( - __field( unsigned int, exit_nr ) - __field( unsigned long, pc ) - __field( unsigned long, msr ) - __field( unsigned long, dar ) - __field( unsigned long, srr1 ) + __field( __u32, cpu_nr ) + __field( __u32, requests ) ), TP_fast_assign( - struct kvmppc_book3s_shadow_vcpu *svcpu; - __entry->exit_nr = exit_nr; - __entry->pc = 
kvmppc_get_pc(vcpu); - __entry->dar = kvmppc_get_fault_dar(vcpu); - __entry->msr = vcpu->arch.shared->msr; - svcpu = svcpu_get(vcpu); - __entry->srr1 = svcpu->shadow_srr1; - svcpu_put(svcpu); + __entry->cpu_nr = vcpu->vcpu_id; + __entry->requests = vcpu->requests; ), - TP_printk("exit=0x%x | pc=0x%lx | msr=0x%lx | dar=0x%lx | srr1=0x%lx", - __entry->exit_nr, __entry->pc, __entry->msr, __entry->dar, - __entry->srr1) + TP_printk("vcpu=%x requests=%x", + __entry->cpu_nr, __entry->requests) ); + +/************************************************************************* + * Book3S trace points * + *************************************************************************/ + +#ifdef CONFIG_KVM_BOOK3S_PR + TRACE_EVENT(kvm_book3s_reenter, TP_PROTO(int r, struct kvm_vcpu *vcpu), TP_ARGS(r, vcpu), @@ -395,6 +505,44 @@ TRACE_EVENT(kvm_booke206_gtlb_write, __entry->mas2, __entry->mas7_3) ); +TRACE_EVENT(kvm_booke206_ref_release, + TP_PROTO(__u64 pfn, __u32 flags), + TP_ARGS(pfn, flags), + + TP_STRUCT__entry( + __field( __u64, pfn ) + __field( __u32, flags ) + ), + + TP_fast_assign( + __entry->pfn = pfn; + __entry->flags = flags; + ), + + TP_printk("pfn=%llx flags=%x", + __entry->pfn, __entry->flags) +); + +TRACE_EVENT(kvm_booke_queue_irqprio, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority), + TP_ARGS(vcpu, priority), + + TP_STRUCT__entry( + __field( __u32, cpu_nr ) + __field( __u32, priority ) + __field( unsigned long, pending ) + ), + + TP_fast_assign( + __entry->cpu_nr = vcpu->vcpu_id; + __entry->priority = priority; + __entry->pending = vcpu->arch.pending_exceptions; + ), + + TP_printk("vcpu=%x prio=%x pending=%lx", + __entry->cpu_nr, __entry->priority, __entry->pending) +); + #endif #endif /* _TRACE_KVM_H */ diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 746e0c895cd..45043327669 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -4,7 +4,7 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror -ccflags-$(CONFIG_PPC64) := -mno-minimal-toc +ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) CFLAGS_REMOVE_code-patching.o = -pg CFLAGS_REMOVE_feature-fixups.o = -pg @@ -19,9 +19,7 @@ obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \ checksum_wrappers_64.o hweight_64.o \ copyuser_power7.o string_64.o copypage_power7.o \ memcpy_power7.o -obj-$(CONFIG_XMON) += sstep.o ldstfp.o -obj-$(CONFIG_KPROBES) += sstep.o ldstfp.o -obj-$(CONFIG_HAVE_HW_BREAKPOINT) += sstep.o ldstfp.o +obj-$(CONFIG_PPC_EMULATE_SSTEP) += sstep.o ldstfp.o ifeq ($(CONFIG_PPC64),y) obj-$(CONFIG_SMP) += locks.o diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index 3787b61f7d2..cf16b5733ea 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -4,7 +4,7 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror -ccflags-$(CONFIG_PPC64) := -mno-minimal-toc +ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) obj-y := fault.o mem.o pgtable.o gup.o \ init_$(CONFIG_WORD_SIZE).o \ diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 0a6b28336eb..229951ffc35 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -113,19 +113,6 @@ static int store_updates_sp(struct pt_regs *regs) #define MM_FAULT_CONTINUE -1 #define MM_FAULT_ERR(sig) (sig) -static int out_of_memory(struct pt_regs *regs) -{ - /* - * We ran out of memory, or some other thing happened to us that made - * us unable to handle the page fault gracefully. 
- */ - up_read(¤t->mm->mmap_sem); - if (!user_mode(regs)) - return MM_FAULT_ERR(SIGKILL); - pagefault_out_of_memory(); - return MM_FAULT_RETURN; -} - static int do_sigbus(struct pt_regs *regs, unsigned long address) { siginfo_t info; @@ -169,8 +156,18 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault) return MM_FAULT_CONTINUE; /* Out of memory */ - if (fault & VM_FAULT_OOM) - return out_of_memory(regs); + if (fault & VM_FAULT_OOM) { + up_read(¤t->mm->mmap_sem); + + /* + * We ran out of memory, or some other thing happened to us that + * made us unable to handle the page fault gracefully. + */ + if (!user_mode(regs)) + return MM_FAULT_ERR(SIGKILL); + pagefault_out_of_memory(); + return MM_FAULT_RETURN; + } /* Bus error. x86 handles HWPOISON here, we'll add this if/when * we support the feature in HW @@ -252,8 +249,8 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \ defined(CONFIG_PPC_BOOK3S_64)) if (error_code & DSISR_DABRMATCH) { - /* DABR match */ - do_dabr(regs, address, error_code); + /* breakpoint match */ + do_break(regs, address, error_code); return 0; } #endif diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S index 56585086413..7443481a315 100644 --- a/arch/powerpc/mm/hash_low_64.S +++ b/arch/powerpc/mm/hash_low_64.S @@ -115,11 +115,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) sldi r29,r5,SID_SHIFT - VPN_SHIFT rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) or r29,r28,r29 - - /* Calculate hash value for primary slot and store it in r28 */ - rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ - rldicl r0,r3,64-12,48 /* (ea >> 12) & 0xffff */ - xor r28,r5,r0 + /* + * Calculate hash value for primary slot and store it in r28 + * r3 = va, r5 = vsid + * r0 = (va >> 12) & ((1ul << (28 - 12)) -1) + */ + rldicl r0,r3,64-12,48 + xor r28,r5,r0 /* hash */ b 4f 3: /* Calc vpn and put it in r29 */ @@ -130,11 +132,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) /* * calculate hash value for primary slot and * store it in r28 for 1T segment + * r3 = va, r5 = vsid */ - rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ - clrldi r5,r5,40 /* vsid & 0xffffff */ - rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ - xor r28,r28,r5 + sldi r28,r5,25 /* vsid << 25 */ + /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */ + rldicl r0,r3,64-12,36 + xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ xor r28,r28,r0 /* hash */ /* Convert linux PTE bits into HW equivalents */ @@ -407,11 +410,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) */ rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) or r29,r28,r29 - - /* Calculate hash value for primary slot and store it in r28 */ - rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ - rldicl r0,r3,64-12,48 /* (ea >> 12) & 0xffff */ - xor r28,r5,r0 + /* + * Calculate hash value for primary slot and store it in r28 + * r3 = va, r5 = vsid + * r0 = (va >> 12) & ((1ul << (28 - 12)) -1) + */ + rldicl r0,r3,64-12,48 + xor r28,r5,r0 /* hash */ b 4f 3: /* Calc vpn and put it in r29 */ @@ -426,11 +431,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) /* * Calculate hash value for primary slot and * store it in r28 for 1T segment + * r3 = va, r5 = vsid */ - rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ - clrldi r5,r5,40 /* vsid & 0xffffff */ - rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ - xor r28,r28,r5 + sldi r28,r5,25 /* vsid << 25 */ + /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */ + rldicl r0,r3,64-12,36 + 
xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ xor r28,r28,r0 /* hash */ /* Convert linux PTE bits into HW equivalents */ @@ -752,25 +758,27 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) or r29,r28,r29 - /* Calculate hash value for primary slot and store it in r28 */ - rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ - rldicl r0,r3,64-16,52 /* (ea >> 16) & 0xfff */ - xor r28,r5,r0 + /* Calculate hash value for primary slot and store it in r28 + * r3 = va, r5 = vsid + * r0 = (va >> 16) & ((1ul << (28 - 16)) -1) + */ + rldicl r0,r3,64-16,52 + xor r28,r5,r0 /* hash */ b 4f 3: /* Calc vpn and put it in r29 */ sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT) or r29,r28,r29 - /* * calculate hash value for primary slot and * store it in r28 for 1T segment + * r3 = va, r5 = vsid */ - rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ - clrldi r5,r5,40 /* vsid & 0xffffff */ - rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */ - xor r28,r28,r5 + sldi r28,r5,25 /* vsid << 25 */ + /* r0 = (va >> 16) & ((1ul << (40 - 16)) -1) */ + rldicl r0,r3,64-16,40 + xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ xor r28,r28,r0 /* hash */ /* Convert linux PTE bits into HW equivalents */ diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 3a292be2e07..1b6e1271719 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -55,6 +55,7 @@ #include <asm/code-patching.h> #include <asm/fadump.h> #include <asm/firmware.h> +#include <asm/tm.h> #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) @@ -1171,6 +1172,21 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize, DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx); ppc_md.hpte_invalidate(slot, vpn, psize, ssize, local); } pte_iterate_hashed_end(); + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* Transactions are not aborted by tlbiel, only tlbie. + * Without, syncing a page back to a block device w/ PIO could pick up + * transactional data (bad!) so we force an abort here. Before the + * sync the page will be made read-only, which will flush_hash_page. + * BIG ISSUE here: if the kernel uses a page from userspace without + * unmapping it first, it may see the speculated version. 
+ */ + if (local && cpu_has_feature(CPU_FTR_TM) && + MSR_TM_ACTIVE(current->thread.regs->msr)) { + tm_enable(); + tm_abort(TM_CAUSE_TLBI); + } +#endif } void flush_hash_range(unsigned long number, int local) diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 95a45293e5a..7e2246fb2f3 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -297,5 +297,10 @@ int __meminit vmemmap_populate(struct page *start_page, return 0; } + +void vmemmap_free(struct page *memmap, unsigned long nr_pages) +{ +} + #endif /* CONFIG_SPARSEMEM_VMEMMAP */ diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 0dba5066c22..f1f7409a418 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -133,6 +133,18 @@ int arch_add_memory(int nid, u64 start, u64 size) return __add_pages(nid, zone, start_pfn, nr_pages); } + +#ifdef CONFIG_MEMORY_HOTREMOVE +int arch_remove_memory(u64 start, u64 size) +{ + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; + struct zone *zone; + + zone = page_zone(pfn_to_page(start_pfn)); + return __remove_pages(zone, start_pfn, nr_pages); +} +#endif #endif /* CONFIG_MEMORY_HOTPLUG */ /* @@ -195,13 +207,10 @@ void __init do_init_bootmem(void) min_low_pfn = MEMORY_START >> PAGE_SHIFT; boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn); - /* Add active regions with valid PFNs */ - for_each_memblock(memory, reg) { - unsigned long start_pfn, end_pfn; - start_pfn = memblock_region_memory_base_pfn(reg); - end_pfn = memblock_region_memory_end_pfn(reg); - memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0); - } + /* Place all memblock_regions in the same node and merge contiguous + * memblock_regions + */ + memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0); /* Add all physical memory to the bootmem map, mark each area * present. diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 59213cfaeca..bba87ca2b4d 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -399,18 +399,6 @@ static unsigned long read_n_cells(int n, const unsigned int **buf) return result; } -struct of_drconf_cell { - u64 base_addr; - u32 drc_index; - u32 reserved; - u32 aa_index; - u32 flags; -}; - -#define DRCONF_MEM_ASSIGNED 0x00000008 -#define DRCONF_MEM_AI_INVALID 0x00000040 -#define DRCONF_MEM_RESERVED 0x00000080 - /* * Read the next memblock list entry from the ibm,dynamic-memory property * and return the information in the provided of_drconf_cell structure. diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 5829d2a950d..cf9dada734b 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -722,7 +722,7 @@ void slice_set_range_psize(struct mm_struct *mm, unsigned long start, } /* - * is_hugepage_only_range() is used by generic code to verify wether + * is_hugepage_only_range() is used by generic code to verify whether * a normal mmap mapping (non hugetlbfs) is valid on a given area. * * until the generic code provides a more generic hook and/or starts diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c index ae758b3ff72..0d82ef50dc3 100644 --- a/arch/powerpc/mm/tlb_hash64.c +++ b/arch/powerpc/mm/tlb_hash64.c @@ -186,8 +186,6 @@ void tlb_flush(struct mmu_gather *tlb) * Because of that usage pattern, it's only available with CONFIG_HOTPLUG * and is implemented for small size rather than speed. 
*/ -#ifdef CONFIG_HOTPLUG - void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, unsigned long end) { @@ -221,5 +219,3 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, arch_leave_lazy_mmu_mode(); local_irq_restore(flags); } - -#endif /* CONFIG_HOTPLUG */ diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S index fab919fd138..626ad081639 100644 --- a/arch/powerpc/mm/tlb_nohash_low.S +++ b/arch/powerpc/mm/tlb_nohash_low.S @@ -191,12 +191,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x) #ifdef CONFIG_PPC_47x /* - * 47x variant of icbt - */ -# define ICBT(CT,RA,RB) \ - .long 0x7c00002c | ((CT) << 21) | ((RA) << 16) | ((RB) << 11) - -/* * _tlbivax_bcast is only on 47x. We don't bother doing a runtime * check though, it will blow up soon enough if we mistakenly try * to use it on a 440. @@ -208,8 +202,7 @@ _GLOBAL(_tlbivax_bcast) wrteei 0 mtspr SPRN_MMUCR,r5 isync -/* tlbivax 0,r3 - use .long to avoid binutils deps */ - .long 0x7c000624 | (r3 << 11) + PPC_TLBIVAX(0, R3) isync eieio tlbsync @@ -227,11 +220,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_476_DD2) bl 2f 2: mflr r6 li r7,32 - ICBT(0,r6,r7) /* touch next cache line */ + PPC_ICBT(0,R6,R7) /* touch next cache line */ add r6,r6,r7 - ICBT(0,r6,r7) /* touch next cache line */ + PPC_ICBT(0,R6,R7) /* touch next cache line */ add r6,r6,r7 - ICBT(0,r6,r7) /* touch next cache line */ + PPC_ICBT(0,R6,R7) /* touch next cache line */ sync nop nop diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index 1fc8109bf2f..8a5dfaf5c6b 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -134,6 +134,12 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); ___PPC_RS(a) | IMM_L(i)) #define PPC_ORIS(d, a, i) EMIT(PPC_INST_ORIS | ___PPC_RA(d) | \ ___PPC_RS(a) | IMM_L(i)) +#define PPC_XOR(d, a, b) EMIT(PPC_INST_XOR | ___PPC_RA(d) | \ + ___PPC_RS(a) | ___PPC_RB(b)) +#define PPC_XORI(d, a, i) EMIT(PPC_INST_XORI | ___PPC_RA(d) | \ + ___PPC_RS(a) | IMM_L(i)) +#define PPC_XORIS(d, a, i) EMIT(PPC_INST_XORIS | ___PPC_RA(d) | \ + ___PPC_RS(a) | IMM_L(i)) #define PPC_SLW(d, a, s) EMIT(PPC_INST_SLW | ___PPC_RA(d) | \ ___PPC_RS(a) | ___PPC_RB(s)) #define PPC_SRW(d, a, s) EMIT(PPC_INST_SRW | ___PPC_RA(d) | \ diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index dd1130642d0..e834f1ec23c 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -13,6 +13,8 @@ #include <asm/cacheflush.h> #include <linux/netdevice.h> #include <linux/filter.h> +#include <linux/if_vlan.h> + #include "bpf_jit.h" #ifndef __BIG_ENDIAN @@ -89,6 +91,8 @@ static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image, case BPF_S_ANC_IFINDEX: case BPF_S_ANC_MARK: case BPF_S_ANC_RXHASH: + case BPF_S_ANC_VLAN_TAG: + case BPF_S_ANC_VLAN_TAG_PRESENT: case BPF_S_ANC_CPU: case BPF_S_ANC_QUEUE: case BPF_S_LD_W_ABS: @@ -232,6 +236,17 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, if (K >= 65536) PPC_ORIS(r_A, r_A, IMM_H(K)); break; + case BPF_S_ANC_ALU_XOR_X: + case BPF_S_ALU_XOR_X: /* A ^= X */ + ctx->seen |= SEEN_XREG; + PPC_XOR(r_A, r_A, r_X); + break; + case BPF_S_ALU_XOR_K: /* A ^= K */ + if (IMM_L(K)) + PPC_XORI(r_A, r_A, IMM_L(K)); + if (K >= 65536) + PPC_XORIS(r_A, r_A, IMM_H(K)); + break; case BPF_S_ALU_LSH_X: /* A <<= X; */ ctx->seen |= SEEN_XREG; PPC_SLW(r_A, r_A, r_X); @@ -371,6 +386,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, rxhash)); break; + case 
BPF_S_ANC_VLAN_TAG: + case BPF_S_ANC_VLAN_TAG_PRESENT: + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); + PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, + vlan_tci)); + if (filter[i].code == BPF_S_ANC_VLAN_TAG) + PPC_ANDI(r_A, r_A, VLAN_VID_MASK); + else + PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT); + break; case BPF_S_ANC_QUEUE: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); diff --git a/arch/powerpc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile index 73456c4cec2..751ec7bd501 100644 --- a/arch/powerpc/oprofile/Makefile +++ b/arch/powerpc/oprofile/Makefile @@ -1,6 +1,6 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror -ccflags-$(CONFIG_PPC64) := -mno-minimal-toc +ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) obj-$(CONFIG_OPROFILE) += oprofile.o diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c index 315f9495e9b..f444b94935f 100644 --- a/arch/powerpc/oprofile/op_model_power4.c +++ b/arch/powerpc/oprofile/op_model_power4.c @@ -52,7 +52,7 @@ static int power7_marked_instr_event(u64 mmcr1) for (pmc = 0; pmc < 4; pmc++) { psel = mmcr1 & (OPROFILE_PM_PMCSEL_MSK << (OPROFILE_MAX_PMC_NUM - pmc) - * OPROFILE_MAX_PMC_NUM); + * OPROFILE_PMSEL_FIELD_WIDTH); psel = (psel >> ((OPROFILE_MAX_PMC_NUM - pmc) * OPROFILE_PMSEL_FIELD_WIDTH)) & ~1ULL; unit = mmcr1 & (OPROFILE_PM_UNIT_MSK diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index aa2465e21f1..65362e98eb2 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -880,8 +880,16 @@ static int power_pmu_add(struct perf_event *event, int ef_flags) cpuhw->events[n0] = event->hw.config; cpuhw->flags[n0] = event->hw.event_base; + /* + * This event may have been disabled/stopped in record_and_restart() + * because we exceeded the ->event_limit. If re-starting the event, + * clear the ->hw.state (STOPPED and UPTODATE flags), so the user + * notification is re-enabled. + */ if (!(ef_flags & PERF_EF_START)) event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + else + event->hw.state = 0; /* * If group events scheduling transaction was started, @@ -1305,6 +1313,16 @@ static int power_pmu_event_idx(struct perf_event *event) return event->hw.idx; } +ssize_t power_events_sysfs_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + + return sprintf(page, "event=0x%02llx\n", pmu_attr->id); +} + struct pmu power_pmu = { .pmu_enable = power_pmu_enable, .pmu_disable = power_pmu_disable, @@ -1349,6 +1367,8 @@ static void record_and_restart(struct perf_event *event, unsigned long val, */ val = 0; left = local64_read(&event->hw.period_left) - delta; + if (delta == 0) + left++; if (period) { if (left <= 0) { left += period; @@ -1412,11 +1432,8 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return regs->nip; } -static bool pmc_overflow(unsigned long val) +static bool pmc_overflow_power7(unsigned long val) { - if ((int)val < 0) - return true; - /* * Events on POWER7 can roll back if a speculative event doesn't * eventually complete. Unfortunately in some rare cases they will @@ -1428,7 +1445,15 @@ static bool pmc_overflow(unsigned long val) * PMCs because a user might set a period of less than 256 and we * don't want to mistakenly reset them. 
*/ - if (pvr_version_is(PVR_POWER7) && ((0x80000000 - val) <= 256)) + if ((0x80000000 - val) <= 256) + return true; + + return false; +} + +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) return true; return false; @@ -1439,11 +1464,11 @@ static bool pmc_overflow(unsigned long val) */ static void perf_event_interrupt(struct pt_regs *regs) { - int i; + int i, j; struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); struct perf_event *event; - unsigned long val; - int found = 0; + unsigned long val[8]; + int found, active; int nmi; if (cpuhw->n_limited) @@ -1458,33 +1483,53 @@ static void perf_event_interrupt(struct pt_regs *regs) else irq_enter(); - for (i = 0; i < cpuhw->n_events; ++i) { - event = cpuhw->event[i]; - if (!event->hw.idx || is_limited_pmc(event->hw.idx)) + /* Read all the PMCs since we'll need them a bunch of times */ + for (i = 0; i < ppmu->n_counter; ++i) + val[i] = read_pmc(i + 1); + + /* Try to find what caused the IRQ */ + found = 0; + for (i = 0; i < ppmu->n_counter; ++i) { + if (!pmc_overflow(val[i])) continue; - val = read_pmc(event->hw.idx); - if ((int)val < 0) { - /* event has overflowed */ - found = 1; - record_and_restart(event, val, regs); + if (is_limited_pmc(i + 1)) + continue; /* these won't generate IRQs */ + /* + * We've found one that's overflowed. For active + * counters we need to log this. For inactive + * counters, we need to reset it anyway + */ + found = 1; + active = 0; + for (j = 0; j < cpuhw->n_events; ++j) { + event = cpuhw->event[j]; + if (event->hw.idx == (i + 1)) { + active = 1; + record_and_restart(event, val[i], regs); + break; + } } + if (!active) + /* reset non active counters that have overflowed */ + write_pmc(i + 1, 0); } - - /* - * In case we didn't find and reset the event that caused - * the interrupt, scan all events and reset any that are - * negative, to avoid getting continual interrupts. - * Any that we processed in the previous loop will not be negative. - */ - if (!found) { - for (i = 0; i < ppmu->n_counter; ++i) { - if (is_limited_pmc(i + 1)) + if (!found && pvr_version_is(PVR_POWER7)) { + /* check active counters for special buggy p7 overflow */ + for (i = 0; i < cpuhw->n_events; ++i) { + event = cpuhw->event[i]; + if (!event->hw.idx || is_limited_pmc(event->hw.idx)) continue; - val = read_pmc(i + 1); - if (pmc_overflow(val)) - write_pmc(i + 1, 0); + if (pmc_overflow_power7(val[event->hw.idx - 1])) { + /* event has overflowed in a buggy way*/ + found = 1; + record_and_restart(event, + val[event->hw.idx - 1], + regs); + } } } + if ((!found) && printk_ratelimit()) + printk(KERN_WARNING "Can't find PMC that caused IRQ\n"); /* * Reset MMCR0 to its normal value. This will set PMXE and @@ -1537,6 +1582,8 @@ int __cpuinit register_power_pmu(struct power_pmu *pmu) pr_info("%s performance monitor hardware support registered\n", pmu->name); + power_pmu.attr_groups = ppmu->attr_groups; + #ifdef MSR_HV /* * Use FCHV to ignore kernel events if MSR.HV is set. 
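The core-book3s.c hunks above split the counter-overflow test in two. A minimal sketch of the resulting logic (illustrative only, not an additional patch; the 256-count margin and the PVR gate are taken directly from the hunks above):

/*
 * Sketch of the two overflow tests introduced above.
 * A PMC has overflowed when its most-significant bit is set.
 * On POWER7, a speculatively counted event can roll the counter
 * back, so a value that stopped just short of 0x80000000 is also
 * treated as an overflow -- but only within a 256-count margin,
 * so counters programmed with a small period are not reset by
 * mistake.
 */
static bool pmc_overflow(unsigned long val)
{
	return (int)val < 0;			/* MSB set */
}

static bool pmc_overflow_power7(unsigned long val)
{
	return (0x80000000 - val) <= 256;	/* rolled back just short of overflow */
}

With this split, perf_event_interrupt() reads every PMC once, records and restarts any active event whose counter passes pmc_overflow(), and zeroes overflowed counters that have no active event; only if nothing matched on a POWER7 (pvr_version_is(PVR_POWER7)) is the relaxed pmc_overflow_power7() test applied to the active events, and a rate-limited warning is printed if the interrupt source still cannot be identified. The new power_events_sysfs_show() helper simply formats an event code as "event=0x%02llx"; together with the attr_groups hook added to register_power_pmu(), this lets a PMU driver (power7-pmu.c below) expose its event list to userspace, assuming the standard perf sysfs layout under /sys/bus/event_source/devices/<pmu>/events/.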
diff --git a/arch/powerpc/perf/e500-pmu.c b/arch/powerpc/perf/e500-pmu.c index cb2e2949c8d..fb664929f5d 100644 --- a/arch/powerpc/perf/e500-pmu.c +++ b/arch/powerpc/perf/e500-pmu.c @@ -24,6 +24,8 @@ static int e500_generic_events[] = { [PERF_COUNT_HW_CACHE_MISSES] = 41, /* Data L1 cache reloads */ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 12, [PERF_COUNT_HW_BRANCH_MISSES] = 15, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 18, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 19, }; #define C(x) PERF_COUNT_HW_CACHE_##x diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c index 441af08edf4..b554879bd31 100644 --- a/arch/powerpc/perf/power7-pmu.c +++ b/arch/powerpc/perf/power7-pmu.c @@ -51,11 +51,25 @@ #define MMCR1_PMCSEL_MSK 0xff /* + * Power7 event codes. + */ +#define PME_PM_CYC 0x1e +#define PME_PM_GCT_NOSLOT_CYC 0x100f8 +#define PME_PM_CMPLU_STALL 0x4000a +#define PME_PM_INST_CMPL 0x2 +#define PME_PM_LD_REF_L1 0xc880 +#define PME_PM_LD_MISS_L1 0x400f0 +#define PME_PM_BRU_FIN 0x10068 +#define PME_PM_BRU_MPRED 0x400f6 + +/* * Layout of constraint bits: * 6666555555555544444444443333333333222222222211111111110000000000 * 3210987654321098765432109876543210987654321098765432109876543210 - * [ ><><><><><><> - * NC P6P5P4P3P2P1 + * < >< ><><><><><><> + * L2 NC P6P5P4P3P2P1 + * + * L2 - 16-18 - Required L2SEL value (select field) * * NC - number of counters * 15: NC error 0x8000 @@ -72,7 +86,7 @@ static int power7_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) { - int pmc, sh; + int pmc, sh, unit; unsigned long mask = 0, value = 0; pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; @@ -90,6 +104,15 @@ static int power7_get_constraint(u64 event, unsigned long *maskp, mask |= 0x8000; value |= 0x1000; } + + unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; + if (unit == 6) { + /* L2SEL must be identical across events */ + int l2sel = (event >> PM_L2SEL_SH) & PM_L2SEL_MSK; + mask |= 0x7 << 16; + value |= l2sel << 16; + } + *maskp = mask; *valp = value; return 0; @@ -296,14 +319,14 @@ static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[]) } static int power7_generic_events[] = { - [PERF_COUNT_HW_CPU_CYCLES] = 0x1e, - [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x100f8, /* GCT_NOSLOT_CYC */ - [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x4000a, /* CMPLU_STALL */ - [PERF_COUNT_HW_INSTRUCTIONS] = 2, - [PERF_COUNT_HW_CACHE_REFERENCES] = 0xc880, /* LD_REF_L1_LSU*/ - [PERF_COUNT_HW_CACHE_MISSES] = 0x400f0, /* LD_MISS_L1 */ - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x10068, /* BRU_FIN */ - [PERF_COUNT_HW_BRANCH_MISSES] = 0x400f6, /* BR_MPRED */ + [PERF_COUNT_HW_CPU_CYCLES] = PME_PM_CYC, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PME_PM_GCT_NOSLOT_CYC, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PME_PM_CMPLU_STALL, + [PERF_COUNT_HW_INSTRUCTIONS] = PME_PM_INST_CMPL, + [PERF_COUNT_HW_CACHE_REFERENCES] = PME_PM_LD_REF_L1, + [PERF_COUNT_HW_CACHE_MISSES] = PME_PM_LD_MISS_L1, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PME_PM_BRU_FIN, + [PERF_COUNT_HW_BRANCH_MISSES] = PME_PM_BRU_MPRED, }; #define C(x) PERF_COUNT_HW_CACHE_##x @@ -351,6 +374,57 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { }, }; + +GENERIC_EVENT_ATTR(cpu-cycles, CYC); +GENERIC_EVENT_ATTR(stalled-cycles-frontend, GCT_NOSLOT_CYC); +GENERIC_EVENT_ATTR(stalled-cycles-backend, CMPLU_STALL); +GENERIC_EVENT_ATTR(instructions, INST_CMPL); +GENERIC_EVENT_ATTR(cache-references, LD_REF_L1); +GENERIC_EVENT_ATTR(cache-misses, LD_MISS_L1); +GENERIC_EVENT_ATTR(branch-instructions, BRU_FIN); 
+GENERIC_EVENT_ATTR(branch-misses, BRU_MPRED); + +POWER_EVENT_ATTR(CYC, CYC); +POWER_EVENT_ATTR(GCT_NOSLOT_CYC, GCT_NOSLOT_CYC); +POWER_EVENT_ATTR(CMPLU_STALL, CMPLU_STALL); +POWER_EVENT_ATTR(INST_CMPL, INST_CMPL); +POWER_EVENT_ATTR(LD_REF_L1, LD_REF_L1); +POWER_EVENT_ATTR(LD_MISS_L1, LD_MISS_L1); +POWER_EVENT_ATTR(BRU_FIN, BRU_FIN) +POWER_EVENT_ATTR(BRU_MPRED, BRU_MPRED); + +static struct attribute *power7_events_attr[] = { + GENERIC_EVENT_PTR(CYC), + GENERIC_EVENT_PTR(GCT_NOSLOT_CYC), + GENERIC_EVENT_PTR(CMPLU_STALL), + GENERIC_EVENT_PTR(INST_CMPL), + GENERIC_EVENT_PTR(LD_REF_L1), + GENERIC_EVENT_PTR(LD_MISS_L1), + GENERIC_EVENT_PTR(BRU_FIN), + GENERIC_EVENT_PTR(BRU_MPRED), + + POWER_EVENT_PTR(CYC), + POWER_EVENT_PTR(GCT_NOSLOT_CYC), + POWER_EVENT_PTR(CMPLU_STALL), + POWER_EVENT_PTR(INST_CMPL), + POWER_EVENT_PTR(LD_REF_L1), + POWER_EVENT_PTR(LD_MISS_L1), + POWER_EVENT_PTR(BRU_FIN), + POWER_EVENT_PTR(BRU_MPRED), + NULL +}; + + +static struct attribute_group power7_pmu_events_group = { + .name = "events", + .attrs = power7_events_attr, +}; + +static const struct attribute_group *power7_pmu_attr_groups[] = { + &power7_pmu_events_group, + NULL, +}; + static struct power_pmu power7_pmu = { .name = "POWER7", .n_counter = 6, @@ -362,6 +436,7 @@ static struct power_pmu power7_pmu = { .get_alternatives = power7_get_alternatives, .disable_pmc = power7_disable_pmc, .flags = PPMU_ALT_SIPR, + .attr_groups = power7_pmu_attr_groups, .n_generic = ARRAY_SIZE(power7_generic_events), .generic_events = power7_generic_events, .cache_events = &power7_cache_events, diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c index 969dddcf332..8f3920e5a04 100644 --- a/arch/powerpc/platforms/40x/ppc40x_simple.c +++ b/arch/powerpc/platforms/40x/ppc40x_simple.c @@ -57,7 +57,8 @@ static const char * const board[] __initconst = { "amcc,makalu", "apm,klondike", "est,hotfoot", - "plathome,obs600" + "plathome,obs600", + NULL }; static int __init ppc40x_probe(void) diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index 8abf6fb8f41..0effe9f5a1e 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig @@ -252,6 +252,14 @@ config PPC4xx_GPIO help Enable gpiolib support for ppc440 based boards +config PPC4xx_OCM + bool "PPC4xx On Chip Memory (OCM) support" + depends on 4xx + select PPC_LIB_RHEAP + help + Enable OCM support for PowerPC 4xx platforms with on chip memory, + OCM provides the fast place for memory access to improve performance. + # 44x specific CPU modules, selected based on the board above. config 440EP bool diff --git a/arch/powerpc/platforms/44x/currituck.c b/arch/powerpc/platforms/44x/currituck.c index 6bd89a0e0de..ecd3890c40d 100644 --- a/arch/powerpc/platforms/44x/currituck.c +++ b/arch/powerpc/platforms/44x/currituck.c @@ -46,7 +46,7 @@ static __initdata struct of_device_id ppc47x_of_bus[] = { /* The EEPROM is missing and the default values are bogus. 
This forces USB in * to EHCI mode */ -static void __devinit quirk_ppc_currituck_usb_fixup(struct pci_dev *dev) +static void quirk_ppc_currituck_usb_fixup(struct pci_dev *dev) { if (of_machine_is_compatible("ibm,currituck")) { pci_write_config_dword(dev, 0xe0, 0x0114231f); diff --git a/arch/powerpc/platforms/44x/virtex_ml510.c b/arch/powerpc/platforms/44x/virtex_ml510.c index ba4a6e388a4..1fdb8748638 100644 --- a/arch/powerpc/platforms/44x/virtex_ml510.c +++ b/arch/powerpc/platforms/44x/virtex_ml510.c @@ -5,7 +5,7 @@ /** * ml510_ail_quirk */ -static void __devinit ml510_ali_quirk(struct pci_dev *dev) +static void ml510_ali_quirk(struct pci_dev *dev) { /* Enable the IDE controller */ pci_write_config_byte(dev, 0x58, 0x4c); diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig index b62508b113d..c16999802ec 100644 --- a/arch/powerpc/platforms/512x/Kconfig +++ b/arch/powerpc/platforms/512x/Kconfig @@ -2,7 +2,6 @@ config PPC_MPC512x bool "512x-based boards" depends on 6xx select FSL_SOC - select FB_FSL_DIU select IPIC select PPC_CLOCK select PPC_PCI_CHOICE diff --git a/arch/powerpc/platforms/512x/clock.c b/arch/powerpc/platforms/512x/clock.c index 9f771e05457..52d57d28172 100644 --- a/arch/powerpc/platforms/512x/clock.c +++ b/arch/powerpc/platforms/512x/clock.c @@ -26,6 +26,7 @@ #include <linux/of_platform.h> #include <asm/mpc5xxx.h> +#include <asm/mpc5121.h> #include <asm/clk_interface.h> #undef CLK_DEBUG @@ -122,7 +123,7 @@ struct mpc512x_clockctl { u32 dccr; /* DIU Clk Cnfg Reg */ }; -struct mpc512x_clockctl __iomem *clockctl; +static struct mpc512x_clockctl __iomem *clockctl; static int mpc5121_clk_enable(struct clk *clk) { @@ -184,7 +185,7 @@ static unsigned long spmf_mult(void) 36, 40, 44, 48, 52, 56, 60, 64 }; - int spmf = (clockctl->spmr >> 24) & 0xf; + int spmf = (in_be32(&clockctl->spmr) >> 24) & 0xf; return spmf_to_mult[spmf]; } @@ -206,7 +207,7 @@ static unsigned long sysdiv_div_x_2(void) 52, 56, 58, 62, 60, 64, 66, }; - int sysdiv = (clockctl->scfr2 >> 26) & 0x3f; + int sysdiv = (in_be32(&clockctl->scfr2) >> 26) & 0x3f; return sysdiv_to_div_x_2[sysdiv]; } @@ -230,7 +231,7 @@ static unsigned long sys_to_ref(unsigned long rate) static long ips_to_ref(unsigned long rate) { - int ips_div = (clockctl->scfr1 >> 23) & 0x7; + int ips_div = (in_be32(&clockctl->scfr1) >> 23) & 0x7; rate *= ips_div; /* csb_clk = ips_clk * ips_div */ rate *= 2; /* sys_clk = csb_clk * 2 */ @@ -284,7 +285,7 @@ static struct clk sys_clk = { static void diu_clk_calc(struct clk *clk) { - int diudiv_x_2 = clockctl->scfr1 & 0xff; + int diudiv_x_2 = in_be32(&clockctl->scfr1) & 0xff; unsigned long rate; rate = sys_clk.rate; @@ -311,7 +312,7 @@ static void half_clk_calc(struct clk *clk) static void generic_div_clk_calc(struct clk *clk) { - int div = (clockctl->scfr1 >> clk->div_shift) & 0x7; + int div = (in_be32(&clockctl->scfr1) >> clk->div_shift) & 0x7; clk->rate = clk->parent->rate / div; } @@ -329,7 +330,7 @@ static struct clk csb_clk = { static void e300_clk_calc(struct clk *clk) { - int spmf = (clockctl->spmr >> 16) & 0xf; + int spmf = (in_be32(&clockctl->spmr) >> 16) & 0xf; int ratex2 = clk->parent->rate * spmf; clk->rate = ratex2 / 2; @@ -551,7 +552,7 @@ static struct clk ac97_clk = { .calc = ac97_clk_calc, }; -struct clk *rate_clks[] = { +static struct clk *rate_clks[] = { &ref_clk, &sys_clk, &diu_clk, @@ -607,7 +608,7 @@ static void rate_clks_init(void) * There are two clk enable registers with 32 enable bits each * psc clocks and device clocks are all stored in dev_clks */ 
-struct clk dev_clks[2][32]; +static struct clk dev_clks[2][32]; /* * Given a psc number return the dev_clk @@ -648,12 +649,12 @@ static void psc_calc_rate(struct clk *clk, int pscnum, struct device_node *np) out_be32(&clockctl->pccr[pscnum], 0x00020000); out_be32(&clockctl->pccr[pscnum], 0x00030000); - if (clockctl->pccr[pscnum] & 0x80) { + if (in_be32(&clockctl->pccr[pscnum]) & 0x80) { clk->rate = spdif_rxclk.rate; return; } - switch ((clockctl->pccr[pscnum] >> 14) & 0x3) { + switch ((in_be32(&clockctl->pccr[pscnum]) >> 14) & 0x3) { case 0: mclk_src = sys_clk.rate; break; @@ -668,7 +669,7 @@ static void psc_calc_rate(struct clk *clk, int pscnum, struct device_node *np) break; } - mclk_div = ((clockctl->pccr[pscnum] >> 17) & 0x7fff) + 1; + mclk_div = ((in_be32(&clockctl->pccr[pscnum]) >> 17) & 0x7fff) + 1; clk->rate = mclk_src / mclk_div; } @@ -680,13 +681,12 @@ static void psc_calc_rate(struct clk *clk, int pscnum, struct device_node *np) static void psc_clks_init(void) { struct device_node *np; - const u32 *cell_index; struct platform_device *ofdev; + u32 reg; for_each_compatible_node(np, NULL, "fsl,mpc5121-psc") { - cell_index = of_get_property(np, "cell-index", NULL); - if (cell_index) { - int pscnum = *cell_index; + if (!of_property_read_u32(np, "reg", ®)) { + int pscnum = (reg & 0xf00) >> 8; struct clk *clk = psc_dev_clk(pscnum); clk->flags = CLK_HAS_RATE | CLK_HAS_CTRL; @@ -696,7 +696,7 @@ static void psc_clks_init(void) * AC97 is special rate clock does * not go through normal path */ - if (strcmp("ac97", np->name) == 0) + if (of_device_is_compatible(np, "fsl,mpc5121-psc-ac97")) clk->rate = ac97_clk.rate; else psc_calc_rate(clk, pscnum, np); diff --git a/arch/powerpc/platforms/512x/mpc5121_ads.c b/arch/powerpc/platforms/512x/mpc5121_ads.c index dcef6ade48e..0a134e0469e 100644 --- a/arch/powerpc/platforms/512x/mpc5121_ads.c +++ b/arch/powerpc/platforms/512x/mpc5121_ads.c @@ -42,7 +42,10 @@ static void __init mpc5121_ads_setup_arch(void) for_each_compatible_node(np, "pci", "fsl,mpc5121-pci") mpc83xx_add_bridge(np); #endif + +#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) mpc512x_setup_diu(); +#endif } static void __init mpc5121_ads_init_IRQ(void) diff --git a/arch/powerpc/platforms/512x/mpc512x.h b/arch/powerpc/platforms/512x/mpc512x.h index 1ab6d11d0b1..c32b399eb95 100644 --- a/arch/powerpc/platforms/512x/mpc512x.h +++ b/arch/powerpc/platforms/512x/mpc512x.h @@ -16,6 +16,13 @@ extern void __init mpc512x_init(void); extern int __init mpc5121_clk_init(void); void __init mpc512x_declare_of_platform_devices(void); extern void mpc512x_restart(char *cmd); -extern void mpc512x_init_diu(void); -extern void mpc512x_setup_diu(void); + +#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) +void mpc512x_init_diu(void); +void mpc512x_setup_diu(void); +#else +#define mpc512x_init_diu NULL +#define mpc512x_setup_diu NULL +#endif + #endif /* __MPC512X_H__ */ diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c index 1650e090ef3..d30235b7e3f 100644 --- a/arch/powerpc/platforms/512x/mpc512x_shared.c +++ b/arch/powerpc/platforms/512x/mpc512x_shared.c @@ -58,6 +58,8 @@ void mpc512x_restart(char *cmd) ; } +#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) + struct fsl_diu_shared_fb { u8 gamma[0x300]; /* 32-bit aligned! */ struct diu_ad ad0; /* 32-bit aligned! 
*/ @@ -66,29 +68,6 @@ struct fsl_diu_shared_fb { bool in_use; }; -u32 mpc512x_get_pixel_format(enum fsl_diu_monitor_port port, - unsigned int bits_per_pixel) -{ - switch (bits_per_pixel) { - case 32: - return 0x88883316; - case 24: - return 0x88082219; - case 16: - return 0x65053118; - } - return 0x00000400; -} - -void mpc512x_set_gamma_table(enum fsl_diu_monitor_port port, - char *gamma_table_base) -{ -} - -void mpc512x_set_monitor_port(enum fsl_diu_monitor_port port) -{ -} - #define DIU_DIV_MASK 0x000000ff void mpc512x_set_pixel_clock(unsigned int pixclock) { @@ -320,14 +299,13 @@ void __init mpc512x_setup_diu(void) } } - diu_ops.get_pixel_format = mpc512x_get_pixel_format; - diu_ops.set_gamma_table = mpc512x_set_gamma_table; - diu_ops.set_monitor_port = mpc512x_set_monitor_port; diu_ops.set_pixel_clock = mpc512x_set_pixel_clock; diu_ops.valid_monitor_port = mpc512x_valid_monitor_port; diu_ops.release_bootmem = mpc512x_release_bootmem; } +#endif + void __init mpc512x_init_IRQ(void) { struct device_node *np; @@ -448,8 +426,38 @@ void __init mpc512x_psc_fifo_init(void) void __init mpc512x_init(void) { - mpc512x_declare_of_platform_devices(); mpc5121_clk_init(); + mpc512x_declare_of_platform_devices(); mpc512x_restart_init(); mpc512x_psc_fifo_init(); } + +/** + * mpc512x_cs_config - Setup chip select configuration + * @cs: chip select number + * @val: chip select configuration value + * + * Perform chip select configuration for devices on LocalPlus Bus. + * Intended to dynamically reconfigure the chip select parameters + * for configurable devices on the bus. + */ +int mpc512x_cs_config(unsigned int cs, u32 val) +{ + static struct mpc512x_lpc __iomem *lpc; + struct device_node *np; + + if (cs > 7) + return -EINVAL; + + if (!lpc) { + np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-lpc"); + lpc = of_iomap(np, 0); + of_node_put(np); + if (!lpc) + return -ENOMEM; + } + + out_be32(&lpc->cs_cfg[cs], val); + return 0; +} +EXPORT_SYMBOL(mpc512x_cs_config); diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c index 448d862bcf3..1843bc93201 100644 --- a/arch/powerpc/platforms/52xx/lite5200.c +++ b/arch/powerpc/platforms/52xx/lite5200.c @@ -4,7 +4,7 @@ * Written by: Grant Likely <grant.likely@secretlab.ca> * * Copyright (C) Secret Lab Technologies Ltd. 2006. All rights reserved. - * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved. + * Copyright 2006 Freescale Semiconductor, Inc. All rights reserved. 
* * Description: * This program is free software; you can redistribute it and/or modify it diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c index 9cf36020cf0..792a301a0bf 100644 --- a/arch/powerpc/platforms/52xx/mpc5200_simple.c +++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c @@ -50,6 +50,7 @@ static void __init mpc5200_simple_setup_arch(void) /* list of the supported boards */ static const char *board[] __initdata = { + "anonymous,a3m071", "anonymous,a4m072", "anon,charon", "ifm,o2d", diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c index 028470b9588..692998244d2 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c @@ -526,7 +526,7 @@ EXPORT_SYMBOL(mpc52xx_gpt_timer_period); #define WDT_IDENTITY "mpc52xx watchdog on GPT0" -/* wdt_is_active stores wether or not the /dev/watchdog device is opened */ +/* wdt_is_active stores whether or not the /dev/watchdog device is opened */ static unsigned long wdt_is_active; /* wdt-capable gpt */ @@ -669,7 +669,7 @@ static struct miscdevice mpc52xx_wdt_miscdev = { .fops = &mpc52xx_wdt_fops, }; -static int __devinit mpc52xx_gpt_wdt_init(void) +static int mpc52xx_gpt_wdt_init(void) { int err; @@ -704,7 +704,7 @@ static int mpc52xx_gpt_wdt_setup(struct mpc52xx_gpt_priv *gpt, #else -static int __devinit mpc52xx_gpt_wdt_init(void) +static int mpc52xx_gpt_wdt_init(void) { return 0; } @@ -720,7 +720,7 @@ static inline int mpc52xx_gpt_wdt_setup(struct mpc52xx_gpt_priv *gpt, /* --------------------------------------------------------------------- * of_platform bus binding code */ -static int __devinit mpc52xx_gpt_probe(struct platform_device *ofdev) +static int mpc52xx_gpt_probe(struct platform_device *ofdev) { struct mpc52xx_gpt_priv *gpt; diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c index 2351f9e0fb6..be7b1aa4d54 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c @@ -20,9 +20,9 @@ #include <asm/mpc52xx.h> #include <asm/time.h> -#include <sysdev/bestcomm/bestcomm.h> -#include <sysdev/bestcomm/bestcomm_priv.h> -#include <sysdev/bestcomm/gen_bd.h> +#include <linux/fsl/bestcomm/bestcomm.h> +#include <linux/fsl/bestcomm/bestcomm_priv.h> +#include <linux/fsl/bestcomm/gen_bd.h> MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); MODULE_DESCRIPTION("MPC5200 LocalPlus FIFO device driver"); @@ -470,7 +470,7 @@ void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req) } EXPORT_SYMBOL(mpc52xx_lpbfifo_abort); -static int __devinit mpc52xx_lpbfifo_probe(struct platform_device *op) +static int mpc52xx_lpbfifo_probe(struct platform_device *op) { struct resource res; int rc = -ENOMEM; @@ -540,7 +540,7 @@ static int __devinit mpc52xx_lpbfifo_probe(struct platform_device *op) } -static int __devexit mpc52xx_lpbfifo_remove(struct platform_device *op) +static int mpc52xx_lpbfifo_remove(struct platform_device *op) { if (lpbfifo.dev != &op->dev) return 0; @@ -564,7 +564,7 @@ static int __devexit mpc52xx_lpbfifo_remove(struct platform_device *op) return 0; } -static struct of_device_id mpc52xx_lpbfifo_match[] __devinitconst = { +static struct of_device_id mpc52xx_lpbfifo_match[] = { { .compatible = "fsl,mpc5200-lpbfifo", }, {}, }; @@ -576,20 +576,6 @@ static struct platform_driver mpc52xx_lpbfifo_driver = { .of_match_table = mpc52xx_lpbfifo_match, }, .probe = mpc52xx_lpbfifo_probe, - .remove = 
__devexit_p(mpc52xx_lpbfifo_remove), + .remove = mpc52xx_lpbfifo_remove, }; - -/*********************************************************************** - * Module init/exit - */ -static int __init mpc52xx_lpbfifo_init(void) -{ - return platform_driver_register(&mpc52xx_lpbfifo_driver); -} -module_init(mpc52xx_lpbfifo_init); - -static void __exit mpc52xx_lpbfifo_exit(void) -{ - platform_driver_unregister(&mpc52xx_lpbfifo_driver); -} -module_exit(mpc52xx_lpbfifo_exit); +module_platform_driver(mpc52xx_lpbfifo_driver); diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c index 8520b58a5e9..b89ef65392d 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c @@ -372,10 +372,11 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq, case MPC52xx_IRQ_L1_MAIN: irqchip = &mpc52xx_main_irqchip; break; case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break; case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break; - default: - pr_err("%s: invalid irq: virq=%i, l1=%i, l2=%i\n", - __func__, virq, l1irq, l2irq); - return -EINVAL; + case MPC52xx_IRQ_L1_CRIT: + pr_warn("%s: Critical IRQ #%d is unsupported! Nopping it.\n", + __func__, l2irq); + irq_set_chip(virq, &no_irq_chip); + return 0; } irq_set_chip_and_handler(virq, irqchip, handle_level_irq); diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c index 10ff526cd04..79799b29ffe 100644 --- a/arch/powerpc/platforms/82xx/ep8248e.c +++ b/arch/powerpc/platforms/82xx/ep8248e.c @@ -111,7 +111,7 @@ static struct mdiobb_ctrl ep8248e_mdio_ctrl = { .ops = &ep8248e_mdio_ops, }; -static int __devinit ep8248e_mdio_probe(struct platform_device *ofdev) +static int ep8248e_mdio_probe(struct platform_device *ofdev) { struct mii_bus *bus; struct resource res; diff --git a/arch/powerpc/platforms/82xx/km82xx.c b/arch/powerpc/platforms/82xx/km82xx.c index cf964e19573..058cc1895c8 100644 --- a/arch/powerpc/platforms/82xx/km82xx.c +++ b/arch/powerpc/platforms/82xx/km82xx.c @@ -18,11 +18,11 @@ #include <linux/fsl_devices.h> #include <linux/of_platform.h> -#include <asm/io.h> +#include <linux/io.h> #include <asm/cpm2.h> #include <asm/udbg.h> #include <asm/machdep.h> -#include <asm/time.h> +#include <linux/time.h> #include <asm/mpc8260.h> #include <asm/prom.h> @@ -36,7 +36,7 @@ static void __init km82xx_pic_init(void) struct device_node *np = of_find_compatible_node(NULL, NULL, "fsl,pq2-pic"); if (!np) { - printk(KERN_ERR "PIC init: can not find cpm-pic node\n"); + pr_err("PIC init: can not find cpm-pic node\n"); return; } diff --git a/arch/powerpc/platforms/82xx/pq2.c b/arch/powerpc/platforms/82xx/pq2.c index fb94d10e5a4..fc8b2d6a7d8 100644 --- a/arch/powerpc/platforms/82xx/pq2.c +++ b/arch/powerpc/platforms/82xx/pq2.c @@ -71,11 +71,11 @@ err: void __init pq2_init_pci(void) { - struct device_node *np = NULL; + struct device_node *np; ppc_md.pci_exclude_device = pq2_pci_exclude_device; - while ((np = of_find_compatible_node(np, NULL, "fsl,pq2-pci"))) + for_each_compatible_node(np, NULL, "fsl,pq2-pci") pq2_pci_add_bridge(np); } #endif diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c index 328d221fd1c..74861a7fb80 100644 --- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c +++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c @@ -16,7 +16,6 @@ #include <linux/spinlock.h> #include <linux/irq.h> #include <linux/types.h> -#include <linux/bootmem.h> #include 
<linux/slab.h> #include <asm/io.h> @@ -149,7 +148,7 @@ int __init pq2ads_pci_init_irq(void) priv->regs = of_iomap(np, 0); if (!priv->regs) { printk(KERN_ERR "Cannot map PCI PIC registers.\n"); - goto out_free_bootmem; + goto out_free_kmalloc; } /* mask all PCI interrupts */ @@ -171,9 +170,8 @@ int __init pq2ads_pci_init_irq(void) out_unmap_regs: iounmap(priv->regs); -out_free_bootmem: - free_bootmem((unsigned long)priv, - sizeof(struct pq2ads_pci_pic)); +out_free_kmalloc: + kfree(priv); of_node_put(np); out_unmap_irq: irq_dispose_mapping(irq); diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c index 89923d72334..bf4c4473abb 100644 --- a/arch/powerpc/platforms/83xx/km83xx.c +++ b/arch/powerpc/platforms/83xx/km83xx.c @@ -28,8 +28,8 @@ #include <linux/of_device.h> #include <linux/atomic.h> -#include <asm/time.h> -#include <asm/io.h> +#include <linux/time.h> +#include <linux/io.h> #include <asm/machdep.h> #include <asm/ipic.h> #include <asm/irq.h> @@ -43,6 +43,82 @@ #include "mpc83xx.h" #define SVR_REV(svr) (((svr) >> 0) & 0xFFFF) /* Revision field */ + +static void quirk_mpc8360e_qe_enet10(void) +{ + /* + * handle mpc8360E Erratum QE_ENET10: + * RGMII AC values do not meet the specification + */ + uint svid = mfspr(SPRN_SVR); + struct device_node *np_par; + struct resource res; + void __iomem *base; + int ret; + + np_par = of_find_node_by_name(NULL, "par_io"); + if (np_par == NULL) { + pr_warn("%s couldn;t find par_io node\n", __func__); + return; + } + /* Map Parallel I/O ports registers */ + ret = of_address_to_resource(np_par, 0, &res); + if (ret) { + pr_warn("%s couldn;t map par_io registers\n", __func__); + return; + } + + base = ioremap(res.start, res.end - res.start + 1); + + /* + * set output delay adjustments to default values according + * table 5 in Errata Rev. 
5, 9/2011: + * + * write 0b01 to UCC1 bits 18:19 + * write 0b01 to UCC2 option 1 bits 4:5 + * write 0b01 to UCC2 option 2 bits 16:17 + */ + clrsetbits_be32((base + 0xa8), 0x0c00f000, 0x04005000); + + /* + * set output delay adjustments to default values according + * table 3-13 in Reference Manual Rev.3 05/2010: + * + * write 0b01 to UCC2 option 2 bits 16:17 + * write 0b0101 to UCC1 bits 20:23 + * write 0b0101 to UCC2 option 1 bits 24:27 + */ + clrsetbits_be32((base + 0xac), 0x0000cff0, 0x00004550); + + if (SVR_REV(svid) == 0x0021) { + /* + * UCC2 option 1: write 0b1010 to bits 24:27 + * at address IMMRBAR+0x14AC + */ + clrsetbits_be32((base + 0xac), 0x000000f0, 0x000000a0); + } else if (SVR_REV(svid) == 0x0020) { + /* + * UCC1: write 0b11 to bits 18:19 + * at address IMMRBAR+0x14A8 + */ + setbits32((base + 0xa8), 0x00003000); + + /* + * UCC2 option 1: write 0b11 to bits 4:5 + * at address IMMRBAR+0x14A8 + */ + setbits32((base + 0xa8), 0x0c000000); + + /* + * UCC2 option 2: write 0b11 to bits 16:17 + * at address IMMRBAR+0x14AC + */ + setbits32((base + 0xac), 0x0000c000); + } + iounmap(base); + of_node_put(np_par); +} + /* ************************************************************************ * * Setup the architecture @@ -72,84 +148,13 @@ static void __init mpc83xx_km_setup_arch(void) for_each_node_by_name(np, "ucc") par_io_of_config(np); - } - - np = of_find_compatible_node(NULL, "network", "ucc_geth"); - if (np != NULL) { - /* - * handle mpc8360E Erratum QE_ENET10: - * RGMII AC values do not meet the specification - */ - uint svid = mfspr(SPRN_SVR); - struct device_node *np_par; - struct resource res; - void __iomem *base; - int ret; - - np_par = of_find_node_by_name(NULL, "par_io"); - if (np_par == NULL) { - printk(KERN_WARNING "%s couldn;t find par_io node\n", - __func__); - return; - } - /* Map Parallel I/O ports registers */ - ret = of_address_to_resource(np_par, 0, &res); - if (ret) { - printk(KERN_WARNING "%s couldn;t map par_io registers\n", - __func__); - return; - } - - base = ioremap(res.start, res.end - res.start + 1); - - /* - * set output delay adjustments to default values according - * table 5 in Errata Rev. 
5, 9/2011: - * - * write 0b01 to UCC1 bits 18:19 - * write 0b01 to UCC2 option 1 bits 4:5 - * write 0b01 to UCC2 option 2 bits 16:17 - */ - clrsetbits_be32((base + 0xa8), 0x0c00f000, 0x04005000); - /* - * set output delay adjustments to default values according - * table 3-13 in Reference Manual Rev.3 05/2010: - * - * write 0b01 to UCC2 option 2 bits 16:17 - * write 0b0101 to UCC1 bits 20:23 - * write 0b0101 to UCC2 option 1 bits 24:27 - */ - clrsetbits_be32((base + 0xac), 0x0000cff0, 0x00004550); - - if (SVR_REV(svid) == 0x0021) { - /* - * UCC2 option 1: write 0b1010 to bits 24:27 - * at address IMMRBAR+0x14AC - */ - clrsetbits_be32((base + 0xac), 0x000000f0, 0x000000a0); - } else if (SVR_REV(svid) == 0x0020) { - /* - * UCC1: write 0b11 to bits 18:19 - * at address IMMRBAR+0x14A8 - */ - setbits32((base + 0xa8), 0x00003000); - - /* - * UCC2 option 1: write 0b11 to bits 4:5 - * at address IMMRBAR+0x14A8 - */ - setbits32((base + 0xa8), 0x0c000000); - - /* - * UCC2 option 2: write 0b11 to bits 16:17 - * at address IMMRBAR+0x14AC - */ - setbits32((base + 0xac), 0x0000c000); + /* Only apply this quirk when par_io is available */ + np = of_find_compatible_node(NULL, "network", "ucc_geth"); + if (np != NULL) { + quirk_mpc8360e_qe_enet10(); + of_node_put(np); } - iounmap(base); - of_node_put(np_par); - of_node_put(np); } #endif /* CONFIG_QUICC_ENGINE */ } diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c index ef6537b8ed3..624cb51d19c 100644 --- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c +++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c @@ -145,8 +145,7 @@ static int mcu_gpiochip_remove(struct mcu *mcu) return gpiochip_remove(&mcu->gc); } -static int __devinit mcu_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int mcu_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct mcu *mcu; int ret; @@ -188,7 +187,7 @@ err: return ret; } -static int __devexit mcu_remove(struct i2c_client *client) +static int mcu_remove(struct i2c_client *client) { struct mcu *mcu = i2c_get_clientdata(client); int ret; @@ -216,7 +215,7 @@ static const struct i2c_device_id mcu_ids[] = { }; MODULE_DEVICE_TABLE(i2c, mcu_ids); -static struct of_device_id mcu_of_match_table[] __devinitdata = { +static struct of_device_id mcu_of_match_table[] = { { .compatible = "fsl,mcu-mpc8349emitx", }, { }, }; @@ -228,7 +227,7 @@ static struct i2c_driver mcu_driver = { .of_match_table = mcu_of_match_table, }, .probe = mcu_probe, - .remove = __devexit_p(mcu_remove), + .remove = mcu_remove, .id_table = mcu_ids, }; diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c index d440435e055..8d762203eef 100644 --- a/arch/powerpc/platforms/83xx/mpc832x_mds.c +++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c @@ -1,5 +1,5 @@ /* - * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved. + * Copyright 2006 Freescale Semiconductor, Inc. All rights reserved. * * Description: * MPC832xE MDS board specific routines. diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c index 1b1f6c8a1a1..1a26d2f8340 100644 --- a/arch/powerpc/platforms/83xx/mpc836x_mds.c +++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c @@ -1,5 +1,5 @@ /* - * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved. + * Copyright 2006 Freescale Semiconductor, Inc. All rights reserved. 
* * Author: Li Yang <LeoLi@freescale.com> * Yin Olivia <Hong-hua.Yin@freescale.com> diff --git a/arch/powerpc/platforms/83xx/mpc836x_rdk.c b/arch/powerpc/platforms/83xx/mpc836x_rdk.c index f8769d713d6..b63b42d11d6 100644 --- a/arch/powerpc/platforms/83xx/mpc836x_rdk.c +++ b/arch/powerpc/platforms/83xx/mpc836x_rdk.c @@ -1,7 +1,7 @@ /* * MPC8360E-RDK board file. * - * Copyright (c) 2006 Freescale Semicondutor, Inc. + * Copyright (c) 2006 Freescale Semiconductor, Inc. * Copyright (c) 2007-2008 MontaVista Software, Inc. * * Author: Anton Vorontsov <avorontsov@ru.mvista.com> diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c index eca1f0960ff..9813c81e8e5 100644 --- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c @@ -1,7 +1,7 @@ /* * arch/powerpc/platforms/83xx/mpc837x_rdb.c * - * Copyright (C) 2007 Freescale Semicondutor, Inc. All rights reserved. + * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. * * MPC837x RDB board specific routines * diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig index 02d02a09942..a0dcd577fb0 100644 --- a/arch/powerpc/platforms/85xx/Kconfig +++ b/arch/powerpc/platforms/85xx/Kconfig @@ -191,6 +191,13 @@ config SBC8548 help This option enables support for the Wind River SBC8548 board +config PPA8548 + bool "Prodrive PPA8548" + help + This option enables support for the Prodrive PPA8548 board. + select DEFAULT_UIMAGE + select HAS_RAPIDIO + config GE_IMP3A bool "GE Intelligent Platforms IMP3A" select DEFAULT_UIMAGE @@ -245,6 +252,14 @@ config P4080_DS help This option enables support for the P4080 DS board +config SGY_CTS1000 + tristate "Servergy CTS-1000 support" + select GPIOLIB + select OF_GPIO + depends on P4080_DS + help + Enable this to support functionality in Servergy's CTS-1000 systems. 
+ endif # PPC32 config P5020_DS @@ -277,7 +292,6 @@ config P5040_DS config PPC_QEMU_E500 bool "QEMU generic e500 platform" - depends on EXPERIMENTAL select DEFAULT_UIMAGE help This option enables support for running as a QEMU guest using diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile index 76f679cb04a..07d0dbb141c 100644 --- a/arch/powerpc/platforms/85xx/Makefile +++ b/arch/powerpc/platforms/85xx/Makefile @@ -25,8 +25,10 @@ obj-$(CONFIG_P5040_DS) += p5040_ds.o corenet_ds.o obj-$(CONFIG_STX_GP3) += stx_gp3.o obj-$(CONFIG_TQM85xx) += tqm85xx.o obj-$(CONFIG_SBC8548) += sbc8548.o +obj-$(CONFIG_PPA8548) += ppa8548.o obj-$(CONFIG_SOCRATES) += socrates.o socrates_fpga_pic.o obj-$(CONFIG_KSI8560) += ksi8560.o obj-$(CONFIG_XES_MPC85xx) += xes_mpc85xx.o obj-$(CONFIG_GE_IMP3A) += ge_imp3a.o obj-$(CONFIG_PPC_QEMU_E500) += qemu_e500.o +obj-$(CONFIG_SGY_CTS1000) += sgy_cts1000.o diff --git a/arch/powerpc/platforms/85xx/corenet_ds.c b/arch/powerpc/platforms/85xx/corenet_ds.c index ed69c925071..6f355d8c92f 100644 --- a/arch/powerpc/platforms/85xx/corenet_ds.c +++ b/arch/powerpc/platforms/85xx/corenet_ds.c @@ -64,7 +64,7 @@ void __init corenet_ds_setup_arch(void) pr_info("%s board from Freescale Semiconductor\n", ppc_md.name); } -static const struct of_device_id of_device_ids[] __devinitconst = { +static const struct of_device_id of_device_ids[] = { { .compatible = "simple-bus" }, diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c index c474505ad0d..7a31a0e1df2 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c @@ -154,7 +154,7 @@ static void __init mpc85xx_cds_pci_irq_fixup(struct pci_dev *dev) } } -static void __devinit skip_fake_bridge(struct pci_dev *dev) +static void skip_fake_bridge(struct pci_dev *dev) { /* Make it an error to skip the fake bridge * in pci_setup_device() in probe.c */ diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c index 8498f732347..a7b3621a8df 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006-2010, 2012 Freescale Semicondutor, Inc. + * Copyright (C) 2006-2010, 2012 Freescale Semiconductor, Inc. * All rights reserved. * * Author: Andy Fleming <afleming@freescale.com> @@ -206,9 +206,7 @@ static void __init mpc85xx_mds_reset_ucc_phys(void) setbits8(&bcsr_regs[7], BCSR7_UCC12_GETHnRST); clrbits8(&bcsr_regs[8], BCSR8_UEM_MARVELL_RST); - for (np = NULL; (np = of_find_compatible_node(np, - "network", - "ucc_geth")) != NULL;) { + for_each_compatible_node(np, "network", "ucc_geth") { const unsigned int *prop; int ucc_num; diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c index 848a3e98e1c..e611e79f23c 100644 --- a/arch/powerpc/platforms/85xx/p1022_ds.c +++ b/arch/powerpc/platforms/85xx/p1022_ds.c @@ -106,42 +106,6 @@ (c2 << AD_COMP_2_SHIFT) | (c1 << AD_COMP_1_SHIFT) | \ (c0 << AD_COMP_0_SHIFT) | (size << AD_PIXEL_S_SHIFT)) -/** - * p1022ds_get_pixel_format: return the Area Descriptor for a given pixel depth - * - * The Area Descriptor is a 32-bit value that determine which bits in each - * pixel are to be used for each color. 
- */ -static u32 p1022ds_get_pixel_format(enum fsl_diu_monitor_port port, - unsigned int bits_per_pixel) -{ - switch (bits_per_pixel) { - case 32: - /* 0x88883316 */ - return MAKE_AD(3, 2, 0, 1, 3, 8, 8, 8, 8); - case 24: - /* 0x88082219 */ - return MAKE_AD(4, 0, 1, 2, 2, 0, 8, 8, 8); - case 16: - /* 0x65053118 */ - return MAKE_AD(4, 2, 1, 0, 1, 5, 6, 5, 0); - default: - pr_err("fsl-diu: unsupported pixel depth %u\n", bits_per_pixel); - return 0; - } -} - -/** - * p1022ds_set_gamma_table: update the gamma table, if necessary - * - * On some boards, the gamma table for some ports may need to be modified. - * This is not the case on the P1022DS, so we do nothing. -*/ -static void p1022ds_set_gamma_table(enum fsl_diu_monitor_port port, - char *gamma_table_base) -{ -} - struct fsl_law { u32 lawbar; u32 reserved1; @@ -215,13 +179,13 @@ static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port) /* Map the global utilities registers. */ guts_node = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts"); if (!guts_node) { - pr_err("p1022ds: missing global utilties device node\n"); + pr_err("p1022ds: missing global utilities device node\n"); return; } guts = of_iomap(guts_node, 0); if (!guts) { - pr_err("p1022ds: could not map global utilties device\n"); + pr_err("p1022ds: could not map global utilities device\n"); goto exit; } @@ -249,7 +213,7 @@ static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port) goto exit; } - iprop = of_get_property(law_node, "fsl,num-laws", 0); + iprop = of_get_property(law_node, "fsl,num-laws", NULL); if (!iprop) { pr_err("p1022ds: LAW node is missing fsl,num-laws property\n"); goto exit; @@ -302,7 +266,7 @@ static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port) goto exit; } cs1_addr = lbc_br_to_phys(ecm, num_laws, br1); - if (!cs0_addr) { + if (!cs1_addr) { pr_err("p1022ds: could not determine physical address for CS1" " (BR1=%08x)\n", br1); goto exit; @@ -416,14 +380,14 @@ void p1022ds_set_pixel_clock(unsigned int pixclock) /* Map the global utilities registers. */ guts_np = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts"); if (!guts_np) { - pr_err("p1022ds: missing global utilties device node\n"); + pr_err("p1022ds: missing global utilities device node\n"); return; } guts = of_iomap(guts_np, 0); of_node_put(guts_np); if (!guts) { - pr_err("p1022ds: could not map global utilties device\n"); + pr_err("p1022ds: could not map global utilities device\n"); return; } @@ -510,8 +474,6 @@ static void __init p1022_ds_setup_arch(void) ppc_md.progress("p1022_ds_setup_arch()", 0); #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) - diu_ops.get_pixel_format = p1022ds_get_pixel_format; - diu_ops.set_gamma_table = p1022ds_set_gamma_table; diu_ops.set_monitor_port = p1022ds_set_monitor_port; diu_ops.set_pixel_clock = p1022ds_set_pixel_clock; diu_ops.valid_monitor_port = p1022ds_valid_monitor_port; @@ -539,7 +501,7 @@ static void __init p1022_ds_setup_arch(void) }; /* - * prom_update_property() is called before + * of_update_property() is called before * kmalloc() is available, so the 'new' object * should be allocated in the global area. 
* The easiest way is to do that is to @@ -548,7 +510,7 @@ static void __init p1022_ds_setup_arch(void) */ pr_info("p1022ds: disabling %s node", np2->full_name); - prom_update_property(np2, &nor_status); + of_update_property(np2, &nor_status); of_node_put(np2); } @@ -564,7 +526,7 @@ static void __init p1022_ds_setup_arch(void) pr_info("p1022ds: disabling %s node", np2->full_name); - prom_update_property(np2, &nand_status); + of_update_property(np2, &nand_status); of_node_put(np2); } diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c index 55ffa1cc380..8c9297112b3 100644 --- a/arch/powerpc/platforms/85xx/p1022_rdk.c +++ b/arch/powerpc/platforms/85xx/p1022_rdk.c @@ -35,17 +35,6 @@ #define CLKDVDR_PXCLK_MASK 0x00FF0000 /** - * p1022rdk_set_monitor_port: switch the output to a different monitor port - */ -static void p1022rdk_set_monitor_port(enum fsl_diu_monitor_port port) -{ - if (port != FSL_DIU_PORT_DVI) { - pr_err("p1022rdk: unsupported monitor port %i\n", port); - return; - } -} - -/** * p1022rdk_set_pixel_clock: program the DIU's clock * * @pixclock: the wavelength, in picoseconds, of the clock @@ -124,7 +113,6 @@ static void __init p1022_rdk_setup_arch(void) ppc_md.progress("p1022_rdk_setup_arch()", 0); #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) - diu_ops.set_monitor_port = p1022rdk_set_monitor_port; diu_ops.set_pixel_clock = p1022rdk_set_pixel_clock; diu_ops.valid_monitor_port = p1022rdk_valid_monitor_port; #endif diff --git a/arch/powerpc/platforms/85xx/ppa8548.c b/arch/powerpc/platforms/85xx/ppa8548.c new file mode 100644 index 00000000000..6a7704b92c3 --- /dev/null +++ b/arch/powerpc/platforms/85xx/ppa8548.c @@ -0,0 +1,98 @@ +/* + * ppa8548 setup and early boot code. + * + * Copyright 2009 Prodrive B.V.. + * + * By Stef van Os (see MAINTAINERS for contact information) + * + * Based on the SBC8548 support - Copyright 2007 Wind River Systems Inc. + * Based on the MPC8548CDS support - Copyright 2005 Freescale Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/stddef.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/reboot.h> +#include <linux/seq_file.h> +#include <linux/of_platform.h> + +#include <asm/machdep.h> +#include <asm/udbg.h> +#include <asm/mpic.h> + +#include <sysdev/fsl_soc.h> + +static void __init ppa8548_pic_init(void) +{ + struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN, + 0, 256, " OpenPIC "); + BUG_ON(mpic == NULL); + mpic_init(mpic); +} + +/* + * Setup the architecture + */ +static void __init ppa8548_setup_arch(void) +{ + if (ppc_md.progress) + ppc_md.progress("ppa8548_setup_arch()", 0); +} + +static void ppa8548_show_cpuinfo(struct seq_file *m) +{ + uint32_t svid, phid1; + + svid = mfspr(SPRN_SVR); + + seq_printf(m, "Vendor\t\t: Prodrive B.V.\n"); + seq_printf(m, "SVR\t\t: 0x%x\n", svid); + + /* Display cpu Pll setting */ + phid1 = mfspr(SPRN_HID1); + seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); +} + +static struct of_device_id __initdata of_bus_ids[] = { + { .name = "soc", }, + { .type = "soc", }, + { .compatible = "simple-bus", }, + { .compatible = "gianfar", }, + { .compatible = "fsl,srio", }, + {}, +}; + +static int __init declare_of_platform_devices(void) +{ + of_platform_bus_probe(NULL, of_bus_ids, NULL); + + return 0; +} +machine_device_initcall(ppa8548, declare_of_platform_devices); + +/* + * Called very early, device-tree isn't unflattened + */ +static int __init ppa8548_probe(void) +{ + unsigned long root = of_get_flat_dt_root(); + + return of_flat_dt_is_compatible(root, "ppa8548"); +} + +define_machine(ppa8548) { + .name = "ppa8548", + .probe = ppa8548_probe, + .setup_arch = ppa8548_setup_arch, + .init_IRQ = ppa8548_pic_init, + .show_cpuinfo = ppa8548_show_cpuinfo, + .get_irq = mpic_get_irq, + .restart = fsl_rstcr_restart, + .calibrate_decr = generic_calibrate_decr, + .progress = udbg_progress, +}; diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c index f6ea5618c73..5cefc5a9a14 100644 --- a/arch/powerpc/platforms/85xx/qemu_e500.c +++ b/arch/powerpc/platforms/85xx/qemu_e500.c @@ -29,9 +29,10 @@ void __init qemu_e500_pic_init(void) { struct mpic *mpic; + unsigned int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU | + MPIC_ENABLE_COREINT; - mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, - 0, 256, " OpenPIC "); + mpic = mpic_alloc(NULL, 0, flags, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); @@ -66,7 +67,7 @@ define_machine(qemu_e500) { #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif - .get_irq = mpic_get_irq, + .get_irq = mpic_get_coreint_irq, .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c new file mode 100644 index 00000000000..611e92f291c --- /dev/null +++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c @@ -0,0 +1,176 @@ +/* + * Servergy CTS-1000 Setup + * + * Maintained by Ben Collins <ben.c@servergy.com> + * + * Copyright 2012 by Servergy, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/platform_device.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/of_gpio.h> +#include <linux/workqueue.h> +#include <linux/reboot.h> +#include <linux/interrupt.h> + +#include <asm/machdep.h> + +static struct device_node *halt_node; + +static struct of_device_id child_match[] = { + { + .compatible = "sgy,gpio-halt", + }, + {}, +}; + +static void gpio_halt_wfn(struct work_struct *work) +{ + /* Likely wont return */ + orderly_poweroff(true); +} +static DECLARE_WORK(gpio_halt_wq, gpio_halt_wfn); + +static void gpio_halt_cb(void) +{ + enum of_gpio_flags flags; + int trigger, gpio; + + if (!halt_node) + return; + + gpio = of_get_gpio_flags(halt_node, 0, &flags); + + if (!gpio_is_valid(gpio)) + return; + + trigger = (flags == OF_GPIO_ACTIVE_LOW); + + printk(KERN_INFO "gpio-halt: triggering GPIO.\n"); + + /* Probably wont return */ + gpio_set_value(gpio, trigger); +} + +/* This IRQ means someone pressed the power button and it is waiting for us + * to handle the shutdown/poweroff. */ +static irqreturn_t gpio_halt_irq(int irq, void *__data) +{ + printk(KERN_INFO "gpio-halt: shutdown due to power button IRQ.\n"); + schedule_work(&gpio_halt_wq); + + return IRQ_HANDLED; +}; + +static int __devinit gpio_halt_probe(struct platform_device *pdev) +{ + enum of_gpio_flags flags; + struct device_node *node = pdev->dev.of_node; + int gpio, err, irq; + int trigger; + + if (!node) + return -ENODEV; + + /* If there's no matching child, this isn't really an error */ + halt_node = of_find_matching_node(node, child_match); + if (!halt_node) + return 0; + + /* Technically we could just read the first one, but punish + * DT writers for invalid form. */ + if (of_gpio_count(halt_node) != 1) + return -EINVAL; + + /* Get the gpio number relative to the dynamic base. */ + gpio = of_get_gpio_flags(halt_node, 0, &flags); + if (!gpio_is_valid(gpio)) + return -EINVAL; + + err = gpio_request(gpio, "gpio-halt"); + if (err) { + printk(KERN_ERR "gpio-halt: error requesting GPIO %d.\n", + gpio); + halt_node = NULL; + return err; + } + + trigger = (flags == OF_GPIO_ACTIVE_LOW); + + gpio_direction_output(gpio, !trigger); + + /* Now get the IRQ which tells us when the power button is hit */ + irq = irq_of_parse_and_map(halt_node, 0); + err = request_irq(irq, gpio_halt_irq, IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING, "gpio-halt", halt_node); + if (err) { + printk(KERN_ERR "gpio-halt: error requesting IRQ %d for " + "GPIO %d.\n", irq, gpio); + gpio_free(gpio); + halt_node = NULL; + return err; + } + + /* Register our halt function */ + ppc_md.halt = gpio_halt_cb; + ppc_md.power_off = gpio_halt_cb; + + printk(KERN_INFO "gpio-halt: registered GPIO %d (%d trigger, %d" + " irq).\n", gpio, trigger, irq); + + return 0; +} + +static int __devexit gpio_halt_remove(struct platform_device *pdev) +{ + if (halt_node) { + int gpio = of_get_gpio(halt_node, 0); + int irq = irq_of_parse_and_map(halt_node, 0); + + free_irq(irq, halt_node); + + ppc_md.halt = NULL; + ppc_md.power_off = NULL; + + gpio_free(gpio); + + halt_node = NULL; + } + + return 0; +} + +static struct of_device_id gpio_halt_match[] = { + /* We match on the gpio bus itself and scan the children since they + * wont be matched against us. We know the bus wont match until it + * has been registered too. 
*/ + { + .compatible = "fsl,qoriq-gpio", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, gpio_halt_match); + +static struct platform_driver gpio_halt_driver = { + .driver = { + .name = "gpio-halt", + .owner = THIS_MODULE, + .of_match_table = gpio_halt_match, + }, + .probe = gpio_halt_probe, + .remove = __devexit_p(gpio_halt_remove), +}; + +module_platform_driver(gpio_halt_driver); + +MODULE_DESCRIPTION("Driver to support GPIO triggered system halt for Servergy CTS-1000 Systems."); +MODULE_VERSION("1.0"); +MODULE_AUTHOR("Ben Collins <ben.c@servergy.com>"); +MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index 6fcfa12e5c5..148c2f2d978 100644 --- a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c @@ -128,6 +128,19 @@ static void __cpuinit smp_85xx_mach_cpu_die(void) } #endif +static inline void flush_spin_table(void *spin_table) +{ + flush_dcache_range((ulong)spin_table, + (ulong)spin_table + sizeof(struct epapr_spin_table)); +} + +static inline u32 read_spin_table_addr_l(void *spin_table) +{ + flush_dcache_range((ulong)spin_table, + (ulong)spin_table + sizeof(struct epapr_spin_table)); + return in_be32(&((struct epapr_spin_table *)spin_table)->addr_l); +} + static int __cpuinit smp_85xx_kick_cpu(int nr) { unsigned long flags; @@ -161,8 +174,8 @@ static int __cpuinit smp_85xx_kick_cpu(int nr) /* Map the spin table */ if (ioremappable) - spin_table = ioremap(*cpu_rel_addr, - sizeof(struct epapr_spin_table)); + spin_table = ioremap_prot(*cpu_rel_addr, + sizeof(struct epapr_spin_table), _PAGE_COHERENT); else spin_table = phys_to_virt(*cpu_rel_addr); @@ -173,7 +186,16 @@ static int __cpuinit smp_85xx_kick_cpu(int nr) generic_set_cpu_up(nr); if (system_state == SYSTEM_RUNNING) { + /* + * To keep it compatible with old boot program which uses + * cache-inhibit spin table, we need to flush the cache + * before accessing spin table to invalidate any staled data. + * We also need to flush the cache after writing to spin + * table to push data out. + */ + flush_spin_table(spin_table); out_be32(&spin_table->addr_l, 0); + flush_spin_table(spin_table); /* * We don't set the BPTR register here since it already points @@ -181,9 +203,14 @@ static int __cpuinit smp_85xx_kick_cpu(int nr) */ mpic_reset_core(hw_cpu); - /* wait until core is ready... */ - if (!spin_event_timeout(in_be32(&spin_table->addr_l) == 1, - 10000, 100)) { + /* + * wait until core is ready... + * We need to invalidate the stale data, in case the boot + * loader uses a cache-inhibited spin table. + */ + if (!spin_event_timeout( + read_spin_table_addr_l(spin_table) == 1, + 10000, 100)) { pr_err("%s: timeout waiting for core %d to reset\n", __func__, hw_cpu); ret = -ENOENT; @@ -194,12 +221,10 @@ static int __cpuinit smp_85xx_kick_cpu(int nr) __secondary_hold_acknowledge = -1; } #endif + flush_spin_table(spin_table); out_be32(&spin_table->pir, hw_cpu); out_be32(&spin_table->addr_l, __pa(__early_start)); - - if (!ioremappable) - flush_dcache_range((ulong)spin_table, - (ulong)spin_table + sizeof(struct epapr_spin_table)); + flush_spin_table(spin_table); /* Wait a bit for the CPU to ack. 
*/ if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu, @@ -213,13 +238,11 @@ out: #else smp_generic_kick_cpu(nr); + flush_spin_table(spin_table); out_be32(&spin_table->pir, hw_cpu); out_be64((u64 *)(&spin_table->addr_h), __pa((u64)*((unsigned long long *)generic_secondary_smp_init))); - - if (!ioremappable) - flush_dcache_range((ulong)spin_table, - (ulong)spin_table + sizeof(struct epapr_spin_table)); + flush_spin_table(spin_table); #endif local_irq_restore(flags); diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c index b4e58cdc09a..ec0b7272fae 100644 --- a/arch/powerpc/platforms/85xx/tqm85xx.c +++ b/arch/powerpc/platforms/85xx/tqm85xx.c @@ -85,7 +85,7 @@ static void tqm85xx_show_cpuinfo(struct seq_file *m) seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); } -static void __devinit tqm85xx_ti1520_fixup(struct pci_dev *pdev) +static void tqm85xx_ti1520_fixup(struct pci_dev *pdev) { unsigned int val; diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c index bf5338754c5..c23f3443880 100644 --- a/arch/powerpc/platforms/86xx/gef_ppc9a.c +++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c @@ -159,7 +159,7 @@ static void gef_ppc9a_show_cpuinfo(struct seq_file *m) gef_ppc9a_get_vme_is_syscon() ? "yes" : "no"); } -static void __devinit gef_ppc9a_nec_fixup(struct pci_dev *pdev) +static void gef_ppc9a_nec_fixup(struct pci_dev *pdev) { unsigned int val; diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c index 0b7851330a0..8a6ac20686e 100644 --- a/arch/powerpc/platforms/86xx/gef_sbc310.c +++ b/arch/powerpc/platforms/86xx/gef_sbc310.c @@ -146,7 +146,7 @@ static void gef_sbc310_show_cpuinfo(struct seq_file *m) } -static void __devinit gef_sbc310_nec_fixup(struct pci_dev *pdev) +static void gef_sbc310_nec_fixup(struct pci_dev *pdev) { unsigned int val; diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c index b9eb174897b..06c72636f29 100644 --- a/arch/powerpc/platforms/86xx/gef_sbc610.c +++ b/arch/powerpc/platforms/86xx/gef_sbc610.c @@ -136,7 +136,7 @@ static void gef_sbc610_show_cpuinfo(struct seq_file *m) seq_printf(m, "SVR\t\t: 0x%x\n", svid); } -static void __devinit gef_sbc610_nec_fixup(struct pci_dev *pdev) +static void gef_sbc610_nec_fixup(struct pci_dev *pdev) { unsigned int val; diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c index a817398a56d..d479d68fbb2 100644 --- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c +++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c @@ -236,14 +236,14 @@ void mpc8610hpcd_set_pixel_clock(unsigned int pixclock) /* Map the global utilities registers. 
*/ guts_np = of_find_compatible_node(NULL, NULL, "fsl,mpc8610-guts"); if (!guts_np) { - pr_err("mpc8610hpcd: missing global utilties device node\n"); + pr_err("mpc8610hpcd: missing global utilities device node\n"); return; } guts = of_iomap(guts_np, 0); of_node_put(guts_np); if (!guts) { - pr_err("mpc8610hpcd: could not map global utilties device\n"); + pr_err("mpc8610hpcd: could not map global utilities device\n"); return; } @@ -353,5 +353,7 @@ define_machine(mpc86xx_hpcd) { .time_init = mpc86xx_time_init, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, +#ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, +#endif }; diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index e7a896acd98..52de8bccfb3 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -90,6 +90,7 @@ config MPIC config PPC_EPAPR_HV_PIC bool default n + select EPAPR_PARAVIRT config MPIC_WEIRD bool @@ -351,8 +352,6 @@ config OF_RTC Uses information from the OF or flattened device tree to instantiate platform devices for direct mapped RTC chips like the DS1742 or DS1743. -source "arch/powerpc/sysdev/bestcomm/Kconfig" - config SIMPLE_GPIO bool "Support for simple, memory-mapped GPIO controllers" depends on PPC diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 72afd2888ca..cea2f09c424 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -76,6 +76,7 @@ config PPC_BOOK3E_64 bool "Embedded processors" select PPC_FPU # Make it a choice ? select PPC_SMP_MUXED_IPI + select PPC_DOORBELL endchoice @@ -208,6 +209,7 @@ config PPC_FSL_BOOK3E select FSL_EMB_PERFMON select PPC_SMP_MUXED_IPI select SYS_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64 + select PPC_DOORBELL default y if FSL_BOOKE config PTE_64BIT @@ -382,4 +384,8 @@ config NOT_COHERENT_CACHE config CHECK_CACHE_COHERENCY bool +config PPC_DOORBELL + bool + default n + endmenu diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index 2e7ff0c5cf4..53aaefeb338 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig @@ -124,7 +124,7 @@ config CBE_CPUFREQ config CBE_CPUFREQ_PMI_ENABLE bool "CBE frequency scaling using PMI interface" - depends on CBE_CPUFREQ && EXPERIMENTAL + depends on CBE_CPUFREQ default n help Select this, if you want to use the PMI interface diff --git a/arch/powerpc/platforms/cell/celleb_pci.c b/arch/powerpc/platforms/cell/celleb_pci.c index abc8af43ea7..173568140a3 100644 --- a/arch/powerpc/platforms/cell/celleb_pci.c +++ b/arch/powerpc/platforms/cell/celleb_pci.c @@ -401,11 +401,11 @@ error: } else { if (config && *config) { size = 256; - free_bootmem((unsigned long)(*config), size); + free_bootmem(__pa(*config), size); } if (res && *res) { size = sizeof(struct celleb_pci_resource); - free_bootmem((unsigned long)(*res), size); + free_bootmem(__pa(*res), size); } } diff --git a/arch/powerpc/platforms/cell/celleb_scc_sio.c b/arch/powerpc/platforms/cell/celleb_scc_sio.c index 3a16c5b3c46..9c339ec646f 100644 --- a/arch/powerpc/platforms/cell/celleb_scc_sio.c +++ b/arch/powerpc/platforms/cell/celleb_scc_sio.c @@ -42,14 +42,13 @@ static struct { static int __init txx9_serial_init(void) { extern int early_serial_txx9_setup(struct uart_port *port); - struct device_node *node = NULL; + struct device_node *node; int i; struct uart_port req; struct of_irq irq; struct resource res; - while ((node = of_find_compatible_node(node, - 
"serial", "toshiba,sio-scc")) != NULL) { + for_each_compatible_node(node, "serial", "toshiba,sio-scc") { for (i = 0; i < ARRAY_SIZE(txx9_scc_tab); i++) { if (!(txx9_serial_bitmap & (1<<i))) continue; diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index dca21366674..e56bb651da1 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -728,7 +728,7 @@ static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np) nid, np->full_name); /* XXX todo: If we can have multiple windows on the same IOMMU, which - * isn't the case today, we probably want here to check wether the + * isn't the case today, we probably want here to check whether the * iommu for that node is already setup. * However, there might be issue with getting the size right so let's * ignore that for now. We might want to completely get rid of the diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c index 4ab08767118..6ae25fb6201 100644 --- a/arch/powerpc/platforms/cell/setup.c +++ b/arch/powerpc/platforms/cell/setup.c @@ -117,7 +117,7 @@ static void cell_fixup_pcie_rootcomplex(struct pci_dev *dev) } DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, cell_fixup_pcie_rootcomplex); -static int __devinit cell_setup_phb(struct pci_controller *phb) +static int cell_setup_phb(struct pci_controller *phb) { const char *model; struct device_node *np; diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c index 49a65e2dfc7..d35dbbc8ec7 100644 --- a/arch/powerpc/platforms/cell/smp.c +++ b/arch/powerpc/platforms/cell/smp.c @@ -67,7 +67,7 @@ static cpumask_t of_spin_map; * 0 - failure * 1 - success */ -static inline int __devinit smp_startup_cpu(unsigned int lcpu) +static inline int smp_startup_cpu(unsigned int lcpu) { int status; unsigned long start_here = __pa((u32)*((unsigned long *) @@ -108,7 +108,7 @@ static int __init smp_iic_probe(void) return cpumask_weight(cpu_possible_mask); } -static void __devinit smp_cell_setup_cpu(int cpu) +static void smp_cell_setup_cpu(int cpu) { if (cpu != boot_cpuid) iic_setup_cpu(); @@ -119,7 +119,7 @@ static void __devinit smp_cell_setup_cpu(int cpu) mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER); } -static int __devinit smp_cell_kick_cpu(int nr) +static int smp_cell_kick_cpu(int nr) { BUG_ON(nr < 0 || nr >= NR_CPUS); diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c index d8b7cc8a66c..8e299447127 100644 --- a/arch/powerpc/platforms/cell/spider-pic.c +++ b/arch/powerpc/platforms/cell/spider-pic.c @@ -148,7 +148,7 @@ static int spider_set_irq_type(struct irq_data *d, unsigned int type) /* Configure the source. One gross hack that was there before and * that I've kept around is the priority to the BE which I set to - * be the same as the interrupt source number. I don't know wether + * be the same as the interrupt source number. I don't know whether * that's supposed to make any kind of sense however, we'll have to * decide that, but for now, I'm not changing the behaviour. */ @@ -220,7 +220,7 @@ static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc) /* For hooking up the cascace we have a problem. Our device-tree is * crap and we don't know on which BE iic interrupt we are hooked on at * least not the "standard" way. 
We can reconstitute it based on two - * informations though: which BE node we are connected to and wether + * informations though: which BE node we are connected to and whether * we are connected to IOIF0 or IOIF1. Right now, we really only care * about the IBM cell blade and we know that its firmware gives us an * interrupt-map property which is pretty strange. @@ -232,7 +232,7 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic) int imaplen, intsize, unit; struct device_node *iic; - /* First, we check wether we have a real "interrupts" in the device + /* First, we check whether we have a real "interrupts" in the device * tree in case the device-tree is ever fixed */ struct of_irq oirq; diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c index 75d613313f1..b0ec78e8ad6 100644 --- a/arch/powerpc/platforms/cell/spu_callbacks.c +++ b/arch/powerpc/platforms/cell/spu_callbacks.c @@ -60,13 +60,12 @@ long spu_sys_callback(struct spu_syscall_block *s) syscall = spu_syscall_table[s->nr_ret]; -#ifdef DEBUG - print_symbol(KERN_DEBUG "SPU-syscall %s:", (unsigned long)syscall); - printk("syscall%ld(%lx, %lx, %lx, %lx, %lx, %lx)\n", - s->nr_ret, - s->parm[0], s->parm[1], s->parm[2], - s->parm[3], s->parm[4], s->parm[5]); -#endif + pr_debug("SPU-syscall " + "%pSR:syscall%lld(%llx, %llx, %llx, %llx, %llx, %llx)\n", + syscall, + s->nr_ret, + s->parm[0], s->parm[1], s->parm[2], + s->parm[3], s->parm[4], s->parm[5]); return syscall(s->parm[0], s->parm[1], s->parm[2], s->parm[3], s->parm[4], s->parm[5]); diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c index 657e3f233a6..c9500ea7be2 100644 --- a/arch/powerpc/platforms/cell/spufs/coredump.c +++ b/arch/powerpc/platforms/cell/spufs/coredump.c @@ -111,7 +111,7 @@ static int match_context(const void *v, struct file *file, unsigned fd) struct spu_context *ctx; if (file->f_op != &spufs_context_fops) return 0; - ctx = SPUFS_I(file->f_dentry->d_inode)->i_ctx; + ctx = SPUFS_I(file_inode(file))->i_ctx; if (ctx->flags & SPU_CREATE_NOSCHED) return 0; return fd + 1; @@ -137,7 +137,7 @@ static struct spu_context *coredump_next_context(int *fd) return NULL; *fd = n - 1; file = fcheck(*fd); - return SPUFS_I(file->f_dentry->d_inode)->i_ctx; + return SPUFS_I(file_inode(file))->i_ctx; } int spufs_coredump_extra_notes_size(void) diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index 0cfece4cf6e..68c57d38745 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -1852,7 +1852,7 @@ out: static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync) { - struct inode *inode = file->f_path.dentry->d_inode; + struct inode *inode = file_inode(file); int err = filemap_write_and_wait_range(inode->i_mapping, start, end); if (!err) { mutex_lock(&inode->i_mutex); @@ -2501,7 +2501,7 @@ static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n) static ssize_t spufs_switch_log_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) { - struct inode *inode = file->f_path.dentry->d_inode; + struct inode *inode = file_inode(file); struct spu_context *ctx = SPUFS_I(inode)->i_ctx; int error = 0, cnt = 0; @@ -2571,7 +2571,7 @@ static ssize_t spufs_switch_log_read(struct file *file, char __user *buf, static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait) { - struct inode *inode = 
file->f_path.dentry->d_inode; + struct inode *inode = file_inode(file); struct spu_context *ctx = SPUFS_I(inode)->i_ctx; unsigned int mask = 0; int rc; diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index dba1ce235da..3f3bb4cdbbe 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -199,37 +199,18 @@ static int spufs_fill_dir(struct dentry *dir, const struct spufs_tree_descr *files, umode_t mode, struct spu_context *ctx) { - struct dentry *dentry, *tmp; - int ret; - while (files->name && files->name[0]) { - ret = -ENOMEM; - dentry = d_alloc_name(dir, files->name); + int ret; + struct dentry *dentry = d_alloc_name(dir, files->name); if (!dentry) - goto out; + return -ENOMEM; ret = spufs_new_file(dir->d_sb, dentry, files->ops, files->mode & mode, files->size, ctx); if (ret) - goto out; + return ret; files++; } return 0; -out: - /* - * remove all children from dir. dir->inode is not set so don't - * just simply use spufs_prune_dir() and panic afterwards :) - * dput() looks like it will do the right thing: - * - dec parent's ref counter - * - remove child from parent's child list - * - free child's inode if possible - * - free child - */ - list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) { - dput(dentry); - } - - shrink_dcache_parent(dir); - return ret; } static int spufs_dir_close(struct inode *inode, struct file *file) @@ -269,10 +250,9 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags, struct inode *inode; struct spu_context *ctx; - ret = -ENOSPC; inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR); if (!inode) - goto out; + return -ENOSPC; if (dir->i_mode & S_ISGID) { inode->i_gid = dir->i_gid; @@ -280,40 +260,38 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags, } ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */ SPUFS_I(inode)->i_ctx = ctx; - if (!ctx) - goto out_iput; + if (!ctx) { + iput(inode); + return -ENOSPC; + } ctx->flags = flags; inode->i_op = &simple_dir_inode_operations; inode->i_fop = &simple_dir_operations; + + mutex_lock(&inode->i_mutex); + + dget(dentry); + inc_nlink(dir); + inc_nlink(inode); + + d_instantiate(dentry, inode); + if (flags & SPU_CREATE_NOSCHED) ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents, mode, ctx); else ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx); - if (ret) - goto out_free_ctx; - - if (spufs_get_sb_info(dir->i_sb)->debug) + if (!ret && spufs_get_sb_info(dir->i_sb)->debug) ret = spufs_fill_dir(dentry, spufs_dir_debug_contents, mode, ctx); if (ret) - goto out_free_ctx; + spufs_rmdir(dir, dentry); - d_instantiate(dentry, inode); - dget(dentry); - inc_nlink(dir); - inc_nlink(dentry->d_inode); - goto out; + mutex_unlock(&inode->i_mutex); -out_free_ctx: - spu_forget(ctx); - put_spu_context(ctx); -out_iput: - iput(inode); -out: return ret; } @@ -368,7 +346,7 @@ spufs_assert_affinity(unsigned int flags, struct spu_gang *gang, return ERR_PTR(-EINVAL); neighbor = get_spu_context( - SPUFS_I(filp->f_dentry->d_inode)->i_ctx); + SPUFS_I(file_inode(filp))->i_ctx); if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) && !list_is_last(&neighbor->aff_list, &gang->aff_list_head) && @@ -771,6 +749,7 @@ static struct file_system_type spufs_type = { .mount = spufs_mount, .kill_sb = kill_litter_super, }; +MODULE_ALIAS_FS("spufs"); static int __init spufs_init(void) { diff --git a/arch/powerpc/platforms/cell/spufs/sched.c 
b/arch/powerpc/platforms/cell/spufs/sched.c index 965d381abd7..49318385d4f 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -24,6 +24,7 @@ #include <linux/errno.h> #include <linux/sched.h> +#include <linux/sched/rt.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/slab.h> @@ -1094,7 +1095,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private) LOAD_INT(c), LOAD_FRAC(c), count_active_contexts(), atomic_read(&nr_spu_contexts), - current->nsproxy->pid_ns->last_pid); + task_active_pid_ns(current)->last_pid); return 0; } diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c index 5b7d8ffbf89..b045fdda484 100644 --- a/arch/powerpc/platforms/cell/spufs/syscalls.c +++ b/arch/powerpc/platforms/cell/spufs/syscalls.c @@ -47,7 +47,7 @@ static long do_spu_run(struct file *filp, if (filp->f_op != &spufs_context_fops) goto out; - i = SPUFS_I(filp->f_path.dentry->d_inode); + i = SPUFS_I(file_inode(filp)); ret = spufs_run_spu(i->i_ctx, &npc, &status); if (put_user(npc, unpc)) @@ -66,7 +66,7 @@ static long do_spu_create(const char __user *pathname, unsigned int flags, struct dentry *dentry; int ret; - dentry = user_path_create(AT_FDCWD, pathname, &path, 1); + dentry = user_path_create(AT_FDCWD, pathname, &path, LOOKUP_DIRECTORY); ret = PTR_ERR(dentry); if (!IS_ERR(dentry)) { ret = spufs_create(&path, dentry, flags, mode, neighbor); diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c index 83285c5a204..1b87e198faa 100644 --- a/arch/powerpc/platforms/chrp/pci.c +++ b/arch/powerpc/platforms/chrp/pci.c @@ -323,7 +323,7 @@ chrp_find_bridges(void) * ATA controller to be set to fully native mode or bad things * will happen. 
*/ -static void __devinit chrp_pci_fixup_winbond_ata(struct pci_dev *sl82c105) +static void chrp_pci_fixup_winbond_ata(struct pci_dev *sl82c105) { u8 progif; diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c index feab30bbae2..dead91b177b 100644 --- a/arch/powerpc/platforms/chrp/smp.c +++ b/arch/powerpc/platforms/chrp/smp.c @@ -30,7 +30,7 @@ #include <asm/mpic.h> #include <asm/rtas.h> -static int __devinit smp_chrp_kick_cpu(int nr) +static int smp_chrp_kick_cpu(int nr) { *(unsigned long *)KERNELBASE = nr; asm volatile("dcbf 0,%0"::"r"(KERNELBASE):"memory"); @@ -38,7 +38,7 @@ static int __devinit smp_chrp_kick_cpu(int nr) return 0; } -static void __devinit smp_chrp_setup_cpu(int cpu_nr) +static void smp_chrp_setup_cpu(int cpu_nr) { mpic_setup_this_cpu(); } diff --git a/arch/powerpc/platforms/fsl_uli1575.c b/arch/powerpc/platforms/fsl_uli1575.c index 64fde058e54..92ac9b52b32 100644 --- a/arch/powerpc/platforms/fsl_uli1575.c +++ b/arch/powerpc/platforms/fsl_uli1575.c @@ -59,7 +59,7 @@ static inline bool is_quirk_valid(void) } /* Bridge */ -static void __devinit early_uli5249(struct pci_dev *dev) +static void early_uli5249(struct pci_dev *dev) { unsigned char temp; @@ -82,7 +82,7 @@ static void __devinit early_uli5249(struct pci_dev *dev) } -static void __devinit quirk_uli1575(struct pci_dev *dev) +static void quirk_uli1575(struct pci_dev *dev) { int i; @@ -139,7 +139,7 @@ static void __devinit quirk_uli1575(struct pci_dev *dev) pci_write_config_byte(dev, 0x75, ULI_8259_IRQ15); } -static void __devinit quirk_final_uli1575(struct pci_dev *dev) +static void quirk_final_uli1575(struct pci_dev *dev) { /* Set i8259 interrupt trigger * IRQ 3: Level @@ -175,7 +175,7 @@ static void __devinit quirk_final_uli1575(struct pci_dev *dev) } /* SATA */ -static void __devinit quirk_uli5288(struct pci_dev *dev) +static void quirk_uli5288(struct pci_dev *dev) { unsigned char c; unsigned int d; @@ -200,7 +200,7 @@ static void __devinit quirk_uli5288(struct pci_dev *dev) } /* PATA */ -static void __devinit quirk_uli5229(struct pci_dev *dev) +static void quirk_uli5229(struct pci_dev *dev) { unsigned short temp; @@ -216,7 +216,7 @@ static void __devinit quirk_uli5229(struct pci_dev *dev) } /* We have to do a dummy read on the P2P for the RTC to work, WTF */ -static void __devinit quirk_final_uli5249(struct pci_dev *dev) +static void quirk_final_uli5249(struct pci_dev *dev) { int i; u8 *dummy; @@ -253,7 +253,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, 0x5249, quirk_final_uli5249); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, 0x1575, quirk_final_uli1575); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AL, 0x5229, quirk_uli5229); -static void __devinit hpcd_quirk_uli1575(struct pci_dev *dev) +static void hpcd_quirk_uli1575(struct pci_dev *dev) { u32 temp32; @@ -269,7 +269,7 @@ static void __devinit hpcd_quirk_uli1575(struct pci_dev *dev) pci_write_config_dword(dev, 0x90, (temp32 | 1<<22)); } -static void __devinit hpcd_quirk_uli5288(struct pci_dev *dev) +static void hpcd_quirk_uli5288(struct pci_dev *dev) { unsigned char c; @@ -295,7 +295,7 @@ static void __devinit hpcd_quirk_uli5288(struct pci_dev *dev) * IRQ14 is a sideband interrupt from IDE device to CPU and we use this * as the interrupt for IDE device. */ -static void __devinit hpcd_quirk_uli5229(struct pci_dev *dev) +static void hpcd_quirk_uli5229(struct pci_dev *dev) { unsigned char c; @@ -317,7 +317,7 @@ static void __devinit hpcd_quirk_uli5229(struct pci_dev *dev) * bug by re-assigning a correct irq to 5288. 
* */ -static void __devinit hpcd_final_uli5288(struct pci_dev *dev) +static void hpcd_final_uli5288(struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct device_node *hosenode = hose ? hose->dn : NULL; diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c index 465ee8f5c08..f7136aae8bb 100644 --- a/arch/powerpc/platforms/maple/pci.c +++ b/arch/powerpc/platforms/maple/pci.c @@ -543,7 +543,7 @@ static int __init maple_add_bridge(struct device_node *dev) } -void __devinit maple_pci_irq_fixup(struct pci_dev *dev) +void maple_pci_irq_fixup(struct pci_dev *dev) { DBG(" -> maple_pci_irq_fixup\n"); @@ -648,7 +648,7 @@ int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel) return irq; } -static void __devinit quirk_ipr_msi(struct pci_dev *dev) +static void quirk_ipr_msi(struct pci_dev *dev) { /* Something prevents MSIs from the IPR from working on Bimini, * and the driver has no smarts to recover. So disable MSI diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c index 95d00173029..890f30e70f9 100644 --- a/arch/powerpc/platforms/pasemi/cpufreq.c +++ b/arch/powerpc/platforms/pasemi/cpufreq.c @@ -236,6 +236,13 @@ out: static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy) { + /* + * We don't support CPU hotplug. Don't unmap after the system + * has already made it to a running state. + */ + if (system_state != SYSTEM_BOOTING) + return 0; + if (sdcasr_mapbase) iounmap(sdcasr_mapbase); if (sdcpwr_mapbase) diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c index 9886296e08d..0237ab782fb 100644 --- a/arch/powerpc/platforms/pasemi/gpio_mdio.c +++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c @@ -216,7 +216,7 @@ static int gpio_mdio_reset(struct mii_bus *bus) } -static int __devinit gpio_mdio_probe(struct platform_device *ofdev) +static int gpio_mdio_probe(struct platform_device *ofdev) { struct device *dev = &ofdev->dev; struct device_node *np = ofdev->dev.of_node; diff --git a/arch/powerpc/platforms/pasemi/pasemi.h b/arch/powerpc/platforms/pasemi/pasemi.h index b1e524f7489..ea65bf0eb89 100644 --- a/arch/powerpc/platforms/pasemi/pasemi.h +++ b/arch/powerpc/platforms/pasemi/pasemi.h @@ -3,8 +3,8 @@ extern unsigned long pas_get_boot_time(void); extern void pas_pci_init(void); -extern void __devinit pas_pci_irq_fixup(struct pci_dev *dev); -extern void __devinit pas_pci_dma_dev_setup(struct pci_dev *dev); +extern void pas_pci_irq_fixup(struct pci_dev *dev); +extern void pas_pci_dma_dev_setup(struct pci_dev *dev); extern void __iomem *pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset); diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index 2ed9212d7d5..8c54de6d8ec 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c @@ -76,7 +76,7 @@ static void pas_restart(char *cmd) static arch_spinlock_t timebase_lock; static unsigned long timebase; -static void __devinit pas_give_timebase(void) +static void pas_give_timebase(void) { unsigned long flags; @@ -94,7 +94,7 @@ static void __devinit pas_give_timebase(void) local_irq_restore(flags); } -static void __devinit pas_take_timebase(void) +static void pas_take_timebase(void) { while (!timebase) smp_rmb(); diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c index 64171198535..311b804353b 100644 --- a/arch/powerpc/platforms/powermac/cpufreq_32.c +++ 
b/arch/powerpc/platforms/powermac/cpufreq_32.c @@ -55,6 +55,7 @@ static unsigned int low_freq; static unsigned int hi_freq; static unsigned int cur_freq; static unsigned int sleep_freq; +static unsigned long transition_latency; /* * Different models uses different mechanisms to switch the frequency @@ -403,7 +404,7 @@ static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy) if (policy->cpu != 0) return -ENODEV; - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + policy->cpuinfo.transition_latency = transition_latency; policy->cur = cur_freq; cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu); @@ -658,12 +659,14 @@ static int __init pmac_cpufreq_setup(void) if (!value) goto out; cur_freq = (*value) / 1000; + transition_latency = CPUFREQ_ETERNAL; /* Check for 7447A based MacRISC3 */ if (of_machine_is_compatible("MacRISC3") && of_get_property(cpunode, "dynamic-power-step", NULL) && PVR_VER(mfspr(SPRN_PVR)) == 0x8003) { pmac_cpufreq_init_7447A(cpunode); + transition_latency = 8000000; /* Check for other MacRISC3 machines */ } else if (of_machine_is_compatible("PowerBook3,4") || of_machine_is_compatible("PowerBook3,5") || diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index 43bbe1bda93..2b8af75abc2 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c @@ -561,7 +561,7 @@ static struct pci_ops u4_pcie_pci_ops = .write = u4_pcie_write_config, }; -static void __devinit pmac_pci_fixup_u4_of_node(struct pci_dev *dev) +static void pmac_pci_fixup_u4_of_node(struct pci_dev *dev) { /* Apple's device-tree "hides" the root complex virtual P2P bridge * on U4. However, Linux sees it, causing the PCI <-> OF matching @@ -988,7 +988,7 @@ static int __init pmac_add_bridge(struct device_node *dev) return 0; } -void __devinit pmac_pci_irq_fixup(struct pci_dev *dev) +void pmac_pci_irq_fixup(struct pci_dev *dev) { #ifdef CONFIG_PPC32 /* Fixup interrupt for the modem/ethernet combo controller. @@ -1138,7 +1138,7 @@ int pmac_pci_enable_device_hook(struct pci_dev *dev) return 0; } -void __devinit pmac_pci_fixup_ohci(struct pci_dev *dev) +void pmac_pci_fixup_ohci(struct pci_dev *dev) { struct device_node *node = pci_device_to_OF_node(dev); diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c index b0c3777528a..d588e48dff7 100644 --- a/arch/powerpc/platforms/powermac/pfunc_core.c +++ b/arch/powerpc/platforms/powermac/pfunc_core.c @@ -686,7 +686,7 @@ static int pmf_add_functions(struct pmf_device *dev, void *driverdata) int count = 0; for (pp = dev->node->properties; pp != 0; pp = pp->next) { - char *name; + const char *name; if (strncmp(pp->name, PP_PREFIX, plen) != 0) continue; name = pp->name + plen; diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index c4e630576ff..31036b56670 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c @@ -529,7 +529,7 @@ static int __init pmac_pic_probe_mpic(void) void __init pmac_pic_init(void) { /* We configure the OF parsing based on our oldworld vs. newworld - * platform type and wether we were booted by BootX. + * platform type and whether we were booted by BootX. 
*/ #ifdef CONFIG_PPC32 if (!pmac_newworld) diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index b4ddaa3fbb2..bdb738a69e4 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -484,7 +484,7 @@ static void smp_core99_give_timebase(void) } -static void __devinit smp_core99_take_timebase(void) +static void smp_core99_take_timebase(void) { unsigned long flags; @@ -669,7 +669,7 @@ static void smp_core99_gpio_tb_freeze(int freeze) volatile static long int core99_l2_cache; volatile static long int core99_l3_cache; -static void __devinit core99_init_caches(int cpu) +static void core99_init_caches(int cpu) { #ifndef CONFIG_PPC64 if (!cpu_has_feature(CPU_FTR_L2CR)) @@ -801,7 +801,7 @@ static int __init smp_core99_probe(void) return ncpus; } -static int __devinit smp_core99_kick_cpu(int nr) +static int smp_core99_kick_cpu(int nr) { unsigned int save_vector; unsigned long target, flags; @@ -844,7 +844,7 @@ static int __devinit smp_core99_kick_cpu(int nr) return 0; } -static void __devinit smp_core99_setup_cpu(int cpu_nr) +static void smp_core99_setup_cpu(int cpu_nr) { /* Setup L2/L3 */ if (cpu_nr != 0) diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 471aa3ccd9f..8e90e8906df 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -34,24 +34,12 @@ #include "powernv.h" #include "pci.h" -static int __pe_printk(const char *level, const struct pnv_ioda_pe *pe, - struct va_format *vaf) -{ - char pfix[32]; - - if (pe->pdev) - strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix)); - else - sprintf(pfix, "%04x:%02x ", - pci_domain_nr(pe->pbus), pe->pbus->number); - return printk("pci %s%s: [PE# %.3d] %pV", level, pfix, pe->pe_number, vaf); -} - #define define_pe_printk_level(func, kern_level) \ static int func(const struct pnv_ioda_pe *pe, const char *fmt, ...) \ { \ struct va_format vaf; \ va_list args; \ + char pfix[32]; \ int r; \ \ va_start(args, fmt); \ @@ -59,7 +47,16 @@ static int func(const struct pnv_ioda_pe *pe, const char *fmt, ...) 
\ vaf.fmt = fmt; \ vaf.va = &args; \ \ - r = __pe_printk(kern_level, pe, &vaf); \ + if (pe->pdev) \ + strlcpy(pfix, dev_name(&pe->pdev->dev), \ + sizeof(pfix)); \ + else \ + sprintf(pfix, "%04x:%02x ", \ + pci_domain_nr(pe->pbus), \ + pe->pbus->number); \ + r = printk(kern_level "pci %s: [PE# %.3d] %pV", \ + pfix, pe->pe_number, &vaf); \ + \ va_end(args); \ \ return r; \ @@ -79,7 +76,7 @@ static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev) return PCI_DN(np); } -static int __devinit pnv_ioda_alloc_pe(struct pnv_phb *phb) +static int pnv_ioda_alloc_pe(struct pnv_phb *phb) { unsigned long pe; @@ -94,7 +91,7 @@ static int __devinit pnv_ioda_alloc_pe(struct pnv_phb *phb) return pe; } -static void __devinit pnv_ioda_free_pe(struct pnv_phb *phb, int pe) +static void pnv_ioda_free_pe(struct pnv_phb *phb, int pe) { WARN_ON(phb->ioda.pe_array[pe].pdev); @@ -106,7 +103,7 @@ static void __devinit pnv_ioda_free_pe(struct pnv_phb *phb, int pe) * but in the meantime, we need to protect them to avoid warnings */ #ifdef CONFIG_PCI_MSI -static struct pnv_ioda_pe * __devinit pnv_ioda_get_pe(struct pci_dev *dev) +static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct pnv_phb *phb = hose->private_data; @@ -120,8 +117,7 @@ static struct pnv_ioda_pe * __devinit pnv_ioda_get_pe(struct pci_dev *dev) } #endif /* CONFIG_PCI_MSI */ -static int __devinit pnv_ioda_configure_pe(struct pnv_phb *phb, - struct pnv_ioda_pe *pe) +static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) { struct pci_dev *parent; uint8_t bcomp, dcomp, fcomp; @@ -210,8 +206,8 @@ static int __devinit pnv_ioda_configure_pe(struct pnv_phb *phb, return 0; } -static void __devinit pnv_ioda_link_pe_by_weight(struct pnv_phb *phb, - struct pnv_ioda_pe *pe) +static void pnv_ioda_link_pe_by_weight(struct pnv_phb *phb, + struct pnv_ioda_pe *pe) { struct pnv_ioda_pe *lpe; @@ -249,7 +245,7 @@ static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev) } #if 0 -static struct pnv_ioda_pe * __devinit pnv_ioda_setup_dev_PE(struct pci_dev *dev) +static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct pnv_phb *phb = hose->private_data; @@ -346,7 +342,7 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe) * subordinate PCI devices and buses. The second type of PE is normally * orgiriated by PCIe-to-PCI bridge or PLX switch downstream ports. */ -static void __devinit pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all) +static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all) { struct pci_controller *hose = pci_bus_to_host(bus); struct pnv_phb *phb = hose->private_data; @@ -402,7 +398,7 @@ static void __devinit pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all) pnv_ioda_link_pe_by_weight(phb, pe); } -static void __devinit pnv_ioda_setup_PEs(struct pci_bus *bus) +static void pnv_ioda_setup_PEs(struct pci_bus *bus) { struct pci_dev *dev; @@ -426,7 +422,7 @@ static void __devinit pnv_ioda_setup_PEs(struct pci_bus *bus) * port to PE# here. The game rule here is expected to be changed * as soon as we can detected PLX bridge correctly. 
*/ -static void __devinit pnv_pci_ioda_setup_PEs(void) +static void pnv_pci_ioda_setup_PEs(void) { struct pci_controller *hose, *tmp; @@ -435,14 +431,12 @@ static void __devinit pnv_pci_ioda_setup_PEs(void) } } -static void __devinit pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, - struct pci_dev *dev) +static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *dev) { /* We delay DMA setup after we have assigned all PE# */ } -static void __devinit pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, - struct pci_bus *bus) +static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) { struct pci_dev *dev; @@ -453,10 +447,9 @@ static void __devinit pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, } } -static void __devinit pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb, - struct pnv_ioda_pe *pe, - unsigned int base, - unsigned int segs) +static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb, + struct pnv_ioda_pe *pe, unsigned int base, + unsigned int segs) { struct page *tce_mem = NULL; @@ -544,7 +537,7 @@ static void __devinit pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb, __free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs)); } -static void __devinit pnv_ioda_setup_dma(struct pnv_phb *phb) +static void pnv_ioda_setup_dma(struct pnv_phb *phb) { struct pci_controller *hose = phb->hose; unsigned int residual, remaining, segs, tw, base; @@ -687,8 +680,8 @@ static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { } * to bottom style. So the the I/O or MMIO segment assigned to * parent PE could be overrided by its child PEs if necessary. */ -static void __devinit pnv_ioda_setup_pe_seg(struct pci_controller *hose, - struct pnv_ioda_pe *pe) +static void pnv_ioda_setup_pe_seg(struct pci_controller *hose, + struct pnv_ioda_pe *pe) { struct pnv_phb *phb = hose->private_data; struct pci_bus_region region; @@ -756,7 +749,7 @@ static void __devinit pnv_ioda_setup_pe_seg(struct pci_controller *hose, } } -static void __devinit pnv_pci_ioda_setup_seg(void) +static void pnv_pci_ioda_setup_seg(void) { struct pci_controller *tmp, *hose; struct pnv_phb *phb; @@ -770,7 +763,7 @@ static void __devinit pnv_pci_ioda_setup_seg(void) } } -static void __devinit pnv_pci_ioda_setup_DMA(void) +static void pnv_pci_ioda_setup_DMA(void) { struct pci_controller *hose, *tmp; struct pnv_phb *phb; @@ -784,7 +777,7 @@ static void __devinit pnv_pci_ioda_setup_DMA(void) } } -static void __devinit pnv_pci_ioda_fixup(void) +static void pnv_pci_ioda_fixup(void) { pnv_pci_ioda_setup_PEs(); pnv_pci_ioda_setup_seg(); @@ -832,7 +825,7 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus, /* Prevent enabling devices for which we couldn't properly * assign a PE */ -static int __devinit pnv_pci_enable_device_hook(struct pci_dev *dev) +static int pnv_pci_enable_device_hook(struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct pnv_phb *phb = hose->private_data; diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c index 6b4bef4e9d8..7db8771a40f 100644 --- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c +++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c @@ -84,8 +84,8 @@ static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb) static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb) { } #endif /* CONFIG_PCI_MSI */ -static void __devinit pnv_pci_p5ioc2_dma_dev_setup(struct pnv_phb *phb, - struct pci_dev *pdev) +static void pnv_pci_p5ioc2_dma_dev_setup(struct pnv_phb *phb, + struct pci_dev *pdev) { if 
(phb->p5ioc2.iommu_table.it_map == NULL) iommu_init_table(&phb->p5ioc2.iommu_table, phb->hose->node); diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index c01688a1a74..b8b8e0bd989 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -464,8 +464,7 @@ void pnv_pci_setup_iommu_table(struct iommu_table *tbl, tbl->it_type = TCE_PCI; } -static struct iommu_table * __devinit -pnv_pci_setup_bml_iommu(struct pci_controller *hose) +static struct iommu_table *pnv_pci_setup_bml_iommu(struct pci_controller *hose) { struct iommu_table *tbl; const __be64 *basep, *swinvp; @@ -496,8 +495,8 @@ pnv_pci_setup_bml_iommu(struct pci_controller *hose) return tbl; } -static void __devinit pnv_pci_dma_fallback_setup(struct pci_controller *hose, - struct pci_dev *pdev) +static void pnv_pci_dma_fallback_setup(struct pci_controller *hose, + struct pci_dev *pdev) { struct device_node *np = pci_bus_to_OF_node(hose->bus); struct pci_dn *pdn; @@ -512,7 +511,7 @@ static void __devinit pnv_pci_dma_fallback_setup(struct pci_controller *hose, set_iommu_table_base(&pdev->dev, pdn->iommu_table); } -static void __devinit pnv_pci_dma_dev_setup(struct pci_dev *pdev) +static void pnv_pci_dma_dev_setup(struct pci_dev *pdev) { struct pci_controller *hose = pci_bus_to_host(pdev->bus); struct pnv_phb *phb = hose->private_data; @@ -527,7 +526,7 @@ static void __devinit pnv_pci_dma_dev_setup(struct pci_dev *pdev) } /* Fixup wrong class code in p7ioc root complex */ -static void __devinit pnv_p7ioc_rc_quirk(struct pci_dev *dev) +static void pnv_p7ioc_rc_quirk(struct pci_dev *dev) { dev->class = PCI_CLASS_BRIDGE_PCI << 8; } diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 7698b6e13c5..0bdc735db16 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -62,7 +62,7 @@ static int pnv_smp_cpu_bootable(unsigned int nr) return 1; } -int __devinit pnv_smp_kick_cpu(int nr) +int pnv_smp_kick_cpu(int nr) { unsigned int pcpu = get_hard_smp_processor_id(nr); unsigned long start_here = __pa(*((unsigned long *) diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig index 46b7f023252..e87c1947397 100644 --- a/arch/powerpc/platforms/ps3/Kconfig +++ b/arch/powerpc/platforms/ps3/Kconfig @@ -48,7 +48,7 @@ config PS3_HTAB_SIZE system will have optimal runtime performance. 
config PS3_DYNAMIC_DMA - depends on PPC_PS3 && EXPERIMENTAL + depends on PPC_PS3 bool "PS3 Platform dynamic DMA page table management" default n help diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c index d00d7b0a3bd..6cc58201db8 100644 --- a/arch/powerpc/platforms/ps3/htab.c +++ b/arch/powerpc/platforms/ps3/htab.c @@ -27,6 +27,7 @@ #include <asm/lv1call.h> #include <asm/ps3fb.h> +#define PS3_VERBOSE_RESULT #include "platform.h" /** @@ -75,8 +76,9 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn, if (result) { /* all entries bolted !*/ - pr_info("%s:result=%d vpn=%lx pa=%lx ix=%lx v=%llx r=%llx\n", - __func__, result, vpn, pa, hpte_group, hpte_v, hpte_r); + pr_info("%s:result=%s vpn=%lx pa=%lx ix=%lx v=%llx r=%llx\n", + __func__, ps3_result(result), vpn, pa, hpte_group, + hpte_v, hpte_r); BUG(); } @@ -125,8 +127,8 @@ static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp, &hpte_rs); if (result) { - pr_info("%s: res=%d read vpn=%lx slot=%lx psize=%d\n", - __func__, result, vpn, slot, psize); + pr_info("%s: result=%s read vpn=%lx slot=%lx psize=%d\n", + __func__, ps3_result(result), vpn, slot, psize); BUG(); } @@ -170,8 +172,8 @@ static void ps3_hpte_invalidate(unsigned long slot, unsigned long vpn, result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, slot, 0, 0); if (result) { - pr_info("%s: res=%d vpn=%lx slot=%lx psize=%d\n", - __func__, result, vpn, slot, psize); + pr_info("%s: result=%s vpn=%lx slot=%lx psize=%d\n", + __func__, ps3_result(result), vpn, slot, psize); BUG(); } diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c index 56d26bc4fd4..09787139834 100644 --- a/arch/powerpc/platforms/ps3/os-area.c +++ b/arch/powerpc/platforms/ps3/os-area.c @@ -280,13 +280,13 @@ static void os_area_set_property(struct device_node *node, if (tmp) { pr_debug("%s:%d found %s\n", __func__, __LINE__, prop->name); - prom_remove_property(node, tmp); + of_remove_property(node, tmp); } - result = prom_add_property(node, prop); + result = of_add_property(node, prop); if (result) - pr_debug("%s:%d prom_set_property failed\n", __func__, + pr_debug("%s:%d of_set_property failed\n", __func__, __LINE__); } diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c index 9b47ba7a5de..bfccdc7cb85 100644 --- a/arch/powerpc/platforms/ps3/repository.c +++ b/arch/powerpc/platforms/ps3/repository.c @@ -437,7 +437,7 @@ found_dev: return 0; } -int __devinit ps3_repository_find_devices(enum ps3_bus_type bus_type, +int ps3_repository_find_devices(enum ps3_bus_type bus_type, int (*callback)(const struct ps3_repository_device *repo)) { int result = 0; diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index 837cf49357e..9a0941bc4d3 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -17,6 +17,7 @@ config PPC_PSERIES select PPC_NATIVE select PPC_PCI_CHOICE if EXPERT select ZLIB_DEFLATE + select PPC_DOORBELL default y config PPC_SPLPAR diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index 890622b87c8..53866e537a9 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile @@ -1,4 +1,4 @@ -ccflags-$(CONFIG_PPC64) := -mno-minimal-toc +ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) ccflags-$(CONFIG_PPC_PSERIES_DEBUG) += -DDEBUG obj-y := lpar.o hvCall.o nvram.o reconfig.o \ diff --git 
a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 0f1b706506e..a1a7b9a67ff 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -13,17 +13,16 @@ #include <linux/kernel.h> #include <linux/kref.h> #include <linux/notifier.h> -#include <linux/proc_fs.h> #include <linux/spinlock.h> #include <linux/cpu.h> #include <linux/slab.h> +#include <linux/of.h> #include "offline_states.h" #include <asm/prom.h> #include <asm/machdep.h> #include <asm/uaccess.h> #include <asm/rtas.h> -#include <asm/pSeries_reconfig.h> struct cc_workarea { u32 drc_index; @@ -255,9 +254,6 @@ static struct device_node *derive_parent(const char *path) int dlpar_attach_node(struct device_node *dn) { -#ifdef CONFIG_PROC_DEVICETREE - struct proc_dir_entry *ent; -#endif int rc; of_node_set_flag(dn, OF_DYNAMIC); @@ -266,44 +262,26 @@ int dlpar_attach_node(struct device_node *dn) if (!dn->parent) return -ENOMEM; - rc = pSeries_reconfig_notify(PSERIES_RECONFIG_ADD, dn); + rc = of_attach_node(dn); if (rc) { printk(KERN_ERR "Failed to add device node %s\n", dn->full_name); return rc; } - of_attach_node(dn); - -#ifdef CONFIG_PROC_DEVICETREE - ent = proc_mkdir(strrchr(dn->full_name, '/') + 1, dn->parent->pde); - if (ent) - proc_device_tree_add_node(dn, ent); -#endif - of_node_put(dn->parent); return 0; } int dlpar_detach_node(struct device_node *dn) { -#ifdef CONFIG_PROC_DEVICETREE - struct device_node *parent = dn->parent; - struct property *prop = dn->properties; - - while (prop) { - remove_proc_entry(prop->name, dn->pde); - prop = prop->next; - } + int rc; - if (dn->pde) - remove_proc_entry(dn->pde->name, parent->pde); -#endif + rc = of_detach_node(dn); + if (rc) + return rc; - pSeries_reconfig_notify(PSERIES_RECONFIG_REMOVE, dn); - of_detach_node(dn); of_node_put(dn); /* Must decrement the refcount */ - return 0; } diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c index a7648543c59..0cc0ac07a55 100644 --- a/arch/powerpc/platforms/pseries/dtl.c +++ b/arch/powerpc/platforms/pseries/dtl.c @@ -57,7 +57,7 @@ static u8 dtl_event_mask = 0x7; */ static int dtl_buf_entries = N_DISPATCH_LOG; -#ifdef CONFIG_VIRT_CPU_ACCOUNTING +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE struct dtl_ring { u64 write_index; struct dtl_entry *write_ptr; @@ -142,7 +142,7 @@ static u64 dtl_current_index(struct dtl *dtl) return per_cpu(dtl_rings, dtl->cpu).write_index; } -#else /* CONFIG_VIRT_CPU_ACCOUNTING */ +#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ static int dtl_start(struct dtl *dtl) { @@ -188,7 +188,7 @@ static u64 dtl_current_index(struct dtl *dtl) { return lppaca_of(dtl->cpu).dtl_idx; } -#endif /* CONFIG_VIRT_CPU_ACCOUNTING */ +#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ static int dtl_enable(struct dtl *dtl) { diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c index 9a04322b173..6b73d6c44f5 100644 --- a/arch/powerpc/platforms/pseries/eeh.c +++ b/arch/powerpc/platforms/pseries/eeh.c @@ -788,7 +788,6 @@ static void eeh_add_device_late(struct pci_dev *dev) dev->dev.archdata.edev = edev; eeh_addr_cache_insert_dev(dev); - eeh_sysfs_add_device(dev); } /** @@ -815,6 +814,29 @@ void eeh_add_device_tree_late(struct pci_bus *bus) EXPORT_SYMBOL_GPL(eeh_add_device_tree_late); /** + * eeh_add_sysfs_files - Add EEH sysfs files for the indicated PCI bus + * @bus: PCI bus + * + * This routine must be used to add EEH sysfs files for PCI + * devices which are attached to the indicated PCI bus. 
The PCI bus + * is added after system boot through hotplug or dlpar. + */ +void eeh_add_sysfs_files(struct pci_bus *bus) +{ + struct pci_dev *dev; + + list_for_each_entry(dev, &bus->devices, bus_list) { + eeh_sysfs_add_device(dev); + if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { + struct pci_bus *subbus = dev->subordinate; + if (subbus) + eeh_add_sysfs_files(subbus); + } + } +} +EXPORT_SYMBOL_GPL(eeh_add_sysfs_files); + +/** * eeh_remove_device - Undo EEH setup for the indicated pci device * @dev: pci device to be removed * @purge_pe: remove the PE or not diff --git a/arch/powerpc/platforms/pseries/eeh_dev.c b/arch/powerpc/platforms/pseries/eeh_dev.c index 66442341d3a..1efa28f5fc5 100644 --- a/arch/powerpc/platforms/pseries/eeh_dev.c +++ b/arch/powerpc/platforms/pseries/eeh_dev.c @@ -49,7 +49,7 @@ * It will create EEH device according to the given OF node. The function * might be called by PCI emunation, DR, PHB hotplug. */ -void * __devinit eeh_dev_init(struct device_node *dn, void *data) +void *eeh_dev_init(struct device_node *dn, void *data) { struct pci_controller *phb = data; struct eeh_dev *edev; @@ -77,7 +77,7 @@ void * __devinit eeh_dev_init(struct device_node *dn, void *data) * Scan the PHB OF node and its child association, then create the * EEH devices accordingly */ -void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb) +void eeh_dev_phb_init_dynamic(struct pci_controller *phb) { struct device_node *dn = phb->dn; diff --git a/arch/powerpc/platforms/pseries/eeh_pe.c b/arch/powerpc/platforms/pseries/eeh_pe.c index 797cd181dc3..fe43d1aa2cf 100644 --- a/arch/powerpc/platforms/pseries/eeh_pe.c +++ b/arch/powerpc/platforms/pseries/eeh_pe.c @@ -66,7 +66,7 @@ static struct eeh_pe *eeh_pe_alloc(struct pci_controller *phb, int type) * The function should be called while the PHB is detected during * system boot or PCI hotplug in order to create PHB PE. */ -int __devinit eeh_phb_pe_create(struct pci_controller *phb) +int eeh_phb_pe_create(struct pci_controller *phb) { struct eeh_pe *pe; @@ -449,7 +449,7 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe) if (list_empty(&pe->edevs)) { cnt = 0; list_for_each_entry(child, &pe->child_list, child) { - if (!(pe->type & EEH_PE_INVALID)) { + if (!(child->type & EEH_PE_INVALID)) { cnt++; break; } diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c index 0b0eff0cce3..aa3693f7fb2 100644 --- a/arch/powerpc/platforms/pseries/firmware.c +++ b/arch/powerpc/platforms/pseries/firmware.c @@ -33,6 +33,11 @@ typedef struct { char * name; } firmware_feature_t; +/* + * The names in this table match names in rtas/ibm,hypertas-functions. If the + * entry ends in a '*', only upto the '*' is matched. Otherwise the entire + * string must match. 
+ */ static __initdata firmware_feature_t firmware_features_table[FIRMWARE_MAX_FEATURES] = { {FW_FEATURE_PFT, "hcall-pft"}, @@ -56,6 +61,8 @@ firmware_features_table[FIRMWARE_MAX_FEATURES] = { {FW_FEATURE_MULTITCE, "hcall-multi-tce"}, {FW_FEATURE_SPLPAR, "hcall-splpar"}, {FW_FEATURE_VPHN, "hcall-vphn"}, + {FW_FEATURE_SET_MODE, "hcall-set-mode"}, + {FW_FEATURE_BEST_ENERGY, "hcall-best-energy-1*"}, }; /* Build up the firmware features bitmask using the contents of @@ -71,9 +78,20 @@ void __init fw_feature_init(const char *hypertas, unsigned long len) for (s = hypertas; s < hypertas + len; s += strlen(s) + 1) { for (i = 0; i < FIRMWARE_MAX_FEATURES; i++) { + const char *name = firmware_features_table[i].name; + size_t size; /* check value against table of strings */ - if (!firmware_features_table[i].name || - strcmp(firmware_features_table[i].name, s)) + if (!name) + continue; + /* + * If there is a '*' at the end of name, only check + * upto there + */ + size = strlen(name); + if (size && name[size - 1] == '*') { + if (strncmp(name, s, size - 1)) + continue; + } else if (strcmp(name, s)) continue; /* we have a match */ diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 64c97d8ac0c..217ca5c75b2 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -23,12 +23,12 @@ #include <linux/delay.h> #include <linux/sched.h> /* for idle_task_exit */ #include <linux/cpu.h> +#include <linux/of.h> #include <asm/prom.h> #include <asm/rtas.h> #include <asm/firmware.h> #include <asm/machdep.h> #include <asm/vdso_datapage.h> -#include <asm/pSeries_reconfig.h> #include <asm/xics.h> #include "plpar_wrappers.h" #include "offline_states.h" @@ -127,9 +127,16 @@ static void pseries_mach_cpu_die(void) get_lppaca()->donate_dedicated_cpu = 1; while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) { + while (!prep_irq_for_idle()) { + local_irq_enable(); + local_irq_disable(); + } + extended_cede_processor(cede_latency_hint); } + local_irq_disable(); + if (!get_lppaca()->shared_proc) get_lppaca()->donate_dedicated_cpu = 0; get_lppaca()->idle = 0; @@ -137,6 +144,7 @@ static void pseries_mach_cpu_die(void) if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) { unregister_slb_shadow(hwcpu); + hard_irq_disable(); /* * Call to start_secondary_resume() will not return. 
* Kernel stack will be reset and start_secondary() @@ -333,10 +341,10 @@ static int pseries_smp_notifier(struct notifier_block *nb, int err = 0; switch (action) { - case PSERIES_RECONFIG_ADD: + case OF_RECONFIG_ATTACH_NODE: err = pseries_add_processor(node); break; - case PSERIES_RECONFIG_REMOVE: + case OF_RECONFIG_DETACH_NODE: pseries_remove_processor(node); break; } @@ -399,7 +407,7 @@ static int __init pseries_cpu_hotplug_init(void) /* Processors can be added/removed only on LPAR */ if (firmware_has_feature(FW_FEATURE_LPAR)) { - pSeries_reconfig_notifier_register(&pseries_smp_nb); + of_reconfig_notifier_register(&pseries_smp_nb); cpu_maps_update_begin(); if (cede_offline_enabled && parse_cede_parameters() == 0) { default_offline_state = CPU_STATE_INACTIVE; diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index ecdb0a6b317..2372c609fa2 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -16,7 +16,6 @@ #include <asm/firmware.h> #include <asm/machdep.h> -#include <asm/pSeries_reconfig.h> #include <asm/sparsemem.h> static unsigned long get_memblock_size(void) @@ -187,42 +186,69 @@ static int pseries_add_memory(struct device_node *np) return (ret < 0) ? -EINVAL : 0; } -static int pseries_drconf_memory(unsigned long *base, unsigned int action) +static int pseries_update_drconf_memory(struct of_prop_reconfig *pr) { + struct of_drconf_cell *new_drmem, *old_drmem; unsigned long memblock_size; - int rc; + u32 entries; + u32 *p; + int i, rc = -EINVAL; memblock_size = get_memblock_size(); if (!memblock_size) return -EINVAL; - if (action == PSERIES_DRCONF_MEM_ADD) { - rc = memblock_add(*base, memblock_size); - rc = (rc < 0) ? -EINVAL : 0; - } else if (action == PSERIES_DRCONF_MEM_REMOVE) { - rc = pseries_remove_memblock(*base, memblock_size); - } else { - rc = -EINVAL; + p = (u32 *)of_get_property(pr->dn, "ibm,dynamic-memory", NULL); + if (!p) + return -EINVAL; + + /* The first int of the property is the number of lmb's described + * by the property. This is followed by an array of of_drconf_cell + * entries. Get the niumber of entries and skip to the array of + * of_drconf_cell's. + */ + entries = *p++; + old_drmem = (struct of_drconf_cell *)p; + + p = (u32 *)pr->prop->value; + p++; + new_drmem = (struct of_drconf_cell *)p; + + for (i = 0; i < entries; i++) { + if ((old_drmem[i].flags & DRCONF_MEM_ASSIGNED) && + (!(new_drmem[i].flags & DRCONF_MEM_ASSIGNED))) { + rc = pseries_remove_memblock(old_drmem[i].base_addr, + memblock_size); + break; + } else if ((!(old_drmem[i].flags & DRCONF_MEM_ASSIGNED)) && + (new_drmem[i].flags & DRCONF_MEM_ASSIGNED)) { + rc = memblock_add(old_drmem[i].base_addr, + memblock_size); + rc = (rc < 0) ? 
-EINVAL : 0; + break; + } } return rc; } static int pseries_memory_notifier(struct notifier_block *nb, - unsigned long action, void *node) + unsigned long action, void *node) { + struct of_prop_reconfig *pr; int err = 0; switch (action) { - case PSERIES_RECONFIG_ADD: + case OF_RECONFIG_ATTACH_NODE: err = pseries_add_memory(node); break; - case PSERIES_RECONFIG_REMOVE: + case OF_RECONFIG_DETACH_NODE: err = pseries_remove_memory(node); break; - case PSERIES_DRCONF_MEM_ADD: - case PSERIES_DRCONF_MEM_REMOVE: - err = pseries_drconf_memory(node, action); + case OF_RECONFIG_UPDATE_PROPERTY: + pr = (struct of_prop_reconfig *)node; + if (!strcmp(pr->prop->name, "ibm,dynamic-memory")) + err = pseries_update_drconf_memory(pr); break; } return notifier_from_errno(err); @@ -235,7 +261,7 @@ static struct notifier_block pseries_mem_nb = { static int __init pseries_memory_hotplug_init(void) { if (firmware_has_feature(FW_FEATURE_LPAR)) - pSeries_reconfig_notifier_register(&pseries_mem_nb); + of_reconfig_notifier_register(&pseries_mem_nb); return 0; } diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c index c9311cfdfca..cf4e7736e4f 100644 --- a/arch/powerpc/platforms/pseries/hvCall_inst.c +++ b/arch/powerpc/platforms/pseries/hvCall_inst.c @@ -86,7 +86,7 @@ static int hcall_inst_seq_open(struct inode *inode, struct file *file) rc = seq_open(file, &hcall_inst_seq_ops); seq = file->private_data; - seq->private = file->f_path.dentry->d_inode->i_private; + seq->private = file_inode(file)->i_private; return rc; } diff --git a/arch/powerpc/platforms/pseries/hvcserver.c b/arch/powerpc/platforms/pseries/hvcserver.c index fcf4b4cbeaf..4557e91626c 100644 --- a/arch/powerpc/platforms/pseries/hvcserver.c +++ b/arch/powerpc/platforms/pseries/hvcserver.c @@ -23,6 +23,7 @@ #include <linux/list.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/string.h> #include <asm/hvcall.h> #include <asm/hvcserver.h> @@ -188,9 +189,9 @@ int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head, = (unsigned int)last_p_partition_ID; /* copy the Null-term char too */ - strncpy(&next_partner_info->location_code[0], + strlcpy(&next_partner_info->location_code[0], (char *)&pi_buff[2], - strlen((char *)&pi_buff[2]) + 1); + sizeof(next_partner_info->location_code)); list_add_tail(&(next_partner_info->node), head); next_partner_info = NULL; diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 6153eea27ce..1b2a174e7c5 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -36,13 +36,13 @@ #include <linux/dma-mapping.h> #include <linux/crash_dump.h> #include <linux/memory.h> +#include <linux/of.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/rtas.h> #include <asm/iommu.h> #include <asm/pci-bridge.h> #include <asm/machdep.h> -#include <asm/pSeries_reconfig.h> #include <asm/firmware.h> #include <asm/tce.h> #include <asm/ppc-pci.h> @@ -382,6 +382,7 @@ static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn, rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn), dma_offset, 0, limit); + next += limit * tce_size; num_tce -= limit; } while (num_tce > 0 && !rc); @@ -760,7 +761,7 @@ static void remove_ddw(struct device_node *np) __remove_ddw(np, ddw_avail, liobn); delprop: - ret = prom_remove_property(np, win64); + ret = of_remove_property(np, win64); if (ret) pr_warning("%s: failed to remove direct window property: %d\n", np->full_name, ret); @@ -786,33 
+787,68 @@ static u64 find_existing_ddw(struct device_node *pdn) return dma_addr; } +static void __restore_default_window(struct eeh_dev *edev, + u32 ddw_restore_token) +{ + u32 cfg_addr; + u64 buid; + int ret; + + /* + * Get the config address and phb buid of the PE window. + * Rely on eeh to retrieve this for us. + * Retrieve them from the pci device, not the node with the + * dma-window property + */ + cfg_addr = edev->config_addr; + if (edev->pe_config_addr) + cfg_addr = edev->pe_config_addr; + buid = edev->phb->buid; + + do { + ret = rtas_call(ddw_restore_token, 3, 1, NULL, cfg_addr, + BUID_HI(buid), BUID_LO(buid)); + } while (rtas_busy_delay(ret)); + pr_info("ibm,reset-pe-dma-windows(%x) %x %x %x returned %d\n", + ddw_restore_token, cfg_addr, BUID_HI(buid), BUID_LO(buid), ret); +} + static int find_existing_ddw_windows(void) { - int len; struct device_node *pdn; - struct direct_window *window; const struct dynamic_dma_window_prop *direct64; + const u32 *ddw_extensions; if (!firmware_has_feature(FW_FEATURE_LPAR)) return 0; for_each_node_with_property(pdn, DIRECT64_PROPNAME) { - direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len); + direct64 = of_get_property(pdn, DIRECT64_PROPNAME, NULL); if (!direct64) continue; - window = kzalloc(sizeof(*window), GFP_KERNEL); - if (!window || len < sizeof(struct dynamic_dma_window_prop)) { - kfree(window); - remove_ddw(pdn); - continue; - } + /* + * We need to ensure the IOMMU table is active when we + * return from the IOMMU setup so that the common code + * can clear the table or find the holes. To that end, + * first, remove any existing DDW configuration. + */ + remove_ddw(pdn); - window->device = pdn; - window->prop = direct64; - spin_lock(&direct_window_list_lock); - list_add(&window->list, &direct_window_list); - spin_unlock(&direct_window_list_lock); + /* + * Second, if we are running on a new enough level of + * firmware where the restore API is present, use it to + * restore the 32-bit window, which was removed in + * create_ddw. + * If the API is not present, then create_ddw couldn't + * have removed the 32-bit window in the first place, so + * removing the DDW configuration should be sufficient. + */ + ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions", + NULL); + if (ddw_extensions && ddw_extensions[0] > 0) + __restore_default_window(of_node_to_eeh_dev(pdn), + ddw_extensions[1]); } return 0; @@ -883,32 +919,9 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail, } static void restore_default_window(struct pci_dev *dev, - u32 ddw_restore_token, unsigned long liobn) + u32 ddw_restore_token) { - struct eeh_dev *edev; - u32 cfg_addr; - u64 buid; - int ret; - - /* - * Get the config address and phb buid of the PE window. - * Rely on eeh to retrieve this for us. 
- * Retrieve them from the pci device, not the node with the - * dma-window property - */ - edev = pci_dev_to_eeh_dev(dev); - cfg_addr = edev->config_addr; - if (edev->pe_config_addr) - cfg_addr = edev->pe_config_addr; - buid = edev->phb->buid; - - do { - ret = rtas_call(ddw_restore_token, 3, 1, NULL, cfg_addr, - BUID_HI(buid), BUID_LO(buid)); - } while (rtas_busy_delay(ret)); - dev_info(&dev->dev, - "ibm,reset-pe-dma-windows(%x) %x %x %x returned %d\n", - ddw_restore_token, cfg_addr, BUID_HI(buid), BUID_LO(buid), ret); + __restore_default_window(pci_dev_to_eeh_dev(dev), ddw_restore_token); } /* @@ -1070,7 +1083,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) goto out_free_window; } - ret = prom_add_property(pdn, win64); + ret = of_add_property(pdn, win64); if (ret) { dev_err(&dev->dev, "unable to add dma window property for %s: %d", pdn->full_name, ret); @@ -1099,7 +1112,7 @@ out_free_prop: out_restore_window: if (ddw_restore_token) - restore_default_window(dev, ddw_restore_token, liobn); + restore_default_window(dev, ddw_restore_token); out_unlock: mutex_unlock(&direct_window_init_mutex); @@ -1294,7 +1307,8 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti struct direct_window *window; switch (action) { - case PSERIES_RECONFIG_REMOVE: + case OF_RECONFIG_DETACH_NODE: + remove_ddw(np); if (pci && pci->iommu_table) iommu_free_table(pci->iommu_table, np->full_name); @@ -1307,16 +1321,6 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti } } spin_unlock(&direct_window_list_lock); - - /* - * Because the notifier runs after isolation of the - * slot, we are guaranteed any DMA window has already - * been revoked and the TCEs have been marked invalid, - * so we don't need a call to remove_ddw(np). However, - * if an additional notifier action is added before the - * isolate call, we should update this code for - * completeness with such a call. 
- */ break; default: err = NOTIFY_DONE; @@ -1357,7 +1361,7 @@ void iommu_init_early_pSeries(void) } - pSeries_reconfig_notifier_register(&iommu_reconfig_nb); + of_reconfig_notifier_register(&iommu_reconfig_nb); register_memory_notifier(&iommu_mem_nb); set_pci_dma_ops(&dma_iommu_ops); diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index dd30b12edfe..6573808cc5f 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -116,7 +116,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop, } if (!more) { - prom_update_property(dn, new_prop); + of_update_property(dn, new_prop); new_prop = NULL; } @@ -172,7 +172,7 @@ static int update_dt_node(u32 phandle) case 0x80000000: prop = of_find_property(dn, prop_name, NULL); - prom_remove_property(dn, prop); + of_remove_property(dn, prop); prop = NULL; break; diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index d19f4977c83..e5b08472313 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -220,7 +220,8 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total) /* Get the top level device in the PE */ edev = of_node_to_eeh_dev(dn); - edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list); + if (edev->pe) + edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list); dn = eeh_dev_to_of_node(edev); if (!dn) return NULL; diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 56b864d777e..0b580f413a9 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c @@ -40,7 +40,8 @@ void pcibios_name_device(struct pci_dev *dev) */ dn = pci_device_to_OF_node(dev); if (dn) { - const char *loc_code = of_get_property(dn, "ibm,loc-code", 0); + const char *loc_code = of_get_property(dn, "ibm,loc-code", + NULL); if (loc_code) { int loc_len = strlen(loc_code); if (loc_len < sizeof(dev->dev.name)) { diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c index 261a577a3dd..c91b22be928 100644 --- a/arch/powerpc/platforms/pseries/pci_dlpar.c +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c @@ -149,7 +149,7 @@ void pcibios_add_pci_devices(struct pci_bus * bus) } EXPORT_SYMBOL_GPL(pcibios_add_pci_devices); -struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn) +struct pci_controller *init_phb_dynamic(struct device_node *dn) { struct pci_controller *phb; diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h index 13e8cc43adf..f368668d97b 100644 --- a/arch/powerpc/platforms/pseries/plpar_wrappers.h +++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h @@ -2,6 +2,7 @@ #define _PSERIES_PLPAR_WRAPPERS_H #include <linux/string.h> +#include <linux/irqflags.h> #include <asm/hvcall.h> #include <asm/paca.h> @@ -41,7 +42,14 @@ static inline long extended_cede_processor(unsigned long latency_hint) u8 old_latency_hint = get_cede_latency_hint(); set_cede_latency_hint(latency_hint); + rc = cede_processor(); +#ifdef CONFIG_TRACE_IRQFLAGS + /* Ensure that H_CEDE returns with IRQs on */ + if (WARN_ON(!(mfmsr() & MSR_EE))) + __hard_irq_enable(); +#endif + set_cede_latency_hint(old_latency_hint); return rc; @@ -273,4 +281,45 @@ static inline long plpar_put_term_char(unsigned long termno, unsigned long len, lbuf[1]); } +/* Set various resource mode parameters */ +static 
inline long plpar_set_mode(unsigned long mflags, unsigned long resource, + unsigned long value1, unsigned long value2) +{ + return plpar_hcall_norets(H_SET_MODE, mflags, resource, value1, value2); +} + +/* + * Enable relocation on exceptions on this partition + * + * Note: this call has a partition wide scope and can take a while to complete. + * If it returns H_LONG_BUSY_* it should be retried periodically until it + * returns H_SUCCESS. + */ +static inline long enable_reloc_on_exceptions(void) +{ + /* mflags = 3: Exceptions at 0xC000000000004000 */ + return plpar_set_mode(3, 3, 0, 0); +} + +/* + * Disable relocation on exceptions on this partition + * + * Note: this call has a partition wide scope and can take a while to complete. + * If it returns H_LONG_BUSY_* it should be retried periodically until it + * returns H_SUCCESS. + */ +static inline long disable_reloc_on_exceptions(void) { + return plpar_set_mode(0, 3, 0, 0); +} + +static inline long plapr_set_ciabr(unsigned long ciabr) +{ + return plpar_set_mode(0, 1, ciabr, 0); +} + +static inline long plapr_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0) +{ + return plpar_set_mode(0, 2, dawr0, dawrx0); +} + #endif /* _PSERIES_PLPAR_WRAPPERS_H */ diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c index 45d00e5fe14..4d806b41960 100644 --- a/arch/powerpc/platforms/pseries/processor_idle.c +++ b/arch/powerpc/platforms/pseries/processor_idle.c @@ -36,7 +36,7 @@ static struct cpuidle_state *cpuidle_state_table; static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before) { - *kt_before = ktime_get_real(); + *kt_before = ktime_get(); *in_purr = mfspr(SPRN_PURR); /* * Indicate to the HV that we are idle. Now would be @@ -50,7 +50,7 @@ static inline s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before) get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr; get_lppaca()->idle = 0; - return ktime_to_us(ktime_sub(ktime_get_real(), kt_before)); + return ktime_to_us(ktime_sub(ktime_get(), kt_before)); } static int snooze_loop(struct cpuidle_device *dev, diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c index af281dce510..a91e6dadda2 100644 --- a/arch/powerpc/platforms/pseries/pseries_energy.c +++ b/arch/powerpc/platforms/pseries/pseries_energy.c @@ -21,6 +21,7 @@ #include <asm/cputhreads.h> #include <asm/page.h> #include <asm/hvcall.h> +#include <asm/firmware.h> #define MODULE_VERS "1.0" @@ -32,40 +33,6 @@ static int sysfs_entries; /* Helper routines */ -/* - * Routine to detect firmware support for hcall - * return 1 if H_BEST_ENERGY is supported - * else return 0 - */ - -static int check_for_h_best_energy(void) -{ - struct device_node *rtas = NULL; - const char *hypertas, *s; - int length; - int rc = 0; - - rtas = of_find_node_by_path("/rtas"); - if (!rtas) - return 0; - - hypertas = of_get_property(rtas, "ibm,hypertas-functions", &length); - if (!hypertas) { - of_node_put(rtas); - return 0; - } - - /* hypertas will have list of strings with hcall names */ - for (s = hypertas; s < hypertas + length; s += strlen(s) + 1) { - if (!strncmp("hcall-best-energy-1", s, 19)) { - rc = 1; /* Found the string */ - break; - } - } - of_node_put(rtas); - return rc; -} - /* Helper Routines to convert between drc_index to cpu numbers */ static u32 cpu_to_drc_index(int cpu) @@ -262,7 +229,7 @@ static int __init pseries_energy_init(void) int cpu, err; struct device *cpu_dev; - if 
(!check_for_h_best_energy()) { + if (!firmware_has_feature(FW_FEATURE_BEST_ENERGY)) { printk(KERN_INFO "Hypercall H_BEST_ENERGY not supported\n"); return 0; } diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index 39f71fba9b3..d6491bd481d 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c @@ -16,55 +16,13 @@ #include <linux/notifier.h> #include <linux/proc_fs.h> #include <linux/slab.h> +#include <linux/of.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/uaccess.h> -#include <asm/pSeries_reconfig.h> #include <asm/mmu.h> - - -/* - * Routines for "runtime" addition and removal of device tree nodes. - */ -#ifdef CONFIG_PROC_DEVICETREE -/* - * Add a node to /proc/device-tree. - */ -static void add_node_proc_entries(struct device_node *np) -{ - struct proc_dir_entry *ent; - - ent = proc_mkdir(strrchr(np->full_name, '/') + 1, np->parent->pde); - if (ent) - proc_device_tree_add_node(np, ent); -} - -static void remove_node_proc_entries(struct device_node *np) -{ - struct property *pp = np->properties; - struct device_node *parent = np->parent; - - while (pp) { - remove_proc_entry(pp->name, np->pde); - pp = pp->next; - } - if (np->pde) - remove_proc_entry(np->pde->name, parent->pde); -} -#else /* !CONFIG_PROC_DEVICETREE */ -static void add_node_proc_entries(struct device_node *np) -{ - return; -} - -static void remove_node_proc_entries(struct device_node *np) -{ - return; -} -#endif /* CONFIG_PROC_DEVICETREE */ - /** * derive_parent - basically like dirname(1) * @path: the full_name of a node to be added to the tree @@ -97,28 +55,6 @@ static struct device_node *derive_parent(const char *path) return parent; } -static BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain); - -int pSeries_reconfig_notifier_register(struct notifier_block *nb) -{ - return blocking_notifier_chain_register(&pSeries_reconfig_chain, nb); -} -EXPORT_SYMBOL_GPL(pSeries_reconfig_notifier_register); - -void pSeries_reconfig_notifier_unregister(struct notifier_block *nb) -{ - blocking_notifier_chain_unregister(&pSeries_reconfig_chain, nb); -} -EXPORT_SYMBOL_GPL(pSeries_reconfig_notifier_unregister); - -int pSeries_reconfig_notify(unsigned long action, void *p) -{ - int err = blocking_notifier_call_chain(&pSeries_reconfig_chain, - action, p); - - return notifier_to_errno(err); -} - static int pSeries_reconfig_add_node(const char *path, struct property *proplist) { struct device_node *np; @@ -142,16 +78,12 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist goto out_err; } - err = pSeries_reconfig_notify(PSERIES_RECONFIG_ADD, np); + err = of_attach_node(np); if (err) { printk(KERN_ERR "Failed to add device node %s\n", path); goto out_err; } - of_attach_node(np); - - add_node_proc_entries(np); - of_node_put(np->parent); return 0; @@ -179,11 +111,7 @@ static int pSeries_reconfig_remove_node(struct device_node *np) return -EBUSY; } - remove_node_proc_entries(np); - - pSeries_reconfig_notify(PSERIES_RECONFIG_REMOVE, np); of_detach_node(np); - of_node_put(parent); of_node_put(np); /* Must decrement the refcount */ return 0; @@ -281,12 +209,11 @@ static struct property *new_property(const char *name, const int length, if (!new) return NULL; - if (!(new->name = kmalloc(strlen(name) + 1, GFP_KERNEL))) + if (!(new->name = kstrdup(name, GFP_KERNEL))) goto cleanup; if (!(new->value = kmalloc(length + 1, GFP_KERNEL))) goto cleanup; - strcpy(new->name, name); memcpy(new->value, value, length); *(((char 
*)new->value) + length) = 0; new->length = length; @@ -398,7 +325,7 @@ static int do_add_property(char *buf, size_t bufsize) if (!prop) return -ENOMEM; - prom_add_property(np, prop); + of_add_property(np, prop); return 0; } @@ -422,16 +349,15 @@ static int do_remove_property(char *buf, size_t bufsize) prop = of_find_property(np, buf, NULL); - return prom_remove_property(np, prop); + return of_remove_property(np, prop); } static int do_update_property(char *buf, size_t bufsize) { struct device_node *np; - struct pSeries_reconfig_prop_update upd_value; unsigned char *value; char *name, *end, *next_prop; - int rc, length; + int length; struct property *newprop; buf = parse_node(buf, bufsize, &np); end = buf + bufsize; @@ -453,41 +379,7 @@ static int do_update_property(char *buf, size_t bufsize) if (!strcmp(name, "slb-size") || !strcmp(name, "ibm,slb-size")) slb_set_size(*(int *)value); - upd_value.node = np; - upd_value.property = newprop; - pSeries_reconfig_notify(PSERIES_UPDATE_PROPERTY, &upd_value); - - rc = prom_update_property(np, newprop); - if (rc) - return rc; - - /* For memory under the ibm,dynamic-reconfiguration-memory node - * of the device tree, adding and removing memory is just an update - * to the ibm,dynamic-memory property instead of adding/removing a - * memory node in the device tree. For these cases we still need to - * involve the notifier chain. - */ - if (!strcmp(name, "ibm,dynamic-memory")) { - int action; - - next_prop = parse_next_property(next_prop, end, &name, - &length, &value); - if (!next_prop) - return -EINVAL; - - if (!strcmp(name, "add")) - action = PSERIES_DRCONF_MEM_ADD; - else - action = PSERIES_DRCONF_MEM_REMOVE; - - rc = pSeries_reconfig_notify(action, value); - if (rc) { - prom_update_property(np, newprop); - return rc; - } - } - - return 0; + return of_update_property(np, newprop); } /** diff --git a/arch/powerpc/platforms/pseries/scanlog.c b/arch/powerpc/platforms/pseries/scanlog.c index 554457294a2..47f3cda2a68 100644 --- a/arch/powerpc/platforms/pseries/scanlog.c +++ b/arch/powerpc/platforms/pseries/scanlog.c @@ -46,16 +46,12 @@ static struct proc_dir_entry *proc_ppc64_scan_log_dump; /* The proc file */ static ssize_t scanlog_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - struct inode * inode = file->f_path.dentry->d_inode; - struct proc_dir_entry *dp; - unsigned int *data; + struct proc_dir_entry *dp = PDE(file_inode(file)); + unsigned int *data = (unsigned int *)dp->data; int status; unsigned long len, off; unsigned int wait_time; - dp = PDE(inode); - data = (unsigned int *)dp->data; - if (count > RTAS_DATA_BUF_SIZE) count = RTAS_DATA_BUF_SIZE; diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index e3cb7ae6165..8bcc9ca6682 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -40,6 +40,8 @@ #include <linux/seq_file.h> #include <linux/root_dev.h> #include <linux/cpuidle.h> +#include <linux/of.h> +#include <linux/kexec.h> #include <asm/mmu.h> #include <asm/processor.h> @@ -63,7 +65,7 @@ #include <asm/smp.h> #include <asm/firmware.h> #include <asm/eeh.h> -#include <asm/pSeries_reconfig.h> +#include <asm/reg.h> #include "plpar_wrappers.h" #include "pseries.h" @@ -258,7 +260,7 @@ static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long act int err = NOTIFY_OK; switch (action) { - case PSERIES_RECONFIG_ADD: + case OF_RECONFIG_ATTACH_NODE: pci = np->parent->data; if (pci) { update_dn_pci_info(np, pci->phb); @@ -280,7 
+282,7 @@ static struct notifier_block pci_dn_reconfig_nb = { struct kmem_cache *dtl_cache; -#ifdef CONFIG_VIRT_CPU_ACCOUNTING +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE /* * Allocate space for the dispatch trace log for all possible cpus * and register the buffers with the hypervisor. This is used for @@ -331,12 +333,12 @@ static int alloc_dispatch_logs(void) return 0; } -#else /* !CONFIG_VIRT_CPU_ACCOUNTING */ +#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ static inline int alloc_dispatch_logs(void) { return 0; } -#endif /* CONFIG_VIRT_CPU_ACCOUNTING */ +#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ static int alloc_dispatch_log_kmem_cache(void) { @@ -367,6 +369,67 @@ static void pSeries_idle(void) } } +/* + * Enable relocation on during exceptions. This has partition wide scope and + * may take a while to complete, if it takes longer than one second we will + * just give up rather than wasting any more time on this - if that turns out + * to ever be a problem in practice we can move this into a kernel thread to + * finish off the process later in boot. + */ +long pSeries_enable_reloc_on_exc(void) +{ + long rc; + unsigned int delay, total_delay = 0; + + while (1) { + rc = enable_reloc_on_exceptions(); + if (!H_IS_LONG_BUSY(rc)) + return rc; + + delay = get_longbusy_msecs(rc); + total_delay += delay; + if (total_delay > 1000) { + pr_warn("Warning: Giving up waiting to enable " + "relocation on exceptions (%u msec)!\n", + total_delay); + return rc; + } + + mdelay(delay); + } +} +EXPORT_SYMBOL(pSeries_enable_reloc_on_exc); + +long pSeries_disable_reloc_on_exc(void) +{ + long rc; + + while (1) { + rc = disable_reloc_on_exceptions(); + if (!H_IS_LONG_BUSY(rc)) + return rc; + mdelay(get_longbusy_msecs(rc)); + } +} +EXPORT_SYMBOL(pSeries_disable_reloc_on_exc); + +#ifdef CONFIG_KEXEC +static void pSeries_machine_kexec(struct kimage *image) +{ + long rc; + + if (firmware_has_feature(FW_FEATURE_SET_MODE) && + (image->type != KEXEC_TYPE_CRASH)) { + rc = pSeries_disable_reloc_on_exc(); + if (rc != H_SUCCESS) + pr_warning("Warning: Failed to disable relocation on " + "exceptions: %ld\n", rc); + } + + default_machine_kexec(image); +} +#endif + static void __init pSeries_setup_arch(void) { panic_timeout = 10; @@ -389,7 +452,7 @@ static void __init pSeries_setup_arch(void) /* Find and initialize PCI host bridges */ init_pci_config_tokens(); find_and_init_phbs(); - pSeries_reconfig_notifier_register(&pci_dn_reconfig_nb); + of_reconfig_notifier_register(&pci_dn_reconfig_nb); pSeries_nvram_init(); @@ -402,6 +465,14 @@ static void __init pSeries_setup_arch(void) ppc_md.enable_pmcs = pseries_lpar_enable_pmcs; else ppc_md.enable_pmcs = power4_enable_pmcs; + + if (firmware_has_feature(FW_FEATURE_SET_MODE)) { + long rc; + if ((rc = pSeries_enable_reloc_on_exc()) != H_SUCCESS) { + pr_warn("Unable to enable relocation on exceptions: " + "%ld\n", rc); + } + } } static int __init pSeries_init_panel(void) @@ -430,6 +501,14 @@ static int pseries_set_xdabr(unsigned long dabr, unsigned long dabrx) return plpar_hcall_norets(H_SET_XDABR, dabr, dabrx); } +static int pseries_set_dawr(unsigned long dawr, unsigned long dawrx) +{ + /* PAPR says we can't set HYP */ + dawrx &= ~DAWRX_HYP; + + return plapr_set_watchpoint0(dawr, dawrx); +} + #define CMO_CHARACTERISTICS_TOKEN 44 #define CMO_MAXLENGTH 1026 @@ -536,6 +615,9 @@ static void __init pSeries_init_early(void) else if (firmware_has_feature(FW_FEATURE_DABR)) ppc_md.set_dabr = pseries_set_dabr; + if (firmware_has_feature(FW_FEATURE_SET_MODE)) + ppc_md.set_dawr = 
pseries_set_dawr; + pSeries_cmo_feature_init(); iommu_init_early_pSeries(); @@ -659,4 +741,7 @@ define_machine(pseries) { .progress = rtas_progress, .system_reset_exception = pSeries_system_reset_exception, .machine_check_exception = pSeries_machine_check_exception, +#ifdef CONFIG_KEXEC + .machine_kexec = pSeries_machine_kexec, +#endif }; diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 71706bc34a0..12bc8c3663a 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -38,11 +38,11 @@ #include <asm/cputable.h> #include <asm/firmware.h> #include <asm/rtas.h> -#include <asm/pSeries_reconfig.h> #include <asm/mpic.h> #include <asm/vdso_datapage.h> #include <asm/cputhreads.h> #include <asm/xics.h> +#include <asm/dbell.h> #include "plpar_wrappers.h" #include "pseries.h" @@ -55,6 +55,11 @@ */ static cpumask_var_t of_spin_mask; +/* + * If we multiplex IPI mechanisms, store the appropriate XICS IPI mechanism here + */ +static void (*xics_cause_ipi)(int cpu, unsigned long data); + /* Query where a cpu is now. Return codes #defined in plpar_wrappers.h */ int smp_query_cpu_stopped(unsigned int pcpu) { @@ -88,7 +93,7 @@ int smp_query_cpu_stopped(unsigned int pcpu) * 0 - failure * 1 - success */ -static inline int __devinit smp_startup_cpu(unsigned int lcpu) +static inline int smp_startup_cpu(unsigned int lcpu) { int status; unsigned long start_here = __pa((u32)*((unsigned long *) @@ -134,10 +139,12 @@ out: return 1; } -static void __devinit smp_xics_setup_cpu(int cpu) +static void smp_xics_setup_cpu(int cpu) { if (cpu != boot_cpuid) xics_setup_cpu(); + if (cpu_has_feature(CPU_FTR_DBELL)) + doorbell_setup_this_cpu(); if (firmware_has_feature(FW_FEATURE_SPLPAR)) vpa_init(cpu); @@ -149,7 +156,7 @@ static void __devinit smp_xics_setup_cpu(int cpu) #endif } -static int __devinit smp_pSeries_kick_cpu(int nr) +static int smp_pSeries_kick_cpu(int nr) { BUG_ON(nr < 0 || nr >= NR_CPUS); @@ -196,6 +203,27 @@ static int smp_pSeries_cpu_bootable(unsigned int nr) return 1; } +/* Only used on systems that support multiple IPI mechanisms */ +static void pSeries_cause_ipi_mux(int cpu, unsigned long data) +{ + if (cpumask_test_cpu(cpu, cpu_sibling_mask(smp_processor_id()))) + doorbell_cause_ipi(cpu, data); + else + xics_cause_ipi(cpu, data); +} + +static __init int pSeries_smp_probe(void) +{ + int ret = xics_smp_probe(); + + if (cpu_has_feature(CPU_FTR_DBELL)) { + xics_cause_ipi = smp_ops->cause_ipi; + smp_ops->cause_ipi = pSeries_cause_ipi_mux; + } + + return ret; +} + static struct smp_ops_t pSeries_mpic_smp_ops = { .message_pass = smp_mpic_message_pass, .probe = smp_mpic_probe, @@ -205,8 +233,8 @@ static struct smp_ops_t pSeries_mpic_smp_ops = { static struct smp_ops_t pSeries_xics_smp_ops = { .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */ - .cause_ipi = NULL, /* Filled at runtime by xics_smp_probe() */ - .probe = xics_smp_probe, + .cause_ipi = NULL, /* Filled at runtime by pSeries_smp_probe() */ + .probe = pSeries_smp_probe, .kick_cpu = smp_pSeries_kick_cpu, .setup_cpu = smp_xics_setup_cpu, .cpu_bootable = smp_pSeries_cpu_bootable, diff --git a/arch/powerpc/platforms/wsp/Makefile b/arch/powerpc/platforms/wsp/Makefile index 56817ac98fc..162fc60125a 100644 --- a/arch/powerpc/platforms/wsp/Makefile +++ b/arch/powerpc/platforms/wsp/Makefile @@ -1,4 +1,4 @@ -ccflags-y += -mno-minimal-toc +ccflags-y += $(NO_MINIMAL_TOC) obj-y += setup.o ics.o wsp.o obj-$(CONFIG_PPC_PSR2) += psr2.o diff --git 
a/arch/powerpc/platforms/wsp/scom_smp.c b/arch/powerpc/platforms/wsp/scom_smp.c index 141e7803209..b56b70aeb49 100644 --- a/arch/powerpc/platforms/wsp/scom_smp.c +++ b/arch/powerpc/platforms/wsp/scom_smp.c @@ -337,8 +337,7 @@ scom_fail: return rc; } -int __devinit a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, - struct device_node *np) +int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, struct device_node *np) { u64 init_iar, init_msr, init_ccr2; unsigned long start_here; diff --git a/arch/powerpc/platforms/wsp/smp.c b/arch/powerpc/platforms/wsp/smp.c index 0ba103ae83a..332a18b8140 100644 --- a/arch/powerpc/platforms/wsp/smp.c +++ b/arch/powerpc/platforms/wsp/smp.c @@ -23,7 +23,7 @@ #include "ics.h" #include "wsp.h" -static void __devinit smp_a2_setup_cpu(int cpu) +static void smp_a2_setup_cpu(int cpu) { doorbell_setup_this_cpu(); @@ -31,7 +31,7 @@ static void __devinit smp_a2_setup_cpu(int cpu) xics_setup_cpu(); } -int __devinit smp_a2_kick_cpu(int nr) +int smp_a2_kick_cpu(int nr) { const char *enable_method; struct device_node *np; diff --git a/arch/powerpc/platforms/wsp/wsp.h b/arch/powerpc/platforms/wsp/wsp.h index 10c1d1fff36..62ef21afb89 100644 --- a/arch/powerpc/platforms/wsp/wsp.h +++ b/arch/powerpc/platforms/wsp/wsp.h @@ -18,7 +18,7 @@ extern void a2_setup_smp(void); extern int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, struct device_node *np); extern int smp_a2_cpu_bootable(unsigned int nr); -extern int __devinit smp_a2_kick_cpu(int nr); +extern int smp_a2_kick_cpu(int nr); extern void opb_pic_init(void); diff --git a/arch/powerpc/platforms/wsp/wsp_pci.c b/arch/powerpc/platforms/wsp/wsp_pci.c index 1526551f9fe..8e22f561d17 100644 --- a/arch/powerpc/platforms/wsp/wsp_pci.c +++ b/arch/powerpc/platforms/wsp/wsp_pci.c @@ -402,7 +402,7 @@ static struct wsp_dma_table *wsp_pci_create_dma32_table(struct wsp_phb *phb, return ERR_PTR(-ENOMEM); } -static void __devinit wsp_pci_dma_dev_setup(struct pci_dev *pdev) +static void wsp_pci_dma_dev_setup(struct pci_dev *pdev) { struct dev_archdata *archdata = &pdev->dev.archdata; struct pci_controller *hose = pci_bus_to_host(pdev->bus); diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile index a57600b3a4e..b0a518e9759 100644 --- a/arch/powerpc/sysdev/Makefile +++ b/arch/powerpc/sysdev/Makefile @@ -1,6 +1,6 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror -ccflags-$(CONFIG_PPC64) := -mno-minimal-toc +ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) mpic-msi-obj-$(CONFIG_PCI_MSI) += mpic_msi.o mpic_u3msi.o mpic_pasemi_msi.o obj-$(CONFIG_MPIC) += mpic.o $(mpic-msi-obj-y) @@ -26,7 +26,6 @@ obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o obj-$(CONFIG_FSL_RIO) += fsl_rio.o fsl_rmu.o obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o obj-$(CONFIG_QUICC_ENGINE) += qe_lib/ -obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ mv64x60-$(CONFIG_PCI) += mv64x60_pci.o obj-$(CONFIG_MV64X60) += $(mv64x60-y) mv64x60_pic.o mv64x60_dev.o \ mv64x60_udbg.o @@ -37,6 +36,7 @@ obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o obj-$(CONFIG_PPC_I8259) += i8259.o obj-$(CONFIG_IPIC) += ipic.o obj-$(CONFIG_4xx) += uic.o +obj-$(CONFIG_PPC4xx_OCM) += ppc4xx_ocm.o obj-$(CONFIG_4xx_SOC) += ppc4xx_soc.o obj-$(CONFIG_XILINX_VIRTEX) += xilinx_intc.o obj-$(CONFIG_XILINX_PCI) += xilinx_pci.o diff --git a/arch/powerpc/sysdev/bestcomm/Kconfig b/arch/powerpc/sysdev/bestcomm/Kconfig deleted file mode 100644 index 29e427085ef..00000000000 --- a/arch/powerpc/sysdev/bestcomm/Kconfig +++ /dev/null @@ -1,36 +0,0 @@ -# -# Kconfig options for Bestcomm -# - -config 
PPC_BESTCOMM - tristate "Bestcomm DMA engine support" - depends on PPC_MPC52xx - default n - select PPC_LIB_RHEAP - help - BestComm is the name of the communication coprocessor found - on the Freescale MPC5200 family of processor. Its usage is - optional for some drivers (like ATA), but required for - others (like FEC). - - If you want to use drivers that require DMA operations, - answer Y or M. Otherwise say N. - -config PPC_BESTCOMM_ATA - tristate - depends on PPC_BESTCOMM - help - This option enables the support for the ATA task. - -config PPC_BESTCOMM_FEC - tristate - depends on PPC_BESTCOMM - help - This option enables the support for the FEC tasks. - -config PPC_BESTCOMM_GEN_BD - tristate - depends on PPC_BESTCOMM - help - This option enables the support for the GenBD tasks. - diff --git a/arch/powerpc/sysdev/bestcomm/Makefile b/arch/powerpc/sysdev/bestcomm/Makefile deleted file mode 100644 index aed2df2a658..00000000000 --- a/arch/powerpc/sysdev/bestcomm/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -# -# Makefile for BestComm & co -# - -bestcomm-core-objs := bestcomm.o sram.o -bestcomm-ata-objs := ata.o bcom_ata_task.o -bestcomm-fec-objs := fec.o bcom_fec_rx_task.o bcom_fec_tx_task.o -bestcomm-gen-bd-objs := gen_bd.o bcom_gen_bd_rx_task.o bcom_gen_bd_tx_task.o - -obj-$(CONFIG_PPC_BESTCOMM) += bestcomm-core.o -obj-$(CONFIG_PPC_BESTCOMM_ATA) += bestcomm-ata.o -obj-$(CONFIG_PPC_BESTCOMM_FEC) += bestcomm-fec.o -obj-$(CONFIG_PPC_BESTCOMM_GEN_BD) += bestcomm-gen-bd.o - diff --git a/arch/powerpc/sysdev/bestcomm/ata.c b/arch/powerpc/sysdev/bestcomm/ata.c deleted file mode 100644 index 901c9f91e5d..00000000000 --- a/arch/powerpc/sysdev/bestcomm/ata.c +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Bestcomm ATA task driver - * - * - * Patterned after bestcomm/fec.c by Dale Farnsworth <dfarnsworth@mvista.com> - * 2003-2004 (c) MontaVista, Software, Inc. - * - * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com> - * Copyright (C) 2006 Freescale - John Rigby - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/types.h> -#include <asm/io.h> - -#include "bestcomm.h" -#include "bestcomm_priv.h" -#include "ata.h" - - -/* ======================================================================== */ -/* Task image/var/inc */ -/* ======================================================================== */ - -/* ata task image */ -extern u32 bcom_ata_task[]; - -/* ata task vars that need to be set before enabling the task */ -struct bcom_ata_var { - u32 enable; /* (u16*) address of task's control register */ - u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */ - u32 bd_last; /* (struct bcom_bd*) end of ring buffer */ - u32 bd_start; /* (struct bcom_bd*) current bd */ - u32 buffer_size; /* size of receive buffer */ -}; - -/* ata task incs that need to be set before enabling the task */ -struct bcom_ata_inc { - u16 pad0; - s16 incr_bytes; - u16 pad1; - s16 incr_dst; - u16 pad2; - s16 incr_src; -}; - - -/* ======================================================================== */ -/* Task support code */ -/* ======================================================================== */ - -struct bcom_task * -bcom_ata_init(int queue_len, int maxbufsize) -{ - struct bcom_task *tsk; - struct bcom_ata_var *var; - struct bcom_ata_inc *inc; - - /* Prefetch breaks ATA DMA. 
Turn it off for ATA DMA */ - bcom_disable_prefetch(); - - tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_ata_bd), 0); - if (!tsk) - return NULL; - - tsk->flags = BCOM_FLAGS_NONE; - - bcom_ata_reset_bd(tsk); - - var = (struct bcom_ata_var *) bcom_task_var(tsk->tasknum); - inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum); - - if (bcom_load_image(tsk->tasknum, bcom_ata_task)) { - bcom_task_free(tsk); - return NULL; - } - - var->enable = bcom_eng->regs_base + - offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]); - var->bd_base = tsk->bd_pa; - var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size); - var->bd_start = tsk->bd_pa; - var->buffer_size = maxbufsize; - - /* Configure some stuff */ - bcom_set_task_pragma(tsk->tasknum, BCOM_ATA_PRAGMA); - bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum); - - out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ATA_RX], BCOM_IPR_ATA_RX); - out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ATA_TX], BCOM_IPR_ATA_TX); - - out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */ - - return tsk; -} -EXPORT_SYMBOL_GPL(bcom_ata_init); - -void bcom_ata_rx_prepare(struct bcom_task *tsk) -{ - struct bcom_ata_inc *inc; - - inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum); - - inc->incr_bytes = -(s16)sizeof(u32); - inc->incr_src = 0; - inc->incr_dst = sizeof(u32); - - bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_ATA_RX); -} -EXPORT_SYMBOL_GPL(bcom_ata_rx_prepare); - -void bcom_ata_tx_prepare(struct bcom_task *tsk) -{ - struct bcom_ata_inc *inc; - - inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum); - - inc->incr_bytes = -(s16)sizeof(u32); - inc->incr_src = sizeof(u32); - inc->incr_dst = 0; - - bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_ATA_TX); -} -EXPORT_SYMBOL_GPL(bcom_ata_tx_prepare); - -void bcom_ata_reset_bd(struct bcom_task *tsk) -{ - struct bcom_ata_var *var; - - /* Reset all BD */ - memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); - - tsk->index = 0; - tsk->outdex = 0; - - var = (struct bcom_ata_var *) bcom_task_var(tsk->tasknum); - var->bd_start = var->bd_base; -} -EXPORT_SYMBOL_GPL(bcom_ata_reset_bd); - -void bcom_ata_release(struct bcom_task *tsk) -{ - /* Nothing special for the ATA tasks */ - bcom_task_free(tsk); -} -EXPORT_SYMBOL_GPL(bcom_ata_release); - - -MODULE_DESCRIPTION("BestComm ATA task driver"); -MODULE_AUTHOR("John Rigby"); -MODULE_LICENSE("GPL v2"); - diff --git a/arch/powerpc/sysdev/bestcomm/ata.h b/arch/powerpc/sysdev/bestcomm/ata.h deleted file mode 100644 index 0b237181133..00000000000 --- a/arch/powerpc/sysdev/bestcomm/ata.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Header for Bestcomm ATA task driver - * - * - * Copyright (C) 2006 Freescale - John Rigby - * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com> - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. 
- */ - -#ifndef __BESTCOMM_ATA_H__ -#define __BESTCOMM_ATA_H__ - - -struct bcom_ata_bd { - u32 status; - u32 src_pa; - u32 dst_pa; -}; - -extern struct bcom_task * bcom_ata_init(int queue_len, int maxbufsize); -extern void bcom_ata_rx_prepare(struct bcom_task *tsk); -extern void bcom_ata_tx_prepare(struct bcom_task *tsk); -extern void bcom_ata_reset_bd(struct bcom_task *tsk); -extern void bcom_ata_release(struct bcom_task *tsk); - -#endif /* __BESTCOMM_ATA_H__ */ - diff --git a/arch/powerpc/sysdev/bestcomm/bcom_ata_task.c b/arch/powerpc/sysdev/bestcomm/bcom_ata_task.c deleted file mode 100644 index cc6049a4e46..00000000000 --- a/arch/powerpc/sysdev/bestcomm/bcom_ata_task.c +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Bestcomm ATA task microcode - * - * Copyright (c) 2004 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Created based on bestcom/code_dma/image_rtos1/dma_image.hex - */ - -#include <asm/types.h> - -/* - * The header consists of the following fields: - * u32 magic; - * u8 desc_size; - * u8 var_size; - * u8 inc_size; - * u8 first_var; - * u8 reserved[8]; - * - * The size fields contain the number of 32-bit words. - */ - -u32 bcom_ata_task[] = { - /* header */ - 0x4243544b, - 0x0e060709, - 0x00000000, - 0x00000000, - - /* Task descriptors */ - 0x8198009b, /* LCD: idx0 = var3; idx0 <= var2; idx0 += inc3 */ - 0x13e00c08, /* DRD1A: var3 = var1; FN=0 MORE init=31 WS=0 RS=0 */ - 0xb8000264, /* LCD: idx1 = *idx0, idx2 = var0; idx1 < var9; idx1 += inc4, idx2 += inc4 */ - 0x10000f00, /* DRD1A: var3 = idx0; FN=0 MORE init=0 WS=0 RS=0 */ - 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */ - 0x0c8cfc8a, /* DRD2B1: *idx2 = EU3(); EU3(*idx2,var10) */ - 0xd8988240, /* LCDEXT: idx1 = idx1; idx1 > var9; idx1 += inc0 */ - 0xf845e011, /* LCDEXT: idx2 = *(idx0 + var00000015); ; idx2 += inc2 */ - 0xb845e00a, /* LCD: idx3 = *(idx0 + var00000019); ; idx3 += inc1 */ - 0x0bfecf90, /* DRD1A: *idx3 = *idx2; FN=0 TFD init=31 WS=3 RS=3 */ - 0x9898802d, /* LCD: idx1 = idx1; idx1 once var0; idx1 += inc5 */ - 0x64000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 INT EXT init=0 WS=0 RS=0 */ - 0x0c0cf849, /* DRD2B1: *idx0 = EU3(); EU3(idx1,var9) */ - 0x000001f8, /* NOP */ - - /* VAR[9]-VAR[14] */ - 0x40000000, - 0x7fff7fff, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - - /* INC[0]-INC[6] */ - 0x40000000, - 0xe0000000, - 0xe0000000, - 0xa000000c, - 0x20000000, - 0x00000000, - 0x00000000, -}; - diff --git a/arch/powerpc/sysdev/bestcomm/bcom_fec_rx_task.c b/arch/powerpc/sysdev/bestcomm/bcom_fec_rx_task.c deleted file mode 100644 index a1ad6a02fce..00000000000 --- a/arch/powerpc/sysdev/bestcomm/bcom_fec_rx_task.c +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Bestcomm FEC RX task microcode - * - * Copyright (c) 2004 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Automatically created based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex - * on Tue Mar 22 11:19:38 2005 GMT - */ - -#include <asm/types.h> - -/* - * The header consists of the following fields: - * u32 magic; - * u8 desc_size; - * u8 var_size; - * u8 inc_size; - * u8 first_var; - * u8 reserved[8]; - * - * The size fields contain the number of 32-bit words. 
- */ - -u32 bcom_fec_rx_task[] = { - /* header */ - 0x4243544b, - 0x18060709, - 0x00000000, - 0x00000000, - - /* Task descriptors */ - 0x808220e3, /* LCD: idx0 = var1, idx1 = var4; idx1 <= var3; idx0 += inc4, idx1 += inc3 */ - 0x10601010, /* DRD1A: var4 = var2; FN=0 MORE init=3 WS=0 RS=0 */ - 0xb8800264, /* LCD: idx2 = *idx1, idx3 = var0; idx2 < var9; idx2 += inc4, idx3 += inc4 */ - 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */ - 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */ - 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */ - 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */ - 0xb8c58029, /* LCD: idx3 = *(idx1 + var00000015); idx3 once var0; idx3 += inc5 */ - 0x60000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=0 RS=0 */ - 0x088cf8cc, /* DRD2B1: idx2 = EU3(); EU3(idx3,var12) */ - 0x991982f2, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var11; idx2 += inc6, idx3 += inc2 */ - 0x006acf80, /* DRD1A: *idx3 = *idx0; FN=0 init=3 WS=1 RS=1 */ - 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */ - 0x9999802d, /* LCD: idx3 = idx3; idx3 once var0; idx3 += inc5 */ - 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */ - 0x034cfc4e, /* DRD2B1: var13 = EU3(); EU3(*idx1,var14) */ - 0x00008868, /* DRD1A: idx2 = var13; FN=0 init=0 WS=0 RS=0 */ - 0x99198341, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var13; idx2 += inc0, idx3 += inc1 */ - 0x007ecf80, /* DRD1A: *idx3 = *idx0; FN=0 init=3 WS=3 RS=3 */ - 0x99198272, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var9; idx2 += inc6, idx3 += inc2 */ - 0x046acf80, /* DRD1A: *idx3 = *idx0; FN=0 INT init=3 WS=1 RS=1 */ - 0x9819002d, /* LCD: idx2 = idx0; idx2 once var0; idx2 += inc5 */ - 0x0060c790, /* DRD1A: *idx1 = *idx2; FN=0 init=3 WS=0 RS=0 */ - 0x000001f8, /* NOP */ - - /* VAR[9]-VAR[14] */ - 0x40000000, - 0x7fff7fff, - 0x00000000, - 0x00000003, - 0x40000008, - 0x43ffffff, - - /* INC[0]-INC[6] */ - 0x40000000, - 0xe0000000, - 0xe0000000, - 0xa0000008, - 0x20000000, - 0x00000000, - 0x4000ffff, -}; - diff --git a/arch/powerpc/sysdev/bestcomm/bcom_fec_tx_task.c b/arch/powerpc/sysdev/bestcomm/bcom_fec_tx_task.c deleted file mode 100644 index b1c495c3a65..00000000000 --- a/arch/powerpc/sysdev/bestcomm/bcom_fec_tx_task.c +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Bestcomm FEC TX task microcode - * - * Copyright (c) 2004 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Automatically created based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex - * on Tue Mar 22 11:19:29 2005 GMT - */ - -#include <asm/types.h> - -/* - * The header consists of the following fields: - * u32 magic; - * u8 desc_size; - * u8 var_size; - * u8 inc_size; - * u8 first_var; - * u8 reserved[8]; - * - * The size fields contain the number of 32-bit words. 
- */ - -u32 bcom_fec_tx_task[] = { - /* header */ - 0x4243544b, - 0x2407070d, - 0x00000000, - 0x00000000, - - /* Task descriptors */ - 0x8018001b, /* LCD: idx0 = var0; idx0 <= var0; idx0 += inc3 */ - 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */ - 0x01ccfc0d, /* DRD2B1: var7 = EU3(); EU3(*idx0,var13) */ - 0x8082a123, /* LCD: idx0 = var1, idx1 = var5; idx1 <= var4; idx0 += inc4, idx1 += inc3 */ - 0x10801418, /* DRD1A: var5 = var3; FN=0 MORE init=4 WS=0 RS=0 */ - 0xf88103a4, /* LCDEXT: idx2 = *idx1, idx3 = var2; idx2 < var14; idx2 += inc4, idx3 += inc4 */ - 0x801a6024, /* LCD: idx4 = var0; ; idx4 += inc4 */ - 0x10001708, /* DRD1A: var5 = idx1; FN=0 MORE init=0 WS=0 RS=0 */ - 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */ - 0x0cccfccf, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var15) */ - 0x991a002c, /* LCD: idx2 = idx2, idx3 = idx4; idx2 once var0; idx2 += inc5, idx3 += inc4 */ - 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */ - 0x024cfc4d, /* DRD2B1: var9 = EU3(); EU3(*idx1,var13) */ - 0x60000003, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=3 EXT init=0 WS=0 RS=0 */ - 0x0cccf247, /* DRD2B1: *idx3 = EU3(); EU3(var9,var7) */ - 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */ - 0xb8c80029, /* LCD: idx3 = *(idx1 + var0000001a); idx3 once var0; idx3 += inc5 */ - 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */ - 0x088cf8d1, /* DRD2B1: idx2 = EU3(); EU3(idx3,var17) */ - 0x00002f10, /* DRD1A: var11 = idx2; FN=0 init=0 WS=0 RS=0 */ - 0x99198432, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var16; idx2 += inc6, idx3 += inc2 */ - 0x008ac398, /* DRD1A: *idx0 = *idx3; FN=0 init=4 WS=1 RS=1 */ - 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */ - 0x9999802d, /* LCD: idx3 = idx3; idx3 once var0; idx3 += inc5 */ - 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */ - 0x048cfc53, /* DRD2B1: var18 = EU3(); EU3(*idx1,var19) */ - 0x60000008, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=8 EXT init=0 WS=0 RS=0 */ - 0x088cf48b, /* DRD2B1: idx2 = EU3(); EU3(var18,var11) */ - 0x99198481, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var18; idx2 += inc0, idx3 += inc1 */ - 0x009ec398, /* DRD1A: *idx0 = *idx3; FN=0 init=4 WS=3 RS=3 */ - 0x991983b2, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var14; idx2 += inc6, idx3 += inc2 */ - 0x088ac398, /* DRD1A: *idx0 = *idx3; FN=0 TFD init=4 WS=1 RS=1 */ - 0x9919002d, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc5 */ - 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */ - 0x0c4cf88e, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var14) */ - 0x000001f8, /* NOP */ - - /* VAR[13]-VAR[19] */ - 0x0c000000, - 0x40000000, - 0x7fff7fff, - 0x00000000, - 0x00000003, - 0x40000004, - 0x43ffffff, - - /* INC[0]-INC[6] */ - 0x40000000, - 0xe0000000, - 0xe0000000, - 0xa0000008, - 0x20000000, - 0x00000000, - 0x4000ffff, -}; - diff --git a/arch/powerpc/sysdev/bestcomm/bcom_gen_bd_rx_task.c b/arch/powerpc/sysdev/bestcomm/bcom_gen_bd_rx_task.c deleted file mode 100644 index efee022b025..00000000000 --- a/arch/powerpc/sysdev/bestcomm/bcom_gen_bd_rx_task.c +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Bestcomm GenBD RX task microcode - * - * Copyright (C) 2006 AppSpec Computer Technologies Corp. - * Jeff Gibbons <jeff.gibbons@appspec.com> - * Copyright (c) 2004 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- * - * Based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex - * on Tue Mar 4 10:14:12 2006 GMT - * - */ - -#include <asm/types.h> - -/* - * The header consists of the following fields: - * u32 magic; - * u8 desc_size; - * u8 var_size; - * u8 inc_size; - * u8 first_var; - * u8 reserved[8]; - * - * The size fields contain the number of 32-bit words. - */ - -u32 bcom_gen_bd_rx_task[] = { - /* header */ - 0x4243544b, - 0x0d020409, - 0x00000000, - 0x00000000, - - /* Task descriptors */ - 0x808220da, /* LCD: idx0 = var1, idx1 = var4; idx1 <= var3; idx0 += inc3, idx1 += inc2 */ - 0x13e01010, /* DRD1A: var4 = var2; FN=0 MORE init=31 WS=0 RS=0 */ - 0xb880025b, /* LCD: idx2 = *idx1, idx3 = var0; idx2 < var9; idx2 += inc3, idx3 += inc3 */ - 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */ - 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */ - 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */ - 0xd9190240, /* LCDEXT: idx2 = idx2; idx2 > var9; idx2 += inc0 */ - 0xb8c5e009, /* LCD: idx3 = *(idx1 + var00000015); ; idx3 += inc1 */ - 0x07fecf80, /* DRD1A: *idx3 = *idx0; FN=0 INT init=31 WS=3 RS=3 */ - 0x99190024, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc4 */ - 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */ - 0x0c4cf889, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var9) */ - 0x000001f8, /* NOP */ - - /* VAR[9]-VAR[10] */ - 0x40000000, - 0x7fff7fff, - - /* INC[0]-INC[3] */ - 0x40000000, - 0xe0000000, - 0xa0000008, - 0x20000000, -}; - diff --git a/arch/powerpc/sysdev/bestcomm/bcom_gen_bd_tx_task.c b/arch/powerpc/sysdev/bestcomm/bcom_gen_bd_tx_task.c deleted file mode 100644 index c605aa42ecb..00000000000 --- a/arch/powerpc/sysdev/bestcomm/bcom_gen_bd_tx_task.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Bestcomm GenBD TX task microcode - * - * Copyright (C) 2006 AppSpec Computer Technologies Corp. - * Jeff Gibbons <jeff.gibbons@appspec.com> - * Copyright (c) 2004 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex - * on Tue Mar 4 10:14:12 2006 GMT - * - */ - -#include <asm/types.h> - -/* - * The header consists of the following fields: - * u32 magic; - * u8 desc_size; - * u8 var_size; - * u8 inc_size; - * u8 first_var; - * u8 reserved[8]; - * - * The size fields contain the number of 32-bit words. 
- */ - -u32 bcom_gen_bd_tx_task[] = { - /* header */ - 0x4243544b, - 0x0f040609, - 0x00000000, - 0x00000000, - - /* Task descriptors */ - 0x800220e3, /* LCD: idx0 = var0, idx1 = var4; idx1 <= var3; idx0 += inc4, idx1 += inc3 */ - 0x13e01010, /* DRD1A: var4 = var2; FN=0 MORE init=31 WS=0 RS=0 */ - 0xb8808264, /* LCD: idx2 = *idx1, idx3 = var1; idx2 < var9; idx2 += inc4, idx3 += inc4 */ - 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */ - 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */ - 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */ - 0xd9190300, /* LCDEXT: idx2 = idx2; idx2 > var12; idx2 += inc0 */ - 0xb8c5e009, /* LCD: idx3 = *(idx1 + var00000015); ; idx3 += inc1 */ - 0x03fec398, /* DRD1A: *idx0 = *idx3; FN=0 init=31 WS=3 RS=3 */ - 0x9919826a, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var9; idx2 += inc5, idx3 += inc2 */ - 0x0feac398, /* DRD1A: *idx0 = *idx3; FN=0 TFD INT init=31 WS=1 RS=1 */ - 0x99190036, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc6 */ - 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */ - 0x0c4cf889, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var9) */ - 0x000001f8, /* NOP */ - - /* VAR[9]-VAR[12] */ - 0x40000000, - 0x7fff7fff, - 0x00000000, - 0x40000004, - - /* INC[0]-INC[5] */ - 0x40000000, - 0xe0000000, - 0xe0000000, - 0xa0000008, - 0x20000000, - 0x4000ffff, -}; - diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.c b/arch/powerpc/sysdev/bestcomm/bestcomm.c deleted file mode 100644 index b3fbb271be8..00000000000 --- a/arch/powerpc/sysdev/bestcomm/bestcomm.c +++ /dev/null @@ -1,532 +0,0 @@ -/* - * Driver for MPC52xx processor BestComm peripheral controller - * - * - * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com> - * Copyright (C) 2005 Varma Electronics Oy, - * ( by Andrey Volkov <avolkov@varma-el.com> ) - * Copyright (C) 2003-2004 MontaVista, Software, Inc. - * ( by Dale Farnsworth <dfarnsworth@mvista.com> ) - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. 
- */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/of.h> -#include <linux/of_device.h> -#include <linux/of_platform.h> -#include <asm/io.h> -#include <asm/irq.h> -#include <asm/mpc52xx.h> - -#include "sram.h" -#include "bestcomm_priv.h" -#include "bestcomm.h" - -#define DRIVER_NAME "bestcomm-core" - -/* MPC5200 device tree match tables */ -static struct of_device_id mpc52xx_sram_ids[] __devinitdata = { - { .compatible = "fsl,mpc5200-sram", }, - { .compatible = "mpc5200-sram", }, - {} -}; - - -struct bcom_engine *bcom_eng = NULL; -EXPORT_SYMBOL_GPL(bcom_eng); /* needed for inline functions */ - -/* ======================================================================== */ -/* Public and private API */ -/* ======================================================================== */ - -/* Private API */ - -struct bcom_task * -bcom_task_alloc(int bd_count, int bd_size, int priv_size) -{ - int i, tasknum = -1; - struct bcom_task *tsk; - - /* Don't try to do anything if bestcomm init failed */ - if (!bcom_eng) - return NULL; - - /* Get and reserve a task num */ - spin_lock(&bcom_eng->lock); - - for (i=0; i<BCOM_MAX_TASKS; i++) - if (!bcom_eng->tdt[i].stop) { /* we use stop as a marker */ - bcom_eng->tdt[i].stop = 0xfffffffful; /* dummy addr */ - tasknum = i; - break; - } - - spin_unlock(&bcom_eng->lock); - - if (tasknum < 0) - return NULL; - - /* Allocate our structure */ - tsk = kzalloc(sizeof(struct bcom_task) + priv_size, GFP_KERNEL); - if (!tsk) - goto error; - - tsk->tasknum = tasknum; - if (priv_size) - tsk->priv = (void*)tsk + sizeof(struct bcom_task); - - /* Get IRQ of that task */ - tsk->irq = irq_of_parse_and_map(bcom_eng->ofnode, tsk->tasknum); - if (tsk->irq == NO_IRQ) - goto error; - - /* Init the BDs, if needed */ - if (bd_count) { - tsk->cookie = kmalloc(sizeof(void*) * bd_count, GFP_KERNEL); - if (!tsk->cookie) - goto error; - - tsk->bd = bcom_sram_alloc(bd_count * bd_size, 4, &tsk->bd_pa); - if (!tsk->bd) - goto error; - memset(tsk->bd, 0x00, bd_count * bd_size); - - tsk->num_bd = bd_count; - tsk->bd_size = bd_size; - } - - return tsk; - -error: - if (tsk) { - if (tsk->irq != NO_IRQ) - irq_dispose_mapping(tsk->irq); - bcom_sram_free(tsk->bd); - kfree(tsk->cookie); - kfree(tsk); - } - - bcom_eng->tdt[tasknum].stop = 0; - - return NULL; -} -EXPORT_SYMBOL_GPL(bcom_task_alloc); - -void -bcom_task_free(struct bcom_task *tsk) -{ - /* Stop the task */ - bcom_disable_task(tsk->tasknum); - - /* Clear TDT */ - bcom_eng->tdt[tsk->tasknum].start = 0; - bcom_eng->tdt[tsk->tasknum].stop = 0; - - /* Free everything */ - irq_dispose_mapping(tsk->irq); - bcom_sram_free(tsk->bd); - kfree(tsk->cookie); - kfree(tsk); -} -EXPORT_SYMBOL_GPL(bcom_task_free); - -int -bcom_load_image(int task, u32 *task_image) -{ - struct bcom_task_header *hdr = (struct bcom_task_header *)task_image; - struct bcom_tdt *tdt; - u32 *desc, *var, *inc; - u32 *desc_src, *var_src, *inc_src; - - /* Safety checks */ - if (hdr->magic != BCOM_TASK_MAGIC) { - printk(KERN_ERR DRIVER_NAME - ": Trying to load invalid microcode\n"); - return -EINVAL; - } - - if ((task < 0) || (task >= BCOM_MAX_TASKS)) { - printk(KERN_ERR DRIVER_NAME - ": Trying to load invalid task %d\n", task); - return -EINVAL; - } - - /* Initial load or reload */ - tdt = &bcom_eng->tdt[task]; - - if (tdt->start) { - desc = bcom_task_desc(task); - if (hdr->desc_size != bcom_task_num_descs(task)) { - printk(KERN_ERR DRIVER_NAME - ": Trying to reload wrong task image " - "(%d size %d/%d)!\n", - task, - 
hdr->desc_size, - bcom_task_num_descs(task)); - return -EINVAL; - } - } else { - phys_addr_t start_pa; - - desc = bcom_sram_alloc(hdr->desc_size * sizeof(u32), 4, &start_pa); - if (!desc) - return -ENOMEM; - - tdt->start = start_pa; - tdt->stop = start_pa + ((hdr->desc_size-1) * sizeof(u32)); - } - - var = bcom_task_var(task); - inc = bcom_task_inc(task); - - /* Clear & copy */ - memset(var, 0x00, BCOM_VAR_SIZE); - memset(inc, 0x00, BCOM_INC_SIZE); - - desc_src = (u32 *)(hdr + 1); - var_src = desc_src + hdr->desc_size; - inc_src = var_src + hdr->var_size; - - memcpy(desc, desc_src, hdr->desc_size * sizeof(u32)); - memcpy(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32)); - memcpy(inc, inc_src, hdr->inc_size * sizeof(u32)); - - return 0; -} -EXPORT_SYMBOL_GPL(bcom_load_image); - -void -bcom_set_initiator(int task, int initiator) -{ - int i; - int num_descs; - u32 *desc; - int next_drd_has_initiator; - - bcom_set_tcr_initiator(task, initiator); - - /* Just setting tcr is apparently not enough due to some problem */ - /* with it. So we just go thru all the microcode and replace in */ - /* the DRD directly */ - - desc = bcom_task_desc(task); - next_drd_has_initiator = 1; - num_descs = bcom_task_num_descs(task); - - for (i=0; i<num_descs; i++, desc++) { - if (!bcom_desc_is_drd(*desc)) - continue; - if (next_drd_has_initiator) - if (bcom_desc_initiator(*desc) != BCOM_INITIATOR_ALWAYS) - bcom_set_desc_initiator(desc, initiator); - next_drd_has_initiator = !bcom_drd_is_extended(*desc); - } -} -EXPORT_SYMBOL_GPL(bcom_set_initiator); - - -/* Public API */ - -void -bcom_enable(struct bcom_task *tsk) -{ - bcom_enable_task(tsk->tasknum); -} -EXPORT_SYMBOL_GPL(bcom_enable); - -void -bcom_disable(struct bcom_task *tsk) -{ - bcom_disable_task(tsk->tasknum); -} -EXPORT_SYMBOL_GPL(bcom_disable); - - -/* ======================================================================== */ -/* Engine init/cleanup */ -/* ======================================================================== */ - -/* Function Descriptor table */ -/* this will need to be updated if Freescale changes their task code FDT */ -static u32 fdt_ops[] = { - 0xa0045670, /* FDT[48] - load_acc() */ - 0x80045670, /* FDT[49] - unload_acc() */ - 0x21800000, /* FDT[50] - and() */ - 0x21e00000, /* FDT[51] - or() */ - 0x21500000, /* FDT[52] - xor() */ - 0x21400000, /* FDT[53] - andn() */ - 0x21500000, /* FDT[54] - not() */ - 0x20400000, /* FDT[55] - add() */ - 0x20500000, /* FDT[56] - sub() */ - 0x20800000, /* FDT[57] - lsh() */ - 0x20a00000, /* FDT[58] - rsh() */ - 0xc0170000, /* FDT[59] - crc8() */ - 0xc0145670, /* FDT[60] - crc16() */ - 0xc0345670, /* FDT[61] - crc32() */ - 0xa0076540, /* FDT[62] - endian32() */ - 0xa0000760, /* FDT[63] - endian16() */ -}; - - -static int __devinit -bcom_engine_init(void) -{ - int task; - phys_addr_t tdt_pa, ctx_pa, var_pa, fdt_pa; - unsigned int tdt_size, ctx_size, var_size, fdt_size; - - /* Allocate & clear SRAM zones for FDT, TDTs, contexts and vars/incs */ - tdt_size = BCOM_MAX_TASKS * sizeof(struct bcom_tdt); - ctx_size = BCOM_MAX_TASKS * BCOM_CTX_SIZE; - var_size = BCOM_MAX_TASKS * (BCOM_VAR_SIZE + BCOM_INC_SIZE); - fdt_size = BCOM_FDT_SIZE; - - bcom_eng->tdt = bcom_sram_alloc(tdt_size, sizeof(u32), &tdt_pa); - bcom_eng->ctx = bcom_sram_alloc(ctx_size, BCOM_CTX_ALIGN, &ctx_pa); - bcom_eng->var = bcom_sram_alloc(var_size, BCOM_VAR_ALIGN, &var_pa); - bcom_eng->fdt = bcom_sram_alloc(fdt_size, BCOM_FDT_ALIGN, &fdt_pa); - - if (!bcom_eng->tdt || !bcom_eng->ctx || !bcom_eng->var || !bcom_eng->fdt) { - 
printk(KERN_ERR "DMA: SRAM alloc failed in engine init !\n"); - - bcom_sram_free(bcom_eng->tdt); - bcom_sram_free(bcom_eng->ctx); - bcom_sram_free(bcom_eng->var); - bcom_sram_free(bcom_eng->fdt); - - return -ENOMEM; - } - - memset(bcom_eng->tdt, 0x00, tdt_size); - memset(bcom_eng->ctx, 0x00, ctx_size); - memset(bcom_eng->var, 0x00, var_size); - memset(bcom_eng->fdt, 0x00, fdt_size); - - /* Copy the FDT for the EU#3 */ - memcpy(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops)); - - /* Initialize Task base structure */ - for (task=0; task<BCOM_MAX_TASKS; task++) - { - out_be16(&bcom_eng->regs->tcr[task], 0); - out_8(&bcom_eng->regs->ipr[task], 0); - - bcom_eng->tdt[task].context = ctx_pa; - bcom_eng->tdt[task].var = var_pa; - bcom_eng->tdt[task].fdt = fdt_pa; - - var_pa += BCOM_VAR_SIZE + BCOM_INC_SIZE; - ctx_pa += BCOM_CTX_SIZE; - } - - out_be32(&bcom_eng->regs->taskBar, tdt_pa); - - /* Init 'always' initiator */ - out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ALWAYS], BCOM_IPR_ALWAYS); - - /* Disable COMM Bus Prefetch on the original 5200; it's broken */ - if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR) - bcom_disable_prefetch(); - - /* Init lock */ - spin_lock_init(&bcom_eng->lock); - - return 0; -} - -static void -bcom_engine_cleanup(void) -{ - int task; - - /* Stop all tasks */ - for (task=0; task<BCOM_MAX_TASKS; task++) - { - out_be16(&bcom_eng->regs->tcr[task], 0); - out_8(&bcom_eng->regs->ipr[task], 0); - } - - out_be32(&bcom_eng->regs->taskBar, 0ul); - - /* Release the SRAM zones */ - bcom_sram_free(bcom_eng->tdt); - bcom_sram_free(bcom_eng->ctx); - bcom_sram_free(bcom_eng->var); - bcom_sram_free(bcom_eng->fdt); -} - - -/* ======================================================================== */ -/* OF platform driver */ -/* ======================================================================== */ - -static int __devinit mpc52xx_bcom_probe(struct platform_device *op) -{ - struct device_node *ofn_sram; - struct resource res_bcom; - - int rv; - - /* Inform user we're ok so far */ - printk(KERN_INFO "DMA: MPC52xx BestComm driver\n"); - - /* Get the bestcomm node */ - of_node_get(op->dev.of_node); - - /* Prepare SRAM */ - ofn_sram = of_find_matching_node(NULL, mpc52xx_sram_ids); - if (!ofn_sram) { - printk(KERN_ERR DRIVER_NAME ": " - "No SRAM found in device tree\n"); - rv = -ENODEV; - goto error_ofput; - } - rv = bcom_sram_init(ofn_sram, DRIVER_NAME); - of_node_put(ofn_sram); - - if (rv) { - printk(KERN_ERR DRIVER_NAME ": " - "Error in SRAM init\n"); - goto error_ofput; - } - - /* Get a clean struct */ - bcom_eng = kzalloc(sizeof(struct bcom_engine), GFP_KERNEL); - if (!bcom_eng) { - printk(KERN_ERR DRIVER_NAME ": " - "Can't allocate state structure\n"); - rv = -ENOMEM; - goto error_sramclean; - } - - /* Save the node */ - bcom_eng->ofnode = op->dev.of_node; - - /* Get, reserve & map io */ - if (of_address_to_resource(op->dev.of_node, 0, &res_bcom)) { - printk(KERN_ERR DRIVER_NAME ": " - "Can't get resource\n"); - rv = -EINVAL; - goto error_sramclean; - } - - if (!request_mem_region(res_bcom.start, sizeof(struct mpc52xx_sdma), - DRIVER_NAME)) { - printk(KERN_ERR DRIVER_NAME ": " - "Can't request registers region\n"); - rv = -EBUSY; - goto error_sramclean; - } - - bcom_eng->regs_base = res_bcom.start; - bcom_eng->regs = ioremap(res_bcom.start, sizeof(struct mpc52xx_sdma)); - if (!bcom_eng->regs) { - printk(KERN_ERR DRIVER_NAME ": " - "Can't map registers\n"); - rv = -ENOMEM; - goto error_release; - } - - /* Now, do the real init */ - rv = bcom_engine_init(); - if (rv) - goto 
error_unmap; - - /* Done ! */ - printk(KERN_INFO "DMA: MPC52xx BestComm engine @%08lx ok !\n", - (long)bcom_eng->regs_base); - - return 0; - - /* Error path */ -error_unmap: - iounmap(bcom_eng->regs); -error_release: - release_mem_region(res_bcom.start, sizeof(struct mpc52xx_sdma)); -error_sramclean: - kfree(bcom_eng); - bcom_sram_cleanup(); -error_ofput: - of_node_put(op->dev.of_node); - - printk(KERN_ERR "DMA: MPC52xx BestComm init failed !\n"); - - return rv; -} - - -static int mpc52xx_bcom_remove(struct platform_device *op) -{ - /* Clean up the engine */ - bcom_engine_cleanup(); - - /* Cleanup SRAM */ - bcom_sram_cleanup(); - - /* Release regs */ - iounmap(bcom_eng->regs); - release_mem_region(bcom_eng->regs_base, sizeof(struct mpc52xx_sdma)); - - /* Release the node */ - of_node_put(bcom_eng->ofnode); - - /* Release memory */ - kfree(bcom_eng); - bcom_eng = NULL; - - return 0; -} - -static struct of_device_id mpc52xx_bcom_of_match[] = { - { .compatible = "fsl,mpc5200-bestcomm", }, - { .compatible = "mpc5200-bestcomm", }, - {}, -}; - -MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match); - - -static struct platform_driver mpc52xx_bcom_of_platform_driver = { - .probe = mpc52xx_bcom_probe, - .remove = mpc52xx_bcom_remove, - .driver = { - .name = DRIVER_NAME, - .owner = THIS_MODULE, - .of_match_table = mpc52xx_bcom_of_match, - }, -}; - - -/* ======================================================================== */ -/* Module */ -/* ======================================================================== */ - -static int __init -mpc52xx_bcom_init(void) -{ - return platform_driver_register(&mpc52xx_bcom_of_platform_driver); -} - -static void __exit -mpc52xx_bcom_exit(void) -{ - platform_driver_unregister(&mpc52xx_bcom_of_platform_driver); -} - -/* If we're not a module, we must make sure everything is setup before */ -/* anyone tries to use us ... that's why we use subsys_initcall instead */ -/* of module_init. */ -subsys_initcall(mpc52xx_bcom_init); -module_exit(mpc52xx_bcom_exit); - -MODULE_DESCRIPTION("Freescale MPC52xx BestComm DMA"); -MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>"); -MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>"); -MODULE_AUTHOR("Dale Farnsworth <dfarnsworth@mvista.com>"); -MODULE_LICENSE("GPL v2"); - diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.h b/arch/powerpc/sysdev/bestcomm/bestcomm.h deleted file mode 100644 index a0e2e6b19b5..00000000000 --- a/arch/powerpc/sysdev/bestcomm/bestcomm.h +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Public header for the MPC52xx processor BestComm driver - * - * - * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com> - * Copyright (C) 2005 Varma Electronics Oy, - * ( by Andrey Volkov <avolkov@varma-el.com> ) - * Copyright (C) 2003-2004 MontaVista, Software, Inc. - * ( by Dale Farnsworth <dfarnsworth@mvista.com> ) - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - */ - -#ifndef __BESTCOMM_H__ -#define __BESTCOMM_H__ - -/** - * struct bcom_bd - Structure describing a generic BestComm buffer descriptor - * @status: The current status of this buffer. Exact meaning depends on the - * task type - * @data: An array of u32 extra data. Size of array is task dependent. - * - * Note: Don't dereference a bcom_bd pointer as an array. The size of the - * bcom_bd is variable. Use bcom_get_bd() instead. 
- */ -struct bcom_bd { - u32 status; - u32 data[0]; /* variable payload size */ -}; - -/* ======================================================================== */ -/* Generic task management */ -/* ======================================================================== */ - -/** - * struct bcom_task - Structure describing a loaded BestComm task - * - * This structure is never built by the driver it self. It's built and - * filled the intermediate layer of the BestComm API, the task dependent - * support code. - * - * Most likely you don't need to poke around inside this structure. The - * fields are exposed in the header just for the sake of inline functions - */ -struct bcom_task { - unsigned int tasknum; - unsigned int flags; - int irq; - - struct bcom_bd *bd; - phys_addr_t bd_pa; - void **cookie; - unsigned short index; - unsigned short outdex; - unsigned int num_bd; - unsigned int bd_size; - - void* priv; -}; - -#define BCOM_FLAGS_NONE 0x00000000ul -#define BCOM_FLAGS_ENABLE_TASK (1ul << 0) - -/** - * bcom_enable - Enable a BestComm task - * @tsk: The BestComm task structure - * - * This function makes sure the given task is enabled and can be run - * by the BestComm engine as needed - */ -extern void bcom_enable(struct bcom_task *tsk); - -/** - * bcom_disable - Disable a BestComm task - * @tsk: The BestComm task structure - * - * This function disable a given task, making sure it's not executed - * by the BestComm engine. - */ -extern void bcom_disable(struct bcom_task *tsk); - - -/** - * bcom_get_task_irq - Returns the irq number of a BestComm task - * @tsk: The BestComm task structure - */ -static inline int -bcom_get_task_irq(struct bcom_task *tsk) { - return tsk->irq; -} - -/* ======================================================================== */ -/* BD based tasks helpers */ -/* ======================================================================== */ - -#define BCOM_BD_READY 0x40000000ul - -/** _bcom_next_index - Get next input index. - * @tsk: pointer to task structure - * - * Support function; Device drivers should not call this - */ -static inline int -_bcom_next_index(struct bcom_task *tsk) -{ - return ((tsk->index + 1) == tsk->num_bd) ? 0 : tsk->index + 1; -} - -/** _bcom_next_outdex - Get next output index. - * @tsk: pointer to task structure - * - * Support function; Device drivers should not call this - */ -static inline int -_bcom_next_outdex(struct bcom_task *tsk) -{ - return ((tsk->outdex + 1) == tsk->num_bd) ? 
0 : tsk->outdex + 1; -} - -/** - * bcom_queue_empty - Checks if a BestComm task BD queue is empty - * @tsk: The BestComm task structure - */ -static inline int -bcom_queue_empty(struct bcom_task *tsk) -{ - return tsk->index == tsk->outdex; -} - -/** - * bcom_queue_full - Checks if a BestComm task BD queue is full - * @tsk: The BestComm task structure - */ -static inline int -bcom_queue_full(struct bcom_task *tsk) -{ - return tsk->outdex == _bcom_next_index(tsk); -} - -/** - * bcom_get_bd - Get a BD from the queue - * @tsk: The BestComm task structure - * index: Index of the BD to fetch - */ -static inline struct bcom_bd -*bcom_get_bd(struct bcom_task *tsk, unsigned int index) -{ - /* A cast to (void*) so the address can be incremented by the - * real size instead of by sizeof(struct bcom_bd) */ - return ((void *)tsk->bd) + (index * tsk->bd_size); -} - -/** - * bcom_buffer_done - Checks if a BestComm - * @tsk: The BestComm task structure - */ -static inline int -bcom_buffer_done(struct bcom_task *tsk) -{ - struct bcom_bd *bd; - if (bcom_queue_empty(tsk)) - return 0; - - bd = bcom_get_bd(tsk, tsk->outdex); - return !(bd->status & BCOM_BD_READY); -} - -/** - * bcom_prepare_next_buffer - clear status of next available buffer. - * @tsk: The BestComm task structure - * - * Returns pointer to next buffer descriptor - */ -static inline struct bcom_bd * -bcom_prepare_next_buffer(struct bcom_task *tsk) -{ - struct bcom_bd *bd; - - bd = bcom_get_bd(tsk, tsk->index); - bd->status = 0; /* cleanup last status */ - return bd; -} - -static inline void -bcom_submit_next_buffer(struct bcom_task *tsk, void *cookie) -{ - struct bcom_bd *bd = bcom_get_bd(tsk, tsk->index); - - tsk->cookie[tsk->index] = cookie; - mb(); /* ensure the bd is really up-to-date */ - bd->status |= BCOM_BD_READY; - tsk->index = _bcom_next_index(tsk); - if (tsk->flags & BCOM_FLAGS_ENABLE_TASK) - bcom_enable(tsk); -} - -static inline void * -bcom_retrieve_buffer(struct bcom_task *tsk, u32 *p_status, struct bcom_bd **p_bd) -{ - void *cookie = tsk->cookie[tsk->outdex]; - struct bcom_bd *bd = bcom_get_bd(tsk, tsk->outdex); - - if (p_status) - *p_status = bd->status; - if (p_bd) - *p_bd = bd; - tsk->outdex = _bcom_next_outdex(tsk); - return cookie; -} - -#endif /* __BESTCOMM_H__ */ diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h b/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h deleted file mode 100644 index 3b52f3ffbdf..00000000000 --- a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h +++ /dev/null @@ -1,350 +0,0 @@ -/* - * Private header for the MPC52xx processor BestComm driver - * - * By private, we mean that driver should not use it directly. It's meant - * to be used by the BestComm engine driver itself and by the intermediate - * layer between the core and the drivers. - * - * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com> - * Copyright (C) 2005 Varma Electronics Oy, - * ( by Andrey Volkov <avolkov@varma-el.com> ) - * Copyright (C) 2003-2004 MontaVista, Software, Inc. - * ( by Dale Farnsworth <dfarnsworth@mvista.com> ) - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. 
- */ - -#ifndef __BESTCOMM_PRIV_H__ -#define __BESTCOMM_PRIV_H__ - -#include <linux/spinlock.h> -#include <linux/of.h> -#include <asm/io.h> -#include <asm/mpc52xx.h> - -#include "sram.h" - - -/* ======================================================================== */ -/* Engine related stuff */ -/* ======================================================================== */ - -/* Zones sizes and needed alignments */ -#define BCOM_MAX_TASKS 16 -#define BCOM_MAX_VAR 24 -#define BCOM_MAX_INC 8 -#define BCOM_MAX_FDT 64 -#define BCOM_MAX_CTX 20 -#define BCOM_CTX_SIZE (BCOM_MAX_CTX * sizeof(u32)) -#define BCOM_CTX_ALIGN 0x100 -#define BCOM_VAR_SIZE (BCOM_MAX_VAR * sizeof(u32)) -#define BCOM_INC_SIZE (BCOM_MAX_INC * sizeof(u32)) -#define BCOM_VAR_ALIGN 0x80 -#define BCOM_FDT_SIZE (BCOM_MAX_FDT * sizeof(u32)) -#define BCOM_FDT_ALIGN 0x100 - -/** - * struct bcom_tdt - Task Descriptor Table Entry - * - */ -struct bcom_tdt { - u32 start; - u32 stop; - u32 var; - u32 fdt; - u32 exec_status; /* used internally by BestComm engine */ - u32 mvtp; /* used internally by BestComm engine */ - u32 context; - u32 litbase; -}; - -/** - * struct bcom_engine - * - * This holds all info needed globaly to handle the engine - */ -struct bcom_engine { - struct device_node *ofnode; - struct mpc52xx_sdma __iomem *regs; - phys_addr_t regs_base; - - struct bcom_tdt *tdt; - u32 *ctx; - u32 *var; - u32 *fdt; - - spinlock_t lock; -}; - -extern struct bcom_engine *bcom_eng; - - -/* ======================================================================== */ -/* Tasks related stuff */ -/* ======================================================================== */ - -/* Tasks image header */ -#define BCOM_TASK_MAGIC 0x4243544B /* 'BCTK' */ - -struct bcom_task_header { - u32 magic; - u8 desc_size; /* the size fields */ - u8 var_size; /* are given in number */ - u8 inc_size; /* of 32-bits words */ - u8 first_var; - u8 reserved[8]; -}; - -/* Descriptors structure & co */ -#define BCOM_DESC_NOP 0x000001f8 -#define BCOM_LCD_MASK 0x80000000 -#define BCOM_DRD_EXTENDED 0x40000000 -#define BCOM_DRD_INITIATOR_SHIFT 21 - -/* Tasks pragma */ -#define BCOM_PRAGMA_BIT_RSV 7 /* reserved pragma bit */ -#define BCOM_PRAGMA_BIT_PRECISE_INC 6 /* increment 0=when possible, */ - /* 1=iter end */ -#define BCOM_PRAGMA_BIT_RST_ERROR_NO 5 /* don't reset errors on */ - /* task enable */ -#define BCOM_PRAGMA_BIT_PACK 4 /* pack data enable */ -#define BCOM_PRAGMA_BIT_INTEGER 3 /* data alignment */ - /* 0=frac(msb), 1=int(lsb) */ -#define BCOM_PRAGMA_BIT_SPECREAD 2 /* XLB speculative read */ -#define BCOM_PRAGMA_BIT_CW 1 /* write line buffer enable */ -#define BCOM_PRAGMA_BIT_RL 0 /* read line buffer enable */ - - /* Looks like XLB speculative read generates XLB errors when a buffer - * is at the end of the physical memory. i.e. when accessing the - * lasts words, the engine tries to prefetch the next but there is no - * next ... 
- */ -#define BCOM_STD_PRAGMA ((0 << BCOM_PRAGMA_BIT_RSV) | \ - (0 << BCOM_PRAGMA_BIT_PRECISE_INC) | \ - (0 << BCOM_PRAGMA_BIT_RST_ERROR_NO) | \ - (0 << BCOM_PRAGMA_BIT_PACK) | \ - (0 << BCOM_PRAGMA_BIT_INTEGER) | \ - (0 << BCOM_PRAGMA_BIT_SPECREAD) | \ - (1 << BCOM_PRAGMA_BIT_CW) | \ - (1 << BCOM_PRAGMA_BIT_RL)) - -#define BCOM_PCI_PRAGMA ((0 << BCOM_PRAGMA_BIT_RSV) | \ - (0 << BCOM_PRAGMA_BIT_PRECISE_INC) | \ - (0 << BCOM_PRAGMA_BIT_RST_ERROR_NO) | \ - (0 << BCOM_PRAGMA_BIT_PACK) | \ - (1 << BCOM_PRAGMA_BIT_INTEGER) | \ - (0 << BCOM_PRAGMA_BIT_SPECREAD) | \ - (1 << BCOM_PRAGMA_BIT_CW) | \ - (1 << BCOM_PRAGMA_BIT_RL)) - -#define BCOM_ATA_PRAGMA BCOM_STD_PRAGMA -#define BCOM_CRC16_DP_0_PRAGMA BCOM_STD_PRAGMA -#define BCOM_CRC16_DP_1_PRAGMA BCOM_STD_PRAGMA -#define BCOM_FEC_RX_BD_PRAGMA BCOM_STD_PRAGMA -#define BCOM_FEC_TX_BD_PRAGMA BCOM_STD_PRAGMA -#define BCOM_GEN_DP_0_PRAGMA BCOM_STD_PRAGMA -#define BCOM_GEN_DP_1_PRAGMA BCOM_STD_PRAGMA -#define BCOM_GEN_DP_2_PRAGMA BCOM_STD_PRAGMA -#define BCOM_GEN_DP_3_PRAGMA BCOM_STD_PRAGMA -#define BCOM_GEN_DP_BD_0_PRAGMA BCOM_STD_PRAGMA -#define BCOM_GEN_DP_BD_1_PRAGMA BCOM_STD_PRAGMA -#define BCOM_GEN_RX_BD_PRAGMA BCOM_STD_PRAGMA -#define BCOM_GEN_TX_BD_PRAGMA BCOM_STD_PRAGMA -#define BCOM_GEN_LPC_PRAGMA BCOM_STD_PRAGMA -#define BCOM_PCI_RX_PRAGMA BCOM_PCI_PRAGMA -#define BCOM_PCI_TX_PRAGMA BCOM_PCI_PRAGMA - -/* Initiators number */ -#define BCOM_INITIATOR_ALWAYS 0 -#define BCOM_INITIATOR_SCTMR_0 1 -#define BCOM_INITIATOR_SCTMR_1 2 -#define BCOM_INITIATOR_FEC_RX 3 -#define BCOM_INITIATOR_FEC_TX 4 -#define BCOM_INITIATOR_ATA_RX 5 -#define BCOM_INITIATOR_ATA_TX 6 -#define BCOM_INITIATOR_SCPCI_RX 7 -#define BCOM_INITIATOR_SCPCI_TX 8 -#define BCOM_INITIATOR_PSC3_RX 9 -#define BCOM_INITIATOR_PSC3_TX 10 -#define BCOM_INITIATOR_PSC2_RX 11 -#define BCOM_INITIATOR_PSC2_TX 12 -#define BCOM_INITIATOR_PSC1_RX 13 -#define BCOM_INITIATOR_PSC1_TX 14 -#define BCOM_INITIATOR_SCTMR_2 15 -#define BCOM_INITIATOR_SCLPC 16 -#define BCOM_INITIATOR_PSC5_RX 17 -#define BCOM_INITIATOR_PSC5_TX 18 -#define BCOM_INITIATOR_PSC4_RX 19 -#define BCOM_INITIATOR_PSC4_TX 20 -#define BCOM_INITIATOR_I2C2_RX 21 -#define BCOM_INITIATOR_I2C2_TX 22 -#define BCOM_INITIATOR_I2C1_RX 23 -#define BCOM_INITIATOR_I2C1_TX 24 -#define BCOM_INITIATOR_PSC6_RX 25 -#define BCOM_INITIATOR_PSC6_TX 26 -#define BCOM_INITIATOR_IRDA_RX 25 -#define BCOM_INITIATOR_IRDA_TX 26 -#define BCOM_INITIATOR_SCTMR_3 27 -#define BCOM_INITIATOR_SCTMR_4 28 -#define BCOM_INITIATOR_SCTMR_5 29 -#define BCOM_INITIATOR_SCTMR_6 30 -#define BCOM_INITIATOR_SCTMR_7 31 - -/* Initiators priorities */ -#define BCOM_IPR_ALWAYS 7 -#define BCOM_IPR_SCTMR_0 2 -#define BCOM_IPR_SCTMR_1 2 -#define BCOM_IPR_FEC_RX 6 -#define BCOM_IPR_FEC_TX 5 -#define BCOM_IPR_ATA_RX 7 -#define BCOM_IPR_ATA_TX 7 -#define BCOM_IPR_SCPCI_RX 2 -#define BCOM_IPR_SCPCI_TX 2 -#define BCOM_IPR_PSC3_RX 2 -#define BCOM_IPR_PSC3_TX 2 -#define BCOM_IPR_PSC2_RX 2 -#define BCOM_IPR_PSC2_TX 2 -#define BCOM_IPR_PSC1_RX 2 -#define BCOM_IPR_PSC1_TX 2 -#define BCOM_IPR_SCTMR_2 2 -#define BCOM_IPR_SCLPC 2 -#define BCOM_IPR_PSC5_RX 2 -#define BCOM_IPR_PSC5_TX 2 -#define BCOM_IPR_PSC4_RX 2 -#define BCOM_IPR_PSC4_TX 2 -#define BCOM_IPR_I2C2_RX 2 -#define BCOM_IPR_I2C2_TX 2 -#define BCOM_IPR_I2C1_RX 2 -#define BCOM_IPR_I2C1_TX 2 -#define BCOM_IPR_PSC6_RX 2 -#define BCOM_IPR_PSC6_TX 2 -#define BCOM_IPR_IRDA_RX 2 -#define BCOM_IPR_IRDA_TX 2 -#define BCOM_IPR_SCTMR_3 2 -#define BCOM_IPR_SCTMR_4 2 -#define BCOM_IPR_SCTMR_5 2 -#define BCOM_IPR_SCTMR_6 2 -#define BCOM_IPR_SCTMR_7 2 - - 
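For reference, a minimal sketch of how a client driver is expected to drive the buffer-descriptor ring exported by bestcomm.h above (the MPC52xx FEC ethernet driver follows this pattern); the example_ helpers, the skb handling and the use of virt_to_phys() are illustrative assumptions only, a real driver maps its buffers through the DMA API:

        #include <linux/errno.h>
        #include <linux/skbuff.h>
        #include <asm/io.h>
        #include "bestcomm.h"
        #include "fec.h"

        /* Submit one frame on a TX task obtained from bcom_fec_tx_init() */
        static int example_xmit(struct bcom_task *tsk, struct sk_buff *skb)
        {
                struct bcom_fec_bd *bd;

                if (bcom_queue_full(tsk))
                        return -EBUSY;          /* ring is full, retry later */

                /* Claim the next descriptor (its status is cleared for us) */
                bd = (struct bcom_fec_bd *)bcom_prepare_next_buffer(tsk);
                bd->status = skb->len | BCOM_FEC_TX_BD_TFD | BCOM_FEC_TX_BD_TC;
                bd->skb_pa = virt_to_phys(skb->data);

                /* Mark it ready and kick the engine; skb is kept as the cookie */
                bcom_submit_next_buffer(tsk, skb);
                return 0;
        }

        /* Reclaim descriptors the engine has finished with (e.g. from the IRQ) */
        static void example_tx_complete(struct bcom_task *tsk)
        {
                while (bcom_buffer_done(tsk)) {
                        struct sk_buff *skb = bcom_retrieve_buffer(tsk, NULL, NULL);

                        dev_kfree_skb_irq(skb);
                }
        }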
-/* ======================================================================== */ -/* API */ -/* ======================================================================== */ - -extern struct bcom_task *bcom_task_alloc(int bd_count, int bd_size, int priv_size); -extern void bcom_task_free(struct bcom_task *tsk); -extern int bcom_load_image(int task, u32 *task_image); -extern void bcom_set_initiator(int task, int initiator); - - -#define TASK_ENABLE 0x8000 - -/** - * bcom_disable_prefetch - Hook to disable bus prefetching - * - * ATA DMA and the original MPC5200 need this due to silicon bugs. At the - * moment disabling prefetch is a one-way street. There is no mechanism - * in place to turn prefetch back on after it has been disabled. There is - * no reason it couldn't be done, it would just be more complex to implement. - */ -static inline void bcom_disable_prefetch(void) -{ - u16 regval; - - regval = in_be16(&bcom_eng->regs->PtdCntrl); - out_be16(&bcom_eng->regs->PtdCntrl, regval | 1); -}; - -static inline void -bcom_enable_task(int task) -{ - u16 reg; - reg = in_be16(&bcom_eng->regs->tcr[task]); - out_be16(&bcom_eng->regs->tcr[task], reg | TASK_ENABLE); -} - -static inline void -bcom_disable_task(int task) -{ - u16 reg = in_be16(&bcom_eng->regs->tcr[task]); - out_be16(&bcom_eng->regs->tcr[task], reg & ~TASK_ENABLE); -} - - -static inline u32 * -bcom_task_desc(int task) -{ - return bcom_sram_pa2va(bcom_eng->tdt[task].start); -} - -static inline int -bcom_task_num_descs(int task) -{ - return (bcom_eng->tdt[task].stop - bcom_eng->tdt[task].start)/sizeof(u32) + 1; -} - -static inline u32 * -bcom_task_var(int task) -{ - return bcom_sram_pa2va(bcom_eng->tdt[task].var); -} - -static inline u32 * -bcom_task_inc(int task) -{ - return &bcom_task_var(task)[BCOM_MAX_VAR]; -} - - -static inline int -bcom_drd_is_extended(u32 desc) -{ - return (desc) & BCOM_DRD_EXTENDED; -} - -static inline int -bcom_desc_is_drd(u32 desc) -{ - return !(desc & BCOM_LCD_MASK) && desc != BCOM_DESC_NOP; -} - -static inline int -bcom_desc_initiator(u32 desc) -{ - return (desc >> BCOM_DRD_INITIATOR_SHIFT) & 0x1f; -} - -static inline void -bcom_set_desc_initiator(u32 *desc, int initiator) -{ - *desc = (*desc & ~(0x1f << BCOM_DRD_INITIATOR_SHIFT)) | - ((initiator & 0x1f) << BCOM_DRD_INITIATOR_SHIFT); -} - - -static inline void -bcom_set_task_pragma(int task, int pragma) -{ - u32 *fdt = &bcom_eng->tdt[task].fdt; - *fdt = (*fdt & ~0xff) | pragma; -} - -static inline void -bcom_set_task_auto_start(int task, int next_task) -{ - u16 __iomem *tcr = &bcom_eng->regs->tcr[task]; - out_be16(tcr, (in_be16(tcr) & ~0xff) | 0x00c0 | next_task); -} - -static inline void -bcom_set_tcr_initiator(int task, int initiator) -{ - u16 __iomem *tcr = &bcom_eng->regs->tcr[task]; - out_be16(tcr, (in_be16(tcr) & ~0x1f00) | ((initiator & 0x1f) << 8)); -} - - -#endif /* __BESTCOMM_PRIV_H__ */ - diff --git a/arch/powerpc/sysdev/bestcomm/fec.c b/arch/powerpc/sysdev/bestcomm/fec.c deleted file mode 100644 index 957a988d23e..00000000000 --- a/arch/powerpc/sysdev/bestcomm/fec.c +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Bestcomm FEC tasks driver - * - * - * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com> - * Copyright (C) 2003-2004 MontaVista, Software, Inc. - * ( by Dale Farnsworth <dfarnsworth@mvista.com> ) - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. 
- */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/types.h> -#include <asm/io.h> - -#include "bestcomm.h" -#include "bestcomm_priv.h" -#include "fec.h" - - -/* ======================================================================== */ -/* Task image/var/inc */ -/* ======================================================================== */ - -/* fec tasks images */ -extern u32 bcom_fec_rx_task[]; -extern u32 bcom_fec_tx_task[]; - -/* rx task vars that need to be set before enabling the task */ -struct bcom_fec_rx_var { - u32 enable; /* (u16*) address of task's control register */ - u32 fifo; /* (u32*) address of fec's fifo */ - u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */ - u32 bd_last; /* (struct bcom_bd*) end of ring buffer */ - u32 bd_start; /* (struct bcom_bd*) current bd */ - u32 buffer_size; /* size of receive buffer */ -}; - -/* rx task incs that need to be set before enabling the task */ -struct bcom_fec_rx_inc { - u16 pad0; - s16 incr_bytes; - u16 pad1; - s16 incr_dst; - u16 pad2; - s16 incr_dst_ma; -}; - -/* tx task vars that need to be set before enabling the task */ -struct bcom_fec_tx_var { - u32 DRD; /* (u32*) address of self-modified DRD */ - u32 fifo; /* (u32*) address of fec's fifo */ - u32 enable; /* (u16*) address of task's control register */ - u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */ - u32 bd_last; /* (struct bcom_bd*) end of ring buffer */ - u32 bd_start; /* (struct bcom_bd*) current bd */ - u32 buffer_size; /* set by uCode for each packet */ -}; - -/* tx task incs that need to be set before enabling the task */ -struct bcom_fec_tx_inc { - u16 pad0; - s16 incr_bytes; - u16 pad1; - s16 incr_src; - u16 pad2; - s16 incr_src_ma; -}; - -/* private structure in the task */ -struct bcom_fec_priv { - phys_addr_t fifo; - int maxbufsize; -}; - - -/* ======================================================================== */ -/* Task support code */ -/* ======================================================================== */ - -struct bcom_task * -bcom_fec_rx_init(int queue_len, phys_addr_t fifo, int maxbufsize) -{ - struct bcom_task *tsk; - struct bcom_fec_priv *priv; - - tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd), - sizeof(struct bcom_fec_priv)); - if (!tsk) - return NULL; - - tsk->flags = BCOM_FLAGS_NONE; - - priv = tsk->priv; - priv->fifo = fifo; - priv->maxbufsize = maxbufsize; - - if (bcom_fec_rx_reset(tsk)) { - bcom_task_free(tsk); - return NULL; - } - - return tsk; -} -EXPORT_SYMBOL_GPL(bcom_fec_rx_init); - -int -bcom_fec_rx_reset(struct bcom_task *tsk) -{ - struct bcom_fec_priv *priv = tsk->priv; - struct bcom_fec_rx_var *var; - struct bcom_fec_rx_inc *inc; - - /* Shutdown the task */ - bcom_disable_task(tsk->tasknum); - - /* Reset the microcode */ - var = (struct bcom_fec_rx_var *) bcom_task_var(tsk->tasknum); - inc = (struct bcom_fec_rx_inc *) bcom_task_inc(tsk->tasknum); - - if (bcom_load_image(tsk->tasknum, bcom_fec_rx_task)) - return -1; - - var->enable = bcom_eng->regs_base + - offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]); - var->fifo = (u32) priv->fifo; - var->bd_base = tsk->bd_pa; - var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size); - var->bd_start = tsk->bd_pa; - var->buffer_size = priv->maxbufsize; - - inc->incr_bytes = -(s16)sizeof(u32); /* These should be in the */ - inc->incr_dst = sizeof(u32); /* task image, but we stick */ - inc->incr_dst_ma= sizeof(u8); /* to the official ones */ - - /* Reset the BDs */ - tsk->index = 0; - tsk->outdex = 0; - - 
memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); - - /* Configure some stuff */ - bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_RX_BD_PRAGMA); - bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum); - - out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_FEC_RX], BCOM_IPR_FEC_RX); - - out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */ - - return 0; -} -EXPORT_SYMBOL_GPL(bcom_fec_rx_reset); - -void -bcom_fec_rx_release(struct bcom_task *tsk) -{ - /* Nothing special for the FEC tasks */ - bcom_task_free(tsk); -} -EXPORT_SYMBOL_GPL(bcom_fec_rx_release); - - - - /* Return 2nd to last DRD */ - /* This is an ugly hack, but at least it's only done - once at initialization */ -static u32 *self_modified_drd(int tasknum) -{ - u32 *desc; - int num_descs; - int drd_count; - int i; - - num_descs = bcom_task_num_descs(tasknum); - desc = bcom_task_desc(tasknum) + num_descs - 1; - drd_count = 0; - for (i=0; i<num_descs; i++, desc--) - if (bcom_desc_is_drd(*desc) && ++drd_count == 3) - break; - return desc; -} - -struct bcom_task * -bcom_fec_tx_init(int queue_len, phys_addr_t fifo) -{ - struct bcom_task *tsk; - struct bcom_fec_priv *priv; - - tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd), - sizeof(struct bcom_fec_priv)); - if (!tsk) - return NULL; - - tsk->flags = BCOM_FLAGS_ENABLE_TASK; - - priv = tsk->priv; - priv->fifo = fifo; - - if (bcom_fec_tx_reset(tsk)) { - bcom_task_free(tsk); - return NULL; - } - - return tsk; -} -EXPORT_SYMBOL_GPL(bcom_fec_tx_init); - -int -bcom_fec_tx_reset(struct bcom_task *tsk) -{ - struct bcom_fec_priv *priv = tsk->priv; - struct bcom_fec_tx_var *var; - struct bcom_fec_tx_inc *inc; - - /* Shutdown the task */ - bcom_disable_task(tsk->tasknum); - - /* Reset the microcode */ - var = (struct bcom_fec_tx_var *) bcom_task_var(tsk->tasknum); - inc = (struct bcom_fec_tx_inc *) bcom_task_inc(tsk->tasknum); - - if (bcom_load_image(tsk->tasknum, bcom_fec_tx_task)) - return -1; - - var->enable = bcom_eng->regs_base + - offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]); - var->fifo = (u32) priv->fifo; - var->DRD = bcom_sram_va2pa(self_modified_drd(tsk->tasknum)); - var->bd_base = tsk->bd_pa; - var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size); - var->bd_start = tsk->bd_pa; - - inc->incr_bytes = -(s16)sizeof(u32); /* These should be in the */ - inc->incr_src = sizeof(u32); /* task image, but we stick */ - inc->incr_src_ma= sizeof(u8); /* to the official ones */ - - /* Reset the BDs */ - tsk->index = 0; - tsk->outdex = 0; - - memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); - - /* Configure some stuff */ - bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_TX_BD_PRAGMA); - bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum); - - out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_FEC_TX], BCOM_IPR_FEC_TX); - - out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */ - - return 0; -} -EXPORT_SYMBOL_GPL(bcom_fec_tx_reset); - -void -bcom_fec_tx_release(struct bcom_task *tsk) -{ - /* Nothing special for the FEC tasks */ - bcom_task_free(tsk); -} -EXPORT_SYMBOL_GPL(bcom_fec_tx_release); - - -MODULE_DESCRIPTION("BestComm FEC tasks driver"); -MODULE_AUTHOR("Dale Farnsworth <dfarnsworth@mvista.com>"); -MODULE_LICENSE("GPL v2"); - diff --git a/arch/powerpc/sysdev/bestcomm/fec.h b/arch/powerpc/sysdev/bestcomm/fec.h deleted file mode 100644 index ee565d94d50..00000000000 --- a/arch/powerpc/sysdev/bestcomm/fec.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Header for Bestcomm FEC tasks driver - * - * - * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com> - * Copyright 
(C) 2003-2004 MontaVista, Software, Inc. - * ( by Dale Farnsworth <dfarnsworth@mvista.com> ) - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - */ - -#ifndef __BESTCOMM_FEC_H__ -#define __BESTCOMM_FEC_H__ - - -struct bcom_fec_bd { - u32 status; - u32 skb_pa; -}; - -#define BCOM_FEC_TX_BD_TFD 0x08000000ul /* transmit frame done */ -#define BCOM_FEC_TX_BD_TC 0x04000000ul /* transmit CRC */ -#define BCOM_FEC_TX_BD_ABC 0x02000000ul /* append bad CRC */ - -#define BCOM_FEC_RX_BD_L 0x08000000ul /* buffer is last in frame */ -#define BCOM_FEC_RX_BD_BC 0x00800000ul /* DA is broadcast */ -#define BCOM_FEC_RX_BD_MC 0x00400000ul /* DA is multicast and not broadcast */ -#define BCOM_FEC_RX_BD_LG 0x00200000ul /* Rx frame length violation */ -#define BCOM_FEC_RX_BD_NO 0x00100000ul /* Rx non-octet aligned frame */ -#define BCOM_FEC_RX_BD_CR 0x00040000ul /* Rx CRC error */ -#define BCOM_FEC_RX_BD_OV 0x00020000ul /* overrun */ -#define BCOM_FEC_RX_BD_TR 0x00010000ul /* Rx frame truncated */ -#define BCOM_FEC_RX_BD_LEN_MASK 0x000007fful /* mask for length of received frame */ -#define BCOM_FEC_RX_BD_ERRORS (BCOM_FEC_RX_BD_LG | BCOM_FEC_RX_BD_NO | \ - BCOM_FEC_RX_BD_CR | BCOM_FEC_RX_BD_OV | BCOM_FEC_RX_BD_TR) - - -extern struct bcom_task * -bcom_fec_rx_init(int queue_len, phys_addr_t fifo, int maxbufsize); - -extern int -bcom_fec_rx_reset(struct bcom_task *tsk); - -extern void -bcom_fec_rx_release(struct bcom_task *tsk); - - -extern struct bcom_task * -bcom_fec_tx_init(int queue_len, phys_addr_t fifo); - -extern int -bcom_fec_tx_reset(struct bcom_task *tsk); - -extern void -bcom_fec_tx_release(struct bcom_task *tsk); - - -#endif /* __BESTCOMM_FEC_H__ */ - diff --git a/arch/powerpc/sysdev/bestcomm/gen_bd.c b/arch/powerpc/sysdev/bestcomm/gen_bd.c deleted file mode 100644 index e0a53e3147b..00000000000 --- a/arch/powerpc/sysdev/bestcomm/gen_bd.c +++ /dev/null @@ -1,354 +0,0 @@ -/* - * Driver for MPC52xx processor BestComm General Buffer Descriptor - * - * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com> - * Copyright (C) 2006 AppSpec Computer Technologies Corp. - * Jeff Gibbons <jeff.gibbons@appspec.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- * - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/types.h> -#include <asm/errno.h> -#include <asm/io.h> - -#include <asm/mpc52xx.h> -#include <asm/mpc52xx_psc.h> - -#include "bestcomm.h" -#include "bestcomm_priv.h" -#include "gen_bd.h" - - -/* ======================================================================== */ -/* Task image/var/inc */ -/* ======================================================================== */ - -/* gen_bd tasks images */ -extern u32 bcom_gen_bd_rx_task[]; -extern u32 bcom_gen_bd_tx_task[]; - -/* rx task vars that need to be set before enabling the task */ -struct bcom_gen_bd_rx_var { - u32 enable; /* (u16*) address of task's control register */ - u32 fifo; /* (u32*) address of gen_bd's fifo */ - u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */ - u32 bd_last; /* (struct bcom_bd*) end of ring buffer */ - u32 bd_start; /* (struct bcom_bd*) current bd */ - u32 buffer_size; /* size of receive buffer */ -}; - -/* rx task incs that need to be set before enabling the task */ -struct bcom_gen_bd_rx_inc { - u16 pad0; - s16 incr_bytes; - u16 pad1; - s16 incr_dst; -}; - -/* tx task vars that need to be set before enabling the task */ -struct bcom_gen_bd_tx_var { - u32 fifo; /* (u32*) address of gen_bd's fifo */ - u32 enable; /* (u16*) address of task's control register */ - u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */ - u32 bd_last; /* (struct bcom_bd*) end of ring buffer */ - u32 bd_start; /* (struct bcom_bd*) current bd */ - u32 buffer_size; /* set by uCode for each packet */ -}; - -/* tx task incs that need to be set before enabling the task */ -struct bcom_gen_bd_tx_inc { - u16 pad0; - s16 incr_bytes; - u16 pad1; - s16 incr_src; - u16 pad2; - s16 incr_src_ma; -}; - -/* private structure */ -struct bcom_gen_bd_priv { - phys_addr_t fifo; - int initiator; - int ipr; - int maxbufsize; -}; - - -/* ======================================================================== */ -/* Task support code */ -/* ======================================================================== */ - -struct bcom_task * -bcom_gen_bd_rx_init(int queue_len, phys_addr_t fifo, - int initiator, int ipr, int maxbufsize) -{ - struct bcom_task *tsk; - struct bcom_gen_bd_priv *priv; - - tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_gen_bd), - sizeof(struct bcom_gen_bd_priv)); - if (!tsk) - return NULL; - - tsk->flags = BCOM_FLAGS_NONE; - - priv = tsk->priv; - priv->fifo = fifo; - priv->initiator = initiator; - priv->ipr = ipr; - priv->maxbufsize = maxbufsize; - - if (bcom_gen_bd_rx_reset(tsk)) { - bcom_task_free(tsk); - return NULL; - } - - return tsk; -} -EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_init); - -int -bcom_gen_bd_rx_reset(struct bcom_task *tsk) -{ - struct bcom_gen_bd_priv *priv = tsk->priv; - struct bcom_gen_bd_rx_var *var; - struct bcom_gen_bd_rx_inc *inc; - - /* Shutdown the task */ - bcom_disable_task(tsk->tasknum); - - /* Reset the microcode */ - var = (struct bcom_gen_bd_rx_var *) bcom_task_var(tsk->tasknum); - inc = (struct bcom_gen_bd_rx_inc *) bcom_task_inc(tsk->tasknum); - - if (bcom_load_image(tsk->tasknum, bcom_gen_bd_rx_task)) - return -1; - - var->enable = bcom_eng->regs_base + - offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]); - var->fifo = (u32) priv->fifo; - var->bd_base = tsk->bd_pa; - var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size); - var->bd_start = tsk->bd_pa; - var->buffer_size = priv->maxbufsize; - - inc->incr_bytes = -(s16)sizeof(u32); - inc->incr_dst = sizeof(u32); - - /* 
Reset the BDs */ - tsk->index = 0; - tsk->outdex = 0; - - memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); - - /* Configure some stuff */ - bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_RX_BD_PRAGMA); - bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum); - - out_8(&bcom_eng->regs->ipr[priv->initiator], priv->ipr); - bcom_set_initiator(tsk->tasknum, priv->initiator); - - out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */ - - return 0; -} -EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_reset); - -void -bcom_gen_bd_rx_release(struct bcom_task *tsk) -{ - /* Nothing special for the GenBD tasks */ - bcom_task_free(tsk); -} -EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_release); - - -extern struct bcom_task * -bcom_gen_bd_tx_init(int queue_len, phys_addr_t fifo, - int initiator, int ipr) -{ - struct bcom_task *tsk; - struct bcom_gen_bd_priv *priv; - - tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_gen_bd), - sizeof(struct bcom_gen_bd_priv)); - if (!tsk) - return NULL; - - tsk->flags = BCOM_FLAGS_NONE; - - priv = tsk->priv; - priv->fifo = fifo; - priv->initiator = initiator; - priv->ipr = ipr; - - if (bcom_gen_bd_tx_reset(tsk)) { - bcom_task_free(tsk); - return NULL; - } - - return tsk; -} -EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_init); - -int -bcom_gen_bd_tx_reset(struct bcom_task *tsk) -{ - struct bcom_gen_bd_priv *priv = tsk->priv; - struct bcom_gen_bd_tx_var *var; - struct bcom_gen_bd_tx_inc *inc; - - /* Shutdown the task */ - bcom_disable_task(tsk->tasknum); - - /* Reset the microcode */ - var = (struct bcom_gen_bd_tx_var *) bcom_task_var(tsk->tasknum); - inc = (struct bcom_gen_bd_tx_inc *) bcom_task_inc(tsk->tasknum); - - if (bcom_load_image(tsk->tasknum, bcom_gen_bd_tx_task)) - return -1; - - var->enable = bcom_eng->regs_base + - offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]); - var->fifo = (u32) priv->fifo; - var->bd_base = tsk->bd_pa; - var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size); - var->bd_start = tsk->bd_pa; - - inc->incr_bytes = -(s16)sizeof(u32); - inc->incr_src = sizeof(u32); - inc->incr_src_ma = sizeof(u8); - - /* Reset the BDs */ - tsk->index = 0; - tsk->outdex = 0; - - memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); - - /* Configure some stuff */ - bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_TX_BD_PRAGMA); - bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum); - - out_8(&bcom_eng->regs->ipr[priv->initiator], priv->ipr); - bcom_set_initiator(tsk->tasknum, priv->initiator); - - out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */ - - return 0; -} -EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_reset); - -void -bcom_gen_bd_tx_release(struct bcom_task *tsk) -{ - /* Nothing special for the GenBD tasks */ - bcom_task_free(tsk); -} -EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_release); - -/* --------------------------------------------------------------------- - * PSC support code - */ - -/** - * bcom_psc_parameters - Bestcomm initialization value table for PSC devices - * - * This structure is only used internally. It is a lookup table for PSC - * specific parameters to bestcomm tasks. 
- */ -static struct bcom_psc_params { - int rx_initiator; - int rx_ipr; - int tx_initiator; - int tx_ipr; -} bcom_psc_params[] = { - [0] = { - .rx_initiator = BCOM_INITIATOR_PSC1_RX, - .rx_ipr = BCOM_IPR_PSC1_RX, - .tx_initiator = BCOM_INITIATOR_PSC1_TX, - .tx_ipr = BCOM_IPR_PSC1_TX, - }, - [1] = { - .rx_initiator = BCOM_INITIATOR_PSC2_RX, - .rx_ipr = BCOM_IPR_PSC2_RX, - .tx_initiator = BCOM_INITIATOR_PSC2_TX, - .tx_ipr = BCOM_IPR_PSC2_TX, - }, - [2] = { - .rx_initiator = BCOM_INITIATOR_PSC3_RX, - .rx_ipr = BCOM_IPR_PSC3_RX, - .tx_initiator = BCOM_INITIATOR_PSC3_TX, - .tx_ipr = BCOM_IPR_PSC3_TX, - }, - [3] = { - .rx_initiator = BCOM_INITIATOR_PSC4_RX, - .rx_ipr = BCOM_IPR_PSC4_RX, - .tx_initiator = BCOM_INITIATOR_PSC4_TX, - .tx_ipr = BCOM_IPR_PSC4_TX, - }, - [4] = { - .rx_initiator = BCOM_INITIATOR_PSC5_RX, - .rx_ipr = BCOM_IPR_PSC5_RX, - .tx_initiator = BCOM_INITIATOR_PSC5_TX, - .tx_ipr = BCOM_IPR_PSC5_TX, - }, - [5] = { - .rx_initiator = BCOM_INITIATOR_PSC6_RX, - .rx_ipr = BCOM_IPR_PSC6_RX, - .tx_initiator = BCOM_INITIATOR_PSC6_TX, - .tx_ipr = BCOM_IPR_PSC6_TX, - }, -}; - -/** - * bcom_psc_gen_bd_rx_init - Allocate a receive bcom_task for a PSC port - * @psc_num: Number of the PSC to allocate a task for - * @queue_len: number of buffer descriptors to allocate for the task - * @fifo: physical address of FIFO register - * @maxbufsize: Maximum receive data size in bytes. - * - * Allocate a bestcomm task structure for receiving data from a PSC. - */ -struct bcom_task * bcom_psc_gen_bd_rx_init(unsigned psc_num, int queue_len, - phys_addr_t fifo, int maxbufsize) -{ - if (psc_num >= MPC52xx_PSC_MAXNUM) - return NULL; - - return bcom_gen_bd_rx_init(queue_len, fifo, - bcom_psc_params[psc_num].rx_initiator, - bcom_psc_params[psc_num].rx_ipr, - maxbufsize); -} -EXPORT_SYMBOL_GPL(bcom_psc_gen_bd_rx_init); - -/** - * bcom_psc_gen_bd_tx_init - Allocate a transmit bcom_task for a PSC port - * @psc_num: Number of the PSC to allocate a task for - * @queue_len: number of buffer descriptors to allocate for the task - * @fifo: physical address of FIFO register - * - * Allocate a bestcomm task structure for transmitting data to a PSC. - */ -struct bcom_task * -bcom_psc_gen_bd_tx_init(unsigned psc_num, int queue_len, phys_addr_t fifo) -{ - struct psc; - return bcom_gen_bd_tx_init(queue_len, fifo, - bcom_psc_params[psc_num].tx_initiator, - bcom_psc_params[psc_num].tx_ipr); -} -EXPORT_SYMBOL_GPL(bcom_psc_gen_bd_tx_init); - - -MODULE_DESCRIPTION("BestComm General Buffer Descriptor tasks driver"); -MODULE_AUTHOR("Jeff Gibbons <jeff.gibbons@appspec.com>"); -MODULE_LICENSE("GPL v2"); - diff --git a/arch/powerpc/sysdev/bestcomm/gen_bd.h b/arch/powerpc/sysdev/bestcomm/gen_bd.h deleted file mode 100644 index de47260e69d..00000000000 --- a/arch/powerpc/sysdev/bestcomm/gen_bd.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Header for Bestcomm General Buffer Descriptor tasks driver - * - * - * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com> - * Copyright (C) 2006 AppSpec Computer Technologies Corp. - * Jeff Gibbons <jeff.gibbons@appspec.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- * - * - */ - -#ifndef __BESTCOMM_GEN_BD_H__ -#define __BESTCOMM_GEN_BD_H__ - -struct bcom_gen_bd { - u32 status; - u32 buf_pa; -}; - - -extern struct bcom_task * -bcom_gen_bd_rx_init(int queue_len, phys_addr_t fifo, - int initiator, int ipr, int maxbufsize); - -extern int -bcom_gen_bd_rx_reset(struct bcom_task *tsk); - -extern void -bcom_gen_bd_rx_release(struct bcom_task *tsk); - - -extern struct bcom_task * -bcom_gen_bd_tx_init(int queue_len, phys_addr_t fifo, - int initiator, int ipr); - -extern int -bcom_gen_bd_tx_reset(struct bcom_task *tsk); - -extern void -bcom_gen_bd_tx_release(struct bcom_task *tsk); - - -/* PSC support utility wrappers */ -struct bcom_task * bcom_psc_gen_bd_rx_init(unsigned psc_num, int queue_len, - phys_addr_t fifo, int maxbufsize); -struct bcom_task * bcom_psc_gen_bd_tx_init(unsigned psc_num, int queue_len, - phys_addr_t fifo); -#endif /* __BESTCOMM_GEN_BD_H__ */ - diff --git a/arch/powerpc/sysdev/bestcomm/sram.c b/arch/powerpc/sysdev/bestcomm/sram.c deleted file mode 100644 index b6db23e085f..00000000000 --- a/arch/powerpc/sysdev/bestcomm/sram.c +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Simple memory allocator for on-board SRAM - * - * - * Maintainer : Sylvain Munaut <tnt@246tNt.com> - * - * Copyright (C) 2005 Sylvain Munaut <tnt@246tNt.com> - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - */ - -#include <linux/err.h> -#include <linux/kernel.h> -#include <linux/export.h> -#include <linux/slab.h> -#include <linux/spinlock.h> -#include <linux/string.h> -#include <linux/ioport.h> -#include <linux/of.h> - -#include <asm/io.h> -#include <asm/mmu.h> - -#include "sram.h" - - -/* Struct keeping our 'state' */ -struct bcom_sram *bcom_sram = NULL; -EXPORT_SYMBOL_GPL(bcom_sram); /* needed for inline functions */ - - -/* ======================================================================== */ -/* Public API */ -/* ======================================================================== */ -/* DO NOT USE in interrupts, if needed in irq handler, we should use the - _irqsave version of the spin_locks */ - -int bcom_sram_init(struct device_node *sram_node, char *owner) -{ - int rv; - const u32 *regaddr_p; - u64 regaddr64, size64; - unsigned int psize; - - /* Create our state struct */ - if (bcom_sram) { - printk(KERN_ERR "%s: bcom_sram_init: " - "Already initialized !\n", owner); - return -EBUSY; - } - - bcom_sram = kmalloc(sizeof(struct bcom_sram), GFP_KERNEL); - if (!bcom_sram) { - printk(KERN_ERR "%s: bcom_sram_init: " - "Couldn't allocate internal state !\n", owner); - return -ENOMEM; - } - - /* Get address and size of the sram */ - regaddr_p = of_get_address(sram_node, 0, &size64, NULL); - if (!regaddr_p) { - printk(KERN_ERR "%s: bcom_sram_init: " - "Invalid device node !\n", owner); - rv = -EINVAL; - goto error_free; - } - - regaddr64 = of_translate_address(sram_node, regaddr_p); - - bcom_sram->base_phys = (phys_addr_t) regaddr64; - bcom_sram->size = (unsigned int) size64; - - /* Request region */ - if (!request_mem_region(bcom_sram->base_phys, bcom_sram->size, owner)) { - printk(KERN_ERR "%s: bcom_sram_init: " - "Couldn't request region !\n", owner); - rv = -EBUSY; - goto error_free; - } - - /* Map SRAM */ - /* sram is not really __iomem */ - bcom_sram->base_virt = (void*) ioremap(bcom_sram->base_phys, bcom_sram->size); - - if (!bcom_sram->base_virt) { - printk(KERN_ERR "%s: bcom_sram_init: " - "Map error SRAM zone 
0x%08lx (0x%0x)!\n", - owner, (long)bcom_sram->base_phys, bcom_sram->size ); - rv = -ENOMEM; - goto error_release; - } - - /* Create an rheap (defaults to 32 bits word alignment) */ - bcom_sram->rh = rh_create(4); - - /* Attach the free zones */ -#if 0 - /* Currently disabled ... for future use only */ - reg_addr_p = of_get_property(sram_node, "available", &psize); -#else - regaddr_p = NULL; - psize = 0; -#endif - - if (!regaddr_p || !psize) { - /* Attach the whole zone */ - rh_attach_region(bcom_sram->rh, 0, bcom_sram->size); - } else { - /* Attach each zone independently */ - while (psize >= 2 * sizeof(u32)) { - phys_addr_t zbase = of_translate_address(sram_node, regaddr_p); - rh_attach_region(bcom_sram->rh, zbase - bcom_sram->base_phys, regaddr_p[1]); - regaddr_p += 2; - psize -= 2 * sizeof(u32); - } - } - - /* Init our spinlock */ - spin_lock_init(&bcom_sram->lock); - - return 0; - -error_release: - release_mem_region(bcom_sram->base_phys, bcom_sram->size); -error_free: - kfree(bcom_sram); - bcom_sram = NULL; - - return rv; -} -EXPORT_SYMBOL_GPL(bcom_sram_init); - -void bcom_sram_cleanup(void) -{ - /* Free resources */ - if (bcom_sram) { - rh_destroy(bcom_sram->rh); - iounmap((void __iomem *)bcom_sram->base_virt); - release_mem_region(bcom_sram->base_phys, bcom_sram->size); - kfree(bcom_sram); - bcom_sram = NULL; - } -} -EXPORT_SYMBOL_GPL(bcom_sram_cleanup); - -void* bcom_sram_alloc(int size, int align, phys_addr_t *phys) -{ - unsigned long offset; - - spin_lock(&bcom_sram->lock); - offset = rh_alloc_align(bcom_sram->rh, size, align, NULL); - spin_unlock(&bcom_sram->lock); - - if (IS_ERR_VALUE(offset)) - return NULL; - - *phys = bcom_sram->base_phys + offset; - return bcom_sram->base_virt + offset; -} -EXPORT_SYMBOL_GPL(bcom_sram_alloc); - -void bcom_sram_free(void *ptr) -{ - unsigned long offset; - - if (!ptr) - return; - - offset = ptr - bcom_sram->base_virt; - - spin_lock(&bcom_sram->lock); - rh_free(bcom_sram->rh, offset); - spin_unlock(&bcom_sram->lock); -} -EXPORT_SYMBOL_GPL(bcom_sram_free); - diff --git a/arch/powerpc/sysdev/bestcomm/sram.h b/arch/powerpc/sysdev/bestcomm/sram.h deleted file mode 100644 index b6d668963cc..00000000000 --- a/arch/powerpc/sysdev/bestcomm/sram.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Handling of a sram zone for bestcomm - * - * - * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com> - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - */ - -#ifndef __BESTCOMM_SRAM_H__ -#define __BESTCOMM_SRAM_H__ - -#include <asm/rheap.h> -#include <asm/mmu.h> -#include <linux/spinlock.h> - - -/* Structure used internally */ - /* The internals are here for the inline functions - * sake, certainly not for the user to mess with ! 
- */ -struct bcom_sram { - phys_addr_t base_phys; - void *base_virt; - unsigned int size; - rh_info_t *rh; - spinlock_t lock; -}; - -extern struct bcom_sram *bcom_sram; - - -/* Public API */ -extern int bcom_sram_init(struct device_node *sram_node, char *owner); -extern void bcom_sram_cleanup(void); - -extern void* bcom_sram_alloc(int size, int align, phys_addr_t *phys); -extern void bcom_sram_free(void *ptr); - -static inline phys_addr_t bcom_sram_va2pa(void *va) { - return bcom_sram->base_phys + - (unsigned long)(va - bcom_sram->base_virt); -} - -static inline void *bcom_sram_pa2va(phys_addr_t pa) { - return bcom_sram->base_virt + - (unsigned long)(pa - bcom_sram->base_phys); -} - - -#endif /* __BESTCOMM_SRAM_H__ */ - diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c index d131c8a1cb1..afc2dbf3701 100644 --- a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c +++ b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c @@ -69,7 +69,7 @@ static int __init get_offset_from_cmdline(char *str) __setup("cache-sram-size=", get_size_from_cmdline); __setup("cache-sram-offset=", get_offset_from_cmdline); -static int __devinit mpc85xx_l2ctlr_of_probe(struct platform_device *dev) +static int mpc85xx_l2ctlr_of_probe(struct platform_device *dev) { long rval; unsigned int rem; @@ -160,7 +160,7 @@ static int __devinit mpc85xx_l2ctlr_of_probe(struct platform_device *dev) return 0; } -static int __devexit mpc85xx_l2ctlr_of_remove(struct platform_device *dev) +static int mpc85xx_l2ctlr_of_remove(struct platform_device *dev) { BUG_ON(!l2ctlr); @@ -203,6 +203,7 @@ static struct of_device_id mpc85xx_l2ctlr_of_match[] = { { .compatible = "fsl,p1024-l2-cache-controller",}, { .compatible = "fsl,p1015-l2-cache-controller",}, { .compatible = "fsl,p1010-l2-cache-controller",}, + { .compatible = "fsl,bsc9131-l2-cache-controller",}, {}, }; @@ -213,7 +214,7 @@ static struct platform_driver mpc85xx_l2ctlr_of_platform_driver = { .of_match_table = mpc85xx_l2ctlr_of_match, }, .probe = mpc85xx_l2ctlr_of_probe, - .remove = __devexit_p(mpc85xx_l2ctlr_of_remove), + .remove = mpc85xx_l2ctlr_of_remove, }; static __init int mpc85xx_l2ctlr_of_init(void) diff --git a/arch/powerpc/sysdev/fsl_gtm.c b/arch/powerpc/sysdev/fsl_gtm.c index 02cf1e7e77f..0eb871cc343 100644 --- a/arch/powerpc/sysdev/fsl_gtm.c +++ b/arch/powerpc/sysdev/fsl_gtm.c @@ -1,7 +1,7 @@ /* * Freescale General-purpose Timers Module * - * Copyright (c) Freescale Semicondutor, Inc. 2006. + * Copyright (c) Freescale Semiconductor, Inc. 2006. * Shlomi Gridish <gridish@freescale.com> * Jerry Huang <Chang-Ming.Huang@freescale.com> * Copyright (c) MontaVista Software, Inc. 2008. 
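For context, a minimal sketch of the on-board SRAM allocator removed above in sram.c/sram.h, which the BestComm core uses to carve out its task table, context, variable and FDT zones; the example_ helpers and the WARN_ON checks are illustrative assumptions, only the bcom_sram_* calls come from the deleted header:

        #include <linux/kernel.h>
        #include <linux/string.h>
        #include "bestcomm_priv.h"      /* BCOM_FDT_SIZE/ALIGN, pulls in sram.h */

        /* Allocate one aligned SRAM zone and show both address views */
        static u32 *example_alloc_fdt(void)
        {
                phys_addr_t fdt_pa;
                u32 *fdt_va;

                /* BCOM_FDT_SIZE bytes, aligned to BCOM_FDT_ALIGN (0x100) */
                fdt_va = bcom_sram_alloc(BCOM_FDT_SIZE, BCOM_FDT_ALIGN, &fdt_pa);
                if (!fdt_va)
                        return NULL;    /* SRAM exhausted */

                memset(fdt_va, 0x00, BCOM_FDT_SIZE);

                /* The inline helpers translate between virtual and physical views */
                WARN_ON(bcom_sram_va2pa(fdt_va) != fdt_pa);
                WARN_ON(bcom_sram_pa2va(fdt_pa) != (void *)fdt_va);

                return fdt_va;
        }

        static void example_free_fdt(u32 *fdt_va)
        {
                bcom_sram_free(fdt_va);         /* offset-based free, NULL-safe */
        }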
diff --git a/arch/powerpc/sysdev/fsl_ifc.c b/arch/powerpc/sysdev/fsl_ifc.c index 097cc9d2585..d7fc7223914 100644 --- a/arch/powerpc/sysdev/fsl_ifc.c +++ b/arch/powerpc/sysdev/fsl_ifc.c @@ -63,7 +63,7 @@ int fsl_ifc_find(phys_addr_t addr_base) return -ENODEV; for (i = 0; i < ARRAY_SIZE(fsl_ifc_ctrl_dev->regs->cspr_cs); i++) { - __be32 cspr = in_be32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr); + u32 cspr = in_be32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr); if (cspr & CSPR_V && (cspr & CSPR_BA) == convert_ifc_address(addr_base)) return i; @@ -73,7 +73,7 @@ int fsl_ifc_find(phys_addr_t addr_base) } EXPORT_SYMBOL(fsl_ifc_find); -static int __devinit fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl) +static int fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl) { struct fsl_ifc_regs __iomem *ifc = ctrl->regs; @@ -211,7 +211,7 @@ static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data) * resources for the NAND banks themselves are allocated * in the chip probe function. */ -static int __devinit fsl_ifc_ctrl_probe(struct platform_device *dev) +static int fsl_ifc_ctrl_probe(struct platform_device *dev) { int ret = 0; diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c index 483126d7b3c..6bc5a546d49 100644 --- a/arch/powerpc/sysdev/fsl_lbc.c +++ b/arch/powerpc/sysdev/fsl_lbc.c @@ -74,8 +74,8 @@ int fsl_lbc_find(phys_addr_t addr_base) lbc = fsl_lbc_ctrl_dev->regs; for (i = 0; i < ARRAY_SIZE(lbc->bank); i++) { - __be32 br = in_be32(&lbc->bank[i].br); - __be32 or = in_be32(&lbc->bank[i].or); + u32 br = in_be32(&lbc->bank[i].br); + u32 or = in_be32(&lbc->bank[i].or); if (br & BR_V && (br & or & BR_BA) == fsl_lbc_addr(addr_base)) return i; @@ -97,7 +97,7 @@ EXPORT_SYMBOL(fsl_lbc_find); int fsl_upm_find(phys_addr_t addr_base, struct fsl_upm *upm) { int bank; - __be32 br; + u32 br; struct fsl_lbc_regs __iomem *lbc; bank = fsl_lbc_find(addr_base); @@ -185,8 +185,8 @@ int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, u32 mar) } EXPORT_SYMBOL(fsl_upm_run_pattern); -static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl, - struct device_node *node) +static int fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl, + struct device_node *node) { struct fsl_lbc_regs __iomem *lbc = ctrl->regs; @@ -273,7 +273,7 @@ static irqreturn_t fsl_lbc_ctrl_irq(int irqno, void *data) * in the chip probe function. 
*/ -static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev) +static int fsl_lbc_ctrl_probe(struct platform_device *dev) { int ret; diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 51ffafae561..178c99427b1 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -28,7 +28,7 @@ #include "fsl_msi.h" #include "fsl_pci.h" -LIST_HEAD(msi_head); +static LIST_HEAD(msi_head); struct fsl_msi_feature { u32 fsl_pic_ip; @@ -130,7 +130,7 @@ static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq, struct pci_controller *hose = pci_bus_to_host(pdev->bus); u64 address; /* Physical address of the MSIIR */ int len; - const u64 *reg; + const __be64 *reg; /* If the msi-address-64 property exists, then use it */ reg = of_get_property(hose->dn, "msi-address-64", &len); @@ -236,7 +236,6 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) u32 intr_index; u32 have_shift = 0; struct fsl_msi_cascade_data *cascade_data; - unsigned int ret; cascade_data = irq_get_handler_data(irq); msi_data = cascade_data->msi_data; @@ -268,7 +267,9 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) case FSL_PIC_IP_IPIC: msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4); break; - case FSL_PIC_IP_VMPIC: +#ifdef CONFIG_EPAPR_PARAVIRT + case FSL_PIC_IP_VMPIC: { + unsigned int ret; ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value); if (ret) { pr_err("fsl-msi: fh_vmpic_get_msir() failed for " @@ -277,6 +278,8 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) } break; } +#endif + } while (msir_value) { intr_index = ffs(msir_value) - 1; @@ -330,9 +333,8 @@ static int fsl_of_msi_remove(struct platform_device *ofdev) return 0; } -static int __devinit fsl_msi_setup_hwirq(struct fsl_msi *msi, - struct platform_device *dev, - int offset, int irq_index) +static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev, + int offset, int irq_index) { struct fsl_msi_cascade_data *cascade_data = NULL; int virt_msir; @@ -360,7 +362,7 @@ static int __devinit fsl_msi_setup_hwirq(struct fsl_msi *msi, } static const struct of_device_id fsl_of_msi_ids[]; -static int __devinit fsl_of_msi_probe(struct platform_device *dev) +static int fsl_of_msi_probe(struct platform_device *dev) { const struct of_device_id *match; struct fsl_msi *msi; @@ -508,10 +510,12 @@ static const struct of_device_id fsl_of_msi_ids[] = { .compatible = "fsl,ipic-msi", .data = &ipic_msi_feature, }, +#ifdef CONFIG_EPAPR_PARAVIRT { .compatible = "fsl,vmpic-msi", .data = &vmpic_msi_feature, }, +#endif {} }; diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index ffb93ae9379..682084dba19 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -36,7 +36,7 @@ static int fsl_pcie_bus_fixup, is_mpc83xx_pci; -static void __devinit quirk_fsl_pcie_header(struct pci_dev *dev) +static void quirk_fsl_pcie_header(struct pci_dev *dev) { u8 hdr_type; @@ -89,7 +89,7 @@ static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask) return 0; } -static int __init setup_one_atmu(struct ccsr_pci __iomem *pci, +static int setup_one_atmu(struct ccsr_pci __iomem *pci, unsigned int index, const struct resource *res, resource_size_t offset) { @@ -126,7 +126,7 @@ static int __init setup_one_atmu(struct ccsr_pci __iomem *pci, } /* atmu setup for fsl pci/pcie controller */ -static void __init setup_pci_atmu(struct pci_controller *hose, +static void setup_pci_atmu(struct pci_controller *hose, 
struct resource *rsrc) { struct ccsr_pci __iomem *pci; @@ -136,7 +136,7 @@ static void __init setup_pci_atmu(struct pci_controller *hose, u32 pcicsrbar = 0, pcicsrbar_sz; u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL | PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP; - char *name = hose->dn->full_name; + const char *name = hose->dn->full_name; const u64 *reg; int len; @@ -421,13 +421,16 @@ void fsl_pcibios_fixup_bus(struct pci_bus *bus) } } -int __init fsl_add_bridge(struct device_node *dev, int is_primary) +int __init fsl_add_bridge(struct platform_device *pdev, int is_primary) { int len; struct pci_controller *hose; struct resource rsrc; const int *bus_range; u8 hdr_type, progif; + struct device_node *dev; + + dev = pdev->dev.of_node; if (!of_device_is_available(dev)) { pr_warning("%s: disabled\n", dev->full_name); @@ -453,6 +456,8 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary) if (!hose) return -ENOMEM; + /* set platform device as the parent */ + hose->parent = &pdev->dev; hose->first_busno = bus_range ? bus_range[0] : 0x0; hose->last_busno = bus_range ? bus_range[1] : 0xff; @@ -827,13 +832,18 @@ static const struct of_device_id pci_ids[] = { { .compatible = "fsl,mpc8548-pcie", }, { .compatible = "fsl,mpc8610-pci", }, { .compatible = "fsl,mpc8641-pcie", }, + { .compatible = "fsl,qoriq-pcie-v2.1", }, + { .compatible = "fsl,qoriq-pcie-v2.2", }, + { .compatible = "fsl,qoriq-pcie-v2.3", }, + { .compatible = "fsl,qoriq-pcie-v2.4", }, + + /* + * The following entries are for compatibility with older device + * trees. + */ { .compatible = "fsl,p1022-pcie", }, - { .compatible = "fsl,p1010-pcie", }, - { .compatible = "fsl,p1023-pcie", }, { .compatible = "fsl,p4080-pcie", }, - { .compatible = "fsl,qoriq-pcie-v2.4", }, - { .compatible = "fsl,qoriq-pcie-v2.3", }, - { .compatible = "fsl,qoriq-pcie-v2.2", }, + {}, }; @@ -871,7 +881,7 @@ void fsl_pci_assign_primary(void) } } -static int __devinit fsl_pci_probe(struct platform_device *pdev) +static int fsl_pci_probe(struct platform_device *pdev) { int ret; struct device_node *node; @@ -880,7 +890,7 @@ static int __devinit fsl_pci_probe(struct platform_device *pdev) #endif node = pdev->dev.of_node; - ret = fsl_add_bridge(node, fsl_pci_primary == node); + ret = fsl_add_bridge(pdev, fsl_pci_primary == node); #ifdef CONFIG_SWIOTLB if (ret == 0) { @@ -902,9 +912,42 @@ static int __devinit fsl_pci_probe(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM +static int fsl_pci_resume(struct device *dev) +{ + struct pci_controller *hose; + struct resource pci_rsrc; + + hose = pci_find_hose_for_OF_device(dev->of_node); + if (!hose) + return -ENODEV; + + if (of_address_to_resource(dev->of_node, 0, &pci_rsrc)) { + dev_err(dev, "Get pci register base failed."); + return -ENODEV; + } + + setup_pci_atmu(hose, &pci_rsrc); + + return 0; +} + +static const struct dev_pm_ops pci_pm_ops = { + .resume = fsl_pci_resume, +}; + +#define PCI_PM_OPS (&pci_pm_ops) + +#else + +#define PCI_PM_OPS NULL + +#endif + static struct platform_driver fsl_pci_driver = { .driver = { .name = "fsl-pci", + .pm = PCI_PM_OPS, .of_match_table = pci_ids, }, .probe = fsl_pci_probe, diff --git a/arch/powerpc/sysdev/fsl_pci.h b/arch/powerpc/sysdev/fsl_pci.h index d078537adec..c495c00c874 100644 --- a/arch/powerpc/sysdev/fsl_pci.h +++ b/arch/powerpc/sysdev/fsl_pci.h @@ -91,7 +91,7 @@ struct ccsr_pci { __be32 pex_err_cap_r3; /* 0x.e34 - PCIE error capture register 0 */ }; -extern int fsl_add_bridge(struct device_node *dev, int is_primary); +extern int fsl_add_bridge(struct 
platform_device *pdev, int is_primary); extern void fsl_pcibios_fixup_bus(struct pci_bus *bus); extern int mpc83xx_add_bridge(struct device_node *dev); u64 fsl_pci_immrbar_base(struct pci_controller *hose); diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 5b6f556094d..e2fb3171f41 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c @@ -644,7 +644,7 @@ err_rio_regs: /* The probe function for RapidIO peer-to-peer network. */ -static int __devinit fsl_of_rio_rpn_probe(struct platform_device *dev) +static int fsl_of_rio_rpn_probe(struct platform_device *dev) { printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n", dev->dev.of_node->full_name); diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c index c449dbd1c93..228cf91b91c 100644 --- a/arch/powerpc/sysdev/fsl_soc.c +++ b/arch/powerpc/sysdev/fsl_soc.c @@ -58,10 +58,10 @@ phys_addr_t get_immrbase(void) if (soc) { int size; u32 naddr; - const u32 *prop = of_get_property(soc, "#address-cells", &size); + const __be32 *prop = of_get_property(soc, "#address-cells", &size); if (prop && size == 4) - naddr = *prop; + naddr = be32_to_cpup(prop); else naddr = 2; @@ -253,6 +253,7 @@ struct platform_diu_data_ops diu_ops; EXPORT_SYMBOL(diu_ops); #endif +#ifdef CONFIG_EPAPR_PARAVIRT /* * Restart the current partition * @@ -278,3 +279,4 @@ void fsl_hv_halt(void) pr_info("hv exit\n"); fh_partition_stop(-1); } +#endif diff --git a/arch/powerpc/sysdev/mpc5xxx_clocks.c b/arch/powerpc/sysdev/mpc5xxx_clocks.c index 96f815a55df..5492dc5f56f 100644 --- a/arch/powerpc/sysdev/mpc5xxx_clocks.c +++ b/arch/powerpc/sysdev/mpc5xxx_clocks.c @@ -9,9 +9,9 @@ #include <linux/kernel.h> #include <linux/of_platform.h> #include <linux/export.h> +#include <asm/mpc5xxx.h> -unsigned int -mpc5xxx_get_bus_frequency(struct device_node *node) +unsigned long mpc5xxx_get_bus_frequency(struct device_node *node) { struct device_node *np; const unsigned int *p_bus_freq = NULL; diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 9c6e535daad..d30e6a676c8 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -54,7 +54,7 @@ static DEFINE_RAW_SPINLOCK(mpic_lock); #ifdef CONFIG_PPC32 /* XXX for now */ #ifdef CONFIG_IRQ_ALL_CPUS -#define distribute_irqs (1) +#define distribute_irqs (!(mpic->flags & MPIC_SINGLE_DEST_CPU)) #else #define distribute_irqs (0) #endif @@ -1182,6 +1182,7 @@ struct mpic * __init mpic_alloc(struct device_node *node, const char *vers; const u32 *psrc; u32 last_irq; + u32 fsl_version = 0; /* Default MPIC search parameters */ static const struct of_device_id __initconst mpic_device_id[] = { @@ -1314,7 +1315,7 @@ struct mpic * __init mpic_alloc(struct device_node *node, mpic_map(mpic, mpic->paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000); if (mpic->flags & MPIC_FSL) { - u32 brr1, version; + u32 brr1; int ret; /* @@ -1327,7 +1328,7 @@ struct mpic * __init mpic_alloc(struct device_node *node, brr1 = _mpic_read(mpic->reg_type, &mpic->thiscpuregs, MPIC_FSL_BRR1); - version = brr1 & MPIC_FSL_BRR1_VER; + fsl_version = brr1 & MPIC_FSL_BRR1_VER; /* Error interrupt mask register (EIMR) is required for * handling individual device error interrupts. EIMR @@ -1342,11 +1343,30 @@ struct mpic * __init mpic_alloc(struct device_node *node, * is the number of vectors which have been consumed by * ipis and timer interrupts. 
*/ - if (version >= 0x401) { + if (fsl_version >= 0x401) { ret = mpic_setup_error_int(mpic, intvec_top - 12); if (ret) return NULL; } + + } + + /* + * EPR is only available starting with v4.0. To support + * platforms that don't know the MPIC version at compile-time, + * such as qemu-e500, turn off coreint if this MPIC doesn't + * support it. Note that we never enable it if it wasn't + * requested in the first place. + * + * This is done outside the MPIC_FSL check, so that we + * also disable coreint if the MPIC node doesn't have + * an "fsl,mpic" compatible at all. This will be the case + * with device trees generated by older versions of QEMU. + * fsl_version will be zero if MPIC_FSL is not set. + */ + if (fsl_version < 0x400 && (flags & MPIC_ENABLE_COREINT)) { + WARN_ON(ppc_md.get_irq != mpic_get_coreint_irq); + ppc_md.get_irq = mpic_get_irq; } /* Reset */ @@ -1864,7 +1884,7 @@ int __init smp_mpic_probe(void) return nr_cpus; } -void __devinit smp_mpic_setup_cpu(int cpu) +void smp_mpic_setup_cpu(int cpu) { mpic_setup_this_cpu(); } diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c index e961f8c4a8f..c75325865a8 100644 --- a/arch/powerpc/sysdev/mpic_msgr.c +++ b/arch/powerpc/sysdev/mpic_msgr.c @@ -160,7 +160,7 @@ static int mpic_msgr_block_number(struct device_node *node) /* The probe function for a single message register block. */ -static __devinit int mpic_msgr_probe(struct platform_device *dev) +static int mpic_msgr_probe(struct platform_device *dev) { void __iomem *msgr_block_addr; int block_number; diff --git a/arch/powerpc/sysdev/mv64x60_pci.c b/arch/powerpc/sysdev/mv64x60_pci.c index 364b14d4754..330d56613c5 100644 --- a/arch/powerpc/sysdev/mv64x60_pci.c +++ b/arch/powerpc/sysdev/mv64x60_pci.c @@ -104,7 +104,7 @@ subsys_initcall(mv64x60_sysfs_init); #endif /* CONFIG_SYSFS */ -static void __devinit mv64x60_pci_fixup_early(struct pci_dev *dev) +static void mv64x60_pci_fixup_early(struct pci_dev *dev) { /* * Set the host bridge hdr_type to an invalid value so that diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c index 8f0465422b1..5aaf86c0389 100644 --- a/arch/powerpc/sysdev/pmi.c +++ b/arch/powerpc/sysdev/pmi.c @@ -214,18 +214,7 @@ static struct platform_driver pmi_of_platform_driver = { .of_match_table = pmi_match, }, }; - -static int __init pmi_module_init(void) -{ - return platform_driver_register(&pmi_of_platform_driver); -} -module_init(pmi_module_init); - -static void __exit pmi_module_exit(void) -{ - platform_driver_unregister(&pmi_of_platform_driver); -} -module_exit(pmi_module_exit); +module_platform_driver(pmi_of_platform_driver); int pmi_send_message(pmi_message_t msg) { diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c index 82c6702dcba..43948da837a 100644 --- a/arch/powerpc/sysdev/ppc4xx_msi.c +++ b/arch/powerpc/sysdev/ppc4xx_msi.c @@ -220,7 +220,7 @@ static int ppc4xx_of_msi_remove(struct platform_device *dev) return 0; } -static int __devinit ppc4xx_msi_probe(struct platform_device *dev) +static int ppc4xx_msi_probe(struct platform_device *dev) { struct ppc4xx_msi *msi; struct resource res; diff --git a/arch/powerpc/sysdev/ppc4xx_ocm.c b/arch/powerpc/sysdev/ppc4xx_ocm.c new file mode 100644 index 00000000000..1b15f93479c --- /dev/null +++ b/arch/powerpc/sysdev/ppc4xx_ocm.c @@ -0,0 +1,415 @@ +/* + * PowerPC 4xx OCM memory allocation support + * + * (C) Copyright 2009, Applied Micro Circuits Corporation + * Victor Gallardo (vgallardo@amcc.com) + * + * See file CREDITS for list of people who 
contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/dma-mapping.h> +#include <linux/of.h> +#include <asm/rheap.h> +#include <asm/ppc4xx_ocm.h> +#include <linux/slab.h> +#include <linux/debugfs.h> + +#define OCM_DISABLED 0 +#define OCM_ENABLED 1 + +struct ocm_block { + struct list_head list; + void __iomem *addr; + int size; + const char *owner; +}; + +/* non-cached or cached region */ +struct ocm_region { + phys_addr_t phys; + void __iomem *virt; + + int memtotal; + int memfree; + + rh_info_t *rh; + struct list_head list; +}; + +struct ocm_info { + int index; + int status; + int ready; + + phys_addr_t phys; + + int alignment; + int memtotal; + int cache_size; + + struct ocm_region nc; /* non-cached region */ + struct ocm_region c; /* cached region */ +}; + +static struct ocm_info *ocm_nodes; +static int ocm_count; + +static struct ocm_info *ocm_get_node(unsigned int index) +{ + if (index >= ocm_count) { + printk(KERN_ERR "PPC4XX OCM: invalid index"); + return NULL; + } + + return &ocm_nodes[index]; +} + +static int ocm_free_region(struct ocm_region *ocm_reg, const void *addr) +{ + struct ocm_block *blk, *tmp; + unsigned long offset; + + if (!ocm_reg->virt) + return 0; + + list_for_each_entry_safe(blk, tmp, &ocm_reg->list, list) { + if (blk->addr == addr) { + offset = addr - ocm_reg->virt; + ocm_reg->memfree += blk->size; + rh_free(ocm_reg->rh, offset); + list_del(&blk->list); + kfree(blk); + return 1; + } + } + + return 0; +} + +static void __init ocm_init_node(int count, struct device_node *node) +{ + struct ocm_info *ocm; + + const unsigned int *cell_index; + const unsigned int *cache_size; + int len; + + struct resource rsrc; + int ioflags; + + ocm = ocm_get_node(count); + + cell_index = of_get_property(node, "cell-index", &len); + if (!cell_index) { + printk(KERN_ERR "PPC4XX OCM: missing cell-index property"); + return; + } + ocm->index = *cell_index; + + if (of_device_is_available(node)) + ocm->status = OCM_ENABLED; + + cache_size = of_get_property(node, "cached-region-size", &len); + if (cache_size) + ocm->cache_size = *cache_size; + + if (of_address_to_resource(node, 0, &rsrc)) { + printk(KERN_ERR "PPC4XX OCM%d: could not get resource address\n", + ocm->index); + return; + } + + ocm->phys = rsrc.start; + ocm->memtotal = (rsrc.end - rsrc.start + 1); + + printk(KERN_INFO "PPC4XX OCM%d: %d Bytes (%s)\n", + ocm->index, ocm->memtotal, + (ocm->status == OCM_DISABLED) ? 
"disabled" : "enabled"); + + if (ocm->status == OCM_DISABLED) + return; + + /* request region */ + + if (!request_mem_region(ocm->phys, ocm->memtotal, "ppc4xx_ocm")) { + printk(KERN_ERR "PPC4XX OCM%d: could not request region\n", + ocm->index); + return; + } + + /* Configure non-cached and cached regions */ + + ocm->nc.phys = ocm->phys; + ocm->nc.memtotal = ocm->memtotal - ocm->cache_size; + ocm->nc.memfree = ocm->nc.memtotal; + + ocm->c.phys = ocm->phys + ocm->nc.memtotal; + ocm->c.memtotal = ocm->cache_size; + ocm->c.memfree = ocm->c.memtotal; + + if (ocm->nc.memtotal == 0) + ocm->nc.phys = 0; + + if (ocm->c.memtotal == 0) + ocm->c.phys = 0; + + printk(KERN_INFO "PPC4XX OCM%d: %d Bytes (non-cached)\n", + ocm->index, ocm->nc.memtotal); + + printk(KERN_INFO "PPC4XX OCM%d: %d Bytes (cached)\n", + ocm->index, ocm->c.memtotal); + + /* ioremap the non-cached region */ + if (ocm->nc.memtotal) { + ioflags = _PAGE_NO_CACHE | _PAGE_GUARDED | _PAGE_EXEC; + ocm->nc.virt = __ioremap(ocm->nc.phys, ocm->nc.memtotal, + ioflags); + + if (!ocm->nc.virt) { + printk(KERN_ERR + "PPC4XX OCM%d: failed to ioremap non-cached memory\n", + ocm->index); + ocm->nc.memfree = 0; + return; + } + } + + /* ioremap the cached region */ + + if (ocm->c.memtotal) { + ioflags = _PAGE_EXEC; + ocm->c.virt = __ioremap(ocm->c.phys, ocm->c.memtotal, + ioflags); + + if (!ocm->c.virt) { + printk(KERN_ERR + "PPC4XX OCM%d: failed to ioremap cached memory\n", + ocm->index); + ocm->c.memfree = 0; + return; + } + } + + /* Create Remote Heaps */ + + ocm->alignment = 4; /* default 4 byte alignment */ + + if (ocm->nc.virt) { + ocm->nc.rh = rh_create(ocm->alignment); + rh_attach_region(ocm->nc.rh, 0, ocm->nc.memtotal); + } + + if (ocm->c.virt) { + ocm->c.rh = rh_create(ocm->alignment); + rh_attach_region(ocm->c.rh, 0, ocm->c.memtotal); + } + + INIT_LIST_HEAD(&ocm->nc.list); + INIT_LIST_HEAD(&ocm->c.list); + + ocm->ready = 1; + + return; +} + +static int ocm_debugfs_show(struct seq_file *m, void *v) +{ + struct ocm_block *blk, *tmp; + unsigned int i; + + for (i = 0; i < ocm_count; i++) { + struct ocm_info *ocm = ocm_get_node(i); + + if (!ocm || !ocm->ready) + continue; + + seq_printf(m, "PPC4XX OCM : %d\n", ocm->index); + seq_printf(m, "PhysAddr : 0x%llx\n", ocm->phys); + seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal); + seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal); + seq_printf(m, "MemTotal(C) : %d Bytes\n", ocm->c.memtotal); + + seq_printf(m, "\n"); + + seq_printf(m, "NC.PhysAddr : 0x%llx\n", ocm->nc.phys); + seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt); + seq_printf(m, "NC.MemTotal : %d Bytes\n", ocm->nc.memtotal); + seq_printf(m, "NC.MemFree : %d Bytes\n", ocm->nc.memfree); + + list_for_each_entry_safe(blk, tmp, &ocm->nc.list, list) { + seq_printf(m, "NC.MemUsed : %d Bytes (%s)\n", + blk->size, blk->owner); + } + + seq_printf(m, "\n"); + + seq_printf(m, "C.PhysAddr : 0x%llx\n", ocm->c.phys); + seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt); + seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal); + seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree); + + list_for_each_entry_safe(blk, tmp, &ocm->c.list, list) { + seq_printf(m, "C.MemUsed : %d Bytes (%s)\n", + blk->size, blk->owner); + } + + seq_printf(m, "\n"); + } + + return 0; +} + +static int ocm_debugfs_open(struct inode *inode, struct file *file) +{ + return single_open(file, ocm_debugfs_show, NULL); +} + +static const struct file_operations ocm_debugfs_fops = { + .open = ocm_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = 
single_release, +}; + +static int ocm_debugfs_init(void) +{ + struct dentry *junk; + + junk = debugfs_create_dir("ppc4xx_ocm", 0); + if (!junk) { + printk(KERN_ALERT "debugfs ppc4xx ocm: failed to create dir\n"); + return -1; + } + + if (debugfs_create_file("info", 0644, junk, NULL, &ocm_debugfs_fops)) { + printk(KERN_ALERT "debugfs ppc4xx ocm: failed to create file\n"); + return -1; + } + + return 0; +} + +void *ppc4xx_ocm_alloc(phys_addr_t *phys, int size, int align, + int flags, const char *owner) +{ + void __iomem *addr = NULL; + unsigned long offset; + struct ocm_info *ocm; + struct ocm_region *ocm_reg; + struct ocm_block *ocm_blk; + int i; + + for (i = 0; i < ocm_count; i++) { + ocm = ocm_get_node(i); + + if (!ocm || !ocm->ready) + continue; + + if (flags == PPC4XX_OCM_NON_CACHED) + ocm_reg = &ocm->nc; + else + ocm_reg = &ocm->c; + + if (!ocm_reg->virt) + continue; + + if (align < ocm->alignment) + align = ocm->alignment; + + offset = rh_alloc_align(ocm_reg->rh, size, align, NULL); + + if (IS_ERR_VALUE(offset)) + continue; + + ocm_blk = kzalloc(sizeof(struct ocm_block *), GFP_KERNEL); + if (!ocm_blk) { + printk(KERN_ERR "PPC4XX OCM: could not allocate ocm block"); + rh_free(ocm_reg->rh, offset); + break; + } + + *phys = ocm_reg->phys + offset; + addr = ocm_reg->virt + offset; + size = ALIGN(size, align); + + ocm_blk->addr = addr; + ocm_blk->size = size; + ocm_blk->owner = owner; + list_add_tail(&ocm_blk->list, &ocm_reg->list); + + ocm_reg->memfree -= size; + + break; + } + + return addr; +} + +void ppc4xx_ocm_free(const void *addr) +{ + int i; + + if (!addr) + return; + + for (i = 0; i < ocm_count; i++) { + struct ocm_info *ocm = ocm_get_node(i); + + if (!ocm || !ocm->ready) + continue; + + if (ocm_free_region(&ocm->nc, addr) || + ocm_free_region(&ocm->c, addr)) + return; + } +} + +static int __init ppc4xx_ocm_init(void) +{ + struct device_node *np; + int count; + + count = 0; + for_each_compatible_node(np, NULL, "ibm,ocm") + count++; + + if (!count) + return 0; + + ocm_nodes = kzalloc((count * sizeof(struct ocm_info)), GFP_KERNEL); + if (!ocm_nodes) { + printk(KERN_ERR "PPC4XX OCM: failed to allocate OCM nodes!\n"); + return -ENOMEM; + } + + ocm_count = count; + count = 0; + + for_each_compatible_node(np, NULL, "ibm,ocm") { + ocm_init_node(count, np); + count++; + } + + ocm_debugfs_init(); + + return 0; +} + +arch_initcall(ppc4xx_ocm_init); diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c index b0436752972..238a07b97f2 100644 --- a/arch/powerpc/sysdev/qe_lib/qe.c +++ b/arch/powerpc/sysdev/qe_lib/qe.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006-2010 Freescale Semicondutor, Inc. All rights reserved. + * Copyright (C) 2006-2010 Freescale Semiconductor, Inc. All rights reserved. * * Authors: Shlomi Gridish <gridish@freescale.com> * Li Yang <leoli@freescale.com> diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c index 2fba6ef2f95..b2b87c30e26 100644 --- a/arch/powerpc/sysdev/qe_lib/qe_ic.c +++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c @@ -1,7 +1,7 @@ /* * arch/powerpc/sysdev/qe_lib/qe_ic.c * - * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved. + * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. 
* * Author: Li Yang <leoli@freescale.com> * Based on code from Shlomi Gridish <gridish@freescale.com> diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.h b/arch/powerpc/sysdev/qe_lib/qe_ic.h index c327872ed35..efef7ab9b75 100644 --- a/arch/powerpc/sysdev/qe_lib/qe_ic.h +++ b/arch/powerpc/sysdev/qe_lib/qe_ic.h @@ -3,7 +3,7 @@ * * QUICC ENGINE Interrupt Controller Header * - * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved. + * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. * * Author: Li Yang <leoli@freescale.com> * Based on code from Shlomi Gridish <gridish@freescale.com> diff --git a/arch/powerpc/sysdev/qe_lib/qe_io.c b/arch/powerpc/sysdev/qe_lib/qe_io.c index fd1a6c3b172..a88807b3dd5 100644 --- a/arch/powerpc/sysdev/qe_lib/qe_io.c +++ b/arch/powerpc/sysdev/qe_lib/qe_io.c @@ -3,7 +3,7 @@ * * QE Parallel I/O ports configuration routines * - * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved. + * Copyright 2006 Freescale Semiconductor, Inc. All rights reserved. * * Author: Li Yang <LeoLi@freescale.com> * Based on code from Shlomi Gridish <gridish@freescale.com> diff --git a/arch/powerpc/sysdev/qe_lib/ucc.c b/arch/powerpc/sysdev/qe_lib/ucc.c index 04677505f20..134b07d2943 100644 --- a/arch/powerpc/sysdev/qe_lib/ucc.c +++ b/arch/powerpc/sysdev/qe_lib/ucc.c @@ -3,7 +3,7 @@ * * QE UCC API Set - UCC specific routines implementations. * - * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved. + * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. * * Authors: Shlomi Gridish <gridish@freescale.com> * Li Yang <leoli@freescale.com> diff --git a/arch/powerpc/sysdev/qe_lib/ucc_fast.c b/arch/powerpc/sysdev/qe_lib/ucc_fast.c index fba02440d12..cceb2e36673 100644 --- a/arch/powerpc/sysdev/qe_lib/ucc_fast.c +++ b/arch/powerpc/sysdev/qe_lib/ucc_fast.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved. + * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. * * Authors: Shlomi Gridish <gridish@freescale.com> * Li Yang <leoli@freescale.com> diff --git a/arch/powerpc/sysdev/qe_lib/ucc_slow.c b/arch/powerpc/sysdev/qe_lib/ucc_slow.c index 524c0ead941..1c062f48f1a 100644 --- a/arch/powerpc/sysdev/qe_lib/ucc_slow.c +++ b/arch/powerpc/sysdev/qe_lib/ucc_slow.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved. + * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. * * Authors: Shlomi Gridish <gridish@freescale.com> * Li Yang <leoli@freescale.com> diff --git a/arch/powerpc/sysdev/qe_lib/usb.c b/arch/powerpc/sysdev/qe_lib/usb.c index 9162828f5da..27f23bd15eb 100644 --- a/arch/powerpc/sysdev/qe_lib/usb.c +++ b/arch/powerpc/sysdev/qe_lib/usb.c @@ -1,7 +1,7 @@ /* * QE USB routines * - * Copyright (c) Freescale Semicondutor, Inc. 2006. + * Copyright 2006 Freescale Semiconductor, Inc. * Shlomi Gridish <gridish@freescale.com> * Jerry Huang <Chang-Ming.Huang@freescale.com> * Copyright (c) MontaVista Software, Inc. 2008. 
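The new arch/powerpc/sysdev/ppc4xx_ocm.c added above exposes ppc4xx_ocm_alloc() and ppc4xx_ocm_free() for 4xx on-chip memory. A minimal usage sketch follows; it is not part of the patch, and the buffer size, alignment and "example-driver" owner string are purely illustrative (the prototypes and the PPC4XX_OCM_NON_CACHED flag are taken from the file added above, which assumes a board whose device tree carries an "ibm,ocm" node).

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/ppc4xx_ocm.h>

static void *ocm_buf;
static phys_addr_t ocm_phys;

static int example_ocm_setup(void)
{
	/* Request 4 KiB of non-cached OCM, 32-byte aligned. */
	ocm_buf = ppc4xx_ocm_alloc(&ocm_phys, 4096, 32,
				   PPC4XX_OCM_NON_CACHED, "example-driver");
	if (!ocm_buf)
		return -ENOMEM;	/* no OCM node, region disabled, or exhausted */

	pr_info("example: OCM buffer virt %p phys %pa\n", ocm_buf, &ocm_phys);
	return 0;
}

static void example_ocm_teardown(void)
{
	ppc4xx_ocm_free(ocm_buf);	/* hands the block back to the rheap */
	ocm_buf = NULL;
}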
diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c index 702256a1ca1..9193e12df69 100644 --- a/arch/powerpc/sysdev/scom.c +++ b/arch/powerpc/sysdev/scom.c @@ -157,7 +157,7 @@ static int scom_debug_init_one(struct dentry *root, struct device_node *dn, ent->map = SCOM_MAP_INVALID; spin_lock_init(&ent->lock); snprintf(ent->name, 8, "scom%d", i); - ent->blob.data = dn->full_name; + ent->blob.data = (void*) dn->full_name; ent->blob.size = strlen(dn->full_name); dir = debugfs_create_dir(ent->name, root); diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c index c782f85cf7e..936575d99c5 100644 --- a/arch/powerpc/sysdev/xics/ics-rtas.c +++ b/arch/powerpc/sysdev/xics/ics-rtas.c @@ -213,7 +213,7 @@ static int ics_rtas_host_match(struct ics *ics, struct device_node *node) return !of_device_is_compatible(node, "chrp,iic"); } -int ics_rtas_init(void) +__init int ics_rtas_init(void) { ibm_get_xive = rtas_token("ibm,get-xive"); ibm_set_xive = rtas_token("ibm,set-xive"); diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile index c168c54e3c4..1278788d96e 100644 --- a/arch/powerpc/xmon/Makefile +++ b/arch/powerpc/xmon/Makefile @@ -4,9 +4,9 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror GCOV_PROFILE := n -ccflags-$(CONFIG_PPC64) := -mno-minimal-toc +ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) -obj-y += xmon.o start.o nonstdio.o +obj-y += xmon.o nonstdio.o ifdef CONFIG_XMON_DISASSEMBLY obj-y += ppc-dis.o ppc-opc.o diff --git a/arch/powerpc/xmon/nonstdio.c b/arch/powerpc/xmon/nonstdio.c index bfac84fbe78..bce3dcfe505 100644 --- a/arch/powerpc/xmon/nonstdio.c +++ b/arch/powerpc/xmon/nonstdio.c @@ -7,9 +7,23 @@ * 2 of the License, or (at your option) any later version. */ #include <linux/string.h> +#include <asm/udbg.h> #include <asm/time.h> #include "nonstdio.h" + +static int xmon_write(const void *ptr, int nb) +{ + return udbg_write(ptr, nb); +} + +static int xmon_readchar(void) +{ + if (udbg_getc) + return udbg_getc(); + return -1; +} + int xmon_putchar(int c) { char ch = c; @@ -23,34 +37,7 @@ static char line[256]; static char *lineptr; static int lineleft; -int xmon_expect(const char *str, unsigned long timeout) -{ - int c; - unsigned long t0; - - /* assume 25MHz default timebase if tb_ticks_per_sec not set yet */ - timeout *= tb_ticks_per_sec? tb_ticks_per_sec: 25000000; - t0 = get_tbl(); - do { - lineptr = line; - for (;;) { - c = xmon_read_poll(); - if (c == -1) { - if (get_tbl() - t0 > timeout) - return 0; - continue; - } - if (c == '\n') - break; - if (c != '\r' && lineptr < &line[sizeof(line) - 1]) - *lineptr++ = c; - } - *lineptr = 0; - } while (strstr(line, str) == NULL); - return 1; -} - -int xmon_getchar(void) +static int xmon_getchar(void) { int c; @@ -124,13 +111,19 @@ char *xmon_gets(char *str, int nb) void xmon_printf(const char *format, ...) 
{ va_list args; - int n; static char xmon_outbuf[1024]; + int rc, n; va_start(args, format); n = vsnprintf(xmon_outbuf, sizeof(xmon_outbuf), format, args); va_end(args); - xmon_write(xmon_outbuf, n); + + rc = xmon_write(xmon_outbuf, n); + + if (n && rc == 0) { + /* No udbg hooks, fallback to printk() - dangerous */ + printk(xmon_outbuf); + } } void xmon_puts(const char *str) diff --git a/arch/powerpc/xmon/nonstdio.h b/arch/powerpc/xmon/nonstdio.h index 23dd95f4599..18a51ded4ff 100644 --- a/arch/powerpc/xmon/nonstdio.h +++ b/arch/powerpc/xmon/nonstdio.h @@ -4,12 +4,6 @@ #define putchar xmon_putchar extern int xmon_putchar(int c); -extern int xmon_getchar(void); extern void xmon_puts(const char *); extern char *xmon_gets(char *, int); extern void xmon_printf(const char *, ...); -extern void xmon_map_scc(void); -extern int xmon_expect(const char *str, unsigned long timeout); -extern int xmon_write(const void *ptr, int nb); -extern int xmon_readchar(void); -extern int xmon_read_poll(void); diff --git a/arch/powerpc/xmon/start.c b/arch/powerpc/xmon/start.c deleted file mode 100644 index 8864de2af38..00000000000 --- a/arch/powerpc/xmon/start.c +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright (C) 1996 Paul Mackerras. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ -#include <asm/machdep.h> -#include <asm/udbg.h> -#include "nonstdio.h" - -void xmon_map_scc(void) -{ -} - -int xmon_write(const void *ptr, int nb) -{ - return udbg_write(ptr, nb); -} - -int xmon_readchar(void) -{ - if (udbg_getc) - return udbg_getc(); - return -1; -} - -int xmon_read_poll(void) -{ - if (udbg_getc_poll) - return udbg_getc_poll(); - return -1; -} diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 3a56a639a92..13f85defabe 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -43,6 +43,7 @@ #include <asm/setjmp.h> #include <asm/reg.h> #include <asm/debug.h> +#include <asm/hw_breakpoint.h> #ifdef CONFIG_PPC64 #include <asm/hvcall.h> @@ -52,9 +53,6 @@ #include "nonstdio.h" #include "dis-asm.h" -#define scanhex xmon_scanhex -#define skipbl xmon_skipbl - #ifdef CONFIG_SMP static cpumask_t cpus_in_xmon = CPU_MASK_NONE; static unsigned long xmon_taken = 1; @@ -169,12 +167,8 @@ extern void xmon_leave(void); #ifdef CONFIG_PPC64 #define REG "%.16lx" -#define REGS_PER_LINE 4 -#define LAST_VOLATILE 13 #else #define REG "%.8lx" -#define REGS_PER_LINE 8 -#define LAST_VOLATILE 12 #endif #define GETWORD(v) (((v)[0] << 24) + ((v)[1] << 16) + ((v)[2] << 8) + (v)[3]) @@ -614,7 +608,7 @@ static int xmon_sstep(struct pt_regs *regs) return 1; } -static int xmon_dabr_match(struct pt_regs *regs) +static int xmon_break_match(struct pt_regs *regs) { if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT)) return 0; @@ -747,8 +741,14 @@ static void insert_bpts(void) static void insert_cpu_bpts(void) { - if (dabr.enabled) - set_dabr(dabr.address | (dabr.enabled & 7), DABRX_ALL); + struct arch_hw_breakpoint brk; + + if (dabr.enabled) { + brk.address = dabr.address; + brk.type = (dabr.enabled & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL; + brk.len = 8; + set_breakpoint(&brk); + } if (iabr && cpu_has_feature(CPU_FTR_IABR)) mtspr(SPRN_IABR, iabr->address | (iabr->enabled & (BP_IABR|BP_IABR_TE))); @@ -776,7 +776,7 @@ static void remove_bpts(void) static void remove_cpu_bpts(void) { - set_dabr(0, 
0); + hw_breakpoint_disable(); if (cpu_has_feature(CPU_FTR_IABR)) mtspr(SPRN_IABR, 0); } @@ -1145,7 +1145,7 @@ bpt_cmds(void) printf(badaddr); break; } - dabr.address &= ~7; + dabr.address &= ~HW_BRK_TYPE_DABR; dabr.enabled = mode | BP_DABR; } break; @@ -1288,27 +1288,19 @@ static void get_function_bounds(unsigned long pc, unsigned long *startp, catch_memory_errors = 0; } -static int xmon_depth_to_print = 64; - #define LRSAVE_OFFSET (STACK_FRAME_LR_SAVE * sizeof(unsigned long)) #define MARKER_OFFSET (STACK_FRAME_MARKER * sizeof(unsigned long)) -#ifdef __powerpc64__ -#define REGS_OFFSET 0x70 -#else -#define REGS_OFFSET 16 -#endif - static void xmon_show_stack(unsigned long sp, unsigned long lr, unsigned long pc) { + int max_to_print = 64; unsigned long ip; unsigned long newsp; unsigned long marker; - int count = 0; struct pt_regs regs; - do { + while (max_to_print--) { if (sp < PAGE_OFFSET) { if (sp != 0) printf("SP (%lx) is in userspace\n", sp); @@ -1362,10 +1354,10 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr, an exception frame. */ if (mread(sp + MARKER_OFFSET, &marker, sizeof(unsigned long)) && marker == STACK_FRAME_REGS_MARKER) { - if (mread(sp + REGS_OFFSET, &regs, sizeof(regs)) + if (mread(sp + STACK_FRAME_OVERHEAD, &regs, sizeof(regs)) != sizeof(regs)) { printf("Couldn't read registers at %lx\n", - sp + REGS_OFFSET); + sp + STACK_FRAME_OVERHEAD); break; } printf("--- Exception: %lx %s at ", regs.trap, @@ -1379,7 +1371,7 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr, break; sp = newsp; - } while (count++ < xmon_depth_to_print); + } } static void backtrace(struct pt_regs *excp) @@ -2932,7 +2924,7 @@ static void xmon_init(int enable) __debugger_bpt = xmon_bpt; __debugger_sstep = xmon_sstep; __debugger_iabr_match = xmon_iabr_match; - __debugger_dabr_match = xmon_dabr_match; + __debugger_break_match = xmon_break_match; __debugger_fault_handler = xmon_fault_handler; } else { __debugger = NULL; @@ -2940,10 +2932,9 @@ static void xmon_init(int enable) __debugger_bpt = NULL; __debugger_sstep = NULL; __debugger_iabr_match = NULL; - __debugger_dabr_match = NULL; + __debugger_break_match = NULL; __debugger_fault_handler = NULL; } - xmon_map_scc(); } #ifdef CONFIG_MAGIC_SYSRQ
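The xmon hunks above drop set_dabr()/xmon_dabr_match() in favour of the generic powerpc breakpoint helpers pulled in via <asm/hw_breakpoint.h>. A rough sketch of that API, mirroring what insert_cpu_bpts(), remove_cpu_bpts() and bpt_cmds() do after this patch (the example_watch/example_unwatch wrappers and the watched address are hypothetical):

#include <asm/debug.h>
#include <asm/hw_breakpoint.h>

/* Arm a kernel data breakpoint the way insert_cpu_bpts() now does. */
static void example_watch(unsigned long addr)
{
	struct arch_hw_breakpoint brk = {
		.address = addr & ~(unsigned long)HW_BRK_TYPE_DABR, /* same mask bpt_cmds() applies */
		.type	 = HW_BRK_TYPE_DABR | HW_BRK_TYPE_PRIV_ALL,
		.len	 = 8,
	};

	set_breakpoint(&brk);
}

/* Remove it again; this replaces the old set_dabr(0, 0). */
static void example_unwatch(void)
{
	hw_breakpoint_disable();
}

The point of the switch is that xmon now only describes the breakpoint (address, type, length) and lets set_breakpoint() drive whichever debug facility the CPU provides, rather than writing the DABR directly.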