Diffstat (limited to 'arch')
311 files changed, 2539 insertions, 1766 deletions
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c index 6b58c1de757..400c663b21c 100644 --- a/arch/arc/mm/cache_arc700.c +++ b/arch/arc/mm/cache_arc700.c @@ -282,7 +282,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr, #else /* if V-P const for loop, PTAG can be written once outside loop */ if (full_page_op) - write_aux_reg(ARC_REG_DC_PTAG, paddr); + write_aux_reg(aux_tag, paddr); #endif while (num_lines-- > 0) { @@ -296,7 +296,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr, write_aux_reg(aux_cmd, vaddr); vaddr += L1_CACHE_BYTES; #else - write_aux_reg(aux, paddr); + write_aux_reg(aux_cmd, paddr); paddr += L1_CACHE_BYTES; #endif } diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index e2541981779..15949459611 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1578,6 +1578,7 @@ config BL_SWITCHER_DUMMY_IF choice prompt "Memory split" + depends on MMU default VMSPLIT_3G help Select the desired split between kernel and user memory. @@ -1595,6 +1596,7 @@ endchoice config PAGE_OFFSET hex + default PHYS_OFFSET if !MMU default 0x40000000 if VMSPLIT_1G default 0x80000000 if VMSPLIT_2G default 0xC0000000 @@ -1903,6 +1905,7 @@ config XEN depends on ARM && AEABI && OF depends on CPU_V7 && !CPU_V6 depends on !GENERIC_ATOMIC64 + depends on MMU select ARM_PSCI select SWIOTLB_XEN select ARCH_DMA_ADDR_T_64BIT diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore index 47279aa96a6..0714e0334e3 100644 --- a/arch/arm/boot/compressed/.gitignore +++ b/arch/arm/boot/compressed/.gitignore @@ -1,4 +1,5 @@ ashldi3.S +bswapsdi2.S font.c lib1funcs.S hyp-stub.S diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index b9d6a8b485e..032030361be 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -38,6 +38,7 @@ dtb-$(CONFIG_ARCH_AT91) += at91sam9g35ek.dtb dtb-$(CONFIG_ARCH_AT91) += at91sam9x25ek.dtb dtb-$(CONFIG_ARCH_AT91) += at91sam9x35ek.dtb # sama5d3 +dtb-$(CONFIG_ARCH_AT91) += at91-sama5d3_xplained.dtb dtb-$(CONFIG_ARCH_AT91) += sama5d31ek.dtb dtb-$(CONFIG_ARCH_AT91) += sama5d33ek.dtb dtb-$(CONFIG_ARCH_AT91) += sama5d34ek.dtb @@ -208,7 +209,8 @@ dtb-$(CONFIG_ARCH_OMAP2PLUS) += omap2420-h4.dtb \ omap3-n900.dtb \ omap3-n9.dtb \ omap3-n950.dtb \ - omap3-tobi.dtb \ + omap3-overo-tobi.dtb \ + omap3-overo-storm-tobi.dtb \ omap3-gta04.dtb \ omap3-igep0020.dtb \ omap3-igep0030.dtb \ diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts index 4718ec4a4db..486880b7483 100644 --- a/arch/arm/boot/dts/am335x-evmsk.dts +++ b/arch/arm/boot/dts/am335x-evmsk.dts @@ -121,7 +121,7 @@ ti,model = "AM335x-EVMSK"; ti,audio-codec = <&tlv320aic3106>; ti,mcasp-controller = <&mcasp1>; - ti,codec-clock-rate = <24576000>; + ti,codec-clock-rate = <24000000>; ti,audio-routing = "Headphone Jack", "HPLOUT", "Headphone Jack", "HPROUT"; @@ -256,6 +256,12 @@ >; }; + mmc1_pins: pinmux_mmc1_pins { + pinctrl-single,pins = < + 0x160 (PIN_INPUT | MUX_MODE7) /* spi0_cs1.gpio0_6 */ + >; + }; + mcasp1_pins: mcasp1_pins { pinctrl-single,pins = < 0x10c (PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_crs.mcasp1_aclkx */ @@ -456,6 +462,9 @@ status = "okay"; vmmc-supply = <&vmmc_reg>; bus-width = <4>; + pinctrl-names = "default"; + pinctrl-0 = <&mmc1_pins>; + cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; }; &sham { diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi index 66609684d41..9480cf891f8 100644 --- 
a/arch/arm/boot/dts/armada-xp-mv78260.dtsi +++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi @@ -23,6 +23,7 @@ gpio0 = &gpio0; gpio1 = &gpio1; gpio2 = &gpio2; + eth3 = ð3; }; cpus { @@ -291,7 +292,7 @@ interrupts = <91>; }; - ethernet@34000 { + eth3: ethernet@34000 { compatible = "marvell,armada-370-neta"; reg = <0x34000 0x4000>; interrupts = <14>; diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts new file mode 100644 index 00000000000..ce1375595e5 --- /dev/null +++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts @@ -0,0 +1,229 @@ +/* + * at91-sama5d3_xplained.dts - Device Tree file for the SAMA5D3 Xplained board + * + * Copyright (C) 2014 Atmel, + * 2014 Nicolas Ferre <nicolas.ferre@atmel.com> + * + * Licensed under GPLv2 or later. + */ +/dts-v1/; +#include "sama5d36.dtsi" + +/ { + model = "SAMA5D3 Xplained"; + compatible = "atmel,sama5d3-xplained", "atmel,sama5d3", "atmel,sama5"; + + chosen { + bootargs = "console=ttyS0,115200"; + }; + + memory { + reg = <0x20000000 0x10000000>; + }; + + ahb { + apb { + mmc0: mmc@f0000000 { + pinctrl-0 = <&pinctrl_mmc0_clk_cmd_dat0 &pinctrl_mmc0_dat1_3 &pinctrl_mmc0_dat4_7 &pinctrl_mmc0_cd>; + status = "okay"; + slot@0 { + reg = <0>; + bus-width = <8>; + cd-gpios = <&pioE 0 GPIO_ACTIVE_LOW>; + }; + }; + + spi0: spi@f0004000 { + cs-gpios = <&pioD 13 0>; + status = "okay"; + }; + + can0: can@f000c000 { + status = "okay"; + }; + + i2c0: i2c@f0014000 { + status = "okay"; + }; + + i2c1: i2c@f0018000 { + status = "okay"; + }; + + macb0: ethernet@f0028000 { + phy-mode = "rgmii"; + status = "okay"; + }; + + usart0: serial@f001c000 { + status = "okay"; + }; + + usart1: serial@f0020000 { + pinctrl-0 = <&pinctrl_usart1 &pinctrl_usart1_rts_cts>; + status = "okay"; + }; + + uart0: serial@f0024000 { + status = "okay"; + }; + + mmc1: mmc@f8000000 { + pinctrl-0 = <&pinctrl_mmc1_clk_cmd_dat0 &pinctrl_mmc1_dat1_3 &pinctrl_mmc1_cd>; + status = "okay"; + slot@0 { + reg = <0>; + bus-width = <4>; + cd-gpios = <&pioE 1 GPIO_ACTIVE_HIGH>; + }; + }; + + spi1: spi@f8008000 { + cs-gpios = <&pioC 25 0>, <0>, <0>, <&pioD 16 0>; + status = "okay"; + }; + + adc0: adc@f8018000 { + pinctrl-0 = < + &pinctrl_adc0_adtrg + &pinctrl_adc0_ad0 + &pinctrl_adc0_ad1 + &pinctrl_adc0_ad2 + &pinctrl_adc0_ad3 + &pinctrl_adc0_ad4 + &pinctrl_adc0_ad5 + &pinctrl_adc0_ad6 + &pinctrl_adc0_ad7 + &pinctrl_adc0_ad8 + &pinctrl_adc0_ad9 + >; + status = "okay"; + }; + + i2c2: i2c@f801c000 { + dmas = <0>, <0>; /* Do not use DMA for i2c2 */ + status = "okay"; + }; + + macb1: ethernet@f802c000 { + phy-mode = "rmii"; + status = "okay"; + }; + + dbgu: serial@ffffee00 { + status = "okay"; + }; + + pinctrl@fffff200 { + board { + pinctrl_mmc0_cd: mmc0_cd { + atmel,pins = + <AT91_PIOE 0 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>; + }; + + pinctrl_mmc1_cd: mmc1_cd { + atmel,pins = + <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>; + }; + + pinctrl_usba_vbus: usba_vbus { + atmel,pins = + <AT91_PIOE 9 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>; /* PE9, conflicts with A9 */ + }; + }; + }; + + pmc: pmc@fffffc00 { + main: mainck { + clock-frequency = <12000000>; + }; + }; + }; + + nand0: nand@60000000 { + nand-bus-width = <8>; + nand-ecc-mode = "hw"; + atmel,has-pmecc; + atmel,pmecc-cap = <4>; + atmel,pmecc-sector-size = <512>; + nand-on-flash-bbt; + status = "okay"; + + at91bootstrap@0 { + label = "at91bootstrap"; + reg = <0x0 0x40000>; + }; + + bootloader@40000 { + label = "bootloader"; + reg = <0x40000 0x80000>; + }; + + bootloaderenv@c0000 { + label = 
"bootloader env"; + reg = <0xc0000 0xc0000>; + }; + + dtb@180000 { + label = "device tree"; + reg = <0x180000 0x80000>; + }; + + kernel@200000 { + label = "kernel"; + reg = <0x200000 0x600000>; + }; + + rootfs@800000 { + label = "rootfs"; + reg = <0x800000 0x0f800000>; + }; + }; + + usb0: gadget@00500000 { + atmel,vbus-gpio = <&pioE 9 GPIO_ACTIVE_HIGH>; /* PE9, conflicts with A9 */ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usba_vbus>; + status = "okay"; + }; + + usb1: ohci@00600000 { + num-ports = <3>; + atmel,vbus-gpio = <0 + &pioE 3 GPIO_ACTIVE_LOW + &pioE 4 GPIO_ACTIVE_LOW + >; + status = "okay"; + }; + + usb2: ehci@00700000 { + status = "okay"; + }; + }; + + gpio_keys { + compatible = "gpio-keys"; + + bp3 { + label = "PB_USER"; + gpios = <&pioE 29 GPIO_ACTIVE_LOW>; + linux,code = <0x104>; + gpio-key,wakeup; + }; + }; + + leds { + compatible = "gpio-leds"; + + d2 { + label = "d2"; + gpios = <&pioE 23 GPIO_ACTIVE_LOW>; /* PE23, conflicts with A23, CTS2 */ + linux,default-trigger = "heartbeat"; + }; + + d3 { + label = "d3"; + gpios = <&pioE 24 GPIO_ACTIVE_HIGH>; + }; + }; +}; diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi index 0042f73068b..fece8665fb6 100644 --- a/arch/arm/boot/dts/at91sam9263.dtsi +++ b/arch/arm/boot/dts/at91sam9263.dtsi @@ -523,7 +523,7 @@ }; i2c0: i2c@fff88000 { - compatible = "atmel,at91sam9263-i2c"; + compatible = "atmel,at91sam9260-i2c"; reg = <0xfff88000 0x100>; interrupts = <13 IRQ_TYPE_LEVEL_HIGH 6>; #address-cells = <1>; diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts index e9487f6f016..924a6a6ffd0 100644 --- a/arch/arm/boot/dts/at91sam9n12ek.dts +++ b/arch/arm/boot/dts/at91sam9n12ek.dts @@ -124,6 +124,10 @@ nand-on-flash-bbt; status = "okay"; }; + + usb0: ohci@00500000 { + status = "okay"; + }; }; leds { diff --git a/arch/arm/boot/dts/bcm11351.dtsi b/arch/arm/boot/dts/bcm11351.dtsi index e491b82f8d6..792fde1b7f7 100644 --- a/arch/arm/boot/dts/bcm11351.dtsi +++ b/arch/arm/boot/dts/bcm11351.dtsi @@ -147,7 +147,7 @@ }; pinctrl@35004800 { - compatible = "brcm,capri-pinctrl"; + compatible = "brcm,bcm11351-pinctrl"; reg = <0x35004800 0x430>; }; diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi index 2b76524f4aa..187fd46b7b5 100644 --- a/arch/arm/boot/dts/dove.dtsi +++ b/arch/arm/boot/dts/dove.dtsi @@ -379,15 +379,6 @@ #clock-cells = <1>; }; - pmu_intc: pmu-interrupt-ctrl@d0050 { - compatible = "marvell,dove-pmu-intc"; - interrupt-controller; - #interrupt-cells = <1>; - reg = <0xd0050 0x8>; - interrupts = <33>; - marvell,#interrupts = <7>; - }; - pinctrl: pin-ctrl@d0200 { compatible = "marvell,dove-pinctrl"; reg = <0xd0200 0x10>; @@ -610,8 +601,6 @@ rtc: real-time-clock@d8500 { compatible = "marvell,orion-rtc"; reg = <0xd8500 0x20>; - interrupt-parent = <&pmu_intc>; - interrupts = <5>; }; gpio2: gpio-ctrl@e8400 { diff --git a/arch/arm/boot/dts/imx6dl-hummingboard.dts b/arch/arm/boot/dts/imx6dl-hummingboard.dts index fd8fc7cd53f..5bfae54fb78 100644 --- a/arch/arm/boot/dts/imx6dl-hummingboard.dts +++ b/arch/arm/boot/dts/imx6dl-hummingboard.dts @@ -52,12 +52,6 @@ }; }; - codec: spdif-transmitter { - compatible = "linux,spdif-dit"; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_hummingboard_spdif>; - }; - sound-spdif { compatible = "fsl,imx-audio-spdif"; model = "imx-spdif"; @@ -111,7 +105,7 @@ }; pinctrl_hummingboard_spdif: hummingboard-spdif { - fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>; + fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>; }; 
pinctrl_hummingboard_usbh1_vbus: hummingboard-usbh1-vbus { @@ -142,6 +136,8 @@ }; &spdif { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_hummingboard_spdif>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi index 64daa3b311f..c2a24888a27 100644 --- a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi +++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi @@ -46,12 +46,6 @@ }; }; - codec: spdif-transmitter { - compatible = "linux,spdif-dit"; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_cubox_i_spdif>; - }; - sound-spdif { compatible = "fsl,imx-audio-spdif"; model = "imx-spdif"; @@ -89,7 +83,7 @@ }; pinctrl_cubox_i_spdif: cubox-i-spdif { - fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>; + fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>; }; pinctrl_cubox_i_usbh1_vbus: cubox-i-usbh1-vbus { @@ -121,6 +115,8 @@ }; &spdif { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_cubox_i_spdif>; status = "okay"; }; diff --git a/arch/arm/boot/dts/keystone-clocks.dtsi b/arch/arm/boot/dts/keystone-clocks.dtsi index 2363593e105..ef58d1c2431 100644 --- a/arch/arm/boot/dts/keystone-clocks.dtsi +++ b/arch/arm/boot/dts/keystone-clocks.dtsi @@ -612,7 +612,7 @@ clocks { compatible = "ti,keystone,psc-clock"; clocks = <&chipclk13>; clock-output-names = "vcp-3"; - reg = <0x0235000a8 0xb00>, <0x02350060 0x400>; + reg = <0x023500a8 0xb00>, <0x02350060 0x400>; reg-names = "control", "domain"; domain-id = <24>; }; diff --git a/arch/arm/boot/dts/omap3-gta04.dts b/arch/arm/boot/dts/omap3-gta04.dts index b9b55c95a56..d3b253bbc88 100644 --- a/arch/arm/boot/dts/omap3-gta04.dts +++ b/arch/arm/boot/dts/omap3-gta04.dts @@ -13,7 +13,7 @@ / { model = "OMAP3 GTA04"; - compatible = "ti,omap3-gta04", "ti,omap3"; + compatible = "ti,omap3-gta04", "ti,omap36xx", "ti,omap3"; cpus { cpu@0 { @@ -32,7 +32,7 @@ aux-button { label = "aux"; linux,code = <169>; - gpios = <&gpio1 7 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>; gpio-key,wakeup; }; }; @@ -92,6 +92,8 @@ bmp085@77 { compatible = "bosch,bmp085"; reg = <0x77>; + interrupt-parent = <&gpio4>; + interrupts = <17 IRQ_TYPE_EDGE_RISING>; }; /* leds */ @@ -141,8 +143,8 @@ pinctrl-names = "default"; pinctrl-0 = <&mmc1_pins>; vmmc-supply = <&vmmc1>; - vmmc_aux-supply = <&vsim>; bus-width = <4>; + ti,non-removable; }; &mmc2 { diff --git a/arch/arm/boot/dts/omap3-igep0020.dts b/arch/arm/boot/dts/omap3-igep0020.dts index 25a2b5f652f..f2779ac7587 100644 --- a/arch/arm/boot/dts/omap3-igep0020.dts +++ b/arch/arm/boot/dts/omap3-igep0020.dts @@ -14,7 +14,7 @@ / { model = "IGEPv2 (TI OMAP AM/DM37x)"; - compatible = "isee,omap3-igep0020", "ti,omap3"; + compatible = "isee,omap3-igep0020", "ti,omap36xx", "ti,omap3"; leds { pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/omap3-igep0030.dts b/arch/arm/boot/dts/omap3-igep0030.dts index 145c58cfc8a..2793749eb1b 100644 --- a/arch/arm/boot/dts/omap3-igep0030.dts +++ b/arch/arm/boot/dts/omap3-igep0030.dts @@ -13,7 +13,7 @@ / { model = "IGEP COM MODULE (TI OMAP AM/DM37x)"; - compatible = "isee,omap3-igep0030", "ti,omap3"; + compatible = "isee,omap3-igep0030", "ti,omap36xx", "ti,omap3"; leds { pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/omap3-n9.dts b/arch/arm/boot/dts/omap3-n9.dts index 39828ce464e..9938b5dc190 100644 --- a/arch/arm/boot/dts/omap3-n9.dts +++ b/arch/arm/boot/dts/omap3-n9.dts @@ -14,5 +14,5 @@ / { model = "Nokia N9"; - compatible = "nokia,omap3-n9", "ti,omap3"; + compatible = "nokia,omap3-n9", "ti,omap36xx", "ti,omap3"; }; diff --git 
a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 6fc85f96353..0bf40c90fab 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -1,6 +1,6 @@ /* * Copyright (C) 2013 Pavel Machek <pavel@ucw.cz> - * Copyright 2013 Aaro Koskinen <aaro.koskinen@iki.fi> + * Copyright (C) 2013-2014 Aaro Koskinen <aaro.koskinen@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 (or later) as @@ -13,7 +13,7 @@ / { model = "Nokia N900"; - compatible = "nokia,omap3-n900", "ti,omap3"; + compatible = "nokia,omap3-n900", "ti,omap3430", "ti,omap3"; cpus { cpu@0 { diff --git a/arch/arm/boot/dts/omap3-n950.dts b/arch/arm/boot/dts/omap3-n950.dts index b076a526b99..261c5589bfa 100644 --- a/arch/arm/boot/dts/omap3-n950.dts +++ b/arch/arm/boot/dts/omap3-n950.dts @@ -14,5 +14,5 @@ / { model = "Nokia N950"; - compatible = "nokia,omap3-n950", "ti,omap3"; + compatible = "nokia,omap3-n950", "ti,omap36xx", "ti,omap3"; }; diff --git a/arch/arm/boot/dts/omap3-overo-storm-tobi.dts b/arch/arm/boot/dts/omap3-overo-storm-tobi.dts new file mode 100644 index 00000000000..966b5c9cd96 --- /dev/null +++ b/arch/arm/boot/dts/omap3-overo-storm-tobi.dts @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2012 Florian Vaussard, EPFL Mobots group + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* + * Tobi expansion board is manufactured by Gumstix Inc. + */ + +/dts-v1/; + +#include "omap36xx.dtsi" +#include "omap3-overo-tobi-common.dtsi" + +/ { + model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Tobi"; + compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3"; +}; + diff --git a/arch/arm/boot/dts/omap3-tobi.dts b/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi index 7e4ad2aec37..4edc013a91c 100644 --- a/arch/arm/boot/dts/omap3-tobi.dts +++ b/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi @@ -13,9 +13,6 @@ #include "omap3-overo.dtsi" / { - model = "TI OMAP3 Gumstix Overo on Tobi"; - compatible = "ti,omap3-tobi", "ti,omap3-overo", "ti,omap3"; - leds { compatible = "gpio-leds"; heartbeat { diff --git a/arch/arm/boot/dts/omap3-overo-tobi.dts b/arch/arm/boot/dts/omap3-overo-tobi.dts new file mode 100644 index 00000000000..de5653e1b5c --- /dev/null +++ b/arch/arm/boot/dts/omap3-overo-tobi.dts @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2012 Florian Vaussard, EPFL Mobots group + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* + * Tobi expansion board is manufactured by Gumstix Inc. + */ + +/dts-v1/; + +#include "omap34xx.dtsi" +#include "omap3-overo-tobi-common.dtsi" + +/ { + model = "OMAP35xx Gumstix Overo on Tobi"; + compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3430", "ti,omap3"; +}; + diff --git a/arch/arm/boot/dts/omap3-overo.dtsi b/arch/arm/boot/dts/omap3-overo.dtsi index a461d2fd1fb..597099907f8 100644 --- a/arch/arm/boot/dts/omap3-overo.dtsi +++ b/arch/arm/boot/dts/omap3-overo.dtsi @@ -9,9 +9,6 @@ /* * The Gumstix Overo must be combined with an expansion board. 
*/ -/dts-v1/; - -#include "omap34xx.dtsi" / { pwmleds { diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi index 52447c17537..3d5faf85f51 100644 --- a/arch/arm/boot/dts/sama5d3.dtsi +++ b/arch/arm/boot/dts/sama5d3.dtsi @@ -1228,7 +1228,7 @@ compatible = "atmel,at91rm9200-ohci", "usb-ohci"; reg = <0x00600000 0x100000>; interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>; - clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>, + clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>; clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck"; status = "disabled"; diff --git a/arch/arm/boot/dts/sama5d36.dtsi b/arch/arm/boot/dts/sama5d36.dtsi index 6c31c26e6cc..db58cad6acd 100644 --- a/arch/arm/boot/dts/sama5d36.dtsi +++ b/arch/arm/boot/dts/sama5d36.dtsi @@ -8,8 +8,8 @@ */ #include "sama5d3.dtsi" #include "sama5d3_can.dtsi" -#include "sama5d3_emac.dtsi" #include "sama5d3_gmac.dtsi" +#include "sama5d3_emac.dtsi" #include "sama5d3_lcd.dtsi" #include "sama5d3_mci2.dtsi" #include "sama5d3_tcb1.dtsi" diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi index 0c1e8d871ed..6cb9b68e218 100644 --- a/arch/arm/boot/dts/ste-href.dtsi +++ b/arch/arm/boot/dts/ste-href.dtsi @@ -188,7 +188,6 @@ msp2: msp@80117000 { pinctrl-names = "default"; pinctrl-0 = <&msp2_default_mode>; - status = "okay"; }; msp3: msp@80125000 { diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index 040bb0eba15..d4d2763f479 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi @@ -315,7 +315,7 @@ ranges; emac: ethernet@01c0b000 { - compatible = "allwinner,sun4i-emac"; + compatible = "allwinner,sun4i-a10-emac"; reg = <0x01c0b000 0x1000>; interrupts = <55>; clocks = <&ahb_gates 17>; @@ -323,7 +323,7 @@ }; mdio@01c0b080 { - compatible = "allwinner,sun4i-mdio"; + compatible = "allwinner,sun4i-a10-mdio"; reg = <0x01c0b080 0x14>; status = "disabled"; #address-cells = <1>; @@ -426,7 +426,7 @@ }; rtp: rtp@01c25000 { - compatible = "allwinner,sun4i-ts"; + compatible = "allwinner,sun4i-a10-ts"; reg = <0x01c25000 0x100>; interrupts = <29>; }; diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi index ea16054857a..79fd412005b 100644 --- a/arch/arm/boot/dts/sun5i-a10s.dtsi +++ b/arch/arm/boot/dts/sun5i-a10s.dtsi @@ -278,7 +278,7 @@ ranges; emac: ethernet@01c0b000 { - compatible = "allwinner,sun4i-emac"; + compatible = "allwinner,sun4i-a10-emac"; reg = <0x01c0b000 0x1000>; interrupts = <55>; clocks = <&ahb_gates 17>; @@ -286,7 +286,7 @@ }; mdio@01c0b080 { - compatible = "allwinner,sun4i-mdio"; + compatible = "allwinner,sun4i-a10-mdio"; reg = <0x01c0b080 0x14>; status = "disabled"; #address-cells = <1>; @@ -383,7 +383,7 @@ }; rtp: rtp@01c25000 { - compatible = "allwinner,sun4i-ts"; + compatible = "allwinner,sun4i-a10-ts"; reg = <0x01c25000 0x100>; interrupts = <29>; }; diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi index 320335abfcc..c463fd730c9 100644 --- a/arch/arm/boot/dts/sun5i-a13.dtsi +++ b/arch/arm/boot/dts/sun5i-a13.dtsi @@ -346,7 +346,7 @@ }; rtp: rtp@01c25000 { - compatible = "allwinner,sun4i-ts"; + compatible = "allwinner,sun4i-a10-ts"; reg = <0x01c25000 0x100>; interrupts = <29>; }; diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 119f066f0d9..6f25cf559ad 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -340,7 +340,7 @@ ranges; emac: ethernet@01c0b000 { - compatible = "allwinner,sun4i-emac"; + compatible = 
"allwinner,sun4i-a10-emac"; reg = <0x01c0b000 0x1000>; interrupts = <0 55 4>; clocks = <&ahb_gates 17>; @@ -348,7 +348,7 @@ }; mdio@01c0b080 { - compatible = "allwinner,sun4i-mdio"; + compatible = "allwinner,sun4i-a10-mdio"; reg = <0x01c0b080 0x14>; status = "disabled"; #address-cells = <1>; @@ -454,7 +454,7 @@ rtc: rtc@01c20d00 { compatible = "allwinner,sun7i-a20-rtc"; reg = <0x01c20d00 0x20>; - interrupts = <0 24 1>; + interrupts = <0 24 4>; }; sid: eeprom@01c23800 { @@ -463,7 +463,7 @@ }; rtp: rtp@01c25000 { - compatible = "allwinner,sun4i-ts"; + compatible = "allwinner,sun4i-a10-ts"; reg = <0x01c25000 0x100>; interrupts = <0 29 4>; }; @@ -596,10 +596,10 @@ hstimer@01c60000 { compatible = "allwinner,sun7i-a20-hstimer"; reg = <0x01c60000 0x1000>; - interrupts = <0 81 1>, - <0 82 1>, - <0 83 1>, - <0 84 1>; + interrupts = <0 81 4>, + <0 82 4>, + <0 83 4>, + <0 84 4>; clocks = <&ahb_gates 28>; }; diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi index 389e987ec28..44ec401ec36 100644 --- a/arch/arm/boot/dts/tegra114.dtsi +++ b/arch/arm/boot/dts/tegra114.dtsi @@ -57,6 +57,8 @@ resets = <&tegra_car 27>; reset-names = "dc"; + nvidia,head = <0>; + rgb { status = "disabled"; }; @@ -72,6 +74,8 @@ resets = <&tegra_car 26>; reset-names = "dc"; + nvidia,head = <1>; + rgb { status = "disabled"; }; diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi index 480ecda3416..48d2a7f4d0c 100644 --- a/arch/arm/boot/dts/tegra20.dtsi +++ b/arch/arm/boot/dts/tegra20.dtsi @@ -94,6 +94,8 @@ resets = <&tegra_car 27>; reset-names = "dc"; + nvidia,head = <0>; + rgb { status = "disabled"; }; @@ -109,6 +111,8 @@ resets = <&tegra_car 26>; reset-names = "dc"; + nvidia,head = <1>; + rgb { status = "disabled"; }; diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi index 9104224124e..1e156d9d050 100644 --- a/arch/arm/boot/dts/tegra30-cardhu.dtsi +++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi @@ -28,7 +28,7 @@ compatible = "nvidia,cardhu", "nvidia,tegra30"; aliases { - rtc0 = "/i2c@7000d000/tps6586x@34"; + rtc0 = "/i2c@7000d000/tps65911@2d"; rtc1 = "/rtc@7000e000"; }; diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi index ed8e7700b46..19a84e933f4 100644 --- a/arch/arm/boot/dts/tegra30.dtsi +++ b/arch/arm/boot/dts/tegra30.dtsi @@ -170,6 +170,8 @@ resets = <&tegra_car 27>; reset-names = "dc"; + nvidia,head = <0>; + rgb { status = "disabled"; }; @@ -185,6 +187,8 @@ resets = <&tegra_car 26>; reset-names = "dc"; + nvidia,head = <1>; + rgb { status = "disabled"; }; diff --git a/arch/arm/boot/dts/testcases/tests-interrupts.dtsi b/arch/arm/boot/dts/testcases/tests-interrupts.dtsi deleted file mode 100644 index c843720bd3e..00000000000 --- a/arch/arm/boot/dts/testcases/tests-interrupts.dtsi +++ /dev/null @@ -1,58 +0,0 @@ - -/ { - testcase-data { - interrupts { - #address-cells = <1>; - #size-cells = <1>; - test_intc0: intc0 { - interrupt-controller; - #interrupt-cells = <1>; - }; - - test_intc1: intc1 { - interrupt-controller; - #interrupt-cells = <3>; - }; - - test_intc2: intc2 { - interrupt-controller; - #interrupt-cells = <2>; - }; - - test_intmap0: intmap0 { - #interrupt-cells = <1>; - #address-cells = <0>; - interrupt-map = <1 &test_intc0 9>, - <2 &test_intc1 10 11 12>, - <3 &test_intc2 13 14>, - <4 &test_intc2 15 16>; - }; - - test_intmap1: intmap1 { - #interrupt-cells = <2>; - interrupt-map = <0x5000 1 2 &test_intc0 15>; - }; - - interrupts0 { - interrupt-parent = <&test_intc0>; - interrupts = <1>, <2>, <3>, 
<4>; - }; - - interrupts1 { - interrupt-parent = <&test_intmap0>; - interrupts = <1>, <2>, <3>, <4>; - }; - - interrupts-extended0 { - reg = <0x5000 0x100>; - interrupts-extended = <&test_intc0 1>, - <&test_intc1 2 3 4>, - <&test_intc2 5 6>, - <&test_intmap0 1>, - <&test_intmap0 2>, - <&test_intmap0 3>, - <&test_intmap1 1 2>; - }; - }; - }; -}; diff --git a/arch/arm/boot/dts/testcases/tests-phandle.dtsi b/arch/arm/boot/dts/testcases/tests-phandle.dtsi deleted file mode 100644 index 0007d3cd7dc..00000000000 --- a/arch/arm/boot/dts/testcases/tests-phandle.dtsi +++ /dev/null @@ -1,39 +0,0 @@ - -/ { - testcase-data { - phandle-tests { - provider0: provider0 { - #phandle-cells = <0>; - }; - - provider1: provider1 { - #phandle-cells = <1>; - }; - - provider2: provider2 { - #phandle-cells = <2>; - }; - - provider3: provider3 { - #phandle-cells = <3>; - }; - - consumer-a { - phandle-list = <&provider1 1>, - <&provider2 2 0>, - <0>, - <&provider3 4 4 3>, - <&provider2 5 100>, - <&provider0>, - <&provider1 7>; - phandle-list-names = "first", "second", "third"; - - phandle-list-bad-phandle = <12345678 0 0>; - phandle-list-bad-args = <&provider2 1 0>, - <&provider3 0>; - empty-property; - unterminated-string = [40 41 42 43]; - }; - }; - }; -}; diff --git a/arch/arm/boot/dts/testcases/tests.dtsi b/arch/arm/boot/dts/testcases/tests.dtsi deleted file mode 100644 index 3f123ecc9dd..00000000000 --- a/arch/arm/boot/dts/testcases/tests.dtsi +++ /dev/null @@ -1,2 +0,0 @@ -/include/ "tests-phandle.dtsi" -/include/ "tests-interrupts.dtsi" diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts index f43907c40c9..65f65771132 100644 --- a/arch/arm/boot/dts/versatile-pb.dts +++ b/arch/arm/boot/dts/versatile-pb.dts @@ -1,4 +1,4 @@ -/include/ "versatile-ab.dts" +#include <versatile-ab.dts> / { model = "ARM Versatile PB"; @@ -47,4 +47,4 @@ }; }; -/include/ "testcases/tests.dtsi" +#include <testcases.dtsi> diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 845bc745706..ee6982976d6 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -29,6 +29,7 @@ CONFIG_ARCH_OMAP3=y CONFIG_ARCH_OMAP4=y CONFIG_SOC_OMAP5=y CONFIG_SOC_AM33XX=y +CONFIG_SOC_DRA7XX=y CONFIG_SOC_AM43XX=y CONFIG_ARCH_ROCKCHIP=y CONFIG_ARCH_SOCFPGA=y diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig index 00fe9e9710f..27d69b558c5 100644 --- a/arch/arm/configs/tegra_defconfig +++ b/arch/arm/configs/tegra_defconfig @@ -204,7 +204,10 @@ CONFIG_MMC_BLOCK_MINORS=16 CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_TEGRA=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y CONFIG_LEDS_GPIO=y +CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=y CONFIG_LEDS_TRIGGER_ONESHOT=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index e9a49fe0284..8b8b61685a3 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -212,6 +212,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *, static inline void __flush_icache_all(void) { __flush_icache_preferred(); + dsb(); } /* diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 8756e4bcdba..4afb376d9c7 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -30,14 +30,15 @@ */ #define UL(x) _AC(x, UL) +/* PAGE_OFFSET - the virtual address of the start of the kernel image */ +#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) + 
#ifdef CONFIG_MMU /* - * PAGE_OFFSET - the virtual address of the start of the kernel image * TASK_SIZE - the maximum size of a user space task. * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area */ -#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) #define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M)) #define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M) @@ -104,10 +105,6 @@ #define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE) #endif -#ifndef PAGE_OFFSET -#define PAGE_OFFSET PLAT_PHYS_OFFSET -#endif - /* * The module can be at any place in ram in nommu mode. */ diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 03243f7eedd..85c60adc8b6 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -120,13 +120,16 @@ /* * 2nd stage PTE definitions for LPAE. */ -#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */ -#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */ -#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */ -#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ -#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ +#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x0) << 2) /* strongly ordered */ +#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal inner write-through */ +#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* normal inner write-back */ +#define L_PTE_S2_MT_DEV_SHARED (_AT(pteval_t, 0x1) << 2) /* device */ +#define L_PTE_S2_MT_MASK (_AT(pteval_t, 0xf) << 2) -#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ +#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ +#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ + +#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ /* * Hyp-mode PL2 PTE definitions for LPAE. diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index ef3c6072aa4..ac4bfae2670 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -37,18 +37,9 @@ static inline void dsb_sev(void) { -#if __LINUX_ARM_ARCH__ >= 7 - __asm__ __volatile__ ( - "dsb ishst\n" - SEV - ); -#else - __asm__ __volatile__ ( - "mcr p15, 0, %0, c7, c10, 4\n" - SEV - : : "r" (0) - ); -#endif + + dsb(ishst); + __asm__(SEV); } /* diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S index 47cd974e57e..c96ecacb202 100644 --- a/arch/arm/kernel/head-common.S +++ b/arch/arm/kernel/head-common.S @@ -177,6 +177,18 @@ __lookup_processor_type_data: .long __proc_info_end .size __lookup_processor_type_data, . - __lookup_processor_type_data +__error_lpae: +#ifdef CONFIG_DEBUG_LL + adr r0, str_lpae + bl printascii + b __error +str_lpae: .asciz "\nError: Kernel with LPAE support, but CPU does not support LPAE.\n" +#else + b __error +#endif + .align +ENDPROC(__error_lpae) + __error_p: #ifdef CONFIG_DEBUG_LL adr r0, str_p1 diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 914616e0bdc..f5f381d9155 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -102,7 +102,7 @@ ENTRY(stext) and r3, r3, #0xf @ extract VMSA support cmp r3, #5 @ long-descriptor translation table format? 
THUMB( it lo ) @ force fixup-able long branch encoding - blo __error_p @ only classic page table format + blo __error_lpae @ only classic page table format #endif #ifndef CONFIG_XIP_KERNEL diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index b0df9761de6..1e8b030dbef 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -731,7 +731,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc) kernel_data.end = virt_to_phys(_end - 1); for_each_memblock(memory, region) { - res = memblock_virt_alloc_low(sizeof(*res), 0); + res = memblock_virt_alloc(sizeof(*res), 0); res->name = "System RAM"; res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 1d8248ea566..bd18bb8b277 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -878,7 +878,8 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd, void *v) { - if (cmd == CPU_PM_EXIT) { + if (cmd == CPU_PM_EXIT && + __hyp_get_vectors() == hyp_default_vectors) { cpu_init_hyp_mode(NULL); return NOTIFY_OK; } diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S index ddc15539bad..0d68d407306 100644 --- a/arch/arm/kvm/interrupts.S +++ b/arch/arm/kvm/interrupts.S @@ -220,6 +220,10 @@ after_vfp_restore: * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are * passed in r0 and r1. * + * A function pointer with a value of 0xffffffff has a special meaning, + * and is used to implement __hyp_get_vectors in the same way as in + * arch/arm/kernel/hyp_stub.S. + * * The calling convention follows the standard AAPCS: * r0 - r3: caller save * r12: caller save @@ -363,6 +367,11 @@ hyp_hvc: host_switch_to_hyp: pop {r0, r1, r2} + /* Check for __hyp_get_vectors */ + cmp r0, #-1 + mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR + beq 1f + push {lr} mrs lr, SPSR push {lr} @@ -378,7 +387,7 @@ THUMB( orr lr, #1) pop {lr} msr SPSR_csxf, lr pop {lr} - eret +1: eret guest_trap: load_vcpu @ Load VCPU pointer to r0 diff --git a/arch/arm/mach-hisi/Kconfig b/arch/arm/mach-hisi/Kconfig index 8f4649b301b..1abae5f6a41 100644 --- a/arch/arm/mach-hisi/Kconfig +++ b/arch/arm/mach-hisi/Kconfig @@ -8,7 +8,7 @@ config ARCH_HI3xxx select CLKSRC_OF select GENERIC_CLOCKEVENTS select HAVE_ARM_SCU - select HAVE_ARM_TWD + select HAVE_ARM_TWD if SMP select HAVE_SMP select PINCTRL select PINCTRL_SINGLE diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile index befcaf5d057..ec419649320 100644 --- a/arch/arm/mach-imx/Makefile +++ b/arch/arm/mach-imx/Makefile @@ -101,11 +101,9 @@ obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o mach-imx6sl.o -ifeq ($(CONFIG_PM),y) obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o headsmp.o # i.MX6SL reuses i.MX6Q code obj-$(CONFIG_SOC_IMX6SL) += pm-imx6q.o headsmp.o -endif # i.MX5 based machines obj-$(CONFIG_MACH_MX51_BABBAGE) += mach-mx51_babbage.o diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c index af2e582d2b7..4d677f44253 100644 --- a/arch/arm/mach-imx/clk-imx6q.c +++ b/arch/arm/mach-imx/clk-imx6q.c @@ -482,6 +482,9 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) if (IS_ENABLED(CONFIG_PCI_IMX6)) clk_set_parent(clk[lvds1_sel], clk[sata_ref]); + /* Set initial power mode */ + imx6q_set_lpm(WAIT_CLOCKED); + np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt"); base = 
of_iomap(np, 0); WARN_ON(!base); diff --git a/arch/arm/mach-imx/clk-imx6sl.c b/arch/arm/mach-imx/clk-imx6sl.c index 3781a185399..4c86f303520 100644 --- a/arch/arm/mach-imx/clk-imx6sl.c +++ b/arch/arm/mach-imx/clk-imx6sl.c @@ -266,6 +266,9 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node) /* Audio-related clocks configuration */ clk_set_parent(clks[IMX6SL_CLK_SPDIF0_SEL], clks[IMX6SL_CLK_PLL3_PFD3]); + /* Set initial power mode */ + imx6q_set_lpm(WAIT_CLOCKED); + np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-gpt"); base = of_iomap(np, 0); WARN_ON(!base); diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h index 59c3b9b26bb..baf439dc22d 100644 --- a/arch/arm/mach-imx/common.h +++ b/arch/arm/mach-imx/common.h @@ -144,13 +144,11 @@ void imx6q_set_chicken_bit(void); void imx_cpu_die(unsigned int cpu); int imx_cpu_kill(unsigned int cpu); -#ifdef CONFIG_PM void imx6q_pm_init(void); void imx6q_pm_set_ccm_base(void __iomem *base); +#ifdef CONFIG_PM void imx5_pm_init(void); #else -static inline void imx6q_pm_init(void) {} -static inline void imx6q_pm_set_ccm_base(void __iomem *base) {} static inline void imx5_pm_init(void) {} #endif diff --git a/arch/arm/mach-imx/pm-imx6q.c b/arch/arm/mach-imx/pm-imx6q.c index 9d47adc078a..7a9b98589db 100644 --- a/arch/arm/mach-imx/pm-imx6q.c +++ b/arch/arm/mach-imx/pm-imx6q.c @@ -236,8 +236,6 @@ void __init imx6q_pm_init(void) regmap_update_bits(gpr, IOMUXC_GPR1, IMX6Q_GPR1_GINT, IMX6Q_GPR1_GINT); - /* Set initial power mode */ - imx6q_set_lpm(WAIT_CLOCKED); suspend_set_ops(&imx6q_pm_ops); } diff --git a/arch/arm/mach-moxart/Kconfig b/arch/arm/mach-moxart/Kconfig index ba470d64493..3795ae28a61 100644 --- a/arch/arm/mach-moxart/Kconfig +++ b/arch/arm/mach-moxart/Kconfig @@ -2,7 +2,6 @@ config ARCH_MOXART bool "MOXA ART SoC" if ARCH_MULTI_V4T select CPU_FA526 select ARM_DMA_MEM_BUFFERABLE - select DMA_OF select USE_OF select CLKSRC_OF select CLKSRC_MMIO diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c index 91449c5cb70..85089d82198 100644 --- a/arch/arm/mach-omap1/board-nokia770.c +++ b/arch/arm/mach-omap1/board-nokia770.c @@ -156,6 +156,7 @@ static struct omap_usb_config nokia770_usb_config __initdata = { .register_dev = 1, .hmc_mode = 16, .pins[0] = 6, + .extcon = "tahvo-usb", }; #if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 653b489479e..0af7ca02314 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -50,11 +50,12 @@ config SOC_OMAP5 bool "TI OMAP5" depends on ARCH_MULTI_V7 select ARCH_OMAP2PLUS + select ARCH_HAS_OPP select ARM_CPU_SUSPEND if PM select ARM_GIC select CPU_V7 select HAVE_ARM_SCU if SMP - select HAVE_ARM_TWD if LOCAL_TIMERS + select HAVE_ARM_TWD if SMP select HAVE_SMP select HAVE_ARM_ARCH_TIMER select ARM_ERRATA_798181 if SMP @@ -63,6 +64,7 @@ config SOC_AM33XX bool "TI AM33XX" depends on ARCH_MULTI_V7 select ARCH_OMAP2PLUS + select ARCH_HAS_OPP select ARM_CPU_SUSPEND if PM select CPU_V7 select MULTI_IRQ_HANDLER @@ -72,6 +74,7 @@ config SOC_AM43XX depends on ARCH_MULTI_V7 select CPU_V7 select ARCH_OMAP2PLUS + select ARCH_HAS_OPP select MULTI_IRQ_HANDLER select ARM_GIC select MACH_OMAP_GENERIC @@ -80,6 +83,7 @@ config SOC_DRA7XX bool "TI DRA7XX" depends on ARCH_MULTI_V7 select ARCH_OMAP2PLUS + select ARCH_HAS_OPP select ARM_CPU_SUSPEND if PM select ARM_GIC select CPU_V7 @@ -268,9 +272,6 @@ config MACH_OMAP_3430SDP default y select OMAP_PACKAGE_CBB 
-config MACH_NOKIA_N800 - bool - config MACH_NOKIA_N810 bool @@ -281,7 +282,6 @@ config MACH_NOKIA_N8X0 bool "Nokia N800/N810" depends on SOC_OMAP2420 default y - select MACH_NOKIA_N800 select MACH_NOKIA_N810 select MACH_NOKIA_N810_WIMAX select OMAP_PACKAGE_ZAC diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c index 3b05aea56d1..11ed9152e66 100644 --- a/arch/arm/mach-omap2/cclock3xxx_data.c +++ b/arch/arm/mach-omap2/cclock3xxx_data.c @@ -433,7 +433,9 @@ static const struct clk_ops dpll4_m5x2_ck_ops = { .enable = &omap2_dflt_clk_enable, .disable = &omap2_dflt_clk_disable, .is_enabled = &omap2_dflt_clk_is_enabled, + .set_rate = &omap3_clkoutx2_set_rate, .recalc_rate = &omap3_clkoutx2_recalc, + .round_rate = &omap3_clkoutx2_round_rate, }; static const struct clk_ops dpll4_m5x2_ck_3630_ops = { diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c index 4c158c838d4..01fc710c818 100644 --- a/arch/arm/mach-omap2/cpuidle44xx.c +++ b/arch/arm/mach-omap2/cpuidle44xx.c @@ -23,6 +23,8 @@ #include "prm.h" #include "clockdomain.h" +#define MAX_CPUS 2 + /* Machine specific information */ struct idle_statedata { u32 cpu_state; @@ -48,11 +50,11 @@ static struct idle_statedata omap4_idle_data[] = { }, }; -static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS]; -static struct clockdomain *cpu_clkdm[NR_CPUS]; +static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS]; +static struct clockdomain *cpu_clkdm[MAX_CPUS]; static atomic_t abort_barrier; -static bool cpu_done[NR_CPUS]; +static bool cpu_done[MAX_CPUS]; static struct idle_statedata *state_ptr = &omap4_idle_data[0]; /* Private functions */ diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c index 3185ced807c..3c418ea54bb 100644 --- a/arch/arm/mach-omap2/dpll3xxx.c +++ b/arch/arm/mach-omap2/dpll3xxx.c @@ -623,6 +623,32 @@ void omap3_dpll_deny_idle(struct clk_hw_omap *clk) /* Clock control for DPLL outputs */ +/* Find the parent DPLL for the given clkoutx2 clock */ +static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw) +{ + struct clk_hw_omap *pclk = NULL; + struct clk *parent; + + /* Walk up the parents of clk, looking for a DPLL */ + do { + do { + parent = __clk_get_parent(hw->clk); + hw = __clk_get_hw(parent); + } while (hw && (__clk_get_flags(hw->clk) & CLK_IS_BASIC)); + if (!hw) + break; + pclk = to_clk_hw_omap(hw); + } while (pclk && !pclk->dpll_data); + + /* clk does not have a DPLL as a parent? error in the clock data */ + if (!pclk) { + WARN_ON(1); + return NULL; + } + + return pclk; +} + /** * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate * @clk: DPLL output struct clk @@ -637,27 +663,14 @@ unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw, unsigned long rate; u32 v; struct clk_hw_omap *pclk = NULL; - struct clk *parent; if (!parent_rate) return 0; - /* Walk up the parents of clk, looking for a DPLL */ - do { - do { - parent = __clk_get_parent(hw->clk); - hw = __clk_get_hw(parent); - } while (hw && (__clk_get_flags(hw->clk) & CLK_IS_BASIC)); - if (!hw) - break; - pclk = to_clk_hw_omap(hw); - } while (pclk && !pclk->dpll_data); + pclk = omap3_find_clkoutx2_dpll(hw); - /* clk does not have a DPLL as a parent? 
error in the clock data */ - if (!pclk) { - WARN_ON(1); + if (!pclk) return 0; - } dd = pclk->dpll_data; @@ -672,6 +685,55 @@ unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw, return rate; } +int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + return 0; +} + +long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + const struct dpll_data *dd; + u32 v; + struct clk_hw_omap *pclk = NULL; + + if (!*prate) + return 0; + + pclk = omap3_find_clkoutx2_dpll(hw); + + if (!pclk) + return 0; + + dd = pclk->dpll_data; + + /* TYPE J does not have a clkoutx2 */ + if (dd->flags & DPLL_J_TYPE) { + *prate = __clk_round_rate(__clk_get_parent(pclk->hw.clk), rate); + return *prate; + } + + WARN_ON(!dd->enable_mask); + + v = omap2_clk_readl(pclk, dd->control_reg) & dd->enable_mask; + v >>= __ffs(dd->enable_mask); + + /* If in bypass, the rate is fixed to the bypass rate*/ + if (v != OMAP3XXX_EN_DPLL_LOCKED) + return *prate; + + if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) { + unsigned long best_parent; + + best_parent = (rate / 2); + *prate = __clk_round_rate(__clk_get_parent(hw->clk), + best_parent); + } + + return *prate * 2; +} + /* OMAP3/4 non-CORE DPLL clkops */ const struct clk_hw_omap_ops clkhwops_omap3_dpll = { .allow_idle = omap3_dpll_allow_idle, diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c index d24926e6340..ab43755364f 100644 --- a/arch/arm/mach-omap2/gpmc.c +++ b/arch/arm/mach-omap2/gpmc.c @@ -1339,7 +1339,7 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np, of_property_read_bool(np, "gpmc,time-para-granularity"); } -#ifdef CONFIG_MTD_NAND +#if IS_ENABLED(CONFIG_MTD_NAND) static const char * const nand_xfer_types[] = { [NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled", @@ -1429,7 +1429,7 @@ static int gpmc_probe_nand_child(struct platform_device *pdev, } #endif -#ifdef CONFIG_MTD_ONENAND +#if IS_ENABLED(CONFIG_MTD_ONENAND) static int gpmc_probe_onenand_child(struct platform_device *pdev, struct device_node *child) { diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index d408b15b4fb..af432b19125 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -179,15 +179,6 @@ static struct map_desc omap34xx_io_desc[] __initdata = { .length = L4_EMU_34XX_SIZE, .type = MT_DEVICE }, -#if defined(CONFIG_DEBUG_LL) && \ - (defined(CONFIG_MACH_OMAP_ZOOM2) || defined(CONFIG_MACH_OMAP_ZOOM3)) - { - .virtual = ZOOM_UART_VIRT, - .pfn = __phys_to_pfn(ZOOM_UART_BASE), - .length = SZ_1M, - .type = MT_DEVICE - }, -#endif }; #endif diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 42d81885c70..1f33f5db10d 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -1947,29 +1947,31 @@ static int _ocp_softreset(struct omap_hwmod *oh) goto dis_opt_clks; _write_sysconfig(v, oh); - ret = _clear_softreset(oh, &v); - if (ret) - goto dis_opt_clks; - - _write_sysconfig(v, oh); if (oh->class->sysc->srst_udelay) udelay(oh->class->sysc->srst_udelay); c = _wait_softreset_complete(oh); - if (c == MAX_MODULE_SOFTRESET_WAIT) + if (c == MAX_MODULE_SOFTRESET_WAIT) { pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n", oh->name, MAX_MODULE_SOFTRESET_WAIT); - else + ret = -ETIMEDOUT; + goto dis_opt_clks; + } else { pr_debug("omap_hwmod: %s: softreset in %d usec\n", oh->name, c); + } + + ret = _clear_softreset(oh, &v); + if (ret) + goto dis_opt_clks; + + _write_sysconfig(v, oh); /* * XXX 
add _HWMOD_STATE_WEDGED for modules that don't come back from * _wait_target_ready() or _reset() */ - ret = (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : 0; - dis_opt_clks: if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET) _disable_optional_clocks(oh); diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index 18f333c440d..810c205d668 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c @@ -1365,11 +1365,10 @@ static struct omap_hwmod_class_sysconfig dra7xx_spinlock_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, - .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY | - SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE | - SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), - .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | - SIDLE_SMART_WKUP), + .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP | + SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | + SYSS_HAS_RESET_STATUS), + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 3d5b24dcd9a..c33e07e2f0d 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -22,6 +22,8 @@ #include "common-board-devices.h" #include "dss-common.h" #include "control.h" +#include "omap-secure.h" +#include "soc.h" struct pdata_init { const char *compatible; @@ -169,6 +171,22 @@ static void __init am3517_evm_legacy_init(void) omap_ctrl_writel(v, AM35XX_CONTROL_IP_SW_RESET); omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET); /* OCP barrier */ } + +static void __init nokia_n900_legacy_init(void) +{ + hsmmc2_internal_input_clk(); + + if (omap_type() == OMAP2_DEVICE_TYPE_SEC) { + if (IS_ENABLED(CONFIG_ARM_ERRATA_430973)) { + pr_info("RX-51: Enabling ARM errata 430973 workaround\n"); + /* set IBE to 1 */ + rx51_secure_update_aux_cr(BIT(6), 0); + } else { + pr_warning("RX-51: Not enabling ARM errata 430973 workaround\n"); + pr_warning("Thumb binaries may crash randomly without this workaround\n"); + } + } +} #endif /* CONFIG_ARCH_OMAP3 */ #ifdef CONFIG_ARCH_OMAP4 @@ -239,6 +257,7 @@ struct of_dev_auxdata omap_auxdata_lookup[] __initdata = { #endif #ifdef CONFIG_ARCH_OMAP3 OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002030, "48002030.pinmux", &pcs_pdata), + OF_DEV_AUXDATA("ti,omap3-padconf", 0x480025a0, "480025a0.pinmux", &pcs_pdata), OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002a00, "48002a00.pinmux", &pcs_pdata), /* Only on am3517 */ OF_DEV_AUXDATA("ti,davinci_mdio", 0x5c030000, "davinci_mdio.0", NULL), @@ -259,7 +278,7 @@ struct of_dev_auxdata omap_auxdata_lookup[] __initdata = { static struct pdata_init pdata_quirks[] __initdata = { #ifdef CONFIG_ARCH_OMAP3 { "compulab,omap3-sbc-t3730", omap3_sbc_t3730_legacy_init, }, - { "nokia,omap3-n900", hsmmc2_internal_input_clk, }, + { "nokia,omap3-n900", nokia_n900_legacy_init, }, { "nokia,omap3-n9", hsmmc2_internal_input_clk, }, { "nokia,omap3-n950", hsmmc2_internal_input_clk, }, { "isee,omap3-igep0020", omap3_igep0020_legacy_init, }, diff --git a/arch/arm/mach-omap2/prminst44xx.c b/arch/arm/mach-omap2/prminst44xx.c index 6334b96b409..280f3c58abe 100644 --- a/arch/arm/mach-omap2/prminst44xx.c +++ b/arch/arm/mach-omap2/prminst44xx.c @@ -183,11 +183,11 @@ void omap4_prminst_global_warm_sw_reset(void) OMAP4_PRM_RSTCTRL_OFFSET); v |= OMAP4430_RST_GLOBAL_WARM_SW_MASK; omap4_prminst_write_inst_reg(v, OMAP4430_PRM_PARTITION, - OMAP4430_PRM_DEVICE_INST, + dev_inst, 
OMAP4_PRM_RSTCTRL_OFFSET); /* OCP barrier */ v = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, - OMAP4430_PRM_DEVICE_INST, + dev_inst, OMAP4_PRM_RSTCTRL_OFFSET); } diff --git a/arch/arm/mach-pxa/am300epd.c b/arch/arm/mach-pxa/am300epd.c index c9f309ae88c..8b90c4f2d43 100644 --- a/arch/arm/mach-pxa/am300epd.c +++ b/arch/arm/mach-pxa/am300epd.c @@ -30,6 +30,7 @@ #include <mach/gumstix.h> #include <mach/mfp-pxa25x.h> +#include <mach/irqs.h> #include <linux/platform_data/video-pxafb.h> #include "generic.h" diff --git a/arch/arm/mach-pxa/include/mach/balloon3.h b/arch/arm/mach-pxa/include/mach/balloon3.h index 954641e6c8b..1b0825911e6 100644 --- a/arch/arm/mach-pxa/include/mach/balloon3.h +++ b/arch/arm/mach-pxa/include/mach/balloon3.h @@ -14,6 +14,8 @@ #ifndef ASM_ARCH_BALLOON3_H #define ASM_ARCH_BALLOON3_H +#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */ + enum balloon3_features { BALLOON3_FEATURE_OHCI, BALLOON3_FEATURE_MMC, diff --git a/arch/arm/mach-pxa/include/mach/corgi.h b/arch/arm/mach-pxa/include/mach/corgi.h index f3c3493b468..c030d955bbd 100644 --- a/arch/arm/mach-pxa/include/mach/corgi.h +++ b/arch/arm/mach-pxa/include/mach/corgi.h @@ -13,6 +13,7 @@ #ifndef __ASM_ARCH_CORGI_H #define __ASM_ARCH_CORGI_H 1 +#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */ /* * Corgi (Non Standard) GPIO Definitions diff --git a/arch/arm/mach-pxa/include/mach/csb726.h b/arch/arm/mach-pxa/include/mach/csb726.h index 2628e7b7211..00cfbbbf73f 100644 --- a/arch/arm/mach-pxa/include/mach/csb726.h +++ b/arch/arm/mach-pxa/include/mach/csb726.h @@ -11,6 +11,8 @@ #ifndef CSB726_H #define CSB726_H +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + #define CSB726_GPIO_IRQ_LAN 52 #define CSB726_GPIO_IRQ_SM501 53 #define CSB726_GPIO_MMC_DETECT 100 diff --git a/arch/arm/mach-pxa/include/mach/gumstix.h b/arch/arm/mach-pxa/include/mach/gumstix.h index dba14b6503a..f7df27bbb42 100644 --- a/arch/arm/mach-pxa/include/mach/gumstix.h +++ b/arch/arm/mach-pxa/include/mach/gumstix.h @@ -6,6 +6,7 @@ * published by the Free Software Foundation. */ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ /* BTRESET - Reset line to Bluetooth module, active low signal. */ #define GPIO_GUMSTIX_BTRESET 7 diff --git a/arch/arm/mach-pxa/include/mach/idp.h b/arch/arm/mach-pxa/include/mach/idp.h index 22a96f87232..7e63f468027 100644 --- a/arch/arm/mach-pxa/include/mach/idp.h +++ b/arch/arm/mach-pxa/include/mach/idp.h @@ -23,6 +23,7 @@ * IDP hardware. 
*/ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ #define IDP_FLASH_PHYS (PXA_CS0_PHYS) #define IDP_ALT_FLASH_PHYS (PXA_CS1_PHYS) diff --git a/arch/arm/mach-pxa/include/mach/palmld.h b/arch/arm/mach-pxa/include/mach/palmld.h index 2c447133657..b184f296023 100644 --- a/arch/arm/mach-pxa/include/mach/palmld.h +++ b/arch/arm/mach-pxa/include/mach/palmld.h @@ -13,6 +13,8 @@ #ifndef _INCLUDE_PALMLD_H_ #define _INCLUDE_PALMLD_H_ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /** HERE ARE GPIOs **/ /* GPIOs */ diff --git a/arch/arm/mach-pxa/include/mach/palmt5.h b/arch/arm/mach-pxa/include/mach/palmt5.h index 0bd4f036c72..e342c592140 100644 --- a/arch/arm/mach-pxa/include/mach/palmt5.h +++ b/arch/arm/mach-pxa/include/mach/palmt5.h @@ -15,6 +15,8 @@ #ifndef _INCLUDE_PALMT5_H_ #define _INCLUDE_PALMT5_H_ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /** HERE ARE GPIOs **/ /* GPIOs */ diff --git a/arch/arm/mach-pxa/include/mach/palmtc.h b/arch/arm/mach-pxa/include/mach/palmtc.h index c383a21680b..81c727b3cfd 100644 --- a/arch/arm/mach-pxa/include/mach/palmtc.h +++ b/arch/arm/mach-pxa/include/mach/palmtc.h @@ -16,6 +16,8 @@ #ifndef _INCLUDE_PALMTC_H_ #define _INCLUDE_PALMTC_H_ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /** HERE ARE GPIOs **/ /* GPIOs */ diff --git a/arch/arm/mach-pxa/include/mach/palmtx.h b/arch/arm/mach-pxa/include/mach/palmtx.h index f2e53038025..92bc1f05300 100644 --- a/arch/arm/mach-pxa/include/mach/palmtx.h +++ b/arch/arm/mach-pxa/include/mach/palmtx.h @@ -16,6 +16,8 @@ #ifndef _INCLUDE_PALMTX_H_ #define _INCLUDE_PALMTX_H_ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /** HERE ARE GPIOs **/ /* GPIOs */ diff --git a/arch/arm/mach-pxa/include/mach/pcm027.h b/arch/arm/mach-pxa/include/mach/pcm027.h index 6bf28de228b..86ebd7b6c96 100644 --- a/arch/arm/mach-pxa/include/mach/pcm027.h +++ b/arch/arm/mach-pxa/include/mach/pcm027.h @@ -23,6 +23,8 @@ * Definitions of CPU card resources only */ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /* phyCORE-PXA270 (PCM027) Interrupts */ #define PCM027_IRQ(x) (IRQ_BOARD_START + (x)) #define PCM027_BTDET_IRQ PCM027_IRQ(0) diff --git a/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h b/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h index 0260aaa2fc1..7e544c14967 100644 --- a/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h +++ b/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h @@ -20,6 +20,7 @@ */ #include <mach/pcm027.h> +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ /* * definitions relevant only when the PCM-990 diff --git a/arch/arm/mach-pxa/include/mach/poodle.h b/arch/arm/mach-pxa/include/mach/poodle.h index f32ff75dcca..b56b19351a0 100644 --- a/arch/arm/mach-pxa/include/mach/poodle.h +++ b/arch/arm/mach-pxa/include/mach/poodle.h @@ -15,6 +15,8 @@ #ifndef __ASM_ARCH_POODLE_H #define __ASM_ARCH_POODLE_H 1 +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /* * GPIOs */ diff --git a/arch/arm/mach-pxa/include/mach/spitz.h b/arch/arm/mach-pxa/include/mach/spitz.h index 0bfe6507c95..25c9f62e46a 100644 --- a/arch/arm/mach-pxa/include/mach/spitz.h +++ b/arch/arm/mach-pxa/include/mach/spitz.h @@ -15,8 +15,8 @@ #define __ASM_ARCH_SPITZ_H 1 #endif +#include "irqs.h" /* PXA_NR_BUILTIN_GPIO, PXA_GPIO_TO_IRQ */ #include <linux/fb.h> -#include <linux/gpio.h> /* Spitz/Akita GPIOs */ diff --git a/arch/arm/mach-pxa/include/mach/tosa.h b/arch/arm/mach-pxa/include/mach/tosa.h index 2bb0e862598..0497d95cef2 100644 --- a/arch/arm/mach-pxa/include/mach/tosa.h +++ b/arch/arm/mach-pxa/include/mach/tosa.h @@ -13,6 +13,8 @@ #ifndef _ASM_ARCH_TOSA_H_ #define _ASM_ARCH_TOSA_H_ 1 +#include "irqs.h" 
/* PXA_NR_BUILTIN_GPIO */ + /* TOSA Chip selects */ #define TOSA_LCDC_PHYS PXA_CS4_PHYS /* Internel Scoop */ diff --git a/arch/arm/mach-pxa/include/mach/trizeps4.h b/arch/arm/mach-pxa/include/mach/trizeps4.h index d2ca01053f6..ae3ca013afa 100644 --- a/arch/arm/mach-pxa/include/mach/trizeps4.h +++ b/arch/arm/mach-pxa/include/mach/trizeps4.h @@ -10,6 +10,8 @@ #ifndef _TRIPEPS4_H_ #define _TRIPEPS4_H_ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /* physical memory regions */ #define TRIZEPS4_FLASH_PHYS (PXA_CS0_PHYS) /* Flash region */ #define TRIZEPS4_DISK_PHYS (PXA_CS1_PHYS) /* Disk On Chip region */ diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c index f70583fee59..29997bde277 100644 --- a/arch/arm/mach-pxa/mioa701.c +++ b/arch/arm/mach-pxa/mioa701.c @@ -38,6 +38,7 @@ #include <linux/mtd/physmap.h> #include <linux/usb/gpio_vbus.h> #include <linux/reboot.h> +#include <linux/regulator/fixed.h> #include <linux/regulator/max1586.h> #include <linux/slab.h> #include <linux/i2c/pxa-i2c.h> @@ -714,6 +715,10 @@ static struct gpio global_gpios[] = { { GPIO56_MT9M111_nOE, GPIOF_OUT_INIT_LOW, "Camera nOE" }, }; +static struct regulator_consumer_supply fixed_5v0_consumers[] = { + REGULATOR_SUPPLY("power", "pwm-backlight"), +}; + static void __init mioa701_machine_init(void) { int rc; @@ -753,6 +758,10 @@ static void __init mioa701_machine_init(void) pxa_set_i2c_info(&i2c_pdata); pxa27x_set_i2c_power_info(NULL); pxa_set_camera_info(&mioa701_pxacamera_platform_data); + + regulator_register_always_on(0, "fixed-5.0V", fixed_5v0_consumers, + ARRAY_SIZE(fixed_5v0_consumers), + 5000000); } static void mioa701_machine_exit(void) diff --git a/arch/arm/mach-sa1100/include/mach/collie.h b/arch/arm/mach-sa1100/include/mach/collie.h index f33679d2d3e..50e1d850ee2 100644 --- a/arch/arm/mach-sa1100/include/mach/collie.h +++ b/arch/arm/mach-sa1100/include/mach/collie.h @@ -13,6 +13,8 @@ #ifndef __ASM_ARCH_COLLIE_H #define __ASM_ARCH_COLLIE_H +#include "hardware.h" /* Gives GPIO_MAX */ + extern void locomolcd_power(int on); #define COLLIE_SCOOP_GPIO_BASE (GPIO_MAX + 1) diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig index 338640631e0..05fa505df58 100644 --- a/arch/arm/mach-shmobile/Kconfig +++ b/arch/arm/mach-shmobile/Kconfig @@ -8,7 +8,7 @@ config ARCH_SHMOBILE_MULTI select CPU_V7 select GENERIC_CLOCKEVENTS select HAVE_ARM_SCU if SMP - select HAVE_ARM_TWD if LOCAL_TIMERS + select HAVE_ARM_TWD if SMP select HAVE_SMP select ARM_GIC select MIGHT_HAVE_CACHE_L2X0 diff --git a/arch/arm/mach-tegra/pm.c b/arch/arm/mach-tegra/pm.c index 4ae0286b468..f55b05a29b5 100644 --- a/arch/arm/mach-tegra/pm.c +++ b/arch/arm/mach-tegra/pm.c @@ -24,6 +24,7 @@ #include <linux/cpu_pm.h> #include <linux/suspend.h> #include <linux/err.h> +#include <linux/slab.h> #include <linux/clk/tegra.h> #include <asm/smp_plat.h> diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c index 303a285d80f..6191603379e 100644 --- a/arch/arm/mach-tegra/tegra.c +++ b/arch/arm/mach-tegra/tegra.c @@ -73,10 +73,20 @@ u32 tegra_uart_config[3] = { static void __init tegra_init_cache(void) { #ifdef CONFIG_CACHE_L2X0 + static const struct of_device_id pl310_ids[] __initconst = { + { .compatible = "arm,pl310-cache", }, + {} + }; + + struct device_node *np; int ret; void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000; u32 aux_ctrl, cache_type; + np = of_find_matching_node(NULL, pl310_ids); + if (!np) + return; + cache_type = readl(p + L2X0_CACHE_TYPE); aux_ctrl = (cache_type & 0x700) << (17-8); 
aux_ctrl |= 0x7C400001; diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c index 1db2a5ca9ab..8c09a8393fb 100644 --- a/arch/arm/mach-zynq/common.c +++ b/arch/arm/mach-zynq/common.c @@ -25,6 +25,7 @@ #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/of.h> +#include <linux/memblock.h> #include <linux/irqchip.h> #include <linux/irqchip/arm-gic.h> @@ -41,6 +42,18 @@ void __iomem *zynq_scu_base; +/** + * zynq_memory_init - Initialize special memory + * + * We need to stop things allocating the low memory as DMA can't work in + * the 1st 512K of memory. + */ +static void __init zynq_memory_init(void) +{ + if (!__pa(PAGE_OFFSET)) + memblock_reserve(__pa(PAGE_OFFSET), __pa(swapper_pg_dir)); +} + static struct platform_device zynq_cpuidle_device = { .name = "cpuidle-zynq", }; @@ -117,5 +130,6 @@ DT_MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform") .init_machine = zynq_init_machine, .init_time = zynq_timer_init, .dt_compat = zynq_dt_match, + .reserve = zynq_memory_init, .restart = zynq_system_reset, MACHINE_END diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 1a77450e728..11b3914660d 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -1358,7 +1358,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, *handle = DMA_ERROR_CODE; size = PAGE_ALIGN(size); - if (gfp & GFP_ATOMIC) + if (!(gfp & __GFP_WAIT)) return __iommu_alloc_atomic(dev, size, handle); /* diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c index 2b3a5641427..ef69152f9b5 100644 --- a/arch/arm/mm/dump.c +++ b/arch/arm/mm/dump.c @@ -264,6 +264,9 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) note_page(st, addr, 3, pmd_val(*pmd)); else walk_pte(st, pmd, addr); + + if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) + note_page(st, addr + SECTION_SIZE, 3, pmd_val(pmd[1])); } } diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index d5a982d15a8..7ea641b7aa7 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -38,6 +38,7 @@ static inline pmd_t *pmd_off_k(unsigned long virt) struct mem_type { pteval_t prot_pte; + pteval_t prot_pte_s2; pmdval_t prot_l1; pmdval_t prot_sect; unsigned int domain; diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 4f08c133cc2..a623cb3ad01 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -232,12 +232,16 @@ __setup("noalign", noalign_setup); #endif /* ifdef CONFIG_CPU_CP15 / else */ #define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN +#define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE static struct mem_type mem_types[] = { [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED | L_PTE_SHARED, + .prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) | + s2_policy(L_PTE_S2_MT_DEV_SHARED) | + L_PTE_SHARED, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S, .domain = DOMAIN_IO, @@ -508,7 +512,8 @@ static void __init build_mem_type_table(void) cp = &cache_policies[cachepolicy]; vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; s2_pgprot = cp->pte_s2; - hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte; + hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte; + s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2; /* * ARMv6 and above have extended page tables. 
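The arm_iommu_alloc_attrs() change above hinges on how the GFP flags compose: GFP_ATOMIC is just __GFP_HIGH, so "gfp & GFP_ATOMIC" does not actually ask "may this caller sleep?"; the reliable test is whether __GFP_WAIT is absent. A minimal stand-alone sketch of the two tests follows; the flag values are copied from the 3.x-era include/linux/gfp.h and should be treated as assumptions for illustration only.

/* Sketch only: why "if (gfp & GFP_ATOMIC)" was the wrong test. */
#define ___GFP_WAIT	0x10u
#define ___GFP_HIGH	0x20u
#define GFP_ATOMIC	(___GFP_HIGH)	/* atomic context, __GFP_WAIT simply absent */
#define GFP_NOWAIT	(0u)		/* also atomic, sets neither flag           */
#define GFP_KERNEL	(___GFP_WAIT)	/* may sleep (__GFP_IO/__GFP_FS omitted)    */

static int old_test(unsigned int gfp) { return gfp & GFP_ATOMIC; }	/* misses GFP_NOWAIT callers      */
static int new_test(unsigned int gfp) { return !(gfp & ___GFP_WAIT); }	/* catches every non-sleeping gfp */

With the old test a GFP_NOWAIT allocation fell through to the sleeping path; testing for the absence of __GFP_WAIT routes every atomic-context caller to __iommu_alloc_atomic().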
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 45dc29f85d5..32b3558321c 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -208,7 +208,6 @@ __v6_setup: mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache - mcr p15, 0, r0, c7, c10, 4 @ drain write buffer #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs mcr p15, 0, r0, c2, c0, 2 @ TTB control register @@ -218,6 +217,8 @@ __v6_setup: ALT_UP(orr r8, r8, #TTB_FLAGS_UP) mcr p15, 0, r8, c2, c0, 1 @ load TTB1 #endif /* CONFIG_MMU */ + mcr p15, 0, r0, c7, c10, 4 @ drain write buffer and + @ complete invalidations adr r5, v6_crval ldmia r5, {r5, r6} ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index bd1781979a3..74f6033e76d 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -351,7 +351,6 @@ __v7_setup: 4: mov r10, #0 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate - dsb #ifdef CONFIG_MMU mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup @@ -360,6 +359,7 @@ __v7_setup: mcr p15, 0, r5, c10, c2, 0 @ write PRRR mcr p15, 0, r6, c10, c2, 1 @ write NMRR #endif + dsb @ Complete invalidations #ifndef CONFIG_ARM_THUMBEE mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE and r0, r0, #(0xf << 12) @ ThumbEE enabled field diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index dd4327f09ba..27bbcfc7202 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -36,6 +36,7 @@ config ARM64 select HAVE_GENERIC_DMA_COHERENT select HAVE_HW_BREAKPOINT if PERF_EVENTS select HAVE_MEMBLOCK + select HAVE_PATA_PLATFORM select HAVE_PERF_EVENTS select IRQ_DOMAIN select MODULES_USE_ELF_RELA diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 84139be62ae..7959dd0ca5d 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set # CONFIG_SWAP is not set CONFIG_SYSVIPC=y @@ -19,6 +18,7 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_KALLSYMS_ALL=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y +CONFIG_JUMP_LABEL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set @@ -27,6 +27,7 @@ CONFIG_ARCH_VEXPRESS=y CONFIG_ARCH_XGENE=y CONFIG_SMP=y CONFIG_PREEMPT=y +CONFIG_CMA=y CONFIG_CMDLINE="console=ttyAMA0" # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_COMPAT=y @@ -42,14 +43,17 @@ CONFIG_IP_PNP_BOOTP=y # CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y -CONFIG_BLK_DEV=y +CONFIG_DMA_CMA=y CONFIG_SCSI=y # CONFIG_SCSI_PROC_FS is not set CONFIG_BLK_DEV_SD=y # CONFIG_SCSI_LOWLEVEL is not set +CONFIG_ATA=y +CONFIG_PATA_PLATFORM=y +CONFIG_PATA_OF_PLATFORM=y CONFIG_NETDEVICES=y -CONFIG_MII=y CONFIG_SMC91X=y +CONFIG_SMSC911X=y # CONFIG_WLAN is not set CONFIG_INPUT_EVDEV=y # CONFIG_SERIO_I8042 is not set @@ -62,13 +66,19 @@ CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set +CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_FB=y # CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set -# CONFIG_USB_SUPPORT is not set +CONFIG_USB=y +CONFIG_USB_ISP1760_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_MMC=y +CONFIG_MMC_ARMMMCI=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_EXT2_FS=y 
CONFIG_EXT3_FS=y diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index 01de5aaa3ed..0237f0867e3 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -54,8 +54,7 @@ static inline void atomic_add(int i, atomic_t *v) " stxr %w1, %w0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) - : "Ir" (i) - : "cc"); + : "Ir" (i)); } static inline int atomic_add_return(int i, atomic_t *v) @@ -64,14 +63,15 @@ static inline int atomic_add_return(int i, atomic_t *v) int result; asm volatile("// atomic_add_return\n" -"1: ldaxr %w0, %2\n" +"1: ldxr %w0, %2\n" " add %w0, %w0, %w3\n" " stlxr %w1, %w0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) : "Ir" (i) - : "cc", "memory"); + : "memory"); + smp_mb(); return result; } @@ -86,8 +86,7 @@ static inline void atomic_sub(int i, atomic_t *v) " stxr %w1, %w0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) - : "Ir" (i) - : "cc"); + : "Ir" (i)); } static inline int atomic_sub_return(int i, atomic_t *v) @@ -96,14 +95,15 @@ static inline int atomic_sub_return(int i, atomic_t *v) int result; asm volatile("// atomic_sub_return\n" -"1: ldaxr %w0, %2\n" +"1: ldxr %w0, %2\n" " sub %w0, %w0, %w3\n" " stlxr %w1, %w0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) : "Ir" (i) - : "cc", "memory"); + : "memory"); + smp_mb(); return result; } @@ -112,17 +112,20 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) unsigned long tmp; int oldval; + smp_mb(); + asm volatile("// atomic_cmpxchg\n" -"1: ldaxr %w1, %2\n" +"1: ldxr %w1, %2\n" " cmp %w1, %w3\n" " b.ne 2f\n" -" stlxr %w0, %w4, %2\n" +" stxr %w0, %w4, %2\n" " cbnz %w0, 1b\n" "2:" : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter) : "Ir" (old), "r" (new) - : "cc", "memory"); + : "cc"); + smp_mb(); return oldval; } @@ -173,8 +176,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v) " stxr %w1, %0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) - : "Ir" (i) - : "cc"); + : "Ir" (i)); } static inline long atomic64_add_return(long i, atomic64_t *v) @@ -183,14 +185,15 @@ static inline long atomic64_add_return(long i, atomic64_t *v) unsigned long tmp; asm volatile("// atomic64_add_return\n" -"1: ldaxr %0, %2\n" +"1: ldxr %0, %2\n" " add %0, %0, %3\n" " stlxr %w1, %0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) : "Ir" (i) - : "cc", "memory"); + : "memory"); + smp_mb(); return result; } @@ -205,8 +208,7 @@ static inline void atomic64_sub(u64 i, atomic64_t *v) " stxr %w1, %0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) - : "Ir" (i) - : "cc"); + : "Ir" (i)); } static inline long atomic64_sub_return(long i, atomic64_t *v) @@ -215,14 +217,15 @@ static inline long atomic64_sub_return(long i, atomic64_t *v) unsigned long tmp; asm volatile("// atomic64_sub_return\n" -"1: ldaxr %0, %2\n" +"1: ldxr %0, %2\n" " sub %0, %0, %3\n" " stlxr %w1, %0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) : "Ir" (i) - : "cc", "memory"); + : "memory"); + smp_mb(); return result; } @@ -231,17 +234,20 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new) long oldval; unsigned long res; + smp_mb(); + asm volatile("// atomic64_cmpxchg\n" -"1: ldaxr %1, %2\n" +"1: ldxr %1, %2\n" " cmp %1, %3\n" " b.ne 2f\n" -" stlxr %w0, %4, %2\n" +" stxr %w0, %4, %2\n" " cbnz %w0, 1b\n" "2:" : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter) : "Ir" (old), "r" (new) - : "cc", 
"memory"); + : "cc"); + smp_mb(); return oldval; } @@ -253,11 +259,12 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) unsigned long tmp; asm volatile("// atomic64_dec_if_positive\n" -"1: ldaxr %0, %2\n" +"1: ldxr %0, %2\n" " subs %0, %0, #1\n" " b.mi 2f\n" " stlxr %w1, %0, %2\n" " cbnz %w1, 1b\n" +" dmb ish\n" "2:" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) : diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 78e20ba8806..409ca370cfe 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -25,7 +25,7 @@ #define wfi() asm volatile("wfi" : : : "memory") #define isb() asm volatile("isb" : : : "memory") -#define dsb() asm volatile("dsb sy" : : : "memory") +#define dsb(opt) asm volatile("dsb sy" : : : "memory") #define mb() dsb() #define rmb() asm volatile("dsb ld" : : : "memory") diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index fea9ee32720..889324981aa 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -116,6 +116,7 @@ extern void flush_dcache_page(struct page *); static inline void __flush_icache_all(void) { asm("ic ialluis"); + dsb(); } #define flush_dcache_mmap_lock(mapping) \ diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index 56166d7f4a2..57c0fa7bf71 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -29,44 +29,45 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size switch (size) { case 1: asm volatile("// __xchg1\n" - "1: ldaxrb %w0, %2\n" + "1: ldxrb %w0, %2\n" " stlxrb %w1, %w3, %2\n" " cbnz %w1, 1b\n" : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) : "r" (x) - : "cc", "memory"); + : "memory"); break; case 2: asm volatile("// __xchg2\n" - "1: ldaxrh %w0, %2\n" + "1: ldxrh %w0, %2\n" " stlxrh %w1, %w3, %2\n" " cbnz %w1, 1b\n" : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr) : "r" (x) - : "cc", "memory"); + : "memory"); break; case 4: asm volatile("// __xchg4\n" - "1: ldaxr %w0, %2\n" + "1: ldxr %w0, %2\n" " stlxr %w1, %w3, %2\n" " cbnz %w1, 1b\n" : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr) : "r" (x) - : "cc", "memory"); + : "memory"); break; case 8: asm volatile("// __xchg8\n" - "1: ldaxr %0, %2\n" + "1: ldxr %0, %2\n" " stlxr %w1, %3, %2\n" " cbnz %w1, 1b\n" : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr) : "r" (x) - : "cc", "memory"); + : "memory"); break; default: BUILD_BUG(); } + smp_mb(); return ret; } diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 78834123a32..c4a7f940b38 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -42,7 +42,7 @@ #define ESR_EL1_EC_SP_ALIGN (0x26) #define ESR_EL1_EC_FP_EXC32 (0x28) #define ESR_EL1_EC_FP_EXC64 (0x2C) -#define ESR_EL1_EC_SERRROR (0x2F) +#define ESR_EL1_EC_SERROR (0x2F) #define ESR_EL1_EC_BREAKPT_EL0 (0x30) #define ESR_EL1_EC_BREAKPT_EL1 (0x31) #define ESR_EL1_EC_SOFTSTP_EL0 (0x32) diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index 78cc3aba5d6..5f750dc96e0 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -24,10 +24,11 @@ #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \ asm volatile( \ -"1: ldaxr %w1, %2\n" \ +"1: ldxr %w1, %2\n" \ insn "\n" \ "2: stlxr %w3, %w0, %2\n" \ " cbnz %w3, 1b\n" \ +" dmb ish\n" \ "3:\n" \ " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ @@ -40,7 +41,7 @@ " .popsection\n" \ : "=&r" (ret), "=&r" (oldval), 
"+Q" (*uaddr), "=&r" (tmp) \ : "r" (oparg), "Ir" (-EFAULT) \ - : "cc", "memory") + : "memory") static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) @@ -111,11 +112,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, return -EFAULT; asm volatile("// futex_atomic_cmpxchg_inatomic\n" -"1: ldaxr %w1, %2\n" +"1: ldxr %w1, %2\n" " sub %w3, %w1, %w4\n" " cbnz %w3, 3f\n" "2: stlxr %w3, %w5, %2\n" " cbnz %w3, 1b\n" +" dmb ish\n" "3:\n" " .pushsection .fixup,\"ax\"\n" "4: mov %w0, %w6\n" @@ -127,7 +129,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, " .popsection\n" : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp) : "r" (oldval), "r" (newval), "Ir" (-EFAULT) - : "cc", "memory"); + : "memory"); *uval = val; return ret; diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index c98ef4771c7..0eb39865537 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -231,7 +231,7 @@ #define ESR_EL2_EC_SP_ALIGN (0x26) #define ESR_EL2_EC_FP_EXC32 (0x28) #define ESR_EL2_EC_FP_EXC64 (0x2C) -#define ESR_EL2_EC_SERRROR (0x2F) +#define ESR_EL2_EC_SERROR (0x2F) #define ESR_EL2_EC_BREAKPT (0x30) #define ESR_EL2_EC_BREAKPT_HYP (0x31) #define ESR_EL2_EC_SOFTSTP (0x32) diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 13fb0b3efc5..453a179469a 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h @@ -16,6 +16,8 @@ #ifndef __ASM_PERCPU_H #define __ASM_PERCPU_H +#ifdef CONFIG_SMP + static inline void set_my_cpu_offset(unsigned long off) { asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory"); @@ -36,6 +38,12 @@ static inline unsigned long __my_cpu_offset(void) } #define __my_cpu_offset __my_cpu_offset() +#else /* !CONFIG_SMP */ + +#define set_my_cpu_offset(x) do { } while (0) + +#endif /* CONFIG_SMP */ + #include <asm-generic/percpu.h> #endif /* __ASM_PERCPU_H */ diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index b524dcd1724..aa3917c8b62 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -136,11 +136,11 @@ extern struct page *empty_zero_page; /* * The following only work if pte_present(). Undefined behaviour otherwise. 
*/ -#define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) -#define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY) -#define pte_young(pte) (pte_val(pte) & PTE_AF) -#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL) -#define pte_write(pte) (pte_val(pte) & PTE_WRITE) +#define pte_present(pte) (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))) +#define pte_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY)) +#define pte_young(pte) (!!(pte_val(pte) & PTE_AF)) +#define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL)) +#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE)) #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) #define pte_valid_user(pte) \ diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index 3d5cf064d7a..c45b7b1b719 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -132,7 +132,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw) " cbnz %w0, 2b\n" : "=&r" (tmp), "+Q" (rw->lock) : "r" (0x80000000) - : "cc", "memory"); + : "memory"); } static inline int arch_write_trylock(arch_rwlock_t *rw) @@ -146,7 +146,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) "1:\n" : "=&r" (tmp), "+Q" (rw->lock) : "r" (0x80000000) - : "cc", "memory"); + : "memory"); return !tmp; } @@ -187,7 +187,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw) " cbnz %w1, 2b\n" : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) : - : "cc", "memory"); + : "memory"); } static inline void arch_read_unlock(arch_rwlock_t *rw) @@ -201,7 +201,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) " cbnz %w1, 1b\n" : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) : - : "cc", "memory"); + : "memory"); } static inline int arch_read_trylock(arch_rwlock_t *rw) @@ -216,7 +216,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) "1:\n" : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock) : - : "cc", "memory"); + : "memory"); return !tmp2; } diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 58125bf008d..bb8eb8a78e6 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -399,7 +399,10 @@ __SYSCALL(374, compat_sys_sendmmsg) __SYSCALL(375, sys_setns) __SYSCALL(376, compat_sys_process_vm_readv) __SYSCALL(377, compat_sys_process_vm_writev) -__SYSCALL(378, sys_ni_syscall) /* 378 for kcmp */ +__SYSCALL(378, sys_kcmp) +__SYSCALL(379, sys_finit_module) +__SYSCALL(380, sys_sched_setattr) +__SYSCALL(381, sys_sched_getattr) #define __NR_compat_syscalls 379 diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 495ab6f84a6..eaf54a30bed 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -148,6 +148,15 @@ struct kvm_arch_memory_slot { #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) +/* Device Control API: ARM VGIC */ +#define KVM_DEV_ARM_VGIC_GRP_ADDR 0 +#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 +#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2 +#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32 +#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) +#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0 +#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) + /* KVM_IRQ_LINE irq field index values */ #define KVM_ARM_IRQ_TYPE_SHIFT 24 #define KVM_ARM_IRQ_TYPE_MASK 0xff diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S index 63c48ffdf23..7787208e8cc 100644 --- a/arch/arm64/kernel/kuser32.S +++ 
b/arch/arm64/kernel/kuser32.S @@ -38,12 +38,13 @@ __kuser_cmpxchg64: // 0xffff0f60 .inst 0xe92d00f0 // push {r4, r5, r6, r7} .inst 0xe1c040d0 // ldrd r4, r5, [r0] .inst 0xe1c160d0 // ldrd r6, r7, [r1] - .inst 0xe1b20e9f // 1: ldaexd r0, r1, [r2] + .inst 0xe1b20f9f // 1: ldrexd r0, r1, [r2] .inst 0xe0303004 // eors r3, r0, r4 .inst 0x00313005 // eoreqs r3, r1, r5 .inst 0x01a23e96 // stlexdeq r3, r6, [r2] .inst 0x03330001 // teqeq r3, #1 .inst 0x0afffff9 // beq 1b + .inst 0xf57ff05b // dmb ish .inst 0xe2730000 // rsbs r0, r3, #0 .inst 0xe8bd00f0 // pop {r4, r5, r6, r7} .inst 0xe12fff1e // bx lr @@ -55,11 +56,12 @@ __kuser_memory_barrier: // 0xffff0fa0 .align 5 __kuser_cmpxchg: // 0xffff0fc0 - .inst 0xe1923e9f // 1: ldaex r3, [r2] + .inst 0xe1923f9f // 1: ldrex r3, [r2] .inst 0xe0533000 // subs r3, r3, r0 .inst 0x01823e91 // stlexeq r3, r1, [r2] .inst 0x03330001 // teqeq r3, #1 .inst 0x0afffffa // beq 1b + .inst 0xf57ff05b // dmb ish .inst 0xe2730000 // rsbs r0, r3, #0 .inst 0xe12fff1e // bx lr diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index c3b6c63ea5f..38f0558f0c0 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -48,7 +48,11 @@ int unwind_frame(struct stackframe *frame) frame->sp = fp + 0x10; frame->fp = *(unsigned long *)(fp); - frame->pc = *(unsigned long *)(fp + 8); + /* + * -4 here because we care about the PC at time of bl, + * not where the return will go. + */ + frame->pc = *(unsigned long *)(fp + 8) - 4; return 0; } diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 65d40cf6945..a7149cae161 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -238,6 +238,8 @@ void update_vsyscall(struct timekeeper *tk) vdso_data->use_syscall = use_syscall; vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec; vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec; + vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec; + vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; if (!use_syscall) { vdso_data->cs_cycle_last = tk->clock->cycle_last; @@ -245,8 +247,6 @@ void update_vsyscall(struct timekeeper *tk) vdso_data->xtime_clock_nsec = tk->xtime_nsec; vdso_data->cs_mult = tk->mult; vdso_data->cs_shift = tk->shift; - vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec; - vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; } smp_wmb(); diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index d8064af42e6..6d20b7d162d 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -48,7 +48,7 @@ $(obj-vdso): %.o: %.S # Actual build commands quiet_cmd_vdsold = VDSOL $@ - cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@ + cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@ quiet_cmd_vdsoas = VDSOA $@ cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $< diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S index f0a6d10b521..fe652ffd34c 100644 --- a/arch/arm64/kernel/vdso/gettimeofday.S +++ b/arch/arm64/kernel/vdso/gettimeofday.S @@ -103,6 +103,8 @@ ENTRY(__kernel_clock_gettime) bl __do_get_tspec seqcnt_check w9, 1b + mov x30, x2 + cmp w0, #CLOCK_MONOTONIC b.ne 6f @@ -118,6 +120,9 @@ ENTRY(__kernel_clock_gettime) ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne b.ne 8f + /* xtime_coarse_nsec is already right-shifted */ + mov x12, #0 + /* Get coarse timespec. 
*/ adr vdso_data, _vdso_data 3: seqcnt_acquire @@ -156,7 +161,7 @@ ENTRY(__kernel_clock_gettime) lsr x11, x11, x12 stp x10, x11, [x1, #TSPEC_TV_SEC] mov x0, xzr - ret x2 + ret 7: mov x30, x2 8: /* Syscall fallback. */ diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index 3b47c36e10f..2c56012cb2d 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -694,6 +694,24 @@ __hyp_panic_str: .align 2 +/* + * u64 kvm_call_hyp(void *hypfn, ...); + * + * This is not really a variadic function in the classic C-way and care must + * be taken when calling this to ensure parameters are passed in registers + * only, since the stack will change between the caller and the callee. + * + * Call the function with the first argument containing a pointer to the + * function you wish to call in Hyp mode, and subsequent arguments will be + * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the + * function pointer can be passed). The function being called must be mapped + * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are + * passed in r0 and r1. + * + * A function pointer with a value of 0 has a special meaning, and is + * used to implement __hyp_get_vectors in the same way as in + * arch/arm64/kernel/hyp_stub.S. + */ ENTRY(kvm_call_hyp) hvc #0 ret @@ -737,7 +755,12 @@ el1_sync: // Guest trapped into EL2 pop x2, x3 pop x0, x1 - push lr, xzr + /* Check for __hyp_get_vectors */ + cbnz x0, 1f + mrs x0, vbar_el2 + b 2f + +1: push lr, xzr /* * Compute the function address in EL2, and shuffle the parameters. @@ -750,7 +773,7 @@ el1_sync: // Guest trapped into EL2 blr lr pop lr, xzr - eret +2: eret el1_trap: /* diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S index e5db797790d..7dac371cc9a 100644 --- a/arch/arm64/lib/bitops.S +++ b/arch/arm64/lib/bitops.S @@ -46,11 +46,12 @@ ENTRY( \name ) mov x2, #1 add x1, x1, x0, lsr #3 // Get word offset lsl x4, x2, x3 // Create mask -1: ldaxr x2, [x1] +1: ldxr x2, [x1] lsr x0, x2, x3 // Save old value of bit \instr x2, x2, x4 // toggle bit stlxr w5, x2, [x1] cbnz w5, 1b + dmb ish and x0, x0, #1 3: ret ENDPROC(\name ) diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 45b5ab54c9e..fbd76785c5d 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -45,6 +45,7 @@ static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size, if (IS_ENABLED(CONFIG_DMA_CMA)) { struct page *page; + size = PAGE_ALIGN(size); page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, get_order(size)); if (!page) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index f557ebbe701..f8dc7e8fce6 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -203,10 +203,18 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, do { next = pmd_addr_end(addr, end); /* try section mapping first */ - if (((addr | next | phys) & ~SECTION_MASK) == 0) + if (((addr | next | phys) & ~SECTION_MASK) == 0) { + pmd_t old_pmd =*pmd; set_pmd(pmd, __pmd(phys | prot_sect_kernel)); - else + /* + * Check for previous table entries created during + * boot (__create_page_tables) and flush them. 
+ */ + if (!pmd_none(old_pmd)) + flush_tlb_all(); + } else { alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys)); + } phys += next - addr; } while (pmd++, addr = next, addr != end); } diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c index 7083cdada65..62c6101df26 100644 --- a/arch/arm64/mm/pgd.c +++ b/arch/arm64/mm/pgd.c @@ -32,17 +32,10 @@ pgd_t *pgd_alloc(struct mm_struct *mm) { - pgd_t *new_pgd; - if (PGD_SIZE == PAGE_SIZE) - new_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL); + return (pgd_t *)get_zeroed_page(GFP_KERNEL); else - new_pgd = kzalloc(PGD_SIZE, GFP_KERNEL); - - if (!new_pgd) - return NULL; - - return new_pgd; + return kzalloc(PGD_SIZE, GFP_KERNEL); } void pgd_free(struct mm_struct *mm, pgd_t *pgd) diff --git a/arch/avr32/Makefile b/arch/avr32/Makefile index 22fb66590dc..dba48a5d5bb 100644 --- a/arch/avr32/Makefile +++ b/arch/avr32/Makefile @@ -11,7 +11,7 @@ all: uImage vmlinux.elf KBUILD_DEFCONFIG := atstk1002_defconfig -KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic +KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic -D__linux__ KBUILD_AFLAGS += -mrelax -mno-pic KBUILD_CFLAGS_MODULE += -mno-relax LDFLAGS_vmlinux += --relax diff --git a/arch/avr32/boards/mimc200/fram.c b/arch/avr32/boards/mimc200/fram.c index 9764a1a1073..c1466a872b9 100644 --- a/arch/avr32/boards/mimc200/fram.c +++ b/arch/avr32/boards/mimc200/fram.c @@ -11,6 +11,7 @@ #define FRAM_VERSION "1.0" #include <linux/miscdevice.h> +#include <linux/module.h> #include <linux/proc_fs.h> #include <linux/mm.h> #include <linux/io.h> diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild index cfb9fe1b8df..c7c64a63c29 100644 --- a/arch/avr32/include/asm/Kbuild +++ b/arch/avr32/include/asm/Kbuild @@ -17,5 +17,6 @@ generic-y += scatterlist.h generic-y += sections.h generic-y += topology.h generic-y += trace_clock.h +generic-y += vga.h generic-y += xor.h generic-y += hash.h diff --git a/arch/avr32/include/asm/io.h b/arch/avr32/include/asm/io.h index fc6483f83cc..4f5ec2bb717 100644 --- a/arch/avr32/include/asm/io.h +++ b/arch/avr32/include/asm/io.h @@ -295,6 +295,8 @@ extern void __iounmap(void __iomem *addr); #define iounmap(addr) \ __iounmap(addr) +#define ioremap_wc ioremap_nocache + #define cached(addr) P1SEGADDR(addr) #define uncached(addr) P2SEGADDR(addr) diff --git a/arch/c6x/include/asm/cache.h b/arch/c6x/include/asm/cache.h index 09c5a0f5f4d..86648c083bb 100644 --- a/arch/c6x/include/asm/cache.h +++ b/arch/c6x/include/asm/cache.h @@ -12,6 +12,7 @@ #define _ASM_C6X_CACHE_H #include <linux/irqflags.h> +#include <linux/init.h> /* * Cache line size diff --git a/arch/cris/include/asm/bitops.h b/arch/cris/include/asm/bitops.h index 184066ceb1f..053c17b3655 100644 --- a/arch/cris/include/asm/bitops.h +++ b/arch/cris/include/asm/bitops.h @@ -144,7 +144,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) * definition, which doesn't have the same semantics. We don't want to * use -fno-builtin, so just hide the name ffs. 
*/ -#define ffs kernel_ffs +#define ffs(x) kernel_ffs(x) #include <asm-generic/bitops/fls.h> #include <asm-generic/bitops/__fls.h> diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index afd45e0d552..ae763d8bf55 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h @@ -11,7 +11,7 @@ -#define NR_syscalls 312 /* length of syscall table */ +#define NR_syscalls 314 /* length of syscall table */ /* * The following defines stop scripts/checksyscalls.sh from complaining about diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h index 34fd6fe46da..715e85f858d 100644 --- a/arch/ia64/include/uapi/asm/unistd.h +++ b/arch/ia64/include/uapi/asm/unistd.h @@ -325,5 +325,7 @@ #define __NR_process_vm_writev 1333 #define __NR_accept4 1334 #define __NR_finit_module 1335 +#define __NR_sched_setattr 1336 +#define __NR_sched_getattr 1337 #endif /* _UAPI_ASM_IA64_UNISTD_H */ diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index ddea607f948..fa8d61a312a 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -1773,6 +1773,8 @@ sys_call_table: data8 sys_process_vm_writev data8 sys_accept4 data8 sys_finit_module // 1335 + data8 sys_sched_setattr + data8 sys_sched_getattr .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c index a96bcf83a73..20e8a9b21d7 100644 --- a/arch/ia64/kernel/uncached.c +++ b/arch/ia64/kernel/uncached.c @@ -98,7 +98,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid) /* attempt to allocate a granule's worth of cached memory pages */ page = alloc_pages_exact_node(nid, - GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, + GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, IA64_GRANULE_SHIFT-PAGE_SHIFT); if (!page) { mutex_unlock(&uc_pool->add_chunk_mutex); diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index 7cc8c364924..6fb9e813a91 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild @@ -1,4 +1,4 @@ - +generic-y += barrier.h generic-y += bitsperlong.h generic-y += clkdev.h generic-y += cputime.h @@ -6,6 +6,7 @@ generic-y += device.h generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h +generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ipcbuf.h @@ -18,6 +19,7 @@ generic-y += local.h generic-y += mman.h generic-y += mutex.h generic-y += percpu.h +generic-y += preempt.h generic-y += resource.h generic-y += scatterlist.h generic-y += sections.h @@ -31,5 +33,3 @@ generic-y += trace_clock.h generic-y += types.h generic-y += word-at-a-time.h generic-y += xor.h -generic-y += preempt.h -generic-y += hash.h diff --git a/arch/m68k/include/asm/barrier.h b/arch/m68k/include/asm/barrier.h deleted file mode 100644 index 15c5f77c161..00000000000 --- a/arch/m68k/include/asm/barrier.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _M68K_BARRIER_H -#define _M68K_BARRIER_H - -#define nop() do { asm volatile ("nop"); barrier(); } while (0) - -#include <asm-generic/barrier.h> - -#endif /* _M68K_BARRIER_H */ diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 014f288fc81..9d38b73989e 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -4,7 +4,7 @@ #include <uapi/asm/unistd.h> -#define NR_syscalls 349 +#define NR_syscalls 351 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT diff --git 
a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h index 625f321001d..b932dd47004 100644 --- a/arch/m68k/include/uapi/asm/unistd.h +++ b/arch/m68k/include/uapi/asm/unistd.h @@ -354,5 +354,7 @@ #define __NR_process_vm_writev 346 #define __NR_kcmp 347 #define __NR_finit_module 348 +#define __NR_sched_setattr 349 +#define __NR_sched_getattr 350 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index 3f04ea0ab80..b6223dc41d8 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S @@ -369,4 +369,6 @@ ENTRY(sys_call_table) .long sys_process_vm_writev .long sys_kcmp .long sys_finit_module + .long sys_sched_setattr + .long sys_sched_getattr /* 350 */ diff --git a/arch/microblaze/include/asm/delay.h b/arch/microblaze/include/asm/delay.h index 05b7d39e439..66fc24c2423 100644 --- a/arch/microblaze/include/asm/delay.h +++ b/arch/microblaze/include/asm/delay.h @@ -13,6 +13,8 @@ #ifndef _ASM_MICROBLAZE_DELAY_H #define _ASM_MICROBLAZE_DELAY_H +#include <linux/param.h> + extern inline void __delay(unsigned long loops) { asm volatile ("# __delay \n\t" \ diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h index a2cea720607..3fbb7f1db3b 100644 --- a/arch/microblaze/include/asm/io.h +++ b/arch/microblaze/include/asm/io.h @@ -89,6 +89,11 @@ static inline unsigned int readl(const volatile void __iomem *addr) { return le32_to_cpu(*(volatile unsigned int __force *)addr); } +#define readq readq +static inline u64 readq(const volatile void __iomem *addr) +{ + return le64_to_cpu(__raw_readq(addr)); +} static inline void writeb(unsigned char v, volatile void __iomem *addr) { *(volatile unsigned char __force *)addr = v; @@ -101,6 +106,7 @@ static inline void writel(unsigned int v, volatile void __iomem *addr) { *(volatile unsigned int __force *)addr = cpu_to_le32(v); } +#define writeq(b, addr) __raw_writeq(cpu_to_le64(b), addr) /* ioread and iowrite variants. thease are for now same as __raw_ * variants of accessors. we might check for endianess in the feature diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S index b7fb0438458..17645b2e2f0 100644 --- a/arch/microblaze/kernel/head.S +++ b/arch/microblaze/kernel/head.S @@ -66,7 +66,7 @@ real_start: mts rmsr, r0 /* Disable stack protection from bootloader */ mts rslr, r0 - addi r8, r0, 0xFFFFFFF + addi r8, r0, 0xFFFFFFFF mts rshr, r8 /* * According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc' diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index dcae3a7035d..95fa1f1d5c8 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1776,12 +1776,12 @@ endchoice config FORCE_MAX_ZONEORDER int "Maximum zone order" - range 14 64 if HUGETLB_PAGE && PAGE_SIZE_64KB - default "14" if HUGETLB_PAGE && PAGE_SIZE_64KB - range 13 64 if HUGETLB_PAGE && PAGE_SIZE_32KB - default "13" if HUGETLB_PAGE && PAGE_SIZE_32KB - range 12 64 if HUGETLB_PAGE && PAGE_SIZE_16KB - default "12" if HUGETLB_PAGE && PAGE_SIZE_16KB + range 14 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB + default "14" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB + range 13 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB + default "13" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB + range 12 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB + default "12" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB range 11 64 default "11" help @@ -2353,9 +2353,8 @@ config SECCOMP If unsure, say Y. Only embedded should say N here. 
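The microblaze readq()/writeq() accessors added above follow the usual pattern of wrapping the __raw_* accessors with an explicit little-endian conversion. A short usage sketch; the device base and the 0x18 register offset are hypothetical:

#include <linux/io.h>		/* readq()/writeq() on microblaze after this change */
#include <linux/types.h>

/* Read a hypothetical 64-bit hardware counter, then clear it. */
static u64 snapshot_and_clear(void __iomem *base)
{
	u64 old = readq(base + 0x18);	/* le64_to_cpu(__raw_readq(...))     */

	writeq(0, base + 0x18);		/* __raw_writeq(cpu_to_le64(0), ...) */
	return old;
}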
config MIPS_O32_FP64_SUPPORT - bool "Support for O32 binaries using 64-bit FP" + bool "Support for O32 binaries using 64-bit FP (EXPERIMENTAL)" depends on 32BIT || MIPS32_O32 - default y help When this is enabled, the kernel will support use of 64-bit floating point registers with binaries using the O32 ABI along with the @@ -2367,7 +2366,14 @@ config MIPS_O32_FP64_SUPPORT of your kernel & potentially improve FP emulation performance by saying N here. - If unsure, say Y. + Although binutils currently supports use of this flag the details + concerning its effect upon the O32 ABI in userland are still being + worked on. In order to avoid userland becoming dependant upon current + behaviour before the details have been finalised, this option should + be considered experimental and only enabled by those working upon + said details. + + If unsure, say N. config USE_OF bool diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c index 9edc35ff8cf..acf9a2a37f5 100644 --- a/arch/mips/alchemy/board-gpr.c +++ b/arch/mips/alchemy/board-gpr.c @@ -53,10 +53,8 @@ void __init prom_init(void) prom_init_cmdline(); memsize_str = prom_getenv("memsize"); - if (!memsize_str) + if (!memsize_str || kstrtoul(memsize_str, 0, &memsize)) memsize = 0x04000000; - else - strict_strtoul(memsize_str, 0, &memsize); add_memory_region(0, memsize, BOOT_MEM_RAM); } diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c index 9969dbab19e..25a59a23547 100644 --- a/arch/mips/alchemy/board-mtx1.c +++ b/arch/mips/alchemy/board-mtx1.c @@ -52,10 +52,8 @@ void __init prom_init(void) prom_init_cmdline(); memsize_str = prom_getenv("memsize"); - if (!memsize_str) + if (!memsize_str || kstrtoul(memsize_str, 0, &memsize)) memsize = 0x04000000; - else - strict_strtoul(memsize_str, 0, &memsize); add_memory_region(0, memsize, BOOT_MEM_RAM); } diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c index 11f3ad20321..5483906e0f8 100644 --- a/arch/mips/alchemy/devboards/db1000.c +++ b/arch/mips/alchemy/devboards/db1000.c @@ -534,13 +534,10 @@ static int __init db1000_dev_init(void) s0 = AU1100_GPIO1_INT; s1 = AU1100_GPIO4_INT; + gpio_request(19, "sd0_cd"); + gpio_request(20, "sd1_cd"); gpio_direction_input(19); /* sd0 cd# */ gpio_direction_input(20); /* sd1 cd# */ - gpio_direction_input(21); /* touch pendown# */ - gpio_direction_input(207); /* SPI MISO */ - gpio_direction_output(208, 0); /* SPI MOSI */ - gpio_direction_output(209, 1); /* SPI SCK */ - gpio_direction_output(210, 1); /* SPI CS# */ /* spi_gpio on SSI0 pins */ pfc = __raw_readl((void __iomem *)SYS_PINFUNC); diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c index 6d612e2b949..cdd8246f92b 100644 --- a/arch/mips/bcm47xx/board.c +++ b/arch/mips/bcm47xx/board.c @@ -1,3 +1,4 @@ +#include <linux/errno.h> #include <linux/export.h> #include <linux/string.h> #include <bcm47xx_board.h> diff --git a/arch/mips/bcm47xx/nvram.c b/arch/mips/bcm47xx/nvram.c index 6decb27cf48..2bed73a684a 100644 --- a/arch/mips/bcm47xx/nvram.c +++ b/arch/mips/bcm47xx/nvram.c @@ -196,7 +196,7 @@ int bcm47xx_nvram_gpio_pin(const char *name) char nvram_var[10]; char buf[30]; - for (i = 0; i < 16; i++) { + for (i = 0; i < 32; i++) { err = snprintf(nvram_var, sizeof(nvram_var), "gpio%i", i); if (err <= 0) continue; diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index 25fbfae06c1..c2bb4f896ce 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ 
-975,10 +975,6 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d, if (ciu > 1 || bit > 63) return -EINVAL; - /* These are the GPIO lines */ - if (ciu == 0 && bit >= 16 && bit < 32) - return -EINVAL; - *out_hwirq = (ciu << 6) | bit; *out_type = 0; @@ -1007,6 +1003,10 @@ static int octeon_irq_ciu_map(struct irq_domain *d, if (!octeon_irq_virq_in_range(virq)) return -EINVAL; + /* Don't map irq if it is reserved for GPIO. */ + if (line == 0 && bit >= 16 && bit <32) + return 0; + if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0) return -EINVAL; @@ -1525,10 +1525,6 @@ static int octeon_irq_ciu2_xlat(struct irq_domain *d, ciu = intspec[0]; bit = intspec[1]; - /* Line 7 are the GPIO lines */ - if (ciu > 6 || bit > 63) - return -EINVAL; - *out_hwirq = (ciu << 6) | bit; *out_type = 0; @@ -1570,8 +1566,14 @@ static int octeon_irq_ciu2_map(struct irq_domain *d, if (!octeon_irq_virq_in_range(virq)) return -EINVAL; - /* Line 7 are the GPIO lines */ - if (line > 6 || octeon_irq_ciu_to_irq[line][bit] != 0) + /* + * Don't map irq if it is reserved for GPIO. + * (Line 7 are the GPIO lines.) + */ + if (line == 7) + return 0; + + if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0) return -EINVAL; if (octeon_irq_ciu2_is_edge(line, bit)) diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index 3220c93ea98..4225e99bd7b 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h @@ -9,6 +9,7 @@ #define _ASM_ASMMACRO_H #include <asm/hazards.h> +#include <asm/asm-offsets.h> #ifdef CONFIG_32BIT #include <asm/asmmacro-32.h> @@ -54,11 +55,21 @@ .endm .macro local_irq_disable reg=t0 +#ifdef CONFIG_PREEMPT + lw \reg, TI_PRE_COUNT($28) + addi \reg, \reg, 1 + sw \reg, TI_PRE_COUNT($28) +#endif mfc0 \reg, CP0_STATUS ori \reg, \reg, 1 xori \reg, \reg, 1 mtc0 \reg, CP0_STATUS irq_disable_hazard +#ifdef CONFIG_PREEMPT + lw \reg, TI_PRE_COUNT($28) + addi \reg, \reg, -1 + sw \reg, TI_PRE_COUNT($28) +#endif .endm #endif /* CONFIG_MIPS_MT_SMTC */ @@ -106,7 +117,7 @@ .endm .macro fpu_save_double thread status tmp -#if defined(CONFIG_MIPS64) || defined(CONFIG_CPU_MIPS32_R2) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) sll \tmp, \status, 5 bgez \tmp, 10f fpu_save_16odd \thread @@ -159,7 +170,7 @@ .endm .macro fpu_restore_double thread status tmp -#if defined(CONFIG_MIPS64) || defined(CONFIG_CPU_MIPS32_R2) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) sll \tmp, \status, 5 bgez \tmp, 10f # 16 register mode? 
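The local_irq_disable macro change above raises the thread's preempt count around the mfc0/mtc0 pair so the non-atomic read-modify-write of CP0_STATUS cannot be interrupted by a preemption that would let a stale STATUS value be written back. Roughly the same idea expressed in C (a sketch under that assumption, not the real implementation; the accessors are the standard asm/mipsregs.h ones):

#include <linux/preempt.h>
#include <asm/mipsregs.h>

static inline void rmw_status_irq_off(void)
{
	preempt_disable();	/* asm macro: TI_PRE_COUNT++ under CONFIG_PREEMPT */

	/* read-modify-write of CP0_STATUS with preemption blocked */
	write_c0_status(read_c0_status() & ~ST0_IE);

	preempt_enable();	/* asm macro decrements without a resched check */
}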
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index cfe092fc720..58e50cbdb1a 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -57,7 +57,7 @@ static inline int __enable_fpu(enum fpu_mode mode) return 0; case FPU_64BIT: -#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_MIPS64)) +#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT)) /* we only have a 32-bit FPU */ return SIGFPE; #endif @@ -74,6 +74,8 @@ static inline int __enable_fpu(enum fpu_mode mode) default: BUG(); } + + return SIGFPE; } #define __disable_fpu() \ diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h index ce35c9af0c2..992aaba603b 100644 --- a/arch/mips/include/asm/ftrace.h +++ b/arch/mips/include/asm/ftrace.h @@ -22,12 +22,12 @@ extern void _mcount(void); #define safe_load(load, src, dst, error) \ do { \ asm volatile ( \ - "1: " load " %[" STR(dst) "], 0(%[" STR(src) "])\n"\ - " li %[" STR(error) "], 0\n" \ + "1: " load " %[tmp_dst], 0(%[tmp_src])\n" \ + " li %[tmp_err], 0\n" \ "2:\n" \ \ ".section .fixup, \"ax\"\n" \ - "3: li %[" STR(error) "], 1\n" \ + "3: li %[tmp_err], 1\n" \ " j 2b\n" \ ".previous\n" \ \ @@ -35,8 +35,8 @@ do { \ STR(PTR) "\t1b, 3b\n\t" \ ".previous\n" \ \ - : [dst] "=&r" (dst), [error] "=r" (error)\ - : [src] "r" (src) \ + : [tmp_dst] "=&r" (dst), [tmp_err] "=r" (error)\ + : [tmp_src] "r" (src) \ : "memory" \ ); \ } while (0) @@ -44,12 +44,12 @@ do { \ #define safe_store(store, src, dst, error) \ do { \ asm volatile ( \ - "1: " store " %[" STR(src) "], 0(%[" STR(dst) "])\n"\ - " li %[" STR(error) "], 0\n" \ + "1: " store " %[tmp_src], 0(%[tmp_dst])\n"\ + " li %[tmp_err], 0\n" \ "2:\n" \ \ ".section .fixup, \"ax\"\n" \ - "3: li %[" STR(error) "], 1\n" \ + "3: li %[tmp_err], 1\n" \ " j 2b\n" \ ".previous\n" \ \ @@ -57,8 +57,8 @@ do { \ STR(PTR) "\t1b, 3b\n\t" \ ".previous\n" \ \ - : [error] "=r" (error) \ - : [dst] "r" (dst), [src] "r" (src)\ + : [tmp_err] "=r" (error) \ + : [tmp_dst] "r" (dst), [tmp_src] "r" (src)\ : "memory" \ ); \ } while (0) diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h index 33e8dbfc1b6..f35b131977e 100644 --- a/arch/mips/include/asm/syscall.h +++ b/arch/mips/include/asm/syscall.h @@ -13,6 +13,7 @@ #ifndef __ASM_MIPS_SYSCALL_H #define __ASM_MIPS_SYSCALL_H +#include <linux/compiler.h> #include <linux/audit.h> #include <linux/elf-em.h> #include <linux/kernel.h> @@ -39,14 +40,14 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg, #ifdef CONFIG_32BIT case 4: case 5: case 6: case 7: - return get_user(*arg, (int *)usp + 4 * n); + return get_user(*arg, (int *)usp + n); #endif #ifdef CONFIG_64BIT case 4: case 5: case 6: case 7: #ifdef CONFIG_MIPS32_O32 if (test_thread_flag(TIF_32BIT_REGS)) - return get_user(*arg, (int *)usp + 4 * n); + return get_user(*arg, (int *)usp + n); else #endif *arg = regs->regs[4 + n]; @@ -57,6 +58,8 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg, default: BUG(); } + + unreachable(); } static inline long syscall_get_return_value(struct task_struct *task, @@ -83,11 +86,10 @@ static inline void syscall_get_arguments(struct task_struct *task, unsigned int i, unsigned int n, unsigned long *args) { - unsigned long arg; int ret; while (n--) - ret |= mips_get_syscall_arg(&arg, task, regs, i++); + ret |= mips_get_syscall_arg(args++, task, regs, i++); /* * No way to communicate an error because this is a void function. 
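The mips_get_syscall_arg() fix just above is a pointer-arithmetic scaling bug: (int *)usp + 4 * n advances by 4 * n * sizeof(int) bytes, i.e. 16*n, while the O32 ABI stacks its 32-bit arguments only 4 bytes apart. A stand-alone sketch of the two offsets (stack pointer value purely illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t usp = 0x7fff0000;	/* pretend user stack pointer      */
	unsigned int n = 1;		/* index of the second stacked arg */

	/* old expression scales twice -> 16-byte stride; new -> 4 bytes */
	printf("old: +%lu bytes\n", (unsigned long)((uintptr_t)((int *)usp + 4 * n) - usp));
	printf("new: +%lu bytes\n", (unsigned long)((uintptr_t)((int *)usp + n) - usp));
	return 0;
}

The same hunk also has syscall_get_arguments() write each value straight into the caller's args array; previously the values landed in a local variable and were discarded.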
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index b39ba25b41c..f25181b1994 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -163,8 +163,8 @@ enum cop1_sdw_func { */ enum cop1x_func { lwxc1_op = 0x00, ldxc1_op = 0x01, - pfetch_op = 0x07, swxc1_op = 0x08, - sdxc1_op = 0x09, madd_s_op = 0x20, + swxc1_op = 0x08, sdxc1_op = 0x09, + pfetch_op = 0x0f, madd_s_op = 0x20, madd_d_op = 0x21, madd_e_op = 0x22, msub_s_op = 0x28, msub_d_op = 0x29, msub_e_op = 0x2a, nmadd_s_op = 0x30, diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index 1dee279f966..d6e154a9e6a 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -369,16 +369,18 @@ #define __NR_process_vm_writev (__NR_Linux + 346) #define __NR_kcmp (__NR_Linux + 347) #define __NR_finit_module (__NR_Linux + 348) +#define __NR_sched_setattr (__NR_Linux + 349) +#define __NR_sched_getattr (__NR_Linux + 350) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 348 +#define __NR_Linux_syscalls 350 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 348 +#define __NR_O32_Linux_syscalls 350 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -695,16 +697,18 @@ #define __NR_kcmp (__NR_Linux + 306) #define __NR_finit_module (__NR_Linux + 307) #define __NR_getdents64 (__NR_Linux + 308) +#define __NR_sched_setattr (__NR_Linux + 309) +#define __NR_sched_getattr (__NR_Linux + 310) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 308 +#define __NR_Linux_syscalls 310 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 308 +#define __NR_64_Linux_syscalls 310 #if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -1025,15 +1029,17 @@ #define __NR_process_vm_writev (__NR_Linux + 310) #define __NR_kcmp (__NR_Linux + 311) #define __NR_finit_module (__NR_Linux + 312) +#define __NR_sched_setattr (__NR_Linux + 313) +#define __NR_sched_getattr (__NR_Linux + 314) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 312 +#define __NR_Linux_syscalls 314 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 312 +#define __NR_N32_Linux_syscalls 314 #endif /* _UAPI_ASM_UNISTD_H */ diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 185ba258361..374ed74cd51 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -111,11 +111,10 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1, safe_store_code(new_code1, ip, faulted); if (unlikely(faulted)) return -EFAULT; - ip += 4; - safe_store_code(new_code2, ip, faulted); + safe_store_code(new_code2, ip + 4, faulted); if (unlikely(faulted)) return -EFAULT; - flush_icache_range(ip, ip + 8); /* original ip + 12 */ + flush_icache_range(ip, ip + 8); return 0; } #endif diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 253b2fb5202..73b0ddf910d 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S @@ -35,9 +35,9 @@ LEAF(_save_fp_context) cfc1 t1, fcr31 -#if defined(CONFIG_64BIT) || defined(CONFIG_MIPS32_R2) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) .set push -#ifdef CONFIG_MIPS32_R2 +#ifdef CONFIG_CPU_MIPS32_R2 .set mips64r2 mfc0 t0, CP0_STATUS sll t0, t0, 5 @@ -146,11 +146,11 @@ LEAF(_save_fp_context32) * - cp1 status/control register */ LEAF(_restore_fp_context) - 
EX lw t0, SC_FPC_CSR(a0) + EX lw t1, SC_FPC_CSR(a0) -#if defined(CONFIG_64BIT) || defined(CONFIG_MIPS32_R2) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) .set push -#ifdef CONFIG_MIPS32_R2 +#ifdef CONFIG_CPU_MIPS32_R2 .set mips64r2 mfc0 t0, CP0_STATUS sll t0, t0, 5 @@ -191,7 +191,7 @@ LEAF(_restore_fp_context) EX ldc1 $f26, SC_FPREGS+208(a0) EX ldc1 $f28, SC_FPREGS+224(a0) EX ldc1 $f30, SC_FPREGS+240(a0) - ctc1 t0, fcr31 + ctc1 t1, fcr31 jr ra li v0, 0 # success END(_restore_fp_context) @@ -199,7 +199,7 @@ LEAF(_restore_fp_context) #ifdef CONFIG_MIPS32_COMPAT LEAF(_restore_fp_context32) /* Restore an o32 sigcontext. */ - EX lw t0, SC32_FPC_CSR(a0) + EX lw t1, SC32_FPC_CSR(a0) mfc0 t0, CP0_STATUS sll t0, t0, 5 @@ -239,7 +239,7 @@ LEAF(_restore_fp_context32) EX ldc1 $f26, SC32_FPREGS+208(a0) EX ldc1 $f28, SC32_FPREGS+224(a0) EX ldc1 $f30, SC32_FPREGS+240(a0) - ctc1 t0, fcr31 + ctc1 t1, fcr31 jr ra li v0, 0 # success END(_restore_fp_context32) diff --git a/arch/mips/kernel/rtlx-cmp.c b/arch/mips/kernel/rtlx-cmp.c index 56dc6963515..758fb3cd232 100644 --- a/arch/mips/kernel/rtlx-cmp.c +++ b/arch/mips/kernel/rtlx-cmp.c @@ -112,5 +112,8 @@ void __exit rtlx_module_exit(void) for (i = 0; i < RTLX_CHANNELS; i++) device_destroy(mt_class, MKDEV(major, i)); + unregister_chrdev(major, RTLX_MODULE_NAME); + + aprp_hook = NULL; } diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c index 91d61ba422b..9c1aca00fd5 100644 --- a/arch/mips/kernel/rtlx-mt.c +++ b/arch/mips/kernel/rtlx-mt.c @@ -144,5 +144,8 @@ void __exit rtlx_module_exit(void) for (i = 0; i < RTLX_CHANNELS; i++) device_destroy(mt_class, MKDEV(major, i)); + unregister_chrdev(major, RTLX_MODULE_NAME); + + aprp_hook = NULL; } diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index e8e541b40d8..a5b14f48e1a 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -563,3 +563,5 @@ EXPORT(sys_call_table) PTR sys_process_vm_writev PTR sys_kcmp PTR sys_finit_module + PTR sys_sched_setattr + PTR sys_sched_getattr /* 4350 */ diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 57e3742fec5..b56e254beb1 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -425,4 +425,6 @@ EXPORT(sys_call_table) PTR sys_kcmp PTR sys_finit_module PTR sys_getdents64 + PTR sys_sched_setattr + PTR sys_sched_getattr /* 5310 */ .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 2f48f593439..f7e5b72cf48 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -418,4 +418,6 @@ EXPORT(sysn32_call_table) PTR compat_sys_process_vm_writev /* 6310 */ PTR sys_kcmp PTR sys_finit_module + PTR sys_sched_setattr + PTR sys_sched_getattr .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index f1acdb429f4..6788727d91a 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -541,4 +541,6 @@ EXPORT(sys32_call_table) PTR compat_sys_process_vm_writev PTR sys_kcmp PTR sys_finit_module + PTR sys_sched_setattr + PTR sys_sched_getattr /* 4350 */ .size sys32_call_table,.-sys32_call_table diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 506925b2c3f..0b4e2e38294 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -1538,10 +1538,10 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, break; } - case 0x7: 
/* 7 */ - if (MIPSInst_FUNC(ir) != pfetch_op) { + case 0x3: + if (MIPSInst_FUNC(ir) != pfetch_op) return SIGILL; - } + /* ignore prefx operation */ break; diff --git a/arch/mips/mti-malta/malta-amon.c b/arch/mips/mti-malta/malta-amon.c index 592ac042742..84ac523b0ce 100644 --- a/arch/mips/mti-malta/malta-amon.c +++ b/arch/mips/mti-malta/malta-amon.c @@ -72,7 +72,7 @@ int amon_cpu_start(int cpu, return 0; } -#ifdef CONFIG_MIPS_VPE_LOADER +#ifdef CONFIG_MIPS_VPE_LOADER_CMP int vpe_run(struct vpe *v) { struct vpe_notifications *n; diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index ca3e3a46a42..2242181a628 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c @@ -119,7 +119,7 @@ static void malta_hw0_irqdispatch(void) do_IRQ(MALTA_INT_BASE + irq); -#ifdef MIPS_VPE_APSP_API +#ifdef CONFIG_MIPS_VPE_APSP_API_MT if (aprp_hook) aprp_hook(); #endif @@ -310,7 +310,7 @@ static void ipi_call_dispatch(void) static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) { -#ifdef MIPS_VPE_APSP_API +#ifdef CONFIG_MIPS_VPE_APSP_API_CMP if (aprp_hook) aprp_hook(); #endif diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c index d37be36dc65..2b91b0e6156 100644 --- a/arch/mips/pci/msi-octeon.c +++ b/arch/mips/pci/msi-octeon.c @@ -150,6 +150,7 @@ msi_irq_allocated: msg.address_lo = ((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff; msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32; + break; case OCTEON_DMA_BAR_TYPE_BIG: /* When using big bar, Bar 0 is based at 0 */ msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff; diff --git a/arch/parisc/hpux/fs.c b/arch/parisc/hpux/fs.c index 88d0962de65..2bedafea3d9 100644 --- a/arch/parisc/hpux/fs.c +++ b/arch/parisc/hpux/fs.c @@ -33,22 +33,9 @@ int hpux_execve(struct pt_regs *regs) { - int error; - struct filename *filename; - - filename = getname((const char __user *) regs->gr[26]); - error = PTR_ERR(filename); - if (IS_ERR(filename)) - goto out; - - error = do_execve(filename->name, + return do_execve(getname((const char __user *) regs->gr[26]), (const char __user *const __user *) regs->gr[25], (const char __user *const __user *) regs->gr[24]); - - putname(filename); - -out: - return error; } struct hpux_dirent { diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index 637fe031aa8..60d5d174dfe 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h @@ -32,17 +32,6 @@ void copy_page_asm(void *to, void *from); void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, struct page *pg); -/* #define CONFIG_PARISC_TMPALIAS */ - -#ifdef CONFIG_PARISC_TMPALIAS -void clear_user_highpage(struct page *page, unsigned long vaddr); -#define clear_user_highpage clear_user_highpage -struct vm_area_struct; -void copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr, struct vm_area_struct *vma); -#define __HAVE_ARCH_COPY_USER_HIGHPAGE -#endif - /* * These are used to make use of C type-checking.. 
*/ diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index 3516e0b2704..64f2992e439 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h @@ -191,8 +191,4 @@ static __inline__ int arch_write_can_lock(arch_rwlock_t *rw) #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) -#define arch_spin_relax(lock) cpu_relax() -#define arch_read_relax(lock) cpu_relax() -#define arch_write_relax(lock) cpu_relax() - #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h index 42706794a36..265ae5190b0 100644 --- a/arch/parisc/include/uapi/asm/unistd.h +++ b/arch/parisc/include/uapi/asm/unistd.h @@ -828,13 +828,13 @@ #define __NR_finit_module (__NR_Linux + 333) #define __NR_sched_setattr (__NR_Linux + 334) #define __NR_sched_getattr (__NR_Linux + 335) +#define __NR_utimes (__NR_Linux + 336) -#define __NR_Linux_syscalls (__NR_sched_getattr + 1) +#define __NR_Linux_syscalls (__NR_utimes + 1) #define __IGNORE_select /* newselect */ #define __IGNORE_fadvise64 /* fadvise64_64 */ -#define __IGNORE_utimes /* utime */ #define HPUX_GATEWAY_ADDR 0xC0000004 diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index ac87a40502e..a6ffc775a9f 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -581,67 +581,3 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); } } - -#ifdef CONFIG_PARISC_TMPALIAS - -void clear_user_highpage(struct page *page, unsigned long vaddr) -{ - void *vto; - unsigned long flags; - - /* Clear using TMPALIAS region. The page doesn't need to - be flushed but the kernel mapping needs to be purged. */ - - vto = kmap_atomic(page); - - /* The PA-RISC 2.0 Architecture book states on page F-6: - "Before a write-capable translation is enabled, *all* - non-equivalently-aliased translations must be removed - from the page table and purged from the TLB. (Note - that the caches are not required to be flushed at this - time.) Before any non-equivalent aliased translation - is re-enabled, the virtual address range for the writeable - page (the entire page) must be flushed from the cache, - and the write-capable translation removed from the page - table and purged from the TLB." */ - - purge_kernel_dcache_page_asm((unsigned long)vto); - purge_tlb_start(flags); - pdtlb_kernel(vto); - purge_tlb_end(flags); - preempt_disable(); - clear_user_page_asm(vto, vaddr); - preempt_enable(); - - pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */ -} - -void copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr, struct vm_area_struct *vma) -{ - void *vfrom, *vto; - unsigned long flags; - - /* Copy using TMPALIAS region. This has the advantage - that the `from' page doesn't need to be flushed. However, - the `to' page must be flushed in copy_user_page_asm since - it can be used to bring in executable code. 
*/ - - vfrom = kmap_atomic(from); - vto = kmap_atomic(to); - - purge_kernel_dcache_page_asm((unsigned long)vto); - purge_tlb_start(flags); - pdtlb_kernel(vto); - pdtlb_kernel(vfrom); - purge_tlb_end(flags); - preempt_disable(); - copy_user_page_asm(vto, vfrom, vaddr); - flush_dcache_page_asm(__pa(vto), vaddr); - preempt_enable(); - - pagefault_enable(); /* kunmap_atomic(addr, KM_USER1); */ - pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */ -} - -#endif /* CONFIG_PARISC_TMPALIAS */ diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 8fa3fbb3e4d..80e5dd24893 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -431,6 +431,7 @@ ENTRY_SAME(finit_module) ENTRY_SAME(sched_setattr) ENTRY_SAME(sched_getattr) /* 335 */ + ENTRY_COMP(utimes) /* Nothing yet */ diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h index 84fdf6857c3..a613d2c82fd 100644 --- a/arch/powerpc/include/asm/compat.h +++ b/arch/powerpc/include/asm/compat.h @@ -200,10 +200,11 @@ static inline void __user *arch_compat_alloc_user_space(long len) /* * We can't access below the stack pointer in the 32bit ABI and - * can access 288 bytes in the 64bit ABI + * can access 288 bytes in the 64bit big-endian ABI, + * or 512 bytes with the new ELFv2 little-endian ABI. */ if (!is_32bit_task()) - usp -= 288; + usp -= USER_REDZONE_SIZE; return (void __user *) (usp - len); } diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index e27e9ad6818..150866b2a3f 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -134,6 +134,7 @@ static inline int dma_supported(struct device *dev, u64 mask) } extern int dma_set_mask(struct device *dev, u64 dma_mask); +extern int __dma_set_mask(struct device *dev, u64 dma_mask); #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index 9e39ceb1d19..d4dd41fb951 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -172,10 +172,20 @@ struct eeh_ops { }; extern struct eeh_ops *eeh_ops; -extern int eeh_subsystem_enabled; +extern bool eeh_subsystem_enabled; extern raw_spinlock_t confirm_error_lock; extern int eeh_probe_mode; +static inline bool eeh_enabled(void) +{ + return eeh_subsystem_enabled; +} + +static inline void eeh_set_enable(bool mode) +{ + eeh_subsystem_enabled = mode; +} + #define EEH_PROBE_MODE_DEV (1<<0) /* From PCI device */ #define EEH_PROBE_MODE_DEVTREE (1<<1) /* From device tree */ @@ -246,7 +256,7 @@ void eeh_remove_device(struct pci_dev *); * If this macro yields TRUE, the caller relays to eeh_check_failure() * which does further tests out of line. 
*/ -#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_subsystem_enabled) +#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_enabled()) /* * Reads from a device which has been isolated by EEH will return @@ -257,6 +267,13 @@ void eeh_remove_device(struct pci_dev *); #else /* !CONFIG_EEH */ +static inline bool eeh_enabled(void) +{ + return false; +} + +static inline void eeh_set_enable(bool mode) { } + static inline int eeh_init(void) { return 0; diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index d750336b171..623f2971ce0 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -127,7 +127,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { #ifdef CONFIG_PPC64 - return __pte(pte_update(mm, addr, ptep, ~0UL, 1)); + return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1)); #else return __pte(pte_update(ptep, ~0UL, 0)); #endif diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index f7a8036579b..42632c7a2a4 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -77,6 +77,7 @@ struct iommu_table { #ifdef CONFIG_IOMMU_API struct iommu_group *it_group; #endif + void (*set_bypass)(struct iommu_table *tbl, bool enable); }; /* Pure 2^n version of get_order */ diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 40157e2ca69..ed82142a325 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -816,8 +816,8 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe, int64_t opal_pci_poll(uint64_t phb_id); int64_t opal_return_cpu(void); -int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val); -int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val); +int64_t opal_xscom_read(uint32_t gcid, uint64_t pcb_addr, __be64 *val); +int64_t opal_xscom_write(uint32_t gcid, uint64_t pcb_addr, uint64_t val); int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type, uint32_t addr, uint32_t data, uint32_t sz); diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index bc141c950b1..eb9261024f5 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h @@ -195,6 +195,7 @@ extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr, static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long clr, + unsigned long set, int huge) { #ifdef PTE_ATOMIC_UPDATES @@ -205,14 +206,15 @@ static inline unsigned long pte_update(struct mm_struct *mm, andi. %1,%0,%6\n\ bne- 1b \n\ andc %1,%0,%4 \n\ + or %1,%1,%7\n\ stdcx. 
%1,0,%3 \n\ bne- 1b" : "=&r" (old), "=&r" (tmp), "=m" (*ptep) - : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY) + : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set) : "cc" ); #else unsigned long old = pte_val(*ptep); - *ptep = __pte(old & ~clr); + *ptep = __pte((old & ~clr) | set); #endif /* huge pages use the old page table lock */ if (!huge) @@ -231,9 +233,9 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, { unsigned long old; - if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) + if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) return 0; - old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0); + old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); return (old & _PAGE_ACCESSED) != 0; } #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG @@ -252,7 +254,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, if ((pte_val(*ptep) & _PAGE_RW) == 0) return; - pte_update(mm, addr, ptep, _PAGE_RW, 0); + pte_update(mm, addr, ptep, _PAGE_RW, 0, 0); } static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, @@ -261,7 +263,7 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, if ((pte_val(*ptep) & _PAGE_RW) == 0) return; - pte_update(mm, addr, ptep, _PAGE_RW, 1); + pte_update(mm, addr, ptep, _PAGE_RW, 0, 1); } /* @@ -284,14 +286,14 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0); + unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0); return __pte(old); } static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t * ptep) { - pte_update(mm, addr, ptep, ~0UL, 0); + pte_update(mm, addr, ptep, ~0UL, 0, 0); } @@ -506,7 +508,9 @@ extern int pmdp_set_access_flags(struct vm_area_struct *vma, extern unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp, unsigned long clr); + pmd_t *pmdp, + unsigned long clr, + unsigned long set); static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp) @@ -515,7 +519,7 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) return 0; - old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED); + old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0); return ((old & _PAGE_ACCESSED) != 0); } @@ -542,7 +546,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, if ((pmd_val(*pmdp) & _PAGE_RW) == 0) return; - pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW); + pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0); } #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index f83b6f3e1b3..3ebb188c3ff 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -75,12 +75,34 @@ static inline pte_t pte_mknuma(pte_t pte) return pte; } +#define ptep_set_numa ptep_set_numa +static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + if ((pte_val(*ptep) & _PAGE_PRESENT) == 0) + VM_BUG_ON(1); + + pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0); + return; +} + #define pmd_numa pmd_numa static inline int pmd_numa(pmd_t pmd) { return pte_numa(pmd_pte(pmd)); } +#define pmdp_set_numa pmdp_set_numa +static inline void pmdp_set_numa(struct mm_struct *mm, 
unsigned long addr, + pmd_t *pmdp) +{ + if ((pmd_val(*pmdp) & _PAGE_PRESENT) == 0) + VM_BUG_ON(1); + + pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA); + return; +} + #define pmd_mknonnuma pmd_mknonnuma static inline pmd_t pmd_mknonnuma(pmd_t pmd) { diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index becc08e6a65..279b80f3bb2 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -28,11 +28,23 @@ #ifdef __powerpc64__ +/* + * Size of redzone that userspace is allowed to use below the stack + * pointer. This is 288 in the 64-bit big-endian ELF ABI, and 512 in + * the new ELFv2 little-endian ABI, so we allow the larger amount. + * + * For kernel code we allow a 288-byte redzone, in order to conserve + * kernel stack space; gcc currently only uses 288 bytes, and will + * hopefully allow explicit control of the redzone size in future. + */ +#define USER_REDZONE_SIZE 512 +#define KERNEL_REDZONE_SIZE 288 + #define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */ #define STACK_FRAME_LR_SAVE 2 /* Location of LR in stack frame */ #define STACK_FRAME_REGS_MARKER ASM_CONST(0x7265677368657265) #define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + \ - STACK_FRAME_OVERHEAD + 288) + STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE) #define STACK_FRAME_MARKER 12 /* Size of dummy stack frame allocated when calling signal handler. */ @@ -41,6 +53,8 @@ #else /* __powerpc64__ */ +#define USER_REDZONE_SIZE 0 +#define KERNEL_REDZONE_SIZE 0 #define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */ #define STACK_FRAME_LR_SAVE 1 /* Location of LR in stack frame */ #define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773) diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index 4ee06fe15de..d0e784e0ff4 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h @@ -8,6 +8,7 @@ #ifdef __powerpc64__ +extern char __start_interrupts[]; extern char __end_interrupts[]; extern char __prom_init_toc_start[]; @@ -21,6 +22,17 @@ static inline int in_kernel_text(unsigned long addr) return 0; } +static inline int overlaps_interrupt_vector_text(unsigned long start, + unsigned long end) +{ + unsigned long real_start, real_end; + real_start = __start_interrupts - _stext; + real_end = __end_interrupts - _stext; + + return start < (unsigned long)__va(real_end) && + (unsigned long)__va(real_start) < end; +} + static inline int overlaps_kernel_text(unsigned long start, unsigned long end) { return start < (unsigned long)__init_end && diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h index 0d9cecddf8a..c53f5f6d176 100644 --- a/arch/powerpc/include/asm/vdso.h +++ b/arch/powerpc/include/asm/vdso.h @@ -4,11 +4,11 @@ #ifdef __KERNEL__ /* Default link addresses for the vDSOs */ -#define VDSO32_LBASE 0x100000 -#define VDSO64_LBASE 0x100000 +#define VDSO32_LBASE 0x0 +#define VDSO64_LBASE 0x0 /* Default map addresses for 32bit vDSO */ -#define VDSO32_MBASE VDSO32_LBASE +#define VDSO32_MBASE 0x100000 #define VDSO_VERSION_STRING LINUX_2.6.15 diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 11c1d069d92..7a13f378ca2 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -98,17 +98,19 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, unsigned long offset, int userbuf) { void *vaddr; + phys_addr_t paddr; if (!csize) return 0; csize = min_t(size_t, csize, PAGE_SIZE); + 
paddr = pfn << PAGE_SHIFT; - if ((min_low_pfn < pfn) && (pfn < max_pfn)) { - vaddr = __va(pfn << PAGE_SHIFT); + if (memblock_is_region_memory(paddr, csize)) { + vaddr = __va(paddr); csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); } else { - vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0); + vaddr = __ioremap(paddr, PAGE_SIZE, 0); csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); iounmap(vaddr); } diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 8032b97ccdc..ee78f6e49d6 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -191,12 +191,10 @@ EXPORT_SYMBOL(dma_direct_ops); #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) -int dma_set_mask(struct device *dev, u64 dma_mask) +int __dma_set_mask(struct device *dev, u64 dma_mask) { struct dma_map_ops *dma_ops = get_dma_ops(dev); - if (ppc_md.dma_set_mask) - return ppc_md.dma_set_mask(dev, dma_mask); if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL)) return dma_ops->set_dma_mask(dev, dma_mask); if (!dev->dma_mask || !dma_supported(dev, dma_mask)) @@ -204,6 +202,12 @@ int dma_set_mask(struct device *dev, u64 dma_mask) *dev->dma_mask = dma_mask; return 0; } +int dma_set_mask(struct device *dev, u64 dma_mask) +{ + if (ppc_md.dma_set_mask) + return ppc_md.dma_set_mask(dev, dma_mask); + return __dma_set_mask(dev, dma_mask); +} EXPORT_SYMBOL(dma_set_mask); u64 dma_get_required_mask(struct device *dev) diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 148db72a8c4..e7b76a6bf15 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -28,6 +28,7 @@ #include <linux/pci.h> #include <linux/proc_fs.h> #include <linux/rbtree.h> +#include <linux/reboot.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <linux/export.h> @@ -89,7 +90,7 @@ /* Platform dependent EEH operations */ struct eeh_ops *eeh_ops = NULL; -int eeh_subsystem_enabled; +bool eeh_subsystem_enabled = false; EXPORT_SYMBOL(eeh_subsystem_enabled); /* @@ -364,7 +365,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev) eeh_stats.total_mmio_ffs++; - if (!eeh_subsystem_enabled) + if (!eeh_enabled()) return 0; if (!edev) { @@ -747,6 +748,17 @@ int __exit eeh_ops_unregister(const char *name) return -EEXIST; } +static int eeh_reboot_notifier(struct notifier_block *nb, + unsigned long action, void *unused) +{ + eeh_set_enable(false); + return NOTIFY_DONE; +} + +static struct notifier_block eeh_reboot_nb = { + .notifier_call = eeh_reboot_notifier, +}; + /** * eeh_init - EEH initialization * @@ -778,6 +790,14 @@ int eeh_init(void) if (machine_is(powernv) && cnt++ <= 0) return ret; + /* Register reboot notifier */ + ret = register_reboot_notifier(&eeh_reboot_nb); + if (ret) { + pr_warn("%s: Failed to register notifier (%d)\n", + __func__, ret); + return ret; + } + /* call platform initialization function */ if (!eeh_ops) { pr_warning("%s: Platform EEH operation not found\n", @@ -822,7 +842,7 @@ int eeh_init(void) return ret; } - if (eeh_subsystem_enabled) + if (eeh_enabled()) pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n"); else pr_warning("EEH: No capable adapters found\n"); @@ -897,7 +917,7 @@ void eeh_add_device_late(struct pci_dev *dev) struct device_node *dn; struct eeh_dev *edev; - if (!dev || !eeh_subsystem_enabled) + if (!dev || !eeh_enabled()) return; pr_debug("EEH: Adding device %s\n", pci_name(dev)); @@ -1005,7 +1025,7 @@ void eeh_remove_device(struct pci_dev *dev) { struct eeh_dev *edev; - if (!dev || !eeh_subsystem_enabled) + if (!dev || !eeh_enabled()) 
return; edev = pci_dev_to_eeh_dev(dev); @@ -1045,7 +1065,7 @@ void eeh_remove_device(struct pci_dev *dev) static int proc_eeh_show(struct seq_file *m, void *v) { - if (0 == eeh_subsystem_enabled) { + if (!eeh_enabled()) { seq_printf(m, "EEH Subsystem is globally disabled\n"); seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs); } else { diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 7bb30dca4e1..fdc679d309e 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -362,9 +362,13 @@ static void *eeh_rmv_device(void *data, void *userdata) */ if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)) return NULL; + driver = eeh_pcid_get(dev); - if (driver && driver->err_handler) - return NULL; + if (driver) { + eeh_pcid_put(dev); + if (driver->err_handler) + return NULL; + } /* Remove it from PCI subsystem */ pr_debug("EEH: Removing %s without EEH sensitive driver\n", diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index 9b27b293a92..b0ded97ee4e 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c @@ -74,6 +74,7 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new) */ static int test_24bit_addr(unsigned long ip, unsigned long addr) { + addr = ppc_function_entry((void *)addr); /* use the create_branch to verify that this offset can be branched */ return create_branch((unsigned int *)ip, addr, 0); diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index d773dd440a4..88e3ec6e1d9 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -1088,6 +1088,14 @@ int iommu_take_ownership(struct iommu_table *tbl) memset(tbl->it_map, 0xff, sz); iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size); + /* + * Disable iommu bypass, otherwise the user can DMA to all of + * our physical memory via the bypass window instead of just + * the pages that have been explicitly mapped into the iommu + */ + if (tbl->set_bypass) + tbl->set_bypass(tbl, false); + return 0; } EXPORT_SYMBOL_GPL(iommu_take_ownership); @@ -1102,6 +1110,10 @@ void iommu_release_ownership(struct iommu_table *tbl) /* Restore bit#0 set by iommu_init_table() */ if (tbl->it_offset == 0) set_bit(0, tbl->it_map); + + /* The kernel owns the device now, we can restore the iommu bypass */ + if (tbl->set_bypass) + tbl->set_bypass(tbl, true); } EXPORT_SYMBOL_GPL(iommu_release_ownership); diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 9729b23bfb0..1d0848bba04 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -559,8 +559,13 @@ void exc_lvl_ctx_init(void) #ifdef CONFIG_PPC64 cpu_nr = i; #else +#ifdef CONFIG_SMP cpu_nr = get_hard_smp_processor_id(i); +#else + cpu_nr = 0; +#endif +#endif + memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE); tp = critirq_ctx[cpu_nr]; tp->cpu = cpu_nr; diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index 75d4f7340da..015ae55c186 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -196,7 +196,9 @@ int overlaps_crashkernel(unsigned long start, unsigned long size) /* Values we need to export to the second kernel via the device tree.
*/ static phys_addr_t kernel_end; +static phys_addr_t crashk_base; static phys_addr_t crashk_size; +static unsigned long long mem_limit; static struct property kernel_end_prop = { .name = "linux,kernel-end", @@ -207,7 +209,7 @@ static struct property kernel_end_prop = { static struct property crashk_base_prop = { .name = "linux,crashkernel-base", .length = sizeof(phys_addr_t), - .value = &crashk_res.start, + .value = &crashk_base }; static struct property crashk_size_prop = { @@ -219,9 +221,11 @@ static struct property crashk_size_prop = { static struct property memory_limit_prop = { .name = "linux,memory-limit", .length = sizeof(unsigned long long), - .value = &memory_limit, + .value = &mem_limit, }; +#define cpu_to_be_ulong __PASTE(cpu_to_be, BITS_PER_LONG) + static void __init export_crashk_values(struct device_node *node) { struct property *prop; @@ -237,8 +241,9 @@ static void __init export_crashk_values(struct device_node *node) of_remove_property(node, prop); if (crashk_res.start != 0) { + crashk_base = cpu_to_be_ulong(crashk_res.start); of_add_property(node, &crashk_base_prop); - crashk_size = resource_size(&crashk_res); + crashk_size = cpu_to_be_ulong(resource_size(&crashk_res)); of_add_property(node, &crashk_size_prop); } @@ -246,6 +251,7 @@ static void __init export_crashk_values(struct device_node *node) * memory_limit is required by the kexec-tools to limit the * crash regions to the actual memory used. */ + mem_limit = cpu_to_be_ulong(memory_limit); of_update_property(node, &memory_limit_prop); } @@ -264,7 +270,7 @@ static int __init kexec_setup(void) of_remove_property(node, prop); /* information needed by userspace when using default_machine_kexec */ - kernel_end = __pa(_end); + kernel_end = cpu_to_be_ulong(__pa(_end)); of_add_property(node, &kernel_end_prop); export_crashk_values(node); diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index be4e6d648f6..59d229a2a3e 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -369,6 +369,7 @@ void default_machine_kexec(struct kimage *image) /* Values we need to export to the second kernel via the device tree.
*/ static unsigned long htab_base; +static unsigned long htab_size; static struct property htab_base_prop = { .name = "linux,htab-base", @@ -379,7 +380,7 @@ static struct property htab_base_prop = { static struct property htab_size_prop = { .name = "linux,htab-size", .length = sizeof(unsigned long), - .value = &htab_size_bytes, + .value = &htab_size, }; static int __init export_htab_values(void) @@ -403,8 +404,9 @@ static int __init export_htab_values(void) if (prop) of_remove_property(node, prop); - htab_base = __pa(htab_address); + htab_base = cpu_to_be64(__pa(htab_address)); of_add_property(node, &htab_base_prop); + htab_size = cpu_to_be64(htab_size_bytes); of_add_property(node, &htab_size_prop); of_node_put(node); diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 879f09620f8..7c6bb4b17b4 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -57,11 +57,14 @@ _GLOBAL(call_do_softirq) mtlr r0 blr +/* + * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp); + */ _GLOBAL(call_do_irq) mflr r0 stw r0,4(r1) lwz r10,THREAD+KSP_LIMIT(r2) - addi r11,r3,THREAD_INFO_GAP + addi r11,r4,THREAD_INFO_GAP stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) mr r1,r4 stw r10,8(r1) diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 8d4c247f173..af064d28b36 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1048,6 +1048,15 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) flush_altivec_to_thread(src); flush_vsx_to_thread(src); flush_spe_to_thread(src); + /* + * Flush TM state out so we can copy it. __switch_to_tm() does this + * flush but it removes the checkpointed state from the current CPU and + * transitions the CPU out of TM mode. Hence we need to call + * tm_recheckpoint_new_task() (on the same task) to restore the + * checkpointed state back and the TM mode. + */ + __switch_to_tm(src); + tm_recheckpoint_new_task(src); *dst = *src; diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S index b47a0e1ab00..d88736fbece 100644 --- a/arch/powerpc/kernel/reloc_64.S +++ b/arch/powerpc/kernel/reloc_64.S @@ -69,8 +69,8 @@ _GLOBAL(relocate) * R_PPC64_RELATIVE ones. 
*/ mtctr r8 -5: lwz r0,12(9) /* ELF64_R_TYPE(reloc->r_info) */ - cmpwi r0,R_PPC64_RELATIVE +5: ld r0,8(9) /* ELF64_R_TYPE(reloc->r_info) */ + cmpdi r0,R_PPC64_RELATIVE bne 6f ld r6,0(r9) /* reloc->r_offset */ ld r0,16(r9) /* reloc->r_addend */ @@ -81,6 +81,7 @@ _GLOBAL(relocate) 6: blr +.balign 8 p_dyn: .llong __dynamic_start - 0b p_rela: .llong __rela_dyn_start - 0b p_st: .llong _stext - 0b diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 2b0da27eaee..04cc4fcca78 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -247,7 +247,12 @@ static void __init exc_lvl_early_init(void) /* interrupt stacks must be in lowmem, we get that for free on ppc32 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */ for_each_possible_cpu(i) { +#ifdef CONFIG_SMP hw_cpu = get_hard_smp_processor_id(i); +#else + hw_cpu = 0; +#endif + critirq_ctx[hw_cpu] = (struct thread_info *) __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); #ifdef CONFIG_BOOKE diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index e35bf773df7..8d253c29649 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -65,8 +65,8 @@ struct rt_sigframe { struct siginfo __user *pinfo; void __user *puc; struct siginfo info; - /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */ - char abigap[288]; + /* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */ + char abigap[USER_REDZONE_SIZE]; } __attribute__ ((aligned (16))); static const char fmt32[] = KERN_INFO \ diff --git a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S index 79683d0393f..6ac107ac402 100644 --- a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S +++ b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S @@ -6,7 +6,7 @@ .globl vdso32_start, vdso32_end .balign PAGE_SIZE vdso32_start: - .incbin "arch/powerpc/kernel/vdso32/vdso32.so" + .incbin "arch/powerpc/kernel/vdso32/vdso32.so.dbg" .balign PAGE_SIZE vdso32_end: diff --git a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S index 8df9e246300..df60fca6a13 100644 --- a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S +++ b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S @@ -6,7 +6,7 @@ .globl vdso64_start, vdso64_end .balign PAGE_SIZE vdso64_start: - .incbin "arch/powerpc/kernel/vdso64/vdso64.so" + .incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg" .balign PAGE_SIZE vdso64_end: diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index e66d4ec04d9..818dce344e8 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1504,73 +1504,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1: addi r8,r8,16 .endr - /* Save DEC */ - mfspr r5,SPRN_DEC - mftb r6 - extsw r5,r5 - add r5,r5,r6 - std r5,VCPU_DEC_EXPIRES(r9) - -BEGIN_FTR_SECTION - b 8f -END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) - /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */ - mfmsr r8 - li r0, 1 - rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG - mtmsrd r8 - - /* Save POWER8-specific registers */ - mfspr r5, SPRN_IAMR - mfspr r6, SPRN_PSPB - mfspr r7, SPRN_FSCR - std r5, VCPU_IAMR(r9) - stw r6, VCPU_PSPB(r9) - std r7, VCPU_FSCR(r9) - mfspr r5, SPRN_IC - mfspr r6, SPRN_VTB - mfspr r7, SPRN_TAR - std r5, VCPU_IC(r9) - std r6, VCPU_VTB(r9) - std r7, VCPU_TAR(r9) -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM - mfspr r5, SPRN_TFHAR - mfspr r6, SPRN_TFIAR - mfspr r7, SPRN_TEXASR - std r5, VCPU_TFHAR(r9) - 
std r6, VCPU_TFIAR(r9) - std r7, VCPU_TEXASR(r9) -#endif - mfspr r8, SPRN_EBBHR - std r8, VCPU_EBBHR(r9) - mfspr r5, SPRN_EBBRR - mfspr r6, SPRN_BESCR - mfspr r7, SPRN_CSIGR - mfspr r8, SPRN_TACR - std r5, VCPU_EBBRR(r9) - std r6, VCPU_BESCR(r9) - std r7, VCPU_CSIGR(r9) - std r8, VCPU_TACR(r9) - mfspr r5, SPRN_TCSCR - mfspr r6, SPRN_ACOP - mfspr r7, SPRN_PID - mfspr r8, SPRN_WORT - std r5, VCPU_TCSCR(r9) - std r6, VCPU_ACOP(r9) - stw r7, VCPU_GUEST_PID(r9) - std r8, VCPU_WORT(r9) -8: - - /* Save and reset AMR and UAMOR before turning on the MMU */ -BEGIN_FTR_SECTION - mfspr r5,SPRN_AMR - mfspr r6,SPRN_UAMOR - std r5,VCPU_AMR(r9) - std r6,VCPU_UAMOR(r9) - li r6,0 - mtspr SPRN_AMR,r6 -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) - /* Unset guest mode */ li r0, KVM_GUEST_MODE_NONE stb r0, HSTATE_IN_GUEST(r13) @@ -2203,7 +2136,7 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif mfspr r6,SPRN_VRSAVE - stw r6,VCPU_VRSAVE(r3) + stw r6,VCPU_VRSAVE(r31) mtlr r30 mtmsrd r5 isync @@ -2240,7 +2173,7 @@ BEGIN_FTR_SECTION bl .load_vr_state END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif - lwz r7,VCPU_VRSAVE(r4) + lwz r7,VCPU_VRSAVE(r31) mtspr SPRN_VRSAVE,r7 mtlr r30 mr r4,r31 diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index de6881259ae..d766d6ee33f 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -207,6 +207,20 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend, if (overlaps_kernel_text(vaddr, vaddr + step)) tprot &= ~HPTE_R_N; + /* + * If relocatable, check if it overlaps interrupt vectors that + * are copied down to real 0. For relocatable kernel + * (e.g. kdump case) we copy interrupt vectors down to real + * address 0. Mark that region as executable. This is + * because on p8 system with relocation on exception feature + * enabled, exceptions are raised with MMU (IR=DR=1) ON. Hence + * in order to execute the interrupt handlers in virtual + * mode the vector region need to be marked as executable. + */ + if ((PHYSICAL_START > MEMORY_START) && + overlaps_interrupt_vector_text(vaddr, vaddr + step)) + tprot &= ~HPTE_R_N; + hash = hpt_hash(vpn, shift, ssize); hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index 65b7b65e870..62bf5e8e78d 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -510,7 +510,8 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, } unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp, unsigned long clr) + pmd_t *pmdp, unsigned long clr, + unsigned long set) { unsigned long old, tmp; @@ -526,14 +527,15 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, andi. %1,%0,%6\n\ bne- 1b \n\ andc %1,%0,%4 \n\ + or %1,%1,%7\n\ stdcx. 
%1,0,%3 \n\ bne- 1b" : "=&r" (old), "=&r" (tmp), "=m" (*pmdp) - : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY) + : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY), "r" (set) : "cc" ); #else old = pmd_val(*pmdp); - *pmdp = __pmd(old & ~clr); + *pmdp = __pmd((old & ~clr) | set); #endif if (old & _PAGE_HASHPTE) hpte_do_hugepage_flush(mm, addr, pmdp); @@ -708,7 +710,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { - pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT); + pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0); } /* @@ -835,7 +837,7 @@ pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long old; pgtable_t *pgtable_slot; - old = pmd_hugepage_update(mm, addr, pmdp, ~0UL); + old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0); old_pmd = __pmd(old); /* * We have pmd == none and we are holding page_table_lock. diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c index a770df2dae7..6c0b1f5f8d2 100644 --- a/arch/powerpc/mm/subpage-prot.c +++ b/arch/powerpc/mm/subpage-prot.c @@ -78,7 +78,7 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr, pte = pte_offset_map_lock(mm, pmd, addr, &ptl); arch_enter_lazy_mmu_mode(); for (; npages > 0; --npages) { - pte_update(mm, addr, pte, 0, 0); + pte_update(mm, addr, pte, 0, 0, 0); addr += PAGE_SIZE; ++pte; } diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 29b89e863d7..67cf22083f4 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -1147,6 +1147,9 @@ static void power_pmu_enable(struct pmu *pmu) mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]); mb(); + if (cpuhw->bhrb_users) + ppmu->config_bhrb(cpuhw->bhrb_filter); + write_mmcr0(cpuhw, mmcr0); /* @@ -1158,8 +1161,6 @@ static void power_pmu_enable(struct pmu *pmu) } out: - if (cpuhw->bhrb_users) - ppmu->config_bhrb(cpuhw->bhrb_filter); local_irq_restore(flags); } diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c index a3f7abd2f13..96cee20dcd3 100644 --- a/arch/powerpc/perf/power8-pmu.c +++ b/arch/powerpc/perf/power8-pmu.c @@ -25,6 +25,37 @@ #define PM_BRU_FIN 0x10068 #define PM_BR_MPRED_CMPL 0x400f6 +/* All L1 D cache load references counted at finish, gated by reject */ +#define PM_LD_REF_L1 0x100ee +/* Load Missed L1 */ +#define PM_LD_MISS_L1 0x3e054 +/* Store Missed L1 */ +#define PM_ST_MISS_L1 0x300f0 +/* L1 cache data prefetches */ +#define PM_L1_PREF 0x0d8b8 +/* Instruction fetches from L1 */ +#define PM_INST_FROM_L1 0x04080 +/* Demand iCache Miss */ +#define PM_L1_ICACHE_MISS 0x200fd +/* Instruction Demand sectors written into IL1 */ +#define PM_L1_DEMAND_WRITE 0x0408c +/* Instruction prefetch written into IL1 */ +#define PM_IC_PREF_WRITE 0x0408e +/* The data cache was reloaded from local core's L3 due to a demand load */ +#define PM_DATA_FROM_L3 0x4c042 +/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */ +#define PM_DATA_FROM_L3MISS 0x300fe +/* All successful D-side store dispatches for this thread */ +#define PM_L2_ST 0x17080 +/* All successful D-side store dispatches for this thread that were L2 Miss */ +#define PM_L2_ST_MISS 0x17082 +/* Total HW L3 prefetches(Load+store) */ +#define PM_L3_PREF_ALL 0x4e052 +/* Data PTEG reload */ +#define PM_DTLB_MISS 0x300fc +/* ITLB Reloaded */ +#define PM_ITLB_MISS 0x400fc + /* * Raw event encoding for POWER8: @@ -557,6 +588,8 @@ static int power8_generic_events[] = {
[PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN, [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL, + [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1, + [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1, }; static u64 power8_bhrb_filter_map(u64 branch_sample_type) @@ -596,6 +629,116 @@ static void power8_config_bhrb(u64 pmu_bhrb_filter) mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter)); } +#define C(x) PERF_COUNT_HW_CACHE_##x + +/* + * Table of generalized cache-related events. + * 0 means not supported, -1 means nonsensical, other values + * are event codes. + */ +static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { + [ C(L1D) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = PM_LD_REF_L1, + [ C(RESULT_MISS) ] = PM_LD_MISS_L1, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = PM_ST_MISS_L1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = PM_L1_PREF, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(L1I) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = PM_INST_FROM_L1, + [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(LL) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3, + [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = PM_L2_ST, + [ C(RESULT_MISS) ] = PM_L2_ST_MISS, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(DTLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = PM_DTLB_MISS, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(ITLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = PM_ITLB_MISS, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(BPU) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = PM_BRU_FIN, + [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, +}; + +#undef C + static struct power_pmu power8_pmu = { .name = "POWER8", .n_counter = 6, @@ -611,6 +754,7 @@ static struct power_pmu power8_pmu = { .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB, .n_generic = ARRAY_SIZE(power8_generic_events), .generic_events = power8_generic_events, + .cache_events = &power8_cache_events, .attr_groups = power8_pmu_attr_groups, .bhrb_nr = 32, }; diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c index 5ec1e47a0d7..e865d748179 100644 --- a/arch/powerpc/platforms/cell/ras.c +++ b/arch/powerpc/platforms/cell/ras.c @@ -123,7 +123,8 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order) area->nid = nid; area->order = order; - area->pages = 
alloc_pages_exact_node(area->nid, GFP_KERNEL|GFP_THISNODE, + area->pages = alloc_pages_exact_node(area->nid, + GFP_KERNEL|__GFP_THISNODE, area->order); if (!area->pages) { diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c index e1e71618b70..253fefe3d1a 100644 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c @@ -44,7 +44,8 @@ static int ioda_eeh_event(struct notifier_block *nb, /* We simply send special EEH event */ if ((changed_evts & OPAL_EVENT_PCI_ERROR) && - (events & OPAL_EVENT_PCI_ERROR)) + (events & OPAL_EVENT_PCI_ERROR) && + eeh_enabled()) eeh_send_failure_event(NULL); return 0; @@ -113,6 +114,7 @@ DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get, ioda_eeh_inbB_dbgfs_set, "0x%llx\n"); #endif /* CONFIG_DEBUG_FS */ + /** * ioda_eeh_post_init - Chip dependent post initialization * @hose: PCI controller @@ -220,6 +222,22 @@ static int ioda_eeh_set_option(struct eeh_pe *pe, int option) return ret; } +static void ioda_eeh_phb_diag(struct pci_controller *hose) +{ + struct pnv_phb *phb = hose->private_data; + long rc; + + rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob, + PNV_PCI_DIAG_BUF_SIZE); + if (rc != OPAL_SUCCESS) { + pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n", + __func__, hose->global_number, rc); + return; + } + + pnv_pci_dump_phb_diag_data(hose, phb->diag.blob); +} + /** * ioda_eeh_get_state - Retrieve the state of PE * @pe: EEH PE @@ -271,6 +289,9 @@ static int ioda_eeh_get_state(struct eeh_pe *pe) result |= EEH_STATE_DMA_ACTIVE; result |= EEH_STATE_MMIO_ENABLED; result |= EEH_STATE_DMA_ENABLED; + } else if (!(pe->state & EEH_PE_ISOLATED)) { + eeh_pe_state_mark(pe, EEH_PE_ISOLATED); + ioda_eeh_phb_diag(hose); } return result; @@ -314,6 +335,15 @@ static int ioda_eeh_get_state(struct eeh_pe *pe) __func__, fstate, hose->global_number, pe_no); } + /* Dump PHB diag-data for frozen PE */ + if (result != EEH_STATE_NOT_SUPPORT && + (result & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) != + (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE) && + !(pe->state & EEH_PE_ISOLATED)) { + eeh_pe_state_mark(pe, EEH_PE_ISOLATED); + ioda_eeh_phb_diag(hose); + } + return result; } @@ -489,8 +519,7 @@ static int ioda_eeh_bridge_reset(struct pci_controller *hose, static int ioda_eeh_reset(struct eeh_pe *pe, int option) { struct pci_controller *hose = pe->phb; - struct eeh_dev *edev; - struct pci_dev *dev; + struct pci_bus *bus; int ret; /* @@ -519,73 +548,17 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option) if (pe->type & EEH_PE_PHB) { ret = ioda_eeh_phb_reset(hose, option); } else { - if (pe->type & EEH_PE_DEVICE) { - /* - * If it's device PE, we didn't refer to the parent - * PCI bus yet. So we have to figure it out indirectly. - */ - edev = list_first_entry(&pe->edevs, - struct eeh_dev, list); - dev = eeh_dev_to_pci_dev(edev); - dev = dev->bus->self; - } else { - /* - * If it's bus PE, the parent PCI bus is already there - * and just pick it up. - */ - dev = pe->bus->self; - } - - /* - * Do reset based on the fact that the direct upstream bridge - * is root bridge (port) or not. 
- */ - if (dev->bus->number == 0) + bus = eeh_pe_bus_get(pe); + if (pci_is_root_bus(bus)) ret = ioda_eeh_root_reset(hose, option); else - ret = ioda_eeh_bridge_reset(hose, dev, option); + ret = ioda_eeh_bridge_reset(hose, bus->self, option); } return ret; } /** - * ioda_eeh_get_log - Retrieve error log - * @pe: EEH PE - * @severity: Severity level of the log - * @drv_log: buffer to store the log - * @len: space of the log buffer - * - * The function is used to retrieve error log from P7IOC. - */ -static int ioda_eeh_get_log(struct eeh_pe *pe, int severity, - char *drv_log, unsigned long len) -{ - s64 ret; - unsigned long flags; - struct pci_controller *hose = pe->phb; - struct pnv_phb *phb = hose->private_data; - - spin_lock_irqsave(&phb->lock, flags); - - ret = opal_pci_get_phb_diag_data2(phb->opal_id, - phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE); - if (ret) { - spin_unlock_irqrestore(&phb->lock, flags); - pr_warning("%s: Can't get log for PHB#%x-PE#%x (%lld)\n", - __func__, hose->global_number, pe->addr, ret); - return -EIO; - } - - /* The PHB diag-data is always indicative */ - pnv_pci_dump_phb_diag_data(hose, phb->diag.blob); - - spin_unlock_irqrestore(&phb->lock, flags); - - return 0; -} - -/** * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE * @pe: EEH PE * @@ -666,22 +639,6 @@ static void ioda_eeh_hub_diag(struct pci_controller *hose) } } -static void ioda_eeh_phb_diag(struct pci_controller *hose) -{ - struct pnv_phb *phb = hose->private_data; - long rc; - - rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob, - PNV_PCI_DIAG_BUF_SIZE); - if (rc != OPAL_SUCCESS) { - pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n", - __func__, hose->global_number, rc); - return; - } - - pnv_pci_dump_phb_diag_data(hose, phb->diag.blob); -} - static int ioda_eeh_get_phb_pe(struct pci_controller *hose, struct eeh_pe **pe) { @@ -855,6 +812,20 @@ static int ioda_eeh_next_error(struct eeh_pe **pe) } /* + * EEH core will try recover from fenced PHB or + * frozen PE. In the time for frozen PE, EEH core + * enable IO path for that before collecting logs, + * but it ruins the site. So we have to dump the + * log in advance here. + */ + if ((ret == EEH_NEXT_ERR_FROZEN_PE || + ret == EEH_NEXT_ERR_FENCED_PHB) && + !((*pe)->state & EEH_PE_ISOLATED)) { + eeh_pe_state_mark(*pe, EEH_PE_ISOLATED); + ioda_eeh_phb_diag(hose); + } + + /* * If we have no errors on the specific PHB or only * informative error there, we continue poking it. 
* Otherwise, we need actions to be taken by upper @@ -872,7 +843,6 @@ struct pnv_eeh_ops ioda_eeh_ops = { .set_option = ioda_eeh_set_option, .get_state = ioda_eeh_get_state, .reset = ioda_eeh_reset, - .get_log = ioda_eeh_get_log, .configure_bridge = ioda_eeh_configure_bridge, .next_error = ioda_eeh_next_error }; diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index a79fddc5e74..a59788e83b8 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -145,7 +145,7 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag) * Enable EEH explicitly so that we will do EEH check * while accessing I/O stuff */ - eeh_subsystem_enabled = 1; + eeh_set_enable(true); /* Save memory bars */ eeh_save_bars(edev); diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c index 4fbf276ac99..4cd2ea6c0db 100644 --- a/arch/powerpc/platforms/powernv/opal-xscom.c +++ b/arch/powerpc/platforms/powernv/opal-xscom.c @@ -71,11 +71,11 @@ static int opal_xscom_err_xlate(int64_t rc) } } -static u64 opal_scom_unmangle(u64 reg) +static u64 opal_scom_unmangle(u64 addr) { /* * XSCOM indirect addresses have the top bit set. Additionally - * the reset of the top 3 nibbles is always 0. + * the rest of the top 3 nibbles is always 0. * * Because the debugfs interface uses signed offsets and shifts * the address left by 3, we basically cannot use the top 4 bits @@ -86,10 +86,13 @@ static u64 opal_scom_unmangle(u64 reg) * conversion here. To leave room for further xscom address * expansion, we only clear out the top byte * + * For in-kernel use, we also support the real indirect bit, so + * we test for any of the top 5 bits + * */ - if (reg & (1ull << 59)) - reg = (reg & ~(0xffull << 56)) | (1ull << 63); - return reg; + if (addr & (0x1full << 59)) + addr = (addr & ~(0xffull << 56)) | (1ull << 63); + return addr; } static int opal_scom_read(scom_map_t map, u64 reg, u64 *value) @@ -98,8 +101,8 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value) int64_t rc; __be64 v; - reg = opal_scom_unmangle(reg); - rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v)); + reg = opal_scom_unmangle(m->addr + reg); + rc = opal_xscom_read(m->chip, reg, (__be64 *)__pa(&v)); *value = be64_to_cpu(v); return opal_xscom_err_xlate(rc); } @@ -109,8 +112,8 @@ static int opal_scom_write(scom_map_t map, u64 reg, u64 value) struct opal_scom_map *m = map; int64_t rc; - reg = opal_scom_unmangle(reg); - rc = opal_xscom_write(m->chip, m->addr + reg, value); + reg = opal_scom_unmangle(m->addr + reg); + rc = opal_xscom_write(m->chip, reg, value); return opal_xscom_err_xlate(rc); } diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 7d6dcc6d5fa..3b2b4fb3585 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -21,6 +21,7 @@ #include <linux/irq.h> #include <linux/io.h> #include <linux/msi.h> +#include <linux/memblock.h> #include <asm/sections.h> #include <asm/io.h> @@ -460,9 +461,39 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev return; pe = &phb->ioda.pe_array[pdn->pe_number]; + WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops); set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table); } +static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb, + struct pci_dev *pdev, u64 dma_mask) +{ + struct pci_dn *pdn = pci_get_pdn(pdev); + struct 
pnv_ioda_pe *pe; + uint64_t top; + bool bypass = false; + + if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) + return -ENODEV;; + + pe = &phb->ioda.pe_array[pdn->pe_number]; + if (pe->tce_bypass_enabled) { + top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1; + bypass = (dma_mask >= top); + } + + if (bypass) { + dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n"); + set_dma_ops(&pdev->dev, &dma_direct_ops); + set_dma_offset(&pdev->dev, pe->tce_bypass_base); + } else { + dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n"); + set_dma_ops(&pdev->dev, &dma_iommu_ops); + set_iommu_table_base(&pdev->dev, &pe->tce32_table); + } + return 0; +} + static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) { struct pci_dev *dev; @@ -657,6 +688,56 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb, __free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs)); } +static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable) +{ + struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe, + tce32_table); + uint16_t window_id = (pe->pe_number << 1 ) + 1; + int64_t rc; + + pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis"); + if (enable) { + phys_addr_t top = memblock_end_of_DRAM(); + + top = roundup_pow_of_two(top); + rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, + pe->pe_number, + window_id, + pe->tce_bypass_base, + top); + } else { + rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, + pe->pe_number, + window_id, + pe->tce_bypass_base, + 0); + + /* + * We might want to reset the DMA ops of all devices on + * this PE. However in theory, that shouldn't be necessary + * as this is used for VFIO/KVM pass-through and the device + * hasn't yet been returned to its kernel driver + */ + } + if (rc) + pe_err(pe, "OPAL error %lld configuring bypass window\n", rc); + else + pe->tce_bypass_enabled = enable; +} + +static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb, + struct pnv_ioda_pe *pe) +{ + /* TVE #1 is selected by PCI address bit 59 */ + pe->tce_bypass_base = 1ull << 59; + + /* Install set_bypass callback for VFIO */ + pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass; + + /* Enable bypass by default */ + pnv_pci_ioda2_set_bypass(&pe->tce32_table, true); +} + static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) { @@ -727,6 +808,8 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, else pnv_ioda_setup_bus_dma(pe, pe->pbus); + /* Also create a bypass window */ + pnv_pci_ioda2_setup_bypass_pe(phb, pe); return; fail: if (pe->tce32_seg >= 0) @@ -1286,6 +1369,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, /* Setup TCEs */ phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup; + phb->dma_set_mask = pnv_pci_ioda_dma_set_mask; /* Setup shutdown function for kexec */ phb->shutdown = pnv_pci_ioda_shutdown; diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index b555ebc57ef..8518817dcdf 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -134,57 +134,72 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose, pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n\n", hose->global_number, common->version); - pr_info(" brdgCtl: %08x\n", data->brdgCtl); - - pr_info(" portStatusReg: %08x\n", data->portStatusReg); - pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus); - pr_info(" busAgentStatus: %08x\n", data->busAgentStatus); - - pr_info(" deviceStatus: %08x\n", 
data->deviceStatus); - pr_info(" slotStatus: %08x\n", data->slotStatus); - pr_info(" linkStatus: %08x\n", data->linkStatus); - pr_info(" devCmdStatus: %08x\n", data->devCmdStatus); - pr_info(" devSecStatus: %08x\n", data->devSecStatus); - - pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus); - pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus); - pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus); - pr_info(" tlpHdr1: %08x\n", data->tlpHdr1); - pr_info(" tlpHdr2: %08x\n", data->tlpHdr2); - pr_info(" tlpHdr3: %08x\n", data->tlpHdr3); - pr_info(" tlpHdr4: %08x\n", data->tlpHdr4); - pr_info(" sourceId: %08x\n", data->sourceId); - pr_info(" errorClass: %016llx\n", data->errorClass); - pr_info(" correlator: %016llx\n", data->correlator); - pr_info(" p7iocPlssr: %016llx\n", data->p7iocPlssr); - pr_info(" p7iocCsr: %016llx\n", data->p7iocCsr); - pr_info(" lemFir: %016llx\n", data->lemFir); - pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask); - pr_info(" lemWOF: %016llx\n", data->lemWOF); - pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus); - pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus); - pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0); - pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1); - pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus); - pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus); - pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0); - pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1); - pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus); - pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus); - pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0); - pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1); - pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus); - pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus); - pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0); - pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1); + if (data->brdgCtl) + pr_info(" brdgCtl: %08x\n", + data->brdgCtl); + if (data->portStatusReg || data->rootCmplxStatus || + data->busAgentStatus) + pr_info(" UtlSts: %08x %08x %08x\n", + data->portStatusReg, data->rootCmplxStatus, + data->busAgentStatus); + if (data->deviceStatus || data->slotStatus || + data->linkStatus || data->devCmdStatus || + data->devSecStatus) + pr_info(" RootSts: %08x %08x %08x %08x %08x\n", + data->deviceStatus, data->slotStatus, + data->linkStatus, data->devCmdStatus, + data->devSecStatus); + if (data->rootErrorStatus || data->uncorrErrorStatus || + data->corrErrorStatus) + pr_info(" RootErrSts: %08x %08x %08x\n", + data->rootErrorStatus, data->uncorrErrorStatus, + data->corrErrorStatus); + if (data->tlpHdr1 || data->tlpHdr2 || + data->tlpHdr3 || data->tlpHdr4) + pr_info(" RootErrLog: %08x %08x %08x %08x\n", + data->tlpHdr1, data->tlpHdr2, + data->tlpHdr3, data->tlpHdr4); + if (data->sourceId || data->errorClass || + data->correlator) + pr_info(" RootErrLog1: %08x %016llx %016llx\n", + data->sourceId, data->errorClass, + data->correlator); + if (data->p7iocPlssr || data->p7iocCsr) + pr_info(" PhbSts: %016llx %016llx\n", + data->p7iocPlssr, data->p7iocCsr); + if (data->lemFir || data->lemErrorMask || + data->lemWOF) + pr_info(" Lem: %016llx %016llx %016llx\n", + data->lemFir, data->lemErrorMask, + data->lemWOF); + if (data->phbErrorStatus || data->phbFirstErrorStatus || + data->phbErrorLog0 || data->phbErrorLog1) + pr_info(" PhbErr: 
%016llx %016llx %016llx %016llx\n", + data->phbErrorStatus, data->phbFirstErrorStatus, + data->phbErrorLog0, data->phbErrorLog1); + if (data->mmioErrorStatus || data->mmioFirstErrorStatus || + data->mmioErrorLog0 || data->mmioErrorLog1) + pr_info(" OutErr: %016llx %016llx %016llx %016llx\n", + data->mmioErrorStatus, data->mmioFirstErrorStatus, + data->mmioErrorLog0, data->mmioErrorLog1); + if (data->dma0ErrorStatus || data->dma0FirstErrorStatus || + data->dma0ErrorLog0 || data->dma0ErrorLog1) + pr_info(" InAErr: %016llx %016llx %016llx %016llx\n", + data->dma0ErrorStatus, data->dma0FirstErrorStatus, + data->dma0ErrorLog0, data->dma0ErrorLog1); + if (data->dma1ErrorStatus || data->dma1FirstErrorStatus || + data->dma1ErrorLog0 || data->dma1ErrorLog1) + pr_info(" InBErr: %016llx %016llx %016llx %016llx\n", + data->dma1ErrorStatus, data->dma1FirstErrorStatus, + data->dma1ErrorLog0, data->dma1ErrorLog1); for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) { if ((data->pestA[i] >> 63) == 0 && (data->pestB[i] >> 63) == 0) continue; - pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]); - pr_info(" PESTB: %016llx\n", data->pestB[i]); + pr_info(" PE[%3d] A/B: %016llx %016llx\n", + i, data->pestA[i], data->pestB[i]); } } @@ -197,62 +212,77 @@ static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose, data = (struct OpalIoPhb3ErrorData*)common; pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n\n", hose->global_number, common->version); - - pr_info(" brdgCtl: %08x\n", data->brdgCtl); - - pr_info(" portStatusReg: %08x\n", data->portStatusReg); - pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus); - pr_info(" busAgentStatus: %08x\n", data->busAgentStatus); - - pr_info(" deviceStatus: %08x\n", data->deviceStatus); - pr_info(" slotStatus: %08x\n", data->slotStatus); - pr_info(" linkStatus: %08x\n", data->linkStatus); - pr_info(" devCmdStatus: %08x\n", data->devCmdStatus); - pr_info(" devSecStatus: %08x\n", data->devSecStatus); - - pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus); - pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus); - pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus); - pr_info(" tlpHdr1: %08x\n", data->tlpHdr1); - pr_info(" tlpHdr2: %08x\n", data->tlpHdr2); - pr_info(" tlpHdr3: %08x\n", data->tlpHdr3); - pr_info(" tlpHdr4: %08x\n", data->tlpHdr4); - pr_info(" sourceId: %08x\n", data->sourceId); - pr_info(" errorClass: %016llx\n", data->errorClass); - pr_info(" correlator: %016llx\n", data->correlator); - - pr_info(" nFir: %016llx\n", data->nFir); - pr_info(" nFirMask: %016llx\n", data->nFirMask); - pr_info(" nFirWOF: %016llx\n", data->nFirWOF); - pr_info(" PhbPlssr: %016llx\n", data->phbPlssr); - pr_info(" PhbCsr: %016llx\n", data->phbCsr); - pr_info(" lemFir: %016llx\n", data->lemFir); - pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask); - pr_info(" lemWOF: %016llx\n", data->lemWOF); - pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus); - pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus); - pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0); - pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1); - pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus); - pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus); - pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0); - pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1); - pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus); - pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus); - 
pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0); - pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1); - pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus); - pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus); - pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0); - pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1); + if (data->brdgCtl) + pr_info(" brdgCtl: %08x\n", + data->brdgCtl); + if (data->portStatusReg || data->rootCmplxStatus || + data->busAgentStatus) + pr_info(" UtlSts: %08x %08x %08x\n", + data->portStatusReg, data->rootCmplxStatus, + data->busAgentStatus); + if (data->deviceStatus || data->slotStatus || + data->linkStatus || data->devCmdStatus || + data->devSecStatus) + pr_info(" RootSts: %08x %08x %08x %08x %08x\n", + data->deviceStatus, data->slotStatus, + data->linkStatus, data->devCmdStatus, + data->devSecStatus); + if (data->rootErrorStatus || data->uncorrErrorStatus || + data->corrErrorStatus) + pr_info(" RootErrSts: %08x %08x %08x\n", + data->rootErrorStatus, data->uncorrErrorStatus, + data->corrErrorStatus); + if (data->tlpHdr1 || data->tlpHdr2 || + data->tlpHdr3 || data->tlpHdr4) + pr_info(" RootErrLog: %08x %08x %08x %08x\n", + data->tlpHdr1, data->tlpHdr2, + data->tlpHdr3, data->tlpHdr4); + if (data->sourceId || data->errorClass || + data->correlator) + pr_info(" RootErrLog1: %08x %016llx %016llx\n", + data->sourceId, data->errorClass, + data->correlator); + if (data->nFir || data->nFirMask || + data->nFirWOF) + pr_info(" nFir: %016llx %016llx %016llx\n", + data->nFir, data->nFirMask, + data->nFirWOF); + if (data->phbPlssr || data->phbCsr) + pr_info(" PhbSts: %016llx %016llx\n", + data->phbPlssr, data->phbCsr); + if (data->lemFir || data->lemErrorMask || + data->lemWOF) + pr_info(" Lem: %016llx %016llx %016llx\n", + data->lemFir, data->lemErrorMask, + data->lemWOF); + if (data->phbErrorStatus || data->phbFirstErrorStatus || + data->phbErrorLog0 || data->phbErrorLog1) + pr_info(" PhbErr: %016llx %016llx %016llx %016llx\n", + data->phbErrorStatus, data->phbFirstErrorStatus, + data->phbErrorLog0, data->phbErrorLog1); + if (data->mmioErrorStatus || data->mmioFirstErrorStatus || + data->mmioErrorLog0 || data->mmioErrorLog1) + pr_info(" OutErr: %016llx %016llx %016llx %016llx\n", + data->mmioErrorStatus, data->mmioFirstErrorStatus, + data->mmioErrorLog0, data->mmioErrorLog1); + if (data->dma0ErrorStatus || data->dma0FirstErrorStatus || + data->dma0ErrorLog0 || data->dma0ErrorLog1) + pr_info(" InAErr: %016llx %016llx %016llx %016llx\n", + data->dma0ErrorStatus, data->dma0FirstErrorStatus, + data->dma0ErrorLog0, data->dma0ErrorLog1); + if (data->dma1ErrorStatus || data->dma1FirstErrorStatus || + data->dma1ErrorLog0 || data->dma1ErrorLog1) + pr_info(" InBErr: %016llx %016llx %016llx %016llx\n", + data->dma1ErrorStatus, data->dma1FirstErrorStatus, + data->dma1ErrorLog0, data->dma1ErrorLog1); for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) { if ((data->pestA[i] >> 63) == 0 && (data->pestB[i] >> 63) == 0) continue; - pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]); - pr_info(" PESTB: %016llx\n", data->pestB[i]); + pr_info(" PE[%3d] A/B: %016llx %016llx\n", + i, data->pestA[i], data->pestB[i]); } } @@ -634,6 +664,16 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev) pnv_pci_dma_fallback_setup(hose, pdev); } +int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) +{ + struct pci_controller *hose = pci_bus_to_host(pdev->bus); + struct pnv_phb *phb = hose->private_data; + + if (phb && 
phb->dma_set_mask) + return phb->dma_set_mask(phb, pdev, dma_mask); + return __dma_set_mask(&pdev->dev, dma_mask); +} + void pnv_pci_shutdown(void) { struct pci_controller *hose; diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 13f1942a9a5..cde16944277 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -54,7 +54,9 @@ struct pnv_ioda_pe { struct iommu_table tce32_table; phys_addr_t tce_inval_reg_phys; - /* XXX TODO: Add support for additional 64-bit iommus */ + /* 64-bit TCE bypass region */ + bool tce_bypass_enabled; + uint64_t tce_bypass_base; /* MSIs. MVE index is identical for for 32 and 64 bit MSI * and -1 if not supported. (It's actually identical to the @@ -113,6 +115,8 @@ struct pnv_phb { unsigned int hwirq, unsigned int virq, unsigned int is_64, struct msi_msg *msg); void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev); + int (*dma_set_mask)(struct pnv_phb *phb, struct pci_dev *pdev, + u64 dma_mask); void (*fixup_phb)(struct pci_controller *hose); u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn); void (*shutdown)(struct pnv_phb *phb); diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h index de6819be1f9..0051e108ef0 100644 --- a/arch/powerpc/platforms/powernv/powernv.h +++ b/arch/powerpc/platforms/powernv/powernv.h @@ -7,12 +7,20 @@ extern void pnv_smp_init(void); static inline void pnv_smp_init(void) { } #endif +struct pci_dev; + #ifdef CONFIG_PCI extern void pnv_pci_init(void); extern void pnv_pci_shutdown(void); +extern int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask); #else static inline void pnv_pci_init(void) { } static inline void pnv_pci_shutdown(void) { } + +static inline int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) +{ + return -ENODEV; +} #endif extern void pnv_lpc_init(void); diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 21166f65c97..110f4fbd319 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -27,6 +27,7 @@ #include <linux/interrupt.h> #include <linux/bug.h> #include <linux/cpuidle.h> +#include <linux/pci.h> #include <asm/machdep.h> #include <asm/firmware.h> @@ -141,6 +142,13 @@ static void pnv_progress(char *s, unsigned short hex) { } +static int pnv_dma_set_mask(struct device *dev, u64 dma_mask) +{ + if (dev_is_pci(dev)) + return pnv_pci_dma_set_mask(to_pci_dev(dev), dma_mask); + return __dma_set_mask(dev, dma_mask); +} + static void pnv_shutdown(void) { /* Let the PCI code clear up IODA tables */ @@ -238,6 +246,7 @@ define_machine(powernv) { .machine_shutdown = pnv_shutdown, .power_save = powernv_idle, .calibrate_decr = generic_calibrate_decr, + .dma_set_mask = pnv_dma_set_mask, #ifdef CONFIG_KEXEC .kexec_cpu_down = pnv_kexec_cpu_down, #endif diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index 37300f6ee24..80b1d57c306 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -20,6 +20,7 @@ config PPC_PSERIES select PPC_DOORBELL select HAVE_CONTEXT_TRACKING select HOTPLUG_CPU if SMP + select ARCH_RANDOM default y config PPC_SPLPAR diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 9ef3cc8ebc1..8a8f0472d98 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -265,7 
+265,7 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag) enable = 1; if (enable) { - eeh_subsystem_enabled = 1; + eeh_set_enable(true); eeh_add_to_parent_pe(edev); pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n", diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 82789e79e53..0ea99e3d481 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -35,12 +35,7 @@ #include "offline_states.h" /* This version can't take the spinlock, because it never returns */ -static struct rtas_args rtas_stop_self_args = { - .token = RTAS_UNKNOWN_SERVICE, - .nargs = 0, - .nret = 1, - .rets = &rtas_stop_self_args.args[0], -}; +static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE; static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) = CPU_STATE_OFFLINE; @@ -93,15 +88,20 @@ void set_default_offline_state(int cpu) static void rtas_stop_self(void) { - struct rtas_args *args = &rtas_stop_self_args; + struct rtas_args args = { + .token = cpu_to_be32(rtas_stop_self_token), + .nargs = 0, + .nret = 1, + .rets = &args.args[0], + }; local_irq_disable(); - BUG_ON(args->token == RTAS_UNKNOWN_SERVICE); + BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE); printk("cpu %u (hwid %u) Ready to die...\n", smp_processor_id(), hard_smp_processor_id()); - enter_rtas(__pa(args)); + enter_rtas(__pa(&args)); panic("Alas, I survived.\n"); } @@ -392,10 +392,10 @@ static int __init pseries_cpu_hotplug_init(void) } } - rtas_stop_self_args.token = rtas_token("stop-self"); + rtas_stop_self_token = rtas_token("stop-self"); qcss_tok = rtas_token("query-cpu-stopped-state"); - if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE || + if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE || qcss_tok == RTAS_UNKNOWN_SERVICE) { printk(KERN_INFO "CPU Hotplug not supported by firmware " "- disabling.\n"); diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 70670a2d9cf..c413ec158ff 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c @@ -113,7 +113,8 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) { struct device_node *dn, *pdn; struct pci_bus *bus; - const __be32 *pcie_link_speed_stats; + u32 pcie_link_speed_stats[2]; + int rc; bus = bridge->bus; @@ -122,38 +123,45 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) return 0; for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { - pcie_link_speed_stats = of_get_property(pdn, - "ibm,pcie-link-speed-stats", NULL); - if (pcie_link_speed_stats) + rc = of_property_read_u32_array(pdn, + "ibm,pcie-link-speed-stats", + &pcie_link_speed_stats[0], 2); + if (!rc) break; } of_node_put(pdn); - if (!pcie_link_speed_stats) { + if (rc) { pr_err("no ibm,pcie-link-speed-stats property\n"); return 0; } - switch (be32_to_cpup(pcie_link_speed_stats)) { + switch (pcie_link_speed_stats[0]) { case 0x01: bus->max_bus_speed = PCIE_SPEED_2_5GT; break; case 0x02: bus->max_bus_speed = PCIE_SPEED_5_0GT; break; + case 0x04: + bus->max_bus_speed = PCIE_SPEED_8_0GT; + break; default: bus->max_bus_speed = PCI_SPEED_UNKNOWN; break; } - switch (be32_to_cpup(pcie_link_speed_stats)) { + switch (pcie_link_speed_stats[1]) { case 0x01: bus->cur_bus_speed = PCIE_SPEED_2_5GT; break; case 0x02: bus->cur_bus_speed = PCIE_SPEED_5_0GT; break; + case 0x04: + bus->cur_bus_speed = PCIE_SPEED_8_0GT; + break; default: bus->cur_bus_speed = PCI_SPEED_UNKNOWN; break; 
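[Editor's note] The pseries/pci.c hunk above swaps the raw of_get_property()/be32_to_cpup() parsing of "ibm,pcie-link-speed-stats" for of_property_read_u32_array(), which returns host-endian values and fails cleanly when the property is absent or too short, and it adds the 0x04 encoding so PCIe gen3 (8.0 GT/s) links are no longer reported as unknown. The standalone C sketch below only illustrates the resulting value-to-speed mapping under those assumptions; the function name, strings, and sample values are invented for the example and are not part of the kernel change.

#include <stdio.h>

/* Illustrative stand-in for the kernel's PCIE_SPEED_* constants. */
static const char *link_speed_str(unsigned int encoded)
{
	switch (encoded) {
	case 0x01:
		return "2.5 GT/s";	/* PCIE_SPEED_2_5GT */
	case 0x02:
		return "5.0 GT/s";	/* PCIE_SPEED_5_0GT */
	case 0x04:
		return "8.0 GT/s";	/* PCIE_SPEED_8_0GT, new case in the hunk above */
	default:
		return "unknown";	/* PCI_SPEED_UNKNOWN */
	}
}

int main(void)
{
	/*
	 * "ibm,pcie-link-speed-stats" is read as a pair of cells:
	 * [0] = maximum link speed, [1] = current link speed.
	 * The values here are sample data, not firmware output.
	 */
	unsigned int stats[2] = { 0x04, 0x02 };

	printf("max: %s, cur: %s\n",
	       link_speed_str(stats[0]), link_speed_str(stats[1]));
	return 0;
}
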
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 8e639d7cbda..972df0ffd4d 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -430,8 +430,7 @@ static void pSeries_machine_kexec(struct kimage *image) { long rc; - if (firmware_has_feature(FW_FEATURE_SET_MODE) && - (image->type != KEXEC_TYPE_CRASH)) { + if (firmware_has_feature(FW_FEATURE_SET_MODE)) { rc = pSeries_disable_reloc_on_exc(); if (rc != H_SUCCESS) pr_warning("Warning: Failed to disable relocation on " diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 0e166ed4cd1..8209744b282 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -886,25 +886,25 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type) /* Default: read HW settings */ if (flow_type == IRQ_TYPE_DEFAULT) { - switch(vold & (MPIC_INFO(VECPRI_POLARITY_MASK) | - MPIC_INFO(VECPRI_SENSE_MASK))) { - case MPIC_INFO(VECPRI_SENSE_EDGE) | - MPIC_INFO(VECPRI_POLARITY_POSITIVE): - flow_type = IRQ_TYPE_EDGE_RISING; - break; - case MPIC_INFO(VECPRI_SENSE_EDGE) | - MPIC_INFO(VECPRI_POLARITY_NEGATIVE): - flow_type = IRQ_TYPE_EDGE_FALLING; - break; - case MPIC_INFO(VECPRI_SENSE_LEVEL) | - MPIC_INFO(VECPRI_POLARITY_POSITIVE): - flow_type = IRQ_TYPE_LEVEL_HIGH; - break; - case MPIC_INFO(VECPRI_SENSE_LEVEL) | - MPIC_INFO(VECPRI_POLARITY_NEGATIVE): - flow_type = IRQ_TYPE_LEVEL_LOW; - break; - } + int vold_ps; + + vold_ps = vold & (MPIC_INFO(VECPRI_POLARITY_MASK) | + MPIC_INFO(VECPRI_SENSE_MASK)); + + if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) | + MPIC_INFO(VECPRI_POLARITY_POSITIVE))) + flow_type = IRQ_TYPE_EDGE_RISING; + else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) | + MPIC_INFO(VECPRI_POLARITY_NEGATIVE))) + flow_type = IRQ_TYPE_EDGE_FALLING; + else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) | + MPIC_INFO(VECPRI_POLARITY_POSITIVE))) + flow_type = IRQ_TYPE_LEVEL_HIGH; + else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) | + MPIC_INFO(VECPRI_POLARITY_NEGATIVE))) + flow_type = IRQ_TYPE_LEVEL_LOW; + else + WARN_ONCE(1, "mpic: unknown IRQ type %d\n", vold); } /* Apply to irq desc */ diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index a90731b3d44..b07909850f7 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -309,16 +309,23 @@ static void get_output_lock(void) if (xmon_speaker == me) return; + for (;;) { - if (xmon_speaker == 0) { - last_speaker = cmpxchg(&xmon_speaker, 0, me); - if (last_speaker == 0) - return; - } - timeout = 10000000; + last_speaker = cmpxchg(&xmon_speaker, 0, me); + if (last_speaker == 0) + return; + + /* + * Wait a full second for the lock, we might be on a slow + * console, but check every 100us. 
+ */ + timeout = 10000; while (xmon_speaker == last_speaker) { - if (--timeout > 0) + if (--timeout > 0) { + udelay(100); continue; + } + /* hostile takeover */ prev = cmpxchg(&xmon_speaker, last_speaker, me); if (prev == last_speaker) @@ -397,7 +404,6 @@ static int xmon_core(struct pt_regs *regs, int fromipi) } xmon_fault_jmp[cpu] = recurse_jmp; - cpumask_set_cpu(cpu, &cpus_in_xmon); bp = NULL; if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) @@ -419,6 +425,8 @@ static int xmon_core(struct pt_regs *regs, int fromipi) release_output_lock(); } + cpumask_set_cpu(cpu, &cpus_in_xmon); + waiting: secondary = 1; while (secondary && !xmon_gate) { diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 4c4a1cef520..47c8630c93c 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -529,6 +529,7 @@ static int __init appldata_init(void) { int rc; + init_virt_timer(&appldata_timer); appldata_timer.function = appldata_timer_function; appldata_timer.data = (unsigned long) &appldata_work; diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index b3feabd39f3..cf3c0089bef 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -25,6 +25,7 @@ #include <linux/err.h> #include <linux/module.h> #include <linux/init.h> +#include <linux/spinlock.h> #include "crypt_s390.h" #define AES_KEYLEN_128 1 @@ -32,6 +33,7 @@ #define AES_KEYLEN_256 4 static u8 *ctrblk; +static DEFINE_SPINLOCK(ctrblk_lock); static char keylen_flag; struct s390_aes_ctx { @@ -758,43 +760,67 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, return aes_set_key(tfm, in_key, key_len); } +static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes) +{ + unsigned int i, n; + + /* only use complete blocks, max. PAGE_SIZE */ + n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1); + for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) { + memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE, + AES_BLOCK_SIZE); + crypto_inc(ctrptr + i, AES_BLOCK_SIZE); + } + return n; +} + static int ctr_aes_crypt(struct blkcipher_desc *desc, long func, struct s390_aes_ctx *sctx, struct blkcipher_walk *walk) { int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE); - unsigned int i, n, nbytes; - u8 buf[AES_BLOCK_SIZE]; - u8 *out, *in; + unsigned int n, nbytes; + u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE]; + u8 *out, *in, *ctrptr = ctrbuf; if (!walk->nbytes) return ret; - memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE); + if (spin_trylock(&ctrblk_lock)) + ctrptr = ctrblk; + + memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE); while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { out = walk->dst.virt.addr; in = walk->src.virt.addr; while (nbytes >= AES_BLOCK_SIZE) { - /* only use complete blocks, max. PAGE_SIZE */ - n = (nbytes > PAGE_SIZE) ? 
PAGE_SIZE : - nbytes & ~(AES_BLOCK_SIZE - 1); - for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) { - memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE, - AES_BLOCK_SIZE); - crypto_inc(ctrblk + i, AES_BLOCK_SIZE); - } - ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk); - if (ret < 0 || ret != n) + if (ctrptr == ctrblk) + n = __ctrblk_init(ctrptr, nbytes); + else + n = AES_BLOCK_SIZE; + ret = crypt_s390_kmctr(func, sctx->key, out, in, + n, ctrptr); + if (ret < 0 || ret != n) { + if (ctrptr == ctrblk) + spin_unlock(&ctrblk_lock); return -EIO; + } if (n > AES_BLOCK_SIZE) - memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE, + memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE, AES_BLOCK_SIZE); - crypto_inc(ctrblk, AES_BLOCK_SIZE); + crypto_inc(ctrptr, AES_BLOCK_SIZE); out += n; in += n; nbytes -= n; } ret = blkcipher_walk_done(desc, walk, nbytes); } + if (ctrptr == ctrblk) { + if (nbytes) + memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE); + else + memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE); + spin_unlock(&ctrblk_lock); + } /* * final block may be < AES_BLOCK_SIZE, copy only nbytes */ @@ -802,14 +828,15 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func, out = walk->dst.virt.addr; in = walk->src.virt.addr; ret = crypt_s390_kmctr(func, sctx->key, buf, in, - AES_BLOCK_SIZE, ctrblk); + AES_BLOCK_SIZE, ctrbuf); if (ret < 0 || ret != AES_BLOCK_SIZE) return -EIO; memcpy(out, buf, nbytes); - crypto_inc(ctrblk, AES_BLOCK_SIZE); + crypto_inc(ctrbuf, AES_BLOCK_SIZE); ret = blkcipher_walk_done(desc, walk, 0); + memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE); } - memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE); + return ret; } diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c index 200f2a1b599..0a5aac8a941 100644 --- a/arch/s390/crypto/des_s390.c +++ b/arch/s390/crypto/des_s390.c @@ -25,6 +25,7 @@ #define DES3_KEY_SIZE (3 * DES_KEY_SIZE) static u8 *ctrblk; +static DEFINE_SPINLOCK(ctrblk_lock); struct s390_des_ctx { u8 iv[DES_BLOCK_SIZE]; @@ -105,29 +106,35 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func, } static int cbc_desall_crypt(struct blkcipher_desc *desc, long func, - u8 *iv, struct blkcipher_walk *walk) + struct blkcipher_walk *walk) { + struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); int ret = blkcipher_walk_virt(desc, walk); unsigned int nbytes = walk->nbytes; + struct { + u8 iv[DES_BLOCK_SIZE]; + u8 key[DES3_KEY_SIZE]; + } param; if (!nbytes) goto out; - memcpy(iv, walk->iv, DES_BLOCK_SIZE); + memcpy(param.iv, walk->iv, DES_BLOCK_SIZE); + memcpy(param.key, ctx->key, DES3_KEY_SIZE); do { /* only use complete blocks */ unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1); u8 *out = walk->dst.virt.addr; u8 *in = walk->src.virt.addr; - ret = crypt_s390_kmc(func, iv, out, in, n); + ret = crypt_s390_kmc(func, ¶m, out, in, n); if (ret < 0 || ret != n) return -EIO; nbytes &= DES_BLOCK_SIZE - 1; ret = blkcipher_walk_done(desc, walk, nbytes); } while ((nbytes = walk->nbytes)); - memcpy(walk->iv, iv, DES_BLOCK_SIZE); + memcpy(walk->iv, param.iv, DES_BLOCK_SIZE); out: return ret; @@ -179,22 +186,20 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk); + return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk); } static int cbc_des_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct 
scatterlist *src, unsigned int nbytes) { - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk); + return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk); } static struct crypto_alg cbc_des_alg = { @@ -327,22 +332,20 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk); + return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk); } static int cbc_des3_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk); + return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk); } static struct crypto_alg cbc_des3_alg = { @@ -366,54 +369,80 @@ static struct crypto_alg cbc_des3_alg = { } }; +static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes) +{ + unsigned int i, n; + + /* align to block size, max. PAGE_SIZE */ + n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1); + for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) { + memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE); + crypto_inc(ctrptr + i, DES_BLOCK_SIZE); + } + return n; +} + static int ctr_desall_crypt(struct blkcipher_desc *desc, long func, - struct s390_des_ctx *ctx, struct blkcipher_walk *walk) + struct s390_des_ctx *ctx, + struct blkcipher_walk *walk) { int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE); - unsigned int i, n, nbytes; - u8 buf[DES_BLOCK_SIZE]; - u8 *out, *in; + unsigned int n, nbytes; + u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE]; + u8 *out, *in, *ctrptr = ctrbuf; + + if (!walk->nbytes) + return ret; - memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE); + if (spin_trylock(&ctrblk_lock)) + ctrptr = ctrblk; + + memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE); while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) { out = walk->dst.virt.addr; in = walk->src.virt.addr; while (nbytes >= DES_BLOCK_SIZE) { - /* align to block size, max. PAGE_SIZE */ - n = (nbytes > PAGE_SIZE) ? 
PAGE_SIZE : - nbytes & ~(DES_BLOCK_SIZE - 1); - for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) { - memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE, - DES_BLOCK_SIZE); - crypto_inc(ctrblk + i, DES_BLOCK_SIZE); - } - ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk); - if (ret < 0 || ret != n) + if (ctrptr == ctrblk) + n = __ctrblk_init(ctrptr, nbytes); + else + n = DES_BLOCK_SIZE; + ret = crypt_s390_kmctr(func, ctx->key, out, in, + n, ctrptr); + if (ret < 0 || ret != n) { + if (ctrptr == ctrblk) + spin_unlock(&ctrblk_lock); return -EIO; + } if (n > DES_BLOCK_SIZE) - memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE, + memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE, DES_BLOCK_SIZE); - crypto_inc(ctrblk, DES_BLOCK_SIZE); + crypto_inc(ctrptr, DES_BLOCK_SIZE); out += n; in += n; nbytes -= n; } ret = blkcipher_walk_done(desc, walk, nbytes); } - + if (ctrptr == ctrblk) { + if (nbytes) + memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE); + else + memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE); + spin_unlock(&ctrblk_lock); + } /* final block may be < DES_BLOCK_SIZE, copy only nbytes */ if (nbytes) { out = walk->dst.virt.addr; in = walk->src.virt.addr; ret = crypt_s390_kmctr(func, ctx->key, buf, in, - DES_BLOCK_SIZE, ctrblk); + DES_BLOCK_SIZE, ctrbuf); if (ret < 0 || ret != DES_BLOCK_SIZE) return -EIO; memcpy(out, buf, nbytes); - crypto_inc(ctrblk, DES_BLOCK_SIZE); + crypto_inc(ctrbuf, DES_BLOCK_SIZE); ret = blkcipher_walk_done(desc, walk, 0); + memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE); } - memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE); return ret; } diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index 59c8efce1b9..0248949a756 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -1421,5 +1421,5 @@ ENTRY(sys_sched_setattr_wrapper) ENTRY(sys_sched_getattr_wrapper) lgfr %r2,%r2 # pid_t llgtr %r3,%r3 # const char __user * - llgfr %r3,%r3 # unsigned int + llgfr %r4,%r4 # unsigned int jg sys_sched_getattr diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index b9e25ae2579..d7c00507568 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -59,7 +59,7 @@ ENTRY(startup_continue) .quad 0 # cr12: tracing off .quad 0 # cr13: home space segment table .quad 0xc0000000 # cr14: machine check handling off - .quad 0 # cr15: linkage stack operations + .quad .Llinkage_stack # cr15: linkage stack operations .Lpcmsk:.quad 0x0000000180000000 .L4malign:.quad 0xffffffffffc00000 .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 @@ -67,12 +67,15 @@ ENTRY(startup_continue) .Lparmaddr: .quad PARMAREA .align 64 -.Lduct: .long 0,0,0,0,.Lduald,0,0,0 +.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0 .long 0,0,0,0,0,0,0,0 +.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0 .align 128 .Lduald:.rept 8 .long 0x80000000,0,0,0 # invalid access-list entries .endr +.Llinkage_stack: + .long 0,0,0x89000000,0,0,0,0x8a000000,0 ENTRY(_ehead) diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c index a90d45e9dfb..27c50f4d90c 100644 --- a/arch/s390/mm/page-states.c +++ b/arch/s390/mm/page-states.c @@ -12,6 +12,8 @@ #include <linux/mm.h> #include <linux/gfp.h> #include <linux/init.h> +#include <asm/setup.h> +#include <asm/ipl.h> #define ESSA_SET_STABLE 1 #define ESSA_SET_UNUSED 2 @@ -41,6 +43,14 @@ void __init cmma_init(void) if (!cmma_flag) return; + /* + * Disable CMM for dump, otherwise the tprot based memory + * detection can fail because of unstable pages. 
+ */ + if (OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP) { + cmma_flag = 0; + return; + } asm volatile( " .insn rrf,0xb9ab0000,%1,%1,0,0\n" "0: la %0,0\n" diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index 60c11a629d9..f91c0311980 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c @@ -206,11 +206,13 @@ static void dma_cleanup_tables(struct zpci_dev *zdev) zdev->dma_table = NULL; } -static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start, - int size) +static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, + unsigned long start, int size) { - unsigned long boundary_size = 0x1000000; + unsigned long boundary_size; + boundary_size = ALIGN(dma_get_seg_boundary(&zdev->pdev->dev) + 1, + PAGE_SIZE) >> PAGE_SHIFT; return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages, start, size, 0, boundary_size, 0); } diff --git a/arch/sh/include/cpu-sh2/cpu/cache.h b/arch/sh/include/cpu-sh2/cpu/cache.h index 673515bc413..aa1b2b9088a 100644 --- a/arch/sh/include/cpu-sh2/cpu/cache.h +++ b/arch/sh/include/cpu-sh2/cpu/cache.h @@ -18,7 +18,7 @@ #define SH_CACHE_ASSOC 8 #if defined(CONFIG_CPU_SUBTYPE_SH7619) -#define CCR 0xffffffec +#define SH_CCR 0xffffffec #define CCR_CACHE_CE 0x01 /* Cache enable */ #define CCR_CACHE_WT 0x02 /* CCR[bit1=1,bit2=1] */ diff --git a/arch/sh/include/cpu-sh2a/cpu/cache.h b/arch/sh/include/cpu-sh2a/cpu/cache.h index defb0baa5a0..b27ce92cb60 100644 --- a/arch/sh/include/cpu-sh2a/cpu/cache.h +++ b/arch/sh/include/cpu-sh2a/cpu/cache.h @@ -17,8 +17,8 @@ #define SH_CACHE_COMBINED 4 #define SH_CACHE_ASSOC 8 -#define CCR 0xfffc1000 /* CCR1 */ -#define CCR2 0xfffc1004 +#define SH_CCR 0xfffc1000 /* CCR1 */ +#define SH_CCR2 0xfffc1004 /* * Most of the SH-2A CCR1 definitions resemble the SH-4 ones. 
All others not diff --git a/arch/sh/include/cpu-sh3/cpu/cache.h b/arch/sh/include/cpu-sh3/cpu/cache.h index bee2d81c56b..29700fd88c7 100644 --- a/arch/sh/include/cpu-sh3/cpu/cache.h +++ b/arch/sh/include/cpu-sh3/cpu/cache.h @@ -17,7 +17,7 @@ #define SH_CACHE_COMBINED 4 #define SH_CACHE_ASSOC 8 -#define CCR 0xffffffec /* Address of Cache Control Register */ +#define SH_CCR 0xffffffec /* Address of Cache Control Register */ #define CCR_CACHE_CE 0x01 /* Cache Enable */ #define CCR_CACHE_WT 0x02 /* Write-Through (for P0,U0,P3) (else writeback) */ diff --git a/arch/sh/include/cpu-sh4/cpu/cache.h b/arch/sh/include/cpu-sh4/cpu/cache.h index 7bfb9e8b069..92c4cd119b6 100644 --- a/arch/sh/include/cpu-sh4/cpu/cache.h +++ b/arch/sh/include/cpu-sh4/cpu/cache.h @@ -17,7 +17,7 @@ #define SH_CACHE_COMBINED 4 #define SH_CACHE_ASSOC 8 -#define CCR 0xff00001c /* Address of Cache Control Register */ +#define SH_CCR 0xff00001c /* Address of Cache Control Register */ #define CCR_CACHE_OCE 0x0001 /* Operand Cache Enable */ #define CCR_CACHE_WT 0x0002 /* Write-Through (for P0,U0,P3) (else writeback)*/ #define CCR_CACHE_CB 0x0004 /* Copy-Back (for P1) (else writethrough) */ diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c index ecf83cd158d..0d7360d549c 100644 --- a/arch/sh/kernel/cpu/init.c +++ b/arch/sh/kernel/cpu/init.c @@ -112,7 +112,7 @@ static void cache_init(void) unsigned long ccr, flags; jump_to_uncached(); - ccr = __raw_readl(CCR); + ccr = __raw_readl(SH_CCR); /* * At this point we don't know whether the cache is enabled or not - a @@ -189,7 +189,7 @@ static void cache_init(void) l2_cache_init(); - __raw_writel(flags, CCR); + __raw_writel(flags, SH_CCR); back_to_cached(); } #else diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c index 11572519803..777e50f33c0 100644 --- a/arch/sh/mm/cache-debugfs.c +++ b/arch/sh/mm/cache-debugfs.c @@ -36,7 +36,7 @@ static int cache_seq_show(struct seq_file *file, void *iter) */ jump_to_uncached(); - ccr = __raw_readl(CCR); + ccr = __raw_readl(SH_CCR); if ((ccr & CCR_CACHE_ENABLE) == 0) { back_to_cached(); diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c index defcf719f2e..a74259f2f98 100644 --- a/arch/sh/mm/cache-sh2.c +++ b/arch/sh/mm/cache-sh2.c @@ -63,9 +63,9 @@ static void sh2__flush_invalidate_region(void *start, int size) local_irq_save(flags); jump_to_uncached(); - ccr = __raw_readl(CCR); + ccr = __raw_readl(SH_CCR); ccr |= CCR_CACHE_INVALIDATE; - __raw_writel(ccr, CCR); + __raw_writel(ccr, SH_CCR); back_to_cached(); local_irq_restore(flags); diff --git a/arch/sh/mm/cache-sh2a.c b/arch/sh/mm/cache-sh2a.c index 949e2d3138a..ee87d081259 100644 --- a/arch/sh/mm/cache-sh2a.c +++ b/arch/sh/mm/cache-sh2a.c @@ -134,7 +134,8 @@ static void sh2a__flush_invalidate_region(void *start, int size) /* If there are too many pages then just blow the cache */ if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) { - __raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR); + __raw_writel(__raw_readl(SH_CCR) | CCR_OCACHE_INVALIDATE, + SH_CCR); } else { for (v = begin; v < end; v += L1_CACHE_BYTES) sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v); @@ -167,7 +168,8 @@ static void sh2a_flush_icache_range(void *args) /* I-Cache invalidate */ /* If there are too many pages then just blow the cache */ if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) { - __raw_writel(__raw_readl(CCR) | CCR_ICACHE_INVALIDATE, CCR); + __raw_writel(__raw_readl(SH_CCR) | CCR_ICACHE_INVALIDATE, + SH_CCR); } else { for (v = start; v < end; v += 
L1_CACHE_BYTES) sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v); diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c index 0e529285b28..51d8f7f31d1 100644 --- a/arch/sh/mm/cache-sh4.c +++ b/arch/sh/mm/cache-sh4.c @@ -133,9 +133,9 @@ static void flush_icache_all(void) jump_to_uncached(); /* Flush I-cache */ - ccr = __raw_readl(CCR); + ccr = __raw_readl(SH_CCR); ccr |= CCR_CACHE_ICI; - __raw_writel(ccr, CCR); + __raw_writel(ccr, SH_CCR); /* * back_to_cached() will take care of the barrier for us, don't add diff --git a/arch/sh/mm/cache-shx3.c b/arch/sh/mm/cache-shx3.c index c0adbee97b5..24c58b7dc02 100644 --- a/arch/sh/mm/cache-shx3.c +++ b/arch/sh/mm/cache-shx3.c @@ -19,7 +19,7 @@ void __init shx3_cache_init(void) { unsigned int ccr; - ccr = __raw_readl(CCR); + ccr = __raw_readl(SH_CCR); /* * If we've got cache aliases, resolve them in hardware. @@ -40,5 +40,5 @@ void __init shx3_cache_init(void) ccr |= CCR_CACHE_IBE; #endif - writel_uncached(ccr, CCR); + writel_uncached(ccr, SH_CCR); } diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c index 616966a96cb..097c2cdd117 100644 --- a/arch/sh/mm/cache.c +++ b/arch/sh/mm/cache.c @@ -285,8 +285,8 @@ void __init cpu_cache_init(void) { unsigned int cache_disabled = 0; -#ifdef CCR - cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE); +#ifdef SH_CCR + cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE); #endif compute_alias(&boot_cpu_data.icache); diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index c51efdcd07a..7d8b7e94b93 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -27,7 +27,7 @@ config SPARC select RTC_DRV_M48T59 select HAVE_DMA_ATTRS select HAVE_DMA_API_DEBUG - select HAVE_ARCH_JUMP_LABEL + select HAVE_ARCH_JUMP_LABEL if SPARC64 select GENERIC_IRQ_SHOW select ARCH_WANT_IPC_PARSE_VERSION select GENERIC_PCI_IOMAP diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 32a280ec38c..d7b4967f8fa 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -58,9 +58,12 @@ void arch_cpu_idle(void) { if (tlb_type != hypervisor) { touch_nmi_watchdog(); + local_irq_enable(); } else { unsigned long pstate; + local_irq_enable(); + /* The sun4v sleeping code requires that we have PSTATE.IE cleared over * the cpu sleep hypervisor call. */ @@ -82,7 +85,6 @@ void arch_cpu_idle(void) : "=&r" (pstate) : "i" (PSTATE_IE)); } - local_irq_enable(); } #ifdef CONFIG_HOTPLUG_CPU diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S index 87729fff13b..33a17e7b3cc 100644 --- a/arch/sparc/kernel/syscalls.S +++ b/arch/sparc/kernel/syscalls.S @@ -189,7 +189,8 @@ linux_sparc_syscall32: mov %i0, %l5 ! IEU1 5: call %l7 ! CTI Group brk forced srl %i5, 0, %o5 ! IEU1 - ba,a,pt %xcc, 3f + ba,pt %xcc, 3f + sra %o0, 0, %o0 /* Linux native system calls enter here... 
*/ .align 32 @@ -217,7 +218,6 @@ linux_sparc_syscall: 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] ret_sys_call: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3 - sra %o0, 0, %o0 mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2 sllx %g2, 32, %g2 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index 869023abe5a..cfbe53c17b0 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -14,6 +14,7 @@ #include <linux/pagemap.h> #include <linux/vmalloc.h> #include <linux/kdebug.h> +#include <linux/export.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/log2.h> @@ -62,6 +63,7 @@ extern unsigned long last_valid_pfn; static pgd_t *srmmu_swapper_pg_dir; const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops; +EXPORT_SYMBOL(sparc32_cachetlb_ops); #ifdef CONFIG_SMP const struct sparc32_cachetlb_ops *local_ops; diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 3b3a360b429..f5d506fddda 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -273,7 +273,7 @@ void __init pgtable_cache_init(void) prom_halt(); } - for (i = 0; i < 8; i++) { + for (i = 0; i < ARRAY_SIZE(tsb_cache_names); i++) { unsigned long size = 8192 << i; const char *name = tsb_cache_names[i]; diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 940e50ebfaf..0af5250d914 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -444,6 +444,7 @@ config X86_INTEL_MID bool "Intel MID platform support" depends on X86_32 depends on X86_EXTENDED_PLATFORM + depends on X86_PLATFORM_DEVICES depends on PCI depends on PCI_GOANY depends on X86_IO_APIC @@ -1051,9 +1052,9 @@ config MICROCODE_INTEL This options enables microcode patch loading support for Intel processors. - For latest news and information on obtaining all the required - Intel ingredients for this driver, check: - <http://www.urbanmyth.org/microcode/>. + For the current Intel microcode data package go to + <https://downloadcenter.intel.com> and search for + 'Linux Processor Microcode Data File'. config MICROCODE_AMD bool "AMD microcode loading support" diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index c026cca5602..f3aaf231b4e 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -341,10 +341,6 @@ config X86_USE_3DNOW def_bool y depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML -config X86_OOSTORE - def_bool y - depends on (MWINCHIP3D || MWINCHIPC6) && MTRR - # # P6_NOPs are a relatively minor optimization that require a family >= # 6 processor, except that it is broken on certain VIA chips. diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 0f3621ed1db..321a52ccf63 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -184,6 +184,7 @@ config HAVE_MMIOTRACE_SUPPORT config X86_DECODER_SELFTEST bool "x86 instruction decoder selftest" depends on DEBUG_KERNEL && KPROBES + depends on !COMPILE_TEST ---help--- Perform x86 instruction decoder selftests at build time. 
This option is useful for checking the sanity of x86 instruction diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c index 90a21f43011..4dbf967da50 100644 --- a/arch/x86/boot/compressed/aslr.c +++ b/arch/x86/boot/compressed/aslr.c @@ -111,7 +111,7 @@ struct mem_vector { }; #define MEM_AVOID_MAX 5 -struct mem_vector mem_avoid[MEM_AVOID_MAX]; +static struct mem_vector mem_avoid[MEM_AVOID_MAX]; static bool mem_contains(struct mem_vector *region, struct mem_vector *item) { @@ -180,7 +180,7 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size, } /* Does this memory vector overlap a known avoided area? */ -bool mem_avoid_overlap(struct mem_vector *img) +static bool mem_avoid_overlap(struct mem_vector *img) { int i; @@ -192,8 +192,9 @@ bool mem_avoid_overlap(struct mem_vector *img) return false; } -unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET / CONFIG_PHYSICAL_ALIGN]; -unsigned long slot_max = 0; +static unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET / + CONFIG_PHYSICAL_ALIGN]; +static unsigned long slot_max; static void slots_append(unsigned long addr) { diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index a54ee1d054d..aaac3b2fb74 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h @@ -19,7 +19,7 @@ extern int amd_cache_northbridges(void); extern void amd_flush_garts(void); extern int amd_numa_init(void); extern int amd_get_subcaches(int); -extern int amd_set_subcaches(int, int); +extern int amd_set_subcaches(int, unsigned long); struct amd_l3_cache { unsigned indices; diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index 04a48903b2e..69bbb484502 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -85,11 +85,7 @@ #else # define smp_rmb() barrier() #endif -#ifdef CONFIG_X86_OOSTORE -# define smp_wmb() wmb() -#else -# define smp_wmb() barrier() -#endif +#define smp_wmb() barrier() #define smp_read_barrier_depends() read_barrier_depends() #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) #else /* !SMP */ @@ -100,7 +96,7 @@ #define set_mb(var, value) do { var = value; barrier(); } while (0) #endif /* SMP */ -#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) +#if defined(CONFIG_X86_PPRO_FENCE) /* * For either of these options x86 doesn't have a strong TSO memory diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 3b978c472d0..acd86c85041 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -132,6 +132,9 @@ extern void __init efi_map_region_fixed(efi_memory_desc_t *md); extern void efi_sync_low_kernel_mappings(void); extern void efi_setup_page_tables(void); extern void __init old_map_region(efi_memory_desc_t *md); +extern void __init runtime_code_page_mkexec(void); +extern void __init efi_runtime_mkexec(void); +extern void __init efi_apply_memmap_quirks(void); struct efi_setup_data { u64 fw_vendor; diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 34f69cb9350..91d9c69a629 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -237,7 +237,7 @@ memcpy_toio(volatile void __iomem *dst, const void *src, size_t count) static inline void flush_write_buffers(void) { -#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) +#if defined(CONFIG_X86_PPRO_FENCE) asm volatile("lock; addl $0,0(%%esp)": : :"memory"); #endif } diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 
bbc8b12fa44..5ad38ad0789 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -445,10 +445,20 @@ static inline int pte_same(pte_t a, pte_t b) return a.pte == b.pte; } +static inline int pteval_present(pteval_t pteval) +{ + /* + * Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this + * way clearly states that the intent is that protnone and numa + * hinting ptes are considered present for the purposes of + * pagetable operations like zapping, protection changes, gup etc. + */ + return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA); +} + static inline int pte_present(pte_t a) { - return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE | - _PAGE_NUMA); + return pteval_present(pte_flags(a)); } #define pte_accessible pte_accessible diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index bf156ded74b..0f62f5482d9 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -26,10 +26,9 @@ # define LOCK_PTR_REG "D" #endif -#if defined(CONFIG_X86_32) && \ - (defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)) +#if defined(CONFIG_X86_32) && (defined(CONFIG_X86_PPRO_FENCE)) /* - * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock + * On PPro SMP, we use a locked operation to unlock * (PPro errata 66, 92) */ # define UNLOCK_LOCK_PREFIX LOCK_PREFIX diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index e6d90babc24..04905bfc508 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -62,7 +62,7 @@ static inline void __flush_tlb_all(void) static inline void __flush_tlb_one(unsigned long addr) { - count_vm_event(NR_TLB_LOCAL_FLUSH_ONE); + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); __flush_tlb_single(addr); } @@ -93,13 +93,13 @@ static inline void __flush_tlb_one(unsigned long addr) */ static inline void __flush_tlb_up(void) { - count_vm_event(NR_TLB_LOCAL_FLUSH_ALL); + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); __flush_tlb(); } static inline void flush_tlb_all(void) { - count_vm_event(NR_TLB_LOCAL_FLUSH_ALL); + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); __flush_tlb_all(); } diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index 57ae63cd6ee..94605c0e9ce 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h @@ -66,6 +66,6 @@ extern void tsc_save_sched_clock_state(void); extern void tsc_restore_sched_clock_state(void); /* MSR based TSC calibration for Intel Atom SoC platforms */ -int try_msr_calibrate_tsc(unsigned long *fast_calibrate); +unsigned long try_msr_calibrate_tsc(void); #endif /* _ASM_X86_TSC_H */ diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index 787e1bb5aaf..3e276eb23d1 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h @@ -52,8 +52,7 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s, extern int m2p_add_override(unsigned long mfn, struct page *page, struct gnttab_map_grant_ref *kmap_op); extern int m2p_remove_override(struct page *page, - struct gnttab_map_grant_ref *kmap_op, - unsigned long mfn); + struct gnttab_map_grant_ref *kmap_op); extern struct page *m2p_find_override(unsigned long mfn); extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); @@ -122,7 +121,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn) pfn = m2p_find_override_pfn(mfn, ~0); } - /* + /* * pfn is ~0 if there are no entries in the m2p for mfn or if the * entry doesn't 
map back to the mfn and m2p_override doesn't have a * valid entry for it. diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 59554dca96e..dec8de4e166 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -179,7 +179,7 @@ int amd_get_subcaches(int cpu) return (mask >> (4 * cuid)) & 0xf; } -int amd_set_subcaches(int cpu, int mask) +int amd_set_subcaches(int cpu, unsigned long mask) { static unsigned int reset, ban; struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu)); diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index fd972a3e4cb..9fa8aa051f5 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -18,7 +18,6 @@ #include <linux/pci_ids.h> #include <linux/pci.h> #include <linux/bitops.h> -#include <linux/ioport.h> #include <linux/suspend.h> #include <asm/e820.h> #include <asm/io.h> @@ -54,18 +53,6 @@ int fallback_aper_force __initdata; int fix_aperture __initdata = 1; -static struct resource gart_resource = { - .name = "GART", - .flags = IORESOURCE_MEM, -}; - -static void __init insert_aperture_resource(u32 aper_base, u32 aper_size) -{ - gart_resource.start = aper_base; - gart_resource.end = aper_base + aper_size - 1; - insert_resource(&iomem_resource, &gart_resource); -} - /* This code runs before the PCI subsystem is initialized, so just access the northbridge directly. */ @@ -96,7 +83,6 @@ static u32 __init allocate_aperture(void) memblock_reserve(addr, aper_size); printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n", aper_size >> 10, addr); - insert_aperture_resource((u32)addr, aper_size); register_nosave_region(addr >> PAGE_SHIFT, (addr+aper_size) >> PAGE_SHIFT); @@ -444,12 +430,8 @@ int __init gart_iommu_hole_init(void) out: if (!fix && !fallback_aper_force) { - if (last_aper_base) { - unsigned long n = (32 * 1024 * 1024) << last_aper_order; - - insert_aperture_resource((u32)last_aper_base, n); + if (last_aper_base) return 1; - } return 0; } diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index d3153e281d7..c67ffa68606 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -767,10 +767,7 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) static void cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c) { - tlb_flushall_shift = 5; - - if (c->x86 <= 0x11) - tlb_flushall_shift = 4; + tlb_flushall_shift = 6; } static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 8779edab684..d8fba5c15fb 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -8,236 +8,6 @@ #include "cpu.h" -#ifdef CONFIG_X86_OOSTORE - -static u32 power2(u32 x) -{ - u32 s = 1; - - while (s <= x) - s <<= 1; - - return s >>= 1; -} - - -/* - * Set up an actual MCR - */ -static void centaur_mcr_insert(int reg, u32 base, u32 size, int key) -{ - u32 lo, hi; - - hi = base & ~0xFFF; - lo = ~(size-1); /* Size is a power of 2 so this makes a mask */ - lo &= ~0xFFF; /* Remove the ctrl value bits */ - lo |= key; /* Attribute we wish to set */ - wrmsr(reg+MSR_IDT_MCR0, lo, hi); - mtrr_centaur_report_mcr(reg, lo, hi); /* Tell the mtrr driver */ -} - -/* - * Figure what we can cover with MCR's - * - * Shortcut: We know you can't put 4Gig of RAM on a winchip - */ -static u32 ramtop(void) -{ - u32 clip = 0xFFFFFFFFUL; - u32 top = 0; - int i; - - for (i = 0; i < e820.nr_map; i++) { - unsigned long start, end; - - if (e820.map[i].addr > 0xFFFFFFFFUL) - continue; - 
/* - * Don't MCR over reserved space. Ignore the ISA hole - * we frob around that catastrophe already - */ - if (e820.map[i].type == E820_RESERVED) { - if (e820.map[i].addr >= 0x100000UL && - e820.map[i].addr < clip) - clip = e820.map[i].addr; - continue; - } - start = e820.map[i].addr; - end = e820.map[i].addr + e820.map[i].size; - if (start >= end) - continue; - if (end > top) - top = end; - } - /* - * Everything below 'top' should be RAM except for the ISA hole. - * Because of the limited MCR's we want to map NV/ACPI into our - * MCR range for gunk in RAM - * - * Clip might cause us to MCR insufficient RAM but that is an - * acceptable failure mode and should only bite obscure boxes with - * a VESA hole at 15Mb - * - * The second case Clip sometimes kicks in is when the EBDA is marked - * as reserved. Again we fail safe with reasonable results - */ - if (top > clip) - top = clip; - - return top; -} - -/* - * Compute a set of MCR's to give maximum coverage - */ -static int centaur_mcr_compute(int nr, int key) -{ - u32 mem = ramtop(); - u32 root = power2(mem); - u32 base = root; - u32 top = root; - u32 floor = 0; - int ct = 0; - - while (ct < nr) { - u32 fspace = 0; - u32 high; - u32 low; - - /* - * Find the largest block we will fill going upwards - */ - high = power2(mem-top); - - /* - * Find the largest block we will fill going downwards - */ - low = base/2; - - /* - * Don't fill below 1Mb going downwards as there - * is an ISA hole in the way. - */ - if (base <= 1024*1024) - low = 0; - - /* - * See how much space we could cover by filling below - * the ISA hole - */ - - if (floor == 0) - fspace = 512*1024; - else if (floor == 512*1024) - fspace = 128*1024; - - /* And forget ROM space */ - - /* - * Now install the largest coverage we get - */ - if (fspace > high && fspace > low) { - centaur_mcr_insert(ct, floor, fspace, key); - floor += fspace; - } else if (high > low) { - centaur_mcr_insert(ct, top, high, key); - top += high; - } else if (low > 0) { - base -= low; - centaur_mcr_insert(ct, base, low, key); - } else - break; - ct++; - } - /* - * We loaded ct values. We now need to set the mask. The caller - * must do this bit. - */ - return ct; -} - -static void centaur_create_optimal_mcr(void) -{ - int used; - int i; - - /* - * Allocate up to 6 mcrs to mark as much of ram as possible - * as write combining and weak write ordered. - * - * To experiment with: Linux never uses stack operations for - * mmio spaces so we could globally enable stack operation wc - * - * Load the registers with type 31 - full write combining, all - * writes weakly ordered. - */ - used = centaur_mcr_compute(6, 31); - - /* - * Wipe unused MCRs - */ - for (i = used; i < 8; i++) - wrmsr(MSR_IDT_MCR0+i, 0, 0); -} - -static void winchip2_create_optimal_mcr(void) -{ - u32 lo, hi; - int used; - int i; - - /* - * Allocate up to 6 mcrs to mark as much of ram as possible - * as write combining, weak store ordered. - * - * Load the registers with type 25 - * 8 - weak write ordering - * 16 - weak read ordering - * 1 - write combining - */ - used = centaur_mcr_compute(6, 25); - - /* - * Mark the registers we are using. - */ - rdmsr(MSR_IDT_MCR_CTRL, lo, hi); - for (i = 0; i < used; i++) - lo |= 1<<(9+i); - wrmsr(MSR_IDT_MCR_CTRL, lo, hi); - - /* - * Wipe unused MCRs - */ - - for (i = used; i < 8; i++) - wrmsr(MSR_IDT_MCR0+i, 0, 0); -} - -/* - * Handle the MCR key on the Winchip 2. 
- */ -static void winchip2_unprotect_mcr(void) -{ - u32 lo, hi; - u32 key; - - rdmsr(MSR_IDT_MCR_CTRL, lo, hi); - lo &= ~0x1C0; /* blank bits 8-6 */ - key = (lo>>17) & 7; - lo |= key<<6; /* replace with unlock key */ - wrmsr(MSR_IDT_MCR_CTRL, lo, hi); -} - -static void winchip2_protect_mcr(void) -{ - u32 lo, hi; - - rdmsr(MSR_IDT_MCR_CTRL, lo, hi); - lo &= ~0x1C0; /* blank bits 8-6 */ - wrmsr(MSR_IDT_MCR_CTRL, lo, hi); -} -#endif /* CONFIG_X86_OOSTORE */ - #define ACE_PRESENT (1 << 6) #define ACE_ENABLED (1 << 7) #define ACE_FCR (1 << 28) /* MSR_VIA_FCR */ @@ -362,20 +132,6 @@ static void init_centaur(struct cpuinfo_x86 *c) fcr_clr = DPDC; printk(KERN_NOTICE "Disabling bugged TSC.\n"); clear_cpu_cap(c, X86_FEATURE_TSC); -#ifdef CONFIG_X86_OOSTORE - centaur_create_optimal_mcr(); - /* - * Enable: - * write combining on non-stack, non-string - * write combining on string, all types - * weak write ordering - * - * The C6 original lacks weak read order - * - * Note 0x120 is write only on Winchip 1 - */ - wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0); -#endif break; case 8: switch (c->x86_mask) { @@ -392,40 +148,12 @@ static void init_centaur(struct cpuinfo_x86 *c) fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK| E2MMX|EAMD3D; fcr_clr = DPDC; -#ifdef CONFIG_X86_OOSTORE - winchip2_unprotect_mcr(); - winchip2_create_optimal_mcr(); - rdmsr(MSR_IDT_MCR_CTRL, lo, hi); - /* - * Enable: - * write combining on non-stack, non-string - * write combining on string, all types - * weak write ordering - */ - lo |= 31; - wrmsr(MSR_IDT_MCR_CTRL, lo, hi); - winchip2_protect_mcr(); -#endif break; case 9: name = "3"; fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK| E2MMX|EAMD3D; fcr_clr = DPDC; -#ifdef CONFIG_X86_OOSTORE - winchip2_unprotect_mcr(); - winchip2_create_optimal_mcr(); - rdmsr(MSR_IDT_MCR_CTRL, lo, hi); - /* - * Enable: - * write combining on non-stack, non-string - * write combining on string, all types - * weak write ordering - */ - lo |= 31; - wrmsr(MSR_IDT_MCR_CTRL, lo, hi); - winchip2_protect_mcr(); -#endif break; default: name = "??"; diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 24b6fd10625..8e28bf2fc3e 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -284,8 +284,13 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c) raw_local_save_flags(eflags); BUG_ON(eflags & X86_EFLAGS_AC); - if (cpu_has(c, X86_FEATURE_SMAP)) + if (cpu_has(c, X86_FEATURE_SMAP)) { +#ifdef CONFIG_X86_SMAP set_in_cr4(X86_CR4_SMAP); +#else + clear_in_cr4(X86_CR4_SMAP); +#endif + } } /* diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 3db61c644e4..5cd9bfabd64 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -640,21 +640,17 @@ static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c) case 0x61d: /* six-core 45 nm xeon "Dunnington" */ tlb_flushall_shift = -1; break; + case 0x63a: /* Ivybridge */ + tlb_flushall_shift = 2; + break; case 0x61a: /* 45 nm nehalem, "Bloomfield" */ case 0x61e: /* 45 nm nehalem, "Lynnfield" */ case 0x625: /* 32 nm nehalem, "Clarkdale" */ case 0x62c: /* 32 nm nehalem, "Gulftown" */ case 0x62e: /* 45 nm nehalem-ex, "Beckton" */ case 0x62f: /* 32 nm Xeon E7 */ - tlb_flushall_shift = 6; - break; case 0x62a: /* SandyBridge */ case 0x62d: /* SandyBridge, "Romely-EP" */ - tlb_flushall_shift = 5; - break; - case 0x63a: /* Ivybridge */ - tlb_flushall_shift = 1; - break; default: tlb_flushall_shift = 6; } diff --git a/arch/x86/kernel/cpu/microcode/amd_early.c 
b/arch/x86/kernel/cpu/microcode/amd_early.c index 8384c0fa206..617a9e28424 100644 --- a/arch/x86/kernel/cpu/microcode/amd_early.c +++ b/arch/x86/kernel/cpu/microcode/amd_early.c @@ -285,6 +285,15 @@ static void __init collect_cpu_sig_on_bsp(void *arg) uci->cpu_sig.sig = cpuid_eax(0x00000001); } + +static void __init get_bsp_sig(void) +{ + unsigned int bsp = boot_cpu_data.cpu_index; + struct ucode_cpu_info *uci = ucode_cpu_info + bsp; + + if (!uci->cpu_sig.sig) + smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1); +} #else void load_ucode_amd_ap(void) { @@ -337,31 +346,37 @@ void load_ucode_amd_ap(void) int __init save_microcode_in_initrd_amd(void) { + unsigned long cont; enum ucode_state ret; u32 eax; -#ifdef CONFIG_X86_32 - unsigned int bsp = boot_cpu_data.cpu_index; - struct ucode_cpu_info *uci = ucode_cpu_info + bsp; - - if (!uci->cpu_sig.sig) - smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1); + if (!container) + return -EINVAL; +#ifdef CONFIG_X86_32 + get_bsp_sig(); + cont = (unsigned long)container; +#else /* - * Take into account the fact that the ramdisk might get relocated - * and therefore we need to recompute the container's position in - * virtual memory space. + * We need the physical address of the container for both bitness since + * boot_params.hdr.ramdisk_image is a physical address. */ - container = (u8 *)(__va((u32)relocated_ramdisk) + - ((u32)container - boot_params.hdr.ramdisk_image)); + cont = __pa(container); #endif + + /* + * Take into account the fact that the ramdisk might get relocated and + * therefore we need to recompute the container's position in virtual + * memory space. + */ + if (relocated_ramdisk) + container = (u8 *)(__va(relocated_ramdisk) + + (cont - boot_params.hdr.ramdisk_image)); + if (ucode_new_rev) pr_info("microcode: updated early to new patch_level=0x%08x\n", ucode_new_rev); - if (!container) - return -EINVAL; - eax = cpuid_eax(0x00000001); eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index ce2d0a2c3e4..0e25a1bc5ab 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -683,7 +683,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock) } /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */ - count_vm_event(NR_TLB_LOCAL_FLUSH_ALL); + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); __flush_tlb(); /* Save MTRR state */ @@ -697,7 +697,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock) static void post_set(void) __releases(set_atomicity_lock) { /* Flush TLBs (no need to flush caches - they are disabled) */ - count_vm_event(NR_TLB_LOCAL_FLUSH_ALL); + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); __flush_tlb(); /* Intel (P6) standard MTRRs */ diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index b88645191fe..79f9f848bee 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1192,6 +1192,9 @@ static void x86_pmu_del(struct perf_event *event, int flags) for (i = 0; i < cpuc->n_events; i++) { if (event == cpuc->event_list[i]) { + if (i >= cpuc->n_events - cpuc->n_added) + --cpuc->n_added; + if (x86_pmu.put_event_constraints) x86_pmu.put_event_constraints(cpuc, event); @@ -1521,6 +1524,8 @@ static int __init init_hw_perf_events(void) pr_cont("%s PMU driver.\n", x86_pmu.name); + x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */ + for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next) 
quirk->func(); @@ -1534,7 +1539,6 @@ static int __init init_hw_perf_events(void) __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, 0, x86_pmu.num_counters, 0, 0); - x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */ x86_pmu_format_group.attrs = x86_pmu.format_attrs; if (x86_pmu.event_attrs) @@ -1820,9 +1824,12 @@ static ssize_t set_attr_rdpmc(struct device *cdev, if (ret) return ret; + if (x86_pmu.attr_rdpmc_broken) + return -ENOTSUPP; + if (!!val != !!x86_pmu.attr_rdpmc) { x86_pmu.attr_rdpmc = !!val; - smp_call_function(change_rdpmc, (void *)val, 1); + on_each_cpu(change_rdpmc, (void *)val, 1); } return count; diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index c1a861829d8..4972c244d0b 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -409,6 +409,7 @@ struct x86_pmu { /* * sysfs attrs */ + int attr_rdpmc_broken; int attr_rdpmc; struct attribute **format_attrs; struct attribute **event_attrs; diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 0fa4f242f05..aa333d96688 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1361,10 +1361,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) intel_pmu_disable_all(); handled = intel_pmu_drain_bts_buffer(); status = intel_pmu_get_status(); - if (!status) { - intel_pmu_enable_all(0); - return handled; - } + if (!status) + goto done; loops = 0; again: @@ -2310,10 +2308,7 @@ __init int intel_pmu_init(void) if (version > 1) x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3); - /* - * v2 and above have a perf capabilities MSR - */ - if (version > 1) { + if (boot_cpu_has(X86_FEATURE_PDCM)) { u64 capabilities; rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities); diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 29c248799ce..047f540cf3f 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -501,8 +501,11 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = { SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, SNBEP_CBO_PMON_CTL_TID_EN, 0x1), SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), + SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6), SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), + SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6), SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), + SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6), SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6), SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8), SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8), @@ -1178,10 +1181,15 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = { SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, SNBEP_CBO_PMON_CTL_TID_EN, 0x1), SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2), + SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), + SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc), + SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc), SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), + SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc), SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), + SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc), SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), - SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc), + SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc), SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10), SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10), SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10), @@ 
-3326,6 +3334,8 @@ static int __init uncore_type_init(struct intel_uncore_type *type) if (!pmus) return -ENOMEM; + type->pmus = pmus; + type->unconstrainted = (struct event_constraint) __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1, 0, type->num_counters, 0, 0); @@ -3361,7 +3371,6 @@ static int __init uncore_type_init(struct intel_uncore_type *type) } type->pmu_group = &uncore_pmu_attr_group; - type->pmus = pmus; return 0; fail: uncore_type_exit(type); diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c index b1e2fe11532..7c1a0c07b60 100644 --- a/arch/x86/kernel/cpu/perf_event_p6.c +++ b/arch/x86/kernel/cpu/perf_event_p6.c @@ -231,31 +231,49 @@ static __initconst const struct x86_pmu p6_pmu = { }; +static __init void p6_pmu_rdpmc_quirk(void) +{ + if (boot_cpu_data.x86_mask < 9) { + /* + * PPro erratum 26; fixed in stepping 9 and above. + */ + pr_warn("Userspace RDPMC support disabled due to a CPU erratum\n"); + x86_pmu.attr_rdpmc_broken = 1; + x86_pmu.attr_rdpmc = 0; + } +} + __init int p6_pmu_init(void) { + x86_pmu = p6_pmu; + switch (boot_cpu_data.x86_model) { - case 1: - case 3: /* Pentium Pro */ - case 5: - case 6: /* Pentium II */ - case 7: - case 8: - case 11: /* Pentium III */ - case 9: - case 13: - /* Pentium M */ + case 1: /* Pentium Pro */ + x86_add_quirk(p6_pmu_rdpmc_quirk); + break; + + case 3: /* Pentium II - Klamath */ + case 5: /* Pentium II - Deschutes */ + case 6: /* Pentium II - Mendocino */ break; + + case 7: /* Pentium III - Katmai */ + case 8: /* Pentium III - Coppermine */ + case 10: /* Pentium III Xeon */ + case 11: /* Pentium III - Tualatin */ + break; + + case 9: /* Pentium M - Banias */ + case 13: /* Pentium M - Dothan */ + break; + default: - pr_cont("unsupported p6 CPU model %d ", - boot_cpu_data.x86_model); + pr_cont("unsupported p6 CPU model %d ", boot_cpu_data.x86_model); return -ENODEV; } - x86_pmu = p6_pmu; - memcpy(hw_cache_event_ids, p6_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - return 0; } diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index d4bdd253fea..e6253195a30 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -77,8 +77,7 @@ within(unsigned long addr, unsigned long start, unsigned long end) return addr >= start && addr < end; } -static int -do_ftrace_mod_code(unsigned long ip, const void *new_code) +static unsigned long text_ip_addr(unsigned long ip) { /* * On x86_64, kernel text mappings are mapped read-only with @@ -91,7 +90,7 @@ do_ftrace_mod_code(unsigned long ip, const void *new_code) if (within(ip, (unsigned long)_text, (unsigned long)_etext)) ip = (unsigned long)__va(__pa_symbol(ip)); - return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE); + return ip; } static const unsigned char *ftrace_nop_replace(void) @@ -123,8 +122,10 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code, if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) return -EINVAL; + ip = text_ip_addr(ip); + /* replace the text with the new text */ - if (do_ftrace_mod_code(ip, new_code)) + if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE)) return -EPERM; sync_core(); @@ -221,37 +222,51 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, return -EINVAL; } -int ftrace_update_ftrace_func(ftrace_func_t func) +static unsigned long ftrace_update_func; + +static int update_ftrace_func(unsigned long ip, void *new) { - unsigned long ip = (unsigned long)(&ftrace_call); - unsigned char old[MCOUNT_INSN_SIZE], *new; + unsigned 
char old[MCOUNT_INSN_SIZE]; int ret; - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); - new = ftrace_call_replace(ip, (unsigned long)func); + memcpy(old, (void *)ip, MCOUNT_INSN_SIZE); + + ftrace_update_func = ip; + /* Make sure the breakpoints see the ftrace_update_func update */ + smp_wmb(); /* See comment above by declaration of modifying_ftrace_code */ atomic_inc(&modifying_ftrace_code); ret = ftrace_modify_code(ip, old, new); + atomic_dec(&modifying_ftrace_code); + + return ret; +} + +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + unsigned long ip = (unsigned long)(&ftrace_call); + unsigned char *new; + int ret; + + new = ftrace_call_replace(ip, (unsigned long)func); + ret = update_ftrace_func(ip, new); + /* Also update the regs callback function */ if (!ret) { ip = (unsigned long)(&ftrace_regs_call); - memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE); new = ftrace_call_replace(ip, (unsigned long)func); - ret = ftrace_modify_code(ip, old, new); + ret = update_ftrace_func(ip, new); } - atomic_dec(&modifying_ftrace_code); - return ret; } static int is_ftrace_caller(unsigned long ip) { - if (ip == (unsigned long)(&ftrace_call) || - ip == (unsigned long)(&ftrace_regs_call)) + if (ip == ftrace_update_func) return 1; return 0; @@ -677,45 +692,41 @@ int __init ftrace_dyn_arch_init(void *data) #ifdef CONFIG_DYNAMIC_FTRACE extern void ftrace_graph_call(void); -static int ftrace_mod_jmp(unsigned long ip, - int old_offset, int new_offset) +static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr) { - unsigned char code[MCOUNT_INSN_SIZE]; + static union ftrace_code_union calc; - if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE)) - return -EFAULT; + /* Jmp not a call (ignore the .e8) */ + calc.e8 = 0xe9; + calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr); - if (code[0] != 0xe9 || old_offset != *(int *)(&code[1])) - return -EINVAL; + /* + * ftrace external locks synchronize the access to the static variable. 
+ */ + return calc.code; +} - *(int *)(&code[1]) = new_offset; +static int ftrace_mod_jmp(unsigned long ip, void *func) +{ + unsigned char *new; - if (do_ftrace_mod_code(ip, &code)) - return -EPERM; + new = ftrace_jmp_replace(ip, (unsigned long)func); - return 0; + return update_ftrace_func(ip, new); } int ftrace_enable_ftrace_graph_caller(void) { unsigned long ip = (unsigned long)(&ftrace_graph_call); - int old_offset, new_offset; - old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE); - new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE); - - return ftrace_mod_jmp(ip, old_offset, new_offset); + return ftrace_mod_jmp(ip, &ftrace_graph_caller); } int ftrace_disable_ftrace_graph_caller(void) { unsigned long ip = (unsigned long)(&ftrace_graph_call); - int old_offset, new_offset; - - old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE); - new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE); - return ftrace_mod_jmp(ip, old_offset, new_offset); + return ftrace_mod_jmp(ip, &ftrace_stub); } #endif /* !CONFIG_DYNAMIC_FTRACE */ diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 81ba27679f1..f36bd42d6f0 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -544,6 +544,10 @@ ENDPROC(early_idt_handlers) /* This is global to keep gas from relaxing the jumps */ ENTRY(early_idt_handler) cld + + cmpl $2,(%esp) # X86_TRAP_NMI + je is_nmi # Ignore NMI + cmpl $2,%ss:early_recursion_flag je hlt_loop incl %ss:early_recursion_flag @@ -594,8 +598,9 @@ ex_entry: pop %edx pop %ecx pop %eax - addl $8,%esp /* drop vector number and error code */ decl %ss:early_recursion_flag +is_nmi: + addl $8,%esp /* drop vector number and error code */ iret ENDPROC(early_idt_handler) diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index e1aabdb314c..a468c0a65c4 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -343,6 +343,9 @@ early_idt_handlers: ENTRY(early_idt_handler) cld + cmpl $2,(%rsp) # X86_TRAP_NMI + je is_nmi # Ignore NMI + cmpl $2,early_recursion_flag(%rip) jz 1f incl early_recursion_flag(%rip) @@ -405,8 +408,9 @@ ENTRY(early_idt_handler) popq %rdx popq %rcx popq %rax - addq $16,%rsp # drop vector number and error code decl early_recursion_flag(%rip) +is_nmi: + addq $16,%rsp # drop vector number and error code INTERRUPT_RETURN ENDPROC(early_idt_handler) diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index e8368c6dd2a..d5dd8081441 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c @@ -86,10 +86,19 @@ EXPORT_SYMBOL(__kernel_fpu_begin); void __kernel_fpu_end(void) { - if (use_eager_fpu()) - math_state_restore(); - else + if (use_eager_fpu()) { + /* + * For eager fpu, most the time, tsk_used_math() is true. + * Restore the user math as we are done with the kernel usage. + * At few instances during thread exit, signal handling etc, + * tsk_used_math() is false. Those few places will take proper + * actions, so we don't need to restore the math here. 
+ */ + if (likely(tsk_used_math(current))) + math_state_restore(); + } else { stts(); + } } EXPORT_SYMBOL(__kernel_fpu_end); diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index dbb60878b74..d99f31d9a75 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -266,6 +266,14 @@ __visible void smp_trace_x86_platform_ipi(struct pt_regs *regs) EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); #ifdef CONFIG_HOTPLUG_CPU + +/* These two declarations are only used in check_irq_vectors_for_cpu_disable() + * below, which is protected by stop_machine(). Putting them on the stack + * results in a stack frame overflow. Dynamically allocating could result in a + * failure so declare these two cpumasks as global. + */ +static struct cpumask affinity_new, online_new; + /* * This cpu is going to be removed and its vectors migrated to the remaining * online cpus. Check to see if there are enough vectors in the remaining cpus. @@ -277,7 +285,6 @@ int check_irq_vectors_for_cpu_disable(void) unsigned int this_cpu, vector, this_count, count; struct irq_desc *desc; struct irq_data *data; - struct cpumask affinity_new, online_new; this_cpu = smp_processor_id(); cpumask_copy(&online_new, cpu_online_mask); diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 4eabc160696..679cef0791c 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -279,5 +279,7 @@ void arch_crash_save_vmcoreinfo(void) VMCOREINFO_SYMBOL(node_data); VMCOREINFO_LENGTH(node_data, MAX_NUMNODES); #endif + vmcoreinfo_append_str("KERNELOFFSET=%lx\n", + (unsigned long)&_text - __START_KERNEL); } diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 872079a67e4..f7d0672481f 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -100,8 +100,10 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size, flag |= __GFP_ZERO; again: page = NULL; - if (!(flag & GFP_ATOMIC)) + /* CMA can be used only in the context which permits sleeping */ + if (flag & __GFP_WAIT) page = dma_alloc_from_contiguous(dev, count, get_order(size)); + /* fallback */ if (!page) page = alloc_pages_node(dev_to_node(dev), flag, get_order(size)); if (!page) diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 04ee1e2e4c0..ff898bbf579 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c @@ -529,7 +529,7 @@ static void quirk_amd_nb_node(struct pci_dev *dev) return; pci_read_config_dword(nb_ht, 0x60, &val); - node = val & 7; + node = pcibus_to_node(dev->bus) | (val & 7); /* * Some hardware may return an invalid node ID, * so check it first: @@ -571,3 +571,40 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5, quirk_amd_nb_node); #endif + +#ifdef CONFIG_PCI +/* + * Processor does not ensure DRAM scrub read/write sequence + * is atomic wrt accesses to CC6 save state area. Therefore + * if a concurrent scrub read/write access is to same address + * the entry may appear as if it is not written. This quirk + * applies to Fam16h models 00h-0Fh + * + * See "Revision Guide" for AMD F16h models 00h-0fh, + * document 51810 rev. 
3.04, Nov 2013 + */ +static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev) +{ + u32 val; + + /* + * Suggested workaround: + * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b + */ + pci_read_config_dword(dev, 0x58, &val); + if (val & 0x1F) { + val &= ~(0x1F); + pci_write_config_dword(dev, 0x58, val); + } + + pci_read_config_dword(dev, 0x5C, &val); + if (val & BIT(0)) { + val &= ~BIT(0); + pci_write_config_dword(dev, 0x5c, val); + } +} + +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3, + amd_disable_seq_and_redirect_scrub); + +#endif diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 06853e67035..ce72964b2f4 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1239,14 +1239,8 @@ void __init setup_arch(char **cmdline_p) register_refined_jiffies(CLOCK_TICK_RATE); #ifdef CONFIG_EFI - /* Once setup is done above, unmap the EFI memory map on - * mismatched firmware/kernel archtectures since there is no - * support for runtime services. - */ - if (efi_enabled(EFI_BOOT) && !efi_is_native()) { - pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n"); - efi_unmap_memmap(); - } + if (efi_enabled(EFI_BOOT)) + efi_apply_memmap_quirks(); #endif } diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 19e5adb49a2..cfbe99f8883 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -209,7 +209,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc) * dance when its actually needed. */ - preempt_disable(); + preempt_disable_notrace(); data = this_cpu_read(cyc2ns.head); tail = this_cpu_read(cyc2ns.tail); @@ -229,7 +229,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc) if (!--data->__count) this_cpu_write(cyc2ns.tail, data); } - preempt_enable(); + preempt_enable_notrace(); return ns; } @@ -653,13 +653,10 @@ unsigned long native_calibrate_tsc(void) /* Calibrate TSC using MSR for Intel Atom SoCs */ local_irq_save(flags); - i = try_msr_calibrate_tsc(&fast_calibrate); + fast_calibrate = try_msr_calibrate_tsc(); local_irq_restore(flags); - if (i >= 0) { - if (i == 0) - pr_warn("Fast TSC calibration using MSR failed\n"); + if (fast_calibrate) return fast_calibrate; - } local_irq_save(flags); fast_calibrate = quick_pit_calibrate(); diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c index 8b5434f4389..92ae6acac8a 100644 --- a/arch/x86/kernel/tsc_msr.c +++ b/arch/x86/kernel/tsc_msr.c @@ -53,7 +53,7 @@ static struct freq_desc freq_desc_tables[] = { /* TNG */ { 6, 0x4a, 1, { 0, FREQ_100, FREQ_133, 0, 0, 0, 0, 0 } }, /* VLV2 */ - { 6, 0x37, 1, { 0, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } }, + { 6, 0x37, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } }, /* ANN */ { 6, 0x5a, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_100, 0, 0, 0, 0 } }, }; @@ -77,21 +77,18 @@ static int match_cpu(u8 family, u8 model) /* * Do MSR calibration only for known/supported CPUs. - * Return values: - * -1: CPU is unknown/unsupported for MSR based calibration - * 0: CPU is known/supported, but calibration failed - * 1: CPU is known/supported, and calibration succeeded + * + * Returns the calibration value or 0 if MSR calibration failed. 
*/ -int try_msr_calibrate_tsc(unsigned long *fast_calibrate) +unsigned long try_msr_calibrate_tsc(void) { - int cpu_index; u32 lo, hi, ratio, freq_id, freq; + unsigned long res; + int cpu_index; cpu_index = match_cpu(boot_cpu_data.x86, boot_cpu_data.x86_model); if (cpu_index < 0) - return -1; - - *fast_calibrate = 0; + return 0; if (freq_desc_tables[cpu_index].msr_plat) { rdmsr(MSR_PLATFORM_INFO, lo, hi); @@ -103,7 +100,7 @@ int try_msr_calibrate_tsc(unsigned long *fast_calibrate) pr_info("Maximum core-clock to bus-clock ratio: 0x%x\n", ratio); if (!ratio) - return 0; + goto fail; /* Get FSB FREQ ID */ rdmsr(MSR_FSB_FREQ, lo, hi); @@ -112,16 +109,19 @@ int try_msr_calibrate_tsc(unsigned long *fast_calibrate) pr_info("Resolved frequency ID: %u, frequency: %u KHz\n", freq_id, freq); if (!freq) - return 0; + goto fail; /* TSC frequency = maximum resolved freq * maximum resolved bus ratio */ - *fast_calibrate = freq * ratio; - pr_info("TSC runs at %lu KHz\n", *fast_calibrate); + res = freq * ratio; + pr_info("TSC runs at %lu KHz\n", res); #ifdef CONFIG_X86_LOCAL_APIC lapic_timer_frequency = (freq * 1000) / HZ; pr_info("lapic_timer_frequency = %d\n", lapic_timer_frequency); #endif + return res; - return 1; +fail: + pr_warn("Fast TSC calibration using MSR failed\n"); + return 0; } diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index e50425d0f5f..9b531351a58 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2672,6 +2672,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, break; } + drop_large_spte(vcpu, iterator.sptep); if (!is_shadow_present_pte(*iterator.sptep)) { u64 base_addr = iterator.addr; diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index e81df8fce02..2de1bc09a8d 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -3002,10 +3002,8 @@ static int cr8_write_interception(struct vcpu_svm *svm) u8 cr8_prev = kvm_get_cr8(&svm->vcpu); /* instruction emulation calls kvm_set_cr8() */ r = cr_interception(svm); - if (irqchip_in_kernel(svm->vcpu.kvm)) { - clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); + if (irqchip_in_kernel(svm->vcpu.kvm)) return r; - } if (cr8_prev <= kvm_get_cr8(&svm->vcpu)) return r; kvm_run->exit_reason = KVM_EXIT_SET_TPR; @@ -3567,6 +3565,8 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK)) return; + clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); + if (irr == -1) return; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a06f101ef64..39275283475 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -6688,7 +6688,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) else if (is_page_fault(intr_info)) return enable_ept; else if (is_no_device(intr_info) && - !(nested_read_cr0(vmcs12) & X86_CR0_TS)) + !(vmcs12->guest_cr0 & X86_CR0_TS)) return 0; return vmcs12->exception_bitmap & (1u << (intr_info & INTR_INFO_VECTOR_MASK)); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 39c28f09dfd..2b8578432d5 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6186,7 +6186,7 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu) frag->len -= len; } - if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) { + if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { vcpu->mmio_needed = 0; /* FIXME: return into emulator if single-stepping. 
*/ diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9d591c89580..a10c8c79216 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1001,6 +1001,12 @@ static int fault_in_kernel_space(unsigned long address) static inline bool smap_violation(int error_code, struct pt_regs *regs) { + if (!IS_ENABLED(CONFIG_X86_SMAP)) + return false; + + if (!static_cpu_has(X86_FEATURE_SMAP)) + return false; + if (error_code & PF_USER) return false; @@ -1014,13 +1020,17 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs) * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate * routines. + * + * This function must have noinline because both callers + * {,trace_}do_page_fault() have notrace on. Having this an actual function + * guarantees there's a function trace entry. */ -static void __kprobes -__do_page_fault(struct pt_regs *regs, unsigned long error_code) +static void __kprobes noinline +__do_page_fault(struct pt_regs *regs, unsigned long error_code, + unsigned long address) { struct vm_area_struct *vma; struct task_struct *tsk; - unsigned long address; struct mm_struct *mm; int fault; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; @@ -1028,9 +1038,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code) tsk = current; mm = tsk->mm; - /* Get the faulting address: */ - address = read_cr2(); - /* * Detect and handle instructions that would cause a page fault for * both a tracked kernel page and a userspace page. @@ -1087,11 +1094,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code) if (unlikely(error_code & PF_RSVD)) pgtable_bad(regs, error_code, address); - if (static_cpu_has(X86_FEATURE_SMAP)) { - if (unlikely(smap_violation(error_code, regs))) { - bad_area_nosemaphore(regs, error_code, address); - return; - } + if (unlikely(smap_violation(error_code, regs))) { + bad_area_nosemaphore(regs, error_code, address); + return; } /* @@ -1244,32 +1249,50 @@ good_area: up_read(&mm->mmap_sem); } -dotraplinkage void __kprobes +dotraplinkage void __kprobes notrace do_page_fault(struct pt_regs *regs, unsigned long error_code) { + unsigned long address = read_cr2(); /* Get the faulting address */ enum ctx_state prev_state; + /* + * We must have this function tagged with __kprobes, notrace and call + * read_cr2() before calling anything else. To avoid calling any kind + * of tracing machinery before we've observed the CR2 value. + * + * exception_{enter,exit}() contain all sorts of tracepoints. + */ + prev_state = exception_enter(); - __do_page_fault(regs, error_code); + __do_page_fault(regs, error_code, address); exception_exit(prev_state); } -static void trace_page_fault_entries(struct pt_regs *regs, +#ifdef CONFIG_TRACING +static void trace_page_fault_entries(unsigned long address, struct pt_regs *regs, unsigned long error_code) { if (user_mode(regs)) - trace_page_fault_user(read_cr2(), regs, error_code); + trace_page_fault_user(address, regs, error_code); else - trace_page_fault_kernel(read_cr2(), regs, error_code); + trace_page_fault_kernel(address, regs, error_code); } -dotraplinkage void __kprobes +dotraplinkage void __kprobes notrace trace_do_page_fault(struct pt_regs *regs, unsigned long error_code) { + /* + * The exception_enter and tracepoint processing could + * trigger another page faults (user space callchain + * reading) and destroy the original cr2 value, so read + * the faulting address now. 
+ */ + unsigned long address = read_cr2(); enum ctx_state prev_state; prev_state = exception_enter(); - trace_page_fault_entries(regs, error_code); - __do_page_fault(regs, error_code); + trace_page_fault_entries(address, regs, error_code); + __do_page_fault(regs, error_code, address); exception_exit(prev_state); } +#endif /* CONFIG_TRACING */ diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 81b2750f366..27aa0455fab 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -493,14 +493,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi) struct numa_memblk *mb = &mi->blk[i]; memblock_set_node(mb->start, mb->end - mb->start, &memblock.memory, mb->nid); - - /* - * At this time, all memory regions reserved by memblock are - * used by the kernel. Set the nid in memblock.reserved will - * mark out all the nodes the kernel resides in. - */ - memblock_set_node(mb->start, mb->end - mb->start, - &memblock.reserved, mb->nid); } /* @@ -565,10 +557,21 @@ static void __init numa_init_array(void) static void __init numa_clear_kernel_node_hotplug(void) { int i, nid; - nodemask_t numa_kernel_nodes; + nodemask_t numa_kernel_nodes = NODE_MASK_NONE; unsigned long start, end; struct memblock_type *type = &memblock.reserved; + /* + * At this time, all memory regions reserved by memblock are + * used by the kernel. Set the nid in memblock.reserved will + * mark out all the nodes the kernel resides in. + */ + for (i = 0; i < numa_meminfo.nr_blks; i++) { + struct numa_memblk *mb = &numa_meminfo.blk[i]; + memblock_set_node(mb->start, mb->end - mb->start, + &memblock.reserved, mb->nid); + } + /* Mark all kernel nodes. */ for (i = 0; i < type->cnt; i++) node_set(type->regions[i].nid, numa_kernel_nodes); diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 0342d27ca79..47b6436e41c 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -52,6 +52,8 @@ void memory_present(int nid, unsigned long start, unsigned long end) nid, start, end); printk(KERN_DEBUG " Setting physnode_map array to node %d for pfns:\n", nid); printk(KERN_DEBUG " "); + start = round_down(start, PAGES_PER_SECTION); + end = round_up(end, PAGES_PER_SECTION); for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) { physnode_map[pfn / PAGES_PER_SECTION] = nid; printk(KERN_CONT "%lx ", pfn); diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c index 1a25187e151..1953e9c9391 100644 --- a/arch/x86/mm/srat.c +++ b/arch/x86/mm/srat.c @@ -42,15 +42,25 @@ static __init inline int srat_disabled(void) return acpi_numa < 0; } -/* Callback for SLIT parsing */ +/* + * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for + * I/O localities since SRAT does not list them. I/O localities are + * not supported at this point. 
+ */ void __init acpi_numa_slit_init(struct acpi_table_slit *slit) { int i, j; - for (i = 0; i < slit->locality_count; i++) - for (j = 0; j < slit->locality_count; j++) + for (i = 0; i < slit->locality_count; i++) { + if (pxm_to_node(i) == NUMA_NO_NODE) + continue; + for (j = 0; j < slit->locality_count; j++) { + if (pxm_to_node(j) == NUMA_NO_NODE) + continue; numa_set_distance(pxm_to_node(i), pxm_to_node(j), slit->entry[slit->locality_count * i + j]); + } + } } /* Callback for Proximity Domain -> x2APIC mapping */ diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index ae699b3bbac..dd8dda167a2 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -103,7 +103,7 @@ static void flush_tlb_func(void *info) if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm)) return; - count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED); + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { if (f->flush_end == TLB_FLUSH_ALL) local_flush_tlb(); @@ -131,7 +131,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask, info.flush_start = start; info.flush_end = end; - count_vm_event(NR_TLB_REMOTE_FLUSH); + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); if (is_uv_system()) { unsigned int cpu; @@ -151,44 +151,19 @@ void flush_tlb_current_task(void) preempt_disable(); - count_vm_event(NR_TLB_LOCAL_FLUSH_ALL); + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); local_flush_tlb(); if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL); preempt_enable(); } -/* - * It can find out the THP large page, or - * HUGETLB page in tlb_flush when THP disabled - */ -static inline unsigned long has_large_page(struct mm_struct *mm, - unsigned long start, unsigned long end) -{ - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - unsigned long addr = ALIGN(start, HPAGE_SIZE); - for (; addr < end; addr += HPAGE_SIZE) { - pgd = pgd_offset(mm, addr); - if (likely(!pgd_none(*pgd))) { - pud = pud_offset(pgd, addr); - if (likely(!pud_none(*pud))) { - pmd = pmd_offset(pud, addr); - if (likely(!pmd_none(*pmd))) - if (pmd_large(*pmd)) - return addr; - } - } - } - return 0; -} - void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long vmflag) { unsigned long addr; unsigned act_entries, tlb_entries = 0; + unsigned long nr_base_pages; preempt_disable(); if (current->active_mm != mm) @@ -210,21 +185,20 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, tlb_entries = tlb_lli_4k[ENTRIES]; else tlb_entries = tlb_lld_4k[ENTRIES]; + /* Assume all of TLB entries was occupied by this task */ - act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm; + act_entries = tlb_entries >> tlb_flushall_shift; + act_entries = mm->total_vm > act_entries ? 
act_entries : mm->total_vm; + nr_base_pages = (end - start) >> PAGE_SHIFT; /* tlb_flushall_shift is on balance point, details in commit log */ - if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift) { - count_vm_event(NR_TLB_LOCAL_FLUSH_ALL); + if (nr_base_pages > act_entries) { + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); local_flush_tlb(); } else { - if (has_large_page(mm, start, end)) { - local_flush_tlb(); - goto flush_all; - } /* flush range by one by one 'invlpg' */ for (addr = start; addr < end; addr += PAGE_SIZE) { - count_vm_event(NR_TLB_LOCAL_FLUSH_ONE); + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); __flush_tlb_single(addr); } @@ -262,7 +236,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start) static void do_flush_tlb_all(void *info) { - count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED); + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); __flush_tlb_all(); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY) leave_mm(smp_processor_id()); @@ -270,7 +244,7 @@ static void do_flush_tlb_all(void *info) void flush_tlb_all(void) { - count_vm_event(NR_TLB_REMOTE_FLUSH); + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); on_each_cpu(do_flush_tlb_all, NULL, 1); } diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S index 877b9a1b215..01495755701 100644 --- a/arch/x86/net/bpf_jit.S +++ b/arch/x86/net/bpf_jit.S @@ -140,7 +140,7 @@ bpf_slow_path_byte_msh: push %r9; \ push SKBDATA; \ /* rsi already has offset */ \ - mov $SIZE,%ecx; /* size */ \ + mov $SIZE,%edx; /* size */ \ call bpf_internal_load_pointer_neg_helper; \ test %rax,%rax; \ pop SKBDATA; \ diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c index 7145ec63c52..f15103dff4b 100644 --- a/arch/x86/platform/efi/efi-bgrt.c +++ b/arch/x86/platform/efi/efi-bgrt.c @@ -42,14 +42,15 @@ void __init efi_bgrt_init(void) if (bgrt_tab->header.length < sizeof(*bgrt_tab)) return; - if (bgrt_tab->version != 1) + if (bgrt_tab->version != 1 || bgrt_tab->status != 1) return; if (bgrt_tab->image_type != 0 || !bgrt_tab->image_address) return; image = efi_lookup_mapped_addr(bgrt_tab->image_address); if (!image) { - image = ioremap(bgrt_tab->image_address, sizeof(bmp_header)); + image = early_memremap(bgrt_tab->image_address, + sizeof(bmp_header)); ioremapped = true; if (!image) return; @@ -57,7 +58,7 @@ void __init efi_bgrt_init(void) memcpy_fromio(&bmp_header, image, sizeof(bmp_header)); if (ioremapped) - iounmap(image); + early_iounmap(image, sizeof(bmp_header)); bgrt_image_size = bmp_header.size; bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL); @@ -65,7 +66,8 @@ void __init efi_bgrt_init(void) return; if (ioremapped) { - image = ioremap(bgrt_tab->image_address, bmp_header.size); + image = early_memremap(bgrt_tab->image_address, + bmp_header.size); if (!image) { kfree(bgrt_image); bgrt_image = NULL; @@ -75,5 +77,5 @@ void __init efi_bgrt_init(void) memcpy_fromio(bgrt_image, image, bgrt_image_size); if (ioremapped) - iounmap(image); + early_iounmap(image, bmp_header.size); } diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index d62ec87a2b2..b97acecf3fd 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -52,6 +52,7 @@ #include <asm/tlbflush.h> #include <asm/x86_init.h> #include <asm/rtc.h> +#include <asm/uv/uv.h> #define EFI_DEBUG @@ -792,7 +793,7 @@ void __init efi_set_executable(efi_memory_desc_t *md, bool executable) set_memory_nx(addr, npages); } -static void __init runtime_code_page_mkexec(void) +void __init 
runtime_code_page_mkexec(void) { efi_memory_desc_t *md; void *p; @@ -1069,8 +1070,7 @@ void __init efi_enter_virtual_mode(void) efi.update_capsule = virt_efi_update_capsule; efi.query_capsule_caps = virt_efi_query_capsule_caps; - if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX)) - runtime_code_page_mkexec(); + efi_runtime_mkexec(); kfree(new_memmap); @@ -1211,3 +1211,22 @@ static int __init parse_efi_cmdline(char *str) return 0; } early_param("efi", parse_efi_cmdline); + +void __init efi_apply_memmap_quirks(void) +{ + /* + * Once setup is done earlier, unmap the EFI memory map on mismatched + * firmware/kernel architectures since there is no support for runtime + * services. + */ + if (!efi_is_native()) { + pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n"); + efi_unmap_memmap(); + } + + /* + * UV doesn't support the new EFI pagetable mapping yet. + */ + if (is_uv_system()) + set_bit(EFI_OLD_MEMMAP, &x86_efi_facility); +} diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index 249b183cf41..0b74cdf7f81 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c @@ -77,3 +77,9 @@ void efi_call_phys_epilog(void) local_irq_restore(efi_rt_eflags); } + +void __init efi_runtime_mkexec(void) +{ + if (__supported_pte_mask & _PAGE_NX) + runtime_code_page_mkexec(); +} diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 6284f158a47..0c2a234fef1 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -233,3 +233,12 @@ void __init parse_efi_setup(u64 phys_addr, u32 data_len) { efi_setup = phys_addr + sizeof(struct setup_data); } + +void __init efi_runtime_mkexec(void) +{ + if (!efi_enabled(EFI_OLD_MEMMAP)) + return; + + if (__supported_pte_mask & _PAGE_NX) + runtime_code_page_mkexec(); +} diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h index 7d01b8c56c0..cc04e67bfd0 100644 --- a/arch/x86/um/asm/barrier.h +++ b/arch/x86/um/asm/barrier.h @@ -40,11 +40,7 @@ #define smp_rmb() barrier() #endif /* CONFIG_X86_PPRO_FENCE */ -#ifdef CONFIG_X86_OOSTORE -#define smp_wmb() wmb() -#else /* CONFIG_X86_OOSTORE */ #define smp_wmb() barrier() -#endif /* CONFIG_X86_OOSTORE */ #define smp_read_barrier_depends() read_barrier_depends() #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index a4d7b647867..201d09a7c46 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1473,6 +1473,18 @@ static void xen_pvh_set_cr_flags(int cpu) * X86_CR0_TS, X86_CR0_PE, X86_CR0_ET are set by Xen for HVM guests * (which PVH shared codepaths), while X86_CR0_PG is for PVH. */ write_cr0(read_cr0() | X86_CR0_MP | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM); + + if (!cpu) + return; + /* + * For BSP, PSE PGE are set in probe_page_size_mask(), for APs + * set them here. For all, OSFXSR OSXMMEXCPT are set in fpu_init. + */ + if (cpu_has_pse) + set_in_cr4(X86_CR4_PSE); + + if (cpu_has_pge) + set_in_cr4(X86_CR4_PGE); } /* diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 2423ef04ffe..256282e7888 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -365,7 +365,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, /* Assume pteval_t is equivalent to all the other *val_t types. 
*/ static pteval_t pte_mfn_to_pfn(pteval_t val) { - if (val & _PAGE_PRESENT) { + if (pteval_present(val)) { unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; unsigned long pfn = mfn_to_pfn(mfn); @@ -381,7 +381,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val) static pteval_t pte_pfn_to_mfn(pteval_t val) { - if (val & _PAGE_PRESENT) { + if (pteval_present(val)) { unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; pteval_t flags = val & PTE_FLAGS_MASK; unsigned long mfn; diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 8009acbe41e..696c694986d 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -899,6 +899,13 @@ int m2p_add_override(unsigned long mfn, struct page *page, "m2p_add_override: pfn %lx not mapped", pfn)) return -EINVAL; } + WARN_ON(PagePrivate(page)); + SetPagePrivate(page); + set_page_private(page, mfn); + page->index = pfn_to_mfn(pfn); + + if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) + return -ENOMEM; if (kmap_op != NULL) { if (!PageHighMem(page)) { @@ -937,16 +944,19 @@ int m2p_add_override(unsigned long mfn, struct page *page, } EXPORT_SYMBOL_GPL(m2p_add_override); int m2p_remove_override(struct page *page, - struct gnttab_map_grant_ref *kmap_op, - unsigned long mfn) + struct gnttab_map_grant_ref *kmap_op) { unsigned long flags; + unsigned long mfn; unsigned long pfn; unsigned long uninitialized_var(address); unsigned level; pte_t *ptep = NULL; pfn = page_to_pfn(page); + mfn = get_phys_to_machine(pfn); + if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) + return -EINVAL; if (!PageHighMem(page)) { address = (unsigned long)__va(pfn << PAGE_SHIFT); @@ -960,7 +970,10 @@ int m2p_remove_override(struct page *page, spin_lock_irqsave(&m2p_override_lock, flags); list_del(&page->lru); spin_unlock_irqrestore(&m2p_override_lock, flags); + WARN_ON(!PagePrivate(page)); + ClearPagePrivate(page); + set_phys_to_machine(pfn, page->index); if (kmap_op != NULL) { if (!PageHighMem(page)) { struct multicall_space mcs; diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index ba56e11cbf7..c87ae7c6e5f 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -20,6 +20,7 @@ config XTENSA select HAVE_FUNCTION_TRACER select HAVE_IRQ_TIME_ACCOUNTING select HAVE_PERF_EVENTS + select COMMON_CLK help Xtensa processors are 32-bit RISC machines designed by Tensilica primarily for embedded systems. 
These processors are both @@ -80,7 +81,6 @@ choice config XTENSA_VARIANT_FSF bool "fsf - default (not generic) configuration" select MMU - select HAVE_XTENSA_GPIO32 config XTENSA_VARIANT_DC232B bool "dc232b - Diamond 232L Standard Core Rev.B (LE)" @@ -135,7 +135,6 @@ config HAVE_SMP config SMP bool "Enable Symmetric multi-processing support" depends on HAVE_SMP - select USE_GENERIC_SMP_HELPERS select GENERIC_SMP_IDLE_THREAD help Enabled SMP Software; allows more than one CPU/CORE diff --git a/arch/xtensa/boot/dts/xtfpga.dtsi b/arch/xtensa/boot/dts/xtfpga.dtsi index 46b4f5eab42..e7370b11348 100644 --- a/arch/xtensa/boot/dts/xtfpga.dtsi +++ b/arch/xtensa/boot/dts/xtfpga.dtsi @@ -35,6 +35,13 @@ interrupt-controller; }; + clocks { + osc: main-oscillator { + #clock-cells = <0>; + compatible = "fixed-clock"; + }; + }; + serial0: serial@fd050020 { device_type = "serial"; compatible = "ns16550a"; @@ -42,9 +49,7 @@ reg = <0xfd050020 0x20>; reg-shift = <2>; interrupts = <0 1>; /* external irq 0 */ - /* Filled in by platform_setup from FPGA register - * clock-frequency = <100000000>; - */ + clocks = <&osc>; }; enet0: ethoc@fd030000 { @@ -52,5 +57,6 @@ reg = <0xfd030000 0x4000 0xfd800000 0x4000>; interrupts = <1 1>; /* external irq 1 */ local-mac-address = [00 50 c2 13 6f 00]; + clocks = <&osc>; }; }; diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h index 2a042d430c2..74944207167 100644 --- a/arch/xtensa/include/asm/io.h +++ b/arch/xtensa/include/asm/io.h @@ -25,7 +25,7 @@ #ifdef CONFIG_MMU -#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF +#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF) extern unsigned long xtensa_kio_paddr; static inline unsigned long xtensa_get_kio_paddr(void) diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h index 8c194f6af45..677bfcf4ee5 100644 --- a/arch/xtensa/include/asm/traps.h +++ b/arch/xtensa/include/asm/traps.h @@ -23,25 +23,37 @@ void secondary_trap_init(void); static inline void spill_registers(void) { - +#if XCHAL_NUM_AREGS > 16 __asm__ __volatile__ ( - "movi a14, "__stringify((1 << PS_EXCM_BIT) | LOCKLEVEL)"\n\t" - "mov a12, a0\n\t" - "rsr a13, sar\n\t" - "xsr a14, ps\n\t" - "movi a0, _spill_registers\n\t" - "rsync\n\t" - "callx0 a0\n\t" - "mov a0, a12\n\t" - "wsr a13, sar\n\t" - "wsr a14, ps\n\t" - : : -#if defined(CONFIG_FRAME_POINTER) - : "a2", "a3", "a4", "a11", "a12", "a13", "a14", "a15", + " call12 1f\n" + " _j 2f\n" + " retw\n" + " .align 4\n" + "1:\n" + " _entry a1, 48\n" + " addi a12, a0, 3\n" +#if XCHAL_NUM_AREGS > 32 + " .rept (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n" + " _entry a1, 48\n" + " mov a12, a0\n" + " .endr\n" +#endif + " _entry a1, 48\n" +#if XCHAL_NUM_AREGS % 12 == 0 + " mov a8, a8\n" +#elif XCHAL_NUM_AREGS % 12 == 4 + " mov a12, a12\n" +#elif XCHAL_NUM_AREGS % 12 == 8 + " mov a4, a4\n" +#endif + " retw\n" + "2:\n" + : : : "a12", "a13", "memory"); #else - : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", + __asm__ __volatile__ ( + " mov a12, a12\n" + : : : "memory"); #endif - "memory"); } #endif /* _XTENSA_TRAPS_H */ diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h index 5791b45d5a5..f74ddfbb92e 100644 --- a/arch/xtensa/include/asm/vectors.h +++ b/arch/xtensa/include/asm/vectors.h @@ -25,7 +25,7 @@ #define XCHAL_KIO_DEFAULT_PADDR 0xf0000000 #define XCHAL_KIO_SIZE 0x10000000 -#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF +#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && 
defined(CONFIG_OF) #define XCHAL_KIO_PADDR xtensa_get_kio_paddr() #else #define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h index 51940fec699..b9395529f02 100644 --- a/arch/xtensa/include/uapi/asm/unistd.h +++ b/arch/xtensa/include/uapi/asm/unistd.h @@ -734,7 +734,12 @@ __SYSCALL(332, sys_finit_module, 3) #define __NR_accept4 333 __SYSCALL(333, sys_accept4, 4) -#define __NR_syscall_count 334 +#define __NR_sched_setattr 334 +__SYSCALL(334, sys_sched_setattr, 2) +#define __NR_sched_getattr 335 +__SYSCALL(335, sys_sched_getattr, 3) + +#define __NR_syscall_count 336 /* * sysxtensa syscall handler diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 21dbe6bdb8e..ef7f4990722 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -1081,196 +1081,53 @@ ENTRY(fast_syscall_spill_registers) rsr a0, sar s32i a3, a2, PT_AREG3 - s32i a4, a2, PT_AREG4 - s32i a0, a2, PT_AREG5 # store SAR to PT_AREG5 + s32i a0, a2, PT_SAR - /* The spill routine might clobber a7, a11, and a15. */ + /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */ + s32i a4, a2, PT_AREG4 s32i a7, a2, PT_AREG7 + s32i a8, a2, PT_AREG8 s32i a11, a2, PT_AREG11 + s32i a12, a2, PT_AREG12 s32i a15, a2, PT_AREG15 - call0 _spill_registers # destroys a3, a4, and SAR - - /* Advance PC, restore registers and SAR, and return from exception. */ - - l32i a3, a2, PT_AREG5 - l32i a4, a2, PT_AREG4 - l32i a0, a2, PT_AREG0 - wsr a3, sar - l32i a3, a2, PT_AREG3 - - /* Restore clobbered registers. */ - - l32i a7, a2, PT_AREG7 - l32i a11, a2, PT_AREG11 - l32i a15, a2, PT_AREG15 - - movi a2, 0 - rfe - -ENDPROC(fast_syscall_spill_registers) - -/* Fixup handler. - * - * We get here if the spill routine causes an exception, e.g. tlb miss. - * We basically restore WINDOWBASE and WINDOWSTART to the condition when - * we entered the spill routine and jump to the user exception handler. - * - * a0: value of depc, original value in depc - * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE - * a3: exctable, original value in excsave1 - */ - -ENTRY(fast_syscall_spill_registers_fixup) - - rsr a2, windowbase # get current windowbase (a2 is saved) - xsr a0, depc # restore depc and a0 - ssl a2 # set shift (32 - WB) - - /* We need to make sure the current registers (a0-a3) are preserved. - * To do this, we simply set the bit for the current window frame - * in WS, so that the exception handlers save them to the task stack. - */ - - xsr a3, excsave1 # get spill-mask - slli a3, a3, 1 # shift left by one - - slli a2, a3, 32-WSBITS - src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy...... - wsr a2, windowstart # set corrected windowstart - - srli a3, a3, 1 - rsr a2, excsave1 - l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2 - xsr a2, excsave1 - s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3 - l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task) - xsr a2, excsave1 - - /* Return to the original (user task) WINDOWBASE. - * We leave the following frame behind: - * a0, a1, a2 same - * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE) - * depc: depc (we have to return to that address) - * excsave_1: exctable - */ - - wsr a3, windowbase - rsync - - /* We are now in the original frame when we entered _spill_registers: - * a0: return address - * a1: used, stack pointer - * a2: kernel stack pointer - * a3: available - * depc: exception address - * excsave: exctable - * Note: This frame might be the same as above. - */ - - /* Setup stack pointer. 
*/ - - addi a2, a2, -PT_USER_SIZE - s32i a0, a2, PT_AREG0 - - /* Make sure we return to this fixup handler. */ - - movi a3, fast_syscall_spill_registers_fixup_return - s32i a3, a2, PT_DEPC # setup depc - - /* Jump to the exception handler. */ - - rsr a3, excsave1 - rsr a0, exccause - addx4 a0, a0, a3 # find entry in table - l32i a0, a0, EXC_TABLE_FAST_USER # load handler - l32i a3, a3, EXC_TABLE_DOUBLE_SAVE - jx a0 - -ENDPROC(fast_syscall_spill_registers_fixup) - -ENTRY(fast_syscall_spill_registers_fixup_return) - - /* When we return here, all registers have been restored (a2: DEPC) */ - - wsr a2, depc # exception address - - /* Restore fixup handler. */ - - rsr a2, excsave1 - s32i a3, a2, EXC_TABLE_DOUBLE_SAVE - movi a3, fast_syscall_spill_registers_fixup - s32i a3, a2, EXC_TABLE_FIXUP - rsr a3, windowbase - s32i a3, a2, EXC_TABLE_PARAM - l32i a2, a2, EXC_TABLE_KSTK - - /* Load WB at the time the exception occurred. */ - - rsr a3, sar # WB is still in SAR - neg a3, a3 - wsr a3, windowbase - rsync - - rsr a3, excsave1 - l32i a3, a3, EXC_TABLE_DOUBLE_SAVE - - rfde - -ENDPROC(fast_syscall_spill_registers_fixup_return) - -/* - * spill all registers. - * - * This is not a real function. The following conditions must be met: - * - * - must be called with call0. - * - uses a3, a4 and SAR. - * - the last 'valid' register of each frame are clobbered. - * - the caller must have registered a fixup handler - * (or be inside a critical section) - * - PS_EXCM must be set (PS_WOE cleared?) - */ - -ENTRY(_spill_registers) - /* * Rotate ws so that the current windowbase is at bit 0. * Assume ws = xxxwww1yy (www1 current window frame). * Rotate ws right so that a4 = yyxxxwww1. */ - rsr a4, windowbase + rsr a0, windowbase rsr a3, windowstart # a3 = xxxwww1yy - ssr a4 # holds WB - slli a4, a3, WSBITS - or a3, a3, a4 # a3 = xxxwww1yyxxxwww1yy + ssr a0 # holds WB + slli a0, a3, WSBITS + or a3, a3, a0 # a3 = xxxwww1yyxxxwww1yy srl a3, a3 # a3 = 00xxxwww1yyxxxwww1 /* We are done if there are no more than the current register frame. */ extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww - movi a4, (1 << (WSBITS-1)) + movi a0, (1 << (WSBITS-1)) _beqz a3, .Lnospill # only one active frame? jump /* We want 1 at the top, so that we return to the current windowbase */ - or a3, a3, a4 # 1yyxxxwww + or a3, a3, a0 # 1yyxxxwww /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */ wsr a3, windowstart # save shifted windowstart - neg a4, a3 - and a3, a4, a3 # first bit set from right: 000010000 + neg a0, a3 + and a3, a0, a3 # first bit set from right: 000010000 - ffs_ws a4, a3 # a4: shifts to skip empty frames + ffs_ws a0, a3 # a0: shifts to skip empty frames movi a3, WSBITS - sub a4, a3, a4 # WSBITS-a4:number of 0-bits from right - ssr a4 # save in SAR for later. + sub a0, a3, a0 # WSBITS-a0:number of 0-bits from right + ssr a0 # save in SAR for later. rsr a3, windowbase - add a3, a3, a4 + add a3, a3, a0 wsr a3, windowbase rsync @@ -1285,22 +1142,6 @@ ENTRY(_spill_registers) * we have to save 4,8. or 12 registers. */ - _bbsi.l a3, 1, .Lc4 - _bbsi.l a3, 2, .Lc8 - - /* Special case: we have a call12-frame starting at a4. */ - - _bbci.l a3, 3, .Lc12 # bit 3 shouldn't be zero! 
(Jump to Lc12 first) - - s32e a4, a1, -16 # a1 is valid with an empty spill area - l32e a4, a5, -12 - s32e a8, a4, -48 - mov a8, a4 - l32e a4, a1, -16 - j .Lc12c - -.Lnospill: - ret .Lloop: _bbsi.l a3, 1, .Lc4 _bbci.l a3, 2, .Lc12 @@ -1314,20 +1155,10 @@ ENTRY(_spill_registers) s32e a9, a4, -28 s32e a10, a4, -24 s32e a11, a4, -20 - srli a11, a3, 2 # shift windowbase by 2 rotw 2 _bnei a3, 1, .Lloop - -.Lexit: /* Done. Do the final rotation, set WS, and return. */ - - rotw 1 - rsr a3, windowbase - ssl a3 - movi a3, 1 - sll a3, a3 - wsr a3, windowstart - ret + j .Lexit .Lc4: s32e a4, a9, -16 s32e a5, a9, -12 @@ -1343,11 +1174,11 @@ ENTRY(_spill_registers) /* 12-register frame (call12) */ - l32e a2, a5, -12 - s32e a8, a2, -48 - mov a8, a2 + l32e a0, a5, -12 + s32e a8, a0, -48 + mov a8, a0 -.Lc12c: s32e a9, a8, -44 + s32e a9, a8, -44 s32e a10, a8, -40 s32e a11, a8, -36 s32e a12, a8, -32 @@ -1367,30 +1198,54 @@ ENTRY(_spill_registers) */ rotw 1 - mov a5, a13 + mov a4, a13 rotw -1 - s32e a4, a9, -16 - s32e a5, a9, -12 - s32e a6, a9, -8 - s32e a7, a9, -4 + s32e a4, a8, -16 + s32e a5, a8, -12 + s32e a6, a8, -8 + s32e a7, a8, -4 rotw 3 _beqi a3, 1, .Lexit j .Lloop -.Linvalid_mask: +.Lexit: - /* We get here because of an unrecoverable error in the window - * registers. If we are in user space, we kill the application, - * however, this condition is unrecoverable in kernel space. - */ + /* Done. Do the final rotation and set WS */ + + rotw 1 + rsr a3, windowbase + ssl a3 + movi a3, 1 + sll a3, a3 + wsr a3, windowstart +.Lnospill: + + /* Advance PC, restore registers and SAR, and return from exception. */ + + l32i a3, a2, PT_SAR + l32i a0, a2, PT_AREG0 + wsr a3, sar + l32i a3, a2, PT_AREG3 - rsr a0, ps - _bbci.l a0, PS_UM_BIT, 1f + /* Restore clobbered registers. */ - /* User space: Setup a dummy frame and kill application. + l32i a4, a2, PT_AREG4 + l32i a7, a2, PT_AREG7 + l32i a8, a2, PT_AREG8 + l32i a11, a2, PT_AREG11 + l32i a12, a2, PT_AREG12 + l32i a15, a2, PT_AREG15 + + movi a2, 0 + rfe + +.Linvalid_mask: + + /* We get here because of an unrecoverable error in the window + * registers, so set up a dummy frame and kill the user application. * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer. */ @@ -1414,14 +1269,136 @@ ENTRY(_spill_registers) movi a4, do_exit callx4 a4 -1: /* Kernel space: PANIC! */ + /* shouldn't return, so panic */ wsr a0, excsave1 movi a0, unrecoverable_exception callx0 a0 # should not return 1: j 1b -ENDPROC(_spill_registers) + +ENDPROC(fast_syscall_spill_registers) + +/* Fixup handler. + * + * We get here if the spill routine causes an exception, e.g. tlb miss. + * We basically restore WINDOWBASE and WINDOWSTART to the condition when + * we entered the spill routine and jump to the user exception handler. + * + * Note that we only need to restore the bits in windowstart that have not + * been spilled yet by the _spill_register routine. Luckily, a3 contains a + * rotated windowstart with only those bits set for frames that haven't been + * spilled yet. Because a3 is rotated such that bit 0 represents the register + * frame for the current windowbase - 1, we need to rotate a3 left by the + * value of the current windowbase + 1 and move it to windowstart. 
+ * + * a0: value of depc, original value in depc + * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE + * a3: exctable, original value in excsave1 + */ + +ENTRY(fast_syscall_spill_registers_fixup) + + rsr a2, windowbase # get current windowbase (a2 is saved) + xsr a0, depc # restore depc and a0 + ssl a2 # set shift (32 - WB) + + /* We need to make sure the current registers (a0-a3) are preserved. + * To do this, we simply set the bit for the current window frame + * in WS, so that the exception handlers save them to the task stack. + * + * Note: we use a3 to set the windowbase, so we take a special care + * of it, saving it in the original _spill_registers frame across + * the exception handler call. + */ + + xsr a3, excsave1 # get spill-mask + slli a3, a3, 1 # shift left by one + addi a3, a3, 1 # set the bit for the current window frame + + slli a2, a3, 32-WSBITS + src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy...... + wsr a2, windowstart # set corrected windowstart + + srli a3, a3, 1 + rsr a2, excsave1 + l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2 + xsr a2, excsave1 + s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3 + l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task) + xsr a2, excsave1 + + /* Return to the original (user task) WINDOWBASE. + * We leave the following frame behind: + * a0, a1, a2 same + * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE) + * depc: depc (we have to return to that address) + * excsave_1: exctable + */ + + wsr a3, windowbase + rsync + + /* We are now in the original frame when we entered _spill_registers: + * a0: return address + * a1: used, stack pointer + * a2: kernel stack pointer + * a3: available + * depc: exception address + * excsave: exctable + * Note: This frame might be the same as above. + */ + + /* Setup stack pointer. */ + + addi a2, a2, -PT_USER_SIZE + s32i a0, a2, PT_AREG0 + + /* Make sure we return to this fixup handler. */ + + movi a3, fast_syscall_spill_registers_fixup_return + s32i a3, a2, PT_DEPC # setup depc + + /* Jump to the exception handler. */ + + rsr a3, excsave1 + rsr a0, exccause + addx4 a0, a0, a3 # find entry in table + l32i a0, a0, EXC_TABLE_FAST_USER # load handler + l32i a3, a3, EXC_TABLE_DOUBLE_SAVE + jx a0 + +ENDPROC(fast_syscall_spill_registers_fixup) + +ENTRY(fast_syscall_spill_registers_fixup_return) + + /* When we return here, all registers have been restored (a2: DEPC) */ + + wsr a2, depc # exception address + + /* Restore fixup handler. */ + + rsr a2, excsave1 + s32i a3, a2, EXC_TABLE_DOUBLE_SAVE + movi a3, fast_syscall_spill_registers_fixup + s32i a3, a2, EXC_TABLE_FIXUP + rsr a3, windowbase + s32i a3, a2, EXC_TABLE_PARAM + l32i a2, a2, EXC_TABLE_KSTK + + /* Load WB at the time the exception occurred. */ + + rsr a3, sar # WB is still in SAR + neg a3, a3 + wsr a3, windowbase + rsync + + rsr a3, excsave1 + l32i a3, a3, EXC_TABLE_DOUBLE_SAVE + + rfde + +ENDPROC(fast_syscall_spill_registers_fixup_return) #ifdef CONFIG_MMU /* @@ -1794,6 +1771,43 @@ ENTRY(system_call) ENDPROC(system_call) +/* + * Spill live registers on the kernel stack macro. 
+ * + * Entry condition: ps.woe is set, ps.excm is cleared + * Exit condition: windowstart has single bit set + * May clobber: a12, a13 + */ + .macro spill_registers_kernel + +#if XCHAL_NUM_AREGS > 16 + call12 1f + _j 2f + retw + .align 4 +1: + _entry a1, 48 + addi a12, a0, 3 +#if XCHAL_NUM_AREGS > 32 + .rept (XCHAL_NUM_AREGS - 32) / 12 + _entry a1, 48 + mov a12, a0 + .endr +#endif + _entry a1, 48 +#if XCHAL_NUM_AREGS % 12 == 0 + mov a8, a8 +#elif XCHAL_NUM_AREGS % 12 == 4 + mov a12, a12 +#elif XCHAL_NUM_AREGS % 12 == 8 + mov a4, a4 +#endif + retw +2: +#else + mov a12, a12 +#endif + .endm /* * Task switch. @@ -1806,21 +1820,20 @@ ENTRY(_switch_to) entry a1, 16 - mov a12, a2 # preserve 'prev' (a2) - mov a13, a3 # and 'next' (a3) + mov a10, a2 # preserve 'prev' (a2) + mov a11, a3 # and 'next' (a3) l32i a4, a2, TASK_THREAD_INFO l32i a5, a3, TASK_THREAD_INFO - save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER + save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER - s32i a0, a12, THREAD_RA # save return address - s32i a1, a12, THREAD_SP # save stack pointer + s32i a0, a10, THREAD_RA # save return address + s32i a1, a10, THREAD_SP # save stack pointer /* Disable ints while we manipulate the stack pointer. */ - movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL - xsr a14, ps + rsil a14, LOCKLEVEL rsr a3, excsave1 rsync s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */ @@ -1835,7 +1848,7 @@ ENTRY(_switch_to) /* Flush register file. */ - call0 _spill_registers # destroys a3, a4, and SAR + spill_registers_kernel /* Set kernel stack (and leave critical section) * Note: It's save to set it here. The stack will not be overwritten @@ -1851,13 +1864,13 @@ ENTRY(_switch_to) /* restore context of the task 'next' */ - l32i a0, a13, THREAD_RA # restore return address - l32i a1, a13, THREAD_SP # restore stack pointer + l32i a0, a11, THREAD_RA # restore return address + l32i a1, a11, THREAD_SP # restore stack pointer - load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER + load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER wsr a14, ps - mov a2, a12 # return 'prev' + mov a2, a10 # return 'prev' rsync retw diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 7d12af1317f..84fe931bb60 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -22,6 +22,7 @@ #include <linux/bootmem.h> #include <linux/kernel.h> #include <linux/percpu.h> +#include <linux/clk-provider.h> #include <linux/cpu.h> #include <linux/of_fdt.h> #include <linux/of_platform.h> @@ -276,6 +277,7 @@ void __init early_init_devtree(void *params) static int __init xtensa_device_probe(void) { + of_clk_init(NULL); of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); return 0; } diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index 08b769d3b3a..2a1823de69c 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c @@ -30,6 +30,7 @@ #include <asm/platform.h> unsigned long ccount_freq; /* ccount Hz */ +EXPORT_SYMBOL(ccount_freq); static cycle_t ccount_read(struct clocksource *cs) { diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S index cb8fd44caab..f9e1ec346e3 100644 --- a/arch/xtensa/kernel/vectors.S +++ b/arch/xtensa/kernel/vectors.S @@ -235,7 +235,7 @@ ENTRY(_DoubleExceptionVector) /* Check for overflow/underflow exception, jump if overflow. */ - _bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow + bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow /* * Restart window underflow exception. 
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index 74a60c7e085..80b33ed51f3 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c @@ -122,9 +122,7 @@ EXPORT_SYMBOL(insw); EXPORT_SYMBOL(insl); extern long common_exception_return; -extern long _spill_registers; EXPORT_SYMBOL(common_exception_return); -EXPORT_SYMBOL(_spill_registers); #ifdef CONFIG_FUNCTION_TRACER EXPORT_SYMBOL(_mcount); diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 479d7537a32..aff108df92d 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -90,7 +90,7 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist) /* - * Initialize the bootmem system and give it all the memory we have available. + * Initialize the bootmem system and give it all low memory we have available. */ void __init bootmem_init(void) @@ -142,9 +142,14 @@ void __init bootmem_init(void) /* Add all remaining memory pieces into the bootmem map */ - for (i=0; i<sysmem.nr_banks; i++) - free_bootmem(sysmem.bank[i].start, - sysmem.bank[i].end - sysmem.bank[i].start); + for (i = 0; i < sysmem.nr_banks; i++) { + if (sysmem.bank[i].start >> PAGE_SHIFT < max_low_pfn) { + unsigned long end = min(max_low_pfn << PAGE_SHIFT, + sysmem.bank[i].end); + free_bootmem(sysmem.bank[i].start, + end - sysmem.bank[i].start); + } + } } diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c index 36ec171698b..861203e958d 100644 --- a/arch/xtensa/mm/mmu.c +++ b/arch/xtensa/mm/mmu.c @@ -39,7 +39,7 @@ void init_mmu(void) set_itlbcfg_register(0); set_dtlbcfg_register(0); #endif -#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF +#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF) /* * Update the IO area mapping in case xtensa_kio_paddr has changed */ diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c index 800227862fe..57fd08b36f5 100644 --- a/arch/xtensa/platforms/xtfpga/setup.c +++ b/arch/xtensa/platforms/xtfpga/setup.c @@ -135,11 +135,11 @@ static void __init update_local_mac(struct device_node *node) static int __init machine_setup(void) { - struct device_node *serial; + struct device_node *clock; struct device_node *eth = NULL; - for_each_compatible_node(serial, NULL, "ns16550a") - update_clock_frequency(serial); + for_each_node_by_name(clock, "main-oscillator") + update_clock_frequency(clock); if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc"))) update_local_mac(eth); @@ -290,6 +290,7 @@ static int __init xtavnet_init(void) * knows whether they set it correctly on the DIP switches. 
*/ pr_info("XTFPGA: Ethernet MAC %pM\n", ethoc_pdata.hwaddr); + ethoc_pdata.eth_clkfreq = *(long *)XTFPGA_CLKFRQ_VADDR; return 0; } diff --git a/arch/xtensa/variants/fsf/include/variant/tie.h b/arch/xtensa/variants/fsf/include/variant/tie.h index bf4020116df..244cdea4dee 100644 --- a/arch/xtensa/variants/fsf/include/variant/tie.h +++ b/arch/xtensa/variants/fsf/include/variant/tie.h @@ -18,13 +18,6 @@ #define XCHAL_CP_MASK 0x00 /* bitmask of all CPs by ID */ #define XCHAL_CP_PORT_MASK 0x00 /* bitmask of only port CPs */ -/* Basic parameters of each coprocessor: */ -#define XCHAL_CP7_NAME "XTIOP" -#define XCHAL_CP7_IDENT XTIOP -#define XCHAL_CP7_SA_SIZE 0 /* size of state save area */ -#define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */ -#define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */ - /* Filler info for unassigned coprocessors, to simplify arrays etc: */ #define XCHAL_NCP_SA_SIZE 0 #define XCHAL_NCP_SA_ALIGN 1 @@ -42,6 +35,8 @@ #define XCHAL_CP5_SA_ALIGN 1 #define XCHAL_CP6_SA_SIZE 0 #define XCHAL_CP6_SA_ALIGN 1 +#define XCHAL_CP7_SA_SIZE 0 +#define XCHAL_CP7_SA_ALIGN 1 /* Save area for non-coprocessor optional and custom (TIE) state: */ #define XCHAL_NCP_SA_SIZE 0 |