author     Olof Johansson <olof@lixom.net>             2014-11-04 20:37:25 -0800
committer  Olof Johansson <olof@lixom.net>             2014-11-04 20:37:25 -0800
commit     83b3d538db83fe37e24b46befa699a4ae8c496f2 (patch)
tree       71141d9e170e9f489db186c640ef2a3abf7f1c18 /arch
parent     4257412db57900e43716d0b7ddd4f4a51e6ed2f4 (diff)
parent     89fbec5b97fbcf08db3a9cd93a340f21f95d38b8 (diff)
Merge tag 'imx-fixes-3.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux into fixes
Merge "ARM: imx: fixes for 3.18, 2nd round" from Shawn Guo:
"This is the second round of i.MX fixes for 3.18. The clk-vf610 fix is
relatively big, because it needs some adaption to the change made by
offending commit dc4805c2e78b (ARM: imx: remove ENABLE and BYPASS bits
from clk-pllv3 driver). And it should have been sent to you for earlier
-rc inclusion, but unfortunately it got delayed for some time because
Stefan wasn't aware of my email address change."
The i.MX fixes for 3.18, 2nd round:
- Fix a regression on the Vybrid platform caused by commit
dc4805c2e78b (ARM: imx: remove ENABLE and BYPASS bits from clk-pllv3
driver), which leaves the PLL clocks without the configuration they need
(a condensed sketch of the new clock-tree setup follows the diffstat below).
- Fix a regression in the i.MX defconfig files where the CONFIG_SPI
option was accidentally dropped.
* tag 'imx-fixes-3.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux: (460 commits)
ARM: imx: Fix the removal of CONFIG_SPI option
ARM: imx: clk-vf610: define PLL's clock tree
+ Linux 3.18-rc3
Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'arch')
54 files changed, 265 insertions, 329 deletions
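Most of the diff below is the clk-vf610 clock-tree rework. Condensed from those hunks (kernel clock-driver code, not a standalone buildable example), each PLL now follows the same registration pattern: a bypass-source mux, a real pllv3 clock, a bypass mux that is immediately pointed back at the PLL so the PLL is not bypassed, and a gated output that the PFDs and dividers reference. For PLL1 the sequence looks like this:

/* select the PLL reference: fast_clk_sel or the LVDS1 pad input */
clk[VF610_CLK_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", PLL1_CTRL, 14, 1,
                                             pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));

/* the PLL itself, a real pllv3 clock instead of the old fixed factor */
clk[VF610_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll1", "pll1_bypass_src", PLL1_CTRL, 0x1);

/* bypass mux: either the PLL output or the raw bypass source */
clk[VF610_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", PLL1_CTRL, 16, 1,
                                           pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels),
                                           CLK_SET_RATE_PARENT);

/* do not bypass the PLL initially */
clk_set_parent(clk[VF610_PLL1_BYPASS], clk[VF610_CLK_PLL1]);

/* gated output used by the PFDs, dividers and peripherals */
clk[VF610_CLK_PLL1_SYS] = imx_clk_gate("pll1_sys", "pll1_bypass", PLL1_CTRL, 13);
clk[VF610_CLK_PLL1_PFD1] = imx_clk_pfd("pll1_pfd1", "pll1_sys", PFD_PLL1_BASE, 0);

The same sequence repeats for PLL2 through PLL7, with the pllv3 type (GENERIC, USB, AV, ENET), control register and divider mask adjusted per PLL, which is why that hunk is relatively large.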
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 739fcf29c64..bc82a12d4c2 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -668,6 +668,8 @@ bank-width = <2>; pinctrl-names = "default"; pinctrl-0 = <ðernet_pins>; + power-gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>; /* gpio86 */ + reset-gpios = <&gpio6 4 GPIO_ACTIVE_HIGH>; /* gpio164 */ gpmc,device-width = <2>; gpmc,sync-clk-ps = <0>; gpmc,cs-on-ns = <0>; diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig index e688741c89a..e6b0007355f 100644 --- a/arch/arm/configs/imx_v4_v5_defconfig +++ b/arch/arm/configs/imx_v4_v5_defconfig @@ -97,6 +97,7 @@ CONFIG_SERIAL_IMX_CONSOLE=y # CONFIG_HW_RANDOM is not set CONFIG_I2C_CHARDEV=y CONFIG_I2C_IMX=y +CONFIG_SPI=y CONFIG_SPI_IMX=y CONFIG_SPI_SPIDEV=y CONFIG_GPIO_SYSFS=y diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index 8fca6e276b6..6790f1b3f3a 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig +++ b/arch/arm/configs/imx_v6_v7_defconfig @@ -158,6 +158,7 @@ CONFIG_I2C_CHARDEV=y CONFIG_I2C_ALGOPCF=m CONFIG_I2C_ALGOPCA=m CONFIG_I2C_IMX=y +CONFIG_SPI=y CONFIG_SPI_IMX=y CONFIG_GPIO_SYSFS=y CONFIG_GPIO_MC9S08DZ60=y diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h index 3aaa75cae90..705bb762067 100644 --- a/arch/arm/include/uapi/asm/unistd.h +++ b/arch/arm/include/uapi/asm/unistd.h @@ -412,6 +412,7 @@ #define __NR_seccomp (__NR_SYSCALL_BASE+383) #define __NR_getrandom (__NR_SYSCALL_BASE+384) #define __NR_memfd_create (__NR_SYSCALL_BASE+385) +#define __NR_bpf (__NR_SYSCALL_BASE+386) /* * The following SWIs are ARM private. diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 713e807621d..2d2d6087b9b 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -10,6 +10,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ +#include <linux/compiler.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/dma-mapping.h> @@ -39,10 +40,19 @@ * GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c * (http://gcc.gnu.org/PR8896) and incorrect structure * initialisation in fs/jffs2/erase.c + * GCC 4.8.0-4.8.2: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58854 + * miscompiles find_get_entry(), and can result in EXT3 and EXT4 + * filesystem corruption (possibly other FS too). */ +#ifdef __GNUC__ #if (__GNUC__ == 3 && __GNUC_MINOR__ < 3) #error Your compiler is too buggy; it is known to miscompile kernels. -#error Known good compilers: 3.3 +#error Known good compilers: 3.3, 4.x +#endif +#if GCC_VERSION >= 40800 && GCC_VERSION < 40803 +#error Your compiler is too buggy; it is known to miscompile kernels +#error and result in filesystem corruption and oopses. 
+#endif #endif int main(void) diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index 9f899d8fdcc..e51833f8cc3 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -395,6 +395,7 @@ CALL(sys_seccomp) CALL(sys_getrandom) /* 385 */ CALL(sys_memfd_create) + CALL(sys_bpf) #ifndef syscalls_counted .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls #define syscalls_counted diff --git a/arch/arm/mach-imx/clk-vf610.c b/arch/arm/mach-imx/clk-vf610.c index a1781847505..40963725459 100644 --- a/arch/arm/mach-imx/clk-vf610.c +++ b/arch/arm/mach-imx/clk-vf610.c @@ -58,8 +58,14 @@ #define PFD_PLL1_BASE (anatop_base + 0x2b0) #define PFD_PLL2_BASE (anatop_base + 0x100) #define PFD_PLL3_BASE (anatop_base + 0xf0) +#define PLL1_CTRL (anatop_base + 0x270) +#define PLL2_CTRL (anatop_base + 0x30) #define PLL3_CTRL (anatop_base + 0x10) +#define PLL4_CTRL (anatop_base + 0x70) +#define PLL5_CTRL (anatop_base + 0xe0) +#define PLL6_CTRL (anatop_base + 0xa0) #define PLL7_CTRL (anatop_base + 0x20) +#define ANA_MISC1 (anatop_base + 0x160) static void __iomem *anatop_base; static void __iomem *ccm_base; @@ -67,25 +73,34 @@ static void __iomem *ccm_base; /* sources for multiplexer clocks, this is used multiple times */ static const char *fast_sels[] = { "firc", "fxosc", }; static const char *slow_sels[] = { "sirc_32k", "sxosc", }; -static const char *pll1_sels[] = { "pll1_main", "pll1_pfd1", "pll1_pfd2", "pll1_pfd3", "pll1_pfd4", }; -static const char *pll2_sels[] = { "pll2_main", "pll2_pfd1", "pll2_pfd2", "pll2_pfd3", "pll2_pfd4", }; -static const char *sys_sels[] = { "fast_clk_sel", "slow_clk_sel", "pll2_pfd_sel", "pll2_main", "pll1_pfd_sel", "pll3_main", }; +static const char *pll1_sels[] = { "pll1_sys", "pll1_pfd1", "pll1_pfd2", "pll1_pfd3", "pll1_pfd4", }; +static const char *pll2_sels[] = { "pll2_bus", "pll2_pfd1", "pll2_pfd2", "pll2_pfd3", "pll2_pfd4", }; +static const char *pll_bypass_src_sels[] = { "fast_clk_sel", "lvds1_in", }; +static const char *pll1_bypass_sels[] = { "pll1", "pll1_bypass_src", }; +static const char *pll2_bypass_sels[] = { "pll2", "pll2_bypass_src", }; +static const char *pll3_bypass_sels[] = { "pll3", "pll3_bypass_src", }; +static const char *pll4_bypass_sels[] = { "pll4", "pll4_bypass_src", }; +static const char *pll5_bypass_sels[] = { "pll5", "pll5_bypass_src", }; +static const char *pll6_bypass_sels[] = { "pll6", "pll6_bypass_src", }; +static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", }; +static const char *sys_sels[] = { "fast_clk_sel", "slow_clk_sel", "pll2_pfd_sel", "pll2_bus", "pll1_pfd_sel", "pll3_usb_otg", }; static const char *ddr_sels[] = { "pll2_pfd2", "sys_sel", }; static const char *rmii_sels[] = { "enet_ext", "audio_ext", "enet_50m", "enet_25m", }; static const char *enet_ts_sels[] = { "enet_ext", "fxosc", "audio_ext", "usb", "enet_ts", "enet_25m", "enet_50m", }; -static const char *esai_sels[] = { "audio_ext", "mlb", "spdif_rx", "pll4_main_div", }; -static const char *sai_sels[] = { "audio_ext", "mlb", "spdif_rx", "pll4_main_div", }; +static const char *esai_sels[] = { "audio_ext", "mlb", "spdif_rx", "pll4_audio_div", }; +static const char *sai_sels[] = { "audio_ext", "mlb", "spdif_rx", "pll4_audio_div", }; static const char *nfc_sels[] = { "platform_bus", "pll1_pfd1", "pll3_pfd1", "pll3_pfd3", }; -static const char *qspi_sels[] = { "pll3_main", "pll3_pfd4", "pll2_pfd4", "pll1_pfd4", }; -static const char *esdhc_sels[] = { "pll3_main", "pll3_pfd3", "pll1_pfd3", "platform_bus", }; -static const char *dcu_sels[] = { 
"pll1_pfd2", "pll3_main", }; +static const char *qspi_sels[] = { "pll3_usb_otg", "pll3_pfd4", "pll2_pfd4", "pll1_pfd4", }; +static const char *esdhc_sels[] = { "pll3_usb_otg", "pll3_pfd3", "pll1_pfd3", "platform_bus", }; +static const char *dcu_sels[] = { "pll1_pfd2", "pll3_usb_otg", }; static const char *gpu_sels[] = { "pll2_pfd2", "pll3_pfd2", }; -static const char *vadc_sels[] = { "pll6_main_div", "pll3_main_div", "pll3_main", }; +static const char *vadc_sels[] = { "pll6_video_div", "pll3_usb_otg_div", "pll3_usb_otg", }; /* FTM counter clock source, not module clock */ static const char *ftm_ext_sels[] = {"sirc_128k", "sxosc", "fxosc_half", "audio_ext", }; static const char *ftm_fix_sels[] = { "sxosc", "ipg_bus", }; -static struct clk_div_table pll4_main_div_table[] = { + +static struct clk_div_table pll4_audio_div_table[] = { { .val = 0, .div = 1 }, { .val = 1, .div = 2 }, { .val = 2, .div = 6 }, @@ -120,6 +135,9 @@ static void __init vf610_clocks_init(struct device_node *ccm_node) clk[VF610_CLK_AUDIO_EXT] = imx_obtain_fixed_clock("audio_ext", 0); clk[VF610_CLK_ENET_EXT] = imx_obtain_fixed_clock("enet_ext", 0); + /* Clock source from external clock via LVDs PAD */ + clk[VF610_CLK_ANACLK1] = imx_obtain_fixed_clock("anaclk1", 0); + clk[VF610_CLK_FXOSC_HALF] = imx_clk_fixed_factor("fxosc_half", "fxosc", 1, 2); np = of_find_compatible_node(NULL, NULL, "fsl,vf610-anatop"); @@ -133,31 +151,63 @@ static void __init vf610_clocks_init(struct device_node *ccm_node) clk[VF610_CLK_SLOW_CLK_SEL] = imx_clk_mux("slow_clk_sel", CCM_CCSR, 4, 1, slow_sels, ARRAY_SIZE(slow_sels)); clk[VF610_CLK_FASK_CLK_SEL] = imx_clk_mux("fast_clk_sel", CCM_CCSR, 5, 1, fast_sels, ARRAY_SIZE(fast_sels)); - clk[VF610_CLK_PLL1_MAIN] = imx_clk_fixed_factor("pll1_main", "fast_clk_sel", 22, 1); - clk[VF610_CLK_PLL1_PFD1] = imx_clk_pfd("pll1_pfd1", "pll1_main", PFD_PLL1_BASE, 0); - clk[VF610_CLK_PLL1_PFD2] = imx_clk_pfd("pll1_pfd2", "pll1_main", PFD_PLL1_BASE, 1); - clk[VF610_CLK_PLL1_PFD3] = imx_clk_pfd("pll1_pfd3", "pll1_main", PFD_PLL1_BASE, 2); - clk[VF610_CLK_PLL1_PFD4] = imx_clk_pfd("pll1_pfd4", "pll1_main", PFD_PLL1_BASE, 3); - - clk[VF610_CLK_PLL2_MAIN] = imx_clk_fixed_factor("pll2_main", "fast_clk_sel", 22, 1); - clk[VF610_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1", "pll2_main", PFD_PLL2_BASE, 0); - clk[VF610_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2", "pll2_main", PFD_PLL2_BASE, 1); - clk[VF610_CLK_PLL2_PFD3] = imx_clk_pfd("pll2_pfd3", "pll2_main", PFD_PLL2_BASE, 2); - clk[VF610_CLK_PLL2_PFD4] = imx_clk_pfd("pll2_pfd4", "pll2_main", PFD_PLL2_BASE, 3); - - clk[VF610_CLK_PLL3_MAIN] = imx_clk_fixed_factor("pll3_main", "fast_clk_sel", 20, 1); - clk[VF610_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1", "pll3_main", PFD_PLL3_BASE, 0); - clk[VF610_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2", "pll3_main", PFD_PLL3_BASE, 1); - clk[VF610_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3", "pll3_main", PFD_PLL3_BASE, 2); - clk[VF610_CLK_PLL3_PFD4] = imx_clk_pfd("pll3_pfd4", "pll3_main", PFD_PLL3_BASE, 3); - - clk[VF610_CLK_PLL4_MAIN] = imx_clk_fixed_factor("pll4_main", "fast_clk_sel", 25, 1); - /* Enet pll: fixed 50Mhz */ - clk[VF610_CLK_PLL5_MAIN] = imx_clk_fixed_factor("pll5_main", "fast_clk_sel", 125, 6); - /* pll6: default 960Mhz */ - clk[VF610_CLK_PLL6_MAIN] = imx_clk_fixed_factor("pll6_main", "fast_clk_sel", 40, 1); - /* pll7: USB1 PLL at 480MHz */ - clk[VF610_CLK_PLL7_MAIN] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7_main", "fast_clk_sel", PLL7_CTRL, 0x2); + clk[VF610_CLK_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", PLL1_CTRL, 14, 1, 
pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + clk[VF610_CLK_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", PLL2_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + clk[VF610_CLK_PLL3_BYPASS_SRC] = imx_clk_mux("pll3_bypass_src", PLL3_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + clk[VF610_CLK_PLL4_BYPASS_SRC] = imx_clk_mux("pll4_bypass_src", PLL4_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + clk[VF610_CLK_PLL5_BYPASS_SRC] = imx_clk_mux("pll5_bypass_src", PLL5_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + clk[VF610_CLK_PLL6_BYPASS_SRC] = imx_clk_mux("pll6_bypass_src", PLL6_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + clk[VF610_CLK_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", PLL7_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + + clk[VF610_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll1", "pll1_bypass_src", PLL1_CTRL, 0x1); + clk[VF610_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", PLL2_CTRL, 0x1); + clk[VF610_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "pll3_bypass_src", PLL3_CTRL, 0x1); + clk[VF610_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", PLL4_CTRL, 0x7f); + clk[VF610_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll5", "pll5_bypass_src", PLL5_CTRL, 0x3); + clk[VF610_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_AV, "pll6", "pll6_bypass_src", PLL6_CTRL, 0x7f); + clk[VF610_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "pll7_bypass_src", PLL7_CTRL, 0x1); + + clk[VF610_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", PLL1_CTRL, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT); + clk[VF610_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", PLL2_CTRL, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT); + clk[VF610_PLL3_BYPASS] = imx_clk_mux_flags("pll3_bypass", PLL3_CTRL, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT); + clk[VF610_PLL4_BYPASS] = imx_clk_mux_flags("pll4_bypass", PLL4_CTRL, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT); + clk[VF610_PLL5_BYPASS] = imx_clk_mux_flags("pll5_bypass", PLL5_CTRL, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT); + clk[VF610_PLL6_BYPASS] = imx_clk_mux_flags("pll6_bypass", PLL6_CTRL, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT); + clk[VF610_PLL7_BYPASS] = imx_clk_mux_flags("pll7_bypass", PLL7_CTRL, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT); + + /* Do not bypass PLLs initially */ + clk_set_parent(clk[VF610_PLL1_BYPASS], clk[VF610_CLK_PLL1]); + clk_set_parent(clk[VF610_PLL2_BYPASS], clk[VF610_CLK_PLL2]); + clk_set_parent(clk[VF610_PLL3_BYPASS], clk[VF610_CLK_PLL3]); + clk_set_parent(clk[VF610_PLL4_BYPASS], clk[VF610_CLK_PLL4]); + clk_set_parent(clk[VF610_PLL5_BYPASS], clk[VF610_CLK_PLL5]); + clk_set_parent(clk[VF610_PLL6_BYPASS], clk[VF610_CLK_PLL6]); + clk_set_parent(clk[VF610_PLL7_BYPASS], clk[VF610_CLK_PLL7]); + + clk[VF610_CLK_PLL1_SYS] = imx_clk_gate("pll1_sys", "pll1_bypass", PLL1_CTRL, 13); + clk[VF610_CLK_PLL2_BUS] = imx_clk_gate("pll2_bus", "pll2_bypass", PLL2_CTRL, 13); + clk[VF610_CLK_PLL3_USB_OTG] = imx_clk_gate("pll3_usb_otg", "pll3_bypass", PLL3_CTRL, 13); + clk[VF610_CLK_PLL4_AUDIO] = imx_clk_gate("pll4_audio", "pll4_bypass", PLL4_CTRL, 13); + clk[VF610_CLK_PLL5_ENET] = imx_clk_gate("pll5_enet", "pll5_bypass", PLL5_CTRL, 13); + 
clk[VF610_CLK_PLL6_VIDEO] = imx_clk_gate("pll6_video", "pll6_bypass", PLL6_CTRL, 13); + clk[VF610_CLK_PLL7_USB_HOST] = imx_clk_gate("pll7_usb_host", "pll7_bypass", PLL7_CTRL, 13); + + clk[VF610_CLK_LVDS1_IN] = imx_clk_gate_exclusive("lvds1_in", "anaclk1", ANA_MISC1, 12, BIT(10)); + + clk[VF610_CLK_PLL1_PFD1] = imx_clk_pfd("pll1_pfd1", "pll1_sys", PFD_PLL1_BASE, 0); + clk[VF610_CLK_PLL1_PFD2] = imx_clk_pfd("pll1_pfd2", "pll1_sys", PFD_PLL1_BASE, 1); + clk[VF610_CLK_PLL1_PFD3] = imx_clk_pfd("pll1_pfd3", "pll1_sys", PFD_PLL1_BASE, 2); + clk[VF610_CLK_PLL1_PFD4] = imx_clk_pfd("pll1_pfd4", "pll1_sys", PFD_PLL1_BASE, 3); + + clk[VF610_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1", "pll2_bus", PFD_PLL2_BASE, 0); + clk[VF610_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2", "pll2_bus", PFD_PLL2_BASE, 1); + clk[VF610_CLK_PLL2_PFD3] = imx_clk_pfd("pll2_pfd3", "pll2_bus", PFD_PLL2_BASE, 2); + clk[VF610_CLK_PLL2_PFD4] = imx_clk_pfd("pll2_pfd4", "pll2_bus", PFD_PLL2_BASE, 3); + + clk[VF610_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1", "pll3_usb_otg", PFD_PLL3_BASE, 0); + clk[VF610_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2", "pll3_usb_otg", PFD_PLL3_BASE, 1); + clk[VF610_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3", "pll3_usb_otg", PFD_PLL3_BASE, 2); + clk[VF610_CLK_PLL3_PFD4] = imx_clk_pfd("pll3_pfd4", "pll3_usb_otg", PFD_PLL3_BASE, 3); clk[VF610_CLK_PLL1_PFD_SEL] = imx_clk_mux("pll1_pfd_sel", CCM_CCSR, 16, 3, pll1_sels, 5); clk[VF610_CLK_PLL2_PFD_SEL] = imx_clk_mux("pll2_pfd_sel", CCM_CCSR, 19, 3, pll2_sels, 5); @@ -167,12 +217,12 @@ static void __init vf610_clocks_init(struct device_node *ccm_node) clk[VF610_CLK_PLATFORM_BUS] = imx_clk_divider("platform_bus", "sys_bus", CCM_CACRR, 3, 3); clk[VF610_CLK_IPG_BUS] = imx_clk_divider("ipg_bus", "platform_bus", CCM_CACRR, 11, 2); - clk[VF610_CLK_PLL3_MAIN_DIV] = imx_clk_divider("pll3_main_div", "pll3_main", CCM_CACRR, 20, 1); - clk[VF610_CLK_PLL4_MAIN_DIV] = clk_register_divider_table(NULL, "pll4_main_div", "pll4_main", 0, CCM_CACRR, 6, 3, 0, pll4_main_div_table, &imx_ccm_lock); - clk[VF610_CLK_PLL6_MAIN_DIV] = imx_clk_divider("pll6_main_div", "pll6_main", CCM_CACRR, 21, 1); + clk[VF610_CLK_PLL3_MAIN_DIV] = imx_clk_divider("pll3_usb_otg_div", "pll3_usb_otg", CCM_CACRR, 20, 1); + clk[VF610_CLK_PLL4_MAIN_DIV] = clk_register_divider_table(NULL, "pll4_audio_div", "pll4_audio", 0, CCM_CACRR, 6, 3, 0, pll4_audio_div_table, &imx_ccm_lock); + clk[VF610_CLK_PLL6_MAIN_DIV] = imx_clk_divider("pll6_video_div", "pll6_video", CCM_CACRR, 21, 1); - clk[VF610_CLK_USBPHY0] = imx_clk_gate("usbphy0", "pll3_main", PLL3_CTRL, 6); - clk[VF610_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll7_main", PLL7_CTRL, 6); + clk[VF610_CLK_USBPHY0] = imx_clk_gate("usbphy0", "pll3_usb_otg", PLL3_CTRL, 6); + clk[VF610_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll7_usb_host", PLL7_CTRL, 6); clk[VF610_CLK_USBC0] = imx_clk_gate2("usbc0", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(4)); clk[VF610_CLK_USBC1] = imx_clk_gate2("usbc1", "ipg_bus", CCM_CCGR7, CCM_CCGRx_CGn(4)); @@ -191,8 +241,8 @@ static void __init vf610_clocks_init(struct device_node *ccm_node) clk[VF610_CLK_QSPI1_X1_DIV] = imx_clk_divider("qspi1_x1", "qspi1_x2", CCM_CSCDR3, 11, 1); clk[VF610_CLK_QSPI1] = imx_clk_gate2("qspi1", "qspi1_x1", CCM_CCGR8, CCM_CCGRx_CGn(4)); - clk[VF610_CLK_ENET_50M] = imx_clk_fixed_factor("enet_50m", "pll5_main", 1, 10); - clk[VF610_CLK_ENET_25M] = imx_clk_fixed_factor("enet_25m", "pll5_main", 1, 20); + clk[VF610_CLK_ENET_50M] = imx_clk_fixed_factor("enet_50m", "pll5_enet", 1, 10); + clk[VF610_CLK_ENET_25M] = imx_clk_fixed_factor("enet_25m", 
"pll5_enet", 1, 20); clk[VF610_CLK_ENET_SEL] = imx_clk_mux("enet_sel", CCM_CSCMR2, 4, 2, rmii_sels, 4); clk[VF610_CLK_ENET_TS_SEL] = imx_clk_mux("enet_ts_sel", CCM_CSCMR2, 0, 3, enet_ts_sels, 7); clk[VF610_CLK_ENET] = imx_clk_gate("enet", "enet_sel", CCM_CSCDR1, 24); diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index c95346c9482..cec9d6c6442 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -252,9 +252,6 @@ static void __init nokia_n900_legacy_init(void) platform_device_register(&omap3_rom_rng_device); } - - /* Only on some development boards */ - gpio_request_one(164, GPIOF_OUT_INIT_LOW, "smc91x reset"); } static void __init omap3_tao3530_legacy_init(void) diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 55f9d6e0cc8..5e65ca8dea6 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -956,7 +956,7 @@ static u32 cache_id_part_number_from_dt; * @associativity: variable to return the calculated associativity in * @max_way_size: the maximum size in bytes for the cache ways */ -static void __init l2x0_cache_size_of_parse(const struct device_node *np, +static int __init l2x0_cache_size_of_parse(const struct device_node *np, u32 *aux_val, u32 *aux_mask, u32 *associativity, u32 max_way_size) @@ -974,7 +974,7 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np, of_property_read_u32(np, "cache-line-size", &line_size); if (!cache_size || !sets) - return; + return -ENODEV; /* All these l2 caches have the same line = block size actually */ if (!line_size) { @@ -1009,7 +1009,7 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np, if (way_size > max_way_size) { pr_err("L2C OF: set size %dKB is too large\n", way_size); - return; + return -EINVAL; } pr_info("L2C OF: override cache size: %d bytes (%dKB)\n", @@ -1027,7 +1027,7 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np, if (way_size_bits < 1 || way_size_bits > 6) { pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n", way_size); - return; + return -EINVAL; } mask |= L2C_AUX_CTRL_WAY_SIZE_MASK; @@ -1036,6 +1036,8 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np, *aux_val &= ~mask; *aux_val |= val; *aux_mask &= ~mask; + + return 0; } static void __init l2x0_of_parse(const struct device_node *np, @@ -1046,6 +1048,7 @@ static void __init l2x0_of_parse(const struct device_node *np, u32 dirty = 0; u32 val = 0, mask = 0; u32 assoc; + int ret; of_property_read_u32(np, "arm,tag-latency", &tag); if (tag) { @@ -1068,7 +1071,10 @@ static void __init l2x0_of_parse(const struct device_node *np, val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT; } - l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K); + ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K); + if (ret) + return; + if (assoc > 8) { pr_err("l2x0 of: cache setting yield too high associativity\n"); pr_err("l2x0 of: %d calculated, max 8\n", assoc); @@ -1125,6 +1131,7 @@ static void __init l2c310_of_parse(const struct device_node *np, u32 tag[3] = { 0, 0, 0 }; u32 filter[2] = { 0, 0 }; u32 assoc; + int ret; of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag)); if (tag[0] && tag[1] && tag[2]) @@ -1152,7 +1159,10 @@ static void __init l2c310_of_parse(const struct device_node *np, l2x0_base + L310_ADDR_FILTER_START); } - l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K); + ret = l2x0_cache_size_of_parse(np, 
aux_val, aux_mask, &assoc, SZ_512K); + if (ret) + return; + switch (assoc) { case 16: *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; @@ -1164,8 +1174,8 @@ static void __init l2c310_of_parse(const struct device_node *np, *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; break; default: - pr_err("PL310 OF: cache setting yield illegal associativity\n"); - pr_err("PL310 OF: %d calculated, only 8 and 16 legal\n", assoc); + pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n", + assoc); break; } } diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index c245d903927..e8907117861 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -1198,7 +1198,6 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot, { return dma_common_pages_remap(pages, size, VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller); - return NULL; } /* diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index 45aeaaca905..e17ed00828d 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c @@ -127,8 +127,11 @@ void *kmap_atomic_pfn(unsigned long pfn) { unsigned long vaddr; int idx, type; + struct page *page = pfn_to_page(pfn); pagefault_disable(); + if (!PageHighMem(page)) + return page_address(page); type = kmap_atomic_idx_push(); idx = type + KM_TYPE_NR * smp_processor_id(); diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 92bba32d923..9481f85c56e 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -559,10 +559,10 @@ void __init mem_init(void) #ifdef CONFIG_MODULES " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" #endif - " .text : 0x%p" " - 0x%p" " (%4d kB)\n" - " .init : 0x%p" " - 0x%p" " (%4d kB)\n" - " .data : 0x%p" " - 0x%p" " (%4d kB)\n" - " .bss : 0x%p" " - 0x%p" " (%4d kB)\n", + " .text : 0x%p" " - 0x%p" " (%4td kB)\n" + " .init : 0x%p" " - 0x%p" " (%4td kB)\n" + " .data : 0x%p" " - 0x%p" " (%4td kB)\n" + " .bss : 0x%p" " - 0x%p" " (%4td kB)\n", MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + (PAGE_SIZE)), diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 6feded3b0c4..a7736fa0580 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -129,6 +129,10 @@ endmenu menu "Kernel features" +config NR_CPUS + int + default "1" + config ADVANCED_OPTIONS bool "Prompt for advanced kernel configuration options" help diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h index ea4b233647c..0a53362d554 100644 --- a/arch/microblaze/include/asm/unistd.h +++ b/arch/microblaze/include/asm/unistd.h @@ -38,6 +38,6 @@ #endif /* __ASSEMBLY__ */ -#define __NR_syscalls 387 +#define __NR_syscalls 388 #endif /* _ASM_MICROBLAZE_UNISTD_H */ diff --git a/arch/microblaze/include/uapi/asm/unistd.h b/arch/microblaze/include/uapi/asm/unistd.h index 1c2380bf8fe..c712677f8a2 100644 --- a/arch/microblaze/include/uapi/asm/unistd.h +++ b/arch/microblaze/include/uapi/asm/unistd.h @@ -402,5 +402,6 @@ #define __NR_seccomp 384 #define __NR_getrandom 385 #define __NR_memfd_create 386 +#define __NR_bpf 387 #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */ diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S index de59ee1d701..0166e890486 100644 --- a/arch/microblaze/kernel/syscall_table.S +++ b/arch/microblaze/kernel/syscall_table.S @@ -387,3 +387,4 @@ ENTRY(sys_call_table) .long sys_seccomp .long sys_getrandom /* 385 */ .long sys_memfd_create + .long sys_bpf diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c index 9037914f698..b30e41c0c03 
100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c @@ -660,8 +660,13 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose, res = &hose->mem_resources[memno++]; break; } - if (res != NULL) - of_pci_range_to_resource(&range, dev, res); + if (res != NULL) { + res->name = dev->full_name; + res->flags = range.flags; + res->start = range.cpu_addr; + res->end = range.cpu_addr + range.size - 1; + res->parent = res->child = res->sibling = NULL; + } } /* If there's an ISA hole and the pci_mem_offset is -not- matching diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 623f2971ce0..766b77d527a 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -71,7 +71,7 @@ pte_t *huge_pte_offset_and_shift(struct mm_struct *mm, void flush_dcache_icache_hugepage(struct page *page); -#if defined(CONFIG_PPC_MM_SLICES) || defined(CONFIG_PPC_SUBPAGE_PROT) +#if defined(CONFIG_PPC_MM_SLICES) int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, unsigned long len); #else diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 7d8a6006880..ce9577d693b 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -365,3 +365,4 @@ SYSCALL_SPU(renameat2) SYSCALL_SPU(seccomp) SYSCALL_SPU(getrandom) SYSCALL_SPU(memfd_create) +SYSCALL_SPU(bpf) diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 4e9af3fd43e..e0da021caa0 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -12,7 +12,7 @@ #include <uapi/asm/unistd.h> -#define __NR_syscalls 361 +#define __NR_syscalls 362 #define __NR__exit __NR_exit #define NR_syscalls __NR_syscalls diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index 0688fc06e18..f55351f2e66 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h @@ -383,5 +383,6 @@ #define __NR_seccomp 358 #define __NR_getrandom 359 #define __NR_memfd_create 360 +#define __NR_bpf 361 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c index 0f9939e693d..5a236f082c7 100644 --- a/arch/powerpc/mm/copro_fault.c +++ b/arch/powerpc/mm/copro_fault.c @@ -99,8 +99,6 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb) u64 vsid; int psize, ssize; - slb->esid = (ea & ESID_MASK) | SLB_ESID_V; - switch (REGION_ID(ea)) { case USER_REGION_ID: pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea); @@ -133,6 +131,7 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb) vsid |= mmu_psize_defs[psize].sllp | ((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0); + slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? 
ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V; slb->vsid = vsid; return 0; diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index e5236c24dc0..b9d1dfdbe5b 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1509,11 +1509,14 @@ static int update_cpu_topology(void *data) cpu = smp_processor_id(); for (update = data; update; update = update->next) { + int new_nid = update->new_nid; if (cpu != update->cpu) continue; - unmap_cpu_from_node(update->cpu); - map_cpu_to_node(update->cpu, update->new_nid); + unmap_cpu_from_node(cpu); + map_cpu_to_node(cpu, new_nid); + set_cpu_numa_node(cpu, new_nid); + set_cpu_numa_mem(cpu, local_memory_node(new_nid)); vdso_getcpu_init(); } diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 8d7bda94d19..ded0ea1afde 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -682,6 +682,7 @@ void slice_set_range_psize(struct mm_struct *mm, unsigned long start, slice_convert(mm, mask, psize); } +#ifdef CONFIG_HUGETLB_PAGE /* * is_hugepage_only_range() is used by generic code to verify whether * a normal mmap mapping (non hugetlbfs) is valid on a given area. @@ -726,4 +727,4 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, #endif return !slice_check_fit(mask, available); } - +#endif diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index 6c8710dd90c..dba34088da2 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -417,11 +417,6 @@ static int h_24x7_event_add(struct perf_event *event, int flags) return 0; } -static int h_24x7_event_idx(struct perf_event *event) -{ - return 0; -} - static struct pmu h_24x7_pmu = { .task_ctx_nr = perf_invalid_context, @@ -433,7 +428,6 @@ static struct pmu h_24x7_pmu = { .start = h_24x7_event_start, .stop = h_24x7_event_stop, .read = h_24x7_event_update, - .event_idx = h_24x7_event_idx, }; static int hv_24x7_init(void) diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c index 15fc76c9302..a051fe946c6 100644 --- a/arch/powerpc/perf/hv-gpci.c +++ b/arch/powerpc/perf/hv-gpci.c @@ -246,11 +246,6 @@ static int h_gpci_event_init(struct perf_event *event) return 0; } -static int h_gpci_event_idx(struct perf_event *event) -{ - return 0; -} - static struct pmu h_gpci_pmu = { .task_ctx_nr = perf_invalid_context, @@ -262,7 +257,6 @@ static struct pmu h_gpci_pmu = { .start = h_gpci_event_start, .stop = h_gpci_event_stop, .read = h_gpci_event_update, - .event_idx = h_gpci_event_idx, }; static int hv_gpci_init(void) diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c index dd2c285ad17..ad4b31df779 100644 --- a/arch/powerpc/platforms/powernv/opal-lpc.c +++ b/arch/powerpc/platforms/powernv/opal-lpc.c @@ -191,7 +191,6 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf, { struct lpc_debugfs_entry *lpc = filp->private_data; u32 data, pos, len, todo; - __be32 bedata; int rc; if (!access_ok(VERIFY_WRITE, ubuf, count)) @@ -214,10 +213,9 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf, len = 2; } rc = opal_lpc_read(opal_lpc_chip_id, lpc->lpc_type, pos, - &bedata, len); + &data, len); if (rc) return -ENXIO; - data = be32_to_cpu(bedata); switch(len) { case 4: rc = __put_user((u32)data, (u32 __user *)ubuf); diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index e9e2450c1fd..feb549aa3ee 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ 
b/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -58,7 +58,7 @@ END_FTR_SECTION(0, 1); \ */ #define OPAL_CALL(name, token) \ - _GLOBAL(name); \ + _GLOBAL_TOC(name); \ mflr r0; \ std r0,16(r1); \ li r0,token; \ diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 08e761318c1..b878f12a959 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -1411,11 +1411,6 @@ static void cpumsf_pmu_del(struct perf_event *event, int flags) perf_pmu_enable(event->pmu); } -static int cpumsf_pmu_event_idx(struct perf_event *event) -{ - return event->hw.idx; -} - CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF); CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG); @@ -1458,7 +1453,6 @@ static struct pmu cpumf_sampling = { .stop = cpumsf_pmu_stop, .read = cpumsf_pmu_read, - .event_idx = cpumsf_pmu_event_idx, .attr_groups = cpumsf_pmu_attr_groups, }; diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c index 9139d14b9c5..538c10db353 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c @@ -118,7 +118,7 @@ static struct plat_sci_port scif0_platform_data = { }; static struct resource scif0_resources[] = { - DEFINE_RES_MEM(0xfffffe80, 0x100), + DEFINE_RES_MEM(0xfffffe80, 0x10), DEFINE_RES_IRQ(evt2irq(0x4e0)), }; @@ -143,7 +143,7 @@ static struct plat_sci_port scif1_platform_data = { }; static struct resource scif1_resources[] = { - DEFINE_RES_MEM(0xa4000150, 0x100), + DEFINE_RES_MEM(0xa4000150, 0x10), DEFINE_RES_IRQ(evt2irq(0x900)), }; @@ -169,7 +169,7 @@ static struct plat_sci_port scif2_platform_data = { }; static struct resource scif2_resources[] = { - DEFINE_RES_MEM(0xa4000140, 0x100), + DEFINE_RES_MEM(0xa4000140, 0x10), DEFINE_RES_IRQ(evt2irq(0x880)), }; diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h index c842a89b119..46d83842edd 100644 --- a/arch/sparc/include/uapi/asm/unistd.h +++ b/arch/sparc/include/uapi/asm/unistd.h @@ -414,8 +414,9 @@ #define __NR_seccomp 346 #define __NR_getrandom 347 #define __NR_memfd_create 348 +#define __NR_bpf 349 -#define NR_syscalls 349 +#define NR_syscalls 350 /* Bitmask values returned from kern_features system call. 
*/ #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index 6a873c344bc..ad0cdf497b7 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S @@ -86,4 +86,4 @@ sys_call_table: /*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime /*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev /*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr -/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create +/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index d9151b6490d..580cde9370c 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S @@ -87,7 +87,7 @@ sys_call_table32: /*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr - .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create + .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf #endif /* CONFIG_COMPAT */ @@ -166,4 +166,4 @@ sys_call_table: /*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr - .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create + .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index f2327e88e07..ded8a6774ac 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -142,6 +142,10 @@ config INSTRUCTION_DECODER def_bool y depends on KPROBES || PERF_EVENTS || UPROBES +config PERF_EVENTS_INTEL_UNCORE + def_bool y + depends on PERF_EVENTS && SUP_SUP_INTEL && PCI + config OUTPUT_FORMAT string default "elf32-i386" if X86_32 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 8ffba18395c..ffe71228fc1 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -157,7 +157,7 @@ ENTRY(ia32_sysenter_target) * ourselves. To save a few cycles, we can check whether * NT was set instead of doing an unconditional popfq. 
*/ - testl $X86_EFLAGS_NT,EFLAGS(%rsp) /* saved EFLAGS match cpu */ + testl $X86_EFLAGS_NT,EFLAGS-ARGOFFSET(%rsp) jnz sysenter_fix_flags sysenter_flags_fixed: diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index 7024c12f7bf..400873450e3 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -105,6 +105,7 @@ static __always_inline bool should_resched(void) # ifdef CONFIG_CONTEXT_TRACKING extern asmlinkage void ___preempt_schedule_context(void); # define __preempt_schedule_context() asm ("call ___preempt_schedule_context") + extern asmlinkage void preempt_schedule_context(void); # endif #endif diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index b436fc735aa..a142e77693e 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -397,7 +397,7 @@ static int mp_register_gsi(struct device *dev, u32 gsi, int trigger, /* Don't set up the ACPI SCI because it's already set up */ if (acpi_gbl_FADT.sci_interrupt == gsi) - return gsi; + return mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC); trigger = trigger == ACPI_EDGE_SENSITIVE ? 0 : 1; polarity = polarity == ACPI_ACTIVE_HIGH ? 0 : 1; @@ -604,14 +604,18 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger) int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp) { - int irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK); + int irq; - if (irq >= 0) { + if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) { + *irqp = gsi; + } else { + irq = mp_map_gsi_to_irq(gsi, + IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK); + if (irq < 0) + return -1; *irqp = irq; - return 0; } - - return -1; + return 0; } EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c index 5972b108f15..b708738d016 100644 --- a/arch/x86/kernel/apb_timer.c +++ b/arch/x86/kernel/apb_timer.c @@ -185,8 +185,6 @@ static void apbt_setup_irq(struct apbt_dev *adev) irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT); irq_set_affinity(adev->irq, cpumask_of(adev->cpu)); - /* APB timer irqs are set up as mp_irqs, timer is edge type */ - __irq_set_handler(adev->irq, handle_edge_irq, 0, "edge"); } /* Should be called with per cpu */ diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 00853b254ab..ba6cc041edb 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1297,7 +1297,7 @@ void setup_local_APIC(void) unsigned int value, queued; int i, j, acked = 0; unsigned long long tsc = 0, ntsc; - long long max_loops = cpu_khz; + long long max_loops = cpu_khz ? 
cpu_khz : 1000000; if (cpu_has_tsc) rdtscll(tsc); @@ -1383,7 +1383,7 @@ void setup_local_APIC(void) break; } if (queued) { - if (cpu_has_tsc) { + if (cpu_has_tsc && cpu_khz) { rdtscll(ntsc); max_loops = (cpu_khz << 10) - (ntsc - tsc); } else diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 01d5453b550..e27b49d7c92 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -39,9 +39,12 @@ obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o endif obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o -obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o perf_event_intel_uncore_snb.o -obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore_snbep.o perf_event_intel_uncore_nhmex.o obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o + +obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += perf_event_intel_uncore.o \ + perf_event_intel_uncore_snb.o \ + perf_event_intel_uncore_snbep.o \ + perf_event_intel_uncore_nhmex.o endif diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 1ef45627317..9cc6b6f25f4 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -213,12 +213,13 @@ static void intel_workarounds(struct cpuinfo_x86 *c) { #ifdef CONFIG_X86_F00F_BUG /* - * All current models of Pentium and Pentium with MMX technology CPUs + * All models of Pentium and Pentium with MMX technology CPUs * have the F0 0F bug, which lets nonprivileged users lock up the * system. Announce that the fault handler will be checking for it. + * The Quark is also family 5, but does not have the same bug. */ clear_cpu_bug(c, X86_BUG_F00F); - if (!paravirt_enabled() && c->x86 == 5) { + if (!paravirt_enabled() && c->x86 == 5 && c->x86_model < 9) { static int f00f_workaround_enabled; set_cpu_bug(c, X86_BUG_F00F); diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 1b8299dd3d9..143e5f5dc85 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -243,8 +243,9 @@ static bool check_hw_exists(void) msr_fail: printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n"); - printk(boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR - "Failed to access perfctr msr (MSR %x is %Lx)\n", reg, val_new); + printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n", + boot_cpu_has(X86_FEATURE_HYPERVISOR) ? 
KERN_INFO : KERN_ERR, + reg, val_new); return false; } @@ -444,12 +445,6 @@ int x86_pmu_hw_config(struct perf_event *event) if (event->attr.type == PERF_TYPE_RAW) event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK; - if (event->attr.sample_period && x86_pmu.limit_period) { - if (x86_pmu.limit_period(event, event->attr.sample_period) > - event->attr.sample_period) - return -EINVAL; - } - return x86_setup_perfctr(event); } @@ -987,9 +982,6 @@ int x86_perf_event_set_period(struct perf_event *event) if (left > x86_pmu.max_period) left = x86_pmu.max_period; - if (x86_pmu.limit_period) - left = x86_pmu.limit_period(event, left); - per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; /* diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index d98a34d435d..fc5eb390b36 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -445,7 +445,6 @@ struct x86_pmu { struct x86_pmu_quirk *quirks; int perfctr_second_write; bool late_ack; - unsigned (*limit_period)(struct perf_event *event, unsigned l); /* * sysfs attrs diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index a73947c53b6..944bf019b74 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -220,15 +220,6 @@ static struct event_constraint intel_hsw_event_constraints[] = { EVENT_CONSTRAINT_END }; -static struct event_constraint intel_bdw_event_constraints[] = { - FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ - FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ - FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ - INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */ - INTEL_EVENT_CONSTRAINT(0xa3, 0x4), /* CYCLE_ACTIVITY.* */ - EVENT_CONSTRAINT_END -}; - static u64 intel_pmu_event_map(int hw_event) { return intel_perfmon_event_map[hw_event]; @@ -424,126 +415,6 @@ static __initconst const u64 snb_hw_cache_event_ids }; -static __initconst const u64 hsw_hw_cache_event_ids - [PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX] = -{ - [ C(L1D ) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ - [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ - [ C(RESULT_MISS) ] = 0x0, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0x0, - [ C(RESULT_MISS) ] = 0x0, - }, - }, - [ C(L1I ) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x0, - [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0x0, - [ C(RESULT_MISS) ] = 0x0, - }, - }, - [ C(LL ) ] = { - [ C(OP_READ) ] = { - /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD */ - [ C(RESULT_ACCESS) ] = 0x1b7, - /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD|SUPPLIER_NONE| - L3_MISS|ANY_SNOOP */ - [ C(RESULT_MISS) ] = 0x1b7, - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE:ALL_RFO */ - /* OFFCORE_RESPONSE:ALL_RFO|SUPPLIER_NONE|L3_MISS|ANY_SNOOP */ - [ C(RESULT_MISS) ] = 0x1b7, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0x0, - [ C(RESULT_MISS) ] = 0x0, - }, - }, - [ C(DTLB) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ - [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */ - }, - [ C(OP_WRITE) ] = { - [ 
C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ - [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */ - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0x0, - [ C(RESULT_MISS) ] = 0x0, - }, - }, - [ C(ITLB) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */ - [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - }, - [ C(BPU ) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */ - [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - }, -}; - -static __initconst const u64 hsw_hw_cache_extra_regs - [PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX] = -{ - [ C(LL ) ] = { - [ C(OP_READ) ] = { - /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD */ - [ C(RESULT_ACCESS) ] = 0x2d5, - /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD|SUPPLIER_NONE| - L3_MISS|ANY_SNOOP */ - [ C(RESULT_MISS) ] = 0x3fbc0202d5ull, - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0x122, /* OFFCORE_RESPONSE:ALL_RFO */ - /* OFFCORE_RESPONSE:ALL_RFO|SUPPLIER_NONE|L3_MISS|ANY_SNOOP */ - [ C(RESULT_MISS) ] = 0x3fbc020122ull, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0x0, - [ C(RESULT_MISS) ] = 0x0, - }, - }, -}; - static __initconst const u64 westmere_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] @@ -2034,24 +1905,6 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) return c; } -/* - * Broadwell: - * The INST_RETIRED.ALL period always needs to have lowest - * 6bits cleared (BDM57). It shall not use a period smaller - * than 100 (BDM11). We combine the two to enforce - * a min-period of 128. 
- */ -static unsigned bdw_limit_period(struct perf_event *event, unsigned left) -{ - if ((event->hw.config & INTEL_ARCH_EVENT_MASK) == - X86_CONFIG(.event=0xc0, .umask=0x01)) { - if (left < 128) - left = 128; - left &= ~0x3fu; - } - return left; -} - PMU_FORMAT_ATTR(event, "config:0-7" ); PMU_FORMAT_ATTR(umask, "config:8-15" ); PMU_FORMAT_ATTR(edge, "config:18" ); @@ -2692,8 +2545,8 @@ __init int intel_pmu_init(void) case 69: /* 22nm Haswell ULT */ case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */ x86_pmu.late_ack = true; - memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); + memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); intel_pmu_lbr_init_snb(); @@ -2712,28 +2565,6 @@ __init int intel_pmu_init(void) pr_cont("Haswell events, "); break; - case 61: /* 14nm Broadwell Core-M */ - x86_pmu.late_ack = true; - memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); - - intel_pmu_lbr_init_snb(); - - x86_pmu.event_constraints = intel_bdw_event_constraints; - x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints; - x86_pmu.extra_regs = intel_snbep_extra_regs; - x86_pmu.pebs_aliases = intel_pebs_aliases_snb; - /* all extra regs are per-cpu when HT is on */ - x86_pmu.er_flags |= ERF_HAS_RSP_1; - x86_pmu.er_flags |= ERF_NO_HT_SHARING; - - x86_pmu.hw_config = hsw_hw_config; - x86_pmu.get_event_constraints = hsw_get_event_constraints; - x86_pmu.cpu_events = hsw_events_attrs; - x86_pmu.limit_period = bdw_limit_period; - pr_cont("Broadwell events, "); - break; - default: switch (x86_pmu.version) { case 1: diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index b553ed89e5f..344b63f18d1 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -447,15 +447,14 @@ sysenter_exit: sysenter_audit: testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp) jnz syscall_trace_entry - addl $4,%esp - CFI_ADJUST_CFA_OFFSET -4 - movl %esi,4(%esp) /* 5th arg: 4th syscall arg */ - movl %edx,(%esp) /* 4th arg: 3rd syscall arg */ - /* %ecx already in %ecx 3rd arg: 2nd syscall arg */ - movl %ebx,%edx /* 2nd arg: 1st syscall arg */ - /* %eax already in %eax 1st arg: syscall number */ + /* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */ + movl PT_EBX(%esp), %edx /* ebx/a0: 2nd arg to audit */ + /* movl PT_ECX(%esp), %ecx already set, a1: 3nd arg to audit */ + pushl_cfi PT_ESI(%esp) /* a3: 5th arg */ + pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */ call __audit_syscall_entry - pushl_cfi %ebx + popl_cfi %ecx /* get that remapped edx off the stack */ + popl_cfi %ecx /* get that remapped esi off the stack */ movl PT_EAX(%esp),%eax /* reload syscall number */ jmp sysenter_do_call diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index 8af817105e2..e7cc5370cd2 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -111,8 +111,7 @@ static void make_8259A_irq(unsigned int irq) { disable_irq_nosync(irq); io_apic_irqs &= ~(1<<irq); - irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, - i8259A_chip.name); + irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq); enable_irq(irq); } diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 
44f1ed42fdf..4de73ee7836 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -70,7 +70,6 @@ int vector_used_by_percpu_irq(unsigned int vector) void __init init_ISA_irqs(void) { struct irq_chip *chip = legacy_pic->chip; - const char *name = chip->name; int i; #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) @@ -79,7 +78,7 @@ void __init init_ISA_irqs(void) legacy_pic->init(0); for (i = 0; i < nr_legacy_irqs(); i++) - irq_set_chip_and_handler_name(i, chip, handle_level_irq, name); + irq_set_chip_and_handler(i, chip, handle_level_irq); } void __init init_IRQ(void) diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 235cfd39e0d..ab08aa2276f 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1128,7 +1128,6 @@ void __init setup_arch(char **cmdline_p) setup_real_mode(); memblock_set_current_limit(get_max_mapped()); - dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT); /* * NOTE: On x86-32, only from this point on, fixmaps are ready for use. @@ -1159,6 +1158,7 @@ void __init setup_arch(char **cmdline_p) early_acpi_boot_init(); initmem_init(); + dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT); /* * Reserve memory for crash kernel after SRAT is parsed so that it diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 2d5200e5635..4d2128ac70b 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -102,8 +102,6 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); EXPORT_PER_CPU_SYMBOL(cpu_info); -static DEFINE_PER_CPU(struct completion, die_complete); - atomic_t init_deasserted; /* @@ -1318,6 +1316,8 @@ void cpu_disable_common(void) fixup_irqs(); } +static DEFINE_PER_CPU(struct completion, die_complete); + int native_cpu_disable(void) { int ret; diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index b6025f9e36c..b7e50bba3bb 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -1166,14 +1166,17 @@ void __init tsc_init(void) x86_init.timers.tsc_pre_init(); - if (!cpu_has_tsc) + if (!cpu_has_tsc) { + setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); return; + } tsc_khz = x86_platform.calibrate_tsc(); cpu_khz = tsc_khz; if (!tsc_khz) { mark_tsc_unstable("could not calculate TSC khz"); + setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); return; } diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 749f9fa3825..5edf088ca51 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -574,12 +574,14 @@ static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst, case 4: ctxt->_eip = (u32)dst; break; +#ifdef CONFIG_X86_64 case 8: if ((cs_l && is_noncanonical_address(dst)) || - (!cs_l && (dst & ~(u32)-1))) + (!cs_l && (dst >> 32) != 0)) return emulate_gp(ctxt, 0); ctxt->_eip = dst; break; +#endif default: WARN(1, "unsupported eip assignment size\n"); } @@ -641,7 +643,8 @@ static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size) static int __linearize(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, - unsigned size, bool write, bool fetch, + unsigned *max_size, unsigned size, + bool write, bool fetch, ulong *linear) { struct desc_struct desc; @@ -652,10 +655,15 @@ static int __linearize(struct x86_emulate_ctxt *ctxt, unsigned cpl; la = seg_base(ctxt, addr.seg) + addr.ea; + *max_size = 0; switch (ctxt->mode) { case X86EMUL_MODE_PROT64: if (((signed long)la << 16) >> 16 != la) return emulate_gp(ctxt, 0); + + *max_size = min_t(u64, ~0u, 
(1ull << 48) - la); + if (size > *max_size) + goto bad; break; default: usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL, @@ -673,20 +681,25 @@ static int __linearize(struct x86_emulate_ctxt *ctxt, if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch && (ctxt->d & NoBigReal)) { /* la is between zero and 0xffff */ - if (la > 0xffff || (u32)(la + size - 1) > 0xffff) + if (la > 0xffff) goto bad; + *max_size = 0x10000 - la; } else if ((desc.type & 8) || !(desc.type & 4)) { /* expand-up segment */ - if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim) + if (addr.ea > lim) goto bad; + *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea); } else { /* expand-down segment */ - if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim) + if (addr.ea <= lim) goto bad; lim = desc.d ? 0xffffffff : 0xffff; - if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim) + if (addr.ea > lim) goto bad; + *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea); } + if (size > *max_size) + goto bad; cpl = ctxt->ops->cpl(ctxt); if (!(desc.type & 8)) { /* data segment */ @@ -711,9 +724,9 @@ static int __linearize(struct x86_emulate_ctxt *ctxt, return X86EMUL_CONTINUE; bad: if (addr.seg == VCPU_SREG_SS) - return emulate_ss(ctxt, sel); + return emulate_ss(ctxt, 0); else - return emulate_gp(ctxt, sel); + return emulate_gp(ctxt, 0); } static int linearize(struct x86_emulate_ctxt *ctxt, @@ -721,7 +734,8 @@ static int linearize(struct x86_emulate_ctxt *ctxt, unsigned size, bool write, ulong *linear) { - return __linearize(ctxt, addr, size, write, false, linear); + unsigned max_size; + return __linearize(ctxt, addr, &max_size, size, write, false, linear); } @@ -746,17 +760,27 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt, static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size) { int rc; - unsigned size; + unsigned size, max_size; unsigned long linear; int cur_size = ctxt->fetch.end - ctxt->fetch.data; struct segmented_address addr = { .seg = VCPU_SREG_CS, .ea = ctxt->eip + cur_size }; - size = 15UL ^ cur_size; - rc = __linearize(ctxt, addr, size, false, true, &linear); + /* + * We do not know exactly how many bytes will be needed, and + * __linearize is expensive, so fetch as much as possible. We + * just have to avoid going beyond the 15 byte limit, the end + * of the segment, or the end of the page. + * + * __linearize is called with size 0 so that it does not do any + * boundary check itself. Instead, we use max_size to check + * against op_size. + */ + rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear); if (unlikely(rc != X86EMUL_CONTINUE)) return rc; + size = min_t(unsigned, 15UL ^ cur_size, max_size); size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear)); /* @@ -766,7 +790,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size) * still, we must have hit the 15-byte boundary. 
*/ if (unlikely(size < op_size)) - return X86EMUL_UNHANDLEABLE; + return emulate_gp(ctxt, 0); + rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end, size, &ctxt->exception); if (unlikely(rc != X86EMUL_CONTINUE)) @@ -2012,7 +2037,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt) rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l); if (rc != X86EMUL_CONTINUE) { - WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64); + WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); /* assigning eip failed; restore the old cs */ ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS); return rc; @@ -2109,7 +2134,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt) return rc; rc = assign_eip_far(ctxt, eip, new_desc.l); if (rc != X86EMUL_CONTINUE) { - WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64); + WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); } return rc; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a8b76c4c95e..3e556c68351 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -4579,7 +4579,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu) vmcs_write32(TPR_THRESHOLD, 0); } - kvm_vcpu_reload_apic_access_page(vcpu); + kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); if (vmx_vm_has_apicv(vcpu->kvm)) memset(&vmx->pi_desc, 0, sizeof(struct pi_desc)); @@ -6426,6 +6426,8 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) const unsigned long *fields = shadow_read_write_fields; const int num_fields = max_shadow_read_write_fields; + preempt_disable(); + vmcs_load(shadow_vmcs); for (i = 0; i < num_fields; i++) { @@ -6449,6 +6451,8 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) vmcs_clear(shadow_vmcs); vmcs_load(vmx->loaded_vmcs->vmcs); + + preempt_enable(); } static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index ae242a7c11c..36de293caf2 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -409,7 +409,7 @@ phys_addr_t slow_virt_to_phys(void *__virt_addr) psize = page_level_size(level); pmask = page_level_mask(level); offset = virt_addr & ~pmask; - phys_addr = pte_pfn(*pte) << PAGE_SHIFT; + phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT; return (phys_addr | offset); } EXPORT_SYMBOL_GPL(slow_virt_to_phys); diff --git a/arch/x86/platform/intel-mid/sfi.c b/arch/x86/platform/intel-mid/sfi.c index 3c53a90fdb1..c14ad34776c 100644 --- a/arch/x86/platform/intel-mid/sfi.c +++ b/arch/x86/platform/intel-mid/sfi.c @@ -106,6 +106,7 @@ int __init sfi_parse_mtmr(struct sfi_table_header *table) mp_irq.dstapic = MP_APIC_ALL; mp_irq.dstirq = pentry->irq; mp_save_irq(&mp_irq); + mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC); } return 0; @@ -176,6 +177,7 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table) mp_irq.dstapic = MP_APIC_ALL; mp_irq.dstirq = pentry->irq; mp_save_irq(&mp_irq); + mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC); } return 0; } |