Diffstat (limited to 'arch')
82 files changed, 599 insertions, 335 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 56a4df952fb..22e58a99f38 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -477,7 +477,7 @@ config ALPHA_BROKEN_IRQ_MASK config VGA_HOSE bool - depends on ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL || ALPHA_TSUNAMI + depends on VGA_CONSOLE && (ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL || ALPHA_TSUNAMI) default y help Support VGA on an arbitrary hose; needed for several platforms diff --git a/arch/alpha/include/asm/rtc.h b/arch/alpha/include/asm/rtc.h index 1f7fba671ae..d70408d3667 100644 --- a/arch/alpha/include/asm/rtc.h +++ b/arch/alpha/include/asm/rtc.h @@ -1,14 +1,10 @@ #ifndef _ALPHA_RTC_H #define _ALPHA_RTC_H -#if defined(CONFIG_ALPHA_GENERIC) +#if defined(CONFIG_ALPHA_MARVEL) && defined(CONFIG_SMP) \ + || defined(CONFIG_ALPHA_GENERIC) # define get_rtc_time alpha_mv.rtc_get_time # define set_rtc_time alpha_mv.rtc_set_time -#else -# if defined(CONFIG_ALPHA_MARVEL) && defined(CONFIG_SMP) -# define get_rtc_time marvel_get_rtc_time -# define set_rtc_time marvel_set_rtc_time -# endif #endif #include <asm-generic/rtc.h> diff --git a/arch/alpha/kernel/core_tsunami.c b/arch/alpha/kernel/core_tsunami.c index 5e7c28f92f1..61893d7bdda 100644 --- a/arch/alpha/kernel/core_tsunami.c +++ b/arch/alpha/kernel/core_tsunami.c @@ -11,6 +11,7 @@ #include <asm/core_tsunami.h> #undef __EXTERN_INLINE +#include <linux/module.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/sched.h> diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c index 14a4b6a7cf5..407accc8087 100644 --- a/arch/alpha/kernel/sys_marvel.c +++ b/arch/alpha/kernel/sys_marvel.c @@ -317,7 +317,7 @@ marvel_init_irq(void) } static int -marvel_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +marvel_map_irq(struct pci_dev *dev, u8 slot, u8 pin) { struct pci_controller *hose = dev->sysdata; struct io7_port *io7_port = hose->sysdata; diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index e81c35f936b..b8df521fb68 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig @@ -232,6 +232,9 @@ config MACH_ARMLEX4210 config MACH_UNIVERSAL_C210 bool "Mobile UNIVERSAL_C210 Board" select CPU_EXYNOS4210 + select S5P_HRT + select CLKSRC_MMIO + select HAVE_SCHED_CLOCK select S5P_GPIO_INT select S5P_DEV_FIMC0 select S5P_DEV_FIMC1 diff --git a/arch/arm/mach-exynos/clock-exynos5.c b/arch/arm/mach-exynos/clock-exynos5.c index 5cd7a8b8868..7ac6ff4c46b 100644 --- a/arch/arm/mach-exynos/clock-exynos5.c +++ b/arch/arm/mach-exynos/clock-exynos5.c @@ -678,7 +678,7 @@ static struct clk exynos5_clk_pdma1 = { .name = "dma", .devname = "dma-pl330.1", .enable = exynos5_clk_ip_fsys_ctrl, - .ctrlbit = (1 << 1), + .ctrlbit = (1 << 2), }; static struct clk exynos5_clk_mdma1 = { diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c index cb2b027f09a..a34036eb8ba 100644 --- a/arch/arm/mach-exynos/mach-universal_c210.c +++ b/arch/arm/mach-exynos/mach-universal_c210.c @@ -40,6 +40,7 @@ #include <plat/pd.h> #include <plat/regs-fb-v4.h> #include <plat/fimc-core.h> +#include <plat/s5p-time.h> #include <plat/camport.h> #include <plat/mipi_csis.h> @@ -1063,6 +1064,7 @@ static void __init universal_map_io(void) exynos_init_io(NULL, 0); s3c24xx_init_clocks(24000000); s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs)); + s5p_set_timer_source(S5P_PWM2, S5P_PWM4); } static void s5p_tv_setup(void) @@ -1113,7 +1115,7 @@ MACHINE_START(UNIVERSAL_C210, "UNIVERSAL_C210") 
.map_io = universal_map_io, .handle_irq = gic_handle_irq, .init_machine = universal_machine_init, - .timer = &exynos4_timer, + .timer = &s5p_timer, .reserve = &universal_reserve, .restart = exynos4_restart, MACHINE_END diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c index 1c672d9e665..f7fe1b9f317 100644 --- a/arch/arm/mach-kirkwood/board-dt.c +++ b/arch/arm/mach-kirkwood/board-dt.c @@ -14,6 +14,7 @@ #include <linux/init.h> #include <linux/of.h> #include <linux/of_platform.h> +#include <linux/kexec.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <mach/bridge-regs.h> diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c index fcce7ff3763..cfd98b186fc 100644 --- a/arch/arm/mach-omap1/ams-delta-fiq.c +++ b/arch/arm/mach-omap1/ams-delta-fiq.c @@ -48,7 +48,7 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id) struct irq_chip *irq_chip = NULL; int gpio, irq_num, fiq_count; - irq_desc = irq_to_desc(IH_GPIO_BASE); + irq_desc = irq_to_desc(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK)); if (irq_desc) irq_chip = irq_desc->irq_data.chip; diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c index 930c0d38043..740cee9369b 100644 --- a/arch/arm/mach-omap2/board-igep0020.c +++ b/arch/arm/mach-omap2/board-igep0020.c @@ -641,7 +641,7 @@ static struct regulator_consumer_supply dummy_supplies[] = { static void __init igep_init(void) { - regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + regulator_register_fixed(1, dummy_supplies, ARRAY_SIZE(dummy_supplies)); omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); /* Get IGEP2 hardware revision */ diff --git a/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h index 1e2d3322f33..c88420de115 100644 --- a/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h +++ b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h @@ -941,10 +941,10 @@ #define OMAP4_DSI2_LANEENABLE_MASK (0x7 << 29) #define OMAP4_DSI1_LANEENABLE_SHIFT 24 #define OMAP4_DSI1_LANEENABLE_MASK (0x1f << 24) -#define OMAP4_DSI2_PIPD_SHIFT 19 -#define OMAP4_DSI2_PIPD_MASK (0x1f << 19) -#define OMAP4_DSI1_PIPD_SHIFT 14 -#define OMAP4_DSI1_PIPD_MASK (0x1f << 14) +#define OMAP4_DSI1_PIPD_SHIFT 19 +#define OMAP4_DSI1_PIPD_MASK (0x1f << 19) +#define OMAP4_DSI2_PIPD_SHIFT 14 +#define OMAP4_DSI2_PIPD_MASK (0x1f << 14) /* CONTROL_MCBSPLP */ #define OMAP4_ALBCTRLRX_FSX_SHIFT 31 diff --git a/arch/arm/mach-orion5x/mpp.h b/arch/arm/mach-orion5x/mpp.h index eac68978a2c..db70e79a119 100644 --- a/arch/arm/mach-orion5x/mpp.h +++ b/arch/arm/mach-orion5x/mpp.h @@ -65,8 +65,8 @@ #define MPP8_GIGE MPP(8, 0x1, 0, 0, 1, 1, 1) #define MPP9_UNUSED MPP(9, 0x0, 0, 0, 1, 1, 1) -#define MPP9_GPIO MPP(9, 0x0, 0, 0, 1, 1, 1) -#define MPP9_GIGE MPP(9, 0x1, 1, 1, 1, 1, 1) +#define MPP9_GPIO MPP(9, 0x0, 1, 1, 1, 1, 1) +#define MPP9_GIGE MPP(9, 0x1, 0, 0, 1, 1, 1) #define MPP10_UNUSED MPP(10, 0x0, 0, 0, 1, 1, 1) #define MPP10_GPIO MPP(10, 0x0, 1, 1, 1, 1, 1) diff --git a/arch/arm/mach-prima2/irq.c b/arch/arm/mach-prima2/irq.c index 37c2de9b6f2..a7b9415d30f 100644 --- a/arch/arm/mach-prima2/irq.c +++ b/arch/arm/mach-prima2/irq.c @@ -42,7 +42,8 @@ sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num) static __init void sirfsoc_irq_init(void) { sirfsoc_alloc_gc(sirfsoc_intc_base, 0, 32); - sirfsoc_alloc_gc(sirfsoc_intc_base + 4, 32, SIRFSOC_INTENAL_IRQ_END - 32); + 
sirfsoc_alloc_gc(sirfsoc_intc_base + 4, 32, + SIRFSOC_INTENAL_IRQ_END + 1 - 32); writel_relaxed(0, sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL0); writel_relaxed(0, sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL1); @@ -68,7 +69,8 @@ void __init sirfsoc_of_irq_init(void) if (!sirfsoc_intc_base) panic("unable to map intc cpu registers\n"); - irq_domain_add_legacy(np, 32, 0, 0, &irq_domain_simple_ops, NULL); + irq_domain_add_legacy(np, SIRFSOC_INTENAL_IRQ_END + 1, 0, 0, + &irq_domain_simple_ops, NULL); of_node_put(np); diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c index cb224a344af..0891ec6e27f 100644 --- a/arch/arm/mach-shmobile/board-ag5evm.c +++ b/arch/arm/mach-shmobile/board-ag5evm.c @@ -365,23 +365,13 @@ static struct platform_device mipidsi0_device = { }; /* SDHI0 */ -static irqreturn_t ag5evm_sdhi0_gpio_cd(int irq, void *arg) -{ - struct device *dev = arg; - struct sh_mobile_sdhi_info *info = dev->platform_data; - struct tmio_mmc_data *pdata = info->pdata; - - tmio_mmc_cd_wakeup(pdata); - - return IRQ_HANDLED; -} - static struct sh_mobile_sdhi_info sdhi0_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, - .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT, + .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD, .tmio_caps = MMC_CAP_SD_HIGHSPEED, .tmio_ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29, + .cd_gpio = GPIO_PORT251, }; static struct resource sdhi0_resources[] = { @@ -557,7 +547,6 @@ static void __init ag5evm_init(void) lcd_backlight_reset(); /* enable SDHI0 on CN15 [SD I/F] */ - gpio_request(GPIO_FN_SDHICD0, NULL); gpio_request(GPIO_FN_SDHIWP0, NULL); gpio_request(GPIO_FN_SDHICMD0, NULL); gpio_request(GPIO_FN_SDHICLK0, NULL); @@ -566,13 +555,6 @@ static void __init ag5evm_init(void) gpio_request(GPIO_FN_SDHID0_1, NULL); gpio_request(GPIO_FN_SDHID0_0, NULL); - if (!request_irq(intcs_evt2irq(0x3c0), ag5evm_sdhi0_gpio_cd, - IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, - "sdhi0 cd", &sdhi0_device.dev)) - sdhi0_info.tmio_flags |= TMIO_MMC_HAS_COLD_CD; - else - pr_warn("Unable to setup SDHI0 GPIO IRQ\n"); - /* enable SDHI1 on CN4 [WLAN I/F] */ gpio_request(GPIO_FN_SDHICLK1, NULL); gpio_request(GPIO_FN_SDHICMD1_PU, NULL); diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index f49e28abe0a..8c6202bb6ae 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -1011,21 +1011,12 @@ static int slot_cn7_get_cd(struct platform_device *pdev) } /* SDHI0 */ -static irqreturn_t mackerel_sdhi0_gpio_cd(int irq, void *arg) -{ - struct device *dev = arg; - struct sh_mobile_sdhi_info *info = dev->platform_data; - struct tmio_mmc_data *pdata = info->pdata; - - tmio_mmc_cd_wakeup(pdata); - - return IRQ_HANDLED; -} - static struct sh_mobile_sdhi_info sdhi0_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, + .tmio_flags = TMIO_MMC_USE_GPIO_CD, .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ, + .cd_gpio = GPIO_PORT172, }; static struct resource sdhi0_resources[] = { @@ -1384,7 +1375,6 @@ static void __init mackerel_init(void) { u32 srcr4; struct clk *clk; - int ret; /* External clock source */ clk_set_rate(&sh7372_dv_clki_clk, 27000000); @@ -1481,7 +1471,6 @@ static void __init mackerel_init(void) irq_set_irq_type(IRQ21, IRQ_TYPE_LEVEL_HIGH); /* enable SDHI0 */ - gpio_request(GPIO_FN_SDHICD0, NULL); gpio_request(GPIO_FN_SDHIWP0, NULL); gpio_request(GPIO_FN_SDHICMD0, NULL); gpio_request(GPIO_FN_SDHICLK0, NULL); @@ 
-1490,13 +1479,6 @@ static void __init mackerel_init(void) gpio_request(GPIO_FN_SDHID0_1, NULL); gpio_request(GPIO_FN_SDHID0_0, NULL); - ret = request_irq(evt2irq(0x3340), mackerel_sdhi0_gpio_cd, - IRQF_TRIGGER_FALLING, "sdhi0 cd", &sdhi0_device.dev); - if (!ret) - sdhi0_info.tmio_flags |= TMIO_MMC_HAS_COLD_CD; - else - pr_err("Cannot get IRQ #%d: %d\n", evt2irq(0x3340), ret); - #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) /* enable SDHI1 */ gpio_request(GPIO_FN_SDHICMD1, NULL); diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S index 6ac015c8920..b202c127252 100644 --- a/arch/arm/mach-shmobile/headsmp.S +++ b/arch/arm/mach-shmobile/headsmp.S @@ -16,6 +16,59 @@ __CPUINIT +/* Cache invalidation nicked from arch/arm/mach-imx/head-v7.S, thanks! + * + * The secondary kernel init calls v7_flush_dcache_all before it enables + * the L1; however, the L1 comes out of reset in an undefined state, so + * the clean + invalidate performed by v7_flush_dcache_all causes a bunch + * of cache lines with uninitialized data and uninitialized tags to get + * written out to memory, which does really unpleasant things to the main + * processor. We fix this by performing an invalidate, rather than a + * clean + invalidate, before jumping into the kernel. + * + * This function is cloned from arch/arm/mach-tegra/headsmp.S, and needs + * to be called for both secondary cores startup and primary core resume + * procedures. Ideally, it should be moved into arch/arm/mm/cache-v7.S. + */ +ENTRY(v7_invalidate_l1) + mov r0, #0 + mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache + mcr p15, 2, r0, c0, c0, 0 + mrc p15, 1, r0, c0, c0, 0 + + ldr r1, =0x7fff + and r2, r1, r0, lsr #13 + + ldr r1, =0x3ff + + and r3, r1, r0, lsr #3 @ NumWays - 1 + add r2, r2, #1 @ NumSets + + and r0, r0, #0x7 + add r0, r0, #4 @ SetShift + + clz r1, r3 @ WayShift + add r4, r3, #1 @ NumWays +1: sub r2, r2, #1 @ NumSets-- + mov r3, r4 @ Temp = NumWays +2: subs r3, r3, #1 @ Temp-- + mov r5, r3, lsl r1 + mov r6, r2, lsl r0 + orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift) + mcr p15, 0, r5, c7, c6, 2 + bgt 2b + cmp r2, #0 + bgt 1b + dsb + isb + mov pc, lr +ENDPROC(v7_invalidate_l1) + +ENTRY(shmobile_invalidate_start) + bl v7_invalidate_l1 + b secondary_startup +ENDPROC(shmobile_invalidate_start) + /* * Reset vector for secondary CPUs. * This will be mapped at address 0 by SBAR register.
@@ -24,4 +77,5 @@ .align 12 ENTRY(shmobile_secondary_vector) ldr pc, 1f -1: .long secondary_startup - PAGE_OFFSET + PLAT_PHYS_OFFSET +1: .long shmobile_invalidate_start - PAGE_OFFSET + PLAT_PHYS_OFFSET +ENDPROC(shmobile_secondary_vector) diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h index 83ad3fe0a75..c85e6ecda60 100644 --- a/arch/arm/mach-shmobile/include/mach/common.h +++ b/arch/arm/mach-shmobile/include/mach/common.h @@ -4,7 +4,6 @@ extern void shmobile_earlytimer_init(void); extern struct sys_timer shmobile_timer; struct twd_local_timer; -void shmobile_twd_init(struct twd_local_timer *twd_local_timer); extern void shmobile_setup_console(void); extern void shmobile_secondary_vector(void); extern int shmobile_platform_cpu_kill(unsigned int cpu); @@ -82,5 +81,6 @@ extern int r8a7779_platform_cpu_kill(unsigned int cpu); extern void r8a7779_secondary_init(unsigned int cpu); extern int r8a7779_boot_secondary(unsigned int cpu); extern void r8a7779_smp_prepare_cpus(void); +extern void r8a7779_register_twd(void); #endif /* __ARCH_MACH_COMMON_H */ diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c index 12c6f529ab8..e98e46f6cf5 100644 --- a/arch/arm/mach-shmobile/setup-r8a7779.c +++ b/arch/arm/mach-shmobile/setup-r8a7779.c @@ -262,10 +262,14 @@ void __init r8a7779_add_standard_devices(void) ARRAY_SIZE(r8a7779_late_devices)); } +/* do nothing for !CONFIG_SMP or !CONFIG_HAVE_TWD */ +void __init __weak r8a7779_register_twd(void) { } + static void __init r8a7779_earlytimer_init(void) { r8a7779_clock_init(); shmobile_earlytimer_init(); + r8a7779_register_twd(); } void __init r8a7779_add_early_devices(void) diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c index 5bebffc1045..04a0dfe7549 100644 --- a/arch/arm/mach-shmobile/setup-sh73a0.c +++ b/arch/arm/mach-shmobile/setup-sh73a0.c @@ -688,10 +688,14 @@ void __init sh73a0_add_standard_devices(void) ARRAY_SIZE(sh73a0_late_devices)); } +/* do nothing for !CONFIG_SMP or !CONFIG_HAVE_TWD */ +void __init __weak sh73a0_register_twd(void) { } + static void __init sh73a0_earlytimer_init(void) { sh73a0_clock_init(); shmobile_earlytimer_init(); + sh73a0_register_twd(); } void __init sh73a0_add_early_devices(void) diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c index b62e19d4c9a..6d1d0238cbf 100644 --- a/arch/arm/mach-shmobile/smp-r8a7779.c +++ b/arch/arm/mach-shmobile/smp-r8a7779.c @@ -64,8 +64,15 @@ static void __iomem *scu_base_addr(void) static DEFINE_SPINLOCK(scu_lock); static unsigned long tmp; +#ifdef CONFIG_HAVE_ARM_TWD static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, 0xf0000600, 29); +void __init r8a7779_register_twd(void) +{ + twd_local_timer_register(&twd_local_timer); +} +#endif + static void modify_scu_cpu_psr(unsigned long set, unsigned long clr) { void __iomem *scu_base = scu_base_addr(); @@ -84,7 +91,6 @@ unsigned int __init r8a7779_get_core_count(void) { void __iomem *scu_base = scu_base_addr(); - shmobile_twd_init(&twd_local_timer); return scu_get_core_count(scu_base); } diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c index 14ad8b052f1..e36c41c4ab4 100644 --- a/arch/arm/mach-shmobile/smp-sh73a0.c +++ b/arch/arm/mach-shmobile/smp-sh73a0.c @@ -42,7 +42,13 @@ static void __iomem *scu_base_addr(void) static DEFINE_SPINLOCK(scu_lock); static unsigned long tmp; +#ifdef CONFIG_HAVE_ARM_TWD static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, 
0xf0000600, 29); +void __init sh73a0_register_twd(void) +{ + twd_local_timer_register(&twd_local_timer); +} +#endif static void modify_scu_cpu_psr(unsigned long set, unsigned long clr) { @@ -62,7 +68,6 @@ unsigned int __init sh73a0_get_core_count(void) { void __iomem *scu_base = scu_base_addr(); - shmobile_twd_init(&twd_local_timer); return scu_get_core_count(scu_base); } diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c index 2fba5f3d1c8..8b79e7917a2 100644 --- a/arch/arm/mach-shmobile/timer.c +++ b/arch/arm/mach-shmobile/timer.c @@ -46,15 +46,6 @@ static void __init shmobile_timer_init(void) { } -void __init shmobile_twd_init(struct twd_local_timer *twd_local_timer) -{ -#ifdef CONFIG_HAVE_ARM_TWD - int err = twd_local_timer_register(twd_local_timer); - if (err) - pr_err("twd_local_timer_register failed %d\n", err); -#endif -} - struct sys_timer shmobile_timer = { .init = shmobile_timer_init, }; diff --git a/arch/arm/mach-tegra/flowctrl.c b/arch/arm/mach-tegra/flowctrl.c index fef66a7486e..f07488e0bd3 100644 --- a/arch/arm/mach-tegra/flowctrl.c +++ b/arch/arm/mach-tegra/flowctrl.c @@ -53,10 +53,10 @@ static void flowctrl_update(u8 offset, u32 value) void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value) { - return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value); + return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value); } void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value) { - return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value); + return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value); } diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index f5104b7c52c..463fb3bbe11 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1174,7 +1174,7 @@ out: bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { - return irqchip_in_kernel(vcpu->kcm) == (vcpu->arch.apic != NULL); + return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); } int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) diff --git a/arch/m68k/platform/520x/config.c b/arch/m68k/platform/520x/config.c index 235947844f2..09df4b89e8b 100644 --- a/arch/m68k/platform/520x/config.c +++ b/arch/m68k/platform/520x/config.c @@ -22,7 +22,7 @@ /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m520x_qspi_init(void) { @@ -35,7 +35,7 @@ static void __init m520x_qspi_init(void) writew(par, MCF_GPIO_PAR_UART); } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -79,7 +79,7 @@ void __init config_BSP(char *commandp, int size) mach_sched_init = hw_timer_init; m520x_uarts_init(); m520x_fec_init(); -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m520x_qspi_init(); #endif } diff --git a/arch/m68k/platform/523x/config.c b/arch/m68k/platform/523x/config.c index c8b405d5a96..d47dfd8f50a 100644 --- a/arch/m68k/platform/523x/config.c +++ b/arch/m68k/platform/523x/config.c @@ -22,7 +22,7 @@ /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m523x_qspi_init(void) { @@ -36,7 +36,7 @@ static void __init m523x_qspi_init(void) writew(par, MCFGPIO_PAR_TIMER); } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ 
/***************************************************************************/ @@ -58,7 +58,7 @@ void __init config_BSP(char *commandp, int size) { mach_sched_init = hw_timer_init; m523x_fec_init(); -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m523x_qspi_init(); #endif } diff --git a/arch/m68k/platform/5249/config.c b/arch/m68k/platform/5249/config.c index bbf05135bb9..300e729a58d 100644 --- a/arch/m68k/platform/5249/config.c +++ b/arch/m68k/platform/5249/config.c @@ -51,7 +51,7 @@ static struct platform_device *m5249_devices[] __initdata = { /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m5249_qspi_init(void) { @@ -61,7 +61,7 @@ static void __init m5249_qspi_init(void) mcf_mapirq2imr(MCF_IRQ_QSPI, MCFINTC_QSPI); } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -90,7 +90,7 @@ void __init config_BSP(char *commandp, int size) #ifdef CONFIG_M5249C3 m5249_smc91x_init(); #endif -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m5249_qspi_init(); #endif } diff --git a/arch/m68k/platform/527x/config.c b/arch/m68k/platform/527x/config.c index f91a53294c3..b3cb378c5e9 100644 --- a/arch/m68k/platform/527x/config.c +++ b/arch/m68k/platform/527x/config.c @@ -23,7 +23,7 @@ /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m527x_qspi_init(void) { @@ -42,7 +42,7 @@ static void __init m527x_qspi_init(void) #endif } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -90,7 +90,7 @@ void __init config_BSP(char *commandp, int size) mach_sched_init = hw_timer_init; m527x_uarts_init(); m527x_fec_init(); -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m527x_qspi_init(); #endif } diff --git a/arch/m68k/platform/528x/config.c b/arch/m68k/platform/528x/config.c index d4492926614..c5f11ba49be 100644 --- a/arch/m68k/platform/528x/config.c +++ b/arch/m68k/platform/528x/config.c @@ -24,7 +24,7 @@ /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m528x_qspi_init(void) { @@ -32,7 +32,7 @@ static void __init m528x_qspi_init(void) __raw_writeb(0x07, MCFGPIO_PQSPAR); } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -98,7 +98,7 @@ void __init config_BSP(char *commandp, int size) mach_sched_init = hw_timer_init; m528x_uarts_init(); m528x_fec_init(); -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m528x_qspi_init(); #endif } diff --git a/arch/m68k/platform/532x/config.c b/arch/m68k/platform/532x/config.c index 2bec3477b73..37082d02f2b 100644 --- a/arch/m68k/platform/532x/config.c +++ b/arch/m68k/platform/532x/config.c @@ -30,7 +30,7 @@ /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m532x_qspi_init(void) { @@ -38,7 +38,7 @@ static void __init m532x_qspi_init(void) 
writew(0x01f0, MCF_GPIO_PAR_QSPI); } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -77,7 +77,7 @@ void __init config_BSP(char *commandp, int size) mach_sched_init = hw_timer_init; m532x_uarts_init(); m532x_fec_init(); -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m532x_qspi_init(); #endif diff --git a/arch/m68k/platform/coldfire/device.c b/arch/m68k/platform/coldfire/device.c index 7af97362b95..3aa77ddea89 100644 --- a/arch/m68k/platform/coldfire/device.c +++ b/arch/m68k/platform/coldfire/device.c @@ -121,7 +121,7 @@ static struct platform_device mcf_fec1 = { #endif /* MCFFEC_BASE1 */ #endif /* CONFIG_FEC */ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) /* * The ColdFire QSPI module is an SPI protocol hardware block used * on a number of different ColdFire CPUs. @@ -274,7 +274,7 @@ static struct platform_device mcf_qspi = { .resource = mcf_qspi_resources, .dev.platform_data = &mcf_qspi_data, }; -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ static struct platform_device *mcf_devices[] __initdata = { &mcf_uart, @@ -284,7 +284,7 @@ static struct platform_device *mcf_devices[] __initdata = { &mcf_fec1, #endif #endif -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) &mcf_qspi, #endif }; diff --git a/arch/mips/ath79/dev-wmac.c b/arch/mips/ath79/dev-wmac.c index e2150705206..9c717bf98ff 100644 --- a/arch/mips/ath79/dev-wmac.c +++ b/arch/mips/ath79/dev-wmac.c @@ -58,8 +58,8 @@ static void __init ar913x_wmac_setup(void) static int ar933x_wmac_reset(void) { - ath79_device_reset_clear(AR933X_RESET_WMAC); ath79_device_reset_set(AR933X_RESET_WMAC); + ath79_device_reset_clear(AR933X_RESET_WMAC); return 0; } diff --git a/arch/mips/include/asm/mach-jz4740/irq.h b/arch/mips/include/asm/mach-jz4740/irq.h index a865c983c70..5ad1a9c113c 100644 --- a/arch/mips/include/asm/mach-jz4740/irq.h +++ b/arch/mips/include/asm/mach-jz4740/irq.h @@ -45,7 +45,7 @@ #define JZ4740_IRQ_LCD JZ4740_IRQ(30) /* 2nd-level interrupts */ -#define JZ4740_IRQ_DMA(x) (JZ4740_IRQ(32) + (X)) +#define JZ4740_IRQ_DMA(x) (JZ4740_IRQ(32) + (x)) #define JZ4740_IRQ_INTC_GPIO(x) (JZ4740_IRQ_GPIO0 - (x)) #define JZ4740_IRQ_GPIO(x) (JZ4740_IRQ(48) + (x)) diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 73c0d45798d..9b02cfba744 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -37,12 +37,6 @@ extern void tlbmiss_handler_setup_pgd(unsigned long pgd); write_c0_xcontext((unsigned long) smp_processor_id() << 51); \ } while (0) - -static inline unsigned long get_current_pgd(void) -{ - return PHYS_TO_XKSEG_CACHED((read_c0_context() >> 11) & ~0xfffUL); -} - #else /* CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current*/ /* diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 185ca00c4c8..d5a338a1739 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -257,11 +257,8 @@ asmlinkage int sys_sigsuspend(nabi_no_regargs struct pt_regs regs) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; - current->blocked = newset; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&newset); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -286,11 +283,8 @@ asmlinkage int
sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; - current->blocked = newset; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&newset); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -362,10 +356,7 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs) goto badframe; sigdelsetmask(&blocked, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = blocked; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&blocked); sig = restore_sigcontext(&regs, &frame->sf_sc); if (sig < 0) @@ -401,10 +392,7 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&set); sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext); if (sig < 0) @@ -580,12 +568,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info, if (ret) return ret; - spin_lock_irq(&current->sighand->siglock); - sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); - if (!(ka->sa.sa_flags & SA_NODEFER)) - sigaddset(&current->blocked, sig); - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + block_sigmask(ka, sig); return ret; } diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 06b5da392e2..ac3b8d89aae 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -290,11 +290,8 @@ asmlinkage int sys32_sigsuspend(nabi_no_regargs struct pt_regs regs) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; - current->blocked = newset; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&newset); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -318,11 +315,8 @@ asmlinkage int sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; - current->blocked = newset; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&newset); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -488,10 +482,7 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs) goto badframe; sigdelsetmask(&blocked, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = blocked; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&blocked); sig = restore_sigcontext32(&regs, &frame->sf_sc); if (sig < 0) @@ -529,10 +520,7 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&set); sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext); if (sig < 0) diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index ae29e894ab8..86eb4b04631 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c @@ -93,11 +93,8 @@ asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) sigset_from_compat(&newset, &uset); sigdelsetmask(&newset, ~_BLOCKABLE); -
spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; - current->blocked = newset; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&newset); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -121,10 +118,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&set); sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext); if (sig < 0) diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c index 910dddf65e4..9cd69ad6aa0 100644 --- a/arch/mn10300/kernel/smp.c +++ b/arch/mn10300/kernel/smp.c @@ -24,6 +24,7 @@ #include <linux/sched.h> #include <linux/profile.h> #include <linux/smp.h> +#include <linux/cpu.h> #include <asm/tlbflush.h> #include <asm/bitops.h> #include <asm/processor.h> @@ -38,7 +39,6 @@ #include "internal.h" #ifdef CONFIG_HOTPLUG_CPU -#include <linux/cpu.h> #include <asm/cacheflush.h> static unsigned long sleep_mode[NR_CPUS]; @@ -874,10 +874,13 @@ static void __init smp_online(void) cpu = smp_processor_id(); - local_irq_enable(); + notify_cpu_starting(cpu); + ipi_call_lock(); set_cpu_online(cpu, true); - smp_wmb(); + ipi_call_unlock(); + + local_irq_enable(); } /** diff --git a/arch/parisc/include/asm/hardware.h b/arch/parisc/include/asm/hardware.h index 4e9626836ba..d1d864b81ba 100644 --- a/arch/parisc/include/asm/hardware.h +++ b/arch/parisc/include/asm/hardware.h @@ -2,7 +2,6 @@ #define _PARISC_HARDWARE_H #include <linux/mod_devicetable.h> -#include <asm/pdc.h> #define HWTYPE_ANY_ID PA_HWTYPE_ANY_ID #define HVERSION_ANY_ID PA_HVERSION_ANY_ID @@ -95,12 +94,14 @@ struct bc_module { #define HPHW_MC 15 #define HPHW_FAULTY 31 +struct parisc_device_id; /* hardware.c: */ extern const char *parisc_hardware_description(struct parisc_device_id *id); extern enum cpu_type parisc_get_cpu_type(unsigned long hversion); struct pci_dev; +struct hardware_path; /* drivers.c: */ extern struct parisc_device *alloc_pa_dev(unsigned long hpa, diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index a84cc1f925f..4e0e7dbf0f3 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h @@ -160,5 +160,11 @@ extern int npmem_ranges; #include <asm-generic/memory_model.h> #include <asm-generic/getorder.h> +#include <asm/pdc.h> + +#define PAGE0 ((struct zeropage *)__PAGE_OFFSET) + +/* DEFINITION OF THE ZERO-PAGE (PAG0) */ +/* based on work by Jason Eckhardt (jason@equator.com) */ #endif /* _PARISC_PAGE_H */ diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h index 4ca510b3c6f..7f0f2d23059 100644 --- a/arch/parisc/include/asm/pdc.h +++ b/arch/parisc/include/asm/pdc.h @@ -343,8 +343,6 @@ #ifdef __KERNEL__ -#include <asm/page.h> /* for __PAGE_OFFSET */ - extern int pdc_type; /* Values for pdc_type */ @@ -677,11 +675,6 @@ static inline char * os_id_to_string(u16 os_id) { #endif /* __KERNEL__ */ -#define PAGE0 ((struct zeropage *)__PAGE_OFFSET) - -/* DEFINITION OF THE ZERO-PAGE (PAG0) */ -/* based on work by Jason Eckhardt (jason@equator.com) */ - /* flags of the device_path */ #define PF_AUTOBOOT 0x80 #define PF_AUTOSEARCH 0x40 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 22dadeb5869..ee99f233935 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -44,6 +44,8 @@ struct
vm_area_struct; #endif /* !__ASSEMBLY__ */ +#include <asm/page.h> + #define pte_ERROR(e) \ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) #define pmd_ERROR(e) \ diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index 804aa28ab1d..3516e0b2704 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h @@ -1,6 +1,8 @@ #ifndef __ASM_SPINLOCK_H #define __ASM_SPINLOCK_H +#include <asm/barrier.h> +#include <asm/ldcw.h> #include <asm/processor.h> #include <asm/spinlock_types.h> diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c index 4f004596a6e..47341aa208f 100644 --- a/arch/parisc/kernel/pdc_cons.c +++ b/arch/parisc/kernel/pdc_cons.c @@ -50,6 +50,7 @@ #include <linux/init.h> #include <linux/major.h> #include <linux/tty.h> +#include <asm/page.h> /* for PAGE0 */ #include <asm/pdc.h> /* for iodc_call() proto and friends */ static DEFINE_SPINLOCK(pdc_console_lock); @@ -104,7 +105,7 @@ static int pdc_console_tty_open(struct tty_struct *tty, struct file *filp) static void pdc_console_tty_close(struct tty_struct *tty, struct file *filp) { - if (!tty->count) { + if (tty->count == 1) { del_timer_sync(&pdc_console_timer); tty_port_tty_set(&tty_port, NULL); } diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 0bb1d63907f..4dc7b7942b4 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -31,6 +31,7 @@ #include <linux/delay.h> #include <linux/bitops.h> #include <linux/ftrace.h> +#include <linux/cpu.h> #include <linux/atomic.h> #include <asm/current.h> @@ -295,8 +296,13 @@ smp_cpu_init(int cpunum) printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum); machine_halt(); - } + } + + notify_cpu_starting(cpunum); + + ipi_call_lock(); set_cpu_online(cpunum, true); + ipi_call_unlock(); /* Initialise the idle task for this CPU */ atomic_inc(&init_mm.mm_count); diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 7c0774397b8..70e105d6242 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -29,6 +29,7 @@ #include <asm/uaccess.h> #include <asm/io.h> #include <asm/irq.h> +#include <asm/page.h> #include <asm/param.h> #include <asm/pdc.h> #include <asm/led.h> diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 548da3aa0a3..d58fc4e4149 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -288,13 +288,6 @@ label##_hv: \ /* Exception addition: Hard disable interrupts */ #define DISABLE_INTS SOFT_DISABLE_INTS(r10,r11) -/* Exception addition: Keep interrupt state */ -#define ENABLE_INTS \ - ld r11,PACAKMSR(r13); \ - ld r12,_MSR(r1); \ - rlwimi r11,r12,0,MSR_EE; \ - mtmsrd r11,1 - #define ADD_NVGPRS \ bl .save_nvgprs diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index aa795ccef29..fd07f43d662 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -81,12 +81,13 @@ struct kvmppc_vcpu_book3s { u64 sdr1; u64 hior; u64 msr_mask; - u64 vsid_next; #ifdef CONFIG_PPC_BOOK3S_32 u32 vsid_pool[VSID_POOL_SIZE]; + u32 vsid_next; #else - u64 vsid_first; - u64 vsid_max; + u64 proto_vsid_first; + u64 proto_vsid_max; + u64 proto_vsid_next; #endif int context_id[SID_CONTEXTS]; diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index f8a7a1a1a9f..ef2074c3e90 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -588,23 
+588,19 @@ _GLOBAL(ret_from_except_lite) fast_exc_return_irq: restore: /* - * This is the main kernel exit path, we first check if we - * have to change our interrupt state. + * This is the main kernel exit path. First we check if we + * are about to re-enable interrupts */ ld r5,SOFTE(r1) lbz r6,PACASOFTIRQEN(r13) - cmpwi cr1,r5,0 - cmpw cr0,r5,r6 - beq cr0,4f + cmpwi cr0,r5,0 + beq restore_irq_off - /* We do, handle disable first, which is easy */ - bne cr1,3f; - li r0,0 - stb r0,PACASOFTIRQEN(r13); - TRACE_DISABLE_INTS - b 4f + /* We are enabling, were we already enabled ? Yes, just return */ + cmpwi cr0,r6,1 + beq cr0,do_restore -3: /* * We are about to soft-enable interrupts (we are hard disabled * at this point). We check if there's anything that needs to * be replayed first. @@ -626,7 +622,7 @@ restore_no_replay: /* * Final return path. BookE is handled in a different file */ -4: +do_restore: #ifdef CONFIG_PPC_BOOK3E b .exception_return_book3e #else @@ -700,6 +696,25 @@ fast_exception_return: #endif /* CONFIG_PPC_BOOK3E */ /* + * We are returning to a context with interrupts soft disabled. + * + * However, we may also be about to hard enable, so we need to + * make sure that in this case, we also clear PACA_IRQ_HARD_DIS + * or that bit can get out of sync and bad things will happen + */ +restore_irq_off: + ld r3,_MSR(r1) + lbz r7,PACAIRQHAPPENED(r13) + andi. r0,r3,MSR_EE + beq 1f + rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS + stb r7,PACAIRQHAPPENED(r13) +1: li r0,0 + stb r0,PACASOFTIRQEN(r13); + TRACE_DISABLE_INTS + b do_restore + + /* * Something did happen, check if a re-emit is needed + * (this also clears paca->irq_happened) */ @@ -748,6 +763,9 @@ restore_check_irq_replay: #endif /* CONFIG_PPC_BOOK3E */ 1: b .ret_from_except /* What else to do here ? */ + + +3: do_work: #ifdef CONFIG_PREEMPT andi. r0,r3,MSR_PR /* Returning to user mode? */ @@ -767,16 +785,6 @@ do_work: SOFT_DISABLE_INTS(r3,r4) 1: bl .preempt_schedule_irq - /* Hard-disable interrupts again (and update PACA) */ -#ifdef CONFIG_PPC_BOOK3E - wrteei 0 -#else - ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */ - mtmsrd r10,1 -#endif /* CONFIG_PPC_BOOK3E */ - li r0,PACA_IRQ_HARD_DIS - stb r0,PACAIRQHAPPENED(r13) - /* Re-test flags and eventually loop */ clrrdi r9,r1,THREAD_SHIFT ld r4,TI_FLAGS(r9) @@ -787,14 +795,6 @@ do_work: user_work: #endif /* CONFIG_PREEMPT */ - /* Enable interrupts */ -#ifdef CONFIG_PPC_BOOK3E - wrteei 1 -#else - ori r10,r10,MSR_EE - mtmsrd r10,1 -#endif /* CONFIG_PPC_BOOK3E */ - andi. r0,r4,_TIF_NEED_RESCHED beq 1f bl .restore_interrupts diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index cb705fdbb45..8f880bc77c5 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -768,8 +768,8 @@ alignment_common: std r3,_DAR(r1) std r4,_DSISR(r1) bl .save_nvgprs + DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - ENABLE_INTS bl .alignment_exception b .ret_from_except diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 43eb74fcedd..641da9e868c 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -229,6 +229,19 @@ notrace void arch_local_irq_restore(unsigned long en) */ if (unlikely(irq_happened != PACA_IRQ_HARD_DIS)) __hard_irq_disable(); +#ifdef CONFIG_TRACE_IRQFLAG + else { + /* + * We should already be hard disabled here. We had bugs + * where that wasn't the case so let's dbl check it and + * warn if we are wrong. Only do that when IRQ tracing + * is enabled as mfmsr() can be costly.
+ */ + if (WARN_ON(mfmsr() & MSR_EE)) + __hard_irq_disable(); + } +#endif /* CONFIG_TRACE_IRQFLAG */ + set_soft_enabled(0); /* @@ -260,11 +273,17 @@ EXPORT_SYMBOL(arch_local_irq_restore); * if they are currently disabled. This is typically called before * schedule() or do_signal() when returning to userspace. We do it * in C to avoid the burden of dealing with lockdep etc... + * + * NOTE: This is called with interrupts hard disabled but not marked + * as such in paca->irq_happened, so we need to resync this. */ void restore_interrupts(void) { - if (irqs_disabled()) + if (irqs_disabled()) { + local_paca->irq_happened |= PACA_IRQ_HARD_DIS; local_irq_enable(); + } else + __hard_irq_enable(); } #endif /* CONFIG_PPC64 */ diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 6aa0c663e24..158972341a2 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -248,7 +248,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) addr, regs->nip, regs->link, code); } - if (!arch_irq_disabled_regs(regs)) + if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs)) local_irq_enable(); memset(&info, 0, sizeof(info)); @@ -1019,7 +1019,9 @@ void __kprobes program_check_exception(struct pt_regs *regs) return; } - local_irq_enable(); + /* We restore the interrupt state now */ + if (!arch_irq_disabled_regs(regs)) + local_irq_enable(); #ifdef CONFIG_MATH_EMULATION /* (reason & REASON_ILLEGAL) would be the obvious thing here, @@ -1069,6 +1071,10 @@ void alignment_exception(struct pt_regs *regs) { int sig, code, fixed = 0; + /* We restore the interrupt state now */ + if (!arch_irq_disabled_regs(regs)) + local_irq_enable(); + /* we don't implement logging of alignment exceptions */ if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) fixed = fix_alignment(regs); diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index 6f87f39a1ac..10fc8ec9d2a 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -194,14 +194,14 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) backwards_map = !backwards_map; /* Uh-oh ... out of mappings. Let's flush! 
*/ - if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max) { - vcpu_book3s->vsid_next = vcpu_book3s->vsid_first; + if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) { + vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first; memset(vcpu_book3s->sid_map, 0, sizeof(struct kvmppc_sid_map) * SID_MAP_NUM); kvmppc_mmu_pte_flush(vcpu, 0, 0); kvmppc_mmu_flush_segments(vcpu); } - map->host_vsid = vcpu_book3s->vsid_next++; + map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M); map->guest_vsid = gvsid; map->valid = true; @@ -319,9 +319,10 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu) return -1; vcpu3s->context_id[0] = err; - vcpu3s->vsid_max = ((vcpu3s->context_id[0] + 1) << USER_ESID_BITS) - 1; - vcpu3s->vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS; - vcpu3s->vsid_next = vcpu3s->vsid_first; + vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1) + << USER_ESID_BITS) - 1; + vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS; + vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first; kvmppc_mmu_hpte_init(vcpu); diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index ddc485a529f..c3beaeef3f6 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -258,6 +258,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn, !(memslot->userspace_addr & (s - 1))) { start &= ~(s - 1); pgsize = s; + get_page(hpage); + put_page(page); page = hpage; } } @@ -281,11 +283,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn, err = 0; out: - if (got) { - if (PageHuge(page)) - page = compound_head(page); + if (got) put_page(page); - } return err; up_err: @@ -678,8 +677,15 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, SetPageDirty(page); out_put: - if (page) - put_page(page); + if (page) { + /* + * We drop pages[0] here, not page because page might + * have been set to the head page of a compound, but + * we have to drop the reference on the correct tail + * page to match the get inside gup() + */ + put_page(pages[0]); + } return ret; out_unlock: @@ -979,6 +985,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa, pa = *physp; } page = pfn_to_page(pa >> PAGE_SHIFT); + get_page(page); } else { hva = gfn_to_hva_memslot(memslot, gfn); npages = get_user_pages_fast(hva, 1, 1, pages); @@ -991,8 +998,6 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa, page = compound_head(page); psize <<= compound_order(page); } - if (!kvm->arch.using_mmu_notifiers) - get_page(page); offset = gpa & (psize - 1); if (nb_ret) *nb_ret = psize - offset; @@ -1003,7 +1008,6 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va) { struct page *page = virt_to_page(va); - page = compound_head(page); put_page(page); } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 01294a5099d..108d1f58017 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1192,8 +1192,6 @@ static void unpin_slot(struct kvm *kvm, int slot_id) continue; pfn = physp[j] >> PAGE_SHIFT; page = pfn_to_page(pfn); - if (PageHuge(page)) - page = compound_head(page); SetPageDirty(page); put_page(page); } diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index def880aea63..cec4daddbf3 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -463,6 +463,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) /* insert R and C bits from 
PTE */ rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C); args[j] |= rcbits << (56 - 5); + hp[0] = 0; continue; } diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S index 0676ae249b9..6e6e9cef34a 100644 --- a/arch/powerpc/kvm/book3s_segment.S +++ b/arch/powerpc/kvm/book3s_segment.S @@ -197,7 +197,8 @@ kvmppc_interrupt: /* Save guest PC and MSR */ #ifdef CONFIG_PPC64 BEGIN_FTR_SECTION - andi. r0,r12,0x2 + andi. r0, r12, 0x2 + cmpwi cr1, r0, 0 beq 1f mfspr r3,SPRN_HSRR0 mfspr r4,SPRN_HSRR1 @@ -250,6 +251,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) beq ld_last_prev_inst cmpwi r12, BOOK3S_INTERRUPT_ALIGNMENT beq- ld_last_inst +#ifdef CONFIG_PPC64 +BEGIN_FTR_SECTION + cmpwi r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST + beq- ld_last_inst +END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) +#endif b no_ld_last_inst @@ -316,23 +323,17 @@ no_dcbz32_off: * Having set up SRR0/1 with the address where we want * to continue with relocation on (potentially in module * space), we either just go straight there with rfi[d], - * or we jump to an interrupt handler with bctr if there - * is an interrupt to be handled first. In the latter - * case, the rfi[d] at the end of the interrupt handler - * will get us back to where we want to continue. + * or we jump to an interrupt handler if there is an + * interrupt to be handled first. In the latter case, + * the rfi[d] at the end of the interrupt handler will + * get us back to where we want to continue. */ - cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL - beq 1f - cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER - beq 1f - cmpwi r12, BOOK3S_INTERRUPT_PERFMON -1: mtctr r12 - /* Register usage at this point: * * R1 = host R1 * R2 = host R2 + * R10 = raw exit handler id * R12 = exit handler id * R13 = shadow vcpu (32-bit) or PACA (64-bit) * SVCPU.* = guest * @@ -342,12 +343,25 @@ no_dcbz32_off: PPC_LL r6, HSTATE_HOST_MSR(r13) PPC_LL r8, HSTATE_VMHANDLER(r13) - /* Restore host msr -> SRR1 */ +#ifdef CONFIG_PPC64 +BEGIN_FTR_SECTION + beq cr1, 1f + mtspr SPRN_HSRR1, r6 + mtspr SPRN_HSRR0, r8 +END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) +#endif +1: /* Restore host msr -> SRR1 */ mtsrr1 r6 /* Load highmem handler address */ mtsrr0 r8 /* RFI into the highmem handler, or jump to interrupt handler */ - beqctr + cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL + beqa BOOK3S_INTERRUPT_EXTERNAL + cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER + beqa BOOK3S_INTERRUPT_DECREMENTER + cmpwi r12, BOOK3S_INTERRUPT_PERFMON + beqa BOOK3S_INTERRUPT_PERFMON + RFI kvmppc_handler_trampoline_exit_end: diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index af1ab5e9a69..5c3cf2d04e4 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -48,7 +48,13 @@ /* * Assembly helpers from arch/powerpc/net/bpf_jit.S: */ -extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[]; +#define DECLARE_LOAD_FUNC(func) \ + extern u8 func[], func##_negative_offset[], func##_positive_offset[] + +DECLARE_LOAD_FUNC(sk_load_word); +DECLARE_LOAD_FUNC(sk_load_half); +DECLARE_LOAD_FUNC(sk_load_byte); +DECLARE_LOAD_FUNC(sk_load_byte_msh); #define FUNCTION_DESCR_SIZE 24 diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S index ff4506e85cc..55ba3855a97 100644 --- a/arch/powerpc/net/bpf_jit_64.S +++ b/arch/powerpc/net/bpf_jit_64.S @@ -31,14 +31,13 @@ * then branch directly to slow_path_XXX if required. (In fact, could * load a spare GPR with the address of slow_path_generic and pass size * as an argument, making the call site a mtlr, li and bllr.) 
- * - * Technically, the "is addr < 0" check is unnecessary & slowing down - * the ABS path, as it's statically checked on generation. */ .globl sk_load_word sk_load_word: cmpdi r_addr, 0 - blt bpf_error + blt bpf_slow_path_word_neg + .globl sk_load_word_positive_offset +sk_load_word_positive_offset: /* Are we accessing past headlen? */ subi r_scratch1, r_HL, 4 cmpd r_scratch1, r_addr @@ -51,7 +50,9 @@ sk_load_word: .globl sk_load_half sk_load_half: cmpdi r_addr, 0 - blt bpf_error + blt bpf_slow_path_half_neg + .globl sk_load_half_positive_offset +sk_load_half_positive_offset: subi r_scratch1, r_HL, 2 cmpd r_scratch1, r_addr blt bpf_slow_path_half @@ -61,7 +62,9 @@ sk_load_half: .globl sk_load_byte sk_load_byte: cmpdi r_addr, 0 - blt bpf_error + blt bpf_slow_path_byte_neg + .globl sk_load_byte_positive_offset +sk_load_byte_positive_offset: cmpd r_HL, r_addr ble bpf_slow_path_byte lbzx r_A, r_D, r_addr @@ -69,22 +72,20 @@ sk_load_byte: /* * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf) - * r_addr is the offset value, already known positive + * r_addr is the offset value */ .globl sk_load_byte_msh sk_load_byte_msh: + cmpdi r_addr, 0 + blt bpf_slow_path_byte_msh_neg + .globl sk_load_byte_msh_positive_offset +sk_load_byte_msh_positive_offset: cmpd r_HL, r_addr ble bpf_slow_path_byte_msh lbzx r_X, r_D, r_addr rlwinm r_X, r_X, 2, 32-4-2, 31-2 blr -bpf_error: - /* Entered with cr0 = lt */ - li r3, 0 - /* Generated code will 'blt epilogue', returning 0. */ - blr - /* Call out to skb_copy_bits: * We'll need to back up our volatile regs first; we have * local variable space at r1+(BPF_PPC_STACK_BASIC). @@ -136,3 +137,84 @@ bpf_slow_path_byte_msh: lbz r_X, BPF_PPC_STACK_BASIC+(2*8)(r1) rlwinm r_X, r_X, 2, 32-4-2, 31-2 blr + +/* Call out to bpf_internal_load_pointer_neg_helper: + * We'll need to back up our volatile regs first; we have + * local variable space at r1+(BPF_PPC_STACK_BASIC). + * Allocate a new stack frame here to remain ABI-compliant in + * stashing LR. + */ +#define sk_negative_common(SIZE) \ + mflr r0; \ + std r0, 16(r1); \ + /* R3 goes in parameter space of caller's frame */ \ + std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \ + std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \ + std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \ + stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \ + /* R3 = r_skb, as passed */ \ + mr r4, r_addr; \ + li r5, SIZE; \ + bl bpf_internal_load_pointer_neg_helper; \ + /* R3 != 0 on success */ \ + addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \ + ld r0, 16(r1); \ + ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \ + ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \ + mtlr r0; \ + cmpldi r3, 0; \ + beq bpf_error_slow; /* cr0 = EQ */ \ + mr r_addr, r3; \ + ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \ + /* Great success! 
*/ + +bpf_slow_path_word_neg: + lis r_scratch1,-32 /* SKF_LL_OFF */ + cmpd r_addr, r_scratch1 /* addr < SKF_* */ + blt bpf_error /* cr0 = LT */ + .globl sk_load_word_negative_offset +sk_load_word_negative_offset: + sk_negative_common(4) + lwz r_A, 0(r_addr) + blr + +bpf_slow_path_half_neg: + lis r_scratch1,-32 /* SKF_LL_OFF */ + cmpd r_addr, r_scratch1 /* addr < SKF_* */ + blt bpf_error /* cr0 = LT */ + .globl sk_load_half_negative_offset +sk_load_half_negative_offset: + sk_negative_common(2) + lhz r_A, 0(r_addr) + blr + +bpf_slow_path_byte_neg: + lis r_scratch1,-32 /* SKF_LL_OFF */ + cmpd r_addr, r_scratch1 /* addr < SKF_* */ + blt bpf_error /* cr0 = LT */ + .globl sk_load_byte_negative_offset +sk_load_byte_negative_offset: + sk_negative_common(1) + lbz r_A, 0(r_addr) + blr + +bpf_slow_path_byte_msh_neg: + lis r_scratch1,-32 /* SKF_LL_OFF */ + cmpd r_addr, r_scratch1 /* addr < SKF_* */ + blt bpf_error /* cr0 = LT */ + .globl sk_load_byte_msh_negative_offset +sk_load_byte_msh_negative_offset: + sk_negative_common(1) + lbz r_X, 0(r_addr) + rlwinm r_X, r_X, 2, 32-4-2, 31-2 + blr + +bpf_error_slow: + /* fabricate a cr0 = lt */ + li r_scratch1, -1 + cmpdi r_scratch1, 0 +bpf_error: + /* Entered with cr0 = lt */ + li r3, 0 + /* Generated code will 'blt epilogue', returning 0. */ + blr diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 73619d3aeb6..2dc8b148484 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -127,6 +127,9 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) PPC_BLR(); } +#define CHOOSE_LOAD_FUNC(K, func) \ + ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) + /* Assemble the body code between the prologue & epilogue. */ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, struct codegen_context *ctx, @@ -391,21 +394,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, /*** Absolute loads from packet header/data ***/ case BPF_S_LD_W_ABS: - func = sk_load_word; + func = CHOOSE_LOAD_FUNC(K, sk_load_word); goto common_load; case BPF_S_LD_H_ABS: - func = sk_load_half; + func = CHOOSE_LOAD_FUNC(K, sk_load_half); goto common_load; case BPF_S_LD_B_ABS: - func = sk_load_byte; + func = CHOOSE_LOAD_FUNC(K, sk_load_byte); common_load: - /* - * Load from [K]. Reference with the (negative) - * SKF_NET_OFF/SKF_LL_OFF offsets is unsupported. - */ + /* Load from [K]. */ ctx->seen |= SEEN_DATAREF; - if ((int)K < 0) - return -ENOTSUPP; PPC_LI64(r_scratch1, func); PPC_MTLR(r_scratch1); PPC_LI32(r_addr, K); @@ -429,7 +427,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, common_load_ind: /* * Load from [X + K]. Negative offsets are tested for - * in the helper functions, and result in a 'ret 0'. + * in the helper functions. */ ctx->seen |= SEEN_DATAREF | SEEN_XREG; PPC_LI64(r_scratch1, func); @@ -443,13 +441,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, break; case BPF_S_LDX_B_MSH: - /* - * x86 version drops packet (RET 0) when K<0, whereas - * interpreter does allow K<0 (__load_pointer, special - * ancillary data). common_load returns ENOTSUPP if K<0, - * so we fall back to interpreter & filter works. 
- */ - func = sk_load_byte_msh; + func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh); goto common_load; break; diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c index 38d48a59879..9708851a8b9 100644 --- a/arch/sparc/kernel/central.c +++ b/arch/sparc/kernel/central.c @@ -269,4 +269,4 @@ static int __init sunfire_init(void) return 0; } -subsys_initcall(sunfire_init); +fs_initcall(sunfire_init); diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index b57a5942ba6..874162a11ce 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S @@ -495,11 +495,11 @@ xcall_fetch_glob_regs: stx %o7, [%g1 + GR_SNAP_O7] stx %i7, [%g1 + GR_SNAP_I7] /* Don't try this at home kids... */ - rdpr %cwp, %g2 - sub %g2, 1, %g7 + rdpr %cwp, %g3 + sub %g3, 1, %g7 wrpr %g7, %cwp mov %i7, %g7 - wrpr %g2, %cwp + wrpr %g3, %cwp stx %g7, [%g1 + GR_SNAP_RPC] sethi %hi(trap_block), %g7 or %g7, %lo(trap_block), %g7 diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h index bc4f562bd45..7594764d8a6 100644 --- a/arch/tile/include/asm/thread_info.h +++ b/arch/tile/include/asm/thread_info.h @@ -100,9 +100,14 @@ extern void cpu_idle_on_new_stack(struct thread_info *old_ti, #else /* __ASSEMBLY__ */ -/* how to get the thread information struct from ASM */ +/* + * How to get the thread information struct from assembly. + * Note that we use different macros since different architectures + * have different semantics in their "mm" instruction and we would + * like to guarantee that the macro expands to exactly one instruction. + */ #ifdef __tilegx__ -#define GET_THREAD_INFO(reg) move reg, sp; mm reg, zero, LOG2_THREAD_SIZE, 63 +#define EXTRACT_THREAD_INFO(reg) mm reg, zero, LOG2_THREAD_SIZE, 63 #else #define GET_THREAD_INFO(reg) mm reg, sp, zero, LOG2_THREAD_SIZE, 31 #endif diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c index 77763ccd5a7..cdef6e5ec02 100644 --- a/arch/tile/kernel/compat_signal.c +++ b/arch/tile/kernel/compat_signal.c @@ -403,19 +403,17 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, * Set up registers for signal handler. * Registers that we don't modify keep the value they had from * user-space at the time we took the signal. + * We always pass siginfo and mcontext, regardless of SA_SIGINFO, + * since some things rely on this (e.g. glibc's debug/segfault.c). */ regs->pc = ptr_to_compat_reg(ka->sa.sa_handler); regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */ regs->sp = ptr_to_compat_reg(frame); regs->lr = restorer; regs->regs[0] = (unsigned long) usig; - - if (ka->sa.sa_flags & SA_SIGINFO) { - /* Need extra arguments, so mark to restore caller-saves. */ - regs->regs[1] = ptr_to_compat_reg(&frame->info); - regs->regs[2] = ptr_to_compat_reg(&frame->uc); - regs->flags |= PT_FLAGS_CALLER_SAVES; - } + regs->regs[1] = ptr_to_compat_reg(&frame->info); + regs->regs[2] = ptr_to_compat_reg(&frame->uc); + regs->flags |= PT_FLAGS_CALLER_SAVES; /* * Notify any tracer that was single-stepping it. diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S index 5d56a1ef5ba..6943515100f 100644 --- a/arch/tile/kernel/intvec_32.S +++ b/arch/tile/kernel/intvec_32.S @@ -839,6 +839,18 @@ STD_ENTRY(interrupt_return) FEEDBACK_REENTER(interrupt_return) /* + * Use r33 to hold whether we have already loaded the callee-saves + * into ptregs. We don't want to do it twice in this loop, since + * then we'd clobber whatever changes are made by ptrace, etc. + * Get base of stack in r32. 
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 5d56a1ef5ba..6943515100f 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -839,6 +839,18 @@ STD_ENTRY(interrupt_return)
 	FEEDBACK_REENTER(interrupt_return)
 
 	/*
+	 * Use r33 to hold whether we have already loaded the callee-saves
+	 * into ptregs. We don't want to do it twice in this loop, since
+	 * then we'd clobber whatever changes are made by ptrace, etc.
+	 * Get base of stack in r32.
+	 */
+	{
+	 GET_THREAD_INFO(r32)
+	 movei	r33, 0
+	}
+
+.Lretry_work_pending:
+	/*
 	 * Disable interrupts so as to make sure we don't
 	 * miss an interrupt that sets any of the thread flags (like
 	 * need_resched or sigpending) between sampling and the iret.
@@ -848,9 +860,6 @@ STD_ENTRY(interrupt_return)
 	IRQ_DISABLE(r20, r21)
 	TRACE_IRQS_OFF  /* Note: clobbers registers r0-r29 */
 
-	/* Get base of stack in r32; note r30/31 are used as arguments here. */
-	GET_THREAD_INFO(r32)
-
 	/* Check to see if there is any work to do before returning to user. */
 	{
@@ -866,16 +875,18 @@ STD_ENTRY(interrupt_return)
 
 	/*
 	 * Make sure we have all the registers saved for signal
-	 * handling or single-step. Call out to C code to figure out
-	 * exactly what we need to do for each flag bit, then if
-	 * necessary, reload the flags and recheck.
+	 * handling, notify-resume, or single-step. Call out to C
+	 * code to figure out exactly what we need to do for each flag bit,
+	 * then if necessary, reload the flags and recheck.
 	 */
-	push_extra_callee_saves r0
 	{
 	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
-	 jal do_work_pending
+	 bnz r33, 1f
 	}
-	bnz	r0, .Lresume_userspace
+	push_extra_callee_saves r0
+	movei	r33, 1
+1:	jal	do_work_pending
+	bnz	r0, .Lretry_work_pending
 
 	/*
 	 * In the NMI case we
@@ -1180,10 +1191,12 @@ handle_syscall:
 	add	r20, r20, tp
 	lw	r21, r20
 	addi	r21, r21, 1
-	sw	r20, r21
+	{
+	 sw	r20, r21
+	 GET_THREAD_INFO(r31)
+	}
 
 	/* Trace syscalls, if requested. */
-	GET_THREAD_INFO(r31)
 	addi	r31, r31, THREAD_INFO_FLAGS_OFFSET
 	lw	r30, r31
 	andi	r30, r30, _TIF_SYSCALL_TRACE
@@ -1362,7 +1375,10 @@ handle_ill:
 3:
 	/* set PC and continue */
 	lw	r26, r24
-	sw	r28, r26
+	{
+	 sw	r28, r26
+	 GET_THREAD_INFO(r0)
+	}
 
 	/*
 	 * Clear TIF_SINGLESTEP to prevent recursion if we execute an ill.
@@ -1370,7 +1386,6 @@ handle_ill:
 	 * need to clear it here and can't really impose on all other arches.
	 * So what's another write between friends?
 	 */
-	GET_THREAD_INFO(r0)
 	addi	r1, r0, THREAD_INFO_FLAGS_OFFSET
 	{
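Before the 64-bit copy of the same change: in rough C, the new .Lretry_work_pending loop behaves as in the sketch below. do_work_pending() is real (its new kernel-mode guard appears in the process.c hunk further down); everything else stands in for registers r32/r33 and the assembly labels, so this compiles as a unit but is shape only:

    #include <stdbool.h>

    /* Stand-ins for the assembly environment; illustrative only. */
    extern unsigned int read_thread_flags(void);   /* load from thread_info */
    extern int do_work_pending(void *regs, unsigned int flags);
    extern void push_extra_callee_saves(void *regs);
    extern void irq_disable(void);
    extern void restore_all_and_iret(void);

    #define _TIF_ALLWORK_MASK 0xffu                /* placeholder value */

    void interrupt_return_sketch(void *regs)
    {
        bool saved = false;                        /* plays the role of r33 */

        for (;;) {                                 /* .Lretry_work_pending */
            irq_disable();
            unsigned int flags = read_thread_flags();
            if (!(flags & _TIF_ALLWORK_MASK))
                break;                             /* no work left to do */
            if (!saved) {
                /* Only once, so later iterations don't clobber
                 * whatever ptrace wrote into ptregs. */
                push_extra_callee_saves(regs);
                saved = true;
            }
            if (!do_work_pending(regs, flags))
                break;                             /* 0 means done */
        }
        restore_all_and_iret();
    }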
 	 */
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 49d9d662168..30ae76e50c4 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -647,6 +647,20 @@ STD_ENTRY(interrupt_return)
 	FEEDBACK_REENTER(interrupt_return)
 
 	/*
+	 * Use r33 to hold whether we have already loaded the callee-saves
+	 * into ptregs. We don't want to do it twice in this loop, since
+	 * then we'd clobber whatever changes are made by ptrace, etc.
+	 */
+	{
+	 movei	r33, 0
+	 move	r32, sp
+	}
+
+	/* Get base of stack in r32. */
+	EXTRACT_THREAD_INFO(r32)
+
+.Lretry_work_pending:
+	/*
 	 * Disable interrupts so as to make sure we don't
 	 * miss an interrupt that sets any of the thread flags (like
 	 * need_resched or sigpending) between sampling and the iret.
@@ -656,9 +670,6 @@ STD_ENTRY(interrupt_return)
 	IRQ_DISABLE(r20, r21)
 	TRACE_IRQS_OFF  /* Note: clobbers registers r0-r29 */
 
-	/* Get base of stack in r32; note r30/31 are used as arguments here. */
-	GET_THREAD_INFO(r32)
-
 	/* Check to see if there is any work to do before returning to user. */
 	{
@@ -674,16 +685,18 @@ STD_ENTRY(interrupt_return)
 
 	/*
 	 * Make sure we have all the registers saved for signal
-	 * handling or single-step. Call out to C code to figure out
+	 * handling or notify-resume. Call out to C code to figure out
 	 * exactly what we need to do for each flag bit, then if
 	 * necessary, reload the flags and recheck.
 	 */
-	push_extra_callee_saves r0
 	{
 	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
-	 jal do_work_pending
+	 bnez r33, 1f
 	}
-	bnez	r0, .Lresume_userspace
+	push_extra_callee_saves r0
+	movei	r33, 1
+1:	jal	do_work_pending
+	bnez	r0, .Lretry_work_pending
 
 	/*
 	 * In the NMI case we
@@ -968,11 +981,16 @@ handle_syscall:
 	shl16insli r20, r20, hw0(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
 	add	r20, r20, tp
 	ld4s	r21, r20
-	addi	r21, r21, 1
-	st4	r20, r21
+	{
+	 addi	r21, r21, 1
+	 move	r31, sp
+	}
+	{
+	 st4	r20, r21
+	 EXTRACT_THREAD_INFO(r31)
+	}
 
 	/* Trace syscalls, if requested. */
-	GET_THREAD_INFO(r31)
 	addi	r31, r31, THREAD_INFO_FLAGS_OFFSET
 	ld	r30, r31
 	andi	r30, r30, _TIF_SYSCALL_TRACE
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 2d5ef617bb3..54e6c64b85c 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -567,6 +567,10 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
  */
 int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
 {
+	/* If we enter in kernel mode, do nothing and exit the caller loop. */
+	if (!user_mode(regs))
+		return 0;
+
 	if (thread_info_flags & _TIF_NEED_RESCHED) {
 		schedule();
 		return 1;
@@ -589,8 +593,7 @@ int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
 		return 1;
 	}
 	if (thread_info_flags & _TIF_SINGLESTEP) {
-		if ((regs->ex1 & SPR_EX_CONTEXT_1_1__PL_MASK) == 0)
-			single_step_once(regs);
+		single_step_once(regs);
 		return 0;
 	}
 	panic("work_pending: bad flags %#x\n", thread_info_flags);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1d14cc6b79a..c9866b0b77d 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -81,7 +81,7 @@ config X86
 	select CLKEVT_I8253
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select GENERIC_IOMAP
-	select DCACHE_WORD_ACCESS if !DEBUG_PAGEALLOC
+	select DCACHE_WORD_ACCESS
 
 config INSTRUCTION_DECODER
 	def_bool (KPROBES || PERF_EVENTS)
diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
index d3c0b027766..fb7117a4ade 100644
--- a/arch/x86/boot/compressed/relocs.c
+++ b/arch/x86/boot/compressed/relocs.c
@@ -403,13 +403,11 @@ static void print_absolute_symbols(void)
 	for (i = 0; i < ehdr.e_shnum; i++) {
 		struct section *sec = &secs[i];
 		char *sym_strtab;
-		Elf32_Sym *sh_symtab;
 		int j;
 
 		if (sec->shdr.sh_type != SHT_SYMTAB) {
 			continue;
 		}
-		sh_symtab = sec->symtab;
 		sym_strtab = sec->link->strtab;
 		for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) {
 			Elf32_Sym *sym;
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 4824fb45560..07b3a68d2d2 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -294,8 +294,7 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 
 	/* OK, This is the point of no return */
 	set_personality(PER_LINUX);
-	set_thread_flag(TIF_IA32);
-	current->mm->context.ia32_compat = 1;
+	set_personality_ia32(false);
 
 	setup_new_exec(bprm);
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 734c3767cfa..183922e13de 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -170,6 +170,9 @@ static inline int kvm_para_available(void)
 	unsigned int eax, ebx, ecx, edx;
 	char signature[13];
 
+	if (boot_cpu_data.cpuid_level < 0)
+		return 0;	/* So we don't blow up on old processors */
+
 	cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
 	memcpy(signature + 0, &ebx, 4);
 	memcpy(signature + 4, &ecx, 4);
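On the kvm_para.h hunk just above: the new cpuid_level guard aside, the signature test is plain little-endian byte gluing. A small user-space sketch with the register values hard-coded; in the kernel they come back from cpuid(KVM_CPUID_SIGNATURE):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* Values a KVM hypervisor returns for the signature leaf;
         * hard-coded here purely for illustration. */
        unsigned int ebx = 0x4b4d564b;   /* "KVMK" in little-endian */
        unsigned int ecx = 0x564b4d56;   /* "VMKV" */
        unsigned int edx = 0x0000004d;   /* "M\0\0\0" */
        char signature[13];

        memcpy(signature + 0, &ebx, 4);
        memcpy(signature + 4, &ecx, 4);
        memcpy(signature + 8, &edx, 4);
        signature[12] = 0;

        puts(signature);                 /* prints "KVMKVMKVM" */
        return 0;
    }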
diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
index 6fe6767b712..e58f03b206c 100644
--- a/arch/x86/include/asm/word-at-a-time.h
+++ b/arch/x86/include/asm/word-at-a-time.h
@@ -43,4 +43,37 @@ static inline unsigned long has_zero(unsigned long a)
 	return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80);
 }
 
+/*
+ * Load an unaligned word from kernel space.
+ *
+ * In the (very unlikely) case of the word being a page-crosser
+ * and the next page not being mapped, take the exception and
+ * return zeroes in the non-existing part.
+ */
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+	unsigned long ret, dummy;
+
+	asm(
+		"1:\tmov %2,%0\n"
+		"2:\n"
+		".section .fixup,\"ax\"\n"
+		"3:\t"
+		"lea %2,%1\n\t"
+		"and %3,%1\n\t"
+		"mov (%1),%0\n\t"
+		"leal %2,%%ecx\n\t"
+		"andl %4,%%ecx\n\t"
+		"shll $3,%%ecx\n\t"
+		"shr %%cl,%0\n\t"
+		"jmp 2b\n"
+		".previous\n"
+		_ASM_EXTABLE(1b, 3b)
+		:"=&r" (ret),"=&c" (dummy)
+		:"m" (*(unsigned long *)addr),
+		 "i" (-sizeof(unsigned long)),
+		 "i" (sizeof(unsigned long)-1));
+	return ret;
+}
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
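The .fixup arithmetic in load_unaligned_zeropad() is easier to follow in C. A hedged, portable sketch of the semantics only (the real code must take the actual fault first; this just shows the align-down, load, shift-down recovery that makes the unmapped tail read as zeroes on little-endian):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* What the fixup path computes: load the aligned word containing
     * addr, then shift right by 8 * (addr % sizeof(long)) so the bytes
     * from the unmapped page come out as zero padding on top. */
    static unsigned long zeropad_fixup(const unsigned char *aligned_word,
                                       unsigned long misalignment)
    {
        unsigned long word;
        memcpy(&word, aligned_word, sizeof(word));
        return word >> (8 * misalignment);   /* little-endian shift-down */
    }

    int main(void)
    {
        /* Pretend these are the last eight mapped bytes of a page and the
         * unaligned load started five bytes in. */
        unsigned char page_end[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        unsigned long v = zeropad_fixup(page_end, 5);
        printf("%#lx\n", v);   /* prints 0x80706: high bytes zero-padded */
        return 0;
    }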
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index a415b1f4436..7c439fe4941 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -593,7 +593,7 @@ void __init acpi_set_irq_model_ioapic(void)
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 #include <acpi/processor.h>
 
-static void __cpuinitdata acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
 	int nid;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 1c67ca100e4..146bb6218ee 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -580,6 +580,24 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		}
 	}
 
+	/* re-enable TopologyExtensions if switched off by BIOS */
+	if ((c->x86 == 0x15) &&
+	    (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
+	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
+		u64 val;
+
+		if (!rdmsrl_amd_safe(0xc0011005, &val)) {
+			val |= 1ULL << 54;
+			wrmsrl_amd_safe(0xc0011005, val);
+			rdmsrl(0xc0011005, val);
+			if (val & (1ULL << 54)) {
+				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
+				printk(KERN_INFO FW_INFO "CPU: Re-enabling "
+				       "disabled Topology Extensions Support\n");
+			}
+		}
+	}
+
 	cpu_detect_cache_sizes(c);
 
 	/* Multi core CPU? */
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index b8ba6e4a27e..e554e5ad2fe 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -79,7 +79,6 @@ struct kvm_task_sleep_node {
 	u32 token;
 	int cpu;
 	bool halted;
-	struct mm_struct *mm;
 };
 
 static struct kvm_task_sleep_head {
@@ -126,9 +125,7 @@ void kvm_async_pf_task_wait(u32 token)
 
 	n.token = token;
 	n.cpu = smp_processor_id();
-	n.mm = current->active_mm;
 	n.halted = idle || preempt_count() > 1;
-	atomic_inc(&n.mm->mm_count);
 	init_waitqueue_head(&n.wq);
 	hlist_add_head(&n.link, &b->list);
 	spin_unlock(&b->lock);
@@ -161,9 +158,6 @@ EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
 static void apf_task_wake_one(struct kvm_task_sleep_node *n)
 {
 	hlist_del_init(&n->link);
-	if (!n->mm)
-		return;
-	mmdrop(n->mm);
 	if (n->halted)
 		smp_send_reschedule(n->cpu);
 	else if (waitqueue_active(&n->wq))
@@ -207,7 +201,7 @@ again:
 		 * async PF was not yet handled.
 		 * Add dummy entry for the token.
 		 */
-		n = kmalloc(sizeof(*n), GFP_ATOMIC);
+		n = kzalloc(sizeof(*n), GFP_ATOMIC);
 		if (!n) {
 			/*
 			 * Allocation failed! Busy wait while other cpu
@@ -219,7 +213,6 @@ again:
 		}
 		n->token = token;
 		n->cpu = smp_processor_id();
-		n->mm = NULL;
 		init_waitqueue_head(&n->wq);
 		hlist_add_head(&n->link, &b->list);
 	} else
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index 3ca42d0e43a..0327e2b3c40 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -147,12 +147,6 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 
 	memset(csig, 0, sizeof(*csig));
 
-	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
-	    cpu_has(c, X86_FEATURE_IA64)) {
-		pr_err("CPU%d not a capable Intel processor\n", cpu_num);
-		return -1;
-	}
-
 	csig->sig = cpuid_eax(0x00000001);
 
 	if ((c->x86_model >= 5) || (c->x86 > 6)) {
@@ -463,6 +457,14 @@ static struct microcode_ops microcode_intel_ops = {
 
 struct microcode_ops * __init init_intel_microcode(void)
 {
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
+	    cpu_has(c, X86_FEATURE_IA64)) {
+		pr_err("Intel CPU family 0x%x not supported\n", c->x86);
+		return NULL;
+	}
+
 	return &microcode_intel_ops;
 }
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 733ca39f367..43d8b48b23e 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -423,6 +423,7 @@ void set_personality_ia32(bool x32)
 		current_thread_info()->status |= TS_COMPAT;
 	}
 }
+EXPORT_SYMBOL_GPL(set_personality_ia32);
 
 unsigned long get_wchan(struct task_struct *p)
 {
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 71f4727da37..5a98aa27218 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -185,10 +185,22 @@ void __init setup_per_cpu_areas(void)
 #endif
 	rc = -EINVAL;
 	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
-		const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
 		const size_t dyn_size = PERCPU_MODULE_RESERVE +
 			PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
+		size_t atom_size;
 
+		/*
+		 * On 64bit, use PMD_SIZE for atom_size so that embedded
+		 * percpu areas are aligned to PMD. This, in the future,
+		 * can also allow using PMD mappings in vmalloc area. Use
+		 * PAGE_SIZE on 32bit as vmalloc space is highly contended
+		 * and large vmalloc area allocs can easily fail.
+		 */
+#ifdef CONFIG_X86_64
+		atom_size = PMD_SIZE;
+#else
+		atom_size = PAGE_SIZE;
+#endif
 		rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
 					    dyn_size, atom_size,
 					    pcpu_cpu_distance,
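On the atom_size choice just above: atom_size is the allocation and alignment granule for the embedded first chunk, so PMD_SIZE buys 2 MiB alignment (and the possibility of huge mappings later) at the cost of round-up waste that 32-bit vmalloc space cannot afford. A toy round-up illustration with stand-in sizes; the real layout logic in pcpu_embed_first_chunk() packs multiple CPUs' units per atom, so this only shows the granularity trade-off:

    #include <stdio.h>

    #define PMD_SIZE  (2UL << 20)   /* 2 MiB, x86-64 value */
    #define PAGE_SIZE (4UL << 10)   /* 4 KiB */

    /* Round a region up to the allocation atom (atom must be a power of 2). */
    static unsigned long round_to_atom(unsigned long want, unsigned long atom)
    {
        return (want + atom - 1) & ~(atom - 1);
    }

    int main(void)
    {
        unsigned long want = 44UL << 10;   /* say, 44 KiB of percpu data */
        printf("atom=PAGE_SIZE: %lu KiB\n", round_to_atom(want, PAGE_SIZE) >> 10);
        printf("atom=PMD_SIZE:  %lu KiB\n", round_to_atom(want, PMD_SIZE) >> 10);
        return 0;
    }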
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 91a5e989abc..185a2b823a2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6581,6 +6581,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 		kvm_inject_page_fault(vcpu, &fault);
 	}
 	vcpu->arch.apf.halted = false;
+	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 }
 
 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/platform/geode/net5501.c b/arch/x86/platform/geode/net5501.c
index 66d377e334f..646e3b5b4bb 100644
--- a/arch/x86/platform/geode/net5501.c
+++ b/arch/x86/platform/geode/net5501.c
@@ -63,7 +63,7 @@ static struct gpio_led net5501_leds[] = {
 		.name = "net5501:1",
 		.gpio = 6,
 		.default_trigger = "default-on",
-		.active_low = 1,
+		.active_low = 0,
 	},
 };
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index a8f8844b8d3..95dccce8e97 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -63,6 +63,7 @@
 #include <asm/stackprotector.h>
 #include <asm/hypervisor.h>
 #include <asm/mwait.h>
+#include <asm/pci_x86.h>
 
 #ifdef CONFIG_ACPI
 #include <linux/acpi.h>
@@ -809,9 +810,40 @@ static void xen_io_delay(void)
 }
 
 #ifdef CONFIG_X86_LOCAL_APIC
+static unsigned long xen_set_apic_id(unsigned int x)
+{
+	WARN_ON(1);
+	return x;
+}
+static unsigned int xen_get_apic_id(unsigned long x)
+{
+	return ((x)>>24) & 0xFFu;
+}
 static u32 xen_apic_read(u32 reg)
 {
-	return 0;
+	struct xen_platform_op op = {
+		.cmd = XENPF_get_cpuinfo,
+		.interface_version = XENPF_INTERFACE_VERSION,
+		.u.pcpu_info.xen_cpuid = 0,
+	};
+	int ret = 0;
+
+	/* Shouldn't need this as APIC is turned off for PV, and we only
+	 * get called on the bootup processor. But just in case. */
+	if (!xen_initial_domain() || smp_processor_id())
+		return 0;
+
+	if (reg == APIC_LVR)
+		return 0x10;
+
+	if (reg != APIC_ID)
+		return 0;
+
+	ret = HYPERVISOR_dom0_op(&op);
+	if (ret)
+		return 0;
+
+	return op.u.pcpu_info.apic_id << 24;
 }
 
 static void xen_apic_write(u32 reg, u32 val)
@@ -849,6 +881,8 @@ static void set_xen_basic_apic_ops(void)
 	apic->icr_write = xen_apic_icr_write;
 	apic->wait_icr_idle = xen_apic_wait_icr_idle;
 	apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
+	apic->set_apic_id = xen_set_apic_id;
+	apic->get_apic_id = xen_get_apic_id;
 }
 #endif
 
@@ -1365,8 +1399,10 @@ asmlinkage void __init xen_start_kernel(void)
 		/* Make sure ACS will be enabled */
 		pci_request_acs();
 	}
-
-
+#ifdef CONFIG_PCI
+	/* PCI BIOS service won't work from a PV guest. */
+	pci_probe &= ~PCI_PROBE_BIOS;
+#endif
 	xen_raw_console_write("about to get started...\n");
 
 	xen_setup_runstate_info(0);
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index b8e279479a6..69f5857660a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -353,8 +353,13 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
 {
 	if (val & _PAGE_PRESENT) {
 		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
+		unsigned long pfn = mfn_to_pfn(mfn);
+
 		pteval_t flags = val & PTE_FLAGS_MASK;
-		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
+		if (unlikely(pfn == ~0))
+			val = flags & ~_PAGE_PRESENT;
+		else
+			val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
 	}
 
 	return val;
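Finally, the pte_mfn_to_pfn() change above in user-space form: when the machine-to-physical lookup has no answer (mfn_to_pfn() returns ~0), the PTE is reported as non-present instead of carrying a garbage pfn. The masks and the lookup stub below are illustrative stand-ins, not the real Xen definitions:

    #include <stdio.h>

    typedef unsigned long pteval_t;

    #define PAGE_SHIFT     12
    #define PTE_PFN_MASK   (~0xfffUL)   /* simplified: real mask also strips NX etc. */
    #define PTE_FLAGS_MASK (0xfffUL)
    #define _PAGE_PRESENT  0x001UL

    /* Stand-in for the M2P table: pretend mfn 0x200 has no backing pfn. */
    static unsigned long mfn_to_pfn(unsigned long mfn)
    {
        return mfn == 0x200 ? ~0UL : mfn + 0x1000;
    }

    static pteval_t pte_mfn_to_pfn(pteval_t val)
    {
        if (val & _PAGE_PRESENT) {
            unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
            unsigned long pfn = mfn_to_pfn(mfn);
            pteval_t flags = val & PTE_FLAGS_MASK;

            if (pfn == ~0UL)
                val = flags & ~_PAGE_PRESENT;  /* report non-present */
            else
                val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
        }
        return val;
    }

    int main(void)
    {
        printf("mapped:  %#lx\n", pte_mfn_to_pfn((0x123UL << PAGE_SHIFT) | 0x067));
        printf("missing: %#lx\n", pte_mfn_to_pfn((0x200UL << PAGE_SHIFT) | 0x067));
        return 0;
    }

The second call returns just the flags with the present bit cleared, which is what keeps callers from dereferencing a bogus frame number.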