Diffstat (limited to 'arch')
60 files changed, 378 insertions, 179 deletions
diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
index 333238564b6..5d76706139d 100644
--- a/arch/arc/kernel/ptrace.c
+++ b/arch/arc/kernel/ptrace.c
@@ -102,7 +102,7 @@ static int genregs_set(struct task_struct *target,
 	REG_IGNORE_ONE(pad2);
 	REG_IN_CHUNK(callee, efa, cregs);	/* callee_regs[r25..r13] */
 	REG_IGNORE_ONE(efa);			/* efa update invalid */
-	REG_IN_ONE(stop_pc, &ptregs->ret);	/* stop_pc: PC update */
+	REG_IGNORE_ONE(stop_pc);		/* PC updated via @ret */
 
 	return ret;
 }
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index a37a50f575a..db50b626be9 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -296,10 +296,15 @@ archprepare:
 # Convert bzImage to zImage
 bzImage: zImage
 
-zImage Image xipImage bootpImage uImage: vmlinux
+BOOT_TARGETS	= zImage Image xipImage bootpImage uImage
+INSTALL_TARGETS	= zinstall uinstall install
+
+PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
+
+$(BOOT_TARGETS): vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
-zinstall uinstall install: vmlinux
+$(INSTALL_TARGETS):
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
 
 %.dtb: | scripts
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 84aa2caf07e..ec2f8065f95 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -95,24 +95,24 @@ initrd:
 	@test "$(INITRD)" != "" || \
 	(echo You must specify INITRD; exit -1)
 
-install: $(obj)/Image
-	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+install:
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 	$(obj)/Image System.map "$(INSTALL_PATH)"
 
-zinstall: $(obj)/zImage
-	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+zinstall:
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 	$(obj)/zImage System.map "$(INSTALL_PATH)"
 
-uinstall: $(obj)/uImage
-	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+uinstall:
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 	$(obj)/uImage System.map "$(INSTALL_PATH)"
 
 zi:
-	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 	$(obj)/zImage System.map "$(INSTALL_PATH)"
 
 i:
-	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 	$(obj)/Image System.map "$(INSTALL_PATH)"
 
 subdir- := bootp compressed dts
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 7d7cc777ff7..bbac42a78ce 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -96,6 +96,11 @@
 			     <1 14 0xf08>,
 			     <1 11 0xf08>,
 			     <1 10 0xf08>;
+		/* Unfortunately we need this since some versions of U-Boot
+		 * on Exynos don't set the CNTFRQ register, so we need the
+		 * value from DT.
+		 */
+		clock-frequency = <24000000>;
 	};
 
 	mct@101C0000 {
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index 84838469f0d..ba4dcfc6b72 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -11,7 +11,7 @@
 
 / {
 	model = "TI OMAP3 BeagleBoard xM";
-	compatible = "ti,omap3-beagle-xm", "ti,omap3-beagle", "ti,omap3";
+	compatible = "ti,omap3-beagle-xm", "ti,omap36xx", "ti,omap3";
 
 	cpus {
 		cpu@0 {
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 7d95cda1fae..b41bd57f432 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -108,7 +108,7 @@
 			#address-cells = <1>;
 			#size-cells = <0>;
 			pinctrl-single,register-width = <16>;
-			pinctrl-single,function-mask = <0x7f1f>;
+			pinctrl-single,function-mask = <0xff1f>;
 		};
 
 		omap3_pmx_wkup: pinmux@0x48002a00 {
@@ -117,7 +117,7 @@
 			#address-cells = <1>;
 			#size-cells = <0>;
 			pinctrl-single,register-width = <16>;
-			pinctrl-single,function-mask = <0x7f1f>;
+			pinctrl-single,function-mask = <0xff1f>;
 		};
 
 		gpio1: gpio@48310000 {
diff --git a/arch/arm/boot/install.sh b/arch/arm/boot/install.sh
index 06ea7d42ce8..2a45092a40e 100644
--- a/arch/arm/boot/install.sh
+++ b/arch/arm/boot/install.sh
@@ -20,6 +20,20 @@
 #   $4 - default install path (blank if root directory)
 #
 
+verify () {
+	if [ ! -f "$1" ]; then
+		echo "" 1>&2
+		echo " *** Missing file: $1" 1>&2
+		echo ' *** You need to run "make" before "make install".' 1>&2
+		echo "" 1>&2
+		exit 1
+	fi
+}
+
+# Make sure the files actually exist
+verify "$2"
+verify "$3"
+
 # User may have a custom install script
 if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
 if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 370236dd1a0..990250965f2 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -51,7 +51,8 @@ void mcpm_cpu_power_down(void)
 {
 	phys_reset_t phys_reset;
 
-	BUG_ON(!platform_ops);
+	if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down))
+		return;
 	BUG_ON(!irqs_disabled());
 
 	/*
@@ -93,7 +94,8 @@ void mcpm_cpu_suspend(u64 expected_residency)
 {
 	phys_reset_t phys_reset;
 
-	BUG_ON(!platform_ops);
+	if (WARN_ON_ONCE(!platform_ops || !platform_ops->suspend))
+		return;
 	BUG_ON(!irqs_disabled());
 
 	/* Very similar to mcpm_cpu_power_down() */
diff --git a/arch/arm/common/sharpsl_param.c b/arch/arm/common/sharpsl_param.c
index d56c932580e..025f6ce3859 100644
--- a/arch/arm/common/sharpsl_param.c
+++ b/arch/arm/common/sharpsl_param.c
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/string.h>
 #include <asm/mach/sharpsl_param.h>
+#include <asm/memory.h>
 
 /*
  * Certain hardware parameters determined at the time of device manufacture,
@@ -25,8 +26,10 @@
  */
 #ifdef CONFIG_ARCH_SA1100
 #define PARAM_BASE	0xe8ffc000
+#define param_start(x)	(void *)(x)
 #else
 #define PARAM_BASE	0xa0000a00
+#define param_start(x)	__va(x)
 #endif
 #define MAGIC_CHG(a,b,c,d) ( ( d << 24 ) | ( c << 16 ) | ( b << 8 ) | a )
 
@@ -41,7 +44,7 @@ EXPORT_SYMBOL(sharpsl_param);
 
 void sharpsl_save_param(void)
 {
-	memcpy(&sharpsl_param, (void *)PARAM_BASE, sizeof(struct sharpsl_param_info));
+	memcpy(&sharpsl_param, param_start(PARAM_BASE), sizeof(struct sharpsl_param_info));
 
 	if (sharpsl_param.comadj_keyword != COMADJ_MAGIC)
 		sharpsl_param.comadj=-1;
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index d3db39860b9..59ceae8f3c9 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -31,5 +31,4 @@ generic-y += termbits.h
 generic-y += termios.h
 generic-y += timex.h
 generic-y += trace_clock.h
-generic-y += types.h
 generic-y += unaligned.h
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index bfc198c7591..863c892b4aa 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -16,7 +16,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("1:\n\t"
+	asm_volatile_goto("1:\n\t"
		 JUMP_LABEL_NOP "\n\t"
		 ".pushsection __jump_table,  \"aw\"\n\t"
		 ".word 1b, %l[l_yes], %c0\n\t"
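Note on the `asm goto` conversion above (and the matching MIPS, PowerPC, s390, SPARC and x86 hunks further down): each arch_static_branch() switches to an `asm_volatile_goto()` wrapper to dodge a GCC miscompilation of `asm goto` statements (GCC PR58670). A minimal sketch of the wrapper, assuming it matches the `include/linux/compiler-gcc.h` definition of this era:

	/*
	 * Sketch of asm_volatile_goto(), assumed to match
	 * include/linux/compiler-gcc.h circa v3.12: the empty asm placed
	 * after the "asm goto" acts as a barrier that keeps GCC from
	 * miscompiling or hoisting the goto (GCC PR58670).
	 */
	#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)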
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index 0f7b7620e9a..fc82a88f5b6 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -76,8 +76,11 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
  *
  * This must be called with interrupts disabled.
  *
- * This does not return.  Re-entry in the kernel is expected via
- * mcpm_entry_point.
+ * On success this does not return.  Re-entry in the kernel is expected
+ * via mcpm_entry_point.
+ *
+ * This will return if mcpm_platform_register() has not been called
+ * previously in which case the caller should take appropriate action.
  */
 void mcpm_cpu_power_down(void);
 
@@ -98,8 +101,11 @@ void mcpm_cpu_power_down(void);
  *
  * This must be called with interrupts disabled.
  *
- * This does not return.  Re-entry in the kernel is expected via
- * mcpm_entry_point.
+ * On success this does not return.  Re-entry in the kernel is expected
+ * via mcpm_entry_point.
+ *
+ * This will return if mcpm_platform_register() has not been called
+ * previously in which case the caller should take appropriate action.
  */
 void mcpm_cpu_suspend(u64 expected_residency);
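With mcpm_cpu_power_down() and mcpm_cpu_suspend() now able to return when no platform ops were registered (the mcpm_entry.c hunk above), callers need a fallback path. A hedged sketch of what a caller might look like; the WFI fallback via cpu_do_idle() is an assumption for illustration, not taken from this series:

	#include <asm/mcpm.h>
	#include <asm/proc-fns.h>	/* cpu_do_idle() */

	/*
	 * Hypothetical CPU-kill path illustrating the new contract
	 * documented above: mcpm_cpu_power_down() may now return if
	 * mcpm_platform_register() was never called.
	 */
	static void example_cpu_die(unsigned int cpu)
	{
		mcpm_cpu_power_down();	/* does not return on success */

		/* No MCPM platform ops registered: park the CPU instead. */
		while (1)
			cpu_do_idle();
	}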
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index f1d96d4e809..73ddd7239b3 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -57,6 +57,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
					 unsigned int i, unsigned int n,
					 unsigned long *args)
 {
+	if (n == 0)
+		return;
+
 	if (i + n > SYSCALL_MAX_ARGS) {
 		unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
 		unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
@@ -81,6 +84,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
					 unsigned int i, unsigned int n,
					 const unsigned long *args)
 {
+	if (n == 0)
+		return;
+
 	if (i + n > SYSCALL_MAX_ARGS) {
 		pr_warning("%s called with max args %d, handling only %d\n",
			   __func__, i + n, SYSCALL_MAX_ARGS);
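The early return above guards the degenerate n == 0 case (zero-argument syscalls, as seen by syscall tracing), where the unguarded path would still do pointer arithmetic on pt_regs and a zero-length memcpy() for no benefit. A simplified userspace model of the guarded logic, assuming ARM's SYSCALL_MAX_ARGS of 7; not kernel code:

	#include <stdio.h>
	#include <string.h>

	#define SYSCALL_MAX_ARGS 7

	static void get_args(const unsigned long *regs, unsigned int i,
			     unsigned int n, unsigned long *args)
	{
		if (n == 0)	/* the fix: nothing to copy, bail out early */
			return;
		if (i + n > SYSCALL_MAX_ARGS) {
			/* zero the out-of-range tail, as the kernel does */
			memset(args + SYSCALL_MAX_ARGS - i, 0,
			       (n + i - SYSCALL_MAX_ARGS) * sizeof(*args));
			n = SYSCALL_MAX_ARGS - i;
		}
		memcpy(args, regs + i, n * sizeof(*args));
	}

	int main(void)
	{
		unsigned long regs[SYSCALL_MAX_ARGS] = { 1, 2, 3 };
		unsigned long args[SYSCALL_MAX_ARGS];

		get_args(regs, 0, 0, args);	/* zero-arg syscall: no-op */
		get_args(regs, 0, 3, args);
		printf("%lu %lu %lu\n", args[0], args[1], args[2]);
		return 0;
	}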
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 2c7cc1e0347..476de57dcef 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -487,7 +487,26 @@ __fixup_smp:
 	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
 	and	r0, r0, #0xc0000000	@ multiprocessing extensions and
 	teq	r0, #0x80000000		@ not part of a uniprocessor system?
-	moveq	pc, lr			@ yes, assume SMP
+	bne	__fixup_smp_on_up	@ no, assume UP
+
+	@ Core indicates it is SMP. Check for Aegis SOC where a single
+	@ Cortex-A9 CPU is present but SMP operations fault.
+	mov	r4, #0x41000000
+	orr	r4, r4, #0x0000c000
+	orr	r4, r4, #0x00000090
+	teq	r3, r4			@ Check for ARM Cortex-A9
+	movne	pc, lr			@ Not ARM Cortex-A9,
+
+	@ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
+	@ below address check will need to be #ifdef'd or equivalent
+	@ for the Aegis platform.
+	mrc	p15, 4, r0, c15, c0	@ get SCU base address
+	teq	r0, #0x0		@ '0' on actual UP A9 hardware
+	beq	__fixup_smp_on_up	@ So its an A9 UP
+	ldr	r0, [r0, #4]		@ read SCU Config
+	and	r0, r0, #0x3		@ number of CPUs
+	teq	r0, #0x0		@ is 1?
+	movne	pc, lr
 
 __fixup_smp_on_up:
 	adr	r0, 1f
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 39c78387dde..87162e1b94a 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -129,6 +129,24 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
 	.restart	= omap3xxx_restart,
 MACHINE_END
 
+static const char *omap36xx_boards_compat[] __initdata = {
+	"ti,omap36xx",
+	NULL,
+};
+
+DT_MACHINE_START(OMAP36XX_DT, "Generic OMAP36xx (Flattened Device Tree)")
+	.reserve	= omap_reserve,
+	.map_io		= omap3_map_io,
+	.init_early	= omap3630_init_early,
+	.init_irq	= omap_intc_of_init,
+	.handle_irq	= omap3_intc_handle_irq,
+	.init_machine	= omap_generic_init,
+	.init_late	= omap3_init_late,
+	.init_time	= omap3_sync32k_timer_init,
+	.dt_compat	= omap36xx_boards_compat,
+	.restart	= omap3xxx_restart,
+MACHINE_END
+
 static const char *omap3_gp_boards_compat[] __initdata = {
 	"ti,omap3-beagle",
 	"timll,omap3-devkit8000",
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index c3270c0f1fc..f6fe388af98 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -167,38 +167,47 @@ static struct lp55xx_led_config rx51_lp5523_led_config[] = {
 		.name		= "lp5523:kb1",
 		.chan_nr	= 0,
 		.led_current	= 50,
+		.max_current	= 100,
 	}, {
 		.name		= "lp5523:kb2",
 		.chan_nr	= 1,
 		.led_current	= 50,
+		.max_current	= 100,
 	}, {
 		.name		= "lp5523:kb3",
 		.chan_nr	= 2,
 		.led_current	= 50,
+		.max_current	= 100,
 	}, {
 		.name		= "lp5523:kb4",
 		.chan_nr	= 3,
 		.led_current	= 50,
+		.max_current	= 100,
 	}, {
 		.name		= "lp5523:b",
 		.chan_nr	= 4,
 		.led_current	= 50,
+		.max_current	= 100,
 	}, {
 		.name		= "lp5523:g",
 		.chan_nr	= 5,
 		.led_current	= 50,
+		.max_current	= 100,
 	}, {
 		.name		= "lp5523:r",
 		.chan_nr	= 6,
 		.led_current	= 50,
+		.max_current	= 100,
 	}, {
 		.name		= "lp5523:kb5",
 		.chan_nr	= 7,
 		.led_current	= 50,
+		.max_current	= 100,
 	}, {
 		.name		= "lp5523:kb6",
 		.chan_nr	= 8,
 		.led_current	= 50,
+		.max_current	= 100,
 	}
 };
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 64b5a834698..8b6876c98ce 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -272,9 +272,19 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base)
 	struct gpmc_timings t;
 	int ret;
 
-	if (gpmc_onenand_data->of_node)
+	if (gpmc_onenand_data->of_node) {
 		gpmc_read_settings_dt(gpmc_onenand_data->of_node,
				      &onenand_async);
+		if (onenand_async.sync_read || onenand_async.sync_write) {
+			if (onenand_async.sync_write)
+				gpmc_onenand_data->flags |=
+					ONENAND_SYNC_READWRITE;
+			else
+				gpmc_onenand_data->flags |= ONENAND_SYNC_READ;
+			onenand_async.sync_read = false;
+			onenand_async.sync_write = false;
+		}
+	}
 
 	omap2_onenand_set_async_mode(onenand_base);
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
index 5d2080ef792..16f78a990d0 100644
--- a/arch/arm/mach-omap2/mux.h
+++ b/arch/arm/mach-omap2/mux.h
@@ -28,7 +28,7 @@
 #define OMAP_PULL_UP			(1 << 4)
 #define OMAP_ALTELECTRICALSEL		(1 << 5)
 
-/* 34xx specific mux bit defines */
+/* omap3/4/5 specific mux bit defines */
 #define OMAP_INPUT_EN			(1 << 8)
 #define OMAP_OFF_EN			(1 << 9)
 #define OMAP_OFFOUT_EN			(1 << 10)
@@ -36,8 +36,6 @@
 #define OMAP_OFF_PULL_EN		(1 << 12)
 #define OMAP_OFF_PULL_UP		(1 << 13)
 #define OMAP_WAKEUP_EN			(1 << 14)
-
-/* 44xx specific mux bit defines */
 #define OMAP_WAKEUP_EVENT		(1 << 15)
 
 /* Active pin states */
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index fa74a0625da..ead48fa5715 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -628,7 +628,7 @@ void __init omap4_local_timer_init(void)
 #endif /* CONFIG_HAVE_ARM_TWD */
 #endif /* CONFIG_ARCH_OMAP4 */
 
-#ifdef CONFIG_SOC_OMAP5
+#if defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX)
 void __init omap5_realtime_timer_init(void)
 {
 	omap4_sync32k_timer_init();
@@ -636,7 +636,7 @@ void __init omap5_realtime_timer_init(void)
 	clocksource_of_init();
 }
 
-#endif /* CONFIG_SOC_OMAP5 */
+#endif /* CONFIG_SOC_OMAP5 || CONFIG_SOC_DRA7XX */
 
 /**
  * omap_timer_init - build and register timer device with an
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f5e1a847171..1272ed202dd 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1232,7 +1232,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
				break;
 
 		len = (j - i) << PAGE_SHIFT;
-		ret = iommu_map(mapping->domain, iova, phys, len, 0);
+		ret = iommu_map(mapping->domain, iova, phys, len,
+				IOMMU_READ|IOMMU_WRITE);
 		if (ret < 0)
 			goto fail;
 		iova += len;
@@ -1431,6 +1432,27 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
					 GFP_KERNEL);
 }
 
+static int __dma_direction_to_prot(enum dma_data_direction dir)
+{
+	int prot;
+
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+		prot = IOMMU_READ | IOMMU_WRITE;
+		break;
+	case DMA_TO_DEVICE:
+		prot = IOMMU_READ;
+		break;
+	case DMA_FROM_DEVICE:
+		prot = IOMMU_WRITE;
+		break;
+	default:
+		prot = 0;
+	}
+
+	return prot;
+}
+
 /*
  * Map a part of the scatter-gather list into contiguous io address space
  */
@@ -1444,6 +1466,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 	int ret = 0;
 	unsigned int count;
 	struct scatterlist *s;
+	int prot;
 
 	size = PAGE_ALIGN(size);
 	*handle = DMA_ERROR_CODE;
@@ -1460,7 +1483,9 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
-		ret = iommu_map(mapping->domain, iova, phys, len, 0);
+		prot = __dma_direction_to_prot(dir);
+
+		ret = iommu_map(mapping->domain, iova, phys, len, prot);
 		if (ret < 0)
 			goto fail;
 		count += len >> PAGE_SHIFT;
@@ -1665,19 +1690,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 	if (dma_addr == DMA_ERROR_CODE)
 		return dma_addr;
 
-	switch (dir) {
-	case DMA_BIDIRECTIONAL:
-		prot = IOMMU_READ | IOMMU_WRITE;
-		break;
-	case DMA_TO_DEVICE:
-		prot = IOMMU_READ;
-		break;
-	case DMA_FROM_DEVICE:
-		prot = IOMMU_WRITE;
-		break;
-	default:
-		prot = 0;
-	}
+	prot = __dma_direction_to_prot(dir);
 
 	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
 	if (ret < 0)
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index febaee7ca57..18ec4c504ab 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -17,7 +17,6 @@
 #include <linux/nodemask.h>
 #include <linux/initrd.h>
 #include <linux/of_fdt.h>
-#include <linux/of_reserved_mem.h>
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
@@ -379,8 +378,6 @@ void __init arm_memblock_init(struct meminfo *mi,
 	if (mdesc->reserve)
 		mdesc->reserve();
 
-	early_init_dt_scan_reserved_mem();
-
 	/*
 	 * reserve memory for DMA contigouos allocations,
 	 * must come from DMA area inside low memory
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index 4d6d77ed9b9..e194f957ca8 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -22,7 +22,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("1:\tnop\n\t"
+	asm_volatile_goto("1:\tnop\n\t"
		"nop\n\t"
		".pushsection __jump_table,  \"aw\"\n\t"
		WORD_INSN " 1b, %l[l_yes], %0\n\t"
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index 4204d76af85..029e002a4ea 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -73,7 +73,7 @@
 3:
 
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-	PTR_L	t8, __stack_chk_guard
+	PTR_LA	t8, __stack_chk_guard
 	LONG_L	t9, TASK_STACK_CANARY(a1)
 	LONG_S	t9, 0(t8)
 #endif
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 38af83f84c4..20b7b040e76 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -67,7 +67,7 @@ LEAF(resume)
 1:
 
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-	PTR_L	t8, __stack_chk_guard
+	PTR_LA	t8, __stack_chk_guard
 	LONG_L	t9, TASK_STACK_CANARY(a1)
 	LONG_S	t9, 0(t8)
 #endif
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 921238a6bd2..078de5eaca8 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -69,7 +69,7 @@
 1:
 
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-	PTR_L	t8, __stack_chk_guard
+	PTR_LA	t8, __stack_chk_guard
 	LONG_L	t9, TASK_STACK_CANARY(a1)
 	LONG_S	t9, 0(t8)
 #endif
diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h
index 1945f995f2d..4736020ba5e 100644
--- a/arch/parisc/include/asm/traps.h
+++ b/arch/parisc/include/asm/traps.h
@@ -6,7 +6,7 @@ struct pt_regs;
 
 /* traps.c */
 void parisc_terminate(char *msg, struct pt_regs *regs,
-		int code, unsigned long offset);
+		int code, unsigned long offset) __noreturn __cold;
 
 /* mm/fault.c */
 void do_page_fault(struct pt_regs *regs, unsigned long code,
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index c035673209f..b521c0adf4e 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -602,6 +602,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 	}
 }
+EXPORT_SYMBOL_GPL(flush_cache_page);
 
 #ifdef CONFIG_PARISC_TMPALIAS
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 8a252f2d6c0..2b96602e812 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -72,7 +72,6 @@ enum ipi_message_type {
 	IPI_NOP=0,
 	IPI_RESCHEDULE=1,
 	IPI_CALL_FUNC,
-	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_START,
 	IPI_CPU_STOP,
 	IPI_CPU_TEST
@@ -164,11 +163,6 @@ ipi_interrupt(int irq, void *dev_id)
				generic_smp_call_function_interrupt();
				break;
 
-			case IPI_CALL_FUNC_SINGLE:
-				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu);
-				generic_smp_call_function_single_interrupt();
-				break;
-
			case IPI_CPU_START:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
				break;
@@ -260,7 +254,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
+	send_IPI_single(cpu, IPI_CALL_FUNC);
 }
 
 /*
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 04e47c6a456..1cd1d0c83b6 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -291,11 +291,6 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
 	do_exit(SIGSEGV);
 }
 
-int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
-{
-	return syscall(regs);
-}
-
 /* gdb uses break 4,8 */
 #define GDB_BREAK_INSN 0x10004
 static void handle_gdb_break(struct pt_regs *regs, int wot)
@@ -805,14 +800,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
 	else {
 
 		/*
-		 * The kernel should never fault on its own address space.
+		 * The kernel should never fault on its own address space,
+		 * unless pagefault_disable() was called before.
		 */
 
-		if (fault_space == 0)
+		if (fault_space == 0 && !in_atomic())
		{
			pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
			parisc_terminate("Kernel Fault", regs, code, fault_address);
-
		}
 	}
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index ac4370b1ca4..b5507ec06b8 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -56,7 +56,7 @@
 #ifdef __KERNEL__
 #include <linux/module.h>
 #include <linux/compiler.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #define s_space "%%sr1"
 #define d_space "%%sr2"
 #else
@@ -524,4 +524,17 @@ EXPORT_SYMBOL(copy_to_user);
 EXPORT_SYMBOL(copy_from_user);
 EXPORT_SYMBOL(copy_in_user);
 EXPORT_SYMBOL(memcpy);
+
+long probe_kernel_read(void *dst, const void *src, size_t size)
+{
+	unsigned long addr = (unsigned long)src;
+
+	if (size < 0 || addr < PAGE_SIZE)
+		return -EFAULT;
+
+	/* check for I/O space F_EXTEND(0xfff00000) access as well? */
+
+	return __probe_kernel_read(dst, src, size);
+}
+
 #endif
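The parisc probe_kernel_read() override above works because the generic version in mm/maccess.c is declared __weak, so an architecture can shadow it with extra sanity checks and fall back to the generic __probe_kernel_read(). A sketch of that arrangement, simplified from my reading of mm/maccess.c in this era (treat the details as assumptions). Note how pagefault_disable() here is also what the traps.c hunk above keys off: with the pagefault/preempt count raised, in_atomic() is true and the fault path returns -EFAULT instead of calling parisc_terminate():

	/* Generic fallback in mm/maccess.c; arches may override the weak alias. */
	long __weak probe_kernel_read(void *dst, const void *src, size_t size)
	    __attribute__((alias("__probe_kernel_read")));

	long __probe_kernel_read(void *dst, const void *src, size_t size)
	{
		long ret;
		mm_segment_t old_fs = get_fs();

		set_fs(KERNEL_DS);
		pagefault_disable();	/* faults now fail instead of oopsing */
		ret = __copy_from_user_inatomic(dst,
				(__force const void __user *)src, size);
		pagefault_enable();
		set_fs(old_fs);

		return ret ? -EFAULT : 0;
	}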
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 00c0ed333a3..0293588d5b8 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -171,20 +171,25 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
			      unsigned long address)
 {
 	struct vm_area_struct *vma, *prev_vma;
-	struct task_struct *tsk = current;
-	struct mm_struct *mm = tsk->mm;
+	struct task_struct *tsk;
+	struct mm_struct *mm;
 	unsigned long acc_type;
 	int fault;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+	unsigned int flags;
 
-	if (in_atomic() || !mm)
+	if (in_atomic())
 		goto no_context;
 
+	tsk = current;
+	mm = tsk->mm;
+	if (!mm)
+		goto no_context;
+
+	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
 	acc_type = parisc_acctyp(code, regs->iir);
-
 	if (acc_type & VM_WRITE)
 		flags |= FAULT_FLAG_WRITE;
 retry:
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index ae098c438f0..f016bb699b5 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -19,7 +19,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("1:\n\t"
+	asm_volatile_goto("1:\n\t"
		 "nop\n\t"
		 ".pushsection __jump_table, \"aw\"\n\t"
		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 57d286a78f8..c7cb8c232d2 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -495,14 +495,15 @@ void __do_irq(struct pt_regs *regs)
 void do_IRQ(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
-	struct thread_info *curtp, *irqtp;
+	struct thread_info *curtp, *irqtp, *sirqtp;
 
 	/* Switch to the irq stack to handle this */
 	curtp = current_thread_info();
 	irqtp = hardirq_ctx[raw_smp_processor_id()];
+	sirqtp = softirq_ctx[raw_smp_processor_id()];
 
 	/* Already there ? */
-	if (unlikely(curtp == irqtp)) {
+	if (unlikely(curtp == irqtp || curtp == sirqtp)) {
		__do_irq(regs);
		set_irq_regs(old_regs);
		return;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 294b7af28cd..c71103b8a74 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1066,7 +1066,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 BEGIN_FTR_SECTION
 	mfspr	r8, SPRN_DSCR
 	ld	r7, HSTATE_DSCR(r13)
-	std	r8, VCPU_DSCR(r7)
+	std	r8, VCPU_DSCR(r9)
 	mtspr	SPRN_DSCR, r7
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 1c6a9d729df..c65593abae8 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -332,6 +332,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	unsigned long hva;
 	int pfnmap = 0;
 	int tsize = BOOK3E_PAGESZ_4K;
+	int ret = 0;
+	unsigned long mmu_seq;
+	struct kvm *kvm = vcpu_e500->vcpu.kvm;
+
+	/* used to check for invalidations in progress */
+	mmu_seq = kvm->mmu_notifier_seq;
+	smp_rmb();
 
 	/*
	 * Translate guest physical to true physical, acquiring
@@ -449,6 +456,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
 	}
 
+	spin_lock(&kvm->mmu_lock);
+	if (mmu_notifier_retry(kvm, mmu_seq)) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
 	kvmppc_e500_ref_setup(ref, gtlbe, pfn);
 
 	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -457,10 +470,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	/* Clear i-cache for new pages */
 	kvmppc_mmu_flush_icache(pfn);
 
+out:
+	spin_unlock(&kvm->mmu_lock);
+
 	/* Drop refcount on page, so that mmu notifiers can clear it */
 	kvm_release_pfn_clean(pfn);
 
-	return 0;
+	return ret;
 }
 
 /* XXX only map the one-one case, for now use TLB0 */
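The e500 change above applies the standard KVM pattern for racing against MMU-notifier invalidations: snapshot kvm->mmu_notifier_seq, translate gfn to pfn, then take kvm->mmu_lock and call mmu_notifier_retry() before installing the translation. A condensed sketch of the pattern; the KVM interfaces named are real for this era, while install_translation() stands in for the arch-specific TLB/PTE setup and is hypothetical:

	static int map_guest_page(struct kvm *kvm, gfn_t gfn)
	{
		unsigned long mmu_seq;
		pfn_t pfn;
		int ret = 0;

		mmu_seq = kvm->mmu_notifier_seq;	/* snapshot first */
		smp_rmb();	/* pairs with the write side in the notifier */

		pfn = gfn_to_pfn(kvm, gfn);	/* may sleep, may race */

		spin_lock(&kvm->mmu_lock);
		if (mmu_notifier_retry(kvm, mmu_seq)) {
			ret = -EAGAIN;	/* an invalidation ran: retry */
			goto out;
		}
		install_translation(kvm, gfn, pfn);	/* hypothetical hook */
	out:
		spin_unlock(&kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		return ret;
	}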
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 6c32190dc73..346b1c85ffb 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -15,7 +15,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("0:	brcl 0,0\n"
+	asm_volatile_goto("0:	brcl 0,0\n"
		".pushsection __jump_table, \"aw\"\n"
		ASM_ALIGN "\n"
		ASM_PTR " 0b, %l[label], %0\n"
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index c84f33d51f7..7dd21720e5b 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -40,28 +40,26 @@ static inline void *load_real_addr(void *addr)
 }
 
 /*
- * Copy up to one page to vmalloc or real memory
+ * Copy real to virtual or real memory
 */
-static ssize_t copy_page_real(void *buf, void *src, size_t csize)
+static int copy_from_realmem(void *dest, void *src, size_t count)
 {
-	size_t size;
+	unsigned long size;
+	int rc;
 
-	if (is_vmalloc_addr(buf)) {
-		BUG_ON(csize >= PAGE_SIZE);
-		/* If buf is not page aligned, copy first part */
-		size = min(roundup(__pa(buf), PAGE_SIZE) - __pa(buf), csize);
-		if (size) {
-			if (memcpy_real(load_real_addr(buf), src, size))
-				return -EFAULT;
-			buf += size;
-			src += size;
-		}
-		/* Copy second part */
-		size = csize - size;
-		return (size) ? memcpy_real(load_real_addr(buf), src, size) : 0;
-	} else {
-		return memcpy_real(buf, src, csize);
-	}
+	if (!count)
+		return 0;
+	if (!is_vmalloc_or_module_addr(dest))
+		return memcpy_real(dest, src, count);
+	do {
+		size = min(count, PAGE_SIZE - (__pa(dest) & ~PAGE_MASK));
+		if (memcpy_real(load_real_addr(dest), src, size))
+			return -EFAULT;
+		count -= size;
+		dest += size;
+		src += size;
+	} while (count);
+	return 0;
 }
 
 /*
@@ -114,7 +112,7 @@ static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
		rc = copy_to_user_real((void __force __user *) buf,
				       (void *) src, csize);
 	else
-		rc = copy_page_real(buf, (void *) src, csize);
+		rc = copy_from_realmem(buf, (void *) src, csize);
 	return (rc == 0) ? rc : csize;
 }
 
@@ -210,7 +208,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
 	if (OLDMEM_BASE) {
		if ((unsigned long) src < OLDMEM_SIZE) {
			copied = min(count, OLDMEM_SIZE - (unsigned long) src);
-			rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
+			rc = copy_from_realmem(dest, src + OLDMEM_BASE, copied);
			if (rc)
				return rc;
		}
@@ -223,7 +221,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
			return rc;
		}
 	}
-	return memcpy_real(dest + copied, src + copied, count - copied);
+	return copy_from_realmem(dest + copied, src + copied, count - copied);
 }
 
 /*
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index cc30d1fb000..0dc2b6d0a1e 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -266,6 +266,7 @@ sysc_sigpending:
 	tm	__TI_flags+3(%r12),_TIF_SYSCALL
 	jno	sysc_return
 	lm	%r2,%r7,__PT_R2(%r11)	# load svc arguments
+	l	%r10,__TI_sysc_table(%r12)	# 31 bit system call table
 	xr	%r8,%r8			# svc 0 returns -ENOSYS
 	clc	__PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
 	jnl	sysc_nr_ok		# invalid svc number -> do svc 0
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 2b2188b97c6..e5b43c97a83 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -297,6 +297,7 @@ sysc_sigpending:
 	tm	__TI_flags+7(%r12),_TIF_SYSCALL
 	jno	sysc_return
 	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
+	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
 	lghi	%r8,0			# svc 0 returns -ENOSYS
 	llgh	%r1,__PT_INT_CODE+2(%r11)	# load new svc number
 	cghi	%r1,NR_syscalls
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 0ce9fb24503..d86e64eddb4 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -67,6 +67,11 @@ static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
 	case 0xac:	/* stnsm */
 	case 0xad:	/* stosm */
		return -EINVAL;
+	case 0xc6:
+		switch (insn[0] & 0x0f) {
+		case 0x00: /* exrl */
+			return -EINVAL;
+		}
 	}
 	switch (insn[0]) {
 	case 0x0101:	/* pr	 */
@@ -180,7 +185,6 @@ static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn)
		break;
 	case 0xc6:
		switch (insn[0] & 0x0f) {
-		case 0x00: /* exrl */
		case 0x02: /* pfdrl */
		case 0x04: /* cghrl */
		case 0x05: /* chrl */
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index 5080d16a832..ec2e2e2aba7 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -9,7 +9,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("1:\n\t"
+	asm_volatile_goto("1:\n\t"
		 "nop\n\t"
		 "nop\n\t"
		 ".pushsection __jump_table, \"aw\"\n\t"
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index d385eaadece..70979846076 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -166,7 +166,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
  *
  * Atomically sets @v to @i and returns old @v
  */
-static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
+static inline long long atomic64_xchg(atomic64_t *v, long long n)
 {
 	return xchg64(&v->counter, n);
 }
@@ -180,7 +180,8 @@ static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
  * Atomically checks if @v holds @o and replaces it with @n if so.
  * Returns the old value at @v.
  */
-static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
+static inline long long atomic64_cmpxchg(atomic64_t *v, long long o,
+					long long n)
 {
 	return cmpxchg64(&v->counter, o, n);
 }
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 0d0395b1b15..1ad4a1f7d42 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -80,7 +80,7 @@ static inline void atomic_set(atomic_t *v, int n)
 /* A 64bit atomic type */
 
 typedef struct {
-	u64 __aligned(8) counter;
+	long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(val) { (val) }
@@ -91,14 +91,14 @@ typedef struct {
  *
  * Atomically reads the value of @v.
  */
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
 	/*
	 * Requires an atomic op to read both 32-bit parts consistently.
	 * Casting away const is safe since the atomic support routines
	 * do not write to memory if the value has not been modified.
	 */
-	return _atomic64_xchg_add((u64 *)&v->counter, 0);
+	return _atomic64_xchg_add((long long *)&v->counter, 0);
 }
 
 /**
@@ -108,7 +108,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
  *
  * Atomically adds @i to @v.
  */
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
 	_atomic64_xchg_add(&v->counter, i);
 }
@@ -120,7 +120,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
 	smp_mb();  /* barrier for proper semantics */
 	return _atomic64_xchg_add(&v->counter, i) + i;
@@ -135,7 +135,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
-static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline long long atomic64_add_unless(atomic64_t *v, long long a,
+					long long u)
 {
 	smp_mb();  /* barrier for proper semantics */
 	return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
@@ -151,7 +152,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
-static inline void atomic64_set(atomic64_t *v, u64 n)
+static inline void atomic64_set(atomic64_t *v, long long n)
 {
 	_atomic64_xchg(&v->counter, n);
 }
@@ -236,11 +237,13 @@ extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
-extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
-extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
-				      int *lock, u64 o, u64 n);
+extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
+					long long o, long long n);
+extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
+					long long n);
+extern long long __atomic64_xchg_add_unless(volatile long long *p,
+					int *lock, long long o, long long n);
 
 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);
diff --git a/arch/tile/include/asm/cmpxchg.h b/arch/tile/include/asm/cmpxchg.h
index 4001d5eab4b..0ccda3c425b 100644
--- a/arch/tile/include/asm/cmpxchg.h
+++ b/arch/tile/include/asm/cmpxchg.h
@@ -35,10 +35,10 @@ int _atomic_xchg(int *ptr, int n);
 int _atomic_xchg_add(int *v, int i);
 int _atomic_xchg_add_unless(int *v, int a, int u);
 int _atomic_cmpxchg(int *ptr, int o, int n);
-u64 _atomic64_xchg(u64 *v, u64 n);
-u64 _atomic64_xchg_add(u64 *v, u64 i);
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u);
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
+long long _atomic64_xchg(long long *v, long long n);
+long long _atomic64_xchg_add(long long *v, long long i);
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u);
+long long _atomic64_cmpxchg(long long *v, long long o, long long n);
 
 #define xchg(ptr, n) \
 ({ \
@@ -53,7 +53,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
	if (sizeof(*(ptr)) != 4) \
		__cmpxchg_called_with_bad_pointer(); \
	smp_mb(); \
-	(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \
+	(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, \
+					(int)n); \
 })
 
 #define xchg64(ptr, n) \
@@ -61,7 +62,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
	if (sizeof(*(ptr)) != 8) \
		__xchg_called_with_bad_pointer(); \
	smp_mb(); \
-	(typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n)); \
+	(typeof(*(ptr)))_atomic64_xchg((long long *)(ptr), \
+					(long long)(n)); \
 })
 
 #define cmpxchg64(ptr, o, n) \
@@ -69,7 +71,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
	if (sizeof(*(ptr)) != 8) \
		__cmpxchg_called_with_bad_pointer(); \
	smp_mb(); \
-	(typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \
+	(typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr, \
+					(long long)o, (long long)n); \
 })
 
 #else
@@ -81,10 +84,11 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
	switch (sizeof(*(ptr))) { \
	case 4: \
		__x = (typeof(__x))(unsigned long) \
-			__insn_exch4((ptr), (u32)(unsigned long)(n)); \
+			__insn_exch4((ptr), \
+				     (u32)(unsigned long)(n)); \
		break; \
	case 8: \
-		__x = (typeof(__x)) \
+		__x = (typeof(__x)) \
			__insn_exch((ptr), (unsigned long)(n)); \
		break; \
	default: \
@@ -103,10 +107,12 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
	switch (sizeof(*(ptr))) { \
	case 4: \
		__x = (typeof(__x))(unsigned long) \
-			__insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \
+			__insn_cmpexch4((ptr), \
+					(u32)(unsigned long)(n)); \
		break; \
	case 8: \
-		__x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \
+		__x = (typeof(__x))__insn_cmpexch((ptr), \
+						  (long long)(n)); \
		break; \
	default: \
		__cmpxchg_called_with_bad_pointer(); \
diff --git a/arch/tile/include/asm/percpu.h b/arch/tile/include/asm/percpu.h
index 63294f5a8ef..4f7ae39fa20 100644
--- a/arch/tile/include/asm/percpu.h
+++ b/arch/tile/include/asm/percpu.h
@@ -15,9 +15,37 @@
 #ifndef _ASM_TILE_PERCPU_H
 #define _ASM_TILE_PERCPU_H
 
-register unsigned long __my_cpu_offset __asm__("tp");
-#define __my_cpu_offset __my_cpu_offset
-#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp))
+register unsigned long my_cpu_offset_reg asm("tp");
+
+#ifdef CONFIG_PREEMPT
+/*
+ * For full preemption, we can't just use the register variable
+ * directly, since we need barrier() to hazard against it, causing the
+ * compiler to reload anything computed from a previous "tp" value.
+ * But we also don't want to use volatile asm, since we'd like the
+ * compiler to be able to cache the value across multiple percpu reads.
+ * So we use a fake stack read as a hazard against barrier().
+ * The 'U' constraint is like 'm' but disallows postincrement.
+ */
+static inline unsigned long __my_cpu_offset(void)
+{
+	unsigned long tp;
+	register unsigned long *sp asm("sp");
+	asm("move %0, tp" : "=r" (tp) : "U" (*sp));
+	return tp;
+}
+#define __my_cpu_offset __my_cpu_offset()
+#else
+/*
+ * We don't need to hazard against barrier() since "tp" doesn't ever
+ * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
+ * changes at function call points, at which we are already re-reading
+ * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
+ */
+#define __my_cpu_offset my_cpu_offset_reg
+#endif
+
+#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))
 
 #include <asm-generic/percpu.h>
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index df27a1fd94a..531f4c36535 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -66,7 +66,7 @@ static struct hardwall_type hardwall_types[] = {
		0,
		"udn",
		LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
-		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock),
+		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_UDN].lock),
		NULL
 	},
 #ifndef __tilepro__
@@ -77,7 +77,7 @@ static struct hardwall_type hardwall_types[] = {
		1,  /* disabled pending hypervisor support */
		"idn",
		LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
-		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock),
+		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IDN].lock),
		NULL
 	},
 	{  /* access to user-space IPI */
@@ -87,7 +87,7 @@ static struct hardwall_type hardwall_types[] = {
		0,
		"ipi",
		LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
-		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock),
+		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IPI].lock),
		NULL
 	},
 #endif
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 088d5c141e6..2cbe6d5dd6b 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -815,6 +815,9 @@ STD_ENTRY(interrupt_return)
 	}
 	bzt     r28, 1f
 	bnz     r29, 1f
+	/* Disable interrupts explicitly for preemption. */
+	IRQ_DISABLE(r20,r21)
+	TRACE_IRQS_OFF
 	jal     preempt_schedule_irq
 	FEEDBACK_REENTER(interrupt_return)
 1:
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index ec755d3f373..b8fc497f243 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -841,6 +841,9 @@ STD_ENTRY(interrupt_return)
 	}
 	beqzt   r28, 1f
 	bnez    r29, 1f
+	/* Disable interrupts explicitly for preemption. */
+	IRQ_DISABLE(r20,r21)
+	TRACE_IRQS_OFF
 	jal     preempt_schedule_irq
 	FEEDBACK_REENTER(interrupt_return)
 1:
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 362284af3af..c93977a6211 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -23,6 +23,7 @@
 #include <linux/mmzone.h>
 #include <linux/dcache.h>
 #include <linux/fs.h>
+#include <linux/string.h>
 #include <asm/backtrace.h>
 #include <asm/page.h>
 #include <asm/ucontext.h>
@@ -332,21 +333,18 @@ static void describe_addr(struct KBacktraceIterator *kbt,
 	}
 
 	if (vma->vm_file) {
-		char *s;
		p = d_path(&vma->vm_file->f_path, buf, bufsize);
		if (IS_ERR(p))
			p = "?";
-		s = strrchr(p, '/');
-		if (s)
-			p = s+1;
+		name = kbasename(p);
 	} else {
-		p = "anon";
+		name = "anon";
 	}
 
 	/* Generate a string description of the vma info. */
-	namelen = strlen(p);
+	namelen = strlen(name);
 	remaining = (bufsize - 1) - namelen;
-	memmove(buf, p, namelen);
+	memmove(buf, name, namelen);
 	snprintf(buf + namelen, remaining, "[%lx+%lx] ",
		 vma->vm_start, vma->vm_end - vma->vm_start);
 }
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 759efa337be..c89b211fd9e 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -107,19 +107,19 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
 EXPORT_SYMBOL(_atomic_xor);
 
 
-u64 _atomic64_xchg(u64 *v, u64 n)
+long long _atomic64_xchg(long long *v, long long n)
 {
 	return __atomic64_xchg(v, __atomic_setup(v), n);
 }
 EXPORT_SYMBOL(_atomic64_xchg);
 
-u64 _atomic64_xchg_add(u64 *v, u64 i)
+long long _atomic64_xchg_add(long long *v, long long i)
 {
 	return __atomic64_xchg_add(v, __atomic_setup(v), i);
 }
 EXPORT_SYMBOL(_atomic64_xchg_add);
 
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
 {
 	/*
	 * Note: argument order is switched here since it is easier
@@ -130,7 +130,7 @@ u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
 }
 EXPORT_SYMBOL(_atomic64_xchg_add_unless);
 
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n)
+long long _atomic64_cmpxchg(long long *v, long long o, long long n)
 {
 	return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
 }
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ee2fb9d3774..f67e839f06c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -860,7 +860,7 @@ source "kernel/Kconfig.preempt"
 
 config X86_UP_APIC
 	bool "Local APIC support on uniprocessors"
-	depends on X86_32 && !SMP && !X86_32_NON_STANDARD
+	depends on X86_32 && !SMP && !X86_32_NON_STANDARD && !PCI_MSI
 	---help---
	  A local APIC (Advanced Programmable Interrupt Controller) is an
	  integrated interrupt controller in the CPU. If you have a single-CPU
@@ -885,11 +885,11 @@ config X86_UP_IOAPIC
 
 config X86_LOCAL_APIC
 	def_bool y
-	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
+	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC || PCI_MSI
 
 config X86_IO_APIC
 	def_bool y
-	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC
+	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC || PCI_MSI
 
 config X86_VISWS_APIC
 	def_bool y
@@ -1033,6 +1033,7 @@ config X86_REBOOTFIXUPS
 
 config MICROCODE
 	tristate "CPU microcode loading support"
+	depends on CPU_SUP_AMD || CPU_SUP_INTEL
	select FW_LOADER
	---help---
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index d3f5c63078d..89270b4318d 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -374,7 +374,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
		 * Catch too early usage of this before alternatives
		 * have run.
		 */
-		asm goto("1: jmp %l[t_warn]\n"
+		asm_volatile_goto("1: jmp %l[t_warn]\n"
			 "2:\n"
			 ".section .altinstructions,\"a\"\n"
			 " .long 1b - .\n"
@@ -388,7 +388,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 
 #endif
 
-		asm goto("1: jmp %l[t_no]\n"
+		asm_volatile_goto("1: jmp %l[t_no]\n"
			 "2:\n"
			 ".section .altinstructions,\"a\"\n"
			 " .long 1b - .\n"
@@ -453,7 +453,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
	 * have. Thus, we force the jump to the widest, 4-byte, signed relative
	 * offset even though the last would often fit in less bytes.
	 */
-	asm goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
+	asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
		"2:\n"
		".section .altinstructions,\"a\"\n"
		" .long 1b - .\n"			/* src offset */
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 64507f35800..6a2cefb4395 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -18,7 +18,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("1:"
+	asm_volatile_goto("1:"
		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
		".pushsection __jump_table,  \"aw\" \n\t"
		_ASM_ALIGN "\n\t"
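All of these per-arch arch_static_branch() implementations back the same generic jump-label API. For reference, a minimal sketch of how a static key is consumed, using the API as it existed around v3.12 (static_key_false()/static_key_slow_inc() are real; do_expensive_tracing() is hypothetical):

	#include <linux/jump_label.h>

	/* Key starts false: the branch compiles to a NOP until enabled. */
	static struct static_key tracing_key = STATIC_KEY_INIT_FALSE;

	static inline void maybe_trace(void)
	{
		/* Patched at runtime via arch_static_branch(). */
		if (static_key_false(&tracing_key))
			do_expensive_tracing();	/* hypothetical slow path */
	}

	/* Enabling patches every arch_static_branch() NOP into a jump. */
	static void enable_tracing(void)
	{
		static_key_slow_inc(&tracing_key);
	}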
diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h
index e7e6751648e..07537a44216 100644
--- a/arch/x86/include/asm/mutex_64.h
+++ b/arch/x86/include/asm/mutex_64.h
@@ -20,7 +20,7 @@
 static inline void __mutex_fastpath_lock(atomic_t *v,
					 void (*fail_fn)(atomic_t *))
 {
-	asm volatile goto(LOCK_PREFIX "   decl %0\n"
+	asm_volatile_goto(LOCK_PREFIX "   decl %0\n"
			  "   jns %l[exit]\n"
			  : : "m" (v->counter)
			  : "memory", "cc"
@@ -75,7 +75,7 @@ static inline int __mutex_fastpath_lock_retval(atomic_t *count)
 static inline void __mutex_fastpath_unlock(atomic_t *v,
					   void (*fail_fn)(atomic_t *))
 {
-	asm volatile goto(LOCK_PREFIX "   incl %0\n"
+	asm_volatile_goto(LOCK_PREFIX "   incl %0\n"
			  "   jg %l[exit]\n"
			  : : "m" (v->counter)
			  : "memory", "cc"
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 1191ac1c9d2..a419814cea5 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -113,7 +113,7 @@ static int __init early_get_pnodeid(void)
		break;
 	case UV3_HUB_PART_NUMBER:
 	case UV3_HUB_PART_NUMBER_X:
-		uv_min_hub_revision_id += UV3_HUB_REVISION_BASE - 1;
+		uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
		break;
 	}
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 897783b3302..9d8449158cf 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1888,10 +1888,7 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 	userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
 	userpg->pmc_width = x86_pmu.cntval_bits;
 
-	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
-		return;
-
-	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+	if (!sched_clock_stable)
		return;
 
 	userpg->cap_user_time = 1;
@@ -1899,10 +1896,8 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 	userpg->time_shift = CYC2NS_SCALE_FACTOR;
 	userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
 
-	if (sched_clock_stable && !check_tsc_disabled()) {
-		userpg->cap_user_time_zero = 1;
-		userpg->time_zero = this_cpu_read(cyc2ns_offset);
-	}
+	userpg->cap_user_time_zero = 1;
+	userpg->time_zero = this_cpu_read(cyc2ns_offset);
 }
 
 /*
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 697b93af02d..a0e2a8a80c9 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -775,11 +775,22 @@ void __init kvm_spinlock_init(void)
 	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;
 
-	printk(KERN_INFO "KVM setup paravirtual spinlock\n");
+	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
+	pv_lock_ops.unlock_kick = kvm_unlock_kick;
+}
+
+static __init int kvm_spinlock_init_jump(void)
+{
+	if (!kvm_para_available())
+		return 0;
+	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
+		return 0;
 
 	static_key_slow_inc(&paravirt_ticketlocks_enabled);
+	printk(KERN_INFO "KVM setup paravirtual spinlock\n");
 
-	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
-	pv_lock_ops.unlock_kick = kvm_unlock_kick;
+	return 0;
 }
+early_initcall(kvm_spinlock_init_jump);
+
 #endif	/* CONFIG_PARAVIRT_SPINLOCKS */
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index e643e744e4d..7e920bff99a 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -326,6 +326,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
		},
 	},
+	{	/* Handle problems with rebooting on the Latitude E5410. */
+		.callback = set_pci_reboot,
+		.ident = "Dell Latitude E5410",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5410"),
+		},
+	},
 	{	/* Handle problems with rebooting on the Latitude E5420. */
		.callback = set_pci_reboot,
		.ident = "Dell Latitude E5420",
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3b8e7459dd4..2b2fce1b200 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3255,25 +3255,29 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 
 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
 {
+	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
 	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_dirty))
		return;
 
 	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
-		vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]);
-		vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]);
-		vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]);
-		vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]);
+		vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
+		vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
+		vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
+		vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
	}
 }
 
 static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
 {
+	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
 	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
-		vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
-		vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
-		vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
-		vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
+		mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
+		mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
+		mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
+		mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
	}
 
 	__set_bit(VCPU_EXREG_PDPTR,
@@ -7777,10 +7781,6 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
		vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
		vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
		vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
-		__clear_bit(VCPU_EXREG_PDPTR,
-				(unsigned long *)&vcpu->arch.regs_avail);
-		__clear_bit(VCPU_EXREG_PDPTR,
-				(unsigned long *)&vcpu->arch.regs_dirty);
	}
 
 	kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index d1e4777b4e7..31d04758b76 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -278,6 +278,15 @@ static void __init xen_smp_prepare_boot_cpu(void)
		   old memory can be recycled */
		make_lowmem_page_readwrite(xen_initial_gdt);
 
+#ifdef CONFIG_X86_32
+		/*
+		 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
+		 * expects __USER_DS
+		 */
+		loadsegment(ds, __USER_DS);
+		loadsegment(es, __USER_DS);
+#endif
+
		xen_filter_cpu_maps();
		xen_setup_vcpu_info_placement();
 	}