author | Grant Likely <grant.likely@secretlab.ca> | 2011-10-24 17:03:35 +0200
---|---|---
committer | Grant Likely <grant.likely@secretlab.ca> | 2011-10-24 17:03:35 +0200
commit | 2bf6f675fa1a0f80b21aff20e6c21e87d6a7c9c9 (patch) |
tree | ef49d741211dcc4f38636b5c422c9c346da09adf |
parent | 5762c20593b6b959f1470dc6f1ff4ca4d9570f8d (diff) |
parent | c3b92c8787367a8bb53d57d9789b558f1295cc96 (diff) |
Merge commit 'v3.1' into devicetree/next
153 files changed, 948 insertions, 596 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 854ed5ca7e3..d6e6724446c 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2706,10 +2706,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted. functions are at fixed addresses, they make nice targets for exploits that can control RIP.
- emulate [default] Vsyscalls turn into traps and are
- emulated reasonably safely.
+ emulate Vsyscalls turn into traps and are emulated
+ reasonably safely.
- native Vsyscalls are native syscall instructions.
+ native [default] Vsyscalls are native syscall
+ instructions.
This is a little bit faster than trapping and makes a few dynamic recompilers work better than they would in emulation mode.
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
index 8ce7c30e723..fe67b5c79f0 100644
--- a/Documentation/networking/scaling.txt
+++ b/Documentation/networking/scaling.txt
@@ -27,7 +27,7 @@ applying a filter to each packet that assigns it to one of a small number of logical flows. Packets for each flow are steered to a separate receive queue, which in turn can be processed by separate CPUs. This mechanism is generally known as “Receive-side Scaling” (RSS). The goal of RSS and
-the other scaling techniques to increase performance uniformly.
+the other scaling techniques is to increase performance uniformly.
Multi-queue distribution can also be used for traffic prioritization, but that is not the focus of these techniques.
@@ -186,10 +186,10 @@ are steered using plain RPS. Multiple table entries may point to the same CPU. Indeed, with many flows and few CPUs, it is very likely that a single application thread handles flows with many different flow hashes.
-rps_sock_table is a global flow table that contains the *desired* CPU for
-flows: the CPU that is currently processing the flow in userspace. Each
-table value is a CPU index that is updated during calls to recvmsg and
-sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
+rps_sock_flow_table is a global flow table that contains the *desired* CPU
+for flows: the CPU that is currently processing the flow in userspace.
+Each table value is a CPU index that is updated during calls to recvmsg
+and sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
and tcp_splice_read()).
When the scheduler moves a thread to a new CPU while it has outstanding
diff --git a/MAINTAINERS b/MAINTAINERS
index 046526a647c..73369be4c24 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2460,7 +2460,7 @@ S: Supported
F: drivers/infiniband/hw/ehca/

EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
-M: Breno Leitao <leitao@linux.vnet.ibm.com>
+M: Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ehea/
@@ -3313,7 +3313,7 @@ M: David Woodhouse <dwmw2@infradead.org>
L: iommu@lists.linux-foundation.org
T: git git://git.infradead.org/iommu-2.6.git
S: Supported
-F: drivers/pci/intel-iommu.c
+F: drivers/iommu/intel-iommu.c
F: include/linux/intel-iommu.h

INTEL IOP-ADMA DMA DRIVER
@@ -6368,10 +6368,10 @@ F: net/ipv4/tcp_lp.c

TEGRA SUPPORT
M: Colin Cross <ccross@android.com>
-M: Erik Gilling <konkers@android.com>
M: Olof Johansson <olof@lixom.net>
+M: Stephen Warren <swarren@nvidia.com>
L: linux-tegra@vger.kernel.org
-T: git git://android.git.kernel.org/kernel/tegra.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/olof/tegra.git
S: Supported
F: arch/arm/mach-tegra
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 1
SUBLEVEL = 0
-EXTRAVERSION = -rc9
+EXTRAVERSION =
NAME = "Divemaster Edition"
# *DOCUMENTATION*
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index 7aa4262ada7..197f81c7735 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -259,7 +259,6 @@ static void __init vic_disable(void __iomem *base)
writel(0, base + VIC_INT_SELECT);
writel(0, base + VIC_INT_ENABLE);
writel(~0, base + VIC_INT_ENABLE_CLEAR);
- writel(0, base + VIC_IRQ_STATUS);
writel(0, base + VIC_ITCR);
writel(~0, base + VIC_INT_SOFT_CLEAR);
}
diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h
index 080d74f8128..ff66638ff54 100644
--- a/arch/arm/include/asm/localtimer.h
+++ b/arch/arm/include/asm/localtimer.h
@@ -10,6 +10,8 @@
#ifndef __ASM_ARM_LOCALTIMER_H
#define __ASM_ARM_LOCALTIMER_H
+#include <linux/errno.h>
+
struct clock_event_device;
/*
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4c851834f68..6be3e2e4d83 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -321,8 +321,8 @@ static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_DCACHE_ACCESS,
+ [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_DCACHE_REFILL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index 2028464cf5b..f79b7d2a8ed 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -193,7 +193,8 @@ static int __init omap2430_i2c_init(void)
{
omap_register_i2c_bus(1, 100, sdp2430_i2c1_boardinfo, ARRAY_SIZE(sdp2430_i2c1_boardinfo));
- omap2_pmic_init("twl4030", &sdp2430_twldata);
+ omap_pmic_init(2, 100, "twl4030", INT_24XX_SYS_NIRQ,
+ &sdp2430_twldata);
return 0;
}
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index a9b45c76e1d..097a42d81e5 100644
---
a/arch/arm/mach-omap2/hsmmc.c +++ b/arch/arm/mach-omap2/hsmmc.c @@ -137,8 +137,7 @@ static void omap4_hsmmc1_before_set_reg(struct device *dev, int slot, */ reg = omap4_ctrl_pad_readl(control_pbias_offset); reg &= ~(OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK | - OMAP4_MMC1_PWRDNZ_MASK | - OMAP4_USBC1_ICUSB_PWRDNZ_MASK); + OMAP4_MMC1_PWRDNZ_MASK); omap4_ctrl_pad_writel(reg, control_pbias_offset); } @@ -156,8 +155,7 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot, else reg |= OMAP4_MMC1_PBIASLITE_VMODE_MASK; reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK | - OMAP4_MMC1_PWRDNZ_MASK | - OMAP4_USBC1_ICUSB_PWRDNZ_MASK); + OMAP4_MMC1_PWRDNZ_MASK); omap4_ctrl_pad_writel(reg, control_pbias_offset); timeout = jiffies + msecs_to_jiffies(5); @@ -171,16 +169,14 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot, if (reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK) { pr_err("Pbias Voltage is not same as LDO\n"); /* Caution : On VMODE_ERROR Power Down MMC IO */ - reg &= ~(OMAP4_MMC1_PWRDNZ_MASK | - OMAP4_USBC1_ICUSB_PWRDNZ_MASK); + reg &= ~(OMAP4_MMC1_PWRDNZ_MASK); omap4_ctrl_pad_writel(reg, control_pbias_offset); } } else { reg = omap4_ctrl_pad_readl(control_pbias_offset); reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK | OMAP4_MMC1_PWRDNZ_MASK | - OMAP4_MMC1_PBIASLITE_VMODE_MASK | - OMAP4_USBC1_ICUSB_PWRDNZ_MASK); + OMAP4_MMC1_PBIASLITE_VMODE_MASK); omap4_ctrl_pad_writel(reg, control_pbias_offset); } } diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c index a65145b02a5..19e4dac62a8 100644 --- a/arch/arm/mach-omap2/usb-musb.c +++ b/arch/arm/mach-omap2/usb-musb.c @@ -137,9 +137,6 @@ void __init usb_musb_init(struct omap_musb_board_data *musb_board_data) musb_plat.mode = board_data->mode; musb_plat.extvbus = board_data->extvbus; - if (cpu_is_omap44xx()) - omap4430_phy_init(dev); - if (cpu_is_omap3517() || cpu_is_omap3505()) { oh_name = "am35x_otg_hs"; name = "musb-am35x"; diff --git a/arch/arm/mach-s3c2410/s3c2410.c b/arch/arm/mach-s3c2410/s3c2410.c index f1d3bd8f6f1..343a540d86a 100644 --- a/arch/arm/mach-s3c2410/s3c2410.c +++ b/arch/arm/mach-s3c2410/s3c2410.c @@ -170,7 +170,9 @@ int __init s3c2410_init(void) { printk("S3C2410: Initialising architecture\n"); +#ifdef CONFIG_PM register_syscore_ops(&s3c2410_pm_syscore_ops); +#endif register_syscore_ops(&s3c24xx_irq_syscore_ops); return sysdev_register(&s3c2410_sysdev); diff --git a/arch/arm/mach-s3c2412/s3c2412.c b/arch/arm/mach-s3c2412/s3c2412.c index ef0958d3e5c..57a1e01e4e5 100644 --- a/arch/arm/mach-s3c2412/s3c2412.c +++ b/arch/arm/mach-s3c2412/s3c2412.c @@ -245,7 +245,9 @@ int __init s3c2412_init(void) { printk("S3C2412: Initialising architecture\n"); +#ifdef CONFIG_PM register_syscore_ops(&s3c2412_pm_syscore_ops); +#endif register_syscore_ops(&s3c24xx_irq_syscore_ops); return sysdev_register(&s3c2412_sysdev); diff --git a/arch/arm/mach-s3c2416/s3c2416.c b/arch/arm/mach-s3c2416/s3c2416.c index 494ce913dc9..20b3fdfb305 100644 --- a/arch/arm/mach-s3c2416/s3c2416.c +++ b/arch/arm/mach-s3c2416/s3c2416.c @@ -97,7 +97,9 @@ int __init s3c2416_init(void) s3c_fb_setname("s3c2443-fb"); +#ifdef CONFIG_PM register_syscore_ops(&s3c2416_pm_syscore_ops); +#endif register_syscore_ops(&s3c24xx_irq_syscore_ops); return sysdev_register(&s3c2416_sysdev); diff --git a/arch/arm/mach-s3c2440/s3c2440.c b/arch/arm/mach-s3c2440/s3c2440.c index ce99ff72838..2270d336021 100644 --- a/arch/arm/mach-s3c2440/s3c2440.c +++ b/arch/arm/mach-s3c2440/s3c2440.c @@ -55,7 +55,9 @@ int __init s3c2440_init(void) /* register suspend/resume handlers */ 
+#ifdef CONFIG_PM register_syscore_ops(&s3c2410_pm_syscore_ops); +#endif register_syscore_ops(&s3c244x_pm_syscore_ops); register_syscore_ops(&s3c24xx_irq_syscore_ops); diff --git a/arch/arm/mach-s3c2440/s3c2442.c b/arch/arm/mach-s3c2440/s3c2442.c index 9ad99f8016a..6f2b65e6e06 100644 --- a/arch/arm/mach-s3c2440/s3c2442.c +++ b/arch/arm/mach-s3c2440/s3c2442.c @@ -169,7 +169,9 @@ int __init s3c2442_init(void) { printk("S3C2442: Initialising architecture\n"); +#ifdef CONFIG_PM register_syscore_ops(&s3c2410_pm_syscore_ops); +#endif register_syscore_ops(&s3c244x_pm_syscore_ops); register_syscore_ops(&s3c24xx_irq_syscore_ops); diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c index 0e1016a827a..0e0fd4d889b 100644 --- a/arch/arm/mach-tegra/cpu-tegra.c +++ b/arch/arm/mach-tegra/cpu-tegra.c @@ -32,7 +32,6 @@ #include <asm/system.h> -#include <mach/hardware.h> #include <mach/clk.h> /* Frequency table index must be sequential starting at 0 */ diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig index 4210cb434db..a3e0c8692f0 100644 --- a/arch/arm/mach-ux500/Kconfig +++ b/arch/arm/mach-ux500/Kconfig @@ -6,6 +6,7 @@ config UX500_SOC_COMMON select ARM_GIC select HAS_MTU select ARM_ERRATA_753970 + select ARM_ERRATA_754322 menu "Ux500 SoC" diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index cc7e2d8be9a..f8037ba338a 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -496,6 +496,13 @@ static void __init free_unused_memmap(struct meminfo *mi) */ bank_start = min(bank_start, ALIGN(prev_bank_end, PAGES_PER_SECTION)); +#else + /* + * Align down here since the VM subsystem insists that the + * memmap entries are valid from the bank start aligned to + * MAX_ORDER_NR_PAGES. + */ + bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES); #endif /* * If we had a previous bank, and there is a space diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c index f88216d2399..c65eb791d1b 100644 --- a/arch/arm/plat-s5p/irq-gpioint.c +++ b/arch/arm/plat-s5p/irq-gpioint.c @@ -163,9 +163,9 @@ static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip) ct->chip.irq_mask = irq_gc_mask_set_bit; ct->chip.irq_unmask = irq_gc_mask_clr_bit; ct->chip.irq_set_type = s5p_gpioint_set_type, - ct->regs.ack = PEND_OFFSET + REG_OFFSET(chip->group); - ct->regs.mask = MASK_OFFSET + REG_OFFSET(chip->group); - ct->regs.type = CON_OFFSET + REG_OFFSET(chip->group); + ct->regs.ack = PEND_OFFSET + REG_OFFSET(group - bank->start); + ct->regs.mask = MASK_OFFSET + REG_OFFSET(group - bank->start); + ct->regs.type = CON_OFFSET + REG_OFFSET(group - bank->start); irq_setup_generic_chip(gc, IRQ_MSK(chip->chip.ngpio), IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST | IRQ_NOPROBE, 0); diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 177cdaf8356..b122adc8bdb 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -24,6 +24,7 @@ config MIPS select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW select HAVE_ARCH_JUMP_LABEL + select IRQ_FORCED_THREADING menu "Machine selection" @@ -722,6 +723,7 @@ config CAVIUM_OCTEON_SIMULATOR select SYS_SUPPORTS_HIGHMEM select SYS_SUPPORTS_HOTPLUG_CPU select SYS_HAS_CPU_CAVIUM_OCTEON + select HOLES_IN_ZONE help The Octeon simulator is software performance model of the Cavium Octeon Processor. 
It supports simulating Octeon processors on x86 @@ -744,6 +746,7 @@ config CAVIUM_OCTEON_REFERENCE_BOARD select ZONE_DMA32 select USB_ARCH_HAS_OHCI select USB_ARCH_HAS_EHCI + select HOLES_IN_ZONE help This option supports all of the Octeon reference boards from Cavium Networks. It builds a kernel that dynamically determines the Octeon @@ -973,6 +976,9 @@ config ISA_DMA_API config GENERIC_GPIO bool +config HOLES_IN_ZONE + bool + # # Endianess selection. Sufficiently obscure so many users don't know what to # answer,so we try hard to limit the available choices. Also the use of a diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c index 3b2c18b1434..f72c48d4804 100644 --- a/arch/mips/alchemy/common/platform.c +++ b/arch/mips/alchemy/common/platform.c @@ -492,7 +492,7 @@ static void __init alchemy_setup_macs(int ctype) memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6); ret = platform_device_register(&au1xxx_eth0_device); - if (!ret) + if (ret) printk(KERN_INFO "Alchemy: failed to register MAC0\n"); diff --git a/arch/mips/alchemy/common/power.c b/arch/mips/alchemy/common/power.c index 647e518c90b..b86324a4260 100644 --- a/arch/mips/alchemy/common/power.c +++ b/arch/mips/alchemy/common/power.c @@ -158,15 +158,21 @@ static void restore_core_regs(void) void au_sleep(void) { - int cpuid = alchemy_get_cputype(); - if (cpuid != ALCHEMY_CPU_UNKNOWN) { - save_core_regs(); - if (cpuid <= ALCHEMY_CPU_AU1500) - alchemy_sleep_au1000(); - else if (cpuid <= ALCHEMY_CPU_AU1200) - alchemy_sleep_au1550(); - restore_core_regs(); + save_core_regs(); + + switch (alchemy_get_cputype()) { + case ALCHEMY_CPU_AU1000: + case ALCHEMY_CPU_AU1500: + case ALCHEMY_CPU_AU1100: + alchemy_sleep_au1000(); + break; + case ALCHEMY_CPU_AU1550: + case ALCHEMY_CPU_AU1200: + alchemy_sleep_au1550(); + break; } + + restore_core_regs(); } #endif /* CONFIG_PM */ diff --git a/arch/mips/alchemy/devboards/bcsr.c b/arch/mips/alchemy/devboards/bcsr.c index 596ad00e7f0..463d2c4d944 100644 --- a/arch/mips/alchemy/devboards/bcsr.c +++ b/arch/mips/alchemy/devboards/bcsr.c @@ -89,8 +89,12 @@ static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d) { unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT); + disable_irq_nosync(irq); + for ( ; bisr; bisr &= bisr - 1) generic_handle_irq(bcsr_csc_base + __ffs(bisr)); + + enable_irq(irq); } /* NOTE: both the enable and mask bits must be cleared, otherwise the diff --git a/arch/mips/alchemy/devboards/db1200/setup.c b/arch/mips/alchemy/devboards/db1200/setup.c index 1dac4f27d33..4a8980027ec 100644 --- a/arch/mips/alchemy/devboards/db1200/setup.c +++ b/arch/mips/alchemy/devboards/db1200/setup.c @@ -23,13 +23,6 @@ void __init board_setup(void) unsigned long freq0, clksrc, div, pfc; unsigned short whoami; - /* Set Config[OD] (disable overlapping bus transaction): - * This gets rid of a _lot_ of spurious interrupts (especially - * wrt. IDE); but incurs ~10% performance hit in some - * cpu-bound applications. 
- */ - set_c0_config(1 << 19); - bcsr_init(DB1200_BCSR_PHYS_ADDR, DB1200_BCSR_PHYS_ADDR + DB1200_BCSR_HEXLED_OFS); diff --git a/arch/mips/ar7/irq.c b/arch/mips/ar7/irq.c index 03db3daadbd..88c4babfdb5 100644 --- a/arch/mips/ar7/irq.c +++ b/arch/mips/ar7/irq.c @@ -98,7 +98,8 @@ static struct irq_chip ar7_sec_irq_type = { static struct irqaction ar7_cascade_action = { .handler = no_action, - .name = "AR7 cascade interrupt" + .name = "AR7 cascade interrupt", + .flags = IRQF_NO_THREAD, }; static void __init ar7_irq_init(int base) diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c index cea6021cb8d..162e11b4ed7 100644 --- a/arch/mips/bcm63xx/irq.c +++ b/arch/mips/bcm63xx/irq.c @@ -222,6 +222,7 @@ static struct irq_chip bcm63xx_external_irq_chip = { static struct irqaction cpu_ip2_cascade_action = { .handler = no_action, .name = "cascade_ip2", + .flags = IRQF_NO_THREAD, }; void __init arch_init_irq(void) diff --git a/arch/mips/cobalt/irq.c b/arch/mips/cobalt/irq.c index cb9bf820fe5..965c777d356 100644 --- a/arch/mips/cobalt/irq.c +++ b/arch/mips/cobalt/irq.c @@ -48,6 +48,7 @@ asmlinkage void plat_irq_dispatch(void) static struct irqaction cascade = { .handler = no_action, .name = "cascade", + .flags = IRQF_NO_THREAD, }; void __init arch_init_irq(void) diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c index fa45e924be0..f7b7ba6d5c4 100644 --- a/arch/mips/dec/setup.c +++ b/arch/mips/dec/setup.c @@ -101,20 +101,24 @@ int cpu_fpu_mask = DEC_CPU_IRQ_MASK(DEC_CPU_INR_FPU); static struct irqaction ioirq = { .handler = no_action, .name = "cascade", + .flags = IRQF_NO_THREAD, }; static struct irqaction fpuirq = { .handler = no_action, .name = "fpu", + .flags = IRQF_NO_THREAD, }; static struct irqaction busirq = { .flags = IRQF_DISABLED, .name = "bus error", + .flags = IRQF_NO_THREAD, }; static struct irqaction haltirq = { .handler = dec_intr_halt, .name = "halt", + .flags = IRQF_NO_THREAD, }; diff --git a/arch/mips/emma/markeins/irq.c b/arch/mips/emma/markeins/irq.c index 3dbd7a5a6ad..7798887a128 100644 --- a/arch/mips/emma/markeins/irq.c +++ b/arch/mips/emma/markeins/irq.c @@ -169,7 +169,7 @@ void emma2rh_gpio_irq_init(void) static struct irqaction irq_cascade = { .handler = no_action, - .flags = 0, + .flags = IRQF_NO_THREAD, .name = "cascade", .dev_id = NULL, .next = NULL, diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h index 0d5a42b5f47..a58addb98cf 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h @@ -54,7 +54,6 @@ #define cpu_has_mips_r2_exec_hazard 0 #define cpu_has_dsp 0 #define cpu_has_mipsmt 0 -#define cpu_has_userlocal 0 #define cpu_has_vint 0 #define cpu_has_veic 0 #define cpu_hwrena_impl_bits 0xc0000000 diff --git a/arch/mips/include/asm/mach-powertv/dma-coherence.h b/arch/mips/include/asm/mach-powertv/dma-coherence.h index 62c09408594..35371641575 100644 --- a/arch/mips/include/asm/mach-powertv/dma-coherence.h +++ b/arch/mips/include/asm/mach-powertv/dma-coherence.h @@ -13,7 +13,6 @@ #define __ASM_MACH_POWERTV_DMA_COHERENCE_H #include <linux/sched.h> -#include <linux/version.h> #include <linux/device.h> #include <asm/mach-powertv/asic.h> diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index b4ba2449444..cb41af5f340 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h @@ -195,9 +195,9 @@ * to cover the 
pipeline delay. */ .set mips32 - mfc0 v1, CP0_TCSTATUS + mfc0 k0, CP0_TCSTATUS .set mips0 - LONG_S v1, PT_TCSTATUS(sp) + LONG_S k0, PT_TCSTATUS(sp) #endif /* CONFIG_MIPS_MT_SMTC */ LONG_S $4, PT_R4(sp) LONG_S $5, PT_R5(sp) diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c index 73031f7fc82..4397972949f 100644 --- a/arch/mips/jz4740/gpio.c +++ b/arch/mips/jz4740/gpio.c @@ -18,7 +18,7 @@ #include <linux/init.h> #include <linux/spinlock.h> -#include <linux/sysdev.h> +#include <linux/syscore_ops.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/delay.h> @@ -86,7 +86,6 @@ struct jz_gpio_chip { spinlock_t lock; struct gpio_chip gpio_chip; - struct sys_device sysdev; }; static struct jz_gpio_chip jz4740_gpio_chips[]; @@ -459,49 +458,47 @@ static struct jz_gpio_chip jz4740_gpio_chips[] = { JZ4740_GPIO_CHIP(D), }; -static inline struct jz_gpio_chip *sysdev_to_chip(struct sys_device *dev) +static void jz4740_gpio_suspend_chip(struct jz_gpio_chip *chip) { - return container_of(dev, struct jz_gpio_chip, sysdev); + chip->suspend_mask = readl(chip->base + JZ_REG_GPIO_MASK); + writel(~(chip->wakeup), chip->base + JZ_REG_GPIO_MASK_SET); + writel(chip->wakeup, chip->base + JZ_REG_GPIO_MASK_CLEAR); } -static int jz4740_gpio_suspend(struct sys_device *dev, pm_message_t state) +static int jz4740_gpio_suspend(void) { - struct jz_gpio_chip *chip = sysdev_to_chip(dev); + int i; - chip->suspend_mask = readl(chip->base + JZ_REG_GPIO_MASK); - writel(~(chip->wakeup), chip->base + JZ_REG_GPIO_MASK_SET); - writel(chip->wakeup, chip->base + JZ_REG_GPIO_MASK_CLEAR); + for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); i++) + jz4740_gpio_suspend_chip(&jz4740_gpio_chips[i]); return 0; } -static int jz4740_gpio_resume(struct sys_device *dev) +static void jz4740_gpio_resume_chip(struct jz_gpio_chip *chip) { - struct jz_gpio_chip *chip = sysdev_to_chip(dev); uint32_t mask = chip->suspend_mask; writel(~mask, chip->base + JZ_REG_GPIO_MASK_CLEAR); writel(mask, chip->base + JZ_REG_GPIO_MASK_SET); +} - return 0; +static void jz4740_gpio_resume(void) +{ + int i; + + for (i = ARRAY_SIZE(jz4740_gpio_chips) - 1; i >= 0 ; i--) + jz4740_gpio_resume_chip(&jz4740_gpio_chips[i]); } -static struct sysdev_class jz4740_gpio_sysdev_class = { - .name = "gpio", +static struct syscore_ops jz4740_gpio_syscore_ops = { .suspend = jz4740_gpio_suspend, .resume = jz4740_gpio_resume, }; -static int jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id) +static void jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id) { - int ret, irq; - - chip->sysdev.id = id; - chip->sysdev.cls = &jz4740_gpio_sysdev_class; - ret = sysdev_register(&chip->sysdev); - - if (ret) - return ret; + int irq; spin_lock_init(&chip->lock); @@ -519,22 +516,17 @@ static int jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id) irq_set_chip_and_handler(irq, &jz_gpio_irq_chip, handle_level_irq); } - - return 0; } static int __init jz4740_gpio_init(void) { unsigned int i; - int ret; - - ret = sysdev_class_register(&jz4740_gpio_sysdev_class); - if (ret) - return ret; for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i) jz4740_gpio_chip_init(&jz4740_gpio_chips[i], i); + register_syscore_ops(&jz4740_gpio_syscore_ops); + printk(KERN_INFO "JZ4740 GPIO initialized\n"); return 0; diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index feb8021a305..6a2d758dd8e 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -19,6 +19,26 @@ #include <asm-generic/sections.h> +#if 
defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) +#define MCOUNT_OFFSET_INSNS 5 +#else +#define MCOUNT_OFFSET_INSNS 4 +#endif + +/* + * Check if the address is in kernel space + * + * Clone core_kernel_text() from kernel/extable.c, but doesn't call + * init_kernel_text() for Ftrace doesn't trace functions in init sections. + */ +static inline int in_kernel_space(unsigned long ip) +{ + if (ip >= (unsigned long)_stext && + ip <= (unsigned long)_etext) + return 1; + return 0; +} + #ifdef CONFIG_DYNAMIC_FTRACE #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ @@ -54,20 +74,6 @@ static inline void ftrace_dyn_arch_init_insns(void) #endif } -/* - * Check if the address is in kernel space - * - * Clone core_kernel_text() from kernel/extable.c, but doesn't call - * init_kernel_text() for Ftrace doesn't trace functions in init sections. - */ -static inline int in_kernel_space(unsigned long ip) -{ - if (ip >= (unsigned long)_stext && - ip <= (unsigned long)_etext) - return 1; - return 0; -} - static int ftrace_modify_code(unsigned long ip, unsigned int new_code) { int faulted; @@ -112,11 +118,6 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code) * 1: offset = 4 instructions */ -#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) -#define MCOUNT_OFFSET_INSNS 5 -#else -#define MCOUNT_OFFSET_INSNS 4 -#endif #define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS) int ftrace_make_nop(struct module *mod, diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c index 5c74eb797f0..32b397b646e 100644 --- a/arch/mips/kernel/i8259.c +++ b/arch/mips/kernel/i8259.c @@ -229,7 +229,7 @@ static void i8259A_shutdown(void) */ if (i8259A_auto_eoi >= 0) { outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ - outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */ + outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ } } @@ -295,6 +295,7 @@ static void init_8259A(int auto_eoi) static struct irqaction irq2 = { .handler = no_action, .name = "cascade", + .flags = IRQF_NO_THREAD, }; static struct resource pic1_io_resource = { diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 876a75cc376..922a554cd10 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c @@ -349,3 +349,10 @@ SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags, return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4), dfd, pathname); } + +SYSCALL_DEFINE6(32_futex, u32 __user *, uaddr, int, op, u32, val, + struct compat_timespec __user *, utime, u32 __user *, uaddr2, + u32, val3) +{ + return compat_sys_futex(uaddr, op, val, utime, uaddr2, val3); +} diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index f9296e894e4..6de1f598346 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -315,7 +315,7 @@ EXPORT(sysn32_call_table) PTR sys_fremovexattr PTR sys_tkill PTR sys_ni_syscall - PTR compat_sys_futex + PTR sys_32_futex PTR compat_sys_sched_setaffinity /* 6195 */ PTR compat_sys_sched_getaffinity PTR sys_cacheflush diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 4d7c9827706..1d813169e45 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -441,7 +441,7 @@ sys_call_table: PTR sys_fremovexattr /* 4235 */ PTR sys_tkill PTR sys_sendfile64 - PTR compat_sys_futex + PTR sys_32_futex PTR compat_sys_sched_setaffinity PTR compat_sys_sched_getaffinity /* 4240 */ PTR compat_sys_io_setup diff --git a/arch/mips/kernel/signal.c 
b/arch/mips/kernel/signal.c index dbbe0ce48d8..f8524003676 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -8,6 +8,7 @@ * Copyright (C) 1999, 2000 Silicon Graphics, Inc. */ #include <linux/cache.h> +#include <linux/irqflags.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/personality.h> @@ -658,6 +659,8 @@ static void do_signal(struct pt_regs *regs) asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) { + local_irq_enable(); + /* deal with pending signal delivery */ if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) do_signal(regs); diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index b7517e3abc8..cbea618af0b 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -14,6 +14,7 @@ #include <linux/bug.h> #include <linux/compiler.h> #include <linux/init.h> +#include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/sched.h> @@ -364,21 +365,26 @@ static int regs_to_trapnr(struct pt_regs *regs) return (regs->cp0_cause >> 2) & 0x1f; } -static DEFINE_SPINLOCK(die_lock); +static DEFINE_RAW_SPINLOCK(die_lock); void __noreturn die(const char *str, struct pt_regs *regs) { static int die_counter; int sig = SIGSEGV; #ifdef CONFIG_MIPS_MT_SMTC - unsigned long dvpret = dvpe(); + unsigned long dvpret; #endif /* CONFIG_MIPS_MT_SMTC */ + oops_enter(); + if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP) sig = 0; console_verbose(); - spin_lock_irq(&die_lock); + raw_spin_lock_irq(&die_lock); +#ifdef CONFIG_MIPS_MT_SMTC + dvpret = dvpe(); +#endif /* CONFIG_MIPS_MT_SMTC */ bust_spinlocks(1); #ifdef CONFIG_MIPS_MT_SMTC mips_mt_regdump(dvpret); @@ -387,7 +393,9 @@ void __noreturn die(const char *str, struct pt_regs *regs) printk("%s[#%d]:\n", str, ++die_counter); show_registers(regs); add_taint(TAINT_DIE); - spin_unlock_irq(&die_lock); + raw_spin_unlock_irq(&die_lock); + + oops_exit(); if (in_interrupt()) panic("Fatal exception in interrupt"); diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 2cd50ad0d5c..3efcb065f78 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -192,7 +192,7 @@ static struct tc *get_tc(int index) } spin_unlock(&vpecontrol.tc_list_lock); - return NULL; + return res; } /* allocate a vpe and associate it with this minor (or index) */ diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index fc89795cafd..f9737bb3c5a 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -123,11 +123,10 @@ void ltq_enable_irq(struct irq_data *d) static unsigned int ltq_startup_eiu_irq(struct irq_data *d) { int i; - int irq_nr = d->irq - INT_NUM_IRQ0; ltq_enable_irq(d); for (i = 0; i < MAX_EIU; i++) { - if (irq_nr == ltq_eiu_irq[i]) { + if (d->irq == ltq_eiu_irq[i]) { /* low level - we should really handle set_type */ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) | (0x6 << (i * 4)), LTQ_EIU_EXIN_C); @@ -147,11 +146,10 @@ static unsigned int ltq_startup_eiu_irq(struct irq_data *d) static void ltq_shutdown_eiu_irq(struct irq_data *d) { int i; - int irq_nr = d->irq - INT_NUM_IRQ0; ltq_disable_irq(d); for (i = 0; i < MAX_EIU; i++) { - if (irq_nr == ltq_eiu_irq[i]) { + if (d->irq == ltq_eiu_irq[i]) { /* disable */ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i), LTQ_EIU_EXIN_INEN); diff --git a/arch/mips/lantiq/xway/ebu.c b/arch/mips/lantiq/xway/ebu.c index 66eb52fa50a..033b3184c7a 100644 --- a/arch/mips/lantiq/xway/ebu.c +++ b/arch/mips/lantiq/xway/ebu.c @@ -10,7 
+10,6 @@ #include <linux/kernel.h> #include <linux/module.h> -#include <linux/version.h> #include <linux/ioport.h> #include <lantiq_soc.h> diff --git a/arch/mips/lantiq/xway/pmu.c b/arch/mips/lantiq/xway/pmu.c index 9d69f01e352..39f0d2641cb 100644 --- a/arch/mips/lantiq/xway/pmu.c +++ b/arch/mips/lantiq/xway/pmu.c @@ -8,7 +8,6 @@ #include <linux/kernel.h> #include <linux/module.h> -#include <linux/version.h> #include <linux/ioport.h> #include <lantiq_soc.h> diff --git a/arch/mips/lasat/interrupt.c b/arch/mips/lasat/interrupt.c index de4c165515d..d608b6ef0ed 100644 --- a/arch/mips/lasat/interrupt.c +++ b/arch/mips/lasat/interrupt.c @@ -105,6 +105,7 @@ asmlinkage void plat_irq_dispatch(void) static struct irqaction cascade = { .handler = no_action, .name = "cascade", + .flags = IRQF_NO_THREAD, }; void __init arch_init_irq(void) diff --git a/arch/mips/loongson/fuloong-2e/irq.c b/arch/mips/loongson/fuloong-2e/irq.c index d61a04222b8..3cf1fef29f0 100644 --- a/arch/mips/loongson/fuloong-2e/irq.c +++ b/arch/mips/loongson/fuloong-2e/irq.c @@ -42,6 +42,7 @@ asmlinkage void mach_irq_dispatch(unsigned int pending) static struct irqaction cascade_irqaction = { .handler = no_action, .name = "cascade", + .flags = IRQF_NO_THREAD, }; void __init mach_init_irq(void) diff --git a/arch/mips/loongson/lemote-2f/irq.c b/arch/mips/loongson/lemote-2f/irq.c index 081db102bb9..14b081841b6 100644 --- a/arch/mips/loongson/lemote-2f/irq.c +++ b/arch/mips/loongson/lemote-2f/irq.c @@ -96,12 +96,13 @@ static irqreturn_t ip6_action(int cpl, void *dev_id) struct irqaction ip6_irqaction = { .handler = ip6_action, .name = "cascade", - .flags = IRQF_SHARED, + .flags = IRQF_SHARED | IRQF_NO_THREAD, }; struct irqaction cascade_irqaction = { .handler = no_action, .name = "cascade", + .flags = IRQF_NO_THREAD, }; void __init mach_init_irq(void) diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index 9ff5d0fac55..302d779d5b0 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -6,6 +6,7 @@ * Copyright (C) 2011 Wind River Systems, * written by Ralf Baechle <ralf@linux-mips.org> */ +#include <linux/compiler.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/mman.h> @@ -15,12 +16,11 @@ #include <linux/sched.h> unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ - EXPORT_SYMBOL(shm_align_mask); /* gap between mmap and stack */ #define MIN_GAP (128*1024*1024UL) -#define MAX_GAP ((TASK_SIZE)/6*5) +#define MAX_GAP ((TASK_SIZE)/6*5) static int mmap_is_legacy(void) { @@ -57,13 +57,13 @@ static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr, return base - off; } -#define COLOUR_ALIGN(addr,pgoff) \ +#define COLOUR_ALIGN(addr, pgoff) \ ((((addr) + shm_align_mask) & ~shm_align_mask) + \ (((pgoff) << PAGE_SHIFT) & shm_align_mask)) enum mmap_allocation_direction {UP, DOWN}; -static unsigned long arch_get_unmapped_area_foo(struct file *filp, +static unsigned long arch_get_unmapped_area_common(struct file *filp, unsigned long addr0, unsigned long len, unsigned long pgoff, unsigned long flags, enum mmap_allocation_direction dir) { @@ -103,16 +103,16 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vma->vm_start)) return addr; } if (dir == UP) { addr = mm->mmap_base; - if (do_color_align) - addr = COLOUR_ALIGN(addr, pgoff); - else - addr = PAGE_ALIGN(addr); + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(addr); for (vma = 
find_vma(current->mm, addr); ; vma = vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). */ @@ -131,28 +131,30 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp, mm->free_area_cache = mm->mmap_base; } - /* either no address requested or can't fit in requested address hole */ + /* + * either no address requested, or the mapping can't fit into + * the requested address hole + */ addr = mm->free_area_cache; - if (do_color_align) { - unsigned long base = - COLOUR_ALIGN_DOWN(addr - len, pgoff); - + if (do_color_align) { + unsigned long base = + COLOUR_ALIGN_DOWN(addr - len, pgoff); addr = base + len; - } + } /* make sure it can fit in the remaining address space */ if (likely(addr > len)) { vma = find_vma(mm, addr - len); if (!vma || addr <= vma->vm_start) { - /* remember the address as a hint for next time */ - return mm->free_area_cache = addr-len; + /* cache the address as a hint for next time */ + return mm->free_area_cache = addr - len; } } if (unlikely(mm->mmap_base < len)) goto bottomup; - addr = mm->mmap_base-len; + addr = mm->mmap_base - len; if (do_color_align) addr = COLOUR_ALIGN_DOWN(addr, pgoff); @@ -163,8 +165,8 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp, * return with success: */ vma = find_vma(mm, addr); - if (likely(!vma || addr+len <= vma->vm_start)) { - /* remember the address as a hint for next time */ + if (likely(!vma || addr + len <= vma->vm_start)) { + /* cache the address as a hint for next time */ return mm->free_area_cache = addr; } @@ -173,7 +175,7 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp, mm->cached_hole_size = vma->vm_start - addr; /* try just below the current vma->vm_start */ - addr = vma->vm_start-len; + addr = vma->vm_start - len; if (do_color_align) addr = COLOUR_ALIGN_DOWN(addr, pgoff); } while (likely(len < vma->vm_start)); @@ -201,7 +203,7 @@ bottomup: unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0, unsigned long len, unsigned long pgoff, unsigned long flags) { - return arch_get_unmapped_area_foo(filp, + return arch_get_unmapped_area_common(filp, addr0, len, pgoff, flags, UP); } @@ -213,7 +215,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0, unsigned long len, unsigned long pgoff, unsigned long flags) { - return arch_get_unmapped_area_foo(filp, + return arch_get_unmapped_area_common(filp, addr0, len, pgoff, flags, DOWN); } diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index b6e1cff5066..e06370f58ef 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -1759,14 +1759,13 @@ static void __cpuinit build_r3000_tlb_modify_handler(void) u32 *p = handle_tlbm; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; - struct work_registers wr; memset(handle_tlbm, 0, sizeof(handle_tlbm)); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); build_r3000_tlbchange_handler_head(&p, K0, K1); - build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm); + build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm); uasm_i_nop(&p); /* load delay */ build_make_write(&p, &r, K0, K1); build_r3000_pte_reload_tlbwi(&p, K0, K1); @@ -1963,7 +1962,8 @@ static void __cpuinit build_r4000_tlb_load_handler(void) uasm_i_andi(&p, wr.r3, wr.r3, 2); uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2); } - + if (PM_DEFAULT_MASK == 0) + uasm_i_nop(&p); /* * We clobbered C0_PAGEMASK, restore it. On the other branch * it is restored in build_huge_tlb_write_entry. 
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index 1d36c511a7a..d53ff91b277 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c @@ -350,12 +350,14 @@ unsigned int plat_ipi_resched_int_xlate(unsigned int cpu) static struct irqaction i8259irq = { .handler = no_action, - .name = "XT-PIC cascade" + .name = "XT-PIC cascade", + .flags = IRQF_NO_THREAD, }; static struct irqaction corehi_irqaction = { .handler = no_action, - .name = "CoreHi" + .name = "CoreHi", + .flags = IRQF_NO_THREAD, }; static msc_irqmap_t __initdata msc_irqmap[] = { diff --git a/arch/mips/netlogic/xlr/Makefile b/arch/mips/netlogic/xlr/Makefile index 9bd3f731f62..2dca585dd2f 100644 --- a/arch/mips/netlogic/xlr/Makefile +++ b/arch/mips/netlogic/xlr/Makefile @@ -2,4 +2,4 @@ obj-y += setup.o platform.o irq.o setup.o time.o obj-$(CONFIG_SMP) += smp.o smpboot.o obj-$(CONFIG_EARLY_PRINTK) += xlr_console.o -EXTRA_CFLAGS += -Werror +ccflags-y += -Werror diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c index 603d7493e96..8656388b34b 100644 --- a/arch/mips/pci/pci-lantiq.c +++ b/arch/mips/pci/pci-lantiq.c @@ -171,8 +171,13 @@ static int __devinit ltq_pci_startup(struct ltq_pci_data *conf) u32 temp_buffer; /* set clock to 33Mhz */ - ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR); - ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR); + if (ltq_is_ar9()) { + ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0x1f00000, LTQ_CGU_IFCCR); + ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0xe00000, LTQ_CGU_IFCCR); + } else { + ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR); + ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR); + } /* external or internal clock ? */ if (conf->clock) { diff --git a/arch/mips/pci/pci-rc32434.c b/arch/mips/pci/pci-rc32434.c index 764362ce5e4..5f3a69cebad 100644 --- a/arch/mips/pci/pci-rc32434.c +++ b/arch/mips/pci/pci-rc32434.c @@ -215,7 +215,7 @@ static int __init rc32434_pci_init(void) rc32434_pcibridge_init(); io_map_base = ioremap(rc32434_res_pci_io1.start, - resource_size(&rcrc32434_res_pci_io1)); + resource_size(&rc32434_res_pci_io1)); if (!io_map_base) return -ENOMEM; diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq.c b/arch/mips/pmc-sierra/msp71xx/msp_irq.c index 4531c4a514b..d3c3d81757a 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_irq.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_irq.c @@ -108,12 +108,14 @@ asmlinkage void plat_irq_dispatch(struct pt_regs *regs) static struct irqaction cic_cascade_msp = { .handler = no_action, - .name = "MSP CIC cascade" + .name = "MSP CIC cascade", + .flags = IRQF_NO_THREAD, }; static struct irqaction per_cascade_msp = { .handler = no_action, - .name = "MSP PER cascade" + .name = "MSP PER cascade", + .flags = IRQF_NO_THREAD, }; void __init arch_init_irq(void) diff --git a/arch/mips/pnx8550/common/int.c b/arch/mips/pnx8550/common/int.c index 6b93c81779c..1ebe22bdadc 100644 --- a/arch/mips/pnx8550/common/int.c +++ b/arch/mips/pnx8550/common/int.c @@ -167,7 +167,7 @@ static struct irq_chip level_irq_type = { static struct irqaction gic_action = { .handler = no_action, - .flags = IRQF_DISABLED, + .flags = IRQF_DISABLED | IRQF_NO_THREAD, .name = "GIC", }; diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c index b4d08e4d2ea..f72c336ea27 100644 --- a/arch/mips/sgi-ip22/ip22-int.c +++ b/arch/mips/sgi-ip22/ip22-int.c @@ -155,32 +155,32 @@ static void __irq_entry indy_buserror_irq(void) static struct irqaction local0_cascade 
= { .handler = no_action, - .flags = IRQF_DISABLED, + .flags = IRQF_DISABLED | IRQF_NO_THREAD, .name = "local0 cascade", }; static struct irqaction local1_cascade = { .handler = no_action, - .flags = IRQF_DISABLED, + .flags = IRQF_DISABLED | IRQF_NO_THREAD, .name = "local1 cascade", }; static struct irqaction buserr = { .handler = no_action, - .flags = IRQF_DISABLED, + .flags = IRQF_DISABLED | IRQF_NO_THREAD, .name = "Bus Error", }; static struct irqaction map0_cascade = { .handler = no_action, - .flags = IRQF_DISABLED, + .flags = IRQF_DISABLED | IRQF_NO_THREAD, .name = "mapable0 cascade", }; #ifdef USE_LIO3_IRQ static struct irqaction map1_cascade = { .handler = no_action, - .flags = IRQF_DISABLED, + .flags = IRQF_DISABLED | IRQF_NO_THREAD, .name = "mapable1 cascade", }; #define SGI_INTERRUPTS SGINT_END diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c index a7e5a6d917b..3ab5b5d25b0 100644 --- a/arch/mips/sni/rm200.c +++ b/arch/mips/sni/rm200.c @@ -359,6 +359,7 @@ void sni_rm200_init_8259A(void) static struct irqaction sni_rm200_irq2 = { .handler = no_action, .name = "cascade", + .flags = IRQF_NO_THREAD, }; static struct resource sni_rm200_pic1_resource = { diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c index 70a3b85f375..fad2bef432c 100644 --- a/arch/mips/vr41xx/common/irq.c +++ b/arch/mips/vr41xx/common/irq.c @@ -34,6 +34,7 @@ static irq_cascade_t irq_cascade[NR_IRQS] __cacheline_aligned; static struct irqaction cascade_irqaction = { .handler = no_action, .name = "cascade", + .flags = IRQF_NO_THREAD, }; int cascade_irq(unsigned int irq, int (*get_irq)(unsigned int)) diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h index 1407c07bdad..f6ae2b2b687 100644 --- a/arch/sparc/include/asm/pgtsrmmu.h +++ b/arch/sparc/include/asm/pgtsrmmu.h @@ -280,7 +280,7 @@ static inline unsigned long srmmu_hwprobe(unsigned long vaddr) return retval; } #else -#define srmmu_hwprobe(addr) (srmmu_swprobe(addr, 0) & SRMMU_PTE_PMASK) +#define srmmu_hwprobe(addr) srmmu_swprobe(addr, 0) #endif static inline int diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 1e94f946570..8aa0d440858 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -230,7 +230,8 @@ static void pci_parse_of_addrs(struct platform_device *op, res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; } else if (i == dev->rom_base_reg) { res = &dev->resource[PCI_ROM_RESOURCE]; - flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE; + flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE + | IORESOURCE_SIZEALIGN; } else { printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i); continue; diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index 1ba95aff5d5..2caa556db86 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -273,10 +273,7 @@ void do_sigreturn32(struct pt_regs *regs) case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32); } sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + set_current_blocked(&set); return; segv: @@ -377,10 +374,7 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32); } sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + set_current_blocked(&set); return; segv: force_sig(SIGSEGV, current); @@ 
-782,6 +776,7 @@ static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) { + sigset_t blocked; int err; if (ka->sa.sa_flags & SA_SIGINFO) @@ -792,12 +787,10 @@ static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka, if (err) return err; - spin_lock_irq(¤t->sighand->siglock); - sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); + sigorsets(&blocked, ¤t->blocked, &ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NOMASK)) - sigaddset(¤t->blocked,signr); - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + sigaddset(&blocked, signr); + set_current_blocked(&blocked); tracehook_signal_handler(signr, info, ka, regs, 0); @@ -881,7 +874,7 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs, */ if (current_thread_info()->status & TS_RESTORE_SIGMASK) { current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); + set_current_blocked(¤t->saved_sigmask); } } diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 04ede8f04ad..8ce247ac04c 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -62,12 +62,13 @@ struct rt_signal_frame { static int _sigpause_common(old_sigset_t set) { - set &= _BLOCKABLE; - spin_lock_irq(¤t->sighand->siglock); + sigset_t blocked; + current->saved_sigmask = current->blocked; - siginitset(¤t->blocked, set); - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + + set &= _BLOCKABLE; + siginitset(&blocked, set); + set_current_blocked(&blocked); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -139,10 +140,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs) goto segv_and_exit; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + set_current_blocked(&set); return; segv_and_exit: @@ -209,10 +207,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) } sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + set_current_blocked(&set); return; segv: force_sig(SIGSEGV, current); @@ -470,6 +465,7 @@ static inline int handle_signal(unsigned long signr, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) { + sigset_t blocked; int err; if (ka->sa.sa_flags & SA_SIGINFO) @@ -480,12 +476,10 @@ handle_signal(unsigned long signr, struct k_sigaction *ka, if (err) return err; - spin_lock_irq(¤t->sighand->siglock); - sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); + sigorsets(&blocked, ¤t->blocked, &ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NOMASK)) - sigaddset(¤t->blocked, signr); - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + sigaddset(&blocked, signr); + set_current_blocked(&blocked); tracehook_signal_handler(signr, info, ka, regs, 0); @@ -581,7 +575,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) { clear_thread_flag(TIF_RESTORE_SIGMASK); - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); + set_current_blocked(¤t->saved_sigmask); } } diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 47509df3b89..a2b81598d90 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -70,10 +70,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs) goto do_sigsegv; } 
sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + set_current_blocked(&set); } if (test_thread_flag(TIF_32BIT)) { pc &= 0xffffffff; @@ -242,12 +239,13 @@ struct rt_signal_frame { static long _sigpause_common(old_sigset_t set) { - set &= _BLOCKABLE; - spin_lock_irq(¤t->sighand->siglock); + sigset_t blocked; + current->saved_sigmask = current->blocked; - siginitset(¤t->blocked, set); - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + + set &= _BLOCKABLE; + siginitset(&blocked, set); + set_current_blocked(&blocked); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -327,10 +325,7 @@ void do_rt_sigreturn(struct pt_regs *regs) pt_regs_clear_syscall(regs); sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + set_current_blocked(&set); return; segv: force_sig(SIGSEGV, current); @@ -484,18 +479,17 @@ static inline int handle_signal(unsigned long signr, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) { + sigset_t blocked; int err; err = setup_rt_frame(ka, regs, signr, oldset, (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL); if (err) return err; - spin_lock_irq(¤t->sighand->siglock); - sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); + sigorsets(&blocked, ¤t->blocked, &ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NOMASK)) - sigaddset(¤t->blocked,signr); - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + sigaddset(&blocked, signr); + set_current_blocked(&blocked); tracehook_signal_handler(signr, info, ka, regs, 0); @@ -601,7 +595,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) */ if (current_thread_info()->status & TS_RESTORE_SIGMASK) { current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); + set_current_blocked(¤t->saved_sigmask); } } diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c index e485a680499..13c2169822a 100644 --- a/arch/sparc/mm/leon_mm.c +++ b/arch/sparc/mm/leon_mm.c @@ -162,7 +162,7 @@ ready: printk(KERN_INFO "swprobe: padde %x\n", paddr_calc); if (paddr) *paddr = paddr_calc; - return paddrbase; + return pte; } void leon_flush_icache_all(void) diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S index fc94607f0bd..aecc8ed5f39 100644 --- a/arch/tile/kernel/intvec_32.S +++ b/arch/tile/kernel/intvec_32.S @@ -21,7 +21,7 @@ #include <asm/ptrace.h> #include <asm/thread_info.h> #include <asm/irqflags.h> -#include <linux/atomic.h> +#include <asm/atomic_32.h> #include <asm/asm-offsets.h> #include <hv/hypervisor.h> #include <arch/abi.h> diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S index 1f75a2a5610..30638042691 100644 --- a/arch/tile/lib/atomic_asm_32.S +++ b/arch/tile/lib/atomic_asm_32.S @@ -70,7 +70,7 @@ */ #include <linux/linkage.h> -#include <linux/atomic.h> +#include <asm/atomic_32.h> #include <asm/page.h> #include <asm/processor.h> diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 18ae83dd1cd..b56c65de384 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c @@ -56,7 +56,7 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) = .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock), }; -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE; +static enum { EMULATE, NATIVE, NONE } vsyscall_mode = 
NATIVE; static int __init vsyscall_setup(char *str) { diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 30326443ab8..87488b93a65 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -63,9 +63,8 @@ static void __init find_early_table_space(unsigned long end, int use_pse, #ifdef CONFIG_X86_32 /* for fixmap */ tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE); - - good_end = max_pfn_mapped << PAGE_SHIFT; #endif + good_end = max_pfn_mapped << PAGE_SHIFT; base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE); if (base == MEMBLOCK_ERROR) diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 039d91315bc..404f21a3ff9 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -43,6 +43,17 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"), }, }, + /* https://bugzilla.kernel.org/show_bug.cgi?id=30552 */ + /* 2006 AMD HT/VIA system with two host bridges */ + { + .callback = set_use_crs, + .ident = "ASUS M2V-MX SE", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "M2V-MX SE"), + DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), + }, + }, {} }; diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c index 58425adc22c..fe73276e026 100644 --- a/arch/x86/platform/mrst/mrst.c +++ b/arch/x86/platform/mrst/mrst.c @@ -678,38 +678,40 @@ static int __init sfi_parse_devs(struct sfi_table_header *table) pentry = (struct sfi_device_table_entry *)sb->pentry; for (i = 0; i < num; i++, pentry++) { - if (pentry->irq != (u8)0xff) { /* native RTE case */ + int irq = pentry->irq; + + if (irq != (u8)0xff) { /* native RTE case */ /* these SPI2 devices are not exposed to system as PCI * devices, but they have separate RTE entry in IOAPIC * so we have to enable them one by one here */ - ioapic = mp_find_ioapic(pentry->irq); + ioapic = mp_find_ioapic(irq); irq_attr.ioapic = ioapic; - irq_attr.ioapic_pin = pentry->irq; + irq_attr.ioapic_pin = irq; irq_attr.trigger = 1; irq_attr.polarity = 1; - io_apic_set_pci_routing(NULL, pentry->irq, &irq_attr); + io_apic_set_pci_routing(NULL, irq, &irq_attr); } else - pentry->irq = 0; /* No irq */ + irq = 0; /* No irq */ switch (pentry->type) { case SFI_DEV_TYPE_IPC: /* ID as IRQ is a hack that will go away */ - pdev = platform_device_alloc(pentry->name, pentry->irq); + pdev = platform_device_alloc(pentry->name, irq); if (pdev == NULL) { pr_err("out of memory for SFI platform device '%s'.\n", pentry->name); continue; } - install_irq_resource(pdev, pentry->irq); + install_irq_resource(pdev, irq); pr_debug("info[%2d]: IPC bus, name = %16.16s, " - "irq = 0x%2x\n", i, pentry->name, pentry->irq); + "irq = 0x%2x\n", i, pentry->name, irq); sfi_handle_ipc_dev(pdev); break; case SFI_DEV_TYPE_SPI: memset(&spi_info, 0, sizeof(spi_info)); strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN); - spi_info.irq = pentry->irq; + spi_info.irq = irq; spi_info.bus_num = pentry->host_num; spi_info.chip_select = pentry->addr; spi_info.max_speed_hz = pentry->max_freq; @@ -726,7 +728,7 @@ static int __init sfi_parse_devs(struct sfi_table_header *table) memset(&i2c_info, 0, sizeof(i2c_info)); bus = pentry->host_num; strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN); - i2c_info.irq = pentry->irq; + i2c_info.irq = irq; i2c_info.addr = pentry->addr; pr_debug("info[%2d]: I2C bus = %d, name = %16.16s, " "irq = 0x%2x, addr = 0x%x\n", i, bus, diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c index 
be442561693..7835b8fc94d 100644 --- a/crypto/ghash-generic.c +++ b/crypto/ghash-generic.c @@ -67,6 +67,9 @@ static int ghash_update(struct shash_desc *desc, struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); u8 *dst = dctx->buffer; + if (!ctx->gf128) + return -ENOKEY; + if (dctx->bytes) { int n = min(srclen, dctx->bytes); u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes); @@ -119,6 +122,9 @@ static int ghash_final(struct shash_desc *desc, u8 *dst) struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); u8 *buf = dctx->buffer; + if (!ctx->gf128) + return -ENOKEY; + ghash_flush(ctx, dctx); memcpy(dst, buf, GHASH_BLOCK_SIZE); diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 0599854e221..118ec12d2d5 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -34,8 +34,8 @@ struct gpio_bank { u16 irq; u16 virtual_irq_start; int method; -#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS) u32 suspend_wakeup; +#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS) u32 saved_wakeup; #endif u32 non_wakeup_gpios; diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index c43b8ff626a..0550dcb8581 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -577,6 +577,7 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert) void pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert) { + *gpio_base = -1; } #endif diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index e88c64417a8..14cc88aaf3a 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -277,7 +277,12 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, case ATOM_ARG_FB: idx = U8(*ptr); (*ptr)++; - val = gctx->scratch[((gctx->fb_base + idx) / 4)]; + if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { + DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n", + gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); + val = 0; + } else + val = gctx->scratch[(gctx->fb_base / 4) + idx]; if (print) DEBUG("FB[0x%02X]", idx); break; @@ -531,7 +536,11 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr, case ATOM_ARG_FB: idx = U8(*ptr); (*ptr)++; - gctx->scratch[((gctx->fb_base + idx) / 4)] = val; + if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { + DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. 
%d\n", + gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); + } else + gctx->scratch[(gctx->fb_base / 4) + idx] = val; DEBUG("FB[0x%02X]", idx); break; case ATOM_ARG_PLL: @@ -1370,11 +1379,13 @@ int atom_allocate_fb_scratch(struct atom_context *ctx) usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; } + ctx->scratch_size_bytes = 0; if (usage_bytes == 0) usage_bytes = 20 * 1024; /* allocate some scratch memory */ ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL); if (!ctx->scratch) return -ENOMEM; + ctx->scratch_size_bytes = usage_bytes; return 0; } diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h index a589a55b223..93cfe2086ba 100644 --- a/drivers/gpu/drm/radeon/atom.h +++ b/drivers/gpu/drm/radeon/atom.h @@ -137,6 +137,7 @@ struct atom_context { int cs_equal, cs_above; int io_mode; uint32_t *scratch; + int scratch_size_bytes; }; extern int atom_debug; diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index c742944d380..a515b2a09d8 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -466,7 +466,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc, return; } args.v2.ucEnable = enable; - if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK)) + if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev)) args.v2.ucEnable = ATOM_DISABLE; } else if (ASIC_IS_DCE3(rdev)) { args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 4da23889fea..79e8ebc0530 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -129,7 +129,9 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, for (retry = 0; retry < 4; retry++) { ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_bytes, NULL, 0, delay, &ack); - if (ret < 0) + if (ret == -EBUSY) + continue; + else if (ret < 0) return ret; if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) return send_bytes; @@ -160,7 +162,9 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, for (retry = 0; retry < 4; retry++) { ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_bytes, recv, recv_bytes, delay, &ack); - if (ret < 0) + if (ret == -EBUSY) + continue; + else if (ret < 0) return ret; if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) return ret; @@ -236,7 +240,9 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, for (retry = 0; retry < 4; retry++) { ret = radeon_process_aux_ch(auxch, msg, msg_bytes, reply, reply_bytes, 0, &ack); - if (ret < 0) { + if (ret == -EBUSY) + continue; + else if (ret < 0) { DRM_DEBUG_KMS("aux_ch failed %d\n", ret); return ret; } diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index bce63fd329d..449c3d8c683 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1303,23 +1303,14 @@ radeon_dp_detect(struct drm_connector *connector, bool force) /* get the DPCD from the bridge */ radeon_dp_getdpcd(radeon_connector); - if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) - ret = connector_status_connected; - else { - /* need to setup ddc on the bridge */ - if (encoder) - radeon_atom_ext_encoder_setup_ddc(encoder); + if (encoder) { + /* setup ddc on the bridge */ + 
radeon_atom_ext_encoder_setup_ddc(encoder); if (radeon_ddc_probe(radeon_connector, - radeon_connector->requires_extended_probe)) + radeon_connector->requires_extended_probe)) /* try DDC */ ret = connector_status_connected; - } - - if ((ret == connector_status_disconnected) && - radeon_connector->dac_load_detect) { - struct drm_encoder *encoder = radeon_best_single_encoder(connector); - struct drm_encoder_helper_funcs *encoder_funcs; - if (encoder) { - encoder_funcs = encoder->helper_private; + else if (radeon_connector->dac_load_detect) { /* try load detection */ + struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; ret = encoder_funcs->detect(encoder, connector); } } diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 13690f3eb4a..eb3f6dc6df8 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -1638,7 +1638,17 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) break; case 2: args.v2.ucCRTC = radeon_crtc->crtc_id; - args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); + if (radeon_encoder_is_dp_bridge(encoder)) { + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + + if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) + args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS; + else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA) + args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT; + else + args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); + } else + args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: @@ -1755,9 +1765,17 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) /* DCE4/5 */ if (ASIC_IS_DCE4(rdev)) { dig = radeon_encoder->enc_priv; - if (ASIC_IS_DCE41(rdev)) - return radeon_crtc->crtc_id; - else { + if (ASIC_IS_DCE41(rdev)) { + /* ontario follows DCE4 */ + if (rdev->family == CHIP_PALM) { + if (dig->linkb) + return 1; + else + return 0; + } else + /* llano follows DCE3.2 */ + return radeon_crtc->crtc_id; + } else { switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: if (dig->linkb) diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index ae3c6f5dd2b..082fcaea583 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -321,7 +321,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; struct ttm_tt *ttm = bo->ttm; struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_reg old_copy; + struct ttm_mem_reg old_copy = *old_mem; void *old_iomap; void *new_iomap; int ret; diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c index f2b377c56a3..36d7f270b14 100644 --- a/drivers/hwmon/w83627ehf.c +++ b/drivers/hwmon/w83627ehf.c @@ -390,7 +390,7 @@ temp_from_reg(u16 reg, s16 regval) { if (is_word_sized(reg)) return LM75_TEMP_FROM_REG(regval); - return regval * 1000; + return ((s8)regval) * 1000; } static inline u16 @@ -398,7 +398,8 @@ temp_to_reg(u16 reg, long temp) { if (is_word_sized(reg)) return LM75_TEMP_TO_REG(temp); - return DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000), 1000); + return (s8)DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000), + 1000); } /* Some of analog inputs have internal scaling (2x), 8mV is ADC LSB */ @@ -1715,7 +1716,8 @@ static void 
w83627ehf_device_remove_files(struct device *dev) } /* Get the monitoring functions started */ -static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data) +static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data, + enum kinds kind) { int i; u8 tmp, diode; @@ -1746,10 +1748,16 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data) w83627ehf_write_value(data, W83627EHF_REG_VBAT, tmp | 0x01); /* Get thermal sensor types */ - diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE); + switch (kind) { + case w83627ehf: + diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE); + break; + default: + diode = 0x70; + } for (i = 0; i < 3; i++) { if ((tmp & (0x02 << i))) - data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 2; + data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 3; else data->temp_type[i] = 4; /* thermistor */ } @@ -2016,7 +2024,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) } /* Initialize the chip */ - w83627ehf_init_device(data); + w83627ehf_init_device(data, sio_data->kind); data->vrm = vid_which_vrm(); superio_enter(sio_data->sioreg); diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index 9827c5e686c..811dbbd9306 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig @@ -327,7 +327,7 @@ config BLK_DEV_OPTI621 select BLK_DEV_IDEPCI help This is a driver for the OPTi 82C621 EIDE controller. - Please read the comments at the top of <file:drivers/ide/pci/opti621.c>. + Please read the comments at the top of <file:drivers/ide/opti621.c>. config BLK_DEV_RZ1000 tristate "RZ1000 chipset bugfix/support" @@ -365,7 +365,7 @@ config BLK_DEV_ALI15X3 normal dual channel support. Please read the comments at the top of - <file:drivers/ide/pci/alim15x3.c>. + <file:drivers/ide/alim15x3.c>. If unsure, say N. @@ -528,7 +528,7 @@ config BLK_DEV_NS87415 This driver adds detection and support for the NS87415 chip (used mainly on SPARC64 and PA-RISC machines). - Please read the comments at the top of <file:drivers/ide/pci/ns87415.c>. + Please read the comments at the top of <file:drivers/ide/ns87415.c>. config BLK_DEV_PDC202XX_OLD tristate "PROMISE PDC202{46|62|65|67} support" @@ -547,7 +547,7 @@ config BLK_DEV_PDC202XX_OLD for more than one card. Please read the comments at the top of - <file:drivers/ide/pci/pdc202xx_old.c>. + <file:drivers/ide/pdc202xx_old.c>. If unsure, say N. @@ -593,7 +593,7 @@ config BLK_DEV_SIS5513 ATA100: SiS635, SiS645, SiS650, SiS730, SiS735, SiS740, SiS745, SiS750 - Please read the comments at the top of <file:drivers/ide/pci/sis5513.c>. + Please read the comments at the top of <file:drivers/ide/sis5513.c>. config BLK_DEV_SL82C105 tristate "Winbond SL82c105 support" @@ -616,7 +616,7 @@ config BLK_DEV_SLC90E66 look-a-like to the PIIX4 it should be a nice addition. Please read the comments at the top of - <file:drivers/ide/pci/slc90e66.c>. + <file:drivers/ide/slc90e66.c>. config BLK_DEV_TRM290 tristate "Tekram TRM290 chipset support" @@ -625,7 +625,7 @@ config BLK_DEV_TRM290 This driver adds support for bus master DMA transfers using the Tekram TRM290 PCI IDE chip. Volunteers are needed for further tweaking and development. - Please read the comments at the top of <file:drivers/ide/pci/trm290.c>. + Please read the comments at the top of <file:drivers/ide/trm290.c>. config BLK_DEV_VIA82CXXX tristate "VIA82CXXX chipset support" @@ -836,7 +836,7 @@ config BLK_DEV_ALI14XX of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster I/O speeds to be set as well. 
See the files <file:Documentation/ide/ide.txt> and - <file:drivers/ide/legacy/ali14xx.c> for more info. + <file:drivers/ide/ali14xx.c> for more info. config BLK_DEV_DTC2278 tristate "DTC-2278 support" @@ -847,7 +847,7 @@ config BLK_DEV_DTC2278 boot parameter. It enables support for the secondary IDE interface of the DTC-2278 card, and permits faster I/O speeds to be set as well. See the <file:Documentation/ide/ide.txt> and - <file:drivers/ide/legacy/dtc2278.c> files for more info. + <file:drivers/ide/dtc2278.c> files for more info. config BLK_DEV_HT6560B tristate "Holtek HT6560B support" @@ -858,7 +858,7 @@ config BLK_DEV_HT6560B boot parameter. It enables support for the secondary IDE interface of the Holtek card, and permits faster I/O speeds to be set as well. See the <file:Documentation/ide/ide.txt> and - <file:drivers/ide/legacy/ht6560b.c> files for more info. + <file:drivers/ide/ht6560b.c> files for more info. config BLK_DEV_QD65XX tristate "QDI QD65xx support" @@ -867,7 +867,7 @@ config BLK_DEV_QD65XX help This driver is enabled at runtime using the "qd65xx.probe" kernel boot parameter. It permits faster I/O speeds to be set. See the - <file:Documentation/ide/ide.txt> and <file:drivers/ide/legacy/qd65xx.c> + <file:Documentation/ide/ide.txt> and <file:drivers/ide/qd65xx.c> for more info. config BLK_DEV_UMC8672 @@ -879,7 +879,7 @@ config BLK_DEV_UMC8672 boot parameter. It enables support for the secondary IDE interface of the UMC-8672, and permits faster I/O speeds to be set as well. See the files <file:Documentation/ide/ide.txt> and - <file:drivers/ide/legacy/umc8672.c> for more info. + <file:drivers/ide/umc8672.c> for more info. endif diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 0dc97ec15c2..9dea71849f4 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -1124,11 +1124,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, for (i = 0; i < 8; i++) __set_bit(BTN_0 + i, input_dev->keybit); - if (wacom_wac->features.type != WACOM_21UX2) { - input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); - input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); - } - + input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); + input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index c621c98c99d..a88f3cbb100 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -306,6 +306,11 @@ static inline bool dma_pte_present(struct dma_pte *pte) return (pte->val & 3) != 0; } +static inline bool dma_pte_superpage(struct dma_pte *pte) +{ + return (pte->val & (1 << 7)); +} + static inline int first_pte_in_page(struct dma_pte *pte) { return !((unsigned long)pte & ~VTD_PAGE_MASK); @@ -404,6 +409,9 @@ static int dmar_forcedac; static int intel_iommu_strict; static int intel_iommu_superpage = 1; +int intel_iommu_gfx_mapped; +EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); + #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1)) static DEFINE_SPINLOCK(device_domain_lock); static LIST_HEAD(device_domain_list); @@ -577,17 +585,18 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain) static void domain_update_iommu_superpage(struct dmar_domain *domain) { - int i, mask = 0xf; + struct dmar_drhd_unit *drhd; + struct intel_iommu *iommu = NULL; + int mask = 0xf; if (!intel_iommu_superpage) { 
domain->iommu_superpage = 0; return; } - domain->iommu_superpage = 4; /* 1TiB */ - - for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) { - mask |= cap_super_page_val(g_iommus[i]->cap); + /* set iommu_superpage to the smallest common denominator */ + for_each_active_iommu(iommu, drhd) { + mask &= cap_super_page_val(iommu->cap); if (!mask) { break; } @@ -730,29 +739,23 @@ out: } static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, - unsigned long pfn, int large_level) + unsigned long pfn, int target_level) { int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; struct dma_pte *parent, *pte = NULL; int level = agaw_to_level(domain->agaw); - int offset, target_level; + int offset; BUG_ON(!domain->pgd); BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width); parent = domain->pgd; - /* Search pte */ - if (!large_level) - target_level = 1; - else - target_level = large_level; - while (level > 0) { void *tmp_page; offset = pfn_level_offset(pfn, level); pte = &parent[offset]; - if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE)) + if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte))) break; if (level == target_level) break; @@ -816,13 +819,14 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, } /* clear last level pte, a tlb flush should be followed */ -static void dma_pte_clear_range(struct dmar_domain *domain, +static int dma_pte_clear_range(struct dmar_domain *domain, unsigned long start_pfn, unsigned long last_pfn) { int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; unsigned int large_page = 1; struct dma_pte *first_pte, *pte; + int order; BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); @@ -846,6 +850,9 @@ static void dma_pte_clear_range(struct dmar_domain *domain, (void *)pte - (void *)first_pte); } while (start_pfn && start_pfn <= last_pfn); + + order = (large_page - 1) * 9; + return order; } /* free page table pages. last level pte should already be cleared */ @@ -3226,9 +3233,6 @@ static void __init init_no_remapping_devices(void) } } - if (dmar_map_gfx) - return; - for_each_drhd_unit(drhd) { int i; if (drhd->ignored || drhd->include_all) @@ -3236,18 +3240,23 @@ static void __init init_no_remapping_devices(void) for (i = 0; i < drhd->devices_cnt; i++) if (drhd->devices[i] && - !IS_GFX_DEVICE(drhd->devices[i])) + !IS_GFX_DEVICE(drhd->devices[i])) break; if (i < drhd->devices_cnt) continue; - /* bypass IOMMU if it is just for gfx devices */ - drhd->ignored = 1; - for (i = 0; i < drhd->devices_cnt; i++) { - if (!drhd->devices[i]) - continue; - drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; + /* This IOMMU has *only* gfx devices. 
Either bypass it or + set the gfx_mapped flag, as appropriate */ + if (dmar_map_gfx) { + intel_iommu_gfx_mapped = 1; + } else { + drhd->ignored = 1; + for (i = 0; i < drhd->devices_cnt; i++) { + if (!drhd->devices[i]) + continue; + drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; + } } } } @@ -3568,6 +3577,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, found = 1; } + spin_unlock_irqrestore(&device_domain_lock, flags); + if (found == 0) { unsigned long tmp_flags; spin_lock_irqsave(&domain->iommu_lock, tmp_flags); @@ -3584,8 +3595,6 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, spin_unlock_irqrestore(&iommu->lock, tmp_flags); } } - - spin_unlock_irqrestore(&device_domain_lock, flags); } static void vm_domain_remove_all_dev_info(struct dmar_domain *domain) @@ -3739,6 +3748,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain) vm_domain_exit(dmar_domain); return -ENOMEM; } + domain_update_iommu_cap(dmar_domain); domain->priv = dmar_domain; return 0; @@ -3864,14 +3874,15 @@ static int intel_iommu_unmap(struct iommu_domain *domain, { struct dmar_domain *dmar_domain = domain->priv; size_t size = PAGE_SIZE << gfp_order; + int order; - dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, + order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, (iova + size - 1) >> VTD_PAGE_SHIFT); if (dmar_domain->max_addr == iova + size) dmar_domain->max_addr = iova; - return gfp_order; + return order; } static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, @@ -3950,7 +3961,11 @@ static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev) if (!(ggc & GGC_MEMORY_VT_ENABLED)) { printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); dmar_map_gfx = 0; - } + } else if (dmar_map_gfx) { + /* we have to ensure the gfx device is idle before we flush */ + printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n"); + intel_iommu_strict = 1; + } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 49da55c1528..8c2a000cf3f 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1698,6 +1698,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) } ti->num_flush_requests = 1; + ti->discard_zeroes_data_unsupported = 1; + return 0; bad: diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 89f73ca22cf..f84c08029b2 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -81,8 +81,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags> */ if (!strcasecmp(arg_name, "corrupt_bio_byte")) { - if (!argc) + if (!argc) { ti->error = "Feature corrupt_bio_byte requires parameters"; + return -EINVAL; + } r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error); if (r) diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index f8214702963..32ac70861d6 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -628,6 +628,7 @@ void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc, job->kc = kc; job->fn = fn; job->context = context; + job->master_job = job; atomic_inc(&kc->nr_jobs); diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index a002dd85db1..86df8b2cf92 100644 --- a/drivers/md/dm-raid.c +++ 
b/drivers/md/dm-raid.c @@ -449,7 +449,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv, rs->ti->error = "write_mostly option is only valid for RAID1"; return -EINVAL; } - if (value > rs->md.raid_disks) { + if (value >= rs->md.raid_disks) { rs->ti->error = "Invalid write_mostly drive index given"; return -EINVAL; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 986b8754bb0..bc04518e9d8 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1238,14 +1238,15 @@ static void dm_table_set_integrity(struct dm_table *t) return; template_disk = dm_table_get_integrity_disk(t, true); - if (!template_disk && - blk_integrity_is_initialized(dm_disk(t->md))) { + if (template_disk) + blk_integrity_register(dm_disk(t->md), + blk_get_integrity(template_disk)); + else if (blk_integrity_is_initialized(dm_disk(t->md))) DMWARN("%s: device no longer has a valid integrity profile", dm_device_name(t->md)); - return; - } - blk_integrity_register(dm_disk(t->md), - blk_get_integrity(template_disk)); + else + DMWARN("%s: unable to establish an integrity profile", + dm_device_name(t->md)); } static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, @@ -1282,6 +1283,22 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush) return 0; } +static bool dm_table_discard_zeroes_data(struct dm_table *t) +{ + struct dm_target *ti; + unsigned i = 0; + + /* Ensure that all targets supports discard_zeroes_data. */ + while (i < dm_table_get_num_targets(t)) { + ti = dm_table_get_target(t, i++); + + if (ti->discard_zeroes_data_unsupported) + return 0; + } + + return 1; +} + void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *limits) { @@ -1304,6 +1321,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, } blk_queue_flush(q, flush); + if (!dm_table_discard_zeroes_data(t)) + q->limits.discard_zeroes_data = 0; + dm_table_set_integrity(t); /* diff --git a/drivers/md/md.c b/drivers/md/md.c index 5404b229582..5c95ccb5950 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -61,6 +61,11 @@ static void autostart_arrays(int part); #endif +/* pers_list is a list of registered personalities protected + * by pers_lock. + * pers_lock does extra service to protect accesses to + * mddev->thread when the mutex cannot be held. 
+ */ static LIST_HEAD(pers_list); static DEFINE_SPINLOCK(pers_lock); @@ -739,7 +744,12 @@ static void mddev_unlock(mddev_t * mddev) } else mutex_unlock(&mddev->reconfig_mutex); + /* was we've dropped the mutex we need a spinlock to + * make sur the thread doesn't disappear + */ + spin_lock(&pers_lock); md_wakeup_thread(mddev->thread); + spin_unlock(&pers_lock); } static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) @@ -6429,11 +6439,18 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, return thread; } -void md_unregister_thread(mdk_thread_t *thread) +void md_unregister_thread(mdk_thread_t **threadp) { + mdk_thread_t *thread = *threadp; if (!thread) return; dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); + /* Locking ensures that mddev_unlock does not wake_up a + * non-existent thread + */ + spin_lock(&pers_lock); + *threadp = NULL; + spin_unlock(&pers_lock); kthread_stop(thread->tsk); kfree(thread); @@ -7340,8 +7357,7 @@ static void reap_sync_thread(mddev_t *mddev) mdk_rdev_t *rdev; /* resync has finished, collect result */ - md_unregister_thread(mddev->sync_thread); - mddev->sync_thread = NULL; + md_unregister_thread(&mddev->sync_thread); if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { /* success...*/ diff --git a/drivers/md/md.h b/drivers/md/md.h index 1e586bb4452..0a309dc29b4 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -560,7 +560,7 @@ extern int register_md_personality(struct mdk_personality *p); extern int unregister_md_personality(struct mdk_personality *p); extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev), mddev_t *mddev, const char *name); -extern void md_unregister_thread(mdk_thread_t *thread); +extern void md_unregister_thread(mdk_thread_t **threadp); extern void md_wakeup_thread(mdk_thread_t *thread); extern void md_check_recovery(mddev_t *mddev); extern void md_write_start(mddev_t *mddev, struct bio *bi); diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 3535c23af28..d5b5fb30017 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -514,8 +514,7 @@ static int multipath_stop (mddev_t *mddev) { multipath_conf_t *conf = mddev->private; - md_unregister_thread(mddev->thread); - mddev->thread = NULL; + md_unregister_thread(&mddev->thread); blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ mempool_destroy(conf->pool); kfree(conf->multipaths); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index f4622dd8fc5..d9587dffe53 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2562,8 +2562,7 @@ static int stop(mddev_t *mddev) raise_barrier(conf); lower_barrier(conf); - md_unregister_thread(mddev->thread); - mddev->thread = NULL; + md_unregister_thread(&mddev->thread); if (conf->r1bio_pool) mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index d7a8468ddea..0cd9672cf9c 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2955,7 +2955,7 @@ static int run(mddev_t *mddev) return 0; out_free_conf: - md_unregister_thread(mddev->thread); + md_unregister_thread(&mddev->thread); if (conf->r10bio_pool) mempool_destroy(conf->r10bio_pool); safe_put_page(conf->tmppage); @@ -2973,8 +2973,7 @@ static int stop(mddev_t *mddev) raise_barrier(conf, 0); lower_barrier(conf); - md_unregister_thread(mddev->thread); - mddev->thread = NULL; + md_unregister_thread(&mddev->thread); blk_sync_queue(mddev->queue); /* the unplug fn references 
'conf'*/ if (conf->r10bio_pool) mempool_destroy(conf->r10bio_pool); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 43709fa6b6d..ac5e8b57e50 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -4941,8 +4941,7 @@ static int run(mddev_t *mddev) return 0; abort: - md_unregister_thread(mddev->thread); - mddev->thread = NULL; + md_unregister_thread(&mddev->thread); if (conf) { print_raid5_conf(conf); free_conf(conf); @@ -4956,8 +4955,7 @@ static int stop(mddev_t *mddev) { raid5_conf_t *conf = mddev->private; - md_unregister_thread(mddev->thread); - mddev->thread = NULL; + md_unregister_thread(&mddev->thread); if (mddev->queue) mddev->queue->backing_dev_info.congested_fn = NULL; free_conf(conf); diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c index d7215651772..a5c9ed128b9 100644 --- a/drivers/media/video/v4l2-dev.c +++ b/drivers/media/video/v4l2-dev.c @@ -181,7 +181,7 @@ static void v4l2_device_release(struct device *cd) * TODO: In the long run all drivers that use v4l2_device should use the * v4l2_device release callback. This check will then be unnecessary. */ - if (v4l2_dev->release == NULL) + if (v4l2_dev && v4l2_dev->release == NULL) v4l2_dev = NULL; /* Release video_device and perform other diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index e46df5331c5..9a7eb3b36cf 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h @@ -239,13 +239,19 @@ void bnx2x_int_disable(struct bnx2x *bp); * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X * */ -/* iSCSI L2 */ -#define BNX2X_ISCSI_ETH_CL_ID_IDX 1 -#define BNX2X_ISCSI_ETH_CID 49 +enum { + BNX2X_ISCSI_ETH_CL_ID_IDX, + BNX2X_FCOE_ETH_CL_ID_IDX, + BNX2X_MAX_CNIC_ETH_CL_ID_IDX, +}; -/* FCoE L2 */ -#define BNX2X_FCOE_ETH_CL_ID_IDX 2 -#define BNX2X_FCOE_ETH_CID 50 +#define BNX2X_CNIC_START_ETH_CID 48 +enum { + /* iSCSI L2 */ + BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID, + /* FCoE L2 */ + BNX2X_FCOE_ETH_CID, +}; /** Additional rings budgeting */ #ifdef BCM_CNIC diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h index 223bfeebc59..2dc1199239d 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.h +++ b/drivers/net/bnx2x/bnx2x_cmn.h @@ -1297,7 +1297,7 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp, static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) { return bp->cnic_base_cl_id + cl_idx + - (bp->pf_num >> 1) * NON_ETH_CONTEXT_USE; + (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX; } static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 6d79b78cfc7..de3d351ccb6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1435,6 +1435,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) struct sk_buff *skb = *pskb; struct slave *slave; struct bonding *bond; + void (*recv_probe)(struct sk_buff *, struct bonding *, + struct slave *); skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) @@ -1448,11 +1450,12 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) if (bond->params.arp_interval) slave->dev->last_rx = jiffies; - if (bond->recv_probe) { + recv_probe = ACCESS_ONCE(bond->recv_probe); + if (recv_probe) { struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); if (likely(nskb)) { - bond->recv_probe(nskb, bond, slave); + recv_probe(nskb, bond, slave); dev_kfree_skb(nskb); } } diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index 
92feac68b66..4cc6f44c2ba 100644 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@ -261,11 +261,13 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev) void __iomem *data = &regs->tx.dsr1_0; u16 *payload = (u16 *)frame->data; - /* It is safe to write into dsr[dlc+1] */ - for (i = 0; i < (frame->can_dlc + 1) / 2; i++) { + for (i = 0; i < frame->can_dlc / 2; i++) { out_be16(data, *payload++); data += 2 + _MSCAN_RESERVED_DSR_SIZE; } + /* write remaining byte if necessary */ + if (frame->can_dlc & 1) + out_8(data, frame->data[frame->can_dlc - 1]); } out_8(&regs->tx.dlr, frame->can_dlc); @@ -330,10 +332,13 @@ static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame) void __iomem *data = &regs->rx.dsr1_0; u16 *payload = (u16 *)frame->data; - for (i = 0; i < (frame->can_dlc + 1) / 2; i++) { + for (i = 0; i < frame->can_dlc / 2; i++) { *payload++ = in_be16(data); data += 2 + _MSCAN_RESERVED_DSR_SIZE; } + /* read remaining byte if necessary */ + if (frame->can_dlc & 1) + frame->data[frame->can_dlc - 1] = in_8(data); } out_8(&regs->canrflg, MSCAN_RXF); diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 05172c39a0c..376e3e94bae 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -239,7 +239,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) dest = macvlan_hash_lookup(port, eth->h_dest); if (dest && dest->mode == MACVLAN_MODE_BRIDGE) { /* send to lowerdev first for its network taps */ - vlan->forward(vlan->lowerdev, skb); + dev_forward_skb(vlan->lowerdev, skb); return NET_XMIT_SUCCESS; } diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c index 6e03de034ac..f76ab6bf309 100644 --- a/drivers/net/mlx4/en_tx.c +++ b/drivers/net/mlx4/en_tx.c @@ -172,7 +172,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, memset(ring->buf, 0, ring->buf_size); ring->qp_state = MLX4_QP_STATE_RST; - ring->doorbell_qpn = swab32(ring->qp.qpn << 8); + ring->doorbell_qpn = ring->qp.qpn << 8; mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, ring->cqn, &ring->context); @@ -791,7 +791,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) skb_orphan(skb); if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) { - *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn; + *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn); op_own |= htonl((bf_index & 0xffff) << 8); /* Ensure new descirptor hits memory * before setting ownership of this descriptor to HW */ @@ -812,7 +812,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) wmb(); tx_desc->ctrl.owner_opcode = op_own; wmb(); - writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); + iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); } /* Poll CQ here */ diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index ed2a3977c6e..e8882023576 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -307,6 +307,11 @@ static ssize_t store_enabled(struct netconsole_target *nt, return err; if (enabled < 0 || enabled > 1) return -EINVAL; + if (enabled == nt->enabled) { + printk(KERN_INFO "netconsole: network logging has already %s\n", + nt->enabled ?
"started" : "stopped"); + return -EINVAL; + } if (enabled) { /* 1 */ diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c index eae542a7e98..89f829f5f72 100644 --- a/drivers/net/pptp.c +++ b/drivers/net/pptp.c @@ -285,8 +285,10 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) ip_send_check(iph); ip_local_out(skb); + return 1; tx_error: + kfree_skb(skb); return 1; } @@ -305,11 +307,18 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb) } header = (struct pptp_gre_header *)(skb->data); + headersize = sizeof(*header); /* test if acknowledgement present */ if (PPTP_GRE_IS_A(header->ver)) { - __u32 ack = (PPTP_GRE_IS_S(header->flags)) ? - header->ack : header->seq; /* ack in different place if S = 0 */ + __u32 ack; + + if (!pskb_may_pull(skb, headersize)) + goto drop; + header = (struct pptp_gre_header *)(skb->data); + + /* ack in different place if S = 0 */ + ack = PPTP_GRE_IS_S(header->flags) ? header->ack : header->seq; ack = ntohl(ack); @@ -318,21 +327,18 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb) /* also handle sequence number wrap-around */ if (WRAPPED(ack, opt->ack_recv)) opt->ack_recv = ack; + } else { + headersize -= sizeof(header->ack); } - /* test if payload present */ if (!PPTP_GRE_IS_S(header->flags)) goto drop; - headersize = sizeof(*header); payload_len = ntohs(header->payload_len); seq = ntohl(header->seq); - /* no ack present? */ - if (!PPTP_GRE_IS_A(header->ver)) - headersize -= sizeof(header->ack); /* check for incomplete packet (length smaller than expected) */ - if (skb->len - headersize < payload_len) + if (!pskb_may_pull(skb, headersize + payload_len)) goto drop; payload = skb->data + headersize; diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index c2366701792..6d657cabb95 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -2859,7 +2859,7 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x1f, 0x0004); rtl_writephy(tp, 0x1f, 0x0007); rtl_writephy(tp, 0x1e, 0x0020); - rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); + rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100); rtl_writephy(tp, 0x1f, 0x0002); rtl_writephy(tp, 0x1f, 0x0000); rtl_writephy(tp, 0x0d, 0x0007); @@ -3316,6 +3316,37 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp) } } +static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_34: + RTL_W32(RxConfig, RTL_R32(RxConfig) | + AcceptBroadcast | AcceptMulticast | AcceptMyPhys); + break; + default: + break; + } +} + +static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) +{ + if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) + return false; + + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, MII_BMCR, 0x0000); + + rtl_wol_suspend_quirk(tp); + + return true; +} + static void r810x_phy_power_down(struct rtl8169_private *tp) { rtl_writephy(tp, 0x1f, 0x0000); @@ -3330,18 +3361,8 @@ static void r810x_phy_power_up(struct rtl8169_private *tp) static void r810x_pll_power_down(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - if (__rtl8169_get_wol(tp) & WAKE_ANY) { - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, MII_BMCR, 0x0000); - - if (tp->mac_version == RTL_GIGA_MAC_VER_29 || - tp->mac_version == RTL_GIGA_MAC_VER_30) - RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | - AcceptMulticast | AcceptMyPhys); + if 
(rtl_wol_pll_power_down(tp)) return; - } r810x_phy_power_down(tp); } @@ -3430,17 +3451,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) tp->mac_version == RTL_GIGA_MAC_VER_33) rtl_ephy_write(ioaddr, 0x19, 0xff64); - if (__rtl8169_get_wol(tp) & WAKE_ANY) { - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, MII_BMCR, 0x0000); - - if (tp->mac_version == RTL_GIGA_MAC_VER_32 || - tp->mac_version == RTL_GIGA_MAC_VER_33 || - tp->mac_version == RTL_GIGA_MAC_VER_34) - RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | - AcceptMulticast | AcceptMyPhys); + if (rtl_wol_pll_power_down(tp)) return; - } r8168_phy_power_down(tp); @@ -5788,11 +5800,30 @@ static const struct dev_pm_ops rtl8169_pm_ops = { #endif /* !CONFIG_PM */ +static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + /* WoL fails with 8168b when the receiver is disabled. */ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + pci_clear_master(tp->pci_dev); + + RTL_W8(ChipCmd, CmdRxEnb); + /* PCI commit */ + RTL_R8(ChipCmd); + break; + default: + break; + } +} + static void rtl_shutdown(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; rtl8169_net_suspend(dev); @@ -5806,16 +5837,9 @@ static void rtl_shutdown(struct pci_dev *pdev) spin_unlock_irq(&tp->lock); if (system_state == SYSTEM_POWER_OFF) { - /* WoL fails with 8168b when the receiver is disabled. */ - if ((tp->mac_version == RTL_GIGA_MAC_VER_11 || - tp->mac_version == RTL_GIGA_MAC_VER_12 || - tp->mac_version == RTL_GIGA_MAC_VER_17) && - (tp->features & RTL_FEATURE_WOL)) { - pci_clear_master(pdev); - - RTL_W8(ChipCmd, CmdRxEnb); - /* PCI commit */ - RTL_R8(ChipCmd); + if (__rtl8169_get_wol(tp) & WAKE_ANY) { + rtl_wol_suspend_quirk(tp); + rtl_wol_shutdown_quirk(tp); } pci_wake_from_d3(pdev, true); diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index b9016a30cdc..c90ddb61cc5 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -26,6 +26,7 @@ * LAN9215, LAN9216, LAN9217, LAN9218 * LAN9210, LAN9211 * LAN9220, LAN9221 + * LAN89218 * */ @@ -1983,6 +1984,7 @@ static int __devinit smsc911x_init(struct net_device *dev) case 0x01170000: case 0x01160000: case 0x01150000: + case 0x218A0000: /* LAN911[5678] family */ pdata->generation = pdata->idrev & 0x0000FFFF; break; diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 4a1374df608..c11a2b8327f 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -15577,7 +15577,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev) cancel_work_sync(&tp->reset_task); - if (!tg3_flag(tp, USE_PHYLIB)) { + if (tg3_flag(tp, USE_PHYLIB)) { tg3_phy_fini(tp); tg3_mdio_fini(tp); } diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index c9e3dc024bc..16ad97df5ba 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -1769,10 +1769,12 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent, sas_disable_routing(parent, phy->attached_sas_addr); } memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); - sas_port_delete_phy(phy->port, phy->phy); - if (phy->port->num_phys == 0) - sas_port_delete(phy->port); - phy->port = NULL; + if (phy->port) { + sas_port_delete_phy(phy->port, phy->phy); + if (phy->port->num_phys == 0) + sas_port_delete(phy->port); + phy->port = NULL; + } } static int 
sas_discover_bfs_by_root_level(struct domain_device *root, diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 4cace3f20c0..1e69527f1e4 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1328,10 +1328,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) qla2x00_sp_compl(ha, sp); } else { ctx = sp->ctx; - if (ctx->type == SRB_LOGIN_CMD || - ctx->type == SRB_LOGOUT_CMD) { - ctx->u.iocb_cmd->free(sp); - } else { + if (ctx->type == SRB_ELS_CMD_RPT || + ctx->type == SRB_ELS_CMD_HST || + ctx->type == SRB_CT_CMD) { struct fc_bsg_job *bsg_job = ctx->u.bsg_job; if (bsg_job->request->msgcode @@ -1343,6 +1342,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) kfree(sp->ctx); mempool_free(sp, ha->srb_mempool); + } else { + ctx->u.iocb_cmd->free(sp); } } } diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c index 1a7c19ae766..8b307b42879 100644 --- a/drivers/staging/octeon/ethernet-rx.c +++ b/drivers/staging/octeon/ethernet-rx.c @@ -411,7 +411,8 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) skb->protocol = eth_type_trans(skb, dev); skb->dev = dev; - if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error)) + if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || + work->word2.s.L4_error || !work->word2.s.tcp_or_udp)) skb->ip_summed = CHECKSUM_NONE; else skb->ip_summed = CHECKSUM_UNNECESSARY; diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c index 58cf279ed87..bc95f52cad8 100644 --- a/drivers/tty/serial/lantiq.c +++ b/drivers/tty/serial/lantiq.c @@ -478,8 +478,10 @@ lqasc_set_termios(struct uart_port *port, spin_unlock_irqrestore(&ltq_asc_lock, flags); /* Don't rewrite B0 */ - if (tty_termios_baud_rate(new)) + if (tty_termios_baud_rate(new)) tty_termios_encode_baud_rate(new, baud, baud); + + uart_update_timeout(port, cflag, baud); } static const char* diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 538f65a79ec..dae5dfe41ba 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1047,7 +1047,16 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, if (!max_to_defrag) max_to_defrag = last_index - 1; - while (i <= last_index && defrag_count < max_to_defrag) { + /* + * make writeback starts from i, so the defrag range can be + * written sequentially. + */ + if (i < inode->i_mapping->writeback_index) + inode->i_mapping->writeback_index = i; + + while (i <= last_index && defrag_count < max_to_defrag && + (i < (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> + PAGE_CACHE_SHIFT)) { /* * make sure we stop running if someone unmounts * the FS diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index f4af4cc3750..71beb020197 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2018,7 +2018,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) warned_on_ntlm = true; cERROR(1, "default security mechanism requested. The default " "security mechanism will be upgraded from ntlm to " - "ntlmv2 in kernel release 3.1"); + "ntlmv2 in kernel release 3.2"); } ses->overrideSecFlg = volume_info->secFlg; diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index cac2ecfa674..ef43fce519a 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -629,7 +629,7 @@ xfs_buf_item_push( * the xfsbufd to get this buffer written. We have to unlock the buffer * to allow the xfsbufd to write it, too.
*/ -STATIC void +STATIC bool xfs_buf_item_pushbuf( struct xfs_log_item *lip) { @@ -643,6 +643,7 @@ xfs_buf_item_pushbuf( xfs_buf_delwri_promote(bp); xfs_buf_relse(bp); + return true; } STATIC void diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c index 9e0e2fa3f2c..bb3f71d236d 100644 --- a/fs/xfs/xfs_dquot_item.c +++ b/fs/xfs/xfs_dquot_item.c @@ -183,13 +183,14 @@ xfs_qm_dqunpin_wait( * search the buffer cache can be a time consuming thing, and AIL lock is a * spinlock. */ -STATIC void +STATIC bool xfs_qm_dquot_logitem_pushbuf( struct xfs_log_item *lip) { struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip); struct xfs_dquot *dqp = qlip->qli_dquot; struct xfs_buf *bp; + bool ret = true; ASSERT(XFS_DQ_IS_LOCKED(dqp)); @@ -201,17 +202,20 @@ xfs_qm_dquot_logitem_pushbuf( if (completion_done(&dqp->q_flush) || !(lip->li_flags & XFS_LI_IN_AIL)) { xfs_dqunlock(dqp); - return; + return true; } bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno, dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK); xfs_dqunlock(dqp); if (!bp) - return; + return true; if (XFS_BUF_ISDELAYWRITE(bp)) xfs_buf_delwri_promote(bp); + if (xfs_buf_ispinned(bp)) + ret = false; xfs_buf_relse(bp); + return ret; } /* diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 588406dc6a3..836ad80d4f2 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -708,13 +708,14 @@ xfs_inode_item_committed( * marked delayed write. If that's the case, we'll promote it and that will * allow the caller to write the buffer by triggering the xfsbufd to run. */ -STATIC void +STATIC bool xfs_inode_item_pushbuf( struct xfs_log_item *lip) { struct xfs_inode_log_item *iip = INODE_ITEM(lip); struct xfs_inode *ip = iip->ili_inode; struct xfs_buf *bp; + bool ret = true; ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED)); @@ -725,7 +726,7 @@ xfs_inode_item_pushbuf( if (completion_done(&ip->i_flush) || !(lip->li_flags & XFS_LI_IN_AIL)) { xfs_iunlock(ip, XFS_ILOCK_SHARED); - return; + return true; } bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno, @@ -733,10 +734,13 @@ xfs_inode_item_pushbuf( xfs_iunlock(ip, XFS_ILOCK_SHARED); if (!bp) - return; + return true; if (XFS_BUF_ISDELAYWRITE(bp)) xfs_buf_delwri_promote(bp); + if (xfs_buf_ispinned(bp)) + ret = false; xfs_buf_relse(bp); + return ret; } /* diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h index 1e8a45e74c3..828662f70d6 100644 --- a/fs/xfs/xfs_linux.h +++ b/fs/xfs/xfs_linux.h @@ -68,6 +68,8 @@ #include <linux/ctype.h> #include <linux/writeback.h> #include <linux/capability.h> +#include <linux/kthread.h> +#include <linux/freezer.h> #include <linux/list_sort.h> #include <asm/page.h> diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 2366c54cc4f..5cf06b85fd9 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -1652,24 +1652,13 @@ xfs_init_workqueues(void) */ xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8); if (!xfs_syncd_wq) - goto out; - - xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8); - if (!xfs_ail_wq) - goto out_destroy_syncd; - + return -ENOMEM; return 0; - -out_destroy_syncd: - destroy_workqueue(xfs_syncd_wq); -out: - return -ENOMEM; } STATIC void xfs_destroy_workqueues(void) { - destroy_workqueue(xfs_ail_wq); destroy_workqueue(xfs_syncd_wq); } diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index 06a9759b635..53597f4db9b 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h @@ -350,7 +350,7 @@ typedef struct xfs_item_ops { void (*iop_unlock)(xfs_log_item_t *); xfs_lsn_t 
(*iop_committed)(xfs_log_item_t *, xfs_lsn_t); void (*iop_push)(xfs_log_item_t *); - void (*iop_pushbuf)(xfs_log_item_t *); + bool (*iop_pushbuf)(xfs_log_item_t *); void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t); } xfs_item_ops_t; diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index c15aa29fa16..3a1e7ca54c2 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -28,8 +28,6 @@ #include "xfs_trans_priv.h" #include "xfs_error.h" -struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */ - #ifdef DEBUG /* * Check that the list is sorted as it should be. @@ -356,16 +354,10 @@ xfs_ail_delete( xfs_trans_ail_cursor_clear(ailp, lip); } -/* - * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself - * to run at a later time if there is more work to do to complete the push. - */ -STATIC void -xfs_ail_worker( - struct work_struct *work) +static long +xfsaild_push( + struct xfs_ail *ailp) { - struct xfs_ail *ailp = container_of(to_delayed_work(work), - struct xfs_ail, xa_work); xfs_mount_t *mp = ailp->xa_mount; struct xfs_ail_cursor cur; xfs_log_item_t *lip; @@ -427,8 +419,13 @@ xfs_ail_worker( case XFS_ITEM_PUSHBUF: XFS_STATS_INC(xs_push_ail_pushbuf); - IOP_PUSHBUF(lip); - ailp->xa_last_pushed_lsn = lsn; + + if (!IOP_PUSHBUF(lip)) { + stuck++; + flush_log = 1; + } else { + ailp->xa_last_pushed_lsn = lsn; + } push_xfsbufd = 1; break; @@ -440,7 +437,6 @@ xfs_ail_worker( case XFS_ITEM_LOCKED: XFS_STATS_INC(xs_push_ail_locked); - ailp->xa_last_pushed_lsn = lsn; stuck++; break; @@ -501,20 +497,6 @@ out_done: /* We're past our target or empty, so idle */ ailp->xa_last_pushed_lsn = 0; - /* - * We clear the XFS_AIL_PUSHING_BIT first before checking - * whether the target has changed. If the target has changed, - * this pushes the requeue race directly onto the result of the - * atomic test/set bit, so we are guaranteed that either the - * the pusher that changed the target or ourselves will requeue - * the work (but not both). - */ - clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags); - smp_rmb(); - if (XFS_LSN_CMP(ailp->xa_target, target) == 0 || - test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) - return; - tout = 50; } else if (XFS_LSN_CMP(lsn, target) >= 0) { /* @@ -537,9 +519,30 @@ out_done: tout = 20; } - /* There is more to do, requeue us. */ - queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, - msecs_to_jiffies(tout)); + return tout; +} + +static int +xfsaild( + void *data) +{ + struct xfs_ail *ailp = data; + long tout = 0; /* milliseconds */ + + while (!kthread_should_stop()) { + if (tout && tout <= 20) + __set_current_state(TASK_KILLABLE); + else + __set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(tout ? 
+ msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT); + + try_to_freeze(); + + tout = xfsaild_push(ailp); + } + + return 0; } /* @@ -574,8 +577,9 @@ xfs_ail_push( */ smp_wmb(); xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn); - if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) - queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0); + smp_wmb(); + + wake_up_process(ailp->xa_task); } /* @@ -813,9 +817,18 @@ xfs_trans_ail_init( INIT_LIST_HEAD(&ailp->xa_ail); INIT_LIST_HEAD(&ailp->xa_cursors); spin_lock_init(&ailp->xa_lock); - INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker); + + ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s", + ailp->xa_mount->m_fsname); + if (IS_ERR(ailp->xa_task)) + goto out_free_ailp; + mp->m_ail = ailp; return 0; + +out_free_ailp: + kmem_free(ailp); + return ENOMEM; } void @@ -824,6 +837,6 @@ xfs_trans_ail_destroy( { struct xfs_ail *ailp = mp->m_ail; - cancel_delayed_work_sync(&ailp->xa_work); + kthread_stop(ailp->xa_task); kmem_free(ailp); } diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h index 212946b9723..22750b5e4a8 100644 --- a/fs/xfs/xfs_trans_priv.h +++ b/fs/xfs/xfs_trans_priv.h @@ -64,23 +64,17 @@ struct xfs_ail_cursor { */ struct xfs_ail { struct xfs_mount *xa_mount; + struct task_struct *xa_task; struct list_head xa_ail; xfs_lsn_t xa_target; struct list_head xa_cursors; spinlock_t xa_lock; - struct delayed_work xa_work; xfs_lsn_t xa_last_pushed_lsn; - unsigned long xa_flags; }; -#define XFS_AIL_PUSHING_BIT 0 - /* * From xfs_trans_ail.c */ - -extern struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */ - void xfs_trans_ail_update_bulk(struct xfs_ail *ailp, struct xfs_ail_cursor *cur, struct xfs_log_item **log_items, int nr_items, diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 3fa1f3d90ce..99e3e50b5c5 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -197,6 +197,11 @@ struct dm_target { * whether or not its underlying devices have support. */ unsigned discards_supported:1; + + /* + * Set if this target does not return zeroes on discarded blocks. + */ + unsigned discard_zeroes_data_unsupported:1; }; /* Each target can link one of these into the table */ diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 1aaf915656f..8fa4430f99c 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -900,6 +900,7 @@ struct netns_ipvs { volatile int sync_state; volatile int master_syncid; volatile int backup_syncid; + struct mutex sync_mutex; /* multicast interface name */ char master_mcast_ifn[IP_VS_IFNAME_MAXLEN]; char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN]; diff --git a/include/net/udplite.h b/include/net/udplite.h index 673a024c6b2..5f097ca7d5c 100644 --- a/include/net/udplite.h +++ b/include/net/udplite.h @@ -66,40 +66,34 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh) return 0; } -static inline int udplite_sender_cscov(struct udp_sock *up, struct udphdr *uh) +/* Slow-path computation of checksum. Socket is locked. */ +static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb) { + const struct udp_sock *up = udp_sk(skb->sk); int cscov = up->len; + __wsum csum = 0; - /* - * Sender has set `partial coverage' option on UDP-Lite socket - */ - if (up->pcflag & UDPLITE_SEND_CC) { + if (up->pcflag & UDPLITE_SEND_CC) { + /* + * Sender has set `partial coverage' option on UDP-Lite socket. + * The special case "up->pcslen == 0" signifies full coverage. 
+ */ if (up->pcslen < up->len) { - /* up->pcslen == 0 means that full coverage is required, - * partial coverage only if 0 < up->pcslen < up->len */ - if (0 < up->pcslen) { - cscov = up->pcslen; - } - uh->len = htons(up->pcslen); + if (0 < up->pcslen) + cscov = up->pcslen; + udp_hdr(skb)->len = htons(up->pcslen); } - /* - * NOTE: Causes for the error case `up->pcslen > up->len': - * (i) Application error (will not be penalized). - * (ii) Payload too big for send buffer: data is split - * into several packets, each with its own header. - * In this case (e.g. last segment), coverage may - * exceed packet length. - * Since packets with coverage length > packet length are - * illegal, we fall back to the defaults here. - */ + /* + * NOTE: Causes for the error case `up->pcslen > up->len': + * (i) Application error (will not be penalized). + * (ii) Payload too big for send buffer: data is split + * into several packets, each with its own header. + * In this case (e.g. last segment), coverage may + * exceed packet length. + * Since packets with coverage length > packet length are + * illegal, we fall back to the defaults here. + */ } - return cscov; -} - -static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb) -{ - int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb)); - __wsum csum = 0; skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */ @@ -115,16 +109,21 @@ static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb) return csum; } +/* Fast-path computation of checksum. Socket may not be locked. */ static inline __wsum udplite_csum(struct sk_buff *skb) { - struct sock *sk = skb->sk; - int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb)); + const struct udp_sock *up = udp_sk(skb->sk); const int off = skb_transport_offset(skb); - const int len = skb->len - off; + int len = skb->len - off; + if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < len) { + if (0 < up->pcslen) + len = up->pcslen; + udp_hdr(skb)->len = htons(up->pcslen); + } skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */ - return skb_checksum(skb, off, min(cscov, len), 0); + return skb_checksum(skb, off, len, 0); } extern void udplite4_register(void); diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index c8008dd58ef..640ded8f5c4 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -274,9 +274,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times) struct task_cputime sum; unsigned long flags; - spin_lock_irqsave(&cputimer->lock, flags); if (!cputimer->running) { - cputimer->running = 1; /* * The POSIX timer interface allows for absolute time expiry * values through the TIMER_ABSTIME flag, therefore we have @@ -284,8 +282,11 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times) * it. 
*/ thread_group_cputime(tsk, &sum); + spin_lock_irqsave(&cputimer->lock, flags); + cputimer->running = 1; update_gt_cputime(&cputimer->cputime, &sum); - } + } else + spin_lock_irqsave(&cputimer->lock, flags); *times = cputimer->cputime; spin_unlock_irqrestore(&cputimer->lock, flags); } diff --git a/kernel/sys.c b/kernel/sys.c index 18ee1d2f647..1dbbe695a5e 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1172,7 +1172,7 @@ DECLARE_RWSEM(uts_sem); static int override_release(char __user *release, int len) { int ret = 0; - char buf[len]; + char buf[65]; if (current->personality & UNAME26) { char *rest = UTS_RELEASE; diff --git a/mm/migrate.c b/mm/migrate.c index 666e4e67741..14d0a6a632f 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -120,10 +120,10 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, ptep = pte_offset_map(pmd, addr); - if (!is_swap_pte(*ptep)) { - pte_unmap(ptep); - goto out; - } + /* + * Peek to check is_swap_pte() before taking ptlock? No, we + * can race mremap's move_ptes(), which skips anon_vma lock. + */ ptl = pte_lockptr(mm, pmd); } diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 61f1f623091..e8292369cdc 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -26,6 +26,8 @@ /* Bluetooth L2CAP sockets. */ +#include <linux/security.h> + #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> @@ -933,6 +935,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent) chan->force_reliable = pchan->force_reliable; chan->flushable = pchan->flushable; chan->force_active = pchan->force_active; + + security_sk_clone(parent, sk); } else { switch (sk->sk_type) { diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 482722bbc7a..5417f612732 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -42,6 +42,7 @@ #include <linux/device.h> #include <linux/debugfs.h> #include <linux/seq_file.h> +#include <linux/security.h> #include <net/sock.h> #include <asm/system.h> @@ -264,6 +265,8 @@ static void rfcomm_sock_init(struct sock *sk, struct sock *parent) pi->sec_level = rfcomm_pi(parent)->sec_level; pi->role_switch = rfcomm_pi(parent)->role_switch; + + security_sk_clone(parent, sk); } else { pi->dlc->defer_setup = 0; diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 8270f05e3f1..a324b009e34 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -41,6 +41,7 @@ #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/list.h> +#include <linux/security.h> #include <net/sock.h> #include <asm/system.h> @@ -403,8 +404,10 @@ static void sco_sock_init(struct sock *sk, struct sock *parent) { BT_DBG("sk %p", sk); - if (parent) + if (parent) { sk->sk_type = parent->sk_type; + security_sk_clone(parent, sk); + } } static struct proto sco_proto = { diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 32b8f9f7f79..ff3ed6086ce 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -91,7 +91,6 @@ static int br_dev_open(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); - netif_carrier_off(dev); netdev_update_features(dev); netif_start_queue(dev); br_stp_enable_bridge(br); @@ -108,8 +107,6 @@ static int br_dev_stop(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); - netif_carrier_off(dev); - br_stp_disable_bridge(br); br_multicast_stop(br); diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index e73815456ad..1d420f64ff2 100644 --- 
a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -161,9 +161,10 @@ static void del_nbp(struct net_bridge_port *p) call_rcu(&p->rcu, destroy_nbp_rcu); } -/* called with RTNL */ -static void del_br(struct net_bridge *br, struct list_head *head) +/* Delete bridge device */ +void br_dev_delete(struct net_device *dev, struct list_head *head) { + struct net_bridge *br = netdev_priv(dev); struct net_bridge_port *p, *n; list_for_each_entry_safe(p, n, &br->port_list, list) { @@ -268,7 +269,7 @@ int br_del_bridge(struct net *net, const char *name) } else - del_br(netdev_priv(dev), NULL); + br_dev_delete(dev, NULL); rtnl_unlock(); return ret; @@ -449,7 +450,7 @@ void __net_exit br_net_exit(struct net *net) rtnl_lock(); for_each_netdev(net, dev) if (dev->priv_flags & IFF_EBRIDGE) - del_br(netdev_priv(dev), &list); + br_dev_delete(dev, &list); unregister_netdevice_many(&list); rtnl_unlock(); diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 5b1ed1ba9aa..e5f9ece3c9a 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -210,6 +210,7 @@ static struct rtnl_link_ops br_link_ops __read_mostly = { .priv_size = sizeof(struct net_bridge), .setup = br_dev_setup, .validate = br_validate, + .dellink = br_dev_delete, }; int __init br_netlink_init(void) diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 78cc364997d..857a021deea 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -294,6 +294,7 @@ static inline int br_is_root_bridge(const struct net_bridge *br) /* br_device.c */ extern void br_dev_setup(struct net_device *dev); +extern void br_dev_delete(struct net_device *dev, struct list_head *list); extern netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev); #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 3231b468bb7..27071ee2a4e 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -475,8 +475,11 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) list_del_rcu(&rule->list); - if (rule->action == FR_ACT_GOTO) + if (rule->action == FR_ACT_GOTO) { ops->nr_goto_rules--; + if (rtnl_dereference(rule->ctarget) == NULL) + ops->unresolved_rules--; + } /* * Check if this rule is a target to any of them. 
If so, diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 21fab3edb92..d73aab3fbfc 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1389,9 +1389,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, BUG_ON(!pcount); - /* Tweak before seqno plays */ - if (!tcp_is_fack(tp) && tcp_is_sack(tp) && tp->lost_skb_hint && - !before(TCP_SKB_CB(tp->lost_skb_hint)->seq, TCP_SKB_CB(skb)->seq)) + if (skb == tp->lost_skb_hint) tp->lost_cnt_hint += pcount; TCP_SKB_CB(prev)->end_seq += shifted; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index c34f0151394..7963e03f106 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -927,18 +927,21 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, } sk_nocaps_add(sk, NETIF_F_GSO_MASK); } - if (tcp_alloc_md5sig_pool(sk) == NULL) { + + md5sig = tp->md5sig_info; + if (md5sig->entries4 == 0 && + tcp_alloc_md5sig_pool(sk) == NULL) { kfree(newkey); return -ENOMEM; } - md5sig = tp->md5sig_info; if (md5sig->alloced4 == md5sig->entries4) { keys = kmalloc((sizeof(*keys) * (md5sig->entries4 + 1)), GFP_ATOMIC); if (!keys) { kfree(newkey); - tcp_free_md5sig_pool(); + if (md5sig->entries4 == 0) + tcp_free_md5sig_pool(); return -ENOMEM; } @@ -982,6 +985,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) kfree(tp->md5sig_info->keys4); tp->md5sig_info->keys4 = NULL; tp->md5sig_info->alloced4 = 0; + tcp_free_md5sig_pool(); } else if (tp->md5sig_info->entries4 != i) { /* Need to do some manipulation */ memmove(&tp->md5sig_info->keys4[i], @@ -989,7 +993,6 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) (tp->md5sig_info->entries4 - i) * sizeof(struct tcp4_md5sig_key)); } - tcp_free_md5sig_pool(); return 0; } } diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index d2fe4e06b47..0ce3d06dce6 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -328,6 +328,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1); + tw->tw_transparent = inet_sk(sk)->transparent; tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale; tcptw->tw_rcv_nxt = tp->rcv_nxt; tcptw->tw_snd_nxt = tp->snd_nxt; diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 3b5669a2582..d27c797f9f0 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -875,6 +875,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, skb_reset_transport_header(skb); __skb_push(skb, skb_gro_offset(skb)); + ops = rcu_dereference(inet6_protos[proto]); if (!ops || !ops->gro_receive) goto out_unlock; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 79cc6469508..7b8fc579435 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -591,7 +591,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer, } sk_nocaps_add(sk, NETIF_F_GSO_MASK); } - if (tcp_alloc_md5sig_pool(sk) == NULL) { + if (tp->md5sig_info->entries6 == 0 && + tcp_alloc_md5sig_pool(sk) == NULL) { kfree(newkey); return -ENOMEM; } @@ -600,8 +601,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer, (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC); if (!keys) { - tcp_free_md5sig_pool(); kfree(newkey); + if (tp->md5sig_info->entries6 == 0) + tcp_free_md5sig_pool(); return -ENOMEM; } @@ -647,6 +649,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer) kfree(tp->md5sig_info->keys6); tp->md5sig_info->keys6 = NULL; tp->md5sig_info->alloced6 = 0; + 
tcp_free_md5sig_pool(); } else { /* shrink the database */ if (tp->md5sig_info->entries6 != i) @@ -655,7 +658,6 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer) (tp->md5sig_info->entries6 - i) * sizeof (tp->md5sig_info->keys6[0])); } - tcp_free_md5sig_pool(); return 0; } } diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index ad4ac2601a5..34b2ddeacb6 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1045,8 +1045,10 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len headroom = NET_SKB_PAD + sizeof(struct iphdr) + uhlen + hdr_len; old_headroom = skb_headroom(skb); - if (skb_cow_head(skb, headroom)) + if (skb_cow_head(skb, headroom)) { + dev_kfree_skb(skb); goto abort; + } new_headroom = skb_headroom(skb); skb_orphan(skb); diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 2b771dc708a..e3be48bf4dc 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -2283,6 +2283,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) struct ip_vs_service *svc; struct ip_vs_dest_user *udest_compat; struct ip_vs_dest_user_kern udest; + struct netns_ipvs *ipvs = net_ipvs(net); if (!capable(CAP_NET_ADMIN)) return -EPERM; @@ -2303,6 +2304,24 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) /* increase the module use count */ ip_vs_use_count_inc(); + /* Handle daemons since they have another lock */ + if (cmd == IP_VS_SO_SET_STARTDAEMON || + cmd == IP_VS_SO_SET_STOPDAEMON) { + struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg; + + if (mutex_lock_interruptible(&ipvs->sync_mutex)) { + ret = -ERESTARTSYS; + goto out_dec; + } + if (cmd == IP_VS_SO_SET_STARTDAEMON) + ret = start_sync_thread(net, dm->state, dm->mcast_ifn, + dm->syncid); + else + ret = stop_sync_thread(net, dm->state); + mutex_unlock(&ipvs->sync_mutex); + goto out_dec; + } + if (mutex_lock_interruptible(&__ip_vs_mutex)) { ret = -ERESTARTSYS; goto out_dec; @@ -2316,15 +2335,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) /* Set timeout values for (tcp tcpfin udp) */ ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg); goto out_unlock; - } else if (cmd == IP_VS_SO_SET_STARTDAEMON) { - struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg; - ret = start_sync_thread(net, dm->state, dm->mcast_ifn, - dm->syncid); - goto out_unlock; - } else if (cmd == IP_VS_SO_SET_STOPDAEMON) { - struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg; - ret = stop_sync_thread(net, dm->state); - goto out_unlock; } usvc_compat = (struct ip_vs_service_user *)arg; @@ -2584,6 +2594,33 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) if (copy_from_user(arg, user, copylen) != 0) return -EFAULT; + /* + * Handle daemons first since it has its own locking + */ + if (cmd == IP_VS_SO_GET_DAEMON) { + struct ip_vs_daemon_user d[2]; + + memset(&d, 0, sizeof(d)); + if (mutex_lock_interruptible(&ipvs->sync_mutex)) + return -ERESTARTSYS; + + if (ipvs->sync_state & IP_VS_STATE_MASTER) { + d[0].state = IP_VS_STATE_MASTER; + strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn, + sizeof(d[0].mcast_ifn)); + d[0].syncid = ipvs->master_syncid; + } + if (ipvs->sync_state & IP_VS_STATE_BACKUP) { + d[1].state = IP_VS_STATE_BACKUP; + strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn, + sizeof(d[1].mcast_ifn)); + d[1].syncid = ipvs->backup_syncid; + } + if (copy_to_user(user, &d, sizeof(d)) != 0) + ret = 
-EFAULT; + mutex_unlock(&ipvs->sync_mutex); + return ret; + } if (mutex_lock_interruptible(&__ip_vs_mutex)) return -ERESTARTSYS; @@ -2681,28 +2718,6 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) } break; - case IP_VS_SO_GET_DAEMON: - { - struct ip_vs_daemon_user d[2]; - - memset(&d, 0, sizeof(d)); - if (ipvs->sync_state & IP_VS_STATE_MASTER) { - d[0].state = IP_VS_STATE_MASTER; - strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn, - sizeof(d[0].mcast_ifn)); - d[0].syncid = ipvs->master_syncid; - } - if (ipvs->sync_state & IP_VS_STATE_BACKUP) { - d[1].state = IP_VS_STATE_BACKUP; - strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn, - sizeof(d[1].mcast_ifn)); - d[1].syncid = ipvs->backup_syncid; - } - if (copy_to_user(user, &d, sizeof(d)) != 0) - ret = -EFAULT; - } - break; - default: ret = -EINVAL; } @@ -3205,7 +3220,7 @@ static int ip_vs_genl_dump_daemons(struct sk_buff *skb, struct net *net = skb_sknet(skb); struct netns_ipvs *ipvs = net_ipvs(net); - mutex_lock(&__ip_vs_mutex); + mutex_lock(&ipvs->sync_mutex); if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) { if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER, ipvs->master_mcast_ifn, @@ -3225,7 +3240,7 @@ static int ip_vs_genl_dump_daemons(struct sk_buff *skb, } nla_put_failure: - mutex_unlock(&__ip_vs_mutex); + mutex_unlock(&ipvs->sync_mutex); return skb->len; } @@ -3271,13 +3286,9 @@ static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs) return ip_vs_set_timeout(net, &t); } -static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) +static int ip_vs_genl_set_daemon(struct sk_buff *skb, struct genl_info *info) { - struct ip_vs_service *svc = NULL; - struct ip_vs_service_user_kern usvc; - struct ip_vs_dest_user_kern udest; int ret = 0, cmd; - int need_full_svc = 0, need_full_dest = 0; struct net *net; struct netns_ipvs *ipvs; @@ -3285,19 +3296,10 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) ipvs = net_ipvs(net); cmd = info->genlhdr->cmd; - mutex_lock(&__ip_vs_mutex); - - if (cmd == IPVS_CMD_FLUSH) { - ret = ip_vs_flush(net); - goto out; - } else if (cmd == IPVS_CMD_SET_CONFIG) { - ret = ip_vs_genl_set_config(net, info->attrs); - goto out; - } else if (cmd == IPVS_CMD_NEW_DAEMON || - cmd == IPVS_CMD_DEL_DAEMON) { - + if (cmd == IPVS_CMD_NEW_DAEMON || cmd == IPVS_CMD_DEL_DAEMON) { struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1]; + mutex_lock(&ipvs->sync_mutex); if (!info->attrs[IPVS_CMD_ATTR_DAEMON] || nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX, info->attrs[IPVS_CMD_ATTR_DAEMON], @@ -3310,6 +3312,33 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) ret = ip_vs_genl_new_daemon(net, daemon_attrs); else ret = ip_vs_genl_del_daemon(net, daemon_attrs); +out: + mutex_unlock(&ipvs->sync_mutex); + } + return ret; +} + +static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) +{ + struct ip_vs_service *svc = NULL; + struct ip_vs_service_user_kern usvc; + struct ip_vs_dest_user_kern udest; + int ret = 0, cmd; + int need_full_svc = 0, need_full_dest = 0; + struct net *net; + struct netns_ipvs *ipvs; + + net = skb_sknet(skb); + ipvs = net_ipvs(net); + cmd = info->genlhdr->cmd; + + mutex_lock(&__ip_vs_mutex); + + if (cmd == IPVS_CMD_FLUSH) { + ret = ip_vs_flush(net); + goto out; + } else if (cmd == IPVS_CMD_SET_CONFIG) { + ret = ip_vs_genl_set_config(net, info->attrs); goto out; } else if (cmd == IPVS_CMD_ZERO && !info->attrs[IPVS_CMD_ATTR_SERVICE]) { @@ -3536,13 +3565,13 @@ static struct 
genl_ops ip_vs_genl_ops[] __read_mostly = { .cmd = IPVS_CMD_NEW_DAEMON, .flags = GENL_ADMIN_PERM, .policy = ip_vs_cmd_policy, - .doit = ip_vs_genl_set_cmd, + .doit = ip_vs_genl_set_daemon, }, { .cmd = IPVS_CMD_DEL_DAEMON, .flags = GENL_ADMIN_PERM, .policy = ip_vs_cmd_policy, - .doit = ip_vs_genl_set_cmd, + .doit = ip_vs_genl_set_daemon, }, { .cmd = IPVS_CMD_GET_DAEMON, @@ -3679,7 +3708,7 @@ int __net_init ip_vs_control_net_init(struct net *net) int idx; struct netns_ipvs *ipvs = net_ipvs(net); - ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock); + rwlock_init(&ipvs->rs_lock); /* Initialize rs_table */ for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++) diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 7ee7215b8ba..3cdd479f9b5 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -61,6 +61,7 @@ #define SYNC_PROTO_VER 1 /* Protocol version in header */ +static struct lock_class_key __ipvs_sync_key; /* * IPVS sync connection entry * Version 0, i.e. original version. @@ -1545,6 +1546,7 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid) IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n", sizeof(struct ip_vs_sync_conn_v0)); + if (state == IP_VS_STATE_MASTER) { if (ipvs->master_thread) return -EEXIST; @@ -1667,6 +1669,7 @@ int __net_init ip_vs_sync_net_init(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); + __mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key); INIT_LIST_HEAD(&ipvs->sync_queue); spin_lock_init(&ipvs->sync_lock); spin_lock_init(&ipvs->sync_buff_lock); @@ -1680,7 +1683,9 @@ int __net_init ip_vs_sync_net_init(struct net *net) void ip_vs_sync_net_cleanup(struct net *net) { int retc; + struct netns_ipvs *ipvs = net_ipvs(net); + mutex_lock(&ipvs->sync_mutex); retc = stop_sync_thread(net, IP_VS_STATE_MASTER); if (retc && retc != -ESRCH) pr_err("Failed to stop Master Daemon\n"); @@ -1688,4 +1693,5 @@ void ip_vs_sync_net_cleanup(struct net *net) retc = stop_sync_thread(net, IP_VS_STATE_BACKUP); if (retc && retc != -ESRCH) pr_err("Failed to stop Backup Daemon\n"); + mutex_unlock(&ipvs->sync_mutex); } diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index cf616e55ca4..d69facdd9a7 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c @@ -241,8 +241,8 @@ static int gre_packet(struct nf_conn *ct, nf_ct_refresh_acct(ct, ctinfo, skb, ct->proto.gre.stream_timeout); /* Also, more likely to be important, and not a probe. 
*/ - set_bit(IPS_ASSURED_BIT, &ct->status); - nf_conntrack_event_cache(IPCT_ASSURED, ct); + if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) + nf_conntrack_event_cache(IPCT_ASSURED, ct); } else nf_ct_refresh_acct(ct, ctinfo, skb, ct->proto.gre.timeout); diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index d30615419b4..5f03e4ea65b 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -91,7 +91,7 @@ int x25_parse_address_block(struct sk_buff *skb, int needed; int rc; - if (skb->len < 1) { + if (!pskb_may_pull(skb, 1)) { /* packet has no address block */ rc = 0; goto empty; @@ -100,7 +100,7 @@ int x25_parse_address_block(struct sk_buff *skb, len = *skb->data; needed = 1 + (len >> 4) + (len & 0x0f); - if (skb->len < needed) { + if (!pskb_may_pull(skb, needed)) { /* packet is too short to hold the addresses it claims to hold */ rc = -1; @@ -295,7 +295,8 @@ static struct sock *x25_find_listener(struct x25_address *addr, * Found a listening socket, now check the incoming * call user data vs this sockets call user data */ - if(skb->len > 0 && x25_sk(s)->cudmatchlength > 0) { + if (x25_sk(s)->cudmatchlength > 0 && + skb->len >= x25_sk(s)->cudmatchlength) { if((memcmp(x25_sk(s)->calluserdata.cuddata, skb->data, x25_sk(s)->cudmatchlength)) == 0) { @@ -951,14 +952,27 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb, * * Facilities length is mandatory in call request packets */ - if (skb->len < 1) + if (!pskb_may_pull(skb, 1)) goto out_clear_request; len = skb->data[0] + 1; - if (skb->len < len) + if (!pskb_may_pull(skb, len)) goto out_clear_request; skb_pull(skb,len); /* + * Ensure that the amount of call user data is valid. + */ + if (skb->len > X25_MAX_CUD_LEN) + goto out_clear_request; + + /* + * Get all the call user data so it can be used in + * x25_find_listener and skb_copy_from_linear_data up ahead. + */ + if (!pskb_may_pull(skb, skb->len)) + goto out_clear_request; + + /* * Find a listener for the particular address/cud pair. */ sk = x25_find_listener(&source_addr,skb); @@ -1166,6 +1180,9 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock, * byte of the user data is the logical value of the Q Bit. */ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { + if (!pskb_may_pull(skb, 1)) + goto out_kfree_skb; + qbit = skb->data[0]; skb_pull(skb, 1); } @@ -1244,7 +1261,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, struct x25_sock *x25 = x25_sk(sk); struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name; size_t copied; - int qbit; + int qbit, header_len = x25->neighbour->extended ? + X25_EXT_MIN_LEN : X25_STD_MIN_LEN; + struct sk_buff *skb; unsigned char *asmptr; int rc = -ENOTCONN; @@ -1265,6 +1284,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, skb = skb_dequeue(&x25->interrupt_in_queue); + if (!pskb_may_pull(skb, X25_STD_MIN_LEN)) + goto out_free_dgram; + skb_pull(skb, X25_STD_MIN_LEN); /* @@ -1285,10 +1307,12 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, if (!skb) goto out; + if (!pskb_may_pull(skb, header_len)) + goto out_free_dgram; + qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT; - skb_pull(skb, x25->neighbour->extended ? 
- X25_EXT_MIN_LEN : X25_STD_MIN_LEN); + skb_pull(skb, header_len); if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { asmptr = skb_push(skb, 1); diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c index e547ca1578c..fa2b41888bd 100644 --- a/net/x25/x25_dev.c +++ b/net/x25/x25_dev.c @@ -32,6 +32,9 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb) unsigned short frametype; unsigned int lci; + if (!pskb_may_pull(skb, X25_STD_MIN_LEN)) + return 0; + frametype = skb->data[2]; lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); @@ -115,6 +118,9 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev, goto drop; } + if (!pskb_may_pull(skb, 1)) + return 0; + switch (skb->data[0]) { case X25_IFACE_DATA: diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c index f77e4e75f91..36384a1fa9f 100644 --- a/net/x25/x25_facilities.c +++ b/net/x25/x25_facilities.c @@ -44,7 +44,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask) { - unsigned char *p = skb->data; + unsigned char *p; unsigned int len; *vc_fac_mask = 0; @@ -60,14 +60,16 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae)); memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae)); - if (skb->len < 1) + if (!pskb_may_pull(skb, 1)) return 0; - len = *p++; + len = skb->data[0]; - if (len >= skb->len) + if (!pskb_may_pull(skb, 1 + len)) return -1; + p = skb->data + 1; + while (len > 0) { switch (*p & X25_FAC_CLASS_MASK) { case X25_FAC_CLASS_A: diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index 0b073b51b18..a49cd4ec551 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c @@ -107,6 +107,8 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp /* * Parse the data in the frame. */ + if (!pskb_may_pull(skb, X25_STD_MIN_LEN)) + goto out_clear; skb_pull(skb, X25_STD_MIN_LEN); len = x25_parse_address_block(skb, &source_addr, @@ -127,9 +129,11 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp * Copy any Call User Data. 
*/ if (skb->len > 0) { - skb_copy_from_linear_data(skb, - x25->calluserdata.cuddata, - skb->len); + if (skb->len > X25_MAX_CUD_LEN) + goto out_clear; + + skb_copy_bits(skb, 0, x25->calluserdata.cuddata, + skb->len); x25->calluserdata.cudlength = skb->len; } if (!sock_flag(sk, SOCK_DEAD)) @@ -137,6 +141,9 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp break; } case X25_CLEAR_REQUEST: + if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) + goto out_clear; + x25_write_internal(sk, X25_CLEAR_CONFIRMATION); x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]); break; @@ -164,6 +171,9 @@ static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametyp switch (frametype) { case X25_CLEAR_REQUEST: + if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) + goto out_clear; + x25_write_internal(sk, X25_CLEAR_CONFIRMATION); x25_disconnect(sk, 0, skb->data[3], skb->data[4]); break; @@ -177,6 +187,11 @@ static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametyp } return 0; + +out_clear: + x25_write_internal(sk, X25_CLEAR_REQUEST); + x25_start_t23timer(sk); + return 0; } /* @@ -206,6 +221,9 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp break; case X25_CLEAR_REQUEST: + if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) + goto out_clear; + x25_write_internal(sk, X25_CLEAR_CONFIRMATION); x25_disconnect(sk, 0, skb->data[3], skb->data[4]); break; @@ -304,6 +322,12 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp } return queued; + +out_clear: + x25_write_internal(sk, X25_CLEAR_REQUEST); + x25->state = X25_STATE_2; + x25_start_t23timer(sk); + return 0; } /* @@ -313,13 +337,13 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp */ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype) { + struct x25_sock *x25 = x25_sk(sk); + switch (frametype) { case X25_RESET_REQUEST: x25_write_internal(sk, X25_RESET_CONFIRMATION); case X25_RESET_CONFIRMATION: { - struct x25_sock *x25 = x25_sk(sk); - x25_stop_timer(sk); x25->condition = 0x00; x25->va = 0; @@ -331,6 +355,9 @@ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametyp break; } case X25_CLEAR_REQUEST: + if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) + goto out_clear; + x25_write_internal(sk, X25_CLEAR_CONFIRMATION); x25_disconnect(sk, 0, skb->data[3], skb->data[4]); break; @@ -340,6 +367,12 @@ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametyp } return 0; + +out_clear: + x25_write_internal(sk, X25_CLEAR_REQUEST); + x25->state = X25_STATE_2; + x25_start_t23timer(sk); + return 0; } /* Higher level upcall for a LAPB frame */ diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c index 037958ff8ee..4acacf3c661 100644 --- a/net/x25/x25_link.c +++ b/net/x25/x25_link.c @@ -90,6 +90,9 @@ void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb, break; case X25_DIAGNOSTIC: + if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4)) + break; + printk(KERN_WARNING "x25: diagnostic #%d - %02X %02X %02X\n", skb->data[3], skb->data[4], skb->data[5], skb->data[6]); diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c index 24a342ebc7f..5170d52bfd9 100644 --- a/net/x25/x25_subr.c +++ b/net/x25/x25_subr.c @@ -269,7 +269,11 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q, int *d, int *m) { struct x25_sock *x25 = x25_sk(sk); - unsigned char *frame = skb->data; + unsigned char *frame; + + if (!pskb_may_pull(skb, 
X25_STD_MIN_LEN)) + return X25_ILLEGAL; + frame = skb->data; *ns = *nr = *q = *d = *m = 0; @@ -294,6 +298,10 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q, if (frame[2] == X25_RR || frame[2] == X25_RNR || frame[2] == X25_REJ) { + if (!pskb_may_pull(skb, X25_EXT_MIN_LEN)) + return X25_ILLEGAL; + frame = skb->data; + *nr = (frame[3] >> 1) & 0x7F; return frame[2]; } @@ -308,6 +316,10 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q, if (x25->neighbour->extended) { if ((frame[2] & 0x01) == X25_DATA) { + if (!pskb_may_pull(skb, X25_EXT_MIN_LEN)) + return X25_ILLEGAL; + frame = skb->data; + *q = (frame[0] & X25_Q_BIT) == X25_Q_BIT; *d = (frame[0] & X25_D_BIT) == X25_D_BIT; *m = (frame[3] & X25_EXT_M_BIT) == X25_EXT_M_BIT; diff --git a/security/security.c b/security/security.c index 0e4fccfef12..d9e15339092 100644 --- a/security/security.c +++ b/security/security.c @@ -1097,6 +1097,7 @@ void security_sk_clone(const struct sock *sk, struct sock *newsk) { security_ops->sk_clone_security(sk, newsk); } +EXPORT_SYMBOL(security_sk_clone); void security_sk_classify_flow(struct sock *sk, struct flowi *fl) { diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index e9a2a8795d1..191284a1c0a 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2370,6 +2370,7 @@ static int azx_dev_free(struct snd_device *device) static struct snd_pci_quirk position_fix_list[] __devinitdata = { SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB), SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB), + SND_PCI_QUIRK(0x1028, 0x02c6, "Dell Inspiron 1010", POS_FIX_LPIB), SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB), SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB), diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 7696d05b935..76752d8ea73 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -3110,6 +3110,7 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS), SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), + SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520 & W520", CXT5066_AUTO), SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS), |
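
The include/net/udplite.h hunk near the top of this section reworks udplite_csum() so that, when send checksum coverage is enabled (UDPLITE_SEND_CC) and the coverage is smaller than the payload, the checksum is computed only over the first pcslen bytes. A minimal userspace sketch of the same idea follows, using the plain Internet checksum; the function name, buffer layout, and the coverage==0 convention are illustrative stand-ins, not the kernel code.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* One's-complement (Internet) checksum over the first "coverage" bytes.
 * coverage == 0 means full coverage, mirroring UDP-Lite's pcslen == 0 case. */
static uint16_t partial_csum(const uint8_t *buf, size_t len, size_t coverage)
{
    size_t cscov = (coverage == 0 || coverage > len) ? len : coverage;
    uint32_t sum = 0;
    size_t i;

    for (i = 0; i + 1 < cscov; i += 2)
        sum += (uint32_t)buf[i] << 8 | buf[i + 1];
    if (i < cscov)                      /* odd trailing byte */
        sum += (uint32_t)buf[i] << 8;

    while (sum >> 16)                   /* fold carries back in */
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    uint8_t payload[] = { 0x45, 0x00, 0x00, 0x54, 0xde, 0xad, 0xbe, 0xef };

    printf("full coverage : 0x%04x\n", partial_csum(payload, sizeof(payload), 0));
    printf("coverage 4    : 0x%04x\n", partial_csum(payload, sizeof(payload), 4));
    return 0;
}

Only the covered prefix contributes to the second result, which is exactly what lets a UDP-Lite receiver accept packets whose trailing payload was corrupted in transit.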
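
The kernel/posix-cpu-timers.c change moves the expensive thread_group_cputime() summation out from under cputimer->lock: the sum is computed unlocked, and the spinlock is taken only to set ->running and publish the result. Below is a rough pthread model of that "do the slow work unlocked, publish under the lock" pattern; the structure, the expensive_sum() placeholder, and the greater-than merge are simplifications, not the kernel's types.

#include <pthread.h>
#include <stdbool.h>

struct cputimer {
    pthread_mutex_t lock;
    bool running;
    long long total;            /* stands in for struct task_cputime */
};

static long long expensive_sum(void)
{
    /* placeholder for thread_group_cputime(): walks every thread */
    return 42;
}

/* The unlocked read of ->running and the slightly stale sum are tolerated,
 * as in the patch, because the value is merged with a "keep the larger"
 * rule (update_gt_cputime() in the kernel). */
static long long sample(struct cputimer *t)
{
    long long out;

    if (!t->running) {
        long long sum = expensive_sum();    /* no lock held here */

        pthread_mutex_lock(&t->lock);
        t->running = true;
        if (sum > t->total)                 /* crude update_gt_cputime() stand-in */
            t->total = sum;
    } else {
        pthread_mutex_lock(&t->lock);
    }
    out = t->total;
    pthread_mutex_unlock(&t->lock);
    return out;
}

int main(void)
{
    static struct cputimer t = { PTHREAD_MUTEX_INITIALIZER, false, 0 };

    return sample(&t) == 42 ? 0 : 1;
}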
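
The kernel/sys.c hunk replaces `char buf[len]`, a variable-length array sized by a caller-supplied length, with a fixed 65-byte buffer in override_release(). A tiny model of why: size the stack buffer by the data you actually produce, then copy out at most the requested amount. The helper name and the fake release string below are invented for illustration.

#include <stdio.h>
#include <string.h>

#define RELEASE_BUF 65      /* matches the fixed size chosen in the patch */

/* Build a short release string into a fixed-size stack buffer and copy at
 * most "len" bytes out, instead of putting len bytes on the stack (a large
 * len would otherwise grow the stack arbitrarily). */
static int fake_override_release(char *dst, size_t len)
{
    char buf[RELEASE_BUF];
    int n = snprintf(buf, sizeof(buf), "2.6.%d", 40);
    size_t want;

    if (n < 0)
        return -1;
    want = strlen(buf) + 1;
    if (len > want)
        len = want;
    memcpy(dst, buf, len);
    if (len)
        dst[len - 1] = '\0';
    return 0;
}

int main(void)
{
    char out[8];

    fake_override_release(out, sizeof(out));
    puts(out);
    return 0;
}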
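
The Bluetooth hunks (l2cap_sock.c, rfcomm/sock.c, sco.c) add a security_sk_clone(parent, sk) call when a child socket is initialized from a listening parent, and security.c exports the symbol so modular code can use it. A toy model of the idea, copying the listener's security label onto the accepted socket; the label field and function names are invented, not the LSM interface.

#include <string.h>
#include <stdio.h>

struct sock {
    char security_label[32];    /* stands in for the per-socket LSM blob */
    int  type;
};

/* Analogue of security_sk_clone(): the child inherits the parent's label. */
static void sk_clone_security(const struct sock *parent, struct sock *child)
{
    memcpy(child->security_label, parent->security_label,
           sizeof(child->security_label));
}

/* Analogue of the *_sock_init() paths that run with a parent socket. */
static void sock_init(struct sock *child, const struct sock *parent)
{
    child->type = parent->type;
    sk_clone_security(parent, child);   /* the call the patches add */
}

int main(void)
{
    struct sock listener = { "bt_listener_t", 1 };
    struct sock accepted = { "", 0 };

    sock_init(&accepted, &listener);
    puts(accepted.security_label);
    return 0;
}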
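
The tcp_ipv4.c and tcp_ipv6.c MD5 hunks allocate the shared signature pool only when a socket installs its first key and release it only on the paths that actually drop the last key, so an -ENOMEM in the middle no longer frees a reference that was never taken. A compact "first user allocates, last user frees" model with an explicit user count; the pool here is just a malloc'd blob, and the per-socket entry counting is collapsed into a single counter for illustration.

#include <stdlib.h>
#include <assert.h>

static void *md5_pool;              /* stands in for the global signature pool */
static unsigned int pool_users;

static int pool_get(void)
{
    if (pool_users == 0) {          /* first user allocates */
        md5_pool = malloc(1024);
        if (!md5_pool)
            return -1;
    }
    pool_users++;
    return 0;
}

static void pool_put(void)
{
    assert(pool_users > 0);
    if (--pool_users == 0) {        /* last user frees */
        free(md5_pool);
        md5_pool = NULL;
    }
}

int main(void)
{
    /* two keys on one socket: allocate once, free once */
    if (pool_get())
        return 1;
    if (pool_get()) {
        pool_put();                 /* an error path drops only what it took */
        return 1;
    }
    pool_put();
    pool_put();
    return md5_pool == NULL ? 0 : 1;
}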
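
The l2tp_core.c hunk frees the skb when skb_cow_head() fails instead of only jumping to the abort label, because the transmit path owns the buffer at that point. A minimal model of the ownership rule, with malloc standing in for the skb and a placeholder failure condition:

#include <stdlib.h>

/* Placeholder for skb_cow_head(): fails when the requested headroom is huge. */
static int cow_head(size_t headroom)
{
    return headroom > 4096 ? -1 : 0;
}

/* Models l2tp_xmit_skb(): the function owns "pkt", so every early return
 * must release it (the leak the patch fixes). */
static int xmit(void *pkt, size_t headroom)
{
    if (cow_head(headroom)) {
        free(pkt);                  /* was leaked before the fix */
        return -1;
    }
    /* ... add headers, hand the packet to the lower layer ... */
    free(pkt);                      /* consumed on success, too */
    return 0;
}

int main(void)
{
    void *pkt = malloc(64);

    return xmit(pkt, 8192) == -1 ? 0 : 1;   /* error path must not leak */
}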
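
The ip_vs_ctl.c and ip_vs_sync.c changes give the sync-daemon start/stop/dump paths their own ipvs->sync_mutex (initialized with its own lock class) so they no longer serialize against __ip_vs_mutex. The general pattern is splitting one coarse lock into two when two groups of operations touch disjoint state; the sketch below uses invented state and function names to show the shape, not the IPVS code itself.

#include <pthread.h>

/* Before the split, one mutex guarded both the service table and the sync
 * daemons, so a slow daemon operation blocked unrelated configuration. */
static pthread_mutex_t cfg_mutex  = PTHREAD_MUTEX_INITIALIZER;  /* ~ __ip_vs_mutex */
static pthread_mutex_t sync_mutex = PTHREAD_MUTEX_INITIALIZER;  /* ~ ipvs->sync_mutex */

static int services;        /* guarded by cfg_mutex  */
static int sync_running;    /* guarded by sync_mutex */

static void add_service(void)
{
    pthread_mutex_lock(&cfg_mutex);
    services++;
    pthread_mutex_unlock(&cfg_mutex);
}

static void start_sync_daemon(void)
{
    pthread_mutex_lock(&sync_mutex);    /* does not block add_service() */
    if (!sync_running)
        sync_running = 1;
    pthread_mutex_unlock(&sync_mutex);
}

int main(void)
{
    add_service();
    start_sync_daemon();
    return !(services == 1 && sync_running);
}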
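
The nf_conntrack_proto_gre.c hunk replaces an unconditional set_bit() plus event with test_and_set_bit(), so the IPCT_ASSURED event is generated only on the 0-to-1 transition of the flag rather than on every packet. A small C11 stdatomic model of "emit the notification only if you are the caller that flipped the flag"; the event function is a stand-in.

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag assured = ATOMIC_FLAG_INIT;

static void emit_assured_event(void)
{
    puts("ASSURED event");      /* stands in for nf_conntrack_event_cache() */
}

/* Called for every packet of an established flow; only the caller that
 * actually sets the flag reports the event, like test_and_set_bit() with
 * IPS_ASSURED_BIT in the patch. */
static void mark_assured(void)
{
    if (!atomic_flag_test_and_set(&assured))
        emit_assured_event();
}

int main(void)
{
    mark_assured();     /* prints once */
    mark_assured();     /* silent */
    mark_assured();     /* silent */
    return 0;
}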
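
Most of the net/x25 changes insert pskb_may_pull() checks before the code indexes into skb->data, so frames shorter than the header they claim to carry are rejected (or answered with a CLEAR REQUEST and the T23 timer) instead of being read out of bounds, and call user data is clamped to X25_MAX_CUD_LEN and copied with skb_copy_bits(). The sketch below applies the same defensive pattern to a toy frame format; the layout, constants, and may_pull() helper are only analogues of the kernel interfaces.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define STD_MIN_LEN   3     /* analogue of X25_STD_MIN_LEN */
#define MAX_CUD_LEN 128     /* analogue of X25_MAX_CUD_LEN */

struct frame { const uint8_t *data; size_t len; };

/* Analogue of pskb_may_pull(): is it safe to read the first n bytes? */
static int may_pull(const struct frame *f, size_t n)
{
    return f->len >= n;
}

/* Parse "<3-byte header><1-byte cud length><cud bytes>".
 * Every read is preceded by a length check, as the x25 fixes do. */
static int parse(const struct frame *f, uint8_t *cud, size_t *cud_len)
{
    size_t need;

    if (!may_pull(f, STD_MIN_LEN + 1))
        return -1;                      /* too short for header + length */

    need = STD_MIN_LEN + 1 + f->data[STD_MIN_LEN];
    if (f->data[STD_MIN_LEN] > MAX_CUD_LEN || !may_pull(f, need))
        return -1;                      /* claims more data than it carries */

    *cud_len = f->data[STD_MIN_LEN];
    memcpy(cud, f->data + STD_MIN_LEN + 1, *cud_len);
    return 0;
}

int main(void)
{
    const uint8_t ok[]  = { 0x10, 0x00, 0x0b, 3, 'a', 'b', 'c' };
    const uint8_t bad[] = { 0x10, 0x00, 0x0b, 200 };    /* lies about its length */
    struct frame f1 = { ok, sizeof(ok) }, f2 = { bad, sizeof(bad) };
    uint8_t cud[MAX_CUD_LEN];
    size_t n;

    if (parse(&f1, cud, &n) != 0 || n != 3)
        return 1;
    if (parse(&f2, cud, &n) == 0)       /* must be rejected */
        return 1;
    return 0;
}

The second frame advertises 200 bytes of call user data while carrying none, which is precisely the kind of malformed remote input the added pskb_may_pull() and X25_MAX_CUD_LEN checks are there to refuse.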