256 files changed, 1871 insertions, 1185 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 854ed5ca7e3..d6e6724446c 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -2706,10 +2706,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted. functions are at fixed addresses, they make nice targets for exploits that can control RIP. - emulate [default] Vsyscalls turn into traps and are - emulated reasonably safely. + emulate Vsyscalls turn into traps and are emulated + reasonably safely. - native Vsyscalls are native syscall instructions. + native [default] Vsyscalls are native syscall + instructions. This is a little bit faster than trapping and makes a few dynamic recompilers work better than they would in emulation mode. diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 81546990f41..ca5cdcd0f0e 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -1042,7 +1042,7 @@ conf/interface/*: The functional behaviour for certain settings is different depending on whether local forwarding is enabled or not. -accept_ra - BOOLEAN +accept_ra - INTEGER Accept Router Advertisements; autoconfigure using them. Possible values are: @@ -1106,7 +1106,7 @@ dad_transmits - INTEGER The amount of Duplicate Address Detection probes to send. Default: 1 -forwarding - BOOLEAN +forwarding - INTEGER Configure interface-specific Host/Router behaviour. Note: It is recommended to have the same setting on all diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt index 58fd7414e6c..fe67b5c79f0 100644 --- a/Documentation/networking/scaling.txt +++ b/Documentation/networking/scaling.txt @@ -27,7 +27,7 @@ applying a filter to each packet that assigns it to one of a small number of logical flows. Packets for each flow are steered to a separate receive queue, which in turn can be processed by separate CPUs. This mechanism is generally known as “Receive-side Scaling” (RSS). The goal of RSS and -the other scaling techniques to increase performance uniformly. +the other scaling techniques is to increase performance uniformly. Multi-queue distribution can also be used for traffic prioritization, but that is not the focus of these techniques. @@ -186,10 +186,10 @@ are steered using plain RPS. Multiple table entries may point to the same CPU. Indeed, with many flows and few CPUs, it is very likely that a single application thread handles flows with many different flow hashes. -rps_sock_table is a global flow table that contains the *desired* CPU for -flows: the CPU that is currently processing the flow in userspace. Each -table value is a CPU index that is updated during calls to recvmsg and -sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage() +rps_sock_flow_table is a global flow table that contains the *desired* CPU +for flows: the CPU that is currently processing the flow in userspace. +Each table value is a CPU index that is updated during calls to recvmsg +and sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage() and tcp_splice_read()). When the scheduler moves a thread to a new CPU while it has outstanding @@ -243,7 +243,7 @@ configured. 
The number of entries in the global flow table is set through: The number of entries in the per-queue flow table are set through: - /sys/class/net/<dev>/queues/tx-<n>/rps_flow_cnt + /sys/class/net/<dev>/queues/rx-<n>/rps_flow_cnt == Suggested Configuration diff --git a/MAINTAINERS b/MAINTAINERS index ae8820e173a..6185d051358 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2460,7 +2460,7 @@ S: Supported F: drivers/infiniband/hw/ehca/ EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER -M: Breno Leitao <leitao@linux.vnet.ibm.com> +M: Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com> L: netdev@vger.kernel.org S: Maintained F: drivers/net/ehea/ @@ -3313,7 +3313,7 @@ M: David Woodhouse <dwmw2@infradead.org> L: iommu@lists.linux-foundation.org T: git git://git.infradead.org/iommu-2.6.git S: Supported -F: drivers/pci/intel-iommu.c +F: drivers/iommu/intel-iommu.c F: include/linux/intel-iommu.h INTEL IOP-ADMA DMA DRIVER @@ -6366,15 +6366,14 @@ F: net/ipv4/tcp_lp.c TEGRA SUPPORT M: Colin Cross <ccross@android.com> -M: Erik Gilling <konkers@android.com> M: Olof Johansson <olof@lixom.net> +M: Stephen Warren <swarren@nvidia.com> L: linux-tegra@vger.kernel.org -T: git git://android.git.kernel.org/kernel/tegra.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/olof/tegra.git S: Supported F: arch/arm/mach-tegra TEHUTI ETHERNET DRIVER -M: Alexander Indenbaum <baum@tehutinetworks.net> M: Andy Gospodarek <andy@greyhouse.net> L: netdev@vger.kernel.org S: Supported @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 1 SUBLEVEL = 0 -EXTRAVERSION = -rc8 +EXTRAVERSION = NAME = "Divemaster Edition" # *DOCUMENTATION* diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 012ff5fbb7e..863c078ce2e 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -559,6 +559,7 @@ config ARCH_MMP select TICK_ONESHOT select PLAT_PXA select SPARSE_IRQ + select GENERIC_ALLOCATOR help Support for Marvell's PXA168/PXA910(MMP) and MMP2 processor line. 
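(Editorial aside, not part of the patch: the GENERIC_ALLOCATOR select added above is pulled in for the new MMP2 SRAM driver that appears later in this diff, arch/arm/mach-mmp/sram.c, which registers each SRAM bank as a genalloc pool under the name the board code supplies, "asram" and "isram" on brownstone. Below is a minimal sketch of how a client driver might consume such a pool; the function names and buffer handling are hypothetical, only sram_get_gpool(), gen_pool_alloc() and gen_pool_free() come from the patch and the existing genalloc API.)

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/genalloc.h>
#include <mach/sram.h>

/* Hypothetical consumer of the "asram" pool exported by sram_probe(). */
static unsigned long example_buf;

static int example_grab_asram(size_t size)
{
	struct gen_pool *pool = sram_get_gpool("asram");

	if (!pool)
		return -ENODEV;

	/*
	 * gen_pool_alloc() hands back the ioremapped virtual address that
	 * sram_probe() registered with gen_pool_add_virt(), carved out in
	 * SRAM_GRANULARITY-sized units, so the buffer is directly usable
	 * by the CPU.
	 */
	example_buf = gen_pool_alloc(pool, size);
	return example_buf ? 0 : -ENOMEM;
}

static void example_release_asram(size_t size)
{
	gen_pool_free(sram_get_gpool("asram"), example_buf, size);
}

(The 512-byte SRAM_GRANULARITY in mach/sram.h keeps internal fragmentation bounded for small mixed allocations while keeping the genalloc bitmap small.)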
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c index 7aa4262ada7..197f81c7735 100644 --- a/arch/arm/common/vic.c +++ b/arch/arm/common/vic.c @@ -259,7 +259,6 @@ static void __init vic_disable(void __iomem *base) writel(0, base + VIC_INT_SELECT); writel(0, base + VIC_INT_ENABLE); writel(~0, base + VIC_INT_ENABLE_CLEAR); - writel(0, base + VIC_IRQ_STATUS); writel(0, base + VIC_ITCR); writel(~0, base + VIC_INT_SOFT_CLEAR); } diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h index 080d74f8128..ff66638ff54 100644 --- a/arch/arm/include/asm/localtimer.h +++ b/arch/arm/include/asm/localtimer.h @@ -10,6 +10,8 @@ #ifndef __ASM_ARM_LOCALTIMER_H #define __ASM_ARM_LOCALTIMER_H +#include <linux/errno.h> + struct clock_event_device; /* diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 4c851834f68..6be3e2e4d83 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -321,8 +321,8 @@ static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = { [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE, - [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT, - [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS, + [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_DCACHE_ACCESS, + [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_DCACHE_REFILL, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, diff --git a/arch/arm/mach-mmp/Makefile b/arch/arm/mach-mmp/Makefile index b0ac942327a..169c6743bde 100644 --- a/arch/arm/mach-mmp/Makefile +++ b/arch/arm/mach-mmp/Makefile @@ -7,7 +7,7 @@ obj-y += common.o clock.o devices.o time.o # SoC support obj-$(CONFIG_CPU_PXA168) += pxa168.o irq-pxa168.o obj-$(CONFIG_CPU_PXA910) += pxa910.o irq-pxa168.o -obj-$(CONFIG_CPU_MMP2) += mmp2.o irq-mmp2.o +obj-$(CONFIG_CPU_MMP2) += mmp2.o irq-mmp2.o sram.o # board support obj-$(CONFIG_MACH_ASPENITE) += aspenite.o diff --git a/arch/arm/mach-mmp/brownstone.c b/arch/arm/mach-mmp/brownstone.c index c79162a50f2..59dcf9df153 100644 --- a/arch/arm/mach-mmp/brownstone.c +++ b/arch/arm/mach-mmp/brownstone.c @@ -186,6 +186,15 @@ static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc2 = { | PXA_FLAG_SD_8_BIT_CAPABLE_SLOT, }; +static struct sram_platdata mmp2_asram_platdata = { + .pool_name = "asram", + .granularity = SRAM_GRANULARITY, +}; + +static struct sram_platdata mmp2_isram_platdata = { + .pool_name = "isram", + .granularity = SRAM_GRANULARITY, +}; static void __init brownstone_init(void) { @@ -197,6 +206,8 @@ static void __init brownstone_init(void) mmp2_add_twsi(1, NULL, ARRAY_AND_SIZE(brownstone_twsi1_info)); mmp2_add_sdhost(0, &mmp2_sdh_platdata_mmc0); /* SD/MMC */ mmp2_add_sdhost(2, &mmp2_sdh_platdata_mmc2); /* eMMC */ + mmp2_add_asram(&mmp2_asram_platdata); + mmp2_add_isram(&mmp2_isram_platdata); /* enable 5v regulator */ platform_device_register(&brownstone_v_5vp_device); diff --git a/arch/arm/mach-mmp/include/mach/mmp2.h b/arch/arm/mach-mmp/include/mach/mmp2.h index de7b88826ad..2f7b2d3c2b1 100644 --- a/arch/arm/mach-mmp/include/mach/mmp2.h +++ b/arch/arm/mach-mmp/include/mach/mmp2.h @@ -13,6 +13,7 @@ extern void mmp2_clear_pmic_int(void); #include <linux/i2c.h> #include <linux/i2c/pxa-i2c.h> #include <mach/devices.h> +#include <mach/sram.h> extern struct pxa_device_desc mmp2_device_uart1; extern struct 
pxa_device_desc mmp2_device_uart2; @@ -28,6 +29,8 @@ extern struct pxa_device_desc mmp2_device_sdh0; extern struct pxa_device_desc mmp2_device_sdh1; extern struct pxa_device_desc mmp2_device_sdh2; extern struct pxa_device_desc mmp2_device_sdh3; +extern struct pxa_device_desc mmp2_device_asram; +extern struct pxa_device_desc mmp2_device_isram; static inline int mmp2_add_uart(int id) { @@ -85,5 +88,15 @@ static inline int mmp2_add_sdhost(int id, struct sdhci_pxa_platdata *data) return pxa_register_device(d, data, sizeof(*data)); } +static inline int mmp2_add_asram(struct sram_platdata *data) +{ + return pxa_register_device(&mmp2_device_asram, data, sizeof(*data)); +} + +static inline int mmp2_add_isram(struct sram_platdata *data) +{ + return pxa_register_device(&mmp2_device_isram, data, sizeof(*data)); +} + #endif /* __ASM_MACH_MMP2_H */ diff --git a/arch/arm/mach-mmp/include/mach/sram.h b/arch/arm/mach-mmp/include/mach/sram.h new file mode 100644 index 00000000000..239e0fc1bb1 --- /dev/null +++ b/arch/arm/mach-mmp/include/mach/sram.h @@ -0,0 +1,35 @@ +/* + * linux/arch/arm/mach-mmp/include/mach/sram.h + * + * SRAM Memory Management + * + * Copyright (c) 2011 Marvell Semiconductors Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __ASM_ARCH_SRAM_H +#define __ASM_ARCH_SRAM_H + +#include <linux/genalloc.h> + +/* ARBITRARY: SRAM allocations are multiples of this 2^N size */ +#define SRAM_GRANULARITY 512 + +enum sram_type { + MMP_SRAM_UNDEFINED = 0, + MMP_ASRAM, + MMP_ISRAM, +}; + +struct sram_platdata { + char *pool_name; + int granularity; +}; + +extern struct gen_pool *sram_get_gpool(char *pool_name); + +#endif /* __ASM_ARCH_SRAM_H */ diff --git a/arch/arm/mach-mmp/mmp2.c b/arch/arm/mach-mmp/mmp2.c index 079c18861d5..43266c44cb3 100644 --- a/arch/arm/mach-mmp/mmp2.c +++ b/arch/arm/mach-mmp/mmp2.c @@ -226,4 +226,7 @@ MMP2_DEVICE(sdh0, "sdhci-pxav3", 0, MMC, 0xd4280000, 0x120); MMP2_DEVICE(sdh1, "sdhci-pxav3", 1, MMC2, 0xd4280800, 0x120); MMP2_DEVICE(sdh2, "sdhci-pxav3", 2, MMC3, 0xd4281000, 0x120); MMP2_DEVICE(sdh3, "sdhci-pxav3", 3, MMC4, 0xd4281800, 0x120); +MMP2_DEVICE(asram, "asram", -1, NONE, 0xe0000000, 0x4000); +/* 0xd1000000 ~ 0xd101ffff is reserved for secure processor */ +MMP2_DEVICE(isram, "isram", -1, NONE, 0xd1020000, 0x18000); diff --git a/arch/arm/mach-mmp/sram.c b/arch/arm/mach-mmp/sram.c new file mode 100644 index 00000000000..4304f951937 --- /dev/null +++ b/arch/arm/mach-mmp/sram.c @@ -0,0 +1,168 @@ +/* + * linux/arch/arm/mach-mmp/sram.c + * + * based on mach-davinci/sram.c - DaVinci simple SRAM allocator + * + * Copyright (c) 2011 Marvell Semiconductors Inc. + * All Rights Reserved + * + * Add for mmp sram support - Leo Yan <leoy@marvell.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/io.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/genalloc.h> + +#include <mach/sram.h> + +struct sram_bank_info { + char *pool_name; + struct gen_pool *gpool; + int granularity; + + phys_addr_t sram_phys; + void __iomem *sram_virt; + u32 sram_size; + + struct list_head node; +}; + +static DEFINE_MUTEX(sram_lock); +static LIST_HEAD(sram_bank_list); + +struct gen_pool *sram_get_gpool(char *pool_name) +{ + struct sram_bank_info *info = NULL; + + if (!pool_name) + return NULL; + + mutex_lock(&sram_lock); + + list_for_each_entry(info, &sram_bank_list, node) + if (!strcmp(pool_name, info->pool_name)) + break; + + mutex_unlock(&sram_lock); + + if (&info->node == &sram_bank_list) + return NULL; + + return info->gpool; +} +EXPORT_SYMBOL(sram_get_gpool); + +static int __devinit sram_probe(struct platform_device *pdev) +{ + struct sram_platdata *pdata = pdev->dev.platform_data; + struct sram_bank_info *info; + struct resource *res; + int ret = 0; + + if (!pdata && !pdata->pool_name) + return -ENODEV; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(&pdev->dev, "no memory resource defined\n"); + ret = -ENODEV; + goto out; + } + + if (!resource_size(res)) + return 0; + + info->sram_phys = (phys_addr_t)res->start; + info->sram_size = resource_size(res); + info->sram_virt = ioremap(info->sram_phys, info->sram_size); + info->pool_name = kstrdup(pdata->pool_name, GFP_KERNEL); + info->granularity = pdata->granularity; + + info->gpool = gen_pool_create(ilog2(info->granularity), -1); + if (!info->gpool) { + dev_err(&pdev->dev, "create pool failed\n"); + ret = -ENOMEM; + goto create_pool_err; + } + + ret = gen_pool_add_virt(info->gpool, (unsigned long)info->sram_virt, + info->sram_phys, info->sram_size, -1); + if (ret < 0) { + dev_err(&pdev->dev, "add new chunk failed\n"); + ret = -ENOMEM; + goto add_chunk_err; + } + + mutex_lock(&sram_lock); + list_add(&info->node, &sram_bank_list); + mutex_unlock(&sram_lock); + + platform_set_drvdata(pdev, info); + + dev_info(&pdev->dev, "initialized\n"); + return 0; + +add_chunk_err: + gen_pool_destroy(info->gpool); +create_pool_err: + iounmap(info->sram_virt); + kfree(info->pool_name); +out: + kfree(info); + return ret; +} + +static int __devexit sram_remove(struct platform_device *pdev) +{ + struct sram_bank_info *info; + + info = platform_get_drvdata(pdev); + if (info == NULL) + return -ENODEV; + + mutex_lock(&sram_lock); + list_del(&info->node); + mutex_unlock(&sram_lock); + + gen_pool_destroy(info->gpool); + iounmap(info->sram_virt); + kfree(info->pool_name); + kfree(info); + return 0; +} + +static const struct platform_device_id sram_id_table[] = { + { "asram", MMP_ASRAM }, + { "isram", MMP_ISRAM }, + { } +}; + +static struct platform_driver sram_driver = { + .probe = sram_probe, + .remove = sram_remove, + .driver = { + .name = "mmp-sram", + }, + .id_table = sram_id_table, +}; + +static int __init sram_init(void) +{ + return platform_driver_register(&sram_driver); +} +core_initcall(sram_init); + +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c index 2028464cf5b..f79b7d2a8ed 100644 --- a/arch/arm/mach-omap2/board-2430sdp.c +++ b/arch/arm/mach-omap2/board-2430sdp.c @@ -193,7 +193,8 @@ static int __init omap2430_i2c_init(void) { omap_register_i2c_bus(1, 100, 
sdp2430_i2c1_boardinfo, ARRAY_SIZE(sdp2430_i2c1_boardinfo)); - omap2_pmic_init("twl4030", &sdp2430_twldata); + omap_pmic_init(2, 100, "twl4030", INT_24XX_SYS_NIRQ, + &sdp2430_twldata); return 0; } diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c index a9b45c76e1d..097a42d81e5 100644 --- a/arch/arm/mach-omap2/hsmmc.c +++ b/arch/arm/mach-omap2/hsmmc.c @@ -137,8 +137,7 @@ static void omap4_hsmmc1_before_set_reg(struct device *dev, int slot, */ reg = omap4_ctrl_pad_readl(control_pbias_offset); reg &= ~(OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK | - OMAP4_MMC1_PWRDNZ_MASK | - OMAP4_USBC1_ICUSB_PWRDNZ_MASK); + OMAP4_MMC1_PWRDNZ_MASK); omap4_ctrl_pad_writel(reg, control_pbias_offset); } @@ -156,8 +155,7 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot, else reg |= OMAP4_MMC1_PBIASLITE_VMODE_MASK; reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK | - OMAP4_MMC1_PWRDNZ_MASK | - OMAP4_USBC1_ICUSB_PWRDNZ_MASK); + OMAP4_MMC1_PWRDNZ_MASK); omap4_ctrl_pad_writel(reg, control_pbias_offset); timeout = jiffies + msecs_to_jiffies(5); @@ -171,16 +169,14 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot, if (reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK) { pr_err("Pbias Voltage is not same as LDO\n"); /* Caution : On VMODE_ERROR Power Down MMC IO */ - reg &= ~(OMAP4_MMC1_PWRDNZ_MASK | - OMAP4_USBC1_ICUSB_PWRDNZ_MASK); + reg &= ~(OMAP4_MMC1_PWRDNZ_MASK); omap4_ctrl_pad_writel(reg, control_pbias_offset); } } else { reg = omap4_ctrl_pad_readl(control_pbias_offset); reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK | OMAP4_MMC1_PWRDNZ_MASK | - OMAP4_MMC1_PBIASLITE_VMODE_MASK | - OMAP4_USBC1_ICUSB_PWRDNZ_MASK); + OMAP4_MMC1_PBIASLITE_VMODE_MASK); omap4_ctrl_pad_writel(reg, control_pbias_offset); } } diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c index a65145b02a5..19e4dac62a8 100644 --- a/arch/arm/mach-omap2/usb-musb.c +++ b/arch/arm/mach-omap2/usb-musb.c @@ -137,9 +137,6 @@ void __init usb_musb_init(struct omap_musb_board_data *musb_board_data) musb_plat.mode = board_data->mode; musb_plat.extvbus = board_data->extvbus; - if (cpu_is_omap44xx()) - omap4430_phy_init(dev); - if (cpu_is_omap3517() || cpu_is_omap3505()) { oh_name = "am35x_otg_hs"; name = "musb-am35x"; diff --git a/arch/arm/mach-s3c2410/s3c2410.c b/arch/arm/mach-s3c2410/s3c2410.c index a99c2f4a523..3d7ebc557a7 100644 --- a/arch/arm/mach-s3c2410/s3c2410.c +++ b/arch/arm/mach-s3c2410/s3c2410.c @@ -170,7 +170,9 @@ int __init s3c2410_init(void) { printk("S3C2410: Initialising architecture\n"); +#ifdef CONFIG_PM register_syscore_ops(&s3c2410_pm_syscore_ops); +#endif register_syscore_ops(&s3c24xx_irq_syscore_ops); return sysdev_register(&s3c2410_sysdev); diff --git a/arch/arm/mach-s3c2412/s3c2412.c b/arch/arm/mach-s3c2412/s3c2412.c index ef0958d3e5c..57a1e01e4e5 100644 --- a/arch/arm/mach-s3c2412/s3c2412.c +++ b/arch/arm/mach-s3c2412/s3c2412.c @@ -245,7 +245,9 @@ int __init s3c2412_init(void) { printk("S3C2412: Initialising architecture\n"); +#ifdef CONFIG_PM register_syscore_ops(&s3c2412_pm_syscore_ops); +#endif register_syscore_ops(&s3c24xx_irq_syscore_ops); return sysdev_register(&s3c2412_sysdev); diff --git a/arch/arm/mach-s3c2416/s3c2416.c b/arch/arm/mach-s3c2416/s3c2416.c index 081ef4cb868..ee214bc83c8 100644 --- a/arch/arm/mach-s3c2416/s3c2416.c +++ b/arch/arm/mach-s3c2416/s3c2416.c @@ -100,7 +100,9 @@ int __init s3c2416_init(void) s3c_adc_setname("s3c2416-adc"); +#ifdef CONFIG_PM register_syscore_ops(&s3c2416_pm_syscore_ops); +#endif register_syscore_ops(&s3c24xx_irq_syscore_ops); return 
sysdev_register(&s3c2416_sysdev); diff --git a/arch/arm/mach-s3c2440/s3c2440.c b/arch/arm/mach-s3c2440/s3c2440.c index fc84e481efc..37f8cc6aabd 100644 --- a/arch/arm/mach-s3c2440/s3c2440.c +++ b/arch/arm/mach-s3c2440/s3c2440.c @@ -55,7 +55,9 @@ int __init s3c2440_init(void) /* register suspend/resume handlers */ +#ifdef CONFIG_PM register_syscore_ops(&s3c2410_pm_syscore_ops); +#endif register_syscore_ops(&s3c244x_pm_syscore_ops); register_syscore_ops(&s3c24xx_irq_syscore_ops); diff --git a/arch/arm/mach-s3c2440/s3c2442.c b/arch/arm/mach-s3c2440/s3c2442.c index 48e273ce9f9..2c822e09392 100644 --- a/arch/arm/mach-s3c2440/s3c2442.c +++ b/arch/arm/mach-s3c2440/s3c2442.c @@ -169,7 +169,9 @@ int __init s3c2442_init(void) { printk("S3C2442: Initialising architecture\n"); +#ifdef CONFIG_PM register_syscore_ops(&s3c2410_pm_syscore_ops); +#endif register_syscore_ops(&s3c244x_pm_syscore_ops); register_syscore_ops(&s3c24xx_irq_syscore_ops); diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c index 0e1016a827a..0e0fd4d889b 100644 --- a/arch/arm/mach-tegra/cpu-tegra.c +++ b/arch/arm/mach-tegra/cpu-tegra.c @@ -32,7 +32,6 @@ #include <asm/system.h> -#include <mach/hardware.h> #include <mach/clk.h> /* Frequency table index must be sequential starting at 0 */ diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig index 4210cb434db..a3e0c8692f0 100644 --- a/arch/arm/mach-ux500/Kconfig +++ b/arch/arm/mach-ux500/Kconfig @@ -6,6 +6,7 @@ config UX500_SOC_COMMON select ARM_GIC select HAS_MTU select ARM_ERRATA_753970 + select ARM_ERRATA_754322 menu "Ux500 SoC" diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index cc7e2d8be9a..f8037ba338a 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -496,6 +496,13 @@ static void __init free_unused_memmap(struct meminfo *mi) */ bank_start = min(bank_start, ALIGN(prev_bank_end, PAGES_PER_SECTION)); +#else + /* + * Align down here since the VM subsystem insists that the + * memmap entries are valid from the bank start aligned to + * MAX_ORDER_NR_PAGES. 
+ */ + bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES); #endif /* * If we had a previous bank, and there is a space diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c index a566523d34e..1fdfaa4599c 100644 --- a/arch/arm/plat-s5p/irq-gpioint.c +++ b/arch/arm/plat-s5p/irq-gpioint.c @@ -163,9 +163,9 @@ static __init int s5p_gpioint_add(struct samsung_gpio_chip *chip) ct->chip.irq_mask = irq_gc_mask_set_bit; ct->chip.irq_unmask = irq_gc_mask_clr_bit; ct->chip.irq_set_type = s5p_gpioint_set_type, - ct->regs.ack = PEND_OFFSET + REG_OFFSET(chip->group); - ct->regs.mask = MASK_OFFSET + REG_OFFSET(chip->group); - ct->regs.type = CON_OFFSET + REG_OFFSET(chip->group); + ct->regs.ack = PEND_OFFSET + REG_OFFSET(group - bank->start); + ct->regs.mask = MASK_OFFSET + REG_OFFSET(group - bank->start); + ct->regs.type = CON_OFFSET + REG_OFFSET(group - bank->start); irq_setup_generic_chip(gc, IRQ_MSK(chip->chip.ngpio), IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST | IRQ_NOPROBE, 0); diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 177cdaf8356..b122adc8bdb 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -24,6 +24,7 @@ config MIPS select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW select HAVE_ARCH_JUMP_LABEL + select IRQ_FORCED_THREADING menu "Machine selection" @@ -722,6 +723,7 @@ config CAVIUM_OCTEON_SIMULATOR select SYS_SUPPORTS_HIGHMEM select SYS_SUPPORTS_HOTPLUG_CPU select SYS_HAS_CPU_CAVIUM_OCTEON + select HOLES_IN_ZONE help The Octeon simulator is software performance model of the Cavium Octeon Processor. It supports simulating Octeon processors on x86 @@ -744,6 +746,7 @@ config CAVIUM_OCTEON_REFERENCE_BOARD select ZONE_DMA32 select USB_ARCH_HAS_OHCI select USB_ARCH_HAS_EHCI + select HOLES_IN_ZONE help This option supports all of the Octeon reference boards from Cavium Networks. It builds a kernel that dynamically determines the Octeon @@ -973,6 +976,9 @@ config ISA_DMA_API config GENERIC_GPIO bool +config HOLES_IN_ZONE + bool + # # Endianess selection. Sufficiently obscure so many users don't know what to # answer,so we try hard to limit the available choices. 
Also the use of a diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c index 3b2c18b1434..f72c48d4804 100644 --- a/arch/mips/alchemy/common/platform.c +++ b/arch/mips/alchemy/common/platform.c @@ -492,7 +492,7 @@ static void __init alchemy_setup_macs(int ctype) memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6); ret = platform_device_register(&au1xxx_eth0_device); - if (!ret) + if (ret) printk(KERN_INFO "Alchemy: failed to register MAC0\n"); diff --git a/arch/mips/alchemy/common/power.c b/arch/mips/alchemy/common/power.c index 647e518c90b..b86324a4260 100644 --- a/arch/mips/alchemy/common/power.c +++ b/arch/mips/alchemy/common/power.c @@ -158,15 +158,21 @@ static void restore_core_regs(void) void au_sleep(void) { - int cpuid = alchemy_get_cputype(); - if (cpuid != ALCHEMY_CPU_UNKNOWN) { - save_core_regs(); - if (cpuid <= ALCHEMY_CPU_AU1500) - alchemy_sleep_au1000(); - else if (cpuid <= ALCHEMY_CPU_AU1200) - alchemy_sleep_au1550(); - restore_core_regs(); + save_core_regs(); + + switch (alchemy_get_cputype()) { + case ALCHEMY_CPU_AU1000: + case ALCHEMY_CPU_AU1500: + case ALCHEMY_CPU_AU1100: + alchemy_sleep_au1000(); + break; + case ALCHEMY_CPU_AU1550: + case ALCHEMY_CPU_AU1200: + alchemy_sleep_au1550(); + break; } + + restore_core_regs(); } #endif /* CONFIG_PM */ diff --git a/arch/mips/alchemy/devboards/bcsr.c b/arch/mips/alchemy/devboards/bcsr.c index 596ad00e7f0..463d2c4d944 100644 --- a/arch/mips/alchemy/devboards/bcsr.c +++ b/arch/mips/alchemy/devboards/bcsr.c @@ -89,8 +89,12 @@ static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d) { unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT); + disable_irq_nosync(irq); + for ( ; bisr; bisr &= bisr - 1) generic_handle_irq(bcsr_csc_base + __ffs(bisr)); + + enable_irq(irq); } /* NOTE: both the enable and mask bits must be cleared, otherwise the diff --git a/arch/mips/alchemy/devboards/db1200/setup.c b/arch/mips/alchemy/devboards/db1200/setup.c index 1dac4f27d33..4a8980027ec 100644 --- a/arch/mips/alchemy/devboards/db1200/setup.c +++ b/arch/mips/alchemy/devboards/db1200/setup.c @@ -23,13 +23,6 @@ void __init board_setup(void) unsigned long freq0, clksrc, div, pfc; unsigned short whoami; - /* Set Config[OD] (disable overlapping bus transaction): - * This gets rid of a _lot_ of spurious interrupts (especially - * wrt. IDE); but incurs ~10% performance hit in some - * cpu-bound applications. 
- */ - set_c0_config(1 << 19); - bcsr_init(DB1200_BCSR_PHYS_ADDR, DB1200_BCSR_PHYS_ADDR + DB1200_BCSR_HEXLED_OFS); diff --git a/arch/mips/ar7/irq.c b/arch/mips/ar7/irq.c index 03db3daadbd..88c4babfdb5 100644 --- a/arch/mips/ar7/irq.c +++ b/arch/mips/ar7/irq.c @@ -98,7 +98,8 @@ static struct irq_chip ar7_sec_irq_type = { static struct irqaction ar7_cascade_action = { .handler = no_action, - .name = "AR7 cascade interrupt" + .name = "AR7 cascade interrupt", + .flags = IRQF_NO_THREAD, }; static void __init ar7_irq_init(int base) diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c index cea6021cb8d..162e11b4ed7 100644 --- a/arch/mips/bcm63xx/irq.c +++ b/arch/mips/bcm63xx/irq.c @@ -222,6 +222,7 @@ static struct irq_chip bcm63xx_external_irq_chip = { static struct irqaction cpu_ip2_cascade_action = { .handler = no_action, .name = "cascade_ip2", + .flags = IRQF_NO_THREAD, }; void __init arch_init_irq(void) diff --git a/arch/mips/cobalt/irq.c b/arch/mips/cobalt/irq.c index cb9bf820fe5..965c777d356 100644 --- a/arch/mips/cobalt/irq.c +++ b/arch/mips/cobalt/irq.c @@ -48,6 +48,7 @@ asmlinkage void plat_irq_dispatch(void) static struct irqaction cascade = { .handler = no_action, .name = "cascade", + .flags = IRQF_NO_THREAD, }; void __init arch_init_irq(void) diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c index fa45e924be0..f7b7ba6d5c4 100644 --- a/arch/mips/dec/setup.c +++ b/arch/mips/dec/setup.c @@ -101,20 +101,24 @@ int cpu_fpu_mask = DEC_CPU_IRQ_MASK(DEC_CPU_INR_FPU); static struct irqaction ioirq = { .handler = no_action, .name = "cascade", + .flags = IRQF_NO_THREAD, }; static struct irqaction fpuirq = { .handler = no_action, .name = "fpu", + .flags = IRQF_NO_THREAD, }; static struct irqaction busirq = { .flags = IRQF_DISABLED, .name = "bus error", + .flags = IRQF_NO_THREAD, }; static struct irqaction haltirq = { .handler = dec_intr_halt, .name = "halt", + .flags = IRQF_NO_THREAD, }; diff --git a/arch/mips/emma/markeins/irq.c b/arch/mips/emma/markeins/irq.c index 3dbd7a5a6ad..7798887a128 100644 --- a/arch/mips/emma/markeins/irq.c +++ b/arch/mips/emma/markeins/irq.c @@ -169,7 +169,7 @@ void emma2rh_gpio_irq_init(void) static struct irqaction irq_cascade = { .handler = no_action, - .flags = 0, + .flags = IRQF_NO_THREAD, .name = "cascade", .dev_id = NULL, .next = NULL, diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h index 0d5a42b5f47..a58addb98cf 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h @@ -54,7 +54,6 @@ #define cpu_has_mips_r2_exec_hazard 0 #define cpu_has_dsp 0 #define cpu_has_mipsmt 0 -#define cpu_has_userlocal 0 #define cpu_has_vint 0 #define cpu_has_veic 0 #define cpu_hwrena_impl_bits 0xc0000000 diff --git a/arch/mips/include/asm/mach-powertv/dma-coherence.h b/arch/mips/include/asm/mach-powertv/dma-coherence.h index 62c09408594..35371641575 100644 --- a/arch/mips/include/asm/mach-powertv/dma-coherence.h +++ b/arch/mips/include/asm/mach-powertv/dma-coherence.h @@ -13,7 +13,6 @@ #define __ASM_MACH_POWERTV_DMA_COHERENCE_H #include <linux/sched.h> -#include <linux/version.h> #include <linux/device.h> #include <asm/mach-powertv/asic.h> diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index b4ba2449444..cb41af5f340 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h @@ -195,9 +195,9 @@ * to cover the 
pipeline delay. */ .set mips32 - mfc0 v1, CP0_TCSTATUS + mfc0 k0, CP0_TCSTATUS .set mips0 - LONG_S v1, PT_TCSTATUS(sp) + LONG_S k0, PT_TCSTATUS(sp) #endif /* CONFIG_MIPS_MT_SMTC */ LONG_S $4, PT_R4(sp) LONG_S $5, PT_R5(sp) diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c index 73031f7fc82..4397972949f 100644 --- a/arch/mips/jz4740/gpio.c +++ b/arch/mips/jz4740/gpio.c @@ -18,7 +18,7 @@ #include <linux/init.h> #include <linux/spinlock.h> -#include <linux/sysdev.h> +#include <linux/syscore_ops.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/delay.h> @@ -86,7 +86,6 @@ struct jz_gpio_chip { spinlock_t lock; struct gpio_chip gpio_chip; - struct sys_device sysdev; }; static struct jz_gpio_chip jz4740_gpio_chips[]; @@ -459,49 +458,47 @@ static struct jz_gpio_chip jz4740_gpio_chips[] = { JZ4740_GPIO_CHIP(D), }; -static inline struct jz_gpio_chip *sysdev_to_chip(struct sys_device *dev) +static void jz4740_gpio_suspend_chip(struct jz_gpio_chip *chip) { - return container_of(dev, struct jz_gpio_chip, sysdev); + chip->suspend_mask = readl(chip->base + JZ_REG_GPIO_MASK); + writel(~(chip->wakeup), chip->base + JZ_REG_GPIO_MASK_SET); + writel(chip->wakeup, chip->base + JZ_REG_GPIO_MASK_CLEAR); } -static int jz4740_gpio_suspend(struct sys_device *dev, pm_message_t state) +static int jz4740_gpio_suspend(void) { - struct jz_gpio_chip *chip = sysdev_to_chip(dev); + int i; - chip->suspend_mask = readl(chip->base + JZ_REG_GPIO_MASK); - writel(~(chip->wakeup), chip->base + JZ_REG_GPIO_MASK_SET); - writel(chip->wakeup, chip->base + JZ_REG_GPIO_MASK_CLEAR); + for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); i++) + jz4740_gpio_suspend_chip(&jz4740_gpio_chips[i]); return 0; } -static int jz4740_gpio_resume(struct sys_device *dev) +static void jz4740_gpio_resume_chip(struct jz_gpio_chip *chip) { - struct jz_gpio_chip *chip = sysdev_to_chip(dev); uint32_t mask = chip->suspend_mask; writel(~mask, chip->base + JZ_REG_GPIO_MASK_CLEAR); writel(mask, chip->base + JZ_REG_GPIO_MASK_SET); +} - return 0; +static void jz4740_gpio_resume(void) +{ + int i; + + for (i = ARRAY_SIZE(jz4740_gpio_chips) - 1; i >= 0 ; i--) + jz4740_gpio_resume_chip(&jz4740_gpio_chips[i]); } -static struct sysdev_class jz4740_gpio_sysdev_class = { - .name = "gpio", +static struct syscore_ops jz4740_gpio_syscore_ops = { .suspend = jz4740_gpio_suspend, .resume = jz4740_gpio_resume, }; -static int jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id) +static void jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id) { - int ret, irq; - - chip->sysdev.id = id; - chip->sysdev.cls = &jz4740_gpio_sysdev_class; - ret = sysdev_register(&chip->sysdev); - - if (ret) - return ret; + int irq; spin_lock_init(&chip->lock); @@ -519,22 +516,17 @@ static int jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id) irq_set_chip_and_handler(irq, &jz_gpio_irq_chip, handle_level_irq); } - - return 0; } static int __init jz4740_gpio_init(void) { unsigned int i; - int ret; - - ret = sysdev_class_register(&jz4740_gpio_sysdev_class); - if (ret) - return ret; for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i) jz4740_gpio_chip_init(&jz4740_gpio_chips[i], i); + register_syscore_ops(&jz4740_gpio_syscore_ops); + printk(KERN_INFO "JZ4740 GPIO initialized\n"); return 0; diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index feb8021a305..6a2d758dd8e 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -19,6 +19,26 @@ #include <asm-generic/sections.h> +#if 
defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) +#define MCOUNT_OFFSET_INSNS 5 +#else +#define MCOUNT_OFFSET_INSNS 4 +#endif + +/* + * Check if the address is in kernel space + * + * Clone core_kernel_text() from kernel/extable.c, but doesn't call + * init_kernel_text() for Ftrace doesn't trace functions in init sections. + */ +static inline int in_kernel_space(unsigned long ip) +{ + if (ip >= (unsigned long)_stext && + ip <= (unsigned long)_etext) + return 1; + return 0; +} + #ifdef CONFIG_DYNAMIC_FTRACE #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ @@ -54,20 +74,6 @@ static inline void ftrace_dyn_arch_init_insns(void) #endif } -/* - * Check if the address is in kernel space - * - * Clone core_kernel_text() from kernel/extable.c, but doesn't call - * init_kernel_text() for Ftrace doesn't trace functions in init sections. - */ -static inline int in_kernel_space(unsigned long ip) -{ - if (ip >= (unsigned long)_stext && - ip <= (unsigned long)_etext) - return 1; - return 0; -} - static int ftrace_modify_code(unsigned long ip, unsigned int new_code) { int faulted; @@ -112,11 +118,6 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code) * 1: offset = 4 instructions */ -#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) -#define MCOUNT_OFFSET_INSNS 5 -#else -#define MCOUNT_OFFSET_INSNS 4 -#endif #define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS) int ftrace_make_nop(struct module *mod, diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c index 5c74eb797f0..32b397b646e 100644 --- a/arch/mips/kernel/i8259.c +++ b/arch/mips/kernel/i8259.c @@ -229,7 +229,7 @@ static void i8259A_shutdown(void) */ if (i8259A_auto_eoi >= 0) { outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ - outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */ + outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ } } @@ -295,6 +295,7 @@ static void init_8259A(int auto_eoi) static struct irqaction irq2 = { .handler = no_action, .name = "cascade", + .flags = IRQF_NO_THREAD, }; static struct resource pic1_io_resource = { diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 876a75cc376..922a554cd10 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c @@ -349,3 +349,10 @@ SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags, return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4), dfd, pathname); } + +SYSCALL_DEFINE6(32_futex, u32 __user *, uaddr, int, op, u32, val, + struct compat_timespec __user *, utime, u32 __user *, uaddr2, + u32, val3) +{ + return compat_sys_futex(uaddr, op, val, utime, uaddr2, val3); +} diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index f9296e894e4..6de1f598346 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -315,7 +315,7 @@ EXPORT(sysn32_call_table) PTR sys_fremovexattr PTR sys_tkill PTR sys_ni_syscall - PTR compat_sys_futex + PTR sys_32_futex PTR compat_sys_sched_setaffinity /* 6195 */ PTR compat_sys_sched_getaffinity PTR sys_cacheflush diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 4d7c9827706..1d813169e45 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -441,7 +441,7 @@ sys_call_table: PTR sys_fremovexattr /* 4235 */ PTR sys_tkill PTR sys_sendfile64 - PTR compat_sys_futex + PTR sys_32_futex PTR compat_sys_sched_setaffinity PTR compat_sys_sched_getaffinity /* 4240 */ PTR compat_sys_io_setup diff --git a/arch/mips/kernel/signal.c 
b/arch/mips/kernel/signal.c index dbbe0ce48d8..f8524003676 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -8,6 +8,7 @@ * Copyright (C) 1999, 2000 Silicon Graphics, Inc. */ #include <linux/cache.h> +#include <linux/irqflags.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/personality.h> @@ -658,6 +659,8 @@ static void do_signal(struct pt_regs *regs) asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) { + local_irq_enable(); + /* deal with pending signal delivery */ if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) do_signal(regs); diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index b7517e3abc8..cbea618af0b 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -14,6 +14,7 @@ #include <linux/bug.h> #include <linux/compiler.h> #include <linux/init.h> +#include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/sched.h> @@ -364,21 +365,26 @@ static int regs_to_trapnr(struct pt_regs *regs) return (regs->cp0_cause >> 2) & 0x1f; } -static DEFINE_SPINLOCK(die_lock); +static DEFINE_RAW_SPINLOCK(die_lock); void __noreturn die(const char *str, struct pt_regs *regs) { static int die_counter; int sig = SIGSEGV; #ifdef CONFIG_MIPS_MT_SMTC - unsigned long dvpret = dvpe(); + unsigned long dvpret; #endif /* CONFIG_MIPS_MT_SMTC */ + oops_enter(); + if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP) sig = 0; console_verbose(); - spin_lock_irq(&die_lock); + raw_spin_lock_irq(&die_lock); +#ifdef CONFIG_MIPS_MT_SMTC + dvpret = dvpe(); +#endif /* CONFIG_MIPS_MT_SMTC */ bust_spinlocks(1); #ifdef CONFIG_MIPS_MT_SMTC mips_mt_regdump(dvpret); @@ -387,7 +393,9 @@ void __noreturn die(const char *str, struct pt_regs *regs) printk("%s[#%d]:\n", str, ++die_counter); show_registers(regs); add_taint(TAINT_DIE); - spin_unlock_irq(&die_lock); + raw_spin_unlock_irq(&die_lock); + + oops_exit(); if (in_interrupt()) panic("Fatal exception in interrupt"); diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 2cd50ad0d5c..3efcb065f78 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -192,7 +192,7 @@ static struct tc *get_tc(int index) } spin_unlock(&vpecontrol.tc_list_lock); - return NULL; + return res; } /* allocate a vpe and associate it with this minor (or index) */ diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index fc89795cafd..f9737bb3c5a 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -123,11 +123,10 @@ void ltq_enable_irq(struct irq_data *d) static unsigned int ltq_startup_eiu_irq(struct irq_data *d) { int i; - int irq_nr = d->irq - INT_NUM_IRQ0; ltq_enable_irq(d); for (i = 0; i < MAX_EIU; i++) { - if (irq_nr == ltq_eiu_irq[i]) { + if (d->irq == ltq_eiu_irq[i]) { /* low level - we should really handle set_type */ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) | (0x6 << (i * 4)), LTQ_EIU_EXIN_C); @@ -147,11 +146,10 @@ static unsigned int ltq_startup_eiu_irq(struct irq_data *d) static void ltq_shutdown_eiu_irq(struct irq_data *d) { int i; - int irq_nr = d->irq - INT_NUM_IRQ0; ltq_disable_irq(d); for (i = 0; i < MAX_EIU; i++) { - if (irq_nr == ltq_eiu_irq[i]) { + if (d->irq == ltq_eiu_irq[i]) { /* disable */ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i), LTQ_EIU_EXIN_INEN); diff --git a/arch/mips/lantiq/xway/ebu.c b/arch/mips/lantiq/xway/ebu.c index 66eb52fa50a..033b3184c7a 100644 --- a/arch/mips/lantiq/xway/ebu.c +++ b/arch/mips/lantiq/xway/ebu.c @@ -10,7 
+10,6 @@ #include <linux/kernel.h> #include <linux/module.h> -#include <linux/version.h> #include <linux/ioport.h> #include <lantiq_soc.h> diff --git a/arch/mips/lantiq/xway/pmu.c b/arch/mips/lantiq/xway/pmu.c index 9d69f01e352..39f0d2641cb 100644 --- a/arch/mips/lantiq/xway/pmu.c +++ b/arch/mips/lantiq/xway/pmu.c @@ -8,7 +8,6 @@ #include <linux/kernel.h> #include <linux/module.h> -#include <linux/version.h> #include <linux/ioport.h> #include <lantiq_soc.h> diff --git a/arch/mips/lasat/interrupt.c b/arch/mips/lasat/interrupt.c index de4c165515d..d608b6ef0ed 100644 --- a/arch/mips/lasat/interrupt.c +++ b/arch/mips/lasat/interrupt.c @@ -105,6 +105,7 @@ asmlinkage void plat_irq_dispatch(void) static struct irqaction cascade = { .handler = no_action, .name = "cascade", + .flags = IRQF_NO_THREAD, }; void __init arch_init_irq(void) diff --git a/arch/mips/loongson/fuloong-2e/irq.c b/arch/mips/loongson/fuloong-2e/irq.c index d61a04222b8..3cf1fef29f0 100644 --- a/arch/mips/loongson/fuloong-2e/irq.c +++ b/arch/mips/loongson/fuloong-2e/irq.c @@ -42,6 +42,7 @@ asmlinkage void mach_irq_dispatch(unsigned int pending) static struct irqaction cascade_irqaction = { .handler = no_action, .name = "cascade", + .flags = IRQF_NO_THREAD, }; void __init mach_init_irq(void) diff --git a/arch/mips/loongson/lemote-2f/irq.c b/arch/mips/loongson/lemote-2f/irq.c index 081db102bb9..14b081841b6 100644 --- a/arch/mips/loongson/lemote-2f/irq.c +++ b/arch/mips/loongson/lemote-2f/irq.c @@ -96,12 +96,13 @@ static irqreturn_t ip6_action(int cpl, void *dev_id) struct irqaction ip6_irqaction = { .handler = ip6_action, .name = "cascade", - .flags = IRQF_SHARED, + .flags = IRQF_SHARED | IRQF_NO_THREAD, }; struct irqaction cascade_irqaction = { .handler = no_action, .name = "cascade", + .flags = IRQF_NO_THREAD, }; void __init mach_init_irq(void) diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index 9ff5d0fac55..302d779d5b0 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -6,6 +6,7 @@ * Copyright (C) 2011 Wind River Systems, * written by Ralf Baechle <ralf@linux-mips.org> */ +#include <linux/compiler.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/mman.h> @@ -15,12 +16,11 @@ #include <linux/sched.h> unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ - EXPORT_SYMBOL(shm_align_mask); /* gap between mmap and stack */ #define MIN_GAP (128*1024*1024UL) -#define MAX_GAP ((TASK_SIZE)/6*5) +#define MAX_GAP ((TASK_SIZE)/6*5) static int mmap_is_legacy(void) { @@ -57,13 +57,13 @@ static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr, return base - off; } -#define COLOUR_ALIGN(addr,pgoff) \ +#define COLOUR_ALIGN(addr, pgoff) \ ((((addr) + shm_align_mask) & ~shm_align_mask) + \ (((pgoff) << PAGE_SHIFT) & shm_align_mask)) enum mmap_allocation_direction {UP, DOWN}; -static unsigned long arch_get_unmapped_area_foo(struct file *filp, +static unsigned long arch_get_unmapped_area_common(struct file *filp, unsigned long addr0, unsigned long len, unsigned long pgoff, unsigned long flags, enum mmap_allocation_direction dir) { @@ -103,16 +103,16 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vma->vm_start)) return addr; } if (dir == UP) { addr = mm->mmap_base; - if (do_color_align) - addr = COLOUR_ALIGN(addr, pgoff); - else - addr = PAGE_ALIGN(addr); + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(addr); for (vma = 
find_vma(current->mm, addr); ; vma = vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). */ @@ -131,28 +131,30 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp, mm->free_area_cache = mm->mmap_base; } - /* either no address requested or can't fit in requested address hole */ + /* + * either no address requested, or the mapping can't fit into + * the requested address hole + */ addr = mm->free_area_cache; - if (do_color_align) { - unsigned long base = - COLOUR_ALIGN_DOWN(addr - len, pgoff); - + if (do_color_align) { + unsigned long base = + COLOUR_ALIGN_DOWN(addr - len, pgoff); addr = base + len; - } + } /* make sure it can fit in the remaining address space */ if (likely(addr > len)) { vma = find_vma(mm, addr - len); if (!vma || addr <= vma->vm_start) { - /* remember the address as a hint for next time */ - return mm->free_area_cache = addr-len; + /* cache the address as a hint for next time */ + return mm->free_area_cache = addr - len; } } if (unlikely(mm->mmap_base < len)) goto bottomup; - addr = mm->mmap_base-len; + addr = mm->mmap_base - len; if (do_color_align) addr = COLOUR_ALIGN_DOWN(addr, pgoff); @@ -163,8 +165,8 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp, * return with success: */ vma = find_vma(mm, addr); - if (likely(!vma || addr+len <= vma->vm_start)) { - /* remember the address as a hint for next time */ + if (likely(!vma || addr + len <= vma->vm_start)) { + /* cache the address as a hint for next time */ return mm->free_area_cache = addr; } @@ -173,7 +175,7 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp, mm->cached_hole_size = vma->vm_start - addr; /* try just below the current vma->vm_start */ - addr = vma->vm_start-len; + addr = vma->vm_start - len; if (do_color_align) addr = COLOUR_ALIGN_DOWN(addr, pgoff); } while (likely(len < vma->vm_start)); @@ -201,7 +203,7 @@ bottomup: unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0, unsigned long len, unsigned long pgoff, unsigned long flags) { - return arch_get_unmapped_area_foo(filp, + return arch_get_unmapped_area_common(filp, addr0, len, pgoff, flags, UP); } @@ -213,7 +215,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0, unsigned long len, unsigned long pgoff, unsigned long flags) { - return arch_get_unmapped_area_foo(filp, + return arch_get_unmapped_area_common(filp, addr0, len, pgoff, flags, DOWN); } diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index b6e1cff5066..e06370f58ef 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -1759,14 +1759,13 @@ static void __cpuinit build_r3000_tlb_modify_handler(void) u32 *p = handle_tlbm; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; - struct work_registers wr; memset(handle_tlbm, 0, sizeof(handle_tlbm)); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); build_r3000_tlbchange_handler_head(&p, K0, K1); - build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm); + build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm); uasm_i_nop(&p); /* load delay */ build_make_write(&p, &r, K0, K1); build_r3000_pte_reload_tlbwi(&p, K0, K1); @@ -1963,7 +1962,8 @@ static void __cpuinit build_r4000_tlb_load_handler(void) uasm_i_andi(&p, wr.r3, wr.r3, 2); uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2); } - + if (PM_DEFAULT_MASK == 0) + uasm_i_nop(&p); /* * We clobbered C0_PAGEMASK, restore it. On the other branch * it is restored in build_huge_tlb_write_entry. 
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index 1d36c511a7a..d53ff91b277 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c @@ -350,12 +350,14 @@ unsigned int plat_ipi_resched_int_xlate(unsigned int cpu) static struct irqaction i8259irq = { .handler = no_action, - .name = "XT-PIC cascade" + .name = "XT-PIC cascade", + .flags = IRQF_NO_THREAD, }; static struct irqaction corehi_irqaction = { .handler = no_action, - .name = "CoreHi" + .name = "CoreHi", + .flags = IRQF_NO_THREAD, }; static msc_irqmap_t __initdata msc_irqmap[] = { diff --git a/arch/mips/netlogic/xlr/Makefile b/arch/mips/netlogic/xlr/Makefile index 9bd3f731f62..2dca585dd2f 100644 --- a/arch/mips/netlogic/xlr/Makefile +++ b/arch/mips/netlogic/xlr/Makefile @@ -2,4 +2,4 @@ obj-y += setup.o platform.o irq.o setup.o time.o obj-$(CONFIG_SMP) += smp.o smpboot.o obj-$(CONFIG_EARLY_PRINTK) += xlr_console.o -EXTRA_CFLAGS += -Werror +ccflags-y += -Werror diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c index 603d7493e96..8656388b34b 100644 --- a/arch/mips/pci/pci-lantiq.c +++ b/arch/mips/pci/pci-lantiq.c @@ -171,8 +171,13 @@ static int __devinit ltq_pci_startup(struct ltq_pci_data *conf) u32 temp_buffer; /* set clock to 33Mhz */ - ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR); - ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR); + if (ltq_is_ar9()) { + ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0x1f00000, LTQ_CGU_IFCCR); + ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0xe00000, LTQ_CGU_IFCCR); + } else { + ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR); + ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR); + } /* external or internal clock ? */ if (conf->clock) { diff --git a/arch/mips/pci/pci-rc32434.c b/arch/mips/pci/pci-rc32434.c index 764362ce5e4..5f3a69cebad 100644 --- a/arch/mips/pci/pci-rc32434.c +++ b/arch/mips/pci/pci-rc32434.c @@ -215,7 +215,7 @@ static int __init rc32434_pci_init(void) rc32434_pcibridge_init(); io_map_base = ioremap(rc32434_res_pci_io1.start, - resource_size(&rcrc32434_res_pci_io1)); + resource_size(&rc32434_res_pci_io1)); if (!io_map_base) return -ENOMEM; diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq.c b/arch/mips/pmc-sierra/msp71xx/msp_irq.c index 4531c4a514b..d3c3d81757a 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_irq.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_irq.c @@ -108,12 +108,14 @@ asmlinkage void plat_irq_dispatch(struct pt_regs *regs) static struct irqaction cic_cascade_msp = { .handler = no_action, - .name = "MSP CIC cascade" + .name = "MSP CIC cascade", + .flags = IRQF_NO_THREAD, }; static struct irqaction per_cascade_msp = { .handler = no_action, - .name = "MSP PER cascade" + .name = "MSP PER cascade", + .flags = IRQF_NO_THREAD, }; void __init arch_init_irq(void) diff --git a/arch/mips/pnx8550/common/int.c b/arch/mips/pnx8550/common/int.c index 6b93c81779c..1ebe22bdadc 100644 --- a/arch/mips/pnx8550/common/int.c +++ b/arch/mips/pnx8550/common/int.c @@ -167,7 +167,7 @@ static struct irq_chip level_irq_type = { static struct irqaction gic_action = { .handler = no_action, - .flags = IRQF_DISABLED, + .flags = IRQF_DISABLED | IRQF_NO_THREAD, .name = "GIC", }; diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c index b4d08e4d2ea..f72c336ea27 100644 --- a/arch/mips/sgi-ip22/ip22-int.c +++ b/arch/mips/sgi-ip22/ip22-int.c @@ -155,32 +155,32 @@ static void __irq_entry indy_buserror_irq(void) static struct irqaction local0_cascade 
= { .handler = no_action, - .flags = IRQF_DISABLED, + .flags = IRQF_DISABLED | IRQF_NO_THREAD, .name = "local0 cascade", }; static struct irqaction local1_cascade = { .handler = no_action, - .flags = IRQF_DISABLED, + .flags = IRQF_DISABLED | IRQF_NO_THREAD, .name = "local1 cascade", }; static struct irqaction buserr = { .handler = no_action, - .flags = IRQF_DISABLED, + .flags = IRQF_DISABLED | IRQF_NO_THREAD, .name = "Bus Error", }; static struct irqaction map0_cascade = { .handler = no_action, - .flags = IRQF_DISABLED, + .flags = IRQF_DISABLED | IRQF_NO_THREAD, .name = "mapable0 cascade", }; #ifdef USE_LIO3_IRQ static struct irqaction map1_cascade = { .handler = no_action, - .flags = IRQF_DISABLED, + .flags = IRQF_DISABLED | IRQF_NO_THREAD, .name = "mapable1 cascade", }; #define SGI_INTERRUPTS SGINT_END diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c index a7e5a6d917b..3ab5b5d25b0 100644 --- a/arch/mips/sni/rm200.c +++ b/arch/mips/sni/rm200.c @@ -359,6 +359,7 @@ void sni_rm200_init_8259A(void) static struct irqaction sni_rm200_irq2 = { .handler = no_action, .name = "cascade", + .flags = IRQF_NO_THREAD, }; static struct resource sni_rm200_pic1_resource = { diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c index 70a3b85f375..fad2bef432c 100644 --- a/arch/mips/vr41xx/common/irq.c +++ b/arch/mips/vr41xx/common/irq.c @@ -34,6 +34,7 @@ static irq_cascade_t irq_cascade[NR_IRQS] __cacheline_aligned; static struct irqaction cascade_irqaction = { .handler = no_action, .name = "cascade", + .flags = IRQF_NO_THREAD, }; int cascade_irq(unsigned int irq, int (*get_irq)(unsigned int)) diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index 5cc83851ad0..31a7d3a7ce2 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c @@ -561,6 +561,20 @@ static struct pci_ops u4_pcie_pci_ops = .write = u4_pcie_write_config, }; +static void __devinit pmac_pci_fixup_u4_of_node(struct pci_dev *dev) +{ + /* Apple's device-tree "hides" the root complex virtual P2P bridge + * on U4. However, Linux sees it, causing the PCI <-> OF matching + * code to fail to properly match devices below it. This works around + * it by setting the node of the bridge to point to the PHB node, + * which is not entirely correct but fixes the matching code and + * doesn't break anything else. It's also the simplest possible fix. 
+ */ + if (dev->dev.of_node == NULL) + dev->dev.of_node = pcibios_get_phb_of_node(dev->bus); +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, 0x5b, pmac_pci_fixup_u4_of_node); + #endif /* CONFIG_PPC64 */ #ifdef CONFIG_PPC32 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 64b61bf72e9..547f1a6a35d 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -188,7 +188,8 @@ extern char elf_platform[]; #define SET_PERSONALITY(ex) \ do { \ if (personality(current->personality) != PER_LINUX32) \ - set_personality(PER_LINUX); \ + set_personality(PER_LINUX | \ + (current->personality & ~PER_MASK)); \ if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ set_thread_flag(TIF_31BIT); \ else \ diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index f69ff3c1349..5d56c2b95b1 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -303,15 +303,15 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) /* Walk the guest addr space page table */ table = gmap->table + (((to + off) >> 53) & 0x7ff); if (*table & _REGION_ENTRY_INV) - return 0; + goto out; table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = table + (((to + off) >> 42) & 0x7ff); if (*table & _REGION_ENTRY_INV) - return 0; + goto out; table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = table + (((to + off) >> 31) & 0x7ff); if (*table & _REGION_ENTRY_INV) - return 0; + goto out; table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = table + (((to + off) >> 20) & 0x7ff); @@ -319,6 +319,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) flush |= gmap_unlink_segment(gmap, table); *table = _SEGMENT_ENTRY_INV; } +out: up_read(&gmap->mm->mmap_sem); if (flush) gmap_flush_tlb(gmap); diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h index 1407c07bdad..f6ae2b2b687 100644 --- a/arch/sparc/include/asm/pgtsrmmu.h +++ b/arch/sparc/include/asm/pgtsrmmu.h @@ -280,7 +280,7 @@ static inline unsigned long srmmu_hwprobe(unsigned long vaddr) return retval; } #else -#define srmmu_hwprobe(addr) (srmmu_swprobe(addr, 0) & SRMMU_PTE_PMASK) +#define srmmu_hwprobe(addr) srmmu_swprobe(addr, 0) #endif static inline int diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h index 55a17c6efeb..d06a2660175 100644 --- a/arch/sparc/include/asm/spitfire.h +++ b/arch/sparc/include/asm/spitfire.h @@ -43,6 +43,8 @@ #define SUN4V_CHIP_NIAGARA1 0x01 #define SUN4V_CHIP_NIAGARA2 0x02 #define SUN4V_CHIP_NIAGARA3 0x03 +#define SUN4V_CHIP_NIAGARA4 0x04 +#define SUN4V_CHIP_NIAGARA5 0x05 #define SUN4V_CHIP_UNKNOWN 0xff #ifndef __ASSEMBLY__ diff --git a/arch/sparc/include/asm/xor_64.h b/arch/sparc/include/asm/xor_64.h index 9ed6ff679ab..ee8edc68423 100644 --- a/arch/sparc/include/asm/xor_64.h +++ b/arch/sparc/include/asm/xor_64.h @@ -66,6 +66,8 @@ static struct xor_block_template xor_block_niagara = { ((tlb_type == hypervisor && \ (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || \ sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || \ - sun4v_chip_type == SUN4V_CHIP_NIAGARA3)) ? \ + sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || \ + sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || \ + sun4v_chip_type == SUN4V_CHIP_NIAGARA5)) ? 
\ &xor_block_niagara : \ &xor_block_VIS) diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c index 9810fd88105..ba9b1cec4e6 100644 --- a/arch/sparc/kernel/cpu.c +++ b/arch/sparc/kernel/cpu.c @@ -481,6 +481,18 @@ static void __init sun4v_cpu_probe(void) sparc_pmu_type = "niagara3"; break; + case SUN4V_CHIP_NIAGARA4: + sparc_cpu_type = "UltraSparc T4 (Niagara4)"; + sparc_fpu_type = "UltraSparc T4 integrated FPU"; + sparc_pmu_type = "niagara4"; + break; + + case SUN4V_CHIP_NIAGARA5: + sparc_cpu_type = "UltraSparc T5 (Niagara5)"; + sparc_fpu_type = "UltraSparc T5 integrated FPU"; + sparc_pmu_type = "niagara5"; + break; + default: printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", prom_cpu_compatible); diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c index 4197e8d62d4..9323eafccb9 100644 --- a/arch/sparc/kernel/cpumap.c +++ b/arch/sparc/kernel/cpumap.c @@ -325,6 +325,8 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index) case SUN4V_CHIP_NIAGARA1: case SUN4V_CHIP_NIAGARA2: case SUN4V_CHIP_NIAGARA3: + case SUN4V_CHIP_NIAGARA4: + case SUN4V_CHIP_NIAGARA5: rover_inc_table = niagara_iterate_method; break; default: diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index 0eac1b2fc53..0d810c2f1d0 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S @@ -133,7 +133,7 @@ prom_sun4v_name: prom_niagara_prefix: .asciz "SUNW,UltraSPARC-T" prom_sparc_prefix: - .asciz "SPARC-T" + .asciz "SPARC-" .align 4 prom_root_compatible: .skip 64 @@ -396,7 +396,7 @@ sun4v_chip_type: or %g1, %lo(prom_cpu_compatible), %g1 sethi %hi(prom_sparc_prefix), %g7 or %g7, %lo(prom_sparc_prefix), %g7 - mov 7, %g3 + mov 6, %g3 90: ldub [%g7], %g2 ldub [%g1], %g4 cmp %g2, %g4 @@ -408,10 +408,23 @@ sun4v_chip_type: sethi %hi(prom_cpu_compatible), %g1 or %g1, %lo(prom_cpu_compatible), %g1 - ldub [%g1 + 7], %g2 + ldub [%g1 + 6], %g2 + cmp %g2, 'T' + be,pt %xcc, 70f + cmp %g2, 'M' + bne,pn %xcc, 4f + nop + +70: ldub [%g1 + 7], %g2 cmp %g2, '3' be,pt %xcc, 5f mov SUN4V_CHIP_NIAGARA3, %g4 + cmp %g2, '4' + be,pt %xcc, 5f + mov SUN4V_CHIP_NIAGARA4, %g4 + cmp %g2, '5' + be,pt %xcc, 5f + mov SUN4V_CHIP_NIAGARA5, %g4 ba,pt %xcc, 4f nop @@ -545,6 +558,12 @@ niagara_tlb_fixup: cmp %g1, SUN4V_CHIP_NIAGARA3 be,pt %xcc, niagara2_patch nop + cmp %g1, SUN4V_CHIP_NIAGARA4 + be,pt %xcc, niagara2_patch + nop + cmp %g1, SUN4V_CHIP_NIAGARA5 + be,pt %xcc, niagara2_patch + nop call generic_patch_copyops nop diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 1e94f946570..8aa0d440858 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -230,7 +230,8 @@ static void pci_parse_of_addrs(struct platform_device *op, res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; } else if (i == dev->rom_base_reg) { res = &dev->resource[PCI_ROM_RESOURCE]; - flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE; + flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE + | IORESOURCE_SIZEALIGN; } else { printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i); continue; diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index c8cc461ff75..f793742eec2 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -380,8 +380,7 @@ void flush_thread(void) #endif } - /* Now, this task is no longer a kernel thread. */ - current->thread.current_ds = USER_DS; + /* This task is no longer a kernel thread. 
*/ if (current->thread.flags & SPARC_FLAG_KTHREAD) { current->thread.flags &= ~SPARC_FLAG_KTHREAD; diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index c158a95ec66..d959cd0a4aa 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -368,9 +368,6 @@ void flush_thread(void) /* Clear FPU register state. */ t->fpsaved[0] = 0; - - if (get_thread_current_ds() != ASI_AIUS) - set_fs(USER_DS); } /* It's a bit more tricky when 64-bit tasks are involved... */ diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c index d26e1f6c717..3e3e2914c70 100644 --- a/arch/sparc/kernel/setup_32.c +++ b/arch/sparc/kernel/setup_32.c @@ -137,7 +137,7 @@ static void __init process_switch(char c) prom_halt(); break; case 'p': - /* Just ignore, this behavior is now the default. */ + prom_early_console.flags &= ~CON_BOOT; break; default: printk("Unknown boot switch (-%c)\n", c); diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index 3c5bb784214..c965595aa7e 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -106,7 +106,7 @@ static void __init process_switch(char c) prom_halt(); break; case 'p': - /* Just ignore, this behavior is now the default. */ + prom_early_console.flags &= ~CON_BOOT; break; case 'P': /* Force UltraSPARC-III P-Cache on. */ @@ -425,10 +425,14 @@ static void __init init_sparc64_elf_hwcap(void) else if (tlb_type == hypervisor) { if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || - sun4v_chip_type == SUN4V_CHIP_NIAGARA3) + sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA5) cap |= HWCAP_SPARC_BLKINIT; if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || - sun4v_chip_type == SUN4V_CHIP_NIAGARA3) + sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA5) cap |= HWCAP_SPARC_N2; } @@ -452,11 +456,15 @@ static void __init init_sparc64_elf_hwcap(void) if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1) cap |= AV_SPARC_ASI_BLK_INIT; if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || - sun4v_chip_type == SUN4V_CHIP_NIAGARA3) + sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA5) cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 | AV_SPARC_ASI_BLK_INIT | AV_SPARC_POPC); - if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3) + if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA5) cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC | AV_SPARC_FMAF); } diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index 1ba95aff5d5..2caa556db86 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -273,10 +273,7 @@ void do_sigreturn32(struct pt_regs *regs) case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32); } sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&set); return; segv: @@ -377,10 +374,7 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32); } sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&set); return; segv: force_sig(SIGSEGV,
current); @@ -782,6 +776,7 @@ static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) { + sigset_t blocked; int err; if (ka->sa.sa_flags & SA_SIGINFO) @@ -792,12 +787,10 @@ static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka, if (err) return err; - spin_lock_irq(&current->sighand->siglock); - sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); + sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NOMASK)) - sigaddset(&current->blocked,signr); - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + sigaddset(&blocked, signr); + set_current_blocked(&blocked); tracehook_signal_handler(signr, info, ka, regs, 0); @@ -881,7 +874,7 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs, */ if (current_thread_info()->status & TS_RESTORE_SIGMASK) { current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); + set_current_blocked(&current->saved_sigmask); } } diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 04ede8f04ad..8ce247ac04c 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -62,12 +62,13 @@ struct rt_signal_frame { static int _sigpause_common(old_sigset_t set) { - set &= _BLOCKABLE; - spin_lock_irq(&current->sighand->siglock); + sigset_t blocked; + current->saved_sigmask = current->blocked; - siginitset(&current->blocked, set); - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + + set &= _BLOCKABLE; + siginitset(&blocked, set); + set_current_blocked(&blocked); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -139,10 +140,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs) goto segv_and_exit; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&set); return; segv_and_exit: @@ -209,10 +207,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) } sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&set); return; segv: force_sig(SIGSEGV, current); @@ -470,6 +465,7 @@ static inline int handle_signal(unsigned long signr, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) { + sigset_t blocked; int err; if (ka->sa.sa_flags & SA_SIGINFO) @@ -480,12 +476,10 @@ handle_signal(unsigned long signr, struct k_sigaction *ka, if (err) return err; - spin_lock_irq(&current->sighand->siglock); - sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); + sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NOMASK)) - sigaddset(&current->blocked, signr); - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + sigaddset(&blocked, signr); + set_current_blocked(&blocked); tracehook_signal_handler(signr, info, ka, regs, 0); @@ -581,7 +575,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) { clear_thread_flag(TIF_RESTORE_SIGMASK); - sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); + set_current_blocked(&current->saved_sigmask); } } diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 47509df3b89..a2b81598d90 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -70,10 +70,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs) goto
do_sigsegv; } sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&set); } if (test_thread_flag(TIF_32BIT)) { pc &= 0xffffffff; @@ -242,12 +239,13 @@ struct rt_signal_frame { static long _sigpause_common(old_sigset_t set) { - set &= _BLOCKABLE; - spin_lock_irq(&current->sighand->siglock); + sigset_t blocked; + current->saved_sigmask = current->blocked; - siginitset(&current->blocked, set); - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + + set &= _BLOCKABLE; + siginitset(&blocked, set); + set_current_blocked(&blocked); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -327,10 +325,7 @@ void do_rt_sigreturn(struct pt_regs *regs) pt_regs_clear_syscall(regs); sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + set_current_blocked(&set); return; segv: force_sig(SIGSEGV, current); @@ -484,18 +479,17 @@ static inline int handle_signal(unsigned long signr, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) { + sigset_t blocked; int err; err = setup_rt_frame(ka, regs, signr, oldset, (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL); if (err) return err; - spin_lock_irq(&current->sighand->siglock); - sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); + sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NOMASK)) - sigaddset(&current->blocked,signr); - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); + sigaddset(&blocked, signr); + set_current_blocked(&blocked); tracehook_signal_handler(signr, info, ka, regs, 0); @@ -601,7 +595,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) */ if (current_thread_info()->status & TS_RESTORE_SIGMASK) { current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); + set_current_blocked(&current->saved_sigmask); } } diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 581531dbc8b..8e073d80213 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -511,6 +511,11 @@ static void __init read_obp_translations(void) for (i = 0; i < prom_trans_ents; i++) prom_trans[i].data &= ~0x0003fe0000000000UL; } + + /* Force execute bit on. */ + for (i = 0; i < prom_trans_ents; i++) + prom_trans[i].data |= (tlb_type == hypervisor ?
+ _PAGE_EXEC_4V : _PAGE_EXEC_4U); } static void __init hypervisor_tlb_lock(unsigned long vaddr, diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c index e485a680499..13c2169822a 100644 --- a/arch/sparc/mm/leon_mm.c +++ b/arch/sparc/mm/leon_mm.c @@ -162,7 +162,7 @@ ready: printk(KERN_INFO "swprobe: padde %x\n", paddr_calc); if (paddr) *paddr = paddr_calc; - return paddrbase; + return pte; } void leon_flush_icache_all(void) diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S index fc94607f0bd..aecc8ed5f39 100644 --- a/arch/tile/kernel/intvec_32.S +++ b/arch/tile/kernel/intvec_32.S @@ -21,7 +21,7 @@ #include <asm/ptrace.h> #include <asm/thread_info.h> #include <asm/irqflags.h> -#include <linux/atomic.h> +#include <asm/atomic_32.h> #include <asm/asm-offsets.h> #include <hv/hypervisor.h> #include <arch/abi.h> diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S index 1f75a2a5610..30638042691 100644 --- a/arch/tile/lib/atomic_asm_32.S +++ b/arch/tile/lib/atomic_asm_32.S @@ -70,7 +70,7 @@ */ #include <linux/linkage.h> -#include <linux/atomic.h> +#include <asm/atomic_32.h> #include <asm/page.h> #include <asm/processor.h> diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index 3f2ad2640d8..ccdbc16b894 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c @@ -42,8 +42,11 @@ int mach_set_rtc_mmss(unsigned long nowtime) { int real_seconds, real_minutes, cmos_minutes; unsigned char save_control, save_freq_select; + unsigned long flags; int retval = 0; + spin_lock_irqsave(&rtc_lock, flags); + /* tell the clock it's being set */ save_control = CMOS_READ(RTC_CONTROL); CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); @@ -93,12 +96,17 @@ int mach_set_rtc_mmss(unsigned long nowtime) CMOS_WRITE(save_control, RTC_CONTROL); CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); + spin_unlock_irqrestore(&rtc_lock, flags); + return retval; } unsigned long mach_get_cmos_time(void) { unsigned int status, year, mon, day, hour, min, sec, century = 0; + unsigned long flags; + + spin_lock_irqsave(&rtc_lock, flags); /* * If UIP is clear, then we have >= 244 microseconds before @@ -125,6 +133,8 @@ unsigned long mach_get_cmos_time(void) status = CMOS_READ(RTC_CONTROL); WARN_ON_ONCE(RTC_ALWAYS_BCD && (status & RTC_DM_BINARY)); + spin_unlock_irqrestore(&rtc_lock, flags); + if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) { sec = bcd2bin(sec); min = bcd2bin(min); @@ -169,24 +179,15 @@ EXPORT_SYMBOL(rtc_cmos_write); int update_persistent_clock(struct timespec now) { - unsigned long flags; - int retval; - - spin_lock_irqsave(&rtc_lock, flags); - retval = x86_platform.set_wallclock(now.tv_sec); - spin_unlock_irqrestore(&rtc_lock, flags); - - return retval; + return x86_platform.set_wallclock(now.tv_sec); } /* not static: needed by APM */ void read_persistent_clock(struct timespec *ts) { - unsigned long retval, flags; + unsigned long retval; - spin_lock_irqsave(&rtc_lock, flags); retval = x86_platform.get_wallclock(); - spin_unlock_irqrestore(&rtc_lock, flags); ts->tv_sec = retval; ts->tv_nsec = 0; diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 18ae83dd1cd..b56c65de384 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c @@ -56,7 +56,7 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) = .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock), }; -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE; +static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE; static int __init 
vsyscall_setup(char *str) { diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 30326443ab8..87488b93a65 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -63,9 +63,8 @@ static void __init find_early_table_space(unsigned long end, int use_pse, #ifdef CONFIG_X86_32 /* for fixmap */ tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE); - - good_end = max_pfn_mapped << PAGE_SHIFT; #endif + good_end = max_pfn_mapped << PAGE_SHIFT; base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE); if (base == MEMBLOCK_ERROR) diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 039d91315bc..404f21a3ff9 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -43,6 +43,17 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"), }, }, + /* https://bugzilla.kernel.org/show_bug.cgi?id=30552 */ + /* 2006 AMD HT/VIA system with two host bridges */ + { + .callback = set_use_crs, + .ident = "ASUS M2V-MX SE", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "M2V-MX SE"), + DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), + }, + }, {} }; diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c index 58425adc22c..fe73276e026 100644 --- a/arch/x86/platform/mrst/mrst.c +++ b/arch/x86/platform/mrst/mrst.c @@ -678,38 +678,40 @@ static int __init sfi_parse_devs(struct sfi_table_header *table) pentry = (struct sfi_device_table_entry *)sb->pentry; for (i = 0; i < num; i++, pentry++) { - if (pentry->irq != (u8)0xff) { /* native RTE case */ + int irq = pentry->irq; + + if (irq != (u8)0xff) { /* native RTE case */ /* these SPI2 devices are not exposed to system as PCI * devices, but they have separate RTE entry in IOAPIC * so we have to enable them one by one here */ - ioapic = mp_find_ioapic(pentry->irq); + ioapic = mp_find_ioapic(irq); irq_attr.ioapic = ioapic; - irq_attr.ioapic_pin = pentry->irq; + irq_attr.ioapic_pin = irq; irq_attr.trigger = 1; irq_attr.polarity = 1; - io_apic_set_pci_routing(NULL, pentry->irq, &irq_attr); + io_apic_set_pci_routing(NULL, irq, &irq_attr); } else - pentry->irq = 0; /* No irq */ + irq = 0; /* No irq */ switch (pentry->type) { case SFI_DEV_TYPE_IPC: /* ID as IRQ is a hack that will go away */ - pdev = platform_device_alloc(pentry->name, pentry->irq); + pdev = platform_device_alloc(pentry->name, irq); if (pdev == NULL) { pr_err("out of memory for SFI platform device '%s'.\n", pentry->name); continue; } - install_irq_resource(pdev, pentry->irq); + install_irq_resource(pdev, irq); pr_debug("info[%2d]: IPC bus, name = %16.16s, " - "irq = 0x%2x\n", i, pentry->name, pentry->irq); + "irq = 0x%2x\n", i, pentry->name, irq); sfi_handle_ipc_dev(pdev); break; case SFI_DEV_TYPE_SPI: memset(&spi_info, 0, sizeof(spi_info)); strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN); - spi_info.irq = pentry->irq; + spi_info.irq = irq; spi_info.bus_num = pentry->host_num; spi_info.chip_select = pentry->addr; spi_info.max_speed_hz = pentry->max_freq; @@ -726,7 +728,7 @@ static int __init sfi_parse_devs(struct sfi_table_header *table) memset(&i2c_info, 0, sizeof(i2c_info)); bus = pentry->host_num; strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN); - i2c_info.irq = pentry->irq; + i2c_info.irq = irq; i2c_info.addr = pentry->addr; pr_debug("info[%2d]: I2C bus = %d, name = %16.16s, " "irq = 0x%2x, addr = 0x%x\n", i, bus, diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c index 
73d70d65e76..6d5dbcdd444 100644 --- a/arch/x86/platform/mrst/vrtc.c +++ b/arch/x86/platform/mrst/vrtc.c @@ -58,8 +58,11 @@ EXPORT_SYMBOL_GPL(vrtc_cmos_write); unsigned long vrtc_get_time(void) { u8 sec, min, hour, mday, mon; + unsigned long flags; u32 year; + spin_lock_irqsave(&rtc_lock, flags); + while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP)) cpu_relax(); @@ -70,6 +73,8 @@ unsigned long vrtc_get_time(void) mon = vrtc_cmos_read(RTC_MONTH); year = vrtc_cmos_read(RTC_YEAR); + spin_unlock_irqrestore(&rtc_lock, flags); + /* vRTC YEAR reg contains the offset to 1960 */ year += 1960; @@ -83,8 +88,10 @@ unsigned long vrtc_get_time(void) int vrtc_set_mmss(unsigned long nowtime) { int real_sec, real_min; + unsigned long flags; int vrtc_min; + spin_lock_irqsave(&rtc_lock, flags); vrtc_min = vrtc_cmos_read(RTC_MINUTES); real_sec = nowtime % 60; @@ -95,6 +102,8 @@ int vrtc_set_mmss(unsigned long nowtime) vrtc_cmos_write(real_sec, RTC_SECONDS); vrtc_cmos_write(real_min, RTC_MINUTES); + spin_unlock_irqrestore(&rtc_lock, flags); + return 0; } diff --git a/block/blk-core.c b/block/blk-core.c index b2ed78afd9f..d34433ae791 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -348,9 +348,10 @@ void blk_put_queue(struct request_queue *q) EXPORT_SYMBOL(blk_put_queue); /* - * Note: If a driver supplied the queue lock, it should not zap that lock - * unexpectedly as some queue cleanup components like elevator_exit() and - * blk_throtl_exit() need queue lock. + * Note: If a driver supplied the queue lock, it is disconnected + * by this function. The actual state of the lock doesn't matter + * here as the request_queue isn't accessible after this point + * (QUEUE_FLAG_DEAD is set) and no other requests will be queued. */ void blk_cleanup_queue(struct request_queue *q) { @@ -367,10 +368,8 @@ void blk_cleanup_queue(struct request_queue *q) queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); mutex_unlock(&q->sysfs_lock); - if (q->elevator) - elevator_exit(q->elevator); - - blk_throtl_exit(q); + if (q->queue_lock != &q->__queue_lock) + q->queue_lock = &q->__queue_lock; blk_put_queue(q); } diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index e681805cdb4..60fda88c57f 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -479,6 +479,11 @@ static void blk_release_queue(struct kobject *kobj) blk_sync_queue(q); + if (q->elevator) + elevator_exit(q->elevator); + + blk_throtl_exit(q); + if (rl->rq_pool) mempool_destroy(rl->rq_pool); diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c index be442561693..7835b8fc94d 100644 --- a/crypto/ghash-generic.c +++ b/crypto/ghash-generic.c @@ -67,6 +67,9 @@ static int ghash_update(struct shash_desc *desc, struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); u8 *dst = dctx->buffer; + if (!ctx->gf128) + return -ENOKEY; + if (dctx->bytes) { int n = min(srclen, dctx->bytes); u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes); @@ -119,6 +122,9 @@ static int ghash_final(struct shash_desc *desc, u8 *dst) struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); u8 *buf = dctx->buffer; + if (!ctx->gf128) + return -ENOKEY; + ghash_flush(ctx, dctx); memcpy(dst, buf, GHASH_BLOCK_SIZE); diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 0599854e221..118ec12d2d5 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -34,8 +34,8 @@ struct gpio_bank { u16 irq; u16 virtual_irq_start; int method; -#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS) u32 suspend_wakeup; +#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS) u32 
saved_wakeup; #endif u32 non_wakeup_gpios; diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index c43b8ff626a..0550dcb8581 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -577,6 +577,7 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert) void pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert) { + *gpio_base = -1; } #endif diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ce045a8cf82..f07e4252b70 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -67,11 +67,11 @@ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); MODULE_PARM_DESC(i915_enable_rc6, "Enable power-saving render C-state 6 (default: true)"); -unsigned int i915_enable_fbc __read_mostly = 1; +unsigned int i915_enable_fbc __read_mostly = -1; module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); MODULE_PARM_DESC(i915_enable_fbc, "Enable frame buffer compression for power savings " - "(default: false)"); + "(default: -1 (use per-chip default))"); unsigned int i915_lvds_downclock __read_mostly = 0; module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 56a8554d903..04411ad2e77 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1799,6 +1799,7 @@ static void intel_update_fbc(struct drm_device *dev) struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; struct drm_i915_gem_object *obj; + int enable_fbc; DRM_DEBUG_KMS("\n"); @@ -1839,8 +1840,15 @@ static void intel_update_fbc(struct drm_device *dev) intel_fb = to_intel_framebuffer(fb); obj = intel_fb->obj; - if (!i915_enable_fbc) { - DRM_DEBUG_KMS("fbc disabled per module param (default off)\n"); + enable_fbc = i915_enable_fbc; + if (enable_fbc < 0) { + DRM_DEBUG_KMS("fbc set to per-chip default\n"); + enable_fbc = 1; + if (INTEL_INFO(dev)->gen <= 5) + enable_fbc = 0; + } + if (!enable_fbc) { + DRM_DEBUG_KMS("fbc disabled per module param\n"); dev_priv->no_fbc_reason = FBC_MODULE_PARAM; goto out_disable; } @@ -4687,13 +4695,13 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, bpc = 6; /* min is 18bpp */ break; case 24: - bpc = min((unsigned int)8, display_bpc); + bpc = 8; break; case 30: - bpc = min((unsigned int)10, display_bpc); + bpc = 10; break; case 48: - bpc = min((unsigned int)12, display_bpc); + bpc = 12; break; default: DRM_DEBUG("unsupported depth, assuming 24 bits\n"); @@ -4701,10 +4709,12 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, break; } + display_bpc = min(display_bpc, bpc); + DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n", bpc, display_bpc); - *pipe_bpp = bpc * 3; + *pipe_bpp = display_bpc * 3; return display_bpc != bpc; } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0b2ee9d3998..fe1099d8817 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -337,9 +337,6 @@ extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, struct drm_connector *connector, struct intel_load_detect_pipe *old); -extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); -extern int intel_sdvo_supports_hotplug(struct drm_connector *connector); -extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable); extern void intelfb_restore(void); extern 
void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 30fe554d893..6348c499616 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -92,6 +92,11 @@ struct intel_sdvo { */ uint16_t attached_output; + /* + * Hotplug activation bits for this device + */ + uint8_t hotplug_active[2]; + /** * This is used to select the color range of RBG outputs in HDMI mode. * It is only valid when using TMDS encoding and 8 bit per color mode. @@ -1208,74 +1213,20 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in return true; } -/* No use! */ -#if 0 -struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) -{ - struct drm_connector *connector = NULL; - struct intel_sdvo *iout = NULL; - struct intel_sdvo *sdvo; - - /* find the sdvo connector */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - iout = to_intel_sdvo(connector); - - if (iout->type != INTEL_OUTPUT_SDVO) - continue; - - sdvo = iout->dev_priv; - - if (sdvo->sdvo_reg == SDVOB && sdvoB) - return connector; - - if (sdvo->sdvo_reg == SDVOC && !sdvoB) - return connector; - - } - - return NULL; -} - -int intel_sdvo_supports_hotplug(struct drm_connector *connector) +static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo) { u8 response[2]; - u8 status; - struct intel_sdvo *intel_sdvo; - DRM_DEBUG_KMS("\n"); - - if (!connector) - return 0; - - intel_sdvo = to_intel_sdvo(connector); return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, &response, 2) && response[0]; } -void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) +static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder) { - u8 response[2]; - u8 status; - struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector); - - intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); - intel_sdvo_read_response(intel_sdvo, &response, 2); - - if (on) { - intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); - status = intel_sdvo_read_response(intel_sdvo, &response, 2); - - intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); - } else { - response[0] = 0; - response[1] = 0; - intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); - } + struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); - intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); - intel_sdvo_read_response(intel_sdvo, &response, 2); + intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2); } -#endif static bool intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) @@ -2045,6 +1996,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) { struct drm_encoder *encoder = &intel_sdvo->base.base; struct drm_connector *connector; + struct intel_encoder *intel_encoder = to_intel_encoder(encoder); struct intel_connector *intel_connector; struct intel_sdvo_connector *intel_sdvo_connector; @@ -2062,7 +2014,17 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; - connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) { + connector->polled = DRM_CONNECTOR_POLL_HPD; + intel_sdvo->hotplug_active[0] |= 1 << device; + /* Some 
SDVO devices have one-shot hotplug interrupts. + * Ensure that they get re-enabled when an interrupt happens. + */ + intel_encoder->hot_plug = intel_sdvo_enable_hotplug; + intel_sdvo_enable_hotplug(intel_encoder); + } + else + connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; encoder->encoder_type = DRM_MODE_ENCODER_TMDS; connector->connector_type = DRM_MODE_CONNECTOR_DVID; @@ -2569,6 +2531,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) goto err; + /* Set up hotplug command - note paranoia about contents of reply. + * We assume that the hardware is in a sane state, and only touch + * the bits we think we understand. + */ + intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, + &intel_sdvo->hotplug_active, 2); + intel_sdvo->hotplug_active[0] &= ~0x3; + if (intel_sdvo_output_setup(intel_sdvo, intel_sdvo->caps.output_flags) != true) { DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index e88c64417a8..14cc88aaf3a 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -277,7 +277,12 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, case ATOM_ARG_FB: idx = U8(*ptr); (*ptr)++; - val = gctx->scratch[((gctx->fb_base + idx) / 4)]; + if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { + DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n", + gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); + val = 0; + } else + val = gctx->scratch[(gctx->fb_base / 4) + idx]; if (print) DEBUG("FB[0x%02X]", idx); break; @@ -531,7 +536,11 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr, case ATOM_ARG_FB: idx = U8(*ptr); (*ptr)++; - gctx->scratch[((gctx->fb_base + idx) / 4)] = val; + if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { + DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. 
%d\n", + gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); + } else + gctx->scratch[(gctx->fb_base / 4) + idx] = val; DEBUG("FB[0x%02X]", idx); break; case ATOM_ARG_PLL: @@ -1370,11 +1379,13 @@ int atom_allocate_fb_scratch(struct atom_context *ctx) usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; } + ctx->scratch_size_bytes = 0; if (usage_bytes == 0) usage_bytes = 20 * 1024; /* allocate some scratch memory */ ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL); if (!ctx->scratch) return -ENOMEM; + ctx->scratch_size_bytes = usage_bytes; return 0; } diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h index a589a55b223..93cfe2086ba 100644 --- a/drivers/gpu/drm/radeon/atom.h +++ b/drivers/gpu/drm/radeon/atom.h @@ -137,6 +137,7 @@ struct atom_context { int cs_equal, cs_above; int io_mode; uint32_t *scratch; + int scratch_size_bytes; }; extern int atom_debug; diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index c742944d380..a515b2a09d8 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -466,7 +466,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc, return; } args.v2.ucEnable = enable; - if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK)) + if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev)) args.v2.ucEnable = ATOM_DISABLE; } else if (ASIC_IS_DCE3(rdev)) { args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 7ad43c6b1db..79e8ebc0530 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -115,6 +115,7 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, u8 msg[20]; int msg_bytes = send_bytes + 4; u8 ack; + unsigned retry; if (send_bytes > 16) return -1; @@ -125,20 +126,22 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, msg[3] = (msg_bytes << 4) | (send_bytes - 1); memcpy(&msg[4], send, send_bytes); - while (1) { + for (retry = 0; retry < 4; retry++) { ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_bytes, NULL, 0, delay, &ack); - if (ret < 0) + if (ret == -EBUSY) + continue; + else if (ret < 0) return ret; if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) - break; + return send_bytes; else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) udelay(400); else return -EIO; } - return send_bytes; + return -EIO; } static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, @@ -149,26 +152,31 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, int msg_bytes = 4; u8 ack; int ret; + unsigned retry; msg[0] = address; msg[1] = address >> 8; msg[2] = AUX_NATIVE_READ << 4; msg[3] = (msg_bytes << 4) | (recv_bytes - 1); - while (1) { + for (retry = 0; retry < 4; retry++) { ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_bytes, recv, recv_bytes, delay, &ack); - if (ret == 0) - return -EPROTO; - if (ret < 0) + if (ret == -EBUSY) + continue; + else if (ret < 0) return ret; if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) return ret; else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) udelay(400); + else if (ret == 0) + return -EPROTO; else return -EIO; } + + return -EIO; } static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector, @@ -232,7 +240,9 
@@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, for (retry = 0; retry < 4; retry++) { ret = radeon_process_aux_ch(auxch, msg, msg_bytes, reply, reply_bytes, 0, &ack); - if (ret < 0) { + if (ret == -EBUSY) + continue; + else if (ret < 0) { DRM_DEBUG_KMS("aux_ch failed %d\n", ret); return ret; } diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index e8a746712b5..c4ffa14fb2f 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1590,48 +1590,6 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, return backend_map; } -static void evergreen_program_channel_remap(struct radeon_device *rdev) -{ - u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp; - - tmp = RREG32(MC_SHARED_CHMAP); - switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { - case 0: - case 1: - case 2: - case 3: - default: - /* default mapping */ - mc_shared_chremap = 0x00fac688; - break; - } - - switch (rdev->family) { - case CHIP_HEMLOCK: - case CHIP_CYPRESS: - case CHIP_BARTS: - tcp_chan_steer_lo = 0x54763210; - tcp_chan_steer_hi = 0x0000ba98; - break; - case CHIP_JUNIPER: - case CHIP_REDWOOD: - case CHIP_CEDAR: - case CHIP_PALM: - case CHIP_SUMO: - case CHIP_SUMO2: - case CHIP_TURKS: - case CHIP_CAICOS: - default: - tcp_chan_steer_lo = 0x76543210; - tcp_chan_steer_hi = 0x0000ba98; - break; - } - - WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo); - WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi); - WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); -} - static void evergreen_gpu_init(struct radeon_device *rdev) { u32 cc_rb_backend_disable = 0; @@ -2078,8 +2036,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev) WREG32(DMIF_ADDR_CONFIG, gb_addr_config); WREG32(HDP_ADDR_CONFIG, gb_addr_config); - evergreen_program_channel_remap(rdev); - num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; grbm_gfx_index = INSTANCE_BROADCAST_WRITES; diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 99fbd793c08..8c79ca97753 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -569,36 +569,6 @@ static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev, return backend_map; } -static void cayman_program_channel_remap(struct radeon_device *rdev) -{ - u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp; - - tmp = RREG32(MC_SHARED_CHMAP); - switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { - case 0: - case 1: - case 2: - case 3: - default: - /* default mapping */ - mc_shared_chremap = 0x00fac688; - break; - } - - switch (rdev->family) { - case CHIP_CAYMAN: - default: - //tcp_chan_steer_lo = 0x54763210 - tcp_chan_steer_lo = 0x76543210; - tcp_chan_steer_hi = 0x0000ba98; - break; - } - - WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo); - WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi); - WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); -} - static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev, u32 disable_mask_per_se, u32 max_disable_mask_per_se, @@ -842,8 +812,6 @@ static void cayman_gpu_init(struct radeon_device *rdev) WREG32(DMIF_ADDR_CONFIG, gb_addr_config); WREG32(HDP_ADDR_CONFIG, gb_addr_config); - cayman_program_channel_remap(rdev); - /* primary versions */ WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index c4b8741dbf5..449c3d8c683 
100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -68,11 +68,11 @@ void radeon_connector_hotplug(struct drm_connector *connector) if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { int saved_dpms = connector->dpms; - if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) && - radeon_dp_needs_link_train(radeon_connector)) - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); - else + /* Only turn off the display it it's physically disconnected */ + if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + else if (radeon_dp_needs_link_train(radeon_connector)) + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); connector->dpms = saved_dpms; } } @@ -1303,23 +1303,14 @@ radeon_dp_detect(struct drm_connector *connector, bool force) /* get the DPCD from the bridge */ radeon_dp_getdpcd(radeon_connector); - if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) - ret = connector_status_connected; - else { - /* need to setup ddc on the bridge */ - if (encoder) - radeon_atom_ext_encoder_setup_ddc(encoder); + if (encoder) { + /* setup ddc on the bridge */ + radeon_atom_ext_encoder_setup_ddc(encoder); if (radeon_ddc_probe(radeon_connector, - radeon_connector->requires_extended_probe)) + radeon_connector->requires_extended_probe)) /* try DDC */ ret = connector_status_connected; - } - - if ((ret == connector_status_disconnected) && - radeon_connector->dac_load_detect) { - struct drm_encoder *encoder = radeon_best_single_encoder(connector); - struct drm_encoder_helper_funcs *encoder_funcs; - if (encoder) { - encoder_funcs = encoder->helper_private; + else if (radeon_connector->dac_load_detect) { /* try load detection */ + struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; ret = encoder_funcs->detect(encoder, connector); } } diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 3189a7efb2e..fde25c0d65a 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -208,23 +208,25 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, int xorigin = 0, yorigin = 0; int w = radeon_crtc->cursor_width; - if (x < 0) - xorigin = -x + 1; - if (y < 0) - yorigin = -y + 1; - if (xorigin >= CURSOR_WIDTH) - xorigin = CURSOR_WIDTH - 1; - if (yorigin >= CURSOR_HEIGHT) - yorigin = CURSOR_HEIGHT - 1; - if (ASIC_IS_AVIVO(rdev)) { - int i = 0; - struct drm_crtc *crtc_p; - /* avivo cursor are offset into the total surface */ x += crtc->x; y += crtc->y; - DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); + } + DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); + + if (x < 0) { + xorigin = min(-x, CURSOR_WIDTH - 1); + x = 0; + } + if (y < 0) { + yorigin = min(-y, CURSOR_HEIGHT - 1); + y = 0; + } + + if (ASIC_IS_AVIVO(rdev)) { + int i = 0; + struct drm_crtc *crtc_p; /* avivo cursor image can't end on 128 pixel boundary or * go past the end of the frame if both crtcs are enabled @@ -253,16 +255,12 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, radeon_lock_cursor(crtc, true); if (ASIC_IS_DCE4(rdev)) { - WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, - ((xorigin ? 0 : x) << 16) | - (yorigin ? 
0 : y)); + WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y); WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); } else if (ASIC_IS_AVIVO(rdev)) { - WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, - ((xorigin ? 0 : x) << 16) | - (yorigin ? 0 : y)); + WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y); WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset, ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); @@ -276,8 +274,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, | yorigin)); WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset, (RADEON_CUR_LOCK - | ((xorigin ? 0 : x) << 16) - | (yorigin ? 0 : y))); + | (x << 16) + | y)); /* offset is from DISP(2)_BASE_ADDRESS */ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + (yorigin * 256))); diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 13690f3eb4a..eb3f6dc6df8 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -1638,7 +1638,17 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) break; case 2: args.v2.ucCRTC = radeon_crtc->crtc_id; - args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); + if (radeon_encoder_is_dp_bridge(encoder)) { + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + + if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) + args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS; + else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA) + args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT; + else + args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); + } else + args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: @@ -1755,9 +1765,17 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) /* DCE4/5 */ if (ASIC_IS_DCE4(rdev)) { dig = radeon_encoder->enc_priv; - if (ASIC_IS_DCE41(rdev)) - return radeon_crtc->crtc_id; - else { + if (ASIC_IS_DCE41(rdev)) { + /* ontario follows DCE4 */ + if (rdev->family == CHIP_PALM) { + if (dig->linkb) + return 1; + else + return 0; + } else + /* llano follows DCE3.2 */ + return radeon_crtc->crtc_id; + } else { switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: if (dig->linkb) diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 4720d000d44..b13c2eedc32 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -536,55 +536,6 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev, return backend_map; } -static void rv770_program_channel_remap(struct radeon_device *rdev) -{ - u32 tcp_chan_steer, mc_shared_chremap, tmp; - bool force_no_swizzle; - - switch (rdev->family) { - case CHIP_RV770: - case CHIP_RV730: - force_no_swizzle = false; - break; - case CHIP_RV710: - case CHIP_RV740: - default: - force_no_swizzle = true; - break; - } - - tmp = RREG32(MC_SHARED_CHMAP); - switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { - case 0: - case 1: - default: - /* default mapping */ - mc_shared_chremap = 0x00fac688; - break; - case 2: - case 3: - if (force_no_swizzle) - mc_shared_chremap = 
0x00fac688; - else - mc_shared_chremap = 0x00bbc298; - break; - } - - if (rdev->family == CHIP_RV740) - tcp_chan_steer = 0x00ef2a60; - else - tcp_chan_steer = 0x00fac688; - - /* RV770 CE has special chremap setup */ - if (rdev->pdev->device == 0x944e) { - tcp_chan_steer = 0x00b08b08; - mc_shared_chremap = 0x00b08b08; - } - - WREG32(TCP_CHAN_STEER, tcp_chan_steer); - WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); -} - static void rv770_gpu_init(struct radeon_device *rdev) { int i, j, num_qd_pipes; @@ -785,8 +736,6 @@ static void rv770_gpu_init(struct radeon_device *rdev) WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); - rv770_program_channel_remap(rdev); - WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index ae3c6f5dd2b..082fcaea583 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -321,7 +321,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; struct ttm_tt *ttm = bo->ttm; struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_reg old_copy; + struct ttm_mem_reg old_copy = *old_mem; void *old_iomap; void *new_iomap; int ret; diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 44b23917d4c..93238378664 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -377,9 +377,9 @@ exit_free: } -static int __devinit chk_ucode_version(struct platform_device *pdev) +static int __cpuinit chk_ucode_version(unsigned int cpu) { - struct cpuinfo_x86 *c = &cpu_data(pdev->id); + struct cpuinfo_x86 *c = &cpu_data(cpu); int err; u32 edx; @@ -390,17 +390,15 @@ static int __devinit chk_ucode_version(struct platform_device *pdev) */ if (c->x86_model == 0xe && c->x86_mask < 0xc) { /* check for microcode update */ - err = smp_call_function_single(pdev->id, get_ucode_rev_on_cpu, + err = smp_call_function_single(cpu, get_ucode_rev_on_cpu, &edx, 1); if (err) { - dev_err(&pdev->dev, - "Cannot determine microcode revision of " - "CPU#%u (%d)!\n", pdev->id, err); + pr_err("Cannot determine microcode revision of " + "CPU#%u (%d)!\n", cpu, err); return -ENODEV; } else if (edx < 0x39) { - dev_err(&pdev->dev, - "Errata AE18 not fixed, update BIOS or " - "microcode of the CPU!\n"); + pr_err("Errata AE18 not fixed, update BIOS or " + "microcode of the CPU!\n"); return -ENODEV; } } @@ -508,6 +506,7 @@ static int create_core_data(struct platform_device *pdev, return 0; exit_free: + pdata->core_data[attr_no] = NULL; kfree(tdata); return err; } @@ -544,11 +543,6 @@ static int __devinit coretemp_probe(struct platform_device *pdev) struct platform_data *pdata; int err; - /* Check the microcode version of the CPU */ - err = chk_ucode_version(pdev); - if (err) - return err; - /* Initialize the per-package data structures */ pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL); if (!pdata) @@ -630,7 +624,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu) } pdev_entry->pdev = pdev; - pdev_entry->phys_proc_id = TO_PHYS_ID(cpu); + pdev_entry->phys_proc_id = pdev->id; list_add_tail(&pdev_entry->list, &pdev_list); mutex_unlock(&pdev_list_mutex); @@ -691,6 +685,10 @@ static void __cpuinit get_core_online(unsigned int cpu) return; if (!pdev) { + /* Check the microcode version of the CPU */ + if 
(chk_ucode_version(cpu)) + return; + /* * Alright, we have DTS support. * We are bringing the _first_ core in this pkg diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c index f2b377c56a3..36d7f270b14 100644 --- a/drivers/hwmon/w83627ehf.c +++ b/drivers/hwmon/w83627ehf.c @@ -390,7 +390,7 @@ temp_from_reg(u16 reg, s16 regval) { if (is_word_sized(reg)) return LM75_TEMP_FROM_REG(regval); - return regval * 1000; + return ((s8)regval) * 1000; } static inline u16 @@ -398,7 +398,8 @@ temp_to_reg(u16 reg, long temp) { if (is_word_sized(reg)) return LM75_TEMP_TO_REG(temp); - return DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000), 1000); + return (s8)DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000), + 1000); } /* Some of analog inputs have internal scaling (2x), 8mV is ADC LSB */ @@ -1715,7 +1716,8 @@ static void w83627ehf_device_remove_files(struct device *dev) } /* Get the monitoring functions started */ -static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data) +static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data, + enum kinds kind) { int i; u8 tmp, diode; @@ -1746,10 +1748,16 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data) w83627ehf_write_value(data, W83627EHF_REG_VBAT, tmp | 0x01); /* Get thermal sensor types */ - diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE); + switch (kind) { + case w83627ehf: + diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE); + break; + default: + diode = 0x70; + } for (i = 0; i < 3; i++) { if ((tmp & (0x02 << i))) - data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 2; + data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 3; else data->temp_type[i] = 4; /* thermistor */ } @@ -2016,7 +2024,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) } /* Initialize the chip */ - w83627ehf_init_device(data); + w83627ehf_init_device(data, sio_data->kind); data->vrm = vid_which_vrm(); superio_enter(sio_data->sioreg); diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index 9827c5e686c..811dbbd9306 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig @@ -327,7 +327,7 @@ config BLK_DEV_OPTI621 select BLK_DEV_IDEPCI help This is a driver for the OPTi 82C621 EIDE controller. - Please read the comments at the top of <file:drivers/ide/pci/opti621.c>. + Please read the comments at the top of <file:drivers/ide/opti621.c>. config BLK_DEV_RZ1000 tristate "RZ1000 chipset bugfix/support" @@ -365,7 +365,7 @@ config BLK_DEV_ALI15X3 normal dual channel support. Please read the comments at the top of - <file:drivers/ide/pci/alim15x3.c>. + <file:drivers/ide/alim15x3.c>. If unsure, say N. @@ -528,7 +528,7 @@ config BLK_DEV_NS87415 This driver adds detection and support for the NS87415 chip (used mainly on SPARC64 and PA-RISC machines). - Please read the comments at the top of <file:drivers/ide/pci/ns87415.c>. + Please read the comments at the top of <file:drivers/ide/ns87415.c>. config BLK_DEV_PDC202XX_OLD tristate "PROMISE PDC202{46|62|65|67} support" @@ -547,7 +547,7 @@ config BLK_DEV_PDC202XX_OLD for more than one card. Please read the comments at the top of - <file:drivers/ide/pci/pdc202xx_old.c>. + <file:drivers/ide/pdc202xx_old.c>. If unsure, say N. @@ -593,7 +593,7 @@ config BLK_DEV_SIS5513 ATA100: SiS635, SiS645, SiS650, SiS730, SiS735, SiS740, SiS745, SiS750 - Please read the comments at the top of <file:drivers/ide/pci/sis5513.c>. + Please read the comments at the top of <file:drivers/ide/sis5513.c>. 
config BLK_DEV_SL82C105 tristate "Winbond SL82c105 support" @@ -616,7 +616,7 @@ config BLK_DEV_SLC90E66 look-a-like to the PIIX4 it should be a nice addition. Please read the comments at the top of - <file:drivers/ide/pci/slc90e66.c>. + <file:drivers/ide/slc90e66.c>. config BLK_DEV_TRM290 tristate "Tekram TRM290 chipset support" @@ -625,7 +625,7 @@ config BLK_DEV_TRM290 This driver adds support for bus master DMA transfers using the Tekram TRM290 PCI IDE chip. Volunteers are needed for further tweaking and development. - Please read the comments at the top of <file:drivers/ide/pci/trm290.c>. + Please read the comments at the top of <file:drivers/ide/trm290.c>. config BLK_DEV_VIA82CXXX tristate "VIA82CXXX chipset support" @@ -836,7 +836,7 @@ config BLK_DEV_ALI14XX of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster I/O speeds to be set as well. See the files <file:Documentation/ide/ide.txt> and - <file:drivers/ide/legacy/ali14xx.c> for more info. + <file:drivers/ide/ali14xx.c> for more info. config BLK_DEV_DTC2278 tristate "DTC-2278 support" @@ -847,7 +847,7 @@ config BLK_DEV_DTC2278 boot parameter. It enables support for the secondary IDE interface of the DTC-2278 card, and permits faster I/O speeds to be set as well. See the <file:Documentation/ide/ide.txt> and - <file:drivers/ide/legacy/dtc2278.c> files for more info. + <file:drivers/ide/dtc2278.c> files for more info. config BLK_DEV_HT6560B tristate "Holtek HT6560B support" @@ -858,7 +858,7 @@ config BLK_DEV_HT6560B boot parameter. It enables support for the secondary IDE interface of the Holtek card, and permits faster I/O speeds to be set as well. See the <file:Documentation/ide/ide.txt> and - <file:drivers/ide/legacy/ht6560b.c> files for more info. + <file:drivers/ide/ht6560b.c> files for more info. config BLK_DEV_QD65XX tristate "QDI QD65xx support" @@ -867,7 +867,7 @@ config BLK_DEV_QD65XX help This driver is enabled at runtime using the "qd65xx.probe" kernel boot parameter. It permits faster I/O speeds to be set. See the - <file:Documentation/ide/ide.txt> and <file:drivers/ide/legacy/qd65xx.c> + <file:Documentation/ide/ide.txt> and <file:drivers/ide/qd65xx.c> for more info. config BLK_DEV_UMC8672 @@ -879,7 +879,7 @@ config BLK_DEV_UMC8672 boot parameter. It enables support for the secondary IDE interface of the UMC-8672, and permits faster I/O speeds to be set as well. See the files <file:Documentation/ide/ide.txt> and - <file:drivers/ide/legacy/umc8672.c> for more info. + <file:drivers/ide/umc8672.c> for more info. 
endif diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 274798068a5..16f69be820c 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -435,7 +435,12 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq) if (!(rq->cmd_flags & REQ_FLUSH)) return BLKPREP_OK; - cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); + if (rq->special) { + cmd = rq->special; + memset(cmd, 0, sizeof(*cmd)); + } else { + cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); + } /* FIXME: map struct ide_taskfile on rq->cmd[] */ BUG_ON(cmd == NULL); diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 17bf9d95463..6cd642aaa4d 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -287,7 +287,7 @@ void __free_ep(struct kref *kref) if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); dst_release(ep->dst); - l2t_release(L2DATA(ep->com.tdev), ep->l2t); + l2t_release(ep->com.tdev, ep->l2t); } kfree(ep); } @@ -1178,7 +1178,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) release_tid(ep->com.tdev, GET_TID(rpl), NULL); cxgb3_free_atid(ep->com.tdev, ep->atid); dst_release(ep->dst); - l2t_release(L2DATA(ep->com.tdev), ep->l2t); + l2t_release(ep->com.tdev, ep->l2t); put_ep(&ep->com); return CPL_RET_BUF_DONE; } @@ -1377,7 +1377,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) if (!child_ep) { printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", __func__); - l2t_release(L2DATA(tdev), l2t); + l2t_release(tdev, l2t); dst_release(dst); goto reject; } @@ -1956,7 +1956,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) if (!err) goto out; - l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t); + l2t_release(h->rdev.t3cdev_p, ep->l2t); fail4: dst_release(ep->dst); fail3: @@ -2127,7 +2127,7 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new, l2t); dst_hold(new); - l2t_release(L2DATA(ep->com.tdev), ep->l2t); + l2t_release(ep->com.tdev, ep->l2t); ep->l2t = l2t; dst_release(old); ep->dst = new; diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 0dc97ec15c2..9dea71849f4 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -1124,11 +1124,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, for (i = 0; i < 8; i++) __set_bit(BTN_0 + i, input_dev->keybit); - if (wacom_wac->features.type != WACOM_21UX2) { - input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); - input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); - } - + input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); + input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index c621c98c99d..a88f3cbb100 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -306,6 +306,11 @@ static inline bool dma_pte_present(struct dma_pte *pte) return (pte->val & 3) != 0; } +static inline bool dma_pte_superpage(struct dma_pte *pte) +{ + return (pte->val & (1 << 7)); +} + static inline int first_pte_in_page(struct dma_pte *pte) { return !((unsigned long)pte & ~VTD_PAGE_MASK); @@ -404,6 +409,9 @@ static int dmar_forcedac; static int intel_iommu_strict; static 
int intel_iommu_superpage = 1; +int intel_iommu_gfx_mapped; +EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); + #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1)) static DEFINE_SPINLOCK(device_domain_lock); static LIST_HEAD(device_domain_list); @@ -577,17 +585,18 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain) static void domain_update_iommu_superpage(struct dmar_domain *domain) { - int i, mask = 0xf; + struct dmar_drhd_unit *drhd; + struct intel_iommu *iommu = NULL; + int mask = 0xf; if (!intel_iommu_superpage) { domain->iommu_superpage = 0; return; } - domain->iommu_superpage = 4; /* 1TiB */ - - for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) { - mask |= cap_super_page_val(g_iommus[i]->cap); + /* set iommu_superpage to the smallest common denominator */ + for_each_active_iommu(iommu, drhd) { + mask &= cap_super_page_val(iommu->cap); if (!mask) { break; } @@ -730,29 +739,23 @@ out: } static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, - unsigned long pfn, int large_level) + unsigned long pfn, int target_level) { int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; struct dma_pte *parent, *pte = NULL; int level = agaw_to_level(domain->agaw); - int offset, target_level; + int offset; BUG_ON(!domain->pgd); BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width); parent = domain->pgd; - /* Search pte */ - if (!large_level) - target_level = 1; - else - target_level = large_level; - while (level > 0) { void *tmp_page; offset = pfn_level_offset(pfn, level); pte = &parent[offset]; - if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE)) + if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte))) break; if (level == target_level) break; @@ -816,13 +819,14 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, } /* clear last level pte, a tlb flush should be followed */ -static void dma_pte_clear_range(struct dmar_domain *domain, +static int dma_pte_clear_range(struct dmar_domain *domain, unsigned long start_pfn, unsigned long last_pfn) { int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; unsigned int large_page = 1; struct dma_pte *first_pte, *pte; + int order; BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); @@ -846,6 +850,9 @@ static void dma_pte_clear_range(struct dmar_domain *domain, (void *)pte - (void *)first_pte); } while (start_pfn && start_pfn <= last_pfn); + + order = (large_page - 1) * 9; + return order; } /* free page table pages. last level pte should already be cleared */ @@ -3226,9 +3233,6 @@ static void __init init_no_remapping_devices(void) } } - if (dmar_map_gfx) - return; - for_each_drhd_unit(drhd) { int i; if (drhd->ignored || drhd->include_all) @@ -3236,18 +3240,23 @@ static void __init init_no_remapping_devices(void) for (i = 0; i < drhd->devices_cnt; i++) if (drhd->devices[i] && - !IS_GFX_DEVICE(drhd->devices[i])) + !IS_GFX_DEVICE(drhd->devices[i])) break; if (i < drhd->devices_cnt) continue; - /* bypass IOMMU if it is just for gfx devices */ - drhd->ignored = 1; - for (i = 0; i < drhd->devices_cnt; i++) { - if (!drhd->devices[i]) - continue; - drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; + /* This IOMMU has *only* gfx devices. 
Either bypass it or + set the gfx_mapped flag, as appropriate */ + if (dmar_map_gfx) { + intel_iommu_gfx_mapped = 1; + } else { + drhd->ignored = 1; + for (i = 0; i < drhd->devices_cnt; i++) { + if (!drhd->devices[i]) + continue; + drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; + } } } } @@ -3568,6 +3577,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, found = 1; } + spin_unlock_irqrestore(&device_domain_lock, flags); + if (found == 0) { unsigned long tmp_flags; spin_lock_irqsave(&domain->iommu_lock, tmp_flags); @@ -3584,8 +3595,6 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, spin_unlock_irqrestore(&iommu->lock, tmp_flags); } } - - spin_unlock_irqrestore(&device_domain_lock, flags); } static void vm_domain_remove_all_dev_info(struct dmar_domain *domain) @@ -3739,6 +3748,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain) vm_domain_exit(dmar_domain); return -ENOMEM; } + domain_update_iommu_cap(dmar_domain); domain->priv = dmar_domain; return 0; @@ -3864,14 +3874,15 @@ static int intel_iommu_unmap(struct iommu_domain *domain, { struct dmar_domain *dmar_domain = domain->priv; size_t size = PAGE_SIZE << gfp_order; + int order; - dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, + order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, (iova + size - 1) >> VTD_PAGE_SHIFT); if (dmar_domain->max_addr == iova + size) dmar_domain->max_addr = iova; - return gfp_order; + return order; } static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, @@ -3950,7 +3961,11 @@ static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev) if (!(ggc & GGC_MEMORY_VT_ENABLED)) { printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); dmar_map_gfx = 0; - } + } else if (dmar_map_gfx) { + /* we have to ensure the gfx device is idle before we flush */ + printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n"); + intel_iommu_strict = 1; + } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 49da55c1528..8c2a000cf3f 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1698,6 +1698,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) } ti->num_flush_requests = 1; + ti->discard_zeroes_data_unsupported = 1; + return 0; bad: diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 89f73ca22cf..f84c08029b2 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -81,8 +81,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags> */ if (!strcasecmp(arg_name, "corrupt_bio_byte")) { - if (!argc) + if (!argc) { ti->error = "Feature corrupt_bio_byte requires parameters"; + return -EINVAL; + } r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error); if (r) diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index f8214702963..32ac70861d6 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -628,6 +628,7 @@ void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc, job->kc = kc; job->fn = fn; job->context = context; + job->master_job = job; atomic_inc(&kc->nr_jobs); diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index a002dd85db1..86df8b2cf92 100644 --- a/drivers/md/dm-raid.c +++ 
b/drivers/md/dm-raid.c @@ -449,7 +449,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv, rs->ti->error = "write_mostly option is only valid for RAID1"; return -EINVAL; } - if (value > rs->md.raid_disks) { + if (value >= rs->md.raid_disks) { rs->ti->error = "Invalid write_mostly drive index given"; return -EINVAL; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 986b8754bb0..bc04518e9d8 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1238,14 +1238,15 @@ static void dm_table_set_integrity(struct dm_table *t) return; template_disk = dm_table_get_integrity_disk(t, true); - if (!template_disk && - blk_integrity_is_initialized(dm_disk(t->md))) { + if (template_disk) + blk_integrity_register(dm_disk(t->md), + blk_get_integrity(template_disk)); + else if (blk_integrity_is_initialized(dm_disk(t->md))) DMWARN("%s: device no longer has a valid integrity profile", dm_device_name(t->md)); - return; - } - blk_integrity_register(dm_disk(t->md), - blk_get_integrity(template_disk)); + else + DMWARN("%s: unable to establish an integrity profile", + dm_device_name(t->md)); } static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, @@ -1282,6 +1283,22 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush) return 0; } +static bool dm_table_discard_zeroes_data(struct dm_table *t) +{ + struct dm_target *ti; + unsigned i = 0; + + /* Ensure that all targets supports discard_zeroes_data. */ + while (i < dm_table_get_num_targets(t)) { + ti = dm_table_get_target(t, i++); + + if (ti->discard_zeroes_data_unsupported) + return 0; + } + + return 1; +} + void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *limits) { @@ -1304,6 +1321,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, } blk_queue_flush(q, flush); + if (!dm_table_discard_zeroes_data(t)) + q->limits.discard_zeroes_data = 0; + dm_table_set_integrity(t); /* diff --git a/drivers/md/md.c b/drivers/md/md.c index 5404b229582..5c95ccb5950 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -61,6 +61,11 @@ static void autostart_arrays(int part); #endif +/* pers_list is a list of registered personalities protected + * by pers_lock. + * pers_lock does extra service to protect accesses to + * mddev->thread when the mutex cannot be held. 
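
The dm changes above let a target opt out of the discard_zeroes_data guarantee (dm-crypt does, since discarded blocks read back through the crypto layer as garbage rather than zeroes), and dm_table_discard_zeroes_data() only keeps the queue limit when no target in the table has opted out. A standalone sketch of that all-or-nothing check, with a hypothetical target flag:

    #include <stdbool.h>
    #include <stdio.h>

    struct target { bool discard_zeroes_data_unsupported; };

    /* The guarantee only survives if no target in the table opts out. */
    static bool table_discard_zeroes_data(const struct target *t, int n)
    {
        for (int i = 0; i < n; i++)
            if (t[i].discard_zeroes_data_unsupported)
                return false;
        return true;
    }

    int main(void)
    {
        /* e.g. linear, crypt, linear */
        struct target tbl[] = { { false }, { true }, { false } };

        printf("discard_zeroes_data: %d\n", table_discard_zeroes_data(tbl, 3));
        return 0;
    }
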
+ */ static LIST_HEAD(pers_list); static DEFINE_SPINLOCK(pers_lock); @@ -739,7 +744,12 @@ static void mddev_unlock(mddev_t * mddev) } else mutex_unlock(&mddev->reconfig_mutex); + /* was we've dropped the mutex we need a spinlock to + * make sur the thread doesn't disappear + */ + spin_lock(&pers_lock); md_wakeup_thread(mddev->thread); + spin_unlock(&pers_lock); } static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) @@ -6429,11 +6439,18 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, return thread; } -void md_unregister_thread(mdk_thread_t *thread) +void md_unregister_thread(mdk_thread_t **threadp) { + mdk_thread_t *thread = *threadp; if (!thread) return; dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); + /* Locking ensures that mddev_unlock does not wake_up a + * non-existent thread + */ + spin_lock(&pers_lock); + *threadp = NULL; + spin_unlock(&pers_lock); kthread_stop(thread->tsk); kfree(thread); @@ -7340,8 +7357,7 @@ static void reap_sync_thread(mddev_t *mddev) mdk_rdev_t *rdev; /* resync has finished, collect result */ - md_unregister_thread(mddev->sync_thread); - mddev->sync_thread = NULL; + md_unregister_thread(&mddev->sync_thread); if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { /* success...*/ diff --git a/drivers/md/md.h b/drivers/md/md.h index 1e586bb4452..0a309dc29b4 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -560,7 +560,7 @@ extern int register_md_personality(struct mdk_personality *p); extern int unregister_md_personality(struct mdk_personality *p); extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev), mddev_t *mddev, const char *name); -extern void md_unregister_thread(mdk_thread_t *thread); +extern void md_unregister_thread(mdk_thread_t **threadp); extern void md_wakeup_thread(mdk_thread_t *thread); extern void md_check_recovery(mddev_t *mddev); extern void md_write_start(mddev_t *mddev, struct bio *bi); diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 3535c23af28..d5b5fb30017 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -514,8 +514,7 @@ static int multipath_stop (mddev_t *mddev) { multipath_conf_t *conf = mddev->private; - md_unregister_thread(mddev->thread); - mddev->thread = NULL; + md_unregister_thread(&mddev->thread); blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ mempool_destroy(conf->pool); kfree(conf->multipaths); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index f4622dd8fc5..d9587dffe53 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2562,8 +2562,7 @@ static int stop(mddev_t *mddev) raise_barrier(conf); lower_barrier(conf); - md_unregister_thread(mddev->thread); - mddev->thread = NULL; + md_unregister_thread(&mddev->thread); if (conf->r1bio_pool) mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index d7a8468ddea..0cd9672cf9c 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2955,7 +2955,7 @@ static int run(mddev_t *mddev) return 0; out_free_conf: - md_unregister_thread(mddev->thread); + md_unregister_thread(&mddev->thread); if (conf->r10bio_pool) mempool_destroy(conf->r10bio_pool); safe_put_page(conf->tmppage); @@ -2973,8 +2973,7 @@ static int stop(mddev_t *mddev) raise_barrier(conf, 0); lower_barrier(conf); - md_unregister_thread(mddev->thread); - mddev->thread = NULL; + md_unregister_thread(&mddev->thread); blk_sync_queue(mddev->queue); /* the unplug fn references 
'conf'*/ if (conf->r10bio_pool) mempool_destroy(conf->r10bio_pool); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 43709fa6b6d..ac5e8b57e50 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -4941,8 +4941,7 @@ static int run(mddev_t *mddev) return 0; abort: - md_unregister_thread(mddev->thread); - mddev->thread = NULL; + md_unregister_thread(&mddev->thread); if (conf) { print_raid5_conf(conf); free_conf(conf); @@ -4956,8 +4955,7 @@ static int stop(mddev_t *mddev) { raid5_conf_t *conf = mddev->private; - md_unregister_thread(mddev->thread); - mddev->thread = NULL; + md_unregister_thread(&mddev->thread); if (mddev->queue) mddev->queue->backing_dev_info.congested_fn = NULL; free_conf(conf); diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c index b5ef3622244..b3a5ecdb33a 100644 --- a/drivers/media/video/omap/omap_vout.c +++ b/drivers/media/video/omap/omap_vout.c @@ -2194,19 +2194,6 @@ static int __init omap_vout_probe(struct platform_device *pdev) "'%s' Display already enabled\n", def_display->name); } - /* set the update mode */ - if (def_display->caps & - OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { - if (dssdrv->enable_te) - dssdrv->enable_te(def_display, 0); - if (dssdrv->set_update_mode) - dssdrv->set_update_mode(def_display, - OMAP_DSS_UPDATE_MANUAL); - } else { - if (dssdrv->set_update_mode) - dssdrv->set_update_mode(def_display, - OMAP_DSS_UPDATE_AUTO); - } } } diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c index 9d3459de04b..80796eb0c53 100644 --- a/drivers/media/video/omap3isp/ispccdc.c +++ b/drivers/media/video/omap3isp/ispccdc.c @@ -31,6 +31,7 @@ #include <linux/dma-mapping.h> #include <linux/mm.h> #include <linux/sched.h> +#include <linux/slab.h> #include <media/v4l2-event.h> #include "isp.h" diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c index d29f9c2d085..e4100b1f68d 100644 --- a/drivers/media/video/uvc/uvc_driver.c +++ b/drivers/media/video/uvc/uvc_driver.c @@ -1961,7 +1961,7 @@ static int __uvc_resume(struct usb_interface *intf, int reset) list_for_each_entry(stream, &dev->streams, list) { if (stream->intf == intf) - return uvc_video_resume(stream); + return uvc_video_resume(stream, reset); } uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface " diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c index 48fea373c25..29e239911d0 100644 --- a/drivers/media/video/uvc/uvc_entity.c +++ b/drivers/media/video/uvc/uvc_entity.c @@ -49,7 +49,7 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain, if (remote == NULL) return -EINVAL; - source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING) + source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING) ? (remote->vdev ? &remote->vdev->entity : NULL) : &remote->subdev.entity; if (source == NULL) diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c index 8244167c891..ffd1158628b 100644 --- a/drivers/media/video/uvc/uvc_video.c +++ b/drivers/media/video/uvc/uvc_video.c @@ -1104,10 +1104,18 @@ int uvc_video_suspend(struct uvc_streaming *stream) * buffers, making sure userspace applications are notified of the problem * instead of waiting forever. */ -int uvc_video_resume(struct uvc_streaming *stream) +int uvc_video_resume(struct uvc_streaming *stream, int reset) { int ret; + /* If the bus has been reset on resume, set the alternate setting to 0. 
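
The md changes above switch md_unregister_thread() to take the address of the caller's thread pointer: the pointer is cleared under pers_lock before the thread is stopped and freed, and mddev_unlock() wakes the thread under the same lock, so a late wakeup sees NULL instead of freed memory. A simplified userspace model of that hand-off (a pthread mutex stands in for pers_lock, and the worker struct is invented):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct worker { const char *name; };

    static pthread_mutex_t pers_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Waker: only looks at the worker while holding the lock. */
    static void wake_worker(struct worker **wp)
    {
        pthread_mutex_lock(&pers_lock);
        if (*wp)
            printf("waking %s\n", (*wp)->name);
        pthread_mutex_unlock(&pers_lock);
    }

    /* Unregister: clear the caller's pointer under the lock first,
     * then tear the worker down outside the lock. */
    static void unregister_worker(struct worker **wp)
    {
        struct worker *w = *wp;

        if (!w)
            return;
        pthread_mutex_lock(&pers_lock);
        *wp = NULL;                 /* wakers now see NULL, never a freed object */
        pthread_mutex_unlock(&pers_lock);
        free(w);
    }

    int main(void)
    {
        struct worker *thread = malloc(sizeof(*thread));

        thread->name = "md_resync";
        wake_worker(&thread);
        unregister_worker(&thread);
        wake_worker(&thread);       /* safe: pointer is already NULL */
        return 0;
    }
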
+ * This should be the default value, but some devices crash or otherwise + * misbehave if they don't receive a SET_INTERFACE request before any + * other video control request. + */ + if (reset) + usb_set_interface(stream->dev->udev, stream->intfnum, 0); + stream->frozen = 0; ret = uvc_commit_video(stream, &stream->ctrl); diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h index df32a43ca86..cbdd49bf8b6 100644 --- a/drivers/media/video/uvc/uvcvideo.h +++ b/drivers/media/video/uvc/uvcvideo.h @@ -638,7 +638,7 @@ extern void uvc_mc_cleanup_entity(struct uvc_entity *entity); /* Video */ extern int uvc_video_init(struct uvc_streaming *stream); extern int uvc_video_suspend(struct uvc_streaming *stream); -extern int uvc_video_resume(struct uvc_streaming *stream); +extern int uvc_video_resume(struct uvc_streaming *stream, int reset); extern int uvc_video_enable(struct uvc_streaming *stream, int enable); extern int uvc_probe_video(struct uvc_streaming *stream, struct uvc_streaming_control *probe); diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c index 06f14008b34..a5c9ed128b9 100644 --- a/drivers/media/video/v4l2-dev.c +++ b/drivers/media/video/v4l2-dev.c @@ -173,6 +173,17 @@ static void v4l2_device_release(struct device *cd) media_device_unregister_entity(&vdev->entity); #endif + /* Do not call v4l2_device_put if there is no release callback set. + * Drivers that have no v4l2_device release callback might free the + * v4l2_dev instance in the video_device release callback below, so we + * must perform this check here. + * + * TODO: In the long run all drivers that use v4l2_device should use the + * v4l2_device release callback. This check will then be unnecessary. + */ + if (v4l2_dev && v4l2_dev->release == NULL) + v4l2_dev = NULL; + /* Release video_device and perform other cleanups as needed. */ vdev->release(vdev); diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c index c72856c4143..e6a2c3b302d 100644 --- a/drivers/media/video/v4l2-device.c +++ b/drivers/media/video/v4l2-device.c @@ -38,6 +38,7 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev) mutex_init(&v4l2_dev->ioctl_lock); v4l2_prio_init(&v4l2_dev->prio); kref_init(&v4l2_dev->ref); + get_device(dev); v4l2_dev->dev = dev; if (dev == NULL) { /* If dev == NULL, then name must be filled in by the caller */ @@ -93,6 +94,7 @@ void v4l2_device_disconnect(struct v4l2_device *v4l2_dev) if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev) dev_set_drvdata(v4l2_dev->dev, NULL); + put_device(v4l2_dev->dev); v4l2_dev->dev = NULL; } EXPORT_SYMBOL_GPL(v4l2_device_disconnect); diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c index 21131c7b0f1..563654c9b19 100644 --- a/drivers/mfd/jz4740-adc.c +++ b/drivers/mfd/jz4740-adc.c @@ -273,7 +273,7 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev) ct->regs.ack = JZ_REG_ADC_STATUS; ct->chip.irq_mask = irq_gc_mask_set_bit; ct->chip.irq_unmask = irq_gc_mask_clr_bit; - ct->chip.irq_ack = irq_gc_ack; + ct->chip.irq_ack = irq_gc_ack_set_bit; irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL); diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c index b928bc14e97..8b51cd62d06 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d.c +++ b/drivers/misc/lis3lv02d/lis3lv02d.c @@ -375,12 +375,14 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3) * both have been read. So the value read will always be correct. 
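
The v4l2-device hunk above takes a reference on the parent struct device with get_device() when it is stored in v4l2_dev->dev and drops it again in v4l2_device_disconnect(), so the stored pointer can never outlive its target. A tiny standalone refcount model of that pairing (the counter and helpers here are stand-ins; the kernel's get_device()/put_device() are kobject based):

    #include <stdio.h>
    #include <stdlib.h>

    struct device { int refcount; };

    static struct device *get_device(struct device *d)
    {
        if (d)
            d->refcount++;
        return d;
    }

    static void put_device(struct device *d)
    {
        if (d && --d->refcount == 0) {
            printf("last reference dropped, freeing device\n");
            free(d);
        }
    }

    struct v4l2_device { struct device *dev; };

    static void v4l2_register(struct v4l2_device *v, struct device *parent)
    {
        v->dev = get_device(parent);    /* pin the parent while we point at it */
    }

    static void v4l2_disconnect(struct v4l2_device *v)
    {
        put_device(v->dev);             /* release it when the pointer is dropped */
        v->dev = NULL;
    }

    int main(void)
    {
        struct device *parent = calloc(1, sizeof(*parent));
        struct v4l2_device v4l2;

        parent->refcount = 1;           /* creator's reference */
        v4l2_register(&v4l2, parent);
        put_device(parent);             /* creator lets go; v4l2 still holds it */
        v4l2_disconnect(&v4l2);         /* last reference, device freed here */
        return 0;
    }
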
* Set BOOT bit to refresh factory tuning values. */ - lis3->read(lis3, CTRL_REG2, ®); - if (lis3->whoami == WAI_12B) - reg |= CTRL2_BDU | CTRL2_BOOT; - else - reg |= CTRL2_BOOT_8B; - lis3->write(lis3, CTRL_REG2, reg); + if (lis3->pdata) { + lis3->read(lis3, CTRL_REG2, ®); + if (lis3->whoami == WAI_12B) + reg |= CTRL2_BDU | CTRL2_BOOT; + else + reg |= CTRL2_BOOT_8B; + lis3->write(lis3, CTRL_REG2, reg); + } /* LIS3 power on delay is quite long */ msleep(lis3->pwron_delay / lis3lv02d_get_odr()); diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index e46df5331c5..9a7eb3b36cf 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h @@ -239,13 +239,19 @@ void bnx2x_int_disable(struct bnx2x *bp); * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X * */ -/* iSCSI L2 */ -#define BNX2X_ISCSI_ETH_CL_ID_IDX 1 -#define BNX2X_ISCSI_ETH_CID 49 +enum { + BNX2X_ISCSI_ETH_CL_ID_IDX, + BNX2X_FCOE_ETH_CL_ID_IDX, + BNX2X_MAX_CNIC_ETH_CL_ID_IDX, +}; -/* FCoE L2 */ -#define BNX2X_FCOE_ETH_CL_ID_IDX 2 -#define BNX2X_FCOE_ETH_CID 50 +#define BNX2X_CNIC_START_ETH_CID 48 +enum { + /* iSCSI L2 */ + BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID, + /* FCoE L2 */ + BNX2X_FCOE_ETH_CID, +}; /** Additional rings budgeting */ #ifdef BCM_CNIC diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h index 223bfeebc59..2dc1199239d 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.h +++ b/drivers/net/bnx2x/bnx2x_cmn.h @@ -1297,7 +1297,7 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp, static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) { return bp->cnic_base_cl_id + cl_idx + - (bp->pf_num >> 1) * NON_ETH_CONTEXT_USE; + (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX; } static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c index a1e004a82f7..0b4acf67e0c 100644 --- a/drivers/net/bnx2x/bnx2x_dcb.c +++ b/drivers/net/bnx2x/bnx2x_dcb.c @@ -2120,6 +2120,7 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap) break; case DCB_CAP_ATTR_DCBX: *cap = BNX2X_DCBX_CAPS; + break; default: rval = -EINVAL; break; diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index c027e9341a1..15f800085bb 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c @@ -4943,7 +4943,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) int igu_seg_id; int port = BP_PORT(bp); int func = BP_FUNC(bp); - int reg_offset; + int reg_offset, reg_offset_en5; u64 section; int index; struct hc_sp_status_block_data sp_sb_data; @@ -4966,6 +4966,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); + reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0); for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { int sindex; /* take care of sig[0]..sig[4] */ @@ -4980,7 +4982,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) * and not 16 between the different groups */ bp->attn_group[index].sig[4] = REG_RD(bp, - reg_offset + 0x10 + 0x4*index); + reg_offset_en5 + 0x4*index); else bp->attn_group[index].sig[4] = 0; } @@ -7625,8 +7627,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) u32 emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; u8 *mac_addr = bp->dev->dev_addr; u32 val; + u16 pmc; + /* The mac address is written to entries 1-4 to - preserve entry 0 which is used by the PMF */ + * preserve entry 0 which is used by the PMF + */ u8 entry = (BP_VN(bp) + 1)*8; val = (mac_addr[0] << 8) | mac_addr[1]; @@ -7636,6 +7641,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) (mac_addr[4] << 8) | mac_addr[5]; EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); + /* Enable the PME and clear the status */ + pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc); + pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS; + pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc); + reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; } else diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h index 750e8445dac..fc7bd0f23c0 100644 --- a/drivers/net/bnx2x/bnx2x_reg.h +++ b/drivers/net/bnx2x/bnx2x_reg.h @@ -1384,6 +1384,18 @@ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ #define MISC_REG_AEU_ENABLE4_PXP_0 0xa108 #define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8 +/* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped + * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC + * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] + * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 + * parity; [31-10] Reserved; */ +#define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 0xa688 +/* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped + * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC + * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] + * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 + * parity; [31-10] Reserved; */ +#define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 0xa6b0 /* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu 128 bit vector */ #define MISC_REG_AEU_GENERAL_ATTN_0 0xa000 diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index a047eb973e3..47b928ed08f 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -2168,7 +2168,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work) } re_arm: - queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); + if (!bond->kill_timers) + queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); out: read_unlock(&bond->lock); } diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 7f8b20a34ee..d4fbd2e6261 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -1440,7 +1440,8 @@ void bond_alb_monitor(struct work_struct *work) } re_arm: - queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks); + if (!bond->kill_timers) + queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks); out: read_unlock(&bond->lock); } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 43f2ea54108..de3d351ccb6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -777,6 +777,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond) read_lock(&bond->lock); + if (bond->kill_timers) + goto out; + /* rejoin all groups on bond device */ __bond_resend_igmp_join_requests(bond->dev); @@ -790,9 +793,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond) __bond_resend_igmp_join_requests(vlan_dev); } - if (--bond->igmp_retrans > 0) + 
if ((--bond->igmp_retrans > 0) && !bond->kill_timers) queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); - +out: read_unlock(&bond->lock); } @@ -1432,6 +1435,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) struct sk_buff *skb = *pskb; struct slave *slave; struct bonding *bond; + void (*recv_probe)(struct sk_buff *, struct bonding *, + struct slave *); skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) @@ -1445,11 +1450,12 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) if (bond->params.arp_interval) slave->dev->last_rx = jiffies; - if (bond->recv_probe) { + recv_probe = ACCESS_ONCE(bond->recv_probe); + if (recv_probe) { struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); if (likely(nskb)) { - bond->recv_probe(nskb, bond, slave); + recv_probe(nskb, bond, slave); dev_kfree_skb(nskb); } } @@ -2538,7 +2544,7 @@ void bond_mii_monitor(struct work_struct *work) } re_arm: - if (bond->params.miimon) + if (bond->params.miimon && !bond->kill_timers) queue_delayed_work(bond->wq, &bond->mii_work, msecs_to_jiffies(bond->params.miimon)); out: @@ -2886,7 +2892,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work) } re_arm: - if (bond->params.arp_interval) + if (bond->params.arp_interval && !bond->kill_timers) queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); out: read_unlock(&bond->lock); @@ -3154,7 +3160,7 @@ void bond_activebackup_arp_mon(struct work_struct *work) bond_ab_arp_probe(bond); re_arm: - if (bond->params.arp_interval) + if (bond->params.arp_interval && !bond->kill_timers) queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); out: read_unlock(&bond->lock); diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index 92feac68b66..4cc6f44c2ba 100644 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@ -261,11 +261,13 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev) void __iomem *data = ®s->tx.dsr1_0; u16 *payload = (u16 *)frame->data; - /* It is safe to write into dsr[dlc+1] */ - for (i = 0; i < (frame->can_dlc + 1) / 2; i++) { + for (i = 0; i < frame->can_dlc / 2; i++) { out_be16(data, *payload++); data += 2 + _MSCAN_RESERVED_DSR_SIZE; } + /* write remaining byte if necessary */ + if (frame->can_dlc & 1) + out_8(data, frame->data[frame->can_dlc - 1]); } out_8(®s->tx.dlr, frame->can_dlc); @@ -330,10 +332,13 @@ static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame) void __iomem *data = ®s->rx.dsr1_0; u16 *payload = (u16 *)frame->data; - for (i = 0; i < (frame->can_dlc + 1) / 2; i++) { + for (i = 0; i < frame->can_dlc / 2; i++) { *payload++ = in_be16(data); data += 2 + _MSCAN_RESERVED_DSR_SIZE; } + /* read remaining byte if necessary */ + if (frame->can_dlc & 1) + frame->data[frame->can_dlc - 1] = in_8(data); } out_8(®s->canrflg, MSCAN_RXF); diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index 805076c54f1..da5a5d9b8af 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c @@ -1146,12 +1146,14 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) if (te && te->ctx && te->client && te->client->redirect) { update_tcb = te->client->redirect(te->ctx, old, new, e); if (update_tcb) { + rcu_read_lock(); l2t_hold(L2DATA(tdev), e); + rcu_read_unlock(); set_l2t_ix(tdev, tid, e); } } } - l2t_release(L2DATA(tdev), e); + l2t_release(tdev, e); } /* @@ -1264,7 +1266,7 @@ int cxgb3_offload_activate(struct adapter *adapter) goto out_free; 
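
In bond_handle_frame() above, the shared recv_probe function pointer is loaded exactly once (via ACCESS_ONCE) into a local, and the local is what gets tested and called, so a writer clearing the field cannot sneak in between the NULL check and the call. A compact userspace sketch of the same discipline, with a volatile cast standing in for ACCESS_ONCE:

    #include <stdio.h>

    typedef void (*probe_fn)(const char *);

    struct bond { probe_fn recv_probe; };   /* may be cleared concurrently */

    static void arp_probe(const char *what) { printf("probe: %s\n", what); }

    static void handle_frame(struct bond *bond)
    {
        /* Snapshot the pointer once; test and call the snapshot,
         * never re-read the shared field in between. */
        probe_fn fn = *(probe_fn volatile *)&bond->recv_probe;

        if (fn)
            fn("frame");
    }

    int main(void)
    {
        struct bond b = { .recv_probe = arp_probe };

        handle_frame(&b);
        b.recv_probe = NULL;    /* e.g. the monitoring hook being torn down */
        handle_frame(&b);       /* snapshot is NULL, nothing is called */
        return 0;
    }
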
err = -ENOMEM; - L2DATA(dev) = t3_init_l2t(l2t_capacity); + RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity)); if (!L2DATA(dev)) goto out_free; @@ -1298,16 +1300,24 @@ int cxgb3_offload_activate(struct adapter *adapter) out_free_l2t: t3_free_l2t(L2DATA(dev)); - L2DATA(dev) = NULL; + rcu_assign_pointer(dev->l2opt, NULL); out_free: kfree(t); return err; } +static void clean_l2_data(struct rcu_head *head) +{ + struct l2t_data *d = container_of(head, struct l2t_data, rcu_head); + t3_free_l2t(d); +} + + void cxgb3_offload_deactivate(struct adapter *adapter) { struct t3cdev *tdev = &adapter->tdev; struct t3c_data *t = T3C_DATA(tdev); + struct l2t_data *d; remove_adapter(adapter); if (list_empty(&adapter_list)) @@ -1315,8 +1325,11 @@ void cxgb3_offload_deactivate(struct adapter *adapter) free_tid_maps(&t->tid_maps); T3C_DATA(tdev) = NULL; - t3_free_l2t(L2DATA(tdev)); - L2DATA(tdev) = NULL; + rcu_read_lock(); + d = L2DATA(tdev); + rcu_read_unlock(); + rcu_assign_pointer(tdev->l2opt, NULL); + call_rcu(&d->rcu_head, clean_l2_data); if (t->nofail_skb) kfree_skb(t->nofail_skb); kfree(t); diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c index f452c400325..41540978a17 100644 --- a/drivers/net/cxgb3/l2t.c +++ b/drivers/net/cxgb3/l2t.c @@ -300,14 +300,21 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh) struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, struct net_device *dev) { - struct l2t_entry *e; - struct l2t_data *d = L2DATA(cdev); + struct l2t_entry *e = NULL; + struct l2t_data *d; + int hash; u32 addr = *(u32 *) neigh->primary_key; int ifidx = neigh->dev->ifindex; - int hash = arp_hash(addr, ifidx, d); struct port_info *p = netdev_priv(dev); int smt_idx = p->port_id; + rcu_read_lock(); + d = L2DATA(cdev); + if (!d) + goto done_rcu; + + hash = arp_hash(addr, ifidx, d); + write_lock_bh(&d->lock); for (e = d->l2tab[hash].first; e; e = e->next) if (e->addr == addr && e->ifindex == ifidx && @@ -338,6 +345,8 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, } done: write_unlock_bh(&d->lock); +done_rcu: + rcu_read_unlock(); return e; } diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h index 7a12d52ed4f..c5f54796e2c 100644 --- a/drivers/net/cxgb3/l2t.h +++ b/drivers/net/cxgb3/l2t.h @@ -76,6 +76,7 @@ struct l2t_data { atomic_t nfree; /* number of free entries */ rwlock_t lock; struct l2t_entry l2tab[0]; + struct rcu_head rcu_head; /* to handle rcu cleanup */ }; typedef void (*arp_failure_handler_func)(struct t3cdev * dev, @@ -99,7 +100,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb, /* * Getting to the L2 data from an offload device. 
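
The cxgb3 l2t changes in this area convert t3cdev->l2opt to an RCU-managed pointer: readers dereference it between rcu_read_lock() and rcu_read_unlock() and must tolerate NULL, while teardown unpublishes the pointer and frees the table through call_rcu() only after all readers have left their critical sections. A kernel-style sketch of that reader/updater split (a fragment for kernel context, not a standalone program; the structure layout is invented):

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct l2_table {
        int nentries;
        struct rcu_head rcu_head;       /* for deferred freeing */
    };

    struct offload_dev {
        struct l2_table __rcu *l2opt;
    };

    /* Reader: may race with teardown, so check for NULL. */
    static int l2_table_size(struct offload_dev *dev)
    {
        struct l2_table *t;
        int n = 0;

        rcu_read_lock();
        t = rcu_dereference(dev->l2opt);
        if (t)
            n = t->nentries;
        rcu_read_unlock();
        return n;
    }

    static void l2_table_free_rcu(struct rcu_head *head)
    {
        kfree(container_of(head, struct l2_table, rcu_head));
    }

    /* Updater: unpublish first, free only after a grace period. */
    static void l2_table_destroy(struct offload_dev *dev)
    {
        struct l2_table *t = rcu_dereference_protected(dev->l2opt, 1);

        rcu_assign_pointer(dev->l2opt, NULL);
        if (t)
            call_rcu(&t->rcu_head, l2_table_free_rcu);
    }
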
*/ -#define L2DATA(dev) ((dev)->l2opt) +#define L2DATA(cdev) (rcu_dereference((cdev)->l2opt)) #define W_TCB_L2T_IX 0 #define S_TCB_L2T_IX 7 @@ -126,15 +127,22 @@ static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb, return t3_l2t_send_slow(dev, skb, e); } -static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e) +static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e) { - if (atomic_dec_and_test(&e->refcnt)) + struct l2t_data *d; + + rcu_read_lock(); + d = L2DATA(t); + + if (atomic_dec_and_test(&e->refcnt) && d) t3_l2e_free(d, e); + + rcu_read_unlock(); } static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) { - if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ + if (d && atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ atomic_dec(&d->nfree); } diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c index c9957b7f17b..b4efa292fd6 100644 --- a/drivers/net/cxgb4/cxgb4_main.c +++ b/drivers/net/cxgb4/cxgb4_main.c @@ -3712,6 +3712,9 @@ static int __devinit init_one(struct pci_dev *pdev, setup_debugfs(adapter); } + /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ + pdev->needs_freset = 1; + if (is_offload(adapter)) attach_ulds(adapter); diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index 8dd5fccef72..d393f1e764e 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c @@ -636,8 +636,8 @@ static int ibmveth_open(struct net_device *netdev) netdev_err(netdev, "unable to request irq 0x%x, rc %d\n", netdev->irq, rc); do { - rc = h_free_logical_lan(adapter->vdev->unit_address); - } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); + lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); + } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY)); goto err_out; } diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 05172c39a0c..376e3e94bae 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -239,7 +239,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) dest = macvlan_hash_lookup(port, eth->h_dest); if (dest && dest->mode == MACVLAN_MODE_BRIDGE) { /* send to lowerdev first for its network taps */ - vlan->forward(vlan->lowerdev, skb); + dev_forward_skb(vlan->lowerdev, skb); return NET_XMIT_SUCCESS; } diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c index 6e03de034ac..f76ab6bf309 100644 --- a/drivers/net/mlx4/en_tx.c +++ b/drivers/net/mlx4/en_tx.c @@ -172,7 +172,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, memset(ring->buf, 0, ring->buf_size); ring->qp_state = MLX4_QP_STATE_RST; - ring->doorbell_qpn = swab32(ring->qp.qpn << 8); + ring->doorbell_qpn = ring->qp.qpn << 8; mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, ring->cqn, &ring->context); @@ -791,7 +791,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) skb_orphan(skb); if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) { - *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn; + *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn); op_own |= htonl((bf_index & 0xffff) << 8); /* Ensure new descirptor hits memory * before setting ownership of this descriptor to HW */ @@ -812,7 +812,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) wmb(); tx_desc->ctrl.owner_opcode = op_own; wmb(); - writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); + iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + 
MLX4_SEND_DOORBELL); } /* Poll CQ here */ diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index ed2a3977c6e..e8882023576 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -307,6 +307,11 @@ static ssize_t store_enabled(struct netconsole_target *nt, return err; if (enabled < 0 || enabled > 1) return -EINVAL; + if (enabled == nt->enabled) { + printk(KERN_INFO "netconsole: network logging has already %s\n", + nt->enabled ? "started" : "stopped"); + return -EINVAL; + } if (enabled) { /* 1 */ diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index 567ff10889b..b8b4ba27b0e 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c @@ -1199,6 +1199,8 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), &hw->reg->INT_EN); pch_gbe_stop_receive(adapter); + int_st |= ioread32(&hw->reg->INT_ST); + int_st = int_st & ioread32(&hw->reg->INT_EN); } if (int_st & PCH_GBE_INT_RX_DMA_ERR) adapter->stats.intr_rx_dma_err_count++; @@ -1218,14 +1220,11 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) /* Set Pause packet */ pch_gbe_mac_set_pause_packet(hw); } - if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) - == 0) { - return IRQ_HANDLED; - } } /* When request status is Receive interruption */ - if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) { + if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) || + (adapter->rx_stop_flag == true)) { if (likely(napi_schedule_prep(&adapter->napi))) { /* Enable only Rx Descriptor empty */ atomic_inc(&adapter->irq_sem); @@ -1385,7 +1384,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, struct sk_buff *skb; unsigned int i; unsigned int cleaned_count = 0; - bool cleaned = false; + bool cleaned = true; pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); @@ -1396,7 +1395,6 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) { pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status); - cleaned = true; buffer_info = &tx_ring->buffer_info[i]; skb = buffer_info->skb; @@ -1439,8 +1437,10 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); /* weight of a sort for tx, to avoid endless transmit cleanup */ - if (cleaned_count++ == PCH_GBE_TX_WEIGHT) + if (cleaned_count++ == PCH_GBE_TX_WEIGHT) { + cleaned = false; break; + } } pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n", cleaned_count); @@ -2168,7 +2168,6 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) { struct pch_gbe_adapter *adapter = container_of(napi, struct pch_gbe_adapter, napi); - struct net_device *netdev = adapter->netdev; int work_done = 0; bool poll_end_flag = false; bool cleaned = false; @@ -2176,33 +2175,32 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) pr_debug("budget : %d\n", budget); - /* Keep link state information with original netdev */ - if (!netif_carrier_ok(netdev)) { + pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); + cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); + + if (!cleaned) + work_done = budget; + /* If no Tx and not enough Rx work done, + * exit the polling mode + */ + if (work_done < budget) poll_end_flag = true; - } else { - pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); + + if (poll_end_flag) { + napi_complete(napi); + if (adapter->rx_stop_flag) { + adapter->rx_stop_flag = false; + 
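
The pch_gbe poll rework above reports the full budget whenever pch_gbe_clean_tx() could not drain the ring within its weight, so the NAPI core keeps polling, and it only completes NAPI and re-enables interrupts once the work done falls below the budget. A simplified standalone model of that decision (names and types here are illustrative only):

    #include <stdbool.h>
    #include <stdio.h>

    /* Returns the work_done value to report and whether polling should stop. */
    static int poll(int rx_done, bool tx_fully_cleaned, int budget, bool *stop_polling)
    {
        int work_done = rx_done;

        if (!tx_fully_cleaned)
            work_done = budget;         /* TX work pending: stay in polling mode */

        *stop_polling = (work_done < budget);
        return work_done;
    }

    int main(void)
    {
        bool stop;
        int done;

        done = poll(3, false, 64, &stop);
        printf("work_done=%d stop=%d\n", done, stop);  /* 64, 0: keep polling */
        done = poll(3, true, 64, &stop);
        printf("work_done=%d stop=%d\n", done, stop);  /* 3, 1: complete + irq enable */
        return 0;
    }
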
pch_gbe_start_receive(&adapter->hw); + } + pch_gbe_irq_enable(adapter); + } else if (adapter->rx_stop_flag) { adapter->rx_stop_flag = false; pch_gbe_start_receive(&adapter->hw); int_en = ioread32(&adapter->hw.reg->INT_EN); iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR), - &adapter->hw.reg->INT_EN); + &adapter->hw.reg->INT_EN); } - cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); - - if (cleaned) - work_done = budget; - /* If no Tx and not enough Rx work done, - * exit the polling mode - */ - if ((work_done < budget) || !netif_running(netdev)) - poll_end_flag = true; - } - - if (poll_end_flag) { - napi_complete(napi); - pch_gbe_irq_enable(adapter); - } pr_debug("poll_end_flag : %d work_done : %d budget : %d\n", poll_end_flag, work_done, budget); diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index cb6e0b486b1..edd7304773e 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -589,7 +589,7 @@ static void decode_rxts(struct dp83640_private *dp83640, prune_rx_ts(dp83640); if (list_empty(&dp83640->rxpool)) { - pr_warning("dp83640: rx timestamp pool is empty\n"); + pr_debug("dp83640: rx timestamp pool is empty\n"); goto out; } rxts = list_first_entry(&dp83640->rxpool, struct rxts, list); @@ -612,7 +612,7 @@ static void decode_txts(struct dp83640_private *dp83640, skb = skb_dequeue(&dp83640->tx_queue); if (!skb) { - pr_warning("dp83640: have timestamp but tx_queue empty\n"); + pr_debug("dp83640: have timestamp but tx_queue empty\n"); return; } ns = phy2txts(phy_txts); diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c index eae542a7e98..89f829f5f72 100644 --- a/drivers/net/pptp.c +++ b/drivers/net/pptp.c @@ -285,8 +285,10 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) ip_send_check(iph); ip_local_out(skb); + return 1; tx_error: + kfree_skb(skb); return 1; } @@ -305,11 +307,18 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb) } header = (struct pptp_gre_header *)(skb->data); + headersize = sizeof(*header); /* test if acknowledgement present */ if (PPTP_GRE_IS_A(header->ver)) { - __u32 ack = (PPTP_GRE_IS_S(header->flags)) ? - header->ack : header->seq; /* ack in different place if S = 0 */ + __u32 ack; + + if (!pskb_may_pull(skb, headersize)) + goto drop; + header = (struct pptp_gre_header *)(skb->data); + + /* ack in different place if S = 0 */ + ack = PPTP_GRE_IS_S(header->flags) ? header->ack : header->seq; ack = ntohl(ack); @@ -318,21 +327,18 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb) /* also handle sequence number wrap-around */ if (WRAPPED(ack, opt->ack_recv)) opt->ack_recv = ack; + } else { + headersize -= sizeof(header->ack); } - /* test if payload present */ if (!PPTP_GRE_IS_S(header->flags)) goto drop; - headersize = sizeof(*header); payload_len = ntohs(header->payload_len); seq = ntohl(header->seq); - /* no ack present? 
*/ - if (!PPTP_GRE_IS_A(header->ver)) - headersize -= sizeof(header->ack); /* check for incomplete packet (length smaller than expected) */ - if (skb->len - headersize < payload_len) + if (!pskb_may_pull(skb, headersize + payload_len)) goto drop; payload = skb->data + headersize; diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index c2366701792..6d657cabb95 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -2859,7 +2859,7 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x1f, 0x0004); rtl_writephy(tp, 0x1f, 0x0007); rtl_writephy(tp, 0x1e, 0x0020); - rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); + rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100); rtl_writephy(tp, 0x1f, 0x0002); rtl_writephy(tp, 0x1f, 0x0000); rtl_writephy(tp, 0x0d, 0x0007); @@ -3316,6 +3316,37 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp) } } +static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_34: + RTL_W32(RxConfig, RTL_R32(RxConfig) | + AcceptBroadcast | AcceptMulticast | AcceptMyPhys); + break; + default: + break; + } +} + +static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) +{ + if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) + return false; + + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, MII_BMCR, 0x0000); + + rtl_wol_suspend_quirk(tp); + + return true; +} + static void r810x_phy_power_down(struct rtl8169_private *tp) { rtl_writephy(tp, 0x1f, 0x0000); @@ -3330,18 +3361,8 @@ static void r810x_phy_power_up(struct rtl8169_private *tp) static void r810x_pll_power_down(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - if (__rtl8169_get_wol(tp) & WAKE_ANY) { - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, MII_BMCR, 0x0000); - - if (tp->mac_version == RTL_GIGA_MAC_VER_29 || - tp->mac_version == RTL_GIGA_MAC_VER_30) - RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | - AcceptMulticast | AcceptMyPhys); + if (rtl_wol_pll_power_down(tp)) return; - } r810x_phy_power_down(tp); } @@ -3430,17 +3451,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) tp->mac_version == RTL_GIGA_MAC_VER_33) rtl_ephy_write(ioaddr, 0x19, 0xff64); - if (__rtl8169_get_wol(tp) & WAKE_ANY) { - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, MII_BMCR, 0x0000); - - if (tp->mac_version == RTL_GIGA_MAC_VER_32 || - tp->mac_version == RTL_GIGA_MAC_VER_33 || - tp->mac_version == RTL_GIGA_MAC_VER_34) - RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | - AcceptMulticast | AcceptMyPhys); + if (rtl_wol_pll_power_down(tp)) return; - } r8168_phy_power_down(tp); @@ -5788,11 +5800,30 @@ static const struct dev_pm_ops rtl8169_pm_ops = { #endif /* !CONFIG_PM */ +static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + /* WoL fails with 8168b when the receiver is disabled. 
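
The pptp_rcv_core() rework above derives the real GRE header length from the flag bits and calls pskb_may_pull() before reading the optional ack field or trusting payload_len, reloading the header pointer afterwards because the pull can move skb->data. A userspace sketch of the same validate-before-read pattern for a variable-length header (the field layout is invented and endianness is ignored to keep it short):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct gre_hdr {                /* invented layout: fixed part, then optional ack */
        uint8_t  flags;             /* bit 0: sequence present, bit 1: ack present */
        uint8_t  reserved;
        uint16_t payload_len;
        uint32_t seq;
        uint32_t ack;               /* only on the wire when bit 1 is set */
    };

    #define HDR_FIXED  (sizeof(struct gre_hdr) - sizeof(uint32_t))

    static bool parse(const uint8_t *buf, size_t len)
    {
        struct gre_hdr h;
        size_t hdrlen = HDR_FIXED;

        if (len < hdrlen)
            return false;                   /* not even the fixed part */
        memcpy(&h, buf, hdrlen);

        if (h.flags & 0x2) {                /* ack present: header grows */
            hdrlen += sizeof(uint32_t);
            if (len < hdrlen)
                return false;               /* check before touching h.ack */
            memcpy(&h, buf, hdrlen);
        }

        if (len < hdrlen + h.payload_len)   /* advertised payload must fit */
            return false;

        printf("ok: %u payload bytes\n", (unsigned)h.payload_len);
        return true;
    }

    int main(void)
    {
        uint8_t pkt[64];
        struct gre_hdr h = { .flags = 0x3, .payload_len = 4, .seq = 1, .ack = 0 };

        memset(pkt, 0, sizeof(pkt));
        memcpy(pkt, &h, sizeof(h));
        return parse(pkt, sizeof(pkt)) ? 0 : 1;
    }
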
*/ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + pci_clear_master(tp->pci_dev); + + RTL_W8(ChipCmd, CmdRxEnb); + /* PCI commit */ + RTL_R8(ChipCmd); + break; + default: + break; + } +} + static void rtl_shutdown(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; rtl8169_net_suspend(dev); @@ -5806,16 +5837,9 @@ static void rtl_shutdown(struct pci_dev *pdev) spin_unlock_irq(&tp->lock); if (system_state == SYSTEM_POWER_OFF) { - /* WoL fails with 8168b when the receiver is disabled. */ - if ((tp->mac_version == RTL_GIGA_MAC_VER_11 || - tp->mac_version == RTL_GIGA_MAC_VER_12 || - tp->mac_version == RTL_GIGA_MAC_VER_17) && - (tp->features & RTL_FEATURE_WOL)) { - pci_clear_master(pdev); - - RTL_W8(ChipCmd, CmdRxEnb); - /* PCI commit */ - RTL_R8(ChipCmd); + if (__rtl8169_get_wol(tp) & WAKE_ANY) { + rtl_wol_suspend_quirk(tp); + rtl_wol_shutdown_quirk(tp); } pci_wake_from_d3(pdev, true); diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index b9016a30cdc..c90ddb61cc5 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -26,6 +26,7 @@ * LAN9215, LAN9216, LAN9217, LAN9218 * LAN9210, LAN9211 * LAN9220, LAN9221 + * LAN89218 * */ @@ -1983,6 +1984,7 @@ static int __devinit smsc911x_init(struct net_device *dev) case 0x01170000: case 0x01160000: case 0x01150000: + case 0x218A0000: /* LAN911[5678] family */ pdata->generation = pdata->idrev & 0x0000FFFF; break; diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 4a1374df608..c11a2b8327f 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -15577,7 +15577,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev) cancel_work_sync(&tp->reset_task); - if (!tg3_flag(tp, USE_PHYLIB)) { + if (tg3_flag(tp, USE_PHYLIB)) { tg3_phy_fini(tp); tg3_mdio_fini(tp); } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h index 2339728a730..3e69c631ebb 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h @@ -1514,7 +1514,7 @@ static const u32 ar9300_2p2_mac_core[][2] = { {0x00008258, 0x00000000}, {0x0000825c, 0x40000000}, {0x00008260, 0x00080922}, - {0x00008264, 0x9bc00010}, + {0x00008264, 0x9d400010}, {0x00008268, 0xffffffff}, {0x0000826c, 0x0000ffff}, {0x00008270, 0x00000000}, diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 9a4850154fb..4c21f8cbdeb 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -205,14 +205,22 @@ static void ath_rx_remove_buffer(struct ath_softc *sc, static void ath_rx_edma_cleanup(struct ath_softc *sc) { + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); struct ath_buf *bf; ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); list_for_each_entry(bf, &sc->rx.rxbuf, list) { - if (bf->bf_mpdu) + if (bf->bf_mpdu) { + dma_unmap_single(sc->dev, bf->bf_buf_addr, + common->rx_bufsize, + DMA_BIDIRECTIONAL); dev_kfree_skb_any(bf->bf_mpdu); + bf->bf_buf_addr = 0; + bf->bf_mpdu = NULL; + } } INIT_LIST_HEAD(&sc->rx.rxbuf); diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c index 35cd2537e7f..e5971fe9d16 100644 --- a/drivers/net/wireless/iwlegacy/iwl-core.c +++ b/drivers/net/wireless/iwlegacy/iwl-core.c @@ 
-937,7 +937,7 @@ void iwl_legacy_irq_handle_error(struct iwl_priv *priv) &priv->contexts[IWL_RXON_CTX_BSS]); #endif - wake_up_interruptible(&priv->wait_command_queue); + wake_up(&priv->wait_command_queue); /* Keep the restart process from trying to send host * commands by clearing the INIT status bit */ @@ -1746,7 +1746,7 @@ int iwl_legacy_force_reset(struct iwl_priv *priv, bool external) /* Set the FW error flag -- cleared on iwl_down */ set_bit(STATUS_FW_ERROR, &priv->status); - wake_up_interruptible(&priv->wait_command_queue); + wake_up(&priv->wait_command_queue); /* * Keep the restart process from trying to send host * commands by clearing the INIT status bit diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c index 62b4b09122c..ce1fc9feb61 100644 --- a/drivers/net/wireless/iwlegacy/iwl-hcmd.c +++ b/drivers/net/wireless/iwlegacy/iwl-hcmd.c @@ -167,7 +167,7 @@ int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) goto out; } - ret = wait_event_interruptible_timeout(priv->wait_command_queue, + ret = wait_event_timeout(priv->wait_command_queue, !test_bit(STATUS_HCMD_ACTIVE, &priv->status), HOST_COMPLETE_TIMEOUT); if (!ret) { diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c index 4fff995c6f3..ef9e268bf8a 100644 --- a/drivers/net/wireless/iwlegacy/iwl-tx.c +++ b/drivers/net/wireless/iwlegacy/iwl-tx.c @@ -625,6 +625,8 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) cmd = txq->cmd[cmd_index]; meta = &txq->meta[cmd_index]; + txq->time_stamp = jiffies; + pci_unmap_single(priv->pci_dev, dma_unmap_addr(meta, mapping), dma_unmap_len(meta, len), @@ -645,7 +647,7 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) clear_bit(STATUS_HCMD_ACTIVE, &priv->status); IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n", iwl_legacy_get_cmd_string(cmd->hdr.cmd)); - wake_up_interruptible(&priv->wait_command_queue); + wake_up(&priv->wait_command_queue); } /* Mark as unmapped */ diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c index 795826a014e..66ee15629a7 100644 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c @@ -841,7 +841,7 @@ static void iwl3945_rx_card_state_notif(struct iwl_priv *priv, wiphy_rfkill_set_hw_state(priv->hw->wiphy, test_bit(STATUS_RF_KILL_HW, &priv->status)); else - wake_up_interruptible(&priv->wait_command_queue); + wake_up(&priv->wait_command_queue); } /** @@ -2269,7 +2269,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv) iwl3945_reg_txpower_periodic(priv); IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); - wake_up_interruptible(&priv->wait_command_queue); + wake_up(&priv->wait_command_queue); return; @@ -2300,7 +2300,7 @@ static void __iwl3945_down(struct iwl_priv *priv) iwl_legacy_clear_driver_stations(priv); /* Unblock any waiting calls */ - wake_up_interruptible_all(&priv->wait_command_queue); + wake_up_all(&priv->wait_command_queue); /* Wipe out the EXIT_PENDING status bit if we are not actually * exiting the module */ @@ -2853,7 +2853,7 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw) /* Wait for START_ALIVE from ucode. Otherwise callbacks from * mac80211 will not be run successfully. 
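
The iwlegacy hunks in this area move the command wait queue from interruptible to plain sleeps: wait_event_interruptible_timeout() bails out with -ERESTARTSYS as soon as the task has a signal pending, which aborted firmware and host-command waits early, whereas wait_event_timeout() only returns on the condition or the timeout. A kernel-context fragment showing the two call shapes side by side (queue and condition names are placeholders, not the driver's own):

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(cmd_wait);
    static int cmd_done;            /* set by the completion path before wake_up() */

    static int wait_for_command(void)
    {
        long ret;

        /* Old style: a pending signal ends the wait with -ERESTARTSYS even
         * though the firmware is still processing the command. */
        ret = wait_event_interruptible_timeout(cmd_wait, cmd_done, HZ);

        /* New style: only the condition or the timeout ends the sleep. */
        ret = wait_event_timeout(cmd_wait, cmd_done, HZ);

        return ret ? 0 : -ETIMEDOUT;
    }
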
*/ - ret = wait_event_interruptible_timeout(priv->wait_command_queue, + ret = wait_event_timeout(priv->wait_command_queue, test_bit(STATUS_READY, &priv->status), UCODE_READY_TIMEOUT); if (!ret) { diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c index 14334668034..aa0c2539761 100644 --- a/drivers/net/wireless/iwlegacy/iwl4965-base.c +++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c @@ -576,7 +576,7 @@ static void iwl4965_rx_card_state_notif(struct iwl_priv *priv, wiphy_rfkill_set_hw_state(priv->hw->wiphy, test_bit(STATUS_RF_KILL_HW, &priv->status)); else - wake_up_interruptible(&priv->wait_command_queue); + wake_up(&priv->wait_command_queue); } /** @@ -926,7 +926,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv) handled |= CSR_INT_BIT_FH_TX; /* Wake up uCode load routine, now that load is complete */ priv->ucode_write_complete = 1; - wake_up_interruptible(&priv->wait_command_queue); + wake_up(&priv->wait_command_queue); } if (inta & ~handled) { @@ -1795,7 +1795,7 @@ static void iwl4965_alive_start(struct iwl_priv *priv) iwl4965_rf_kill_ct_config(priv); IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); - wake_up_interruptible(&priv->wait_command_queue); + wake_up(&priv->wait_command_queue); iwl_legacy_power_update_mode(priv, true); IWL_DEBUG_INFO(priv, "Updated power mode\n"); @@ -1828,7 +1828,7 @@ static void __iwl4965_down(struct iwl_priv *priv) iwl_legacy_clear_driver_stations(priv); /* Unblock any waiting calls */ - wake_up_interruptible_all(&priv->wait_command_queue); + wake_up_all(&priv->wait_command_queue); /* Wipe out the EXIT_PENDING status bit if we are not actually * exiting the module */ @@ -2266,7 +2266,7 @@ int iwl4965_mac_start(struct ieee80211_hw *hw) /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from * mac80211 will not be run successfully. */ - ret = wait_event_interruptible_timeout(priv->wait_command_queue, + ret = wait_event_timeout(priv->wait_command_queue, test_bit(STATUS_READY, &priv->status), UCODE_READY_TIMEOUT); if (!ret) { diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index dd6937e9705..77e528f5db8 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c @@ -405,31 +405,33 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw, mutex_lock(&priv->mutex); - if (test_bit(STATUS_SCANNING, &priv->status) && - priv->scan_type != IWL_SCAN_NORMAL) { - IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); - ret = -EAGAIN; - goto out_unlock; - } - - /* mac80211 will only ask for one band at a time */ - priv->scan_request = req; - priv->scan_vif = vif; - /* * If an internal scan is in progress, just set * up the scan_request as per above. 
*/ if (priv->scan_type != IWL_SCAN_NORMAL) { - IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n"); + IWL_DEBUG_SCAN(priv, + "SCAN request during internal scan - defer\n"); + priv->scan_request = req; + priv->scan_vif = vif; ret = 0; - } else + } else { + priv->scan_request = req; + priv->scan_vif = vif; + /* + * mac80211 will only ask for one band at a time + * so using channels[0] here is ok + */ ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL, req->channels[0]->band); + if (ret) { + priv->scan_request = NULL; + priv->scan_vif = NULL; + } + } IWL_DEBUG_MAC80211(priv, "leave\n"); -out_unlock: mutex_unlock(&priv->mutex); return ret; diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index 8b1cef0ffde..4bf3cf457ef 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c @@ -863,6 +863,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb, u8 tid = 0; u16 seq_number = 0; + memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); if (ieee80211_is_auth(fc)) { RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n")); rtl_ips_nic_on(hw); diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 0ca86f9ec4e..182562952c7 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -327,12 +327,12 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, xenvif_get(vif); rtnl_lock(); - if (netif_running(vif->dev)) - xenvif_up(vif); if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) dev_set_mtu(vif->dev, ETH_DATA_LEN); netdev_update_features(vif->dev); netif_carrier_on(vif->dev); + if (netif_running(vif->dev)) + xenvif_up(vif); rtnl_unlock(); return 0; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 4e84fd4a431..e9651f0a881 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -77,7 +77,7 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; -enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE; +enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF; /* * The default CLS is used if arch didn't set CLS explicitly and not @@ -3568,10 +3568,14 @@ static int __init pci_setup(char *str) pci_hotplug_io_size = memparse(str + 9, &str); } else if (!strncmp(str, "hpmemsize=", 10)) { pci_hotplug_mem_size = memparse(str + 10, &str); + } else if (!strncmp(str, "pcie_bus_tune_off", 17)) { + pcie_bus_config = PCIE_BUS_TUNE_OFF; } else if (!strncmp(str, "pcie_bus_safe", 13)) { pcie_bus_config = PCIE_BUS_SAFE; } else if (!strncmp(str, "pcie_bus_perf", 13)) { pcie_bus_config = PCIE_BUS_PERFORMANCE; + } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) { + pcie_bus_config = PCIE_BUS_PEER2PEER; } else { printk(KERN_ERR "PCI: Unknown option `%s'\n", str); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index f3f94a5c068..6ab6bd3df4b 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1458,12 +1458,24 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data) */ void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss) { - u8 smpss = mpss; + u8 smpss; if (!pci_is_pcie(bus->self)) return; + if (pcie_bus_config == PCIE_BUS_TUNE_OFF) + return; + + /* FIXME - Peer to peer DMA is possible, though the endpoint would need + * to be aware to the MPS of the destination. 
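
Taken together, the pci.c and probe.c hunks in this area make PCIe MPS tuning opt-in: the default policy becomes PCIE_BUS_TUNE_OFF (BIOS values are left alone), pcie_bus_safe clamps the fabric to the smallest MPS found, and the new pcie_bus_peer2peer forces the 128-byte minimum so peer-to-peer DMA can never exceed an endpoint's MPS; all of them are selected through the pci= boot parameter (pci=pcie_bus_tune_off, pci=pcie_bus_safe, pci=pcie_bus_perf, pci=pcie_bus_peer2peer). A small standalone sketch of the policy selection, using the same encoding as the hunk (0 meaning 128 bytes); the helper and the enum ordering here are illustrative:

    #include <stdio.h>

    enum pcie_bus_config_types {
        PCIE_BUS_TUNE_OFF,      /* new default: do not touch firmware settings */
        PCIE_BUS_SAFE,          /* clamp to the smallest MPS on the bus */
        PCIE_BUS_PERFORMANCE,
        PCIE_BUS_PEER2PEER,     /* force the 128-byte minimum everywhere */
    };

    /* Returns the MPS encoding to program, or -1 for "leave it alone".
     * Encoding n means a payload of (128 << n) bytes. */
    static int pick_mps(enum pcie_bus_config_types policy, int smallest_on_bus)
    {
        switch (policy) {
        case PCIE_BUS_TUNE_OFF:
            return -1;
        case PCIE_BUS_PEER2PEER:
            return 0;
        case PCIE_BUS_SAFE:
            return smallest_on_bus;
        default:
            return smallest_on_bus;     /* placeholder for the performance path */
        }
    }

    int main(void)
    {
        printf("safe: %d bytes\n", 128 << pick_mps(PCIE_BUS_SAFE, 1));
        printf("p2p:  %d bytes\n", 128 << pick_mps(PCIE_BUS_PEER2PEER, 1));
        return 0;
    }
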
To work around this, + * simply force the MPS of the entire system to the smallest possible. + */ + if (pcie_bus_config == PCIE_BUS_PEER2PEER) + smpss = 0; + if (pcie_bus_config == PCIE_BUS_SAFE) { + smpss = mpss; + pcie_find_smpss(bus->self, &smpss); pci_walk_bus(bus, pcie_find_smpss, &smpss); } diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index cbde448f994..eb3140ee821 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -654,8 +654,8 @@ static struct io_subchannel_private console_priv; static int console_subchannel_in_use; /* - * Use tpi to get a pending interrupt, call the interrupt handler and - * return a pointer to the subchannel structure. + * Use cio_tpi to get a pending interrupt and call the interrupt handler. + * Return non-zero if an interrupt was processed, zero otherwise. */ static int cio_tpi(void) { @@ -667,6 +667,10 @@ static int cio_tpi(void) tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; if (tpi(NULL) != 1) return 0; + if (tpi_info->adapter_IO) { + do_adapter_IO(tpi_info->isc); + return 1; + } irb = (struct irb *)&S390_lowcore.irb; /* Store interrupt response block to lowcore. */ if (tsch(tpi_info->schid, irb) != 0) diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index b7bd5b0cc7a..3868ab2397c 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -1800,10 +1800,12 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_ switch (retval) { case SCSI_MLQUEUE_HOST_BUSY: twa_free_request_id(tw_dev, request_id); + twa_unmap_scsi_data(tw_dev, request_id); break; case 1: tw_dev->state[request_id] = TW_S_COMPLETED; twa_free_request_id(tw_dev, request_id); + twa_unmap_scsi_data(tw_dev, request_id); SCpnt->result = (DID_ERROR << 16); done(SCpnt); retval = 0; diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 3c08f5352b2..6153a66a8a3 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -88,7 +88,7 @@ obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/ -obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx/ +obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/ obj-$(CONFIG_SCSI_LPFC) += lpfc/ obj-$(CONFIG_SCSI_BFA_FC) += bfa/ obj-$(CONFIG_SCSI_PAS16) += pas16.o diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index e7d0d47b918..e5f2d7d9002 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1283,6 +1283,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced) kfree(aac->queues); aac->queues = NULL; free_irq(aac->pdev->irq, aac); + if (aac->msi) + pci_disable_msi(aac->pdev); kfree(aac->fsa_dev); aac->fsa_dev = NULL; quirks = aac_get_driver_ident(index)->quirks; diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index bd22041e278..f5864485033 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -913,7 +913,7 @@ static void l2t_put(struct cxgbi_sock *csk) struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; if (csk->l2t) { - l2t_release(L2DATA(t3dev), csk->l2t); + l2t_release(t3dev, csk->l2t); csk->l2t = NULL; cxgbi_sock_put(csk); } diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index f84084bba2f..16ad97df5ba 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -1721,7 +1721,7 @@ static int sas_find_bcast_dev(struct 
domain_device *dev, list_for_each_entry(ch, &ex->children, siblings) { if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { res = sas_find_bcast_dev(ch, src_dev); - if (src_dev) + if (*src_dev) return res; } } @@ -1769,10 +1769,12 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent, sas_disable_routing(parent, phy->attached_sas_addr); } memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); - sas_port_delete_phy(phy->port, phy->phy); - if (phy->port->num_phys == 0) - sas_port_delete(phy->port); - phy->port = NULL; + if (phy->port) { + sas_port_delete_phy(phy->port, phy->phy); + if (phy->port->num_phys == 0) + sas_port_delete(phy->port); + phy->port = NULL; + } } static int sas_discover_bfs_by_root_level(struct domain_device *root, diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 4cace3f20c0..1e69527f1e4 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1328,10 +1328,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) qla2x00_sp_compl(ha, sp); } else { ctx = sp->ctx; - if (ctx->type == SRB_LOGIN_CMD || - ctx->type == SRB_LOGOUT_CMD) { - ctx->u.iocb_cmd->free(sp); - } else { + if (ctx->type == SRB_ELS_CMD_RPT || + ctx->type == SRB_ELS_CMD_HST || + ctx->type == SRB_CT_CMD) { struct fc_bsg_job *bsg_job = ctx->u.bsg_job; if (bsg_job->request->msgcode @@ -1343,6 +1342,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) kfree(sp->ctx); mempool_free(sp, ha->srb_mempool); + } else { + ctx->u.iocb_cmd->free(sp); } } } diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 1d23f383186..6a80749391d 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -50,6 +50,8 @@ #define PCH_RX_THOLD 7 #define PCH_RX_THOLD_MAX 15 +#define PCH_TX_THOLD 2 + #define PCH_MAX_BAUDRATE 5000000 #define PCH_MAX_FIFO_DEPTH 16 @@ -58,6 +60,7 @@ #define PCH_SLEEP_TIME 10 #define SSN_LOW 0x02U +#define SSN_HIGH 0x03U #define SSN_NO_CONTROL 0x00U #define PCH_MAX_CS 0xFF #define PCI_DEVICE_ID_GE_SPI 0x8816 @@ -316,16 +319,19 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val, /* if transfer complete interrupt */ if (reg_spsr_val & SPSR_FI_BIT) { - if (tx_index < bpw_len) + if ((tx_index == bpw_len) && (rx_index == tx_index)) { + /* disable interrupts */ + pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); + + /* transfer is completed; + inform pch_spi_process_messages */ + data->transfer_complete = true; + data->transfer_active = false; + wake_up(&data->wait); + } else { dev_err(&data->master->dev, "%s : Transfer is not completed", __func__); - /* disable interrupts */ - pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); - - /* transfer is completed;inform pch_spi_process_messages */ - data->transfer_complete = true; - data->transfer_active = false; - wake_up(&data->wait); + } } } @@ -348,16 +354,26 @@ static irqreturn_t pch_spi_handler(int irq, void *dev_id) "%s returning due to suspend\n", __func__); return IRQ_NONE; } - if (data->use_dma) - return IRQ_NONE; io_remap_addr = data->io_remap_addr; spsr = io_remap_addr + PCH_SPSR; reg_spsr_val = ioread32(spsr); - if (reg_spsr_val & SPSR_ORF_BIT) - dev_err(&board_dat->pdev->dev, "%s Over run error", __func__); + if (reg_spsr_val & SPSR_ORF_BIT) { + dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__); + if (data->current_msg->complete != 0) { + data->transfer_complete = true; + data->current_msg->status = -EIO; + data->current_msg->complete(data->current_msg->context); + 
data->bcurrent_msg_processing = false; + data->current_msg = NULL; + data->cur_trans = NULL; + } + } + + if (data->use_dma) + return IRQ_NONE; /* Check if the interrupt is for SPI device */ if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { @@ -756,10 +772,6 @@ static void pch_spi_set_ir(struct pch_spi_data *data) wait_event_interruptible(data->wait, data->transfer_complete); - pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); - dev_dbg(&data->master->dev, - "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); - /* clear all interrupts */ pch_spi_writereg(data->master, PCH_SPSR, pch_spi_readreg(data->master, PCH_SPSR)); @@ -815,10 +827,11 @@ static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw) } } -static void pch_spi_start_transfer(struct pch_spi_data *data) +static int pch_spi_start_transfer(struct pch_spi_data *data) { struct pch_spi_dma_ctrl *dma; unsigned long flags; + int rtn; dma = &data->dma; @@ -833,19 +846,23 @@ static void pch_spi_start_transfer(struct pch_spi_data *data) initiating the transfer. */ dev_dbg(&data->master->dev, "%s:waiting for transfer to get over\n", __func__); - wait_event_interruptible(data->wait, data->transfer_complete); + rtn = wait_event_interruptible_timeout(data->wait, + data->transfer_complete, + msecs_to_jiffies(2 * HZ)); dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent, DMA_FROM_DEVICE); + + dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent, + DMA_FROM_DEVICE); + memset(data->dma.tx_buf_virt, 0, PAGE_SIZE); + async_tx_ack(dma->desc_rx); async_tx_ack(dma->desc_tx); kfree(dma->sg_tx_p); kfree(dma->sg_rx_p); spin_lock_irqsave(&data->lock, flags); - pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); - dev_dbg(&data->master->dev, - "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); /* clear fifo threshold, disable interrupts, disable SPI transfer */ pch_spi_setclr_reg(data->master, PCH_SPCR, 0, @@ -858,6 +875,8 @@ static void pch_spi_start_transfer(struct pch_spi_data *data) pch_spi_clear_fifo(data->master); spin_unlock_irqrestore(&data->lock, flags); + + return rtn; } static void pch_dma_rx_complete(void *arg) @@ -1023,8 +1042,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) /* set receive fifo threshold and transmit fifo threshold */ pch_spi_setclr_reg(data->master, PCH_SPCR, ((size - 1) << SPCR_RFIC_FIELD) | - ((PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE) << - SPCR_TFIC_FIELD), + (PCH_TX_THOLD << SPCR_TFIC_FIELD), MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS); spin_unlock_irqrestore(&data->lock, flags); @@ -1035,13 +1053,20 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) /* offset, length setting */ sg = dma->sg_rx_p; for (i = 0; i < num; i++, sg++) { - if (i == 0) { - sg->offset = 0; + if (i == (num - 2)) { + sg->offset = size * i; + sg->offset = sg->offset * (*bpw / 8); sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem, sg->offset); sg_dma_len(sg) = rem; + } else if (i == (num - 1)) { + sg->offset = size * (i - 1) + rem; + sg->offset = sg->offset * (*bpw / 8); + sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, + sg->offset); + sg_dma_len(sg) = size; } else { - sg->offset = rem + size * (i - 1); + sg->offset = size * i; sg->offset = sg->offset * (*bpw / 8); sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, sg->offset); @@ -1065,6 +1090,16 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) dma->desc_rx = desc_rx; /* TX */ + if (data->bpw_len > PCH_DMA_TRANS_SIZE) { + num = data->bpw_len / 
PCH_DMA_TRANS_SIZE; + size = PCH_DMA_TRANS_SIZE; + rem = 16; + } else { + num = 1; + size = data->bpw_len; + rem = data->bpw_len; + } + dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC); sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */ /* offset, length setting */ @@ -1162,6 +1197,7 @@ static void pch_spi_process_messages(struct work_struct *pwork) if (data->use_dma) pch_spi_request_dma(data, data->current_msg->spi->bits_per_word); + pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); do { /* If we are already processing a message get the next transfer structure from the message otherwise retrieve @@ -1184,7 +1220,8 @@ static void pch_spi_process_messages(struct work_struct *pwork) if (data->use_dma) { pch_spi_handle_dma(data, &bpw); - pch_spi_start_transfer(data); + if (!pch_spi_start_transfer(data)) + goto out; pch_spi_copy_rx_data_for_dma(data, bpw); } else { pch_spi_set_tx(data, &bpw); @@ -1222,6 +1259,8 @@ static void pch_spi_process_messages(struct work_struct *pwork) } while (data->cur_trans != NULL); +out: + pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH); if (data->use_dma) pch_spi_release_dma(data); } diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c index 1a7c19ae766..8b307b42879 100644 --- a/drivers/staging/octeon/ethernet-rx.c +++ b/drivers/staging/octeon/ethernet-rx.c @@ -411,7 +411,8 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) skb->protocol = eth_type_trans(skb, dev); skb->dev = dev; - if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error)) + if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || + work->word2.s.L4_error || !work->word2.s.tcp_or_udp)) skb->ip_summed = CHECKSUM_NONE; else skb->ip_summed = CHECKSUM_UNNECESSARY; diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c index 58cf279ed87..bc95f52cad8 100644 --- a/drivers/tty/serial/lantiq.c +++ b/drivers/tty/serial/lantiq.c @@ -478,8 +478,10 @@ lqasc_set_termios(struct uart_port *port, spin_unlock_irqrestore(<q_asc_lock, flags); /* Don't rewrite B0 */ - if (tty_termios_baud_rate(new)) + if (tty_termios_baud_rate(new)) tty_termios_encode_baud_rate(new, baud, baud); + + uart_update_timeout(port, cflag, baud); } static const char* diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index a381cd22f51..e4e57d59edb 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1036,11 +1036,13 @@ out: * on error we return an unlocked page and the error value * on success we return a locked page and 0 */ -static int prepare_uptodate_page(struct page *page, u64 pos) +static int prepare_uptodate_page(struct page *page, u64 pos, + bool force_uptodate) { int ret = 0; - if ((pos & (PAGE_CACHE_SIZE - 1)) && !PageUptodate(page)) { + if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) && + !PageUptodate(page)) { ret = btrfs_readpage(NULL, page); if (ret) return ret; @@ -1061,7 +1063,7 @@ static int prepare_uptodate_page(struct page *page, u64 pos) static noinline int prepare_pages(struct btrfs_root *root, struct file *file, struct page **pages, size_t num_pages, loff_t pos, unsigned long first_index, - size_t write_bytes) + size_t write_bytes, bool force_uptodate) { struct extent_state *cached_state = NULL; int i; @@ -1086,10 +1088,11 @@ again: } if (i == 0) - err = prepare_uptodate_page(pages[i], pos); + err = prepare_uptodate_page(pages[i], pos, + force_uptodate); if (i == num_pages - 1) err = prepare_uptodate_page(pages[i], - pos + write_bytes); + pos + write_bytes, false); if (err) 
{ page_cache_release(pages[i]); faili = i - 1; @@ -1158,6 +1161,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, size_t num_written = 0; int nrptrs; int ret = 0; + bool force_page_uptodate = false; nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / @@ -1200,7 +1204,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, * contents of pages from loop to loop */ ret = prepare_pages(root, file, pages, num_pages, - pos, first_index, write_bytes); + pos, first_index, write_bytes, + force_page_uptodate); if (ret) { btrfs_delalloc_release_space(inode, num_pages << PAGE_CACHE_SHIFT); @@ -1217,12 +1222,15 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, if (copied < write_bytes) nrptrs = 1; - if (copied == 0) + if (copied == 0) { + force_page_uptodate = true; dirty_pages = 0; - else + } else { + force_page_uptodate = false; dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + } /* * If we had a short copy we need to release the excess delaloc diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 538f65a79ec..dae5dfe41ba 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1047,7 +1047,16 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, if (!max_to_defrag) max_to_defrag = last_index - 1; - while (i <= last_index && defrag_count < max_to_defrag) { + /* + * make writeback starts from i, so the defrag range can be + * written sequentially. + */ + if (i < inode->i_mapping->writeback_index) + inode->i_mapping->writeback_index = i; + + while (i <= last_index && defrag_count < max_to_defrag && + (i < (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> + PAGE_CACHE_SHIFT)) { /* * make sure we stop running if someone unmounts * the FS diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index f4af4cc3750..71beb020197 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2018,7 +2018,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) warned_on_ntlm = true; cERROR(1, "default security mechanism requested. The default " "security mechanism will be upgraded from ntlm to " - "ntlmv2 in kernel release 3.1"); + "ntlmv2 in kernel release 3.2"); } ses->overrideSecFlg = volume_info->secFlg; diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index cac2ecfa674..ef43fce519a 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -629,7 +629,7 @@ xfs_buf_item_push( * the xfsbufd to get this buffer written. We have to unlock the buffer * to allow the xfsbufd to write it, too. */ -STATIC void +STATIC bool xfs_buf_item_pushbuf( struct xfs_log_item *lip) { @@ -643,6 +643,7 @@ xfs_buf_item_pushbuf( xfs_buf_delwri_promote(bp); xfs_buf_relse(bp); + return true; } STATIC void diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c index 9e0e2fa3f2c..bb3f71d236d 100644 --- a/fs/xfs/xfs_dquot_item.c +++ b/fs/xfs/xfs_dquot_item.c @@ -183,13 +183,14 @@ xfs_qm_dqunpin_wait( * search the buffer cache can be a time consuming thing, and AIL lock is a * spinlock. 
*/ -STATIC void +STATIC bool xfs_qm_dquot_logitem_pushbuf( struct xfs_log_item *lip) { struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip); struct xfs_dquot *dqp = qlip->qli_dquot; struct xfs_buf *bp; + bool ret = true; ASSERT(XFS_DQ_IS_LOCKED(dqp)); @@ -201,17 +202,20 @@ xfs_qm_dquot_logitem_pushbuf( if (completion_done(&dqp->q_flush) || !(lip->li_flags & XFS_LI_IN_AIL)) { xfs_dqunlock(dqp); - return; + return true; } bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno, dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK); xfs_dqunlock(dqp); if (!bp) - return; + return true; if (XFS_BUF_ISDELAYWRITE(bp)) xfs_buf_delwri_promote(bp); + if (xfs_buf_ispinned(bp)) + ret = false; xfs_buf_relse(bp); + return ret; } /* diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 588406dc6a3..836ad80d4f2 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -708,13 +708,14 @@ xfs_inode_item_committed( * marked delayed write. If that's the case, we'll promote it and that will * allow the caller to write the buffer by triggering the xfsbufd to run. */ -STATIC void +STATIC bool xfs_inode_item_pushbuf( struct xfs_log_item *lip) { struct xfs_inode_log_item *iip = INODE_ITEM(lip); struct xfs_inode *ip = iip->ili_inode; struct xfs_buf *bp; + bool ret = true; ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED)); @@ -725,7 +726,7 @@ xfs_inode_item_pushbuf( if (completion_done(&ip->i_flush) || !(lip->li_flags & XFS_LI_IN_AIL)) { xfs_iunlock(ip, XFS_ILOCK_SHARED); - return; + return true; } bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno, @@ -733,10 +734,13 @@ xfs_inode_item_pushbuf( xfs_iunlock(ip, XFS_ILOCK_SHARED); if (!bp) - return; + return true; if (XFS_BUF_ISDELAYWRITE(bp)) xfs_buf_delwri_promote(bp); + if (xfs_buf_ispinned(bp)) + ret = false; xfs_buf_relse(bp); + return ret; } /* diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h index 1e8a45e74c3..828662f70d6 100644 --- a/fs/xfs/xfs_linux.h +++ b/fs/xfs/xfs_linux.h @@ -68,6 +68,8 @@ #include <linux/ctype.h> #include <linux/writeback.h> #include <linux/capability.h> +#include <linux/kthread.h> +#include <linux/freezer.h> #include <linux/list_sort.h> #include <asm/page.h> diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 2366c54cc4f..5cf06b85fd9 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -1652,24 +1652,13 @@ xfs_init_workqueues(void) */ xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8); if (!xfs_syncd_wq) - goto out; - - xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8); - if (!xfs_ail_wq) - goto out_destroy_syncd; - + return -ENOMEM; return 0; - -out_destroy_syncd: - destroy_workqueue(xfs_syncd_wq); -out: - return -ENOMEM; } STATIC void xfs_destroy_workqueues(void) { - destroy_workqueue(xfs_ail_wq); destroy_workqueue(xfs_syncd_wq); } diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index 06a9759b635..53597f4db9b 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h @@ -350,7 +350,7 @@ typedef struct xfs_item_ops { void (*iop_unlock)(xfs_log_item_t *); xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t); void (*iop_push)(xfs_log_item_t *); - void (*iop_pushbuf)(xfs_log_item_t *); + bool (*iop_pushbuf)(xfs_log_item_t *); void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t); } xfs_item_ops_t; diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index c15aa29fa16..3a1e7ca54c2 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -28,8 +28,6 @@ #include "xfs_trans_priv.h" #include "xfs_error.h" -struct workqueue_struct 
*xfs_ail_wq; /* AIL workqueue */ - #ifdef DEBUG /* * Check that the list is sorted as it should be. @@ -356,16 +354,10 @@ xfs_ail_delete( xfs_trans_ail_cursor_clear(ailp, lip); } -/* - * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself - * to run at a later time if there is more work to do to complete the push. - */ -STATIC void -xfs_ail_worker( - struct work_struct *work) +static long +xfsaild_push( + struct xfs_ail *ailp) { - struct xfs_ail *ailp = container_of(to_delayed_work(work), - struct xfs_ail, xa_work); xfs_mount_t *mp = ailp->xa_mount; struct xfs_ail_cursor cur; xfs_log_item_t *lip; @@ -427,8 +419,13 @@ xfs_ail_worker( case XFS_ITEM_PUSHBUF: XFS_STATS_INC(xs_push_ail_pushbuf); - IOP_PUSHBUF(lip); - ailp->xa_last_pushed_lsn = lsn; + + if (!IOP_PUSHBUF(lip)) { + stuck++; + flush_log = 1; + } else { + ailp->xa_last_pushed_lsn = lsn; + } push_xfsbufd = 1; break; @@ -440,7 +437,6 @@ xfs_ail_worker( case XFS_ITEM_LOCKED: XFS_STATS_INC(xs_push_ail_locked); - ailp->xa_last_pushed_lsn = lsn; stuck++; break; @@ -501,20 +497,6 @@ out_done: /* We're past our target or empty, so idle */ ailp->xa_last_pushed_lsn = 0; - /* - * We clear the XFS_AIL_PUSHING_BIT first before checking - * whether the target has changed. If the target has changed, - * this pushes the requeue race directly onto the result of the - * atomic test/set bit, so we are guaranteed that either the - * the pusher that changed the target or ourselves will requeue - * the work (but not both). - */ - clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags); - smp_rmb(); - if (XFS_LSN_CMP(ailp->xa_target, target) == 0 || - test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) - return; - tout = 50; } else if (XFS_LSN_CMP(lsn, target) >= 0) { /* @@ -537,9 +519,30 @@ out_done: tout = 20; } - /* There is more to do, requeue us. */ - queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, - msecs_to_jiffies(tout)); + return tout; +} + +static int +xfsaild( + void *data) +{ + struct xfs_ail *ailp = data; + long tout = 0; /* milliseconds */ + + while (!kthread_should_stop()) { + if (tout && tout <= 20) + __set_current_state(TASK_KILLABLE); + else + __set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(tout ? 
+ msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT); + + try_to_freeze(); + + tout = xfsaild_push(ailp); + } + + return 0; } /* @@ -574,8 +577,9 @@ xfs_ail_push( */ smp_wmb(); xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn); - if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) - queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0); + smp_wmb(); + + wake_up_process(ailp->xa_task); } /* @@ -813,9 +817,18 @@ xfs_trans_ail_init( INIT_LIST_HEAD(&ailp->xa_ail); INIT_LIST_HEAD(&ailp->xa_cursors); spin_lock_init(&ailp->xa_lock); - INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker); + + ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s", + ailp->xa_mount->m_fsname); + if (IS_ERR(ailp->xa_task)) + goto out_free_ailp; + mp->m_ail = ailp; return 0; + +out_free_ailp: + kmem_free(ailp); + return ENOMEM; } void @@ -824,6 +837,6 @@ xfs_trans_ail_destroy( { struct xfs_ail *ailp = mp->m_ail; - cancel_delayed_work_sync(&ailp->xa_work); + kthread_stop(ailp->xa_task); kmem_free(ailp); } diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h index 212946b9723..22750b5e4a8 100644 --- a/fs/xfs/xfs_trans_priv.h +++ b/fs/xfs/xfs_trans_priv.h @@ -64,23 +64,17 @@ struct xfs_ail_cursor { */ struct xfs_ail { struct xfs_mount *xa_mount; + struct task_struct *xa_task; struct list_head xa_ail; xfs_lsn_t xa_target; struct list_head xa_cursors; spinlock_t xa_lock; - struct delayed_work xa_work; xfs_lsn_t xa_last_pushed_lsn; - unsigned long xa_flags; }; -#define XFS_AIL_PUSHING_BIT 0 - /* * From xfs_trans_ail.c */ - -extern struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */ - void xfs_trans_ail_update_bulk(struct xfs_ail *ailp, struct xfs_ail_cursor *cur, struct xfs_log_item **log_items, int nr_items, diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 3fa1f3d90ce..99e3e50b5c5 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -197,6 +197,11 @@ struct dm_target { * whether or not its underlying devices have support. */ unsigned discards_supported:1; + + /* + * Set if this target does not return zeroes on discarded blocks. 
+ */ + unsigned discard_zeroes_data_unsupported:1; }; /* Each target can link one of these into the table */ diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index e807ad687a0..3ad553e8eae 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -80,6 +80,7 @@ extern void irq_domain_del(struct irq_domain *domain); #endif /* CONFIG_IRQ_DOMAIN */ #if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ) +extern struct irq_domain_ops irq_domain_simple_ops; extern void irq_domain_add_simple(struct device_node *controller, int irq_base); extern void irq_domain_generate_simple(const struct of_device_id *match, u64 phys_base, unsigned int irq_start); diff --git a/include/linux/pci.h b/include/linux/pci.h index 8c230cbcbb4..9fc01226055 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -621,8 +621,9 @@ struct pci_driver { extern void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss); enum pcie_bus_config_types { - PCIE_BUS_PERFORMANCE, + PCIE_BUS_TUNE_OFF, PCIE_BUS_SAFE, + PCIE_BUS_PERFORMANCE, PCIE_BUS_PEER2PEER, }; diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h index e07e2742a86..1dc420ba213 100644 --- a/include/linux/ptp_classify.h +++ b/include/linux/ptp_classify.h @@ -51,6 +51,7 @@ #define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN) #define PTP_EV_PORT 319 +#define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */ #define OFF_ETYPE 12 #define OFF_IHL 14 @@ -116,14 +117,20 @@ static inline int ptp_filter_init(struct sock_filter *f, int len) {OP_OR, 0, 0, PTP_CLASS_IPV6 }, /* */ \ {OP_RETA, 0, 0, 0 }, /* */ \ /*L3x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \ -/*L40*/ {OP_JEQ, 0, 6, ETH_P_8021Q }, /* f goto L50 */ \ +/*L40*/ {OP_JEQ, 0, 9, ETH_P_8021Q }, /* f goto L50 */ \ {OP_LDH, 0, 0, OFF_ETYPE + 4 }, /* */ \ - {OP_JEQ, 0, 9, ETH_P_1588 }, /* f goto L60 */ \ + {OP_JEQ, 0, 15, ETH_P_1588 }, /* f goto L60 */ \ + {OP_LDB, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \ + {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \ + {OP_JEQ, 0, 12, 0 }, /* f goto L6x */ \ {OP_LDH, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \ {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ {OP_OR, 0, 0, PTP_CLASS_VLAN }, /* */ \ {OP_RETA, 0, 0, 0 }, /* */ \ -/*L50*/ {OP_JEQ, 0, 4, ETH_P_1588 }, /* f goto L61 */ \ +/*L50*/ {OP_JEQ, 0, 7, ETH_P_1588 }, /* f goto L61 */ \ + {OP_LDB, 0, 0, ETH_HLEN }, /* */ \ + {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \ + {OP_JEQ, 0, 4, 0 }, /* f goto L6x */ \ {OP_LDH, 0, 0, ETH_HLEN }, /* */ \ {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ {OP_OR, 0, 0, PTP_CLASS_L2 }, /* */ \ diff --git a/include/linux/sched.h b/include/linux/sched.h index 4ac2c0578e0..41d0237fd44 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1956,7 +1956,6 @@ static inline void disable_sched_clock_irqtime(void) {} extern unsigned long long task_sched_runtime(struct task_struct *task); -extern unsigned long long thread_group_sched_runtime(struct task_struct *task); /* sched_exec is called by processes performing an exec */ #ifdef CONFIG_SMP diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 1aaf915656f..8fa4430f99c 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -900,6 +900,7 @@ struct netns_ipvs { volatile int sync_state; volatile int master_syncid; volatile int backup_syncid; + struct mutex sync_mutex; /* multicast interface name */ char master_mcast_ifn[IP_VS_IFNAME_MAXLEN]; char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN]; diff --git a/include/net/udplite.h b/include/net/udplite.h index 673a024c6b2..5f097ca7d5c 100644 
--- a/include/net/udplite.h +++ b/include/net/udplite.h @@ -66,40 +66,34 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh) return 0; } -static inline int udplite_sender_cscov(struct udp_sock *up, struct udphdr *uh) +/* Slow-path computation of checksum. Socket is locked. */ +static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb) { + const struct udp_sock *up = udp_sk(skb->sk); int cscov = up->len; + __wsum csum = 0; - /* - * Sender has set `partial coverage' option on UDP-Lite socket - */ - if (up->pcflag & UDPLITE_SEND_CC) { + if (up->pcflag & UDPLITE_SEND_CC) { + /* + * Sender has set `partial coverage' option on UDP-Lite socket. + * The special case "up->pcslen == 0" signifies full coverage. + */ if (up->pcslen < up->len) { - /* up->pcslen == 0 means that full coverage is required, - * partial coverage only if 0 < up->pcslen < up->len */ - if (0 < up->pcslen) { - cscov = up->pcslen; - } - uh->len = htons(up->pcslen); + if (0 < up->pcslen) + cscov = up->pcslen; + udp_hdr(skb)->len = htons(up->pcslen); } - /* - * NOTE: Causes for the error case `up->pcslen > up->len': - * (i) Application error (will not be penalized). - * (ii) Payload too big for send buffer: data is split - * into several packets, each with its own header. - * In this case (e.g. last segment), coverage may - * exceed packet length. - * Since packets with coverage length > packet length are - * illegal, we fall back to the defaults here. - */ + /* + * NOTE: Causes for the error case `up->pcslen > up->len': + * (i) Application error (will not be penalized). + * (ii) Payload too big for send buffer: data is split + * into several packets, each with its own header. + * In this case (e.g. last segment), coverage may + * exceed packet length. + * Since packets with coverage length > packet length are + * illegal, we fall back to the defaults here. + */ } - return cscov; -} - -static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb) -{ - int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb)); - __wsum csum = 0; skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */ @@ -115,16 +109,21 @@ static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb) return csum; } +/* Fast-path computation of checksum. Socket may not be locked. 
*/ static inline __wsum udplite_csum(struct sk_buff *skb) { - struct sock *sk = skb->sk; - int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb)); + const struct udp_sock *up = udp_sk(skb->sk); const int off = skb_transport_offset(skb); - const int len = skb->len - off; + int len = skb->len - off; + if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < len) { + if (0 < up->pcslen) + len = up->pcslen; + udp_hdr(skb)->len = htons(up->pcslen); + } skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */ - return skb_checksum(skb, off, min(cscov, len), 0); + return skb_checksum(skb, off, len, 0); } extern void udplite4_register(void); diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 6bca4cc0063..5f172703eb4 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -298,7 +298,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template, __array(char, name, 32) __field(unsigned long, ino) __field(unsigned long, state) - __field(unsigned long, age) + __field(unsigned long, dirtied_when) __field(unsigned long, writeback_index) __field(long, nr_to_write) __field(unsigned long, wrote) @@ -309,19 +309,19 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template, dev_name(inode->i_mapping->backing_dev_info->dev), 32); __entry->ino = inode->i_ino; __entry->state = inode->i_state; - __entry->age = (jiffies - inode->dirtied_when) * - 1000 / HZ; + __entry->dirtied_when = inode->dirtied_when; __entry->writeback_index = inode->i_mapping->writeback_index; __entry->nr_to_write = nr_to_write; __entry->wrote = nr_to_write - wbc->nr_to_write; ), - TP_printk("bdi %s: ino=%lu state=%s age=%lu " + TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu " "index=%lu to_write=%ld wrote=%lu", __entry->name, __entry->ino, show_inode_state(__entry->state), - __entry->age, + __entry->dirtied_when, + (jiffies - __entry->dirtied_when) / HZ, __entry->writeback_index, __entry->nr_to_write, __entry->wrote diff --git a/init/main.c b/init/main.c index 2a9b88aa5e7..03b408dff82 100644 --- a/init/main.c +++ b/init/main.c @@ -381,9 +381,6 @@ static noinline void __init_refok rest_init(void) preempt_enable_no_resched(); schedule(); - /* At this point, we can enable user mode helper functionality */ - usermodehelper_enable(); - /* Call into cpu_idle with preempt disabled */ preempt_disable(); cpu_idle(); @@ -733,6 +730,7 @@ static void __init do_basic_setup(void) driver_init(); init_irq_proc(); do_ctors(); + usermodehelper_enable(); do_initcalls(); } diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index d5828da3fd3..b57a3776de4 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -29,7 +29,11 @@ void irq_domain_add(struct irq_domain *domain) */ for (hwirq = 0; hwirq < domain->nr_irq; hwirq++) { d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq)); - if (d || d->domain) { + if (!d) { + WARN(1, "error: assigning domain to non existant irq_desc"); + return; + } + if (d->domain) { /* things are broken; just report, don't clean up */ WARN(1, "error: irq_desc already assigned to a domain"); return; diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 58f405b581e..640ded8f5c4 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -250,7 +250,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) do { times->utime = cputime_add(times->utime, t->utime); times->stime = cputime_add(times->stime, t->stime); - times->sum_exec_runtime += t->se.sum_exec_runtime; + 
times->sum_exec_runtime += task_sched_runtime(t); } while_each_thread(tsk, t); out: rcu_read_unlock(); @@ -274,9 +274,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times) struct task_cputime sum; unsigned long flags; - spin_lock_irqsave(&cputimer->lock, flags); if (!cputimer->running) { - cputimer->running = 1; /* * The POSIX timer interface allows for absolute time expiry * values through the TIMER_ABSTIME flag, therefore we have @@ -284,8 +282,11 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times) * it. */ thread_group_cputime(tsk, &sum); + spin_lock_irqsave(&cputimer->lock, flags); + cputimer->running = 1; update_gt_cputime(&cputimer->cputime, &sum); - } + } else + spin_lock_irqsave(&cputimer->lock, flags); *times = cputimer->cputime; spin_unlock_irqrestore(&cputimer->lock, flags); } @@ -312,7 +313,8 @@ static int cpu_clock_sample_group(const clockid_t which_clock, cpu->cpu = cputime.utime; break; case CPUCLOCK_SCHED: - cpu->sched = thread_group_sched_runtime(p); + thread_group_cputime(p, &cputime); + cpu->sched = cputime.sum_exec_runtime; break; } return 0; diff --git a/kernel/resource.c b/kernel/resource.c index 3b3cedc5259..c8dc249da5c 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -419,6 +419,9 @@ static int __find_resource(struct resource *root, struct resource *old, else tmp.end = root->end; + if (tmp.end < tmp.start) + goto next; + resource_clip(&tmp, constraint->min, constraint->max); arch_remove_reservations(&tmp); @@ -436,8 +439,10 @@ static int __find_resource(struct resource *root, struct resource *old, return 0; } } - if (!this) + +next: if (!this || this->end == root->end) break; + if (this != old) tmp.start = this->end + 1; this = this->sibling; diff --git a/kernel/sched.c b/kernel/sched.c index ec5f472bc5b..b50b0f0c9aa 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -3725,30 +3725,6 @@ unsigned long long task_sched_runtime(struct task_struct *p) } /* - * Return sum_exec_runtime for the thread group. - * In case the task is currently running, return the sum plus current's - * pending runtime that have not been accounted yet. - * - * Note that the thread group might have other running tasks as well, - * so the return value not includes other pending runtime that other - * running tasks might have. - */ -unsigned long long thread_group_sched_runtime(struct task_struct *p) -{ - struct task_cputime totals; - unsigned long flags; - struct rq *rq; - u64 ns; - - rq = task_rq_lock(p, &flags); - thread_group_cputime(p, &totals); - ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq); - task_rq_unlock(rq, p, &flags); - - return ns; -} - -/* * Account user cpu time to a process. 
* @p: the process that the cpu time gets accounted to * @cputime: the cpu time spent in user space since the last update @@ -4372,7 +4348,7 @@ static inline void sched_submit_work(struct task_struct *tsk) blk_schedule_flush_plug(tsk); } -asmlinkage void schedule(void) +asmlinkage void __sched schedule(void) { struct task_struct *tsk = current; diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 97540f0c9e4..af1177858be 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -1050,7 +1050,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags) */ if (curr && unlikely(rt_task(curr)) && (curr->rt.nr_cpus_allowed < 2 || - curr->prio < p->prio) && + curr->prio <= p->prio) && (p->rt.nr_cpus_allowed > 1)) { int target = find_lowest_rq(p); @@ -1581,7 +1581,7 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p) p->rt.nr_cpus_allowed > 1 && rt_task(rq->curr) && (rq->curr->rt.nr_cpus_allowed < 2 || - rq->curr->prio < p->prio)) + rq->curr->prio <= p->prio)) push_rt_tasks(rq); } diff --git a/kernel/sys.c b/kernel/sys.c index 18ee1d2f647..1dbbe695a5e 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1172,7 +1172,7 @@ DECLARE_RWSEM(uts_sem); static int override_release(char __user *release, int len) { int ret = 0; - char buf[len]; + char buf[65]; if (current->personality & UNAME26) { char *rest = UTS_RELEASE; diff --git a/mm/migrate.c b/mm/migrate.c index 666e4e67741..14d0a6a632f 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -120,10 +120,10 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, ptep = pte_offset_map(pmd, addr); - if (!is_swap_pte(*ptep)) { - pte_unmap(ptep); - goto out; - } + /* + * Peek to check is_swap_pte() before taking ptlock? No, we + * can race mremap's move_ptes(), which skips anon_vma lock. + */ ptl = pte_lockptr(mm, pmd); } diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 3e2f91ffa4e..05dd35114a2 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -565,7 +565,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) struct orig_node *orig_node = NULL; int data_len = skb->len, ret; short vid = -1; - bool do_bcast = false; + bool do_bcast; if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) goto dropped; @@ -598,15 +598,15 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) tt_local_add(soft_iface, ethhdr->h_source); orig_node = transtable_search(bat_priv, ethhdr->h_dest); - if (is_multicast_ether_addr(ethhdr->h_dest) || - (orig_node && orig_node->gw_flags)) { + do_bcast = is_multicast_ether_addr(ethhdr->h_dest); + if (do_bcast || (orig_node && orig_node->gw_flags)) { ret = gw_is_target(bat_priv, skb, orig_node); if (ret < 0) goto dropped; - if (ret == 0) - do_bcast = true; + if (ret) + do_bcast = false; } /* ethernet packet should be broadcasted */ diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 61f1f623091..e8292369cdc 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -26,6 +26,8 @@ /* Bluetooth L2CAP sockets. 
*/ +#include <linux/security.h> + #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> @@ -933,6 +935,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent) chan->force_reliable = pchan->force_reliable; chan->flushable = pchan->flushable; chan->force_active = pchan->force_active; + + security_sk_clone(parent, sk); } else { switch (sk->sk_type) { diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 482722bbc7a..5417f612732 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -42,6 +42,7 @@ #include <linux/device.h> #include <linux/debugfs.h> #include <linux/seq_file.h> +#include <linux/security.h> #include <net/sock.h> #include <asm/system.h> @@ -264,6 +265,8 @@ static void rfcomm_sock_init(struct sock *sk, struct sock *parent) pi->sec_level = rfcomm_pi(parent)->sec_level; pi->role_switch = rfcomm_pi(parent)->role_switch; + + security_sk_clone(parent, sk); } else { pi->dlc->defer_setup = 0; diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 8270f05e3f1..a324b009e34 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -41,6 +41,7 @@ #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/list.h> +#include <linux/security.h> #include <net/sock.h> #include <asm/system.h> @@ -403,8 +404,10 @@ static void sco_sock_init(struct sock *sk, struct sock *parent) { BT_DBG("sk %p", sk); - if (parent) + if (parent) { sk->sk_type = parent->sk_type; + security_sk_clone(parent, sk); + } } static struct proto sco_proto = { diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 32b8f9f7f79..ff3ed6086ce 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -91,7 +91,6 @@ static int br_dev_open(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); - netif_carrier_off(dev); netdev_update_features(dev); netif_start_queue(dev); br_stp_enable_bridge(br); @@ -108,8 +107,6 @@ static int br_dev_stop(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); - netif_carrier_off(dev); - br_stp_disable_bridge(br); br_multicast_stop(br); diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index e73815456ad..1d420f64ff2 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -161,9 +161,10 @@ static void del_nbp(struct net_bridge_port *p) call_rcu(&p->rcu, destroy_nbp_rcu); } -/* called with RTNL */ -static void del_br(struct net_bridge *br, struct list_head *head) +/* Delete bridge device */ +void br_dev_delete(struct net_device *dev, struct list_head *head) { + struct net_bridge *br = netdev_priv(dev); struct net_bridge_port *p, *n; list_for_each_entry_safe(p, n, &br->port_list, list) { @@ -268,7 +269,7 @@ int br_del_bridge(struct net *net, const char *name) } else - del_br(netdev_priv(dev), NULL); + br_dev_delete(dev, NULL); rtnl_unlock(); return ret; @@ -449,7 +450,7 @@ void __net_exit br_net_exit(struct net *net) rtnl_lock(); for_each_netdev(net, dev) if (dev->priv_flags & IFF_EBRIDGE) - del_br(netdev_priv(dev), &list); + br_dev_delete(dev, &list); unregister_netdevice_many(&list); rtnl_unlock(); diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 5b1ed1ba9aa..e5f9ece3c9a 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -210,6 +210,7 @@ static struct rtnl_link_ops br_link_ops __read_mostly = { .priv_size = sizeof(struct net_bridge), .setup = br_dev_setup, .validate = br_validate, + .dellink = br_dev_delete, }; int __init br_netlink_init(void) diff --git a/net/bridge/br_private.h 
b/net/bridge/br_private.h index 78cc364997d..857a021deea 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -294,6 +294,7 @@ static inline int br_is_root_bridge(const struct net_bridge *br) /* br_device.c */ extern void br_dev_setup(struct net_device *dev); +extern void br_dev_delete(struct net_device *dev, struct list_head *list); extern netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev); #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/net/can/bcm.c b/net/can/bcm.c index d6c8ae5b2e6..c84963d2dee 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -344,6 +344,18 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head, } } +static void bcm_tx_start_timer(struct bcm_op *op) +{ + if (op->kt_ival1.tv64 && op->count) + hrtimer_start(&op->timer, + ktime_add(ktime_get(), op->kt_ival1), + HRTIMER_MODE_ABS); + else if (op->kt_ival2.tv64) + hrtimer_start(&op->timer, + ktime_add(ktime_get(), op->kt_ival2), + HRTIMER_MODE_ABS); +} + static void bcm_tx_timeout_tsklet(unsigned long data) { struct bcm_op *op = (struct bcm_op *)data; @@ -365,26 +377,12 @@ static void bcm_tx_timeout_tsklet(unsigned long data) bcm_send_to_user(op, &msg_head, NULL, 0); } - } - - if (op->kt_ival1.tv64 && (op->count > 0)) { - - /* send (next) frame */ bcm_can_tx(op); - hrtimer_start(&op->timer, - ktime_add(ktime_get(), op->kt_ival1), - HRTIMER_MODE_ABS); - } else { - if (op->kt_ival2.tv64) { + } else if (op->kt_ival2.tv64) + bcm_can_tx(op); - /* send (next) frame */ - bcm_can_tx(op); - hrtimer_start(&op->timer, - ktime_add(ktime_get(), op->kt_ival2), - HRTIMER_MODE_ABS); - } - } + bcm_tx_start_timer(op); } /* @@ -964,23 +962,20 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, hrtimer_cancel(&op->timer); } - if ((op->flags & STARTTIMER) && - ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) { - + if (op->flags & STARTTIMER) { + hrtimer_cancel(&op->timer); /* spec: send can_frame when starting timer */ op->flags |= TX_ANNOUNCE; - - if (op->kt_ival1.tv64 && (op->count > 0)) { - /* op->count-- is done in bcm_tx_timeout_handler */ - hrtimer_start(&op->timer, op->kt_ival1, - HRTIMER_MODE_REL); - } else - hrtimer_start(&op->timer, op->kt_ival2, - HRTIMER_MODE_REL); } - if (op->flags & TX_ANNOUNCE) + if (op->flags & TX_ANNOUNCE) { bcm_can_tx(op); + if (op->count) + op->count--; + } + + if (op->flags & STARTTIMER) + bcm_tx_start_timer(op); return msg_head->nframes * CFSIZ + MHSIZ; } diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index 132963abc26..2883ea01e68 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c @@ -232,6 +232,7 @@ void ceph_destroy_options(struct ceph_options *opt) ceph_crypto_key_destroy(opt->key); kfree(opt->key); } + kfree(opt->mon_addr); kfree(opt); } EXPORT_SYMBOL(ceph_destroy_options); diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index c340e2e0765..9918e9eb276 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -2307,6 +2307,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags) m->front_max = front_len; m->front_is_vmalloc = false; m->more_to_follow = false; + m->ack_stamp = 0; m->pool = NULL; /* middle */ diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 16836a7df7a..88ad8a2501b 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -217,6 +217,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, INIT_LIST_HEAD(&req->r_unsafe_item); INIT_LIST_HEAD(&req->r_linger_item); 
INIT_LIST_HEAD(&req->r_linger_osd); + INIT_LIST_HEAD(&req->r_req_lru_item); req->r_flags = flags; WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0); @@ -816,13 +817,10 @@ static void __register_request(struct ceph_osd_client *osdc, { req->r_tid = ++osdc->last_tid; req->r_request->hdr.tid = cpu_to_le64(req->r_tid); - INIT_LIST_HEAD(&req->r_req_lru_item); - dout("__register_request %p tid %lld\n", req, req->r_tid); __insert_request(osdc, req); ceph_osdc_get_request(req); osdc->num_requests++; - if (osdc->num_requests == 1) { dout(" first request, scheduling timeout\n"); __schedule_osd_timeout(osdc); diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index e97c3588c3e..fd863fe7693 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -339,6 +339,7 @@ static int __insert_pg_mapping(struct ceph_pg_mapping *new, struct ceph_pg_mapping *pg = NULL; int c; + dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new); while (*p) { parent = *p; pg = rb_entry(parent, struct ceph_pg_mapping, node); @@ -366,16 +367,33 @@ static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root, while (n) { pg = rb_entry(n, struct ceph_pg_mapping, node); c = pgid_cmp(pgid, pg->pgid); - if (c < 0) + if (c < 0) { n = n->rb_left; - else if (c > 0) + } else if (c > 0) { n = n->rb_right; - else + } else { + dout("__lookup_pg_mapping %llx got %p\n", + *(u64 *)&pgid, pg); return pg; + } } return NULL; } +static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid) +{ + struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid); + + if (pg) { + dout("__remove_pg_mapping %llx %p\n", *(u64 *)&pgid, pg); + rb_erase(&pg->node, root); + kfree(pg); + return 0; + } + dout("__remove_pg_mapping %llx dne\n", *(u64 *)&pgid); + return -ENOENT; +} + /* * rbtree of pg pool info */ @@ -711,7 +729,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, void *start = *p; int err = -EINVAL; u16 version; - struct rb_node *rbp; ceph_decode_16_safe(p, end, version, bad); if (version > CEPH_OSDMAP_INC_VERSION) { @@ -861,7 +878,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, } /* new_pg_temp */ - rbp = rb_first(&map->pg_temp); ceph_decode_32_safe(p, end, len, bad); while (len--) { struct ceph_pg_mapping *pg; @@ -872,18 +888,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, ceph_decode_copy(p, &pgid, sizeof(pgid)); pglen = ceph_decode_32(p); - /* remove any? 
*/ - while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping, - node)->pgid, pgid) <= 0) { - struct ceph_pg_mapping *cur = - rb_entry(rbp, struct ceph_pg_mapping, node); - - rbp = rb_next(rbp); - dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid); - rb_erase(&cur->node, &map->pg_temp); - kfree(cur); - } - if (pglen) { /* insert */ ceph_decode_need(p, end, pglen*sizeof(u32), bad); @@ -903,17 +907,11 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, } dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, pglen); + } else { + /* remove */ + __remove_pg_mapping(&map->pg_temp, pgid); } } - while (rbp) { - struct ceph_pg_mapping *cur = - rb_entry(rbp, struct ceph_pg_mapping, node); - - rbp = rb_next(rbp); - dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid); - rb_erase(&cur->node, &map->pg_temp); - kfree(cur); - } /* ignore the rest */ *p = end; @@ -1046,10 +1044,25 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, struct ceph_pg_mapping *pg; struct ceph_pg_pool_info *pool; int ruleno; - unsigned poolid, ps, pps; + unsigned poolid, ps, pps, t; int preferred; + poolid = le32_to_cpu(pgid.pool); + ps = le16_to_cpu(pgid.ps); + preferred = (s16)le16_to_cpu(pgid.preferred); + + pool = __lookup_pg_pool(&osdmap->pg_pools, poolid); + if (!pool) + return NULL; + /* pg_temp? */ + if (preferred >= 0) + t = ceph_stable_mod(ps, le32_to_cpu(pool->v.lpg_num), + pool->lpgp_num_mask); + else + t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num), + pool->pgp_num_mask); + pgid.ps = cpu_to_le16(t); pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid); if (pg) { *num = pg->len; @@ -1057,18 +1070,6 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, } /* crush */ - poolid = le32_to_cpu(pgid.pool); - ps = le16_to_cpu(pgid.ps); - preferred = (s16)le16_to_cpu(pgid.preferred); - - /* don't forcefeed bad device ids to crush */ - if (preferred >= osdmap->max_osd || - preferred >= osdmap->crush->max_devices) - preferred = -1; - - pool = __lookup_pg_pool(&osdmap->pg_pools, poolid); - if (!pool) - return NULL; ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset, pool->v.type, pool->v.size); if (ruleno < 0) { @@ -1078,6 +1079,11 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, return NULL; } + /* don't forcefeed bad device ids to crush */ + if (preferred >= osdmap->max_osd || + preferred >= osdmap->crush->max_devices) + preferred = -1; + if (preferred >= 0) pps = ceph_stable_mod(ps, le32_to_cpu(pool->v.lpgp_num), diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 3231b468bb7..27071ee2a4e 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -475,8 +475,11 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) list_del_rcu(&rule->list); - if (rule->action == FR_ACT_GOTO) + if (rule->action == FR_ACT_GOTO) { ops->nr_goto_rules--; + if (rtnl_dereference(rule->ctarget) == NULL) + ops->unresolved_rules--; + } /* * Check if this rule is a target to any of them. 
If so, diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 21fab3edb92..d73aab3fbfc 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1389,9 +1389,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, BUG_ON(!pcount); - /* Tweak before seqno plays */ - if (!tcp_is_fack(tp) && tcp_is_sack(tp) && tp->lost_skb_hint && - !before(TCP_SKB_CB(tp->lost_skb_hint)->seq, TCP_SKB_CB(skb)->seq)) + if (skb == tp->lost_skb_hint) tp->lost_cnt_hint += pcount; TCP_SKB_CB(prev)->end_seq += shifted; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index c34f0151394..7963e03f106 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -927,18 +927,21 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, } sk_nocaps_add(sk, NETIF_F_GSO_MASK); } - if (tcp_alloc_md5sig_pool(sk) == NULL) { + + md5sig = tp->md5sig_info; + if (md5sig->entries4 == 0 && + tcp_alloc_md5sig_pool(sk) == NULL) { kfree(newkey); return -ENOMEM; } - md5sig = tp->md5sig_info; if (md5sig->alloced4 == md5sig->entries4) { keys = kmalloc((sizeof(*keys) * (md5sig->entries4 + 1)), GFP_ATOMIC); if (!keys) { kfree(newkey); - tcp_free_md5sig_pool(); + if (md5sig->entries4 == 0) + tcp_free_md5sig_pool(); return -ENOMEM; } @@ -982,6 +985,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) kfree(tp->md5sig_info->keys4); tp->md5sig_info->keys4 = NULL; tp->md5sig_info->alloced4 = 0; + tcp_free_md5sig_pool(); } else if (tp->md5sig_info->entries4 != i) { /* Need to do some manipulation */ memmove(&tp->md5sig_info->keys4[i], @@ -989,7 +993,6 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) (tp->md5sig_info->entries4 - i) * sizeof(struct tcp4_md5sig_key)); } - tcp_free_md5sig_pool(); return 0; } } diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index d2fe4e06b47..0ce3d06dce6 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -328,6 +328,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1); + tw->tw_transparent = inet_sk(sk)->transparent; tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale; tcptw->tw_rcv_nxt = tp->rcv_nxt; tcptw->tw_snd_nxt = tp->snd_nxt; diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 3b5669a2582..d27c797f9f0 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -875,6 +875,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, skb_reset_transport_header(skb); __skb_push(skb, skb_gro_offset(skb)); + ops = rcu_dereference(inet6_protos[proto]); if (!ops || !ops->gro_receive) goto out_unlock; diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 705c8288628..def0538e241 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -696,8 +696,10 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, int err; err = ip6mr_fib_lookup(net, &fl6, &mrt); - if (err < 0) + if (err < 0) { + kfree_skb(skb); return err; + } read_lock(&mrt_lock); dev->stats.tx_bytes += skb->len; @@ -2052,8 +2054,10 @@ int ip6_mr_input(struct sk_buff *skb) int err; err = ip6mr_fib_lookup(net, &fl6, &mrt); - if (err < 0) + if (err < 0) { + kfree_skb(skb); return err; + } read_lock(&mrt_lock); cache = ip6mr_cache_find(mrt, diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 1250f902067..fb545edef6e 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -244,7 +244,9 @@ static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops, { struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags); - memset(&rt->rt6i_table, 0, 
sizeof(*rt) - sizeof(struct dst_entry)); + if (rt != NULL) + memset(&rt->rt6i_table, 0, + sizeof(*rt) - sizeof(struct dst_entry)); return rt; } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 3c9fa618b69..7b8fc579435 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -591,7 +591,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer, } sk_nocaps_add(sk, NETIF_F_GSO_MASK); } - if (tcp_alloc_md5sig_pool(sk) == NULL) { + if (tp->md5sig_info->entries6 == 0 && + tcp_alloc_md5sig_pool(sk) == NULL) { kfree(newkey); return -ENOMEM; } @@ -600,8 +601,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer, (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC); if (!keys) { - tcp_free_md5sig_pool(); kfree(newkey); + if (tp->md5sig_info->entries6 == 0) + tcp_free_md5sig_pool(); return -ENOMEM; } @@ -647,6 +649,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer) kfree(tp->md5sig_info->keys6); tp->md5sig_info->keys6 = NULL; tp->md5sig_info->alloced6 = 0; + tcp_free_md5sig_pool(); } else { /* shrink the database */ if (tp->md5sig_info->entries6 != i) @@ -655,7 +658,6 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer) (tp->md5sig_info->entries6 - i) * sizeof (tp->md5sig_info->keys6[0])); } - tcp_free_md5sig_pool(); return 0; } } @@ -1383,6 +1385,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, newtp->af_specific = &tcp_sock_ipv6_mapped_specific; #endif + newnp->ipv6_ac_list = NULL; + newnp->ipv6_fl_list = NULL; newnp->pktoptions = NULL; newnp->opt = NULL; newnp->mcast_oif = inet6_iif(skb); @@ -1447,6 +1451,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, First: no IPv4 options. 
*/ newinet->inet_opt = NULL; + newnp->ipv6_ac_list = NULL; newnp->ipv6_fl_list = NULL; /* Clone RX bits */ diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index ad4ac2601a5..34b2ddeacb6 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1045,8 +1045,10 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len headroom = NET_SKB_PAD + sizeof(struct iphdr) + uhlen + hdr_len; old_headroom = skb_headroom(skb); - if (skb_cow_head(skb, headroom)) + if (skb_cow_head(skb, headroom)) { + dev_kfree_skb(skb); goto abort; + } new_headroom = skb_headroom(skb); skb_orphan(skb); diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 2b771dc708a..e3be48bf4dc 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -2283,6 +2283,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) struct ip_vs_service *svc; struct ip_vs_dest_user *udest_compat; struct ip_vs_dest_user_kern udest; + struct netns_ipvs *ipvs = net_ipvs(net); if (!capable(CAP_NET_ADMIN)) return -EPERM; @@ -2303,6 +2304,24 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) /* increase the module use count */ ip_vs_use_count_inc(); + /* Handle daemons since they have another lock */ + if (cmd == IP_VS_SO_SET_STARTDAEMON || + cmd == IP_VS_SO_SET_STOPDAEMON) { + struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg; + + if (mutex_lock_interruptible(&ipvs->sync_mutex)) { + ret = -ERESTARTSYS; + goto out_dec; + } + if (cmd == IP_VS_SO_SET_STARTDAEMON) + ret = start_sync_thread(net, dm->state, dm->mcast_ifn, + dm->syncid); + else + ret = stop_sync_thread(net, dm->state); + mutex_unlock(&ipvs->sync_mutex); + goto out_dec; + } + if (mutex_lock_interruptible(&__ip_vs_mutex)) { ret = -ERESTARTSYS; goto out_dec; @@ -2316,15 +2335,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) /* Set timeout values for (tcp tcpfin udp) */ ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg); goto out_unlock; - } else if (cmd == IP_VS_SO_SET_STARTDAEMON) { - struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg; - ret = start_sync_thread(net, dm->state, dm->mcast_ifn, - dm->syncid); - goto out_unlock; - } else if (cmd == IP_VS_SO_SET_STOPDAEMON) { - struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg; - ret = stop_sync_thread(net, dm->state); - goto out_unlock; } usvc_compat = (struct ip_vs_service_user *)arg; @@ -2584,6 +2594,33 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) if (copy_from_user(arg, user, copylen) != 0) return -EFAULT; + /* + * Handle daemons first since it has its own locking + */ + if (cmd == IP_VS_SO_GET_DAEMON) { + struct ip_vs_daemon_user d[2]; + + memset(&d, 0, sizeof(d)); + if (mutex_lock_interruptible(&ipvs->sync_mutex)) + return -ERESTARTSYS; + + if (ipvs->sync_state & IP_VS_STATE_MASTER) { + d[0].state = IP_VS_STATE_MASTER; + strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn, + sizeof(d[0].mcast_ifn)); + d[0].syncid = ipvs->master_syncid; + } + if (ipvs->sync_state & IP_VS_STATE_BACKUP) { + d[1].state = IP_VS_STATE_BACKUP; + strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn, + sizeof(d[1].mcast_ifn)); + d[1].syncid = ipvs->backup_syncid; + } + if (copy_to_user(user, &d, sizeof(d)) != 0) + ret = -EFAULT; + mutex_unlock(&ipvs->sync_mutex); + return ret; + } if (mutex_lock_interruptible(&__ip_vs_mutex)) return -ERESTARTSYS; @@ -2681,28 +2718,6 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, 
void __user *user, int *len) } break; - case IP_VS_SO_GET_DAEMON: - { - struct ip_vs_daemon_user d[2]; - - memset(&d, 0, sizeof(d)); - if (ipvs->sync_state & IP_VS_STATE_MASTER) { - d[0].state = IP_VS_STATE_MASTER; - strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn, - sizeof(d[0].mcast_ifn)); - d[0].syncid = ipvs->master_syncid; - } - if (ipvs->sync_state & IP_VS_STATE_BACKUP) { - d[1].state = IP_VS_STATE_BACKUP; - strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn, - sizeof(d[1].mcast_ifn)); - d[1].syncid = ipvs->backup_syncid; - } - if (copy_to_user(user, &d, sizeof(d)) != 0) - ret = -EFAULT; - } - break; - default: ret = -EINVAL; } @@ -3205,7 +3220,7 @@ static int ip_vs_genl_dump_daemons(struct sk_buff *skb, struct net *net = skb_sknet(skb); struct netns_ipvs *ipvs = net_ipvs(net); - mutex_lock(&__ip_vs_mutex); + mutex_lock(&ipvs->sync_mutex); if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) { if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER, ipvs->master_mcast_ifn, @@ -3225,7 +3240,7 @@ static int ip_vs_genl_dump_daemons(struct sk_buff *skb, } nla_put_failure: - mutex_unlock(&__ip_vs_mutex); + mutex_unlock(&ipvs->sync_mutex); return skb->len; } @@ -3271,13 +3286,9 @@ static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs) return ip_vs_set_timeout(net, &t); } -static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) +static int ip_vs_genl_set_daemon(struct sk_buff *skb, struct genl_info *info) { - struct ip_vs_service *svc = NULL; - struct ip_vs_service_user_kern usvc; - struct ip_vs_dest_user_kern udest; int ret = 0, cmd; - int need_full_svc = 0, need_full_dest = 0; struct net *net; struct netns_ipvs *ipvs; @@ -3285,19 +3296,10 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) ipvs = net_ipvs(net); cmd = info->genlhdr->cmd; - mutex_lock(&__ip_vs_mutex); - - if (cmd == IPVS_CMD_FLUSH) { - ret = ip_vs_flush(net); - goto out; - } else if (cmd == IPVS_CMD_SET_CONFIG) { - ret = ip_vs_genl_set_config(net, info->attrs); - goto out; - } else if (cmd == IPVS_CMD_NEW_DAEMON || - cmd == IPVS_CMD_DEL_DAEMON) { - + if (cmd == IPVS_CMD_NEW_DAEMON || cmd == IPVS_CMD_DEL_DAEMON) { struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1]; + mutex_lock(&ipvs->sync_mutex); if (!info->attrs[IPVS_CMD_ATTR_DAEMON] || nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX, info->attrs[IPVS_CMD_ATTR_DAEMON], @@ -3310,6 +3312,33 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) ret = ip_vs_genl_new_daemon(net, daemon_attrs); else ret = ip_vs_genl_del_daemon(net, daemon_attrs); +out: + mutex_unlock(&ipvs->sync_mutex); + } + return ret; +} + +static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) +{ + struct ip_vs_service *svc = NULL; + struct ip_vs_service_user_kern usvc; + struct ip_vs_dest_user_kern udest; + int ret = 0, cmd; + int need_full_svc = 0, need_full_dest = 0; + struct net *net; + struct netns_ipvs *ipvs; + + net = skb_sknet(skb); + ipvs = net_ipvs(net); + cmd = info->genlhdr->cmd; + + mutex_lock(&__ip_vs_mutex); + + if (cmd == IPVS_CMD_FLUSH) { + ret = ip_vs_flush(net); + goto out; + } else if (cmd == IPVS_CMD_SET_CONFIG) { + ret = ip_vs_genl_set_config(net, info->attrs); goto out; } else if (cmd == IPVS_CMD_ZERO && !info->attrs[IPVS_CMD_ATTR_SERVICE]) { @@ -3536,13 +3565,13 @@ static struct genl_ops ip_vs_genl_ops[] __read_mostly = { .cmd = IPVS_CMD_NEW_DAEMON, .flags = GENL_ADMIN_PERM, .policy = ip_vs_cmd_policy, - .doit = ip_vs_genl_set_cmd, + .doit = ip_vs_genl_set_daemon, }, { .cmd 
= IPVS_CMD_DEL_DAEMON, .flags = GENL_ADMIN_PERM, .policy = ip_vs_cmd_policy, - .doit = ip_vs_genl_set_cmd, + .doit = ip_vs_genl_set_daemon, }, { .cmd = IPVS_CMD_GET_DAEMON, @@ -3679,7 +3708,7 @@ int __net_init ip_vs_control_net_init(struct net *net) int idx; struct netns_ipvs *ipvs = net_ipvs(net); - ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock); + rwlock_init(&ipvs->rs_lock); /* Initialize rs_table */ for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++) diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 7ee7215b8ba..3cdd479f9b5 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -61,6 +61,7 @@ #define SYNC_PROTO_VER 1 /* Protocol version in header */ +static struct lock_class_key __ipvs_sync_key; /* * IPVS sync connection entry * Version 0, i.e. original version. @@ -1545,6 +1546,7 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid) IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n", sizeof(struct ip_vs_sync_conn_v0)); + if (state == IP_VS_STATE_MASTER) { if (ipvs->master_thread) return -EEXIST; @@ -1667,6 +1669,7 @@ int __net_init ip_vs_sync_net_init(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); + __mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key); INIT_LIST_HEAD(&ipvs->sync_queue); spin_lock_init(&ipvs->sync_lock); spin_lock_init(&ipvs->sync_buff_lock); @@ -1680,7 +1683,9 @@ int __net_init ip_vs_sync_net_init(struct net *net) void ip_vs_sync_net_cleanup(struct net *net) { int retc; + struct netns_ipvs *ipvs = net_ipvs(net); + mutex_lock(&ipvs->sync_mutex); retc = stop_sync_thread(net, IP_VS_STATE_MASTER); if (retc && retc != -ESRCH) pr_err("Failed to stop Master Daemon\n"); @@ -1688,4 +1693,5 @@ void ip_vs_sync_net_cleanup(struct net *net) retc = stop_sync_thread(net, IP_VS_STATE_BACKUP); if (retc && retc != -ESRCH) pr_err("Failed to stop Backup Daemon\n"); + mutex_unlock(&ipvs->sync_mutex); } diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index cf616e55ca4..d69facdd9a7 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c @@ -241,8 +241,8 @@ static int gre_packet(struct nf_conn *ct, nf_ct_refresh_acct(ct, ctinfo, skb, ct->proto.gre.stream_timeout); /* Also, more likely to be important, and not a probe. 
*/ - set_bit(IPS_ASSURED_BIT, &ct->status); - nf_conntrack_event_cache(IPCT_ASSURED, ct); + if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) + nf_conntrack_event_cache(IPCT_ASSURED, ct); } else nf_ct_refresh_acct(ct, ctinfo, skb, ct->proto.gre.timeout); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index c698cec0a44..fabb4fafa28 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -961,7 +961,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, return 0; drop_n_acct: - po->stats.tp_drops = atomic_inc_return(&sk->sk_drops); + spin_lock(&sk->sk_receive_queue.lock); + po->stats.tp_drops++; + atomic_inc(&sk->sk_drops); + spin_unlock(&sk->sk_receive_queue.lock); drop_n_restore: if (skb_head != skb->data && skb_shared(skb)) { diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c index 8b77edbab27..4e1de171866 100644 --- a/net/rds/iw_rdma.c +++ b/net/rds/iw_rdma.c @@ -84,7 +84,8 @@ static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool, static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, struct list_head *unmap_list, - struct list_head *kill_list); + struct list_head *kill_list, + int *unpinned); static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id) @@ -499,7 +500,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all) LIST_HEAD(unmap_list); LIST_HEAD(kill_list); unsigned long flags; - unsigned int nfreed = 0, ncleaned = 0, free_goal; + unsigned int nfreed = 0, ncleaned = 0, unpinned = 0, free_goal; int ret = 0; rds_iw_stats_inc(s_iw_rdma_mr_pool_flush); @@ -524,7 +525,8 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all) * will be destroyed by the unmap function. 
*/ if (!list_empty(&unmap_list)) { - ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list, &kill_list); + ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list, + &kill_list, &unpinned); /* If we've been asked to destroy all MRs, move those * that were simply cleaned to the kill list */ if (free_all) @@ -548,6 +550,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all) spin_unlock_irqrestore(&pool->list_lock, flags); } + atomic_sub(unpinned, &pool->free_pinned); atomic_sub(ncleaned, &pool->dirty_count); atomic_sub(nfreed, &pool->item_count); @@ -828,7 +831,8 @@ static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, struct list_head *unmap_list, - struct list_head *kill_list) + struct list_head *kill_list, + int *unpinned) { struct rds_iw_mapping *mapping, *next; unsigned int ncleaned = 0; @@ -855,6 +859,7 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, spin_lock_irqsave(&pool->list_lock, flags); list_for_each_entry_safe(mapping, next, unmap_list, m_list) { + *unpinned += mapping->m_sg.len; list_move(&mapping->m_list, &laundered); ncleaned++; } diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index e83e7fee3bc..ea40d540a99 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -4113,9 +4113,12 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev, if (len % sizeof(u32)) return -EINVAL; + if (settings->n_akm_suites > NL80211_MAX_NR_AKM_SUITES) + return -EINVAL; + memcpy(settings->akm_suites, data, len); - for (i = 0; i < settings->n_ciphers_pairwise; i++) + for (i = 0; i < settings->n_akm_suites; i++) if (!nl80211_valid_akm_suite(settings->akm_suites[i])) return -EINVAL; } diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index d30615419b4..5f03e4ea65b 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -91,7 +91,7 @@ int x25_parse_address_block(struct sk_buff *skb, int needed; int rc; - if (skb->len < 1) { + if (!pskb_may_pull(skb, 1)) { /* packet has no address block */ rc = 0; goto empty; @@ -100,7 +100,7 @@ int x25_parse_address_block(struct sk_buff *skb, len = *skb->data; needed = 1 + (len >> 4) + (len & 0x0f); - if (skb->len < needed) { + if (!pskb_may_pull(skb, needed)) { /* packet is too short to hold the addresses it claims to hold */ rc = -1; @@ -295,7 +295,8 @@ static struct sock *x25_find_listener(struct x25_address *addr, * Found a listening socket, now check the incoming * call user data vs this sockets call user data */ - if(skb->len > 0 && x25_sk(s)->cudmatchlength > 0) { + if (x25_sk(s)->cudmatchlength > 0 && + skb->len >= x25_sk(s)->cudmatchlength) { if((memcmp(x25_sk(s)->calluserdata.cuddata, skb->data, x25_sk(s)->cudmatchlength)) == 0) { @@ -951,14 +952,27 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb, * * Facilities length is mandatory in call request packets */ - if (skb->len < 1) + if (!pskb_may_pull(skb, 1)) goto out_clear_request; len = skb->data[0] + 1; - if (skb->len < len) + if (!pskb_may_pull(skb, len)) goto out_clear_request; skb_pull(skb,len); /* + * Ensure that the amount of call user data is valid. + */ + if (skb->len > X25_MAX_CUD_LEN) + goto out_clear_request; + + /* + * Get all the call user data so it can be used in + * x25_find_listener and skb_copy_from_linear_data up ahead. + */ + if (!pskb_may_pull(skb, skb->len)) + goto out_clear_request; + + /* * Find a listener for the particular address/cud pair. 
*/ sk = x25_find_listener(&source_addr,skb); @@ -1166,6 +1180,9 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock, * byte of the user data is the logical value of the Q Bit. */ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { + if (!pskb_may_pull(skb, 1)) + goto out_kfree_skb; + qbit = skb->data[0]; skb_pull(skb, 1); } @@ -1244,7 +1261,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, struct x25_sock *x25 = x25_sk(sk); struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name; size_t copied; - int qbit; + int qbit, header_len = x25->neighbour->extended ? + X25_EXT_MIN_LEN : X25_STD_MIN_LEN; + struct sk_buff *skb; unsigned char *asmptr; int rc = -ENOTCONN; @@ -1265,6 +1284,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, skb = skb_dequeue(&x25->interrupt_in_queue); + if (!pskb_may_pull(skb, X25_STD_MIN_LEN)) + goto out_free_dgram; + skb_pull(skb, X25_STD_MIN_LEN); /* @@ -1285,10 +1307,12 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, if (!skb) goto out; + if (!pskb_may_pull(skb, header_len)) + goto out_free_dgram; + qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT; - skb_pull(skb, x25->neighbour->extended ? - X25_EXT_MIN_LEN : X25_STD_MIN_LEN); + skb_pull(skb, header_len); if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { asmptr = skb_push(skb, 1); diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c index e547ca1578c..fa2b41888bd 100644 --- a/net/x25/x25_dev.c +++ b/net/x25/x25_dev.c @@ -32,6 +32,9 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb) unsigned short frametype; unsigned int lci; + if (!pskb_may_pull(skb, X25_STD_MIN_LEN)) + return 0; + frametype = skb->data[2]; lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); @@ -115,6 +118,9 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev, goto drop; } + if (!pskb_may_pull(skb, 1)) + return 0; + switch (skb->data[0]) { case X25_IFACE_DATA: diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c index f77e4e75f91..36384a1fa9f 100644 --- a/net/x25/x25_facilities.c +++ b/net/x25/x25_facilities.c @@ -44,7 +44,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask) { - unsigned char *p = skb->data; + unsigned char *p; unsigned int len; *vc_fac_mask = 0; @@ -60,14 +60,16 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae)); memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae)); - if (skb->len < 1) + if (!pskb_may_pull(skb, 1)) return 0; - len = *p++; + len = skb->data[0]; - if (len >= skb->len) + if (!pskb_may_pull(skb, 1 + len)) return -1; + p = skb->data + 1; + while (len > 0) { switch (*p & X25_FAC_CLASS_MASK) { case X25_FAC_CLASS_A: diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index 0b073b51b18..a49cd4ec551 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c @@ -107,6 +107,8 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp /* * Parse the data in the frame. */ + if (!pskb_may_pull(skb, X25_STD_MIN_LEN)) + goto out_clear; skb_pull(skb, X25_STD_MIN_LEN); len = x25_parse_address_block(skb, &source_addr, @@ -127,9 +129,11 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp * Copy any Call User Data. 
*/ if (skb->len > 0) { - skb_copy_from_linear_data(skb, - x25->calluserdata.cuddata, - skb->len); + if (skb->len > X25_MAX_CUD_LEN) + goto out_clear; + + skb_copy_bits(skb, 0, x25->calluserdata.cuddata, + skb->len); x25->calluserdata.cudlength = skb->len; } if (!sock_flag(sk, SOCK_DEAD)) @@ -137,6 +141,9 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp break; } case X25_CLEAR_REQUEST: + if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) + goto out_clear; + x25_write_internal(sk, X25_CLEAR_CONFIRMATION); x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]); break; @@ -164,6 +171,9 @@ static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametyp switch (frametype) { case X25_CLEAR_REQUEST: + if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) + goto out_clear; + x25_write_internal(sk, X25_CLEAR_CONFIRMATION); x25_disconnect(sk, 0, skb->data[3], skb->data[4]); break; @@ -177,6 +187,11 @@ static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametyp } return 0; + +out_clear: + x25_write_internal(sk, X25_CLEAR_REQUEST); + x25_start_t23timer(sk); + return 0; } /* @@ -206,6 +221,9 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp break; case X25_CLEAR_REQUEST: + if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) + goto out_clear; + x25_write_internal(sk, X25_CLEAR_CONFIRMATION); x25_disconnect(sk, 0, skb->data[3], skb->data[4]); break; @@ -304,6 +322,12 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp } return queued; + +out_clear: + x25_write_internal(sk, X25_CLEAR_REQUEST); + x25->state = X25_STATE_2; + x25_start_t23timer(sk); + return 0; } /* @@ -313,13 +337,13 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp */ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype) { + struct x25_sock *x25 = x25_sk(sk); + switch (frametype) { case X25_RESET_REQUEST: x25_write_internal(sk, X25_RESET_CONFIRMATION); case X25_RESET_CONFIRMATION: { - struct x25_sock *x25 = x25_sk(sk); - x25_stop_timer(sk); x25->condition = 0x00; x25->va = 0; @@ -331,6 +355,9 @@ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametyp break; } case X25_CLEAR_REQUEST: + if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) + goto out_clear; + x25_write_internal(sk, X25_CLEAR_CONFIRMATION); x25_disconnect(sk, 0, skb->data[3], skb->data[4]); break; @@ -340,6 +367,12 @@ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametyp } return 0; + +out_clear: + x25_write_internal(sk, X25_CLEAR_REQUEST); + x25->state = X25_STATE_2; + x25_start_t23timer(sk); + return 0; } /* Higher level upcall for a LAPB frame */ diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c index 037958ff8ee..4acacf3c661 100644 --- a/net/x25/x25_link.c +++ b/net/x25/x25_link.c @@ -90,6 +90,9 @@ void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb, break; case X25_DIAGNOSTIC: + if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4)) + break; + printk(KERN_WARNING "x25: diagnostic #%d - %02X %02X %02X\n", skb->data[3], skb->data[4], skb->data[5], skb->data[6]); diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c index 24a342ebc7f..5170d52bfd9 100644 --- a/net/x25/x25_subr.c +++ b/net/x25/x25_subr.c @@ -269,7 +269,11 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q, int *d, int *m) { struct x25_sock *x25 = x25_sk(sk); - unsigned char *frame = skb->data; + unsigned char *frame; + + if (!pskb_may_pull(skb, 
X25_STD_MIN_LEN)) + return X25_ILLEGAL; + frame = skb->data; *ns = *nr = *q = *d = *m = 0; @@ -294,6 +298,10 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q, if (frame[2] == X25_RR || frame[2] == X25_RNR || frame[2] == X25_REJ) { + if (!pskb_may_pull(skb, X25_EXT_MIN_LEN)) + return X25_ILLEGAL; + frame = skb->data; + *nr = (frame[3] >> 1) & 0x7F; return frame[2]; } @@ -308,6 +316,10 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q, if (x25->neighbour->extended) { if ((frame[2] & 0x01) == X25_DATA) { + if (!pskb_may_pull(skb, X25_EXT_MIN_LEN)) + return X25_ILLEGAL; + frame = skb->data; + *q = (frame[0] & X25_Q_BIT) == X25_Q_BIT; *d = (frame[0] & X25_D_BIT) == X25_D_BIT; *m = (frame[3] & X25_EXT_M_BIT) == X25_EXT_M_BIT; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 94fdcc7f103..552df27dcf5 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1349,14 +1349,16 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) BUG(); } xdst = dst_alloc(dst_ops, NULL, 0, 0, 0); - memset(&xdst->u.rt6.rt6i_table, 0, sizeof(*xdst) - sizeof(struct dst_entry)); - xfrm_policy_put_afinfo(afinfo); - if (likely(xdst)) + if (likely(xdst)) { + memset(&xdst->u.rt6.rt6i_table, 0, + sizeof(*xdst) - sizeof(struct dst_entry)); xdst->flo.ops = &xfrm_bundle_fc_ops; - else + } else xdst = ERR_PTR(-ENOBUFS); + xfrm_policy_put_afinfo(afinfo); + return xdst; } diff --git a/security/security.c b/security/security.c index 0e4fccfef12..d9e15339092 100644 --- a/security/security.c +++ b/security/security.c @@ -1097,6 +1097,7 @@ void security_sk_clone(const struct sock *sk, struct sock *newsk) { security_ops->sk_clone_security(sk, newsk); } +EXPORT_SYMBOL(security_sk_clone); void security_sk_classify_flow(struct sock *sk, struct flowi *fl) { diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index be6982289c0..191284a1c0a 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -1924,7 +1924,8 @@ static unsigned int azx_via_get_position(struct azx *chip, } static unsigned int azx_get_position(struct azx *chip, - struct azx_dev *azx_dev) + struct azx_dev *azx_dev, + bool with_check) { unsigned int pos; int stream = azx_dev->substream->stream; @@ -1940,7 +1941,7 @@ static unsigned int azx_get_position(struct azx *chip, default: /* use the position buffer */ pos = le32_to_cpu(*azx_dev->posbuf); - if (chip->position_fix[stream] == POS_FIX_AUTO) { + if (with_check && chip->position_fix[stream] == POS_FIX_AUTO) { if (!pos || pos == (u32)-1) { printk(KERN_WARNING "hda-intel: Invalid position buffer, " @@ -1964,7 +1965,7 @@ static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream) struct azx *chip = apcm->chip; struct azx_dev *azx_dev = get_azx_dev(substream); return bytes_to_frames(substream->runtime, - azx_get_position(chip, azx_dev)); + azx_get_position(chip, azx_dev, false)); } /* @@ -1987,7 +1988,7 @@ static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev) return -1; /* bogus (too early) interrupt */ stream = azx_dev->substream->stream; - pos = azx_get_position(chip, azx_dev); + pos = azx_get_position(chip, azx_dev, true); if (WARN_ONCE(!azx_dev->period_bytes, "hda-intel: zero azx_dev->period_bytes")) @@ -2369,6 +2370,7 @@ static int azx_dev_free(struct snd_device *device) static struct snd_pci_quirk position_fix_list[] __devinitdata = { SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB), SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", 
POS_FIX_LPIB), + SND_PCI_QUIRK(0x1028, 0x02c6, "Dell Inspiron 1010", POS_FIX_LPIB), SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB), SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB), diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 7696d05b935..76752d8ea73 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -3110,6 +3110,7 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS), SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), + SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520 & W520", CXT5066_AUTO), SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS), diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c index ffa2ffe5ec1..aa091a0d818 100644 --- a/sound/soc/codecs/wm8753.c +++ b/sound/soc/codecs/wm8753.c @@ -1454,8 +1454,8 @@ static int wm8753_probe(struct snd_soc_codec *codec) /* set the update bits */ snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100); - snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100); - snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100); + snd_soc_update_bits(codec, WM8753_LADC, 0x0100, 0x0100); + snd_soc_update_bits(codec, WM8753_RADC, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8753_LOUT1V, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8753_ROUT1V, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8753_LOUT2V, 0x0100, 0x0100); diff --git a/sound/soc/omap/mcpdm.c b/sound/soc/omap/mcpdm.c index 928f0370745..50e59194ad8 100644 --- a/sound/soc/omap/mcpdm.c +++ b/sound/soc/omap/mcpdm.c @@ -449,7 +449,7 @@ exit: return ret; } -int __devexit omap_mcpdm_remove(struct platform_device *pdev) +int omap_mcpdm_remove(struct platform_device *pdev) { struct omap_mcpdm *mcpdm_ptr = platform_get_drvdata(pdev); diff --git a/sound/soc/omap/mcpdm.h b/sound/soc/omap/mcpdm.h index df3e16fb51f..20c20a8649f 100644 --- a/sound/soc/omap/mcpdm.h +++ b/sound/soc/omap/mcpdm.h @@ -150,4 +150,4 @@ extern int omap_mcpdm_request(void); extern void omap_mcpdm_free(void); extern int omap_mcpdm_set_offset(int offset1, int offset2); int __devinit omap_mcpdm_probe(struct platform_device *pdev); -int __devexit omap_mcpdm_remove(struct platform_device *pdev); +int omap_mcpdm_remove(struct platform_device *pdev); diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c index b6445757fc5..2b8350b5223 100644 --- a/sound/soc/pxa/zylonite.c +++ b/sound/soc/pxa/zylonite.c @@ -196,20 +196,20 @@ static int zylonite_probe(struct snd_soc_card *card) if (clk_pout) { pout = clk_get(NULL, "CLK_POUT"); if (IS_ERR(pout)) { - dev_err(&pdev->dev, "Unable to obtain CLK_POUT: %ld\n", + dev_err(card->dev, "Unable to obtain CLK_POUT: %ld\n", PTR_ERR(pout)); return PTR_ERR(pout); } ret = clk_enable(pout); if (ret != 0) { - dev_err(&pdev->dev, "Unable to enable CLK_POUT: %d\n", + dev_err(card->dev, "Unable to enable CLK_POUT: %d\n", ret); clk_put(pout); return ret; } - dev_dbg(&pdev->dev, "MCLK enabled at %luHz\n", + dev_dbg(card->dev, "MCLK enabled at %luHz\n", clk_get_rate(pout)); } @@ -241,7 +241,7 @@ static int zylonite_resume_pre(struct snd_soc_card *card) if (clk_pout) { ret = clk_enable(pout); if (ret != 0) - 
dev_err(&pdev->dev, "Unable to enable CLK_POUT: %d\n", + dev_err(card->dev, "Unable to enable CLK_POUT: %d\n", ret); } diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index c5748c52318..e389815078d 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -449,6 +449,8 @@ int perf_event__parse_sample(const union perf_event *event, u64 type, } if (type & PERF_SAMPLE_RAW) { + const u64 *pdata; + u.val64 = *array; if (WARN_ONCE(swapped, "Endianness of raw data not corrected!\n")) { @@ -462,11 +464,12 @@ int perf_event__parse_sample(const union perf_event *event, u64 type, return -EFAULT; data->raw_size = u.val32[0]; + pdata = (void *) array + sizeof(u32); - if (sample_overlap(event, &u.val32[1], data->raw_size)) + if (sample_overlap(event, pdata, data->raw_size)) return -EFAULT; - data->raw_data = &u.val32[1]; + data->raw_data = (void *) pdata; } return 0;
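
A note on the recurring pattern in the X.25 hunks above: every place that used to compare skb->len by hand before reading skb->data now calls pskb_may_pull() first, so the claimed header bytes are guaranteed to be present (and linear) before they are dereferenced, and truncated frames are rejected instead of read out of bounds. The following is a minimal user-space sketch of that "check the advertised length before parsing" discipline; struct pbuf, may_pull() and parse_address_block() are hypothetical stand-ins modeled on x25_parse_address_block(), not kernel API.

/*
 * User-space analogue of the bounds-check-before-parse pattern used in
 * the X.25 fixes above.  The "pbuf" type and may_pull() are hypothetical
 * stand-ins for sk_buff/pskb_may_pull(); this is not kernel code.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pbuf {
	const unsigned char *data;	/* current parse position */
	size_t len;			/* bytes remaining from data */
};

/* Succeed only if at least "need" bytes are really available. */
static bool may_pull(const struct pbuf *b, size_t need)
{
	return b->len >= need;
}

/*
 * Parse an address block shaped like X.25's: one length octet holding two
 * 4-bit digit counts, followed by that many digits.  Returns the number of
 * bytes consumed, 0 for "no block", -1 for a truncated packet.
 */
static int parse_address_block(struct pbuf *b)
{
	unsigned char lens;
	size_t needed;

	if (!may_pull(b, 1))		/* no room for the length octet */
		return 0;

	lens = b->data[0];
	needed = 1 + (lens >> 4) + (lens & 0x0f);

	if (!may_pull(b, needed))	/* claims more digits than we hold */
		return -1;

	/* Only now is it safe to read b->data[1 .. needed-1]. */
	b->data += needed;
	b->len  -= needed;
	return (int)needed;
}

int main(void)
{
	/* Length octet 0x23 claims 2 + 3 digits, but only 3 bytes follow. */
	const unsigned char truncated[] = { 0x23, 0x11, 0x22, 0x33 };
	struct pbuf b = { truncated, sizeof(truncated) };

	printf("parse result: %d\n", parse_address_block(&b));	/* -1 */
	return 0;
}

The same shape appears in x25_rx_call_request() and x25_decode(): read a length or type octet only after proving it exists, then re-check before trusting any length derived from it.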
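
Similarly, the nf_conntrack_proto_gre.c change swaps an unconditional set_bit() plus event for test_and_set_bit(), so the IPCT_ASSURED event is generated only when the bit actually transitions from clear to set, not for every keepalive packet. Below is a small sketch of that transition-only notification idiom, using C11 atomics in place of the kernel bitops; the conn/mark_assured names are illustrative assumptions, not kernel code.

/* Transition-only notification, analogous to the test_and_set_bit()
 * change in nf_conntrack_proto_gre.c.  C11 atomics stand in for the
 * kernel's atomic bitops; names here are illustrative only. */
#include <stdatomic.h>
#include <stdio.h>

#define ASSURED_BIT (1u << 2)

struct conn {
	atomic_uint status;
};

static void event_assured(struct conn *ct)
{
	printf("assured event for %p\n", (void *)ct);
}

/* Called for every packet that keeps the connection alive. */
static void mark_assured(struct conn *ct)
{
	unsigned int old = atomic_fetch_or(&ct->status, ASSURED_BIT);

	/* Only the caller that actually flips the bit reports the event;
	 * repeats stay silent, which is what the patch above achieves. */
	if (!(old & ASSURED_BIT))
		event_assured(ct);
}

int main(void)
{
	struct conn ct;

	atomic_init(&ct.status, 0);
	mark_assured(&ct);	/* prints once */
	mark_assured(&ct);	/* silent */
	mark_assured(&ct);	/* silent */
	return 0;
}

atomic_fetch_or() returns the previous value, so only the caller that sets the bit sees it clear and raises the event, matching what test_and_set_bit() provides in the kernel.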