author    | Russell King <rmk+kernel@arm.linux.org.uk> | 2010-02-25 20:41:34 +0000
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2010-02-25 20:41:34 +0000
commit    | 2a2d10f386c1bacabe1a530c06dc8488eac419e5 (patch)
tree      | 4606a4a3bafa42a8aa70e001d35883956a0f0589
parent    | 2f68ffd11aa76b251921976c982b814df9ebe890 (diff)
parent    | c499546f43f2c31b681271ef7db922839fcde5fe (diff)
Merge branch 'for-rmk' of git://git.marvell.com/orion into devel-stable
364 files changed, 4256 insertions, 1961 deletions
diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
index 6434f0df012..6cd6daefaae 100644
--- a/Documentation/ABI/testing/ima_policy
+++ b/Documentation/ABI/testing/ima_policy
@@ -20,7 +20,7 @@ Description:
 		lsm:	[[subj_user=] [subj_role=] [subj_type=]
 			 [obj_user=] [obj_role=] [obj_type=]]
-	base: 	func:= [BPRM_CHECK][FILE_MMAP][INODE_PERMISSION]
+	base: 	func:= [BPRM_CHECK][FILE_MMAP][FILE_CHECK]
 		mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC]
 		fsmagic:= hex value
 		uid:= decimal value
@@ -40,11 +40,11 @@ Description:
 		measure func=BPRM_CHECK
 		measure func=FILE_MMAP mask=MAY_EXEC
-		measure func=INODE_PERM mask=MAY_READ uid=0
+		measure func=FILE_CHECK mask=MAY_READ uid=0
 
 	The default policy measures all executables in bprm_check,
 	all files mmapped executable in file_mmap, and all files
-	open for read by root in inode_permission.
+	open for read by root in do_filp_open.
 
 	Examples of LSM specific definitions:
 
@@ -54,8 +54,8 @@ Description:
 		dont_measure obj_type=var_log_t
 		dont_measure obj_type=auditd_log_t
-		measure subj_user=system_u func=INODE_PERM mask=MAY_READ
-		measure subj_role=system_r func=INODE_PERM mask=MAY_READ
+		measure subj_user=system_u func=FILE_CHECK mask=MAY_READ
+		measure subj_role=system_r func=FILE_CHECK mask=MAY_READ
 
 	Smack:
-		measure subj_user=_ func=INODE_PERM mask=MAY_READ
+		measure subj_user=_ func=FILE_CHECK mask=MAY_READ
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index aed082f49d0..737988fca64 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -145,8 +145,8 @@ show_sampling_rate_max: THIS INTERFACE IS DEPRECATED, DON'T USE IT.
 up_threshold: defines what the average CPU usage between the samplings
 of 'sampling_rate' needs to be for the kernel to make a decision on
 whether it should increase the frequency. For example when it is set
-to its default value of '80' it means that between the checking
-intervals the CPU needs to be on average more than 80% in use to then
+to its default value of '95' it means that between the checking
+intervals the CPU needs to be on average more than 95% in use to then
 decide that the CPU frequency needs to be increased.
 
 ignore_nice_load: this parameter takes a value of '0' or '1'. When
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 736d4560288..826b6e14831 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -199,6 +199,10 @@ and is between 256 and 4096 characters. It is defined in the file
 			acpi_display_output=video
 			See above.
 
+	acpi_early_pdc_eval	[HW,ACPI] Evaluate processor _PDC methods
+				early. Needed on some platforms to properly
+				initialize the EC.
+
 	acpi_irq_balance [HW,ACPI]	ACPI will balance active IRQs
 			default in APIC mode
diff --git a/MAINTAINERS b/MAINTAINERS
index 03f38c18f32..44c669d92a4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -616,10 +616,10 @@ M:	Richard Purdie <rpurdie@rpsys.net>
 S:	Maintained
 
 ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE
-M:	Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+M:	Paulius Zaleckas <paulius.zaleckas@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:	git git://gitorious.org/linux-gemini/mainline.git
-S:	Maintained
+S:	Odd Fixes
 F:	arch/arm/mach-gemini/
 
 ARM/EBSA110 MACHINE SUPPORT
@@ -641,9 +641,9 @@ T:	topgit git://git.openezx.org/openezx.git
 F:	arch/arm/mach-pxa/ezx.c
 
 ARM/FARADAY FA526 PORT
-M:	Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+M:	Paulius Zaleckas <paulius.zaleckas@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:	Maintained
+S:	Odd Fixes
 F:	arch/arm/mm/*-fa*
 
 ARM/FOOTBRIDGE ARCHITECTURE
@@ -3411,8 +3411,10 @@ S:	Maintained
 F:	drivers/scsi/sym53c8xx_2/
 
 LTP (Linux Test Project)
-M:	Subrata Modak <subrata@linux.vnet.ibm.com>
-M:	Mike Frysinger <vapier@gentoo.org>
+M:	Rishikesh K Rajak <risrajak@linux.vnet.ibm.com>
+M:	Garrett Cooper <yanegomi@gmail.com>
+M:	Mike Frysinger <vapier@gentoo.org>
+M:	Subrata Modak <subrata@linux.vnet.ibm.com>
 L:	ltp-list@lists.sourceforge.net (subscribers-only)
 W:	http://ltp.sourceforge.net/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/galak/ltp.git
@@ -3836,6 +3838,7 @@ NETWORKING DRIVERS
 L:	netdev@vger.kernel.org
 W:	http://www.linuxfoundation.org/en/Net
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6.git
 S:	Odd Fixes
 F:	drivers/net/
 F:	include/linux/if_*
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 33
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc8
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*
diff --git a/arch/arm/configs/mv78xx0_defconfig b/arch/arm/configs/mv78xx0_defconfig
index 6afa2c108ea..da4710dd1da 100644
--- a/arch/arm/configs/mv78xx0_defconfig
+++ b/arch/arm/configs/mv78xx0_defconfig
@@ -176,6 +176,7 @@ CONFIG_ARCH_MV78XX0=y
 #
 CONFIG_MACH_DB78X00_BP=y
 CONFIG_MACH_RD78X00_MASA=y
+CONFIG_MACH_TERASTATION_WXL=y
 CONFIG_PLAT_ORION=y
 
 #
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index c77d2fa1f6e..8113bb5fb66 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -42,7 +42,8 @@
 #endif
 
 #if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
-    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
+    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
+    defined(CONFIG_CPU_ARM1026)
 # define MULTI_CACHE 1
 #endif
 
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index c6c57b640b6..621acad8ea4 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -102,6 +102,7 @@ struct cpu_cache_fns cpu_cache;
 #endif
 #ifdef CONFIG_OUTER_CACHE
 struct outer_cache_fns outer_cache;
+EXPORT_SYMBOL(outer_cache);
 #endif
 
 struct stack {
diff --git a/arch/arm/mach-gemini/gpio.c b/arch/arm/mach-gemini/gpio.c
index e7263854bc7..fe3bd5ac8b1 100644
--- a/arch/arm/mach-gemini/gpio.c
+++ b/arch/arm/mach-gemini/gpio.c
@@ -86,7 +86,7 @@ static int gpio_set_irq_type(unsigned int irq, unsigned int type)
 	unsigned int reg_both, reg_level, reg_type;
 
 	reg_type = __raw_readl(base + GPIO_INT_TYPE);
-	reg_level = __raw_readl(base + GPIO_INT_BOTH_EDGE);
+	reg_level = __raw_readl(base + GPIO_INT_LEVEL);
 	reg_both = __raw_readl(base + GPIO_INT_BOTH_EDGE);
 
 	switch (type) {
@@ -117,7 +117,7 @@ static int gpio_set_irq_type(unsigned int irq, unsigned int type)
 	}
 
 	__raw_writel(reg_type, base + GPIO_INT_TYPE);
-	__raw_writel(reg_level, base + GPIO_INT_BOTH_EDGE);
+	__raw_writel(reg_level, base + GPIO_INT_LEVEL);
 	__raw_writel(reg_both, base + GPIO_INT_BOTH_EDGE);
 
 	gpio_ack_irq(irq);
diff --git a/arch/arm/mach-kirkwood/Kconfig b/arch/arm/mach-kirkwood/Kconfig
index edc9aeb6e6b..17879a876be 100644
--- a/arch/arm/mach-kirkwood/Kconfig
+++ b/arch/arm/mach-kirkwood/Kconfig
@@ -32,6 +32,12 @@ config MACH_SHEEVAPLUG
 	  Say 'Y' here if you want your kernel to support the
 	  Marvell SheevaPlug Reference Board.
 
+config MACH_ESATA_SHEEVAPLUG
+	bool "Marvell eSATA SheevaPlug Reference Board"
+	help
+	  Say 'Y' here if you want your kernel to support the
+	  Marvell eSATA SheevaPlug Reference Board.
+
 config MACH_TS219
 	bool "QNAP TS-110, TS-119, TS-210, TS-219 and TS-219P Turbo NAS"
 	help
diff --git a/arch/arm/mach-kirkwood/Makefile b/arch/arm/mach-kirkwood/Makefile
index 55b4ccf3729..a5530e36ba3 100644
--- a/arch/arm/mach-kirkwood/Makefile
+++ b/arch/arm/mach-kirkwood/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_MACH_RD88F6192_NAS)	+= rd88f6192-nas-setup.o
 obj-$(CONFIG_MACH_RD88F6281)		+= rd88f6281-setup.o
 obj-$(CONFIG_MACH_MV88F6281GTW_GE)	+= mv88f6281gtw_ge-setup.o
 obj-$(CONFIG_MACH_SHEEVAPLUG)		+= sheevaplug-setup.o
+obj-$(CONFIG_MACH_ESATA_SHEEVAPLUG)	+= sheevaplug-setup.o
 obj-$(CONFIG_MACH_TS219)		+= ts219-setup.o tsx1x-common.o
 obj-$(CONFIG_MACH_TS41X)		+= ts41x-setup.o tsx1x-common.o
 obj-$(CONFIG_MACH_OPENRD)		+= openrd-setup.o
diff --git a/arch/arm/mach-kirkwood/sheevaplug-setup.c b/arch/arm/mach-kirkwood/sheevaplug-setup.c
index c7319eeac8b..a00879d34d5 100644
--- a/arch/arm/mach-kirkwood/sheevaplug-setup.c
+++ b/arch/arm/mach-kirkwood/sheevaplug-setup.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/platform_device.h>
+#include <linux/ata_platform.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mv643xx_eth.h>
 #include <linux/gpio.h>
@@ -42,10 +43,19 @@ static struct mv643xx_eth_platform_data sheevaplug_ge00_data = {
 	.phy_addr	= MV643XX_ETH_PHY_ADDR(0),
 };
 
+static struct mv_sata_platform_data sheeva_esata_sata_data = {
+	.n_ports	= 2,
+};
+
 static struct mvsdio_platform_data sheevaplug_mvsdio_data = {
 	/* unfortunately the CD signal has not been connected */
 };
 
+static struct mvsdio_platform_data sheeva_esata_mvsdio_data = {
+	.gpio_write_protect	= 44, /* MPP44 used as SD write protect */
+	.gpio_card_detect	= 47, /* MPP47 used as SD card detect */
+};
+
 static struct gpio_led sheevaplug_led_pins[] = {
 	{
 		.name			= "plug:green:health",
@@ -74,13 +84,26 @@ static unsigned int sheevaplug_mpp_config[] __initdata = {
 	0
 };
 
+static unsigned int sheeva_esata_mpp_config[] __initdata = {
+	MPP29_GPIO,	/* USB Power Enable */
+	MPP44_GPIO,	/* SD Write Protect */
+	MPP47_GPIO,	/* SD Card Detect */
+	MPP49_GPIO,	/* LED Green */
+	0
+};
+
 static void __init sheevaplug_init(void)
 {
 	/*
 	 * Basic setup. Needs to be called early.
 	 */
 	kirkwood_init();
-	kirkwood_mpp_conf(sheevaplug_mpp_config);
+
+	/* setup gpio pin select */
+	if (machine_is_sheeva_esata())
+		kirkwood_mpp_conf(sheeva_esata_mpp_config);
+	else
+		kirkwood_mpp_conf(sheevaplug_mpp_config);
 
 	kirkwood_uart0_init();
 	kirkwood_nand_init(ARRAY_AND_SIZE(sheevaplug_nand_parts), 25);
@@ -91,11 +114,21 @@ static void __init sheevaplug_init(void)
 	kirkwood_ehci_init();
 	kirkwood_ge00_init(&sheevaplug_ge00_data);
-	kirkwood_sdio_init(&sheevaplug_mvsdio_data);
+
+	/* honor lower power consumption for plugs with out eSATA */
+	if (machine_is_sheeva_esata())
+		kirkwood_sata_init(&sheeva_esata_sata_data);
+
+	/* enable sd wp and sd cd on plugs with esata */
+	if (machine_is_sheeva_esata())
+		kirkwood_sdio_init(&sheeva_esata_mvsdio_data);
+	else
+		kirkwood_sdio_init(&sheevaplug_mvsdio_data);
 
 	platform_device_register(&sheevaplug_leds);
 }
 
+#ifdef CONFIG_MACH_SHEEVAPLUG
 MACHINE_START(SHEEVAPLUG, "Marvell SheevaPlug Reference Board")
 	/* Maintainer: shadi Ammouri <shadi@marvell.com> */
 	.phys_io	= KIRKWOOD_REGS_PHYS_BASE,
@@ -106,3 +139,16 @@ MACHINE_START(SHEEVAPLUG, "Marvell SheevaPlug Reference Board")
 	.init_irq	= kirkwood_init_irq,
 	.timer		= &kirkwood_timer,
 MACHINE_END
+#endif
+
+#ifdef CONFIG_MACH_ESATA_SHEEVAPLUG
+MACHINE_START(ESATA_SHEEVAPLUG, "Marvell eSATA SheevaPlug Reference Board")
+	.phys_io	= KIRKWOOD_REGS_PHYS_BASE,
+	.io_pg_offst	= ((KIRKWOOD_REGS_VIRT_BASE) >> 18) & 0xfffc,
+	.boot_params	= 0x00000100,
+	.init_machine	= sheevaplug_init,
+	.map_io		= kirkwood_map_io,
+	.init_irq	= kirkwood_init_irq,
+	.timer		= &kirkwood_timer,
+MACHINE_END
+#endif
diff --git a/arch/arm/mach-mv78xx0/Kconfig b/arch/arm/mach-mv78xx0/Kconfig
index 6fbe68fe441..f2d309d0619 100644
--- a/arch/arm/mach-mv78xx0/Kconfig
+++ b/arch/arm/mach-mv78xx0/Kconfig
@@ -14,6 +14,12 @@ config MACH_RD78X00_MASA
 	  Say 'Y' here if you want your kernel to support the
 	  Marvell RD-78x00-mASA Reference Design.
 
+config MACH_TERASTATION_WXL
+	bool "Buffalo WLX (Terastation Duo) NAS"
+	help
+	  Say 'Y' here if you want your kernel to support the
+	  Buffalo WXL Nas.
+
 endmenu
 
 endif
diff --git a/arch/arm/mach-mv78xx0/Makefile b/arch/arm/mach-mv78xx0/Makefile
index da628b7f3bb..67a13f9bfe6 100644
--- a/arch/arm/mach-mv78xx0/Makefile
+++ b/arch/arm/mach-mv78xx0/Makefile
@@ -1,3 +1,4 @@
-obj-y				+= common.o addr-map.o irq.o pcie.o
+obj-y				+= common.o addr-map.o mpp.o irq.o pcie.o
 obj-$(CONFIG_MACH_DB78X00_BP)	+= db78x00-bp-setup.o
 obj-$(CONFIG_MACH_RD78X00_MASA)	+= rd78x00-masa-setup.o
+obj-$(CONFIG_MACH_TERASTATION_WXL)	+= buffalo-wxl-setup.o
diff --git a/arch/arm/mach-mv78xx0/buffalo-wxl-setup.c b/arch/arm/mach-mv78xx0/buffalo-wxl-setup.c
new file mode 100644
index 00000000000..61e5e583603
--- /dev/null
+++ b/arch/arm/mach-mv78xx0/buffalo-wxl-setup.c
@@ -0,0 +1,155 @@
+/*
+ * arch/arm/mach-mv78xx0/buffalo-wxl-setup.c
+ *
+ * Buffalo WXL (Terastation Duo) Setup routines
+ *
+ * sebastien requiem <sebastien@requiem.fr>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/ata_platform.h> +#include <linux/mv643xx_eth.h> +#include <linux/ethtool.h> +#include <linux/i2c.h> +#include <mach/mv78xx0.h> +#include <asm/mach-types.h> +#include <asm/mach/arch.h> +#include "common.h" +#include "mpp.h" + + +/* This arch has 2 Giga Ethernet */ + +static struct mv643xx_eth_platform_data db78x00_ge00_data = { + .phy_addr = MV643XX_ETH_PHY_ADDR(0), +}; + +static struct mv643xx_eth_platform_data db78x00_ge01_data = { + .phy_addr = MV643XX_ETH_PHY_ADDR(8), +}; + + +/* 2 SATA controller supporting HotPlug */ + +static struct mv_sata_platform_data db78x00_sata_data = { + .n_ports = 2, +}; + +static struct i2c_board_info __initdata db78x00_i2c_rtc = { + I2C_BOARD_INFO("ds1338", 0x68), +}; + + +static unsigned int wxl_mpp_config[] __initdata = { + MPP0_GE1_TXCLK, + MPP1_GE1_TXCTL, + MPP2_GE1_RXCTL, + MPP3_GE1_RXCLK, + MPP4_GE1_TXD0, + MPP5_GE1_TXD1, + MPP6_GE1_TXD2, + MPP7_GE1_TXD3, + MPP8_GE1_RXD0, + MPP9_GE1_RXD1, + MPP10_GE1_RXD2, + MPP11_GE1_RXD3, + MPP12_GPIO, + MPP13_SYSRST_OUTn, + MPP14_SATA1_ACTn, + MPP15_SATA0_ACTn, + MPP16_GPIO, + MPP17_GPIO, + MPP18_GPIO, + MPP19_GPIO, + MPP20_GPIO, + MPP21_GPIO, + MPP22_GPIO, + MPP23_GPIO, + MPP24_UA2_TXD, + MPP25_UA2_RXD, + MPP26_UA2_CTSn, + MPP27_UA2_RTSn, + MPP28_GPIO, + MPP29_SYSRST_OUTn, + MPP30_GPIO, + MPP31_GPIO, + MPP32_GPIO, + MPP33_GPIO, + MPP34_GPIO, + MPP35_GPIO, + MPP36_GPIO, + MPP37_GPIO, + MPP38_GPIO, + MPP39_GPIO, + MPP40_UNUSED, + MPP41_UNUSED, + MPP42_UNUSED, + MPP43_UNUSED, + MPP44_UNUSED, + MPP45_UNUSED, + MPP46_UNUSED, + MPP47_UNUSED, + MPP48_SATA1_ACTn, + MPP49_SATA0_ACTn, + 0 +}; + + +static void __init wxl_init(void) +{ + /* + * Basic MV78xx0 setup. Needs to be called early. + */ + mv78xx0_init(); + mv78xx0_mpp_conf(wxl_mpp_config); + + /* + * Partition on-chip peripherals between the two CPU cores. + */ + mv78xx0_ehci0_init(); + mv78xx0_ehci1_init(); + mv78xx0_ehci2_init(); + mv78xx0_ge00_init(&db78x00_ge00_data); + mv78xx0_ge01_init(&db78x00_ge01_data); + mv78xx0_sata_init(&db78x00_sata_data); + mv78xx0_uart0_init(); + mv78xx0_uart1_init(); + mv78xx0_uart2_init(); + mv78xx0_uart3_init(); + mv78xx0_i2c_init(); + i2c_register_board_info(0, &db78x00_i2c_rtc, 1); +} + +static int __init wxl_pci_init(void) +{ + if (machine_is_terastation_wxl()) { + /* + * Assign the x16 PCIe slot on the board to CPU core + * #0, and let CPU core #1 have the four x1 slots. + */ + if (mv78xx0_core_index() == 0) + mv78xx0_pcie_init(0, 1); + else + mv78xx0_pcie_init(1, 0); + } + + return 0; +} +subsys_initcall(wxl_pci_init); + +MACHINE_START(TERASTATION_WXL, "Buffalo Nas WXL") + /* Maintainer: Sebastien Requiem <sebastien@requiem.fr> */ + .phys_io = MV78XX0_REGS_PHYS_BASE, + .io_pg_offst = ((MV78XX0_REGS_VIRT_BASE) >> 18) & 0xfffc, + .boot_params = 0x00000100, + .init_machine = wxl_init, + .map_io = mv78xx0_map_io, + .init_irq = mv78xx0_init_irq, + .timer = &mv78xx0_timer, +MACHINE_END diff --git a/arch/arm/mach-mv78xx0/mpp.c b/arch/arm/mach-mv78xx0/mpp.c new file mode 100644 index 00000000000..354ac514eb8 --- /dev/null +++ b/arch/arm/mach-mv78xx0/mpp.c @@ -0,0 +1,96 @@ +/* + * arch/arm/mach-mv78x00/mpp.c + * + * MPP functions for Marvell MV78x00 SoCs + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/mbus.h> +#include <linux/io.h> +#include <asm/gpio.h> +#include <mach/hardware.h> +#include "common.h" +#include "mpp.h" + +static unsigned int __init mv78xx0_variant(void) +{ + u32 dev, rev; + + mv78xx0_pcie_id(&dev, &rev); + + if (dev == MV78100_DEV_ID && rev >= MV78100_REV_A0) + return MPP_78100_A0_MASK; + + printk(KERN_ERR "MPP setup: unknown mv78x00 variant " + "(dev %#x rev %#x)\n", dev, rev); + return 0; +} + +#define MPP_CTRL(i) (DEV_BUS_VIRT_BASE + (i) * 4) +#define MPP_NR_REGS (1 + MPP_MAX/8) + +void __init mv78xx0_mpp_conf(unsigned int *mpp_list) +{ + u32 mpp_ctrl[MPP_NR_REGS]; + unsigned int variant_mask; + int i; + + variant_mask = mv78xx0_variant(); + if (!variant_mask) + return; + + /* Initialize gpiolib. */ + orion_gpio_init(); + + printk(KERN_DEBUG "initial MPP regs:"); + for (i = 0; i < MPP_NR_REGS; i++) { + mpp_ctrl[i] = readl(MPP_CTRL(i)); + printk(" %08x", mpp_ctrl[i]); + } + printk("\n"); + + while (*mpp_list) { + unsigned int num = MPP_NUM(*mpp_list); + unsigned int sel = MPP_SEL(*mpp_list); + int shift, gpio_mode; + + if (num > MPP_MAX) { + printk(KERN_ERR "mv78xx0_mpp_conf: invalid MPP " + "number (%u)\n", num); + continue; + } + if (!(*mpp_list & variant_mask)) { + printk(KERN_WARNING + "mv78xx0_mpp_conf: requested MPP%u config " + "unavailable on this hardware\n", num); + continue; + } + + shift = (num & 7) << 2; + mpp_ctrl[num / 8] &= ~(0xf << shift); + mpp_ctrl[num / 8] |= sel << shift; + + gpio_mode = 0; + if (*mpp_list & MPP_INPUT_MASK) + gpio_mode |= GPIO_INPUT_OK; + if (*mpp_list & MPP_OUTPUT_MASK) + gpio_mode |= GPIO_OUTPUT_OK; + if (sel != 0) + gpio_mode = 0; + orion_gpio_set_valid(num, gpio_mode); + + mpp_list++; + } + + printk(KERN_DEBUG " final MPP regs:"); + for (i = 0; i < MPP_NR_REGS; i++) { + writel(mpp_ctrl[i], MPP_CTRL(i)); + printk(" %08x", mpp_ctrl[i]); + } + printk("\n"); +} diff --git a/arch/arm/mach-mv78xx0/mpp.h b/arch/arm/mach-mv78xx0/mpp.h new file mode 100644 index 00000000000..80840b781ea --- /dev/null +++ b/arch/arm/mach-mv78xx0/mpp.h @@ -0,0 +1,347 @@ +/* + * linux/arch/arm/mach-mv78xx0/mpp.h -- Multi Purpose Pins + * + * + * sebastien requiem <sebastien@requiem.fr> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#ifndef __MV78X00_MPP_H +#define __MV78X00_MPP_H + +#define MPP(_num, _sel, _in, _out, _78100_A0) (\ + /* MPP number */ ((_num) & 0xff) | \ + /* MPP select value */ (((_sel) & 0xf) << 8) | \ + /* may be input signal */ ((!!(_in)) << 12) | \ + /* may be output signal */ ((!!(_out)) << 13) | \ + /* available on A0 */ ((!!(_78100_A0)) << 14)) + +#define MPP_NUM(x) ((x) & 0xff) +#define MPP_SEL(x) (((x) >> 8) & 0xf) + + /* num sel i o 78100_A0 */ + +#define MPP_INPUT_MASK MPP(0, 0x0, 1, 0, 0) +#define MPP_OUTPUT_MASK MPP(0, 0x0, 0, 1, 0) + +#define MPP_78100_A0_MASK MPP(0, 0x0, 0, 0, 1) + +#define MPP0_GPIO MPP(0, 0x0, 1, 1, 1) +#define MPP0_GE0_COL MPP(0, 0x1, 1, 0, 1) +#define MPP0_GE1_TXCLK MPP(0, 0x2, 0, 1, 1) +#define MPP0_UNUSED MPP(0, 0x3, 0, 0, 1) + +#define MPP1_GPIO MPP(1, 0x0, 1, 1, 1) +#define MPP1_GE0_RXERR MPP(1, 0x1, 1, 0, 1) +#define MPP1_GE1_TXCTL MPP(1, 0x2, 0, 1, 1) +#define MPP1_UNUSED MPP(1, 0x3, 0, 0, 1) + +#define MPP2_GPIO MPP(2, 0x0, 1, 1, 1) +#define MPP2_GE0_CRS MPP(2, 0x1, 1, 0, 1) +#define MPP2_GE1_RXCTL MPP(2, 0x2, 1, 0, 1) +#define MPP2_UNUSED MPP(2, 0x3, 0, 0, 1) + +#define MPP3_GPIO MPP(3, 0x0, 1, 1, 1) +#define MPP3_GE0_TXERR MPP(3, 0x1, 0, 1, 1) +#define MPP3_GE1_RXCLK MPP(3, 0x2, 1, 0, 1) +#define MPP3_UNUSED MPP(3, 0x3, 0, 0, 1) + +#define MPP4_GPIO MPP(4, 0x0, 1, 1, 1) +#define MPP4_GE0_TXD4 MPP(4, 0x1, 0, 1, 1) +#define MPP4_GE1_TXD0 MPP(4, 0x2, 0, 1, 1) +#define MPP4_UNUSED MPP(4, 0x3, 0, 0, 1) + +#define MPP5_GPIO MPP(5, 0x0, 1, 1, 1) +#define MPP5_GE0_TXD5 MPP(5, 0x1, 0, 1, 1) +#define MPP5_GE1_TXD1 MPP(5, 0x2, 0, 1, 1) +#define MPP5_UNUSED MPP(5, 0x3, 0, 0, 1) + +#define MPP6_GPIO MPP(6, 0x0, 1, 1, 1) +#define MPP6_GE0_TXD6 MPP(6, 0x1, 0, 1, 1) +#define MPP6_GE1_TXD2 MPP(6, 0x2, 0, 1, 1) +#define MPP6_UNUSED MPP(6, 0x3, 0, 0, 1) + +#define MPP7_GPIO MPP(7, 0x0, 1, 1, 1) +#define MPP7_GE0_TXD7 MPP(7, 0x1, 0, 1, 1) +#define MPP7_GE1_TXD3 MPP(7, 0x2, 0, 1, 1) +#define MPP7_UNUSED MPP(7, 0x3, 0, 0, 1) + +#define MPP8_GPIO MPP(8, 0x0, 1, 1, 1) +#define MPP8_GE0_RXD4 MPP(8, 0x1, 1, 0, 1) +#define MPP8_GE1_RXD0 MPP(8, 0x2, 1, 0, 1) +#define MPP8_UNUSED MPP(8, 0x3, 0, 0, 1) + +#define MPP9_GPIO MPP(9, 0x0, 1, 1, 1) +#define MPP9_GE0_RXD5 MPP(9, 0x1, 1, 0, 1) +#define MPP9_GE1_RXD1 MPP(9, 0x2, 1, 0, 1) +#define MPP9_UNUSED MPP(9, 0x3, 0, 0, 1) + +#define MPP10_GPIO MPP(10, 0x0, 1, 1, 1) +#define MPP10_GE0_RXD6 MPP(10, 0x1, 1, 0, 1) +#define MPP10_GE1_RXD2 MPP(10, 0x2, 1, 0, 1) +#define MPP10_UNUSED MPP(10, 0x3, 0, 0, 1) + +#define MPP11_GPIO MPP(11, 0x0, 1, 1, 1) +#define MPP11_GE0_RXD7 MPP(11, 0x1, 1, 0, 1) +#define MPP11_GE1_RXD3 MPP(11, 0x2, 1, 0, 1) +#define MPP11_UNUSED MPP(11, 0x3, 0, 0, 1) + +#define MPP12_GPIO MPP(12, 0x0, 1, 1, 1) +#define MPP12_M_BB MPP(12, 0x3, 1, 0, 1) +#define MPP12_UA0_CTSn MPP(12, 0x4, 1, 0, 1) +#define MPP12_NAND_FLASH_REn0 MPP(12, 0x5, 0, 1, 1) +#define MPP12_TDM0_SCSn MPP(12, 0X6, 0, 1, 1) +#define MPP12_UNUSED MPP(12, 0x1, 0, 0, 1) + +#define MPP13_GPIO MPP(13, 0x0, 1, 1, 1) +#define MPP13_SYSRST_OUTn MPP(13, 0x3, 0, 1, 1) +#define MPP13_UA0_RTSn MPP(13, 0x4, 0, 1, 1) +#define MPP13_NAN_FLASH_WEn0 MPP(13, 0x5, 0, 1, 1) +#define MPP13_TDM_SCLK MPP(13, 0x6, 0, 1, 1) +#define MPP13_UNUSED MPP(13, 0x1, 0, 0, 1) + +#define MPP14_GPIO MPP(14, 0x0, 1, 1, 1) +#define MPP14_SATA1_ACTn MPP(14, 0x3, 0, 1, 1) +#define MPP14_UA1_CTSn MPP(14, 0x4, 1, 0, 1) +#define MPP14_NAND_FLASH_REn1 MPP(14, 0x5, 0, 1, 1) +#define MPP14_TDM_SMOSI MPP(14, 0x6, 0, 1, 1) +#define MPP14_UNUSED MPP(14, 0x1, 0, 0, 1) + +#define MPP15_GPIO MPP(15, 
0x0, 1, 1, 1) +#define MPP15_SATA0_ACTn MPP(15, 0x3, 0, 1, 1) +#define MPP15_UA1_RTSn MPP(15, 0x4, 0, 1, 1) +#define MPP15_NAND_FLASH_WEn1 MPP(15, 0x5, 0, 1, 1) +#define MPP15_TDM_SMISO MPP(15, 0x6, 1, 0, 1) +#define MPP15_UNUSED MPP(15, 0x1, 0, 0, 1) + +#define MPP16_GPIO MPP(16, 0x0, 1, 1, 1) +#define MPP16_SATA1_PRESENTn MPP(16, 0x3, 0, 1, 1) +#define MPP16_UA2_TXD MPP(16, 0x4, 0, 1, 1) +#define MPP16_NAND_FLASH_REn3 MPP(16, 0x5, 0, 1, 1) +#define MPP16_TDM_INTn MPP(16, 0x6, 1, 0, 1) +#define MPP16_UNUSED MPP(16, 0x1, 0, 0, 1) + + +#define MPP17_GPIO MPP(17, 0x0, 1, 1, 1) +#define MPP17_SATA0_PRESENTn MPP(17, 0x3, 0, 1, 1) +#define MPP17_UA2_RXD MPP(17, 0x4, 1, 0, 1) +#define MPP17_NAND_FLASH_WEn3 MPP(17, 0x5, 0, 1, 1) +#define MPP17_TDM_RSTn MPP(17, 0x6, 0, 1, 1) +#define MPP17_UNUSED MPP(17, 0x1, 0, 0, 1) + + +#define MPP18_GPIO MPP(18, 0x0, 1, 1, 1) +#define MPP18_UA0_CTSn MPP(18, 0x4, 1, 0, 1) +#define MPP18_BOOT_FLASH_REn MPP(18, 0x5, 0, 1, 1) +#define MPP18_UNUSED MPP(18, 0x1, 0, 0, 1) + + + +#define MPP19_GPIO MPP(19, 0x0, 1, 1, 1) +#define MPP19_UA0_CTSn MPP(19, 0x4, 0, 1, 1) +#define MPP19_BOOT_FLASH_WEn MPP(19, 0x5, 0, 1, 1) +#define MPP19_UNUSED MPP(19, 0x1, 0, 0, 1) + + +#define MPP20_GPIO MPP(20, 0x0, 1, 1, 1) +#define MPP20_UA1_CTSs MPP(20, 0x4, 1, 0, 1) +#define MPP20_TDM_PCLK MPP(20, 0x6, 1, 1, 0) +#define MPP20_UNUSED MPP(20, 0x1, 0, 0, 1) + + + +#define MPP21_GPIO MPP(21, 0x0, 1, 1, 1) +#define MPP21_UA1_CTSs MPP(21, 0x4, 0, 1, 1) +#define MPP21_TDM_FSYNC MPP(21, 0x6, 1, 1, 0) +#define MPP21_UNUSED MPP(21, 0x1, 0, 0, 1) + + + +#define MPP22_GPIO MPP(22, 0x0, 1, 1, 1) +#define MPP22_UA3_TDX MPP(22, 0x4, 0, 1, 1) +#define MPP22_NAND_FLASH_REn2 MPP(22, 0x5, 0, 1, 1) +#define MPP22_TDM_DRX MPP(22, 0x6, 1, 0, 1) +#define MPP22_UNUSED MPP(22, 0x1, 0, 0, 1) + + + +#define MPP23_GPIO MPP(23, 0x0, 1, 1, 1) +#define MPP23_UA3_RDX MPP(23, 0x4, 1, 0, 1) +#define MPP23_NAND_FLASH_WEn2 MPP(23, 0x5, 0, 1, 1) +#define MPP23_TDM_DTX MPP(23, 0x6, 0, 1, 1) +#define MPP23_UNUSED MPP(23, 0x1, 0, 0, 1) + + +#define MPP24_GPIO MPP(24, 0x0, 1, 1, 1) +#define MPP24_UA2_TXD MPP(24, 0x4, 0, 1, 1) +#define MPP24_TDM_INTn MPP(24, 0x6, 1, 0, 1) +#define MPP24_UNUSED MPP(24, 0x1, 0, 0, 1) + + +#define MPP25_GPIO MPP(25, 0x0, 1, 1, 1) +#define MPP25_UA2_RXD MPP(25, 0x4, 1, 0, 1) +#define MPP25_TDM_RSTn MPP(25, 0x6, 0, 1, 1) +#define MPP25_UNUSED MPP(25, 0x1, 0, 0, 1) + + +#define MPP26_GPIO MPP(26, 0x0, 1, 1, 1) +#define MPP26_UA2_CTSn MPP(26, 0x4, 1, 0, 1) +#define MPP26_TDM_PCLK MPP(26, 0x6, 1, 1, 1) +#define MPP26_UNUSED MPP(26, 0x1, 0, 0, 1) + + +#define MPP27_GPIO MPP(27, 0x0, 1, 1, 1) +#define MPP27_UA2_RTSn MPP(27, 0x4, 0, 1, 1) +#define MPP27_TDM_FSYNC MPP(27, 0x6, 1, 1, 1) +#define MPP27_UNUSED MPP(27, 0x1, 0, 0, 1) + + +#define MPP28_GPIO MPP(28, 0x0, 1, 1, 1) +#define MPP28_UA3_TXD MPP(28, 0x4, 0, 1, 1) +#define MPP28_TDM_DRX MPP(28, 0x6, 1, 0, 1) +#define MPP28_UNUSED MPP(28, 0x1, 0, 0, 1) + +#define MPP29_GPIO MPP(29, 0x0, 1, 1, 1) +#define MPP29_UA3_RXD MPP(29, 0x4, 1, 0, 1) +#define MPP29_SYSRST_OUTn MPP(29, 0x5, 0, 1, 1) +#define MPP29_TDM_DTX MPP(29, 0x6, 0, 1, 1) +#define MPP29_UNUSED MPP(29, 0x1, 0, 0, 1) + +#define MPP30_GPIO MPP(30, 0x0, 1, 1, 1) +#define MPP30_UA3_CTSn MPP(30, 0x4, 1, 0, 1) +#define MPP30_UNUSED MPP(30, 0x1, 0, 0, 1) + +#define MPP31_GPIO MPP(31, 0x0, 1, 1, 1) +#define MPP31_UA3_RTSn MPP(31, 0x4, 0, 1, 1) +#define MPP31_TDM1_SCSn MPP(31, 0x6, 0, 1, 1) +#define MPP31_UNUSED MPP(31, 0x1, 0, 0, 1) + + +#define MPP32_GPIO MPP(32, 0x1, 1, 1, 1) +#define MPP32_UA3_TDX 
MPP(32, 0x4, 0, 1, 1) +#define MPP32_SYSRST_OUTn MPP(32, 0x5, 0, 1, 1) +#define MPP32_TDM0_RXQ MPP(32, 0x6, 0, 1, 1) +#define MPP32_UNUSED MPP(32, 0x3, 0, 0, 1) + + +#define MPP33_GPIO MPP(33, 0x1, 1, 1, 1) +#define MPP33_UA3_RDX MPP(33, 0x4, 1, 0, 1) +#define MPP33_TDM0_TXQ MPP(33, 0x6, 0, 1, 1) +#define MPP33_UNUSED MPP(33, 0x3, 0, 0, 1) + + + +#define MPP34_GPIO MPP(34, 0x1, 1, 1, 1) +#define MPP34_UA2_TDX MPP(34, 0x4, 0, 1, 1) +#define MPP34_TDM1_RXQ MPP(34, 0x6, 0, 1, 1) +#define MPP34_UNUSED MPP(34, 0x3, 0, 0, 1) + + + +#define MPP35_GPIO MPP(35, 0x1, 1, 1, 1) +#define MPP35_UA2_RDX MPP(35, 0x4, 1, 0, 1) +#define MPP35_TDM1_TXQ MPP(35, 0x6, 0, 1, 1) +#define MPP35_UNUSED MPP(35, 0x3, 0, 0, 1) + +#define MPP36_GPIO MPP(36, 0x1, 1, 1, 1) +#define MPP36_UA0_CTSn MPP(36, 0x2, 1, 0, 1) +#define MPP36_UA2_TDX MPP(36, 0x4, 0, 1, 1) +#define MPP36_TDM0_SCSn MPP(36, 0x6, 0, 1, 1) +#define MPP36_UNUSED MPP(36, 0x3, 0, 0, 1) + + +#define MPP37_GPIO MPP(37, 0x1, 1, 1, 1) +#define MPP37_UA0_RTSn MPP(37, 0x2, 0, 1, 1) +#define MPP37_UA2_RXD MPP(37, 0x4, 1, 0, 1) +#define MPP37_SYSRST_OUTn MPP(37, 0x5, 0, 1, 1) +#define MPP37_TDM_SCLK MPP(37, 0x6, 0, 1, 1) +#define MPP37_UNUSED MPP(37, 0x3, 0, 0, 1) + + + + +#define MPP38_GPIO MPP(38, 0x1, 1, 1, 1) +#define MPP38_UA1_CTSn MPP(38, 0x2, 1, 0, 1) +#define MPP38_UA3_TXD MPP(38, 0x4, 0, 1, 1) +#define MPP38_SYSRST_OUTn MPP(38, 0x5, 0, 1, 1) +#define MPP38_TDM_SMOSI MPP(38, 0x6, 0, 1, 1) +#define MPP38_UNUSED MPP(38, 0x3, 0, 0, 1) + + + + +#define MPP39_GPIO MPP(39, 0x1, 1, 1, 1) +#define MPP39_UA1_RTSn MPP(39, 0x2, 0, 1, 1) +#define MPP39_UA3_RXD MPP(39, 0x4, 1, 0, 1) +#define MPP39_SYSRST_OUTn MPP(39, 0x5, 0, 1, 1) +#define MPP39_TDM_SMISO MPP(39, 0x6, 1, 0, 1) +#define MPP39_UNUSED MPP(39, 0x3, 0, 0, 1) + + + +#define MPP40_GPIO MPP(40, 0x1, 1, 1, 1) +#define MPP40_TDM_INTn MPP(40, 0x6, 1, 0, 1) +#define MPP40_UNUSED MPP(40, 0x0, 0, 0, 1) + + + +#define MPP41_GPIO MPP(41, 0x1, 1, 1, 1) +#define MPP41_TDM_RSTn MPP(41, 0x6, 0, 1, 1) +#define MPP41_UNUSED MPP(41, 0x0, 0, 0, 1) + + + +#define MPP42_GPIO MPP(42, 0x1, 1, 1, 1) +#define MPP42_TDM_PCLK MPP(42, 0x6, 1, 1, 1) +#define MPP42_UNUSED MPP(42, 0x0, 0, 0, 1) + + + +#define MPP43_GPIO MPP(43, 0x1, 1, 1, 1) +#define MPP43_TDM_FSYNC MPP(43, 0x6, 1, 1, 1) +#define MPP43_UNUSED MPP(43, 0x0, 0, 0, 1) + + + +#define MPP44_GPIO MPP(44, 0x1, 1, 1, 1) +#define MPP44_TDM_DRX MPP(44, 0x6, 1, 0, 1) +#define MPP44_UNUSED MPP(44, 0x0, 0, 0, 1) + + + +#define MPP45_GPIO MPP(45, 0x1, 1, 1, 1) +#define MPP45_SATA0_ACTn MPP(45, 0x3, 0, 1, 1) +#define MPP45_TDM_DRX MPP(45, 0x6, 0, 1, 1) +#define MPP45_UNUSED MPP(45, 0x0, 0, 0, 1) + + +#define MPP46_GPIO MPP(46, 0x1, 1, 1, 1) +#define MPP46_TDM_SCSn MPP(46, 0x6, 0, 1, 1) +#define MPP46_UNUSED MPP(46, 0x0, 0, 0, 1) + + +#define MPP47_GPIO MPP(47, 0x1, 1, 1, 1) +#define MPP47_UNUSED MPP(47, 0x0, 0, 0, 1) + + + +#define MPP48_GPIO MPP(48, 0x1, 1, 1, 1) +#define MPP48_SATA1_ACTn MPP(48, 0x3, 0, 1, 1) +#define MPP48_UNUSED MPP(48, 0x2, 0, 0, 1) + + + +#define MPP49_GPIO MPP(49, 0x1, 1, 1, 1) +#define MPP49_SATA0_ACTn MPP(49, 0x3, 0, 1, 1) +#define MPP49_M_BB MPP(49, 0x4, 1, 0, 1) +#define MPP49_UNUSED MPP(49, 0x2, 0, 0, 1) + + +#define MPP_MAX 49 + +void mv78xx0_mpp_conf(unsigned int *mpp_list); + +#endif diff --git a/arch/arm/mach-omap2/mmc-twl4030.c b/arch/arm/mach-omap2/mmc-twl4030.c index 0c3c72d934b..8afe9dd3f15 100644 --- a/arch/arm/mach-omap2/mmc-twl4030.c +++ b/arch/arm/mach-omap2/mmc-twl4030.c @@ -408,6 +408,7 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info 
*controllers) { struct twl4030_hsmmc_info *c; int nr_hsmmc = ARRAY_SIZE(hsmmc_data); + int i; if (cpu_is_omap2430()) { control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE; @@ -434,7 +435,7 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers) mmc = kzalloc(sizeof(struct omap_mmc_platform_data), GFP_KERNEL); if (!mmc) { pr_err("Cannot allocate memory for mmc device!\n"); - return; + goto done; } if (c->name) @@ -532,6 +533,10 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers) continue; c->dev = mmc->dev; } + +done: + for (i = 0; i < nr_hsmmc; i++) + kfree(hsmmc_data[i]); } #endif diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types index 5a79fc6ee81..31c2f4c30a9 100644 --- a/arch/arm/tools/mach-types +++ b/arch/arm/tools/mach-types @@ -12,7 +12,7 @@ # # http://www.arm.linux.org.uk/developer/machines/?action=new # -# Last update: Thu Jan 28 22:15:54 2010 +# Last update: Sat Feb 20 14:16:15 2010 # # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number # @@ -2257,7 +2257,7 @@ oratisalog MACH_ORATISALOG ORATISALOG 2268 oratismadi MACH_ORATISMADI ORATISMADI 2269 oratisot16 MACH_ORATISOT16 ORATISOT16 2270 oratisdesk MACH_ORATISDESK ORATISDESK 2271 -v2_ca9 MACH_V2P_CA9 V2P_CA9 2272 +vexpress MACH_VEXPRESS VEXPRESS 2272 sintexo MACH_SINTEXO SINTEXO 2273 cm3389 MACH_CM3389 CM3389 2274 omap3_cio MACH_OMAP3_CIO OMAP3_CIO 2275 @@ -2636,3 +2636,45 @@ hw90240 MACH_HW90240 HW90240 2648 dm365_leopard MACH_DM365_LEOPARD DM365_LEOPARD 2649 mityomapl138 MACH_MITYOMAPL138 MITYOMAPL138 2650 scat110 MACH_SCAT110 SCAT110 2651 +acer_a1 MACH_ACER_A1 ACER_A1 2652 +cmcontrol MACH_CMCONTROL CMCONTROL 2653 +pelco_lamar MACH_PELCO_LAMAR PELCO_LAMAR 2654 +rfp43 MACH_RFP43 RFP43 2655 +sk86r0301 MACH_SK86R0301 SK86R0301 2656 +ctpxa MACH_CTPXA CTPXA 2657 +epb_arm9_a MACH_EPB_ARM9_A EPB_ARM9_A 2658 +guruplug MACH_GURUPLUG GURUPLUG 2659 +spear310 MACH_SPEAR310 SPEAR310 2660 +spear320 MACH_SPEAR320 SPEAR320 2661 +robotx MACH_ROBOTX ROBOTX 2662 +lsxhl MACH_LSXHL LSXHL 2663 +smartlite MACH_SMARTLITE SMARTLITE 2664 +cws2 MACH_CWS2 CWS2 2665 +m619 MACH_M619 M619 2666 +smartview MACH_SMARTVIEW SMARTVIEW 2667 +lsa_salsa MACH_LSA_SALSA LSA_SALSA 2668 +kizbox MACH_KIZBOX KIZBOX 2669 +htccharmer MACH_HTCCHARMER HTCCHARMER 2670 +guf_neso_lt MACH_GUF_NESO_LT GUF_NESO_LT 2671 +pm9g45 MACH_PM9G45 PM9G45 2672 +htcpanther MACH_HTCPANTHER HTCPANTHER 2673 +htcpanther_cdma MACH_HTCPANTHER_CDMA HTCPANTHER_CDMA 2674 +reb01 MACH_REB01 REB01 2675 +aquila MACH_AQUILA AQUILA 2676 +spark_sls_hw2 MACH_SPARK_SLS_HW2 SPARK_SLS_HW2 2677 +sheeva_esata MACH_ESATA_SHEEVAPLUG ESATA_SHEEVAPLUG 2678 +surf7x30 MACH_SURF7X30 SURF7X30 2679 +micro2440 MACH_MICRO2440 MICRO2440 2680 +am2440 MACH_AM2440 AM2440 2681 +tq2440 MACH_TQ2440 TQ2440 2682 +lpc2478oem MACH_LPC2478OEM LPC2478OEM 2683 +ak880x MACH_AK880X AK880X 2684 +cobra3530 MACH_COBRA3530 COBRA3530 2685 +pmppb MACH_PMPPB PMPPB 2686 +u6715 MACH_U6715 U6715 2687 +axar1500_sender MACH_AXAR1500_SENDER AXAR1500_SENDER 2688 +g30_dvb MACH_G30_DVB G30_DVB 2689 +vc088x MACH_VC088X VC088X 2690 +mioa702 MACH_MIOA702 MIOA702 2691 +hpmin MACH_HPMIN HPMIN 2692 +ak880xak MACH_AK880XAK AK880XAK 2693 diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c index 1aa1ea5e921..b13d1879e51 100644 --- a/arch/avr32/mach-at32ap/at32ap700x.c +++ b/arch/avr32/mach-at32ap/at32ap700x.c @@ -1325,7 +1325,7 @@ struct platform_device *__init at32_add_device_mci(unsigned int id, struct mci_platform_data *data) { struct platform_device *pdev; - struct mci_dma_slave 
*slave; + struct mci_dma_data *slave; u32 pioa_mask; u32 piob_mask; @@ -1344,7 +1344,9 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) ARRAY_SIZE(atmel_mci0_resource))) goto fail; - slave = kzalloc(sizeof(struct mci_dma_slave), GFP_KERNEL); + slave = kzalloc(sizeof(struct mci_dma_data), GFP_KERNEL); + if (!slave) + goto fail; slave->sdata.dma_dev = &dw_dmac0_device.dev; slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT; @@ -1357,7 +1359,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) if (platform_device_add_data(pdev, data, sizeof(struct mci_platform_data))) - goto fail; + goto fail_free; /* CLK line is common to both slots */ pioa_mask = 1 << 10; @@ -1381,7 +1383,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) /* Slot is unused */ break; default: - goto fail; + goto fail_free; } select_peripheral(PIOA, pioa_mask, PERIPH_A, 0); @@ -1408,7 +1410,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) break; default: if (!data->slot[0].bus_width) - goto fail; + goto fail_free; data->slot[1].bus_width = 0; break; @@ -1419,9 +1421,10 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) platform_device_add(pdev); return pdev; +fail_free: + kfree(slave); fail: data->dma_slave = NULL; - kfree(slave); platform_device_put(pdev); return NULL; } diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h index 7ae58892ba8..e97b255d97b 100644 --- a/arch/ia64/include/asm/acpi.h +++ b/arch/ia64/include/asm/acpi.h @@ -94,6 +94,7 @@ ia64_acpi_release_global_lock (unsigned int *lock) #define acpi_noirq 0 /* ACPI always enabled on IA64 */ #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */ #define acpi_strict 1 /* no ACPI spec workarounds on IA64 */ +#define acpi_ht 0 /* no HT-only mode on IA64 */ #endif #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */ static inline void disable_acpi(void) { } diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h index e14108b19c0..4c41656ede8 100644 --- a/arch/ia64/include/asm/elf.h +++ b/arch/ia64/include/asm/elf.h @@ -201,7 +201,9 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst); relevant until we have real hardware to play with... 
*/ #define ELF_PLATFORM NULL -#define SET_PERSONALITY(ex) set_personality(PER_LINUX) +#define SET_PERSONALITY(ex) \ + set_personality((current->personality & ~PER_MASK) | PER_LINUX) + #define elf_read_implies_exec(ex, executable_stack) \ ((executable_stack!=EXSTACK_DISABLE_X) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0) diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c index 5372b24ad04..bb8c4b9ccb8 100644 --- a/arch/microblaze/kernel/setup.c +++ b/arch/microblaze/kernel/setup.c @@ -54,6 +54,7 @@ void __init setup_arch(char **cmdline_p) microblaze_cache_init(); + invalidate_dcache(); enable_dcache(); invalidate_icache(); diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig index ed84b4cb3c8..84b6503f10b 100644 --- a/arch/mips/configs/ip27_defconfig +++ b/arch/mips/configs/ip27_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.23-rc2 -# Tue Aug 7 13:04:24 2007 +# Linux kernel version: 2.6.33-rc6 +# Wed Feb 3 18:12:31 2010 # CONFIG_MIPS=y @@ -9,20 +9,28 @@ CONFIG_MIPS=y # Machine selection # # CONFIG_MACH_ALCHEMY is not set +# CONFIG_AR7 is not set +# CONFIG_BCM47XX is not set +# CONFIG_BCM63XX is not set # CONFIG_MIPS_COBALT is not set # CONFIG_MACH_DECSTATION is not set # CONFIG_MACH_JAZZ is not set -# CONFIG_LEMOTE_FULONG is not set +# CONFIG_LASAT is not set +# CONFIG_MACH_LOONGSON is not set # CONFIG_MIPS_MALTA is not set # CONFIG_MIPS_SIM is not set -# CONFIG_MARKEINS is not set +# CONFIG_NEC_MARKEINS is not set # CONFIG_MACH_VR41XX is not set +# CONFIG_NXP_STB220 is not set +# CONFIG_NXP_STB225 is not set # CONFIG_PNX8550_JBS is not set # CONFIG_PNX8550_STB810 is not set # CONFIG_PMC_MSP is not set # CONFIG_PMC_YOSEMITE is not set +# CONFIG_POWERTV is not set # CONFIG_SGI_IP22 is not set CONFIG_SGI_IP27=y +# CONFIG_SGI_IP28 is not set # CONFIG_SGI_IP32 is not set # CONFIG_SIBYTE_CRHINE is not set # CONFIG_SIBYTE_CARMEL is not set @@ -33,32 +41,39 @@ CONFIG_SGI_IP27=y # CONFIG_SIBYTE_SENTOSA is not set # CONFIG_SIBYTE_BIGSUR is not set # CONFIG_SNI_RM is not set -# CONFIG_TOSHIBA_JMR3927 is not set -# CONFIG_TOSHIBA_RBTX4927 is not set -# CONFIG_TOSHIBA_RBTX4938 is not set +# CONFIG_MACH_TX39XX is not set +# CONFIG_MACH_TX49XX is not set +# CONFIG_MIKROTIK_RB532 is not set # CONFIG_WR_PPMC is not set +# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set +# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set +# CONFIG_ALCHEMY_GPIO_INDIRECT is not set CONFIG_SGI_SN_M_MODE=y # CONFIG_SGI_SN_N_MODE is not set # CONFIG_MAPPED_KERNEL is not set # CONFIG_REPLICATE_KTEXT is not set # CONFIG_REPLICATE_EXHANDLERS is not set +CONFIG_LOONGSON_UART_BASE=y CONFIG_RWSEM_GENERIC_SPINLOCK=y # CONFIG_ARCH_HAS_ILOG2_U32 is not set # CONFIG_ARCH_HAS_ILOG2_U64 is not set +CONFIG_ARCH_SUPPORTS_OPROFILE=y CONFIG_GENERIC_FIND_NEXT_BIT=y CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_CLOCKEVENTS=y CONFIG_GENERIC_TIME=y -CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y CONFIG_ARC=y CONFIG_DMA_COHERENT=y -CONFIG_EARLY_PRINTK=y CONFIG_SYS_HAS_EARLY_PRINTK=y # CONFIG_NO_IOPORT is not set CONFIG_CPU_BIG_ENDIAN=y # CONFIG_CPU_LITTLE_ENDIAN is not set CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y +CONFIG_DEFAULT_SGI_PARTITION=y CONFIG_MIPS_L1_CACHE_SHIFT=7 CONFIG_ARC64=y CONFIG_BOOT_ELF64=y @@ -66,7 +81,8 @@ CONFIG_BOOT_ELF64=y # # CPU selection # -# CONFIG_CPU_LOONGSON2 is not set +# 
CONFIG_CPU_LOONGSON2E is not set +# CONFIG_CPU_LOONGSON2F is not set # CONFIG_CPU_MIPS32_R1 is not set # CONFIG_CPU_MIPS32_R2 is not set # CONFIG_CPU_MIPS64_R1 is not set @@ -79,6 +95,7 @@ CONFIG_BOOT_ELF64=y # CONFIG_CPU_TX49XX is not set # CONFIG_CPU_R5000 is not set # CONFIG_CPU_R5432 is not set +# CONFIG_CPU_R5500 is not set # CONFIG_CPU_R6000 is not set # CONFIG_CPU_NEVADA is not set # CONFIG_CPU_R8000 is not set @@ -86,6 +103,7 @@ CONFIG_CPU_R10000=y # CONFIG_CPU_RM7000 is not set # CONFIG_CPU_RM9000 is not set # CONFIG_CPU_SB1 is not set +# CONFIG_CPU_CAVIUM_OCTEON is not set CONFIG_SYS_HAS_CPU_R10000=y CONFIG_SYS_SUPPORTS_64BIT_KERNEL=y CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y @@ -99,6 +117,7 @@ CONFIG_64BIT=y CONFIG_PAGE_SIZE_4KB=y # CONFIG_PAGE_SIZE_8KB is not set # CONFIG_PAGE_SIZE_16KB is not set +# CONFIG_PAGE_SIZE_32KB is not set # CONFIG_PAGE_SIZE_64KB is not set CONFIG_CPU_HAS_PREFETCH=y CONFIG_MIPS_MT_DISABLED=y @@ -110,6 +129,7 @@ CONFIG_GENERIC_IRQ_PROBE=y CONFIG_IRQ_PER_CPU=y CONFIG_CPU_SUPPORTS_HIGHMEM=y CONFIG_ARCH_DISCONTIGMEM_ENABLE=y +CONFIG_ARCH_POPULATES_NODE_MAP=y CONFIG_NUMA=y CONFIG_SYS_SUPPORTS_NUMA=y CONFIG_NODES_SHIFT=6 @@ -120,16 +140,22 @@ CONFIG_DISCONTIGMEM_MANUAL=y CONFIG_DISCONTIGMEM=y CONFIG_FLAT_NODE_MEM_MAP=y CONFIG_NEED_MULTIPLE_NODES=y -# CONFIG_SPARSEMEM_STATIC is not set +CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 CONFIG_MIGRATION=y -CONFIG_RESOURCES_64BIT=y +CONFIG_PHYS_ADDR_T_64BIT=y CONFIG_ZONE_DMA_FLAG=0 CONFIG_VIRT_TO_BUS=y +# CONFIG_KSM is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 CONFIG_SMP=y CONFIG_SYS_SUPPORTS_SMP=y CONFIG_NR_CPUS_DEFAULT_64=y CONFIG_NR_CPUS=64 +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y # CONFIG_HZ_48 is not set # CONFIG_HZ_100 is not set # CONFIG_HZ_128 is not set @@ -142,13 +168,13 @@ CONFIG_HZ=1000 CONFIG_PREEMPT_NONE=y # CONFIG_PREEMPT_VOLUNTARY is not set # CONFIG_PREEMPT is not set -CONFIG_PREEMPT_BKL=y # CONFIG_MIPS_INSANE_LARGE is not set # CONFIG_KEXEC is not set CONFIG_SECCOMP=y CONFIG_LOCKDEP_SUPPORT=y CONFIG_STACKTRACE_SUPPORT=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y # # General setup @@ -162,20 +188,41 @@ CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y # CONFIG_BSD_PROCESS_ACCT is not set # CONFIG_TASKSTATS is not set -# CONFIG_USER_NS is not set # CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_TINY_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=64 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=15 +# CONFIG_GROUP_SCHED is not set CONFIG_CGROUPS=y +# CONFIG_CGROUP_DEBUG is not set +# CONFIG_CGROUP_NS is not set +# CONFIG_CGROUP_FREEZER is not set +# CONFIG_CGROUP_DEVICE is not set CONFIG_CPUSETS=y -CONFIG_SYSFS_DEPRECATED=y +CONFIG_PROC_PID_CPUSET=y +# CONFIG_CGROUP_CPUACCT is not set +# CONFIG_RESOURCE_COUNTERS is not set +# CONFIG_SYSFS_DEPRECATED_V2 is not set CONFIG_RELAY=y +# CONFIG_NAMESPACES is not set # CONFIG_BLK_DEV_INITRD is not set # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y CONFIG_EMBEDDED=y CONFIG_SYSCTL_SYSCALL=y CONFIG_KALLSYMS=y @@ -184,44 +231,92 @@ CONFIG_HOTPLUG=y CONFIG_PRINTK=y CONFIG_BUG=y CONFIG_ELF_CORE=y +# CONFIG_PCSPKR_PLATFORM is not set CONFIG_BASE_FULL=y CONFIG_FUTEX=y -CONFIG_ANON_INODES=y 
CONFIG_EPOLL=y CONFIG_SIGNALFD=y CONFIG_TIMERFD=y CONFIG_EVENTFD=y CONFIG_SHMEM=y +CONFIG_AIO=y + +# +# Kernel Performance Events And Counters +# CONFIG_VM_EVENT_COUNTERS=y +CONFIG_PCI_QUIRKS=y +CONFIG_COMPAT_BRK=y CONFIG_SLAB=y # CONFIG_SLUB is not set # CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +CONFIG_HAVE_OPROFILE=y +CONFIG_HAVE_SYSCALL_WRAPPERS=y +CONFIG_USE_GENERIC_SMP_HELPERS=y + +# +# GCOV-based kernel profiling +# +CONFIG_SLOW_WORK=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y CONFIG_RT_MUTEXES=y -# CONFIG_TINY_SHMEM is not set CONFIG_BASE_SMALL=0 CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set CONFIG_MODULE_UNLOAD=y # CONFIG_MODULE_FORCE_UNLOAD is not set # CONFIG_MODVERSIONS is not set CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_KMOD=y CONFIG_STOP_MACHINE=y CONFIG_BLOCK=y -# CONFIG_BLK_DEV_IO_TRACE is not set # CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set +# CONFIG_BLK_CGROUP is not set +CONFIG_BLOCK_COMPAT=y # # IO Schedulers # CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_AS=y CONFIG_IOSCHED_DEADLINE=y CONFIG_IOSCHED_CFQ=y -CONFIG_DEFAULT_AS=y +# CONFIG_CFQ_GROUP_IOSCHED is not set # CONFIG_DEFAULT_DEADLINE is not set -# CONFIG_DEFAULT_CFQ is not set +CONFIG_DEFAULT_CFQ=y # CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="anticipatory" +CONFIG_DEFAULT_IOSCHED="cfq" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +CONFIG_INLINE_SPIN_UNLOCK=y +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +CONFIG_INLINE_READ_UNLOCK=y +# CONFIG_INLINE_READ_UNLOCK_BH is not set +CONFIG_INLINE_READ_UNLOCK_IRQ=y +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +CONFIG_INLINE_WRITE_UNLOCK=y +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +CONFIG_MUTEX_SPIN_ON_OWNER=y +# CONFIG_FREEZER is not set # # Bus options (PCI, PCMCIA, EISA, ISA, TC) @@ -230,11 +325,10 @@ CONFIG_HW_HAS_PCI=y CONFIG_PCI=y CONFIG_PCI_DOMAINS=y # CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCI_LEGACY is not set +# CONFIG_PCI_STUB is not set +# CONFIG_PCI_IOV is not set CONFIG_MMU=y - -# -# PCCARD (PCMCIA/CardBus) support -# # CONFIG_PCCARD is not set # CONFIG_HOTPLUG_PCI is not set @@ -242,8 +336,9 @@ CONFIG_MMU=y # Executable file formats # CONFIG_BINFMT_ELF=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +# CONFIG_HAVE_AOUT is not set # CONFIG_BINFMT_MISC is not set -# CONFIG_BUILD_ELF64 is not set CONFIG_MIPS32_COMPAT=y CONFIG_COMPAT=y CONFIG_SYSVIPC_COMPAT=y @@ -255,13 +350,10 @@ CONFIG_BINFMT_ELF32=y # Power management options # CONFIG_PM=y -# CONFIG_PM_LEGACY is not set # CONFIG_PM_DEBUG is not set - -# -# Networking -# +# CONFIG_PM_RUNTIME is not set CONFIG_NET=y +CONFIG_COMPAT_NETLINK_MESSAGES=y # # Networking options @@ -273,6 +365,8 @@ CONFIG_XFRM=y CONFIG_XFRM_USER=m # CONFIG_XFRM_SUB_POLICY is not set 
CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=m CONFIG_NET_KEY=y CONFIG_NET_KEY_MIGRATE=y CONFIG_INET=y @@ -292,19 +386,40 @@ CONFIG_IP_PNP=y # CONFIG_INET_ESP is not set # CONFIG_INET_IPCOMP is not set # CONFIG_INET_XFRM_TUNNEL is not set -# CONFIG_INET_TUNNEL is not set +CONFIG_INET_TUNNEL=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m CONFIG_INET_XFRM_MODE_BEET=m +CONFIG_INET_LRO=y CONFIG_INET_DIAG=y CONFIG_INET_TCP_DIAG=y # CONFIG_TCP_CONG_ADVANCED is not set CONFIG_TCP_CONG_CUBIC=y CONFIG_DEFAULT_TCP_CONG="cubic" CONFIG_TCP_MD5SIG=y -# CONFIG_IPV6 is not set -# CONFIG_INET6_XFRM_TUNNEL is not set -# CONFIG_INET6_TUNNEL is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_INET6_XFRM_MODE_TRANSPORT=m +CONFIG_INET6_XFRM_MODE_TUNNEL=m +CONFIG_INET6_XFRM_MODE_BEET=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_PIMSM_V2=y CONFIG_NETWORK_SECMARK=y # CONFIG_NETFILTER is not set # CONFIG_IP_DCCP is not set @@ -314,9 +429,11 @@ CONFIG_IP_SCTP=m # CONFIG_SCTP_HMAC_NONE is not set # CONFIG_SCTP_HMAC_SHA1 is not set CONFIG_SCTP_HMAC_MD5=y +# CONFIG_RDS is not set # CONFIG_TIPC is not set # CONFIG_ATM is not set # CONFIG_BRIDGE is not set +# CONFIG_NET_DSA is not set # CONFIG_VLAN_8021Q is not set # CONFIG_DECNET is not set # CONFIG_LLC2 is not set @@ -326,12 +443,9 @@ CONFIG_SCTP_HMAC_MD5=y # CONFIG_LAPB is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set - -# -# QoS and/or fair queueing -# +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set CONFIG_NET_SCHED=y -CONFIG_NET_SCH_FIFO=y # # Queueing/Scheduling @@ -340,7 +454,7 @@ CONFIG_NET_SCH_CBQ=m CONFIG_NET_SCH_HTB=m CONFIG_NET_SCH_HFSC=m CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_RR=m +CONFIG_NET_SCH_MULTIQ=y CONFIG_NET_SCH_RED=m CONFIG_NET_SCH_SFQ=m CONFIG_NET_SCH_TEQL=m @@ -348,6 +462,7 @@ CONFIG_NET_SCH_TBF=m CONFIG_NET_SCH_GRED=m CONFIG_NET_SCH_DSMARK=m CONFIG_NET_SCH_NETEM=m +# CONFIG_NET_SCH_DRR is not set CONFIG_NET_SCH_INGRESS=m # @@ -364,41 +479,63 @@ CONFIG_NET_CLS_U32=m CONFIG_CLS_U32_MARK=y CONFIG_NET_CLS_RSVP=m CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y # CONFIG_NET_EMATCH is not set CONFIG_NET_CLS_ACT=y CONFIG_NET_ACT_POLICE=y CONFIG_NET_ACT_GACT=m CONFIG_GACT_PROB=y CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_NAT=m CONFIG_NET_ACT_PEDIT=m # CONFIG_NET_ACT_SIMP is not set -CONFIG_NET_CLS_POLICE=y +CONFIG_NET_ACT_SKBEDIT=m # CONFIG_NET_CLS_IND is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set # # Network testing # # CONFIG_NET_PKTGEN is not set # CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set # CONFIG_IRDA is not set # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set - -# -# Wireless -# -CONFIG_CFG80211=m +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y CONFIG_WIRELESS_EXT=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_WEXT_SPY=y +CONFIG_WEXT_PRIV=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_REG_DEBUG is not set +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_WIRELESS_OLD_REGULATORY is not set +CONFIG_CFG80211_WEXT=y +CONFIG_WIRELESS_EXT_SYSFS=y +CONFIG_LIB80211=m 
+CONFIG_LIB80211_CRYPT_WEP=m +CONFIG_LIB80211_CRYPT_CCMP=m +CONFIG_LIB80211_CRYPT_TKIP=m +# CONFIG_LIB80211_DEBUG is not set CONFIG_MAC80211=m -# CONFIG_MAC80211_DEBUG is not set -CONFIG_IEEE80211=m -# CONFIG_IEEE80211_DEBUG is not set -CONFIG_IEEE80211_CRYPT_WEP=m -CONFIG_IEEE80211_CRYPT_CCMP=m -CONFIG_IEEE80211_CRYPT_TKIP=m -CONFIG_IEEE80211_SOFTMAC=m -# CONFIG_IEEE80211_SOFTMAC_DEBUG is not set +CONFIG_MAC80211_RC_PID=y +CONFIG_MAC80211_RC_MINSTREL=y +# CONFIG_MAC80211_RC_DEFAULT_PID is not set +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +# CONFIG_MAC80211_DEBUG_MENU is not set +# CONFIG_WIMAX is not set CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y # CONFIG_NET_9P is not set # @@ -408,9 +545,13 @@ CONFIG_RFKILL=m # # Generic Driver Options # +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +# CONFIG_DEVTMPFS is not set CONFIG_STANDALONE=y CONFIG_PREVENT_FIRMWARE_BUILD=y CONFIG_FW_LOADER=y +CONFIG_FIRMWARE_IN_KERNEL=y +CONFIG_EXTRA_FIRMWARE="" # CONFIG_SYS_HYPERVISOR is not set CONFIG_CONNECTOR=m # CONFIG_MTD is not set @@ -423,14 +564,19 @@ CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_COW_COMMON is not set CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=m +# CONFIG_BLK_DEV_DRBD is not set # CONFIG_BLK_DEV_NBD is not set +CONFIG_BLK_DEV_OSD=m # CONFIG_BLK_DEV_SX8 is not set # CONFIG_BLK_DEV_RAM is not set CONFIG_CDROM_PKTCDVD=m CONFIG_CDROM_PKTCDVD_BUFFERS=8 # CONFIG_CDROM_PKTCDVD_WCACHE is not set CONFIG_ATA_OVER_ETH=m +# CONFIG_BLK_DEV_HD is not set # CONFIG_MISC_DEVICES is not set +CONFIG_EEPROM_93CX6=m +CONFIG_HAVE_IDE=y # CONFIG_IDE is not set # @@ -453,10 +599,6 @@ CONFIG_BLK_DEV_SR=m CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_CHR_DEV_SCH=m - -# -# Some SCSI devices (e.g. 
CD jukebox) support multiple LUNs -# # CONFIG_SCSI_MULTI_LUN is not set CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y @@ -471,11 +613,18 @@ CONFIG_SCSI_FC_ATTRS=y CONFIG_SCSI_ISCSI_ATTRS=m CONFIG_SCSI_SAS_ATTRS=m CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_HOST_SMP=y # CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set +# CONFIG_SCSI_SRP_ATTRS is not set CONFIG_SCSI_LOWLEVEL=y # CONFIG_ISCSI_TCP is not set +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_BE2ISCSI=m # CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m # CONFIG_SCSI_3W_9XXX is not set +CONFIG_SCSI_3W_SAS=m # CONFIG_SCSI_ACARD is not set # CONFIG_SCSI_AACRAID is not set # CONFIG_SCSI_AIC7XXX is not set @@ -483,11 +632,21 @@ CONFIG_SCSI_LOWLEVEL=y # CONFIG_SCSI_AIC79XX is not set CONFIG_SCSI_AIC94XX=m # CONFIG_AIC94XX_DEBUG is not set +CONFIG_SCSI_MVSAS=m +# CONFIG_SCSI_MVSAS_DEBUG is not set +CONFIG_SCSI_DPT_I2O=m +# CONFIG_SCSI_ADVANSYS is not set # CONFIG_SCSI_ARCMSR is not set # CONFIG_MEGARAID_NEWGEN is not set # CONFIG_MEGARAID_LEGACY is not set # CONFIG_MEGARAID_SAS is not set +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +# CONFIG_SCSI_MPT2SAS_LOGGING is not set # CONFIG_SCSI_HPTIOP is not set +CONFIG_LIBFC=m +# CONFIG_LIBFCOE is not set +# CONFIG_FCOE is not set # CONFIG_SCSI_DMX3191D is not set # CONFIG_SCSI_FUTURE_DOMAIN is not set # CONFIG_SCSI_IPS is not set @@ -502,16 +661,31 @@ CONFIG_SCSI_QLOGIC_1280=y # CONFIG_SCSI_DC395x is not set # CONFIG_SCSI_DC390T is not set # CONFIG_SCSI_DEBUG is not set +CONFIG_SCSI_PMCRAID=m +# CONFIG_SCSI_PM8001 is not set # CONFIG_SCSI_SRP is not set +CONFIG_SCSI_BFA_FC=m +CONFIG_SCSI_DH=m +CONFIG_SCSI_DH_RDAC=m +CONFIG_SCSI_DH_HP_SW=m +CONFIG_SCSI_DH_EMC=m +CONFIG_SCSI_DH_ALUA=m +CONFIG_SCSI_OSD_INITIATOR=m +CONFIG_SCSI_OSD_ULD=m +CONFIG_SCSI_OSD_DPRINT_SENSE=1 +# CONFIG_SCSI_OSD_DEBUG is not set # CONFIG_ATA is not set CONFIG_MD=y CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y CONFIG_MD_LINEAR=m CONFIG_MD_RAID0=y CONFIG_MD_RAID1=y CONFIG_MD_RAID10=m CONFIG_MD_RAID456=y -CONFIG_MD_RAID5_RESHAPE=y +# CONFIG_MULTICORE_RAID456 is not set +CONFIG_MD_RAID6_PQ=y +# CONFIG_ASYNC_RAID6_TEST is not set CONFIG_MD_MULTIPATH=m CONFIG_MD_FAULTY=m CONFIG_BLK_DEV_DM=m @@ -519,36 +693,39 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m -CONFIG_DM_MULTIPATH_EMC=m -CONFIG_DM_MULTIPATH_RDAC=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m # CONFIG_DM_DELAY is not set +CONFIG_DM_UEVENT=y +# CONFIG_FUSION is not set # -# Fusion MPT device support +# IEEE 1394 (FireWire) support # -# CONFIG_FUSION is not set -# CONFIG_FUSION_SPI is not set -# CONFIG_FUSION_FC is not set -# CONFIG_FUSION_SAS is not set # -# IEEE 1394 (FireWire) support +# You can enable one or both FireWire driver stacks. +# + +# +# The newer stack is recommended. 
# # CONFIG_FIREWIRE is not set # CONFIG_IEEE1394 is not set # CONFIG_I2O is not set CONFIG_NETDEVICES=y -CONFIG_NETDEVICES_MULTIQUEUE=y CONFIG_IFB=m # CONFIG_DUMMY is not set # CONFIG_BONDING is not set CONFIG_MACVLAN=m # CONFIG_EQUALIZER is not set # CONFIG_TUN is not set +CONFIG_VETH=m # CONFIG_ARCNET is not set -CONFIG_PHYLIB=m +CONFIG_PHYLIB=y # # MII PHY device drivers @@ -562,23 +739,51 @@ CONFIG_VITESSE_PHY=m CONFIG_SMSC_PHY=m # CONFIG_BROADCOM_PHY is not set CONFIG_ICPLUS_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_NATIONAL_PHY=m +CONFIG_STE10XP=m +CONFIG_LSI_ET1011C_PHY=m # CONFIG_FIXED_PHY is not set +CONFIG_MDIO_BITBANG=m CONFIG_NET_ETHERNET=y CONFIG_MII=y CONFIG_AX88796=m +CONFIG_AX88796_93CX6=y CONFIG_SGI_IOC3_ETH=y # CONFIG_HAPPYMEAL is not set # CONFIG_SUNGEM is not set # CONFIG_CASSINI is not set # CONFIG_NET_VENDOR_3COM is not set +CONFIG_SMC91X=m # CONFIG_DM9000 is not set +CONFIG_ETHOC=m +CONFIG_SMSC911X=m +CONFIG_DNET=m # CONFIG_NET_TULIP is not set # CONFIG_HP100 is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set # CONFIG_NET_PCI is not set +CONFIG_B44=m +CONFIG_B44_PCI_AUTOSELECT=y +CONFIG_B44_PCICORE_AUTOSELECT=y +CONFIG_B44_PCI=y +CONFIG_KS8842=m +CONFIG_KS8851_MLL=m +CONFIG_ATL2=m CONFIG_NETDEV_1000=y # CONFIG_ACENIC is not set # CONFIG_DL2K is not set # CONFIG_E1000 is not set +CONFIG_E1000E=m +CONFIG_IP1000=m +CONFIG_IGB=m +CONFIG_IGBVF=m # CONFIG_NS83820 is not set # CONFIG_HAMACHI is not set # CONFIG_YELLOWFIN is not set @@ -588,24 +793,75 @@ CONFIG_NETDEV_1000=y # CONFIG_SKY2 is not set CONFIG_VIA_VELOCITY=m # CONFIG_TIGON3 is not set -# CONFIG_BNX2 is not set +CONFIG_BNX2=m +CONFIG_CNIC=m CONFIG_QLA3XXX=m # CONFIG_ATL1 is not set +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_JME=m CONFIG_NETDEV_10000=y +CONFIG_MDIO=m # CONFIG_CHELSIO_T1 is not set +CONFIG_CHELSIO_T3_DEPENDS=y CONFIG_CHELSIO_T3=m +CONFIG_ENIC=m +CONFIG_IXGBE=m # CONFIG_IXGB is not set # CONFIG_S2IO is not set +CONFIG_VXGE=m +# CONFIG_VXGE_DEBUG_TRACE_ALL is not set # CONFIG_MYRI10GE is not set CONFIG_NETXEN_NIC=m -# CONFIG_MLX4_CORE is not set +CONFIG_NIU=m +CONFIG_MLX4_EN=m +CONFIG_MLX4_CORE=m +# CONFIG_MLX4_DEBUG is not set +CONFIG_TEHUTI=m +CONFIG_BNX2X=m +CONFIG_QLGE=m +CONFIG_SFC=m +CONFIG_BE2NET=m # CONFIG_TR is not set - -# -# Wireless LAN -# -# CONFIG_WLAN_PRE80211 is not set -CONFIG_WLAN_80211=y +CONFIG_WLAN=y +CONFIG_LIBERTAS_THINFIRM=m +CONFIG_ATMEL=m +CONFIG_PCI_ATMEL=m +CONFIG_PRISM54=m +CONFIG_RTL8180=m +CONFIG_ADM8211=m +# CONFIG_MAC80211_HWSIM is not set +CONFIG_MWL8K=m +CONFIG_ATH_COMMON=m +# CONFIG_ATH_DEBUG is not set +CONFIG_ATH5K=m +# CONFIG_ATH5K_DEBUG is not set +CONFIG_ATH9K_HW=m +CONFIG_ATH9K_COMMON=m +CONFIG_ATH9K=m +CONFIG_B43=m +CONFIG_B43_PCI_AUTOSELECT=y +CONFIG_B43_PCICORE_AUTOSELECT=y +CONFIG_B43_PHY_LP=y +CONFIG_B43_LEDS=y +CONFIG_B43_HWRNG=y +# CONFIG_B43_DEBUG is not set +CONFIG_B43LEGACY=m +CONFIG_B43LEGACY_PCI_AUTOSELECT=y +CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y +CONFIG_B43LEGACY_LEDS=y +CONFIG_B43LEGACY_HWRNG=y +# CONFIG_B43LEGACY_DEBUG is not set +CONFIG_B43LEGACY_DMA=y +CONFIG_B43LEGACY_PIO=y +CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y +# CONFIG_B43LEGACY_DMA_MODE is not set +# CONFIG_B43LEGACY_PIO_MODE is not set +CONFIG_HOSTAP=m +CONFIG_HOSTAP_FIRMWARE=y +CONFIG_HOSTAP_FIRMWARE_NVRAM=y +CONFIG_HOSTAP_PLX=m 
+CONFIG_HOSTAP_PCI=m CONFIG_IPW2100=m CONFIG_IPW2100_MONITOR=y CONFIG_IPW2100_DEBUG=y @@ -615,38 +871,57 @@ CONFIG_IPW2200_RADIOTAP=y CONFIG_IPW2200_PROMISCUOUS=y CONFIG_IPW2200_QOS=y CONFIG_IPW2200_DEBUG=y +CONFIG_LIBIPW=m +# CONFIG_LIBIPW_DEBUG is not set +CONFIG_IWLWIFI=m +CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT=y +# CONFIG_IWLWIFI_DEBUG is not set +CONFIG_IWLAGN=m +CONFIG_IWL4965=y +CONFIG_IWL5000=y +CONFIG_IWL3945=m +CONFIG_IWL3945_SPECTRUM_MEASUREMENT=y CONFIG_LIBERTAS=m # CONFIG_LIBERTAS_DEBUG is not set CONFIG_HERMES=m +# CONFIG_HERMES_CACHE_FW_ON_INIT is not set CONFIG_PLX_HERMES=m CONFIG_TMD_HERMES=m CONFIG_NORTEL_HERMES=m CONFIG_PCI_HERMES=m -CONFIG_ATMEL=m -CONFIG_PCI_ATMEL=m -CONFIG_PRISM54=m -CONFIG_HOSTAP=m -CONFIG_HOSTAP_FIRMWARE=y -CONFIG_HOSTAP_FIRMWARE_NVRAM=y -CONFIG_HOSTAP_PLX=m -CONFIG_HOSTAP_PCI=m -CONFIG_BCM43XX=m -CONFIG_BCM43XX_DEBUG=y -CONFIG_BCM43XX_DMA=y -CONFIG_BCM43XX_PIO=y -CONFIG_BCM43XX_DMA_AND_PIO_MODE=y -# CONFIG_BCM43XX_DMA_MODE is not set -# CONFIG_BCM43XX_PIO_MODE is not set +CONFIG_P54_COMMON=m +CONFIG_P54_PCI=m +CONFIG_P54_LEDS=y +CONFIG_RT2X00=m +CONFIG_RT2400PCI=m +CONFIG_RT2500PCI=m +CONFIG_RT61PCI=m +CONFIG_RT2800PCI_PCI=m +CONFIG_RT2800PCI=m +CONFIG_RT2800_LIB=m +CONFIG_RT2X00_LIB_PCI=m +CONFIG_RT2X00_LIB=m +CONFIG_RT2X00_LIB_HT=y +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +# CONFIG_RT2X00_DEBUG is not set +CONFIG_WL12XX=m +CONFIG_WL1251=m + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# # CONFIG_WAN is not set # CONFIG_FDDI is not set # CONFIG_HIPPI is not set # CONFIG_PPP is not set # CONFIG_SLIP is not set # CONFIG_NET_FC is not set -# CONFIG_SHAPER is not set # CONFIG_NETCONSOLE is not set # CONFIG_NETPOLL is not set # CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_VMXNET3 is not set # CONFIG_ISDN is not set # CONFIG_PHONE is not set @@ -664,13 +939,16 @@ CONFIG_SERIO_SERPORT=y # CONFIG_SERIO_PCIPS2 is not set CONFIG_SERIO_LIBPS2=m CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m # CONFIG_GAMEPORT is not set # # Character devices # # CONFIG_VT is not set +CONFIG_DEVKMEM=y # CONFIG_SERIAL_NONSTANDARD is not set +CONFIG_NOZOMI=m # # Serial drivers @@ -693,95 +971,258 @@ CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_JSM is not set CONFIG_UNIX98_PTYS=y +CONFIG_DEVPTS_MULTIPLE_INSTANCES=y CONFIG_LEGACY_PTYS=y CONFIG_LEGACY_PTY_COUNT=256 # CONFIG_IPMI_HANDLER is not set -# CONFIG_WATCHDOG is not set CONFIG_HW_RANDOM=m -# CONFIG_RTC is not set +CONFIG_HW_RANDOM_TIMERIOMEM=m # CONFIG_R3964 is not set # CONFIG_APPLICOM is not set -# CONFIG_DRM is not set # CONFIG_RAW_DRIVER is not set # CONFIG_TCG_TPM is not set CONFIG_DEVPORT=y -# CONFIG_I2C is not set +CONFIG_I2C=m +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOBIT=m +CONFIG_I2C_ALGOPCA=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +CONFIG_I2C_ALI1535=m +CONFIG_I2C_ALI1563=m +CONFIG_I2C_ALI15X3=m +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD8111=m +CONFIG_I2C_I801=m +CONFIG_I2C_ISCH=m +CONFIG_I2C_PIIX4=m +CONFIG_I2C_NFORCE2=m +CONFIG_I2C_SIS5595=m +CONFIG_I2C_SIS630=m +CONFIG_I2C_SIS96X=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m # -# SPI support +# I2C system bus drivers (mostly embedded / system-on-chip) # +CONFIG_I2C_OCORES=m +CONFIG_I2C_SIMTEC=m + +# +# External I2C/SMBus adapter drivers +# +CONFIG_I2C_PARPORT_LIGHT=m +CONFIG_I2C_TAOS_EVM=m + +# +# Other I2C/SMBus bus drivers +# +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_STUB=m + +# +# 
Miscellaneous I2C Chip support +# +CONFIG_SENSORS_TSL2550=m +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CHIP is not set # CONFIG_SPI is not set -# CONFIG_SPI_MASTER is not set + +# +# PPS support +# +CONFIG_PPS=m +# CONFIG_PPS_DEBUG is not set # CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set # CONFIG_HWMON is not set +CONFIG_THERMAL=m +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y # -# Multifunction device drivers -# -# CONFIG_MFD_SM501 is not set +# Sonics Silicon Backplane +# +CONFIG_SSB=m +CONFIG_SSB_SPROM=y +CONFIG_SSB_PCIHOST_POSSIBLE=y +CONFIG_SSB_PCIHOST=y +CONFIG_SSB_B43_PCI_BRIDGE=y +# CONFIG_SSB_SILENT is not set +# CONFIG_SSB_DEBUG is not set +CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y +CONFIG_SSB_DRIVER_PCICORE=y +# CONFIG_SSB_DRIVER_MIPS is not set # -# Multimedia devices +# Multifunction device drivers # -# CONFIG_VIDEO_DEV is not set -# CONFIG_DVB_CORE is not set -# CONFIG_DAB is not set +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_WM8400 is not set +CONFIG_MFD_WM8350=m +CONFIG_MFD_WM8350_I2C=m +CONFIG_MFD_PCF50633=m +CONFIG_PCF50633_ADC=m +CONFIG_PCF50633_GPIO=m +CONFIG_AB3100_CORE=m +CONFIG_AB3100_OTP=m +# CONFIG_REGULATOR is not set +# CONFIG_MEDIA_SUPPORT is not set # # Graphics support # -# CONFIG_BACKLIGHT_LCD_SUPPORT is not set - -# -# Display device support -# -# CONFIG_DISPLAY_SUPPORT is not set +# CONFIG_VGA_ARB is not set +# CONFIG_DRM is not set # CONFIG_VGASTATE is not set # CONFIG_VIDEO_OUTPUT_CONTROL is not set # CONFIG_FB is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set # -# Sound +# Display device support # +# CONFIG_DISPLAY_SUPPORT is not set # CONFIG_SOUND is not set CONFIG_USB_SUPPORT=y CONFIG_USB_ARCH_HAS_HCD=y CONFIG_USB_ARCH_HAS_OHCI=y CONFIG_USB_ARCH_HAS_EHCI=y # CONFIG_USB is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set # -# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' +# Enable Host or Gadget support to see Inventra options # # -# USB Gadget Support +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may # # CONFIG_USB_GADGET is not set + +# +# OTG and related infrastructure +# +# CONFIG_UWB is not set # CONFIG_MMC is not set -# CONFIG_NEW_LEDS is not set -# CONFIG_INFINIBAND is not set -# CONFIG_RTC_CLASS is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=m + +# +# LED drivers +# +CONFIG_LEDS_LP3944=m +CONFIG_LEDS_PCA955X=m +CONFIG_LEDS_WM8350=m +CONFIG_LEDS_BD2802=m + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m # -# DMA Engine support +# iptables trigger is under Netfilter config (LED target) # -# CONFIG_DMA_ENGINE is not set +# CONFIG_ACCESSIBILITY is not set +# CONFIG_INFINIBAND is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set # -# DMA Clients +# RTC interfaces # +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set # -# DMA Devices +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# 
CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC drivers +# + # +# Platform RTC drivers +# +# CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +CONFIG_RTC_DRV_M48T35=y +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set +# CONFIG_RTC_DRV_WM8350 is not set +# CONFIG_RTC_DRV_PCF50633 is not set +CONFIG_RTC_DRV_AB3100=m # -# Userspace I/O +# on-CPU RTC drivers # +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set CONFIG_UIO=y # CONFIG_UIO_CIF is not set +# CONFIG_UIO_PDRV is not set +# CONFIG_UIO_PDRV_GENIRQ is not set +CONFIG_UIO_SMX=m +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m + +# +# TI VLYNQ +# +# CONFIG_STAGING is not set # # File systems @@ -792,36 +1233,58 @@ CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y # CONFIG_EXT2_FS_XIP is not set CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set CONFIG_EXT3_FS_XATTR=y CONFIG_EXT3_FS_POSIX_ACL=y CONFIG_EXT3_FS_SECURITY=y -# CONFIG_EXT4DEV_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_XATTR=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set CONFIG_JBD=y -CONFIG_JBD_DEBUG=y +CONFIG_JBD2=y CONFIG_FS_MBCACHE=y # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set CONFIG_FS_POSIX_ACL=y CONFIG_XFS_FS=m CONFIG_XFS_QUOTA=y -CONFIG_XFS_SECURITY=y CONFIG_XFS_POSIX_ACL=y # CONFIG_XFS_RT is not set +# CONFIG_XFS_DEBUG is not set # CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set -# CONFIG_MINIX_FS is not set -# CONFIG_ROMFS_FS is not set +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_NILFS2_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y # CONFIG_QUOTA is not set +CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QUOTACTL=y -CONFIG_DNOTIFY=y CONFIG_AUTOFS_FS=m # CONFIG_AUTOFS4_FS is not set CONFIG_FUSE_FS=m +CONFIG_CUSE=m CONFIG_GENERIC_ACL=y # +# Caches +# +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set + +# # CD-ROM/DVD Filesystems # # CONFIG_ISO9660_FS is not set @@ -840,16 +1303,13 @@ CONFIG_GENERIC_ACL=y CONFIG_PROC_FS=y CONFIG_PROC_KCORE=y CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y CONFIG_SYSFS=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y # CONFIG_HUGETLB_PAGE is not set -CONFIG_RAMFS=y CONFIG_CONFIGFS_FS=m - -# -# Miscellaneous filesystems -# +CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set # CONFIG_ECRYPT_FS is not set @@ -859,28 +1319,32 @@ CONFIG_CONFIGFS_FS=m # CONFIG_BFS_FS is not set # CONFIG_EFS_FS is not set # CONFIG_CRAMFS is not set +CONFIG_SQUASHFS=m +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 # CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +CONFIG_OMFS_FS=m # CONFIG_HPFS_FS is not 
set # CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set # CONFIG_SYSV_FS is not set # CONFIG_UFS_FS is not set - -# -# Network File Systems -# +CONFIG_EXOFS_FS=m +# CONFIG_EXOFS_DEBUG is not set +CONFIG_NETWORK_FILESYSTEMS=y CONFIG_NFS_FS=y CONFIG_NFS_V3=y # CONFIG_NFS_V3_ACL is not set # CONFIG_NFS_V4 is not set -# CONFIG_NFS_DIRECTIO is not set -# CONFIG_NFSD is not set # CONFIG_ROOT_NFS is not set +# CONFIG_NFSD is not set CONFIG_LOCKD=y CONFIG_LOCKD_V4=y +CONFIG_EXPORTFS=m CONFIG_NFS_COMMON=y CONFIG_SUNRPC=y CONFIG_SUNRPC_GSS=y -# CONFIG_SUNRPC_BIND34 is not set CONFIG_RPCSEC_GSS_KRB5=y # CONFIG_RPCSEC_GSS_SPKM3 is not set # CONFIG_SMB_FS is not set @@ -910,35 +1374,37 @@ CONFIG_SGI_PARTITION=y # CONFIG_KARMA_PARTITION is not set # CONFIG_EFI_PARTITION is not set # CONFIG_SYSV68_PARTITION is not set - -# -# Native Language Support -# # CONFIG_NLS is not set - -# -# Distributed Lock Manager -# CONFIG_DLM=m # CONFIG_DLM_DEBUG is not set # -# Profiling support -# -# CONFIG_PROFILING is not set - -# # Kernel hacking # CONFIG_TRACE_IRQFLAGS_SUPPORT=y # CONFIG_PRINTK_TIME is not set +CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=2048 # CONFIG_MAGIC_SYSRQ is not set +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_UNUSED_SYMBOLS is not set # CONFIG_DEBUG_FS is not set # CONFIG_HEADERS_CHECK is not set # CONFIG_DEBUG_KERNEL is not set -CONFIG_CROSSCOMPILE=y +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# CONFIG_SYSCTL_SYSCALL_CHECK is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_EARLY_PRINTK=y # CONFIG_CMDLINE_BOOL is not set # @@ -947,65 +1413,140 @@ CONFIG_CROSSCOMPILE=y CONFIG_KEYS=y CONFIG_KEYS_DEBUG_PROC_KEYS=y # CONFIG_SECURITY is not set -CONFIG_XOR_BLOCKS=m -CONFIG_ASYNC_CORE=m -CONFIG_ASYNC_MEMCPY=m -CONFIG_ASYNC_XOR=m +CONFIG_SECURITYFS=y +# CONFIG_DEFAULT_SECURITY_SELINUX is not set +# CONFIG_DEFAULT_SECURITY_SMACK is not set +# CONFIG_DEFAULT_SECURITY_TOMOYO is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_XOR_BLOCKS=y +CONFIG_ASYNC_CORE=y +CONFIG_ASYNC_MEMCPY=y +CONFIG_ASYNC_XOR=y +CONFIG_ASYNC_PQ=y +CONFIG_ASYNC_RAID6_RECOV=y CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_ABLKCIPHER=m +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=m +CONFIG_CRYPTO_AEAD2=y CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=m +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP=y CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_GF128MUL=m +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_AUTHENC=m +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_SEQIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CTR=m +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=m +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=m + +# +# Hash modes +# CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_VMAC=m + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=m +CONFIG_CRYPTO_GHASH=m CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m 
+CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA1=m CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m -CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_TGR192=m -CONFIG_CRYPTO_GF128MUL=m -CONFIG_CRYPTO_ECB=m -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_LRW=m -CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_DES=y -CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_TWOFISH_COMMON=m -CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_WP512=m + +# +# Ciphers +# CONFIG_CRYPTO_AES=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m + +# +# Compression +# CONFIG_CRYPTO_DEFLATE=m -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_CRC32C=m -CONFIG_CRYPTO_CAMELLIA=m -# CONFIG_CRYPTO_TEST is not set +CONFIG_CRYPTO_ZLIB=m +CONFIG_CRYPTO_LZO=m + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_DEV_HIFN_795X=m +# CONFIG_CRYPTO_DEV_HIFN_795X_RNG is not set +# CONFIG_BINARY_PRINTF is not set # # Library routines # CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y CONFIG_CRC_CCITT=m -# CONFIG_CRC16 is not set -# CONFIG_CRC_ITU_T is not set +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=m +CONFIG_CRC_ITU_T=m CONFIG_CRC32=y -# CONFIG_CRC7 is not set +CONFIG_CRC7=m CONFIG_LIBCRC32C=m CONFIG_ZLIB_INFLATE=m CONFIG_ZLIB_DEFLATE=m -CONFIG_PLIST=y +CONFIG_LZO_COMPRESS=m +CONFIG_LZO_DECOMPRESS=m CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 9c187a64649..758ad426c57 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -287,9 +287,9 @@ static inline int __cpu_has_fpu(void) static inline void cpu_probe_vmbits(struct cpuinfo_mips *c) { #ifdef __NEED_VMBITS_PROBE - write_c0_entryhi(0x3ffffffffffff000ULL); + write_c0_entryhi(0x3fffffffffffe000ULL); back_to_back_c0_hazard(); - c->vmbits = fls64(read_c0_entryhi() & 0x3ffffffffffff000ULL); + c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL); #endif } diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 338dfe8ed00..31b204b26ba 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1501,6 +1501,7 @@ void __cpuinit per_cpu_trap_init(void) cp0_perfcount_irq = -1; } else { cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; + cp0_compare_irq_shift = cp0_compare_irq; cp0_perfcount_irq = -1; } diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c index 94e05e5733c..e06f1af760a 100644 --- a/arch/mips/mm/c-octeon.c +++ b/arch/mips/mm/c-octeon.c @@ -174,7 +174,7 @@ static void octeon_flush_cache_page(struct vm_area_struct *vma, * Probe Octeon's caches * */ -static void __devinit probe_octeon(void) +static void __cpuinit probe_octeon(void) { unsigned long icache_size; unsigned long dcache_size; @@ -235,7 +235,7 @@ static void __devinit probe_octeon(void) * Setup the Octeon cache flush routines * */ -void __devinit octeon_cache_init(void) +void __cpuinit octeon_cache_init(void) { extern unsigned long ebase; extern char except_vec2_octeon; diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 102b2dfa542..e716cafc346 100644 --- a/arch/mips/mm/cache.c 
+++ b/arch/mips/mm/cache.c @@ -155,7 +155,7 @@ static inline void setup_protection_map(void) protection_map[15] = PAGE_SHARED; } -void __devinit cpu_cache_init(void) +void __cpuinit cpu_cache_init(void) { if (cpu_has_3k_cache) { extern void __weak r3k_cache_init(void); diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c index 46f00691f44..31e2583ec62 100644 --- a/arch/mips/sni/rm200.c +++ b/arch/mips/sni/rm200.c @@ -404,7 +404,7 @@ void __init sni_rm200_i8259_irqs(void) if (!rm200_pic_master) return; rm200_pic_slave = ioremap_nocache(0x160000a0, 4); - if (!rm200_pic_master) { + if (!rm200_pic_slave) { iounmap(rm200_pic_master); return; } diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index fb37ac52e46..35c827e94e3 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -468,7 +468,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); - tracehook_signal_handler(sig, info, ka, regs, 0); + tracehook_signal_handler(sig, info, ka, regs, + test_thread_flag(TIF_SINGLESTEP) || + test_thread_flag(TIF_BLOCKSTEP)); return 1; } diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c index 282d9306361..1ec06576f61 100644 --- a/arch/powerpc/mm/tlb_hash64.c +++ b/arch/powerpc/mm/tlb_hash64.c @@ -63,15 +63,21 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, if (huge) { #ifdef CONFIG_HUGETLB_PAGE psize = get_slice_psize(mm, addr); + /* Mask the address for the correct page size */ + addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1); #else BUG(); psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */ #endif - } else + } else { psize = pte_pagesize_index(mm, addr, pte); + /* Mask the address for the standard page size. If we + * have a 64k page kernel, but the hardware does not + * support 64k pages, this might be different from the + * hardware page size encoded in the slice table. */ + addr &= PAGE_MASK; + } - /* Mask the address for the correct page size */ - addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1); /* Build full vaddr */ if (!is_kernel_addr(addr)) { diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c index 21f61b8c445..cc29c0f5300 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c @@ -338,7 +338,8 @@ static void __init mpc85xx_mds_pic_init(void) } mpic = mpic_alloc(np, r.start, - MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, + MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN | + MPIC_BROKEN_FRR_NIRQS, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); of_node_put(np); diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index 04160a4cc69..a15f582300d 100644 --- a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c @@ -46,6 +46,7 @@ smp_85xx_kick_cpu(int nr) __iomem u32 *bptr_vaddr; struct device_node *np; int n = 0; + int ioremappable; WARN_ON (nr < 0 || nr >= NR_CPUS); @@ -59,21 +60,37 @@ smp_85xx_kick_cpu(int nr) return; } + /* + * A secondary core could be in a spinloop in the bootpage + * (0xfffff000), somewhere in highmem, or somewhere in lowmem. + * The bootpage and highmem can be accessed via ioremap(), but + * we need to directly access the spinloop if its in lowmem. 
+ */ + ioremappable = *cpu_rel_addr > virt_to_phys(high_memory); + /* Map the spin table */ - bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY); + if (ioremappable) + bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY); + else + bptr_vaddr = phys_to_virt(*cpu_rel_addr); local_irq_save(flags); out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr); out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start)); + if (!ioremappable) + flush_dcache_range((ulong)bptr_vaddr, + (ulong)(bptr_vaddr + SIZE_BOOT_ENTRY)); + /* Wait a bit for the CPU to ack. */ while ((__secondary_hold_acknowledge != nr) && (++n < 1000)) mdelay(1); local_irq_restore(flags); - iounmap(bptr_vaddr); + if (ioremappable) + iounmap(bptr_vaddr); pr_debug("waited %d msecs for CPU #%d.\n", n, nr); } diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 1ee66db003b..f5f79196721 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c @@ -784,9 +784,13 @@ static void xics_set_cpu_priority(unsigned char cppr) { struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); - BUG_ON(os_cppr->index != 0); + /* + * we only really want to set the priority when there's + * just one cppr value on the stack + */ + WARN_ON(os_cppr->index != 0); - os_cppr->stack[os_cppr->index] = cppr; + os_cppr->stack[0] = cppr; if (firmware_has_feature(FW_FEATURE_LPAR)) lpar_cppr_info(cppr); @@ -821,8 +825,14 @@ void xics_setup_cpu(void) void xics_teardown_cpu(void) { + struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); int cpu = smp_processor_id(); + /* + * we have to reset the cppr index to 0 because we're + * not going to return from the IPI + */ + os_cppr->index = 0; xics_set_cpu_priority(0); /* Clear any pending IPI request */ diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index f2ef4b619ce..c25dfac7dd7 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -293,12 +293,12 @@ struct _lowcore __u64 clock_comparator; /* 0x02d0 */ __u32 machine_flags; /* 0x02d8 */ __u32 ftrace_func; /* 0x02dc */ - __u8 pad_0x02f0[0x0300-0x02f0]; /* 0x02f0 */ + __u8 pad_0x02e0[0x0300-0x02e0]; /* 0x02e0 */ /* Interrupt response block */ __u8 irb[64]; /* 0x0300 */ - __u8 pad_0x0400[0x0e00-0x0400]; /* 0x0400 */ + __u8 pad_0x0340[0x0e00-0x0340]; /* 0x0340 */ /* * 0xe00 contains the address of the IPL Parameter Information diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index 3f7e2a22c7c..f6a389c996c 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -132,7 +132,6 @@ ENTRY(tlb_protection_violation_store) mov #1, r5 call_handle_tlbmiss: - setup_frame_reg mov.l 1f, r0 mov r5, r8 mov.l @r0, r6 @@ -365,6 +364,8 @@ handle_exception: mov.l @k2, k2 ! read out vector and keep in k2 handle_exception_special: + setup_frame_reg + ! Setup return address and jump to exception handler mov.l 7f, r9 ! fetch return address stc r2_bank, r0 ! k2 (vector) diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index 88d28ec3780..e51168064e5 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -540,6 +540,8 @@ void dwarf_free_frame(struct dwarf_frame *frame) mempool_free(frame, dwarf_frame_pool); } +extern void ret_from_irq(void); + /** * dwarf_unwind_stack - unwind the stack * @@ -678,6 +680,24 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc, addr = frame->cfa + reg->addr; frame->return_addr = __raw_readl(addr); + /* + * Ah, the joys of unwinding through interrupts. 
+ * + * Interrupts are tricky - the DWARF info needs to be _really_ + * accurate and unfortunately I'm seeing a lot of bogus DWARF + * info. For example, I've seen interrupts occur in epilogues + * just after the frame pointer (r14) had been restored. The + * problem was that the DWARF info claimed that the CFA could be + * reached by using the value of the frame pointer before it was + * restored. + * + * So until the compiler can be trusted to produce reliable + * DWARF info when it really matters, let's stop unwinding once + * we've calculated the function that was interrupted. + */ + if (prev && prev->pc == (unsigned long)ret_from_irq) + frame->return_addr = 0; + return frame; bail: diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index f0abd58c3a6..2b15ae60c3a 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -70,8 +70,14 @@ ret_from_exception: CFI_STARTPROC simple CFI_DEF_CFA r14, 0 CFI_REL_OFFSET 17, 64 - CFI_REL_OFFSET 15, 0 + CFI_REL_OFFSET 15, 60 CFI_REL_OFFSET 14, 56 + CFI_REL_OFFSET 13, 52 + CFI_REL_OFFSET 12, 48 + CFI_REL_OFFSET 11, 44 + CFI_REL_OFFSET 10, 40 + CFI_REL_OFFSET 9, 36 + CFI_REL_OFFSET 8, 32 preempt_stop() ENTRY(ret_from_irq) ! diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c index 873ebdc4f98..b063eb8b18e 100644 --- a/arch/sh/kernel/ptrace_64.c +++ b/arch/sh/kernel/ptrace_64.c @@ -133,6 +133,8 @@ void user_enable_single_step(struct task_struct *child) struct pt_regs *regs = child->thread.uregs; regs->sr |= SR_SSTEP; /* auto-resetting upon exception */ + + set_tsk_thread_flag(child, TIF_SINGLESTEP); } void user_disable_single_step(struct task_struct *child) @@ -140,6 +142,8 @@ void user_disable_single_step(struct task_struct *child) struct pt_regs *regs = child->thread.uregs; regs->sr &= ~SR_SSTEP; + + clear_tsk_thread_flag(child, TIF_SINGLESTEP); } static int genregs_get(struct task_struct *target, @@ -454,6 +458,8 @@ asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs) asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) { + int step; + if (unlikely(current->audit_context)) audit_syscall_exit(AUDITSC_RESULT(regs->regs[9]), regs->regs[9]); @@ -461,8 +467,9 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) trace_sys_exit(regs, regs->regs[9]); - if (test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, 0); + step = test_thread_flag(TIF_SINGLESTEP); + if (step || test_thread_flag(TIF_SYSCALL_TRACE)) + tracehook_report_syscall_exit(regs, step); } /* Called with interrupts disabled */ diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c index ce76dbdef29..580e97d46ca 100644 --- a/arch/sh/kernel/signal_64.c +++ b/arch/sh/kernel/signal_64.c @@ -118,7 +118,9 @@ static int do_signal(struct pt_regs *regs, sigset_t *oldset) * clear the TS_RESTORE_SIGMASK flag. */ current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - tracehook_signal_handler(signr, &info, &ka, regs, 0); + + tracehook_signal_handler(signr, &info, &ka, regs, + test_thread_flag(TIF_SINGLESTEP)); return 1; } } diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index 2830b415e21..c49865b3071 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -526,7 +526,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, * Set some valid stack frames to give to the child. 
*/ childstack = (struct sparc_stackf __user *) - (sp & ~0x7UL); + (sp & ~0xfUL); parentstack = (struct sparc_stackf __user *) regs->u_regs[UREG_FP]; diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index c3f1cce0e95..cb70476bd8f 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -398,11 +398,11 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp) } else __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6])); - /* Now 8-byte align the stack as this is mandatory in the - * Sparc ABI due to how register windows work. This hides - * the restriction from thread libraries etc. -DaveM + /* Now align the stack as this is mandatory in the Sparc ABI + * due to how register windows work. This hides the + * restriction from thread libraries etc. */ - csp &= ~7UL; + csp &= ~15UL; distance = fp - psp; rval = (csp - distance); diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index ba5b09ad666..ea22cd373c6 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -120,8 +120,8 @@ struct rt_signal_frame32 { }; /* Align macros */ -#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 7) & (~7))) -#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 7) & (~7))) +#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 15) & (~15))) +#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 15) & (~15))) int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) { @@ -420,15 +420,17 @@ static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, uns sp = current->sas_ss_sp + current->sas_ss_size; } + sp -= framesize; + /* Always align the stack frame. This handles two cases. First, * sigaltstack need not be mindful of platform specific stack * alignment. Second, if we took this signal because the stack * is not aligned properly, we'd like to take the signal cleanly * and report that. */ - sp &= ~7UL; + sp &= ~15UL; - return (void __user *)(sp - framesize); + return (void __user *) sp; } static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 7ce1a1005b1..9882df92ba0 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -267,15 +267,17 @@ static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *re sp = current->sas_ss_sp + current->sas_ss_size; } + sp -= framesize; + /* Always align the stack frame. This handles two cases. First, * sigaltstack need not be mindful of platform specific stack * alignment. Second, if we took this signal because the stack * is not aligned properly, we'd like to take the signal cleanly * and report that. */ - sp &= ~7UL; + sp &= ~15UL; - return (void __user *)(sp - framesize); + return (void __user *) sp; } static inline int diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 647afbda7ae..9fa48c30037 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -353,7 +353,7 @@ segv: /* Checks if the fp is valid */ static int invalid_frame_pointer(void __user *fp, int fplen) { - if (((unsigned long) fp) & 7) + if (((unsigned long) fp) & 15) return 1; return 0; } @@ -396,15 +396,17 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs * sp = current->sas_ss_sp + current->sas_ss_size; } + sp -= framesize; + /* Always align the stack frame. This handles two cases. 
First, * sigaltstack need not be mindful of platform specific stack * alignment. Second, if we took this signal because the stack * is not aligned properly, we'd like to take the signal cleanly * and report that. */ - sp &= ~7UL; + sp &= ~15UL; - return (void __user *)(sp - framesize); + return (void __user *) sp; } static inline void diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index 1994d3f5844..f2ad2163109 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -170,10 +170,7 @@ static inline void elf_common_init(struct thread_struct *t, } #define ELF_PLAT_INIT(_r, load_addr) \ -do { \ - elf_common_init(¤t->thread, _r, 0); \ - clear_thread_flag(TIF_IA32); \ -} while (0) + elf_common_init(¤t->thread, _r, 0) #define COMPAT_ELF_PLAT_INIT(regs, load_addr) \ elf_common_init(¤t->thread, regs, __USER_DS) diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index ecb544e6538..e04740f7a0b 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h @@ -11,9 +11,9 @@ #include <linux/irqflags.h> /* entries in ARCH_DLINFO: */ -#ifdef CONFIG_IA32_EMULATION +#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64) # define AT_VECTOR_SIZE_ARCH 2 -#else +#else /* else it's non-compat x86-64 */ # define AT_VECTOR_SIZE_ARCH 1 #endif diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 036d28adf59..af1c5833ff2 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1185,9 +1185,6 @@ static void __init acpi_process_madt(void) if (!error) { acpi_lapic = 1; -#ifdef CONFIG_X86_BIGSMP - generic_bigsmp_probe(); -#endif /* * Parse MADT IO-APIC entries */ @@ -1197,8 +1194,6 @@ static void __init acpi_process_madt(void) acpi_ioapic = 1; smp_found_config = 1; - if (apic->setup_apic_routing) - apic->setup_apic_routing(); } } if (error == -EINVAL) { @@ -1349,14 +1344,6 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = { }, { .callback = force_acpi_ht, - .ident = "ASUS P2B-DS", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), - DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"), - }, - }, - { - .callback = force_acpi_ht, .ident = "ASUS CUR-DLS", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 3987e4408f7..dfca210f6a1 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1641,9 +1641,7 @@ int __init APIC_init_uniprocessor(void) #endif enable_IR_x2apic(); -#ifdef CONFIG_X86_64 default_setup_apic_routing(); -#endif verify_local_APIC(); connect_bsp_APIC(); @@ -1891,21 +1889,6 @@ void __cpuinit generic_processor_info(int apicid, int version) if (apicid > max_physical_apicid) max_physical_apicid = apicid; -#ifdef CONFIG_X86_32 - if (num_processors > 8) { - switch (boot_cpu_data.x86_vendor) { - case X86_VENDOR_INTEL: - if (!APIC_XAPIC(version)) { - def_to_bigsmp = 0; - break; - } - /* If P4 and above fall through */ - case X86_VENDOR_AMD: - def_to_bigsmp = 1; - } - } -#endif - #if defined(CONFIG_SMP) || defined(CONFIG_X86_64) early_per_cpu(x86_cpu_to_apicid, cpu) = apicid; early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid; diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index 1a6559f6768..99d2fe01608 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c @@ -52,7 +52,32 @@ static int __init print_ipi_mode(void) } late_initcall(print_ipi_mode); -void default_setup_apic_routing(void) +void __init 
default_setup_apic_routing(void) +{ + int version = apic_version[boot_cpu_physical_apicid]; + + if (num_possible_cpus() > 8) { + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_INTEL: + if (!APIC_XAPIC(version)) { + def_to_bigsmp = 0; + break; + } + /* If P4 and above fall through */ + case X86_VENDOR_AMD: + def_to_bigsmp = 1; + } + } + +#ifdef CONFIG_X86_BIGSMP + generic_bigsmp_probe(); +#endif + + if (apic->setup_apic_routing) + apic->setup_apic_routing(); +} + +static void setup_apic_flat_routing(void) { #ifdef CONFIG_X86_IO_APIC printk(KERN_INFO @@ -103,7 +128,7 @@ struct apic apic_default = { .init_apic_ldr = default_init_apic_ldr, .ioapic_phys_id_map = default_ioapic_phys_id_map, - .setup_apic_routing = default_setup_apic_routing, + .setup_apic_routing = setup_apic_flat_routing, .multi_timer_check = NULL, .apicid_to_node = default_apicid_to_node, .cpu_to_logical_apicid = default_cpu_to_logical_apicid, diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c index 450fe2064a1..83e9be4778e 100644 --- a/arch/x86/kernel/apic/probe_64.c +++ b/arch/x86/kernel/apic/probe_64.c @@ -67,7 +67,7 @@ void __init default_setup_apic_routing(void) } #endif - if (apic == &apic_flat && num_processors > 8) + if (apic == &apic_flat && num_possible_cpus() > 8) apic = &apic_physflat; printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index f125e5c551c..6e44519960c 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -1356,6 +1356,7 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol) kfree(data->powernow_table); kfree(data); + per_cpu(powernow_data, pol->cpu) = NULL; return 0; } @@ -1375,7 +1376,7 @@ static unsigned int powernowk8_get(unsigned int cpu) int err; if (!data) - return -EINVAL; + return 0; smp_call_function_single(cpu, query_values_on_cpu, &err, true); if (err) diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 40b54ceb68b..a2c1edd2d3a 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -359,13 +359,6 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) x86_init.mpparse.mpc_record(1); } -#ifdef CONFIG_X86_BIGSMP - generic_bigsmp_probe(); -#endif - - if (apic->setup_apic_routing) - apic->setup_apic_routing(); - if (!num_processors) printk(KERN_ERR "MPTABLE: no processors registered!\n"); return num_processors; diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 41a26a82470..126f0b493d0 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -527,6 +527,7 @@ void set_personality_ia32(void) /* Make sure to be in 32bit mode */ set_thread_flag(TIF_IA32); + current->personality |= force_personality32; /* Prepare the first "return" to user space */ current_thread_info()->status |= TS_COMPAT; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 678d0b8c26f..b4e870cbdc6 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1083,9 +1083,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) set_cpu_sibling_map(0); enable_IR_x2apic(); -#ifdef CONFIG_X86_64 default_setup_apic_routing(); -#endif if (smp_sanity_check(max_cpus) < 0) { printk(KERN_INFO "SMP disabled\n"); diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 296aba49472..15578f180e5 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -467,6 +467,9 @@ 
static int pit_ioport_read(struct kvm_io_device *this, return -EOPNOTSUPP; addr &= KVM_PIT_CHANNEL_MASK; + if (addr == 3) + return 0; + s = &pit_state->channels[addr]; mutex_lock(&pit_state->lock); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1ddcad452ad..a1e1bc9d412 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -670,7 +670,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) { static int version; struct pvclock_wall_clock wc; - struct timespec now, sys, boot; + struct timespec boot; if (!wall_clock) return; @@ -685,9 +685,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) * wall clock specified here. guest system time equals host * system time for us, thus we must fill in host boot time here. */ - now = current_kernel_time(); - ktime_get_ts(&sys); - boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys)); + getboottime(&boot); wc.sec = boot.tv_sec; wc.nsec = boot.tv_nsec; @@ -762,6 +760,7 @@ static void kvm_write_guest_time(struct kvm_vcpu *v) local_irq_save(flags); kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp); ktime_get_ts(&ts); + monotonic_to_bootbased(&ts); local_irq_restore(flags); /* With all the info we got, fill in the values */ diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index 71da1bca13c..738e6593799 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -18,7 +18,7 @@ static inline pte_t gup_get_pte(pte_t *ptep) #else /* * With get_user_pages_fast, we walk down the pagetables without taking - * any locks. For this we would like to load the pointers atoimcally, + * any locks. For this we would like to load the pointers atomically, * but that is not possible (without expensive cmpxchg8b) on PAE. What * we do have is the guarantee that a pte will only either go from not * present to present, or present to not present or both -- it will not diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 17b768d0d42..023f4e69a33 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -42,16 +42,13 @@ static const int cfq_hist_divisor = 4; */ #define CFQ_MIN_TT (2) -/* - * Allow merged cfqqs to perform this amount of seeky I/O before - * deciding to break the queues up again. 
- */ -#define CFQQ_COOP_TOUT (HZ) - #define CFQ_SLICE_SCALE (5) #define CFQ_HW_QUEUE_MIN (5) #define CFQ_SERVICE_SHIFT 12 +#define CFQQ_SEEK_THR 8 * 1024 +#define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR) + #define RQ_CIC(rq) \ ((struct cfq_io_context *) (rq)->elevator_private) #define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private2) @@ -137,7 +134,6 @@ struct cfq_queue { u64 seek_total; sector_t seek_mean; sector_t last_request_pos; - unsigned long seeky_start; pid_t pid; @@ -314,6 +310,7 @@ enum cfqq_state_flags { CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */ CFQ_CFQQ_FLAG_sync, /* synchronous queue */ CFQ_CFQQ_FLAG_coop, /* cfqq is shared */ + CFQ_CFQQ_FLAG_split_coop, /* shared cfqq will be splitted */ CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */ CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */ }; @@ -342,6 +339,7 @@ CFQ_CFQQ_FNS(prio_changed); CFQ_CFQQ_FNS(slice_new); CFQ_CFQQ_FNS(sync); CFQ_CFQQ_FNS(coop); +CFQ_CFQQ_FNS(split_coop); CFQ_CFQQ_FNS(deep); CFQ_CFQQ_FNS(wait_busy); #undef CFQ_CFQQ_FNS @@ -1566,6 +1564,15 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfq_clear_cfqq_wait_busy(cfqq); /* + * If this cfqq is shared between multiple processes, check to + * make sure that those processes are still issuing I/Os within + * the mean seek distance. If not, it may be time to break the + * queues apart again. + */ + if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq)) + cfq_mark_cfqq_split_coop(cfqq); + + /* * store what was left of this slice, if the queue idled/timed out */ if (timed_out && !cfq_cfqq_slice_new(cfqq)) { @@ -1663,9 +1670,6 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd, return cfqd->last_position - blk_rq_pos(rq); } -#define CFQQ_SEEK_THR 8 * 1024 -#define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR) - static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq, struct request *rq, bool for_preempt) { @@ -3000,19 +3004,6 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq, total = cfqq->seek_total + (cfqq->seek_samples/2); do_div(total, cfqq->seek_samples); cfqq->seek_mean = (sector_t)total; - - /* - * If this cfqq is shared between multiple processes, check to - * make sure that those processes are still issuing I/Os within - * the mean seek distance. If not, it may be time to break the - * queues apart again. - */ - if (cfq_cfqq_coop(cfqq)) { - if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start) - cfqq->seeky_start = jiffies; - else if (!CFQQ_SEEKY(cfqq)) - cfqq->seeky_start = 0; - } } /* @@ -3453,14 +3444,6 @@ cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic, return cic_to_cfqq(cic, 1); } -static int should_split_cfqq(struct cfq_queue *cfqq) -{ - if (cfqq->seeky_start && - time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT)) - return 1; - return 0; -} - /* * Returns NULL if a new cfqq should be allocated, or the old cfqq if this * was the last process referring to said cfqq. @@ -3469,9 +3452,9 @@ static struct cfq_queue * split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq) { if (cfqq_process_refs(cfqq) == 1) { - cfqq->seeky_start = 0; cfqq->pid = current->pid; cfq_clear_cfqq_coop(cfqq); + cfq_clear_cfqq_split_coop(cfqq); return cfqq; } @@ -3510,7 +3493,7 @@ new_queue: /* * If the queue was seeky for too long, break it apart. 
*/ - if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) { + if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) { cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq"); cfqq = split_cfqq(cic, cfqq); if (!cfqq) diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index bbc2c1315c4..b2586f57e1f 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -935,6 +935,7 @@ static int dock_add(acpi_handle handle) struct platform_device *dd; id = dock_station_count; + memset(&ds, 0, sizeof(ds)); dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds)); if (IS_ERR(dd)) return PTR_ERR(dd); diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 7c0441f63b3..e88e8ae04fd 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -110,6 +110,14 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = { DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, (void *)2}, + { set_max_cstate, "Pavilion zv5000", { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")}, + (void *)1}, + { set_max_cstate, "Asus L8400B", { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")}, + (void *)1}, {}, }; diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c index 7247819dbd8..e306ba9aa34 100644 --- a/drivers/acpi/processor_pdc.c +++ b/drivers/acpi/processor_pdc.c @@ -125,6 +125,8 @@ acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in) return status; } +static int early_pdc_done; + void acpi_processor_set_pdc(acpi_handle handle) { struct acpi_object_list *obj_list; @@ -132,6 +134,9 @@ void acpi_processor_set_pdc(acpi_handle handle) if (arch_has_acpi_pdc() == false) return; + if (early_pdc_done) + return; + obj_list = acpi_processor_alloc_pdc(); if (!obj_list) return; @@ -151,6 +156,13 @@ static int set_early_pdc_optin(const struct dmi_system_id *id) return 0; } +static int param_early_pdc_optin(char *s) +{ + early_pdc_optin = 1; + return 1; +} +__setup("acpi_early_pdc_eval", param_early_pdc_optin); + static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = { { set_early_pdc_optin, "HP Envy", { @@ -192,4 +204,6 @@ void __init acpi_early_processor_set_pdc(void) acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, early_init_pdc, NULL, NULL, NULL); + + early_pdc_done = 1; } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index ff9f6226085..3e009674f33 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1336,9 +1336,25 @@ static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops, if (child) *child = device; - return 0; + + if (device) + return 0; + else + return -ENODEV; } +/* + * acpi_bus_add and acpi_bus_start + * + * scan a given ACPI tree and (probably recently hot-plugged) + * create and add or starts found devices. + * + * If no devices were found -ENODEV is returned which does not + * mean that this is a real error, there just have been no suitable + * ACPI objects in the table trunk from which the kernel could create + * a device and add/start an appropriate driver. 
+ */ + int acpi_bus_add(struct acpi_device **child, struct acpi_device *parent, acpi_handle handle, int type) @@ -1348,8 +1364,7 @@ acpi_bus_add(struct acpi_device **child, memset(&ops, 0, sizeof(ops)); ops.acpi_op_add = 1; - acpi_bus_scan(handle, &ops, child); - return 0; + return acpi_bus_scan(handle, &ops, child); } EXPORT_SYMBOL(acpi_bus_add); @@ -1357,11 +1372,13 @@ int acpi_bus_start(struct acpi_device *device) { struct acpi_bus_ops ops; + if (!device) + return -EINVAL; + memset(&ops, 0, sizeof(ops)); ops.acpi_op_start = 1; - acpi_bus_scan(device->handle, &ops, NULL); - return 0; + return acpi_bus_scan(device->handle, &ops, NULL); } EXPORT_SYMBOL(acpi_bus_start); diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index f336bca7c45..8a0ed2800e6 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id, unsigned long table_end; acpi_size tbl_size; - if (acpi_disabled) + if (acpi_disabled && !acpi_ht) return -ENODEV; if (!handler) @@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler) struct acpi_table_header *table = NULL; acpi_size tbl_size; - if (acpi_disabled) + if (acpi_disabled && !acpi_ht) return -ENODEV; if (!handler) diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index b8bea100a16..b34390347c1 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -2868,6 +2868,21 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) }, .driver_data = "F.23", /* cutoff BIOS version */ }, + /* + * Acer eMachines G725 has the same problem. BIOS + * V1.03 is known to be broken. V3.04 is known to + * work. Inbetween, there are V1.06, V2.06 and V3.03 + * that we don't have much idea about. For now, + * blacklist anything older than V3.04. + */ + { + .ident = "G725", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "eMachines"), + DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"), + }, + .driver_data = "V3.04", /* cutoff BIOS version */ + }, { } /* terminate list */ }; const struct dmi_system_id *dmi = dmi_first_match(sysids); diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index f4ea5a8c325..d096fbcbc77 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2875,7 +2875,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) * write indication (used for PIO/DMA setup), result TF is * copied back and we don't whine too much about its failure. 
*/ - tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; if (scmd->sc_data_direction == DMA_TO_DEVICE) tf->flags |= ATA_TFLAG_WRITE; diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 741065c9da6..730ef3c384c 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -893,6 +893,9 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) do_write); } + if (!do_write) + flush_dcache_page(page); + qc->curbytes += qc->sect_size; qc->cursg_ofs += qc->sect_size; diff --git a/drivers/base/class.c b/drivers/base/class.c index 161746deab4..6e2c3b064f5 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -59,6 +59,8 @@ static void class_release(struct kobject *kobj) else pr_debug("class '%s' does not have a release() function, " "be careful\n", class->name); + + kfree(cp); } static struct sysfs_ops class_sysfs_ops = { diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 873e594860d..9291614ac6b 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -337,6 +337,9 @@ static int cciss_seq_show(struct seq_file *seq, void *v) if (*pos > h->highest_lun) return 0; + if (drv == NULL) /* it's possible for h->drv[] to have holes. */ + return 0; + if (drv->heads == 0) return 0; diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index f36defa3776..57d965b7f52 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -808,6 +808,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv, exit: sdio_release_host(card->func); + kfree(tmpbuf); return ret; } diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 34cf04e2179..fd50ead59c7 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -767,16 +767,19 @@ int __init agp_amd64_init(void) static int __init agp_amd64_mod_init(void) { +#ifndef MODULE if (gart_iommu_aperture) return agp_bridges_found ? 
0 : -ENODEV; - +#endif return agp_amd64_init(); } static void __exit agp_amd64_cleanup(void) { +#ifndef MODULE if (gart_iommu_aperture) return; +#endif if (aperture_resource) release_resource(aperture_resource); pci_unregister_driver(&agp_amd64_pci_driver); diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c index ecba4942fc8..f58440791e6 100644 --- a/drivers/char/tpm/tpm_infineon.c +++ b/drivers/char/tpm/tpm_infineon.c @@ -39,12 +39,12 @@ struct tpm_inf_dev { int iotype; - void __iomem *mem_base; /* MMIO ioremap'd addr */ - unsigned long map_base; /* phys MMIO base */ - unsigned long map_size; /* MMIO region size */ - unsigned int index_off; /* index register offset */ + void __iomem *mem_base; /* MMIO ioremap'd addr */ + unsigned long map_base; /* phys MMIO base */ + unsigned long map_size; /* MMIO region size */ + unsigned int index_off; /* index register offset */ - unsigned int data_regs; /* Data registers */ + unsigned int data_regs; /* Data registers */ unsigned int data_size; unsigned int config_port; /* IO Port config index reg */ @@ -406,14 +406,14 @@ static const struct tpm_vendor_specific tpm_inf = { .miscdev = {.fops = &inf_ops,}, }; -static const struct pnp_device_id tpm_pnp_tbl[] = { +static const struct pnp_device_id tpm_inf_pnp_tbl[] = { /* Infineon TPMs */ {"IFX0101", 0}, {"IFX0102", 0}, {"", 0} }; -MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl); +MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl); static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) @@ -430,7 +430,7 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) { - tpm_dev.iotype = TPM_INF_IO_PORT; + tpm_dev.iotype = TPM_INF_IO_PORT; tpm_dev.config_port = pnp_port_start(dev, 0); tpm_dev.config_size = pnp_port_len(dev, 0); @@ -459,9 +459,9 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, goto err_last; } } else if (pnp_mem_valid(dev, 0) && - !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) { + !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) { - tpm_dev.iotype = TPM_INF_IO_MEM; + tpm_dev.iotype = TPM_INF_IO_MEM; tpm_dev.map_base = pnp_mem_start(dev, 0); tpm_dev.map_size = pnp_mem_len(dev, 0); @@ -563,11 +563,11 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, "product id 0x%02x%02x" "%s\n", tpm_dev.iotype == TPM_INF_IO_PORT ? - tpm_dev.config_port : - tpm_dev.map_base + tpm_dev.index_off, + tpm_dev.config_port : + tpm_dev.map_base + tpm_dev.index_off, tpm_dev.iotype == TPM_INF_IO_PORT ? 
- tpm_dev.data_regs : - tpm_dev.map_base + tpm_dev.data_regs, + tpm_dev.data_regs : + tpm_dev.map_base + tpm_dev.data_regs, version[0], version[1], vendorid[0], vendorid[1], productid[0], productid[1], chipname); @@ -607,20 +607,55 @@ static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev) iounmap(tpm_dev.mem_base); release_mem_region(tpm_dev.map_base, tpm_dev.map_size); } + tpm_dev_vendor_release(chip); tpm_remove_hardware(chip->dev); } } +static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state) +{ + struct tpm_chip *chip = pnp_get_drvdata(dev); + int rc; + if (chip) { + u8 savestate[] = { + 0, 193, /* TPM_TAG_RQU_COMMAND */ + 0, 0, 0, 10, /* blob length (in bytes) */ + 0, 0, 0, 152 /* TPM_ORD_SaveState */ + }; + dev_info(&dev->dev, "saving TPM state\n"); + rc = tpm_inf_send(chip, savestate, sizeof(savestate)); + if (rc < 0) { + dev_err(&dev->dev, "error while saving TPM state\n"); + return rc; + } + } + return 0; +} + +static int tpm_inf_pnp_resume(struct pnp_dev *dev) +{ + /* Re-configure TPM after suspending */ + tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR); + tpm_config_out(IOLIMH, TPM_INF_ADDR); + tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA); + tpm_config_out(IOLIML, TPM_INF_ADDR); + tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA); + /* activate register */ + tpm_config_out(TPM_DAR, TPM_INF_ADDR); + tpm_config_out(0x01, TPM_INF_DATA); + tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR); + /* disable RESET, LP and IRQC */ + tpm_data_out(RESET_LP_IRQC_DISABLE, CMD); + return tpm_pm_resume(&dev->dev); +} + static struct pnp_driver tpm_inf_pnp_driver = { .name = "tpm_inf_pnp", - .driver = { - .owner = THIS_MODULE, - .suspend = tpm_pm_suspend, - .resume = tpm_pm_resume, - }, - .id_table = tpm_pnp_tbl, + .id_table = tpm_inf_pnp_tbl, .probe = tpm_inf_pnp_probe, - .remove = __devexit_p(tpm_inf_pnp_remove), + .suspend = tpm_inf_pnp_suspend, + .resume = tpm_inf_pnp_resume, + .remove = __devexit_p(tpm_inf_pnp_remove) }; static int __init init_inf(void) @@ -638,5 +673,5 @@ module_exit(cleanup_inf); MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>"); MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2"); -MODULE_VERSION("1.9"); +MODULE_VERSION("1.9.2"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index c6f3b48be9d..dcb9083ecde 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -1951,8 +1951,10 @@ static int tty_fasync(int fd, struct file *filp, int on) pid = task_pid(current); type = PIDTYPE_PID; } - retval = __f_setown(filp, pid, type, 0); + get_pid(pid); spin_unlock_irqrestore(&tty->ctrl_lock, flags); + retval = __f_setown(filp, pid, type, 0); + put_pid(pid); if (retval) goto out; } else { diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 4b34ade2332..bd444dc93cf 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -554,6 +554,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential); + if (freq_next < policy->min) + freq_next = policy->min; + if (!dbs_tuners_ins.powersave_bias) { __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L); diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index b5f2ee0f8e2..64a937262a4 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -613,8 +613,6 @@ static void dma_tasklet(unsigned long data) cohd_fin->pending_irqs--; 
cohc->completed = cohd_fin->desc.cookie; - BUG_ON(cohc->nbr_active_done && cohd_fin == NULL); - if (cohc->nbr_active_done == 0) return; diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 6f51a0a7a8b..e7a3230fb7d 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -826,6 +826,7 @@ void dma_async_device_unregister(struct dma_device *device) chan->dev->chan = NULL; mutex_unlock(&dma_list_mutex); device_unregister(&chan->dev->device); + free_percpu(chan->local); } } EXPORT_SYMBOL(dma_async_device_unregister); diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 8b905161fbf..948d563941c 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -467,7 +467,7 @@ err_srcs: if (iterations > 0) while (!kthread_should_stop()) { - DECLARE_WAIT_QUEUE_HEAD(wait_dmatest_exit); + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); interruptible_sleep_on(&wait_dmatest_exit); } diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 5f7a500e18d..5cc37afe2bc 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -249,7 +249,7 @@ int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo) if (is_ioat_active(status) || is_ioat_idle(status)) ioat_suspend(chan); while (is_ioat_active(status) || is_ioat_idle(status)) { - if (end && time_after(jiffies, end)) { + if (tmo && time_after(jiffies, end)) { err = -ETIMEDOUT; break; } diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 9a5bc1a7389..e80bae1673f 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -761,12 +761,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n) * @buffer_n: buffer number to update. * 0 or 1 are the only valid values. * @phyaddr: buffer physical address. - * @return: Returns 0 on success or negative error code on failure. This - * function will fail if the buffer is set to ready. */ /* Called under spin_lock(_irqsave)(&ichan->lock) */ -static int ipu_update_channel_buffer(struct idmac_channel *ichan, - int buffer_n, dma_addr_t phyaddr) +static void ipu_update_channel_buffer(struct idmac_channel *ichan, + int buffer_n, dma_addr_t phyaddr) { enum ipu_channel channel = ichan->dma_chan.chan_id; uint32_t reg; @@ -806,8 +804,6 @@ static int ipu_update_channel_buffer(struct idmac_channel *ichan, } spin_unlock_irqrestore(&ipu_data.lock, flags); - - return 0; } /* Called under spin_lock_irqsave(&ichan->lock) */ @@ -816,7 +812,6 @@ static int ipu_submit_buffer(struct idmac_channel *ichan, { unsigned int chan_id = ichan->dma_chan.chan_id; struct device *dev = &ichan->dma_chan.dev->device; - int ret; if (async_tx_test_ack(&desc->txd)) return -EINTR; @@ -827,14 +822,7 @@ static int ipu_submit_buffer(struct idmac_channel *ichan, * could make it conditional on status >= IPU_CHANNEL_ENABLED, but * doing it again shouldn't hurt either. 
*/ - ret = ipu_update_channel_buffer(ichan, buf_idx, - sg_dma_address(sg)); - - if (ret < 0) { - dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n", - sg, chan_id, buf_idx); - return ret; - } + ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg)); ipu_select_buffer(chan_id, buf_idx); dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n", @@ -1379,10 +1367,11 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) if (likely(sgnew) && ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) { - callback = desc->txd.callback; - callback_param = desc->txd.callback_param; + callback = descnew->txd.callback; + callback_param = descnew->txd.callback_param; spin_unlock(&ichan->lock); - callback(callback_param); + if (callback) + callback(callback_param); spin_lock(&ichan->lock); } diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 000dc67b85b..3391e6739d0 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -2658,10 +2658,11 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt) * the memory system completely. A command line option allows to force-enable * hardware ECC later in amd64_enable_ecc_error_reporting(). */ -static const char *ecc_warning = - "WARNING: ECC is disabled by BIOS. Module will NOT be loaded.\n" - " Either Enable ECC in the BIOS, or set 'ecc_enable_override'.\n" - " Also, use of the override can cause unknown side effects.\n"; +static const char *ecc_msg = + "ECC disabled in the BIOS or no ECC capability, module will not load.\n" + " Either enable ECC checking or force module loading by setting " + "'ecc_enable_override'.\n" + " (Note that use of the override may cause unknown side effects.)\n"; static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) { @@ -2673,7 +2674,7 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE); if (!ecc_enabled) - amd64_printk(KERN_WARNING, "This node reports that Memory ECC " + amd64_printk(KERN_NOTICE, "This node reports that Memory ECC " "is currently disabled, set F3x%x[22] (%s).\n", K8_NBCFG, pci_name(pvt->misc_f3_ctl)); else @@ -2681,13 +2682,13 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id); if (!nb_mce_en) - amd64_printk(KERN_WARNING, "NB MCE bank disabled, set MSR " + amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR " "0x%08x[4] on node %d to enable.\n", MSR_IA32_MCG_CTL, pvt->mc_node_id); if (!ecc_enabled || !nb_mce_en) { if (!ecc_enable_override) { - amd64_printk(KERN_WARNING, "%s", ecc_warning); + amd64_printk(KERN_NOTICE, "%s", ecc_msg); return -ENODEV; } ecc_enable_override = 0; diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index cf27402af97..ecd5928d711 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c @@ -804,8 +804,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci) end <<= (24 - PAGE_SHIFT); end |= (1 << (24 - PAGE_SHIFT)) - 1; - csrow->first_page = start >> PAGE_SHIFT; - csrow->last_page = end >> PAGE_SHIFT; + csrow->first_page = start; + csrow->last_page = end; csrow->nr_pages = end + 1 - start; csrow->grain = 8; csrow->mtype = mtype; @@ -892,10 +892,6 @@ static int __devinit mpc85xx_mc_err_probe(struct of_device *op, mpc85xx_init_csrows(mci); -#ifdef CONFIG_EDAC_DEBUG - edac_mc_register_mcidev_debug((struct attribute **)debug_attr); -#endif - /* store the original error disable bits */ orig_ddr_err_disable = 
in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE); diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index cbaf420c36c..2d3dc7ded0a 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -893,20 +893,31 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context, static struct kmem_cache *fwnet_packet_task_cache; +static void fwnet_free_ptask(struct fwnet_packet_task *ptask) +{ + dev_kfree_skb_any(ptask->skb); + kmem_cache_free(fwnet_packet_task_cache, ptask); +} + static int fwnet_send_packet(struct fwnet_packet_task *ptask); static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) { - struct fwnet_device *dev; + struct fwnet_device *dev = ptask->dev; unsigned long flags; - - dev = ptask->dev; + bool free; spin_lock_irqsave(&dev->lock, flags); - list_del(&ptask->pt_link); - spin_unlock_irqrestore(&dev->lock, flags); - ptask->outstanding_pkts--; /* FIXME access inside lock */ + ptask->outstanding_pkts--; + + /* Check whether we or the networking TX soft-IRQ is last user. */ + free = (ptask->outstanding_pkts == 0 && !list_empty(&ptask->pt_link)); + + if (ptask->outstanding_pkts == 0) + list_del(&ptask->pt_link); + + spin_unlock_irqrestore(&dev->lock, flags); if (ptask->outstanding_pkts > 0) { u16 dg_size; @@ -951,10 +962,10 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE; } fwnet_send_packet(ptask); - } else { - dev_kfree_skb_any(ptask->skb); - kmem_cache_free(fwnet_packet_task_cache, ptask); } + + if (free) + fwnet_free_ptask(ptask); } static void fwnet_write_complete(struct fw_card *card, int rcode, @@ -977,6 +988,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask) unsigned tx_len; struct rfc2734_header *bufhdr; unsigned long flags; + bool free; dev = ptask->dev; tx_len = ptask->max_payload; @@ -1022,12 +1034,16 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask) generation, SCODE_100, 0ULL, ptask->skb->data, tx_len + 8, fwnet_write_complete, ptask); - /* FIXME race? */ spin_lock_irqsave(&dev->lock, flags); - list_add_tail(&ptask->pt_link, &dev->broadcasted_list); + + /* If the AT tasklet already ran, we may be last user. */ + free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link)); + if (!free) + list_add_tail(&ptask->pt_link, &dev->broadcasted_list); + spin_unlock_irqrestore(&dev->lock, flags); - return 0; + goto out; } fw_send_request(dev->card, &ptask->transaction, @@ -1035,12 +1051,19 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask) ptask->generation, ptask->speed, ptask->fifo_addr, ptask->skb->data, tx_len, fwnet_write_complete, ptask); - /* FIXME race? */ spin_lock_irqsave(&dev->lock, flags); - list_add_tail(&ptask->pt_link, &dev->sent_list); + + /* If the AT tasklet already ran, we may be last user. 
*/ + free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link)); + if (!free) + list_add_tail(&ptask->pt_link, &dev->sent_list); + spin_unlock_irqrestore(&dev->lock, flags); dev->netdev->trans_start = jiffies; + out: + if (free) + fwnet_free_ptask(ptask); return 0; } @@ -1298,6 +1321,8 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) spin_unlock_irqrestore(&dev->lock, flags); ptask->max_payload = max_payload; + INIT_LIST_HEAD(&ptask->pt_link); + fwnet_send_packet(ptask); return NETDEV_TX_OK; diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 2345d4103fe..43ebf337b13 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -2101,11 +2101,6 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base, u32 payload_index, payload_end_index, next_page_index; int page, end_page, i, length, offset; - /* - * FIXME: Cycle lost behavior should be configurable: lose - * packet, retransmit or terminate.. - */ - p = packet; payload_index = payload; @@ -2135,6 +2130,14 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base, if (!p->skip) { d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); d[0].req_count = cpu_to_le16(8); + /* + * Link the skip address to this descriptor itself. This causes + * a context to skip a cycle whenever lost cycles or FIFO + * overruns occur, without dropping the data. The application + * should then decide whether this is an error condition or not. + * FIXME: Make the context's cycle-lost behaviour configurable? + */ + d[0].branch_address = cpu_to_le32(d_bus | z); header = (__le32 *) &d[1]; header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) | diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c index a1fce68e3bb..17be051b7aa 100644 --- a/drivers/gpu/drm/ati_pcigart.c +++ b/drivers/gpu/drm/ati_pcigart.c @@ -113,7 +113,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) { DRM_ERROR("fail to set dma mask to 0x%Lx\n", - gart_info->table_mask); + (unsigned long long)gart_info->table_mask); ret = 1; goto done; } diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index f665b05592f..ab6c9733041 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev, return mode; } +/* + * EDID is delightfully ambiguous about how interlaced modes are to be + * encoded. Our internal representation is of frame height, but some + * HDTV detailed timings are encoded as field height. + * + * The format list here is from CEA, in frame size. Technically we + * should be checking refresh rate too. Whatever. 
+ */ +static void +drm_mode_do_interlace_quirk(struct drm_display_mode *mode, + struct detailed_pixel_timing *pt) +{ + int i; + static const struct { + int w, h; + } cea_interlaced[] = { + { 1920, 1080 }, + { 720, 480 }, + { 1440, 480 }, + { 2880, 480 }, + { 720, 576 }, + { 1440, 576 }, + { 2880, 576 }, + }; + static const int n_sizes = + sizeof(cea_interlaced)/sizeof(cea_interlaced[0]); + + if (!(pt->misc & DRM_EDID_PT_INTERLACED)) + return; + + for (i = 0; i < n_sizes; i++) { + if ((mode->hdisplay == cea_interlaced[i].w) && + (mode->vdisplay == cea_interlaced[i].h / 2)) { + mode->vdisplay *= 2; + mode->vsync_start *= 2; + mode->vsync_end *= 2; + mode->vtotal *= 2; + mode->vtotal |= 1; + } + } + + mode->flags |= DRM_MODE_FLAG_INTERLACE; +} + /** * drm_mode_detailed - create a new mode from an EDID detailed timing section * @dev: DRM device (needed to create new mode) @@ -680,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, drm_mode_set_name(mode); - if (pt->misc & DRM_EDID_PT_INTERLACED) - mode->flags |= DRM_MODE_FLAG_INTERLACE; + drm_mode_do_interlace_quirk(mode, pt); if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) { pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE; diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index cdec3297712..2ac074c8f5d 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -405,7 +405,8 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, wasted += alignment - tmp; } - if (entry->size >= size + wasted) { + if (entry->size >= size + wasted && + (entry->start + wasted + size) <= end) { if (!best_match) return entry; if (entry->size < best_size) { diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index e660ac07f3b..2307f98349f 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -735,8 +735,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, if (cmdbuf->num_cliprects) { cliprects = kcalloc(cmdbuf->num_cliprects, sizeof(struct drm_clip_rect), GFP_KERNEL); - if (cliprects == NULL) + if (cliprects == NULL) { + ret = -ENOMEM; goto fail_batch_free; + } ret = copy_from_user(cliprects, cmdbuf->cliprects, cmdbuf->num_cliprects * diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 46d88965852..79beffcf593 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -120,7 +120,7 @@ const static struct intel_device_info intel_gm45_info = { const static struct intel_device_info intel_pineview_info = { .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, - .has_pipe_cxsr = 1, + .need_gfx_hws = 1, .has_hotplug = 1, }; @@ -174,78 +174,100 @@ const static struct pci_device_id pciidlist[] = { MODULE_DEVICE_TABLE(pci, pciidlist); #endif -static int i915_suspend(struct drm_device *dev, pm_message_t state) +static int i915_drm_freeze(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (!dev || !dev_priv) { - DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv); - DRM_ERROR("DRM not initialized, aborting suspend.\n"); - return -ENODEV; - } - - if (state.event == PM_EVENT_PRETHAW) - return 0; - pci_save_state(dev->pdev); /* If KMS is active, we do the leavevt stuff here */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { - if (i915_gem_idle(dev)) + int error = i915_gem_idle(dev); + if (error) { dev_err(&dev->pdev->dev, - "GEM idle failed, resume may fail\n"); + "GEM idle failed, resume might fail\n"); + 
return error; + } drm_irq_uninstall(dev); } i915_save_state(dev); + return 0; +} + +static void i915_drm_suspend(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + intel_opregion_free(dev, 1); + /* Modeset on resume, not lid events */ + dev_priv->modeset_on_lid = 0; +} + +static int i915_suspend(struct drm_device *dev, pm_message_t state) +{ + int error; + + if (!dev || !dev->dev_private) { + DRM_ERROR("dev: %p\n", dev); + DRM_ERROR("DRM not initialized, aborting suspend.\n"); + return -ENODEV; + } + + if (state.event == PM_EVENT_PRETHAW) + return 0; + + error = i915_drm_freeze(dev); + if (error) + return error; + + i915_drm_suspend(dev); + if (state.event == PM_EVENT_SUSPEND) { /* Shut down the device */ pci_disable_device(dev->pdev); pci_set_power_state(dev->pdev, PCI_D3hot); } - /* Modeset on resume, not lid events */ - dev_priv->modeset_on_lid = 0; - return 0; } -static int i915_resume(struct drm_device *dev) +static int i915_drm_thaw(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int ret = 0; - - if (pci_enable_device(dev->pdev)) - return -1; - pci_set_master(dev->pdev); - - i915_restore_state(dev); - - intel_opregion_init(dev, 1); + int error = 0; /* KMS EnterVT equivalent */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { mutex_lock(&dev->struct_mutex); dev_priv->mm.suspended = 0; - ret = i915_gem_init_ringbuffer(dev); - if (ret != 0) - ret = -1; + error = i915_gem_init_ringbuffer(dev); mutex_unlock(&dev->struct_mutex); drm_irq_install(dev); - } - if (drm_core_check_feature(dev, DRIVER_MODESET)) { + /* Resume the modeset for every activated CRTC */ drm_helper_resume_force_mode(dev); } dev_priv->modeset_on_lid = 0; - return ret; + return error; +} + +static int i915_resume(struct drm_device *dev) +{ + if (pci_enable_device(dev->pdev)) + return -EIO; + + pci_set_master(dev->pdev); + + i915_restore_state(dev); + + intel_opregion_init(dev, 1); + + return i915_drm_thaw(dev); } /** @@ -386,57 +408,69 @@ i915_pci_remove(struct pci_dev *pdev) drm_put_dev(dev); } -static int -i915_pci_suspend(struct pci_dev *pdev, pm_message_t state) +static int i915_pm_suspend(struct device *dev) { - struct drm_device *dev = pci_get_drvdata(pdev); + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + int error; - return i915_suspend(dev, state); -} + if (!drm_dev || !drm_dev->dev_private) { + dev_err(dev, "DRM not initialized, aborting suspend.\n"); + return -ENODEV; + } -static int -i915_pci_resume(struct pci_dev *pdev) -{ - struct drm_device *dev = pci_get_drvdata(pdev); + error = i915_drm_freeze(drm_dev); + if (error) + return error; - return i915_resume(dev); -} + i915_drm_suspend(drm_dev); -static int -i915_pm_suspend(struct device *dev) -{ - return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND); -} + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); -static int -i915_pm_resume(struct device *dev) -{ - return i915_pci_resume(to_pci_dev(dev)); + return 0; } -static int -i915_pm_freeze(struct device *dev) +static int i915_pm_resume(struct device *dev) { - return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE); + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + + return i915_resume(drm_dev); } -static int -i915_pm_thaw(struct device *dev) +static int i915_pm_freeze(struct device *dev) { - /* thaw during hibernate, do nothing! 
*/ - return 0; + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + + if (!drm_dev || !drm_dev->dev_private) { + dev_err(dev, "DRM not initialized, aborting suspend.\n"); + return -ENODEV; + } + + return i915_drm_freeze(drm_dev); } -static int -i915_pm_poweroff(struct device *dev) +static int i915_pm_thaw(struct device *dev) { - return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE); + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + + return i915_drm_thaw(drm_dev); } -static int -i915_pm_restore(struct device *dev) +static int i915_pm_poweroff(struct device *dev) { - return i915_pci_resume(to_pci_dev(dev)); + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + int error; + + error = i915_drm_freeze(drm_dev); + if (!error) + i915_drm_suspend(drm_dev); + + return error; } const struct dev_pm_ops i915_pm_ops = { @@ -445,7 +479,7 @@ const struct dev_pm_ops i915_pm_ops = { .freeze = i915_pm_freeze, .thaw = i915_pm_thaw, .poweroff = i915_pm_poweroff, - .restore = i915_pm_restore, + .restore = i915_pm_resume, }; static struct vm_operations_struct i915_gem_vm_ops = { diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index aaf934d96f2..b99b6a841d9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -493,6 +493,15 @@ typedef struct drm_i915_private { struct list_head flushing_list; /** + * List of objects currently pending a GPU write flush. + * + * All elements on this list will belong to either the + * active_list or flushing_list, last_rendering_seqno can + * be used to differentiate between the two elements. + */ + struct list_head gpu_write_list; + + /** * LRU list of objects which are not in the ringbuffer and * are ready to unbind, but are still in the GTT. 
* @@ -592,6 +601,8 @@ struct drm_i915_gem_object { /** This object's place on the active/flushing/inactive lists */ struct list_head list; + /** This object's place on GPU write list */ + struct list_head gpu_write_list; /** This object's place on the fenced object LRU */ struct list_head fence_list; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index dda787aafcc..ec8a0d7ffa3 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1552,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) else list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); + BUG_ON(!list_empty(&obj_priv->gpu_write_list)); + obj_priv->last_rendering_seqno = 0; if (obj_priv->active) { obj_priv->active = 0; @@ -1622,7 +1624,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, struct drm_i915_gem_object *obj_priv, *next; list_for_each_entry_safe(obj_priv, next, - &dev_priv->mm.flushing_list, list) { + &dev_priv->mm.gpu_write_list, + gpu_write_list) { struct drm_gem_object *obj = obj_priv->obj; if ((obj->write_domain & flush_domains) == @@ -1630,6 +1633,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, uint32_t old_write_domain = obj->write_domain; obj->write_domain = 0; + list_del_init(&obj_priv->gpu_write_list); i915_gem_object_move_to_active(obj, seqno); trace_i915_gem_object_change_domain(obj, @@ -2084,8 +2088,8 @@ static int i915_gem_evict_everything(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - uint32_t seqno; int ret; + uint32_t seqno; bool lists_empty; spin_lock(&dev_priv->mm.active_list_lock); @@ -2107,6 +2111,8 @@ i915_gem_evict_everything(struct drm_device *dev) if (ret) return ret; + BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); + ret = i915_gem_evict_from_inactive_list(dev); if (ret) return ret; @@ -2701,7 +2707,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) old_write_domain = obj->write_domain; i915_gem_flush(dev, 0, obj->write_domain); seqno = i915_add_request(dev, NULL, obj->write_domain); - obj->write_domain = 0; + BUG_ON(obj->write_domain); i915_gem_object_move_to_active(obj, seqno); trace_i915_gem_object_change_domain(obj, @@ -3564,6 +3570,9 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list, uint32_t reloc_count = 0, i; int ret = 0; + if (relocs == NULL) + return 0; + for (i = 0; i < buffer_count; i++) { struct drm_i915_gem_relocation_entry __user *user_relocs; int unwritten; @@ -3653,7 +3662,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_gem_object *batch_obj; struct drm_i915_gem_object *obj_priv; struct drm_clip_rect *cliprects = NULL; - struct drm_i915_gem_relocation_entry *relocs; + struct drm_i915_gem_relocation_entry *relocs = NULL; int ret = 0, ret2, i, pinned = 0; uint64_t exec_offset; uint32_t seqno, flush_domains, reloc_index; @@ -3679,8 +3688,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (args->num_cliprects != 0) { cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), GFP_KERNEL); - if (cliprects == NULL) + if (cliprects == NULL) { + ret = -ENOMEM; goto pre_mutex_err; + } ret = copy_from_user(cliprects, (struct drm_clip_rect __user *) @@ -3722,6 +3733,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (object_list[i] == NULL) { DRM_ERROR("Invalid object handle %d at index %d\n", exec_list[i].handle, i); + /* prevent error path from reading uninitialized data */ + args->buffer_count = i + 1; ret = 
-EBADF; goto err; } @@ -3730,6 +3743,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (obj_priv->in_execbuffer) { DRM_ERROR("Object %p appears more than once in object list\n", object_list[i]); + /* prevent error path from reading uninitialized data */ + args->buffer_count = i + 1; ret = -EBADF; goto err; } @@ -3843,16 +3858,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, i915_gem_flush(dev, dev->invalidate_domains, dev->flush_domains); - if (dev->flush_domains) + if (dev->flush_domains & I915_GEM_GPU_DOMAINS) (void)i915_add_request(dev, file_priv, dev->flush_domains); } for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; + struct drm_i915_gem_object *obj_priv = obj->driver_private; uint32_t old_write_domain = obj->write_domain; obj->write_domain = obj->pending_write_domain; + if (obj->write_domain) + list_move_tail(&obj_priv->gpu_write_list, + &dev_priv->mm.gpu_write_list); + else + list_del_init(&obj_priv->gpu_write_list); + trace_i915_gem_object_change_domain(obj, obj->read_domains, old_write_domain); @@ -3926,6 +3948,7 @@ err: mutex_unlock(&dev->struct_mutex); +pre_mutex_err: /* Copy the updated relocations out regardless of current error * state. Failure to update the relocs would mean that the next * time userland calls execbuf, it would do so with presumed offset @@ -3940,7 +3963,6 @@ err: ret = ret2; } -pre_mutex_err: drm_free_large(object_list); kfree(cliprects); @@ -4363,6 +4385,7 @@ int i915_gem_init_object(struct drm_gem_object *obj) obj_priv->obj = obj; obj_priv->fence_reg = I915_FENCE_REG_NONE; INIT_LIST_HEAD(&obj_priv->list); + INIT_LIST_HEAD(&obj_priv->gpu_write_list); INIT_LIST_HEAD(&obj_priv->fence_list); obj_priv->madv = I915_MADV_WILLNEED; @@ -4814,6 +4837,7 @@ i915_gem_load(struct drm_device *dev) spin_lock_init(&dev_priv->mm.active_list_lock); INIT_LIST_HEAD(&dev_priv->mm.active_list); INIT_LIST_HEAD(&dev_priv->mm.flushing_list); + INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); INIT_LIST_HEAD(&dev_priv->mm.inactive_list); INIT_LIST_HEAD(&dev_priv->mm.request_list); INIT_LIST_HEAD(&dev_priv->mm.fence_list); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 89a071a3e6f..a17d6bdfe63 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -309,6 +309,22 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) if (de_iir & DE_GSE) ironlake_opregion_gse_intr(dev); + if (de_iir & DE_PLANEA_FLIP_DONE) { + intel_prepare_page_flip(dev, 0); + intel_finish_page_flip(dev, 0); + } + + if (de_iir & DE_PLANEB_FLIP_DONE) { + intel_prepare_page_flip(dev, 1); + intel_finish_page_flip(dev, 1); + } + + if (de_iir & DE_PIPEA_VBLANK) + drm_handle_vblank(dev, 0); + + if (de_iir & DE_PIPEB_VBLANK) + drm_handle_vblank(dev, 1); + /* check event from PCH */ if ((de_iir & DE_PCH_EVENT) && (pch_iir & SDE_HOTPLUG_MASK)) { @@ -844,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) if (!(pipeconf & PIPEACONF_ENABLE)) return -EINVAL; - if (IS_IRONLAKE(dev)) - return 0; - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - if (IS_I965G(dev)) + if (IS_IRONLAKE(dev)) + ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
+ DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); + else if (IS_I965G(dev)) i915_enable_pipestat(dev_priv, pipe, PIPE_START_VBLANK_INTERRUPT_ENABLE); else @@ -866,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; - if (IS_IRONLAKE(dev)) - return; - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - i915_disable_pipestat(dev_priv, pipe, - PIPE_VBLANK_INTERRUPT_ENABLE | - PIPE_START_VBLANK_INTERRUPT_ENABLE); + if (IS_IRONLAKE(dev)) + ironlake_disable_display_irq(dev_priv, (pipe == 0) ? + DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); + else + i915_disable_pipestat(dev_priv, pipe, + PIPE_VBLANK_INTERRUPT_ENABLE | + PIPE_START_VBLANK_INTERRUPT_ENABLE); spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); } @@ -1015,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; /* enable kind of interrupts always enabled */ - u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT; + u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | + DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; u32 render_mask = GT_USER_INTERRUPT; u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; dev_priv->irq_mask_reg = ~display_mask; - dev_priv->de_irq_enable_reg = display_mask; + dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; /* should always can generate irq */ I915_WRITE(DEIIR, I915_READ(DEIIR)); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 847006c5218..ab1bd2d3d3b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -338,6 +338,7 @@ #define FBC_CTL_PERIODIC (1<<30) #define FBC_CTL_INTERVAL_SHIFT (16) #define FBC_CTL_UNCOMPRESSIBLE (1<<14) +#define FBC_C3_IDLE (1<<13) #define FBC_CTL_STRIDE_SHIFT (5) #define FBC_CTL_FENCENO (1<<0) #define FBC_COMMAND 0x0320c diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index ddefc871edf..79dd4026586 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) adpa = I915_READ(PCH_ADPA); adpa &= ~ADPA_CRT_HOTPLUG_MASK; + /* disable HPD first */ + I915_WRITE(PCH_ADPA, adpa); + (void)I915_READ(PCH_ADPA); adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | ADPA_CRT_HOTPLUG_WARMUP_10MS | diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 45da78ef4a9..b27202d23eb 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -240,33 +240,86 @@ struct intel_limit { #define IRONLAKE_DOT_MAX 350000 #define IRONLAKE_VCO_MIN 1760000 #define IRONLAKE_VCO_MAX 3510000 -#define IRONLAKE_N_MIN 1 -#define IRONLAKE_N_MAX 6 -#define IRONLAKE_M_MIN 79 -#define IRONLAKE_M_MAX 127 #define IRONLAKE_M1_MIN 12 #define IRONLAKE_M1_MAX 22 #define IRONLAKE_M2_MIN 5 #define IRONLAKE_M2_MAX 9 -#define IRONLAKE_P_SDVO_DAC_MIN 5 -#define IRONLAKE_P_SDVO_DAC_MAX 80 -#define IRONLAKE_P_LVDS_MIN 28 -#define IRONLAKE_P_LVDS_MAX 112 -#define IRONLAKE_P1_MIN 1 -#define IRONLAKE_P1_MAX 8 -#define IRONLAKE_P2_SDVO_DAC_SLOW 10 -#define IRONLAKE_P2_SDVO_DAC_FAST 5 -#define IRONLAKE_P2_LVDS_SLOW 14 /* single channel */ -#define IRONLAKE_P2_LVDS_FAST 7 /* double channel */ #define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */ -#define 
IRONLAKE_P_DISPLAY_PORT_MIN 10 -#define IRONLAKE_P_DISPLAY_PORT_MAX 20 -#define IRONLAKE_P2_DISPLAY_PORT_FAST 10 -#define IRONLAKE_P2_DISPLAY_PORT_SLOW 10 -#define IRONLAKE_P2_DISPLAY_PORT_LIMIT 0 -#define IRONLAKE_P1_DISPLAY_PORT_MIN 1 -#define IRONLAKE_P1_DISPLAY_PORT_MAX 2 +/* We have parameter ranges for different type of outputs. */ + +/* DAC & HDMI Refclk 120Mhz */ +#define IRONLAKE_DAC_N_MIN 1 +#define IRONLAKE_DAC_N_MAX 5 +#define IRONLAKE_DAC_M_MIN 79 +#define IRONLAKE_DAC_M_MAX 127 +#define IRONLAKE_DAC_P_MIN 5 +#define IRONLAKE_DAC_P_MAX 80 +#define IRONLAKE_DAC_P1_MIN 1 +#define IRONLAKE_DAC_P1_MAX 8 +#define IRONLAKE_DAC_P2_SLOW 10 +#define IRONLAKE_DAC_P2_FAST 5 + +/* LVDS single-channel 120Mhz refclk */ +#define IRONLAKE_LVDS_S_N_MIN 1 +#define IRONLAKE_LVDS_S_N_MAX 3 +#define IRONLAKE_LVDS_S_M_MIN 79 +#define IRONLAKE_LVDS_S_M_MAX 118 +#define IRONLAKE_LVDS_S_P_MIN 28 +#define IRONLAKE_LVDS_S_P_MAX 112 +#define IRONLAKE_LVDS_S_P1_MIN 2 +#define IRONLAKE_LVDS_S_P1_MAX 8 +#define IRONLAKE_LVDS_S_P2_SLOW 14 +#define IRONLAKE_LVDS_S_P2_FAST 14 + +/* LVDS dual-channel 120Mhz refclk */ +#define IRONLAKE_LVDS_D_N_MIN 1 +#define IRONLAKE_LVDS_D_N_MAX 3 +#define IRONLAKE_LVDS_D_M_MIN 79 +#define IRONLAKE_LVDS_D_M_MAX 127 +#define IRONLAKE_LVDS_D_P_MIN 14 +#define IRONLAKE_LVDS_D_P_MAX 56 +#define IRONLAKE_LVDS_D_P1_MIN 2 +#define IRONLAKE_LVDS_D_P1_MAX 8 +#define IRONLAKE_LVDS_D_P2_SLOW 7 +#define IRONLAKE_LVDS_D_P2_FAST 7 + +/* LVDS single-channel 100Mhz refclk */ +#define IRONLAKE_LVDS_S_SSC_N_MIN 1 +#define IRONLAKE_LVDS_S_SSC_N_MAX 2 +#define IRONLAKE_LVDS_S_SSC_M_MIN 79 +#define IRONLAKE_LVDS_S_SSC_M_MAX 126 +#define IRONLAKE_LVDS_S_SSC_P_MIN 28 +#define IRONLAKE_LVDS_S_SSC_P_MAX 112 +#define IRONLAKE_LVDS_S_SSC_P1_MIN 2 +#define IRONLAKE_LVDS_S_SSC_P1_MAX 8 +#define IRONLAKE_LVDS_S_SSC_P2_SLOW 14 +#define IRONLAKE_LVDS_S_SSC_P2_FAST 14 + +/* LVDS dual-channel 100Mhz refclk */ +#define IRONLAKE_LVDS_D_SSC_N_MIN 1 +#define IRONLAKE_LVDS_D_SSC_N_MAX 3 +#define IRONLAKE_LVDS_D_SSC_M_MIN 79 +#define IRONLAKE_LVDS_D_SSC_M_MAX 126 +#define IRONLAKE_LVDS_D_SSC_P_MIN 14 +#define IRONLAKE_LVDS_D_SSC_P_MAX 42 +#define IRONLAKE_LVDS_D_SSC_P1_MIN 2 +#define IRONLAKE_LVDS_D_SSC_P1_MAX 6 +#define IRONLAKE_LVDS_D_SSC_P2_SLOW 7 +#define IRONLAKE_LVDS_D_SSC_P2_FAST 7 + +/* DisplayPort */ +#define IRONLAKE_DP_N_MIN 1 +#define IRONLAKE_DP_N_MAX 2 +#define IRONLAKE_DP_M_MIN 81 +#define IRONLAKE_DP_M_MAX 90 +#define IRONLAKE_DP_P_MIN 10 +#define IRONLAKE_DP_P_MAX 20 +#define IRONLAKE_DP_P2_FAST 10 +#define IRONLAKE_DP_P2_SLOW 10 +#define IRONLAKE_DP_P2_LIMIT 0 +#define IRONLAKE_DP_P1_MIN 1 +#define IRONLAKE_DP_P1_MAX 2 static bool intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, @@ -474,33 +527,78 @@ static const intel_limit_t intel_limits_pineview_lvds = { .find_pll = intel_find_best_PLL, }; -static const intel_limit_t intel_limits_ironlake_sdvo = { +static const intel_limit_t intel_limits_ironlake_dac = { .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, - .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX }, - .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX }, + .n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX }, + .m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX }, .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, - .p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX }, - .p1 = { .min = 
IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX }, + .p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX }, + .p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX }, .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, - .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW, - .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST }, + .p2_slow = IRONLAKE_DAC_P2_SLOW, + .p2_fast = IRONLAKE_DAC_P2_FAST }, .find_pll = intel_g4x_find_best_PLL, }; -static const intel_limit_t intel_limits_ironlake_lvds = { +static const intel_limit_t intel_limits_ironlake_single_lvds = { .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, - .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX }, - .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX }, + .n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX }, + .m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX }, .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, - .p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX }, - .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX }, + .p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX }, + .p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX }, .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, - .p2_slow = IRONLAKE_P2_LVDS_SLOW, - .p2_fast = IRONLAKE_P2_LVDS_FAST }, + .p2_slow = IRONLAKE_LVDS_S_P2_SLOW, + .p2_fast = IRONLAKE_LVDS_S_P2_FAST }, + .find_pll = intel_g4x_find_best_PLL, +}; + +static const intel_limit_t intel_limits_ironlake_dual_lvds = { + .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, + .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, + .n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX }, + .m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX }, + .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, + .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, + .p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX }, + .p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX }, + .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, + .p2_slow = IRONLAKE_LVDS_D_P2_SLOW, + .p2_fast = IRONLAKE_LVDS_D_P2_FAST }, + .find_pll = intel_g4x_find_best_PLL, +}; + +static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { + .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, + .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, + .n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX }, + .m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX }, + .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, + .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, + .p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX }, + .p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX }, + .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, + .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW, + .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST }, + .find_pll = intel_g4x_find_best_PLL, +}; + +static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { + .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, + .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, + .n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX }, + .m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX }, + .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, + .m2 = { .min = 
IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, + .p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX }, + .p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX }, + .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, + .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW, + .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST }, .find_pll = intel_g4x_find_best_PLL, }; @@ -509,34 +607,53 @@ static const intel_limit_t intel_limits_ironlake_display_port = { .max = IRONLAKE_DOT_MAX }, .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX}, - .n = { .min = IRONLAKE_N_MIN, - .max = IRONLAKE_N_MAX }, - .m = { .min = IRONLAKE_M_MIN, - .max = IRONLAKE_M_MAX }, + .n = { .min = IRONLAKE_DP_N_MIN, + .max = IRONLAKE_DP_N_MAX }, + .m = { .min = IRONLAKE_DP_M_MIN, + .max = IRONLAKE_DP_M_MAX }, .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, - .p = { .min = IRONLAKE_P_DISPLAY_PORT_MIN, - .max = IRONLAKE_P_DISPLAY_PORT_MAX }, - .p1 = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN, - .max = IRONLAKE_P1_DISPLAY_PORT_MAX}, - .p2 = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT, - .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW, - .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST }, + .p = { .min = IRONLAKE_DP_P_MIN, + .max = IRONLAKE_DP_P_MAX }, + .p1 = { .min = IRONLAKE_DP_P1_MIN, + .max = IRONLAKE_DP_P1_MAX}, + .p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT, + .p2_slow = IRONLAKE_DP_P2_SLOW, + .p2_fast = IRONLAKE_DP_P2_FAST }, .find_pll = intel_find_pll_ironlake_dp, }; static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) { + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; const intel_limit_t *limit; - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) - limit = &intel_limits_ironlake_lvds; - else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || + int refclk = 120; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100) + refclk = 100; + + if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == + LVDS_CLKB_POWER_UP) { + /* LVDS dual channel */ + if (refclk == 100) + limit = &intel_limits_ironlake_dual_lvds_100m; + else + limit = &intel_limits_ironlake_dual_lvds; + } else { + if (refclk == 100) + limit = &intel_limits_ironlake_single_lvds_100m; + else + limit = &intel_limits_ironlake_single_lvds; + } + } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || HAS_eDP) limit = &intel_limits_ironlake_display_port; else - limit = &intel_limits_ironlake_sdvo; + limit = &intel_limits_ironlake_dac; return limit; } @@ -914,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) /* enable it... 
*/ fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; + if (IS_I945GM(dev)) + fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */ fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; if (obj_priv->tiling_mode != I915_TILING_NONE) @@ -1638,6 +1757,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_OFF: DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); + drm_vblank_off(dev, pipe); /* Disable display plane */ temp = I915_READ(dspcntr_reg); if ((temp & DISPLAY_PLANE_ENABLE) != 0) { @@ -2519,6 +2639,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, sr_entries = roundup(sr_entries / cacheline_size, 1); DRM_DEBUG("self-refresh entries: %d\n", sr_entries); I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); + } else { + /* Turn off self refresh if both pipes are enabled */ + I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) + & ~FW_BLC_SELF_EN); } DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", @@ -2562,6 +2686,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, srwm = 1; srwm &= 0x3f; I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); + } else { + /* Turn off self refresh if both pipes are enabled */ + I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) + & ~FW_BLC_SELF_EN); } DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", @@ -2630,6 +2758,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, if (srwm < 0) srwm = 1; I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); + } else { + /* Turn off self refresh if both pipes are enabled */ + I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) + & ~FW_BLC_SELF_EN); } DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", @@ -3949,7 +4081,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) struct intel_unpin_work { struct work_struct work; struct drm_device *dev; - struct drm_gem_object *obj; + struct drm_gem_object *old_fb_obj; + struct drm_gem_object *pending_flip_obj; struct drm_pending_vblank_event *event; int pending; }; @@ -3960,8 +4093,9 @@ static void intel_unpin_work_fn(struct work_struct *__work) container_of(__work, struct intel_unpin_work, work); mutex_lock(&work->dev->struct_mutex); - i915_gem_object_unpin(work->obj); - drm_gem_object_unreference(work->obj); + i915_gem_object_unpin(work->old_fb_obj); + drm_gem_object_unreference(work->pending_flip_obj); + drm_gem_object_unreference(work->old_fb_obj); mutex_unlock(&work->dev->struct_mutex); kfree(work); } @@ -3984,6 +4118,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) spin_lock_irqsave(&dev->event_lock, flags); work = intel_crtc->unpin_work; if (work == NULL || !work->pending) { + if (work && !work->pending) { + obj_priv = work->pending_flip_obj->driver_private; + DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", + obj_priv, + atomic_read(&obj_priv->pending_flip)); + } spin_unlock_irqrestore(&dev->event_lock, flags); return; } @@ -4004,8 +4144,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) spin_unlock_irqrestore(&dev->event_lock, flags); - obj_priv = work->obj->driver_private; - if (atomic_dec_and_test(&obj_priv->pending_flip)) + obj_priv = work->pending_flip_obj->driver_private; + + /* Initial scanout buffer will have a 0 pending flip count */ + if ((atomic_read(&obj_priv->pending_flip) == 0) || + atomic_dec_and_test(&obj_priv->pending_flip)) DRM_WAKEUP(&dev_priv->pending_flip_queue); schedule_work(&work->work); } @@ -4018,8 +4161,11 @@ void 
intel_prepare_page_flip(struct drm_device *dev, int plane) unsigned long flags; spin_lock_irqsave(&dev->event_lock, flags); - if (intel_crtc->unpin_work) + if (intel_crtc->unpin_work) { intel_crtc->unpin_work->pending = 1; + } else { + DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); + } spin_unlock_irqrestore(&dev->event_lock, flags); } @@ -4035,7 +4181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_unpin_work *work; unsigned long flags; - int ret; + int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; + int ret, pipesrc; RING_LOCALS; work = kzalloc(sizeof *work, GFP_KERNEL); @@ -4047,12 +4194,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->event = event; work->dev = crtc->dev; intel_fb = to_intel_framebuffer(crtc->fb); - work->obj = intel_fb->obj; + work->old_fb_obj = intel_fb->obj; INIT_WORK(&work->work, intel_unpin_work_fn); /* We borrow the event spin lock for protecting unpin_work */ spin_lock_irqsave(&dev->event_lock, flags); if (intel_crtc->unpin_work) { + DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); spin_unlock_irqrestore(&dev->event_lock, flags); kfree(work); mutex_unlock(&dev->struct_mutex); @@ -4066,19 +4214,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ret = intel_pin_and_fence_fb_obj(dev, obj); if (ret != 0) { + DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", + obj->driver_private); kfree(work); + intel_crtc->unpin_work = NULL; mutex_unlock(&dev->struct_mutex); return ret; } - /* Reference the old fb object for the scheduled work. */ - drm_gem_object_reference(work->obj); + /* Reference the objects for the scheduled work. */ + drm_gem_object_reference(work->old_fb_obj); + drm_gem_object_reference(obj); crtc->fb = fb; i915_gem_object_flush_write_domain(obj); drm_vblank_get(dev, intel_crtc->pipe); obj_priv = obj->driver_private; atomic_inc(&obj_priv->pending_flip); + work->pending_flip_obj = obj; BEGIN_LP_RING(4); OUT_RING(MI_DISPLAY_FLIP | @@ -4086,7 +4239,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, OUT_RING(fb->pitch); if (IS_I965G(dev)) { OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); - OUT_RING((fb->width << 16) | fb->height); + pipesrc = I915_READ(pipesrc_reg); + OUT_RING(pipesrc & 0x0fff0fff); } else { OUT_RING(obj_priv->gtt_offset); OUT_RING(MI_NOOP); diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 371d753e362..aaabbcbe590 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, mutex_lock(&dev->struct_mutex); - ret = i915_gem_object_pin(fbo, PAGE_SIZE); + ret = i915_gem_object_pin(fbo, 64*1024); if (ret) { DRM_ERROR("failed to pin fb: %d\n", ret); goto out_unref; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index aa74e59bec6..c2e8a45780d 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -611,7 +611,7 @@ static const struct dmi_system_id bad_lid_status[] = { { .ident = "Samsung SX20S", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"), + DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"), DMI_MATCH(DMI_BOARD_NAME, "SX20S"), }, }, @@ -623,12 +623,26 @@ static const struct dmi_system_id bad_lid_status[] = { }, }, { + .ident = "Aspire 1810T", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"), + }, + }, + { .ident 
= "PC-81005", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MALATA"), DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"), }, }, + { + .ident = "Clevo M5x0N", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."), + DMI_MATCH(DMI_BOARD_NAME, "M5x0N"), + }, + }, { } }; @@ -643,7 +657,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect { enum drm_connector_status status = connector_status_connected; - if (!acpi_lid_open() && !dmi_check_system(bad_lid_status)) + if (!dmi_check_system(bad_lid_status) && !acpi_lid_open()) status = connector_status_disconnected; return status; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index eaacfd0920d..82678d30ab0 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2345,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) connector->connector_type = DRM_MODE_CONNECTOR_VGA; intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | (1 << INTEL_ANALOG_CLONE_BIT); + } else if (flags & SDVO_OUTPUT_CVBS0) { + + sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0; + encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; + connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; + sdvo_priv->is_tv = true; + intel_output->needs_tv_clock = true; + intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; } else if (flags & SDVO_OUTPUT_LVDS0) { sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 1cf488247a1..48227e74475 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -90,21 +90,21 @@ int nouveau_hybrid_setup(struct drm_device *dev) { int result; - if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY, + if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE, &result)) return -ENODEV; NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result); - if (result & 0x1) { /* Stamina mode - disable the external GPU */ + if (result) { /* Ensure that the external GPU is enabled */ + nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL); + nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED, + NULL); + } else { /* Stamina mode - disable the external GPU */ nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA, NULL); nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA, NULL); - } else { /* Ensure that the external GPU is enabled */ - nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL); - nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED, - NULL); } return 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index d7f8d8b4a4b..0e9cd1d4913 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -1865,7 +1865,7 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) struct drm_nouveau_private *dev_priv = bios->dev->dev_private; - if (dev_priv->card_type >= NV_50) + if (dev_priv->card_type >= NV_40) return 1; /* @@ -3765,7 +3765,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, */ struct drm_nouveau_private *dev_priv = dev->dev_private; - struct init_exec iexec = {true, false}; struct nvbios *bios = &dev_priv->VBIOS; uint8_t *table = &bios->data[bios->display.script_table_ptr]; uint8_t *otable = NULL; @@ -3845,8 +3844,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry 
*dcbent, } } - bios->display.output = dcbent; - if (pxclk == 0) { script = ROM16(otable[6]); if (!script) { @@ -3855,7 +3852,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, } NV_TRACE(dev, "0x%04X: parsing output script 0\n", script); - parse_init_table(bios, script, &iexec); + nouveau_bios_run_init_table(dev, script, dcbent); } else if (pxclk == -1) { script = ROM16(otable[8]); @@ -3865,7 +3862,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, } NV_TRACE(dev, "0x%04X: parsing output script 1\n", script); - parse_init_table(bios, script, &iexec); + nouveau_bios_run_init_table(dev, script, dcbent); } else if (pxclk == -2) { if (table[4] >= 12) @@ -3878,7 +3875,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, } NV_TRACE(dev, "0x%04X: parsing output script 2\n", script); - parse_init_table(bios, script, &iexec); + nouveau_bios_run_init_table(dev, script, dcbent); } else if (pxclk > 0) { script = ROM16(otable[table[4] + i*6 + 2]); @@ -3890,7 +3887,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, } NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script); - parse_init_table(bios, script, &iexec); + nouveau_bios_run_init_table(dev, script, dcbent); } else if (pxclk < 0) { script = ROM16(otable[table[4] + i*6 + 4]); @@ -3902,7 +3899,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, } NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script); - parse_init_table(bios, script, &iexec); + nouveau_bios_run_init_table(dev, script, dcbent); } return 0; @@ -5865,9 +5862,11 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table, struct nvbios *bios = &dev_priv->VBIOS; struct init_exec iexec = { true, false }; + mutex_lock(&bios->lock); bios->display.output = dcbent; parse_init_table(bios, table, &iexec); bios->display.output = NULL; + mutex_unlock(&bios->lock); } static bool NVInitVBIOS(struct drm_device *dev) @@ -5876,6 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev) struct nvbios *bios = &dev_priv->VBIOS; memset(bios, 0, sizeof(struct nvbios)); + mutex_init(&bios->lock); bios->dev = dev; if (!NVShadowVBIOS(dev, bios->data)) diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h index 058e98c76d8..fd94bd6dc26 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.h +++ b/drivers/gpu/drm/nouveau/nouveau_bios.h @@ -205,6 +205,8 @@ struct nvbios { struct drm_device *dev; struct nouveau_bios_info pub; + struct mutex lock; + uint8_t data[NV_PROM_SIZE]; unsigned int length; bool execute; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index db0ed4c13f9..028719fddf7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -65,8 +65,10 @@ nouveau_bo_fixup_align(struct drm_device *dev, /* * Some of the tile_flags have a periodic structure of N*4096 bytes, - * align to to that as well as the page size. Overallocate memory to - * avoid corruption of other buffer objects. + * align to to that as well as the page size. Align the size to the + * appropriate boundaries. This does imply that sizes are rounded up + * 3-7 pages, so be aware of this and do not waste memory by allocating + * many small buffers. 
*/ if (dev_priv->card_type == NV_50) { uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15; @@ -77,22 +79,20 @@ nouveau_bo_fixup_align(struct drm_device *dev, case 0x2800: case 0x4800: case 0x7a00: - *size = roundup(*size, block_size); if (is_power_of_2(block_size)) { - *size += 3 * block_size; for (i = 1; i < 10; i++) { *align = 12 * i * block_size; if (!(*align % 65536)) break; } } else { - *size += 6 * block_size; for (i = 1; i < 10; i++) { *align = 8 * i * block_size; if (!(*align % 65536)) break; } } + *size = roundup(*size, *align); break; default: break; diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 343d718a966..2281f99da7f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -278,12 +278,11 @@ nouveau_channel_free(struct nouveau_channel *chan) /* Ensure the channel is no longer active on the GPU */ pfifo->reassign(dev, false); - if (pgraph->channel(dev) == chan) { - pgraph->fifo_access(dev, false); + pgraph->fifo_access(dev, false); + if (pgraph->channel(dev) == chan) pgraph->unload_context(dev); - pgraph->fifo_access(dev, true); - } pgraph->destroy_context(chan); + pgraph->fifo_access(dev, true); if (pfifo->channel_id(dev) == chan->id) { pfifo->disable(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 7e6d673f3a2..d2f63353ea9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -88,13 +88,14 @@ nouveau_connector_destroy(struct drm_connector *drm_connector) { struct nouveau_connector *nv_connector = nouveau_connector(drm_connector); - struct drm_device *dev = nv_connector->base.dev; - - NV_DEBUG_KMS(dev, "\n"); + struct drm_device *dev; if (!nv_connector) return; + dev = nv_connector->base.dev; + NV_DEBUG_KMS(dev, "\n"); + kfree(nv_connector->edid); drm_sysfs_connector_remove(drm_connector); drm_connector_cleanup(drm_connector); diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index dd493722422..f954ad93e81 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -502,12 +502,12 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, break; } - if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) { - ret = -EREMOTEIO; - goto out; - } - if (cmd & 1) { + if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) { + ret = -EREMOTEIO; + goto out; + } + for (i = 0; i < 4; i++) { data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i)); NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 343ab7f17cc..da3b93b8450 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -56,7 +56,7 @@ int nouveau_vram_pushbuf; module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM"); -int nouveau_vram_notify; +int nouveau_vram_notify = 1; module_param_named(vram_notify, nouveau_vram_notify, int, 0400); MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)"); @@ -75,6 +75,14 @@ MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status"); int nouveau_ignorelid = 0; module_param_named(ignorelid, nouveau_ignorelid, int, 0400); +MODULE_PARM_DESC(noagp, "Disable all acceleration"); +int nouveau_noaccel = 0; +module_param_named(noaccel, nouveau_noaccel, int, 0400); + +MODULE_PARM_DESC(noagp, 
"Disable fbcon acceleration"); +int nouveau_nofbaccel = 0; +module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); + MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 6b9690418bc..5445cefdd03 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -678,6 +678,8 @@ extern int nouveau_reg_debug; extern char *nouveau_vbios; extern int nouveau_ctxfw; extern int nouveau_ignorelid; +extern int nouveau_nofbaccel; +extern int nouveau_noaccel; /* nouveau_state.c */ extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 0b05c869e0e..ea879a2efef 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -107,6 +107,34 @@ static struct fb_ops nouveau_fbcon_ops = { .fb_setcmap = drm_fb_helper_setcmap, }; +static struct fb_ops nv04_fbcon_ops = { + .owner = THIS_MODULE, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, + .fb_setcolreg = drm_fb_helper_setcolreg, + .fb_fillrect = nv04_fbcon_fillrect, + .fb_copyarea = nv04_fbcon_copyarea, + .fb_imageblit = nv04_fbcon_imageblit, + .fb_sync = nouveau_fbcon_sync, + .fb_pan_display = drm_fb_helper_pan_display, + .fb_blank = drm_fb_helper_blank, + .fb_setcmap = drm_fb_helper_setcmap, +}; + +static struct fb_ops nv50_fbcon_ops = { + .owner = THIS_MODULE, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, + .fb_setcolreg = drm_fb_helper_setcolreg, + .fb_fillrect = nv50_fbcon_fillrect, + .fb_copyarea = nv50_fbcon_copyarea, + .fb_imageblit = nv50_fbcon_imageblit, + .fb_sync = nouveau_fbcon_sync, + .fb_pan_display = drm_fb_helper_pan_display, + .fb_blank = drm_fb_helper_blank, + .fb_setcmap = drm_fb_helper_setcmap, +}; + static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno) { @@ -267,8 +295,12 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, dev_priv->fbdev_info = info; strcpy(info->fix.id, "nouveaufb"); - info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | - FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; + if (nouveau_nofbaccel) + info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED; + else + info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | + FBINFO_HWACCEL_FILLRECT | + FBINFO_HWACCEL_IMAGEBLIT; info->fbops = &nouveau_fbcon_ops; info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - dev_priv->vm_vram_base; @@ -316,13 +348,15 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, par->nouveau_fb = nouveau_fb; par->dev = dev; - if (dev_priv->channel) { + if (dev_priv->channel && !nouveau_nofbaccel) { switch (dev_priv->card_type) { case NV_50: nv50_fbcon_accel_init(info); + info->fbops = &nv50_fbcon_ops; break; default: nv04_fbcon_accel_init(info); + info->fbops = &nv04_fbcon_ops; break; }; } diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h index 462e0b87b4b..f9c34e1a8c1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h @@ -40,7 +40,13 @@ int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb); void nouveau_fbcon_restore(void); void nouveau_fbcon_zfill(struct drm_device 
*dev); +void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); +void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); +void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); int nv04_fbcon_accel_init(struct fb_info *info); +void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); +void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); +void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); int nv50_fbcon_accel_init(struct fb_info *info); void nouveau_fbcon_gpu_lockup(struct fb_info *info); diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 6ac804b0c9f..70cc30803e3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -925,7 +925,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, } if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) { + spin_lock(&nvbo->bo.lock); ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait); + spin_unlock(&nvbo->bo.lock); } else { ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait); if (ret == 0) diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c index 419f4c2b3b8..c7ebec69674 100644 --- a/drivers/gpu/drm/nouveau/nouveau_grctx.c +++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c @@ -97,8 +97,8 @@ nouveau_grctx_prog_load(struct drm_device *dev) } pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL); - if (!pgraph->ctxprog) { - NV_ERROR(dev, "OOM copying ctxprog\n"); + if (!pgraph->ctxvals) { + NV_ERROR(dev, "OOM copying ctxvals\n"); release_firmware(fw); nouveau_grctx_fini(dev); return -ENOMEM; diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 3b9bad66162..447f9f69d6b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -211,6 +211,20 @@ nouveau_fifo_irq_handler(struct drm_device *dev) get + 4); } + if (status & NV_PFIFO_INTR_SEMAPHORE) { + uint32_t sem; + + status &= ~NV_PFIFO_INTR_SEMAPHORE; + nv_wr32(dev, NV03_PFIFO_INTR_0, + NV_PFIFO_INTR_SEMAPHORE); + + sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE); + nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); + + nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4); + nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); + } + if (status) { NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", status, chid); @@ -566,86 +580,99 @@ nouveau_pgraph_irq_handler(struct drm_device *dev) static void nv50_pgraph_irq_handler(struct drm_device *dev) { - uint32_t status, nsource; + uint32_t status; - status = nv_rd32(dev, NV03_PGRAPH_INTR); - nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); + while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) { + uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); - if (status & 0x00000001) { - nouveau_pgraph_intr_notify(dev, nsource); - status &= ~0x00000001; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); - } + if (status & 0x00000001) { + nouveau_pgraph_intr_notify(dev, nsource); + status &= ~0x00000001; + nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); + } - if (status & 0x00000010) { - nouveau_pgraph_intr_error(dev, nsource | - NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); + if (status & 0x00000010) { + nouveau_pgraph_intr_error(dev, nsource | + NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); - status &= ~0x00000010; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); - } + status &= ~0x00000010; + nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); + } - if (status & 0x00001000) { - 
nv_wr32(dev, 0x400500, 0x00000000); - nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); - nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev, - NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH); - nv_wr32(dev, 0x400500, 0x00010001); + if (status & 0x00001000) { + nv_wr32(dev, 0x400500, 0x00000000); + nv_wr32(dev, NV03_PGRAPH_INTR, + NV_PGRAPH_INTR_CONTEXT_SWITCH); + nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev, + NV40_PGRAPH_INTR_EN) & + ~NV_PGRAPH_INTR_CONTEXT_SWITCH); + nv_wr32(dev, 0x400500, 0x00010001); - nv50_graph_context_switch(dev); + nv50_graph_context_switch(dev); - status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; - } + status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; + } - if (status & 0x00100000) { - nouveau_pgraph_intr_error(dev, nsource | - NV03_PGRAPH_NSOURCE_DATA_ERROR); + if (status & 0x00100000) { + nouveau_pgraph_intr_error(dev, nsource | + NV03_PGRAPH_NSOURCE_DATA_ERROR); - status &= ~0x00100000; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); - } + status &= ~0x00100000; + nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); + } - if (status & 0x00200000) { - int r; - - nouveau_pgraph_intr_error(dev, nsource | - NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); - - NV_ERROR(dev, "magic set 1:\n"); - for (r = 0x408900; r <= 0x408910; r += 4) - NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); - nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000); - for (r = 0x408e08; r <= 0x408e24; r += 4) - NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); - nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000); - - NV_ERROR(dev, "magic set 2:\n"); - for (r = 0x409900; r <= 0x409910; r += 4) - NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); - nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000); - for (r = 0x409e08; r <= 0x409e24; r += 4) - NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); - nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000); - - status &= ~0x00200000; - nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource); - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); - } + if (status & 0x00200000) { + int r; + + nouveau_pgraph_intr_error(dev, nsource | + NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); + + NV_ERROR(dev, "magic set 1:\n"); + for (r = 0x408900; r <= 0x408910; r += 4) + NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, + nv_rd32(dev, r)); + nv_wr32(dev, 0x408900, + nv_rd32(dev, 0x408904) | 0xc0000000); + for (r = 0x408e08; r <= 0x408e24; r += 4) + NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, + nv_rd32(dev, r)); + nv_wr32(dev, 0x408e08, + nv_rd32(dev, 0x408e08) | 0xc0000000); + + NV_ERROR(dev, "magic set 2:\n"); + for (r = 0x409900; r <= 0x409910; r += 4) + NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, + nv_rd32(dev, r)); + nv_wr32(dev, 0x409900, + nv_rd32(dev, 0x409904) | 0xc0000000); + for (r = 0x409e08; r <= 0x409e24; r += 4) + NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, + nv_rd32(dev, r)); + nv_wr32(dev, 0x409e08, + nv_rd32(dev, 0x409e08) | 0xc0000000); + + status &= ~0x00200000; + nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource); + nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); + } - if (status) { - NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status); - nv_wr32(dev, NV03_PGRAPH_INTR, status); - } + if (status) { + NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", + status); + nv_wr32(dev, NV03_PGRAPH_INTR, status); + } - { - const int isb = (1 << 16) | (1 << 0); + { + const int isb = (1 << 16) | (1 << 0); - if ((nv_rd32(dev, 0x400500) & isb) != isb) - nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb); - nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); + if 
((nv_rd32(dev, 0x400500) & isb) != isb) + nv_wr32(dev, 0x400500, + nv_rd32(dev, 0x400500) | isb); + } } nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); + nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); } static void diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 6c66a34b634..d99dc087f9b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c @@ -34,15 +34,20 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct nouveau_bo *ntfy = NULL; + uint32_t flags; int ret; - ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ? - TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT, + if (nouveau_vram_notify) + flags = TTM_PL_FLAG_VRAM; + else + flags = TTM_PL_FLAG_TT; + + ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0x0000, false, true, &ntfy); if (ret) return ret; - ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM); + ret = nouveau_bo_pin(ntfy, flags); if (ret) goto out_err; @@ -128,6 +133,8 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, target = NV_DMA_TARGET_PCI; } else { target = NV_DMA_TARGET_AGP; + if (dev_priv->card_type >= NV_50) + offset += dev_priv->vm_gart_base; } } else { NV_ERROR(dev, "Bad DMA target, mem_type %d!\n", diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 6c2cf81716d..e7c100ba63a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -885,11 +885,12 @@ int nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, struct nouveau_gpuobj **gpuobj_ret) { - struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + struct drm_nouveau_private *dev_priv; struct nouveau_gpuobj *gpuobj; if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) return -EINVAL; + dev_priv = chan->dev->dev_private; gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); if (!gpuobj) diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h index 251f1b3b38b..aa9b310e41b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_reg.h +++ b/drivers/gpu/drm/nouveau/nouveau_reg.h @@ -99,6 +99,7 @@ * the card will hang early on in the X init process. 
*/ # define NV_PMC_ENABLE_UNK13 (1<<13) +#define NV40_PMC_GRAPH_UNITS 0x00001540 #define NV40_PMC_BACKLIGHT 0x000015f0 # define NV40_PMC_BACKLIGHT_MASK 0x001f0000 #define NV40_PMC_1700 0x00001700 diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 4c7f1e403e8..ed1590577b6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -54,11 +54,12 @@ static void nouveau_sgdma_clear(struct ttm_backend *be) { struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; - struct drm_device *dev = nvbe->dev; - - NV_DEBUG(nvbe->dev, "\n"); + struct drm_device *dev; if (nvbe && nvbe->pages) { + dev = nvbe->dev; + NV_DEBUG(dev, "\n"); + if (nvbe->bound) be->func->unbind(be); diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index f2d0187ba15..a4851af5b05 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -310,6 +310,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) static unsigned int nouveau_vga_set_decode(void *priv, bool state) { + struct drm_device *dev = priv; + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->chipset >= 0x40) + nv_wr32(dev, 0x88054, state); + else + nv_wr32(dev, 0x1854, state); + if (state) return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; @@ -427,15 +435,19 @@ nouveau_card_init(struct drm_device *dev) if (ret) goto out_timer; - /* PGRAPH */ - ret = engine->graph.init(dev); - if (ret) - goto out_fb; + if (nouveau_noaccel) + engine->graph.accel_blocked = true; + else { + /* PGRAPH */ + ret = engine->graph.init(dev); + if (ret) + goto out_fb; - /* PFIFO */ - ret = engine->fifo.init(dev); - if (ret) - goto out_graph; + /* PFIFO */ + ret = engine->fifo.init(dev); + if (ret) + goto out_graph; + } /* this call irq_preinstall, register irq handler and * call irq_postinstall @@ -479,9 +491,11 @@ nouveau_card_init(struct drm_device *dev) out_irq: drm_irq_uninstall(dev); out_fifo: - engine->fifo.takedown(dev); + if (!nouveau_noaccel) + engine->fifo.takedown(dev); out_graph: - engine->graph.takedown(dev); + if (!nouveau_noaccel) + engine->graph.takedown(dev); out_fb: engine->fb.takedown(dev); out_timer: @@ -518,8 +532,10 @@ static void nouveau_card_takedown(struct drm_device *dev) dev_priv->channel = NULL; } - engine->fifo.takedown(dev); - engine->graph.takedown(dev); + if (!nouveau_noaccel) { + engine->fifo.takedown(dev); + engine->graph.takedown(dev); + } engine->fb.takedown(dev); engine->timer.takedown(dev); engine->mc.takedown(dev); @@ -817,6 +833,15 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, case NOUVEAU_GETPARAM_VM_VRAM_BASE: getparam->value = dev_priv->vm_vram_base; break; + case NOUVEAU_GETPARAM_GRAPH_UNITS: + /* NV40 and NV50 versions are quite different, but register + * address is the same. User is supposed to know the card + * family anyway... 
*/ + if (dev_priv->chipset >= 0x40) { + getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS); + break; + } + /* FALLTHRU */ default: NV_ERROR(dev, "unknown parameter %lld\n", getparam->param); return -EINVAL; diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index d910873c136..fd01caabd5c 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c @@ -27,7 +27,7 @@ #include "nouveau_dma.h" #include "nouveau_fbcon.h" -static void +void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) { struct nouveau_fbcon_par *par = info->par; @@ -54,7 +54,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) FIRE_RING(chan); } -static void +void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct nouveau_fbcon_par *par = info->par; @@ -88,7 +88,7 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) FIRE_RING(chan); } -static void +void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) { struct nouveau_fbcon_par *par = info->par; @@ -307,9 +307,6 @@ nv04_fbcon_accel_init(struct fb_info *info) FIRE_RING(chan); - info->fbops->fb_fillrect = nv04_fbcon_fillrect; - info->fbops->fb_copyarea = nv04_fbcon_copyarea; - info->fbops->fb_imageblit = nv04_fbcon_imageblit; return 0; } diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c index 58b917c3341..21ac6e49b6e 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv.c +++ b/drivers/gpu/drm/nouveau/nv17_tv.c @@ -579,6 +579,8 @@ static void nv17_tv_restore(struct drm_encoder *encoder) nouveau_encoder(encoder)->restore.output); nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state); + + nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED; } static int nv17_tv_create_resources(struct drm_encoder *encoder, diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 40b7360841f..d1a651e3400 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -298,14 +298,17 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) static void nv50_crtc_destroy(struct drm_crtc *crtc) { - struct drm_device *dev = crtc->dev; - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - - NV_DEBUG_KMS(dev, "\n"); + struct drm_device *dev; + struct nouveau_crtc *nv_crtc; if (!crtc) return; + dev = crtc->dev; + nv_crtc = nouveau_crtc(crtc); + + NV_DEBUG_KMS(dev, "\n"); + drm_crtc_cleanup(&nv_crtc->base); nv50_cursor_fini(nv_crtc); diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index e4f279ee61c..0f57cdf7ccb 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -3,7 +3,7 @@ #include "nouveau_dma.h" #include "nouveau_fbcon.h" -static void +void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct nouveau_fbcon_par *par = info->par; @@ -46,7 +46,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) FIRE_RING(chan); } -static void +void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) { struct nouveau_fbcon_par *par = info->par; @@ -81,7 +81,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) FIRE_RING(chan); } -static void +void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) { struct nouveau_fbcon_par *par = info->par; @@ -262,9 +262,6 @@ nv50_fbcon_accel_init(struct fb_info 
*info) OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base); - info->fbops->fb_fillrect = nv50_fbcon_fillrect; - info->fbops->fb_copyarea = nv50_fbcon_copyarea; - info->fbops->fb_imageblit = nv50_fbcon_imageblit; return 0; } diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index 32b244bcb48..204a79ff10f 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c @@ -317,17 +317,20 @@ void nv50_fifo_destroy_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; + struct nouveau_gpuobj_ref *ramfc = chan->ramfc; NV_DEBUG(dev, "ch%d\n", chan->id); - nouveau_gpuobj_ref_del(dev, &chan->ramfc); - nouveau_gpuobj_ref_del(dev, &chan->cache); - + /* This will ensure the channel is seen as disabled. */ + chan->ramfc = NULL; nv50_fifo_channel_disable(dev, chan->id, false); /* Dummy channel, also used on ch 127 */ if (chan->id == 0) nv50_fifo_channel_disable(dev, 127, false); + + nouveau_gpuobj_ref_del(dev, &ramfc); + nouveau_gpuobj_ref_del(dev, &chan->cache); } int diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 20319e59d36..6d504801b51 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -165,6 +165,12 @@ nv50_graph_channel(struct drm_device *dev) uint32_t inst; int i; + /* Be sure we're not in the middle of a context switch or bad things + * will happen, such as unloading the wrong pgraph context. + */ + if (!nv_wait(0x400300, 0x00000001, 0x00000000)) + NV_ERROR(dev, "Ctxprog is still running\n"); + inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) return NULL; @@ -275,7 +281,7 @@ nv50_graph_load_context(struct nouveau_channel *chan) int nv50_graph_unload_context(struct drm_device *dev) { - uint32_t inst, fifo = nv_rd32(dev, 0x400500); + uint32_t inst; inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) @@ -283,12 +289,10 @@ nv50_graph_unload_context(struct drm_device *dev) inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE; nouveau_wait_for_idle(dev); - nv_wr32(dev, 0x400500, fifo & ~1); nv_wr32(dev, 0x400784, inst); nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20); nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01); nouveau_wait_for_idle(dev); - nv_wr32(dev, 0x400500, fifo); nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst); return 0; diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c index ecf1936b822..c2fff543b06 100644 --- a/drivers/gpu/drm/nouveau/nv50_sor.c +++ b/drivers/gpu/drm/nouveau/nv50_sor.c @@ -101,6 +101,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode) struct nouveau_encoder *nvenc = nouveau_encoder(enc); if (nvenc == nv_encoder || + nvenc->disconnect != nv50_sor_disconnect || nvenc->dcb->or != nv_encoder->dcb->or) continue; diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig index 5982321be4d..1c02d23f6fc 100644 --- a/drivers/gpu/drm/radeon/Kconfig +++ b/drivers/gpu/drm/radeon/Kconfig @@ -1,10 +1,14 @@ config DRM_RADEON_KMS - bool "Enable modesetting on radeon by default" + bool "Enable modesetting on radeon by default - NEW DRIVER" depends on DRM_RADEON help - Choose this option if you want kernel modesetting enabled by default, - and you have a new enough userspace to support this. Running old - userspaces with this enabled will cause pain. + Choose this option if you want kernel modesetting enabled by default. 
+ + This is a completely new driver. It's only part of the existing drm + for compatibility reasons. It requires an entirely different graphics + stack above it and works very differently from the old drm stack. + i.e. don't enable this unless you know what you are doing it may + cause issues or bugs compared to the previous userspace driver stack. When kernel modesetting is enabled the IOCTL of radeon/drm driver are considered as invalid and an error message is printed diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index e3b44562d26..7f152f66f19 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -24,6 +24,7 @@ #include <linux/module.h> #include <linux/sched.h> +#include <asm/unaligned.h> #define ATOM_DEBUG @@ -212,7 +213,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, case ATOM_ARG_PS: idx = U8(*ptr); (*ptr)++; - val = le32_to_cpu(ctx->ps[idx]); + /* get_unaligned_le32 avoids unaligned accesses from atombios + * tables, noticed on a DEC Alpha. */ + val = get_unaligned_le32((u32 *)&ctx->ps[idx]); if (print) DEBUG("PS[0x%02X,0x%04X]", idx, val); break; @@ -640,7 +643,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg) uint8_t count = U8((*ptr)++); SDEBUG(" count: %d\n", count); if (arg == ATOM_UNIT_MICROSEC) - schedule_timeout_uninterruptible(usecs_to_jiffies(count)); + udelay(count); else schedule_timeout_uninterruptible(msecs_to_jiffies(count)); } diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 71060114d5d..99915a682d5 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -332,11 +332,13 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); unsigned char *base; + int retry_count = 0; memset(&args, 0, sizeof(args)); base = (unsigned char *)rdev->mode_info.atom_context->scratch; +retry: memcpy(base, req_bytes, num_bytes); args.lpAuxRequest = 0; @@ -347,10 +349,12 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - if (args.ucReplyStatus) { - DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n", + if (args.ucReplyStatus && !args.ucDataOutLen) { + if (args.ucReplyStatus == 0x20 && retry_count++ < 10) + goto retry; + DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n", req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], - chan->rec.i2c_id, args.ucReplyStatus); + chan->rec.i2c_id, args.ucReplyStatus, retry_count); return false; } diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 11c9a3fe681..c0d4650cdb7 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -354,11 +354,17 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) return RREG32(RADEON_CRTC2_CRNT_FRAME); } +/* Who ever call radeon_fence_emit should call ring_lock and ask + * for enough space (today caller are ib schedule and buffer move) */ void r100_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) { - /* Who ever call radeon_fence_emit should call ring_lock and ask - * for enough space (today caller are ib schedule and buffer move) */ + /* We have to make sure that caches are flushed before + * CPU might read something from VRAM. 
*/ + radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); + radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL); + radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); + radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL); /* Wait until IDLE & CLEAN */ radeon_ring_write(rdev, PACKET0(0x1720, 0)); radeon_ring_write(rdev, (1 << 16) | (1 << 17)); @@ -3369,7 +3375,6 @@ int r100_suspend(struct radeon_device *rdev) void r100_fini(struct radeon_device *rdev) { - r100_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -3481,13 +3486,12 @@ int r100_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); - r100_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); + radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); - radeon_irq_kms_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 0051d11b907..43b55a030b4 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -506,11 +506,14 @@ void r300_vram_info(struct radeon_device *rdev) /* DDR for all card after R300 & IGP */ rdev->mc.vram_is_ddr = true; + tmp = RREG32(RADEON_MEM_CNTL); - if (tmp & R300_MEM_NUM_CHANNELS_MASK) { - rdev->mc.vram_width = 128; - } else { - rdev->mc.vram_width = 64; + tmp &= R300_MEM_NUM_CHANNELS_MASK; + switch (tmp) { + case 0: rdev->mc.vram_width = 64; break; + case 1: rdev->mc.vram_width = 128; break; + case 2: rdev->mc.vram_width = 256; break; + default: rdev->mc.vram_width = 128; break; } r100_vram_init_sizes(rdev); @@ -1327,7 +1330,6 @@ int r300_suspend(struct radeon_device *rdev) void r300_fini(struct radeon_device *rdev) { - r300_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -1418,15 +1420,15 @@ int r300_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); - r300_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); + radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); - radeon_irq_kms_fini(rdev); + radeon_agp_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 4526faaacca..d9373246c97 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -389,16 +389,15 @@ int r420_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); - r420_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); + radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); radeon_agp_fini(rdev); - radeon_irq_kms_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 9a189072f2b..ddf5731eba0 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c @@ -294,13 +294,12 @@ int r520_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); - rv515_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); 
r100_ib_fini(rdev); + radeon_irq_kms_fini(rdev); rv370_pcie_gart_fini(rdev); radeon_agp_fini(rdev); - radeon_irq_kms_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 1b6d0001b20..2ffcf5a0355 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1654,6 +1654,12 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size) rdev->cp.align_mask = 16 - 1; } +void r600_cp_fini(struct radeon_device *rdev) +{ + r600_cp_stop(rdev); + radeon_ring_fini(rdev); +} + /* * GPU scratch registers helpers function. @@ -1861,6 +1867,12 @@ int r600_startup(struct radeon_device *rdev) return r; } r600_gpu_init(rdev); + r = r600_blit_init(rdev); + if (r) { + r600_blit_fini(rdev); + rdev->asic->copy = NULL; + dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); + } /* pin copy shader into vram */ if (rdev->r600_blit.shader_obj) { r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); @@ -1938,6 +1950,13 @@ int r600_resume(struct radeon_device *rdev) DRM_ERROR("radeon: failled testing IB (%d).\n", r); return r; } + + r = r600_audio_init(rdev); + if (r) { + DRM_ERROR("radeon: audio resume failed\n"); + return r; + } + return r; } @@ -1945,6 +1964,7 @@ int r600_suspend(struct radeon_device *rdev) { int r; + r600_audio_fini(rdev); /* FIXME: we should wait for ring to be empty */ r600_cp_stop(rdev); rdev->cp.ready = false; @@ -2045,19 +2065,15 @@ int r600_init(struct radeon_device *rdev) r = r600_pcie_gart_init(rdev); if (r) return r; - r = r600_blit_init(rdev); - if (r) { - r600_blit_fini(rdev); - rdev->asic->copy = NULL; - dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); - } rdev->accel_working = true; r = r600_startup(rdev); if (r) { - r600_suspend(rdev); + dev_err(rdev->dev, "disabling GPU acceleration\n"); + r600_cp_fini(rdev); r600_wb_fini(rdev); - radeon_ring_fini(rdev); + r600_irq_fini(rdev); + radeon_irq_kms_fini(rdev); r600_pcie_gart_fini(rdev); rdev->accel_working = false; } @@ -2083,20 +2099,17 @@ int r600_init(struct radeon_device *rdev) void r600_fini(struct radeon_device *rdev) { - /* Suspend operations */ - r600_suspend(rdev); - r600_audio_fini(rdev); r600_blit_fini(rdev); + r600_cp_fini(rdev); + r600_wb_fini(rdev); r600_irq_fini(rdev); radeon_irq_kms_fini(rdev); - radeon_ring_fini(rdev); - r600_wb_fini(rdev); r600_pcie_gart_fini(rdev); + radeon_agp_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); radeon_clocks_fini(rdev); - radeon_agp_fini(rdev); radeon_bo_fini(rdev); radeon_atombios_fini(rdev); kfree(rdev->bios); @@ -2900,3 +2913,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev) return 0; #endif } + +/** + * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl + * rdev: radeon device structure + * bo: buffer object struct which userspace is waiting for idle + * + * Some R6XX/R7XX doesn't seems to take into account HDP flush performed + * through ring buffer, this leads to corruption in rendering, see + * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we + * directly perform HDP flush by writing register through MMIO. 
+ */ +void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) +{ + WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); +} diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index 99e2c3891a7..0dcb6904c4f 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c @@ -35,7 +35,7 @@ */ static int r600_audio_chipset_supported(struct radeon_device *rdev) { - return rdev->family >= CHIP_R600 + return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710) || rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740; @@ -261,7 +261,6 @@ void r600_audio_fini(struct radeon_device *rdev) if (!r600_audio_chipset_supported(rdev)) return; - WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000); - del_timer(&rdev->audio_timer); + WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000); } diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index af1c3ca8a4c..446b765ac72 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -543,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev) void r600_vb_ib_put(struct radeon_device *rdev) { radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); - mutex_lock(&rdev->ib_pool.mutex); - list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs); - mutex_unlock(&rdev->ib_pool.mutex); radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 2d5f2bfa720..c0356bb193e 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -96,6 +96,7 @@ extern int radeon_audio; * symbol; */ #define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ +/* RADEON_IB_POOL_SIZE must be a power of 2 */ #define RADEON_IB_POOL_SIZE 16 #define RADEON_DEBUGFS_MAX_NUM_FILES 32 #define RADEONFB_CONN_LIMIT 4 @@ -363,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev); */ struct radeon_ib { struct list_head list; - unsigned long idx; + unsigned idx; uint64_t gpu_addr; struct radeon_fence *fence; - uint32_t *ptr; + uint32_t *ptr; uint32_t length_dw; + bool free; }; /* @@ -377,10 +379,9 @@ struct radeon_ib { struct radeon_ib_pool { struct mutex mutex; struct radeon_bo *robj; - struct list_head scheduled_ibs; struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; bool ready; - DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE); + unsigned head_id; }; struct radeon_cp { @@ -661,6 +662,13 @@ struct radeon_asic { void (*hpd_fini)(struct radeon_device *rdev); bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); + /* ioctl hw specific callback. Some hw might want to perform special + * operation on specific ioctl. For instance on wait idle some hw + * might want to perform and HDP flush through MMIO as it seems that + * some R6XX/R7XX hw doesn't take HDP flush into account if programmed + * through ring. 
+ */ + void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); }; /* @@ -1143,6 +1151,7 @@ extern bool r600_card_posted(struct radeon_device *rdev); extern void r600_cp_stop(struct radeon_device *rdev); extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); extern int r600_cp_resume(struct radeon_device *rdev); +extern void r600_cp_fini(struct radeon_device *rdev); extern int r600_count_pipe_bits(uint32_t val); extern int r600_gart_clear_page(struct radeon_device *rdev, int i); extern int r600_mc_wait_for_idle(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index f2fbd2e4e9d..05ee1aeac3f 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -117,6 +117,7 @@ static struct radeon_asic r100_asic = { .hpd_fini = &r100_hpd_fini, .hpd_sense = &r100_hpd_sense, .hpd_set_polarity = &r100_hpd_set_polarity, + .ioctl_wait_idle = NULL, }; @@ -176,6 +177,7 @@ static struct radeon_asic r300_asic = { .hpd_fini = &r100_hpd_fini, .hpd_sense = &r100_hpd_sense, .hpd_set_polarity = &r100_hpd_set_polarity, + .ioctl_wait_idle = NULL, }; /* @@ -219,6 +221,7 @@ static struct radeon_asic r420_asic = { .hpd_fini = &r100_hpd_fini, .hpd_sense = &r100_hpd_sense, .hpd_set_polarity = &r100_hpd_set_polarity, + .ioctl_wait_idle = NULL, }; @@ -267,6 +270,7 @@ static struct radeon_asic rs400_asic = { .hpd_fini = &r100_hpd_fini, .hpd_sense = &r100_hpd_sense, .hpd_set_polarity = &r100_hpd_set_polarity, + .ioctl_wait_idle = NULL, }; @@ -323,6 +327,7 @@ static struct radeon_asic rs600_asic = { .hpd_fini = &rs600_hpd_fini, .hpd_sense = &rs600_hpd_sense, .hpd_set_polarity = &rs600_hpd_set_polarity, + .ioctl_wait_idle = NULL, }; @@ -370,6 +375,7 @@ static struct radeon_asic rs690_asic = { .hpd_fini = &rs600_hpd_fini, .hpd_sense = &rs600_hpd_sense, .hpd_set_polarity = &rs600_hpd_set_polarity, + .ioctl_wait_idle = NULL, }; @@ -421,6 +427,7 @@ static struct radeon_asic rv515_asic = { .hpd_fini = &rs600_hpd_fini, .hpd_sense = &rs600_hpd_sense, .hpd_set_polarity = &rs600_hpd_set_polarity, + .ioctl_wait_idle = NULL, }; @@ -463,6 +470,7 @@ static struct radeon_asic r520_asic = { .hpd_fini = &rs600_hpd_fini, .hpd_sense = &rs600_hpd_sense, .hpd_set_polarity = &rs600_hpd_set_polarity, + .ioctl_wait_idle = NULL, }; /* @@ -504,6 +512,7 @@ void r600_hpd_fini(struct radeon_device *rdev); bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); void r600_hpd_set_polarity(struct radeon_device *rdev, enum radeon_hpd_id hpd); +extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo); static struct radeon_asic r600_asic = { .init = &r600_init, @@ -538,6 +547,7 @@ static struct radeon_asic r600_asic = { .hpd_fini = &r600_hpd_fini, .hpd_sense = &r600_hpd_sense, .hpd_set_polarity = &r600_hpd_set_polarity, + .ioctl_wait_idle = r600_ioctl_wait_idle, }; /* @@ -582,6 +592,7 @@ static struct radeon_asic rv770_asic = { .hpd_fini = &r600_hpd_fini, .hpd_sense = &r600_hpd_sense, .hpd_set_polarity = &r600_hpd_set_polarity, + .ioctl_wait_idle = r600_ioctl_wait_idle, }; #endif diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index fa82ca74324..2dcda611587 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -287,6 +287,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, *connector_type = DRM_MODE_CONNECTOR_DVID; } + /* XFX Pine Group device rv730 reports no VGA 
DDC lines + * even though they are wired up to record 0x93 + */ + if ((dev->pdev->device == 0x9498) && + (dev->pdev->subsystem_vendor == 0x1682) && + (dev->pdev->subsystem_device == 0x2452)) { + struct radeon_device *rdev = dev->dev_private; + *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93); + } return true; } diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 4ddfd4b5bc5..7932dc4d6b9 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c @@ -65,31 +65,42 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, if (r) { goto out_cleanup; } - start_jiffies = jiffies; - for (i = 0; i < n; i++) { - r = radeon_fence_create(rdev, &fence); - if (r) { - goto out_cleanup; + + /* r100 doesn't have dma engine so skip the test */ + if (rdev->asic->copy_dma) { + + start_jiffies = jiffies; + for (i = 0; i < n; i++) { + r = radeon_fence_create(rdev, &fence); + if (r) { + goto out_cleanup; + } + + r = radeon_copy_dma(rdev, saddr, daddr, + size / RADEON_GPU_PAGE_SIZE, fence); + + if (r) { + goto out_cleanup; + } + r = radeon_fence_wait(fence, false); + if (r) { + goto out_cleanup; + } + radeon_fence_unref(&fence); } - r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence); - if (r) { - goto out_cleanup; + end_jiffies = jiffies; + time = end_jiffies - start_jiffies; + time = jiffies_to_msecs(time); + if (time > 0) { + i = ((n * size) >> 10) / time; + printk(KERN_INFO "radeon: dma %u bo moves of %ukb from" + " %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n", + n, size >> 10, + sdomain, ddomain, time, + i, i * 1000, (i * 1000) / 1024); } - r = radeon_fence_wait(fence, false); - if (r) { - goto out_cleanup; - } - radeon_fence_unref(&fence); - } - end_jiffies = jiffies; - time = end_jiffies - start_jiffies; - time = jiffies_to_msecs(time); - if (time > 0) { - i = ((n * size) >> 10) / time; - printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d" - " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10, - sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024); } + start_jiffies = jiffies; for (i = 0; i < n; i++) { r = radeon_fence_create(rdev, &fence); diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 579c8920e08..e7b19440102 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -971,8 +971,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder lvds->native_mode.vdisplay); lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c); - if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0) - lvds->panel_vcc_delay = 2000; + lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000); lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24); lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 55266416fa4..23818854001 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -580,16 +580,18 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder; struct drm_encoder_helper_funcs *encoder_funcs; - bool dret; + bool dret = false; enum drm_connector_status ret = connector_status_disconnected; encoder = radeon_best_single_encoder(connector); if (!encoder) ret = 
connector_status_disconnected; - radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); - dret = radeon_ddc_probe(radeon_connector); - radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); + if (radeon_connector->ddc_bus) { + radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); + dret = radeon_ddc_probe(radeon_connector); + radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); + } if (dret) { if (radeon_connector->edid) { kfree(radeon_connector->edid); @@ -740,11 +742,13 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect struct drm_mode_object *obj; int i; enum drm_connector_status ret = connector_status_disconnected; - bool dret; + bool dret = false; - radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); - dret = radeon_ddc_probe(radeon_connector); - radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); + if (radeon_connector->ddc_bus) { + radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); + dret = radeon_ddc_probe(radeon_connector); + radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); + } if (dret) { if (radeon_connector->edid) { kfree(radeon_connector->edid); @@ -1343,7 +1347,7 @@ radeon_add_legacy_connector(struct drm_device *dev, radeon_connector->dac_load_detect = false; drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, - 1); + radeon_connector->dac_load_detect); drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.tv_std_property, radeon_combios_get_tv_info(rdev)); diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 1190148cf5e..e9d085021c1 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -86,7 +86,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p) &p->validated); } } - return radeon_bo_list_validate(&p->validated, p->ib->fence); + return radeon_bo_list_validate(&p->validated); } int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) @@ -189,12 +189,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) { unsigned i; - if (error && parser->ib) { - radeon_bo_list_unvalidate(&parser->validated, - parser->ib->fence); - } else { - radeon_bo_list_unreserve(&parser->validated); + if (!error && parser->ib) { + radeon_bo_list_fence(&parser->validated, parser->ib->fence); } + radeon_bo_list_unreserve(&parser->validated); for (i = 0; i < parser->nrelocs; i++) { if (parser->relocs[i].gobj) { mutex_lock(&parser->rdev->ddev->struct_mutex); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 6a92f994cc2..7e17a362b54 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -278,7 +278,7 @@ static void radeon_print_display_setup(struct drm_device *dev) DRM_INFO(" %s\n", connector_names[connector->connector_type]); if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]); - if (radeon_connector->ddc_bus) + if (radeon_connector->ddc_bus) { DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", radeon_connector->ddc_bus->rec.mask_clk_reg, radeon_connector->ddc_bus->rec.mask_data_reg, @@ -288,6 +288,15 @@ static void radeon_print_display_setup(struct drm_device *dev) radeon_connector->ddc_bus->rec.en_data_reg, radeon_connector->ddc_bus->rec.y_clk_reg, radeon_connector->ddc_bus->rec.y_data_reg); + } else { + if (connector->connector_type == DRM_MODE_CONNECTOR_VGA || + connector->connector_type == DRM_MODE_CONNECTOR_DVII || + connector->connector_type == 
DRM_MODE_CONNECTOR_DVID || + connector->connector_type == DRM_MODE_CONNECTOR_DVIA || + connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || + connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) + DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n"); + } DRM_INFO(" Encoders:\n"); list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { radeon_encoder = to_radeon_encoder(encoder); diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 3ba213d1b06..d71e346e9ab 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -248,7 +248,7 @@ int radeonfb_create(struct drm_device *dev, if (ret) goto out_unref; - memset_io(fbptr, 0xff, aligned_size); + memset_io(fbptr, 0x0, aligned_size); strcpy(info->fix.id, "radeondrmfb"); diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 0e1325e1853..db8e9a355a0 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -308,6 +308,9 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, } robj = gobj->driver_private; r = radeon_bo_wait(robj, NULL, false); + /* callback hw specific functions if any */ + if (robj->rdev->asic->ioctl_wait_idle) + robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj); mutex_lock(&dev->struct_mutex); drm_gem_object_unreference(gobj); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index d72a71bff21..f1da370928e 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -306,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head) } } -int radeon_bo_list_validate(struct list_head *head, void *fence) +int radeon_bo_list_validate(struct list_head *head) { struct radeon_bo_list *lobj; struct radeon_bo *bo; - struct radeon_fence *old_fence = NULL; int r; r = radeon_bo_list_reserve(head); @@ -334,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence) } lobj->gpu_offset = radeon_bo_gpu_offset(bo); lobj->tiling_flags = bo->tiling_flags; - if (fence) { - old_fence = (struct radeon_fence *)bo->tbo.sync_obj; - bo->tbo.sync_obj = radeon_fence_ref(fence); - bo->tbo.sync_obj_arg = NULL; - } - if (old_fence) { - radeon_fence_unref(&old_fence); - } } return 0; } -void radeon_bo_list_unvalidate(struct list_head *head, void *fence) +void radeon_bo_list_fence(struct list_head *head, void *fence) { struct radeon_bo_list *lobj; - struct radeon_fence *old_fence; - - if (fence) - list_for_each_entry(lobj, head, list) { - old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj); - if (old_fence == fence) { - lobj->bo->tbo.sync_obj = NULL; - radeon_fence_unref(&old_fence); - } + struct radeon_bo *bo; + struct radeon_fence *old_fence = NULL; + + list_for_each_entry(lobj, head, list) { + bo = lobj->bo; + spin_lock(&bo->tbo.lock); + old_fence = (struct radeon_fence *)bo->tbo.sync_obj; + bo->tbo.sync_obj = radeon_fence_ref(fence); + bo->tbo.sync_obj_arg = NULL; + spin_unlock(&bo->tbo.lock); + if (old_fence) { + radeon_fence_unref(&old_fence); } - radeon_bo_list_unreserve(head); + } } int radeon_bo_fbdev_mmap(struct radeon_bo *bo, diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index a02f18011ad..7ab43de1e24 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -156,8 +156,8 @@ extern void 
radeon_bo_list_add_object(struct radeon_bo_list *lobj, struct list_head *head); extern int radeon_bo_list_reserve(struct list_head *head); extern void radeon_bo_list_unreserve(struct list_head *head); -extern int radeon_bo_list_validate(struct list_head *head, void *fence); -extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence); +extern int radeon_bo_list_validate(struct list_head *head); +extern void radeon_bo_list_fence(struct list_head *head, void *fence); extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, struct vm_area_struct *vma); extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 4d12b2d17b4..694799f6fac 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib) { struct radeon_fence *fence; struct radeon_ib *nib; - unsigned long i; - int r = 0; + int r = 0, i, c; *ib = NULL; r = radeon_fence_create(rdev, &fence); if (r) { - DRM_ERROR("failed to create fence for new IB\n"); + dev_err(rdev->dev, "failed to create fence for new IB\n"); return r; } mutex_lock(&rdev->ib_pool.mutex); - i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); - if (i < RADEON_IB_POOL_SIZE) { - set_bit(i, rdev->ib_pool.alloc_bm); - rdev->ib_pool.ibs[i].length_dw = 0; - *ib = &rdev->ib_pool.ibs[i]; - mutex_unlock(&rdev->ib_pool.mutex); - goto out; + for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) { + i &= (RADEON_IB_POOL_SIZE - 1); + if (rdev->ib_pool.ibs[i].free) { + nib = &rdev->ib_pool.ibs[i]; + break; + } } - if (list_empty(&rdev->ib_pool.scheduled_ibs)) { - /* we go do nothings here */ + if (nib == NULL) { + /* This should never happen, it means we allocated all + * IB and haven't scheduled one yet, return EBUSY to + * userspace hoping that on ioctl recall we get better + * luck + */ + dev_err(rdev->dev, "no free indirect buffer !\n"); mutex_unlock(&rdev->ib_pool.mutex); - DRM_ERROR("all IB allocated none scheduled.\n"); - r = -EINVAL; - goto out; + radeon_fence_unref(&fence); + return -EBUSY; } - /* get the first ib on the scheduled list */ - nib = list_entry(rdev->ib_pool.scheduled_ibs.next, - struct radeon_ib, list); - if (nib->fence == NULL) { - /* we go do nothings here */ + rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1); + nib->free = false; + if (nib->fence) { mutex_unlock(&rdev->ib_pool.mutex); - DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx); - r = -EINVAL; - goto out; - } - mutex_unlock(&rdev->ib_pool.mutex); - - r = radeon_fence_wait(nib->fence, false); - if (r) { - DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx, - (unsigned long)nib->gpu_addr, nib->length_dw); - DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n"); - goto out; + r = radeon_fence_wait(nib->fence, false); + if (r) { + dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n", + nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw); + mutex_lock(&rdev->ib_pool.mutex); + nib->free = true; + mutex_unlock(&rdev->ib_pool.mutex); + radeon_fence_unref(&fence); + return r; + } + mutex_lock(&rdev->ib_pool.mutex); } radeon_fence_unref(&nib->fence); - + nib->fence = fence; nib->length_dw = 0; - - /* scheduled list is accessed here */ - mutex_lock(&rdev->ib_pool.mutex); - list_del(&nib->list); - INIT_LIST_HEAD(&nib->list); mutex_unlock(&rdev->ib_pool.mutex); - *ib = nib; -out: - if (r) { 
- radeon_fence_unref(&fence); - } else { - (*ib)->fence = fence; - } - return r; + return 0; } void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) @@ -114,18 +101,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) return; } mutex_lock(&rdev->ib_pool.mutex); - if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) { - /* IB is scheduled & not signaled don't do anythings */ - mutex_unlock(&rdev->ib_pool.mutex); - return; - } - list_del(&tmp->list); - INIT_LIST_HEAD(&tmp->list); - if (tmp->fence) - radeon_fence_unref(&tmp->fence); - - tmp->length_dw = 0; - clear_bit(tmp->idx, rdev->ib_pool.alloc_bm); + tmp->free = true; mutex_unlock(&rdev->ib_pool.mutex); } @@ -135,7 +111,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) if (!ib->length_dw || !rdev->cp.ready) { /* TODO: Nothings in the ib we should report. */ - DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); + DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx); return -EINVAL; } @@ -148,7 +124,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) radeon_ring_ib_execute(rdev, ib); radeon_fence_emit(rdev, ib->fence); mutex_lock(&rdev->ib_pool.mutex); - list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs); + /* once scheduled IB is considered free and protected by the fence */ + ib->free = true; mutex_unlock(&rdev->ib_pool.mutex); radeon_ring_unlock_commit(rdev); return 0; @@ -164,7 +141,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev) if (rdev->ib_pool.robj) return 0; /* Allocate 1M object buffer */ - INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs); r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, true, RADEON_GEM_DOMAIN_GTT, &rdev->ib_pool.robj); @@ -195,9 +171,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev) rdev->ib_pool.ibs[i].ptr = ptr + offset; rdev->ib_pool.ibs[i].idx = i; rdev->ib_pool.ibs[i].length_dw = 0; - INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list); + rdev->ib_pool.ibs[i].free = true; } - bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); + rdev->ib_pool.head_id = 0; rdev->ib_pool.ready = true; DRM_INFO("radeon: ib pool ready.\n"); if (radeon_debugfs_ib_init(rdev)) { @@ -214,7 +190,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev) return; } mutex_lock(&rdev->ib_pool.mutex); - bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); if (rdev->ib_pool.robj) { r = radeon_bo_reserve(rdev->ib_pool.robj, false); if (likely(r == 0)) { @@ -363,7 +338,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data) if (ib == NULL) { return 0; } - seq_printf(m, "IB %04lu\n", ib->idx); + seq_printf(m, "IB %04u\n", ib->idx); seq_printf(m, "IB fence %p\n", ib->fence); seq_printf(m, "IB size %05u dwords\n", ib->length_dw); for (i = 0; i < ib->length_dw; i++) { diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 9f5418983e2..287fcebfb4e 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -223,15 +223,31 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) return 0; } +int rs400_mc_wait_for_idle(struct radeon_device *rdev) +{ + unsigned i; + uint32_t tmp; + + for (i = 0; i < rdev->usec_timeout; i++) { + /* read MC_STATUS */ + tmp = RREG32(0x0150); + if (tmp & (1 << 2)) { + return 0; + } + DRM_UDELAY(1); + } + return -1; +} + void rs400_gpu_init(struct radeon_device *rdev) { /* FIXME: HDP same place on rs400 ? */ r100_hdp_reset(rdev); /* FIXME: is this correct ? 
*/ r420_pipes_init(rdev); - if (r300_mc_wait_for_idle(rdev)) { - printk(KERN_WARNING "Failed to wait MC idle while " - "programming pipes. Bad things might happen.\n"); + if (rs400_mc_wait_for_idle(rdev)) { + printk(KERN_WARNING "rs400: Failed to wait MC idle while " + "programming pipes. Bad things might happen. %08x\n", RREG32(0x150)); } } @@ -370,8 +386,8 @@ void rs400_mc_program(struct radeon_device *rdev) r100_mc_stop(rdev, &save); /* Wait for mc idle */ - if (r300_mc_wait_for_idle(rdev)) - dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); + if (rs400_mc_wait_for_idle(rdev)) + dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n"); WREG32(R_000148_MC_FB_LOCATION, S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); @@ -448,7 +464,6 @@ int rs400_suspend(struct radeon_device *rdev) void rs400_fini(struct radeon_device *rdev) { - rs400_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -527,7 +542,6 @@ int rs400_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); - rs400_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index d5255751e7b..c3818562a13 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -610,7 +610,6 @@ int rs600_suspend(struct radeon_device *rdev) void rs600_fini(struct radeon_device *rdev) { - rs600_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -689,7 +688,6 @@ int rs600_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); - rs600_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index cd31da91377..06e2771aee5 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -676,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev) void rs690_fini(struct radeon_device *rdev) { - rs690_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -756,7 +755,6 @@ int rs690_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); - rs690_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 62756717b04..0e1e6b8632b 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -537,7 +537,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev) void rv515_fini(struct radeon_device *rdev) { - rv515_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -615,13 +614,12 @@ int rv515_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); - rv515_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); + radeon_irq_kms_fini(rdev); rv370_pcie_gart_fini(rdev); radeon_agp_fini(rdev); - radeon_irq_kms_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index afd9e8213c2..5943d561fd1 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ 
b/drivers/gpu/drm/radeon/rv770.c @@ -887,6 +887,12 @@ static int rv770_startup(struct radeon_device *rdev) return r; } rv770_gpu_init(rdev); + r = r600_blit_init(rdev); + if (r) { + r600_blit_fini(rdev); + rdev->asic->copy = NULL; + dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); + } /* pin copy shader into vram */ if (rdev->r600_blit.shader_obj) { r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); @@ -1055,19 +1061,15 @@ int rv770_init(struct radeon_device *rdev) r = r600_pcie_gart_init(rdev); if (r) return r; - r = r600_blit_init(rdev); - if (r) { - r600_blit_fini(rdev); - rdev->asic->copy = NULL; - dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); - } rdev->accel_working = true; r = rv770_startup(rdev); if (r) { - rv770_suspend(rdev); + dev_err(rdev->dev, "disabling GPU acceleration\n"); + r600_cp_fini(rdev); r600_wb_fini(rdev); - radeon_ring_fini(rdev); + r600_irq_fini(rdev); + radeon_irq_kms_fini(rdev); rv770_pcie_gart_fini(rdev); rdev->accel_working = false; } @@ -1089,13 +1091,11 @@ int rv770_init(struct radeon_device *rdev) void rv770_fini(struct radeon_device *rdev) { - rv770_suspend(rdev); - r600_blit_fini(rdev); + r600_cp_fini(rdev); + r600_wb_fini(rdev); r600_irq_fini(rdev); radeon_irq_kms_fini(rdev); - radeon_ring_fini(rdev); - r600_wb_fini(rdev); rv770_pcie_gart_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 1a3e909b7bb..c7320ce4567 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1020,6 +1020,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem) { int i; + struct drm_mm_node *node = mem->mm_node; + + if (node && placement->lpfn != 0 && + (node->start < placement->fpfn || + node->start + node->size > placement->lpfn)) + return -1; for (i = 0; i < placement->num_placement; i++) { if ((placement->placement[i] & mem->placement & diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index a6e8f687fa6..0c9c0811f42 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -348,22 +348,19 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) */ DRM_INFO("It appears like vesafb is loaded. " - "Ignore above error if any. 
Entering stealth mode.\n"); + "Ignore above error if any.\n"); ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); if (unlikely(ret != 0)) { DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); goto out_no_device; } - vmw_kms_init(dev_priv); - vmw_overlay_init(dev_priv); - } else { - ret = vmw_request_device(dev_priv); - if (unlikely(ret != 0)) - goto out_no_device; - vmw_kms_init(dev_priv); - vmw_overlay_init(dev_priv); - vmw_fb_init(dev_priv); } + ret = vmw_request_device(dev_priv); + if (unlikely(ret != 0)) + goto out_no_device; + vmw_kms_init(dev_priv); + vmw_overlay_init(dev_priv); + vmw_fb_init(dev_priv); dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; register_pm_notifier(&dev_priv->pm_nb); @@ -406,17 +403,15 @@ static int vmw_driver_unload(struct drm_device *dev) unregister_pm_notifier(&dev_priv->pm_nb); - if (!dev_priv->stealth) { - vmw_fb_close(dev_priv); - vmw_kms_close(dev_priv); - vmw_overlay_close(dev_priv); - vmw_release_device(dev_priv); - pci_release_regions(dev->pdev); - } else { - vmw_kms_close(dev_priv); - vmw_overlay_close(dev_priv); + vmw_fb_close(dev_priv); + vmw_kms_close(dev_priv); + vmw_overlay_close(dev_priv); + vmw_release_device(dev_priv); + if (dev_priv->stealth) pci_release_region(dev->pdev, 2); - } + else + pci_release_regions(dev->pdev); + if (dev_priv->capabilities & SVGA_CAP_IRQMASK) drm_irq_uninstall(dev_priv->dev); if (dev->devname == vmw_devname) @@ -585,11 +580,6 @@ static int vmw_master_set(struct drm_device *dev, int ret = 0; DRM_INFO("Master set.\n"); - if (dev_priv->stealth) { - ret = vmw_request_device(dev_priv); - if (unlikely(ret != 0)) - return ret; - } if (active) { BUG_ON(active != &dev_priv->fbdev_master); @@ -649,18 +639,11 @@ static void vmw_master_drop(struct drm_device *dev, ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); - if (dev_priv->stealth) { - ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); - if (unlikely(ret != 0)) - DRM_ERROR("Unable to clean VRAM on master drop.\n"); - vmw_release_device(dev_priv); - } dev_priv->active_master = &dev_priv->fbdev_master; ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); ttm_vt_unlock(&dev_priv->fbdev_master.lock); - if (!dev_priv->stealth) - vmw_fb_on(dev_priv); + vmw_fb_on(dev_priv); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 135be9688c9..356dc935ec1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -39,10 +39,10 @@ #include "ttm/ttm_execbuf_util.h" #include "ttm/ttm_module.h" -#define VMWGFX_DRIVER_DATE "20090724" -#define VMWGFX_DRIVER_MAJOR 0 -#define VMWGFX_DRIVER_MINOR 1 -#define VMWGFX_DRIVER_PATCHLEVEL 2 +#define VMWGFX_DRIVER_DATE "20100209" +#define VMWGFX_DRIVER_MAJOR 1 +#define VMWGFX_DRIVER_MINOR 0 +#define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) #define VMWGFX_MAX_RELOCATIONS 2048 @@ -113,6 +113,7 @@ struct vmw_fifo_state { unsigned long static_buffer_size; bool using_bounce_buffer; uint32_t capabilities; + struct mutex fifo_mutex; struct rw_semaphore rwsem; }; @@ -213,7 +214,7 @@ struct vmw_private { * Fencing and IRQs. 
*/ - uint32_t fence_seq; + atomic_t fence_seq; wait_queue_head_t fence_queue; wait_queue_head_t fifo_queue; atomic_t fence_queue_waiters; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 4f4f6432be8..a93367041cd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -559,6 +559,9 @@ int vmw_fb_init(struct vmw_private *vmw_priv) info->pixmap.scan_align = 1; #endif + info->aperture_base = vmw_priv->vram_start; + info->aperture_size = vmw_priv->vram_size; + /* * Dirty & Deferred IO */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 4157547cc6e..39d43a01d84 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c @@ -74,6 +74,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) fifo->reserved_size = 0; fifo->using_bounce_buffer = false; + mutex_init(&fifo->fifo_mutex); init_rwsem(&fifo->rwsem); /* @@ -117,7 +118,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) (unsigned int) min, (unsigned int) fifo->capabilities); - dev_priv->fence_seq = dev_priv->last_read_sequence; + atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence); iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); return vmw_fifo_send_fence(dev_priv, &dummy); @@ -283,7 +284,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; int ret; - down_write(&fifo_state->rwsem); + mutex_lock(&fifo_state->fifo_mutex); max = ioread32(fifo_mem + SVGA_FIFO_MAX); min = ioread32(fifo_mem + SVGA_FIFO_MIN); next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); @@ -351,7 +352,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) } out_err: fifo_state->reserved_size = 0; - up_write(&fifo_state->rwsem); + mutex_unlock(&fifo_state->fifo_mutex); return NULL; } @@ -426,6 +427,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) } + down_write(&fifo_state->rwsem); if (fifo_state->using_bounce_buffer || reserveable) { next_cmd += bytes; if (next_cmd >= max) @@ -437,8 +439,9 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) if (reserveable) iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED); mb(); - vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); up_write(&fifo_state->rwsem); + vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); + mutex_unlock(&fifo_state->fifo_mutex); } int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) @@ -451,9 +454,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) fm = vmw_fifo_reserve(dev_priv, bytes); if (unlikely(fm == NULL)) { - down_write(&fifo_state->rwsem); - *sequence = dev_priv->fence_seq; - up_write(&fifo_state->rwsem); + *sequence = atomic_read(&dev_priv->fence_seq); ret = -ENOMEM; (void)vmw_fallback_wait(dev_priv, false, true, *sequence, false, 3*HZ); @@ -461,7 +462,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) } do { - *sequence = dev_priv->fence_seq++; + *sequence = atomic_add_return(1, &dev_priv->fence_seq); } while (*sequence == 0); if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 778851f9f1d..1c7a316454d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -48,6 +48,12 @@ int vmw_getparam_ioctl(struct 
drm_device *dev, void *data, case DRM_VMW_PARAM_FIFO_OFFSET: param->value = dev_priv->mmio_start; break; + case DRM_VMW_PARAM_HW_CAPS: + param->value = dev_priv->capabilities; + break; + case DRM_VMW_PARAM_FIFO_CAPS: + param->value = dev_priv->fifo.capabilities; + break; default: DRM_ERROR("Illegal vmwgfx get param request: %d\n", param->param); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index d40086fc864..4d7cb539386 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c @@ -85,19 +85,12 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv, return true; /** - * Below is to signal stale fences that have wrapped. - * First, block fence submission. - */ - - down_read(&fifo_state->rwsem); - - /** * Then check if the sequence is higher than what we've actually * emitted. Then the fence is stale and signaled. */ - ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP); - up_read(&fifo_state->rwsem); + ret = ((atomic_read(&dev_priv->fence_seq) - sequence) + > VMW_FENCE_WRAP); return ret; } @@ -127,7 +120,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, if (fifo_idle) down_read(&fifo_state->rwsem); - signal_seq = dev_priv->fence_seq; + signal_seq = atomic_read(&dev_priv->fence_seq); ret = 0; for (;;) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index eeba6d1d06e..31f9afed0a6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -769,10 +769,10 @@ int vmw_kms_init(struct vmw_private *dev_priv) drm_mode_config_init(dev); dev->mode_config.funcs = &vmw_kms_funcs; - dev->mode_config.min_width = 640; - dev->mode_config.min_height = 480; - dev->mode_config.max_width = 2048; - dev->mode_config.max_height = 2048; + dev->mode_config.min_width = 1; + dev->mode_config.min_height = 1; + dev->mode_config.max_width = dev_priv->fb_max_width; + dev->mode_config.max_height = dev_priv->fb_max_height; ret = vmw_kms_init_legacy_display_system(dev_priv); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index c7efbd47ab8..f8fbbc67a40 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -35,11 +35,6 @@ #define VMW_RES_SURFACE ttm_driver_type1 #define VMW_RES_STREAM ttm_driver_type2 -/* XXX: This isn't a real hardware flag, but just a hack for kernel to - * know about primary surfaces. Find a better way to accomplish this. 
- */ -#define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9) - struct vmw_user_context { struct ttm_base_object base; struct vmw_resource res; @@ -579,6 +574,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, srf->flags = req->flags; srf->format = req->format; + srf->scanout = req->scanout; memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); srf->num_sizes = 0; for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) @@ -604,16 +600,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, if (unlikely(ret != 0)) goto out_err1; - if (srf->flags & SVGA3D_SURFACE_HINT_SCANOUT) { - /* we should not send this flag down to hardware since - * its not a official one - */ - srf->flags &= ~SVGA3D_SURFACE_HINT_SCANOUT; - srf->scanout = true; - } else { - srf->scanout = false; - } - if (srf->scanout && srf->num_sizes == 1 && srf->sizes[0].width == 64 && diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 1ac0c93603c..24b56dc5459 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -961,7 +961,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, remaining -= 7; pr_devel("client 0x%p called 'target'\n", priv); /* if target is default */ - if (!strncmp(buf, "default", 7)) + if (!strncmp(kbuf, "default", 7)) pdev = pci_dev_get(vga_default_device()); else { if (!vga_pci_str_to_vars(curr_pos, remaining, diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c index a31e77c776a..b8156b4893b 100644 --- a/drivers/hwmon/adt7462.c +++ b/drivers/hwmon/adt7462.c @@ -179,7 +179,7 @@ static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END }; * * Some, but not all, of these voltages have low/high limits. */ -#define ADT7462_VOLT_COUNT 12 +#define ADT7462_VOLT_COUNT 13 #define ADT7462_VENDOR 0x41 #define ADT7462_DEVICE 0x62 diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c index cadcbd90ff3..72ff2c4e757 100644 --- a/drivers/hwmon/lm78.c +++ b/drivers/hwmon/lm78.c @@ -851,17 +851,16 @@ static struct lm78_data *lm78_update_device(struct device *dev) static int __init lm78_isa_found(unsigned short address) { int val, save, found = 0; - - /* We have to request the region in two parts because some - boards declare base+4 to base+7 as a PNP device */ - if (!request_region(address, 4, "lm78")) { - pr_debug("lm78: Failed to request low part of region\n"); - return 0; - } - if (!request_region(address + 4, 4, "lm78")) { - pr_debug("lm78: Failed to request high part of region\n"); - release_region(address, 4); - return 0; + int port; + + /* Some boards declare base+0 to base+7 as a PNP device, some base+4 + * to base+7 and some base+5 to base+6. So we better request each port + * individually for the probing phase. */ + for (port = address; port < address + LM78_EXTENT; port++) { + if (!request_region(port, 1, "lm78")) { + pr_debug("lm78: Failed to request port 0x%x\n", port); + goto release; + } } #define REALLY_SLOW_IO @@ -925,8 +924,8 @@ static int __init lm78_isa_found(unsigned short address) val & 0x80 ? 
"LM79" : "LM78", (int)address); release: - release_region(address + 4, 4); - release_region(address, 4); + for (port--; port >= address; port--) + release_region(port, 1); return found; } diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c index 05f9225b6f9..32d4adee73d 100644 --- a/drivers/hwmon/w83781d.c +++ b/drivers/hwmon/w83781d.c @@ -1793,17 +1793,17 @@ static int __init w83781d_isa_found(unsigned short address) { int val, save, found = 0; - - /* We have to request the region in two parts because some - boards declare base+4 to base+7 as a PNP device */ - if (!request_region(address, 4, "w83781d")) { - pr_debug("w83781d: Failed to request low part of region\n"); - return 0; - } - if (!request_region(address + 4, 4, "w83781d")) { - pr_debug("w83781d: Failed to request high part of region\n"); - release_region(address, 4); - return 0; + int port; + + /* Some boards declare base+0 to base+7 as a PNP device, some base+4 + * to base+7 and some base+5 to base+6. So we better request each port + * individually for the probing phase. */ + for (port = address; port < address + W83781D_EXTENT; port++) { + if (!request_region(port, 1, "w83781d")) { + pr_debug("w83781d: Failed to request port 0x%x\n", + port); + goto release; + } } #define REALLY_SLOW_IO @@ -1877,8 +1877,8 @@ w83781d_isa_found(unsigned short address) val == 0x30 ? "W83782D" : "W83781D", (int)address); release: - release_region(address + 4, 4); - release_region(address, 4); + for (port--; port >= address; port--) + release_region(port, 1); return found; } diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c index b1c050ff311..e29b6d5ba8e 100644 --- a/drivers/i2c/busses/i2c-tiny-usb.c +++ b/drivers/i2c/busses/i2c-tiny-usb.c @@ -13,6 +13,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> +#include <linux/types.h> /* include interfaces to usb layer */ #include <linux/usb.h> @@ -31,8 +32,8 @@ #define CMD_I2C_IO_END (1<<1) /* i2c bit delay, default is 10us -> 100kHz */ -static int delay = 10; -module_param(delay, int, 0); +static unsigned short delay = 10; +module_param(delay, ushort, 0); MODULE_PARM_DESC(delay, "bit delay in microseconds, " "e.g. 
10 for 100kHz (default is 100kHz)"); @@ -109,7 +110,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) static u32 usb_func(struct i2c_adapter *adapter) { - u32 func; + __le32 func; /* get functionality from adapter */ if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) != @@ -118,7 +119,7 @@ static u32 usb_func(struct i2c_adapter *adapter) return 0; } - return func; + return le32_to_cpu(func); } /* This is the actual algorithm we define */ @@ -216,8 +217,7 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface, "i2c-tiny-usb at bus %03d device %03d", dev->usb_dev->bus->busnum, dev->usb_dev->devnum); - if (usb_write(&dev->adapter, CMD_SET_DELAY, - cpu_to_le16(delay), 0, NULL, 0) != 0) { + if (usb_write(&dev->adapter, CMD_SET_DELAY, delay, 0, NULL, 0) != 0) { dev_err(&dev->adapter.dev, "failure setting delay to %dus\n", delay); retval = -EIO; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index cc9b5940fa9..875e34e0b23 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2115,9 +2115,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) if (ret) goto err1; - if (cma_loopback_addr(addr)) { - ret = cma_bind_loopback(id_priv); - } else if (!cma_zero_addr(addr)) { + if (!cma_any_addr(addr)) { ret = rdma_translate_ip(addr, &id->route.addr.dev_addr); if (ret) goto err1; diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 9774bdfaa48..d8c0c8d6992 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -1141,7 +1141,14 @@ static void psmouse_cleanup(struct serio *serio) psmouse_deactivate(parent); } - psmouse_deactivate(psmouse); + psmouse_set_state(psmouse, PSMOUSE_INITIALIZING); + + /* + * Disable stream mode so cleanup routine can proceed undisturbed. + */ + if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE)) + printk(KERN_WARNING "psmouse.c: Failed to disable mouse on %s\n", + psmouse->ps2dev.serio->phys); if (psmouse->cleanup) psmouse->cleanup(psmouse); diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index d84a36e545f..b54aee7cd9e 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -1161,9 +1161,17 @@ static int i8042_pm_restore(struct device *dev) return 0; } +static int i8042_pm_thaw(struct device *dev) +{ + i8042_interrupt(0, NULL); + + return 0; +} + static const struct dev_pm_ops i8042_pm_ops = { .suspend = i8042_pm_reset, .resume = i8042_pm_restore, + .thaw = i8042_pm_thaw, .poweroff = i8042_pm_reset, .restore = i8042_pm_restore, }; diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c index 54abf9e303b..f1c8cae70b4 100644 --- a/drivers/md/dm-log-userspace-transfer.c +++ b/drivers/md/dm-log-userspace-transfer.c @@ -172,11 +172,15 @@ int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type, { int r = 0; size_t dummy = 0; - int overhead_size = - sizeof(struct dm_ulog_request *) + sizeof(struct cn_msg); + int overhead_size = sizeof(struct dm_ulog_request) + sizeof(struct cn_msg); struct dm_ulog_request *tfr = prealloced_ulog_tfr; struct receiving_pkg pkg; + /* + * Given the space needed to hold the 'struct cn_msg' and + * 'struct dm_ulog_request' - do we have enough payload + * space remaining? 
+ */ if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) { DMINFO("Size of tfr exceeds preallocated size"); return -EINVAL; @@ -191,7 +195,7 @@ resend: */ mutex_lock(&dm_ulog_lock); - memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size); + memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg)); memcpy(tfr->uuid, uuid, DM_UUID_LEN); tfr->luid = luid; tfr->seq = dm_ulog_seq++; diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index ad779bd13ae..6c1046df81f 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -724,7 +724,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) /* * Dispatch io. */ - if (unlikely(ms->log_failure)) { + if (unlikely(ms->log_failure) && errors_handled(ms)) { spin_lock_irq(&ms->lock); bio_list_merge(&ms->failures, &sync); spin_unlock_irq(&ms->lock); diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c index 5f19ceb6fe9..168bd38f500 100644 --- a/drivers/md/dm-region-hash.c +++ b/drivers/md/dm-region-hash.c @@ -660,10 +660,9 @@ void dm_rh_recovery_end(struct dm_region *reg, int success) spin_lock_irq(&rh->region_lock); if (success) list_add(&reg->list, &reg->rh->recovered_regions); - else { - reg->state = DM_RH_NOSYNC; + else list_add(&reg->list, &reg->rh->failed_recovered_regions); - } + spin_unlock_irq(&rh->region_lock); rh->wakeup_workers(rh->context); diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 7d08879689a..c097d8a4823 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -254,7 +254,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw, * Issue the synchronous I/O from a different thread * to avoid generic_make_request recursion. */ - INIT_WORK(&req.work, do_metadata); + INIT_WORK_ON_STACK(&req.work, do_metadata); queue_work(ps->metadata_wq, &req.work); flush_workqueue(ps->metadata_wq); diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index e0efc1adcaf..bd58703ee8f 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -110,7 +110,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) } stripes = simple_strtoul(argv[0], &end, 10); - if (*end) { + if (!stripes || *end) { ti->error = "Invalid stripe count"; return -EINVAL; } diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c index f53392df7b9..f91b40942e0 100644 --- a/drivers/md/dm-sysfs.c +++ b/drivers/md/dm-sysfs.c @@ -80,20 +80,12 @@ static struct sysfs_ops dm_sysfs_ops = { }; /* - * The sysfs structure is embedded in md struct, nothing to do here - */ -static void dm_sysfs_release(struct kobject *kobj) -{ -} - -/* * dm kobject is embedded in mapped_device structure * no need to define release function here */ static struct kobj_type dm_ktype = { .sysfs_ops = &dm_sysfs_ops, .default_attrs = dm_attrs, - .release = dm_sysfs_release }; /* diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 3167480b532..aa4e2aa86d4 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1595,10 +1595,15 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq) return BLKPREP_OK; } -static void map_request(struct dm_target *ti, struct request *clone, - struct mapped_device *md) +/* + * Returns: + * 0 : the request has been processed (not requeued) + * !0 : the request has been requeued + */ +static int map_request(struct dm_target *ti, struct request *clone, + struct mapped_device *md) { - int r; + int r, requeued = 0; struct dm_rq_target_io *tio = clone->end_io_data; /* @@ -1625,6 +1630,7 @@ static
void map_request(struct dm_target *ti, struct request *clone, case DM_MAPIO_REQUEUE: /* The target wants to requeue the I/O */ dm_requeue_unmapped_request(clone); + requeued = 1; break; default: if (r > 0) { @@ -1636,6 +1642,8 @@ static void map_request(struct dm_target *ti, struct request *clone, dm_kill_unmapped_request(clone, r); break; } + + return requeued; } /* @@ -1677,12 +1685,17 @@ static void dm_request_fn(struct request_queue *q) atomic_inc(&md->pending[rq_data_dir(clone)]); spin_unlock(q->queue_lock); - map_request(ti, clone, md); + if (map_request(ti, clone, md)) + goto requeued; + spin_lock_irq(q->queue_lock); } goto out; +requeued: + spin_lock_irq(q->queue_lock); + plug_and_out: if (!elv_queue_empty(q)) /* Some requests still remain, retry later */ diff --git a/drivers/md/md.c b/drivers/md/md.c index dd3dfe42d5a..a20a71e5efd 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -4075,8 +4075,10 @@ static void mddev_delayed_delete(struct work_struct *ws) { mddev_t *mddev = container_of(ws, mddev_t, del_work); - if (mddev->private == &md_redundancy_group) { + if (mddev->private) { sysfs_remove_group(&mddev->kobj, &md_redundancy_group); + if (mddev->private != (void*)1) + sysfs_remove_group(&mddev->kobj, mddev->private); if (mddev->sysfs_action) sysfs_put(mddev->sysfs_action); mddev->sysfs_action = NULL; @@ -4287,10 +4289,7 @@ static int do_md_run(mddev_t * mddev) sysfs_notify_dirent(rdev->sysfs_state); } - md_probe(mddev->unit, NULL, NULL); disk = mddev->gendisk; - if (!disk) - return -ENOMEM; spin_lock(&pers_lock); pers = find_pers(mddev->level, mddev->clevel); @@ -4530,8 +4529,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) mddev->queue->unplug_fn = NULL; mddev->queue->backing_dev_info.congested_fn = NULL; module_put(mddev->pers->owner); - if (mddev->pers->sync_request) - mddev->private = &md_redundancy_group; + if (mddev->pers->sync_request && mddev->private == NULL) + mddev->private = (void*)1; mddev->pers = NULL; /* tell userspace to handle 'inactive' */ sysfs_notify_dirent(mddev->sysfs_state); @@ -4578,9 +4577,6 @@ out: } mddev->bitmap_info.offset = 0; - /* make sure all md_delayed_delete calls have finished */ - flush_scheduled_work(); - export_array(mddev); mddev->array_sectors = 0; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index e84204eb12d..ceb24afdc14 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5136,9 +5136,8 @@ static int stop(mddev_t *mddev) mddev->thread = NULL; mddev->queue->backing_dev_info.congested_fn = NULL; blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ - sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); free_conf(conf); - mddev->private = NULL; + mddev->private = &raid5_attrs_group; return 0; } @@ -5464,11 +5463,11 @@ static int raid5_start_reshape(mddev_t *mddev) !test_bit(Faulty, &rdev->flags)) { if (raid5_add_disk(mddev, rdev) == 0) { char nm[20]; - if (rdev->raid_disk >= conf->previous_raid_disks) + if (rdev->raid_disk >= conf->previous_raid_disks) { set_bit(In_sync, &rdev->flags); - else + added_devices++; + } else rdev->recovery_offset = 0; - added_devices++; sprintf(nm, "rd%d", rdev->raid_disk); if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm)) @@ -5480,9 +5479,12 @@ static int raid5_start_reshape(mddev_t *mddev) break; } + /* When a reshape changes the number of devices, ->degraded + * is measured against the large of the pre and post number of + * devices.*/ if (mddev->delta_disks > 0) { spin_lock_irqsave(&conf->device_lock, flags); - mddev->degraded = (conf->raid_disks - 
conf->previous_raid_disks) + mddev->degraded += (conf->raid_disks - conf->previous_raid_disks) - added_devices; spin_unlock_irqrestore(&conf->device_lock, flags); } diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c index c37790ad92d..9ddc57909d4 100644 --- a/drivers/media/dvb/dvb-core/dmxdev.c +++ b/drivers/media/dvb/dvb-core/dmxdev.c @@ -761,7 +761,6 @@ static int dvb_demux_open(struct inode *inode, struct file *file) dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192); dmxdevfilter->type = DMXDEV_TYPE_NONE; dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED); - INIT_LIST_HEAD(&dmxdevfilter->feed.ts); init_timer(&dmxdevfilter->timer); dvbdev->users++; @@ -887,6 +886,7 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev, dmxdevfilter->type = DMXDEV_TYPE_PES; memcpy(&dmxdevfilter->params, params, sizeof(struct dmx_pes_filter_params)); + INIT_LIST_HEAD(&dmxdevfilter->feed.ts); dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c index b78cfb7d189..67f189b7aa1 100644 --- a/drivers/media/dvb/dvb-core/dvb_demux.c +++ b/drivers/media/dvb/dvb-core/dvb_demux.c @@ -426,16 +426,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) }; }; - if (dvb_demux_tscheck) { - if (!demux->cnt_storage) - demux->cnt_storage = vmalloc(MAX_PID + 1); - - if (!demux->cnt_storage) { - printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n"); - dvb_demux_tscheck = 0; - goto no_dvb_demux_tscheck; - } - + if (demux->cnt_storage) { /* check pkt counter */ if (pid < MAX_PID) { if (buf[1] & 0x80) @@ -454,7 +445,6 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) }; /* end check */ }; -no_dvb_demux_tscheck: list_for_each_entry(feed, &demux->feed_list, list_head) { if ((feed->pid != pid) && (feed->pid != 0x2000)) @@ -1246,6 +1236,7 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux) dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed)); if (!dvbdemux->feed) { vfree(dvbdemux->filter); + dvbdemux->filter = NULL; return -ENOMEM; } for (i = 0; i < dvbdemux->filternum; i++) { @@ -1257,6 +1248,13 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux) dvbdemux->feed[i].index = i; } + if (dvb_demux_tscheck) { + dvbdemux->cnt_storage = vmalloc(MAX_PID + 1); + + if (!dvbdemux->cnt_storage) + printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n"); + } + INIT_LIST_HEAD(&dvbdemux->frontend_list); for (i = 0; i < DMX_TS_PES_OTHER; i++) { diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 57752751712..81279b3d694 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -1796,7 +1796,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: " "Command not in the active list! 
(sc=%p)\n", ioc->name, SCpnt)); - retval = 0; + retval = SUCCESS; goto out; } diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index b9f1e84897c..e7f8027165e 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c @@ -74,6 +74,9 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test, } mrq->cmd->arg = dev_addr; + if (!mmc_card_blockaddr(test->card)) + mrq->cmd->arg <<= 9; + mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; if (blocks == 1) @@ -190,7 +193,7 @@ static int __mmc_test_prepare(struct mmc_test_card *test, int write) } for (i = 0;i < BUFFER_SIZE / 512;i++) { - ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); + ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1); if (ret) return ret; } @@ -219,7 +222,7 @@ static int mmc_test_cleanup(struct mmc_test_card *test) memset(test->buffer, 0, 512); for (i = 0;i < BUFFER_SIZE / 512;i++) { - ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); + ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1); if (ret) return ret; } @@ -426,7 +429,7 @@ static int mmc_test_transfer(struct mmc_test_card *test, for (i = 0;i < sectors;i++) { ret = mmc_test_buffer_transfer(test, test->buffer + i * 512, - dev_addr + i * 512, 512, 0); + dev_addr + i, 512, 0); if (ret) return ret; } diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c index 62d9c9cc567..1dd4403247c 100644 --- a/drivers/net/ax88796.c +++ b/drivers/net/ax88796.c @@ -921,7 +921,7 @@ static int ax_probe(struct platform_device *pdev) size = (res->end - res->start) + 1; ax->mem2 = request_mem_region(res->start, size, pdev->name); - if (ax->mem == NULL) { + if (ax->mem2 == NULL) { dev_err(&pdev->dev, "cannot reserve registers\n"); ret = -ENXIO; goto exit_mem1; diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index fee6eee7ae5..006cb2efcd2 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c @@ -296,6 +296,7 @@ static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, req_hdr->opcode = opcode; req_hdr->subsystem = subsystem; req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); + req_hdr->version = 0; } static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index bdbd14727e4..318a018ca7c 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -2079,6 +2079,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, struct sge_fl *fl, int len, int complete) { struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; + struct port_info *pi = netdev_priv(qs->netdev); struct sk_buff *skb = NULL; struct cpl_rx_pkt *cpl; struct skb_frag_struct *rx_frag; @@ -2116,11 +2117,18 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, if (!nr_frags) { offset = 2 + sizeof(struct cpl_rx_pkt); - qs->lro_va = sd->pg_chunk.va + 2; - } - len -= offset; + cpl = qs->lro_va = sd->pg_chunk.va + 2; - prefetch(qs->lro_va); + if ((pi->rx_offload & T3_RX_CSUM) && + cpl->csum_valid && cpl->csum == htons(0xffff)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; + } else + skb->ip_summed = CHECKSUM_NONE; + } else + cpl = qs->lro_va; + + len -= offset; rx_frag += nr_frags; rx_frag->page = sd->pg_chunk.page; @@ -2136,12 +2144,8 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, return; skb_record_rx_queue(skb, qs - &adap->sge.qs[0]); - skb->ip_summed = CHECKSUM_UNNECESSARY; - cpl = qs->lro_va; if 
(unlikely(cpl->vlan_valid)) { - struct net_device *dev = qs->netdev; - struct port_info *pi = netdev_priv(dev); struct vlan_group *grp = pi->vlan_grp; if (likely(grp != NULL)) { diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 997124d2992..c881347cb26 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -421,6 +421,8 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; if (tx_queue > IGB_N0_QUEUE) msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; + if (!adapter->msix_entries && msix_vector == 0) + msixbm |= E1000_EIMS_OTHER; array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); q_vector->eims_value = msixbm; break; @@ -877,7 +879,6 @@ static int igb_request_irq(struct igb_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; - struct e1000_hw *hw = &adapter->hw; int err = 0; if (adapter->msix_entries) { @@ -909,20 +910,7 @@ static int igb_request_irq(struct igb_adapter *adapter) igb_setup_all_tx_resources(adapter); igb_setup_all_rx_resources(adapter); } else { - switch (hw->mac.type) { - case e1000_82575: - wr32(E1000_MSIXBM(0), - (E1000_EICR_RX_QUEUE0 | - E1000_EICR_TX_QUEUE0 | - E1000_EIMS_OTHER)); - break; - case e1000_82580: - case e1000_82576: - wr32(E1000_IVAR0, E1000_IVAR_VALID); - break; - default: - break; - } + igb_assign_vector(adapter->q_vector[0], 0); } if (adapter->flags & IGB_FLAG_HAS_MSI) { @@ -1140,6 +1128,8 @@ int igb_up(struct igb_adapter *adapter) } if (adapter->msix_entries) igb_configure_msix(adapter); + else + igb_assign_vector(adapter->q_vector[0], 0); /* Clear any pending interrupts. */ rd32(E1000_ICR); diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index 3103f416531..35a06b47587 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c @@ -357,12 +357,34 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) u32 fctrl_reg; u32 rmcs_reg; u32 reg; + u32 link_speed = 0; + bool link_up; #ifdef CONFIG_DCB if (hw->fc.requested_mode == ixgbe_fc_pfc) goto out; #endif /* CONFIG_DCB */ + /* + * On 82598 having Rx FC on causes resets while doing 1G + * so if it's on turn it off once we know link_speed. For + * more details see 82598 Specification update. 
+ */ + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) { + switch (hw->fc.requested_mode) { + case ixgbe_fc_full: + hw->fc.requested_mode = ixgbe_fc_tx_pause; + break; + case ixgbe_fc_rx_pause: + hw->fc.requested_mode = ixgbe_fc_none; + break; + default: + /* no change */ + break; + } + } + /* Negotiate the fc mode to use */ ret_val = ixgbe_fc_autoneg(hw); if (ret_val) diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index b5f64ad6797..951b73cf5ca 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -5179,7 +5179,7 @@ dma_error: ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); } - return count; + return 0; } static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, @@ -5329,8 +5329,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) struct ixgbe_adapter *adapter = netdev_priv(dev); int txq = smp_processor_id(); - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + while (unlikely(txq >= dev->real_num_tx_queues)) + txq -= dev->real_num_tx_queues; return txq; + } #ifdef IXGBE_FCOE if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && @@ -5760,6 +5763,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, if (err) goto err_sw_init; + /* Make it possible the adapter to be woken up via WOL */ + if (adapter->hw.mac.type == ixgbe_mac_82599EB) + IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); + /* * If there is a fan on this device and it has failed log the * failure. diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 9f9d6081959..24279e6e55f 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -1941,7 +1941,7 @@ static void netxen_tx_timeout_task(struct work_struct *work) netif_wake_queue(adapter->netdev); clear_bit(__NX_RESETTING, &adapter->state); - + return; } else { clear_bit(__NX_RESETTING, &adapter->state); if (!netxen_nic_reset_context(adapter)) { @@ -2240,7 +2240,9 @@ netxen_detach_work(struct work_struct *work) netxen_nic_down(adapter, netdev); + rtnl_lock(); netxen_nic_detach(adapter); + rtnl_unlock(); status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 103e8b0e2a0..46997e177ee 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c @@ -2284,6 +2284,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, fail2: efx_fini_struct(efx); fail1: + WARN_ON(rc > 0); EFX_LOG(efx, "initialisation failed. 
rc=%d\n", rc); free_netdev(net_dev); return rc; diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c index e0d13a45101..67eec7a6e48 100644 --- a/drivers/net/sfc/qt202x_phy.c +++ b/drivers/net/sfc/qt202x_phy.c @@ -320,7 +320,7 @@ static int qt202x_reset_phy(struct efx_nic *efx) falcon_board(efx)->type->init_phy(efx); - return rc; + return 0; fail: EFX_ERR(efx, "PHY reset timed out\n"); diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index d760650c5c0..67249c3c9f5 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -1025,11 +1025,8 @@ static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr, static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot) { struct sky2_tx_le *le = sky2->tx_le + *slot; - struct tx_ring_info *re = sky2->tx_ring + *slot; *slot = RING_NEXT(*slot, sky2->tx_ring_size); - re->flags = 0; - re->skb = NULL; le->ctrl = 0; return le; } @@ -1622,8 +1619,7 @@ static unsigned tx_le_req(const struct sk_buff *skb) return count; } -static void sky2_tx_unmap(struct pci_dev *pdev, - const struct tx_ring_info *re) +static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re) { if (re->flags & TX_MAP_SINGLE) pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr), @@ -1633,6 +1629,7 @@ static void sky2_tx_unmap(struct pci_dev *pdev, pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr), pci_unmap_len(re, maplen), PCI_DMA_TODEVICE); + re->flags = 0; } /* @@ -1839,6 +1836,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; + re->skb = NULL; dev_kfree_skb_any(skb); sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size); diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index c93f58f5c6f..317aa34b21c 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -1877,13 +1877,12 @@ static void velocity_error(struct velocity_info *vptr, int status) /** * tx_srv - transmit interrupt service * @vptr; Velocity - * @status: * * Scan the queues looking for transmitted packets that * we can complete and clean up. Update any statistics as * necessary/ */ -static int velocity_tx_srv(struct velocity_info *vptr, u32 status) +static int velocity_tx_srv(struct velocity_info *vptr) { struct tx_desc *td; int qnum; @@ -2090,14 +2089,12 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) /** * velocity_rx_srv - service RX interrupt * @vptr: velocity - * @status: adapter status (unused) * * Walk the receive ring of the velocity adapter and remove * any received packets from the receive queue. Hand the ring * slots back to the adapter for reuse. */ -static int velocity_rx_srv(struct velocity_info *vptr, int status, - int budget_left) +static int velocity_rx_srv(struct velocity_info *vptr, int budget_left) { struct net_device_stats *stats = &vptr->dev->stats; int rd_curr = vptr->rx.curr; @@ -2151,32 +2148,24 @@ static int velocity_poll(struct napi_struct *napi, int budget) struct velocity_info *vptr = container_of(napi, struct velocity_info, napi); unsigned int rx_done; - u32 isr_status; - - spin_lock(&vptr->lock); - isr_status = mac_read_isr(vptr->mac_regs); - - /* Ack the interrupt */ - mac_write_isr(vptr->mac_regs, isr_status); - if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI))) - velocity_error(vptr, isr_status); + unsigned long flags; + spin_lock_irqsave(&vptr->lock, flags); /* * Do rx and tx twice for performance (taken from the VIA * out-of-tree driver). 
*/ - rx_done = velocity_rx_srv(vptr, isr_status, budget / 2); - velocity_tx_srv(vptr, isr_status); - rx_done += velocity_rx_srv(vptr, isr_status, budget - rx_done); - velocity_tx_srv(vptr, isr_status); - - spin_unlock(&vptr->lock); + rx_done = velocity_rx_srv(vptr, budget / 2); + velocity_tx_srv(vptr); + rx_done += velocity_rx_srv(vptr, budget - rx_done); + velocity_tx_srv(vptr); /* If budget not fully consumed, exit the polling mode */ if (rx_done < budget) { napi_complete(napi); mac_enable_int(vptr->mac_regs); } + spin_unlock_irqrestore(&vptr->lock, flags); return rx_done; } @@ -2206,10 +2195,17 @@ static irqreturn_t velocity_intr(int irq, void *dev_instance) return IRQ_NONE; } + /* Ack the interrupt */ + mac_write_isr(vptr->mac_regs, isr_status); + if (likely(napi_schedule_prep(&vptr->napi))) { mac_disable_int(vptr->mac_regs); __napi_schedule(&vptr->napi); } + + if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI))) + velocity_error(vptr, isr_status); + spin_unlock(&vptr->lock); return IRQ_HANDLED; @@ -3100,7 +3096,7 @@ static int velocity_resume(struct pci_dev *pdev) velocity_init_registers(vptr, VELOCITY_INIT_WOL); mac_disable_int(vptr->mac_regs); - velocity_tx_srv(vptr, 0); + velocity_tx_srv(vptr); for (i = 0; i < vptr->tx.numq; i++) { if (vptr->tx.used[i]) @@ -3344,6 +3340,7 @@ static int velocity_set_coalesce(struct net_device *dev, { struct velocity_info *vptr = netdev_priv(dev); int max_us = 0x3f * 64; + unsigned long flags; /* 6 bits of */ if (ecmd->tx_coalesce_usecs > max_us) @@ -3365,6 +3362,7 @@ static int velocity_set_coalesce(struct net_device *dev, ecmd->tx_coalesce_usecs); /* Setup the interrupt suppression and queue timers */ + spin_lock_irqsave(&vptr->lock, flags); mac_disable_int(vptr->mac_regs); setup_adaptive_interrupts(vptr); setup_queue_timers(vptr); @@ -3372,6 +3370,7 @@ static int velocity_set_coalesce(struct net_device *dev, mac_write_int_mask(vptr->int_mask, vptr->mac_regs); mac_clear_isr(vptr->mac_regs); mac_enable_int(vptr->mac_regs); + spin_unlock_irqrestore(&vptr->lock, flags); return 0; } diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index fa12b9060b0..29bf33692f7 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1615,7 +1615,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf, bf->bf_frmlen -= padsize; } - if (conf_is_ht(&hw->conf) && !is_pae(skb)) + if (conf_is_ht(&hw->conf)) bf->bf_state.bf_type |= BUF_HT; bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq); @@ -1701,7 +1701,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, goto tx_done; } - if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { + if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) { /* * Try aggregation if it's a unicast data frame * and the destination is HT capable. 
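A minimal sketch of how the two ath9k xmit.c hunks above fit together, using only identifiers that appear in those hunks (conf_is_ht(), is_pae(), BUF_HT, IEEE80211_TX_CTL_AMPDU); the helper names below are invented for illustration and are not part of the driver or the patch:

	/* In ath_tx_setup_buffer(): HT framing now follows the channel
	 * configuration alone; PAE/EAPOL frames are no longer excluded here. */
	static bool ath_sketch_use_ht(struct ieee80211_hw *hw)
	{
		return conf_is_ht(&hw->conf);
	}

	/* In ath_tx_start_dma(): PAE frames are instead kept out of A-MPDU
	 * aggregation, so they still go out un-aggregated on an HT channel. */
	static bool ath_sketch_try_aggr(struct ieee80211_tx_info *tx_info,
					struct sk_buff *skb)
	{
		return (tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb);
	}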
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h index fe3bf949199..c484cc25389 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/b43/b43.h @@ -115,6 +115,7 @@ #define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */ #define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */ #define B43_MMIO_RNG 0x65A +#define B43_MMIO_IFSSLOT 0x684 /* Interframe slot time */ #define B43_MMIO_IFSCTL 0x688 /* Interframe space control */ #define B43_MMIO_IFSCTL_USE_EDCF 0x0004 #define B43_MMIO_POWERUP_DELAY 0x6A8 diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 4c41cfe44f2..490fb45d1d0 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -628,10 +628,17 @@ static void b43_upload_card_macaddress(struct b43_wldev *dev) static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time) { /* slot_time is in usec. */ - if (dev->phy.type != B43_PHYTYPE_G) + /* This test used to exit for all but a G PHY. */ + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) return; - b43_write16(dev, 0x684, 510 + slot_time); - b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time); + b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time); + /* Shared memory location 0x0010 is the slot time and should be + * set to slot_time; however, this register is initially 0 and changing + * the value adversely affects the transmit rate for BCM4311 + * devices. Until this behavior is unterstood, delete this step + * + * b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time); + */ } static void b43_short_slot_timing_enable(struct b43_wldev *dev) diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index 5461f105bd2..d10bea64fce 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c @@ -2744,6 +2744,7 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed) if ((le16_to_cpu(priv->staging_rxon.channel) != ch)) priv->staging_rxon.flags = 0; + iwl_set_rxon_ht(priv, ht_conf); iwl_set_rxon_channel(priv, conf->channel); iwl_set_flags_for_band(priv, conf->channel->band); diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c index 6f36b6e79f5..2dbce85404a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-rx.c +++ b/drivers/net/wireless/iwlwifi/iwl-rx.c @@ -928,7 +928,10 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv, if (ieee80211_is_mgmt(fc) || ieee80211_has_protected(fc) || ieee80211_has_morefrags(fc) || - le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) + le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG || + (ieee80211_is_data_qos(fc) && + *ieee80211_get_qos_ctl(hdr) & + IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)) ret = skb_linearize(skb); else ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ? 
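The iwl-rx.c hunk just above widens the set of frames that are fully linearized before being handed to mac80211. Read as a single predicate, and assuming only the symbols already present in the hunk, the post-patch condition looks roughly like this; the helper name is illustrative only, not code from the patch:

	/* Needs <linux/ieee80211.h>. Frames that may be reassembled or inspected
	 * in place (management, protected, fragmented, or QoS A-MSDU frames) are
	 * linearized; everything else only has its header pulled into the linear
	 * area via __pskb_pull_tail(). */
	static bool iwl_sketch_needs_linearize(struct ieee80211_hdr *hdr, __le16 fc)
	{
		return ieee80211_is_mgmt(fc) ||
		       ieee80211_has_protected(fc) ||
		       ieee80211_has_morefrags(fc) ||
		       (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) ||
		       (ieee80211_is_data_qos(fc) &&
			(*ieee80211_get_qos_ctl(hdr) &
			 IEEE80211_QOS_CONTROL_A_MSDU_PRESENT));
	}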
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c index 6d6ed748517..f727b4a8319 100644 --- a/drivers/net/wireless/iwmc3200wifi/rx.c +++ b/drivers/net/wireless/iwmc3200wifi/rx.c @@ -794,7 +794,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf, } bss->bss = kzalloc(bss_len, GFP_KERNEL); - if (!bss) { + if (!bss->bss) { kfree(bss); IWM_ERR(iwm, "Couldn't allocate bss\n"); return -ENOMEM; diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c index bc5726dd5fe..7ba3052b070 100644 --- a/drivers/net/wireless/rtl818x/rtl8187_dev.c +++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c @@ -65,6 +65,7 @@ static struct usb_device_id rtl8187_table[] __devinitdata = { /* Sitecom */ {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187}, {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B}, + {USB_DEVICE(0x0df6, 0x0029), .driver_info = DEVICE_RTL8187B}, /* Sphairon Access Systems GmbH */ {USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187}, /* Dick Smith Electronics */ diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 8e952fdab76..cb2fd01edda 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -720,12 +720,6 @@ static int acpiphp_bus_add(struct acpiphp_func *func) -ret_val); goto acpiphp_bus_add_out; } - /* - * try to start anyway. We could have failed to add - * simply because this bus had previously been added - * on another add. Don't bother with the return value - * we just keep going. - */ ret_val = acpi_bus_start(device); acpiphp_bus_add_out: diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index c74694345b6..d58b94030ef 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -338,6 +338,23 @@ static void __devinit quirk_s3_64M(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); +/* + * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS + * ver. 1.33 20070103) don't set the correct ISA PCI region header info. + * BAR0 should be 8 bytes; instead, it may be set to something like 8k + * (which conflicts w/ BAR1's memory range). 
+ */ +static void __devinit quirk_cs5536_vsa(struct pci_dev *dev) +{ + if (pci_resource_len(dev, 0) != 8) { + struct resource *res = &dev->resource[0]; + res->end = res->start + 8 - 1; + dev_info(&dev->dev, "CS5536 ISA bridge bug detected " + "(incorrect header); workaround applied.\n"); + } +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa); + static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, unsigned size, int nr, const char *name) { diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index e67e4feb35c..eb603f1d55c 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -5771,7 +5771,7 @@ static void thermal_exit(void) case TPACPI_THERMAL_ACPI_TMP07: case TPACPI_THERMAL_ACPI_UPDT: sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj, - &thermal_temp_input16_group); + &thermal_temp_input8_group); break; case TPACPI_THERMAL_NONE: default: diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c index fa39e759a27..6ea3cb5837c 100644 --- a/drivers/power/wm97xx_battery.c +++ b/drivers/power/wm97xx_battery.c @@ -175,8 +175,14 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev) dev_err(&dev->dev, "Do not pass platform_data through " "wm97xx_bat_set_pdata!\n"); return -EINVAL; - } else - pdata = wmdata->batt_pdata; + } + + if (!wmdata) { + dev_err(&dev->dev, "No platform data supplied\n"); + return -EINVAL; + } + + pdata = wmdata->batt_pdata; if (dev->id != -1) return -EINVAL; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 686ef270ecf..b60a4c9f8f1 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -661,7 +661,7 @@ static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state) static void print_constraints(struct regulator_dev *rdev) { struct regulation_constraints *constraints = rdev->constraints; - char buf[80]; + char buf[80] = ""; int count = 0; int ret; diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c index 76d08c282f9..4f33a0f4a17 100644 --- a/drivers/regulator/lp3971.c +++ b/drivers/regulator/lp3971.c @@ -183,7 +183,7 @@ static int lp3971_ldo_set_voltage(struct regulator_dev *dev, if (vol_map[val] >= min_vol) break; - if (vol_map[val] > max_vol) + if (val > LDO_VOL_MAX_IDX || vol_map[val] > max_vol) return -EINVAL; return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo), @@ -272,7 +272,7 @@ static int lp3971_dcdc_set_voltage(struct regulator_dev *dev, if (vol_map[val] >= min_vol) break; - if (vol_map[val] > max_vol) + if (val > BUCK_TARGET_VOL_MAX_IDX || vol_map[val] > max_vol) return -EINVAL; ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck), diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 999fe80c405..62b654af923 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -531,7 +531,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q) qdio_siga_sync_q(q); get_buf_state(q, q->first_to_check, &state, 0); - if (state == SLSB_P_INPUT_PRIMED) + if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR) /* more work coming */ return 0; @@ -960,6 +960,8 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, qdio_handle_activate_check(cdev, intparm, cstat, dstat); break; + case QDIO_IRQ_STATE_STOPPED: + break; default: WARN_ON(1); } diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 0f7b493fb10..271399f62f1 
100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -671,12 +671,11 @@ static void zfcp_fc_ct_els_job_handler(void *data) { struct fc_bsg_job *job = data; struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data; - int status = zfcp_ct_els->status; - int reply_status; + struct fc_bsg_reply *jr = job->reply; - reply_status = status ? FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK; - job->reply->reply_data.ctels_reply.status = reply_status; - job->reply->reply_payload_rcv_len = job->reply_payload.payload_len; + jr->reply_payload_rcv_len = job->reply_payload.payload_len; + jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; + jr->result = zfcp_ct_els->status ? -EIO : 0; job->job_done(job); } diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index 47754260228..9e71ac61114 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c @@ -2516,7 +2516,7 @@ int fas216_eh_device_reset(struct scsi_cmnd *SCpnt) if (info->scsi.phase == PHASE_IDLE) fas216_kick(info); - mod_timer(&info->eh_timer, 30 * HZ); + mod_timer(&info->eh_timer, jiffies + 30 * HZ); spin_unlock_irqrestore(&info->host_lock, flags); /* diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index f61fb8d0133..8bc6f53691e 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -453,6 +453,5 @@ extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); -extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *); #endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index ffd0efdff40..6fc63b98818 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1917,6 +1917,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) struct rsp_que *rsp; struct device_reg_24xx __iomem *reg; struct scsi_qla_host *vha; + unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -1927,15 +1928,15 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) ha = rsp->hw; reg = &ha->iobase->isp24; - spin_lock_irq(&ha->hardware_lock); + spin_lock_irqsave(&ha->hardware_lock, flags); - vha = qla25xx_get_host(rsp); + vha = pci_get_drvdata(ha->pdev); qla24xx_process_response_queue(vha, rsp); if (!ha->flags.disable_msix_handshake) { WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); RD_REG_DWORD_RELAXED(®->hccr); } - spin_unlock_irq(&ha->hardware_lock); + spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } @@ -1946,6 +1947,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_24xx __iomem *reg; + unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -1958,10 +1960,10 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) /* Clear the interrupt, if enabled, for this response queue */ if (rsp->options & ~BIT_6) { reg = &ha->iobase->isp24; - spin_lock_irq(&ha->hardware_lock); + spin_lock_irqsave(&ha->hardware_lock, flags); WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); RD_REG_DWORD_RELAXED(®->hccr); - spin_unlock_irq(&ha->hardware_lock); + spin_unlock_irqrestore(&ha->hardware_lock, flags); } queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); @@ -1979,6 +1981,7 @@ qla24xx_msix_default(int irq, void *dev_id) uint32_t stat; uint32_t hccr; uint16_t mb[4]; + unsigned long flags; rsp = (struct 
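The fas216 one-liner above deserves a note: mod_timer() takes an absolute expiry in jiffies, so the old bare "30 * HZ" armed the timer at a fixed point near boot rather than 30 seconds from now. A minimal sketch of the correct idiom (the helper name is hypothetical):

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Sketch: a relative timeout must be added to the current jiffies value. */
static void arm_eh_timer(struct timer_list *timer)
{
	mod_timer(timer, jiffies + 30 * HZ);	/* expire ~30 seconds from now */
}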
rsp_que *) dev_id; if (!rsp) { @@ -1990,7 +1993,7 @@ qla24xx_msix_default(int irq, void *dev_id) reg = &ha->iobase->isp24; status = 0; - spin_lock_irq(&ha->hardware_lock); + spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); do { stat = RD_REG_DWORD(®->host_status); @@ -2039,7 +2042,7 @@ qla24xx_msix_default(int irq, void *dev_id) } WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); } while (0); - spin_unlock_irq(&ha->hardware_lock); + spin_unlock_irqrestore(&ha->hardware_lock, flags); if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && (status & MBX_INTERRUPT) && ha->flags.mbox_int) { @@ -2277,30 +2280,3 @@ int qla25xx_request_irq(struct rsp_que *rsp) msix->rsp = rsp; return ret; } - -struct scsi_qla_host * -qla25xx_get_host(struct rsp_que *rsp) -{ - srb_t *sp; - struct qla_hw_data *ha = rsp->hw; - struct scsi_qla_host *vha = NULL; - struct sts_entry_24xx *pkt; - struct req_que *req; - uint16_t que; - uint32_t handle; - - pkt = (struct sts_entry_24xx *) rsp->ring_ptr; - que = MSW(pkt->handle); - handle = (uint32_t) LSW(pkt->handle); - req = ha->req_q_map[que]; - if (handle < MAX_OUTSTANDING_COMMANDS) { - sp = req->outstanding_cmds[handle]; - if (sp) - return sp->fcport->vha; - else - goto base_que; - } -base_que: - vha = pci_get_drvdata(ha->pdev); - return vha; -} diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index b901aa267e7..ff17dee2861 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -636,13 +636,15 @@ failed: static void qla_do_work(struct work_struct *work) { + unsigned long flags; struct rsp_que *rsp = container_of(work, struct rsp_que, q_work); struct scsi_qla_host *vha; + struct qla_hw_data *ha = rsp->hw; - spin_lock_irq(&rsp->hw->hardware_lock); - vha = qla25xx_get_host(rsp); + spin_lock_irqsave(&rsp->hw->hardware_lock, flags); + vha = pci_get_drvdata(ha->pdev); qla24xx_process_response_queue(vha, rsp); - spin_unlock_irq(&rsp->hw->hardware_lock); + spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags); } /* create response queue */ diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index c3e37c8e7e2..e9b15c3746f 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c @@ -83,6 +83,9 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */ #define PASS_LIMIT 256 +#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) + + /* * We default to IRQ0 for the "no irq" hack. Some * machine types want others as well - they're free @@ -1792,7 +1795,7 @@ static unsigned int serial8250_tx_empty(struct uart_port *port) up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; spin_unlock_irqrestore(&up->port.lock, flags); - return lsr & UART_LSR_TEMT ? TIOCSER_TEMT : 0; + return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? 
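Several qla2xxx hunks above (and the qla_do_work() change that follows) replace spin_lock_irq()/spin_unlock_irq() with the irqsave/irqrestore pair. The save/restore form is required when the code may already run with interrupts disabled, as MSI-X handlers and work executed from IRQ context can, because unconditionally re-enabling interrupts on unlock corrupts the interrupted context. A generic, hedged sketch:

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(hw_lock);	/* stand-in for ha->hardware_lock */

/* Sketch: preserve and restore the caller's IRQ state around the lock. */
static irqreturn_t example_msix_handler(int irq, void *dev_id)
{
	unsigned long flags;

	spin_lock_irqsave(&hw_lock, flags);
	/* ... drain the response queue, poke registers ... */
	spin_unlock_irqrestore(&hw_lock, flags);

	return IRQ_HANDLED;
}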
TIOCSER_TEMT : 0; } static unsigned int serial8250_get_mctrl(struct uart_port *port) @@ -1850,8 +1853,6 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state) spin_unlock_irqrestore(&up->port.lock, flags); } -#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) - /* * Wait for transmitter & holding register to empty */ diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c index 5681ebed9c6..03dfd27c4bf 100644 --- a/drivers/ssb/main.c +++ b/drivers/ssb/main.c @@ -494,8 +494,7 @@ static int ssb_devices_register(struct ssb_bus *bus) #endif break; case SSB_BUSTYPE_SDIO: -#ifdef CONFIG_SSB_SDIO - sdev->irq = bus->host_sdio->dev.irq; +#ifdef CONFIG_SSB_SDIOHOST dev->parent = &bus->host_sdio->dev; #endif break; diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 6e8bcdfd23b..a678186f218 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -1312,9 +1312,9 @@ static int processcompl(struct async *as, void __user * __user *arg) void __user *addr = as->userurb; unsigned int i; - if (as->userbuffer) + if (as->userbuffer && urb->actual_length) if (copy_to_user(as->userbuffer, urb->transfer_buffer, - urb->transfer_buffer_length)) + urb->actual_length)) goto err_out; if (put_user(as->status, &userurb->status)) goto err_out; @@ -1334,14 +1334,11 @@ static int processcompl(struct async *as, void __user * __user *arg) } } - free_async(as); - if (put_user(addr, (void __user * __user *)arg)) return -EFAULT; return 0; err_out: - free_async(as); return -EFAULT; } @@ -1371,8 +1368,11 @@ static struct async *reap_as(struct dev_state *ps) static int proc_reapurb(struct dev_state *ps, void __user *arg) { struct async *as = reap_as(ps); - if (as) - return processcompl(as, (void __user * __user *)arg); + if (as) { + int retval = processcompl(as, (void __user * __user *)arg); + free_async(as); + return retval; + } if (signal_pending(current)) return -EINTR; return -EIO; @@ -1380,11 +1380,16 @@ static int proc_reapurb(struct dev_state *ps, void __user *arg) static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg) { + int retval; struct async *as; - if (!(as = async_getcompleted(ps))) - return -EAGAIN; - return processcompl(as, (void __user * __user *)arg); + as = async_getcompleted(ps); + retval = -EAGAIN; + if (as) { + retval = processcompl(as, (void __user * __user *)arg); + free_async(as); + } + return retval; } #ifdef CONFIG_COMPAT @@ -1475,9 +1480,9 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) void __user *addr = as->userurb; unsigned int i; - if (as->userbuffer) + if (as->userbuffer && urb->actual_length) if (copy_to_user(as->userbuffer, urb->transfer_buffer, - urb->transfer_buffer_length)) + urb->actual_length)) return -EFAULT; if (put_user(as->status, &userurb->status)) return -EFAULT; @@ -1497,7 +1502,6 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) } } - free_async(as); if (put_user(ptr_to_compat(addr), (u32 __user *)arg)) return -EFAULT; return 0; @@ -1506,8 +1510,11 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) static int proc_reapurb_compat(struct dev_state *ps, void __user *arg) { struct async *as = reap_as(ps); - if (as) - return processcompl_compat(as, (void __user * __user *)arg); + if (as) { + int retval = processcompl_compat(as, (void __user * __user *)arg); + free_async(as); + return retval; + } if (signal_pending(current)) return -EINTR; return -EIO; @@ -1515,11 +1522,16 @@ static int proc_reapurb_compat(struct dev_state *ps, void 
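Two related corrections run through the devio.c hunks above: only urb->actual_length bytes are copied back to user space (the full transfer_buffer_length may never have been filled), and free_async() moves from processcompl() into its callers so the async record is released exactly once on success and error alike. A compressed restatement of the reap path as it looks after the patch (the function and type names are the driver's own):

/* Sketch: the completion processor only reports status; the caller owns
 * the object and frees it exactly once, whatever processcompl() returned. */
static int example_reapurb(struct dev_state *ps, void __user *arg)
{
	struct async *as = reap_as(ps);		/* may block waiting for an URB */

	if (as) {
		int retval = processcompl(as, (void __user * __user *)arg);

		free_async(as);			/* single point of release */
		return retval;
	}
	if (signal_pending(current))
		return -EINTR;
	return -EIO;
}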
__user *arg) static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg) { + int retval; struct async *as; - if (!(as = async_getcompleted(ps))) - return -EAGAIN; - return processcompl_compat(as, (void __user * __user *)arg); + retval = -EAGAIN; + as = async_getcompleted(ps); + if (as) { + retval = processcompl_compat(as, (void __user * __user *)arg); + free_async(as); + } + return retval; } diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c index 0a577d5694f..d4f0db58a8a 100644 --- a/drivers/usb/gadget/f_eem.c +++ b/drivers/usb/gadget/f_eem.c @@ -358,7 +358,7 @@ done: * b15: bmType (0 == data) */ len = skb->len; - put_unaligned_le16((len & 0x3FFF) | BIT(14), skb_push(skb, 2)); + put_unaligned_le16(len & 0x3FFF, skb_push(skb, 2)); /* add a zero-length EEM packet, if needed */ if (padlen) @@ -464,7 +464,6 @@ static int eem_unwrap(struct gether *port, } /* validate CRC */ - crc = get_unaligned_le32(skb->data + len - ETH_FCS_LEN); if (header & BIT(14)) { crc = get_unaligned_le32(skb->data + len - ETH_FCS_LEN); diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c index 429560100b1..76496f5d272 100644 --- a/drivers/usb/gadget/multi.c +++ b/drivers/usb/gadget/multi.c @@ -29,7 +29,7 @@ #if defined USB_ETH_RNDIS # undef USB_ETH_RNDIS #endif -#ifdef CONFIG_USB_ETH_RNDIS +#ifdef CONFIG_USB_G_MULTI_RNDIS # define USB_ETH_RNDIS y #endif diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c index e220fb8091a..8b45145b913 100644 --- a/drivers/usb/gadget/r8a66597-udc.c +++ b/drivers/usb/gadget/r8a66597-udc.c @@ -26,6 +26,7 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/clk.h> +#include <linux/err.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c index 4b5dbd0127f..5fc80a10415 100644 --- a/drivers/usb/gadget/s3c-hsotg.c +++ b/drivers/usb/gadget/s3c-hsotg.c @@ -2582,6 +2582,7 @@ err: hsotg->gadget.dev.driver = NULL; return ret; } +EXPORT_SYMBOL(usb_gadget_register_driver); int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) { diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index c75d9270c75..19372673bf0 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -196,7 +196,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) if (hostpc_reg) { u32 t3; + spin_unlock_irq(&ehci->lock); msleep(5);/* 5ms for HCD enter low pwr mode */ + spin_lock_irq(&ehci->lock); t3 = ehci_readl(ehci, hostpc_reg); ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg); t3 = ehci_readl(ehci, hostpc_reg); @@ -904,17 +906,18 @@ static int ehci_hub_control ( if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) goto error; - ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); + /* After above check the port must be connected. 
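The ehci-hub.c change above (and its twin in the hub-control suspend path just below) wraps the 5 ms msleep() with an unlock/relock of ehci->lock: sleeping while holding a spinlock with interrupts disabled is not allowed, so the lock is dropped for the delay and retaken afterwards. A hedged sketch of the idiom, outside the EHCI context:

#include <linux/spinlock.h>
#include <linux/delay.h>

/* Sketch: never sleep under a spinlock; drop it around the delay and
 * re-read any state that may have changed once the lock is retaken. */
static void wait_for_low_power(spinlock_t *lock)
{
	spin_unlock_irq(lock);
	msleep(5);		/* give the controller time to enter low power */
	spin_lock_irq(lock);
}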
* Set appropriate bit thus could put phy into low power * mode if we have hostpc feature */ + temp &= ~PORT_WKCONN_E; + temp |= PORT_WKDISC_E | PORT_WKOC_E; + ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); if (hostpc_reg) { - temp &= ~PORT_WKCONN_E; - temp |= (PORT_WKDISC_E | PORT_WKOC_E); - ehci_writel(ehci, temp | PORT_SUSPEND, - status_reg); + spin_unlock_irqrestore(&ehci->lock, flags); msleep(5);/* 5ms for HCD enter low pwr mode */ + spin_lock_irqsave(&ehci->lock, flags); temp1 = ehci_readl(ehci, hostpc_reg); ehci_writel(ehci, temp1 | HOSTPC_PHCD, hostpc_reg); diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c index d224ab467a4..e1232890c78 100644 --- a/drivers/usb/host/fhci-tds.c +++ b/drivers/usb/host/fhci-tds.c @@ -105,7 +105,7 @@ void fhci_ep0_free(struct fhci_usb *usb) if (ep->td_base) cpm_muram_free(cpm_muram_offset(ep->td_base)); - if (ep->conf_frame_Q) { + if (kfifo_initialized(&ep->conf_frame_Q)) { size = cq_howmany(&ep->conf_frame_Q); for (; size; size--) { struct packet *pkt = cq_get(&ep->conf_frame_Q); @@ -115,7 +115,7 @@ void fhci_ep0_free(struct fhci_usb *usb) cq_delete(&ep->conf_frame_Q); } - if (ep->empty_frame_Q) { + if (kfifo_initialized(&ep->empty_frame_Q)) { size = cq_howmany(&ep->empty_frame_Q); for (; size; size--) { struct packet *pkt = cq_get(&ep->empty_frame_Q); @@ -125,7 +125,7 @@ void fhci_ep0_free(struct fhci_usb *usb) cq_delete(&ep->empty_frame_Q); } - if (ep->dummy_packets_Q) { + if (kfifo_initialized(&ep->dummy_packets_Q)) { size = cq_howmany(&ep->dummy_packets_Q); for (; size; size--) { u8 *buff = cq_get(&ep->dummy_packets_Q); diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index 0ceec123ddf..bee558aed42 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c @@ -35,7 +35,9 @@ #include <linux/usb.h> #include <linux/platform_device.h> #include <linux/io.h> +#include <linux/mm.h> #include <linux/irq.h> +#include <asm/cacheflush.h> #include "../core/hcd.h" #include "r8a66597.h" @@ -820,6 +822,26 @@ static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb, enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb); } +static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb, + int status) +__releases(r8a66597->lock) +__acquires(r8a66597->lock) +{ + if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) { + void *ptr; + + for (ptr = urb->transfer_buffer; + ptr < urb->transfer_buffer + urb->transfer_buffer_length; + ptr += PAGE_SIZE) + flush_dcache_page(virt_to_page(ptr)); + } + + usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); + spin_unlock(&r8a66597->lock); + usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status); + spin_lock(&r8a66597->lock); +} + /* this function must be called with interrupt disabled */ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) { @@ -838,15 +860,9 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) list_del(&td->queue); kfree(td); - if (urb) { - usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), - urb); + if (urb) + r8a66597_urb_done(r8a66597, urb, -ENODEV); - spin_unlock(&r8a66597->lock); - usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, - -ENODEV); - spin_lock(&r8a66597->lock); - } break; } } @@ -1006,6 +1022,8 @@ static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port, /* this function must be called with interrupt disabled */ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, u16 
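The r8a66597-hcd hunks above factor completion into r8a66597_urb_done(): the D-cache is flushed page by page for IN transfers, and the driver lock is dropped around usb_hcd_giveback_urb() (documented for sparse by the __releases/__acquires annotations) because the completion callback may resubmit and re-enter the driver. A hedged sketch of that give-back shape with the driver specifics stripped out (the helper below is illustrative; in this kernel generation the struct usb_hcd helpers live in drivers/usb/core/hcd.h):

#include <linux/usb.h>
/* #include "../core/hcd.h" -- usb_hcd declarations in the 2.6.33 tree */

/* Sketch: a driver-private lock cannot be held across usb_hcd_giveback_urb(). */
static void example_urb_done(struct usb_hcd *hcd, spinlock_t *lock,
			     struct urb *urb, int status)
__releases(lock)
__acquires(lock)
{
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock(lock);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(lock);
}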
syssts) +__releases(r8a66597->lock) +__acquires(r8a66597->lock) { if (syssts == SE0) { r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); @@ -1023,7 +1041,9 @@ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597)); } + spin_unlock(&r8a66597->lock); usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597)); + spin_lock(&r8a66597->lock); } /* this function must be called with interrupt disabled */ @@ -1283,10 +1303,7 @@ __releases(r8a66597->lock) __acquires(r8a66597->lock) if (usb_pipeisoc(urb->pipe)) urb->start_frame = r8a66597_get_frame(hcd); - usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); - spin_unlock(&r8a66597->lock); - usb_hcd_giveback_urb(hcd, urb, status); - spin_lock(&r8a66597->lock); + r8a66597_urb_done(r8a66597, urb, status); } if (restart) { diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index 0025847743f..8b37a4b9839 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c @@ -3245,6 +3245,7 @@ static struct usb_device_id sisusb_table [] = { { USB_DEVICE(0x0711, 0x0902) }, { USB_DEVICE(0x0711, 0x0903) }, { USB_DEVICE(0x0711, 0x0918) }, + { USB_DEVICE(0x0711, 0x0920) }, { USB_DEVICE(0x182d, 0x021c) }, { USB_DEVICE(0x182d, 0x0269) }, { } diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig index de56b3d743d..3d2d3e549bd 100644 --- a/drivers/usb/otg/Kconfig +++ b/drivers/usb/otg/Kconfig @@ -44,6 +44,7 @@ config ISP1301_OMAP config USB_ULPI bool "Generic ULPI Transceiver Driver" depends on ARM + select USB_OTG_UTILS help Enable this to support ULPI connected USB OTG transceivers which are likely found on embedded boards. diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 216f187582a..7638828e731 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -50,7 +50,7 @@ * Version Information */ #define DRIVER_VERSION "v1.5.0" -#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>" +#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr" #define DRIVER_DESC "USB FTDI Serial Converters Driver" static int debug; @@ -145,10 +145,15 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = { +/* + * Device ID not listed? Test via module params product/vendor or + * /sys/bus/usb/ftdi_sio/new_id, then send patch/report! + */ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) }, @@ -552,9 +557,16 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) }, /* - * Due to many user requests for multiple ELV devices we enable - * them by default. 
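The ftdi_sio comment added above points users whose adapter is not listed at the runtime interfaces (module parameters or the driver's new_id sysfs file) before they submit a one-line ID patch. For reference, a hedged sketch of what such a static table entry looks like (0x0403 is the FTDI vendor ID used throughout the hunk; the product ID below is a placeholder):

#include <linux/module.h>
#include <linux/usb.h>

/* Sketch: one table entry per supported vendor/product pair. */
static struct usb_device_id example_id_table[] = {
	{ USB_DEVICE(0x0403, 0xffff) },		/* placeholder product ID */
	{ }					/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_id_table);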
+ * ELV devices: */ + { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_WS550_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_EC3000_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_WS888_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_TWS550_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_FEM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_CLI7000_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_PPS7330_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_TFM100_PID) }, @@ -571,11 +583,17 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_ELV_PCK100_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_RFP500_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_UTP8_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_WS444PC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_UMS100_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) }, { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, @@ -697,6 +715,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) }, { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) }, { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) }, { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index da92b4952ff..c8951aeed98 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -38,6 +38,8 @@ /* www.candapter.com Ewert Energy Systems CANdapter device */ #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */ +#define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */ + /* OOCDlink by Joern Kaipf <joernk@web.de> * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */ #define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ @@ -161,22 +163,37 @@ /* * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). * All of these devices use FTDI's vendor ID (0x0403). + * Further IDs taken from ELV Windows .inf file. * * The previously included PID for the UO 100 module was incorrect. * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58). * * Armin Laeuger originally sent the PID for the UM 100 module. 
*/ +#define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */ +#define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */ +#define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */ +#define FTDI_ELV_WS550_PID 0xE004 /* WS 550 */ +#define FTDI_ELV_EC3000_PID 0xE006 /* ENERGY CONTROL 3000 USB */ +#define FTDI_ELV_WS888_PID 0xE008 /* WS 888 */ +#define FTDI_ELV_TWS550_PID 0xE009 /* Technoline WS 550 */ +#define FTDI_ELV_FEM_PID 0xE00A /* Funk Energie Monitor */ #define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */ #define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */ #define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */ +#define FTDI_ELV_UMS100_PID 0xE0EB /* ELV USB Master-Slave Schaltsteckdose UMS 100 */ +#define FTDI_ELV_TFD128_PID 0xE0EC /* ELV Temperatur-Feuchte-Datenlogger TFD 128 */ +#define FTDI_ELV_FM3RX_PID 0xE0ED /* ELV Messwertuebertragung FM3 RX */ +#define FTDI_ELV_WS777_PID 0xE0EE /* Conrad WS 777 */ #define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */ #define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */ #define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */ #define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */ #define FTDI_ELV_RFP500_PID 0xE0F3 /* HF-Leistungsmesser (RFP 500) */ #define FTDI_ELV_FS20SIG_PID 0xE0F4 /* Signalgeber (FS 20 SIG) */ +#define FTDI_ELV_UTP8_PID 0xE0F5 /* ELV UTP 8 */ #define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */ +#define FTDI_ELV_WS444PC_PID 0xE0F7 /* Conrad WS 444 PC */ #define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */ #define FTDI_ELV_UAD8_PID 0xF068 /* USB-AD-Wandler (UAD 8) */ #define FTDI_ELV_UDA7_PID 0xF069 /* USB-DA-Wandler (UDA 7) */ @@ -968,6 +985,7 @@ #define PAPOUCH_VID 0x5050 /* Vendor ID */ #define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */ #define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */ +#define PAPOUCH_AD4USB_PID 0x8003 /* AD4USB Measurement Module */ /* * Marvell SheevaPlug diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index ac1b6449fb6..3eb6143bb64 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -298,6 +298,7 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist }, + { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. 
Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */ { } }; diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index c932f905318..49575fba375 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -941,7 +941,7 @@ UNUSUAL_DEV( 0x07ab, 0xfccd, 0x0000, 0x9999, UNUSUAL_DEV( 0x07af, 0x0004, 0x0100, 0x0133, "Microtech", "USB-SCSI-DB25", - US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init, + US_SC_DEVICE, US_PR_DEVICE, usb_stor_euscsi_init, US_FL_SCM_MULT_TARG ), UNUSUAL_DEV( 0x07af, 0x0005, 0x0100, 0x0100, diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index cf62b05e296..7d6c2139891 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c @@ -84,7 +84,7 @@ static const match_table_t tokens = { static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) { - char *options; + char *options, *tmp_options; substring_t args[MAX_OPT_ARGS]; char *p; int option = 0; @@ -102,9 +102,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) if (!opts) return 0; - options = kstrdup(opts, GFP_KERNEL); - if (!options) + tmp_options = kstrdup(opts, GFP_KERNEL); + if (!tmp_options) { + ret = -ENOMEM; goto fail_option_alloc; + } + options = tmp_options; while ((p = strsep(&options, ",")) != NULL) { int token; @@ -159,8 +162,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) break; case Opt_cache: s = match_strdup(&args[0]); - if (!s) - goto fail_option_alloc; + if (!s) { + ret = -ENOMEM; + P9_DPRINTK(P9_DEBUG_ERROR, + "problem allocating copy of cache arg\n"); + goto free_and_return; + } if (strcmp(s, "loose") == 0) v9ses->cache = CACHE_LOOSE; @@ -173,8 +180,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) case Opt_access: s = match_strdup(&args[0]); - if (!s) - goto fail_option_alloc; + if (!s) { + ret = -ENOMEM; + P9_DPRINTK(P9_DEBUG_ERROR, + "problem allocating copy of access arg\n"); + goto free_and_return; + } v9ses->flags &= ~V9FS_ACCESS_MASK; if (strcmp(s, "user") == 0) @@ -194,13 +205,11 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) continue; } } - kfree(options); - return ret; +free_and_return: + kfree(tmp_options); fail_option_alloc: - P9_DPRINTK(P9_DEBUG_ERROR, - "failed to allocate copy of option argument\n"); - return -ENOMEM; + return ret; } /** diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h index 3a7560e3586..ed835836e0d 100644 --- a/fs/9p/v9fs_vfs.h +++ b/fs/9p/v9fs_vfs.h @@ -60,3 +60,4 @@ void v9fs_dentry_release(struct dentry *); int v9fs_uflags2omode(int uflags, int extended); ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64); +void v9fs_blank_wstat(struct p9_wstat *wstat); diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index 3902bf43a08..74a0461a9ac 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -257,6 +257,23 @@ v9fs_file_write(struct file *filp, const char __user * data, return total; } +static int v9fs_file_fsync(struct file *filp, struct dentry *dentry, + int datasync) +{ + struct p9_fid *fid; + struct p9_wstat wstat; + int retval; + + P9_DPRINTK(P9_DEBUG_VFS, "filp %p dentry %p datasync %x\n", filp, + dentry, datasync); + + fid = filp->private_data; + v9fs_blank_wstat(&wstat); + + retval = p9_client_wstat(fid, &wstat); + return retval; +} + static const struct file_operations v9fs_cached_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, @@ -266,6 +283,7 @@ static const struct file_operations v9fs_cached_file_operations = { .release = 
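The v9fs_parse_options() rework above fixes a leak with a common root cause: strsep() advances the pointer handed to it, so kfree() must be applied to the original kstrdup() result (kept in tmp_options), not to the walked cursor, and every early exit now funnels through free_and_return. The same pattern in isolation, as a hedged sketch:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Sketch: keep the kstrdup() result for kfree(); walk a separate cursor. */
static int parse_csv(const char *opts)
{
	char *tmp, *cursor, *tok;

	tmp = kstrdup(opts, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	cursor = tmp;
	while ((tok = strsep(&cursor, ",")) != NULL) {
		if (!*tok)
			continue;
		/* ... handle the token ... */
	}

	kfree(tmp);	/* 'cursor' has been advanced and may be NULL by now */
	return 0;
}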
v9fs_dir_release, .lock = v9fs_file_lock, .mmap = generic_file_readonly_mmap, + .fsync = v9fs_file_fsync, }; const struct file_operations v9fs_file_operations = { @@ -276,4 +294,5 @@ const struct file_operations v9fs_file_operations = { .release = v9fs_dir_release, .lock = v9fs_file_lock, .mmap = generic_file_readonly_mmap, + .fsync = v9fs_file_fsync, }; diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 9d03d1ebca6..a407fa3388c 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -176,7 +176,7 @@ int v9fs_uflags2omode(int uflags, int extended) * */ -static void +void v9fs_blank_wstat(struct p9_wstat *wstat) { wstat->type = ~0; diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index 33baf27fac7..34ddda888e6 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -873,6 +873,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent) brelse(bh); unacquire_priv_sbp: + kfree(befs_sb->mount_opts.iocharset); kfree(sb->s_fs_info); unacquire_none: diff --git a/fs/block_dev.c b/fs/block_dev.c index 73d6a735b8f..d11d0289f3d 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -246,7 +246,8 @@ struct super_block *freeze_bdev(struct block_device *bdev) if (!sb) goto out; if (sb->s_flags & MS_RDONLY) { - deactivate_locked_super(sb); + sb->s_frozen = SB_FREEZE_TRANS; + up_write(&sb->s_umount); mutex_unlock(&bdev->bd_fsfreeze_mutex); return sb; } @@ -307,7 +308,7 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb) BUG_ON(sb->s_bdev != bdev); down_write(&sb->s_umount); if (sb->s_flags & MS_RDONLY) - goto out_deactivate; + goto out_unfrozen; if (sb->s_op->unfreeze_fs) { error = sb->s_op->unfreeze_fs(sb); @@ -321,11 +322,11 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb) } } +out_unfrozen: sb->s_frozen = SB_UNFROZEN; smp_wmb(); wake_up(&sb->s_wait_unfrozen); -out_deactivate: if (sb) deactivate_locked_super(sb); out_unlock: diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 87b25543d7d..2b59201b955 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1982,7 +1982,12 @@ struct btrfs_root *open_ctree(struct super_block *sb, if (!(sb->s_flags & MS_RDONLY)) { ret = btrfs_recover_relocation(tree_root); - BUG_ON(ret); + if (ret < 0) { + printk(KERN_WARNING + "btrfs: failed to recover relocation\n"); + err = -EINVAL; + goto fail_trans_kthread; + } } location.objectid = BTRFS_FS_TREE_OBJECTID; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 432a2da4641..559f72489b3 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5402,10 +5402,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans, int ret; while (level >= 0) { - if (path->slots[level] >= - btrfs_header_nritems(path->nodes[level])) - break; - ret = walk_down_proc(trans, root, path, wc, lookup_info); if (ret > 0) break; @@ -5413,6 +5409,10 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans, if (level == 0) break; + if (path->slots[level] >= + btrfs_header_nritems(path->nodes[level])) + break; + ret = do_walk_down(trans, root, path, wc, &lookup_info); if (ret > 0) { path->slots[level]++; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 96577e8bf9f..b177ed31961 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3165,10 +3165,9 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, spin_unlock(&tree->buffer_lock); goto free_eb; } - spin_unlock(&tree->buffer_lock); - /* add one reference for the tree */ atomic_inc(&eb->refs); + spin_unlock(&tree->buffer_lock); return eb; 
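The alloc_extent_buffer() hunk above moves atomic_inc(&eb->refs) inside the tree->buffer_lock critical section: if the extra reference is taken only after the lock is dropped, another CPU can look the buffer up and free it in the window in between. A generic, hedged sketch of the lookup-and-hold idiom (the structure and the lookup callback are hypothetical):

#include <linux/spinlock.h>

struct cache_obj {
	atomic_t refs;
	/* ... cached payload ... */
};

/* Sketch: any reference that justifies using the object after the lock is
 * released must be taken while the lock protecting the lookup is held. */
static struct cache_obj *cache_lookup_and_hold(spinlock_t *lock,
					       struct cache_obj *(*find)(void))
{
	struct cache_obj *obj;

	spin_lock(lock);
	obj = find();
	if (obj)
		atomic_inc(&obj->refs);	/* pin it before anyone can free it */
	spin_unlock(lock);

	return obj;
}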
free_eb: diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index c02033596f0..6ed434ac037 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -720,13 +720,15 @@ again: inode->i_ino, orig_offset); BUG_ON(ret); } - fi = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); if (del_nr == 0) { + fi = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_file_extent_item); btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG); btrfs_mark_buffer_dirty(leaf); } else { + fi = btrfs_item_ptr(leaf, del_slot - 1, + struct btrfs_file_extent_item); btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG); btrfs_set_file_extent_num_bytes(leaf, fi, @@ -1133,7 +1135,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync) } mutex_lock(&dentry->d_inode->i_mutex); out: - return ret > 0 ? EIO : ret; + return ret > 0 ? -EIO : ret; } static const struct vm_operations_struct btrfs_file_vm_ops = { diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 8cd109972fa..4deb280f896 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1681,24 +1681,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, * before we start the transaction. It limits the amount of btree * reads required while inside the transaction. */ -static noinline void reada_csum(struct btrfs_root *root, - struct btrfs_path *path, - struct btrfs_ordered_extent *ordered_extent) -{ - struct btrfs_ordered_sum *sum; - u64 bytenr; - - sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum, - list); - bytenr = sum->sums[0].bytenr; - - /* - * we don't care about the results, the point of this search is - * just to get the btree leaves into ram - */ - btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0); -} - /* as ordered data IO finishes, this gets called so we can finish * an ordered extent if the range of bytes in the file it covers are * fully written. @@ -1709,7 +1691,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) struct btrfs_trans_handle *trans; struct btrfs_ordered_extent *ordered_extent = NULL; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; - struct btrfs_path *path; int compressed = 0; int ret; @@ -1717,32 +1698,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) if (!ret) return 0; - /* - * before we join the transaction, try to do some of our IO. - * This will limit the amount of IO that we have to do with - * the transaction running. We're unlikely to need to do any - * IO if the file extents are new, the disk_i_size checks - * covers the most common case. 
- */ - if (start < BTRFS_I(inode)->disk_i_size) { - path = btrfs_alloc_path(); - if (path) { - ret = btrfs_lookup_file_extent(NULL, root, path, - inode->i_ino, - start, 0); - ordered_extent = btrfs_lookup_ordered_extent(inode, - start); - if (!list_empty(&ordered_extent->list)) { - btrfs_release_path(root, path); - reada_csum(root, path, ordered_extent); - } - btrfs_free_path(path); - } - } - - if (!ordered_extent) - ordered_extent = btrfs_lookup_ordered_extent(inode, start); + ordered_extent = btrfs_lookup_ordered_extent(inode, start); BUG_ON(!ordered_extent); + if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { BUG_ON(!list_empty(&ordered_extent->list)); ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); @@ -5841,7 +5799,9 @@ static int prealloc_file_range(struct inode *inode, u64 start, u64 end, inode->i_ctime = CURRENT_TIME; BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; if (!(mode & FALLOC_FL_KEEP_SIZE) && - cur_offset > inode->i_size) { + (actual_len > inode->i_size) && + (cur_offset > inode->i_size)) { + if (cur_offset > actual_len) i_size = actual_len; else diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index ed3e4a2ec2c..ab7ab531874 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -3764,7 +3764,8 @@ out: BTRFS_DATA_RELOC_TREE_OBJECTID); if (IS_ERR(fs_root)) err = PTR_ERR(fs_root); - btrfs_orphan_cleanup(fs_root); + else + btrfs_orphan_cleanup(fs_root); } return err; } diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES index 7b2600b380d..49503d2edc7 100644 --- a/fs/cifs/CHANGES +++ b/fs/cifs/CHANGES @@ -1,3 +1,7 @@ +Version 1.62 +------------ +Add sockopt=TCP_NODELAY mount option. + Version 1.61 ------------ Fix append problem to Samba servers (files opened with O_APPEND could diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index ac2b24c192f..78c1b86d55f 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -113,5 +113,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* EXPERIMENTAL */ -#define CIFS_VERSION "1.61" +#define CIFS_VERSION "1.62" #endif /* _CIFSFS_H */ diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 4b35f7ec058..ed751bb657d 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -149,6 +149,7 @@ struct TCP_Server_Info { bool svlocal:1; /* local server or remote */ bool noblocksnd; /* use blocking sendmsg */ bool noautotune; /* do not autotune send buf sizes */ + bool tcp_nodelay; atomic_t inFlight; /* number of requests on the wire to server */ #ifdef CONFIG_CIFS_STATS2 atomic_t inSend; /* requests trying to send */ diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 3bbcaa716b3..2e9e09ca0e3 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -98,7 +98,7 @@ struct smb_vol { bool nostrictsync:1; /* do not force expensive SMBflush on every sync */ unsigned int rsize; unsigned int wsize; - unsigned int sockopt; + bool sockopt_tcp_nodelay:1; unsigned short int port; char *prepath; }; @@ -1142,9 +1142,11 @@ cifs_parse_mount_options(char *options, const char *devname, simple_strtoul(value, &value, 0); } } else if (strnicmp(data, "sockopt", 5) == 0) { - if (value && *value) { - vol->sockopt = - simple_strtoul(value, &value, 0); + if (!value || !*value) { + cERROR(1, ("no socket option specified")); + continue; + } else if (strnicmp(value, "TCP_NODELAY", 11) == 0) { + vol->sockopt_tcp_nodelay = 1; } } else if (strnicmp(data, "netbiosname", 4) == 0) { if (!value || !*value || (*value == ' ')) { @@ -1514,6 
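The relocation.c hunk above adds the missing else branch: btrfs_orphan_cleanup() may only run when read_fs_root() actually returned a root, because ERR_PTR values encode an errno and must never be dereferenced. A minimal, hedged sketch of that convention (the lookup callback is hypothetical; struct btrfs_root is used opaquely):

#include <linux/err.h>

struct btrfs_root;	/* opaque here */

/* Sketch: pointer-or-errno returns are checked with IS_ERR()/PTR_ERR(). */
static int use_fs_root(struct btrfs_root *(*lookup)(void))
{
	struct btrfs_root *root = lookup();

	if (IS_ERR(root))
		return PTR_ERR(root);	/* propagate -ENOENT, -EIO, ... */

	/* only a non-error pointer may be used from here on */
	return 0;
}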
+1516,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) tcp_ses->noblocksnd = volume_info->noblocksnd; tcp_ses->noautotune = volume_info->noautotune; + tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay; atomic_set(&tcp_ses->inFlight, 0); init_waitqueue_head(&tcp_ses->response_q); init_waitqueue_head(&tcp_ses->request_q); @@ -1764,6 +1767,7 @@ static int ipv4_connect(struct TCP_Server_Info *server) { int rc = 0; + int val; bool connected = false; __be16 orig_port = 0; struct socket *socket = server->ssocket; @@ -1845,6 +1849,14 @@ ipv4_connect(struct TCP_Server_Info *server) socket->sk->sk_rcvbuf = 140 * 1024; } + if (server->tcp_nodelay) { + val = 1; + rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY, + (char *)&val, sizeof(val)); + if (rc) + cFYI(1, ("set TCP_NODELAY socket option error %d", rc)); + } + cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx", socket->sk->sk_sndbuf, socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo)); @@ -1916,6 +1928,7 @@ static int ipv6_connect(struct TCP_Server_Info *server) { int rc = 0; + int val; bool connected = false; __be16 orig_port = 0; struct socket *socket = server->ssocket; @@ -1987,6 +2000,15 @@ ipv6_connect(struct TCP_Server_Info *server) */ socket->sk->sk_rcvtimeo = 7 * HZ; socket->sk->sk_sndtimeo = 5 * HZ; + + if (server->tcp_nodelay) { + val = 1; + rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY, + (char *)&val, sizeof(val)); + if (rc) + cFYI(1, ("set TCP_NODELAY socket option error %d", rc)); + } + server->ssocket = socket; return rc; diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index cf18ee76559..e3fda978f48 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1762,8 +1762,18 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs) CIFS_MOUNT_MAP_SPECIAL_CHR); } - if (!rc) + if (!rc) { rc = inode_setattr(inode, attrs); + + /* force revalidate when any of these times are set since some + of the fs types (eg ext3, fat) do not have fine enough + time granularity to match protocol, and we do not have a + a way (yet) to query the server fs's time granularity (and + whether it rounds times down). + */ + if (!rc && (attrs->ia_valid & (ATTR_MTIME | ATTR_CTIME))) + cifsInode->time = 0; + } out: kfree(args); kfree(full_path); diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index f84062f9a98..c343b14ba2d 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -77,6 +77,11 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name, cFYI(1, ("For %s", name->name)); + if (parent->d_op && parent->d_op->d_hash) + parent->d_op->d_hash(parent, name); + else + name->hash = full_name_hash(name->name, name->len); + dentry = d_lookup(parent, name); if (dentry) { /* FIXME: check for inode number changes? 
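The connect.c hunks above wire the new sockopt=TCP_NODELAY mount option through to both the IPv4 and IPv6 connect paths via kernel_setsockopt(). The option call on its own, as a hedged sketch outside the cifs context (kernel_setsockopt() is the in-kernel counterpart of setsockopt() in this kernel generation):

#include <linux/net.h>
#include <linux/tcp.h>

/* Sketch: disable Nagle on a kernel-created socket. */
static int set_tcp_nodelay(struct socket *sock)
{
	int val = 1;

	return kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
				 (char *)&val, sizeof(val));
}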
*/ @@ -666,12 +671,11 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst, min(len, max_len), nlt, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); + pqst->len -= nls_nullsize(nlt); } else { pqst->name = filename; pqst->len = len; } - pqst->hash = full_name_hash(pqst->name, pqst->len); -/* cFYI(1, ("filldir on %s",pqst->name)); */ return rc; } diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 7085a6275c4..aaa9c1c5a5b 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -223,9 +223,9 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, /* null user mount */ *bcc_ptr = 0; *(bcc_ptr+1) = 0; - } else { /* 300 should be long enough for any conceivable user name */ + } else { bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName, - 300, nls_cp); + MAX_USERNAME_SIZE, nls_cp); } bcc_ptr += 2 * bytes_ret; bcc_ptr += 2; /* account for null termination */ @@ -246,11 +246,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, /* copy user */ if (ses->userName == NULL) { /* BB what about null user mounts - check that we do this BB */ - } else { /* 300 should be long enough for any conceivable user name */ - strncpy(bcc_ptr, ses->userName, 300); + } else { + strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE); } - /* BB improve check for overflow */ - bcc_ptr += strnlen(ses->userName, 300); + bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE); *bcc_ptr = 0; bcc_ptr++; /* account for null termination */ diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index c5c45de1a2e..0ca9ec4a79c 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -301,6 +301,12 @@ static int sg_ioctl_trans(unsigned int fd, unsigned int cmd, u32 data; void __user *dxferp; int err; + int interface_id; + + if (get_user(interface_id, &sgio32->interface_id)) + return -EFAULT; + if (interface_id != 'S') + return sys_ioctl(fd, cmd, (unsigned long)sgio32); if (get_user(iovec_count, &sgio32->iovec_count)) return -EFAULT; @@ -936,6 +942,7 @@ COMPATIBLE_IOCTL(TCSETSF) COMPATIBLE_IOCTL(TIOCLINUX) COMPATIBLE_IOCTL(TIOCSBRK) COMPATIBLE_IOCTL(TIOCCBRK) +COMPATIBLE_IOCTL(TIOCGSID) COMPATIBLE_IOCTL(TIOCGICOUNT) /* Little t */ COMPATIBLE_IOCTL(TIOCGETD) @@ -1038,6 +1045,8 @@ COMPATIBLE_IOCTL(FIOQSIZE) #ifdef CONFIG_BLOCK /* loop */ IGNORE_IOCTL(LOOP_CLR_FD) +/* md calls this on random blockdevs */ +IGNORE_IOCTL(RAID_VERSION) /* SG stuff */ COMPATIBLE_IOCTL(SG_SET_TIMEOUT) COMPATIBLE_IOCTL(SG_GET_TIMEOUT) diff --git a/fs/exec.c b/fs/exec.c index 0790a107ff7..e95c692ef0e 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -571,6 +571,9 @@ int setup_arg_pages(struct linux_binprm *bprm, struct vm_area_struct *prev = NULL; unsigned long vm_flags; unsigned long stack_base; + unsigned long stack_size; + unsigned long stack_expand; + unsigned long rlim_stack; #ifdef CONFIG_STACK_GROWSUP /* Limit stack size to 1GB */ @@ -627,10 +630,24 @@ int setup_arg_pages(struct linux_binprm *bprm, goto out_unlock; } + stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE; + stack_size = vma->vm_end - vma->vm_start; + /* + * Align this down to a page boundary as expand_stack + * will align it up. 
+ */ + rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK; + rlim_stack = min(rlim_stack, stack_size); #ifdef CONFIG_STACK_GROWSUP - stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE; + if (stack_size + stack_expand > rlim_stack) + stack_base = vma->vm_start + rlim_stack; + else + stack_base = vma->vm_end + stack_expand; #else - stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE; + if (stack_size + stack_expand > rlim_stack) + stack_base = vma->vm_end - rlim_stack; + else + stack_base = vma->vm_start - stack_expand; #endif ret = expand_stack(vma, stack_base); if (ret) diff --git a/fs/fcntl.c b/fs/fcntl.c index 5ef953e6f90..97e01dc0d95 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -199,9 +199,7 @@ static int setfl(int fd, struct file * filp, unsigned long arg) static void f_modown(struct file *filp, struct pid *pid, enum pid_type type, int force) { - unsigned long flags; - - write_lock_irqsave(&filp->f_owner.lock, flags); + write_lock_irq(&filp->f_owner.lock); if (force || !filp->f_owner.pid) { put_pid(filp->f_owner.pid); filp->f_owner.pid = get_pid(pid); @@ -213,7 +211,7 @@ static void f_modown(struct file *filp, struct pid *pid, enum pid_type type, filp->f_owner.euid = cred->euid; } } - write_unlock_irqrestore(&filp->f_owner.lock, flags); + write_unlock_irq(&filp->f_owner.lock); } int __f_setown(struct file *filp, struct pid *pid, enum pid_type type, diff --git a/fs/file_table.c b/fs/file_table.c index 69652c5bd5f..b98404b5438 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -253,6 +253,7 @@ void __fput(struct file *file) if (file->f_op && file->f_op->release) file->f_op->release(inode, file); security_file_free(file); + ima_file_free(file); if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL)) cdev_put(inode->i_cdev); fops_put(file->f_op); diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 6d47379e794..583e823307a 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -541,7 +541,7 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock, *ptr++ = cpu_to_be64(bn++); break; } - } while (state != ALLOC_DATA); + } while ((state != ALLOC_DATA) || !dblock); ip->i_height = height; gfs2_add_inode_blocks(&ip->i_inode, alloced); diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 8a102f73100..a86ed638156 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -725,7 +725,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo) goto fail; } - error = -EINVAL; + error = -EUSERS; if (!gfs2_jindex_size(sdp)) { fs_err(sdp, "no journals!\n"); goto fail_jindex; diff --git a/fs/namei.c b/fs/namei.c index 94a5e60779f..d62fdc875f2 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1736,8 +1736,7 @@ do_last: if (nd.root.mnt) path_put(&nd.root); if (!IS_ERR(filp)) { - error = ima_path_check(&filp->f_path, filp->f_mode & - (MAY_READ | MAY_WRITE | MAY_EXEC)); + error = ima_file_check(filp, acc_mode); if (error) { fput(filp); filp = ERR_PTR(error); @@ -1797,8 +1796,7 @@ ok: } filp = nameidata_to_filp(&nd); if (!IS_ERR(filp)) { - error = ima_path_check(&filp->f_path, filp->f_mode & - (MAY_READ | MAY_WRITE | MAY_EXEC)); + error = ima_file_check(filp, acc_mode); if (error) { fput(filp); filp = ERR_PTR(error); diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index e1d415e9784..0d289823e85 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -342,6 +342,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq, data->res.fattr = &data->fattr; data->res.eof = 0; data->res.count = bytes; + nfs_fattr_init(&data->fattr); msg.rpc_argp = 
&data->args; msg.rpc_resp = &data->res; @@ -575,6 +576,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq) data->res.count = 0; data->res.fattr = &data->fattr; data->res.verf = &data->verf; + nfs_fattr_init(&data->fattr); NFS_PROTO(data->inode)->commit_setup(data, &msg); @@ -766,6 +768,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq, data->res.fattr = &data->fattr; data->res.count = bytes; data->res.verf = &data->verf; + nfs_fattr_init(&data->fattr); task_setup_data.task = &data->task; task_setup_data.callback_data = data; diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c index fa588006588..237874f1af2 100644 --- a/fs/nfs/fscache.c +++ b/fs/nfs/fscache.c @@ -354,12 +354,11 @@ void nfs_fscache_reset_inode_cookie(struct inode *inode) */ int nfs_fscache_release_page(struct page *page, gfp_t gfp) { - struct nfs_inode *nfsi = NFS_I(page->mapping->host); - struct fscache_cookie *cookie = nfsi->fscache; - - BUG_ON(!cookie); - if (PageFsCache(page)) { + struct nfs_inode *nfsi = NFS_I(page->mapping->host); + struct fscache_cookie *cookie = nfsi->fscache; + + BUG_ON(!cookie); dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n", cookie, page, nfsi); diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c index 0adefc40cc8..59047f8d7d7 100644 --- a/fs/nfs/mount_clnt.c +++ b/fs/nfs/mount_clnt.c @@ -120,7 +120,7 @@ static struct { { .status = MNT3ERR_INVAL, .errno = -EINVAL, }, { .status = MNT3ERR_NAMETOOLONG, .errno = -ENAMETOOLONG, }, { .status = MNT3ERR_NOTSUPP, .errno = -ENOTSUPP, }, - { .status = MNT3ERR_SERVERFAULT, .errno = -ESERVERFAULT, }, + { .status = MNT3ERR_SERVERFAULT, .errno = -EREMOTEIO, }, }; struct mountres { diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index 5e078b222b4..7bc2da8efd4 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -699,7 +699,7 @@ static struct { { NFSERR_BAD_COOKIE, -EBADCOOKIE }, { NFSERR_NOTSUPP, -ENOTSUPP }, { NFSERR_TOOSMALL, -ETOOSMALL }, - { NFSERR_SERVERFAULT, -ESERVERFAULT }, + { NFSERR_SERVERFAULT, -EREMOTEIO }, { NFSERR_BADTYPE, -EBADTYPE }, { NFSERR_JUKEBOX, -EJUKEBOX }, { -1, -EIO } diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index e437fd6a819..5cd5184b56d 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -4631,7 +4631,7 @@ static int decode_sequence(struct xdr_stream *xdr, * If the server returns different values for sessionID, slotID or * sequence number, the server is looney tunes. */ - status = -ESERVERFAULT; + status = -EREMOTEIO; if (memcmp(id.data, res->sr_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { @@ -5774,7 +5774,7 @@ static struct { { NFS4ERR_BAD_COOKIE, -EBADCOOKIE }, { NFS4ERR_NOTSUPP, -ENOTSUPP }, { NFS4ERR_TOOSMALL, -ETOOSMALL }, - { NFS4ERR_SERVERFAULT, -ESERVERFAULT }, + { NFS4ERR_SERVERFAULT, -EREMOTEIO }, { NFS4ERR_BADTYPE, -EBADTYPE }, { NFS4ERR_LOCKED, -EAGAIN }, { NFS4ERR_SYMLINK, -ELOOP }, @@ -5801,7 +5801,7 @@ nfs4_stat_to_errno(int stat) } if (stat <= 10000 || stat > 10100) { /* The server is looney tunes. */ - return -ESERVERFAULT; + return -EREMOTEIO; } /* If we cannot translate the error, the recovery routines should * handle it. 
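The mount_clnt.c, nfs2xdr.c and nfs4xdr.c hunks above all touch the same construct: a static table mapping on-the-wire status codes to local errnos, walked at decode time, with the change substituting -EREMOTEIO for -ESERVERFAULT so a misbehaving server is reported sensibly. A hedged sketch of that table-plus-fallback idiom (the two example status codes are illustrative, not a complete NFS mapping):

#include <linux/errno.h>

/* Sketch: translate a protocol status code with a catch-all fallback. */
static const struct {
	int status;	/* value on the wire */
	int err;	/* local errno (negative) */
} errtbl[] = {
	{ 2,  -ENOENT },
	{ 13, -EACCES },
	{ -1, -EREMOTEIO },	/* terminator doubles as the fallback */
};

static int status_to_errno(int status)
{
	int i;

	for (i = 0; errtbl[i].status != -1; i++)
		if (errtbl[i].status == status)
			return errtbl[i].err;
	return -EREMOTEIO;	/* unknown status: don't trust the server */
}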
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 7b54b8bb101..d63d964a039 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1598,8 +1598,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage, struct nfs_page *req; int ret; - if (PageFsCache(page)) - nfs_fscache_release_page(page, GFP_KERNEL); + nfs_fscache_release_page(page, GFP_KERNEL); req = nfs_find_and_lock_request(page); ret = PTR_ERR(req); diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index c487810a236..a0c4016413f 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c @@ -1316,19 +1316,11 @@ rqst_exp_parent(struct svc_rqst *rqstp, struct path *path) static struct svc_export *find_fsidzero_export(struct svc_rqst *rqstp) { - struct svc_export *exp; u32 fsidv[2]; mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL); - exp = rqst_exp_find(rqstp, FSID_NUM, fsidv); - /* - * We shouldn't have accepting an nfsv4 request at all if we - * don't have a pseudoexport!: - */ - if (IS_ERR(exp) && PTR_ERR(exp) == -ENOENT) - exp = ERR_PTR(-ESERVERFAULT); - return exp; + return rqst_exp_find(rqstp, FSID_NUM, fsidv); } /* diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index c194793b642..97d79eff6b7 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -752,6 +752,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, flags, current_cred()); if (IS_ERR(*filp)) host_err = PTR_ERR(*filp); + host_err = ima_file_check(*filp, access); out_nfserr: err = nfserrno(host_err); out: @@ -2127,7 +2128,6 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp, */ path.mnt = exp->ex_path.mnt; path.dentry = dentry; - err = ima_path_check(&path, acc & (MAY_READ | MAY_WRITE | MAY_EXEC)); nfsd_out: return err? nfserrno(err) : 0; } diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 3dae4a13f6e..7e9df11260f 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -599,7 +599,7 @@ bail: return ret; } -/* +/* * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're * particularly interested in the aio/dio case. Like the core uses * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from @@ -670,7 +670,7 @@ static ssize_t ocfs2_direct_IO(int rw, ret = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev, iov, offset, - nr_segs, + nr_segs, ocfs2_direct_IO_get_blocks, ocfs2_dio_end_io); diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c index d43d34a1dd3..21c808f752d 100644 --- a/fs/ocfs2/buffer_head_io.c +++ b/fs/ocfs2/buffer_head_io.c @@ -368,7 +368,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, } ocfs2_metadata_cache_io_unlock(ci); - mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n", + mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n", (unsigned long long)block, nr, ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes", flags); diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index eda5b8bcddd..5c989000670 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -78,7 +78,7 @@ static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type); unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD; -/* Only sets a new threshold if there are no active regions. +/* Only sets a new threshold if there are no active regions. 
* * No locking or otherwise interesting code is required for reading * o2hb_dead_threshold as it can't change once regions are active and @@ -170,7 +170,7 @@ static void o2hb_write_timeout(struct work_struct *work) mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u " "milliseconds\n", reg->hr_dev_name, - jiffies_to_msecs(jiffies - reg->hr_last_timeout_start)); + jiffies_to_msecs(jiffies - reg->hr_last_timeout_start)); o2quo_disk_timeout(); } @@ -624,7 +624,7 @@ static int o2hb_check_slot(struct o2hb_region *reg, "seq %llu last %llu changed %u equal %u\n", slot->ds_node_num, (long long)slot->ds_last_generation, le32_to_cpu(hb_block->hb_cksum), - (unsigned long long)le64_to_cpu(hb_block->hb_seq), + (unsigned long long)le64_to_cpu(hb_block->hb_seq), (unsigned long long)slot->ds_last_time, slot->ds_changed_samples, slot->ds_equal_samples); diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 334f231a422..d8d0c65ac03 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -485,7 +485,7 @@ static void o2net_set_nn_state(struct o2net_node *nn, } if (was_valid && !valid) { - printk(KERN_INFO "o2net: no longer connected to " + printk(KERN_NOTICE "o2net: no longer connected to " SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc)); o2net_complete_nodes_nsw(nn); } @@ -493,7 +493,7 @@ static void o2net_set_nn_state(struct o2net_node *nn, if (!was_valid && valid) { o2quo_conn_up(o2net_num_from_nn(nn)); cancel_delayed_work(&nn->nn_connect_expired); - printk(KERN_INFO "o2net: %s " SC_NODEF_FMT "\n", + printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n", o2nm_this_node() > sc->sc_node->nd_num ? "connected to" : "accepted connection from", SC_NODEF_ARGS(sc)); @@ -930,7 +930,7 @@ static void o2net_sendpage(struct o2net_sock_container *sc, cond_resched(); continue; } - mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT + mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret); o2net_ensure_shutdown(nn, sc, 0); break; @@ -1476,14 +1476,14 @@ static void o2net_idle_timer(unsigned long data) do_gettimeofday(&now); - printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " + printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " "seconds, shutting it down.\n", SC_NODEF_ARGS(sc), o2net_idle_timeout() / 1000, o2net_idle_timeout() % 1000); mlog(ML_NOTICE, "here are some times that might help debug the " "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv " "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n", - sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec, + sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec, now.tv_sec, (long) now.tv_usec, sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec, sc->sc_tv_advance_start.tv_sec, diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h index 8d58cfe410b..96fa7ebc530 100644 --- a/fs/ocfs2/cluster/tcp_internal.h +++ b/fs/ocfs2/cluster/tcp_internal.h @@ -32,10 +32,10 @@ * on their number */ #define O2NET_QUORUM_DELAY_MS ((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS) -/* +/* * This version number represents quite a lot, unfortunately. It not * only represents the raw network message protocol on the wire but also - * locking semantics of the file system using the protocol. It should + * locking semantics of the file system using the protocol. It should * be somewhere else, I'm sure, but right now it isn't. * * With version 11, we separate out the filesystem locking portion. 
The diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h index b5786a787fa..3cfa114aa39 100644 --- a/fs/ocfs2/dlm/dlmapi.h +++ b/fs/ocfs2/dlm/dlmapi.h @@ -95,7 +95,7 @@ const char *dlm_errname(enum dlm_status err); mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st))); \ } while (0) -#define DLM_LKSB_UNUSED1 0x01 +#define DLM_LKSB_UNUSED1 0x01 #define DLM_LKSB_PUT_LVB 0x02 #define DLM_LKSB_GET_LVB 0x04 #define DLM_LKSB_UNUSED2 0x08 diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c index 01cf8cc3d28..dccc439fa08 100644 --- a/fs/ocfs2/dlm/dlmast.c +++ b/fs/ocfs2/dlm/dlmast.c @@ -123,7 +123,7 @@ static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) dlm_lock_put(lock); /* free up the reserved bast that we are cancelling. * guaranteed that this will not be the last reserved - * ast because *both* an ast and a bast were reserved + * ast because *both* an ast and a bast were reserved * to get to this point. the res->spinlock will not be * taken here */ dlm_lockres_release_ast(dlm, res); diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c index ca96bce50e1..f283bce776b 100644 --- a/fs/ocfs2/dlm/dlmconvert.c +++ b/fs/ocfs2/dlm/dlmconvert.c @@ -396,7 +396,7 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm, /* instead of logging the same network error over * and over, sleep here and wait for the heartbeat * to notice the node is dead. times out after 5s. */ - dlm_wait_for_node_death(dlm, res->owner, + dlm_wait_for_node_death(dlm, res->owner, DLM_NODE_DEATH_WAIT_MAX); ret = DLM_RECOVERING; mlog(0, "node %u died so returning DLM_RECOVERING " diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c index 42b0bad7a61..0cd24cf5439 100644 --- a/fs/ocfs2/dlm/dlmdebug.c +++ b/fs/ocfs2/dlm/dlmdebug.c @@ -102,7 +102,7 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res) assert_spin_locked(&res->spinlock); stringify_lockname(res->lockname.name, res->lockname.len, - buf, sizeof(buf) - 1); + buf, sizeof(buf)); printk("lockres: %s, owner=%u, state=%u\n", buf, res->owner, res->state); printk(" last used: %lu, refcnt: %u, on purge list: %s\n", diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 0334000676d..988c9055fd4 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c @@ -816,7 +816,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data, } /* Once the dlm ctxt is marked as leaving then we don't want - * to be put in someone's domain map. + * to be put in someone's domain map. * Also, explicitly disallow joining at certain troublesome * times (ie. during recovery). */ if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) { diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c index 437698e9465..73333777267 100644 --- a/fs/ocfs2/dlm/dlmlock.c +++ b/fs/ocfs2/dlm/dlmlock.c @@ -269,7 +269,7 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm, } dlm_revert_pending_lock(res, lock); dlm_lock_put(lock); - } else if (dlm_is_recovery_lock(res->lockname.name, + } else if (dlm_is_recovery_lock(res->lockname.name, res->lockname.len)) { /* special case for the $RECOVERY lock. 
* there will never be an AST delivered to put diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 03ccf9a7b1f..a659606dcb9 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -366,7 +366,7 @@ void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up) struct dlm_master_list_entry *mle; assert_spin_locked(&dlm->spinlock); - + list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { if (node_up) dlm_mle_node_up(dlm, mle, NULL, idx); @@ -833,7 +833,7 @@ lookup: __dlm_insert_mle(dlm, mle); /* still holding the dlm spinlock, check the recovery map - * to see if there are any nodes that still need to be + * to see if there are any nodes that still need to be * considered. these will not appear in the mle nodemap * but they might own this lockres. wait on them. */ bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); @@ -883,7 +883,7 @@ redo_request: msleep(500); } continue; - } + } dlm_kick_recovery_thread(dlm); msleep(1000); @@ -939,8 +939,8 @@ wait: res->lockname.name, blocked); if (++tries > 20) { mlog(ML_ERROR, "%s:%.*s: spinning on " - "dlm_wait_for_lock_mastery, blocked=%d\n", - dlm->name, res->lockname.len, + "dlm_wait_for_lock_mastery, blocked=%d\n", + dlm->name, res->lockname.len, res->lockname.name, blocked); dlm_print_one_lock_resource(res); dlm_print_one_mle(mle); @@ -1029,7 +1029,7 @@ recheck: ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); b = (mle->type == DLM_MLE_BLOCK); if ((*blocked && !b) || (!*blocked && b)) { - mlog(0, "%s:%.*s: status change: old=%d new=%d\n", + mlog(0, "%s:%.*s: status change: old=%d new=%d\n", dlm->name, res->lockname.len, res->lockname.name, *blocked, b); *blocked = b; @@ -1602,7 +1602,7 @@ send_response: } mlog(0, "%u is the owner of %.*s, cleaning everyone else\n", dlm->node_num, res->lockname.len, res->lockname.name); - ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, + ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, DLM_ASSERT_MASTER_MLE_CLEANUP); if (ret < 0) { mlog(ML_ERROR, "failed to dispatch assert master work\n"); @@ -1701,7 +1701,7 @@ again: if (r & DLM_ASSERT_RESPONSE_REASSERT) { mlog(0, "%.*s: node %u create mles on other " - "nodes and requests a re-assert\n", + "nodes and requests a re-assert\n", namelen, lockname, to); reassert = 1; } @@ -1812,7 +1812,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data, spin_unlock(&dlm->master_lock); spin_unlock(&dlm->spinlock); goto done; - } + } } } spin_unlock(&dlm->master_lock); @@ -1883,7 +1883,7 @@ ok: int extra_ref = 0; int nn = -1; int rr, err = 0; - + spin_lock(&mle->spinlock); if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) extra_ref = 1; @@ -1891,7 +1891,7 @@ ok: /* MASTER mle: if any bits set in the response map * then the calling node needs to re-assert to clear * up nodes that this node contacted */ - while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, + while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, nn+1)) < O2NM_MAX_NODES) { if (nn != dlm->node_num && nn != assert->node_idx) master_request = 1; @@ -2002,7 +2002,7 @@ kill: __dlm_print_one_lock_resource(res); spin_unlock(&res->spinlock); spin_unlock(&dlm->spinlock); - *ret_data = (void *)res; + *ret_data = (void *)res; dlm_put(dlm); return -EINVAL; } @@ -2040,10 +2040,10 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, item->u.am.request_from = request_from; item->u.am.flags = flags; - if (ignore_higher) - mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, + 
if (ignore_higher) + mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, res->lockname.name); - + spin_lock(&dlm->work_lock); list_add_tail(&item->list, &dlm->work_list); spin_unlock(&dlm->work_lock); @@ -2133,7 +2133,7 @@ put: * think that $RECOVERY is currently mastered by a dead node. If so, * we wait a short time to allow that node to get notified by its own * heartbeat stack, then check again. All $RECOVERY lock resources - * mastered by dead nodes are purged when the hearbeat callback is + * mastered by dead nodes are purged when the hearbeat callback is * fired, so we can know for sure that it is safe to continue once * the node returns a live node or no node. */ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, @@ -2174,7 +2174,7 @@ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, ret = -EAGAIN; } spin_unlock(&dlm->spinlock); - mlog(0, "%s: reco lock master is %u\n", dlm->name, + mlog(0, "%s: reco lock master is %u\n", dlm->name, master); break; } @@ -2602,7 +2602,7 @@ fail: mlog(0, "%s:%.*s: timed out during migration\n", dlm->name, res->lockname.len, res->lockname.name); - /* avoid hang during shutdown when migrating lockres + /* avoid hang during shutdown when migrating lockres * to a node which also goes down */ if (dlm_is_node_dead(dlm, target)) { mlog(0, "%s:%.*s: expected migration " @@ -2738,7 +2738,7 @@ static int dlm_migration_can_proceed(struct dlm_ctxt *dlm, can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING); spin_unlock(&res->spinlock); - /* target has died, so make the caller break out of the + /* target has died, so make the caller break out of the * wait_event, but caller must recheck the domain_map */ spin_lock(&dlm->spinlock); if (!test_bit(mig_target, dlm->domain_map)) diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 2f9e4e19a4f..344bcf90cbf 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -1050,7 +1050,7 @@ static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm, if (lock->ml.node == dead_node) { mlog(0, "AHA! 
there was " "a $RECOVERY lock for dead " - "node %u (%s)!\n", + "node %u (%s)!\n", dead_node, dlm->name); list_del_init(&lock->list); dlm_lock_put(lock); @@ -1164,6 +1164,39 @@ static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres, mres->master = master; } +static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock, + struct dlm_migratable_lockres *mres, + int queue) +{ + if (!lock->lksb) + return; + + /* Ignore lvb in all locks in the blocked list */ + if (queue == DLM_BLOCKED_LIST) + return; + + /* Only consider lvbs in locks with granted EX or PR lock levels */ + if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE) + return; + + if (dlm_lvb_is_empty(mres->lvb)) { + memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN); + return; + } + + /* Ensure the lvb copied for migration matches in other valid locks */ + if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN)) + return; + + mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, " + "node=%u\n", + dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), + lock->lockres->lockname.len, lock->lockres->lockname.name, + lock->ml.node); + dlm_print_one_lock_resource(lock->lockres); + BUG(); +} /* returns 1 if this lock fills the network structure, * 0 otherwise */ @@ -1181,20 +1214,7 @@ static int dlm_add_lock_to_array(struct dlm_lock *lock, ml->list = queue; if (lock->lksb) { ml->flags = lock->lksb->flags; - /* send our current lvb */ - if (ml->type == LKM_EXMODE || - ml->type == LKM_PRMODE) { - /* if it is already set, this had better be a PR - * and it has to match */ - if (!dlm_lvb_is_empty(mres->lvb) && - (ml->type == LKM_EXMODE || - memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) { - mlog(ML_ERROR, "mismatched lvbs!\n"); - dlm_print_one_lock_resource(lock->lockres); - BUG(); - } - memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN); - } + dlm_prepare_lvb_for_migration(lock, mres, queue); } ml->node = lock->ml.node; mres->num_locks++; @@ -1730,6 +1750,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, struct dlm_lock *lock = NULL; u8 from = O2NM_MAX_NODES; unsigned int added = 0; + __be64 c; mlog(0, "running %d locks for this lockres\n", mres->num_locks); for (i=0; i<mres->num_locks; i++) { @@ -1777,19 +1798,48 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, /* lock is always created locally first, and * destroyed locally last. 
it must be on the list */ if (!lock) { - __be64 c = ml->cookie; - mlog(ML_ERROR, "could not find local lock " - "with cookie %u:%llu!\n", + c = ml->cookie; + mlog(ML_ERROR, "Could not find local lock " + "with cookie %u:%llu, node %u, " + "list %u, flags 0x%x, type %d, " + "conv %d, highest blocked %d\n", dlm_get_lock_cookie_node(be64_to_cpu(c)), - dlm_get_lock_cookie_seq(be64_to_cpu(c))); + dlm_get_lock_cookie_seq(be64_to_cpu(c)), + ml->node, ml->list, ml->flags, ml->type, + ml->convert_type, ml->highest_blocked); + __dlm_print_one_lock_resource(res); + BUG(); + } + + if (lock->ml.node != ml->node) { + c = lock->ml.cookie; + mlog(ML_ERROR, "Mismatched node# in lock " + "cookie %u:%llu, name %.*s, node %u\n", + dlm_get_lock_cookie_node(be64_to_cpu(c)), + dlm_get_lock_cookie_seq(be64_to_cpu(c)), + res->lockname.len, res->lockname.name, + lock->ml.node); + c = ml->cookie; + mlog(ML_ERROR, "Migrate lock cookie %u:%llu, " + "node %u, list %u, flags 0x%x, type %d, " + "conv %d, highest blocked %d\n", + dlm_get_lock_cookie_node(be64_to_cpu(c)), + dlm_get_lock_cookie_seq(be64_to_cpu(c)), + ml->node, ml->list, ml->flags, ml->type, + ml->convert_type, ml->highest_blocked); __dlm_print_one_lock_resource(res); BUG(); } - BUG_ON(lock->ml.node != ml->node); if (tmpq != queue) { - mlog(0, "lock was on %u instead of %u for %.*s\n", - j, ml->list, res->lockname.len, res->lockname.name); + c = ml->cookie; + mlog(0, "Lock cookie %u:%llu was on list %u " + "instead of list %u for %.*s\n", + dlm_get_lock_cookie_node(be64_to_cpu(c)), + dlm_get_lock_cookie_seq(be64_to_cpu(c)), + j, ml->list, res->lockname.len, + res->lockname.name); + __dlm_print_one_lock_resource(res); spin_unlock(&res->spinlock); continue; } @@ -1839,7 +1889,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, * the lvb. 
*/ memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); } else { - /* otherwise, the node is sending its + /* otherwise, the node is sending its * most recent valid lvb info */ BUG_ON(ml->type != LKM_EXMODE && ml->type != LKM_PRMODE); @@ -1886,7 +1936,7 @@ skip_lvb: spin_lock(&res->spinlock); list_for_each_entry(lock, queue, list) { if (lock->ml.cookie == ml->cookie) { - __be64 c = lock->ml.cookie; + c = lock->ml.cookie; mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already " "exists on this lockres!\n", dlm->name, res->lockname.len, res->lockname.name, @@ -2114,7 +2164,7 @@ static void dlm_revalidate_lvb(struct dlm_ctxt *dlm, assert_spin_locked(&res->spinlock); if (res->owner == dlm->node_num) - /* if this node owned the lockres, and if the dead node + /* if this node owned the lockres, and if the dead node * had an EX when he died, blank out the lvb */ search_node = dead_node; else { @@ -2152,7 +2202,7 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm, /* this node is the lockres master: * 1) remove any stale locks for the dead node - * 2) if the dead node had an EX when he died, blank out the lvb + * 2) if the dead node had an EX when he died, blank out the lvb */ assert_spin_locked(&dlm->spinlock); assert_spin_locked(&res->spinlock); @@ -2193,7 +2243,12 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm, mlog(0, "%s:%.*s: freed %u locks for dead node %u, " "dropping ref from lockres\n", dlm->name, res->lockname.len, res->lockname.name, freed, dead_node); - BUG_ON(!test_bit(dead_node, res->refmap)); + if(!test_bit(dead_node, res->refmap)) { + mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, " + "but ref was not set\n", dlm->name, + res->lockname.len, res->lockname.name, freed, dead_node); + __dlm_print_one_lock_resource(res); + } dlm_lockres_clear_refmap_bit(dead_node, res); } else if (test_bit(dead_node, res->refmap)) { mlog(0, "%s:%.*s: dead node %u had a ref, but had " @@ -2260,7 +2315,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) } spin_unlock(&res->spinlock); continue; - } + } spin_lock(&res->spinlock); /* zero the lvb if necessary */ dlm_revalidate_lvb(dlm, res, dead_node); @@ -2411,7 +2466,7 @@ static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st) * this function on each node racing to become the recovery * master will not stop attempting this until either: * a) this node gets the EX (and becomes the recovery master), - * or b) dlm->reco.new_master gets set to some nodenum + * or b) dlm->reco.new_master gets set to some nodenum * != O2NM_INVALID_NODE_NUM (another node will do the reco). * so each time a recovery master is needed, the entire cluster * will sync at this point. if the new master dies, that will @@ -2424,7 +2479,7 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n", dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num); -again: +again: memset(&lksb, 0, sizeof(lksb)); ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY, @@ -2437,8 +2492,8 @@ again: if (ret == DLM_NORMAL) { mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n", dlm->name, dlm->node_num); - - /* got the EX lock. check to see if another node + + /* got the EX lock. 
check to see if another node * just became the reco master */ if (dlm_reco_master_ready(dlm)) { mlog(0, "%s: got reco EX lock, but %u will " @@ -2451,12 +2506,12 @@ again: /* see if recovery was already finished elsewhere */ spin_lock(&dlm->spinlock); if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { - status = -EINVAL; + status = -EINVAL; mlog(0, "%s: got reco EX lock, but " "node got recovered already\n", dlm->name); if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { mlog(ML_ERROR, "%s: new master is %u " - "but no dead node!\n", + "but no dead node!\n", dlm->name, dlm->reco.new_master); BUG(); } @@ -2468,7 +2523,7 @@ again: * set the master and send the messages to begin recovery */ if (!status) { mlog(0, "%s: dead=%u, this=%u, sending " - "begin_reco now\n", dlm->name, + "begin_reco now\n", dlm->name, dlm->reco.dead_node, dlm->node_num); status = dlm_send_begin_reco_message(dlm, dlm->reco.dead_node); @@ -2501,7 +2556,7 @@ again: mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n", dlm->name, dlm->node_num); /* another node is master. wait on - * reco.new_master != O2NM_INVALID_NODE_NUM + * reco.new_master != O2NM_INVALID_NODE_NUM * for at most one second */ wait_event_timeout(dlm->dlm_reco_thread_wq, dlm_reco_master_ready(dlm), @@ -2589,7 +2644,13 @@ retry: "begin reco msg (%d)\n", dlm->name, nodenum, ret); ret = 0; } - if (ret == -EAGAIN) { + + /* + * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8, + * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN. + * We are handling both for compatibility reasons. + */ + if (ret == -EAGAIN || ret == EAGAIN) { mlog(0, "%s: trying to start recovery of node " "%u, but node %u is waiting for last recovery " "to complete, backoff for a bit\n", dlm->name, @@ -2599,7 +2660,7 @@ retry: } if (ret < 0) { struct dlm_lock_resource *res; - /* this is now a serious problem, possibly ENOMEM + /* this is now a serious problem, possibly ENOMEM * in the network stack. 
must retry */ mlog_errno(ret); mlog(ML_ERROR, "begin reco of dlm %s to node %u " @@ -2612,7 +2673,7 @@ retry: } else { mlog(ML_ERROR, "recovery lock not found\n"); } - /* sleep for a bit in hopes that we can avoid + /* sleep for a bit in hopes that we can avoid * another ENOMEM */ msleep(100); goto retry; @@ -2664,7 +2725,7 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data, } if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) { mlog(ML_NOTICE, "%s: dead_node previously set to %u, " - "node %u changing it to %u\n", dlm->name, + "node %u changing it to %u\n", dlm->name, dlm->reco.dead_node, br->node_idx, br->dead_node); } dlm_set_reco_master(dlm, br->node_idx); @@ -2730,8 +2791,8 @@ stage2: if (ret < 0) { mlog_errno(ret); if (dlm_is_host_down(ret)) { - /* this has no effect on this recovery - * session, so set the status to zero to + /* this has no effect on this recovery + * session, so set the status to zero to * finish out the last recovery */ mlog(ML_ERROR, "node %u went down after this " "node finished recovery.\n", nodenum); @@ -2768,7 +2829,7 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data, mlog(0, "%s: node %u finalizing recovery stage%d of " "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage, fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master); - + spin_lock(&dlm->spinlock); if (dlm->reco.new_master != fr->node_idx) { diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c index 00f53b2aea7..49e29ecd020 100644 --- a/fs/ocfs2/dlm/dlmunlock.c +++ b/fs/ocfs2/dlm/dlmunlock.c @@ -190,8 +190,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, actions &= ~(DLM_UNLOCK_REMOVE_LOCK| DLM_UNLOCK_REGRANT_LOCK| DLM_UNLOCK_CLEAR_CONVERT_TYPE); - } else if (status == DLM_RECOVERING || - status == DLM_MIGRATING || + } else if (status == DLM_RECOVERING || + status == DLM_MIGRATING || status == DLM_FORWARD) { /* must clear the actions because this unlock * is about to be retried. cannot free or do @@ -661,14 +661,14 @@ retry: if (call_ast) { mlog(0, "calling unlockast(%p, %d)\n", data, status); if (is_master) { - /* it is possible that there is one last bast + /* it is possible that there is one last bast * pending. make sure it is flushed, then * call the unlockast. * not an issue if this is a mastered remotely, * since this lock has been removed from the * lockres queues and cannot be found. */ dlm_kick_thread(dlm, NULL); - wait_event(dlm->ast_wq, + wait_event(dlm->ast_wq, dlm_lock_basts_flushed(dlm, lock)); } (*unlockast)(data, status); diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index c5e4a49e3a1..e044019cb3b 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -875,6 +875,14 @@ static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lo lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH); lockres->l_level = lockres->l_requested; + + /* + * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing + * the OCFS2_LOCK_BUSY flag to prevent the dc thread from + * downconverting the lock before the upconvert has fully completed. 
+ */ + lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); + lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); mlog_exit_void(); @@ -907,8 +915,6 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, assert_spin_locked(&lockres->l_lock); - lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED); - if (level > lockres->l_blocking) { /* only schedule a downconvert if we haven't already scheduled * one that goes low enough to satisfy the level we're @@ -921,6 +927,9 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, lockres->l_blocking = level; } + if (needs_downconvert) + lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED); + mlog_exit(needs_downconvert); return needs_downconvert; } @@ -1133,6 +1142,7 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres, mlog_entry_void(); spin_lock_irqsave(&lockres->l_lock, flags); lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); + lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); if (convert) lockres->l_action = OCFS2_AST_INVALID; else @@ -1323,13 +1333,13 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb, again: wait = 0; + spin_lock_irqsave(&lockres->l_lock, flags); + if (catch_signals && signal_pending(current)) { ret = -ERESTARTSYS; - goto out; + goto unlock; } - spin_lock_irqsave(&lockres->l_lock, flags); - mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING, "Cluster lock called on freeing lockres %s! flags " "0x%lx\n", lockres->l_name, lockres->l_flags); @@ -1346,6 +1356,25 @@ again: goto unlock; } + if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) { + /* + * We've upconverted. If the lock now has a level we can + * work with, we take it. If, however, the lock is not at the + * required level, we go thru the full cycle. One way this could + * happen is if a process requesting an upconvert to PR is + * closely followed by another requesting upconvert to an EX. + * If the process requesting EX lands here, we want it to + * continue attempting to upconvert and let the process + * requesting PR take the lock. + * If multiple processes request upconvert to PR, the first one + * here will take the lock. The others will have to go thru the + * OCFS2_LOCK_BLOCKED check to ensure that there is no pending + * downconvert request. + */ + if (level <= lockres->l_level) + goto update_holders; + } + if (lockres->l_flags & OCFS2_LOCK_BLOCKED && !ocfs2_may_continue_on_blocked_lock(lockres, level)) { /* is the lock is currently blocked on behalf of @@ -1416,11 +1445,14 @@ again: goto again; } +update_holders: /* Ok, if we get here then we're good to go. */ ocfs2_inc_holders(lockres, level); ret = 0; unlock: + lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); + spin_unlock_irqrestore(&lockres->l_lock, flags); out: /* @@ -3155,7 +3187,7 @@ out: /* Mark the lockres as being dropped. It will no longer be * queued if blocking, but we still may have to wait on it * being dequeued from the downconvert thread before we can consider - * it safe to drop. + * it safe to drop. * * You can *not* attempt to call cluster_lock on this lockres anymore. 
*/ void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres) @@ -3352,6 +3384,7 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb, unsigned long flags; int blocking; int new_level; + int level; int ret = 0; int set_lvb = 0; unsigned int gen; @@ -3360,9 +3393,17 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb, spin_lock_irqsave(&lockres->l_lock, flags); - BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED)); - recheck: + /* + * Is it still blocking? If not, we have no more work to do. + */ + if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) { + BUG_ON(lockres->l_blocking != DLM_LOCK_NL); + spin_unlock_irqrestore(&lockres->l_lock, flags); + ret = 0; + goto leave; + } + if (lockres->l_flags & OCFS2_LOCK_BUSY) { /* XXX * This is a *big* race. The OCFS2_LOCK_PENDING flag @@ -3401,6 +3442,31 @@ recheck: goto leave; } + /* + * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is + * set when the ast is received for an upconvert just before the + * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast + * on the heels of the ast, we want to delay the downconvert just + * enough to allow the up requestor to do its task. Because this + * lock is in the blocked queue, the lock will be downconverted + * as soon as the requestor is done with the lock. + */ + if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) + goto leave_requeue; + + /* + * How can we block and yet be at NL? We were trying to upconvert + * from NL and got canceled. The code comes back here, and now + * we notice and clear BLOCKING. + */ + if (lockres->l_level == DLM_LOCK_NL) { + BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders); + lockres->l_blocking = DLM_LOCK_NL; + lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED); + spin_unlock_irqrestore(&lockres->l_lock, flags); + goto leave; + } + /* if we're blocking an exclusive and we have *any* holders, * then requeue. */ if ((lockres->l_blocking == DLM_LOCK_EX) @@ -3438,6 +3504,7 @@ recheck: * may sleep, so we save off a copy of what we're blocking as * it may change while we're not holding the spin lock. */ blocking = lockres->l_blocking; + level = lockres->l_level; spin_unlock_irqrestore(&lockres->l_lock, flags); ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking); @@ -3446,7 +3513,7 @@ recheck: goto leave; spin_lock_irqsave(&lockres->l_lock, flags); - if (blocking != lockres->l_blocking) { + if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) { /* If this changed underneath us, then we can't drop * it just yet. 
*/ goto recheck; diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c index 15713cbb865..19ad145d2af 100644 --- a/fs/ocfs2/export.c +++ b/fs/ocfs2/export.c @@ -239,7 +239,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len, mlog(0, "Encoding parent: blkno: %llu, generation: %u\n", (unsigned long long)blkno, generation); } - + *max_len = len; bail: diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index d35a27f4523..5328529e7fd 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c @@ -192,7 +192,7 @@ static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi, emi->ei_clusters += ins->ei_clusters; return 1; } else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys && - (ins->ei_cpos + ins->ei_clusters) == emi->ei_phys && + (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos && ins->ei_flags == emi->ei_flags) { emi->ei_phys = ins->ei_phys; emi->ei_cpos = ins->ei_cpos; diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 06ccf6a86d3..558ce031242 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -749,7 +749,7 @@ static int ocfs2_write_zero_page(struct inode *inode, int ret; offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */ - /* ugh. in prepare/commit_write, if from==to==start of block, we + /* ugh. in prepare/commit_write, if from==to==start of block, we ** skip the prepare. make sure we never send an offset for the start ** of a block */ @@ -1779,7 +1779,7 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry, struct inode *inode = dentry->d_inode; loff_t saved_pos, end; - /* + /* * We start with a read level meta lock and only jump to an ex * if we need to make modifications here. */ @@ -2013,8 +2013,8 @@ out_dio: /* buffered aio wouldn't have proper lock coverage today */ BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT)); - if ((file->f_flags & O_DSYNC && !direct_io) || IS_SYNC(inode) || - (file->f_flags & O_DIRECT && has_refcount)) { + if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || + ((file->f_flags & O_DIRECT) && has_refcount)) { ret = filemap_fdatawrite_range(file->f_mapping, pos, pos + count - 1); if (ret < 0) @@ -2033,7 +2033,7 @@ out_dio: pos + count - 1); } - /* + /* * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io * function pointer which is called when o_direct io completes so that * it can unlock our rw lock. (it's the clustered equivalent of @@ -2198,7 +2198,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, goto bail; } - /* + /* * buffered reads protect themselves in ->readpage(). O_DIRECT reads * need locks to protect pending reads from racing with truncate. */ @@ -2220,10 +2220,10 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, * We're fine letting folks race truncates and extending * writes with read across the cluster, just like they can * locally. Hence no rw_lock during read. - * + * * Take and drop the meta data lock to update inode fields * like i_size. This allows the checks down below - * generic_file_aio_read() a chance of actually working. + * generic_file_aio_read() a chance of actually working. 
*/ ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level); if (ret < 0) { @@ -2248,7 +2248,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, bail: if (have_alloc_sem) up_read(&inode->i_alloc_sem); - if (rw_level != -1) + if (rw_level != -1) ocfs2_rw_unlock(inode, rw_level); mlog_exit(ret); diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 0297fb8982b..88459bdd1ff 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -475,7 +475,7 @@ static int ocfs2_read_locked_inode(struct inode *inode, if (args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) { status = ocfs2_try_open_lock(inode, 0); if (status) { - make_bad_inode(inode); + make_bad_inode(inode); return status; } } @@ -684,7 +684,7 @@ bail: return status; } -/* +/* * Serialize with orphan dir recovery. If the process doing * recovery on this orphan dir does an iget() with the dir * i_mutex held, we'll deadlock here. Instead we detect this diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c index 31fbb061951..7d9d9c132ce 100644 --- a/fs/ocfs2/ioctl.c +++ b/fs/ocfs2/ioctl.c @@ -7,6 +7,7 @@ #include <linux/fs.h> #include <linux/mount.h> +#include <linux/compat.h> #define MLOG_MASK_PREFIX ML_INODE #include <cluster/masklog.h> @@ -181,6 +182,10 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) #ifdef CONFIG_COMPAT long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg) { + bool preserve; + struct reflink_arguments args; + struct inode *inode = file->f_path.dentry->d_inode; + switch (cmd) { case OCFS2_IOC32_GETFLAGS: cmd = OCFS2_IOC_GETFLAGS; @@ -195,8 +200,15 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg) case OCFS2_IOC_GROUP_EXTEND: case OCFS2_IOC_GROUP_ADD: case OCFS2_IOC_GROUP_ADD64: - case OCFS2_IOC_REFLINK: break; + case OCFS2_IOC_REFLINK: + if (copy_from_user(&args, (struct reflink_arguments *)arg, + sizeof(args))) + return -EFAULT; + preserve = (args.preserve != 0); + + return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path), + compat_ptr(args.new_path), preserve); default: return -ENOIOCTLCMD; } diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index bf34c491ae9..9336c60e3a3 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -2034,7 +2034,7 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb, status = -ENOENT; mlog_errno(status); return status; - } + } mutex_lock(&orphan_dir_inode->i_mutex); status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0); diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 9362eea7424..740f448041e 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -136,6 +136,10 @@ enum ocfs2_unlock_action { #define OCFS2_LOCK_PENDING (0x00000400) /* This lockres is pending a call to dlm_lock. Only exists with BUSY set. 
*/ +#define OCFS2_LOCK_UPCONVERT_FINISHING (0x00000800) /* blocks the dc thread + * from downconverting + * before the upconvert + * has completed */ struct ocfs2_lock_res_ops; diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 1a1a679e51b..7638a38c32b 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -1417,9 +1417,16 @@ static inline int ocfs2_fast_symlink_chars(int blocksize) return blocksize - offsetof(struct ocfs2_dinode, id2.i_symlink); } -static inline int ocfs2_max_inline_data(int blocksize) +static inline int ocfs2_max_inline_data_with_xattr(int blocksize, + struct ocfs2_dinode *di) { - return blocksize - offsetof(struct ocfs2_dinode, id2.i_data.id_data); + if (di && (di->i_dyn_features & OCFS2_INLINE_XATTR_FL)) + return blocksize - + offsetof(struct ocfs2_dinode, id2.i_data.id_data) - + di->i_xattr_inline_size; + else + return blocksize - + offsetof(struct ocfs2_dinode, id2.i_data.id_data); } static inline int ocfs2_extent_recs_per_inode(int blocksize) diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 74db2be75dd..8ae65c9c020 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -2945,7 +2945,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle, while (offset < end) { page_index = offset >> PAGE_CACHE_SHIFT; - map_end = (page_index + 1) << PAGE_CACHE_SHIFT; + map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT; if (map_end > end) map_end = end; @@ -2957,8 +2957,12 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle, page = grab_cache_page(mapping, page_index); - /* This page can't be dirtied before we CoW it out. */ - BUG_ON(PageDirty(page)); + /* + * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page + * can't be dirtied before we CoW it out. + */ + if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize) + BUG_ON(PageDirty(page)); if (!PageUptodate(page)) { ret = block_read_full_page(page, ocfs2_get_block); @@ -3170,7 +3174,7 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb, while (offset < end) { page_index = offset >> PAGE_CACHE_SHIFT; - map_end = (page_index + 1) << PAGE_CACHE_SHIFT; + map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT; if (map_end > end) map_end = end; diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c index e49c4105026..3038c92af49 100644 --- a/fs/ocfs2/stack_o2cb.c +++ b/fs/ocfs2/stack_o2cb.c @@ -277,7 +277,7 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn) u32 dlm_key; struct dlm_ctxt *dlm; struct o2dlm_private *priv; - struct dlm_protocol_version dlm_version; + struct dlm_protocol_version fs_version; BUG_ON(conn == NULL); BUG_ON(o2cb_stack.sp_proto == NULL); @@ -304,18 +304,18 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn) /* used by the dlm code to make message headers unique, each * node in this domain must agree on this. 
*/ dlm_key = crc32_le(0, conn->cc_name, conn->cc_namelen); - dlm_version.pv_major = conn->cc_version.pv_major; - dlm_version.pv_minor = conn->cc_version.pv_minor; + fs_version.pv_major = conn->cc_version.pv_major; + fs_version.pv_minor = conn->cc_version.pv_minor; - dlm = dlm_register_domain(conn->cc_name, dlm_key, &dlm_version); + dlm = dlm_register_domain(conn->cc_name, dlm_key, &fs_version); if (IS_ERR(dlm)) { rc = PTR_ERR(dlm); mlog_errno(rc); goto out_free; } - conn->cc_version.pv_major = dlm_version.pv_major; - conn->cc_version.pv_minor = dlm_version.pv_minor; + conn->cc_version.pv_major = fs_version.pv_major; + conn->cc_version.pv_minor = fs_version.pv_minor; conn->cc_lockspace = dlm; dlm_register_eviction_cb(dlm, &priv->op_eviction_cb); diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 26069917a9f..755cd49a5ef 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1062,7 +1062,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) "file system, but write access is " "unavailable.\n"); else - mlog_errno(status); + mlog_errno(status); goto read_super_error; } diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c index 49b133ccbf1..32499d213fc 100644 --- a/fs/ocfs2/symlink.c +++ b/fs/ocfs2/symlink.c @@ -137,20 +137,20 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry, } memcpy(link, target, len); - nd_set_link(nd, link); bail: + nd_set_link(nd, status ? ERR_PTR(status) : link); brelse(bh); mlog_exit(status); - return status ? ERR_PTR(status) : link; + return NULL; } static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) { - char *link = cookie; - - kfree(link); + char *link = nd_get_link(nd); + if (!IS_ERR(link)) + kfree(link); } const struct inode_operations ocfs2_symlink_inode_operations = { diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c index c61369342a2..a0a120e82b9 100644 --- a/fs/ocfs2/uptodate.c +++ b/fs/ocfs2/uptodate.c @@ -267,8 +267,8 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci, } /* Warning: even if it returns true, this does *not* guarantee that - * the block is stored in our inode metadata cache. - * + * the block is stored in our inode metadata cache. 
+ * * This can be called under lock_buffer() */ int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci, diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 9087b10209e..2df0f5c7c60 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -1497,9 +1497,11 @@ struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key) args.objectid = key->on_disk_key.k_objectid; args.dirid = key->on_disk_key.k_dir_id; + reiserfs_write_unlock(s); inode = iget5_locked(s, key->on_disk_key.k_objectid, reiserfs_find_actor, reiserfs_init_locked_inode, (void *)(&args)); + reiserfs_write_lock(s); if (!inode) return ERR_PTR(-ENOMEM); diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c index 220b758523a..6a06a1d1ea7 100644 --- a/fs/sysfs/inode.c +++ b/fs/sysfs/inode.c @@ -81,24 +81,23 @@ int sysfs_sd_setattr(struct sysfs_dirent *sd, struct iattr * iattr) if (!sd_attrs) return -ENOMEM; sd->s_iattr = sd_attrs; - } else { - /* attributes were changed at least once in past */ - iattrs = &sd_attrs->ia_iattr; - - if (ia_valid & ATTR_UID) - iattrs->ia_uid = iattr->ia_uid; - if (ia_valid & ATTR_GID) - iattrs->ia_gid = iattr->ia_gid; - if (ia_valid & ATTR_ATIME) - iattrs->ia_atime = iattr->ia_atime; - if (ia_valid & ATTR_MTIME) - iattrs->ia_mtime = iattr->ia_mtime; - if (ia_valid & ATTR_CTIME) - iattrs->ia_ctime = iattr->ia_ctime; - if (ia_valid & ATTR_MODE) { - umode_t mode = iattr->ia_mode; - iattrs->ia_mode = sd->s_mode = mode; - } + } + /* attributes were changed at least once in past */ + iattrs = &sd_attrs->ia_iattr; + + if (ia_valid & ATTR_UID) + iattrs->ia_uid = iattr->ia_uid; + if (ia_valid & ATTR_GID) + iattrs->ia_gid = iattr->ia_gid; + if (ia_valid & ATTR_ATIME) + iattrs->ia_atime = iattr->ia_atime; + if (ia_valid & ATTR_MTIME) + iattrs->ia_mtime = iattr->ia_mtime; + if (ia_valid & ATTR_CTIME) + iattrs->ia_ctime = iattr->ia_ctime; + if (ia_valid & ATTR_MODE) { + umode_t mode = iattr->ia_mode; + iattrs->ia_mode = sd->s_mode = mode; } return 0; } diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h index 1e67c441ea8..f745948b61e 100644 --- a/include/drm/nouveau_drm.h +++ b/include/drm/nouveau_drm.h @@ -77,6 +77,7 @@ struct drm_nouveau_gpuobj_free { #define NOUVEAU_GETPARAM_PCI_PHYSICAL 10 #define NOUVEAU_GETPARAM_CHIPSET_ID 11 #define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 +#define NOUVEAU_GETPARAM_GRAPH_UNITS 13 struct drm_nouveau_getparam { uint64_t param; uint64_t value; diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h index 2be7e1249b6..c7645f480d1 100644 --- a/include/drm/vmwgfx_drm.h +++ b/include/drm/vmwgfx_drm.h @@ -68,7 +68,8 @@ #define DRM_VMW_PARAM_NUM_FREE_STREAMS 1 #define DRM_VMW_PARAM_3D 2 #define DRM_VMW_PARAM_FIFO_OFFSET 3 - +#define DRM_VMW_PARAM_HW_CAPS 4 +#define DRM_VMW_PARAM_FIFO_CAPS 5 /** * struct drm_vmw_getparam_arg @@ -181,6 +182,8 @@ struct drm_vmw_context_arg { * The size of the array should equal the total number of mipmap levels. * @shareable: Boolean whether other clients (as identified by file descriptors) * may reference this surface. + * @scanout: Boolean whether the surface is intended to be used as a + * scanout. * * Input data to the DRM_VMW_CREATE_SURFACE Ioctl. * Output data from the DRM_VMW_REF_SURFACE Ioctl. @@ -192,7 +195,7 @@ struct drm_vmw_surface_create_req { uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; uint64_t size_addr; int32_t shareable; - uint32_t pad64; + int32_t scanout; }; /** @@ -295,17 +298,28 @@ union drm_vmw_surface_reference_arg { * * @commands: User-space address of a command buffer cast to an uint64_t. 
* @command-size: Size in bytes of the command buffer. + * @throttle-us: Sleep until software is less than @throttle_us + * microseconds ahead of hardware. The driver may round this value + * to the nearest kernel tick. * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an * uint64_t. + * @version: Allows expanding the execbuf ioctl parameters without breaking + * backwards compatibility, since user-space will always tell the kernel + * which version it uses. + * @flags: Execbuf flags. None currently. * * Argument to the DRM_VMW_EXECBUF Ioctl. */ +#define DRM_VMW_EXECBUF_VERSION 0 + struct drm_vmw_execbuf_arg { uint64_t commands; uint32_t command_size; - uint32_t pad64; + uint32_t throttle_us; uint64_t fence_rep; + uint32_t version; + uint32_t flags; }; /** diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index ab94335b4bb..6816be6c3f7 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -1,5 +1,9 @@ /* - * linux/include/asm-arm/hardware/amba.h + * linux/include/amba/bus.h + * + * This device type deals with ARM PrimeCells and anything else that + * presents a proper CID (0xB105F00D) at the end of the I/O register + * region or that is derived from a PrimeCell. * * Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved. * diff --git a/include/linux/ata.h b/include/linux/ata.h index 38a6948ce0c..20f31567cce 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -647,9 +647,9 @@ static inline int ata_id_has_large_logical_sectors(const u16 *id) return id[ATA_ID_SECTOR_SIZE] & (1 << 13); } -static inline u8 ata_id_logical_per_physical_sectors(const u16 *id) +static inline u16 ata_id_logical_per_physical_sectors(const u16 *id) { - return id[ATA_ID_SECTOR_SIZE] & 0xf; + return 1 << (id[ATA_ID_SECTOR_SIZE] & 0xf); } static inline int ata_id_has_lba48(const u16 *id) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 5be3dab4a69..188fcae10a9 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -15,6 +15,7 @@ # define __acquire(x) __context__(x,1) # define __release(x) __context__(x,-1) # define __cond_lock(x,c) ((c) ? 
({ __acquire(x); 1; }) : 0) +# define __percpu __attribute__((noderef, address_space(3))) extern void __chk_user_ptr(const volatile void __user *); extern void __chk_io_ptr(const volatile void __iomem *); #else @@ -32,6 +33,7 @@ extern void __chk_io_ptr(const volatile void __iomem *); # define __acquire(x) (void)0 # define __release(x) (void)0 # define __cond_lock(x,c) (c) +# define __percpu #endif #ifdef __KERNEL__ diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index 070ba062173..5977b724f7c 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -44,7 +44,7 @@ static inline int hw_breakpoint_type(struct perf_event *bp) return bp->attr.bp_type; } -static inline int hw_breakpoint_len(struct perf_event *bp) +static inline unsigned long hw_breakpoint_len(struct perf_event *bp) { return bp->attr.bp_len; } diff --git a/include/linux/ima.h b/include/linux/ima.h index 99dc6d5cf7e..975837e7d6c 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -17,7 +17,7 @@ struct linux_binprm; extern int ima_bprm_check(struct linux_binprm *bprm); extern int ima_inode_alloc(struct inode *inode); extern void ima_inode_free(struct inode *inode); -extern int ima_path_check(struct path *path, int mask); +extern int ima_file_check(struct file *file, int mask); extern void ima_file_free(struct file *file); extern int ima_file_mmap(struct file *file, unsigned long prot); extern void ima_counts_get(struct file *file); @@ -38,7 +38,7 @@ static inline void ima_inode_free(struct inode *inode) return; } -static inline int ima_path_check(struct path *path, int mask) +static inline int ima_file_check(struct file *file, int mask) { return 0; } diff --git a/include/linux/input.h b/include/linux/input.h index 735ceaf1bc2..663208afb64 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -376,6 +376,7 @@ struct input_absinfo { #define KEY_DISPLAY_OFF 245 /* display device to off state */ #define KEY_WIMAX 246 +#define KEY_RFKILL 247 /* Key that controls all radios */ /* Range 248 - 255 is reserved for special needs of AT keyboard driver */ diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index 6f6c5f300af..bc0fc795bd3 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -124,7 +124,7 @@ extern __must_check unsigned int kfifo_out_peek(struct kfifo *fifo, */ static inline bool kfifo_initialized(struct kfifo *fifo) { - return fifo->buffer != 0; + return fifo->buffer != NULL; } /** diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 8fa71874113..a177698d95e 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -211,11 +211,9 @@ struct perf_event_attr { __u32 wakeup_watermark; /* bytes before wakeup */ }; - __u32 __reserved_2; - - __u64 bp_addr; __u32 bp_type; - __u32 bp_len; + __u64 bp_addr; + __u64 bp_len; }; /* diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index ba1ba0c5efd..63d449807d9 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -11,6 +11,8 @@ struct nf_conntrack_ecache; struct netns_ct { atomic_t count; unsigned int expect_count; + unsigned int htable_size; + struct kmem_cache *nf_conntrack_cachep; struct hlist_nulls_head *hash; struct hlist_head *expect_hash; struct hlist_nulls_head unconfirmed; @@ -28,5 +30,6 @@ struct netns_ct { #endif int hash_vmalloc; int expect_vmalloc; + char *slabname; }; #endif diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 2eb3814d625..9a4b8b71407 100644 --- 
a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -40,6 +40,7 @@ struct netns_ipv4 { struct xt_table *iptable_security; struct xt_table *nat_table; struct hlist_head *nat_bysource; + unsigned int nat_htable_size; int nat_vmalloced; #endif diff --git a/init/main.c b/init/main.c index dac44a9356a..4cb47a159f0 100644 --- a/init/main.c +++ b/init/main.c @@ -657,9 +657,9 @@ asmlinkage void __init start_kernel(void) proc_caches_init(); buffer_init(); key_init(); + radix_tree_init(); security_init(); vfs_caches_init(totalram_pages); - radix_tree_init(); signals_init(); /* rootfs populating might need page-writeback */ page_writeback_init(); diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index 8a5c7d55ac9..967e66143e1 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -360,8 +360,8 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) { u64 old_addr = bp->attr.bp_addr; + u64 old_len = bp->attr.bp_len; int old_type = bp->attr.bp_type; - int old_len = bp->attr.bp_len; int err = 0; perf_event_disable(bp); diff --git a/kernel/kfifo.c b/kernel/kfifo.c index 498cabba225..35edbe22e9a 100644 --- a/kernel/kfifo.c +++ b/kernel/kfifo.c @@ -80,7 +80,7 @@ int kfifo_alloc(struct kfifo *fifo, unsigned int size, gfp_t gfp_mask) buffer = kmalloc(size, gfp_mask); if (!buffer) { - _kfifo_init(fifo, 0, 0); + _kfifo_init(fifo, NULL, 0); return -ENOMEM; } @@ -97,6 +97,7 @@ EXPORT_SYMBOL(kfifo_alloc); void kfifo_free(struct kfifo *fifo) { kfree(fifo->buffer); + _kfifo_init(fifo, NULL, 0); } EXPORT_SYMBOL(kfifo_free); diff --git a/kernel/perf_event.c b/kernel/perf_event.c index d27746bd3a0..2b19297742c 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4580,7 +4580,7 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr, if (attr->type >= PERF_TYPE_MAX) return -EINVAL; - if (attr->__reserved_1 || attr->__reserved_2) + if (attr->__reserved_1) return -EINVAL; if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) diff --git a/kernel/softirq.c b/kernel/softirq.c index a09502e2ef7..7c1a67ef027 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -500,22 +500,17 @@ EXPORT_SYMBOL(tasklet_kill); */ /* - * The trampoline is called when the hrtimer expires. If this is - * called from the hrtimer interrupt then we schedule the tasklet as - * the timer callback function expects to run in softirq context. If - * it's called in softirq context anyway (i.e. high resolution timers - * disabled) then the hrtimer callback is called right away. + * The trampoline is called when the hrtimer expires. It schedules a tasklet + * to run __tasklet_hrtimer_trampoline() which in turn will call the intended + * hrtimer callback, but from softirq context. 
*/ static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer) { struct tasklet_hrtimer *ttimer = container_of(timer, struct tasklet_hrtimer, timer); - if (hrtimer_is_hres_active(timer)) { - tasklet_hi_schedule(&ttimer->tasklet); - return HRTIMER_NORESTART; - } - return ttimer->function(timer); + tasklet_hi_schedule(&ttimer->tasklet); + return HRTIMER_NORESTART; } /* diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 7faaa32fbf4..e2ab064c6d4 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -880,6 +880,7 @@ void getboottime(struct timespec *ts) set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec); } +EXPORT_SYMBOL_GPL(getboottime); /** * monotonic_to_bootbased - Convert the monotonic time to boot based. @@ -889,6 +890,7 @@ void monotonic_to_bootbased(struct timespec *ts) { *ts = timespec_add_safe(*ts, total_sleep_time); } +EXPORT_SYMBOL_GPL(monotonic_to_bootbased); unsigned long get_seconds(void) { diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 6ea90c0e2c9..50b1b823980 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -689,7 +689,7 @@ static int create_trace_probe(int argc, char **argv) return -EINVAL; } /* an address specified */ - ret = strict_strtoul(&argv[0][2], 0, (unsigned long *)&addr); + ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr); if (ret) { pr_info("Failed to parse address.\n"); return ret; diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 678a5120ee3..f4bc9b27de5 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -157,6 +157,7 @@ stack_max_size_write(struct file *filp, const char __user *ubuf, unsigned long val, flags; char buf[64]; int ret; + int cpu; if (count >= sizeof(buf)) return -EINVAL; @@ -171,9 +172,20 @@ stack_max_size_write(struct file *filp, const char __user *ubuf, return ret; local_irq_save(flags); + + /* + * In case we trace inside arch_spin_lock() or after (NMI), + * we will cause circular lock, so we also need to increase + * the percpu trace_active here. 
+ */ + cpu = smp_processor_id(); + per_cpu(trace_active, cpu)++; + arch_spin_lock(&max_stack_lock); *ptr = val; arch_spin_unlock(&max_stack_lock); + + per_cpu(trace_active, cpu)--; local_irq_restore(flags); return count; @@ -206,7 +218,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos) static void *t_start(struct seq_file *m, loff_t *pos) { + int cpu; + local_irq_disable(); + + cpu = smp_processor_id(); + per_cpu(trace_active, cpu)++; + arch_spin_lock(&max_stack_lock); if (*pos == 0) @@ -217,7 +235,13 @@ static void *t_start(struct seq_file *m, loff_t *pos) static void t_stop(struct seq_file *m, void *p) { + int cpu; + arch_spin_unlock(&max_stack_lock); + + cpu = smp_processor_id(); + per_cpu(trace_active, cpu)--; + local_irq_enable(); } diff --git a/mm/migrate.c b/mm/migrate.c index efddbf0926b..9a0db5bbabe 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -912,6 +912,9 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task, goto out_pm; err = -ENODEV; + if (node < 0 || node >= MAX_NUMNODES) + goto out_pm; + if (!node_state(node, N_HIGH_MEMORY)) goto out_pm; diff --git a/net/9p/client.c b/net/9p/client.c index 8af95b2dddd..09d4f1e2e4a 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -69,7 +69,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...); static int parse_opts(char *opts, struct p9_client *clnt) { - char *options; + char *options, *tmp_options; char *p; substring_t args[MAX_OPT_ARGS]; int option; @@ -81,12 +81,13 @@ static int parse_opts(char *opts, struct p9_client *clnt) if (!opts) return 0; - options = kstrdup(opts, GFP_KERNEL); - if (!options) { + tmp_options = kstrdup(opts, GFP_KERNEL); + if (!tmp_options) { P9_DPRINTK(P9_DEBUG_ERROR, "failed to allocate copy of option string\n"); return -ENOMEM; } + options = tmp_options; while ((p = strsep(&options, ",")) != NULL) { int token; @@ -108,6 +109,13 @@ static int parse_opts(char *opts, struct p9_client *clnt) break; case Opt_trans: clnt->trans_mod = v9fs_get_trans_by_name(&args[0]); + if(clnt->trans_mod == NULL) { + P9_DPRINTK(P9_DEBUG_ERROR, + "Could not find request transport: %s\n", + (char *) &args[0]); + ret = -EINVAL; + goto free_and_return; + } break; case Opt_legacy: clnt->dotu = 0; @@ -117,7 +125,8 @@ static int parse_opts(char *opts, struct p9_client *clnt) } } - kfree(options); +free_and_return: + kfree(tmp_options); return ret; } @@ -667,18 +676,12 @@ struct p9_client *p9_client_create(const char *dev_name, char *options) clnt->trans = NULL; spin_lock_init(&clnt->lock); INIT_LIST_HEAD(&clnt->fidlist); - clnt->fidpool = p9_idpool_create(); - if (IS_ERR(clnt->fidpool)) { - err = PTR_ERR(clnt->fidpool); - clnt->fidpool = NULL; - goto error; - } p9_tag_init(clnt); err = parse_opts(options, clnt); if (err < 0) - goto error; + goto free_client; if (!clnt->trans_mod) clnt->trans_mod = v9fs_get_default_trans(); @@ -687,7 +690,14 @@ struct p9_client *p9_client_create(const char *dev_name, char *options) err = -EPROTONOSUPPORT; P9_DPRINTK(P9_DEBUG_ERROR, "No transport defined or default transport\n"); - goto error; + goto free_client; + } + + clnt->fidpool = p9_idpool_create(); + if (IS_ERR(clnt->fidpool)) { + err = PTR_ERR(clnt->fidpool); + clnt->fidpool = NULL; + goto put_trans; } P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d dotu %d\n", @@ -695,19 +705,25 @@ struct p9_client *p9_client_create(const char *dev_name, char *options) err = clnt->trans_mod->create(clnt, dev_name, options); if (err) - goto error; + goto destroy_fidpool; if ((clnt->msize+P9_IOHDRSZ) > 
clnt->trans_mod->maxsize) clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ; err = p9_client_version(clnt); if (err) - goto error; + goto close_trans; return clnt; -error: - p9_client_destroy(clnt); +close_trans: + clnt->trans_mod->close(clnt); +destroy_fidpool: + p9_idpool_destroy(clnt->fidpool); +put_trans: + v9fs_put_trans(clnt->trans_mod); +free_client: + kfree(clnt); return ERR_PTR(err); } EXPORT_SYMBOL(p9_client_create); @@ -1214,10 +1230,11 @@ static int p9_client_statsize(struct p9_wstat *wst, int optional) { int ret; + /* NOTE: size shouldn't include its own length */ /* size[2] type[2] dev[4] qid[13] */ /* mode[4] atime[4] mtime[4] length[8]*/ /* name[s] uid[s] gid[s] muid[s] */ - ret = 2+2+4+13+4+4+4+8+2+2+2+2; + ret = 2+4+13+4+4+4+8+2+2+2+2; if (wst->name) ret += strlen(wst->name); @@ -1258,7 +1275,7 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst) wst->name, wst->uid, wst->gid, wst->muid, wst->extension, wst->n_uid, wst->n_gid, wst->n_muid); - req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size, wst); + req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size+2, wst); if (IS_ERR(req)) { err = PTR_ERR(req); goto error; diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index be1cb909d8c..31d0b05582a 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -714,7 +714,7 @@ static int parse_opts(char *params, struct p9_fd_opts *opts) char *p; substring_t args[MAX_OPT_ARGS]; int option; - char *options; + char *options, *tmp_options; int ret; opts->port = P9_PORT; @@ -724,12 +724,13 @@ static int parse_opts(char *params, struct p9_fd_opts *opts) if (!params) return 0; - options = kstrdup(params, GFP_KERNEL); - if (!options) { + tmp_options = kstrdup(params, GFP_KERNEL); + if (!tmp_options) { P9_DPRINTK(P9_DEBUG_ERROR, "failed to allocate copy of option string\n"); return -ENOMEM; } + options = tmp_options; while ((p = strsep(&options, ",")) != NULL) { int token; @@ -760,7 +761,8 @@ static int parse_opts(char *params, struct p9_fd_opts *opts) continue; } } - kfree(options); + + kfree(tmp_options); return 0; } diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c index 65cb29db03f..2c95a89c0f4 100644 --- a/net/9p/trans_rdma.c +++ b/net/9p/trans_rdma.c @@ -166,7 +166,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts) char *p; substring_t args[MAX_OPT_ARGS]; int option; - char *options; + char *options, *tmp_options; int ret; opts->port = P9_PORT; @@ -177,12 +177,13 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts) if (!params) return 0; - options = kstrdup(params, GFP_KERNEL); - if (!options) { + tmp_options = kstrdup(params, GFP_KERNEL); + if (!tmp_options) { P9_DPRINTK(P9_DEBUG_ERROR, "failed to allocate copy of option string\n"); return -ENOMEM; } + options = tmp_options; while ((p = strsep(&options, ",")) != NULL) { int token; @@ -216,7 +217,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts) } /* RQ must be at least as large as the SQ */ opts->rq_depth = max(opts->rq_depth, opts->sq_depth); - kfree(options); + kfree(tmp_options); return 0; } diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index ea1e3daabef..cb50f4ae5ee 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -102,7 +102,8 @@ static void p9_virtio_close(struct p9_client *client) struct virtio_chan *chan = client->trans; mutex_lock(&virtio_9p_lock); - chan->inuse = false; + if (chan) + chan->inuse = false; mutex_unlock(&virtio_9p_lock); } @@ -311,6 +312,7 @@ p9_virtio_create(struct p9_client *client, const 
char *devname, char *args) } client->trans = (void *)chan; + client->status = Connected; chan->client = client; return 0; diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index b7c4224f4e7..b10e3cdb08f 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -377,6 +377,9 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 if (acl->state == BT_CONNECTED && (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { + acl->power_save = 1; + hci_conn_enter_active_mode(acl); + if (lmp_esco_capable(hdev)) hci_setup_sync(sco, acl->handle); else diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 28517bad796..592da5c909c 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1699,6 +1699,7 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu break; case 0x1c: /* SCO interval rejected */ + case 0x1a: /* Unsupported Remote Feature */ case 0x1f: /* Unspecified error */ if (conn->out && conn->attempt < 2) { conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 6cf526d06e2..fc6ec1e7265 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -703,29 +703,9 @@ static void hidp_close(struct hid_device *hid) static int hidp_parse(struct hid_device *hid) { struct hidp_session *session = hid->driver_data; - struct hidp_connadd_req *req = session->req; - unsigned char *buf; - int ret; - - buf = kmalloc(req->rd_size, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - if (copy_from_user(buf, req->rd_data, req->rd_size)) { - kfree(buf); - return -EFAULT; - } - - ret = hid_parse_report(session->hid, buf, req->rd_size); - - kfree(buf); - - if (ret) - return ret; - - session->req = NULL; - return 0; + return hid_parse_report(session->hid, session->rd_data, + session->rd_size); } static int hidp_start(struct hid_device *hid) @@ -770,12 +750,24 @@ static int hidp_setup_hid(struct hidp_session *session, bdaddr_t src, dst; int err; + session->rd_data = kzalloc(req->rd_size, GFP_KERNEL); + if (!session->rd_data) + return -ENOMEM; + + if (copy_from_user(session->rd_data, req->rd_data, req->rd_size)) { + err = -EFAULT; + goto fault; + } + session->rd_size = req->rd_size; + hid = hid_allocate_device(); - if (IS_ERR(hid)) - return PTR_ERR(hid); + if (IS_ERR(hid)) { + err = PTR_ERR(hid); + goto fault; + } session->hid = hid; - session->req = req; + hid->driver_data = session; baswap(&src, &bt_sk(session->ctrl_sock->sk)->src); @@ -806,6 +798,10 @@ failed: hid_destroy_device(hid); session->hid = NULL; +fault: + kfree(session->rd_data); + session->rd_data = NULL; + return err; } @@ -900,6 +896,9 @@ unlink: session->hid = NULL; } + kfree(session->rd_data); + session->rd_data = NULL; + purge: skb_queue_purge(&session->ctrl_transmit); skb_queue_purge(&session->intr_transmit); diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h index faf3d74c358..a4e215d50c1 100644 --- a/net/bluetooth/hidp/hidp.h +++ b/net/bluetooth/hidp/hidp.h @@ -154,7 +154,9 @@ struct hidp_session { struct sk_buff_head ctrl_transmit; struct sk_buff_head intr_transmit; - struct hidp_connadd_req *req; + /* Report descriptor */ + __u8 *rd_data; + uint rd_size; }; static inline void hidp_schedule(struct hidp_session *session) diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index fc5ee3296e2..89f4a59eb82 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -252,7 +252,6 @@ static void 
rfcomm_session_timeout(unsigned long arg) BT_DBG("session %p state %ld", s, s->state); set_bit(RFCOMM_TIMED_OUT, &s->flags); - rfcomm_session_put(s); rfcomm_schedule(RFCOMM_SCHED_TIMEO); } @@ -1151,7 +1150,11 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci) break; case BT_DISCONN: - rfcomm_session_put(s); + /* When socket is closed and we are not RFCOMM + * initiator rfcomm_process_rx already calls + * rfcomm_session_put() */ + if (s->sock->sk->sk_state != BT_CLOSED) + rfcomm_session_put(s); break; } } @@ -1920,6 +1923,7 @@ static inline void rfcomm_process_sessions(void) if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) { s->state = BT_DISCONN; rfcomm_send_disc(s, 0); + rfcomm_session_put(s); continue; } diff --git a/net/core/dst.c b/net/core/dst.c index 57bc4d5b8d0..cb1b3488b73 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -17,6 +17,7 @@ #include <linux/string.h> #include <linux/types.h> #include <net/net_namespace.h> +#include <linux/sched.h> #include <net/dst.h> @@ -79,6 +80,7 @@ loop: while ((dst = next) != NULL) { next = dst->next; prefetch(&next->next); + cond_resched(); if (likely(atomic_read(&dst->__refcnt))) { last->next = dst; last = dst; diff --git a/net/core/ethtool.c b/net/core/ethtool.c index d8aee584e8d..236a9988ea9 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -927,6 +927,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_GPERMADDR: case ETHTOOL_GUFO: case ETHTOOL_GGSO: + case ETHTOOL_GGRO: case ETHTOOL_GFLAGS: case ETHTOOL_GPFLAGS: case ETHTOOL_GRXFH: diff --git a/net/core/pktgen.c b/net/core/pktgen.c index de0c2c72642..2e692afdc55 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3524,6 +3524,7 @@ static int pktgen_thread_worker(void *arg) wait_event_interruptible_timeout(t->queue, t->control != 0, HZ/10); + try_to_freeze(); continue; } diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c index 57dfb9c8c4f..ff16e9df196 100644 --- a/net/dccp/ccid.c +++ b/net/dccp/ccid.c @@ -83,7 +83,7 @@ static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_f va_list args; va_start(args, fmt); - vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args); + vsnprintf(slab_name_fmt, CCID_SLAB_NAME_LENGTH, fmt, args); va_end(args); slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0, diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h index 269958bf7fe..6df6f8ac963 100644 --- a/net/dccp/ccid.h +++ b/net/dccp/ccid.h @@ -19,7 +19,9 @@ #include <linux/list.h> #include <linux/module.h> -#define CCID_MAX 255 +/* maximum value for a CCID (RFC 4340, 19.5) */ +#define CCID_MAX 255 +#define CCID_SLAB_NAME_LENGTH 32 struct tcp_info; @@ -49,8 +51,8 @@ struct ccid_operations { const char *ccid_name; struct kmem_cache *ccid_hc_rx_slab, *ccid_hc_tx_slab; - char ccid_hc_rx_slab_name[32]; - char ccid_hc_tx_slab_name[32]; + char ccid_hc_rx_slab_name[CCID_SLAB_NAME_LENGTH]; + char ccid_hc_tx_slab_name[CCID_SLAB_NAME_LENGTH]; __u32 ccid_hc_rx_obj_size, ccid_hc_tx_obj_size; /* Interface Routines */ diff --git a/net/dccp/probe.c b/net/dccp/probe.c index bace1d8cbcf..f5b3464f124 100644 --- a/net/dccp/probe.c +++ b/net/dccp/probe.c @@ -161,8 +161,8 @@ static __init int dccpprobe_init(void) if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops)) goto err0; - ret = try_then_request_module((register_jprobe(&dccp_send_probe) == 0), - "dccp"); + try_then_request_module((ret = register_jprobe(&dccp_send_probe)) == 0, + "dccp"); if (ret) goto err1; diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 
76c08402c93..a42f658e756 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -946,7 +946,6 @@ int igmp_rcv(struct sk_buff *skb) break; case IGMP_HOST_MEMBERSHIP_REPORT: case IGMPV2_HOST_MEMBERSHIP_REPORT: - case IGMPV3_HOST_MEMBERSHIP_REPORT: /* Is it our report looped back? */ if (skb_rtable(skb)->fl.iif == 0) break; @@ -960,6 +959,7 @@ int igmp_rcv(struct sk_buff *skb) in_dev_put(in_dev); return pim_rcv_v1(skb); #endif + case IGMPV3_HOST_MEMBERSHIP_REPORT: case IGMP_DVMRP: case IGMP_TRACE: case IGMP_HOST_LEAVE_MESSAGE: diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c index 38fbf04150a..544ce0876f1 100644 --- a/net/ipv4/ipcomp.c +++ b/net/ipv4/ipcomp.c @@ -124,16 +124,12 @@ static int ipcomp4_init_state(struct xfrm_state *x) if (x->props.mode == XFRM_MODE_TUNNEL) { err = ipcomp_tunnel_attach(x); if (err) - goto error_tunnel; + goto out; } err = 0; out: return err; - -error_tunnel: - ipcomp_destroy(x); - goto out; } static const struct xfrm_type ipcomp_type = { diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 06632762ba5..90203e1b918 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -925,10 +925,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat) if (t && !IS_ERR(t)) { struct arpt_getinfo info; const struct xt_table_info *private = t->private; - #ifdef CONFIG_COMPAT + struct xt_table_info tmp; + if (compat) { - struct xt_table_info tmp; ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(NFPROTO_ARP); private = &tmp; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 572330a552e..3ce53cf13d5 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1132,10 +1132,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat) if (t && !IS_ERR(t)) { struct ipt_getinfo info; const struct xt_table_info *private = t->private; - #ifdef CONFIG_COMPAT + struct xt_table_info tmp; + if (compat) { - struct xt_table_info tmp; ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(AF_INET); private = &tmp; diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index d171b123a65..d1ea38a7c49 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -210,7 +210,7 @@ static ctl_table ip_ct_sysctl_table[] = { }, { .procname = "ip_conntrack_buckets", - .data = &nf_conntrack_htable_size, + .data = &init_net.ct.htable_size, .maxlen = sizeof(unsigned int), .mode = 0444, .proc_handler = proc_dointvec, diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c index 8668a3defda..2fb7b76da94 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c @@ -32,7 +32,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq) struct hlist_nulls_node *n; for (st->bucket = 0; - st->bucket < nf_conntrack_htable_size; + st->bucket < net->ct.htable_size; st->bucket++) { n = rcu_dereference(net->ct.hash[st->bucket].first); if (!is_a_nulls(n)) @@ -50,7 +50,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq, head = rcu_dereference(head->next); while (is_a_nulls(head)) { if (likely(get_nulls_value(head) == st->bucket)) { - if (++st->bucket >= nf_conntrack_htable_size) + if (++st->bucket >= net->ct.htable_size) return NULL; } head = 
rcu_dereference(net->ct.hash[st->bucket].first); diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c index fe1a64479dd..26066a2327a 100644 --- a/net/ipv4/netfilter/nf_nat_core.c +++ b/net/ipv4/netfilter/nf_nat_core.c @@ -35,9 +35,6 @@ static DEFINE_SPINLOCK(nf_nat_lock); static struct nf_conntrack_l3proto *l3proto __read_mostly; -/* Calculated at init based on memory size */ -static unsigned int nf_nat_htable_size __read_mostly; - #define MAX_IP_NAT_PROTO 256 static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO] __read_mostly; @@ -72,7 +69,7 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put); /* We keep an extra hash for each conntrack, for fast searching. */ static inline unsigned int -hash_by_src(const struct nf_conntrack_tuple *tuple) +hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple) { unsigned int hash; @@ -80,7 +77,7 @@ hash_by_src(const struct nf_conntrack_tuple *tuple) hash = jhash_3words((__force u32)tuple->src.u3.ip, (__force u32)tuple->src.u.all, tuple->dst.protonum, 0); - return ((u64)hash * nf_nat_htable_size) >> 32; + return ((u64)hash * net->ipv4.nat_htable_size) >> 32; } /* Is this tuple already taken? (not by us) */ @@ -147,7 +144,7 @@ find_appropriate_src(struct net *net, struct nf_conntrack_tuple *result, const struct nf_nat_range *range) { - unsigned int h = hash_by_src(tuple); + unsigned int h = hash_by_src(net, tuple); const struct nf_conn_nat *nat; const struct nf_conn *ct; const struct hlist_node *n; @@ -330,7 +327,7 @@ nf_nat_setup_info(struct nf_conn *ct, if (have_to_hash) { unsigned int srchash; - srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); + srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); spin_lock_bh(&nf_nat_lock); /* nf_conntrack_alter_reply might re-allocate exntension aera */ nat = nfct_nat(ct); @@ -679,8 +676,10 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct, static int __net_init nf_nat_net_init(struct net *net) { - net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, - &net->ipv4.nat_vmalloced, 0); + /* Leave them the same for the moment. */ + net->ipv4.nat_htable_size = net->ct.htable_size; + net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, + &net->ipv4.nat_vmalloced, 0); if (!net->ipv4.nat_bysource) return -ENOMEM; return 0; @@ -703,7 +702,7 @@ static void __net_exit nf_nat_net_exit(struct net *net) nf_ct_iterate_cleanup(net, &clean_nat, NULL); synchronize_rcu(); nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced, - nf_nat_htable_size); + net->ipv4.nat_htable_size); } static struct pernet_operations nf_nat_net_ops = { @@ -724,9 +723,6 @@ static int __init nf_nat_init(void) return ret; } - /* Leave them the same for the moment. */ - nf_nat_htable_size = nf_conntrack_htable_size; - ret = register_pernet_subsys(&nf_nat_net_ops); if (ret < 0) goto cleanup_extend; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 28e02963249..3fddc69cccc 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5783,11 +5783,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, /* tcp_ack considers this ACK as duplicate * and does not calculate rtt. - * Fix it at least with timestamps. + * Force it here. 
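
The three net/9p parse_opts() hunks earlier in this diff (client.c, trans_fd.c, trans_rdma.c) all make the same fix: strsep() advances the pointer it is handed, so by the end of the loop `options` is NULL and the old kfree(options) freed nothing, leaking the kstrdup() buffer. Keeping the original pointer in `tmp_options` solely for kfree() is the whole change. A minimal userspace sketch of the pattern, with strdup/strsep/free standing in for the kernel helpers:

    #define _DEFAULT_SOURCE   /* for strsep() on glibc */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Parse a comma-separated option string without losing the pointer
     * that must eventually be handed to free(). */
    static int parse_opts(const char *opts)
    {
        char *options, *tmp_options;
        char *p;

        if (!opts)
            return 0;

        tmp_options = strdup(opts);     /* keep this one for free() */
        if (!tmp_options)
            return -1;
        options = tmp_options;          /* strsep() will advance this copy */

        while ((p = strsep(&options, ",")) != NULL) {
            if (!*p)
                continue;               /* skip empty tokens */
            printf("option: %s\n", p);
        }

        free(tmp_options);              /* not free(options): options is NULL here */
        return 0;
    }

    int main(void)
    {
        return parse_opts("trans=tcp,port=564,noextend");
    }
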
*/ - if (tp->rx_opt.saw_tstamp && - tp->rx_opt.rcv_tsecr && !tp->srtt) - tcp_ack_saw_tstamp(sk, 0); + tcp_ack_update_rtt(sk, 0, 0); if (tp->rx_opt.tstamp_ok) tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index 2f2a5ca2c87..002e6eef912 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c @@ -154,16 +154,12 @@ static int ipcomp6_init_state(struct xfrm_state *x) if (x->props.mode == XFRM_MODE_TUNNEL) { err = ipcomp6_tunnel_attach(x); if (err) - goto error_tunnel; + goto out; } err = 0; out: return err; -error_tunnel: - ipcomp_destroy(x); - - goto out; } static const struct xfrm_type ipcomp6_type = diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 480d7f8c980..8a7e0f52e17 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1164,10 +1164,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat) if (t && !IS_ERR(t)) { struct ip6t_getinfo info; const struct xt_table_info *private = t->private; - #ifdef CONFIG_COMPAT + struct xt_table_info tmp; + if (compat) { - struct xt_table_info tmp; ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(AF_INET6); private = &tmp; diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c index 156020d138b..6b3602de359 100644 --- a/net/irda/irnet/irnet_ppp.c +++ b/net/irda/irnet/irnet_ppp.c @@ -698,15 +698,18 @@ dev_irnet_ioctl( /* Query PPP channel and unit number */ case PPPIOCGCHAN: + lock_kernel(); if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan), (int __user *)argp)) err = 0; + unlock_kernel(); break; case PPPIOCGUNIT: lock_kernel(); if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan), (int __user *)argp)) - err = 0; + err = 0; + unlock_kernel(); break; /* All these ioctls can be passed both directly and from ppp_generic, diff --git a/net/key/af_key.c b/net/key/af_key.c index 76fa6fef647..539f43bc97d 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -3794,9 +3794,9 @@ static struct pernet_operations pfkey_net_ops = { static void __exit ipsec_pfkey_exit(void) { - unregister_pernet_subsys(&pfkey_net_ops); xfrm_unregister_km(&pfkeyv2_mgr); sock_unregister(PF_KEY); + unregister_pernet_subsys(&pfkey_net_ops); proto_unregister(&key_proto); } @@ -3807,21 +3807,22 @@ static int __init ipsec_pfkey_init(void) if (err != 0) goto out; - err = sock_register(&pfkey_family_ops); + err = register_pernet_subsys(&pfkey_net_ops); if (err != 0) goto out_unregister_key_proto; + err = sock_register(&pfkey_family_ops); + if (err != 0) + goto out_unregister_pernet; err = xfrm_register_km(&pfkeyv2_mgr); if (err != 0) goto out_sock_unregister; - err = register_pernet_subsys(&pfkey_net_ops); - if (err != 0) - goto out_xfrm_unregister_km; out: return err; -out_xfrm_unregister_km: - xfrm_unregister_km(&pfkeyv2_mgr); + out_sock_unregister: sock_unregister(PF_KEY); +out_unregister_pernet: + unregister_pernet_subsys(&pfkey_net_ops); out_unregister_key_proto: proto_unregister(&key_proto); goto out; diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 1f2db647bb5..22f0c2aa7a8 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -647,7 +647,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, } if (pos[1] != 0 && (pos[1] != ifibss->ssid_len || - !memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) { + memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) { /* Ignore ProbeReq for foreign SSID */ return; } diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 
b9007f80cb9..12a2bff7dcd 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -245,6 +245,9 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, info->control.rates[i].count = 1; } + if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) + return; + if (sta && sdata->force_unicast_rateidx > -1) { info->control.rates[0].idx = sdata->force_unicast_rateidx; } else { diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index f934c9620b7..bc17cf7d68d 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -439,6 +439,16 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, if (local->scan_req) return -EBUSY; + if (req != local->int_scan_req && + sdata->vif.type == NL80211_IFTYPE_STATION && + !list_empty(&ifmgd->work_list)) { + /* actually wait for the work it's doing to finish/time out */ + set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request); + local->scan_req = req; + local->scan_sdata = sdata; + return 0; + } + if (local->ops->hw_scan) { u8 *ies; @@ -463,14 +473,6 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, local->scan_req = req; local->scan_sdata = sdata; - if (req != local->int_scan_req && - sdata->vif.type == NL80211_IFTYPE_STATION && - !list_empty(&ifmgd->work_list)) { - /* actually wait for the work it's doing to finish/time out */ - set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request); - return 0; - } - if (local->ops->hw_scan) __set_bit(SCAN_HW_SCANNING, &local->scanning); else diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 0e98c3282d4..4d79e3c1616 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -30,6 +30,7 @@ #include <linux/netdevice.h> #include <linux/socket.h> #include <linux/mm.h> +#include <linux/nsproxy.h> #include <linux/rculist_nulls.h> #include <net/netfilter/nf_conntrack.h> @@ -63,8 +64,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max); struct nf_conn nf_conntrack_untracked __read_mostly; EXPORT_SYMBOL_GPL(nf_conntrack_untracked); -static struct kmem_cache *nf_conntrack_cachep __read_mostly; - static int nf_conntrack_hash_rnd_initted; static unsigned int nf_conntrack_hash_rnd; @@ -86,9 +85,10 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple, return ((u64)h * size) >> 32; } -static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple) +static inline u_int32_t hash_conntrack(const struct net *net, + const struct nf_conntrack_tuple *tuple) { - return __hash_conntrack(tuple, nf_conntrack_htable_size, + return __hash_conntrack(tuple, net->ct.htable_size, nf_conntrack_hash_rnd); } @@ -296,7 +296,7 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple) { struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; - unsigned int hash = hash_conntrack(tuple); + unsigned int hash = hash_conntrack(net, tuple); /* Disable BHs the entire time since we normally need to disable them * at least once for the stats anyway. 
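
In the net/mac80211/ibss.c hunk just above, memcmp() returns 0 when the buffers match, so the old `!memcmp(...)` made the "ignore ProbeReq for foreign SSID" branch fire precisely when the request carried our own SSID. A standalone sketch of the corrected predicate (the IE layout is as in the patch; the function and variable names here are illustrative only):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* pos points at an SSID element: pos[0] = element id, pos[1] = length,
     * pos + 2 = SSID bytes.  Return true if the request names a different
     * (foreign) SSID and should therefore be ignored. */
    static bool probe_req_is_foreign(const unsigned char *pos,
                                     const unsigned char *our_ssid,
                                     size_t our_ssid_len)
    {
        /* A zero-length (wildcard) SSID is never foreign. */
        if (pos[1] == 0)
            return false;

        /* Foreign if the length differs or the bytes differ;
         * memcmp() == 0 means "identical", hence no '!' here. */
        return pos[1] != our_ssid_len ||
               memcmp(pos + 2, our_ssid, our_ssid_len) != 0;
    }

    int main(void)
    {
        unsigned char ours[]  = { 0x00, 4, 't', 'e', 's', 't' };
        unsigned char other[] = { 0x00, 4, 'o', 'p', 'e', 'n' };

        printf("own ssid foreign?   %d\n",
               probe_req_is_foreign(ours, (const unsigned char *)"test", 4));
        printf("other ssid foreign? %d\n",
               probe_req_is_foreign(other, (const unsigned char *)"test", 4));
        return 0;
    }
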
@@ -366,10 +366,11 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct, void nf_conntrack_hash_insert(struct nf_conn *ct) { + struct net *net = nf_ct_net(ct); unsigned int hash, repl_hash; - hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); - repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); + hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); + repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); __nf_conntrack_hash_insert(ct, hash, repl_hash); } @@ -397,8 +398,8 @@ __nf_conntrack_confirm(struct sk_buff *skb) if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) return NF_ACCEPT; - hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); - repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); + hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); + repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); /* We're not in hash table, and we refuse to set up related connections for unconfirmed conns. But packet copies and @@ -468,7 +469,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, struct net *net = nf_ct_net(ignored_conntrack); struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; - unsigned int hash = hash_conntrack(tuple); + unsigned int hash = hash_conntrack(net, tuple); /* Disable BHs the entire time since we need to disable them at * least once for the stats anyway. @@ -503,7 +504,7 @@ static noinline int early_drop(struct net *net, unsigned int hash) int dropped = 0; rcu_read_lock(); - for (i = 0; i < nf_conntrack_htable_size; i++) { + for (i = 0; i < net->ct.htable_size; i++) { hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { tmp = nf_ct_tuplehash_to_ctrack(h); @@ -523,7 +524,7 @@ static noinline int early_drop(struct net *net, unsigned int hash) if (cnt >= NF_CT_EVICTION_RANGE) break; - hash = (hash + 1) % nf_conntrack_htable_size; + hash = (hash + 1) % net->ct.htable_size; } rcu_read_unlock(); @@ -557,7 +558,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, if (nf_conntrack_max && unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { - unsigned int hash = hash_conntrack(orig); + unsigned int hash = hash_conntrack(net, orig); if (!early_drop(net, hash)) { atomic_dec(&net->ct.count); if (net_ratelimit()) @@ -572,7 +573,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, * Do not use kmem_cache_zalloc(), as this cache uses * SLAB_DESTROY_BY_RCU. 
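
__hash_conntrack() and hash_by_src() in the hunks above map a 32-bit hash onto `size` buckets with ((u64)hash * size) >> 32 instead of a modulo. The multiply-shift form needs no division and draws on the high bits of the hash. A userspace illustration of just that scaling step (the jhash input is replaced by arbitrary constants, since only the mapping matters here):

    #include <stdint.h>
    #include <stdio.h>

    /* Map a full-range 32-bit hash onto [0, size) without a division:
     * the product hash * size lies in [0, size * 2^32), so its top
     * 32 bits lie in [0, size). */
    static uint32_t scale_hash(uint32_t hash, uint32_t size)
    {
        return (uint32_t)(((uint64_t)hash * size) >> 32);
    }

    int main(void)
    {
        uint32_t table_size = 16384;   /* e.g. net->ct.htable_size */
        uint32_t hashes[] = { 0x00000000u, 0x12345678u, 0xdeadbeefu, 0xffffffffu };

        for (unsigned i = 0; i < sizeof(hashes) / sizeof(hashes[0]); i++)
            printf("hash %08x -> bucket %u (mod form: %u)\n",
                   hashes[i], scale_hash(hashes[i], table_size),
                   hashes[i] % table_size);
        return 0;
    }
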
*/ - ct = kmem_cache_alloc(nf_conntrack_cachep, gfp); + ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp); if (ct == NULL) { pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n"); atomic_dec(&net->ct.count); @@ -611,7 +612,7 @@ void nf_conntrack_free(struct nf_conn *ct) nf_ct_ext_destroy(ct); atomic_dec(&net->ct.count); nf_ct_ext_free(ct); - kmem_cache_free(nf_conntrack_cachep, ct); + kmem_cache_free(net->ct.nf_conntrack_cachep, ct); } EXPORT_SYMBOL_GPL(nf_conntrack_free); @@ -1014,7 +1015,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), struct hlist_nulls_node *n; spin_lock_bh(&nf_conntrack_lock); - for (; *bucket < nf_conntrack_htable_size; (*bucket)++) { + for (; *bucket < net->ct.htable_size; (*bucket)++) { hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { ct = nf_ct_tuplehash_to_ctrack(h); if (iter(ct, data)) @@ -1113,9 +1114,12 @@ static void nf_ct_release_dying_list(struct net *net) static void nf_conntrack_cleanup_init_net(void) { + /* wait until all references to nf_conntrack_untracked are dropped */ + while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1) + schedule(); + nf_conntrack_helper_fini(); nf_conntrack_proto_fini(); - kmem_cache_destroy(nf_conntrack_cachep); } static void nf_conntrack_cleanup_net(struct net *net) @@ -1127,15 +1131,14 @@ static void nf_conntrack_cleanup_net(struct net *net) schedule(); goto i_see_dead_people; } - /* wait until all references to nf_conntrack_untracked are dropped */ - while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1) - schedule(); nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, - nf_conntrack_htable_size); + net->ct.htable_size); nf_conntrack_ecache_fini(net); nf_conntrack_acct_fini(net); nf_conntrack_expect_fini(net); + kmem_cache_destroy(net->ct.nf_conntrack_cachep); + kfree(net->ct.slabname); free_percpu(net->ct.stat); } @@ -1190,10 +1193,12 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) { int i, bucket, vmalloced, old_vmalloced; unsigned int hashsize, old_size; - int rnd; struct hlist_nulls_head *hash, *old_hash; struct nf_conntrack_tuple_hash *h; + if (current->nsproxy->net_ns != &init_net) + return -EOPNOTSUPP; + /* On boot, we can set this without any fancy locking. */ if (!nf_conntrack_htable_size) return param_set_uint(val, kp); @@ -1206,33 +1211,29 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) if (!hash) return -ENOMEM; - /* We have to rehahs for the new table anyway, so we also can - * use a newrandom seed */ - get_random_bytes(&rnd, sizeof(rnd)); - /* Lookups in the old hash might happen in parallel, which means we * might get false negatives during connection lookup. New connections * created because of a false negative won't make it into the hash * though since that required taking the lock. 
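
The comment immediately above belongs to the nf_conntrack_set_hashsize() rework: the resize path (continued in the next hunks) pulls every entry off the old table and re-inserts it into the freshly allocated one, now reusing the existing hash seed instead of drawing a new one. A simplified userspace sketch of that rehash loop over singly linked buckets, with locking and RCU omitted and the hash function a stand-in:

    #include <stddef.h>
    #include <stdint.h>

    struct entry {
        struct entry *next;
        uint32_t key;
    };

    /* Stand-in for __hash_conntrack(): any seeded hash reduced to a bucket. */
    static uint32_t bucket_of(uint32_t key, uint32_t seed, uint32_t size)
    {
        uint32_t h = (key ^ seed) * 2654435761u;
        return h % size;
    }

    /* Move every entry from old_hash[old_size] into new_hash[new_size],
     * keeping the same seed, as the resize path now does. */
    static void rehash(struct entry **old_hash, uint32_t old_size,
                       struct entry **new_hash, uint32_t new_size,
                       uint32_t seed)
    {
        for (uint32_t i = 0; i < old_size; i++) {
            while (old_hash[i]) {
                struct entry *e = old_hash[i];
                uint32_t b = bucket_of(e->key, seed, new_size);

                old_hash[i] = e->next;      /* unlink from the old bucket */
                e->next = new_hash[b];      /* push onto the new bucket head */
                new_hash[b] = e;
            }
        }
    }

    int main(void)
    {
        struct entry a = { NULL, 1 }, b = { NULL, 2 };
        struct entry *old_tbl[4] = { NULL }, *new_tbl[8] = { NULL };
        uint32_t seed = 0x9e3779b9u;

        a.next = old_tbl[bucket_of(1, seed, 4)];
        old_tbl[bucket_of(1, seed, 4)] = &a;
        b.next = old_tbl[bucket_of(2, seed, 4)];
        old_tbl[bucket_of(2, seed, 4)] = &b;

        rehash(old_tbl, 4, new_tbl, 8, seed);
        return 0;
    }
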
*/ spin_lock_bh(&nf_conntrack_lock); - for (i = 0; i < nf_conntrack_htable_size; i++) { + for (i = 0; i < init_net.ct.htable_size; i++) { while (!hlist_nulls_empty(&init_net.ct.hash[i])) { h = hlist_nulls_entry(init_net.ct.hash[i].first, struct nf_conntrack_tuple_hash, hnnode); hlist_nulls_del_rcu(&h->hnnode); - bucket = __hash_conntrack(&h->tuple, hashsize, rnd); + bucket = __hash_conntrack(&h->tuple, hashsize, + nf_conntrack_hash_rnd); hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); } } - old_size = nf_conntrack_htable_size; + old_size = init_net.ct.htable_size; old_vmalloced = init_net.ct.hash_vmalloc; old_hash = init_net.ct.hash; - nf_conntrack_htable_size = hashsize; + init_net.ct.htable_size = nf_conntrack_htable_size = hashsize; init_net.ct.hash_vmalloc = vmalloced; init_net.ct.hash = hash; - nf_conntrack_hash_rnd = rnd; spin_unlock_bh(&nf_conntrack_lock); nf_ct_free_hashtable(old_hash, old_vmalloced, old_size); @@ -1271,15 +1272,6 @@ static int nf_conntrack_init_init_net(void) NF_CONNTRACK_VERSION, nf_conntrack_htable_size, nf_conntrack_max); - nf_conntrack_cachep = kmem_cache_create("nf_conntrack", - sizeof(struct nf_conn), - 0, SLAB_DESTROY_BY_RCU, NULL); - if (!nf_conntrack_cachep) { - printk(KERN_ERR "Unable to create nf_conn slab cache\n"); - ret = -ENOMEM; - goto err_cache; - } - ret = nf_conntrack_proto_init(); if (ret < 0) goto err_proto; @@ -1288,13 +1280,19 @@ static int nf_conntrack_init_init_net(void) if (ret < 0) goto err_helper; + /* Set up fake conntrack: to never be deleted, not in any hashes */ +#ifdef CONFIG_NET_NS + nf_conntrack_untracked.ct_net = &init_net; +#endif + atomic_set(&nf_conntrack_untracked.ct_general.use, 1); + /* - and look it like as a confirmed connection */ + set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status); + return 0; err_helper: nf_conntrack_proto_fini(); err_proto: - kmem_cache_destroy(nf_conntrack_cachep); -err_cache: return ret; } @@ -1316,7 +1314,24 @@ static int nf_conntrack_init_net(struct net *net) ret = -ENOMEM; goto err_stat; } - net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, + + net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net); + if (!net->ct.slabname) { + ret = -ENOMEM; + goto err_slabname; + } + + net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname, + sizeof(struct nf_conn), 0, + SLAB_DESTROY_BY_RCU, NULL); + if (!net->ct.nf_conntrack_cachep) { + printk(KERN_ERR "Unable to create nf_conn slab cache\n"); + ret = -ENOMEM; + goto err_cache; + } + + net->ct.htable_size = nf_conntrack_htable_size; + net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, &net->ct.hash_vmalloc, 1); if (!net->ct.hash) { ret = -ENOMEM; @@ -1333,15 +1348,6 @@ static int nf_conntrack_init_net(struct net *net) if (ret < 0) goto err_ecache; - /* Set up fake conntrack: - - to never be deleted, not in any hashes */ -#ifdef CONFIG_NET_NS - nf_conntrack_untracked.ct_net = &init_net; -#endif - atomic_set(&nf_conntrack_untracked.ct_general.use, 1); - /* - and look it like as a confirmed connection */ - set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status); - return 0; err_ecache: @@ -1350,8 +1356,12 @@ err_acct: nf_conntrack_expect_fini(net); err_expect: nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, - nf_conntrack_htable_size); + net->ct.htable_size); err_hash: + kmem_cache_destroy(net->ct.nf_conntrack_cachep); +err_cache: + kfree(net->ct.slabname); +err_slabname: free_percpu(net->ct.stat); err_stat: return ret; diff --git a/net/netfilter/nf_conntrack_expect.c 
b/net/netfilter/nf_conntrack_expect.c index fdf5d2a1d9b..2f25ff61098 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -569,7 +569,7 @@ static void exp_proc_remove(struct net *net) #endif /* CONFIG_PROC_FS */ } -module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600); +module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400); int nf_conntrack_expect_init(struct net *net) { @@ -577,7 +577,7 @@ int nf_conntrack_expect_init(struct net *net) if (net_eq(net, &init_net)) { if (!nf_ct_expect_hsize) { - nf_ct_expect_hsize = nf_conntrack_htable_size / 256; + nf_ct_expect_hsize = net->ct.htable_size / 256; if (!nf_ct_expect_hsize) nf_ct_expect_hsize = 1; } diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 65c2a7bc3af..4b1a56bd074 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -192,7 +192,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me, /* Get rid of expecteds, set helpers to NULL. */ hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode) unhelp(h, me); - for (i = 0; i < nf_conntrack_htable_size; i++) { + for (i = 0; i < net->ct.htable_size; i++) { hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) unhelp(h, me); } diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 42f21c01a93..0ffe689dfe9 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -594,7 +594,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) rcu_read_lock(); last = (struct nf_conn *)cb->args[1]; - for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { + for (; cb->args[0] < init_net.ct.htable_size; cb->args[0]++) { restart: hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]], hnnode) { diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 028aba667ef..e310f1561bb 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -51,7 +51,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq) struct hlist_nulls_node *n; for (st->bucket = 0; - st->bucket < nf_conntrack_htable_size; + st->bucket < net->ct.htable_size; st->bucket++) { n = rcu_dereference(net->ct.hash[st->bucket].first); if (!is_a_nulls(n)) @@ -69,7 +69,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq, head = rcu_dereference(head->next); while (is_a_nulls(head)) { if (likely(get_nulls_value(head) == st->bucket)) { - if (++st->bucket >= nf_conntrack_htable_size) + if (++st->bucket >= net->ct.htable_size) return NULL; } head = rcu_dereference(net->ct.hash[st->bucket].first); @@ -355,7 +355,7 @@ static ctl_table nf_ct_sysctl_table[] = { }, { .procname = "nf_conntrack_buckets", - .data = &nf_conntrack_htable_size, + .data = &init_net.ct.htable_size, .maxlen = sizeof(unsigned int), .mode = 0444, .proc_handler = proc_dointvec, @@ -421,6 +421,7 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net) goto out_kmemdup; table[1].data = &net->ct.count; + table[2].data = &net->ct.htable_size; table[3].data = &net->ct.sysctl_checksum; table[4].data = &net->ct.sysctl_log_invalid; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index a4957bf2ca6..4c5972ba8c7 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -455,9 +455,14 @@ static int netlink_create(struct net *net, struct socket 
*sock, int protocol, if (nl_table[protocol].registered && try_module_get(nl_table[protocol].module)) module = nl_table[protocol].module; + else + err = -EPROTONOSUPPORT; cb_mutex = nl_table[protocol].cb_mutex; netlink_unlock_table(); + if (err < 0) + goto out; + err = __netlink_create(net, sock, cb_mutex, protocol); if (err < 0) goto out_module; diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 929218a4762..21f9c7678aa 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -433,7 +433,7 @@ config NET_ACT_POLICE module. To compile this code as a module, choose M here: the - module will be called police. + module will be called act_police. config NET_ACT_GACT tristate "Generic actions" @@ -443,7 +443,7 @@ config NET_ACT_GACT accepting packets. To compile this code as a module, choose M here: the - module will be called gact. + module will be called act_gact. config GACT_PROB bool "Probability support" @@ -459,7 +459,7 @@ config NET_ACT_MIRRED other devices. To compile this code as a module, choose M here: the - module will be called mirred. + module will be called act_mirred. config NET_ACT_IPT tristate "IPtables targets" @@ -469,7 +469,7 @@ config NET_ACT_IPT classification. To compile this code as a module, choose M here: the - module will be called ipt. + module will be called act_ipt. config NET_ACT_NAT tristate "Stateless NAT" @@ -479,7 +479,7 @@ config NET_ACT_NAT netfilter for NAT unless you know what you are doing. To compile this code as a module, choose M here: the - module will be called nat. + module will be called act_nat. config NET_ACT_PEDIT tristate "Packet Editing" @@ -488,7 +488,7 @@ config NET_ACT_PEDIT Say Y here if you want to mangle the content of packets. To compile this code as a module, choose M here: the - module will be called pedit. + module will be called act_pedit. config NET_ACT_SIMP tristate "Simple Example (Debug)" @@ -502,7 +502,7 @@ config NET_ACT_SIMP If unsure, say N. To compile this code as a module, choose M here: the - module will be called simple. + module will be called act_simple. config NET_ACT_SKBEDIT tristate "SKB Editing" @@ -513,7 +513,7 @@ config NET_ACT_SKBEDIT If unsure, say N. To compile this code as a module, choose M here: the - module will be called skbedit. + module will be called act_skbedit. 
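
In the net/netlink/af_netlink.c hunk above, the protocol table is examined under netlink_lock_table(), so the fix records -EPROTONOSUPPORT while the lock is held and only bails out after netlink_unlock_table(), instead of carrying on with an unregistered protocol. A sketch of that look-up-under-lock, fail-after-unlock shape using a plain pthread mutex (names and table layout are illustrative, not the kernel API; build with cc -pthread):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_PROTO 32

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool registered[MAX_PROTO] = { [5] = true };  /* pretend protocol 5 exists */

    static int proto_create(int protocol)
    {
        int err = 0;

        if (protocol < 0 || protocol >= MAX_PROTO)
            return -EPROTONOSUPPORT;

        pthread_mutex_lock(&table_lock);
        if (!registered[protocol])
            err = -EPROTONOSUPPORT;   /* record the error while locked */
        pthread_mutex_unlock(&table_lock);

        if (err < 0)
            return err;               /* act on it only after unlocking */

        /* ... socket creation proper would follow here ... */
        return 0;
    }

    int main(void)
    {
        printf("proto 5: %d\n", proto_create(5));
        printf("proto 9: %d\n", proto_create(9));
        return 0;
    }
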
config NET_CLS_IND bool "Incoming device classification" diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index b36cc344474..f445ea1c5f5 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1102,7 +1102,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp) int err = -ENOMEM; struct xfrm_state *x = xfrm_state_alloc(net); if (!x) - goto error; + goto out; memcpy(&x->id, &orig->id, sizeof(x->id)); memcpy(&x->sel, &orig->sel, sizeof(x->sel)); @@ -1160,16 +1160,10 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp) return x; error: + xfrm_state_put(x); +out: if (errp) *errp = err; - if (x) { - kfree(x->aalg); - kfree(x->ealg); - kfree(x->calg); - kfree(x->encap); - kfree(x->coaddr); - } - kfree(x); return NULL; } diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h index c41afe6639a..47fb65d1fcb 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h @@ -65,7 +65,6 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode, const char *cause, int result, int info); /* Internal IMA function definitions */ -void ima_iintcache_init(void); int ima_init(void); void ima_cleanup(void); int ima_fs_init(void); @@ -131,7 +130,7 @@ void iint_free(struct kref *kref); void iint_rcu_free(struct rcu_head *rcu); /* IMA policy related functions */ -enum ima_hooks { PATH_CHECK = 1, FILE_MMAP, BPRM_CHECK }; +enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK }; int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask); void ima_init_policy(void); diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c index 3cd58b60afd..2a5e0bcf388 100644 --- a/security/integrity/ima/ima_api.c +++ b/security/integrity/ima/ima_api.c @@ -95,12 +95,12 @@ err_out: * ima_must_measure - measure decision based on policy. * @inode: pointer to inode to measure * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE) - * @function: calling function (PATH_CHECK, BPRM_CHECK, FILE_MMAP) + * @function: calling function (FILE_CHECK, BPRM_CHECK, FILE_MMAP) * * The policy is defined in terms of keypairs: * subj=, obj=, type=, func=, mask=, fsmagic= * subj,obj, and type: are LSM specific. 
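
The xfrm_state_clone() hunk above replaces the per-member kfree() calls with a single xfrm_state_put() on the partially built state, plus a separate `out` label for the case where nothing was allocated yet. The same shape in a standalone sketch (the struct, destroy function and field names are invented for illustration):

    #define _DEFAULT_SOURCE   /* for strdup() on strict setups */
    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    struct state {
        char *aalg;
        char *ealg;
        char *calg;
    };

    /* One teardown path that copes with partially built objects. */
    static void state_destroy(struct state *x)
    {
        free(x->aalg);
        free(x->ealg);
        free(x->calg);
        free(x);
    }

    static struct state *state_clone(const struct state *orig, int *errp)
    {
        int err = -ENOMEM;
        struct state *x = calloc(1, sizeof(*x));

        if (!x)
            goto out;                 /* nothing to tear down yet */

        if (orig->aalg && !(x->aalg = strdup(orig->aalg)))
            goto error;
        if (orig->ealg && !(x->ealg = strdup(orig->ealg)))
            goto error;
        if (orig->calg && !(x->calg = strdup(orig->calg)))
            goto error;

        return x;

    error:
        state_destroy(x);             /* frees whatever was set, then x */
    out:
        if (errp)
            *errp = err;
        return NULL;
    }

    int main(void)
    {
        struct state orig = { "hmac", "aes", NULL };
        int err = 0;
        struct state *copy = state_clone(&orig, &err);

        if (copy)
            state_destroy(copy);
        return 0;
    }
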
- * func: PATH_CHECK | BPRM_CHECK | FILE_MMAP + * func: FILE_CHECK | BPRM_CHECK | FILE_MMAP * mask: contains the permission mask * fsmagic: hex value * diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c index fa592ff1ac1..0d83edcfc40 100644 --- a/security/integrity/ima/ima_iint.c +++ b/security/integrity/ima/ima_iint.c @@ -52,9 +52,6 @@ int ima_inode_alloc(struct inode *inode) struct ima_iint_cache *iint = NULL; int rc = 0; - if (!ima_initialized) - return 0; - iint = kmem_cache_alloc(iint_cache, GFP_NOFS); if (!iint) return -ENOMEM; @@ -118,8 +115,6 @@ void ima_inode_free(struct inode *inode) { struct ima_iint_cache *iint; - if (!ima_initialized) - return; spin_lock(&ima_iint_lock); iint = radix_tree_delete(&ima_iint_store, (unsigned long)inode); spin_unlock(&ima_iint_lock); @@ -141,9 +136,11 @@ static void init_once(void *foo) kref_set(&iint->refcount, 1); } -void __init ima_iintcache_init(void) +static int __init ima_iintcache_init(void) { iint_cache = kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0, SLAB_PANIC, init_once); + return 0; } +security_initcall(ima_iintcache_init); diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index a89f44d5e03..294b005d652 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -14,7 +14,7 @@ * * File: ima_main.c * implements the IMA hooks: ima_bprm_check, ima_file_mmap, - * and ima_path_check. + * and ima_file_check. */ #include <linux/module.h> #include <linux/file.h> @@ -84,6 +84,36 @@ out: return found; } +/* ima_read_write_check - reflect possible reading/writing errors in the PCR. + * + * When opening a file for read, if the file is already open for write, + * the file could change, resulting in a file measurement error. + * + * Opening a file for write, if the file is already open for read, results + * in a time of measure, time of use (ToMToU) error. + * + * In either case invalidate the PCR. + */ +enum iint_pcr_error { TOMTOU, OPEN_WRITERS }; +static void ima_read_write_check(enum iint_pcr_error error, + struct ima_iint_cache *iint, + struct inode *inode, + const unsigned char *filename) +{ + switch (error) { + case TOMTOU: + if (iint->readcount > 0) + ima_add_violation(inode, filename, "invalid_pcr", + "ToMToU"); + break; + case OPEN_WRITERS: + if (iint->writecount > 0) + ima_add_violation(inode, filename, "invalid_pcr", + "open_writers"); + break; + } +} + /* * Update the counts given an fmode_t */ @@ -99,6 +129,47 @@ static void ima_inc_counts(struct ima_iint_cache *iint, fmode_t mode) } /* + * ima_counts_get - increment file counts + * + * Maintain read/write counters for all files, but only + * invalidate the PCR for measured files: + * - Opening a file for write when already open for read, + * results in a time of measure, time of use (ToMToU) error. + * - Opening a file for read when already open for write, + * could result in a file measurement error. 
+ * + */ +void ima_counts_get(struct file *file) +{ + struct dentry *dentry = file->f_path.dentry; + struct inode *inode = dentry->d_inode; + fmode_t mode = file->f_mode; + struct ima_iint_cache *iint; + int rc; + + if (!ima_initialized || !S_ISREG(inode->i_mode)) + return; + iint = ima_iint_find_get(inode); + if (!iint) + return; + mutex_lock(&iint->mutex); + rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK); + if (rc < 0) + goto out; + + if (mode & FMODE_WRITE) { + ima_read_write_check(TOMTOU, iint, inode, dentry->d_name.name); + goto out; + } + ima_read_write_check(OPEN_WRITERS, iint, inode, dentry->d_name.name); +out: + ima_inc_counts(iint, file->f_mode); + mutex_unlock(&iint->mutex); + + kref_put(&iint->refcount, iint_free); +} + +/* * Decrement ima counts */ static void ima_dec_counts(struct ima_iint_cache *iint, struct inode *inode, @@ -153,123 +224,6 @@ void ima_file_free(struct file *file) kref_put(&iint->refcount, iint_free); } -/* ima_read_write_check - reflect possible reading/writing errors in the PCR. - * - * When opening a file for read, if the file is already open for write, - * the file could change, resulting in a file measurement error. - * - * Opening a file for write, if the file is already open for read, results - * in a time of measure, time of use (ToMToU) error. - * - * In either case invalidate the PCR. - */ -enum iint_pcr_error { TOMTOU, OPEN_WRITERS }; -static void ima_read_write_check(enum iint_pcr_error error, - struct ima_iint_cache *iint, - struct inode *inode, - const unsigned char *filename) -{ - switch (error) { - case TOMTOU: - if (iint->readcount > 0) - ima_add_violation(inode, filename, "invalid_pcr", - "ToMToU"); - break; - case OPEN_WRITERS: - if (iint->writecount > 0) - ima_add_violation(inode, filename, "invalid_pcr", - "open_writers"); - break; - } -} - -static int get_path_measurement(struct ima_iint_cache *iint, struct file *file, - const unsigned char *filename) -{ - int rc = 0; - - ima_inc_counts(iint, file->f_mode); - - rc = ima_collect_measurement(iint, file); - if (!rc) - ima_store_measurement(iint, file, filename); - return rc; -} - -/** - * ima_path_check - based on policy, collect/store measurement. - * @path: contains a pointer to the path to be measured - * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE - * - * Measure the file being open for readonly, based on the - * ima_must_measure() policy decision. - * - * Keep read/write counters for all files, but only - * invalidate the PCR for measured files: - * - Opening a file for write when already open for read, - * results in a time of measure, time of use (ToMToU) error. - * - Opening a file for read when already open for write, - * could result in a file measurement error. - * - * Always return 0 and audit dentry_open failures. - * (Return code will be based upon measurement appraisal.) 
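
ima_read_write_check(), added a little above and called from the new ima_counts_get(), logs a violation when a file is opened for write while readers exist (ToMToU) or opened for read while writers exist, since either situation can invalidate an earlier measurement. Reduced to the counter logic alone (userspace sketch; the iint structure and the violation report are stand-ins):

    #include <stdio.h>

    struct iint {
        int readcount;
        int writecount;
    };

    enum pcr_error { TOMTOU, OPEN_WRITERS };

    static void report_violation(const char *filename, const char *cause)
    {
        printf("invalid_pcr violation on %s: %s\n", filename, cause);
    }

    static void read_write_check(enum pcr_error error, const struct iint *iint,
                                 const char *filename)
    {
        switch (error) {
        case TOMTOU:          /* opening for write while readers exist */
            if (iint->readcount > 0)
                report_violation(filename, "ToMToU");
            break;
        case OPEN_WRITERS:    /* opening for read while writers exist */
            if (iint->writecount > 0)
                report_violation(filename, "open_writers");
            break;
        }
    }

    int main(void)
    {
        struct iint ii = { .readcount = 1, .writecount = 0 };

        read_write_check(TOMTOU, &ii, "/etc/example.conf");       /* fires */
        read_write_check(OPEN_WRITERS, &ii, "/etc/example.conf"); /* silent */
        return 0;
    }
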
- */ -int ima_path_check(struct path *path, int mask) -{ - struct inode *inode = path->dentry->d_inode; - struct ima_iint_cache *iint; - struct file *file = NULL; - int rc; - - if (!ima_initialized || !S_ISREG(inode->i_mode)) - return 0; - iint = ima_iint_find_get(inode); - if (!iint) - return 0; - - mutex_lock(&iint->mutex); - - rc = ima_must_measure(iint, inode, MAY_READ, PATH_CHECK); - if (rc < 0) - goto out; - - if ((mask & MAY_WRITE) || (mask == 0)) - ima_read_write_check(TOMTOU, iint, inode, - path->dentry->d_name.name); - - if ((mask & (MAY_WRITE | MAY_READ | MAY_EXEC)) != MAY_READ) - goto out; - - ima_read_write_check(OPEN_WRITERS, iint, inode, - path->dentry->d_name.name); - if (!(iint->flags & IMA_MEASURED)) { - struct dentry *dentry = dget(path->dentry); - struct vfsmount *mnt = mntget(path->mnt); - - file = dentry_open(dentry, mnt, O_RDONLY | O_LARGEFILE, - current_cred()); - if (IS_ERR(file)) { - int audit_info = 0; - - integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, - dentry->d_name.name, - "add_measurement", - "dentry_open failed", - 1, audit_info); - file = NULL; - goto out; - } - rc = get_path_measurement(iint, file, dentry->d_name.name); - } -out: - mutex_unlock(&iint->mutex); - if (file) - fput(file); - kref_put(&iint->refcount, iint_free); - return 0; -} -EXPORT_SYMBOL_GPL(ima_path_check); - static int process_measurement(struct file *file, const unsigned char *filename, int mask, int function) { @@ -297,33 +251,6 @@ out: return rc; } -/* - * ima_counts_get - increment file counts - * - * - for IPC shm and shmat file. - * - for nfsd exported files. - * - * Increment the counts for these files to prevent unnecessary - * imbalance messages. - */ -void ima_counts_get(struct file *file) -{ - struct inode *inode = file->f_dentry->d_inode; - struct ima_iint_cache *iint; - - if (!ima_initialized || !S_ISREG(inode->i_mode)) - return; - iint = ima_iint_find_get(inode); - if (!iint) - return; - mutex_lock(&iint->mutex); - ima_inc_counts(iint, file->f_mode); - mutex_unlock(&iint->mutex); - - kref_put(&iint->refcount, iint_free); -} -EXPORT_SYMBOL_GPL(ima_counts_get); - /** * ima_file_mmap - based on policy, collect/store measurement. * @file: pointer to the file to be measured (May be NULL) @@ -369,11 +296,31 @@ int ima_bprm_check(struct linux_binprm *bprm) return 0; } +/** + * ima_path_check - based on policy, collect/store measurement. + * @file: pointer to the file to be measured + * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE + * + * Measure files based on the ima_must_measure() policy decision. + * + * Always return 0 and audit dentry_open failures. + * (Return code will be based upon measurement appraisal.) 
+ */ +int ima_file_check(struct file *file, int mask) +{ + int rc; + + rc = process_measurement(file, file->f_dentry->d_name.name, + mask & (MAY_READ | MAY_WRITE | MAY_EXEC), + FILE_CHECK); + return 0; +} +EXPORT_SYMBOL_GPL(ima_file_check); + static int __init init_ima(void) { int error; - ima_iintcache_init(); error = ima_init(); ima_initialized = 1; return error; diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index e1278399b34..4759d0f9933 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -67,7 +67,7 @@ static struct ima_measure_rule_entry default_rules[] = { .flags = IMA_FUNC | IMA_MASK}, {.action = MEASURE,.func = BPRM_CHECK,.mask = MAY_EXEC, .flags = IMA_FUNC | IMA_MASK}, - {.action = MEASURE,.func = PATH_CHECK,.mask = MAY_READ,.uid = 0, + {.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = 0, .flags = IMA_FUNC | IMA_MASK | IMA_UID}, }; @@ -282,8 +282,11 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry) break; case Opt_func: audit_log_format(ab, "func=%s ", args[0].from); - if (strcmp(args[0].from, "PATH_CHECK") == 0) - entry->func = PATH_CHECK; + if (strcmp(args[0].from, "FILE_CHECK") == 0) + entry->func = FILE_CHECK; + /* PATH_CHECK is for backwards compat */ + else if (strcmp(args[0].from, "PATH_CHECK") == 0) + entry->func = FILE_CHECK; else if (strcmp(args[0].from, "FILE_MMAP") == 0) entry->func = FILE_MMAP; else if (strcmp(args[0].from, "BPRM_CHECK") == 0) diff --git a/security/security.c b/security/security.c index 24e060be9fa..122b748d0f4 100644 --- a/security/security.c +++ b/security/security.c @@ -666,8 +666,6 @@ int security_file_alloc(struct file *file) void security_file_free(struct file *file) { security_ops->file_free_security(file); - if (file->f_dentry) - ima_file_free(file); } int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c index cb65bd0dd35..459c1f62783 100644 --- a/sound/pci/ctxfi/ctatc.c +++ b/sound/pci/ctxfi/ctatc.c @@ -166,18 +166,7 @@ static void ct_unmap_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm) static unsigned long atc_get_ptp_phys(struct ct_atc *atc, int index) { - struct ct_vm *vm; - void *kvirt_addr; - unsigned long phys_addr; - - vm = atc->vm; - kvirt_addr = vm->get_ptp_virt(vm, index); - if (kvirt_addr == NULL) - phys_addr = (~0UL); - else - phys_addr = virt_to_phys(kvirt_addr); - - return phys_addr; + return atc->vm->get_ptp_phys(atc->vm, index); } static unsigned int convert_format(snd_pcm_format_t snd_format) @@ -1669,7 +1658,7 @@ int __devinit ct_atc_create(struct snd_card *card, struct pci_dev *pci, } /* Set up device virtual memory management object */ - err = ct_vm_create(&atc->vm); + err = ct_vm_create(&atc->vm, pci); if (err < 0) goto error1; diff --git a/sound/pci/ctxfi/ctvmem.c b/sound/pci/ctxfi/ctvmem.c index 6b78752e950..65da6e466f8 100644 --- a/sound/pci/ctxfi/ctvmem.c +++ b/sound/pci/ctxfi/ctvmem.c @@ -138,7 +138,7 @@ ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size) return NULL; } - ptp = vm->ptp[0]; + ptp = (unsigned long *)vm->ptp[0].area; pte_start = (block->addr >> CT_PAGE_SHIFT); pages = block->size >> CT_PAGE_SHIFT; for (i = 0; i < pages; i++) { @@ -158,25 +158,25 @@ static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block) } /* * - * return the host (kmalloced) addr of the @index-th device - * page talbe page on success, or NULL on failure. 
- * The first returned NULL indicates the termination. + * return the host physical addr of the @index-th device + * page table page on success, or ~0UL on failure. + * The first returned ~0UL indicates the termination. * */ -static void * -ct_get_ptp_virt(struct ct_vm *vm, int index) +static dma_addr_t +ct_get_ptp_phys(struct ct_vm *vm, int index) { - void *addr; + dma_addr_t addr; - addr = (index >= CT_PTP_NUM) ? NULL : vm->ptp[index]; + addr = (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr; return addr; } -int ct_vm_create(struct ct_vm **rvm) +int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci) { struct ct_vm *vm; struct ct_vm_block *block; - int i; + int i, err = 0; *rvm = NULL; @@ -188,23 +188,21 @@ int ct_vm_create(struct ct_vm **rvm) /* Allocate page table pages */ for (i = 0; i < CT_PTP_NUM; i++) { - vm->ptp[i] = kmalloc(PAGE_SIZE, GFP_KERNEL); - if (!vm->ptp[i]) + err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, + snd_dma_pci_data(pci), + PAGE_SIZE, &vm->ptp[i]); + if (err < 0) break; } - if (!i) { + if (err < 0) { /* no page table pages are allocated */ - kfree(vm); + ct_vm_destroy(vm); return -ENOMEM; } vm->size = CT_ADDRS_PER_PAGE * i; - /* Initialise remaining ptps */ - for (; i < CT_PTP_NUM; i++) - vm->ptp[i] = NULL; - vm->map = ct_vm_map; vm->unmap = ct_vm_unmap; - vm->get_ptp_virt = ct_get_ptp_virt; + vm->get_ptp_phys = ct_get_ptp_phys; INIT_LIST_HEAD(&vm->unused); INIT_LIST_HEAD(&vm->used); block = kzalloc(sizeof(*block), GFP_KERNEL); @@ -242,7 +240,7 @@ void ct_vm_destroy(struct ct_vm *vm) /* free allocated page table pages */ for (i = 0; i < CT_PTP_NUM; i++) - kfree(vm->ptp[i]); + snd_dma_free_pages(&vm->ptp[i]); vm->size = 0; diff --git a/sound/pci/ctxfi/ctvmem.h b/sound/pci/ctxfi/ctvmem.h index 01e4fd0386a..b23adfca4de 100644 --- a/sound/pci/ctxfi/ctvmem.h +++ b/sound/pci/ctxfi/ctvmem.h @@ -22,6 +22,8 @@ #include <linux/mutex.h> #include <linux/list.h> +#include <linux/pci.h> +#include <sound/memalloc.h> /* The chip can handle the page table of 4k pages * (emu20k1 can handle even 8k pages, but we don't use it right now) @@ -41,7 +43,7 @@ struct snd_pcm_substream; /* Virtual memory management object for card device */ struct ct_vm { - void *ptp[CT_PTP_NUM]; /* Device page table pages */ + struct snd_dma_buffer ptp[CT_PTP_NUM]; /* Device page table pages */ unsigned int size; /* Available addr space in bytes */ struct list_head unused; /* List of unused blocks */ struct list_head used; /* List of used blocks */ @@ -52,10 +54,10 @@ struct ct_vm { int size); /* Unmap device logical addr area. 
*/ void (*unmap)(struct ct_vm *, struct ct_vm_block *block); - void *(*get_ptp_virt)(struct ct_vm *vm, int index); + dma_addr_t (*get_ptp_phys)(struct ct_vm *vm, int index); }; -int ct_vm_create(struct ct_vm **rvm); +int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci); void ct_vm_destroy(struct ct_vm *vm); #endif /* CTVMEM_H */ diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 565de38a3fc..ff6da6f386d 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -426,6 +426,7 @@ struct azx { /* flags */ int position_fix; + int poll_count; unsigned int running :1; unsigned int initialized :1; unsigned int single_cmd :1; @@ -506,7 +507,7 @@ static char *driver_short_names[] __devinitdata = { #define get_azx_dev(substream) (substream->runtime->private_data) static int azx_acquire_irq(struct azx *chip, int do_disconnect); - +static int azx_send_cmd(struct hda_bus *bus, unsigned int val); /* * Interface for HD codec */ @@ -664,11 +665,12 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus, { struct azx *chip = bus->private_data; unsigned long timeout; + int do_poll = 0; again: timeout = jiffies + msecs_to_jiffies(1000); for (;;) { - if (chip->polling_mode) { + if (chip->polling_mode || do_poll) { spin_lock_irq(&chip->reg_lock); azx_update_rirb(chip); spin_unlock_irq(&chip->reg_lock); @@ -676,6 +678,9 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus, if (!chip->rirb.cmds[addr]) { smp_rmb(); bus->rirb_error = 0; + + if (!do_poll) + chip->poll_count = 0; return chip->rirb.res[addr]; /* the last value */ } if (time_after(jiffies, timeout)) @@ -688,6 +693,16 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus, } } + if (!chip->polling_mode && chip->poll_count < 2) { + snd_printdd(SFX "azx_get_response timeout, " + "polling the codec once: last cmd=0x%08x\n", + chip->last_cmd[addr]); + do_poll = 1; + chip->poll_count++; + goto again; + } + + if (!chip->polling_mode) { snd_printk(KERN_WARNING SFX "azx_get_response timeout, " "switching to polling mode: last cmd=0x%08x\n", @@ -1878,6 +1893,9 @@ static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev) if (!bdl_pos_adj[chip->dev_index]) return 1; /* no delayed ack */ + if (WARN_ONCE(!azx_dev->period_bytes, + "hda-intel: zero azx_dev->period_bytes")) + return 0; /* this shouldn't happen! */ if (pos % azx_dev->period_bytes > azx_dev->period_bytes / 2) return 0; /* NG - it's below the period boundary */ return 1; /* OK, it's fine */ @@ -2043,7 +2061,7 @@ static int azx_acquire_irq(struct azx *chip, int do_disconnect) { if (request_irq(chip->pci->irq, azx_interrupt, chip->msi ? 
0 : IRQF_SHARED, - "HDA Intel", chip)) { + "hda_intel", chip)) { printk(KERN_ERR "hda-intel: unable to grab IRQ %d, " "disabling device\n", chip->pci->irq); if (do_disconnect) @@ -2332,7 +2350,7 @@ static void __devinit check_probe_mask(struct azx *chip, int dev) */ static struct snd_pci_quirk msi_black_list[] __devinitdata = { SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */ - SND_PCI_QUIRK(0x1043, 0x829c, "ASUS", 0), /* nvidia */ + SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */ {} }; diff --git a/sound/pci/ice1712/aureon.c b/sound/pci/ice1712/aureon.c index 765d7bd4c3d..9e66f6d306f 100644 --- a/sound/pci/ice1712/aureon.c +++ b/sound/pci/ice1712/aureon.c @@ -703,11 +703,13 @@ static void wm_set_vol(struct snd_ice1712 *ice, unsigned int index, unsigned sho { unsigned char nvol; - if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE)) + if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE)) { nvol = 0; - else + } else { nvol = ((vol % WM_VOL_CNT) * (master % WM_VOL_CNT)) / WM_VOL_MAX; + nvol += 0x1b; + } wm_put(ice, index, nvol); wm_put_nocache(ice, index, 0x180 | nvol); @@ -778,7 +780,7 @@ static int wm_master_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_ for (ch = 0; ch < 2; ch++) { unsigned int vol = ucontrol->value.integer.value[ch]; if (vol > WM_VOL_MAX) - continue; + vol = WM_VOL_MAX; vol |= spec->master[ch] & WM_VOL_MUTE; if (vol != spec->master[ch]) { int dac; @@ -834,8 +836,8 @@ static int wm_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value * for (i = 0; i < voices; i++) { unsigned int vol = ucontrol->value.integer.value[i]; if (vol > WM_VOL_MAX) - continue; - vol |= spec->vol[ofs+i]; + vol = WM_VOL_MAX; + vol |= spec->vol[ofs+i] & WM_VOL_MUTE; if (vol != spec->vol[ofs+i]) { spec->vol[ofs+i] = vol; idx = WM_DAC_ATTEN + ofs + i; diff --git a/sound/soc/omap/omap3pandora.c b/sound/soc/omap/omap3pandora.c index 71b2c161158..68980c19a3b 100644 --- a/sound/soc/omap/omap3pandora.c +++ b/sound/soc/omap/omap3pandora.c @@ -145,6 +145,7 @@ static const struct snd_soc_dapm_widget omap3pandora_in_dapm_widgets[] = { }; static const struct snd_soc_dapm_route omap3pandora_out_map[] = { + {"PCM DAC", NULL, "APLL Enable"}, {"Headphone Amplifier", NULL, "PCM DAC"}, {"Line Out", NULL, "PCM DAC"}, {"Headphone Jack", NULL, "Headphone Amplifier"}, diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index ddc584b6487..4b91d8cf00e 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -705,7 +705,7 @@ static void print_mapped_keys(void) fprintf(stdout, "\t[w] toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0); fprintf(stdout, - "\t[K] hide kernel_symbols symbols. \t(%s)\n", + "\t[K] hide kernel_symbols symbols. \t(%s)\n", hide_kernel_symbols ? "yes" : "no"); fprintf(stdout, "\t[U] hide user symbols. \t(%s)\n", diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index bb0fd6da2d5..8a9e6baa309 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -295,10 +295,10 @@ void thread__find_addr_location(struct thread *self, al->thread = self; al->addr = addr; - if (cpumode & PERF_RECORD_MISC_KERNEL) { + if (cpumode == PERF_RECORD_MISC_KERNEL) { al->level = 'k'; mg = &session->kmaps; - } else if (cpumode & PERF_RECORD_MISC_USER) + } else if (cpumode == PERF_RECORD_MISC_USER) al->level = '.'; else { al->level = 'H'; |
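
In the final tools/perf/util/event.c hunk, the cpumode values carried in the event header are small enumerations rather than independent flag bits (kernel = 1, user = 2, hypervisor = 3 at the time of this patch), so a bitwise AND wrongly matches hypervisor samples, whose value has both low bits set. A short demonstration of why the equality test is the right one (the constants are restated locally so the example stands alone):

    #include <stdio.h>

    /* Values as used by perf at the time of this patch; restated here
     * so the example needs no kernel headers. */
    #define MISC_KERNEL     1
    #define MISC_USER       2
    #define MISC_HYPERVISOR 3

    static char level_bitwise(unsigned cpumode)
    {
        if (cpumode & MISC_KERNEL)        /* also true for 3 (hypervisor)! */
            return 'k';
        else if (cpumode & MISC_USER)
            return '.';
        return 'H';
    }

    static char level_equality(unsigned cpumode)
    {
        if (cpumode == MISC_KERNEL)
            return 'k';
        else if (cpumode == MISC_USER)
            return '.';
        return 'H';
    }

    int main(void)
    {
        unsigned modes[] = { MISC_KERNEL, MISC_USER, MISC_HYPERVISOR };

        for (unsigned i = 0; i < 3; i++)
            printf("cpumode %u: bitwise -> %c, equality -> %c\n",
                   modes[i], level_bitwise(modes[i]), level_equality(modes[i]));
        return 0;
    }
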