author     David S. Miller <davem@davemloft.net>    2012-11-25 12:49:17 -0500
committer  David S. Miller <davem@davemloft.net>    2012-11-25 12:49:17 -0500
commit     24bc518a6888e4c97add5a5ebbff11c1ccac219f (patch)
tree       d125270f4e8432cebcbc8af9079dece51dd798a0 /drivers
parent     b3e3bd71b429c04490d6a57671f2bbe2121d4f5a (diff)
parent     194d9831f0419b5125dc94ec0ece4434d8ef74f0 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/wireless/iwlwifi/pcie/tx.c

Minor iwlwifi conflict in TX queue disabling between 'net', which
removed a bogus warning, and 'net-next', which added some status
register poking code.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
79 files changed, 833 insertions, 306 deletions
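
Several of the ata hunks below (ahci_platform.c, pata_arasan_cf.c, sata_highbank.c) switch their suspend/resume guards from "#ifdef CONFIG_PM" to "#ifdef CONFIG_PM_SLEEP", so the callbacks are only compiled when system sleep support is enabled and do not trigger "defined but not used" warnings on CONFIG_PM=y, CONFIG_PM_SLEEP=n (runtime-PM-only) builds. A minimal sketch of that guard idiom follows; the "foo" names are placeholders for illustration, not code taken from this merge.

/* Sketch of the CONFIG_PM_SLEEP guard idiom; "foo" names are hypothetical. */
#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
/* Compiled only when system suspend/resume support is enabled. */
static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware before the system sleeps */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* bring the hardware back up on wakeup */
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

/*
 * SIMPLE_DEV_PM_OPS only references the callbacks when CONFIG_PM_SLEEP is
 * set, so the ops table stays valid on !CONFIG_PM_SLEEP builds as well.
 */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
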
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c index b1ae48054dc..b7078afddb7 100644 --- a/drivers/ata/ahci_platform.c +++ b/drivers/ata/ahci_platform.c @@ -238,7 +238,7 @@ static int __devexit ahci_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int ahci_suspend(struct device *dev) { struct ahci_platform_data *pdata = dev_get_platdata(dev); diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index fd9ecf74e63..5b0ba3f20ed 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -1105,10 +1105,15 @@ static int ata_acpi_bind_device(struct ata_port *ap, struct scsi_device *sdev, struct acpi_device *acpi_dev; struct acpi_device_power_state *states; - if (ap->flags & ATA_FLAG_ACPI_SATA) - ata_dev = &ap->link.device[sdev->channel]; - else + if (ap->flags & ATA_FLAG_ACPI_SATA) { + if (!sata_pmp_attached(ap)) + ata_dev = &ap->link.device[sdev->id]; + else + ata_dev = &ap->pmp_link[sdev->channel].device[sdev->id]; + } + else { ata_dev = &ap->link.device[sdev->id]; + } *handle = ata_dev_acpi_handle(ata_dev); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 3cc7096cfda..f46fbd3bd3f 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2942,6 +2942,10 @@ const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) if (xfer_mode == t->mode) return t; + + WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n", + __func__, xfer_mode); + return NULL; } diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index e3bda074fa1..a6df6a351d6 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1052,6 +1052,8 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev) { sdev->use_10_for_rw = 1; sdev->use_10_for_ms = 1; + sdev->no_report_opcodes = 1; + sdev->no_write_same = 1; /* Schedule policy is determined by ->qc_defer() callback and * it needs to see every deferred qc. Set dev_blocked to 1 to diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c index 26201ebef3c..371fd2c698b 100644 --- a/drivers/ata/pata_arasan_cf.c +++ b/drivers/ata/pata_arasan_cf.c @@ -317,6 +317,12 @@ static int cf_init(struct arasan_cf_dev *acdev) return ret; } + ret = clk_set_rate(acdev->clk, 166000000); + if (ret) { + dev_warn(acdev->host->dev, "clock set rate failed"); + return ret; + } + spin_lock_irqsave(&acdev->host->lock, flags); /* configure CF interface clock */ writel((pdata->cf_if_clk <= CF_IF_CLK_200M) ? 
pdata->cf_if_clk : @@ -908,7 +914,7 @@ static int __devexit arasan_cf_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int arasan_cf_suspend(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c index 0d7c4c2cd26..400bf1c3e98 100644 --- a/drivers/ata/sata_highbank.c +++ b/drivers/ata/sata_highbank.c @@ -260,7 +260,7 @@ static const struct of_device_id ahci_of_match[] = { }; MODULE_DEVICE_TABLE(of, ahci_of_match); -static int __init ahci_highbank_probe(struct platform_device *pdev) +static int __devinit ahci_highbank_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ahci_host_priv *hpriv; @@ -378,7 +378,7 @@ static int __devexit ahci_highbank_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int ahci_highbank_suspend(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index 44a4256533e..08608de87e4 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c @@ -142,6 +142,39 @@ static int k2_sata_scr_write(struct ata_link *link, return 0; } +static int k2_sata_softreset(struct ata_link *link, + unsigned int *class, unsigned long deadline) +{ + u8 dmactl; + void __iomem *mmio = link->ap->ioaddr.bmdma_addr; + + dmactl = readb(mmio + ATA_DMA_CMD); + + /* Clear the start bit */ + if (dmactl & ATA_DMA_START) { + dmactl &= ~ATA_DMA_START; + writeb(dmactl, mmio + ATA_DMA_CMD); + } + + return ata_sff_softreset(link, class, deadline); +} + +static int k2_sata_hardreset(struct ata_link *link, + unsigned int *class, unsigned long deadline) +{ + u8 dmactl; + void __iomem *mmio = link->ap->ioaddr.bmdma_addr; + + dmactl = readb(mmio + ATA_DMA_CMD); + + /* Clear the start bit */ + if (dmactl & ATA_DMA_START) { + dmactl &= ~ATA_DMA_START; + writeb(dmactl, mmio + ATA_DMA_CMD); + } + + return sata_sff_hardreset(link, class, deadline); +} static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) { @@ -346,6 +379,8 @@ static struct scsi_host_template k2_sata_sht = { static struct ata_port_operations k2_sata_ops = { .inherits = &ata_bmdma_port_ops, + .softreset = k2_sata_softreset, + .hardreset = k2_sata_hardreset, .sff_tf_load = k2_sata_tf_load, .sff_tf_read = k2_sata_tf_read, .sff_check_status = k2_stat_check_status, diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 74a67e0019a..fbbd4ed2edf 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -451,7 +451,7 @@ int dev_pm_qos_add_ancestor_request(struct device *dev, if (ancestor) error = dev_pm_qos_add_request(ancestor, req, value); - if (error) + if (error < 0) req->dev = NULL; return error; diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 3804a0af3ef..9fe4f186555 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -935,7 +935,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail) /* cf. 
http://lkml.org/lkml/2006/10/31/28 */ if (!fastfail) - q->request_fn(q); + __blk_run_queue(q); } static void diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 1c49d717396..2ddd64a9ffd 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4330,6 +4330,7 @@ out_unreg_region: out_unreg_blkdev: unregister_blkdev(FLOPPY_MAJOR, "fd"); out_put_disk: + destroy_workqueue(floppy_wq); for (drive = 0; drive < N_DRIVE; drive++) { if (!disks[drive]) break; @@ -4340,7 +4341,6 @@ out_put_disk: } put_disk(disks[drive]); } - destroy_workqueue(floppy_wq); return err; } @@ -4555,6 +4555,8 @@ static void __exit floppy_module_exit(void) unregister_blkdev(FLOPPY_MAJOR, "fd"); platform_driver_unregister(&floppy_driver); + destroy_workqueue(floppy_wq); + for (drive = 0; drive < N_DRIVE; drive++) { del_timer_sync(&motor_off_timer[drive]); @@ -4578,7 +4580,6 @@ static void __exit floppy_module_exit(void) cancel_delayed_work_sync(&fd_timeout); cancel_delayed_work_sync(&fd_timer); - destroy_workqueue(floppy_wq); if (atomic_read(&usage_count)) floppy_release_irq_and_dma(); diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index adc6f36564c..9694dd99bbb 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -559,7 +559,7 @@ static void mtip_timeout_function(unsigned long int data) struct mtip_cmd *command; int tag, cmdto_cnt = 0; unsigned int bit, group; - unsigned int num_command_slots = port->dd->slot_groups * 32; + unsigned int num_command_slots; unsigned long to, tagaccum[SLOTBITS_IN_LONGS]; if (unlikely(!port)) @@ -572,6 +572,7 @@ static void mtip_timeout_function(unsigned long int data) } /* clear the tag accumulator */ memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long)); + num_command_slots = port->dd->slot_groups * 32; for (tag = 0; tag < num_command_slots; tag++) { /* @@ -2218,8 +2219,8 @@ static int exec_drive_taskfile(struct driver_data *dd, fis.device); /* check for erase mode support during secure erase.*/ - if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) - && (outbuf[0] & MTIP_SEC_ERASE_MODE)) { + if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) && outbuf && + (outbuf[0] & MTIP_SEC_ERASE_MODE)) { erasemode = 1; } @@ -2439,7 +2440,7 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd, * return value * None */ -static void mtip_hw_submit_io(struct driver_data *dd, sector_t start, +static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector, int nsect, int nents, int tag, void *callback, void *data, int dir) { @@ -2447,6 +2448,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start, struct mtip_port *port = dd->port; struct mtip_cmd *command = &port->commands[tag]; int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; + u64 start = sector; /* Map the scatter list for DMA access */ nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir); @@ -2465,8 +2467,12 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start, fis->opts = 1 << 7; fis->command = (dir == READ ? 
ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE); - *((unsigned int *) &fis->lba_low) = (start & 0xFFFFFF); - *((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xFFFFFF); + fis->lba_low = start & 0xFF; + fis->lba_mid = (start >> 8) & 0xFF; + fis->lba_hi = (start >> 16) & 0xFF; + fis->lba_low_ex = (start >> 24) & 0xFF; + fis->lba_mid_ex = (start >> 32) & 0xFF; + fis->lba_hi_ex = (start >> 40) & 0xFF; fis->device = 1 << 6; fis->features = nsect & 0xFF; fis->features_ex = (nsect >> 8) & 0xFF; diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index 5f4a917bd8b..b1742640556 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h @@ -34,7 +34,7 @@ #define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48 /* check for erase mode support during secure erase */ -#define MTIP_SEC_ERASE_MODE 0x3 +#define MTIP_SEC_ERASE_MODE 0x2 /* # of times to retry timed out/failed IOs */ #define MTIP_MAX_RETRIES 2 @@ -155,14 +155,14 @@ enum { MTIP_DDF_REBUILD_FAILED_BIT = 8, }; -__packed struct smart_attr{ +struct smart_attr { u8 attr_id; u16 flags; u8 cur; u8 worst; u32 data; u8 res[3]; -}; +} __packed; /* Register Frame Information Structure (FIS), host to device. */ struct host_to_dev_fis { diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 8d4804732ba..8c4139647ef 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -33,7 +33,7 @@ * detection. The mods to Rev F required more family * information detection. * - * Changes/Fixes by Borislav Petkov <borislav.petkov@amd.com>: + * Changes/Fixes by Borislav Petkov <bp@alien8.de>: * - misc fixes and code cleanups * * This module is based on the following documents diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c index 6c86f6e5455..351945fa2ec 100644 --- a/drivers/edac/edac_stub.c +++ b/drivers/edac/edac_stub.c @@ -5,7 +5,7 @@ * * 2007 (c) MontaVista Software, Inc. * 2010 (c) Advanced Micro Devices Inc. - * Borislav Petkov <borislav.petkov@amd.com> + * Borislav Petkov <bp@alien8.de> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c index 66b5151c108..2ae78f20cc2 100644 --- a/drivers/edac/mce_amd_inj.c +++ b/drivers/edac/mce_amd_inj.c @@ -6,7 +6,7 @@ * This file may be distributed under the terms of the GNU General Public * License version 2. * - * Copyright (c) 2010: Borislav Petkov <borislav.petkov@amd.com> + * Copyright (c) 2010: Borislav Petkov <bp@alien8.de> * Advanced Micro Devices Inc. 
*/ @@ -168,6 +168,6 @@ module_init(edac_init_mce_inject); module_exit(edac_exit_mce_inject); MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Borislav Petkov <borislav.petkov@amd.com>"); +MODULE_AUTHOR("Borislav Petkov <bp@alien8.de>"); MODULE_AUTHOR("AMD Inc."); MODULE_DESCRIPTION("MCE injection facility for testing MCE decoding"); diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 1162d6b3bf8..bb1b392f5cd 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c @@ -1546,6 +1546,8 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev) struct sbp2_logical_unit *lu = sdev->hostdata; sdev->use_10_for_rw = 1; + sdev->no_report_opcodes = 1; + sdev->no_write_same = 1; if (sbp2_param_exclusive_login) sdev->manage_start_stop = 1; diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index f11d8e3b404..47150f5ded0 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -466,7 +466,7 @@ config GPIO_ADP5588_IRQ config GPIO_ADNP tristate "Avionic Design N-bit GPIO expander" - depends on I2C && OF + depends on I2C && OF_GPIO help This option enables support for N GPIOs found on Avionic Design I2C GPIO expanders. The register space will be extended by powers diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c index 0f425189de1..ce1c8476007 100644 --- a/drivers/gpio/gpio-mcp23s08.c +++ b/drivers/gpio/gpio-mcp23s08.c @@ -77,7 +77,7 @@ struct mcp23s08_driver_data { /*----------------------------------------------------------------------*/ -#ifdef CONFIG_I2C +#if IS_ENABLED(CONFIG_I2C) static int mcp23008_read(struct mcp23s08 *mcp, unsigned reg) { @@ -399,7 +399,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, break; #endif /* CONFIG_SPI_MASTER */ -#ifdef CONFIG_I2C +#if IS_ENABLED(CONFIG_I2C) case MCP_TYPE_008: mcp->ops = &mcp23008_ops; mcp->chip.ngpio = 8; @@ -473,7 +473,7 @@ fail: /*----------------------------------------------------------------------*/ -#ifdef CONFIG_I2C +#if IS_ENABLED(CONFIG_I2C) static int __devinit mcp230xx_probe(struct i2c_client *client, const struct i2c_device_id *id) diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index cf7afb9eb61..be65c0451ad 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -92,6 +92,11 @@ static inline void __iomem *mvebu_gpioreg_out(struct mvebu_gpio_chip *mvchip) return mvchip->membase + GPIO_OUT_OFF; } +static inline void __iomem *mvebu_gpioreg_blink(struct mvebu_gpio_chip *mvchip) +{ + return mvchip->membase + GPIO_BLINK_EN_OFF; +} + static inline void __iomem *mvebu_gpioreg_io_conf(struct mvebu_gpio_chip *mvchip) { return mvchip->membase + GPIO_IO_CONF_OFF; @@ -206,6 +211,23 @@ static int mvebu_gpio_get(struct gpio_chip *chip, unsigned pin) return (u >> pin) & 1; } +static void mvebu_gpio_blink(struct gpio_chip *chip, unsigned pin, int value) +{ + struct mvebu_gpio_chip *mvchip = + container_of(chip, struct mvebu_gpio_chip, chip); + unsigned long flags; + u32 u; + + spin_lock_irqsave(&mvchip->lock, flags); + u = readl_relaxed(mvebu_gpioreg_blink(mvchip)); + if (value) + u |= 1 << pin; + else + u &= ~(1 << pin); + writel_relaxed(u, mvebu_gpioreg_blink(mvchip)); + spin_unlock_irqrestore(&mvchip->lock, flags); +} + static int mvebu_gpio_direction_input(struct gpio_chip *chip, unsigned pin) { struct mvebu_gpio_chip *mvchip = @@ -244,6 +266,7 @@ static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned pin, if (ret) return ret; + mvebu_gpio_blink(chip, pin, 0); mvebu_gpio_set(chip, pin, value); 
spin_lock_irqsave(&mvchip->lock, flags); diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c index 05a909a17ce..15b182c84ce 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c @@ -49,13 +49,7 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc) if (chan->vblank.crtc != crtc) continue; - if (nv_device(priv)->chipset == 0x50) { - nv_wr32(priv, 0x001704, chan->vblank.channel); - nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma); - bar->flush(bar); - nv_wr32(priv, 0x001570, chan->vblank.offset); - nv_wr32(priv, 0x001574, chan->vblank.value); - } else { + if (nv_device(priv)->chipset >= 0xc0) { nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel); bar->flush(bar); nv_wr32(priv, 0x06000c, @@ -63,6 +57,17 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc) nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset)); nv_wr32(priv, 0x060014, chan->vblank.value); + } else { + nv_wr32(priv, 0x001704, chan->vblank.channel); + nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma); + bar->flush(bar); + if (nv_device(priv)->chipset == 0x50) { + nv_wr32(priv, 0x001570, chan->vblank.offset); + nv_wr32(priv, 0x001574, chan->vblank.value); + } else { + nv_wr32(priv, 0x060010, chan->vblank.offset); + nv_wr32(priv, 0x060014, chan->vblank.value); + } } list_del(&chan->vblank.head); diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c index e45035efb8c..7bbb1e1b7a8 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c @@ -669,21 +669,27 @@ nv40_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem) }); } -void +int nv40_grctx_init(struct nouveau_device *device, u32 *size) { - u32 ctxprog[256], i; + u32 *ctxprog = kmalloc(256 * 4, GFP_KERNEL), i; struct nouveau_grctx ctx = { .device = device, .mode = NOUVEAU_GRCTX_PROG, .data = ctxprog, - .ctxprog_max = ARRAY_SIZE(ctxprog) + .ctxprog_max = 256, }; + if (!ctxprog) + return -ENOMEM; + nv40_grctx_generate(&ctx); nv_wr32(device, 0x400324, 0); for (i = 0; i < ctx.ctxprog_len; i++) nv_wr32(device, 0x400328, ctxprog[i]); *size = ctx.ctxvals_pos * 4; + + kfree(ctxprog); + return 0; } diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c index 425001204a8..cc6574eeb80 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c @@ -346,7 +346,9 @@ nv40_graph_init(struct nouveau_object *object) return ret; /* generate and upload context program */ - nv40_grctx_init(nv_device(priv), &priv->size); + ret = nv40_grctx_init(nv_device(priv), &priv->size); + if (ret) + return ret; /* No context present currently */ nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h index d2ac975afc2..7da35a4e797 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h @@ -15,7 +15,7 @@ nv44_graph_class(void *priv) return !(0x0baf & (1 << (device->chipset & 0x0f))); } -void nv40_grctx_init(struct nouveau_device *, u32 *size); +int nv40_grctx_init(struct nouveau_device *, u32 *size); void nv40_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *); #endif diff --git 
a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h index 818feabbf4a..486f1a9217f 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/object.h +++ b/drivers/gpu/drm/nouveau/core/include/core/object.h @@ -175,14 +175,18 @@ nv_mo32(void *obj, u32 addr, u32 mask, u32 data) return temp; } -static inline bool -nv_strncmp(void *obj, u32 addr, u32 len, const char *str) +static inline int +nv_memcmp(void *obj, u32 addr, const char *str, u32 len) { + unsigned char c1, c2; + while (len--) { - if (nv_ro08(obj, addr++) != *(str++)) - return false; + c1 = nv_ro08(obj, addr++); + c2 = *(str++); + if (c1 != c2) + return c1 - c2; } - return true; + return 0; } #endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h index 39e73b91d36..41b7a6a76f1 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h @@ -54,6 +54,7 @@ int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *, int clk, struct nouveau_pll_vals *); int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1, struct nouveau_pll_vals *); - +int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *, + int clk, struct nouveau_pll_vals *); #endif diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c index 7d750382a83..c5119715774 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c @@ -64,7 +64,7 @@ dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) } } else if (*ver >= 0x15) { - if (!nv_strncmp(bios, dcb - 7, 7, "DEV_REC")) { + if (!nv_memcmp(bios, dcb - 7, "DEV_REC", 7)) { u16 i2c = nv_ro16(bios, dcb + 2); *hdr = 4; *cnt = (i2c - dcb) / 10; diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c index cc8d7d162d7..9068c98b96f 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c @@ -66,6 +66,24 @@ nva3_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq) return ret; } +int +nva3_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info, + int clk, struct nouveau_pll_vals *pv) +{ + int ret, N, M, P; + + ret = nva3_pll_calc(clock, info, clk, &N, NULL, &M, &P); + + if (ret > 0) { + pv->refclk = info->refclk; + pv->N1 = N; + pv->M1 = M; + pv->log2P = P; + } + return ret; +} + + static int nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, @@ -80,6 +98,7 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, return ret; priv->base.pll_set = nva3_clock_pll_set; + priv->base.pll_calc = nva3_clock_pll_calc; return 0; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c index 5ccce0b17bf..f6962c9b6c3 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c @@ -79,6 +79,7 @@ nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, return ret; priv->base.pll_set = nvc0_clock_pll_set; + priv->base.pll_calc = nva3_clock_pll_calc; return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index cc79c796afe..cbf1fc60a38 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ 
b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -241,6 +241,10 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) if (unlikely(!abi16)) return -ENOMEM; + + if (!drm->channel) + return nouveau_abi16_put(abi16, -ENODEV); + client = nv_client(abi16->client); if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 0910125cbbc..8503b2ea570 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -129,7 +129,8 @@ nouveau_accel_init(struct nouveau_drm *drm) /* initialise synchronisation routines */ if (device->card_type < NV_10) ret = nv04_fence_create(drm); - else if (device->chipset < 0x84) ret = nv10_fence_create(drm); + else if (device->card_type < NV_50) ret = nv10_fence_create(drm); + else if (device->chipset < 0x84) ret = nv50_fence_create(drm); else if (device->card_type < NV_C0) ret = nv84_fence_create(drm); else ret = nvc0_fence_create(drm); if (ret) { diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index af31f829f4a..219942c660d 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1330,6 +1330,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav break; udelay(1); } + } else { + save->crtc_enabled[i] = false; } } diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index 10ea17a6b2a..42433344cb1 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c @@ -69,9 +69,12 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = { /* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/ { PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59, PCI_VENDOR_ID_DELL, 0x00e3, 2}, - /* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */ + /* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66, PCI_VENDOR_ID_DELL, 0x0149, 1}, + /* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 for suspend/resume */ + { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66, + PCI_VENDOR_ID_IBM, 0x0531, 1}, /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50, 0x1025, 0x0061, 1}, diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c index aa59a254be2..c02bf208084 100644 --- a/drivers/i2c/busses/i2c-at91.c +++ b/drivers/i2c/busses/i2c-at91.c @@ -39,6 +39,7 @@ #define AT91_TWI_STOP 0x0002 /* Send a Stop Condition */ #define AT91_TWI_MSEN 0x0004 /* Master Transfer Enable */ #define AT91_TWI_SVDIS 0x0020 /* Slave Transfer Disable */ +#define AT91_TWI_QUICK 0x0040 /* SMBus quick command */ #define AT91_TWI_SWRST 0x0080 /* Software Reset */ #define AT91_TWI_MMR 0x0004 /* Master Mode Register */ @@ -212,7 +213,11 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) INIT_COMPLETION(dev->cmd_complete); dev->transfer_status = 0; - if (dev->msg->flags & I2C_M_RD) { + + if (!dev->buf_len) { + at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK); + at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP); + } else if (dev->msg->flags & I2C_M_RD) { unsigned start_flags = AT91_TWI_START; if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) { diff --git a/drivers/i2c/busses/i2c-mxs.c 
b/drivers/i2c/busses/i2c-mxs.c index 286ca191782..0670da79ee5 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c @@ -287,12 +287,14 @@ read_init_dma_fail: select_init_dma_fail: dma_unmap_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE); select_init_pio_fail: + dmaengine_terminate_all(i2c->dmach); return -EINVAL; /* Write failpath. */ write_init_dma_fail: dma_unmap_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE); write_init_pio_fail: + dmaengine_terminate_all(i2c->dmach); return -EINVAL; } diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index db31eaed6ea..3525c9e62cb 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -43,7 +43,6 @@ #include <linux/slab.h> #include <linux/i2c-omap.h> #include <linux/pm_runtime.h> -#include <linux/pm_qos.h> /* I2C controller revisions */ #define OMAP_I2C_OMAP1_REV_2 0x20 @@ -187,8 +186,9 @@ struct omap_i2c_dev { int reg_shift; /* bit shift for I2C register addresses */ struct completion cmd_complete; struct resource *ioarea; - u32 latency; /* maximum MPU wkup latency */ - struct pm_qos_request pm_qos_request; + u32 latency; /* maximum mpu wkup latency */ + void (*set_mpu_wkup_lat)(struct device *dev, + long latency); u32 speed; /* Speed of bus in kHz */ u32 dtrev; /* extra revision from DT */ u32 flags; @@ -494,7 +494,9 @@ static void omap_i2c_resize_fifo(struct omap_i2c_dev *dev, u8 size, bool is_rx) dev->b_hw = 1; /* Enable hardware fixes */ /* calculate wakeup latency constraint for MPU */ - dev->latency = (1000000 * dev->threshold) / (1000 * dev->speed / 8); + if (dev->set_mpu_wkup_lat != NULL) + dev->latency = (1000000 * dev->threshold) / + (1000 * dev->speed / 8); } /* @@ -522,6 +524,9 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap, dev->buf = msg->buf; dev->buf_len = msg->len; + /* make sure writes to dev->buf_len are ordered */ + barrier(); + omap_i2c_write_reg(dev, OMAP_I2C_CNT_REG, dev->buf_len); /* Clear the FIFO Buffers */ @@ -579,7 +584,6 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap, */ timeout = wait_for_completion_timeout(&dev->cmd_complete, OMAP_I2C_TIMEOUT); - dev->buf_len = 0; if (timeout == 0) { dev_err(dev->dev, "controller timed out\n"); omap_i2c_init(dev); @@ -629,16 +633,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) if (r < 0) goto out; - /* - * When waiting for completion of a i2c transfer, we need to - * set a wake up latency constraint for the MPU. This is to - * ensure quick enough wakeup from idle, when transfer - * completes. 
- */ - if (dev->latency) - pm_qos_add_request(&dev->pm_qos_request, - PM_QOS_CPU_DMA_LATENCY, - dev->latency); + if (dev->set_mpu_wkup_lat != NULL) + dev->set_mpu_wkup_lat(dev->dev, dev->latency); for (i = 0; i < num; i++) { r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1))); @@ -646,8 +642,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) break; } - if (dev->latency) - pm_qos_remove_request(&dev->pm_qos_request); + if (dev->set_mpu_wkup_lat != NULL) + dev->set_mpu_wkup_lat(dev->dev, -1); if (r == 0) r = num; @@ -1104,6 +1100,7 @@ omap_i2c_probe(struct platform_device *pdev) } else if (pdata != NULL) { dev->speed = pdata->clkrate; dev->flags = pdata->flags; + dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat; dev->dtrev = pdata->rev; } @@ -1159,8 +1156,9 @@ omap_i2c_probe(struct platform_device *pdev) dev->b_hw = 1; /* Enable hardware fixes */ /* calculate wakeup latency constraint for MPU */ - dev->latency = (1000000 * dev->fifo_size) / - (1000 * dev->speed / 8); + if (dev->set_mpu_wkup_lat != NULL) + dev->latency = (1000000 * dev->fifo_size) / + (1000 * dev->speed / 8); } /* reset ASAP, clearing any IRQs */ diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 3e0335f1fc6..9d902725bac 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -806,6 +806,7 @@ static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c) dev_err(i2c->dev, "invalid gpio[%d]: %d\n", idx, gpio); goto free_gpio; } + i2c->gpios[idx] = gpio; ret = gpio_request(gpio, "i2c-bus"); if (ret) { diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c index c0ec7d42c3b..1abbc170d8b 100644 --- a/drivers/input/input-mt.c +++ b/drivers/input/input-mt.c @@ -26,10 +26,14 @@ static void copy_abs(struct input_dev *dev, unsigned int dst, unsigned int src) * input_mt_init_slots() - initialize MT input slots * @dev: input device supporting MT events and finger tracking * @num_slots: number of slots used by the device + * @flags: mt tasks to handle in core * * This function allocates all necessary memory for MT slot handling * in the input device, prepares the ABS_MT_SLOT and * ABS_MT_TRACKING_ID events for use and sets up appropriate buffers. + * Depending on the flags set, it also performs pointer emulation and + * frame synchronization. + * * May be called repeatedly. Returns -EINVAL if attempting to * reinitialize with a different number of slots. 
*/ diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c index 8f02e3d0e71..4c842c320c2 100644 --- a/drivers/input/mousedev.c +++ b/drivers/input/mousedev.c @@ -12,8 +12,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MOUSEDEV_MINOR_BASE 32 -#define MOUSEDEV_MINORS 32 -#define MOUSEDEV_MIX 31 +#define MOUSEDEV_MINORS 31 +#define MOUSEDEV_MIX 63 #include <linux/sched.h> #include <linux/slab.h> diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index f02028ec3db..78e5d9ab0ba 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -955,7 +955,8 @@ static int ads7846_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(ads7846_pm, ads7846_suspend, ads7846_resume); -static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads7846 *ts) +static int __devinit ads7846_setup_pendown(struct spi_device *spi, + struct ads7846 *ts) { struct ads7846_platform_data *pdata = spi->dev.platform_data; int err; @@ -981,6 +982,9 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads784 ts->gpio_pendown = pdata->gpio_pendown; + if (pdata->gpio_pendown_debounce) + gpio_set_debounce(pdata->gpio_pendown, + pdata->gpio_pendown_debounce); } else { dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n"); return -EINVAL; diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index d4a4cd445ca..0badfa48b32 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -4108,7 +4108,7 @@ static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to) static int intel_iommu_add_device(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); - struct pci_dev *bridge, *dma_pdev; + struct pci_dev *bridge, *dma_pdev = NULL; struct iommu_group *group; int ret; @@ -4122,7 +4122,7 @@ static int intel_iommu_add_device(struct device *dev) dma_pdev = pci_get_domain_bus_and_slot( pci_domain_nr(pdev->bus), bridge->subordinate->number, 0); - else + if (!dma_pdev) dma_pdev = pci_dev_get(bridge); } else dma_pdev = pci_dev_get(pdev); diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index a649f146d17..c0f7a426626 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -1054,6 +1054,7 @@ static int smmu_debugfs_stats_show(struct seq_file *s, void *v) stats[i], val, offs); } seq_printf(s, "\n"); + dput(dent); return 0; } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 02db9183ca0..77e6eff41ca 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -740,8 +740,14 @@ static void rq_completed(struct mapped_device *md, int rw, int run_queue) if (!md_in_flight(md)) wake_up(&md->wait); + /* + * Run this off this callpath, as drivers could invoke end_io while + * inside their request_fn (and holding the queue lock). Calling + * back into ->request_fn() could deadlock attempting to grab the + * queue lock again. + */ if (run_queue) - blk_run_queue(md->queue); + blk_run_queue_async(md->queue); /* * dm_put() must be at the end of this function. 
See the comment above diff --git a/drivers/md/md.c b/drivers/md/md.c index 9ab768acfb6..61200717687 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1817,10 +1817,10 @@ retry: memset(bbp, 0xff, PAGE_SIZE); for (i = 0 ; i < bb->count ; i++) { - u64 internal_bb = *p++; + u64 internal_bb = p[i]; u64 store_bb = ((BB_OFFSET(internal_bb) << 10) | BB_LEN(internal_bb)); - *bbp++ = cpu_to_le64(store_bb); + bbp[i] = cpu_to_le64(store_bb); } bb->changed = 0; if (read_seqretry(&bb->lock, seq)) @@ -5294,7 +5294,7 @@ void md_stop_writes(struct mddev *mddev) } EXPORT_SYMBOL_GPL(md_stop_writes); -void md_stop(struct mddev *mddev) +static void __md_stop(struct mddev *mddev) { mddev->ready = 0; mddev->pers->stop(mddev); @@ -5304,6 +5304,18 @@ void md_stop(struct mddev *mddev) mddev->pers = NULL; clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); } + +void md_stop(struct mddev *mddev) +{ + /* stop the array and free an attached data structures. + * This is called from dm-raid + */ + __md_stop(mddev); + bitmap_destroy(mddev); + if (mddev->bio_set) + bioset_free(mddev->bio_set); +} + EXPORT_SYMBOL_GPL(md_stop); static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) @@ -5364,7 +5376,7 @@ static int do_md_stop(struct mddev * mddev, int mode, set_disk_ro(disk, 0); __md_stop_writes(mddev); - md_stop(mddev); + __md_stop(mddev); mddev->queue->merge_bvec_fn = NULL; mddev->queue->backing_dev_info.congested_fn = NULL; @@ -7936,9 +7948,9 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, sector_t *first_bad, int *bad_sectors) { int hi; - int lo = 0; + int lo; u64 *p = bb->page; - int rv = 0; + int rv; sector_t target = s + sectors; unsigned seq; @@ -7953,7 +7965,8 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, retry: seq = read_seqbegin(&bb->lock); - + lo = 0; + rv = 0; hi = bb->count; /* Binary search between lo and hi for 'target' diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index d1295aff417..0d5d0ff2c0f 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -499,7 +499,7 @@ static void raid10_end_write_request(struct bio *bio, int error) */ one_write_done(r10_bio); if (dec_rdev) - rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); + rdev_dec_pending(rdev, conf->mddev); } /* @@ -1334,18 +1334,21 @@ retry_write: blocked_rdev = rrdev; break; } + if (rdev && (test_bit(Faulty, &rdev->flags) + || test_bit(Unmerged, &rdev->flags))) + rdev = NULL; if (rrdev && (test_bit(Faulty, &rrdev->flags) || test_bit(Unmerged, &rrdev->flags))) rrdev = NULL; r10_bio->devs[i].bio = NULL; r10_bio->devs[i].repl_bio = NULL; - if (!rdev || test_bit(Faulty, &rdev->flags) || - test_bit(Unmerged, &rdev->flags)) { + + if (!rdev && !rrdev) { set_bit(R10BIO_Degraded, &r10_bio->state); continue; } - if (test_bit(WriteErrorSeen, &rdev->flags)) { + if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) { sector_t first_bad; sector_t dev_sector = r10_bio->devs[i].addr; int bad_sectors; @@ -1387,8 +1390,10 @@ retry_write: max_sectors = good_sectors; } } - r10_bio->devs[i].bio = bio; - atomic_inc(&rdev->nr_pending); + if (rdev) { + r10_bio->devs[i].bio = bio; + atomic_inc(&rdev->nr_pending); + } if (rrdev) { r10_bio->devs[i].repl_bio = bio; atomic_inc(&rrdev->nr_pending); @@ -1444,69 +1449,71 @@ retry_write: for (i = 0; i < conf->copies; i++) { struct bio *mbio; int d = r10_bio->devs[i].devnum; - if (!r10_bio->devs[i].bio) - continue; + if (r10_bio->devs[i].bio) { + struct md_rdev *rdev = conf->mirrors[d].rdev; + mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); + 
md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, + max_sectors); + r10_bio->devs[i].bio = mbio; + + mbio->bi_sector = (r10_bio->devs[i].addr+ + choose_data_offset(r10_bio, + rdev)); + mbio->bi_bdev = rdev->bdev; + mbio->bi_end_io = raid10_end_write_request; + mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; + mbio->bi_private = r10_bio; - mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); - md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, - max_sectors); - r10_bio->devs[i].bio = mbio; + atomic_inc(&r10_bio->remaining); - mbio->bi_sector = (r10_bio->devs[i].addr+ - choose_data_offset(r10_bio, - conf->mirrors[d].rdev)); - mbio->bi_bdev = conf->mirrors[d].rdev->bdev; - mbio->bi_end_io = raid10_end_write_request; - mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; - mbio->bi_private = r10_bio; + cb = blk_check_plugged(raid10_unplug, mddev, + sizeof(*plug)); + if (cb) + plug = container_of(cb, struct raid10_plug_cb, + cb); + else + plug = NULL; + spin_lock_irqsave(&conf->device_lock, flags); + if (plug) { + bio_list_add(&plug->pending, mbio); + plug->pending_cnt++; + } else { + bio_list_add(&conf->pending_bio_list, mbio); + conf->pending_count++; + } + spin_unlock_irqrestore(&conf->device_lock, flags); + if (!plug) + md_wakeup_thread(mddev->thread); + } - atomic_inc(&r10_bio->remaining); + if (r10_bio->devs[i].repl_bio) { + struct md_rdev *rdev = conf->mirrors[d].replacement; + if (rdev == NULL) { + /* Replacement just got moved to main 'rdev' */ + smp_mb(); + rdev = conf->mirrors[d].rdev; + } + mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); + md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, + max_sectors); + r10_bio->devs[i].repl_bio = mbio; + + mbio->bi_sector = (r10_bio->devs[i].addr + + choose_data_offset( + r10_bio, rdev)); + mbio->bi_bdev = rdev->bdev; + mbio->bi_end_io = raid10_end_write_request; + mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; + mbio->bi_private = r10_bio; - cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug)); - if (cb) - plug = container_of(cb, struct raid10_plug_cb, cb); - else - plug = NULL; - spin_lock_irqsave(&conf->device_lock, flags); - if (plug) { - bio_list_add(&plug->pending, mbio); - plug->pending_cnt++; - } else { + atomic_inc(&r10_bio->remaining); + spin_lock_irqsave(&conf->device_lock, flags); bio_list_add(&conf->pending_bio_list, mbio); conf->pending_count++; + spin_unlock_irqrestore(&conf->device_lock, flags); + if (!mddev_check_plugged(mddev)) + md_wakeup_thread(mddev->thread); } - spin_unlock_irqrestore(&conf->device_lock, flags); - if (!plug) - md_wakeup_thread(mddev->thread); - - if (!r10_bio->devs[i].repl_bio) - continue; - - mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); - md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, - max_sectors); - r10_bio->devs[i].repl_bio = mbio; - - /* We are actively writing to the original device - * so it cannot disappear, so the replacement cannot - * become NULL here - */ - mbio->bi_sector = (r10_bio->devs[i].addr + - choose_data_offset( - r10_bio, - conf->mirrors[d].replacement)); - mbio->bi_bdev = conf->mirrors[d].replacement->bdev; - mbio->bi_end_io = raid10_end_write_request; - mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; - mbio->bi_private = r10_bio; - - atomic_inc(&r10_bio->remaining); - spin_lock_irqsave(&conf->device_lock, flags); - bio_list_add(&conf->pending_bio_list, mbio); - conf->pending_count++; - spin_unlock_irqrestore(&conf->device_lock, flags); - if (!mddev_check_plugged(mddev)) - md_wakeup_thread(mddev->thread); } /* Don't remove the bias on 'remaining' 
(one_write_done) until diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index c5439dce029..a4502686e7a 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2774,10 +2774,12 @@ static void handle_stripe_clean_event(struct r5conf *conf, dev = &sh->dev[i]; if (!test_bit(R5_LOCKED, &dev->flags) && (test_bit(R5_UPTODATE, &dev->flags) || - test_and_clear_bit(R5_Discard, &dev->flags))) { + test_bit(R5_Discard, &dev->flags))) { /* We can return any write requests */ struct bio *wbi, *wbi2; pr_debug("Return write for disc %d\n", i); + if (test_and_clear_bit(R5_Discard, &dev->flags)) + clear_bit(R5_UPTODATE, &dev->flags); wbi = dev->written; dev->written = NULL; while (wbi && wbi->bi_sector < @@ -2795,7 +2797,8 @@ static void handle_stripe_clean_event(struct r5conf *conf, !test_bit(STRIPE_DEGRADED, &sh->state), 0); } - } + } else if (test_bit(R5_Discard, &sh->dev[i].flags)) + clear_bit(R5_Discard, &sh->dev[i].flags); if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) if (atomic_dec_and_test(&conf->pending_full_writes)) @@ -3490,40 +3493,6 @@ static void handle_stripe(struct stripe_head *sh) handle_failed_sync(conf, sh, &s); } - /* - * might be able to return some write requests if the parity blocks - * are safe, or on a failed drive - */ - pdev = &sh->dev[sh->pd_idx]; - s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) - || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); - qdev = &sh->dev[sh->qd_idx]; - s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) - || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) - || conf->level < 6; - - if (s.written && - (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) - && !test_bit(R5_LOCKED, &pdev->flags) - && (test_bit(R5_UPTODATE, &pdev->flags) || - test_bit(R5_Discard, &pdev->flags))))) && - (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) - && !test_bit(R5_LOCKED, &qdev->flags) - && (test_bit(R5_UPTODATE, &qdev->flags) || - test_bit(R5_Discard, &qdev->flags)))))) - handle_stripe_clean_event(conf, sh, disks, &s.return_bi); - - /* Now we might consider reading some blocks, either to check/generate - * parity, or to satisfy requests - * or to load a block that is being partially written. 
- */ - if (s.to_read || s.non_overwrite - || (conf->level == 6 && s.to_write && s.failed) - || (s.syncing && (s.uptodate + s.compute < disks)) - || s.replacing - || s.expanding) - handle_stripe_fill(sh, &s, disks); - /* Now we check to see if any write operations have recently * completed */ @@ -3561,6 +3530,40 @@ static void handle_stripe(struct stripe_head *sh) s.dec_preread_active = 1; } + /* + * might be able to return some write requests if the parity blocks + * are safe, or on a failed drive + */ + pdev = &sh->dev[sh->pd_idx]; + s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) + || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); + qdev = &sh->dev[sh->qd_idx]; + s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) + || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) + || conf->level < 6; + + if (s.written && + (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) + && !test_bit(R5_LOCKED, &pdev->flags) + && (test_bit(R5_UPTODATE, &pdev->flags) || + test_bit(R5_Discard, &pdev->flags))))) && + (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) + && !test_bit(R5_LOCKED, &qdev->flags) + && (test_bit(R5_UPTODATE, &qdev->flags) || + test_bit(R5_Discard, &qdev->flags)))))) + handle_stripe_clean_event(conf, sh, disks, &s.return_bi); + + /* Now we might consider reading some blocks, either to check/generate + * parity, or to satisfy requests + * or to load a block that is being partially written. + */ + if (s.to_read || s.non_overwrite + || (conf->level == 6 && s.to_write && s.failed) + || (s.syncing && (s.uptodate + s.compute < disks)) + || s.replacing + || s.expanding) + handle_stripe_fill(sh, &s, disks); + /* Now to consider new write requests and what else, if anything * should be read. We do not handle new writes when: * 1/ A 'write' operation (copy+xor) is already in flight. @@ -5529,6 +5532,10 @@ static int run(struct mddev *mddev) * discard data disk but write parity disk */ stripe = stripe * PAGE_SIZE; + /* Round up to power of 2, as discard handling + * currently assumes that */ + while ((stripe-1) & stripe) + stripe = (stripe | (stripe-1)) + 1; mddev->queue->limits.discard_alignment = stripe; mddev->queue->limits.discard_granularity = stripe; /* diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c index 8f52fc858e4..5a5cd2ace4a 100644 --- a/drivers/mtd/devices/slram.c +++ b/drivers/mtd/devices/slram.c @@ -240,7 +240,7 @@ static int parse_cmdline(char *devname, char *szstart, char *szlength) if (*(szlength) != '+') { devlength = simple_strtoul(szlength, &buffer, 0); - devlength = handle_unit(devlength, buffer) - devstart; + devlength = handle_unit(devlength, buffer); if (devlength < devstart) goto err_out; diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index ec6841d8e95..1a03b7f673c 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2983,13 +2983,15 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip, /* * Field definitions are in the following datasheets: * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32) - * New style (6 byte ID): Samsung K9GAG08U0F (p.44) + * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44) * Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22) * - * Check for ID length, cell type, and Hynix/Samsung ID to decide what - * to do. + * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung + * ID to decide what to do. 
*/ - if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG) { + if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG && + (chip->cellinfo & NAND_CI_CELLTYPE_MSK) && + id_data[5] != 0x00) { /* Calc pagesize */ mtd->writesize = 2048 << (extid & 0x03); extid >>= 2; diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c index 64be8f0848b..d9127e2ed80 100644 --- a/drivers/mtd/ofpart.c +++ b/drivers/mtd/ofpart.c @@ -121,7 +121,7 @@ static int parse_ofoldpart_partitions(struct mtd_info *master, nr_parts = plen / sizeof(part[0]); *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL); - if (!pparts) + if (!*pparts) return -ENOMEM; names = of_get_property(dp, "partition-names", &plen); diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index 7153e0d2710..b3f41f20062 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c @@ -3694,7 +3694,7 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int * flexonenand_set_boundary - Writes the SLC boundary * @param mtd - mtd info structure */ -int flexonenand_set_boundary(struct mtd_info *mtd, int die, +static int flexonenand_set_boundary(struct mtd_info *mtd, int die, int boundary, int lock) { struct onenand_chip *this = mtd->priv; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b2530b00212..5f5b69f37d2 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1379,6 +1379,8 @@ static void bond_compute_features(struct bonding *bond) struct net_device *bond_dev = bond->dev; netdev_features_t vlan_features = BOND_VLAN_FEATURES; unsigned short max_hard_header_len = ETH_HLEN; + unsigned int gso_max_size = GSO_MAX_SIZE; + u16 gso_max_segs = GSO_MAX_SEGS; int i; unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE; @@ -1394,11 +1396,16 @@ static void bond_compute_features(struct bonding *bond) dst_release_flag &= slave->dev->priv_flags; if (slave->dev->hard_header_len > max_hard_header_len) max_hard_header_len = slave->dev->hard_header_len; + + gso_max_size = min(gso_max_size, slave->dev->gso_max_size); + gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs); } done: bond_dev->vlan_features = vlan_features; bond_dev->hard_header_len = max_hard_header_len; + bond_dev->gso_max_segs = gso_max_segs; + netif_set_gso_max_size(bond_dev, gso_max_size); flags = bond_dev->priv_flags & ~IFF_XMIT_DST_RELEASE; bond_dev->priv_flags = flags | dst_release_flag; diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index d04911d33b6..47618e50535 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -813,6 +813,7 @@ static int __init ne_drv_probe(struct platform_device *pdev) dev->irq = irq[this_dev]; dev->mem_end = bad[this_dev]; } + SET_NETDEV_DEV(dev, &pdev->dev); err = do_ne_probe(dev); if (err) { free_netdev(dev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 3519fed58c3..54b8c1f19d3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -9706,10 +9706,13 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp) */ static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp) { - u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); - if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { - BNX2X_ERR("was error bit was found to be set in pglueb upon startup. 
Clearing"); - REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp)); + if (!CHIP_IS_E1x(bp)) { + u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { + BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing"); + REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, + 1 << BP_FUNC(bp)); + } } } diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 1c818254b7b..b01f83a044c 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -979,17 +979,6 @@ static void cp_init_hw (struct cp_private *cp) cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4))); - cpw32_f(HiTxRingAddr, 0); - cpw32_f(HiTxRingAddr + 4, 0); - - ring_dma = cp->ring_dma; - cpw32_f(RxRingAddr, ring_dma & 0xffffffff); - cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16); - - ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE; - cpw32_f(TxRingAddr, ring_dma & 0xffffffff); - cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16); - cp_start_hw(cp); cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */ @@ -1003,6 +992,17 @@ static void cp_init_hw (struct cp_private *cp) cpw8(Config5, cpr8(Config5) & PMEStatus); + cpw32_f(HiTxRingAddr, 0); + cpw32_f(HiTxRingAddr + 4, 0); + + ring_dma = cp->ring_dma; + cpw32_f(RxRingAddr, ring_dma & 0xffffffff); + cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16); + + ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE; + cpw32_f(TxRingAddr, ring_dma & 0xffffffff); + cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16); + cpw16(MultiIntr, 0); cpw8_f(Cfg9346, Cfg9346_Lock); diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index fb9f6b38511..edf5edb1314 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -2479,7 +2479,7 @@ static int sis900_resume(struct pci_dev *pci_dev) netif_start_queue(net_dev); /* Workaround for EDB */ - sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); + sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); /* Enable all known interrupts by setting the interrupt mask. */ sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 77e6db9dcfe..a788501e978 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -894,6 +894,8 @@ out: return IRQ_HANDLED; } +static void axienet_dma_err_handler(unsigned long data); + /** * axienet_open - Driver open routine. 
* @ndev: Pointer to net_device structure diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 98934bdf6ac..477d6729b17 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -1102,10 +1102,12 @@ static int init_queues(struct port *port) { int i; - if (!ports_open) - if (!(dma_pool = dma_pool_create(DRV_NAME, NULL, - POOL_ALLOC_SIZE, 32, 0))) + if (!ports_open) { + dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev, + POOL_ALLOC_SIZE, 32, 0); + if (!dma_pool) return -ENOMEM; + } if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL, &port->desc_tab_phys))) diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c index 5039f08f5a5..43e9ab4f4d7 100644 --- a/drivers/net/irda/sir_dev.c +++ b/drivers/net/irda/sir_dev.c @@ -222,7 +222,7 @@ static void sirdev_config_fsm(struct work_struct *work) break; case SIRDEV_STATE_DONGLE_SPEED: - if (dev->dongle_drv->reset) { + if (dev->dongle_drv->set_speed) { ret = dev->dongle_drv->set_speed(dev, fsm->param); if (ret < 0) { fsm->result = ret; diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 899274f2f9b..2ed1140df3e 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -185,17 +185,20 @@ static int __devinit mdio_gpio_probe(struct platform_device *pdev) { struct mdio_gpio_platform_data *pdata; struct mii_bus *new_bus; - int ret; + int ret, bus_id; - if (pdev->dev.of_node) + if (pdev->dev.of_node) { pdata = mdio_gpio_of_get_data(pdev); - else + bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio"); + } else { pdata = pdev->dev.platform_data; + bus_id = pdev->id; + } if (!pdata) return -ENODEV; - new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, pdev->id); + new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, bus_id); if (!new_bus) return -ENODEV; diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c index 9db0171e936..c5db428e73f 100644 --- a/drivers/net/team/team_mode_broadcast.c +++ b/drivers/net/team/team_mode_broadcast.c @@ -29,8 +29,8 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb) if (last) { skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) { - ret = team_dev_queue_xmit(team, last, - skb2); + ret = !team_dev_queue_xmit(team, last, + skb2); if (!sum_ret) sum_ret = ret; } @@ -39,7 +39,7 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb) } } if (last) { - ret = team_dev_queue_xmit(team, last, skb); + ret = !team_dev_queue_xmit(team, last, skb); if (!sum_ret) sum_ret = ret; } diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index 3f575afd8cf..e9a3da588e9 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c @@ -969,10 +969,12 @@ static int init_hdlc_queues(struct port *port) { int i; - if (!ports_open) - if (!(dma_pool = dma_pool_create(DRV_NAME, NULL, - POOL_ALLOC_SIZE, 32, 0))) + if (!ports_open) { + dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev, + POOL_ALLOC_SIZE, 32, 0); + if (!dma_pool) return -ENOMEM; + } if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL, &port->desc_tab_phys))) diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 71cd9f0c96a..741e73dece3 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -1456,7 +1456,7 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type) switch (type) { case ATH9K_RESET_POWER_ON: ret = ath9k_hw_set_reset_power_on(ah); - 
if (!ret) + if (ret) ah->reset_power_on = true; break; case ATH9K_RESET_WARM: diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index fa4d1b8cd9f..2d9eee93c74 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c @@ -1354,6 +1354,20 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw, vif_priv->ctx = ctx; ctx->vif = vif; + /* + * In SNIFFER device type, the firmware reports the FCS to + * the host, rather than snipping it off. Unfortunately, + * mac80211 doesn't (yet) provide a per-packet flag for + * this, so that we have to set the hardware flag based + * on the interfaces added. As the monitor interface can + * only be present by itself, and will be removed before + * other interfaces are added, this is safe. + */ + if (vif->type == NL80211_IFTYPE_MONITOR) + priv->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS; + else + priv->hw->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS; + err = iwl_setup_interface(priv, ctx); if (!err || reset) goto out; diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index da6c49177fc..be52a0a8d64 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c @@ -890,9 +890,6 @@ mwifiex_cmd_timeout_func(unsigned long function_context) return; } cmd_node = adapter->curr_cmd; - if (cmd_node->wait_q_enabled) - adapter->cmd_wait_q.status = -ETIMEDOUT; - if (cmd_node) { adapter->dbg.timeout_cmd_id = adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index]; @@ -941,6 +938,14 @@ mwifiex_cmd_timeout_func(unsigned long function_context) dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n", adapter->ps_mode, adapter->ps_state); + + if (cmd_node->wait_q_enabled) { + adapter->cmd_wait_q.status = -ETIMEDOUT; + wake_up_interruptible(&adapter->cmd_wait_q.wait); + mwifiex_cancel_pending_ioctl(adapter); + /* reset cmd_sent flag to unblock new commands */ + adapter->cmd_sent = false; + } } if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) mwifiex_init_fw_complete(adapter); diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index fc8a9bfa124..82cf0fa2d9f 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -161,7 +161,6 @@ static int mwifiex_sdio_suspend(struct device *dev) struct sdio_mmc_card *card; struct mwifiex_adapter *adapter; mmc_pm_flag_t pm_flag = 0; - int hs_actived = 0; int i; int ret = 0; @@ -188,12 +187,14 @@ static int mwifiex_sdio_suspend(struct device *dev) adapter = card->adapter; /* Enable the Host Sleep */ - hs_actived = mwifiex_enable_hs(adapter); - if (hs_actived) { - pr_debug("cmd: suspend with MMC_PM_KEEP_POWER\n"); - ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + if (!mwifiex_enable_hs(adapter)) { + dev_err(adapter->dev, "cmd: failed to suspend\n"); + return -EFAULT; } + dev_dbg(adapter->dev, "cmd: suspend with MMC_PM_KEEP_POWER\n"); + ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + /* Indicate device suspended */ adapter->is_suspended = true; diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 9970c2b1b19..b7e6607e6b6 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -297,6 +297,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { /*=== Customer ID ===*/ /****** 8188CU ********/ {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/ + 
{RTL_USB_DEVICE(0x050d, 0x11f2, rtl92cu_hal_cfg)}, /*Belkin - ISY*/ {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/ {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index caa011008cd..fc24eb9b394 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -452,29 +452,85 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, /* Grant backend access to each skb fragment page. */ for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; + struct page *page = skb_frag_page(frag); - tx->flags |= XEN_NETTXF_more_data; + len = skb_frag_size(frag); + offset = frag->page_offset; - id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); - np->tx_skbs[id].skb = skb_get(skb); - tx = RING_GET_REQUEST(&np->tx, prod++); - tx->id = id; - ref = gnttab_claim_grant_reference(&np->gref_tx_head); - BUG_ON((signed short)ref < 0); + /* Data must not cross a page boundary. */ + BUG_ON(len + offset > PAGE_SIZE<<compound_order(page)); - mfn = pfn_to_mfn(page_to_pfn(skb_frag_page(frag))); - gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, - mfn, GNTMAP_readonly); + /* Skip unused frames from start of page */ + page += offset >> PAGE_SHIFT; + offset &= ~PAGE_MASK; - tx->gref = np->grant_tx_ref[id] = ref; - tx->offset = frag->page_offset; - tx->size = skb_frag_size(frag); - tx->flags = 0; + while (len > 0) { + unsigned long bytes; + + BUG_ON(offset >= PAGE_SIZE); + + bytes = PAGE_SIZE - offset; + if (bytes > len) + bytes = len; + + tx->flags |= XEN_NETTXF_more_data; + + id = get_id_from_freelist(&np->tx_skb_freelist, + np->tx_skbs); + np->tx_skbs[id].skb = skb_get(skb); + tx = RING_GET_REQUEST(&np->tx, prod++); + tx->id = id; + ref = gnttab_claim_grant_reference(&np->gref_tx_head); + BUG_ON((signed short)ref < 0); + + mfn = pfn_to_mfn(page_to_pfn(page)); + gnttab_grant_foreign_access_ref(ref, + np->xbdev->otherend_id, + mfn, GNTMAP_readonly); + + tx->gref = np->grant_tx_ref[id] = ref; + tx->offset = offset; + tx->size = bytes; + tx->flags = 0; + + offset += bytes; + len -= bytes; + + /* Next frame */ + if (offset == PAGE_SIZE && len) { + BUG_ON(!PageCompound(page)); + page++; + offset = 0; + } + } } np->tx.req_prod_pvt = prod; } +/* + * Count how many ring slots are required to send the frags of this + * skb. Each frag might be a compound page. 
+ */ +static int xennet_count_skb_frag_slots(struct sk_buff *skb) +{ + int i, frags = skb_shinfo(skb)->nr_frags; + int pages = 0; + + for (i = 0; i < frags; i++) { + skb_frag_t *frag = skb_shinfo(skb)->frags + i; + unsigned long size = skb_frag_size(frag); + unsigned long offset = frag->page_offset; + + /* Skip unused frames from start of page */ + offset &= ~PAGE_MASK; + + pages += PFN_UP(offset + size); + } + + return pages; +} + static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; @@ -487,23 +543,23 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) grant_ref_t ref; unsigned long mfn; int notify; - int frags = skb_shinfo(skb)->nr_frags; + int slots; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned long flags; - frags += DIV_ROUND_UP(offset + len, PAGE_SIZE); - if (unlikely(frags > MAX_SKB_FRAGS + 1)) { - printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", - frags); - dump_stack(); + slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) + + xennet_count_skb_frag_slots(skb); + if (unlikely(slots > MAX_SKB_FRAGS + 1)) { + net_alert_ratelimited( + "xennet: skb rides the rocket: %d slots\n", slots); goto drop; } spin_lock_irqsave(&np->tx_lock, flags); if (unlikely(!netif_carrier_ok(dev) || - (frags > 1 && !xennet_can_sg(dev)) || + (slots > 1 && !xennet_can_sg(dev)) || netif_needs_gso(skb, netif_skb_features(skb)))) { spin_unlock_irqrestore(&np->tx_lock, flags); goto drop; diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index 18e279d3e83..ada681b01a1 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c @@ -702,13 +702,14 @@ static void pn533_wq_cmd(struct work_struct *work) cmd = list_first_entry(&dev->cmd_queue, struct pn533_cmd, queue); + list_del(&cmd->queue); + mutex_unlock(&dev->cmd_lock); __pn533_send_cmd_frame_async(dev, cmd->out_frame, cmd->in_frame, cmd->in_frame_len, cmd->cmd_complete, cmd->arg, cmd->flags); - list_del(&cmd->queue); kfree(cmd); } @@ -1680,11 +1681,14 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev, static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, u8 *params, int params_len) { - struct pn533_cmd_jump_dep *cmd; struct pn533_cmd_jump_dep_response *resp; struct nfc_target nfc_target; u8 target_gt_len; int rc; + struct pn533_cmd_jump_dep *cmd = (struct pn533_cmd_jump_dep *)arg; + u8 active = cmd->active; + + kfree(arg); if (params_len == -ENOENT) { nfc_dev_dbg(&dev->interface->dev, ""); @@ -1706,7 +1710,6 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, } resp = (struct pn533_cmd_jump_dep_response *) params; - cmd = (struct pn533_cmd_jump_dep *) arg; rc = resp->status & PN533_CMD_RET_MASK; if (rc != PN533_CMD_RET_SUCCESS) { nfc_dev_err(&dev->interface->dev, @@ -1736,7 +1739,7 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, if (rc == 0) rc = nfc_dep_link_is_up(dev->nfc_dev, dev->nfc_dev->targets[0].idx, - !cmd->active, NFC_RF_INITIATOR); + !active, NFC_RF_INITIATOR); return 0; } @@ -1821,12 +1824,8 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame, dev->in_maxlen, pn533_in_dep_link_up_complete, cmd, GFP_KERNEL); - if (rc) - goto out; - - -out: - kfree(cmd); + if (rc < 0) + kfree(cmd); return rc; } @@ -2080,8 +2079,12 @@ error: static int pn533_tm_send_complete(struct pn533 *dev, void *arg, u8 *params, int params_len) { + struct sk_buff *skb_out = arg; + 
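xennet_count_skb_frag_slots() above computes how many ring slots a fragment needs when that fragment may sit in a compound (higher-order) page: only the offset within a single page matters for packing, so the offset is masked down and the count is the number of PAGE_SIZE windows covering offset+size. A stand-alone restatement of that arithmetic, assuming the usual 4 KiB page size:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

/* Slots needed for a fragment that starts `offset` bytes into its
 * (possibly compound) page and is `size` bytes long. */
static unsigned long frag_slots(unsigned long offset, unsigned long size)
{
	offset &= ~PAGE_MASK;		/* keep only the in-page offset */
	return PFN_UP(offset + size);	/* pages covering offset..offset+size */
}

int main(void)
{
	printf("%lu\n", frag_slots(200, 100));	/* fits in one page -> 1 */
	printf("%lu\n", frag_slots(3000, 8000));	/* spans three pages -> 3 */
	return 0;
}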
nfc_dev_dbg(&dev->interface->dev, "%s", __func__); + dev_kfree_skb(skb_out); + if (params_len < 0) { nfc_dev_err(&dev->interface->dev, "Error %d when sending data", @@ -2119,7 +2122,7 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb) rc = pn533_send_cmd_frame_async(dev, out_frame, dev->in_frame, dev->in_maxlen, pn533_tm_send_complete, - NULL, GFP_KERNEL); + skb, GFP_KERNEL); if (rc) { nfc_dev_err(&dev->interface->dev, "Error %d when trying to send data", rc); diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index d96caefd914..aeecf0f72ca 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -178,7 +178,7 @@ config PINCTRL_COH901 ports of 8 GPIO pins each. config PINCTRL_SAMSUNG - bool "Samsung pinctrl driver" + bool depends on OF && GPIOLIB select PINMUX select PINCONF diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index c1bafc3f3fb..9594ab62702 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -1972,7 +1972,7 @@ sci_io_request_frame_handler(struct isci_request *ireq, frame_index, (void **)&frame_buffer); - sci_controller_copy_sata_response(&ireq->stp.req, + sci_controller_copy_sata_response(&ireq->stp.rsp, frame_header, frame_buffer); diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 2936b447cae..2c0d0ec8150 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -55,6 +55,7 @@ #include <linux/cpu.h> #include <linux/mutex.h> #include <linux/async.h> +#include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -1062,6 +1063,50 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf, EXPORT_SYMBOL_GPL(scsi_get_vpd_page); /** + * scsi_report_opcode - Find out if a given command opcode is supported + * @sdev: scsi device to query + * @buffer: scratch buffer (must be at least 20 bytes long) + * @len: length of buffer + * @opcode: opcode for command to look up + * + * Uses the REPORT SUPPORTED OPERATION CODES to look up the given + * opcode. Returns 0 if RSOC fails or if the command opcode is + * unsupported. Returns 1 if the device claims to support the command. 
+ */ +int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, + unsigned int len, unsigned char opcode) +{ + unsigned char cmd[16]; + struct scsi_sense_hdr sshdr; + int result; + + if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3) + return 0; + + memset(cmd, 0, 16); + cmd[0] = MAINTENANCE_IN; + cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES; + cmd[2] = 1; /* One command format */ + cmd[3] = opcode; + put_unaligned_be32(len, &cmd[6]); + memset(buffer, 0, len); + + result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, + &sshdr, 30 * HZ, 3, NULL); + + if (result && scsi_sense_valid(&sshdr) && + sshdr.sense_key == ILLEGAL_REQUEST && + (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00) + return 0; + + if ((buffer[1] & 3) == 3) /* Command supported */ + return 1; + + return 0; +} +EXPORT_SYMBOL(scsi_report_opcode); + +/** * scsi_device_get - get an additional reference to a scsi_device * @sdev: device to get a reference to * diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index da36a3a81a9..9032e910bca 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -900,11 +900,23 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) action = ACTION_FAIL; error = -EILSEQ; /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ - } else if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) && - (cmd->cmnd[0] == UNMAP || - cmd->cmnd[0] == WRITE_SAME_16 || - cmd->cmnd[0] == WRITE_SAME)) { - description = "Discard failure"; + } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) { + switch (cmd->cmnd[0]) { + case UNMAP: + description = "Discard failure"; + break; + case WRITE_SAME: + case WRITE_SAME_16: + if (cmd->cmnd[1] & 0x8) + description = "Discard failure"; + else + description = + "Write same failure"; + break; + default: + description = "Invalid command failure"; + break; + } action = ACTION_FAIL; error = -EREMOTEIO; } else diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 12f6fdfc114..352bc77b7c8 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -99,6 +99,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); #endif static void sd_config_discard(struct scsi_disk *, unsigned int); +static void sd_config_write_same(struct scsi_disk *); static int sd_revalidate_disk(struct gendisk *); static void sd_unlock_native_capacity(struct gendisk *disk); static int sd_probe(struct device *); @@ -395,6 +396,45 @@ sd_store_max_medium_access_timeouts(struct device *dev, return err ? 
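scsi_report_opcode() above issues MAINTENANCE IN with the REPORT SUPPORTED OPERATION CODES service action: reporting option 1 (query a single opcode), the opcode in byte 3, and the allocation length as a big-endian 32-bit value at bytes 6-9; a SUPPORT field of 3 in the returned data means the device claims standard-conformant support. A self-contained sketch of the CDB layout and the support check; the two opcode values are the numeric equivalents of the kernel's MAINTENANCE_IN and MI_REPORT_SUPPORTED_OPERATION_CODES constants.

#include <stdio.h>
#include <string.h>

#define MAINTENANCE_IN				0xa3
#define MI_REPORT_SUPPORTED_OPERATION_CODES	0x0c

static void put_be32(unsigned int v, unsigned char *p)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

/* Fill the CDB the same way scsi_report_opcode() does. */
static void build_rsoc_cdb(unsigned char cdb[16], unsigned char opcode,
			   unsigned int alloc_len)
{
	memset(cdb, 0, 16);
	cdb[0] = MAINTENANCE_IN;
	cdb[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
	cdb[2] = 1;			/* reporting option: one command format */
	cdb[3] = opcode;		/* opcode being queried */
	put_be32(alloc_len, &cdb[6]);	/* allocation length, big endian */
}

/* SUPPORT field (low bits of byte 1 of the parameter data): 3 means the
 * command is supported in conformance with the standard. */
static int rsoc_supported(const unsigned char *resp)
{
	return (resp[1] & 3) == 3;
}

int main(void)
{
	unsigned char cdb[16], resp[20] = { 0, 3 };

	build_rsoc_cdb(cdb, 0x93 /* WRITE SAME(16) */, sizeof(resp));
	printf("WRITE SAME(16) %ssupported\n", rsoc_supported(resp) ? "" : "not ");
	return 0;
}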
err : count; } +static ssize_t +sd_show_write_same_blocks(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + + return snprintf(buf, 20, "%u\n", sdkp->max_ws_blocks); +} + +static ssize_t +sd_store_write_same_blocks(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + unsigned long max; + int err; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (sdp->type != TYPE_DISK) + return -EINVAL; + + err = kstrtoul(buf, 10, &max); + + if (err) + return err; + + if (max == 0) + sdp->no_write_same = 1; + else if (max <= SD_MAX_WS16_BLOCKS) + sdkp->max_ws_blocks = max; + + sd_config_write_same(sdkp); + + return count; +} + static struct device_attribute sd_disk_attrs[] = { __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type, sd_store_cache_type), @@ -410,6 +450,8 @@ static struct device_attribute sd_disk_attrs[] = { __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL), __ATTR(provisioning_mode, S_IRUGO|S_IWUSR, sd_show_provisioning_mode, sd_store_provisioning_mode), + __ATTR(max_write_same_blocks, S_IRUGO|S_IWUSR, + sd_show_write_same_blocks, sd_store_write_same_blocks), __ATTR(max_medium_access_timeouts, S_IRUGO|S_IWUSR, sd_show_max_medium_access_timeouts, sd_store_max_medium_access_timeouts), @@ -561,19 +603,23 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) return; case SD_LBP_UNMAP: - max_blocks = min_not_zero(sdkp->max_unmap_blocks, 0xffffffff); + max_blocks = min_not_zero(sdkp->max_unmap_blocks, + (u32)SD_MAX_WS16_BLOCKS); break; case SD_LBP_WS16: - max_blocks = min_not_zero(sdkp->max_ws_blocks, 0xffffffff); + max_blocks = min_not_zero(sdkp->max_ws_blocks, + (u32)SD_MAX_WS16_BLOCKS); break; case SD_LBP_WS10: - max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff); + max_blocks = min_not_zero(sdkp->max_ws_blocks, + (u32)SD_MAX_WS10_BLOCKS); break; case SD_LBP_ZERO: - max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff); + max_blocks = min_not_zero(sdkp->max_ws_blocks, + (u32)SD_MAX_WS10_BLOCKS); q->limits.discard_zeroes_data = 1; break; } @@ -583,29 +629,26 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) } /** - * scsi_setup_discard_cmnd - unmap blocks on thinly provisioned device + * sd_setup_discard_cmnd - unmap blocks on thinly provisioned device * @sdp: scsi device to operate one * @rq: Request to prepare * * Will issue either UNMAP or WRITE SAME(16) depending on preference * indicated by target device. 
**/ -static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) +static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) { struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); - struct bio *bio = rq->bio; - sector_t sector = bio->bi_sector; - unsigned int nr_sectors = bio_sectors(bio); + sector_t sector = blk_rq_pos(rq); + unsigned int nr_sectors = blk_rq_sectors(rq); + unsigned int nr_bytes = blk_rq_bytes(rq); unsigned int len; int ret; char *buf; struct page *page; - if (sdkp->device->sector_size == 4096) { - sector >>= 3; - nr_sectors >>= 3; - } - + sector >>= ilog2(sdp->sector_size) - 9; + nr_sectors >>= ilog2(sdp->sector_size) - 9; rq->timeout = SD_TIMEOUT; memset(rq->cmd, 0, rq->cmd_len); @@ -660,6 +703,7 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) blk_add_request_payload(rq, page, len); ret = scsi_setup_blk_pc_cmnd(sdp, rq); rq->buffer = page_address(page); + rq->__data_len = nr_bytes; out: if (ret != BLKPREP_OK) { @@ -669,6 +713,83 @@ out: return ret; } +static void sd_config_write_same(struct scsi_disk *sdkp) +{ + struct request_queue *q = sdkp->disk->queue; + unsigned int logical_block_size = sdkp->device->sector_size; + unsigned int blocks = 0; + + if (sdkp->device->no_write_same) { + sdkp->max_ws_blocks = 0; + goto out; + } + + /* Some devices can not handle block counts above 0xffff despite + * supporting WRITE SAME(16). Consequently we default to 64k + * blocks per I/O unless the device explicitly advertises a + * bigger limit. + */ + if (sdkp->max_ws_blocks == 0) + sdkp->max_ws_blocks = SD_MAX_WS10_BLOCKS; + + if (sdkp->ws16 || sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS) + blocks = min_not_zero(sdkp->max_ws_blocks, + (u32)SD_MAX_WS16_BLOCKS); + else + blocks = min_not_zero(sdkp->max_ws_blocks, + (u32)SD_MAX_WS10_BLOCKS); + +out: + blk_queue_max_write_same_sectors(q, blocks * (logical_block_size >> 9)); +} + +/** + * sd_setup_write_same_cmnd - write the same data to multiple blocks + * @sdp: scsi device to operate one + * @rq: Request to prepare + * + * Will issue either WRITE SAME(10) or WRITE SAME(16) depending on + * preference indicated by target device. + **/ +static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq) +{ + struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); + struct bio *bio = rq->bio; + sector_t sector = blk_rq_pos(rq); + unsigned int nr_sectors = blk_rq_sectors(rq); + unsigned int nr_bytes = blk_rq_bytes(rq); + int ret; + + if (sdkp->device->no_write_same) + return BLKPREP_KILL; + + BUG_ON(bio_offset(bio) || bio_iovec(bio)->bv_len != sdp->sector_size); + + sector >>= ilog2(sdp->sector_size) - 9; + nr_sectors >>= ilog2(sdp->sector_size) - 9; + + rq->__data_len = sdp->sector_size; + rq->timeout = SD_WRITE_SAME_TIMEOUT; + memset(rq->cmd, 0, rq->cmd_len); + + if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) { + rq->cmd_len = 16; + rq->cmd[0] = WRITE_SAME_16; + put_unaligned_be64(sector, &rq->cmd[2]); + put_unaligned_be32(nr_sectors, &rq->cmd[10]); + } else { + rq->cmd_len = 10; + rq->cmd[0] = WRITE_SAME; + put_unaligned_be32(sector, &rq->cmd[2]); + put_unaligned_be16(nr_sectors, &rq->cmd[7]); + } + + ret = scsi_setup_blk_pc_cmnd(sdp, rq); + rq->__data_len = nr_bytes; + + return ret; +} + static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq) { rq->timeout = SD_FLUSH_TIMEOUT; @@ -712,7 +833,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) * block PC requests to make life easier. 
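sd_setup_write_same_cmnd() above first converts the request's 512-byte-sector start and length into logical blocks with a shift of ilog2(sector_size) - 9, then uses WRITE SAME(16) when the device advertises it or when the LBA no longer fits in 32 bits or the block count in 16 bits, and WRITE SAME(10) otherwise. The selection and CDB layout, restated as a stand-alone sketch with the put_unaligned_be*() helpers open-coded:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WRITE_SAME	0x41
#define WRITE_SAME_16	0x93

static void put_be16(uint16_t v, uint8_t *p) { p[0] = v >> 8; p[1] = v; }
static void put_be32(uint32_t v, uint8_t *p)
{ p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v; }
static void put_be64(uint64_t v, uint8_t *p)
{ put_be32(v >> 32, p); put_be32(v, p + 4); }

/* ilog2() for a power-of-two sector size (512, 4096, ...). */
static unsigned int ilog2_pow2(unsigned int v)
{
	unsigned int r = 0;
	while (v >>= 1)
		r++;
	return r;
}

/* Fill a WRITE SAME CDB; returns its length (10 or 16). */
static int build_write_same_cdb(uint8_t cdb[16], uint64_t sector512,
				uint64_t nr_sectors512,
				unsigned int sector_size, int prefer_ws16)
{
	unsigned int shift = ilog2_pow2(sector_size) - 9;
	uint64_t lba = sector512 >> shift;		/* logical block address */
	uint64_t blocks = nr_sectors512 >> shift;	/* logical block count */

	memset(cdb, 0, 16);
	if (prefer_ws16 || lba > 0xffffffffULL || blocks > 0xffff) {
		cdb[0] = WRITE_SAME_16;
		put_be64(lba, &cdb[2]);
		put_be32(blocks, &cdb[10]);
		return 16;
	}
	cdb[0] = WRITE_SAME;
	put_be32(lba, &cdb[2]);
	put_be16(blocks, &cdb[7]);
	return 10;
}

int main(void)
{
	uint8_t cdb[16];

	/* On a 4Kn disk, 512-byte sector 80 is logical block 10. */
	printf("cdb len %d, opcode 0x%02x\n",
	       build_write_same_cdb(cdb, 80, 8, 4096, 0), cdb[0]);
	return 0;
}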
*/ if (rq->cmd_flags & REQ_DISCARD) { - ret = scsi_setup_discard_cmnd(sdp, rq); + ret = sd_setup_discard_cmnd(sdp, rq); + goto out; + } else if (rq->cmd_flags & REQ_WRITE_SAME) { + ret = sd_setup_write_same_cmnd(sdp, rq); goto out; } else if (rq->cmd_flags & REQ_FLUSH) { ret = scsi_setup_flush_cmnd(sdp, rq); @@ -1482,12 +1606,21 @@ static int sd_done(struct scsi_cmnd *SCpnt) unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt); struct scsi_sense_hdr sshdr; struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk); + struct request *req = SCpnt->request; int sense_valid = 0; int sense_deferred = 0; unsigned char op = SCpnt->cmnd[0]; + unsigned char unmap = SCpnt->cmnd[1] & 8; - if ((SCpnt->request->cmd_flags & REQ_DISCARD) && !result) - scsi_set_resid(SCpnt, 0); + if (req->cmd_flags & REQ_DISCARD || req->cmd_flags & REQ_WRITE_SAME) { + if (!result) { + good_bytes = blk_rq_bytes(req); + scsi_set_resid(SCpnt, 0); + } else { + good_bytes = 0; + scsi_set_resid(SCpnt, blk_rq_bytes(req)); + } + } if (result) { sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr); @@ -1536,9 +1669,25 @@ static int sd_done(struct scsi_cmnd *SCpnt) if (sshdr.asc == 0x10) /* DIX: Host detected corruption */ good_bytes = sd_completed_bytes(SCpnt); /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ - if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) && - (op == UNMAP || op == WRITE_SAME_16 || op == WRITE_SAME)) - sd_config_discard(sdkp, SD_LBP_DISABLE); + if (sshdr.asc == 0x20 || sshdr.asc == 0x24) { + switch (op) { + case UNMAP: + sd_config_discard(sdkp, SD_LBP_DISABLE); + break; + case WRITE_SAME_16: + case WRITE_SAME: + if (unmap) + sd_config_discard(sdkp, SD_LBP_DISABLE); + else { + sdkp->device->no_write_same = 1; + sd_config_write_same(sdkp); + + good_bytes = 0; + req->__data_len = blk_rq_bytes(req); + req->cmd_flags |= REQ_QUIET; + } + } + } break; default: break; @@ -2374,9 +2523,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) if (buffer[3] == 0x3c) { unsigned int lba_count, desc_count; - sdkp->max_ws_blocks = - (u32) min_not_zero(get_unaligned_be64(&buffer[36]), - (u64)0xffffffff); + sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]); if (!sdkp->lbpme) goto out; @@ -2469,6 +2616,13 @@ static void sd_read_block_provisioning(struct scsi_disk *sdkp) kfree(buffer); } +static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer) +{ + if (scsi_report_opcode(sdkp->device, buffer, SD_BUF_SIZE, + WRITE_SAME_16)) + sdkp->ws16 = 1; +} + static int sd_try_extended_inquiry(struct scsi_device *sdp) { /* @@ -2528,6 +2682,7 @@ static int sd_revalidate_disk(struct gendisk *disk) sd_read_write_protect_flag(sdkp, buffer); sd_read_cache_type(sdkp, buffer); sd_read_app_tag_own(sdkp, buffer); + sd_read_write_same(sdkp, buffer); } sdkp->first_scan = 0; @@ -2545,6 +2700,7 @@ static int sd_revalidate_disk(struct gendisk *disk) blk_queue_flush(sdkp->disk->queue, flush); set_capacity(disk, sdkp->capacity); + sd_config_write_same(sdkp); kfree(buffer); out: diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 47c52a6d733..74a1e4ca540 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -14,6 +14,7 @@ #define SD_TIMEOUT (30 * HZ) #define SD_MOD_TIMEOUT (75 * HZ) #define SD_FLUSH_TIMEOUT (60 * HZ) +#define SD_WRITE_SAME_TIMEOUT (120 * HZ) /* * Number of allowed retries @@ -39,6 +40,11 @@ enum { }; enum { + SD_MAX_WS10_BLOCKS = 0xffff, + SD_MAX_WS16_BLOCKS = 0x7fffff, +}; + +enum { SD_LBP_FULL = 0, /* Full logical block provisioning */ SD_LBP_UNMAP, /* Use UNMAP command */ 
SD_LBP_WS16, /* Use WRITE SAME(16) with UNMAP bit */ @@ -77,6 +83,7 @@ struct scsi_disk { unsigned lbpws : 1; unsigned lbpws10 : 1; unsigned lbpvpd : 1; + unsigned ws16 : 1; }; #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index a3d54366afc..92f35abee92 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -186,6 +186,12 @@ static int slave_configure(struct scsi_device *sdev) /* Some devices don't handle VPD pages correctly */ sdev->skip_vpd_pages = 1; + /* Do not attempt to use REPORT SUPPORTED OPERATION CODES */ + sdev->no_report_opcodes = 1; + + /* Do not attempt to use WRITE SAME */ + sdev->no_write_same = 1; + /* Some disks return the total number of blocks in response * to READ CAPACITY rather than the highest block number. * If this device makes that mistake, tell the sd driver. */ diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c index d64ac384288..bee92846cfa 100644 --- a/drivers/video/omap2/dss/dsi.c +++ b/drivers/video/omap2/dss/dsi.c @@ -365,11 +365,20 @@ struct platform_device *dsi_get_dsidev_from_id(int module) struct omap_dss_output *out; enum omap_dss_output_id id; - id = module == 0 ? OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2; + switch (module) { + case 0: + id = OMAP_DSS_OUTPUT_DSI1; + break; + case 1: + id = OMAP_DSS_OUTPUT_DSI2; + break; + default: + return NULL; + } out = omap_dss_get_output(id); - return out->pdev; + return out ? out->pdev : NULL; } static inline void dsi_write_reg(struct platform_device *dsidev, diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c index 2ab1c3e9655..5f6eea801b0 100644 --- a/drivers/video/omap2/dss/dss.c +++ b/drivers/video/omap2/dss/dss.c @@ -697,11 +697,15 @@ static int dss_get_clocks(void) dss.dss_clk = clk; - clk = clk_get(NULL, dss.feat->clk_name); - if (IS_ERR(clk)) { - DSSERR("Failed to get %s\n", dss.feat->clk_name); - r = PTR_ERR(clk); - goto err; + if (dss.feat->clk_name) { + clk = clk_get(NULL, dss.feat->clk_name); + if (IS_ERR(clk)) { + DSSERR("Failed to get %s\n", dss.feat->clk_name); + r = PTR_ERR(clk); + goto err; + } + } else { + clk = NULL; } dss.dpll4_m4_ck = clk; @@ -805,10 +809,10 @@ static int __init dss_init_features(struct device *dev) if (cpu_is_omap24xx()) src = &omap24xx_dss_feats; - else if (cpu_is_omap34xx()) - src = &omap34xx_dss_feats; else if (cpu_is_omap3630()) src = &omap3630_dss_feats; + else if (cpu_is_omap34xx()) + src = &omap34xx_dss_feats; else if (cpu_is_omap44xx()) src = &omap44xx_dss_feats; else if (soc_is_omap54xx()) diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c index a48a7dd75b3..8c9b8b3b7f7 100644 --- a/drivers/video/omap2/dss/hdmi.c +++ b/drivers/video/omap2/dss/hdmi.c @@ -644,8 +644,10 @@ static void hdmi_dump_regs(struct seq_file *s) { mutex_lock(&hdmi.lock); - if (hdmi_runtime_get()) + if (hdmi_runtime_get()) { + mutex_unlock(&hdmi.lock); return; + } hdmi.ip_data.ops->dump_wrapper(&hdmi.ip_data, s); hdmi.ip_data.ops->dump_pll(&hdmi.ip_data, s); diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c index 606b89f1235..d630b26a005 100644 --- a/drivers/video/omap2/omapfb/omapfb-ioctl.c +++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c @@ -787,7 +787,7 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg) case OMAPFB_WAITFORVSYNC: DBG("ioctl WAITFORVSYNC\n"); - if (!display && !display->output && !display->output->manager) { 
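The hdmi_dump_regs() hunk above fixes the classic bug of returning from an error path while still holding a lock: once hdmi.lock is taken, every exit has to drop it. A tiny stand-alone illustration of the same pattern using a POSIX mutex (the kernel code uses mutex_lock()/mutex_unlock(); the function names here are made up):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int resource_get(void) { return -1; /* pretend runtime get failed */ }

static int dump_state(void)
{
	pthread_mutex_lock(&lock);

	if (resource_get()) {
		/* Early return: release the lock first, exactly as the
		 * hdmi_dump_regs() fix does with mutex_unlock(&hdmi.lock). */
		pthread_mutex_unlock(&lock);
		return -1;
	}

	printf("dumping registers...\n");

	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	return dump_state() ? 1 : 0;
}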
+ if (!display || !display->output || !display->output->manager) { r = -EINVAL; break; } diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 8adb9cc267f..71f5c459b08 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -361,13 +361,13 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) down_write(&mm->mmap_sem); vma = find_vma(mm, m.addr); - ret = -EINVAL; if (!vma || vma->vm_ops != &privcmd_vm_ops || (m.addr != vma->vm_start) || ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) || !privcmd_enforce_singleshot_mapping(vma)) { up_write(&mm->mmap_sem); + ret = -EINVAL; goto out; } @@ -383,12 +383,16 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) up_write(&mm->mmap_sem); - if (state.global_error && (version == 1)) { - /* Write back errors in second pass. */ - state.user_mfn = (xen_pfn_t *)m.arr; - state.err = err_array; - ret = traverse_pages(m.num, sizeof(xen_pfn_t), - &pagelist, mmap_return_errors_v1, &state); + if (version == 1) { + if (state.global_error) { + /* Write back errors in second pass. */ + state.user_mfn = (xen_pfn_t *)m.arr; + state.err = err_array; + ret = traverse_pages(m.num, sizeof(xen_pfn_t), + &pagelist, mmap_return_errors_v1, &state); + } else + ret = 0; + } else if (version == 2) { ret = __copy_to_user(m.err, err_array, m.num * sizeof(int)); if (ret) |
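The OMAPFB_WAITFORVSYNC hunk above swaps && for || in the NULL checks: with &&, a missing display makes the whole guard false and the following code dereferences the NULL pointer, whereas with || the chain short-circuits at the first missing link and the ioctl fails with -EINVAL. A minimal stand-alone illustration of the corrected guard (the structure names are simplified stand-ins for the omapfb types):

#include <stdio.h>
#include <stddef.h>

struct manager { int id; };
struct output  { struct manager *manager; };
struct display { struct output *output; };

/* Correct guard: bail out if any link of the chain is missing. */
static int can_wait_for_vsync(const struct display *d)
{
	if (!d || !d->output || !d->output->manager)
		return 0;
	return 1;
}

int main(void)
{
	struct manager m = { 1 };
	struct output  o = { &m };
	struct display d = { &o };

	printf("%d %d\n", can_wait_for_vsync(&d), can_wait_for_vsync(NULL));
	return 0;
}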