Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/battery.c | 9
-rw-r--r--  drivers/acpi/toshiba_acpi.c | 54
-rw-r--r--  drivers/ata/Kconfig | 44
-rw-r--r--  drivers/ata/ata_piix.c | 9
-rw-r--r--  drivers/ata/libata-core.c | 65
-rw-r--r--  drivers/ata/pata_hpt366.c | 16
-rw-r--r--  drivers/ata/pata_ninja32.c | 9
-rw-r--r--  drivers/ata/pata_sis.c | 1
-rw-r--r--  drivers/block/cciss.c | 5
-rw-r--r--  drivers/block/pktcdvd.c | 6
-rw-r--r--  drivers/cdrom/cdrom.c | 16
-rw-r--r--  drivers/char/cp437.uni | 12
-rw-r--r--  drivers/char/serial167.c | 2
-rw-r--r--  drivers/char/vt.c | 2
-rw-r--r--  drivers/char/xilinx_hwicap/buffer_icap.c | 3
-rw-r--r--  drivers/char/xilinx_hwicap/buffer_icap.h | 3
-rw-r--r--  drivers/char/xilinx_hwicap/fifo_icap.c | 3
-rw-r--r--  drivers/char/xilinx_hwicap/fifo_icap.h | 3
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.c | 3
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.h | 3
-rw-r--r--  drivers/dma/dmaengine.c | 3
-rw-r--r--  drivers/dma/ioat_dma.c | 5
-rw-r--r--  drivers/dma/iop-adma.c | 16
-rw-r--r--  drivers/dma/mv_xor.c | 15
-rw-r--r--  drivers/firewire/fw-ohci.c | 11
-rw-r--r--  drivers/firewire/fw-transaction.c | 3
-rw-r--r--  drivers/firewire/fw-transaction.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 648
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_proc.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-cpm.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-highlander.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-pmcmsp.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-s3c2410.c | 18
-rw-r--r--  drivers/ide/Kconfig | 14
-rw-r--r--  drivers/ide/ide-io.c | 6
-rw-r--r--  drivers/ide/pmac.c | 30
-rw-r--r--  drivers/ide/sgiioc4.c | 6
-rw-r--r--  drivers/ieee1394/nodemgr.c | 8
-rw-r--r--  drivers/isdn/hardware/avm/b1isa.c | 6
-rw-r--r--  drivers/isdn/hysdn/hysdn_net.c | 4
-rw-r--r--  drivers/md/bitmap.c | 22
-rw-r--r--  drivers/md/dm-table.c | 2
-rw-r--r--  drivers/message/i2o/i2o_block.c | 2
-rw-r--r--  drivers/message/i2o/iop.c | 1
-rw-r--r--  drivers/misc/sgi-gru/grufault.c | 4
-rw-r--r--  drivers/mtd/devices/m25p80.c | 28
-rw-r--r--  drivers/mtd/maps/physmap.c | 26
-rw-r--r--  drivers/mtd/nand/fsl_upm.c | 8
-rw-r--r--  drivers/mtd/nand/pasemi_nand.c | 1
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c | 1
-rw-r--r--  drivers/mtd/onenand/omap2.c | 17
-rw-r--r--  drivers/net/bnx2.c | 41
-rw-r--r--  drivers/net/bnx2.h | 6
-rw-r--r--  drivers/net/e1000e/ich8lan.c | 9
-rw-r--r--  drivers/net/enc28j60.c | 18
-rw-r--r--  drivers/net/jme.h | 2
-rw-r--r--  drivers/net/netx-eth.c | 2
-rw-r--r--  drivers/net/phy/mdio_bus.c | 5
-rw-r--r--  drivers/net/starfire.c | 5
-rw-r--r--  drivers/net/sungem.c | 144
-rw-r--r--  drivers/net/tlan.c | 23
-rw-r--r--  drivers/net/wireless/ipw2200.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.c | 24
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.c | 2
-rw-r--r--  drivers/pci/hotplug/acpiphp.h | 2
-rw-r--r--  drivers/pci/hotplug/acpiphp_core.c | 2
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 4
-rw-r--r--  drivers/pci/hotplug/ibmphp_core.c | 5
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c | 23
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c | 2
-rw-r--r--  drivers/pci/pcie/aspm.c | 29
-rw-r--r--  drivers/pci/slot.c | 1
-rw-r--r--  drivers/pcmcia/bfin_cf_pcmcia.c | 2
-rw-r--r--  drivers/rtc/rtc-ds1672.c | 6
-rw-r--r--  drivers/rtc/rtc-max6900.c | 6
-rw-r--r--  drivers/rtc/rtc-twl4030.c | 2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_hp_sw.c | 12
-rw-r--r--  drivers/scsi/sd.c | 2
-rw-r--r--  drivers/scsi/sr.c | 2
-rw-r--r--  drivers/serial/ioc3_serial.c | 6
-rw-r--r--  drivers/serial/mpc52xx_uart.c | 4
-rw-r--r--  drivers/serial/s3c2440.c | 2
-rw-r--r--  drivers/sh/maple/maple.c | 2
-rw-r--r--  drivers/staging/Kconfig | 3
-rw-r--r--  drivers/usb/class/usbtmc.c | 1
-rw-r--r--  drivers/usb/core/driver.c | 4
-rw-r--r--  drivers/usb/gadget/f_rndis.c | 4
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 4
-rw-r--r--  drivers/usb/serial/ftdi_sio.h | 6
-rw-r--r--  drivers/usb/serial/pl2303.c | 2
-rw-r--r--  drivers/usb/serial/pl2303.h | 8
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.c | 55
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 16
-rw-r--r--  drivers/video/aty/radeon_accel.c | 294
-rw-r--r--  drivers/video/aty/radeon_backlight.c | 2
-rw-r--r--  drivers/video/aty/radeon_base.c | 40
-rw-r--r--  drivers/video/aty/radeon_pm.c | 6
-rw-r--r--  drivers/video/aty/radeonfb.h | 38
-rw-r--r--  drivers/video/console/fbcon.c | 6
-rw-r--r--  drivers/video/mb862xx/mb862xxfb.c | 4
-rw-r--r--  drivers/video/omap/omapfb_main.c | 2
-rw-r--r--  drivers/watchdog/iTCO_wdt.c | 2
109 files changed, 1222 insertions(+), 913 deletions(-)
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index a0a178dd189..1423b0c0cd2 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -174,15 +174,6 @@ static int acpi_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
val->intval = battery->current_now * 1000;
- /* if power units are mW, convert to mA by
- dividing by current voltage (mV/1000) */
- if (!battery->power_unit) {
- if (battery->voltage_now) {
- val->intval /= battery->voltage_now;
- val->intval *= 1000;
- } else
- val->intval = -1;
- }
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
diff --git a/drivers/acpi/toshiba_acpi.c b/drivers/acpi/toshiba_acpi.c
index 25f531d892d..40e60fc2e59 100644
--- a/drivers/acpi/toshiba_acpi.c
+++ b/drivers/acpi/toshiba_acpi.c
@@ -824,32 +824,36 @@ static int __init toshiba_acpi_init(void)
toshiba_acpi_exit();
return -ENOMEM;
}
- }
- /* Register input device for kill switch */
- toshiba_acpi.poll_dev = input_allocate_polled_device();
- if (!toshiba_acpi.poll_dev) {
- printk(MY_ERR "unable to allocate kill-switch input device\n");
- toshiba_acpi_exit();
- return -ENOMEM;
- }
- toshiba_acpi.poll_dev->private = &toshiba_acpi;
- toshiba_acpi.poll_dev->poll = bt_poll_rfkill;
- toshiba_acpi.poll_dev->poll_interval = 1000; /* msecs */
-
- toshiba_acpi.poll_dev->input->name = toshiba_acpi.rfk_name;
- toshiba_acpi.poll_dev->input->id.bustype = BUS_HOST;
- toshiba_acpi.poll_dev->input->id.vendor = 0x0930; /* Toshiba USB ID */
- set_bit(EV_SW, toshiba_acpi.poll_dev->input->evbit);
- set_bit(SW_RFKILL_ALL, toshiba_acpi.poll_dev->input->swbit);
- input_report_switch(toshiba_acpi.poll_dev->input, SW_RFKILL_ALL, TRUE);
- input_sync(toshiba_acpi.poll_dev->input);
-
- ret = input_register_polled_device(toshiba_acpi.poll_dev);
- if (ret) {
- printk(MY_ERR "unable to register kill-switch input device\n");
- toshiba_acpi_exit();
- return ret;
+ /* Register input device for kill switch */
+ toshiba_acpi.poll_dev = input_allocate_polled_device();
+ if (!toshiba_acpi.poll_dev) {
+ printk(MY_ERR
+ "unable to allocate kill-switch input device\n");
+ toshiba_acpi_exit();
+ return -ENOMEM;
+ }
+ toshiba_acpi.poll_dev->private = &toshiba_acpi;
+ toshiba_acpi.poll_dev->poll = bt_poll_rfkill;
+ toshiba_acpi.poll_dev->poll_interval = 1000; /* msecs */
+
+ toshiba_acpi.poll_dev->input->name = toshiba_acpi.rfk_name;
+ toshiba_acpi.poll_dev->input->id.bustype = BUS_HOST;
+ /* Toshiba USB ID */
+ toshiba_acpi.poll_dev->input->id.vendor = 0x0930;
+ set_bit(EV_SW, toshiba_acpi.poll_dev->input->evbit);
+ set_bit(SW_RFKILL_ALL, toshiba_acpi.poll_dev->input->swbit);
+ input_report_switch(toshiba_acpi.poll_dev->input,
+ SW_RFKILL_ALL, TRUE);
+ input_sync(toshiba_acpi.poll_dev->input);
+
+ ret = input_register_polled_device(toshiba_acpi.poll_dev);
+ if (ret) {
+ printk(MY_ERR
+ "unable to register kill-switch input device\n");
+ toshiba_acpi_exit();
+ return ret;
+ }
}
return 0;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 78fbec8ceda..421b7c71e72 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -153,7 +153,7 @@ config SATA_PROMISE
If unsure, say N.
config SATA_SX4
- tristate "Promise SATA SX4 support"
+ tristate "Promise SATA SX4 support (Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for Promise Serial ATA SX4.
@@ -219,8 +219,8 @@ config PATA_ACPI
otherwise unsupported hardware.
config PATA_ALI
- tristate "ALi PATA support (Experimental)"
- depends on PCI && EXPERIMENTAL
+ tristate "ALi PATA support"
+ depends on PCI
help
This option enables support for the ALi ATA interfaces
found on the many ALi chipsets.
@@ -263,7 +263,7 @@ config PATA_ATIIXP
If unsure, say N.
config PATA_CMD640_PCI
- tristate "CMD640 PCI PATA support (Very Experimental)"
+ tristate "CMD640 PCI PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the CMD640 PCI IDE
@@ -291,8 +291,8 @@ config PATA_CS5520
If unsure, say N.
config PATA_CS5530
- tristate "CS5530 PATA support (Experimental)"
- depends on PCI && EXPERIMENTAL
+ tristate "CS5530 PATA support"
+ depends on PCI
help
This option enables support for the Cyrix/NatSemi/AMD CS5530
companion chip used with the MediaGX/Geode processor family.
@@ -309,8 +309,8 @@ config PATA_CS5535
If unsure, say N.
config PATA_CS5536
- tristate "CS5536 PATA support (Experimental)"
- depends on PCI && X86 && !X86_64 && EXPERIMENTAL
+ tristate "CS5536 PATA support"
+ depends on PCI && X86 && !X86_64
help
This option enables support for the AMD CS5536
companion chip used with the Geode LX processor family.
@@ -363,7 +363,7 @@ config PATA_HPT37X
If unsure, say N.
config PATA_HPT3X2N
- tristate "HPT 372N/302N PATA support (Very Experimental)"
+ tristate "HPT 372N/302N PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the N variant HPT PATA
@@ -389,8 +389,8 @@ config PATA_HPT3X3_DMA
problems with DMA on this chipset.
config PATA_ISAPNP
- tristate "ISA Plug and Play PATA support (Experimental)"
- depends on EXPERIMENTAL && ISAPNP
+ tristate "ISA Plug and Play PATA support"
+ depends on ISAPNP
help
This option enables support for ISA plug & play ATA
controllers such as those found on old soundcards.
@@ -498,8 +498,8 @@ config PATA_NINJA32
If unsure, say N.
config PATA_NS87410
- tristate "Nat Semi NS87410 PATA support (Experimental)"
- depends on PCI && EXPERIMENTAL
+ tristate "Nat Semi NS87410 PATA support"
+ depends on PCI
help
This option enables support for the National Semiconductor
NS87410 PCI-IDE controller.
@@ -507,8 +507,8 @@ config PATA_NS87410
If unsure, say N.
config PATA_NS87415
- tristate "Nat Semi NS87415 PATA support (Experimental)"
- depends on PCI && EXPERIMENTAL
+ tristate "Nat Semi NS87415 PATA support"
+ depends on PCI
help
This option enables support for the National Semiconductor
NS87415 PCI-IDE controller.
@@ -544,8 +544,8 @@ config PATA_PCMCIA
If unsure, say N.
config PATA_PDC_OLD
- tristate "Older Promise PATA controller support (Experimental)"
- depends on PCI && EXPERIMENTAL
+ tristate "Older Promise PATA controller support"
+ depends on PCI
help
This option enables support for the Promise 20246, 20262, 20263,
20265 and 20267 adapters.
@@ -559,7 +559,7 @@ config PATA_QDI
Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
config PATA_RADISYS
- tristate "RADISYS 82600 PATA support (Very Experimental)"
+ tristate "RADISYS 82600 PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the RADISYS 82600
@@ -586,8 +586,8 @@ config PATA_RZ1000
If unsure, say N.
config PATA_SC1200
- tristate "SC1200 PATA support (Very Experimental)"
- depends on PCI && EXPERIMENTAL
+ tristate "SC1200 PATA support"
+ depends on PCI
help
This option enables support for the NatSemi/AMD SC1200 SoC
companion chip used with the Geode processor family.
@@ -620,8 +620,8 @@ config PATA_SIL680
If unsure, say N.
config PATA_SIS
- tristate "SiS PATA support (Experimental)"
- depends on PCI && EXPERIMENTAL
+ tristate "SiS PATA support"
+ depends on PCI
help
This option enables support for SiS PATA controllers
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index d6d97d8f3fa..c11936e13dd 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1072,7 +1072,14 @@ static int piix_broken_suspend(void)
* matching is necessary because dmi_system_id.matches is
* limited to four entries.
*/
- if (!strcmp(dmi_get_system_info(DMI_SYS_VENDOR), "TOSHIBA") &&
+ if (dmi_get_system_info(DMI_SYS_VENDOR) &&
+ dmi_get_system_info(DMI_PRODUCT_NAME) &&
+ dmi_get_system_info(DMI_PRODUCT_VERSION) &&
+ dmi_get_system_info(DMI_PRODUCT_SERIAL) &&
+ dmi_get_system_info(DMI_BOARD_VENDOR) &&
+ dmi_get_system_info(DMI_BOARD_NAME) &&
+ dmi_get_system_info(DMI_BOARD_VERSION) &&
+ !strcmp(dmi_get_system_info(DMI_SYS_VENDOR), "TOSHIBA") &&
!strcmp(dmi_get_system_info(DMI_PRODUCT_NAME), "000000") &&
!strcmp(dmi_get_system_info(DMI_PRODUCT_VERSION), "000000") &&
!strcmp(dmi_get_system_info(DMI_PRODUCT_SERIAL), "000000") &&
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 5e2eb740df4..bc6695e3c84 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4050,17 +4050,70 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
/* Seagate NCQ + FLUSH CACHE firmware bug */
- { "ST31500341AS", "9JU138", ATA_HORKAGE_NONCQ |
+ { "ST31500341AS", "SD15", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
- { "ST31000333AS", "9FZ136", ATA_HORKAGE_NONCQ |
+ { "ST31500341AS", "SD16", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
- { "ST3640623AS", "9FZ164", ATA_HORKAGE_NONCQ |
+ { "ST31500341AS", "SD17", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
- { "ST3640323AS", "9FZ134", ATA_HORKAGE_NONCQ |
+ { "ST31500341AS", "SD18", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
- { "ST3320813AS", "9FZ182", ATA_HORKAGE_NONCQ |
+ { "ST31500341AS", "SD19", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
- { "ST3320613AS", "9FZ162", ATA_HORKAGE_NONCQ |
+
+ { "ST31000333AS", "SD15", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST31000333AS", "SD16", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST31000333AS", "SD17", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST31000333AS", "SD18", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST31000333AS", "SD19", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+
+ { "ST3640623AS", "SD15", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3640623AS", "SD16", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3640623AS", "SD17", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3640623AS", "SD18", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3640623AS", "SD19", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+
+ { "ST3640323AS", "SD15", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3640323AS", "SD16", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3640323AS", "SD17", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3640323AS", "SD18", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3640323AS", "SD19", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+
+ { "ST3320813AS", "SD15", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3320813AS", "SD16", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3320813AS", "SD17", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3320813AS", "SD18", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3320813AS", "SD19", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+
+ { "ST3320613AS", "SD15", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3320613AS", "SD16", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3320613AS", "SD17", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3320613AS", "SD18", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3320613AS", "SD19", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
/* Blacklist entries taken from Silicon Image 3124/3132
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index f2b83eabc7c..e0c4f05d7d5 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -183,7 +183,9 @@ static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask)
mask &= ~(0xF8 << ATA_SHIFT_UDMA);
if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4))
mask &= ~(0xF0 << ATA_SHIFT_UDMA);
- }
+ } else if (adev->class == ATA_DEV_ATAPI)
+ mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+
return ata_bmdma_mode_filter(adev, mask);
}
@@ -211,11 +213,15 @@ static u32 hpt36x_find_mode(struct ata_port *ap, int speed)
static int hpt36x_cable_detect(struct ata_port *ap)
{
- u8 ata66;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u8 ata66;
+ /*
+ * Each channel of pata_hpt366 occupies separate PCI function
+ * as the primary channel and bit1 indicates the cable type.
+ */
pci_read_config_byte(pdev, 0x5A, &ata66);
- if (ata66 & (1 << ap->port_no))
+ if (ata66 & 2)
return ATA_CBL_PATA40;
return ATA_CBL_PATA80;
}
@@ -382,10 +388,10 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
/* PCI clocking determines the ATA timing values to use */
/* info_hpt366 is safe against re-entry so we can scribble on it */
switch((reg1 & 0x700) >> 8) {
- case 5:
+ case 9:
hpriv = &hpt366_40;
break;
- case 9:
+ case 5:
hpriv = &hpt366_25;
break;
default:
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index 4e466eae8b4..4dd9a3b031e 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -44,7 +44,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_ninja32"
-#define DRV_VERSION "0.1.1"
+#define DRV_VERSION "0.1.3"
/**
@@ -130,7 +130,8 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
return rc;
pci_set_master(dev);
- /* Set up the register mappings */
+ /* Set up the register mappings. We use the I/O mapping as only the
+ older chips also have MMIO on BAR 1 */
base = host->iomap[0];
if (!base)
return -ENOMEM;
@@ -167,8 +168,12 @@ static int ninja32_reinit_one(struct pci_dev *pdev)
#endif
static const struct pci_device_id ninja32[] = {
+ { 0x10FC, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0x1145, 0x8008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0x1145, 0xf008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0x1145, 0xf02C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ },
};
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index d3423661175..e4be55e047f 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -56,7 +56,6 @@ static const struct sis_laptop sis_laptop[] = {
{ 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */
{ 0x5513, 0x1734, 0x105F }, /* FSC Amilo A1630 */
{ 0x5513, 0x1071, 0x8640 }, /* EasyNote K5305 */
- { 0x5513, 0x1039, 0x5513 }, /* Targa Visionary 1000 */
/* end marker */
{ 0, }
};
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 9364dc55425..9f7c543cc04 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1693,6 +1693,11 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
for (i = 0; i <= h->highest_lun; i++) {
int j;
drv_found = 0;
+
+ /* skip holes in the array from already deleted drives */
+ if (h->drv[i].raid_level == -1)
+ continue;
+
for (j = 0; j < num_luns; j++) {
memcpy(&lunid, &ld_buff->LUN[j][0], 4);
lunid = le32_to_cpu(lunid);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index f20bf359b84..dc7a8c352da 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -302,7 +302,7 @@ static struct kobj_type kobj_pkt_type_wqueue = {
static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
if (class_pktcdvd) {
- pd->dev = device_create(class_pktcdvd, NULL, pd->pkt_dev, NULL,
+ pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
"%s", pd->name);
if (IS_ERR(pd->dev))
pd->dev = NULL;
@@ -2790,7 +2790,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
return 0;
out_mem:
- blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+ blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
return ret;
@@ -2975,7 +2975,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
pkt_debugfs_dev_remove(pd);
pkt_sysfs_dev_remove(pd);
- blkdev_put(pd->bdev, FMODE_READ|FMODE_WRITE);
+ blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
remove_proc_entry(pd->name, pkt_proc);
DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index d16b02423d6..7d2e91cccb1 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2081,10 +2081,6 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
if (!q)
return -ENXIO;
- rq = blk_get_request(q, READ, GFP_KERNEL);
- if (!rq)
- return -ENOMEM;
-
cdi->last_sense = 0;
while (nframes) {
@@ -2096,9 +2092,17 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
len = nr * CD_FRAMESIZE_RAW;
+ rq = blk_get_request(q, READ, GFP_KERNEL);
+ if (!rq) {
+ ret = -ENOMEM;
+ break;
+ }
+
ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
- if (ret)
+ if (ret) {
+ blk_put_request(rq);
break;
+ }
rq->cmd[0] = GPCMD_READ_CD;
rq->cmd[1] = 1 << 2;
@@ -2124,6 +2128,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
if (blk_rq_unmap_user(bio))
ret = -EFAULT;
+ blk_put_request(rq);
if (ret)
break;
@@ -2133,7 +2138,6 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
ubuf += len;
}
- blk_put_request(rq);
return ret;
}
diff --git a/drivers/char/cp437.uni b/drivers/char/cp437.uni
index 1f06889a96b..bc6163484f6 100644
--- a/drivers/char/cp437.uni
+++ b/drivers/char/cp437.uni
@@ -27,7 +27,7 @@
0x0c U+2640
0x0d U+266a
0x0e U+266b
-0x0f U+263c
+0x0f U+263c U+00a4
0x10 U+25b6 U+25ba
0x11 U+25c0 U+25c4
0x12 U+2195
@@ -55,7 +55,7 @@
0x24 U+0024
0x25 U+0025
0x26 U+0026
-0x27 U+0027
+0x27 U+0027 U+00b4
0x28 U+0028
0x29 U+0029
0x2a U+002a
@@ -84,7 +84,7 @@
0x41 U+0041 U+00c0 U+00c1 U+00c2 U+00c3
0x42 U+0042
0x43 U+0043 U+00a9
-0x44 U+0044
+0x44 U+0044 U+00d0
0x45 U+0045 U+00c8 U+00ca U+00cb
0x46 U+0046
0x47 U+0047
@@ -140,7 +140,7 @@
0x79 U+0079 U+00fd
0x7a U+007a
0x7b U+007b
-0x7c U+007c U+00a5
+0x7c U+007c U+00a6
0x7d U+007d
0x7e U+007e
#
@@ -263,10 +263,10 @@
0xe8 U+03a6 U+00d8
0xe9 U+0398
0xea U+03a9 U+2126
-0xeb U+03b4
+0xeb U+03b4 U+00f0
0xec U+221e
0xed U+03c6 U+00f8
-0xee U+03b5
+0xee U+03b5 U+2208
0xef U+2229
0xf0 U+2261
0xf1 U+00b1
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 3b23270eaa6..a8f15e6be59 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -418,7 +418,7 @@ static irqreturn_t cd2401_rxerr_interrupt(int irq, void *dev_id)
TTY_OVERRUN);
/*
If the flip buffer itself is
- overflowing, we still loose
+ overflowing, we still lose
the next incoming character.
*/
if (tty_buffer_request_room(tty, 1) !=
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index a5af6072e2b..008176edbd6 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -2274,7 +2274,7 @@ rescan_last_byte:
continue; /* nothing to display */
}
/* Glyph not found */
- if ((!(vc->vc_utf && !vc->vc_disp_ctrl) || c < 128) && !(c & ~charmask)) {
+ if ((!(vc->vc_utf && !vc->vc_disp_ctrl) && c < 128) && !(c & ~charmask)) {
/* In legacy mode use the glyph we get by a 1:1 mapping.
This would make absolutely no sense with Unicode in mind,
but do this for ASCII characters since a font may lack
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.c b/drivers/char/xilinx_hwicap/buffer_icap.c
index aa7f7962a9a..05d897764f0 100644
--- a/drivers/char/xilinx_hwicap/buffer_icap.c
+++ b/drivers/char/xilinx_hwicap/buffer_icap.c
@@ -21,9 +21,6 @@
* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE.
*
- * Xilinx products are not intended for use in life support appliances,
- * devices, or systems. Use in such applications is expressly prohibited.
- *
* (c) Copyright 2003-2008 Xilinx Inc.
* All rights reserved.
*
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.h b/drivers/char/xilinx_hwicap/buffer_icap.h
index 8b0252bf06e..d4f419ee87a 100644
--- a/drivers/char/xilinx_hwicap/buffer_icap.h
+++ b/drivers/char/xilinx_hwicap/buffer_icap.h
@@ -21,9 +21,6 @@
* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE.
*
- * Xilinx products are not intended for use in life support appliances,
- * devices, or systems. Use in such applications is expressly prohibited.
- *
* (c) Copyright 2003-2008 Xilinx Inc.
* All rights reserved.
*
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.c b/drivers/char/xilinx_hwicap/fifo_icap.c
index 776b5052847..02225eb19cf 100644
--- a/drivers/char/xilinx_hwicap/fifo_icap.c
+++ b/drivers/char/xilinx_hwicap/fifo_icap.c
@@ -21,9 +21,6 @@
* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE.
*
- * Xilinx products are not intended for use in life support appliances,
- * devices, or systems. Use in such applications is expressly prohibited.
- *
* (c) Copyright 2007-2008 Xilinx Inc.
* All rights reserved.
*
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.h b/drivers/char/xilinx_hwicap/fifo_icap.h
index 62bda453c90..4c9dd9a3b62 100644
--- a/drivers/char/xilinx_hwicap/fifo_icap.h
+++ b/drivers/char/xilinx_hwicap/fifo_icap.h
@@ -21,9 +21,6 @@
* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE.
*
- * Xilinx products are not intended for use in life support appliances,
- * devices, or systems. Use in such applications is expressly prohibited.
- *
* (c) Copyright 2007-2008 Xilinx Inc.
* All rights reserved.
*
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index d1613194909..f40ab699860 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -21,9 +21,6 @@
* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE.
*
- * Xilinx products are not intended for use in life support appliances,
- * devices, or systems. Use in such applications is expressly prohibited.
- *
* (c) Copyright 2002 Xilinx Inc., Systems Engineering Group
* (c) Copyright 2004 Xilinx Inc., Systems Engineering Group
* (c) Copyright 2007-2008 Xilinx Inc.
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
index 24d0d9b938f..8cca11981c5 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.h
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
@@ -21,9 +21,6 @@
* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE.
*
- * Xilinx products are not intended for use in life support appliances,
- * devices, or systems. Use in such applications is expressly prohibited.
- *
* (c) Copyright 2003-2007 Xilinx Inc.
* All rights reserved.
*
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 5317e08221e..65799651737 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -388,7 +388,10 @@ int dma_async_device_register(struct dma_device *device)
init_completion(&device->done);
kref_init(&device->refcount);
+
+ mutex_lock(&dma_list_mutex);
device->dev_id = id++;
+ mutex_unlock(&dma_list_mutex);
/* represent channels in sysfs. Probably want devs too */
list_for_each_entry(chan, &device->channels, device_node) {
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index ecd743f7cc6..6607fdd00b1 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -1341,10 +1341,12 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
*/
#define IOAT_TEST_SIZE 2000
+DECLARE_COMPLETION(test_completion);
static void ioat_dma_test_callback(void *dma_async_param)
{
printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
dma_async_param);
+ complete(&test_completion);
}
/**
@@ -1410,7 +1412,8 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
goto free_resources;
}
device->common.device_issue_pending(dma_chan);
- msleep(1);
+
+ wait_for_completion_timeout(&test_completion, msecs_to_jiffies(3000));
if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
!= DMA_SUCCESS) {
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index c7a9306d951..6be31726220 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -85,18 +85,28 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
enum dma_ctrl_flags flags = desc->async_tx.flags;
u32 src_cnt;
dma_addr_t addr;
+ dma_addr_t dest;
+ src_cnt = unmap->unmap_src_cnt;
+ dest = iop_desc_get_dest_addr(unmap, iop_chan);
if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- addr = iop_desc_get_dest_addr(unmap, iop_chan);
- dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+ enum dma_data_direction dir;
+
+ if (src_cnt > 1) /* is xor? */
+ dir = DMA_BIDIRECTIONAL;
+ else
+ dir = DMA_FROM_DEVICE;
+
+ dma_unmap_page(dev, dest, len, dir);
}
if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- src_cnt = unmap->unmap_src_cnt;
while (src_cnt--) {
addr = iop_desc_get_src_addr(unmap,
iop_chan,
src_cnt);
+ if (addr == dest)
+ continue;
dma_unmap_page(dev, addr, len,
DMA_TO_DEVICE);
}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 0328da020a1..bcda1742641 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -311,17 +311,26 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
enum dma_ctrl_flags flags = desc->async_tx.flags;
u32 src_cnt;
dma_addr_t addr;
+ dma_addr_t dest;
+ src_cnt = unmap->unmap_src_cnt;
+ dest = mv_desc_get_dest_addr(unmap);
if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- addr = mv_desc_get_dest_addr(unmap);
- dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+ enum dma_data_direction dir;
+
+ if (src_cnt > 1) /* is xor ? */
+ dir = DMA_BIDIRECTIONAL;
+ else
+ dir = DMA_FROM_DEVICE;
+ dma_unmap_page(dev, dest, len, dir);
}
if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- src_cnt = unmap->unmap_src_cnt;
while (src_cnt--) {
addr = mv_desc_get_src_addr(unmap,
src_cnt);
+ if (addr == dest)
+ continue;
dma_unmap_page(dev, addr, len,
DMA_TO_DEVICE);
}
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 46610b09041..ab9c01e462e 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -974,6 +974,7 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
packet->ack = RCODE_SEND_ERROR;
return -1;
}
+ packet->payload_bus = payload_bus;
d[2].req_count = cpu_to_le16(packet->payload_length);
d[2].data_address = cpu_to_le32(payload_bus);
@@ -1025,7 +1026,6 @@ static int handle_at_packet(struct context *context,
struct driver_data *driver_data;
struct fw_packet *packet;
struct fw_ohci *ohci = context->ohci;
- dma_addr_t payload_bus;
int evt;
if (last->transfer_status == 0)
@@ -1038,9 +1038,8 @@ static int handle_at_packet(struct context *context,
/* This packet was cancelled, just continue. */
return 1;
- payload_bus = le32_to_cpu(last->data_address);
- if (payload_bus != 0)
- dma_unmap_single(ohci->card.device, payload_bus,
+ if (packet->payload_bus)
+ dma_unmap_single(ohci->card.device, packet->payload_bus,
packet->payload_length, DMA_TO_DEVICE);
evt = le16_to_cpu(last->transfer_status) & 0x1f;
@@ -1697,6 +1696,10 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
if (packet->ack != 0)
goto out;
+ if (packet->payload_bus)
+ dma_unmap_single(ohci->card.device, packet->payload_bus,
+ packet->payload_length, DMA_TO_DEVICE);
+
log_ar_at_event('T', packet->speed, packet->header, 0x20);
driver_data->packet = NULL;
packet->ack = RCODE_CANCELLED;
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index 022ac4fabb6..2884f876397 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -207,6 +207,7 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
packet->speed = speed;
packet->generation = generation;
packet->ack = 0;
+ packet->payload_bus = 0;
}
/**
@@ -581,6 +582,8 @@ fw_fill_response(struct fw_packet *response, u32 *request_header,
BUG();
return;
}
+
+ response->payload_bus = 0;
}
EXPORT_SYMBOL(fw_fill_response);
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index aed7dbb17cd..839466f0a79 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -27,6 +27,7 @@
#include <linux/list.h>
#include <linux/spinlock_types.h>
#include <linux/timer.h>
+#include <linux/types.h>
#include <linux/workqueue.h>
#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
@@ -153,6 +154,7 @@ struct fw_packet {
size_t header_length;
void *payload;
size_t payload_length;
+ dma_addr_t payload_bus;
u32 timestamp;
/*
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ba89b42f790..afa8a12cd00 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -717,7 +717,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = dev->pci_device;
break;
case I915_PARAM_HAS_GEM:
- value = 1;
+ value = dev_priv->has_gem;
break;
default:
DRM_ERROR("Unknown parameter %d\n", param->param);
@@ -830,6 +830,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->regs = ioremap(base, size);
+#ifdef CONFIG_HIGHMEM64G
+ /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */
+ dev_priv->has_gem = 0;
+#else
+ /* enable GEM by default */
+ dev_priv->has_gem = 1;
+#endif
+
i915_gem_load(dev);
/* Init HWS */
@@ -847,9 +855,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
* and the registers being closely associated.
*
* According to chipset errata, on the 965GM, MSI interrupts may
- * be lost or delayed
+ * be lost or delayed, but we use them anyways to avoid
+ * stuck interrupts on some machines.
*/
- if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev))
+ if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);
intel_opregion_init(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0a4f39b9a0e..b3cc4731aa7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -106,6 +106,8 @@ struct intel_opregion {
typedef struct drm_i915_private {
struct drm_device *dev;
+ int has_gem;
+
void __iomem *regs;
drm_local_map_t *sarea;
@@ -244,6 +246,10 @@ typedef struct drm_i915_private {
* List of objects currently involved in rendering from the
* ringbuffer.
*
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
* A reference is held on the buffer while on this list.
*/
struct list_head active_list;
@@ -253,6 +259,8 @@ typedef struct drm_i915_private {
* still have a write_domain which needs to be flushed before
* unbinding.
*
+ * last_rendering_seqno is 0 while an object is in this list.
+ *
* A reference is held on the buffer while on this list.
*/
struct list_head flushing_list;
@@ -261,6 +269,8 @@ typedef struct drm_i915_private {
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
+ * last_rendering_seqno is 0 while an object is in this list.
+ *
* A reference is not held on the buffer while on this list,
* as merely being GTT-bound shouldn't prevent its being
* freed, and we'll pull it off the list in the free path.
@@ -371,8 +381,8 @@ struct drm_i915_gem_object {
uint32_t agp_type;
/**
- * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
- * GEM_DOMAIN_CPU is not in the object's read domain.
+ * If present, while GEM_DOMAIN_CPU is in the read domain this array
+ * flags which individual pages are valid.
*/
uint8_t *page_cpu_valid;
};
@@ -394,9 +404,6 @@ struct drm_i915_gem_request {
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
- /** Cache domains that were flushed at the start of the request. */
- uint32_t flush_domains;
-
struct list_head list;
};
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d58ddef468f..24fe8c10b4b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -33,21 +33,21 @@
#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
-static int
-i915_gem_object_set_domain(struct drm_gem_object *obj,
- uint32_t read_domains,
- uint32_t write_domain);
-static int
-i915_gem_object_set_domain_range(struct drm_gem_object *obj,
- uint64_t offset,
- uint64_t size,
- uint32_t read_domains,
- uint32_t write_domain);
-static int
-i915_gem_set_domain(struct drm_gem_object *obj,
- struct drm_file *file_priv,
- uint32_t read_domains,
- uint32_t write_domain);
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain);
+static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
+static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+ int write);
+static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
+ int write);
+static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+ uint64_t offset,
+ uint64_t size);
+static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
@@ -162,8 +162,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
- I915_GEM_DOMAIN_CPU, 0);
+ ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+ args->size);
if (ret != 0) {
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@@ -260,8 +260,7 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
mutex_unlock(&dev->struct_mutex);
return ret;
}
- ret = i915_gem_set_domain(obj, file_priv,
- I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret)
goto fail;
@@ -320,8 +319,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_set_domain(obj, file_priv,
- I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -397,7 +395,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
}
/**
- * Called when user space prepares to use an object
+ * Called when user space prepares to use an object with the CPU, either
+ * through the mmap ioctl's mapping or a GTT mapping.
*/
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@@ -405,11 +404,26 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_set_domain *args = data;
struct drm_gem_object *obj;
+ uint32_t read_domains = args->read_domains;
+ uint32_t write_domain = args->write_domain;
int ret;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
+ /* Only handle setting domains to types used by the CPU. */
+ if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+ return -EINVAL;
+
+ if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+ return -EINVAL;
+
+ /* Having something in the write domain implies it's in the read
+ * domain, and only that read domain. Enforce that in the request.
+ */
+ if (write_domain != 0 && read_domains != write_domain)
+ return -EINVAL;
+
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EBADF;
@@ -417,10 +431,21 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
#if WATCH_BUF
DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
- obj, obj->size, args->read_domains, args->write_domain);
+ obj, obj->size, read_domains, write_domain);
#endif
- ret = i915_gem_set_domain(obj, file_priv,
- args->read_domains, args->write_domain);
+ if (read_domains & I915_GEM_DOMAIN_GTT) {
+ ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+
+ /* Silently promote "you're not bound, there was nothing to do"
+ * to success, since the client was just asking us to
+ * make sure everything was done.
+ */
+ if (ret == -EINVAL)
+ ret = 0;
+ } else {
+ ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+ }
+
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -455,10 +480,9 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
obj_priv = obj->driver_private;
/* Pinned buffers may be scanout, so flush the cache */
- if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
- i915_gem_clflush_object(obj);
- drm_agp_chipset_flush(dev);
- }
+ if (obj_priv->pin_count)
+ i915_gem_object_flush_cpu_write_domain(obj);
+
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -532,7 +556,7 @@ i915_gem_object_free_page_list(struct drm_gem_object *obj)
}
static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj)
+i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -546,8 +570,20 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj)
/* Move from whatever list we were on to the tail of execution. */
list_move_tail(&obj_priv->list,
&dev_priv->mm.active_list);
+ obj_priv->last_rendering_seqno = seqno;
}
+static void
+i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ BUG_ON(!obj_priv->active);
+ list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
+ obj_priv->last_rendering_seqno = 0;
+}
static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
@@ -562,6 +598,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
else
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ obj_priv->last_rendering_seqno = 0;
if (obj_priv->active) {
obj_priv->active = 0;
drm_gem_object_unreference(obj);
@@ -610,10 +647,28 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
request->seqno = seqno;
request->emitted_jiffies = jiffies;
- request->flush_domains = flush_domains;
was_empty = list_empty(&dev_priv->mm.request_list);
list_add_tail(&request->list, &dev_priv->mm.request_list);
+ /* Associate any objects on the flushing list matching the write
+ * domain we're flushing with our flush.
+ */
+ if (flush_domains != 0) {
+ struct drm_i915_gem_object *obj_priv, *next;
+
+ list_for_each_entry_safe(obj_priv, next,
+ &dev_priv->mm.flushing_list, list) {
+ struct drm_gem_object *obj = obj_priv->obj;
+
+ if ((obj->write_domain & flush_domains) ==
+ obj->write_domain) {
+ obj->write_domain = 0;
+ i915_gem_object_move_to_active(obj, seqno);
+ }
+ }
+
+ }
+
if (was_empty && !dev_priv->mm.suspended)
schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
return seqno;
@@ -676,30 +731,10 @@ i915_gem_retire_request(struct drm_device *dev,
__func__, request->seqno, obj);
#endif
- if (obj->write_domain != 0) {
- list_move_tail(&obj_priv->list,
- &dev_priv->mm.flushing_list);
- } else {
+ if (obj->write_domain != 0)
+ i915_gem_object_move_to_flushing(obj);
+ else
i915_gem_object_move_to_inactive(obj);
- }
- }
-
- if (request->flush_domains != 0) {
- struct drm_i915_gem_object *obj_priv, *next;
-
- /* Clear the write domain and activity from any buffers
- * that are just waiting for a flush matching the one retired.
- */
- list_for_each_entry_safe(obj_priv, next,
- &dev_priv->mm.flushing_list, list) {
- struct drm_gem_object *obj = obj_priv->obj;
-
- if (obj->write_domain & request->flush_domains) {
- obj->write_domain = 0;
- i915_gem_object_move_to_inactive(obj);
- }
- }
-
}
}
@@ -892,25 +927,10 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int ret;
- /* If there are writes queued to the buffer, flush and
- * create a new seqno to wait for.
+ /* This function only exists to support waiting for existing rendering,
+ * not for emitting required flushes.
*/
- if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
- uint32_t write_domain = obj->write_domain;
-#if WATCH_BUF
- DRM_INFO("%s: flushing object %p from write domain %08x\n",
- __func__, obj, write_domain);
-#endif
- i915_gem_flush(dev, 0, write_domain);
-
- i915_gem_object_move_to_active(obj);
- obj_priv->last_rendering_seqno = i915_add_request(dev,
- write_domain);
- BUG_ON(obj_priv->last_rendering_seqno == 0);
-#if WATCH_LRU
- DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
-#endif
- }
+ BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
/* If there is rendering queued on the buffer being evicted, wait for
* it.
@@ -950,24 +970,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return -EINVAL;
}
- /* Wait for any rendering to complete
- */
- ret = i915_gem_object_wait_rendering(obj);
- if (ret) {
- DRM_ERROR("wait_rendering failed: %d\n", ret);
- return ret;
- }
-
/* Move the object to the CPU domain to ensure that
* any possible CPU writes while it's not in the GTT
* are flushed when we go to remap it. This will
* also ensure that all pending GPU writes are finished
* before we unbind.
*/
- ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
- I915_GEM_DOMAIN_CPU);
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
if (ret) {
- DRM_ERROR("set_domain failed: %d\n", ret);
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("set_domain failed: %d\n", ret);
return ret;
}
@@ -1083,6 +1095,21 @@ i915_gem_evict_something(struct drm_device *dev)
}
static int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+ int ret;
+
+ for (;;) {
+ ret = i915_gem_evict_something(dev);
+ if (ret != 0)
+ break;
+ }
+ if (ret == -ENOMEM)
+ return 0;
+ return ret;
+}
+
+static int
i915_gem_object_get_page_list(struct drm_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1168,7 +1195,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
ret = i915_gem_evict_something(dev);
if (ret != 0) {
- DRM_ERROR("Failed to evict a buffer %d\n", ret);
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Failed to evict a buffer %d\n", ret);
return ret;
}
goto search_free;
@@ -1228,6 +1256,143 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
}
+/** Flushes any GPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ uint32_t seqno;
+
+ if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+ return;
+
+ /* Queue the GPU write cache flushing we need. */
+ i915_gem_flush(dev, 0, obj->write_domain);
+ seqno = i915_add_request(dev, obj->write_domain);
+ obj->write_domain = 0;
+ i915_gem_object_move_to_active(obj, seqno);
+}
+
+/** Flushes the GTT write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
+{
+ if (obj->write_domain != I915_GEM_DOMAIN_GTT)
+ return;
+
+ /* No actual flushing is required for the GTT write domain. Writes
+ * to it immediately go to main memory as far as we know, so there's
+ * no chipset flush. It also doesn't land in render cache.
+ */
+ obj->write_domain = 0;
+}
+
+/** Flushes the CPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+
+ if (obj->write_domain != I915_GEM_DOMAIN_CPU)
+ return;
+
+ i915_gem_clflush_object(obj);
+ drm_agp_chipset_flush(dev);
+ obj->write_domain = 0;
+}
+
+/**
+ * Moves a single object to the GTT read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int ret;
+
+ /* Not valid to be called on unbound objects. */
+ if (obj_priv->gtt_space == NULL)
+ return -EINVAL;
+
+ i915_gem_object_flush_gpu_write_domain(obj);
+ /* Wait on any GPU rendering and flushing to occur. */
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret != 0)
+ return ret;
+
+ /* If we're writing through the GTT domain, then CPU and GPU caches
+ * will need to be invalidated at next use.
+ */
+ if (write)
+ obj->read_domains &= I915_GEM_DOMAIN_GTT;
+
+ i915_gem_object_flush_cpu_write_domain(obj);
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ if (write) {
+ obj->write_domain = I915_GEM_DOMAIN_GTT;
+ obj_priv->dirty = 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Moves a single object to the CPU read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+{
+ struct drm_device *dev = obj->dev;
+ int ret;
+
+ i915_gem_object_flush_gpu_write_domain(obj);
+ /* Wait on any GPU rendering and flushing to occur. */
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret != 0)
+ return ret;
+
+ i915_gem_object_flush_gtt_write_domain(obj);
+
+ /* If we have a partially-valid cache of the object in the CPU,
+ * finish invalidating it and free the per-page flags.
+ */
+ i915_gem_object_set_to_full_cpu_read_domain(obj);
+
+ /* Flush the CPU cache if it's still invalid. */
+ if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+ i915_gem_clflush_object(obj);
+ drm_agp_chipset_flush(dev);
+
+ obj->read_domains |= I915_GEM_DOMAIN_CPU;
+ }
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+
+ /* If we're writing through the CPU, then the GPU read domains will
+ * need to be invalidated at next use.
+ */
+ if (write) {
+ obj->read_domains &= I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
+ return 0;
+}
+
/*
* Set the next domain for the specified object. This
* may not actually perform the necessary flushing/invaliding though,
@@ -1339,16 +1504,18 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
* MI_FLUSH
* drm_agp_chipset_flush
*/
-static int
-i915_gem_object_set_domain(struct drm_gem_object *obj,
- uint32_t read_domains,
- uint32_t write_domain)
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
- int ret;
+
+ BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
+ BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
#if WATCH_BUF
DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
@@ -1385,34 +1552,11 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
__func__, flush_domains, invalidate_domains);
#endif
- /*
- * If we're invaliding the CPU cache and flushing a GPU cache,
- * then pause for rendering so that the GPU caches will be
- * flushed before the cpu cache is invalidated
- */
- if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
- (flush_domains & ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT))) {
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
- }
i915_gem_clflush_object(obj);
}
if ((write_domain | flush_domains) != 0)
obj->write_domain = write_domain;
-
- /* If we're invalidating the CPU domain, clear the per-page CPU
- * domain list as well.
- */
- if (obj_priv->page_cpu_valid != NULL &&
- (write_domain != 0 ||
- read_domains & I915_GEM_DOMAIN_CPU)) {
- drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
- DRM_MEM_DRIVER);
- obj_priv->page_cpu_valid = NULL;
- }
obj->read_domains = read_domains;
dev->invalidate_domains |= invalidate_domains;
@@ -1423,47 +1567,94 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
obj->read_domains, obj->write_domain,
dev->invalidate_domains, dev->flush_domains);
#endif
- return 0;
}
/**
- * Set the read/write domain on a range of the object.
+ * Moves the object from a partially CPU read to a full one.
*
- * Currently only implemented for CPU reads, otherwise drops to normal
- * i915_gem_object_set_domain().
+ * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
+ * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
*/
-static int
-i915_gem_object_set_domain_range(struct drm_gem_object *obj,
- uint64_t offset,
- uint64_t size,
- uint32_t read_domains,
- uint32_t write_domain)
+static void
+i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
+ struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int ret, i;
- if (obj->read_domains & I915_GEM_DOMAIN_CPU)
- return 0;
+ if (!obj_priv->page_cpu_valid)
+ return;
- if (read_domains != I915_GEM_DOMAIN_CPU ||
- write_domain != 0)
- return i915_gem_object_set_domain(obj,
- read_domains, write_domain);
+ /* If we're partially in the CPU read domain, finish moving it in.
+ */
+ if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
+ int i;
- /* Wait on any GPU rendering to the object to be flushed. */
+ for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
+ if (obj_priv->page_cpu_valid[i])
+ continue;
+ drm_clflush_pages(obj_priv->page_list + i, 1);
+ }
+ drm_agp_chipset_flush(dev);
+ }
+
+ /* Free the page_cpu_valid mappings which are now stale, whether
+ * or not we've got I915_GEM_DOMAIN_CPU.
+ */
+ drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
+ DRM_MEM_DRIVER);
+ obj_priv->page_cpu_valid = NULL;
+}
+
+/**
+ * Set the CPU read domain on a range of the object.
+ *
+ * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
+ * not entirely valid. The page_cpu_valid member of the object flags which
+ * pages have been flushed, and will be respected by
+ * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
+ * of the whole object.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+ uint64_t offset, uint64_t size)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int i, ret;
+
+ if (offset == 0 && size == obj->size)
+ return i915_gem_object_set_to_cpu_domain(obj, 0);
+
+ i915_gem_object_flush_gpu_write_domain(obj);
+ /* Wait on any GPU rendering and flushing to occur. */
ret = i915_gem_object_wait_rendering(obj);
- if (ret)
+ if (ret != 0)
return ret;
+ i915_gem_object_flush_gtt_write_domain(obj);
+ /* If we're already fully in the CPU read domain, we're done. */
+ if (obj_priv->page_cpu_valid == NULL &&
+ (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
+ return 0;
+
+ /* Otherwise, create/clear the per-page CPU read domain flag if we're
+ * newly adding I915_GEM_DOMAIN_CPU
+ */
if (obj_priv->page_cpu_valid == NULL) {
obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
DRM_MEM_DRIVER);
- }
+ if (obj_priv->page_cpu_valid == NULL)
+ return -ENOMEM;
+ } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
+ memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
/* Flush the cache on any pages that are still invalid from the CPU's
* perspective.
*/
- for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
+ for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
+ i++) {
if (obj_priv->page_cpu_valid[i])
continue;
@@ -1472,39 +1663,14 @@ i915_gem_object_set_domain_range(struct drm_gem_object *obj,
obj_priv->page_cpu_valid[i] = 1;
}
- return 0;
-}
-
-/**
- * Once all of the objects have been set in the proper domain,
- * perform the necessary flush and invalidate operations.
- *
- * Returns the write domains flushed, for use in flush tracking.
- */
-static uint32_t
-i915_gem_dev_set_domain(struct drm_device *dev)
-{
- uint32_t flush_domains = dev->flush_domains;
-
- /*
- * Now that all the buffers are synced to the proper domains,
- * flush and invalidate the collected domains
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
*/
- if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
- __func__,
- dev->invalidate_domains,
- dev->flush_domains);
-#endif
- i915_gem_flush(dev,
- dev->invalidate_domains,
- dev->flush_domains);
- dev->invalidate_domains = 0;
- dev->flush_domains = 0;
- }
+ BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
- return flush_domains;
+ obj->read_domains |= I915_GEM_DOMAIN_CPU;
+
+ return 0;
}
/**
@@ -1585,6 +1751,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
return -EINVAL;
}
+ if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
+ reloc.read_domains & I915_GEM_DOMAIN_CPU) {
+ DRM_ERROR("reloc with read/write CPU domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc.target_handle,
+ (int) reloc.offset,
+ reloc.read_domains,
+ reloc.write_domain);
+ return -EINVAL;
+ }
+
if (reloc.write_domain && target_obj->pending_write_domain &&
reloc.write_domain != target_obj->pending_write_domain) {
DRM_ERROR("Write domain conflict: "
@@ -1625,19 +1803,11 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
continue;
}
- /* Now that we're going to actually write some data in,
- * make sure that any rendering using this buffer's contents
- * is completed.
- */
- i915_gem_object_wait_rendering(obj);
-
- /* As we're writing through the gtt, flush
- * any CPU writes before we write the relocations
- */
- if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
- i915_gem_clflush_object(obj);
- drm_agp_chipset_flush(dev);
- obj->write_domain = 0;
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ if (ret != 0) {
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -EINVAL;
}
/* Map the page containing the relocation we're going to
@@ -1779,6 +1949,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
int ret, i, pinned = 0;
uint64_t exec_offset;
uint32_t seqno, flush_domains;
+ int pin_tries;
#if WATCH_EXEC
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -1827,14 +1998,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -EBUSY;
}
- /* Zero the gloabl flush/invalidate flags. These
- * will be modified as each object is bound to the
- * gtt
- */
- dev->invalidate_domains = 0;
- dev->flush_domains = 0;
-
- /* Look up object handles and perform the relocations */
+ /* Look up object handles */
for (i = 0; i < args->buffer_count; i++) {
object_list[i] = drm_gem_object_lookup(dev, file_priv,
exec_list[i].handle);
@@ -1844,17 +2008,39 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
ret = -EBADF;
goto err;
}
+ }
- object_list[i]->pending_read_domains = 0;
- object_list[i]->pending_write_domain = 0;
- ret = i915_gem_object_pin_and_relocate(object_list[i],
- file_priv,
- &exec_list[i]);
- if (ret) {
- DRM_ERROR("object bind and relocate failed %d\n", ret);
+ /* Pin and relocate */
+ for (pin_tries = 0; ; pin_tries++) {
+ ret = 0;
+ for (i = 0; i < args->buffer_count; i++) {
+ object_list[i]->pending_read_domains = 0;
+ object_list[i]->pending_write_domain = 0;
+ ret = i915_gem_object_pin_and_relocate(object_list[i],
+ file_priv,
+ &exec_list[i]);
+ if (ret)
+ break;
+ pinned = i + 1;
+ }
+ /* success */
+ if (ret == 0)
+ break;
+
+ /* error other than GTT full, or we've already tried again */
+ if (ret != -ENOMEM || pin_tries >= 1) {
+ DRM_ERROR("Failed to pin buffers %d\n", ret);
goto err;
}
- pinned = i + 1;
+
+ /* unpin all of our buffers */
+ for (i = 0; i < pinned; i++)
+ i915_gem_object_unpin(object_list[i]);
+
+ /* evict everyone we can from the aperture */
+ ret = i915_gem_evict_everything(dev);
+ if (ret)
+ goto err;
}
/* Set the pending read domains for the batch buffer to COMMAND */
@@ -1864,21 +2050,37 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
i915_verify_inactive(dev, __FILE__, __LINE__);
+ /* Zero the global flush/invalidate flags. These
+ * will be modified as new domains are computed
+ * for each object
+ */
+ dev->invalidate_domains = 0;
+ dev->flush_domains = 0;
+
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
- /* make sure all previous memory operations have passed */
- ret = i915_gem_object_set_domain(obj,
- obj->pending_read_domains,
- obj->pending_write_domain);
- if (ret)
- goto err;
+ /* Compute new gpu domains and update invalidate/flush */
+ i915_gem_object_set_to_gpu_domain(obj,
+ obj->pending_read_domains,
+ obj->pending_write_domain);
}
i915_verify_inactive(dev, __FILE__, __LINE__);
- /* Flush/invalidate caches and chipset buffer */
- flush_domains = i915_gem_dev_set_domain(dev);
+ if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+ DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+ __func__,
+ dev->invalidate_domains,
+ dev->flush_domains);
+#endif
+ i915_gem_flush(dev,
+ dev->invalidate_domains,
+ dev->flush_domains);
+ if (dev->flush_domains)
+ (void)i915_add_request(dev, dev->flush_domains);
+ }
i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -1898,8 +2100,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
~0);
#endif
- (void)i915_add_request(dev, flush_domains);
-
/* Exec the batchbuffer */
ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
if (ret) {
@@ -1927,10 +2127,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
i915_file_priv->mm.last_gem_seqno = seqno;
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- i915_gem_object_move_to_active(obj);
- obj_priv->last_rendering_seqno = seqno;
+ i915_gem_object_move_to_active(obj, seqno);
#if WATCH_LRU
DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
@@ -2061,11 +2259,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
/* XXX - flush the CPU caches for pinned objects
* as the X server doesn't manage domains yet
*/
- if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
- i915_gem_clflush_object(obj);
- drm_agp_chipset_flush(dev);
- obj->write_domain = 0;
- }
+ i915_gem_object_flush_cpu_write_domain(obj);
args->offset = obj_priv->gtt_offset;
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@@ -2115,7 +2309,14 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
}
obj_priv = obj->driver_private;
- args->busy = obj_priv->active;
+ /* Don't count being on the flushing list against the object being
+ * done. Otherwise, a buffer left on the flushing list but not getting
+ * flushed (because nobody's flushing that domain) won't ever return
+ * unbusy and get reused by libdrm's bo cache. The other expected
+ * consumer of this interface, OpenGL's occlusion queries, also specs
+ * that the objects get unbusy "eventually" without any interference.
+ */
+ args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@@ -2167,29 +2368,6 @@ void i915_gem_free_object(struct drm_gem_object *obj)
drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
}
-static int
-i915_gem_set_domain(struct drm_gem_object *obj,
- struct drm_file *file_priv,
- uint32_t read_domains,
- uint32_t write_domain)
-{
- struct drm_device *dev = obj->dev;
- int ret;
- uint32_t flush_domains;
-
- BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-
- ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
- if (ret)
- return ret;
- flush_domains = i915_gem_dev_set_domain(obj->dev);
-
- if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
- (void) i915_add_request(dev, flush_domains);
-
- return 0;
-}
-
/** Unbinds all objects that are on the given buffer list. */
static int
i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
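The execbuffer rework in the i915_gem.c hunks above replaces the single-pass pin loop with a bounded retry: pin every buffer, and if the GTT runs out of space (-ENOMEM) unpin whatever was pinned, evict everything from the aperture, and try exactly once more. A minimal userspace C sketch of that control flow, with hypothetical pin_object(), unpin_object() and evict_everything() helpers standing in for the driver calls:

#include <errno.h>
#include <stdio.h>

static int aperture_full = 1;

/* Hypothetical stand-ins for the i915 pin/unpin/evict entry points. */
static int pin_object(int i)
{
	return (i < 2 || !aperture_full) ? 0 : -ENOMEM;
}
static void unpin_object(int i)   { printf("unpin %d\n", i); }
static int evict_everything(void) { aperture_full = 0; return 0; }

static int pin_all(int count)
{
	int pinned = 0, ret, i, pin_tries;

	for (pin_tries = 0; ; pin_tries++) {
		ret = 0;
		for (i = 0; i < count; i++) {
			ret = pin_object(i);
			if (ret)
				break;
			pinned = i + 1;
		}
		/* success */
		if (ret == 0)
			break;

		/* error other than "aperture full", or second failure */
		if (ret != -ENOMEM || pin_tries >= 1)
			return ret;

		/* roll back, make room, retry once */
		for (i = 0; i < pinned; i++)
			unpin_object(i);
		pinned = 0;
		ret = evict_everything();
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	return pin_all(4) ? 1 : 0;
}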
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
index 93de15b4c9a..e8d5abe1250 100644
--- a/drivers/gpu/drm/i915/i915_gem_proc.c
+++ b/drivers/gpu/drm/i915/i915_gem_proc.c
@@ -166,10 +166,9 @@ static int i915_gem_request_info(char *buf, char **start, off_t offset,
list_for_each_entry(gem_request, &dev_priv->mm.request_list,
list)
{
- DRM_PROC_PRINT(" %d @ %d %08x\n",
+ DRM_PROC_PRINT(" %d @ %d\n",
gem_request->seqno,
- (int) (jiffies - gem_request->emitted_jiffies),
- gem_request->flush_domains);
+ (int) (jiffies - gem_request->emitted_jiffies));
}
if (len > request + offset)
return request;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index e8b85ac4ca0..a8cb69469c6 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -119,9 +119,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
dcc & DCC_CHANNEL_XOR_DISABLE) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if (IS_I965GM(dev) || IS_GM45(dev)) {
- /* GM965 only does bit 11-based channel
- * randomization
+ } else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
+ (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+ /* GM965/GM45 does either bit 11 or bit 17
+ * swizzling.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
swizzle_y = I915_BIT_6_SWIZZLE_9_11;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0e476eba36e..9d24aaeb8a4 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -522,6 +522,7 @@
#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
#define DCC_ADDRESSING_MODE_MASK (3 << 0)
#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
+#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
/** 965 MCH register controlling DRAM channel configuration */
#define C0DRB3 0x10206
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 7a183789be9..3bbb871b25d 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -299,7 +299,6 @@ typedef struct drm_radeon_private {
atomic_t swi_emitted;
int vblank_crtc;
uint32_t irq_enable_reg;
- int irq_enabled;
uint32_t r500_disp_irq_reg;
struct radeon_surface surfaces[RADEON_MAX_SURFACES];
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index 97c0599fdb1..99be11418ac 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -44,7 +44,8 @@ void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
else
dev_priv->irq_enable_reg &= ~mask;
- RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
+ if (!dev->irq_enabled)
+ RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
}
static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
@@ -56,7 +57,8 @@ static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
else
dev_priv->r500_disp_irq_reg &= ~mask;
- RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
+ if (!dev->irq_enabled)
+ RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
}
int radeon_enable_vblank(struct drm_device *dev, int crtc)
@@ -355,8 +357,6 @@ void radeon_driver_irq_uninstall(struct drm_device * dev)
if (!dev_priv)
return;
- dev_priv->irq_enabled = 0;
-
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
/* Disable *all* interrupts */
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 228f7572306..3fcf78e906d 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -365,6 +365,7 @@ static int cpm_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
pmsg = &msgs[tptr];
if (pmsg->flags & I2C_M_RD)
ret = wait_event_interruptible_timeout(cpm->i2c_wait,
+ (in_be16(&tbdf[tptr].cbd_sc) & BD_SC_NAK) ||
!(in_be16(&rbdf[rptr].cbd_sc) & BD_SC_EMPTY),
1 * HZ);
else
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index f4d22ae9d29..e5a8dae4a28 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -92,7 +92,7 @@ static void highlander_i2c_setup(struct highlander_i2c_dev *dev)
static void smbus_write_data(u8 *src, u16 *dst, int len)
{
for (; len > 1; len -= 2) {
- *dst++ = be16_to_cpup((u16 *)src);
+ *dst++ = be16_to_cpup((__be16 *)src);
src += 2;
}
@@ -103,7 +103,7 @@ static void smbus_write_data(u8 *src, u16 *dst, int len)
static void smbus_read_data(u16 *src, u8 *dst, int len)
{
for (; len > 1; len -= 2) {
- *(u16 *)dst = cpu_to_be16p(src++);
+ *(__be16 *)dst = cpu_to_be16p(src++);
dst += 2;
}
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index dcf2045b522..0bdb2d7f057 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -486,7 +486,7 @@ static enum pmcmsptwi_xfer_result pmcmsptwi_xfer_cmd(
if (cmd->type == MSP_TWI_CMD_WRITE ||
cmd->type == MSP_TWI_CMD_WRITE_READ) {
- __be64 tmp = cpu_to_be64p((u64 *)cmd->write_data);
+ u64 tmp = be64_to_cpup((__be64 *)cmd->write_data);
tmp >>= (MSP_MAX_BYTES_PER_RW - cmd->write_len) * 8;
dev_dbg(&pmcmsptwi_adapter.dev, "Writing 0x%016llx\n", tmp);
pmcmsptwi_writel(tmp & 0x00000000ffffffffLL,
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 1fac4e23313..b7434d24904 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -56,6 +56,7 @@ enum s3c24xx_i2c_state {
struct s3c24xx_i2c {
spinlock_t lock;
wait_queue_head_t wait;
+ unsigned int suspended:1;
struct i2c_msg *msg;
unsigned int msg_num;
@@ -507,7 +508,7 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c, struct i2c_msg *msgs, int
unsigned long timeout;
int ret;
- if (!(readl(i2c->regs + S3C2410_IICCON) & S3C2410_IICCON_IRQEN))
+ if (i2c->suspended)
return -EIO;
ret = s3c24xx_i2c_set_master(i2c);
@@ -986,17 +987,26 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
+static int s3c24xx_i2c_suspend_late(struct platform_device *dev,
+ pm_message_t msg)
+{
+ struct s3c24xx_i2c *i2c = platform_get_drvdata(dev);
+ i2c->suspended = 1;
+ return 0;
+}
+
static int s3c24xx_i2c_resume(struct platform_device *dev)
{
struct s3c24xx_i2c *i2c = platform_get_drvdata(dev);
- if (i2c != NULL)
- s3c24xx_i2c_init(i2c);
+ i2c->suspended = 0;
+ s3c24xx_i2c_init(i2c);
return 0;
}
#else
+#define s3c24xx_i2c_suspend_late NULL
#define s3c24xx_i2c_resume NULL
#endif
@@ -1005,6 +1015,7 @@ static int s3c24xx_i2c_resume(struct platform_device *dev)
static struct platform_driver s3c2410_i2c_driver = {
.probe = s3c24xx_i2c_probe,
.remove = s3c24xx_i2c_remove,
+ .suspend_late = s3c24xx_i2c_suspend_late,
.resume = s3c24xx_i2c_resume,
.driver = {
.owner = THIS_MODULE,
@@ -1015,6 +1026,7 @@ static struct platform_driver s3c2410_i2c_driver = {
static struct platform_driver s3c2440_i2c_driver = {
.probe = s3c24xx_i2c_probe,
.remove = s3c24xx_i2c_remove,
+ .suspend_late = s3c24xx_i2c_suspend_late,
.resume = s3c24xx_i2c_resume,
.driver = {
.owner = THIS_MODULE,
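The s3c24xx change above stops guessing suspend state from the IRQ-enable bit and tracks it explicitly: suspend_late sets a suspended flag, doxfer refuses transfers with -EIO while it is set, and resume clears it before re-initialising the controller. A compact C sketch of that guard, with a hypothetical struct ctrl standing in for struct s3c24xx_i2c:

#include <errno.h>
#include <stdio.h>

struct ctrl {
	unsigned int suspended:1;
};

static int do_xfer(struct ctrl *c)
{
	if (c->suspended)	/* started after suspend_late: refuse */
		return -EIO;
	printf("transfer ok\n");
	return 0;
}

static void suspend_late(struct ctrl *c) { c->suspended = 1; }
static void resume(struct ctrl *c)       { c->suspended = 0; /* then re-init hw */ }

int main(void)
{
	struct ctrl c = { .suspended = 0 };

	suspend_late(&c);
	if (do_xfer(&c) != -EIO)
		return 1;
	resume(&c);
	return do_xfer(&c);
}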
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 6d7401772a8..e6857e01d1b 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -669,10 +669,12 @@ config BLK_DEV_CELLEB
endif
+# TODO: BLK_DEV_IDEDMA_PCI -> BLK_DEV_IDEDMA_SFF
config BLK_DEV_IDE_PMAC
tristate "PowerMac on-board IDE support"
depends on PPC_PMAC && IDE=y
select IDE_TIMINGS
+ select BLK_DEV_IDEDMA_PCI
help
This driver provides support for the on-board IDE controller on
most of the recent Apple Power Macintoshes and PowerBooks.
@@ -689,16 +691,6 @@ config BLK_DEV_IDE_PMAC_ATA100FIRST
CD-ROM on hda. This option changes this to more natural hda for
hard disk and hdc for CD-ROM.
-config BLK_DEV_IDEDMA_PMAC
- bool "PowerMac IDE DMA support"
- depends on BLK_DEV_IDE_PMAC
- select BLK_DEV_IDEDMA_PCI
- help
- This option allows the driver for the on-board IDE controller on
- Power Macintoshes and PowerBooks to use DMA (direct memory access)
- to transfer data to and from memory. Saying Y is safe and improves
- performance.
-
config BLK_DEV_IDE_AU1XXX
bool "IDE for AMD Alchemy Au1200"
depends on SOC_AU1200
@@ -912,7 +904,7 @@ config BLK_DEV_UMC8672
endif
config BLK_DEV_IDEDMA
- def_bool BLK_DEV_IDEDMA_SFF || BLK_DEV_IDEDMA_PMAC || \
+ def_bool BLK_DEV_IDEDMA_SFF || \
BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
endif # IDE
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 7d275b2af3e..cc35d6dbd41 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -208,8 +208,10 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
*/
if (drive->hwif->dma_ops == NULL)
break;
- if (drive->dev_flags & IDE_DFLAG_USING_DMA)
- ide_set_dma(drive);
+ /*
+ * TODO: respect IDE_DFLAG_USING_DMA
+ */
+ ide_set_dma(drive);
break;
}
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 2e19d629853..7c481bb56fa 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -66,7 +66,6 @@ typedef struct pmac_ide_hwif {
struct macio_dev *mdev;
u32 timings[4];
volatile u32 __iomem * *kauai_fcr;
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
/* Those fields are duplicating what is in hwif. We currently
* can't use the hwif ones because of some assumptions that are
 * being made by the generic code about the kind of dma controller
@@ -74,8 +73,6 @@ typedef struct pmac_ide_hwif {
*/
volatile struct dbdma_regs __iomem * dma_regs;
struct dbdma_cmd* dma_table_cpu;
-#endif
-
} pmac_ide_hwif_t;
enum {
@@ -222,8 +219,6 @@ static const char* model_name[] = {
#define KAUAI_FCR_UATA_RESET_N 0x00000002
#define KAUAI_FCR_UATA_ENABLE 0x00000001
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
-
/* Rounded Multiword DMA timings
*
* I gave up finding a generic formula for all controller
@@ -413,8 +408,6 @@ static int pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq);
static void pmac_ide_selectproc(ide_drive_t *drive);
static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
-
#define PMAC_IDE_REG(x) \
((void __iomem *)((drive)->hwif->io_ports.data_addr + (x)))
@@ -584,8 +577,6 @@ pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
pmac_ide_do_update_timings(drive);
}
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
-
/*
* Calculate KeyLargo ATA/66 UDMA timings
*/
@@ -786,7 +777,6 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
drive->name, speed & 0xf, *timings);
#endif
}
-#endif /* #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC */
static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
@@ -804,7 +794,6 @@ static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
tl[0] = *timings;
tl[1] = *timings2;
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
if (speed >= XFER_UDMA_0) {
if (pmif->kind == controller_kl_ata4)
ret = set_timings_udma_ata4(&tl[0], speed);
@@ -817,7 +806,7 @@ static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
ret = -1;
} else
set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed);
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
+
if (ret)
return;
@@ -1008,9 +997,7 @@ static const struct ide_port_info pmac_port_info = {
.chipset = ide_pmac,
.tp_ops = &pmac_tp_ops,
.port_ops = &pmac_ide_port_ops,
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
.dma_ops = &pmac_dma_ops,
-#endif
.host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
IDE_HFLAG_POST_SET_MODE |
IDE_HFLAG_MMIO |
@@ -1182,7 +1169,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
pmif->regbase = regbase;
pmif->irq = irq;
pmif->kauai_fcr = NULL;
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
+
if (macio_resource_count(mdev) >= 2) {
if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
printk(KERN_WARNING "ide-pmac: can't request DMA "
@@ -1192,7 +1179,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
} else
pmif->dma_regs = NULL;
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
+
dev_set_drvdata(&mdev->ofdev.dev, pmif);
memset(&hw, 0, sizeof(hw));
@@ -1300,9 +1287,7 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
base = ioremap(rbase, rlen);
pmif->regbase = (unsigned long) base + 0x2000;
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
pmif->dma_regs = base + 0x1000;
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
pmif->kauai_fcr = base;
pmif->irq = pdev->irq;
@@ -1434,8 +1419,6 @@ out:
return error;
}
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
-
/*
* pmac_ide_build_dmatable builds the DBDMA command list
* for a transfer and sets the DBDMA channel to point to it.
@@ -1723,13 +1706,6 @@ static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
return 0;
}
-#else
-static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
- const struct ide_port_info *d)
-{
- return -EOPNOTSUPP;
-}
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
module_init(pmac_ide_probe);
diff --git a/drivers/ide/sgiioc4.c b/drivers/ide/sgiioc4.c
index 7defa0ae201..a687a7dfea6 100644
--- a/drivers/ide/sgiioc4.c
+++ b/drivers/ide/sgiioc4.c
@@ -550,7 +550,7 @@ static const struct ide_dma_ops sgiioc4_dma_ops = {
.dma_timeout = ide_dma_timeout,
};
-static const struct ide_port_info sgiioc4_port_info __devinitdata = {
+static const struct ide_port_info sgiioc4_port_info __devinitconst = {
.name = DRV_NAME,
.chipset = ide_pci,
.init_dma = ide_dma_sgiioc4,
@@ -633,7 +633,7 @@ out:
return ret;
}
-int
+int __devinit
ioc4_ide_attach_one(struct ioc4_driver_data *idd)
{
/* PCI-RT does not bring out IDE connection.
@@ -645,7 +645,7 @@ ioc4_ide_attach_one(struct ioc4_driver_data *idd)
return pci_init_sgiioc4(idd->idd_pdev);
}
-static struct ioc4_submodule ioc4_ide_submodule = {
+static struct ioc4_submodule __devinitdata ioc4_ide_submodule = {
.is_name = "IOC4_ide",
.is_owner = THIS_MODULE,
.is_probe = ioc4_ide_attach_one,
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 9e39f73282e..79ef5fd928a 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -115,8 +115,14 @@ static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length,
return error;
}
+#define OUI_FREECOM_TECHNOLOGIES_GMBH 0x0001db
+
static int nodemgr_get_max_rom(quadlet_t *bus_info_data, void *__ci)
{
+ /* Freecom FireWire Hard Drive firmware bug */
+ if (be32_to_cpu(bus_info_data[3]) >> 8 == OUI_FREECOM_TECHNOLOGIES_GMBH)
+ return 0;
+
return (be32_to_cpu(bus_info_data[2]) >> 8) & 0x3;
}
@@ -1685,6 +1691,7 @@ static int nodemgr_host_thread(void *data)
g = get_hpsb_generation(host);
for (i = 0; i < 4 ; i++) {
msleep_interruptible(63);
+ try_to_freeze();
if (kthread_should_stop())
goto exit;
@@ -1725,6 +1732,7 @@ static int nodemgr_host_thread(void *data)
/* Sleep 3 seconds */
for (i = 3000/200; i; i--) {
msleep_interruptible(200);
+ try_to_freeze();
if (kthread_should_stop())
goto exit;
diff --git a/drivers/isdn/hardware/avm/b1isa.c b/drivers/isdn/hardware/avm/b1isa.c
index 1e288eeb5e2..6461a32bc83 100644
--- a/drivers/isdn/hardware/avm/b1isa.c
+++ b/drivers/isdn/hardware/avm/b1isa.c
@@ -233,10 +233,8 @@ static void __exit b1isa_exit(void)
int i;
for (i = 0; i < MAX_CARDS; i++) {
- if (!io[i])
- break;
-
- b1isa_remove(&isa_dev[i]);
+ if (isa_dev[i].resource[0].start)
+ b1isa_remove(&isa_dev[i]);
}
unregister_capi_driver(&capi_driver_b1isa);
}
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c
index cfa8fa5e44a..3f2a0a20c19 100644
--- a/drivers/isdn/hysdn/hysdn_net.c
+++ b/drivers/isdn/hysdn/hysdn_net.c
@@ -83,12 +83,12 @@ net_open(struct net_device *dev)
/* Fill in the MAC-level header (if not already set) */
if (!card->mac_addr[0]) {
- for (i = 0; i < ETH_ALEN - sizeof(unsigned long); i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = 0xfc;
if ((in_dev = dev->ip_ptr) != NULL) {
struct in_ifaddr *ifa = in_dev->ifa_list;
if (ifa != NULL)
- memcpy(dev->dev_addr + (ETH_ALEN - sizeof(unsigned long)), &ifa->ifa_local, sizeof(unsigned long));
+ memcpy(dev->dev_addr + (ETH_ALEN - sizeof(ifa->ifa_local)), &ifa->ifa_local, sizeof(ifa->ifa_local));
}
} else
memcpy(dev->dev_addr, card->mac_addr, ETH_ALEN);
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index ac89a5deaca..ab7c8e4a61f 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -208,16 +208,19 @@ static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
*/
/* IO operations when bitmap is stored near all superblocks */
-static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long index)
+static struct page *read_sb_page(mddev_t *mddev, long offset,
+ struct page *page,
+ unsigned long index, int size)
{
/* choose a good rdev and read the page from there */
mdk_rdev_t *rdev;
struct list_head *tmp;
- struct page *page = alloc_page(GFP_KERNEL);
sector_t target;
if (!page)
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
return ERR_PTR(-ENOMEM);
rdev_for_each(rdev, tmp, mddev) {
@@ -227,7 +230,9 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long inde
target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
- if (sync_page_io(rdev->bdev, target, PAGE_SIZE, page, READ)) {
+ if (sync_page_io(rdev->bdev, target,
+ roundup(size, bdev_hardsect_size(rdev->bdev)),
+ page, READ)) {
page->index = index;
attach_page_buffers(page, NULL); /* so that free_buffer will
* quietly no-op */
@@ -544,7 +549,9 @@ static int bitmap_read_sb(struct bitmap *bitmap)
bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes);
} else {
- bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset, 0);
+ bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset,
+ NULL,
+ 0, sizeof(bitmap_super_t));
}
if (IS_ERR(bitmap->sb_page)) {
err = PTR_ERR(bitmap->sb_page);
@@ -957,11 +964,16 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
*/
page = bitmap->sb_page;
offset = sizeof(bitmap_super_t);
+ read_sb_page(bitmap->mddev, bitmap->offset,
+ page,
+ index, count);
} else if (file) {
page = read_page(file, index, bitmap, count);
offset = 0;
} else {
- page = read_sb_page(bitmap->mddev, bitmap->offset, index);
+ page = read_sb_page(bitmap->mddev, bitmap->offset,
+ NULL,
+ index, count);
offset = 0;
}
if (IS_ERR(page)) { /* read error */
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a63161aec48..04e5fd742c2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -668,7 +668,7 @@ static void check_for_valid_limits(struct io_restrictions *rs)
if (!rs->max_segment_size)
rs->max_segment_size = MAX_SEGMENT_SIZE;
if (!rs->seg_boundary_mask)
- rs->seg_boundary_mask = -1;
+ rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
if (!rs->bounce_pfn)
rs->bounce_pfn = -1;
}
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 84bdc2ee69e..a443e136dc4 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -354,7 +354,7 @@ static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
* @req: the request to prepare
*
* Allocate the necessary i2o_block_request struct and connect it to
- * the request. This is needed that we not loose the SG list later on.
+ * the request. This is needed so that we do not lose the SG list later on.
*
* Returns BLKPREP_OK on success or BLKPREP_DEFER on failure.
*/
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index be2b5926d26..6e53a30bfd3 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -49,7 +49,6 @@ static int i2o_hrt_get(struct i2o_controller *c);
/**
* i2o_msg_get_wait - obtain an I2O message from the IOP
* @c: I2O controller
- * @msg: pointer to a I2O message pointer
* @wait: how long to wait until timeout
*
* This function waits up to wait seconds for a message slot to be
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 8c389d606c3..3ee698ad859 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -254,7 +254,11 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
return 1;
*paddr = pte_pfn(pte) << PAGE_SHIFT;
+#ifdef CONFIG_HUGETLB_PAGE
*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
+#else
+ *pageshift = PAGE_SHIFT;
+#endif
return 0;
err:
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 76a76751da3..6659b2275c0 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -37,9 +37,9 @@
#define OPCODE_NORM_READ 0x03 /* Read data bytes (low frequency) */
#define OPCODE_FAST_READ 0x0b /* Read data bytes (high frequency) */
#define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */
-#define OPCODE_BE_4K 0x20 /* Erase 4KiB block */
+#define OPCODE_BE_4K 0x20 /* Erase 4KiB block */
#define OPCODE_BE_32K 0x52 /* Erase 32KiB block */
-#define OPCODE_BE 0xc7 /* Erase whole flash block */
+#define OPCODE_CHIP_ERASE 0xc7 /* Erase whole flash chip */
#define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */
#define OPCODE_RDID 0x9f /* Read JEDEC ID */
@@ -167,7 +167,7 @@ static int wait_till_ready(struct m25p *flash)
*
* Returns 0 if successful, non-zero otherwise.
*/
-static int erase_block(struct m25p *flash)
+static int erase_chip(struct m25p *flash)
{
DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB\n",
flash->spi->dev.bus_id, __func__,
@@ -181,7 +181,7 @@ static int erase_block(struct m25p *flash)
write_enable(flash);
/* Set up command buffer. */
- flash->command[0] = OPCODE_BE;
+ flash->command[0] = OPCODE_CHIP_ERASE;
spi_write(flash->spi, flash->command, 1);
@@ -250,15 +250,18 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
mutex_lock(&flash->lock);
- /* REVISIT in some cases we could speed up erasing large regions
- * by using OPCODE_SE instead of OPCODE_BE_4K
- */
-
- /* now erase those sectors */
- if (len == flash->mtd.size && erase_block(flash)) {
+ /* whole-chip erase? */
+ if (len == flash->mtd.size && erase_chip(flash)) {
instr->state = MTD_ERASE_FAILED;
mutex_unlock(&flash->lock);
return -EIO;
+
+ /* REVISIT in some cases we could speed up erasing large regions
+ * by using OPCODE_SE instead of OPCODE_BE_4K. We may have set up
+ * to use "small sector erase", but that's not always optimal.
+ */
+
+ /* "sector"-at-a-time erase */
} else {
while (len) {
if (erase_sector(flash, addr)) {
@@ -574,10 +577,11 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
for (tmp = 0, info = m25p_data;
tmp < ARRAY_SIZE(m25p_data);
tmp++, info++) {
- if (info->jedec_id == jedec)
- if (ext_jedec != 0 && info->ext_id != ext_jedec)
+ if (info->jedec_id == jedec) {
+ if (info->ext_id != 0 && info->ext_id != ext_jedec)
continue;
return info;
+ }
}
dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
return NULL;
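The jedec_probe() fix above only compares the extended ID when the table entry actually declares one, and keeps the first-match-wins order. A tiny standalone C sketch of that lookup over a made-up table (the IDs below are illustrative, not real part numbers):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct flash_info {
	const char *name;
	uint32_t jedec_id;
	uint16_t ext_id;	/* 0 means "don't care" */
};

static const struct flash_info table[] = {
	{ "ext-chip",   0x202015, 0x4d00 },
	{ "plain-chip", 0x202015, 0x0000 },
};

static const struct flash_info *match(uint32_t jedec, uint16_t ext)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].jedec_id != jedec)
			continue;
		/* only check ext_id when the entry specifies one */
		if (table[i].ext_id != 0 && table[i].ext_id != ext)
			continue;
		return &table[i];
	}
	return NULL;
}

int main(void)
{
	const struct flash_info *hit;

	hit = match(0x202015, 0x4d00);	/* exact extended match: ext-chip */
	printf("%s\n", hit ? hit->name : "unknown");

	hit = match(0x202015, 0x1234);	/* falls through to the generic entry */
	printf("%s\n", hit ? hit->name : "unknown");
	return 0;
}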
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 42d844f8f6b..dfbf3f270ce 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -19,7 +19,7 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/concat.h>
-#include <asm/io.h>
+#include <linux/io.h>
#define MAX_RESOURCES 4
@@ -27,7 +27,6 @@ struct physmap_flash_info {
struct mtd_info *mtd[MAX_RESOURCES];
struct mtd_info *cmtd;
struct map_info map[MAX_RESOURCES];
- struct resource *res;
#ifdef CONFIG_MTD_PARTITIONS
int nr_parts;
struct mtd_partition *parts;
@@ -70,16 +69,7 @@ static int physmap_flash_remove(struct platform_device *dev)
#endif
map_destroy(info->mtd[i]);
}
-
- if (info->map[i].virt != NULL)
- iounmap(info->map[i].virt);
- }
-
- if (info->res != NULL) {
- release_resource(info->res);
- kfree(info->res);
}
-
return 0;
}
@@ -101,7 +91,8 @@ static int physmap_flash_probe(struct platform_device *dev)
if (physmap_data == NULL)
return -ENODEV;
- info = kzalloc(sizeof(struct physmap_flash_info), GFP_KERNEL);
+ info = devm_kzalloc(&dev->dev, sizeof(struct physmap_flash_info),
+ GFP_KERNEL);
if (info == NULL) {
err = -ENOMEM;
goto err_out;
@@ -114,10 +105,10 @@ static int physmap_flash_probe(struct platform_device *dev)
(unsigned long long)(dev->resource[i].end - dev->resource[i].start + 1),
(unsigned long long)dev->resource[i].start);
- info->res = request_mem_region(dev->resource[i].start,
- dev->resource[i].end - dev->resource[i].start + 1,
- dev->dev.bus_id);
- if (info->res == NULL) {
+ if (!devm_request_mem_region(&dev->dev,
+ dev->resource[i].start,
+ dev->resource[i].end - dev->resource[i].start + 1,
+ dev->dev.bus_id)) {
dev_err(&dev->dev, "Could not reserve memory region\n");
err = -ENOMEM;
goto err_out;
@@ -129,7 +120,8 @@ static int physmap_flash_probe(struct platform_device *dev)
info->map[i].bankwidth = physmap_data->width;
info->map[i].set_vpp = physmap_data->set_vpp;
- info->map[i].virt = ioremap(info->map[i].phys, info->map[i].size);
+ info->map[i].virt = devm_ioremap(&dev->dev, info->map[i].phys,
+ info->map[i].size);
if (info->map[i].virt == NULL) {
dev_err(&dev->dev, "Failed to ioremap flash region\n");
err = EIO;
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 024e3fffd4b..a83192f80eb 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -163,9 +163,11 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0);
#ifdef CONFIG_MTD_OF_PARTS
- if (ret == 0)
- ret = of_mtd_parse_partitions(fun->dev, &fun->mtd,
- flash_np, &fun->parts);
+ if (ret == 0) {
+ ret = of_mtd_parse_partitions(fun->dev, flash_np, &fun->parts);
+ if (ret < 0)
+ goto err;
+ }
#endif
if (ret > 0)
ret = add_mtd_partitions(&fun->mtd, fun->parts, ret);
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 75c89903902..9bd6c9ac844 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -141,6 +141,7 @@ static int __devinit pasemi_nand_probe(struct of_device *ofdev,
}
lpcctl = pci_resource_start(pdev, 0);
+ pci_dev_put(pdev);
if (!request_region(lpcctl, 4, driver_name)) {
err = -EBUSY;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index c0fa9c9edf0..15f0a26730a 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -269,6 +269,7 @@ static struct pxa3xx_nand_timing stm2GbX16_timing = {
static struct pxa3xx_nand_flash stm2GbX16 = {
.timing = &stm2GbX16_timing,
+ .cmdset = &largepage_cmdset,
.page_per_block = 64,
.page_size = 2048,
.flash_width = 16,
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index e39b21d3e16..a7e4d985f5e 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -32,19 +32,18 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <asm/mach/flash.h>
-#include <asm/arch/gpmc.h>
-#include <asm/arch/onenand.h>
-#include <asm/arch/gpio.h>
-#include <asm/arch/pm.h>
+#include <mach/gpmc.h>
+#include <mach/onenand.h>
+#include <mach/gpio.h>
+#include <mach/pm.h>
-#include <linux/dma-mapping.h>
-#include <asm/dma-mapping.h>
-#include <asm/arch/dma.h>
+#include <mach/dma.h>
-#include <asm/arch/board.h>
+#include <mach/board.h>
#define DRIVER_NAME "omap2-onenand"
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index d07e3f14895..9e8222f9e90 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -543,9 +543,9 @@ bnx2_free_rx_mem(struct bnx2 *bp)
for (j = 0; j < bp->rx_max_pg_ring; j++) {
if (rxr->rx_pg_desc_ring[j])
pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
- rxr->rx_pg_desc_ring[i],
- rxr->rx_pg_desc_mapping[i]);
- rxr->rx_pg_desc_ring[i] = NULL;
+ rxr->rx_pg_desc_ring[j],
+ rxr->rx_pg_desc_mapping[j]);
+ rxr->rx_pg_desc_ring[j] = NULL;
}
if (rxr->rx_pg_ring)
vfree(rxr->rx_pg_ring);
@@ -3144,6 +3144,28 @@ bnx2_has_work(struct bnx2_napi *bnapi)
return 0;
}
+static void
+bnx2_chk_missed_msi(struct bnx2 *bp)
+{
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+ u32 msi_ctrl;
+
+ if (bnx2_has_work(bnapi)) {
+ msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
+ if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
+ return;
+
+ if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
+ REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
+ ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
+ REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
+ bnx2_msi(bp->irq_tbl[0].vector, bnapi);
+ }
+ }
+
+ bp->idle_chk_status_idx = bnapi->last_status_idx;
+}
+
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
struct status_block *sblk = bnapi->status_blk.msi;
@@ -3218,14 +3240,15 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
- if (unlikely(work_done >= budget))
- break;
-
/* bnapi->last_status_idx is used below to tell the hw how
* much work has been processed, so we must read it before
* checking for more work.
*/
bnapi->last_status_idx = sblk->status_idx;
+
+ if (unlikely(work_done >= budget))
+ break;
+
rmb();
if (likely(!bnx2_has_work(bnapi))) {
netif_rx_complete(bp->dev, napi);
@@ -4570,6 +4593,8 @@ bnx2_init_chip(struct bnx2 *bp)
for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
bp->bnx2_napi[i].last_status_idx = 0;
+ bp->idle_chk_status_idx = 0xffff;
+
bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
/* Set up how to generate a link change interrupt. */
@@ -5718,6 +5743,10 @@ bnx2_timer(unsigned long data)
if (atomic_read(&bp->intr_sem) != 0)
goto bnx2_restart_timer;
+ if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
+ BNX2_FLAG_USING_MSI)
+ bnx2_chk_missed_msi(bp);
+
bnx2_send_heart_beat(bp);
bp->stats_blk->stat_FwRxDrop =
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 617d9534016..0b032c3c7b6 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -378,6 +378,9 @@ struct l2_fhdr {
* pci_config_l definition
* offset: 0000
*/
+#define BNX2_PCICFG_MSI_CONTROL 0x00000058
+#define BNX2_PCICFG_MSI_CONTROL_ENABLE (1L<<16)
+
#define BNX2_PCICFG_MISC_CONFIG 0x00000068
#define BNX2_PCICFG_MISC_CONFIG_TARGET_BYTE_SWAP (1L<<2)
#define BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP (1L<<3)
@@ -6863,6 +6866,9 @@ struct bnx2 {
u8 num_tx_rings;
u8 num_rx_rings;
+
+ u32 idle_chk_status_idx;
+
};
#define REG_RD(bp, offset) \
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 523b9716a54..d115a6d30f2 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1893,12 +1893,17 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ctrl |= E1000_CTRL_PHY_RST;
}
ret_val = e1000_acquire_swflag_ich8lan(hw);
+ /* Whether or not the swflag was acquired, we need to reset the part */
hw_dbg(hw, "Issuing a global reset to ich8lan");
ew32(CTRL, (ctrl | E1000_CTRL_RST));
msleep(20);
- /* release the swflag because it is not reset by hardware reset */
- e1000_release_swflag_ich8lan(hw);
+ if (!ret_val) {
+ /* release the swflag because it is not reset by
+ * hardware reset
+ */
+ e1000_release_swflag_ich8lan(hw);
+ }
ret_val = e1000e_get_auto_rd_done(hw);
if (ret_val) {
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index e1b441effbb..36cb6e95b46 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -568,6 +568,17 @@ static u16 erxrdpt_workaround(u16 next_packet_ptr, u16 start, u16 end)
return erxrdpt;
}
+/*
+ * Calculate wrap around when reading beyond the end of the RX buffer
+ */
+static u16 rx_packet_start(u16 ptr)
+{
+ if (ptr + RSV_SIZE > RXEND_INIT)
+ return (ptr + RSV_SIZE) - (RXEND_INIT - RXSTART_INIT + 1);
+ else
+ return ptr + RSV_SIZE;
+}
+
static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
{
u16 erxrdpt;
@@ -938,8 +949,9 @@ static void enc28j60_hw_rx(struct net_device *ndev)
skb->dev = ndev;
skb_reserve(skb, NET_IP_ALIGN);
/* copy the packet from the receive buffer */
- enc28j60_mem_read(priv, priv->next_pk_ptr + sizeof(rsv),
- len, skb_put(skb, len));
+ enc28j60_mem_read(priv,
+ rx_packet_start(priv->next_pk_ptr),
+ len, skb_put(skb, len));
if (netif_msg_pktdata(priv))
dump_packet(__func__, skb->len, skb->data);
skb->protocol = eth_type_trans(skb, ndev);
@@ -947,7 +959,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += len;
ndev->last_rx = jiffies;
- netif_rx(skb);
+ netif_rx_ni(skb);
}
}
/*
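rx_packet_start() above is pure pointer arithmetic: skip the receive status vector, and if that runs past the end of the RX FIFO window, wrap back to its start. A standalone C sketch of the same calculation, with made-up RXSTART/RXEND/RSV values rather than the chip's real buffer layout:

#include <stdint.h>
#include <stdio.h>

#define RXSTART 0x0000u
#define RXEND   0x0fffu		/* inclusive end of the RX window */
#define RSV     6u		/* receive status vector size */

static uint16_t rx_packet_start(uint16_t ptr)
{
	if (ptr + RSV > RXEND)
		return (uint16_t)((ptr + RSV) - (RXEND - RXSTART + 1));
	return (uint16_t)(ptr + RSV);
}

int main(void)
{
	printf("%#x\n", rx_packet_start(0x0100));	/* no wrap: 0x106 */
	printf("%#x\n", rx_packet_start(0x0ffc));	/* wraps:   0x2   */
	return 0;
}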
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index f863aee6648..3f5d9154324 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -22,7 +22,7 @@
*/
#ifndef __JME_H_INCLUDED__
-#define __JME_H_INCLUDEE__
+#define __JME_H_INCLUDED__
#define DRV_NAME "jme"
#define DRV_VERSION "1.0.3"
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index b9bed82e1d2..b289a0a2b94 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -401,6 +401,8 @@ static int netx_eth_drv_probe(struct platform_device *pdev)
priv->xmac_base = priv->xc->xmac_base;
priv->sram_base = priv->xc->sram_base;
+ spin_lock_init(&priv->lock);
+
ret = pfifo_request(PFIFO_MASK(priv->id));
if (ret) {
printk("unable to request PFIFO\n");
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 536bda1f428..289fc267edf 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -105,8 +105,6 @@ int mdiobus_register(struct mii_bus *bus)
return -EINVAL;
}
- bus->state = MDIOBUS_REGISTERED;
-
mutex_init(&bus->mdio_lock);
if (bus->reset)
@@ -123,6 +121,9 @@ int mdiobus_register(struct mii_bus *bus)
}
}
+ if (!err)
+ bus->state = MDIOBUS_REGISTERED;
+
pr_info("%s: probed\n", bus->name);
return err;
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 1d2ef8f4778..5a40f2d78be 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1509,6 +1509,11 @@ static int __netdev_rx(struct net_device *dev, int *quota)
desc->status = 0;
np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
}
+
+ if (*quota == 0) { /* out of rx quota */
+ retcode = 1;
+ goto out;
+ }
writew(np->rx_done, np->base + CompletionQConsumerIdx);
out:
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 1349e419673..fed7eba65ea 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1142,6 +1142,70 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static void gem_pcs_reset(struct gem *gp)
+{
+ int limit;
+ u32 val;
+
+ /* Reset PCS unit. */
+ val = readl(gp->regs + PCS_MIICTRL);
+ val |= PCS_MIICTRL_RST;
+ writel(val, gp->regs + PCS_MIICTRL);
+
+ limit = 32;
+ while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
+ udelay(100);
+ if (limit-- <= 0)
+ break;
+ }
+ if (limit <= 0)
+ printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
+ gp->dev->name);
+}
+
+static void gem_pcs_reinit_adv(struct gem *gp)
+{
+ u32 val;
+
+ /* Make sure PCS is disabled while changing advertisement
+ * configuration.
+ */
+ val = readl(gp->regs + PCS_CFG);
+ val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
+ writel(val, gp->regs + PCS_CFG);
+
+ /* Advertise all capabilities except asymmetric
+ * pause.
+ */
+ val = readl(gp->regs + PCS_MIIADV);
+ val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
+ PCS_MIIADV_SP | PCS_MIIADV_AP);
+ writel(val, gp->regs + PCS_MIIADV);
+
+ /* Enable and restart auto-negotiation, disable wrapback/loopback,
+ * and re-enable PCS.
+ */
+ val = readl(gp->regs + PCS_MIICTRL);
+ val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
+ val &= ~PCS_MIICTRL_WB;
+ writel(val, gp->regs + PCS_MIICTRL);
+
+ val = readl(gp->regs + PCS_CFG);
+ val |= PCS_CFG_ENABLE;
+ writel(val, gp->regs + PCS_CFG);
+
+ /* Make sure serialink loopback is off. The meaning
+ * of this bit is logically inverted based upon whether
+ * you are in Serialink or SERDES mode.
+ */
+ val = readl(gp->regs + PCS_SCTRL);
+ if (gp->phy_type == phy_serialink)
+ val &= ~PCS_SCTRL_LOOP;
+ else
+ val |= PCS_SCTRL_LOOP;
+ writel(val, gp->regs + PCS_SCTRL);
+}
+
#define STOP_TRIES 32
/* Must be invoked under gp->lock and gp->tx_lock. */
@@ -1168,6 +1232,9 @@ static void gem_reset(struct gem *gp)
if (limit <= 0)
printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
+
+ if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
+ gem_pcs_reinit_adv(gp);
}
/* Must be invoked under gp->lock and gp->tx_lock. */
@@ -1324,7 +1391,7 @@ static int gem_set_link_modes(struct gem *gp)
gp->phy_type == phy_serdes) {
u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
- if (pcs_lpa & PCS_MIIADV_FD)
+ if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
full_duplex = 1;
speed = SPEED_1000;
}
@@ -1488,6 +1555,9 @@ static void gem_link_timer(unsigned long data)
val = readl(gp->regs + PCS_MIISTAT);
if ((val & PCS_MIISTAT_LS) != 0) {
+ if (gp->lstate == link_up)
+ goto restart;
+
gp->lstate = link_up;
netif_carrier_on(gp->dev);
(void)gem_set_link_modes(gp);
@@ -1708,61 +1778,8 @@ static void gem_init_phy(struct gem *gp)
if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
gp->phy_mii.def->ops->init(&gp->phy_mii);
} else {
- u32 val;
- int limit;
-
- /* Reset PCS unit. */
- val = readl(gp->regs + PCS_MIICTRL);
- val |= PCS_MIICTRL_RST;
- writel(val, gp->regs + PCS_MIICTRL);
-
- limit = 32;
- while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
- udelay(100);
- if (limit-- <= 0)
- break;
- }
- if (limit <= 0)
- printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
- gp->dev->name);
-
- /* Make sure PCS is disabled while changing advertisement
- * configuration.
- */
- val = readl(gp->regs + PCS_CFG);
- val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
- writel(val, gp->regs + PCS_CFG);
-
- /* Advertise all capabilities except assymetric
- * pause.
- */
- val = readl(gp->regs + PCS_MIIADV);
- val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
- PCS_MIIADV_SP | PCS_MIIADV_AP);
- writel(val, gp->regs + PCS_MIIADV);
-
- /* Enable and restart auto-negotiation, disable wrapback/loopback,
- * and re-enable PCS.
- */
- val = readl(gp->regs + PCS_MIICTRL);
- val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
- val &= ~PCS_MIICTRL_WB;
- writel(val, gp->regs + PCS_MIICTRL);
-
- val = readl(gp->regs + PCS_CFG);
- val |= PCS_CFG_ENABLE;
- writel(val, gp->regs + PCS_CFG);
-
- /* Make sure serialink loopback is off. The meaning
- * of this bit is logically inverted based upon whether
- * you are in Serialink or SERDES mode.
- */
- val = readl(gp->regs + PCS_SCTRL);
- if (gp->phy_type == phy_serialink)
- val &= ~PCS_SCTRL_LOOP;
- else
- val |= PCS_SCTRL_LOOP;
- writel(val, gp->regs + PCS_SCTRL);
+ gem_pcs_reset(gp);
+ gem_pcs_reinit_adv(gp);
}
/* Default aneg parameters */
@@ -2680,6 +2697,21 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->speed = 0;
cmd->duplex = cmd->port = cmd->phy_address =
cmd->transceiver = cmd->autoneg = 0;
+
+ /* serdes usually means a Fibre connector, with most settings fixed */
+ if (gp->phy_type == phy_serdes) {
+ cmd->port = PORT_FIBRE;
+ cmd->supported = (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE | SUPPORTED_Autoneg |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ cmd->advertising = cmd->supported;
+ cmd->transceiver = XCVR_INTERNAL;
+ if (gp->lstate == link_up)
+ cmd->speed = SPEED_1000;
+ cmd->duplex = DUPLEX_FULL;
+ cmd->autoneg = 1;
+ }
}
cmd->maxtxpkt = cmd->maxrxpkt = 0;
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index c41d6876136..e60498232b9 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -1098,6 +1098,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
dma_addr_t tail_list_phys;
u8 *tail_buffer;
unsigned long flags;
+ unsigned int txlen;
if ( ! priv->phyOnline ) {
TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
@@ -1108,6 +1109,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
return 0;
+ txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
tail_list = priv->txList + priv->txTail;
tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail;
@@ -1125,16 +1127,16 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
if ( bbuf ) {
tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE );
- skb_copy_from_linear_data(skb, tail_buffer, skb->len);
+ skb_copy_from_linear_data(skb, tail_buffer, txlen);
} else {
tail_list->buffer[0].address = pci_map_single(priv->pciDev,
- skb->data, skb->len,
+ skb->data, txlen,
PCI_DMA_TODEVICE);
TLan_StoreSKB(tail_list, skb);
}
- tail_list->frameSize = (u16) skb->len;
- tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) skb->len;
+ tail_list->frameSize = (u16) txlen;
+ tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
tail_list->buffer[1].count = 0;
tail_list->buffer[1].address = 0;
@@ -1431,7 +1433,9 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
if ( ! bbuf ) {
struct sk_buff *skb = TLan_GetSKB(head_list);
pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
- skb->len, PCI_DMA_TODEVICE);
+ max(skb->len,
+ (unsigned int)TLAN_MIN_FRAME_SIZE),
+ PCI_DMA_TODEVICE);
dev_kfree_skb_any(skb);
head_list->buffer[8].address = 0;
head_list->buffer[9].address = 0;
@@ -2055,9 +2059,12 @@ static void TLan_FreeLists( struct net_device *dev )
list = priv->txList + i;
skb = TLan_GetSKB(list);
if ( skb ) {
- pci_unmap_single(priv->pciDev,
- list->buffer[0].address, skb->len,
- PCI_DMA_TODEVICE);
+ pci_unmap_single(
+ priv->pciDev,
+ list->buffer[0].address,
+ max(skb->len,
+ (unsigned int)TLAN_MIN_FRAME_SIZE),
+ PCI_DMA_TODEVICE);
dev_kfree_skb_any( skb );
list->buffer[8].address = 0;
list->buffer[9].address = 0;
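The tlan changes above pad short frames to the minimum frame size and then use that padded length for pci_map_single() and every later pci_unmap_single(), so the DMA map and unmap lengths always agree. A one-function C sketch of the length rule, with MIN_FRAME as a hypothetical stand-in for TLAN_MIN_FRAME_SIZE:

#include <stdio.h>

#define MIN_FRAME 64u	/* hypothetical stand-in for TLAN_MIN_FRAME_SIZE */

/* use the same padded length for both map and unmap */
static unsigned int tx_dma_len(unsigned int skb_len)
{
	return skb_len > MIN_FRAME ? skb_len : MIN_FRAME;
}

int main(void)
{
	printf("%u\n", tx_dma_len(42));		/* padded to 64 */
	printf("%u\n", tx_dma_len(1500));	/* unchanged */
	return 0;
}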
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index dcce3542d5a..7a9f901d4ff 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -3897,6 +3897,7 @@ static int ipw_disassociate(void *data)
if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
return 0;
ipw_send_disassociate(data, 0);
+ netif_carrier_off(priv->net_dev);
return 1;
}
@@ -10190,6 +10191,9 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
u16 remaining_bytes;
int fc;
+ if (!(priv->status & STATUS_ASSOCIATED))
+ goto drop;
+
hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
switch (priv->ieee->iw_mode) {
case IW_MODE_ADHOC:
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 4c312c55f90..01a84585133 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -290,6 +290,9 @@ void iwl_clear_stations_table(struct iwl_priv *priv)
priv->num_stations = 0;
memset(priv->stations, 0, sizeof(priv->stations));
+ /* clean ucode key table bit map */
+ priv->ucode_key_table = 0;
+
spin_unlock_irqrestore(&priv->sta_lock, flags);
}
EXPORT_SYMBOL(iwl_clear_stations_table);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 61797f3f8d5..26f7084d301 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -475,7 +475,7 @@ static int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
if (!test_and_set_bit(i, &priv->ucode_key_table))
return i;
- return -1;
+ return WEP_INVALID_OFFSET;
}
int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
@@ -620,6 +620,9 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
/* else, we are overriding an existing key => no need to allocated room
* in uCode. */
+ WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for new kew");
+
priv->stations[sta_id].sta.key.key_flags = key_flags;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
@@ -637,6 +640,7 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
{
unsigned long flags;
__le16 key_flags = 0;
+ int ret;
key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
@@ -664,14 +668,18 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
/* else, we are overriding an existing key => no need to allocated room
* in uCode. */
+ WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for new kew");
+
priv->stations[sta_id].sta.key.key_flags = key_flags;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+
spin_unlock_irqrestore(&priv->sta_lock, flags);
- IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
- return iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+ return ret;
}
static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
@@ -696,6 +704,9 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
/* else, we are overriding an existing key => no need to allocated room
* in uCode. */
+ WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for new kew");
+
 /* This copy is actually not needed: we get the key with each TX */
memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
@@ -734,6 +745,13 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
return 0;
}
+ if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+ IWL_WARNING("Removing wrong key %d 0x%x\n",
+ keyconf->keyidx, key_flags);
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ return 0;
+ }
+
if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
&priv->ucode_key_table))
IWL_ERROR("index %d not used in uCode key table.\n",
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index fe1867b25ff..cac732f4047 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -615,7 +615,7 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
struct ieee80211_hdr *tx_hdr;
tx_hdr = (struct ieee80211_hdr *)skb->data;
- if (likely(!compare_ether_addr(tx_hdr->addr2, rx_hdr->addr1)))
+ if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN)))
{
__skb_unlink(skb, q);
tx_status(hw, skb, IEEE80211_TX_STAT_ACK, stats->signal, 1);
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index f9e244da30a..9bcb6cbd5aa 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -113,7 +113,7 @@ struct acpiphp_slot {
u8 device; /* pci device# */
- u32 sun; /* ACPI _SUN (slot unique number) */
+ unsigned long long sun; /* ACPI _SUN (slot unique number) */
u32 flags; /* see below */
};
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 95b536a23d2..43c10bd261b 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -337,7 +337,7 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN;
acpiphp_slot->slot = slot;
- snprintf(name, SLOT_NAME_SIZE, "%u", slot->acpi_slot->sun);
+ snprintf(name, SLOT_NAME_SIZE, "%llu", slot->acpi_slot->sun);
retval = pci_hp_register(slot->hotplug_slot,
acpiphp_slot->bridge->pci_bus,
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 955aae4071f..3affc6472e6 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -255,13 +255,13 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
bridge->nr_slots++;
- dbg("found ACPI PCI Hotplug slot %d at PCI %04x:%02x:%02x\n",
+ dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
slot->sun, pci_domain_nr(bridge->pci_bus),
bridge->pci_bus->number, slot->device);
retval = acpiphp_register_hotplug_slot(slot);
if (retval) {
if (retval == -EBUSY)
- warn("Slot %d already registered by another "
+ warn("Slot %llu already registered by another "
"hotplug driver\n", slot->sun);
else
warn("acpiphp_register_hotplug_slot failed "
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index c892daae74d..633e743442a 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -1402,10 +1402,6 @@ static int __init ibmphp_init(void)
goto error;
}
- /* lock ourselves into memory with a module
- * count of -1 so that no one can unload us. */
- module_put(THIS_MODULE);
-
exit:
return rc;
@@ -1423,4 +1419,3 @@ static void __exit ibmphp_exit(void)
}
module_init(ibmphp_init);
-module_exit(ibmphp_exit);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 4b23bc39b11..39cf248d24e 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -432,18 +432,19 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
goto err_out_release_ctlr;
}
+ /* Check if slot is occupied */
t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
-
- t_slot->hpc_ops->get_adapter_status(t_slot, &value); /* Check if slot is occupied */
- if (value && pciehp_force) {
- rc = pciehp_enable_slot(t_slot);
- if (rc) /* -ENODEV: shouldn't happen, but deal with it */
- value = 0;
- }
- if ((POWER_CTRL(ctrl)) && !value) {
- rc = t_slot->hpc_ops->power_off_slot(t_slot); /* Power off slot if not occupied*/
- if (rc)
- goto err_out_free_ctrl_slot;
+ t_slot->hpc_ops->get_adapter_status(t_slot, &value);
+ if (value) {
+ if (pciehp_force)
+ pciehp_enable_slot(t_slot);
+ } else {
+ /* Power off slot if not occupied */
+ if (POWER_CTRL(ctrl)) {
+ rc = t_slot->hpc_ops->power_off_slot(t_slot);
+ if (rc)
+ goto err_out_free_ctrl_slot;
+ }
}
return 0;
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index dfc63d01f20..aac7006949f 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -252,7 +252,7 @@ static void report_resume(struct pci_dev *dev, void *data)
if (!dev->driver ||
!dev->driver->err_handler ||
- !dev->driver->err_handler->slot_reset)
+ !dev->driver->err_handler->resume)
return;
err_handler = dev->driver->err_handler;
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 8f63f4c6b85..9aad608bcf3 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -16,6 +16,7 @@
#include <linux/pm.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/jiffies.h>
#include <linux/pci-aspm.h>
#include "../pci.h"
@@ -161,11 +162,12 @@ static void pcie_check_clock_pm(struct pci_dev *pdev)
*/
static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
{
- int pos, child_pos;
+ int pos, child_pos, i = 0;
u16 reg16 = 0;
struct pci_dev *child_dev;
int same_clock = 1;
-
+ unsigned long start_jiffies;
+ u16 child_regs[8], parent_reg;
/*
* all functions of a slot should have the same Slot Clock
* Configuration, so just check one function
@@ -191,16 +193,19 @@ static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
&reg16);
+ child_regs[i] = reg16;
if (same_clock)
reg16 |= PCI_EXP_LNKCTL_CCC;
else
reg16 &= ~PCI_EXP_LNKCTL_CCC;
pci_write_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
reg16);
+ i++;
}
/* Configure upstream component */
pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
+ parent_reg = reg16;
if (same_clock)
reg16 |= PCI_EXP_LNKCTL_CCC;
else
@@ -212,12 +217,30 @@ static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
/* Wait for link training end */
- while (1) {
+ /* break out after waiting for 1 second */
+ start_jiffies = jiffies;
+ while ((jiffies - start_jiffies) < HZ) {
pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_LT))
break;
cpu_relax();
}
+ /* training failed -> recover */
+ if ((jiffies - start_jiffies) >= HZ) {
+ dev_printk (KERN_ERR, &pdev->dev, "ASPM: Could not configure"
+ " common clock\n");
+ i = 0;
+ list_for_each_entry(child_dev, &pdev->subordinate->devices,
+ bus_list) {
+ child_pos = pci_find_capability(child_dev,
+ PCI_CAP_ID_EXP);
+ pci_write_config_word(child_dev,
+ child_pos + PCI_EXP_LNKCTL,
+ child_regs[i]);
+ i++;
+ }
+ pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, parent_reg);
+ }
}
/*
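The aspm.c change above replaces an unbounded link-training poll with a jiffies budget of roughly one second, after saving each downstream LNKCTL value (child_regs[], parent_reg) so the clock configuration can be rolled back if training never completes. A minimal sketch of that bounded-poll idiom, with the save/restore around it elided and the helper name purely illustrative:

	#include <linux/jiffies.h>
	#include <linux/pci.h>
	#include <linux/errno.h>

	/* Poll PCI_EXP_LNKSTA until link training ends, for at most one second. */
	static int wait_for_link_training(struct pci_dev *pdev, int pos)
	{
		unsigned long timeout = jiffies + HZ;	/* ~1 second budget */
		u16 reg16;

		do {
			pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
			if (!(reg16 & PCI_EXP_LNKSTA_LT))
				return 0;		/* training complete */
			cpu_relax();
		} while (time_before(jiffies, timeout));

		return -ETIMEDOUT;	/* caller restores the saved LNKCTL values */
	}

time_before() is the wrap-safe comparison helper usually used for this; the patch's open-coded (jiffies - start_jiffies) < HZ relies on unsigned subtraction to the same effect.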
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 4dd1c3e157a..5a8ccb4f604 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -253,6 +253,7 @@ placeholder:
__func__, pci_domain_nr(parent), parent->number, slot_nr);
out:
+ kfree(slot_name);
up_write(&pci_bus_sem);
return slot;
err:
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c
index bb7338863fb..b59d4115d20 100644
--- a/drivers/pcmcia/bfin_cf_pcmcia.c
+++ b/drivers/pcmcia/bfin_cf_pcmcia.c
@@ -334,6 +334,6 @@ static void __exit bfin_cf_exit(void)
module_init(bfin_cf_init);
module_exit(bfin_cf_exit);
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>")
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("BFIN CF/PCMCIA Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index 341d7a5b45a..4e91419e891 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -209,12 +209,18 @@ static int ds1672_probe(struct i2c_client *client,
return err;
}
+static struct i2c_device_id ds1672_id[] = {
+ { "ds1672", 0 },
+ { }
+};
+
static struct i2c_driver ds1672_driver = {
.driver = {
.name = "rtc-ds1672",
},
.probe = &ds1672_probe,
.remove = &ds1672_remove,
+ .id_table = ds1672_id,
};
static int __init ds1672_init(void)
diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c
index 80782798763..a4f6665ab3c 100644
--- a/drivers/rtc/rtc-max6900.c
+++ b/drivers/rtc/rtc-max6900.c
@@ -247,12 +247,18 @@ max6900_probe(struct i2c_client *client, const struct i2c_device_id *id)
return 0;
}
+static struct i2c_device_id max6900_id[] = {
+ { "max6900", 0 },
+ { }
+};
+
static struct i2c_driver max6900_driver = {
.driver = {
.name = "rtc-max6900",
},
.probe = max6900_probe,
.remove = max6900_remove,
+ .id_table = max6900_id,
};
static int __init max6900_init(void)
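The ds1672 and max6900 hunks follow the same pattern: the i2c core matches new-style clients against the driver's id_table, so without one the probe routine is never called. A hedged sketch of the shape for a hypothetical chip (foo_probe/foo_remove are assumed to exist elsewhere and are not part of either patch):

	static struct i2c_device_id foo_id[] = {
		{ "foo", 0 },
		{ }				/* terminating entry */
	};

	static struct i2c_driver foo_driver = {
		.driver = {
			.name	= "rtc-foo",
		},
		.probe		= foo_probe,
		.remove		= foo_remove,
		.id_table	= foo_id,	/* lets the core match clients named "foo" */
	};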
diff --git a/drivers/rtc/rtc-twl4030.c b/drivers/rtc/rtc-twl4030.c
index abe87a4d266..01d8da9afdc 100644
--- a/drivers/rtc/rtc-twl4030.c
+++ b/drivers/rtc/rtc-twl4030.c
@@ -337,7 +337,7 @@ static int twl4030_rtc_ioctl(struct device *dev, unsigned int cmd,
}
#else
-#define omap_rtc_ioctl NULL
+#define twl4030_rtc_ioctl NULL
#endif
static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc)
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 9aec4ca64e5..f7da7530875 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -107,6 +107,7 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
struct request *req;
int ret;
+retry:
req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
if (!req)
return SCSI_DH_RES_TEMP_UNAVAIL;
@@ -121,7 +122,6 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
req->sense_len = 0;
-retry:
ret = blk_execute_rq(req->q, NULL, req, 1);
if (ret == -EIO) {
if (req->sense_len > 0) {
@@ -136,8 +136,10 @@ retry:
h->path_state = HP_SW_PATH_ACTIVE;
ret = SCSI_DH_OK;
}
- if (ret == SCSI_DH_IMM_RETRY)
+ if (ret == SCSI_DH_IMM_RETRY) {
+ blk_put_request(req);
goto retry;
+ }
if (ret == SCSI_DH_DEV_OFFLINED) {
h->path_state = HP_SW_PATH_PASSIVE;
ret = SCSI_DH_OK;
@@ -200,6 +202,7 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
struct request *req;
int ret, retry;
+retry:
req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
if (!req)
return SCSI_DH_RES_TEMP_UNAVAIL;
@@ -216,7 +219,6 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
req->sense_len = 0;
retry = h->retries;
-retry:
ret = blk_execute_rq(req->q, NULL, req, 1);
if (ret == -EIO) {
if (req->sense_len > 0) {
@@ -231,8 +233,10 @@ retry:
ret = SCSI_DH_OK;
if (ret == SCSI_DH_RETRY) {
- if (--retry)
+ if (--retry) {
+ blk_put_request(req);
goto retry;
+ }
ret = SCSI_DH_IO;
}
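Both hp_sw hunks move the retry label above blk_get_request() and release the old request before jumping back, so each retry issues a freshly allocated request rather than re-executing one that has already completed. The control flow, reduced to a sketch with illustrative names and the request setup elided:

	static int issue_tur_with_retry(struct scsi_device *sdev)
	{
		struct request *req;
		int ret;

	retry:
		req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
		if (!req)
			return SCSI_DH_RES_TEMP_UNAVAIL;

		/* ... set cmd[], sense buffer and timeout as in hp_sw_tur() ... */

		ret = blk_execute_rq(req->q, NULL, req, 1);
		/* ... translate errors/sense data into a SCSI_DH_* code ... */

		if (ret == SCSI_DH_IMM_RETRY) {
			blk_put_request(req);	/* never reuse a finished request */
			goto retry;
		}

		blk_put_request(req);
		return ret;
	}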
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index c9e1242eaf2..5081b3981d3 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -757,7 +757,7 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
* access to the device is prohibited.
*/
error = scsi_nonblockable_ioctl(sdp, cmd, p,
- (mode & FMODE_NDELAY_NOW) != 0);
+ (mode & FMODE_NDELAY) != 0);
if (!scsi_block_when_processing_errors(sdp) || !error)
return error;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 62b6633e3a9..45b66b98a51 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -521,7 +521,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
* if it doesn't recognise the ioctl
*/
ret = scsi_nonblockable_ioctl(sdev, cmd, argp,
- (mode & FMODE_NDELAY_NOW) != 0);
+ (mode & FMODE_NDELAY) != 0);
if (ret != -ENODEV)
return ret;
return scsi_ioctl(sdev, cmd, argp);
diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c
index 6dd98f9fb89..ae3699d77dd 100644
--- a/drivers/serial/ioc3_serial.c
+++ b/drivers/serial/ioc3_serial.c
@@ -2149,7 +2149,7 @@ out4:
return ret;
}
-static struct ioc3_submodule ioc3uart_submodule = {
+static struct ioc3_submodule ioc3uart_ops = {
.name = "IOC3uart",
.probe = ioc3uart_probe,
.remove = ioc3uart_remove,
@@ -2173,7 +2173,7 @@ static int __devinit ioc3uart_init(void)
__func__);
return ret;
}
- ret = ioc3_register_submodule(&ioc3uart_submodule);
+ ret = ioc3_register_submodule(&ioc3uart_ops);
if (ret)
uart_unregister_driver(&ioc3_uart);
return ret;
@@ -2181,7 +2181,7 @@ static int __devinit ioc3uart_init(void)
static void __devexit ioc3uart_exit(void)
{
- ioc3_unregister_submodule(&ioc3uart_submodule);
+ ioc3_unregister_submodule(&ioc3uart_ops);
uart_unregister_driver(&ioc3_uart);
}
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 6117d3db0b6..28c00c3d58f 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -591,8 +591,8 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
/* Update the per-port timeout */
uart_update_timeout(port, new->c_cflag, baud);
- /* Do our best to flush TX & RX, so we don't loose anything */
- /* But we don't wait indefinitly ! */
+ /* Do our best to flush TX & RX, so we don't lose anything */
+ /* But we don't wait indefinitely ! */
j = 5000000; /* Maximum wait */
/* FIXME Can't receive chars since set_termios might be called at early
* boot for the console, all stuff is not yet ready to receive at that
diff --git a/drivers/serial/s3c2440.c b/drivers/serial/s3c2440.c
index 317d239ab74..29cbb0afef8 100644
--- a/drivers/serial/s3c2440.c
+++ b/drivers/serial/s3c2440.c
@@ -177,5 +177,5 @@ module_exit(s3c2440_serial_exit);
MODULE_DESCRIPTION("Samsung S3C2440,S3C2442 SoC Serial port driver");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_LICENSE("GPLi v2");
+MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:s3c2440-uart");
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
index d1812d32f47..63f0de29aa1 100644
--- a/drivers/sh/maple/maple.c
+++ b/drivers/sh/maple/maple.c
@@ -827,7 +827,7 @@ static int __init maple_bus_init(void)
maple_queue_cache =
kmem_cache_create("maple_queue_cache", 0x400, 0,
- SLAB_POISON|SLAB_HWCACHE_ALIGN, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (!maple_queue_cache)
goto cleanup_bothirqs;
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index c95b286a123..5d457c96bd7 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -22,6 +22,8 @@ menuconfig STAGING
If in doubt, say N here.
+if STAGING
+
config STAGING_EXCLUDE_BUILD
bool "Exclude Staging drivers from being built" if STAGING
default y
@@ -62,3 +64,4 @@ source "drivers/staging/at76_usb/Kconfig"
source "drivers/staging/poch/Kconfig"
endif # !STAGING_EXCLUDE_BUILD
+endif # STAGING
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 8e74657f106..43a863c5cc4 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -51,6 +51,7 @@ static struct usb_device_id usbtmc_devices[] = {
{ USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0), },
{ 0, } /* terminating entry */
};
+MODULE_DEVICE_TABLE(usb, usbtmc_devices);
/*
* This structure is the capabilities for the device
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 3d7793d9303..8c081308b0e 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -279,7 +279,9 @@ static int usb_unbind_interface(struct device *dev)
* altsetting means creating new endpoint device entries).
* When either of these happens, defer the Set-Interface.
*/
- if (!error && intf->dev.power.status == DPM_ON)
+ if (intf->cur_altsetting->desc.bAlternateSetting == 0)
+ ; /* Already in altsetting 0 so skip Set-Interface */
+ else if (!error && intf->dev.power.status == DPM_ON)
usb_set_interface(udev, intf->altsetting[0].
desc.bInterfaceNumber, 0);
else
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 428b5993575..3a8bb53fc47 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -651,6 +651,8 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
fs_in_desc.bEndpointAddress;
hs_out_desc.bEndpointAddress =
fs_out_desc.bEndpointAddress;
+ hs_notify_desc.bEndpointAddress =
+ fs_notify_desc.bEndpointAddress;
/* copy descriptors, and track endpoint copies */
f->hs_descriptors = usb_copy_descriptors(eth_hs_function);
@@ -662,6 +664,8 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
f->hs_descriptors, &hs_in_desc);
rndis->hs.out = usb_find_endpoint(eth_hs_function,
f->hs_descriptors, &hs_out_desc);
+ rndis->hs.notify = usb_find_endpoint(eth_hs_function,
+ f->hs_descriptors, &hs_notify_desc);
}
rndis->port.open = rndis_open;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 51d7bdea286..fb6f2933b01 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -143,6 +143,7 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
@@ -166,6 +167,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) },
@@ -1498,7 +1500,7 @@ static int ftdi_open(struct tty_struct *tty,
priv->interface, buf, 0, WDR_TIMEOUT);
/* Termios defaults are set by usb_serial_init. We don't change
- port->tty->termios - this would loose speed settings, etc.
+ port->tty->termios - this would lose speed settings, etc.
This is same behaviour as serial.c/rs_open() - Kuba */
/* ftdi_set_termios will send usb control messages */
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 07a3992abad..373ee09975b 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -40,6 +40,9 @@
/* AlphaMicro Components AMC-232USB01 device */
#define FTDI_AMC232_PID 0xFF00 /* Product Id */
+/* www.candapter.com Ewert Energy Systems CANdapter device */
+#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
+
/* SCS HF Radio Modems PID's (http://www.scs-ptc.com) */
/* the VID is the standard ftdi vid (FTDI_VID) */
#define FTDI_SCS_DEVICE_0_PID 0xD010 /* SCS PTC-IIusb */
@@ -75,6 +78,9 @@
/* OpenDCC (www.opendcc.de) product id */
#define FTDI_OPENDCC_PID 0xBFD8
+/* Sprog II (Andrew Crosland's SprogII DCC interface) */
+#define FTDI_SPROG_II 0xF0C8
+
/* www.crystalfontz.com devices - thanx for providing free devices for evaluation ! */
/* they use the ftdi chipset for the USB interface and the vendor id is the same */
#define FTDI_XF_632_PID 0xFC08 /* 632: 16x2 Character Display */
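The CANdapter and Sprog II additions show the usual two-step recipe for a new FTDI-based adapter: define the product id in ftdi_sio.h, then list it in id_table_combined[] in ftdi_sio.c. For a made-up device (the PID below is hypothetical, not a real product) the pair of edits would look like:

	/* ftdi_sio.h -- hypothetical product id */
	#define FTDI_EXAMPLE_PID	0x1234

	/* ftdi_sio.c, inside id_table_combined[] */
	{ USB_DEVICE(FTDI_VID, FTDI_EXAMPLE_PID) },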
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 491c8857b64..1aed584be5e 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -91,6 +91,8 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) },
{ USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) },
{ USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
+ { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index a3bd039c78e..54974f446a8 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -110,3 +110,11 @@
/* Y.C. Cable U.S.A., Inc - USB to RS-232 */
#define YCCABLE_VENDOR_ID 0x05ad
#define YCCABLE_PRODUCT_ID 0x0fba
+
+/* "Superial" USB - Serial */
+#define SUPERIAL_VENDOR_ID 0x5372
+#define SUPERIAL_PRODUCT_ID 0x2303
+
+/* Hewlett-Packard LD220-HP POS Pole Display */
+#define HP_VENDOR_ID 0x03f0
+#define HP_LD220_PRODUCT_ID 0x3524
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 31c42d1cae1..01d0c70d60e 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -16,56 +16,6 @@
* For questions or problems with this driver, contact Texas Instruments
* technical support, or Al Borchers <alborchers@steinerpoint.com>, or
* Peter Berger <pberger@brimson.com>.
- *
- * This driver needs this hotplug script in /etc/hotplug/usb/ti_usb_3410_5052
- * or in /etc/hotplug.d/usb/ti_usb_3410_5052.hotplug to set the device
- * configuration.
- *
- * #!/bin/bash
- *
- * BOOT_CONFIG=1
- * ACTIVE_CONFIG=2
- *
- * if [[ "$ACTION" != "add" ]]
- * then
- * exit
- * fi
- *
- * CONFIG_PATH=/sys${DEVPATH%/?*}/bConfigurationValue
- *
- * if [[ 0`cat $CONFIG_PATH` -ne $BOOT_CONFIG ]]
- * then
- * exit
- * fi
- *
- * PRODUCT=${PRODUCT%/?*} # delete version
- * VENDOR_ID=`printf "%d" 0x${PRODUCT%/?*}`
- * PRODUCT_ID=`printf "%d" 0x${PRODUCT#*?/}`
- *
- * PARAM_PATH=/sys/module/ti_usb_3410_5052/parameters
- *
- * function scan() {
- * s=$1
- * shift
- * for i
- * do
- * if [[ $s -eq $i ]]
- * then
- * return 0
- * fi
- * done
- * return 1
- * }
- *
- * IFS=$IFS,
- *
- * if (scan $VENDOR_ID 1105 `cat $PARAM_PATH/vendor_3410` &&
- * scan $PRODUCT_ID 13328 `cat $PARAM_PATH/product_3410`) ||
- * (scan $VENDOR_ID 1105 `cat $PARAM_PATH/vendor_5052` &&
- * scan $PRODUCT_ID 20562 20818 20570 20575 `cat $PARAM_PATH/product_5052`)
- * then
- * echo $ACTIVE_CONFIG > $CONFIG_PATH
- * fi
*/
#include <linux/kernel.h>
@@ -457,9 +407,10 @@ static int ti_startup(struct usb_serial *serial)
goto free_tdev;
}
- /* the second configuration must be set (in sysfs by hotplug script) */
+ /* the second configuration must be set */
if (dev->actconfig->desc.bConfigurationValue == TI_BOOT_CONFIG) {
- status = -ENODEV;
+ status = usb_driver_set_configuration(dev, TI_ACTIVE_CONFIG);
+ status = status ? status : -ENODEV;
goto free_tdev;
}
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index e61f2bfc64a..bfcc1fe8251 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -167,8 +167,22 @@ UNUSUAL_DEV( 0x0421, 0x005d, 0x0001, 0x0600,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
+/* Reported by Ozan Sener <themgzzy@gmail.com> */
+UNUSUAL_DEV( 0x0421, 0x0060, 0x0551, 0x0551,
+ "Nokia",
+ "3500c",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY ),
+
+/* Reported by CSECSY Laszlo <boobaa@frugalware.org> */
+UNUSUAL_DEV( 0x0421, 0x0063, 0x0001, 0x0601,
+ "Nokia",
+ "Nokia 3109c",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY ),
+
/* Patch for Nokia 5310 capacity */
-UNUSUAL_DEV( 0x0421, 0x006a, 0x0000, 0x0591,
+UNUSUAL_DEV( 0x0421, 0x006a, 0x0000, 0x0701,
"Nokia",
"5310",
US_SC_DEVICE, US_PR_DEVICE, NULL,
diff --git a/drivers/video/aty/radeon_accel.c b/drivers/video/aty/radeon_accel.c
index a547e5d4c8b..a469a3d6edc 100644
--- a/drivers/video/aty/radeon_accel.c
+++ b/drivers/video/aty/radeon_accel.c
@@ -5,61 +5,61 @@
* --dte
*/
-#define FLUSH_CACHE_WORKAROUND 1
-
-void radeon_fifo_update_and_wait(struct radeonfb_info *rinfo, int entries)
+static void radeon_fixup_offset(struct radeonfb_info *rinfo)
{
- int i;
+ u32 local_base;
+
+ /* *** Ugly workaround *** */
+ /*
+ * On some platforms, the video memory is mapped at 0 in radeon chip space
+ * (like PPCs) by the firmware. X will always move it up so that it's seen
+ * by the chip to be at the same address as the PCI BAR.
+ * That means that when switching back from X, there is a mismatch between
+ * the offsets programmed into the engine. This means that potentially,
+ * accel operations done before radeonfb has a chance to re-init the engine
+ * will have incorrect offsets, and potentially trash system memory !
+ *
+ * The correct fix is for fbcon to never call any accel op before the engine
+ * has properly been re-initialized (by a call to set_var), but this is a
+ * complex fix. This workaround in the meantime, called before every accel
+ * operation, makes sure the offsets are in sync.
+ */
- for (i=0; i<2000000; i++) {
- rinfo->fifo_free = INREG(RBBM_STATUS) & 0x7f;
- if (rinfo->fifo_free >= entries)
- return;
- udelay(10);
- }
- printk(KERN_ERR "radeonfb: FIFO Timeout !\n");
- /* XXX Todo: attempt to reset the engine */
-}
+ radeon_fifo_wait (1);
+ local_base = INREG(MC_FB_LOCATION) << 16;
+ if (local_base == rinfo->fb_local_base)
+ return;
-static inline void radeon_fifo_wait(struct radeonfb_info *rinfo, int entries)
-{
- if (entries <= rinfo->fifo_free)
- rinfo->fifo_free -= entries;
- else
- radeon_fifo_update_and_wait(rinfo, entries);
-}
+ rinfo->fb_local_base = local_base;
-static inline void radeonfb_set_creg(struct radeonfb_info *rinfo, u32 reg,
- u32 *cache, u32 new_val)
-{
- if (new_val == *cache)
- return;
- *cache = new_val;
- radeon_fifo_wait(rinfo, 1);
- OUTREG(reg, new_val);
+ radeon_fifo_wait (3);
+ OUTREG(DEFAULT_PITCH_OFFSET, (rinfo->pitch << 0x16) |
+ (rinfo->fb_local_base >> 10));
+ OUTREG(DST_PITCH_OFFSET, (rinfo->pitch << 0x16) | (rinfo->fb_local_base >> 10));
+ OUTREG(SRC_PITCH_OFFSET, (rinfo->pitch << 0x16) | (rinfo->fb_local_base >> 10));
}
static void radeonfb_prim_fillrect(struct radeonfb_info *rinfo,
const struct fb_fillrect *region)
{
- radeonfb_set_creg(rinfo, DP_GUI_MASTER_CNTL, &rinfo->dp_gui_mc_cache,
- rinfo->dp_gui_mc_base | GMC_BRUSH_SOLID_COLOR | ROP3_P);
- radeonfb_set_creg(rinfo, DP_CNTL, &rinfo->dp_cntl_cache,
- DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM);
- radeonfb_set_creg(rinfo, DP_BRUSH_FRGD_CLR, &rinfo->dp_brush_fg_cache,
- region->color);
-
- /* Ensure the dst cache is flushed and the engine idle before
- * issuing the operation.
- *
- * This works around engine lockups on some cards
- */
-#if FLUSH_CACHE_WORKAROUND
- radeon_fifo_wait(rinfo, 2);
+ radeon_fifo_wait(4);
+
+ OUTREG(DP_GUI_MASTER_CNTL,
+ rinfo->dp_gui_master_cntl /* contains, like GMC_DST_32BPP */
+ | GMC_BRUSH_SOLID_COLOR
+ | ROP3_P);
+ if (radeon_get_dstbpp(rinfo->depth) != DST_8BPP)
+ OUTREG(DP_BRUSH_FRGD_CLR, rinfo->pseudo_palette[region->color]);
+ else
+ OUTREG(DP_BRUSH_FRGD_CLR, region->color);
+ OUTREG(DP_WRITE_MSK, 0xffffffff);
+ OUTREG(DP_CNTL, (DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM));
+
+ radeon_fifo_wait(2);
OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL);
OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE));
-#endif
- radeon_fifo_wait(rinfo, 2);
+
+ radeon_fifo_wait(2);
OUTREG(DST_Y_X, (region->dy << 16) | region->dx);
OUTREG(DST_WIDTH_HEIGHT, (region->width << 16) | region->height);
}
@@ -70,14 +70,15 @@ void radeonfb_fillrect(struct fb_info *info, const struct fb_fillrect *region)
struct fb_fillrect modded;
int vxres, vyres;
- WARN_ON(rinfo->gfx_mode);
- if (info->state != FBINFO_STATE_RUNNING || rinfo->gfx_mode)
+ if (info->state != FBINFO_STATE_RUNNING)
return;
if (info->flags & FBINFO_HWACCEL_DISABLED) {
cfb_fillrect(info, region);
return;
}
+ radeon_fixup_offset(rinfo);
+
vxres = info->var.xres_virtual;
vyres = info->var.yres_virtual;
@@ -90,10 +91,6 @@ void radeonfb_fillrect(struct fb_info *info, const struct fb_fillrect *region)
if(modded.dx + modded.width > vxres) modded.width = vxres - modded.dx;
if(modded.dy + modded.height > vyres) modded.height = vyres - modded.dy;
- if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
- info->fix.visual == FB_VISUAL_DIRECTCOLOR )
- modded.color = ((u32 *) (info->pseudo_palette))[region->color];
-
radeonfb_prim_fillrect(rinfo, &modded);
}
@@ -112,22 +109,22 @@ static void radeonfb_prim_copyarea(struct radeonfb_info *rinfo,
if ( xdir < 0 ) { sx += w-1; dx += w-1; }
if ( ydir < 0 ) { sy += h-1; dy += h-1; }
- radeonfb_set_creg(rinfo, DP_GUI_MASTER_CNTL, &rinfo->dp_gui_mc_cache,
- rinfo->dp_gui_mc_base |
- GMC_BRUSH_NONE |
- GMC_SRC_DATATYPE_COLOR |
- ROP3_S |
- DP_SRC_SOURCE_MEMORY);
- radeonfb_set_creg(rinfo, DP_CNTL, &rinfo->dp_cntl_cache,
- (xdir>=0 ? DST_X_LEFT_TO_RIGHT : 0) |
- (ydir>=0 ? DST_Y_TOP_TO_BOTTOM : 0));
-
-#if FLUSH_CACHE_WORKAROUND
- radeon_fifo_wait(rinfo, 2);
+ radeon_fifo_wait(3);
+ OUTREG(DP_GUI_MASTER_CNTL,
+ rinfo->dp_gui_master_cntl /* i.e. GMC_DST_32BPP */
+ | GMC_BRUSH_NONE
+ | GMC_SRC_DSTCOLOR
+ | ROP3_S
+ | DP_SRC_SOURCE_MEMORY );
+ OUTREG(DP_WRITE_MSK, 0xffffffff);
+ OUTREG(DP_CNTL, (xdir>=0 ? DST_X_LEFT_TO_RIGHT : 0)
+ | (ydir>=0 ? DST_Y_TOP_TO_BOTTOM : 0));
+
+ radeon_fifo_wait(2);
OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL);
OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE));
-#endif
- radeon_fifo_wait(rinfo, 3);
+
+ radeon_fifo_wait(3);
OUTREG(SRC_Y_X, (sy << 16) | sx);
OUTREG(DST_Y_X, (dy << 16) | dx);
OUTREG(DST_HEIGHT_WIDTH, (h << 16) | w);
@@ -146,14 +143,15 @@ void radeonfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
modded.width = area->width;
modded.height = area->height;
- WARN_ON(rinfo->gfx_mode);
- if (info->state != FBINFO_STATE_RUNNING || rinfo->gfx_mode)
+ if (info->state != FBINFO_STATE_RUNNING)
return;
if (info->flags & FBINFO_HWACCEL_DISABLED) {
cfb_copyarea(info, area);
return;
}
+ radeon_fixup_offset(rinfo);
+
vxres = info->var.xres_virtual;
vyres = info->var.yres_virtual;
@@ -170,115 +168,13 @@ void radeonfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
radeonfb_prim_copyarea(rinfo, &modded);
}
-static void radeonfb_prim_imageblit(struct radeonfb_info *rinfo,
- const struct fb_image *image,
- u32 fg, u32 bg)
-{
- unsigned int dwords;
- u32 *bits;
-
- radeonfb_set_creg(rinfo, DP_GUI_MASTER_CNTL, &rinfo->dp_gui_mc_cache,
- rinfo->dp_gui_mc_base |
- GMC_BRUSH_NONE | GMC_DST_CLIP_LEAVE |
- GMC_SRC_DATATYPE_MONO_FG_BG |
- ROP3_S |
- GMC_BYTE_ORDER_MSB_TO_LSB |
- DP_SRC_SOURCE_HOST_DATA);
- radeonfb_set_creg(rinfo, DP_CNTL, &rinfo->dp_cntl_cache,
- DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM);
- radeonfb_set_creg(rinfo, DP_SRC_FRGD_CLR, &rinfo->dp_src_fg_cache, fg);
- radeonfb_set_creg(rinfo, DP_SRC_BKGD_CLR, &rinfo->dp_src_bg_cache, bg);
-
- /* Ensure the dst cache is flushed and the engine idle before
- * issuing the operation.
- *
- * This works around engine lockups on some cards
- */
-#if FLUSH_CACHE_WORKAROUND
- radeon_fifo_wait(rinfo, 2);
- OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL);
- OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE));
-#endif
-
- /* X here pads width to a multiple of 32 and uses the clipper to
- * adjust the result. Is that really necessary ? Things seem to
- * work ok for me without that and the doco doesn't seem to imply
- * there is such a restriction.
- */
- radeon_fifo_wait(rinfo, 4);
- OUTREG(SC_TOP_LEFT, (image->dy << 16) | image->dx);
- OUTREG(SC_BOTTOM_RIGHT, ((image->dy + image->height) << 16) |
- (image->dx + image->width));
- OUTREG(DST_Y_X, (image->dy << 16) | image->dx);
-
- OUTREG(DST_HEIGHT_WIDTH, (image->height << 16) | ((image->width + 31) & ~31));
-
- dwords = (image->width + 31) >> 5;
- dwords *= image->height;
- bits = (u32*)(image->data);
-
- while(dwords >= 8) {
- radeon_fifo_wait(rinfo, 8);
-#if BITS_PER_LONG == 64
- __raw_writeq(*((u64 *)(bits)), rinfo->mmio_base + HOST_DATA0);
- __raw_writeq(*((u64 *)(bits+2)), rinfo->mmio_base + HOST_DATA2);
- __raw_writeq(*((u64 *)(bits+4)), rinfo->mmio_base + HOST_DATA4);
- __raw_writeq(*((u64 *)(bits+6)), rinfo->mmio_base + HOST_DATA6);
- bits += 8;
-#else
- __raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA0);
- __raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA1);
- __raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA2);
- __raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA3);
- __raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA4);
- __raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA5);
- __raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA6);
- __raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA7);
-#endif
- dwords -= 8;
- }
- while(dwords--) {
- radeon_fifo_wait(rinfo, 1);
- __raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA0);
- }
-}
-
void radeonfb_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct radeonfb_info *rinfo = info->par;
- u32 fg, bg;
-
- WARN_ON(rinfo->gfx_mode);
- if (info->state != FBINFO_STATE_RUNNING || rinfo->gfx_mode)
- return;
- if (!image->width || !image->height)
+ if (info->state != FBINFO_STATE_RUNNING)
return;
-
- /* We only do 1 bpp color expansion for now */
- if (info->flags & FBINFO_HWACCEL_DISABLED || image->depth != 1)
- goto fallback;
-
- /* Fallback if running out of the screen. We may do clipping
- * in the future */
- if ((image->dx + image->width) > info->var.xres_virtual ||
- (image->dy + image->height) > info->var.yres_virtual)
- goto fallback;
-
- if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
- info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
- fg = ((u32*)(info->pseudo_palette))[image->fg_color];
- bg = ((u32*)(info->pseudo_palette))[image->bg_color];
- } else {
- fg = image->fg_color;
- bg = image->bg_color;
- }
-
- radeonfb_prim_imageblit(rinfo, image, fg, bg);
- return;
-
- fallback:
- radeon_engine_idle(rinfo);
+ radeon_engine_idle();
cfb_imageblit(info, image);
}
@@ -289,8 +185,7 @@ int radeonfb_sync(struct fb_info *info)
if (info->state != FBINFO_STATE_RUNNING)
return 0;
-
- radeon_engine_idle(rinfo);
+ radeon_engine_idle();
return 0;
}
@@ -366,10 +261,9 @@ void radeonfb_engine_init (struct radeonfb_info *rinfo)
/* disable 3D engine */
OUTREG(RB3D_CNTL, 0);
- rinfo->fifo_free = 0;
radeonfb_engine_reset(rinfo);
- radeon_fifo_wait(rinfo, 1);
+ radeon_fifo_wait (1);
if (IS_R300_VARIANT(rinfo)) {
OUTREG(RB2D_DSTCACHE_MODE, INREG(RB2D_DSTCACHE_MODE) |
RB2D_DC_AUTOFLUSH_ENABLE |
@@ -383,7 +277,7 @@ void radeonfb_engine_init (struct radeonfb_info *rinfo)
OUTREG(RB2D_DSTCACHE_MODE, 0);
}
- radeon_fifo_wait(rinfo, 3);
+ radeon_fifo_wait (3);
/* We re-read MC_FB_LOCATION from card as it can have been
* modified by XFree drivers (ouch !)
*/
@@ -394,57 +288,41 @@ void radeonfb_engine_init (struct radeonfb_info *rinfo)
OUTREG(DST_PITCH_OFFSET, (rinfo->pitch << 0x16) | (rinfo->fb_local_base >> 10));
OUTREG(SRC_PITCH_OFFSET, (rinfo->pitch << 0x16) | (rinfo->fb_local_base >> 10));
- radeon_fifo_wait(rinfo, 1);
-#ifdef __BIG_ENDIAN
+ radeon_fifo_wait (1);
+#if defined(__BIG_ENDIAN)
OUTREGP(DP_DATATYPE, HOST_BIG_ENDIAN_EN, ~HOST_BIG_ENDIAN_EN);
#else
OUTREGP(DP_DATATYPE, 0, ~HOST_BIG_ENDIAN_EN);
#endif
- radeon_fifo_wait(rinfo, 2);
+ radeon_fifo_wait (2);
OUTREG(DEFAULT_SC_TOP_LEFT, 0);
OUTREG(DEFAULT_SC_BOTTOM_RIGHT, (DEFAULT_SC_RIGHT_MAX |
DEFAULT_SC_BOTTOM_MAX));
- /* set default DP_GUI_MASTER_CNTL */
temp = radeon_get_dstbpp(rinfo->depth);
- rinfo->dp_gui_mc_base = ((temp << 8) | GMC_CLR_CMP_CNTL_DIS);
+ rinfo->dp_gui_master_cntl = ((temp << 8) | GMC_CLR_CMP_CNTL_DIS);
- rinfo->dp_gui_mc_cache = rinfo->dp_gui_mc_base |
- GMC_BRUSH_SOLID_COLOR |
- GMC_SRC_DATATYPE_COLOR;
- radeon_fifo_wait(rinfo, 1);
- OUTREG(DP_GUI_MASTER_CNTL, rinfo->dp_gui_mc_cache);
+ radeon_fifo_wait (1);
+ OUTREG(DP_GUI_MASTER_CNTL, (rinfo->dp_gui_master_cntl |
+ GMC_BRUSH_SOLID_COLOR |
+ GMC_SRC_DATATYPE_COLOR));
+ radeon_fifo_wait (7);
/* clear line drawing regs */
- radeon_fifo_wait(rinfo, 2);
OUTREG(DST_LINE_START, 0);
OUTREG(DST_LINE_END, 0);
- /* set brush and source color regs */
- rinfo->dp_brush_fg_cache = 0xffffffff;
- rinfo->dp_brush_bg_cache = 0x00000000;
- rinfo->dp_src_fg_cache = 0xffffffff;
- rinfo->dp_src_bg_cache = 0x00000000;
- radeon_fifo_wait(rinfo, 4);
- OUTREG(DP_BRUSH_FRGD_CLR, rinfo->dp_brush_fg_cache);
- OUTREG(DP_BRUSH_BKGD_CLR, rinfo->dp_brush_bg_cache);
- OUTREG(DP_SRC_FRGD_CLR, rinfo->dp_src_fg_cache);
- OUTREG(DP_SRC_BKGD_CLR, rinfo->dp_src_bg_cache);
-
- /* Default direction */
- rinfo->dp_cntl_cache = DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM;
- radeon_fifo_wait(rinfo, 1);
- OUTREG(DP_CNTL, rinfo->dp_cntl_cache);
+ /* set brush color regs */
+ OUTREG(DP_BRUSH_FRGD_CLR, 0xffffffff);
+ OUTREG(DP_BRUSH_BKGD_CLR, 0x00000000);
+
+ /* set source color regs */
+ OUTREG(DP_SRC_FRGD_CLR, 0xffffffff);
+ OUTREG(DP_SRC_BKGD_CLR, 0x00000000);
/* default write mask */
- radeon_fifo_wait(rinfo, 1);
OUTREG(DP_WRITE_MSK, 0xffffffff);
- /* Default to no swapping of host data */
- radeon_fifo_wait(rinfo, 1);
- OUTREG(RBBM_GUICNTL, RBBM_GUICNTL_HOST_DATA_SWAP_NONE);
-
- /* Make sure it's settled */
- radeon_engine_idle(rinfo);
+ radeon_engine_idle ();
}
diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
index f343ba83f0a..1a056adb61c 100644
--- a/drivers/video/aty/radeon_backlight.c
+++ b/drivers/video/aty/radeon_backlight.c
@@ -66,7 +66,7 @@ static int radeon_bl_update_status(struct backlight_device *bd)
level = bd->props.brightness;
del_timer_sync(&rinfo->lvds_timer);
- radeon_engine_idle(rinfo);
+ radeon_engine_idle();
lvds_gen_cntl = INREG(LVDS_GEN_CNTL);
if (level > 0) {
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index b3ffe8205d2..d0f1a7fc2c9 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -852,6 +852,7 @@ static int radeonfb_pan_display (struct fb_var_screeninfo *var,
if (rinfo->asleep)
return 0;
+ radeon_fifo_wait(2);
OUTREG(CRTC_OFFSET, ((var->yoffset * var->xres_virtual + var->xoffset)
* var->bits_per_pixel / 8) & ~7);
return 0;
@@ -881,6 +882,7 @@ static int radeonfb_ioctl (struct fb_info *info, unsigned int cmd,
if (rc)
return rc;
+ radeon_fifo_wait(2);
if (value & 0x01) {
tmp = INREG(LVDS_GEN_CNTL);
@@ -938,7 +940,7 @@ int radeon_screen_blank(struct radeonfb_info *rinfo, int blank, int mode_switch)
if (rinfo->lock_blank)
return 0;
- radeon_engine_idle(rinfo);
+ radeon_engine_idle();
val = INREG(CRTC_EXT_CNTL);
val &= ~(CRTC_DISPLAY_DIS | CRTC_HSYNC_DIS |
@@ -1046,7 +1048,7 @@ static int radeonfb_blank (int blank, struct fb_info *info)
if (rinfo->asleep)
return 0;
-
+
return radeon_screen_blank(rinfo, blank, 0);
}
@@ -1072,6 +1074,8 @@ static int radeon_setcolreg (unsigned regno, unsigned red, unsigned green,
pindex = regno;
if (!rinfo->asleep) {
+ radeon_fifo_wait(9);
+
if (rinfo->bpp == 16) {
pindex = regno * 8;
@@ -1240,6 +1244,8 @@ static void radeon_write_pll_regs(struct radeonfb_info *rinfo, struct radeon_reg
{
int i;
+ radeon_fifo_wait(20);
+
/* Workaround from XFree */
if (rinfo->is_mobility) {
/* A temporal workaround for the occational blanking on certain laptop
@@ -1335,7 +1341,7 @@ static void radeon_lvds_timer_func(unsigned long data)
{
struct radeonfb_info *rinfo = (struct radeonfb_info *)data;
- radeon_engine_idle(rinfo);
+ radeon_engine_idle();
OUTREG(LVDS_GEN_CNTL, rinfo->pending_lvds_gen_cntl);
}
@@ -1353,11 +1359,10 @@ void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode,
if (nomodeset)
return;
- radeon_engine_idle(rinfo);
-
if (!regs_only)
radeon_screen_blank(rinfo, FB_BLANK_NORMAL, 0);
+ radeon_fifo_wait(31);
for (i=0; i<10; i++)
OUTREG(common_regs[i].reg, common_regs[i].val);
@@ -1385,6 +1390,7 @@ void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode,
radeon_write_pll_regs(rinfo, mode);
if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD)) {
+ radeon_fifo_wait(10);
OUTREG(FP_CRTC_H_TOTAL_DISP, mode->fp_crtc_h_total_disp);
OUTREG(FP_CRTC_V_TOTAL_DISP, mode->fp_crtc_v_total_disp);
OUTREG(FP_H_SYNC_STRT_WID, mode->fp_h_sync_strt_wid);
@@ -1399,6 +1405,7 @@ void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode,
if (!regs_only)
radeon_screen_blank(rinfo, FB_BLANK_UNBLANK, 0);
+ radeon_fifo_wait(2);
OUTPLL(VCLK_ECP_CNTL, mode->vclk_ecp_cntl);
return;
@@ -1549,7 +1556,7 @@ static int radeonfb_set_par(struct fb_info *info)
/* We always want engine to be idle on a mode switch, even
* if we won't actually change the mode
*/
- radeon_engine_idle(rinfo);
+ radeon_engine_idle();
hSyncStart = mode->xres + mode->right_margin;
hSyncEnd = hSyncStart + mode->hsync_len;
@@ -1844,6 +1851,7 @@ static int radeonfb_set_par(struct fb_info *info)
return 0;
}
+
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = radeonfb_check_var,
@@ -1867,7 +1875,6 @@ static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo)
info->par = rinfo;
info->pseudo_palette = rinfo->pseudo_palette;
info->flags = FBINFO_DEFAULT
- | FBINFO_HWACCEL_IMAGEBLIT
| FBINFO_HWACCEL_COPYAREA
| FBINFO_HWACCEL_FILLRECT
| FBINFO_HWACCEL_XPAN
@@ -1875,7 +1882,6 @@ static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo)
info->fbops = &radeonfb_ops;
info->screen_base = rinfo->fb_base;
info->screen_size = rinfo->mapped_vram;
-
/* Fill fix common fields */
strlcpy(info->fix.id, rinfo->name, sizeof(info->fix.id));
info->fix.smem_start = rinfo->fb_base_phys;
@@ -1890,25 +1896,8 @@ static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo)
info->fix.mmio_len = RADEON_REGSIZE;
info->fix.accel = FB_ACCEL_ATI_RADEON;
- /* Allocate colormap */
fb_alloc_cmap(&info->cmap, 256, 0);
- /* Setup pixmap used for acceleration */
-#define PIXMAP_SIZE (2048 * 4)
-
- info->pixmap.addr = kmalloc(PIXMAP_SIZE, GFP_KERNEL);
- if (!info->pixmap.addr) {
- printk(KERN_ERR "radeonfb: Failed to allocate pixmap !\n");
- noaccel = 1;
- goto bail;
- }
- info->pixmap.size = PIXMAP_SIZE;
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
- info->pixmap.scan_align = 4;
- info->pixmap.buf_align = 4;
- info->pixmap.access_align = 32;
-
-bail:
if (noaccel)
info->flags |= FBINFO_HWACCEL_DISABLED;
@@ -2017,6 +2006,7 @@ static void radeon_identify_vram(struct radeonfb_info *rinfo)
u32 tom = INREG(NB_TOM);
tmp = ((((tom >> 16) - (tom & 0xffff) + 1) << 6) * 1024);
+ radeon_fifo_wait(6);
OUTREG(MC_FB_LOCATION, tom);
OUTREG(DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
OUTREG(CRTC2_DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index 3df5015f1d1..675abdafc2d 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -2653,9 +2653,9 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
if (!(info->flags & FBINFO_HWACCEL_DISABLED)) {
/* Make sure engine is reset */
- radeon_engine_idle(rinfo);
+ radeon_engine_idle();
radeonfb_engine_reset(rinfo);
- radeon_engine_idle(rinfo);
+ radeon_engine_idle();
}
/* Blank display and LCD */
@@ -2767,7 +2767,7 @@ int radeonfb_pci_resume(struct pci_dev *pdev)
rinfo->asleep = 0;
} else
- radeon_engine_idle(rinfo);
+ radeon_engine_idle();
/* Restore display & engine */
radeon_write_mode (rinfo, &rinfo->state, 1);
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h
index ea0b5b47aca..3ea1b00fdd2 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/aty/radeonfb.h
@@ -336,15 +336,7 @@ struct radeonfb_info {
int mon2_type;
u8 *mon2_EDID;
- /* accel bits */
- u32 dp_gui_mc_base;
- u32 dp_gui_mc_cache;
- u32 dp_cntl_cache;
- u32 dp_brush_fg_cache;
- u32 dp_brush_bg_cache;
- u32 dp_src_fg_cache;
- u32 dp_src_bg_cache;
- u32 fifo_free;
+ u32 dp_gui_master_cntl;
struct pll_info pll;
@@ -356,7 +348,6 @@ struct radeonfb_info {
int lock_blank;
int dynclk;
int no_schedule;
- int gfx_mode;
enum radeon_pm_mode pm_mode;
reinit_function_ptr reinit_func;
@@ -401,14 +392,8 @@ static inline void _radeon_msleep(struct radeonfb_info *rinfo, unsigned long ms)
#define OUTREG8(addr,val) writeb(val, (rinfo->mmio_base)+addr)
#define INREG16(addr) readw((rinfo->mmio_base)+addr)
#define OUTREG16(addr,val) writew(val, (rinfo->mmio_base)+addr)
-
-#ifdef CONFIG_PPC
-#define INREG(addr) ({ eieio(); ld_le32(rinfo->mmio_base+(addr)); })
-#define OUTREG(addr,val) do { eieio(); st_le32(rinfo->mmio_base+(addr),(val)); } while(0)
-#else
#define INREG(addr) readl((rinfo->mmio_base)+addr)
#define OUTREG(addr,val) writel(val, (rinfo->mmio_base)+addr)
-#endif
static inline void _OUTREGP(struct radeonfb_info *rinfo, u32 addr,
u32 val, u32 mask)
@@ -550,7 +535,17 @@ static inline u32 radeon_get_dstbpp(u16 depth)
* 2D Engine helper routines
*/
-extern void radeon_fifo_update_and_wait(struct radeonfb_info *rinfo, int entries);
+static inline void _radeon_fifo_wait(struct radeonfb_info *rinfo, int entries)
+{
+ int i;
+
+ for (i=0; i<2000000; i++) {
+ if ((INREG(RBBM_STATUS) & 0x7f) >= entries)
+ return;
+ udelay(1);
+ }
+ printk(KERN_ERR "radeonfb: FIFO Timeout !\n");
+}
static inline void radeon_engine_flush (struct radeonfb_info *rinfo)
{
@@ -563,7 +558,7 @@ static inline void radeon_engine_flush (struct radeonfb_info *rinfo)
/* Ensure FIFO is empty, ie, make sure the flush commands
* has reached the cache
*/
- radeon_fifo_update_and_wait(rinfo, 64);
+ _radeon_fifo_wait (rinfo, 64);
/* Wait for the flush to complete */
for (i=0; i < 2000000; i++) {
@@ -575,12 +570,12 @@ static inline void radeon_engine_flush (struct radeonfb_info *rinfo)
}
-static inline void radeon_engine_idle(struct radeonfb_info *rinfo)
+static inline void _radeon_engine_idle(struct radeonfb_info *rinfo)
{
int i;
/* ensure FIFO is empty before waiting for idle */
- radeon_fifo_update_and_wait (rinfo, 64);
+ _radeon_fifo_wait (rinfo, 64);
for (i=0; i<2000000; i++) {
if (((INREG(RBBM_STATUS) & GUI_ACTIVE)) == 0) {
@@ -593,6 +588,8 @@ static inline void radeon_engine_idle(struct radeonfb_info *rinfo)
}
+#define radeon_engine_idle() _radeon_engine_idle(rinfo)
+#define radeon_fifo_wait(entries) _radeon_fifo_wait(rinfo,entries)
#define radeon_msleep(ms) _radeon_msleep(rinfo,ms)
@@ -622,7 +619,6 @@ extern void radeonfb_imageblit(struct fb_info *p, const struct fb_image *image);
extern int radeonfb_sync(struct fb_info *info);
extern void radeonfb_engine_init (struct radeonfb_info *rinfo);
extern void radeonfb_engine_reset(struct radeonfb_info *rinfo);
-extern void radeon_fixup_mem_offset(struct radeonfb_info *rinfo);
/* Other functions */
extern int radeon_screen_blank(struct radeonfb_info *rinfo, int blank, int mode_switch);
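Note that the rewritten header hides the device argument behind macros: radeon_fifo_wait(n) and radeon_engine_idle() expand to the _radeon_* inlines using a variable literally named rinfo taken from the caller's scope, just as OUTREG/INREG already do. A hypothetical caller, only to make that convention explicit:

	/* Any function using these macros must have a local named 'rinfo'. */
	static void example_blit(struct radeonfb_info *rinfo)
	{
		radeon_fifo_wait(2);		/* -> _radeon_fifo_wait(rinfo, 2) */
		OUTREG(DST_Y_X, (0 << 16) | 0);
		OUTREG(DST_WIDTH_HEIGHT, (16 << 16) | 16);
		radeon_engine_idle();		/* -> _radeon_engine_idle(rinfo) */
	}

This is why the helpers are usable only inside radeonfb functions that follow the rinfo naming convention.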
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 67ff370d80a..0b2adefe9e3 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3531,12 +3531,18 @@ static void fbcon_exit(void)
softback_buf = 0UL;
for (i = 0; i < FB_MAX; i++) {
+ int pending;
+
mapped = 0;
info = registered_fb[i];
if (info == NULL)
continue;
+ pending = cancel_work_sync(&info->queue);
+ DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" :
+ "no"));
+
for (j = first_fb_vc; j <= last_fb_vc; j++) {
if (con2fb_map[j] == i)
mapped = 1;
diff --git a/drivers/video/mb862xx/mb862xxfb.c b/drivers/video/mb862xx/mb862xxfb.c
index 38718d95fbb..fb64234a382 100644
--- a/drivers/video/mb862xx/mb862xxfb.c
+++ b/drivers/video/mb862xx/mb862xxfb.c
@@ -927,9 +927,9 @@ static int __devinit mb862xx_pci_probe(struct pci_dev *pdev,
}
dev_dbg(dev, "fb phys 0x%llx 0x%lx\n",
- (u64)par->fb_base_phys, (ulong)par->mapped_vram);
+ (unsigned long long)par->fb_base_phys, (ulong)par->mapped_vram);
dev_dbg(dev, "mmio phys 0x%llx 0x%lx\n",
- (u64)par->mmio_base_phys, (ulong)par->mmio_len);
+ (unsigned long long)par->mmio_base_phys, (ulong)par->mmio_len);
if (mb862xx_pci_gdc_init(par))
goto io_unmap;
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index 5a5e407dc45..1a49519dafa 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -392,7 +392,7 @@ static void set_fb_fix(struct fb_info *fbi)
int bpp;
rg = &plane->fbdev->mem_desc.region[plane->idx];
- fbi->screen_base = (char __iomem *)rg->vaddr;
+ fbi->screen_base = rg->vaddr;
fix->smem_start = rg->paddr;
fix->smem_len = rg->size;
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 26173a270e9..5b395a4ddfd 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -392,7 +392,7 @@ static int iTCO_wdt_stop(void)
/* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */
val32 = inl(SMI_EN);
- val32 &= 0x00002000;
+ val32 |= 0x00002000;
outl(val32, SMI_EN);
/* Set the NO_REBOOT bit to prevent later reboots, just for sure */