author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2009-06-12 16:53:38 +1000
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2009-06-12 16:53:38 +1000
commit     bc47ab0241c7c86da4f5e5f82fbca7d45387c18d (patch)
tree       b9c33ae8b6de43e44cc5fcbaa3e4a15f18a5ed42 /drivers
parent     37f9ef553bed630957e025504cdcbc76f5de49d5 (diff)
parent     8ebf975608aaebd7feb33d77f07ba21a6380e086 (diff)
Merge commit 'origin/master' into next
Manual merge of: arch/powerpc/kernel/asm-offsets.c
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/pci_bind.c | 24
-rw-r--r--  drivers/acpi/pci_irq.c | 5
-rw-r--r--  drivers/acpi/processor_core.c | 2
-rw-r--r--  drivers/acpi/processor_idle.c | 8
-rw-r--r--  drivers/acpi/processor_perflib.c | 12
-rw-r--r--  drivers/acpi/processor_throttling.c | 2
-rw-r--r--  drivers/acpi/video.c | 18
-rw-r--r--  drivers/ata/ahci.c | 159
-rw-r--r--  drivers/ata/ata_piix.c | 20
-rw-r--r--  drivers/ata/libata-core.c | 11
-rw-r--r--  drivers/ata/libata-scsi.c | 2
-rw-r--r--  drivers/ata/libata-sff.c | 20
-rw-r--r--  drivers/ata/pata_ali.c | 17
-rw-r--r--  drivers/ata/pata_efar.c | 17
-rw-r--r--  drivers/ata/pata_legacy.c | 2
-rw-r--r--  drivers/ata/pata_netcell.c | 13
-rw-r--r--  drivers/ata/sata_nv.c | 131
-rw-r--r--  drivers/ata/sata_sil.c | 2
-rw-r--r--  drivers/ata/sata_sx4.c | 11
-rw-r--r--  drivers/base/bus.c | 4
-rw-r--r--  drivers/base/core.c | 5
-rw-r--r--  drivers/base/driver.c | 4
-rw-r--r--  drivers/block/DAC960.c | 10
-rw-r--r--  drivers/block/Kconfig | 2
-rw-r--r--  drivers/block/amiflop.c | 54
-rw-r--r--  drivers/block/ataflop.c | 66
-rw-r--r--  drivers/block/brd.c | 7
-rw-r--r--  drivers/block/cciss.c | 927
-rw-r--r--  drivers/block/cciss.h | 34
-rw-r--r--  drivers/block/cciss_cmd.h | 2
-rw-r--r--  drivers/block/cciss_scsi.c | 109
-rw-r--r--  drivers/block/cpqarray.c | 20
-rw-r--r--  drivers/block/floppy.c | 85
-rw-r--r--  drivers/block/hd.c | 106
-rw-r--r--  drivers/block/loop.c | 37
-rw-r--r--  drivers/block/mg_disk.c | 537
-rw-r--r--  drivers/block/nbd.c | 23
-rw-r--r--  drivers/block/paride/pcd.c | 29
-rw-r--r--  drivers/block/paride/pd.c | 22
-rw-r--r--  drivers/block/paride/pf.c | 47
-rw-r--r--  drivers/block/pktcdvd.c | 8
-rw-r--r--  drivers/block/ps3disk.c | 24
-rw-r--r--  drivers/block/sunvdc.c | 14
-rw-r--r--  drivers/block/swim.c | 48
-rw-r--r--  drivers/block/swim3.c | 107
-rw-r--r--  drivers/block/sx8.c | 17
-rw-r--r--  drivers/block/ub.c | 54
-rw-r--r--  drivers/block/viodasd.c | 12
-rw-r--r--  drivers/block/virtio_blk.c | 110
-rw-r--r--  drivers/block/xd.c | 41
-rw-r--r--  drivers/block/xen-blkfront.c | 34
-rw-r--r--  drivers/block/xsysace.c | 46
-rw-r--r--  drivers/block/z2ram.c | 19
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 5
-rw-r--r--  drivers/cdrom/cdrom.c | 4
-rw-r--r--  drivers/cdrom/gdrom.c | 36
-rw-r--r--  drivers/cdrom/viocd.c | 33
-rw-r--r--  drivers/char/Kconfig | 13
-rw-r--r--  drivers/char/Makefile | 1
-rw-r--r--  drivers/char/bfin_jtag_comm.c | 365
-rw-r--r--  drivers/char/cyclades.c | 290
-rw-r--r--  drivers/char/epca.c | 17
-rw-r--r--  drivers/char/hpet.c | 4
-rw-r--r--  drivers/char/ip2/i2lib.c | 4
-rw-r--r--  drivers/char/ip2/ip2main.c | 4
-rw-r--r--  drivers/char/isicom.c | 19
-rw-r--r--  drivers/char/istallion.c | 8
-rw-r--r--  drivers/char/mem.c | 2
-rw-r--r--  drivers/char/moxa.c | 5
-rw-r--r--  drivers/char/mxser.c | 14
-rw-r--r--  drivers/char/n_hdlc.c | 4
-rw-r--r--  drivers/char/n_tty.c | 29
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c | 11
-rw-r--r--  drivers/char/pty.c | 11
-rw-r--r--  drivers/char/raw.c | 2
-rw-r--r--  drivers/char/rocket.c | 19
-rw-r--r--  drivers/char/selection.c | 2
-rw-r--r--  drivers/char/stallion.c | 6
-rw-r--r--  drivers/char/synclink.c | 9
-rw-r--r--  drivers/char/synclink_gt.c | 86
-rw-r--r--  drivers/char/synclinkmp.c | 9
-rw-r--r--  drivers/char/sysrq.c | 2
-rw-r--r--  drivers/char/tty_audit.c | 10
-rw-r--r--  drivers/char/tty_io.c | 122
-rw-r--r--  drivers/char/tty_ioctl.c | 88
-rw-r--r--  drivers/char/tty_ldisc.c | 549
-rw-r--r--  drivers/char/tty_port.c | 47
-rw-r--r--  drivers/char/vt.c | 9
-rw-r--r--  drivers/clocksource/Makefile | 2
-rw-r--r--  drivers/clocksource/sh_cmt.c | 116
-rw-r--r--  drivers/clocksource/sh_mtu2.c | 357
-rw-r--r--  drivers/clocksource/sh_tmu.c | 461
-rw-r--r--  drivers/cpufreq/cpufreq.c | 2
-rw-r--r--  drivers/dma/fsldma.c | 58
-rw-r--r--  drivers/dma/ioat_dma.c | 2
-rw-r--r--  drivers/edac/Kconfig | 34
-rw-r--r--  drivers/edac/Makefile | 9
-rw-r--r--  drivers/edac/amd64_edac.c | 3354
-rw-r--r--  drivers/edac/amd64_edac.h | 644
-rw-r--r--  drivers/edac/amd64_edac_dbg.c | 255
-rw-r--r--  drivers/edac/amd64_edac_err_types.c | 161
-rw-r--r--  drivers/edac/amd64_edac_inj.c | 185
-rw-r--r--  drivers/edac/amd8111_edac.c | 4
-rw-r--r--  drivers/edac/amd8131_edac.c | 2
-rw-r--r--  drivers/edac/edac_core.h | 9
-rw-r--r--  drivers/firmware/dmi_scan.c | 1
-rw-r--r--  drivers/gpu/drm/Kconfig | 14
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 7
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 109
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 5
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 8
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 26
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 5
-rw-r--r--  drivers/hwmon/lm78.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-sh7760.c | 2
-rw-r--r--  drivers/ide/alim15x3.c | 10
-rw-r--r--  drivers/ide/ide-atapi.c | 183
-rw-r--r--  drivers/ide/ide-cd.c | 152
-rw-r--r--  drivers/ide/ide-cd.h | 4
-rw-r--r--  drivers/ide/ide-disk.c | 11
-rw-r--r--  drivers/ide/ide-dma.c | 22
-rw-r--r--  drivers/ide/ide-floppy.c | 32
-rw-r--r--  drivers/ide/ide-io.c | 57
-rw-r--r--  drivers/ide/ide-ioctls.c | 1
-rw-r--r--  drivers/ide/ide-lib.c | 2
-rw-r--r--  drivers/ide/ide-park.c | 7
-rw-r--r--  drivers/ide/ide-pci-generic.c | 11
-rw-r--r--  drivers/ide/ide-pm.c | 38
-rw-r--r--  drivers/ide/ide-tape.c | 736
-rw-r--r--  drivers/ide/ide-taskfile.c | 20
-rw-r--r--  drivers/ide/pdc202xx_old.c | 24
-rw-r--r--  drivers/ide/tc86c001.c | 2
-rw-r--r--  drivers/ide/tx4939ide.c | 2
-rw-r--r--  drivers/idle/i7300_idle.c | 6
-rw-r--r--  drivers/ieee1394/dv1394.c | 5
-rw-r--r--  drivers/ieee1394/ieee1394_core.h | 6
-rw-r--r--  drivers/input/input.c | 1
-rw-r--r--  drivers/input/serio/libps2.c | 2
-rw-r--r--  drivers/input/touchscreen/ucb1400_ts.c | 2
-rw-r--r--  drivers/md/bitmap.c | 4
-rw-r--r--  drivers/md/dm-exception-store.c | 2
-rw-r--r--  drivers/md/dm-log.c | 3
-rw-r--r--  drivers/md/dm-snap-persistent.c | 2
-rw-r--r--  drivers/md/dm-table.c | 38
-rw-r--r--  drivers/md/dm.c | 8
-rw-r--r--  drivers/md/linear.c | 2
-rw-r--r--  drivers/md/md.c | 2
-rw-r--r--  drivers/md/multipath.c | 4
-rw-r--r--  drivers/md/raid0.c | 2
-rw-r--r--  drivers/md/raid1.c | 4
-rw-r--r--  drivers/md/raid10.c | 8
-rw-r--r--  drivers/md/raid5.c | 32
-rw-r--r--  drivers/media/video/ivtv/ivtv-queue.c | 3
-rw-r--r--  drivers/memstick/core/mspro_block.c | 19
-rw-r--r--  drivers/message/fusion/mptsas.c | 22
-rw-r--r--  drivers/message/i2o/i2o_block.c | 43
-rw-r--r--  drivers/mmc/card/block.c | 12
-rw-r--r--  drivers/mmc/card/queue.c | 11
-rw-r--r--  drivers/mmc/host/mvsdio.c | 35
-rw-r--r--  drivers/mmc/host/mxcmmc.c | 47
-rw-r--r--  drivers/mmc/host/omap.c | 2
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-of.c | 9
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 43
-rw-r--r--  drivers/mtd/nand/davinci_nand.c | 7
-rw-r--r--  drivers/mtd/nand/mxc_nand.c | 43
-rw-r--r--  drivers/net/3c509.c | 4
-rw-r--r--  drivers/net/atl1e/atl1e_main.c | 1
-rw-r--r--  drivers/net/atlx/atl1.c | 6
-rw-r--r--  drivers/net/atlx/atlx.h | 6
-rw-r--r--  drivers/net/bfin_mac.c | 29
-rw-r--r--  drivers/net/cxgb3/adapter.h | 4
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 8
-rw-r--r--  drivers/net/cxgb3/sge.c | 11
-rw-r--r--  drivers/net/cxgb3/t3_hw.c | 5
-rw-r--r--  drivers/net/e1000/e1000_main.c | 5
-rw-r--r--  drivers/net/forcedeth.c | 15
-rw-r--r--  drivers/net/gianfar.h | 2
-rw-r--r--  drivers/net/mac8390.c | 2
-rw-r--r--  drivers/net/mlx4/en_tx.c | 8
-rw-r--r--  drivers/net/r8169.c | 11
-rw-r--r--  drivers/net/wireless/Kconfig | 1
-rw-r--r--  drivers/net/wireless/at76c50x-usb.c | 12
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8187_dev.c | 2
-rw-r--r--  drivers/parisc/iosapic.c | 6
-rw-r--r--  drivers/parport/parport_gsc.c | 4
-rw-r--r--  drivers/parport/parport_pc.c | 1802
-rw-r--r--  drivers/parport/share.c | 13
-rw-r--r--  drivers/pci/hotplug/acpiphp.h | 1
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 63
-rw-r--r--  drivers/pci/hotplug/ibmphp_core.c | 54
-rw-r--r--  drivers/pci/htirq.c | 4
-rw-r--r--  drivers/pci/intel-iommu.c | 9
-rw-r--r--  drivers/pci/intr_remapping.c | 54
-rw-r--r--  drivers/pci/probe.c | 2
-rw-r--r--  drivers/pnp/pnpacpi/rsparser.c | 2
-rw-r--r--  drivers/rtc/Kconfig | 2
-rw-r--r--  drivers/s390/block/dasd.c | 37
-rw-r--r--  drivers/s390/block/dasd_diag.c | 5
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 6
-rw-r--r--  drivers/s390/block/dasd_fba.c | 7
-rw-r--r--  drivers/s390/block/dcssblk.c | 2
-rw-r--r--  drivers/s390/block/xpram.c | 2
-rw-r--r--  drivers/s390/char/tape_34xx.c | 2
-rw-r--r--  drivers/s390/char/tape_3590.c | 2
-rw-r--r--  drivers/s390/char/tape_block.c | 26
-rw-r--r--  drivers/sbus/char/jsflash.c | 26
-rw-r--r--  drivers/scsi/eata.c | 24
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 16
-rw-r--r--  drivers/scsi/libsas/sas_host_smp.c | 49
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 22
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_transport.c | 23
-rw-r--r--  drivers/scsi/osd/osd_initiator.c | 72
-rw-r--r--  drivers/scsi/scsi_lib.c | 87
-rw-r--r--  drivers/scsi/scsi_tgt_lib.c | 2
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 4
-rw-r--r--  drivers/scsi/sd.c | 26
-rw-r--r--  drivers/scsi/sd_dif.c | 2
-rw-r--r--  drivers/scsi/sg.c | 18
-rw-r--r--  drivers/scsi/sr.c | 17
-rw-r--r--  drivers/scsi/st.c | 6
-rw-r--r--  drivers/scsi/u14-34f.c | 22
-rw-r--r--  drivers/serial/8250.c | 22
-rw-r--r--  drivers/serial/8250_gsc.c | 4
-rw-r--r--  drivers/serial/8250_pci.c | 3
-rw-r--r--  drivers/serial/Kconfig | 8
-rw-r--r--  drivers/serial/Makefile | 1
-rw-r--r--  drivers/serial/bfin_5xx.c | 77
-rw-r--r--  drivers/serial/bfin_sport_uart.c | 58
-rw-r--r--  drivers/serial/icom.c | 20
-rw-r--r--  drivers/serial/imx.c | 296
-rw-r--r--  drivers/serial/jsm/jsm.h | 1
-rw-r--r--  drivers/serial/jsm/jsm_tty.c | 14
-rw-r--r--  drivers/serial/mpc52xx_uart.c | 2
-rw-r--r--  drivers/serial/sh-sci.c | 388
-rw-r--r--  drivers/serial/sh-sci.h | 42
-rw-r--r--  drivers/serial/timbuart.c | 526
-rw-r--r--  drivers/serial/timbuart.h | 58
-rw-r--r--  drivers/sh/intc.c | 11
-rw-r--r--  drivers/ssb/embedded.c | 1
-rw-r--r--  drivers/usb/Makefile | 1
-rw-r--r--  drivers/usb/class/cdc-acm.c | 445
-rw-r--r--  drivers/usb/class/cdc-acm.h | 5
-rw-r--r--  drivers/usb/core/inode.c | 5
-rw-r--r--  drivers/usb/gadget/atmel_usba_udc.c | 5
-rw-r--r--  drivers/usb/host/isp1760-hcd.c | 24
-rw-r--r--  drivers/usb/serial/belkin_sa.c | 6
-rw-r--r--  drivers/usb/serial/ch341.c | 46
-rw-r--r--  drivers/usb/serial/console.c | 6
-rw-r--r--  drivers/usb/serial/cp210x.c | 253
-rw-r--r--  drivers/usb/serial/cyberjack.c | 6
-rw-r--r--  drivers/usb/serial/cypress_m8.c | 81
-rw-r--r--  drivers/usb/serial/digi_acceleport.c | 75
-rw-r--r--  drivers/usb/serial/empeg.c | 6
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 149
-rw-r--r--  drivers/usb/serial/garmin_gps.c | 3
-rw-r--r--  drivers/usb/serial/generic.c | 3
-rw-r--r--  drivers/usb/serial/io_edgeport.c | 10
-rw-r--r--  drivers/usb/serial/io_ti.c | 3
-rw-r--r--  drivers/usb/serial/ipaq.c | 6
-rw-r--r--  drivers/usb/serial/ipw.c | 18
-rw-r--r--  drivers/usb/serial/ir-usb.c | 6
-rw-r--r--  drivers/usb/serial/iuu_phoenix.c | 102
-rw-r--r--  drivers/usb/serial/keyspan.c | 13
-rw-r--r--  drivers/usb/serial/keyspan.h | 8
-rw-r--r--  drivers/usb/serial/keyspan_pda.c | 48
-rw-r--r--  drivers/usb/serial/kl5kusb105.c | 6
-rw-r--r--  drivers/usb/serial/kobil_sct.c | 9
-rw-r--r--  drivers/usb/serial/mct_u232.c | 37
-rw-r--r--  drivers/usb/serial/mos7720.c | 3
-rw-r--r--  drivers/usb/serial/mos7840.c | 48
-rw-r--r--  drivers/usb/serial/navman.c | 3
-rw-r--r--  drivers/usb/serial/omninet.c | 6
-rw-r--r--  drivers/usb/serial/opticon.c | 3
-rw-r--r--  drivers/usb/serial/option.c | 68
-rw-r--r--  drivers/usb/serial/oti6858.c | 57
-rw-r--r--  drivers/usb/serial/pl2303.c | 79
-rw-r--r--  drivers/usb/serial/sierra.c | 351
-rw-r--r--  drivers/usb/serial/spcp8x5.c | 85
-rw-r--r--  drivers/usb/serial/symbolserial.c | 3
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.c | 6
-rw-r--r--  drivers/usb/serial/usb-serial.c | 145
-rw-r--r--  drivers/usb/serial/visor.c | 6
-rw-r--r--  drivers/usb/serial/whiteheat.c | 33
-rw-r--r--  drivers/usb/storage/scsiglue.c | 4
-rw-r--r--  drivers/video/atmel_lcdfb.c | 10
-rw-r--r--  drivers/video/console/vgacon.c | 5
-rw-r--r--  drivers/video/hitfb.c | 4
-rw-r--r--  drivers/video/s3c-fb.c | 12
-rw-r--r--  drivers/xen/Kconfig | 20
-rw-r--r--  drivers/xen/Makefile | 4
-rw-r--r--  drivers/xen/events.c | 20
-rw-r--r--  drivers/xen/evtchn.c | 507
-rw-r--r--  drivers/xen/manage.c | 9
-rw-r--r--  drivers/xen/sys-hypervisor.c | 445
-rw-r--r--  drivers/xen/xenbus/xenbus_probe.c | 61
-rw-r--r--  drivers/xen/xenbus/xenbus_xs.c | 2
-rw-r--r--  drivers/xen/xenfs/super.c | 19
309 files changed, 14385 insertions, 5819 deletions
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
index 95650f83ce2..bc46de3d967 100644
--- a/drivers/acpi/pci_bind.c
+++ b/drivers/acpi/pci_bind.c
@@ -116,9 +116,6 @@ int acpi_pci_bind(struct acpi_device *device)
struct acpi_pci_data *pdata;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_handle handle;
- struct pci_dev *dev;
- struct pci_bus *bus;
-
if (!device || !device->parent)
return -EINVAL;
@@ -176,20 +173,9 @@ int acpi_pci_bind(struct acpi_device *device)
* Locate matching device in PCI namespace. If it doesn't exist
* this typically means that the device isn't currently inserted
* (e.g. docking station, port replicator, etc.).
- * We cannot simply search the global pci device list, since
- * PCI devices are added to the global pci list when the root
- * bridge start ops are run, which may not have happened yet.
*/
- bus = pci_find_bus(data->id.segment, data->id.bus);
- if (bus) {
- list_for_each_entry(dev, &bus->devices, bus_list) {
- if (dev->devfn == PCI_DEVFN(data->id.device,
- data->id.function)) {
- data->dev = dev;
- break;
- }
- }
- }
+ data->dev = pci_get_slot(pdata->bus,
+ PCI_DEVFN(data->id.device, data->id.function));
if (!data->dev) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Device %04x:%02x:%02x.%d not present in PCI namespace\n",
@@ -259,9 +245,10 @@ int acpi_pci_bind(struct acpi_device *device)
end:
kfree(buffer.pointer);
- if (result)
+ if (result) {
+ pci_dev_put(data->dev);
kfree(data);
-
+ }
return result;
}
@@ -303,6 +290,7 @@ static int acpi_pci_unbind(struct acpi_device *device)
if (data->dev->subordinate) {
acpi_pci_irq_del_prt(data->id.segment, data->bus->number);
}
+ pci_dev_put(data->dev);
kfree(data);
end:
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 51b9f8280f8..2faa9e2ac89 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -401,7 +401,8 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
/* Interrupt Line values above 0xF are forbidden */
if (dev->irq > 0 && (dev->irq <= 0xF)) {
printk(" - using IRQ %d\n", dev->irq);
- acpi_register_gsi(dev->irq, ACPI_LEVEL_SENSITIVE,
+ acpi_register_gsi(&dev->dev, dev->irq,
+ ACPI_LEVEL_SENSITIVE,
ACPI_ACTIVE_LOW);
return 0;
} else {
@@ -410,7 +411,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
}
}
- rc = acpi_register_gsi(gsi, triggering, polarity);
+ rc = acpi_register_gsi(&dev->dev, gsi, triggering, polarity);
if (rc < 0) {
dev_warn(&dev->dev, "PCI INT %c: failed to register GSI\n",
pin_name(pin));
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 45ad3288c5f..23f0fb84f1c 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -844,7 +844,7 @@ static int acpi_processor_add(struct acpi_device *device)
if (!pr)
return -ENOMEM;
- if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
kfree(pr);
return -ENOMEM;
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 72069ba5f1e..10a2d913635 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -148,6 +148,9 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr,
if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
return;
+ if (boot_cpu_has(X86_FEATURE_AMDC1E))
+ type = ACPI_STATE_C1;
+
/*
* Check, if one of the previous states already marked the lapic
* unstable
@@ -611,6 +614,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
switch (cx->type) {
case ACPI_STATE_C1:
cx->valid = 1;
+ acpi_timer_check_state(i, pr, cx);
break;
case ACPI_STATE_C2:
@@ -830,11 +834,12 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
/* Do not access any ACPI IO ports in suspend path */
if (acpi_idle_suspend) {
- acpi_safe_halt();
local_irq_enable();
+ cpu_relax();
return 0;
}
+ acpi_state_timer_broadcast(pr, cx, 1);
kt1 = ktime_get_real();
acpi_idle_do_entry(cx);
kt2 = ktime_get_real();
@@ -842,6 +847,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
local_irq_enable();
cx->usage++;
+ acpi_state_timer_broadcast(pr, cx, 0);
return idle_time;
}
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index cafb41000f6..60e543d3234 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -309,9 +309,15 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
(u32) px->bus_master_latency,
(u32) px->control, (u32) px->status));
- if (!px->core_frequency) {
- printk(KERN_ERR PREFIX
- "Invalid _PSS data: freq is zero\n");
+ /*
+ * Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq
+ */
+ if (!px->core_frequency ||
+ ((u32)(px->core_frequency * 1000) !=
+ (px->core_frequency * 1000))) {
+ printk(KERN_ERR FW_BUG PREFIX
+ "Invalid BIOS _PSS frequency: 0x%llx MHz\n",
+ px->core_frequency);
result = -EFAULT;
kfree(pr->performance->states);
goto end;
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 7f16f5f8e7d..227543789ba 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -840,7 +840,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
state = acpi_get_throttling_state(pr, value);
if (state == -1) {
ACPI_WARNING((AE_INFO,
- "Invalid throttling state, reset\n"));
+ "Invalid throttling state, reset"));
state = 0;
ret = acpi_processor_set_throttling(pr, state);
if (ret)
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 810cca90ca7..1bdfb37377e 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -570,6 +570,22 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710Z"),
},
},
+ {
+ .callback = video_set_bqc_offset,
+ .ident = "eMachines E510",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "EMACHINES"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "eMachines E510"),
+ },
+ },
+ {
+ .callback = video_set_bqc_offset,
+ .ident = "Acer Aspire 5315",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5315"),
+ },
+ },
{}
};
@@ -2334,7 +2350,7 @@ static int __init acpi_video_init(void)
return acpi_video_register();
}
-void __exit acpi_video_exit(void)
+void acpi_video_exit(void)
{
acpi_bus_unregister_driver(&acpi_video_bus);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 08186ecbaf8..15a23031833 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -77,8 +77,6 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
size_t size);
static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
ssize_t size);
-#define MAX_SLOTS 8
-#define MAX_RETRY 15
enum {
AHCI_PCI_BAR = 5,
@@ -220,6 +218,7 @@ enum {
AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
+ AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
/* ap->flags bits */
@@ -230,6 +229,10 @@ enum {
ICH_MAP = 0x90, /* ICH MAP register */
+ /* em constants */
+ EM_MAX_SLOTS = 8,
+ EM_MAX_RETRY = 5,
+
/* em_ctl bits */
EM_CTL_RST = (1 << 9), /* Reset */
EM_CTL_TM = (1 << 8), /* Transmit Message */
@@ -281,8 +284,8 @@ struct ahci_port_priv {
unsigned int ncq_saw_dmas:1;
unsigned int ncq_saw_sdb:1;
u32 intr_mask; /* interrupts to enable */
- struct ahci_em_priv em_priv[MAX_SLOTS];/* enclosure management info
- * per PM slot */
+ /* enclosure management info per PM slot */
+ struct ahci_em_priv em_priv[EM_MAX_SLOTS];
};
static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
@@ -312,7 +315,6 @@ static void ahci_error_handler(struct ata_port *ap);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
static int ahci_port_resume(struct ata_port *ap);
static void ahci_dev_config(struct ata_device *dev);
-static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
u32 opts);
#ifdef CONFIG_PM
@@ -403,14 +405,14 @@ static struct ata_port_operations ahci_sb600_ops = {
#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
static const struct ata_port_info ahci_port_info[] = {
- /* board_ahci */
+ [board_ahci] =
{
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- /* board_ahci_vt8251 */
+ [board_ahci_vt8251] =
{
AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
.flags = AHCI_FLAG_COMMON,
@@ -418,7 +420,7 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_vt8251_ops,
},
- /* board_ahci_ign_iferr */
+ [board_ahci_ign_iferr] =
{
AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
.flags = AHCI_FLAG_COMMON,
@@ -426,17 +428,16 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- /* board_ahci_sb600 */
+ [board_ahci_sb600] =
{
AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
- AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
- AHCI_HFLAG_SECT255),
+ AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_sb600_ops,
},
- /* board_ahci_mv */
+ [board_ahci_mv] =
{
AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
@@ -446,7 +447,7 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- /* board_ahci_sb700, for SB700 and SB800 */
+ [board_ahci_sb700] = /* for SB700 and SB800 */
{
AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
.flags = AHCI_FLAG_COMMON,
@@ -454,7 +455,7 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_sb600_ops,
},
- /* board_ahci_mcp65 */
+ [board_ahci_mcp65] =
{
AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
.flags = AHCI_FLAG_COMMON,
@@ -462,7 +463,7 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- /* board_ahci_nopmp */
+ [board_ahci_nopmp] =
{
AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
.flags = AHCI_FLAG_COMMON,
@@ -1140,12 +1141,12 @@ static void ahci_start_port(struct ata_port *ap)
emp = &pp->em_priv[link->pmp];
/* EM Transmit bit maybe busy during init */
- for (i = 0; i < MAX_RETRY; i++) {
+ for (i = 0; i < EM_MAX_RETRY; i++) {
rc = ahci_transmit_led_message(ap,
emp->led_state,
4);
if (rc == -EBUSY)
- udelay(100);
+ msleep(1);
else
break;
}
@@ -1339,7 +1340,7 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
/* get the slot number from the message */
pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
- if (pmp < MAX_SLOTS)
+ if (pmp < EM_MAX_SLOTS)
emp = &pp->em_priv[pmp];
else
return -EINVAL;
@@ -1407,7 +1408,7 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
/* get the slot number from the message */
pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
- if (pmp < MAX_SLOTS)
+ if (pmp < EM_MAX_SLOTS)
emp = &pp->em_priv[pmp];
else
return -EINVAL;
@@ -2316,9 +2317,17 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
u32 ctl;
+ if (mesg.event & PM_EVENT_SUSPEND &&
+ hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "BIOS update required for suspend/resume\n");
+ return -EIO;
+ }
+
if (mesg.event & PM_EVENT_SLEEP) {
/* AHCI spec rev1.1 section 8.3.3:
* Software must disable interrupts prior to requesting a
@@ -2575,6 +2584,51 @@ static void ahci_p5wdh_workaround(struct ata_host *host)
}
}
+/*
+ * SB600 ahci controller on ASUS M2A-VM can't do 64bit DMA with older
+ * BIOS. The oldest version known to be broken is 0901 and working is
+ * 1501 which was released on 2007-10-26. Force 32bit DMA on anything
+ * older than 1501. Please read bko#9412 for more info.
+ */
+static bool ahci_asus_m2a_vm_32bit_only(struct pci_dev *pdev)
+{
+ static const struct dmi_system_id sysids[] = {
+ {
+ .ident = "ASUS M2A-VM",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR,
+ "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
+ },
+ },
+ { }
+ };
+ const char *cutoff_mmdd = "10/26";
+ const char *date;
+ int year;
+
+ if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
+ !dmi_check_system(sysids))
+ return false;
+
+ /*
+ * Argh.... both version and date are free form strings.
+ * Let's hope they're using the same date format across
+ * different versions.
+ */
+ date = dmi_get_system_info(DMI_BIOS_DATE);
+ year = dmi_get_year(DMI_BIOS_DATE);
+ if (date && strlen(date) >= 10 && date[2] == '/' && date[5] == '/' &&
+ (year > 2007 ||
+ (year == 2007 && strncmp(date, cutoff_mmdd, 5) >= 0)))
+ return false;
+
+ dev_printk(KERN_WARNING, &pdev->dev, "ASUS M2A-VM: BIOS too old, "
+ "forcing 32bit DMA, update BIOS\n");
+
+ return true;
+}
+
static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
{
static const struct dmi_system_id broken_systems[] = {
@@ -2610,6 +2664,63 @@ static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
return false;
}
+static bool ahci_broken_suspend(struct pci_dev *pdev)
+{
+ static const struct dmi_system_id sysids[] = {
+ /*
+ * On HP dv[4-6] and HDX18 with earlier BIOSen, link
+ * to the harddisk doesn't become online after
+ * resuming from STR. Warn and fail suspend.
+ */
+ {
+ .ident = "dv4",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME,
+ "HP Pavilion dv4 Notebook PC"),
+ },
+ .driver_data = "F.30", /* cutoff BIOS version */
+ },
+ {
+ .ident = "dv5",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME,
+ "HP Pavilion dv5 Notebook PC"),
+ },
+ .driver_data = "F.16", /* cutoff BIOS version */
+ },
+ {
+ .ident = "dv6",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME,
+ "HP Pavilion dv6 Notebook PC"),
+ },
+ .driver_data = "F.21", /* cutoff BIOS version */
+ },
+ {
+ .ident = "HDX18",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME,
+ "HP HDX18 Notebook PC"),
+ },
+ .driver_data = "F.23", /* cutoff BIOS version */
+ },
+ { } /* terminate list */
+ };
+ const struct dmi_system_id *dmi = dmi_first_match(sysids);
+ const char *ver;
+
+ if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
+ return false;
+
+ ver = dmi_get_system_info(DMI_BIOS_VERSION);
+
+ return !ver || strcmp(ver, dmi->driver_data) < 0;
+}
+
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
@@ -2678,6 +2789,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
+ /* apply ASUS M2A_VM quirk */
+ if (ahci_asus_m2a_vm_32bit_only(pdev))
+ hpriv->flags |= AHCI_HFLAG_32BIT_ONLY;
+
if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
pci_enable_msi(pdev);
@@ -2715,6 +2830,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
"quirky BIOS, skipping spindown on poweroff\n");
}
+ if (ahci_broken_suspend(pdev)) {
+ hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
+ dev_printk(KERN_WARNING, &pdev->dev,
+ "BIOS update required for suspend/resume\n");
+ }
+
/* CAP.NP sometimes indicate the index of the last enabled
* port, at other times, that of the last possible port, so
* determining the maximum port number requires looking at
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index d51a17c0f59..d0a14cf2bd7 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -223,10 +223,8 @@ static const struct pci_device_id piix_pci_tbl[] = {
/* ICH8 Mobile PATA Controller */
{ 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
- /* NOTE: The following PCI ids must be kept in sync with the
- * list in drivers/pci/quirks.c.
- */
-
+ /* SATA ports */
+
/* 82801EB (ICH5) */
{ 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
/* 82801EB (ICH5) */
@@ -1455,6 +1453,15 @@ static bool piix_broken_system_poweroff(struct pci_dev *pdev)
/* PCI slot number of the controller */
.driver_data = (void *)0x1FUL,
},
+ {
+ .ident = "HP Compaq nc6000",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nc6000"),
+ },
+ /* PCI slot number of the controller */
+ .driver_data = (void *)0x1FUL,
+ },
{ } /* terminate list */
};
@@ -1500,8 +1507,8 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
- /* no hotplugging support (FIXME) */
- if (!in_module_init)
+ /* no hotplugging support for later devices (FIXME) */
+ if (!in_module_init && ent->driver_data >= ich5_sata)
return -ENODEV;
if (piix_broken_system_poweroff(pdev)) {
@@ -1582,6 +1589,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
host->ports[1]->mwdma_mask = 0;
host->ports[1]->udma_mask = 0;
}
+ host->flags |= ATA_HOST_PARALLEL_SCAN;
pci_set_master(pdev);
return ata_pci_sff_activate_host(host, ata_sff_interrupt, &piix_sht);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c9242301cfa..ca4d208ddf3 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5031,7 +5031,6 @@ int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
{
int nr_done = 0;
u32 done_mask;
- int i;
done_mask = ap->qc_active ^ qc_active;
@@ -5041,16 +5040,16 @@ int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
return -EINVAL;
}
- for (i = 0; i < ATA_MAX_QUEUE; i++) {
+ while (done_mask) {
struct ata_queued_cmd *qc;
+ unsigned int tag = __ffs(done_mask);
- if (!(done_mask & (1 << i)))
- continue;
-
- if ((qc = ata_qc_from_tag(ap, i))) {
+ qc = ata_qc_from_tag(ap, tag);
+ if (qc) {
ata_qc_complete(qc);
nr_done++;
}
+ done_mask &= ~(1 << tag);
}
return nr_done;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 342316064e9..d0dfeef55db 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1084,7 +1084,7 @@ static int atapi_drain_needed(struct request *rq)
if (likely(!blk_pc_request(rq)))
return 0;
- if (!rq->data_len || (rq->cmd_flags & REQ_RW))
+ if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW))
return 0;
return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index bb18415d3d6..bbbb1fab175 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -727,17 +727,23 @@ unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
else
iowrite16_rep(data_addr, buf, words);
- /* Transfer trailing 1 byte, if any. */
+ /* Transfer trailing byte, if any. */
if (unlikely(buflen & 0x01)) {
- __le16 align_buf[1] = { 0 };
- unsigned char *trailing_buf = buf + buflen - 1;
+ unsigned char pad[2];
+ /* Point buf to the tail of buffer */
+ buf += buflen - 1;
+
+ /*
+ * Use io*16_rep() accessors here as well to avoid pointlessly
+ * swapping bytes to and fro on the big endian machines...
+ */
if (rw == READ) {
- align_buf[0] = cpu_to_le16(ioread16(data_addr));
- memcpy(trailing_buf, align_buf, 1);
+ ioread16_rep(data_addr, pad, 1);
+ *buf = pad[0];
} else {
- memcpy(align_buf, trailing_buf, 1);
- iowrite16(le16_to_cpu(align_buf[0]), data_addr);
+ pad[0] = *buf;
+ iowrite16_rep(data_addr, pad, 1);
}
words++;
}
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 751b7ea4816..fc9c5d6d7d8 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -497,14 +497,16 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
};
/* Revision 0x20 added DMA */
static const struct ata_port_info info_20 = {
- .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
+ ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.port_ops = &ali_20_port_ops
};
/* Revision 0x20 with support logic added UDMA */
static const struct ata_port_info info_20_udma = {
- .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
+ ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
@@ -512,7 +514,8 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
};
/* Revision 0xC2 adds UDMA66 */
static const struct ata_port_info info_c2 = {
- .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
+ ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
@@ -520,7 +523,8 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
};
/* Revision 0xC3 is UDMA66 for now */
static const struct ata_port_info info_c3 = {
- .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
+ ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
@@ -528,7 +532,8 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
};
/* Revision 0xC4 is UDMA100 */
static const struct ata_port_info info_c4 = {
- .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
+ ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
@@ -536,7 +541,7 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
};
/* Revision 0xC5 is UDMA133 with LBA48 DMA */
static const struct ata_port_info info_c5 = {
- .flags = ATA_FLAG_SLAVE_POSS,
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 2085e0a3a05..2a6412f5d11 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -22,7 +22,7 @@
#include <linux/ata.h>
#define DRV_NAME "pata_efar"
-#define DRV_VERSION "0.4.4"
+#define DRV_VERSION "0.4.5"
/**
* efar_pre_reset - Enable bits
@@ -98,18 +98,17 @@ static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
{ 2, 1 },
{ 2, 3 }, };
- if (pio > 2)
- control |= 1; /* TIME1 enable */
+ if (pio > 1)
+ control |= 1; /* TIME */
if (ata_pio_need_iordy(adev)) /* PIO 3/4 require IORDY */
- control |= 2; /* IE enable */
- /* Intel specifies that the PPE functionality is for disk only */
+ control |= 2; /* IE */
+ /* Intel specifies that the prefetch/posting is for disk only */
if (adev->class == ATA_DEV_ATA)
- control |= 4; /* PPE enable */
+ control |= 4; /* PPE */
pci_read_config_word(dev, idetm_port, &idetm_data);
- /* Enable PPE, IE and TIME as appropriate */
-
+ /* Set PPE, IE, and TIME as appropriate */
if (adev->devno == 0) {
idetm_data &= 0xCCF0;
idetm_data |= control;
@@ -129,7 +128,7 @@ static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
pci_write_config_byte(dev, 0x44, slave_data);
}
- idetm_data |= 0x4000; /* Ensure SITRE is enabled */
+ idetm_data |= 0x4000; /* Ensure SITRE is set */
pci_write_config_word(dev, idetm_port, idetm_data);
}
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index f72c6c5b820..6932e56d179 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -48,6 +48,7 @@
*
*/
+#include <linux/async.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -1028,6 +1029,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
&legacy_sht);
if (ret)
goto fail;
+ async_synchronize_full();
ld->platform_dev = pdev;
/* Nothing found means we drop the port as its probably not there */
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index bdb236957cb..f0d52f72f5b 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -20,13 +20,24 @@
/* No PIO or DMA methods needed for this device */
+static unsigned int netcell_read_id(struct ata_device *adev,
+ struct ata_taskfile *tf, u16 *id)
+{
+ unsigned int err_mask = ata_do_dev_read_id(adev, tf, id);
+ /* Firmware forgets to mark words 85-87 valid */
+ if (err_mask == 0)
+ id[ATA_ID_CSF_DEFAULT] |= 0x4000;
+ return err_mask;
+}
+
static struct scsi_host_template netcell_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations netcell_ops = {
.inherits = &ata_bmdma_port_ops,
- .cable_detect = ata_cable_80wire,
+ .cable_detect = ata_cable_80wire,
+ .read_id = netcell_read_id,
};
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 6cda12ba812..b2d11f300c3 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -305,8 +305,8 @@ static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
-static int nv_noclassify_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline);
+static int nv_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
@@ -406,49 +406,82 @@ static struct scsi_host_template nv_swncq_sht = {
.slave_configure = nv_swncq_slave_config,
};
-static struct ata_port_operations nv_common_ops = {
+/*
+ * NV SATA controllers have various different problems with hardreset
+ * protocol depending on the specific controller and device.
+ *
+ * GENERIC:
+ *
+ * bko11195 reports that link doesn't come online after hardreset on
+ * generic nv's and there have been several other similar reports on
+ * linux-ide.
+ *
+ * bko12351#c23 reports that warmplug on MCP61 doesn't work with
+ * softreset.
+ *
+ * NF2/3:
+ *
+ * bko3352 reports nf2/3 controllers can't determine device signature
+ * reliably after hardreset. The following thread reports detection
+ * failure on cold boot with the standard debouncing timing.
+ *
+ * http://thread.gmane.org/gmane.linux.ide/34098
+ *
+ * bko12176 reports that hardreset fails to bring up the link during
+ * boot on nf2.
+ *
+ * CK804:
+ *
+ * For initial probing after boot and hot plugging, hardreset mostly
+ * works fine on CK804 but curiously, reprobing on the initial port
+ * by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
+ * FIS in somewhat undeterministic way.
+ *
+ * SWNCQ:
+ *
+ * bko12351 reports that when SWNCQ is enabled, for hotplug to work,
+ * hardreset should be used and hardreset can't report proper
+ * signature, which suggests that mcp5x is closer to nf2 as long as
+ * reset quirkiness is concerned.
+ *
+ * bko12703 reports that boot probing fails for intel SSD with
+ * hardreset. Link fails to come online. Softreset works fine.
+ *
+ * The failures are varied but the following patterns seem true for
+ * all flavors.
+ *
+ * - Softreset during boot always works.
+ *
+ * - Hardreset during boot sometimes fails to bring up the link on
+ * certain combinations and device signature acquisition is
+ * unreliable.
+ *
+ * - Hardreset is often necessary after hotplug.
+ *
+ * So, preferring softreset for boot probing and error handling (as
+ * hardreset might bring down the link) but using hardreset for
+ * post-boot probing should work around the above issues in most
+ * cases. Define nv_hardreset() which only kicks in for post-boot
+ * probing and use it for all variants.
+ */
+static struct ata_port_operations nv_generic_ops = {
.inherits = &ata_bmdma_port_ops,
.lost_interrupt = ATA_OP_NULL,
.scr_read = nv_scr_read,
.scr_write = nv_scr_write,
+ .hardreset = nv_hardreset,
};
-/* OSDL bz11195 reports that link doesn't come online after hardreset
- * on generic nv's and there have been several other similar reports
- * on linux-ide. Disable hardreset for generic nv's.
- */
-static struct ata_port_operations nv_generic_ops = {
- .inherits = &nv_common_ops,
- .hardreset = ATA_OP_NULL,
-};
-
-/* nf2 is ripe with hardreset related problems.
- *
- * kernel bz#3352 reports nf2/3 controllers can't determine device
- * signature reliably. The following thread reports detection failure
- * on cold boot with the standard debouncing timing.
- *
- * http://thread.gmane.org/gmane.linux.ide/34098
- *
- * And bz#12176 reports that hardreset simply doesn't work on nf2.
- * Give up on it and just don't do hardreset.
- */
static struct ata_port_operations nv_nf2_ops = {
.inherits = &nv_generic_ops,
.freeze = nv_nf2_freeze,
.thaw = nv_nf2_thaw,
};
-/* For initial probing after boot and hot plugging, hardreset mostly
- * works fine on CK804 but curiously, reprobing on the initial port by
- * rescanning or rmmod/insmod fails to acquire the initial D2H Reg FIS
- * in somewhat undeterministic way. Use noclassify hardreset.
- */
static struct ata_port_operations nv_ck804_ops = {
- .inherits = &nv_common_ops,
+ .inherits = &nv_generic_ops,
.freeze = nv_ck804_freeze,
.thaw = nv_ck804_thaw,
- .hardreset = nv_noclassify_hardreset,
.host_stop = nv_ck804_host_stop,
};
@@ -476,19 +509,8 @@ static struct ata_port_operations nv_adma_ops = {
.host_stop = nv_adma_host_stop,
};
-/* Kernel bz#12351 reports that when SWNCQ is enabled, for hotplug to
- * work, hardreset should be used and hardreset can't report proper
- * signature, which suggests that mcp5x is closer to nf2 as long as
- * reset quirkiness is concerned. Define separate ops for mcp5x with
- * nv_noclassify_hardreset().
- */
-static struct ata_port_operations nv_mcp5x_ops = {
- .inherits = &nv_common_ops,
- .hardreset = nv_noclassify_hardreset,
-};
-
static struct ata_port_operations nv_swncq_ops = {
- .inherits = &nv_mcp5x_ops,
+ .inherits = &nv_generic_ops,
.qc_defer = ata_std_qc_defer,
.qc_prep = nv_swncq_qc_prep,
@@ -557,7 +579,7 @@ static const struct ata_port_info nv_port_info[] = {
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
- .port_ops = &nv_mcp5x_ops,
+ .port_ops = &nv_generic_ops,
.private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
},
/* SWNCQ */
@@ -1559,15 +1581,24 @@ static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
return 0;
}
-static int nv_noclassify_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
+static int nv_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
{
- bool online;
- int rc;
+ struct ata_eh_context *ehc = &link->eh_context;
- rc = sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
- &online, NULL);
- return online ? -EAGAIN : rc;
+ /* Do hardreset iff it's post-boot probing, please read the
+ * comment above port ops for details.
+ */
+ if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
+ !ata_dev_enabled(link->device))
+ sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
+ NULL, NULL);
+ else if (!(ehc->i.flags & ATA_EHI_QUIET))
+ ata_link_printk(link, KERN_INFO,
+ "nv: skipping hardreset on occupied port\n");
+
+ /* device signature acquisition is unreliable */
+ return -EAGAIN;
}
static void nv_nf2_freeze(struct ata_port *ap)
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index e67ce8e5caa..030ec079b18 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -183,7 +183,7 @@ static struct scsi_host_template sil_sht = {
};
static struct ata_port_operations sil_ops = {
- .inherits = &ata_bmdma_port_ops,
+ .inherits = &ata_bmdma32_port_ops,
.dev_config = sil_dev_config,
.set_mode = sil_set_mode,
.bmdma_setup = sil_bmdma_setup,
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index eb05a3c82a9..bbcf970068a 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -193,6 +193,7 @@ enum {
PDC_TIMER_MASK_INT,
};
+#define ECC_ERASE_BUF_SZ (128 * 1024)
struct pdc_port_priv {
u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
@@ -1280,7 +1281,6 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
{
int speed, size, length;
u32 addr, spd0, pci_status;
- u32 tmp = 0;
u32 time_period = 0;
u32 tcount = 0;
u32 ticks = 0;
@@ -1395,14 +1395,17 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
PDC_DIMM_SPD_TYPE, &spd0);
if (spd0 == 0x02) {
+ void *buf;
VPRINTK("Start ECC initialization\n");
addr = 0;
length = size * 1024 * 1024;
+ buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
while (addr < length) {
- pdc20621_put_to_dimm(host, (void *) &tmp, addr,
- sizeof(u32));
- addr += sizeof(u32);
+ pdc20621_put_to_dimm(host, buf, addr,
+ ECC_ERASE_BUF_SZ);
+ addr += ECC_ERASE_BUF_SZ;
}
+ kfree(buf);
VPRINTK("Finish ECC initialization\n");
}
return 0;
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index dc030f1f00f..c6599618523 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -700,8 +700,10 @@ int bus_add_driver(struct device_driver *drv)
}
kobject_uevent(&priv->kobj, KOBJ_ADD);
- return error;
+ return 0;
out_unregister:
+ kfree(drv->p);
+ drv->p = NULL;
kobject_put(&priv->kobj);
out_put_bus:
bus_put(bus);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 4aa527b8a91..1977d4beb89 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -879,7 +879,7 @@ int device_add(struct device *dev)
}
if (!dev_name(dev))
- goto done;
+ goto name_error;
pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
@@ -978,6 +978,9 @@ done:
cleanup_device_parent(dev);
if (parent)
put_device(parent);
+name_error:
+ kfree(dev->p);
+ dev->p = NULL;
goto done;
}
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index c51f11bb29a..8ae0f63602e 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -257,6 +257,10 @@ EXPORT_SYMBOL_GPL(driver_register);
*/
void driver_unregister(struct device_driver *drv)
{
+ if (!drv || !drv->p) {
+ WARN(1, "Unexpected driver unregister!\n");
+ return;
+ }
driver_remove_groups(drv, drv->groups);
bus_remove_driver(drv);
}
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index f22ed6cc69f..668dc234b8e 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -3321,7 +3321,7 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_
DAC960_Command_T *Command;
while(1) {
- Request = elv_next_request(req_q);
+ Request = blk_peek_request(req_q);
if (!Request)
return 1;
@@ -3338,10 +3338,10 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_
}
Command->Completion = Request->end_io_data;
Command->LogicalDriveNumber = (long)Request->rq_disk->private_data;
- Command->BlockNumber = Request->sector;
- Command->BlockCount = Request->nr_sectors;
+ Command->BlockNumber = blk_rq_pos(Request);
+ Command->BlockCount = blk_rq_sectors(Request);
Command->Request = Request;
- blkdev_dequeue_request(Request);
+ blk_start_request(Request);
Command->SegmentCount = blk_rq_map_sg(req_q,
Command->Request, Command->cmd_sglist);
/* pci_map_sg MAY change the value of SegCount */
@@ -3431,7 +3431,7 @@ static void DAC960_queue_partial_rw(DAC960_Command_T *Command)
* successfully as possible.
*/
Command->SegmentCount = 1;
- Command->BlockNumber = Request->sector;
+ Command->BlockNumber = blk_rq_pos(Request);
Command->BlockCount = 1;
DAC960_QueueReadWriteCommand(Command);
return;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index ddea8e485cc..f42fa50d355 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -412,7 +412,7 @@ config ATA_OVER_ETH
config MG_DISK
tristate "mGine mflash, gflash support"
- depends on ARM && ATA && GPIOLIB
+ depends on ARM && GPIOLIB
help
mGine mFlash(gFlash) block device driver
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 8df436ff706..9c6e5b0fe89 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -112,8 +112,6 @@ module_param(fd_def_df0, ulong, 0);
MODULE_LICENSE("GPL");
static struct request_queue *floppy_queue;
-#define QUEUE (floppy_queue)
-#define CURRENT elv_next_request(floppy_queue)
/*
* Macros
@@ -1335,64 +1333,60 @@ static int get_track(int drive, int track)
static void redo_fd_request(void)
{
+ struct request *rq;
unsigned int cnt, block, track, sector;
int drive;
struct amiga_floppy_struct *floppy;
char *data;
unsigned long flags;
+ int err;
- repeat:
- if (!CURRENT) {
+next_req:
+ rq = blk_fetch_request(floppy_queue);
+ if (!rq) {
/* Nothing left to do */
return;
}
- floppy = CURRENT->rq_disk->private_data;
+ floppy = rq->rq_disk->private_data;
drive = floppy - unit;
+next_segment:
/* Here someone could investigate to be more efficient */
- for (cnt = 0; cnt < CURRENT->current_nr_sectors; cnt++) {
+ for (cnt = 0, err = 0; cnt < blk_rq_cur_sectors(rq); cnt++) {
#ifdef DEBUG
printk("fd: sector %ld + %d requested for %s\n",
- CURRENT->sector,cnt,
- (rq_data_dir(CURRENT) == READ) ? "read" : "write");
+ blk_rq_pos(rq), cnt,
+ (rq_data_dir(rq) == READ) ? "read" : "write");
#endif
- block = CURRENT->sector + cnt;
+ block = blk_rq_pos(rq) + cnt;
if ((int)block > floppy->blocks) {
- end_request(CURRENT, 0);
- goto repeat;
+ err = -EIO;
+ break;
}
track = block / (floppy->dtype->sects * floppy->type->sect_mult);
sector = block % (floppy->dtype->sects * floppy->type->sect_mult);
- data = CURRENT->buffer + 512 * cnt;
+ data = rq->buffer + 512 * cnt;
#ifdef DEBUG
printk("access to track %d, sector %d, with buffer at "
"0x%08lx\n", track, sector, data);
#endif
- if ((rq_data_dir(CURRENT) != READ) && (rq_data_dir(CURRENT) != WRITE)) {
- printk(KERN_WARNING "do_fd_request: unknown command\n");
- end_request(CURRENT, 0);
- goto repeat;
- }
if (get_track(drive, track) == -1) {
- end_request(CURRENT, 0);
- goto repeat;
+ err = -EIO;
+ break;
}
- switch (rq_data_dir(CURRENT)) {
- case READ:
+ if (rq_data_dir(rq) == READ) {
memcpy(data, floppy->trackbuf + sector * 512, 512);
- break;
-
- case WRITE:
+ } else {
memcpy(floppy->trackbuf + sector * 512, data, 512);
/* keep the drive spinning while writes are scheduled */
if (!fd_motor_on(drive)) {
- end_request(CURRENT, 0);
- goto repeat;
+ err = -EIO;
+ break;
}
/*
* setup a callback to write the track buffer
@@ -1404,14 +1398,12 @@ static void redo_fd_request(void)
/* reset the timer */
mod_timer (flush_track_timer + drive, jiffies + 1);
local_irq_restore(flags);
- break;
}
}
- CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
- CURRENT->sector += CURRENT->current_nr_sectors;
- end_request(CURRENT, 1);
- goto repeat;
+ if (__blk_end_request_cur(rq, err))
+ goto next_segment;
+ goto next_req;
}
static void do_fd_request(struct request_queue * q)
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 4234c11c1e4..f5e7180d7f4 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -79,9 +79,7 @@
#undef DEBUG
static struct request_queue *floppy_queue;
-
-#define QUEUE (floppy_queue)
-#define CURRENT elv_next_request(floppy_queue)
+static struct request *fd_request;
/* Disk types: DD, HD, ED */
static struct atari_disk_type {
@@ -376,6 +374,12 @@ static DEFINE_TIMER(readtrack_timer, fd_readtrack_check, 0, 0);
static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0);
static DEFINE_TIMER(fd_timer, check_change, 0, 0);
+static void fd_end_request_cur(int err)
+{
+ if (!__blk_end_request_cur(fd_request, err))
+ fd_request = NULL;
+}
+
static inline void start_motor_off_timer(void)
{
mod_timer(&motor_off_timer, jiffies + FD_MOTOR_OFF_DELAY);
@@ -606,15 +610,15 @@ static void fd_error( void )
return;
}
- if (!CURRENT)
+ if (!fd_request)
return;
- CURRENT->errors++;
- if (CURRENT->errors >= MAX_ERRORS) {
+ fd_request->errors++;
+ if (fd_request->errors >= MAX_ERRORS) {
printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
- end_request(CURRENT, 0);
+ fd_end_request_cur(-EIO);
}
- else if (CURRENT->errors == RECALIBRATE_ERRORS) {
+ else if (fd_request->errors == RECALIBRATE_ERRORS) {
printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
if (SelectedDrive != -1)
SUD.track = -1;
@@ -725,16 +729,14 @@ static void do_fd_action( int drive )
if (IS_BUFFERED( drive, ReqSide, ReqTrack )) {
if (ReqCmd == READ) {
copy_buffer( SECTOR_BUFFER(ReqSector), ReqData );
- if (++ReqCnt < CURRENT->current_nr_sectors) {
+ if (++ReqCnt < blk_rq_cur_sectors(fd_request)) {
/* read next sector */
setup_req_params( drive );
goto repeat;
}
else {
/* all sectors finished */
- CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
- CURRENT->sector += CURRENT->current_nr_sectors;
- end_request(CURRENT, 1);
+ fd_end_request_cur(0);
redo_fd_request();
return;
}
@@ -1132,16 +1134,14 @@ static void fd_rwsec_done1(int status)
}
}
- if (++ReqCnt < CURRENT->current_nr_sectors) {
+ if (++ReqCnt < blk_rq_cur_sectors(fd_request)) {
/* read next sector */
setup_req_params( SelectedDrive );
do_fd_action( SelectedDrive );
}
else {
/* all sectors finished */
- CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
- CURRENT->sector += CURRENT->current_nr_sectors;
- end_request(CURRENT, 1);
+ fd_end_request_cur(0);
redo_fd_request();
}
return;
@@ -1382,7 +1382,7 @@ static void setup_req_params( int drive )
ReqData = ReqBuffer + 512 * ReqCnt;
if (UseTrackbuffer)
- read_track = (ReqCmd == READ && CURRENT->errors == 0);
+ read_track = (ReqCmd == READ && fd_request->errors == 0);
else
read_track = 0;
@@ -1396,25 +1396,27 @@ static void redo_fd_request(void)
int drive, type;
struct atari_floppy_struct *floppy;
- DPRINT(("redo_fd_request: CURRENT=%p dev=%s CURRENT->sector=%ld\n",
- CURRENT, CURRENT ? CURRENT->rq_disk->disk_name : "",
- CURRENT ? CURRENT->sector : 0 ));
+ DPRINT(("redo_fd_request: fd_request=%p dev=%s fd_request->sector=%ld\n",
+ fd_request, fd_request ? fd_request->rq_disk->disk_name : "",
+ fd_request ? blk_rq_pos(fd_request) : 0 ));
IsFormatting = 0;
repeat:
+ if (!fd_request) {
+ fd_request = blk_fetch_request(floppy_queue);
+ if (!fd_request)
+ goto the_end;
+ }
- if (!CURRENT)
- goto the_end;
-
- floppy = CURRENT->rq_disk->private_data;
+ floppy = fd_request->rq_disk->private_data;
drive = floppy - unit;
type = floppy->type;
if (!UD.connected) {
/* drive not connected */
printk(KERN_ERR "Unknown Device: fd%d\n", drive );
- end_request(CURRENT, 0);
+ fd_end_request_cur(-EIO);
goto repeat;
}
@@ -1430,12 +1432,12 @@ repeat:
/* user supplied disk type */
if (--type >= NUM_DISK_MINORS) {
printk(KERN_WARNING "fd%d: invalid disk format", drive );
- end_request(CURRENT, 0);
+ fd_end_request_cur(-EIO);
goto repeat;
}
if (minor2disktype[type].drive_types > DriveType) {
printk(KERN_WARNING "fd%d: unsupported disk format", drive );
- end_request(CURRENT, 0);
+ fd_end_request_cur(-EIO);
goto repeat;
}
type = minor2disktype[type].index;
@@ -1444,8 +1446,8 @@ repeat:
UD.autoprobe = 0;
}
- if (CURRENT->sector + 1 > UDT->blocks) {
- end_request(CURRENT, 0);
+ if (blk_rq_pos(fd_request) + 1 > UDT->blocks) {
+ fd_end_request_cur(-EIO);
goto repeat;
}
@@ -1453,9 +1455,9 @@ repeat:
del_timer( &motor_off_timer );
ReqCnt = 0;
- ReqCmd = rq_data_dir(CURRENT);
- ReqBlock = CURRENT->sector;
- ReqBuffer = CURRENT->buffer;
+ ReqCmd = rq_data_dir(fd_request);
+ ReqBlock = blk_rq_pos(fd_request);
+ ReqBuffer = fd_request->buffer;
setup_req_params( drive );
do_fd_action( drive );
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 5f7e64ba87e..4bf8705b3ac 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -407,12 +407,7 @@ static int __init ramdisk_size(char *str)
rd_size = simple_strtol(str, NULL, 0);
return 1;
}
-static int __init ramdisk_size2(char *str)
-{
- return ramdisk_size(str);
-}
-__setup("ramdisk=", ramdisk_size);
-__setup("ramdisk_size=", ramdisk_size2);
+__setup("ramdisk_size=", ramdisk_size);
#endif
/*
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 4d4d5e0d3fa..b22cec97ea1 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -180,11 +180,13 @@ static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
__u32);
static void start_io(ctlr_info_t *h);
static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
- unsigned int use_unit_num, unsigned int log_unit,
__u8 page_code, unsigned char *scsi3addr, int cmd_type);
static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
- unsigned int use_unit_num, unsigned int log_unit,
- __u8 page_code, int cmd_type);
+ __u8 page_code, unsigned char scsi3addr[],
+ int cmd_type);
+static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
+ int attempt_retry);
+static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c);
static void fail_all_cmds(unsigned long ctlr);
static int scan_thread(void *data);
@@ -437,6 +439,194 @@ static void __devinit cciss_procinit(int i)
}
#endif /* CONFIG_PROC_FS */
+#define MAX_PRODUCT_NAME_LEN 19
+
+#define to_hba(n) container_of(n, struct ctlr_info, dev)
+#define to_drv(n) container_of(n, drive_info_struct, dev)
+
+static struct device_type cciss_host_type = {
+ .name = "cciss_host",
+};
+
+static ssize_t dev_show_unique_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ drive_info_struct *drv = to_drv(dev);
+ struct ctlr_info *h = to_hba(drv->dev.parent);
+ __u8 sn[16];
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ if (h->busy_configuring)
+ ret = -EBUSY;
+ else
+ memcpy(sn, drv->serial_no, sizeof(sn));
+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+
+ if (ret)
+ return ret;
+ else
+ return snprintf(buf, 16 * 2 + 2,
+ "%02X%02X%02X%02X%02X%02X%02X%02X"
+ "%02X%02X%02X%02X%02X%02X%02X%02X\n",
+ sn[0], sn[1], sn[2], sn[3],
+ sn[4], sn[5], sn[6], sn[7],
+ sn[8], sn[9], sn[10], sn[11],
+ sn[12], sn[13], sn[14], sn[15]);
+}
+DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL);
+
+static ssize_t dev_show_vendor(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ drive_info_struct *drv = to_drv(dev);
+ struct ctlr_info *h = to_hba(drv->dev.parent);
+ char vendor[VENDOR_LEN + 1];
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ if (h->busy_configuring)
+ ret = -EBUSY;
+ else
+ memcpy(vendor, drv->vendor, VENDOR_LEN + 1);
+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+
+ if (ret)
+ return ret;
+ else
+ return snprintf(buf, sizeof(vendor) + 1, "%s\n", drv->vendor);
+}
+DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL);
+
+static ssize_t dev_show_model(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ drive_info_struct *drv = to_drv(dev);
+ struct ctlr_info *h = to_hba(drv->dev.parent);
+ char model[MODEL_LEN + 1];
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ if (h->busy_configuring)
+ ret = -EBUSY;
+ else
+ memcpy(model, drv->model, MODEL_LEN + 1);
+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+
+ if (ret)
+ return ret;
+ else
+ return snprintf(buf, sizeof(model) + 1, "%s\n", drv->model);
+}
+DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL);
+
+static ssize_t dev_show_rev(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ drive_info_struct *drv = to_drv(dev);
+ struct ctlr_info *h = to_hba(drv->dev.parent);
+ char rev[REV_LEN + 1];
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ if (h->busy_configuring)
+ ret = -EBUSY;
+ else
+ memcpy(rev, drv->rev, REV_LEN + 1);
+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+
+ if (ret)
+ return ret;
+ else
+ return snprintf(buf, sizeof(rev) + 1, "%s\n", drv->rev);
+}
+DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL);
+
+static struct attribute *cciss_dev_attrs[] = {
+ &dev_attr_unique_id.attr,
+ &dev_attr_model.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_rev.attr,
+ NULL
+};
+
+static struct attribute_group cciss_dev_attr_group = {
+ .attrs = cciss_dev_attrs,
+};
+
+static struct attribute_group *cciss_dev_attr_groups[] = {
+ &cciss_dev_attr_group,
+ NULL
+};
+
+static struct device_type cciss_dev_type = {
+ .name = "cciss_device",
+ .groups = cciss_dev_attr_groups,
+};
+
+static struct bus_type cciss_bus_type = {
+ .name = "cciss",
+};
+
+
+/*
+ * Initialize sysfs entry for each controller. This sets up and registers
+ * the 'cciss#' directory for each individual controller under
+ * /sys/bus/pci/devices/<dev>/.
+ */
+static int cciss_create_hba_sysfs_entry(struct ctlr_info *h)
+{
+ device_initialize(&h->dev);
+ h->dev.type = &cciss_host_type;
+ h->dev.bus = &cciss_bus_type;
+ dev_set_name(&h->dev, "%s", h->devname);
+ h->dev.parent = &h->pdev->dev;
+
+ return device_add(&h->dev);
+}
+
+/*
+ * Remove sysfs entries for an hba.
+ */
+static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h)
+{
+ device_del(&h->dev);
+}
+
+/*
+ * Initialize sysfs for each logical drive. This sets up and registers
+ * the 'c#d#' directory for each individual logical drive under
+ * /sys/bus/pci/devices/<dev>/cciss#/. We also create a link from
+ * /sys/block/cciss!c#d# to this entry.
+ */
+static int cciss_create_ld_sysfs_entry(struct ctlr_info *h,
+ drive_info_struct *drv,
+ int drv_index)
+{
+ device_initialize(&drv->dev);
+ drv->dev.type = &cciss_dev_type;
+ drv->dev.bus = &cciss_bus_type;
+ dev_set_name(&drv->dev, "c%dd%d", h->ctlr, drv_index);
+ drv->dev.parent = &h->dev;
+ return device_add(&drv->dev);
+}
+
+/*
+ * Remove sysfs entries for a logical drive.
+ */
+static void cciss_destroy_ld_sysfs_entry(drive_info_struct *drv)
+{
+ device_del(&drv->dev);
+}
+
/*
* For operations that cannot sleep, a command block is allocated at init,
* and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
@@ -1299,7 +1489,6 @@ static void cciss_softirq_done(struct request *rq)
{
CommandList_struct *cmd = rq->completion_data;
ctlr_info_t *h = hba[cmd->ctlr];
- unsigned int nr_bytes;
unsigned long flags;
u64bit temp64;
int i, ddir;
@@ -1321,15 +1510,11 @@ static void cciss_softirq_done(struct request *rq)
printk("Done with %p\n", rq);
#endif /* CCISS_DEBUG */
- /*
- * Store the full size and set the residual count for pc requests
- */
- nr_bytes = blk_rq_bytes(rq);
+ /* set the residual count for pc requests */
if (blk_pc_request(rq))
- rq->data_len = cmd->err_info->ResidualCnt;
+ rq->resid_len = cmd->err_info->ResidualCnt;
- if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, nr_bytes))
- BUG();
+ blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);
spin_lock_irqsave(&h->lock, flags);
cmd_free(h, cmd, 1);
@@ -1337,6 +1522,56 @@ static void cciss_softirq_done(struct request *rq)
spin_unlock_irqrestore(&h->lock, flags);
}
+static void log_unit_to_scsi3addr(ctlr_info_t *h, unsigned char scsi3addr[],
+ uint32_t log_unit)
+{
+ log_unit = h->drv[log_unit].LunID & 0x03fff;
+ memset(&scsi3addr[4], 0, 4);
+ memcpy(&scsi3addr[0], &log_unit, 4);
+ scsi3addr[3] |= 0x40;
+}
+
+/* This function gets the SCSI vendor, model, and revision of a logical drive
+ * via the inquiry page 0. Model, vendor, and rev are set to empty strings if
+ * they cannot be read.
+ */
+static void cciss_get_device_descr(int ctlr, int logvol, int withirq,
+ char *vendor, char *model, char *rev)
+{
+ int rc;
+ InquiryData_struct *inq_buf;
+ unsigned char scsi3addr[8];
+
+ *vendor = '\0';
+ *model = '\0';
+ *rev = '\0';
+
+ inq_buf = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
+ if (!inq_buf)
+ return;
+
+ log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
+ if (withirq)
+ rc = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buf,
+ sizeof(InquiryData_struct), 0,
+ scsi3addr, TYPE_CMD);
+ else
+ rc = sendcmd(CISS_INQUIRY, ctlr, inq_buf,
+ sizeof(InquiryData_struct), 0,
+ scsi3addr, TYPE_CMD);
+ if (rc == IO_OK) {
+ memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN);
+ vendor[VENDOR_LEN] = '\0';
+ memcpy(model, &inq_buf->data_byte[16], MODEL_LEN);
+ model[MODEL_LEN] = '\0';
+ memcpy(rev, &inq_buf->data_byte[32], REV_LEN);
+ rev[REV_LEN] = '\0';
+ }
+
+ kfree(inq_buf);
+ return;
+}
+
/* This function gets the serial number of a logical drive via
* inquiry page 0x83. Serial no. is 16 bytes. If the serial
* number cannot be had, for whatever reason, 16 bytes of 0xff
@@ -1348,6 +1583,7 @@ static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
#define PAGE_83_INQ_BYTES 64
int rc;
unsigned char *buf;
+ unsigned char scsi3addr[8];
if (buflen > 16)
buflen = 16;
@@ -1356,12 +1592,13 @@ static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
if (!buf)
return;
memset(serial_no, 0, buflen);
+ log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
if (withirq)
rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf,
- PAGE_83_INQ_BYTES, 1, logvol, 0x83, TYPE_CMD);
+ PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
else
rc = sendcmd(CISS_INQUIRY, ctlr, buf,
- PAGE_83_INQ_BYTES, 1, logvol, 0x83, NULL, TYPE_CMD);
+ PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
if (rc == IO_OK)
memcpy(serial_no, &buf[8], buflen);
kfree(buf);
@@ -1377,7 +1614,7 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
disk->first_minor = drv_index << NWD_SHIFT;
disk->fops = &cciss_fops;
disk->private_data = &h->drv[drv_index];
- disk->driverfs_dev = &h->pdev->dev;
+ disk->driverfs_dev = &h->drv[drv_index].dev;
/* Set up queue information */
blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
@@ -1394,8 +1631,8 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
disk->queue->queuedata = h;
- blk_queue_hardsect_size(disk->queue,
- h->drv[drv_index].block_size);
+ blk_queue_logical_block_size(disk->queue,
+ h->drv[drv_index].block_size);
/* Make sure all queue data is written out before */
/* setting h->drv[drv_index].queue, as setting this */
@@ -1468,6 +1705,8 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
drvinfo->block_size = block_size;
drvinfo->nr_blocks = total_size + 1;
+ cciss_get_device_descr(ctlr, drv_index, 1, drvinfo->vendor,
+ drvinfo->model, drvinfo->rev);
cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no,
sizeof(drvinfo->serial_no));
@@ -1517,6 +1756,9 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
h->drv[drv_index].cylinders = drvinfo->cylinders;
h->drv[drv_index].raid_level = drvinfo->raid_level;
memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16);
+ memcpy(h->drv[drv_index].vendor, drvinfo->vendor, VENDOR_LEN + 1);
+ memcpy(h->drv[drv_index].model, drvinfo->model, MODEL_LEN + 1);
+ memcpy(h->drv[drv_index].rev, drvinfo->rev, REV_LEN + 1);
++h->num_luns;
disk = h->gendisk[drv_index];
@@ -1591,6 +1833,8 @@ static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node)
}
}
h->drv[drv_index].LunID = lunid;
+ if (cciss_create_ld_sysfs_entry(h, &h->drv[drv_index], drv_index))
+ goto err_free_disk;
/* Don't need to mark this busy because nobody */
/* else knows about this disk yet to contend */
@@ -1598,6 +1842,11 @@ static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node)
h->drv[drv_index].busy_configuring = 0;
wmb();
return drv_index;
+
+err_free_disk:
+ put_disk(h->gendisk[drv_index]);
+ h->gendisk[drv_index] = NULL;
+ return -1;
}
/* This is for the special case of a controller which
@@ -1668,8 +1917,8 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
goto mem_msg;
return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
- sizeof(ReportLunData_struct), 0,
- 0, 0, TYPE_CMD);
+ sizeof(ReportLunData_struct),
+ 0, CTLR_LUNID, TYPE_CMD);
if (return_code == IO_OK)
listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
@@ -1718,6 +1967,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
h->drv[i].busy_configuring = 1;
spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
return_code = deregister_disk(h, i, 1);
+ cciss_destroy_ld_sysfs_entry(&h->drv[i]);
h->drv[i].busy_configuring = 0;
}
}
@@ -1877,11 +2127,9 @@ static int deregister_disk(ctlr_info_t *h, int drv_index,
return 0;
}
-static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
- 1: address logical volume log_unit,
- 2: periph device address is scsi3addr */
- unsigned int log_unit, __u8 page_code,
- unsigned char *scsi3addr, int cmd_type)
+static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+ size_t size, __u8 page_code, unsigned char *scsi3addr,
+ int cmd_type)
{
ctlr_info_t *h = hba[ctlr];
u64bit buff_dma_handle;
@@ -1897,27 +2145,12 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
c->Header.SGTotal = 0;
}
c->Header.Tag.lower = c->busaddr;
+ memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
c->Request.Type.Type = cmd_type;
if (cmd_type == TYPE_CMD) {
switch (cmd) {
case CISS_INQUIRY:
- /* If the logical unit number is 0 then, this is going
- to controller so It's a physical command
- mode = 0 target = 0. So we have nothing to write.
- otherwise, if use_unit_num == 1,
- mode = 1(volume set addressing) target = LUNID
- otherwise, if use_unit_num == 2,
- mode = 0(periph dev addr) target = scsi3addr */
- if (use_unit_num == 1) {
- c->Header.LUN.LogDev.VolId =
- h->drv[log_unit].LunID;
- c->Header.LUN.LogDev.Mode = 1;
- } else if (use_unit_num == 2) {
- memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
- 8);
- c->Header.LUN.LogDev.Mode = 0;
- }
/* are we trying to read a vital product page */
if (page_code != 0) {
c->Request.CDB[1] = 0x01;
@@ -1947,8 +2180,6 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
break;
case CCISS_READ_CAPACITY:
- c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
- c->Header.LUN.LogDev.Mode = 1;
c->Request.CDBLen = 10;
c->Request.Type.Attribute = ATTR_SIMPLE;
c->Request.Type.Direction = XFER_READ;
@@ -1956,8 +2187,6 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
c->Request.CDB[0] = cmd;
break;
case CCISS_READ_CAPACITY_16:
- c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
- c->Header.LUN.LogDev.Mode = 1;
c->Request.CDBLen = 16;
c->Request.Type.Attribute = ATTR_SIMPLE;
c->Request.Type.Direction = XFER_READ;
@@ -1979,6 +2208,12 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
c->Request.CDB[0] = BMIC_WRITE;
c->Request.CDB[6] = BMIC_CACHE_FLUSH;
break;
+ case TEST_UNIT_READY:
+ c->Request.CDBLen = 6;
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ c->Request.Type.Direction = XFER_NONE;
+ c->Request.Timeout = 0;
+ break;
default:
printk(KERN_WARNING
"cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
@@ -1997,13 +2232,13 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
memcpy(&c->Request.CDB[4], buff, 8);
break;
case 1: /* RESET message */
- c->Request.CDBLen = 12;
+ c->Request.CDBLen = 16;
c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_WRITE;
+ c->Request.Type.Direction = XFER_NONE;
c->Request.Timeout = 0;
memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
c->Request.CDB[0] = cmd; /* reset */
- c->Request.CDB[1] = 0x04; /* reset a LUN */
+ c->Request.CDB[1] = 0x03; /* reset a target */
break;
case 3: /* No-Op message */
c->Request.CDBLen = 1;
@@ -2035,114 +2270,152 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
return status;
}
-static int sendcmd_withirq(__u8 cmd,
- int ctlr,
- void *buff,
- size_t size,
- unsigned int use_unit_num,
- unsigned int log_unit, __u8 page_code, int cmd_type)
+static int check_target_status(ctlr_info_t *h, CommandList_struct *c)
{
- ctlr_info_t *h = hba[ctlr];
- CommandList_struct *c;
+ switch (c->err_info->ScsiStatus) {
+ case SAM_STAT_GOOD:
+ return IO_OK;
+ case SAM_STAT_CHECK_CONDITION:
+ switch (0xf & c->err_info->SenseInfo[2]) {
+ case 0: return IO_OK; /* no sense */
+ case 1: return IO_OK; /* recovered error */
+ default:
+ printk(KERN_WARNING "cciss%d: cmd 0x%02x "
+ "check condition, sense key = 0x%02x\n",
+ h->ctlr, c->Request.CDB[0],
+ c->err_info->SenseInfo[2]);
+ }
+ break;
+ default:
+ printk(KERN_WARNING "cciss%d: cmd 0x%02x"
+ "scsi status = 0x%02x\n", h->ctlr,
+ c->Request.CDB[0], c->err_info->ScsiStatus);
+ break;
+ }
+ return IO_ERROR;
+}
+
+static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c)
+{
+ int return_status = IO_OK;
+
+ if (c->err_info->CommandStatus == CMD_SUCCESS)
+ return IO_OK;
+
+ switch (c->err_info->CommandStatus) {
+ case CMD_TARGET_STATUS:
+ return_status = check_target_status(h, c);
+ break;
+ case CMD_DATA_UNDERRUN:
+ case CMD_DATA_OVERRUN:
+ /* expected for inquiry and report lun commands */
+ break;
+ case CMD_INVALID:
+ printk(KERN_WARNING "cciss: cmd 0x%02x is "
+ "reported invalid\n", c->Request.CDB[0]);
+ return_status = IO_ERROR;
+ break;
+ case CMD_PROTOCOL_ERR:
+ printk(KERN_WARNING "cciss: cmd 0x%02x has "
+ "protocol error \n", c->Request.CDB[0]);
+ return_status = IO_ERROR;
+ break;
+ case CMD_HARDWARE_ERR:
+ printk(KERN_WARNING "cciss: cmd 0x%02x had "
+ " hardware error\n", c->Request.CDB[0]);
+ return_status = IO_ERROR;
+ break;
+ case CMD_CONNECTION_LOST:
+ printk(KERN_WARNING "cciss: cmd 0x%02x had "
+ "connection lost\n", c->Request.CDB[0]);
+ return_status = IO_ERROR;
+ break;
+ case CMD_ABORTED:
+ printk(KERN_WARNING "cciss: cmd 0x%02x was "
+ "aborted\n", c->Request.CDB[0]);
+ return_status = IO_ERROR;
+ break;
+ case CMD_ABORT_FAILED:
+ printk(KERN_WARNING "cciss: cmd 0x%02x reports "
+ "abort failed\n", c->Request.CDB[0]);
+ return_status = IO_ERROR;
+ break;
+ case CMD_UNSOLICITED_ABORT:
+ printk(KERN_WARNING
+ "cciss%d: unsolicited abort 0x%02x\n", h->ctlr,
+ c->Request.CDB[0]);
+ return_status = IO_NEEDS_RETRY;
+ break;
+ default:
+ printk(KERN_WARNING "cciss: cmd 0x%02x returned "
+ "unknown status %x\n", c->Request.CDB[0],
+ c->err_info->CommandStatus);
+ return_status = IO_ERROR;
+ }
+ return return_status;
+}
+
+static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
+ int attempt_retry)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
u64bit buff_dma_handle;
unsigned long flags;
- int return_status;
- DECLARE_COMPLETION_ONSTACK(wait);
+ int return_status = IO_OK;
- if ((c = cmd_alloc(h, 0)) == NULL)
- return -ENOMEM;
- return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
- log_unit, page_code, NULL, cmd_type);
- if (return_status != IO_OK) {
- cmd_free(h, c, 0);
- return return_status;
- }
- resend_cmd2:
+resend_cmd2:
c->waiting = &wait;
-
/* Put the request on the tail of the queue and send it */
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
addQ(&h->reqQ, c);
h->Qdepth++;
start_io(h);
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
wait_for_completion(&wait);
- if (c->err_info->CommandStatus != 0) { /* an error has occurred */
- switch (c->err_info->CommandStatus) {
- case CMD_TARGET_STATUS:
- printk(KERN_WARNING "cciss: cmd %p has "
- " completed with errors\n", c);
- if (c->err_info->ScsiStatus) {
- printk(KERN_WARNING "cciss: cmd %p "
- "has SCSI Status = %x\n",
- c, c->err_info->ScsiStatus);
- }
+ if (c->err_info->CommandStatus == 0 || !attempt_retry)
+ goto command_done;
- break;
- case CMD_DATA_UNDERRUN:
- case CMD_DATA_OVERRUN:
- /* expected for inquire and report lun commands */
- break;
- case CMD_INVALID:
- printk(KERN_WARNING "cciss: Cmd %p is "
- "reported invalid\n", c);
- return_status = IO_ERROR;
- break;
- case CMD_PROTOCOL_ERR:
- printk(KERN_WARNING "cciss: cmd %p has "
- "protocol error \n", c);
- return_status = IO_ERROR;
- break;
- case CMD_HARDWARE_ERR:
- printk(KERN_WARNING "cciss: cmd %p had "
- " hardware error\n", c);
- return_status = IO_ERROR;
- break;
- case CMD_CONNECTION_LOST:
- printk(KERN_WARNING "cciss: cmd %p had "
- "connection lost\n", c);
- return_status = IO_ERROR;
- break;
- case CMD_ABORTED:
- printk(KERN_WARNING "cciss: cmd %p was "
- "aborted\n", c);
- return_status = IO_ERROR;
- break;
- case CMD_ABORT_FAILED:
- printk(KERN_WARNING "cciss: cmd %p reports "
- "abort failed\n", c);
- return_status = IO_ERROR;
- break;
- case CMD_UNSOLICITED_ABORT:
- printk(KERN_WARNING
- "cciss%d: unsolicited abort %p\n", ctlr, c);
- if (c->retry_count < MAX_CMD_RETRIES) {
- printk(KERN_WARNING
- "cciss%d: retrying %p\n", ctlr, c);
- c->retry_count++;
- /* erase the old error information */
- memset(c->err_info, 0,
- sizeof(ErrorInfo_struct));
- return_status = IO_OK;
- INIT_COMPLETION(wait);
- goto resend_cmd2;
- }
- return_status = IO_ERROR;
- break;
- default:
- printk(KERN_WARNING "cciss: cmd %p returned "
- "unknown status %x\n", c,
- c->err_info->CommandStatus);
- return_status = IO_ERROR;
- }
+ return_status = process_sendcmd_error(h, c);
+
+ if (return_status == IO_NEEDS_RETRY &&
+ c->retry_count < MAX_CMD_RETRIES) {
+ printk(KERN_WARNING "cciss%d: retrying 0x%02x\n", h->ctlr,
+ c->Request.CDB[0]);
+ c->retry_count++;
+ /* erase the old error information */
+ memset(c->err_info, 0, sizeof(ErrorInfo_struct));
+ return_status = IO_OK;
+ INIT_COMPLETION(wait);
+ goto resend_cmd2;
}
+
+command_done:
/* unlock the buffers from DMA */
buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
+ return return_status;
+}
+
+static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
+ __u8 page_code, unsigned char scsi3addr[],
+ int cmd_type)
+{
+ ctlr_info_t *h = hba[ctlr];
+ CommandList_struct *c;
+ int return_status;
+
+ c = cmd_alloc(h, 0);
+ if (!c)
+ return -ENOMEM;
+ return_status = fill_cmd(c, cmd, ctlr, buff, size, page_code,
+ scsi3addr, cmd_type);
+ if (return_status == IO_OK)
+ return_status = sendcmd_withirq_core(h, c, 1);
+
cmd_free(h, c, 0);
return return_status;
}
@@ -2155,15 +2428,17 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
{
int return_code;
unsigned long t;
+ unsigned char scsi3addr[8];
memset(inq_buff, 0, sizeof(InquiryData_struct));
+ log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
if (withirq)
return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
- inq_buff, sizeof(*inq_buff), 1,
- logvol, 0xC1, TYPE_CMD);
+ inq_buff, sizeof(*inq_buff),
+ 0xC1, scsi3addr, TYPE_CMD);
else
return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
- sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
+ sizeof(*inq_buff), 0xC1, scsi3addr,
TYPE_CMD);
if (return_code == IO_OK) {
if (inq_buff->data_byte[8] == 0xFF) {
@@ -2204,6 +2479,7 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
{
ReadCapdata_struct *buf;
int return_code;
+ unsigned char scsi3addr[8];
buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
if (!buf) {
@@ -2211,14 +2487,15 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
return;
}
+ log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
if (withirq)
return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
ctlr, buf, sizeof(ReadCapdata_struct),
- 1, logvol, 0, TYPE_CMD);
+ 0, scsi3addr, TYPE_CMD);
else
return_code = sendcmd(CCISS_READ_CAPACITY,
ctlr, buf, sizeof(ReadCapdata_struct),
- 1, logvol, 0, NULL, TYPE_CMD);
+ 0, scsi3addr, TYPE_CMD);
if (return_code == IO_OK) {
*total_size = be32_to_cpu(*(__be32 *) buf->total_size);
*block_size = be32_to_cpu(*(__be32 *) buf->block_size);
@@ -2238,6 +2515,7 @@ cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size,
{
ReadCapdata_struct_16 *buf;
int return_code;
+ unsigned char scsi3addr[8];
buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
if (!buf) {
@@ -2245,15 +2523,16 @@ cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size,
return;
}
+ log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
if (withirq) {
return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
ctlr, buf, sizeof(ReadCapdata_struct_16),
- 1, logvol, 0, TYPE_CMD);
+ 0, scsi3addr, TYPE_CMD);
}
else {
return_code = sendcmd(CCISS_READ_CAPACITY_16,
ctlr, buf, sizeof(ReadCapdata_struct_16),
- 1, logvol, 0, NULL, TYPE_CMD);
+ 0, scsi3addr, TYPE_CMD);
}
if (return_code == IO_OK) {
*total_size = be64_to_cpu(*(__be64 *) buf->total_size);
@@ -2303,7 +2582,7 @@ static int cciss_revalidate(struct gendisk *disk)
cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
inq_buff, drv);
- blk_queue_hardsect_size(drv->queue, drv->block_size);
+ blk_queue_logical_block_size(drv->queue, drv->block_size);
set_capacity(disk, drv->nr_blocks);
kfree(inq_buff);
@@ -2333,86 +2612,21 @@ static unsigned long pollcomplete(int ctlr)
return 1;
}
-static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
-{
- /* We get in here if sendcmd() is polling for completions
- and gets some command back that it wasn't expecting --
- something other than that which it just sent down.
- Ordinarily, that shouldn't happen, but it can happen when
- the scsi tape stuff gets into error handling mode, and
- starts using sendcmd() to try to abort commands and
- reset tape drives. In that case, sendcmd may pick up
- completions of commands that were sent to logical drives
- through the block i/o system, or cciss ioctls completing, etc.
- In that case, we need to save those completions for later
- processing by the interrupt handler.
- */
-
-#ifdef CONFIG_CISS_SCSI_TAPE
- struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
-
- /* If it's not the scsi tape stuff doing error handling, (abort */
- /* or reset) then we don't expect anything weird. */
- if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
-#endif
- printk(KERN_WARNING "cciss cciss%d: SendCmd "
- "Invalid command list address returned! (%lx)\n",
- ctlr, complete);
- /* not much we can do. */
-#ifdef CONFIG_CISS_SCSI_TAPE
- return 1;
- }
-
- /* We've sent down an abort or reset, but something else
- has completed */
- if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
- /* Uh oh. No room to save it for later... */
- printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
- "reject list overflow, command lost!\n", ctlr);
- return 1;
- }
- /* Save it for later */
- srl->complete[srl->ncompletions] = complete;
- srl->ncompletions++;
-#endif
- return 0;
-}
-
-/*
- * Send a command to the controller, and wait for it to complete.
- * Only used at init time.
+/* Send command c to controller h and poll for it to complete.
+ * Turns interrupts off on the board. Used at driver init time
+ * and during SCSI error recovery.
*/
-static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
- 1: address logical volume log_unit,
- 2: periph device address is scsi3addr */
- unsigned int log_unit,
- __u8 page_code, unsigned char *scsi3addr, int cmd_type)
+static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
{
- CommandList_struct *c;
int i;
unsigned long complete;
- ctlr_info_t *info_p = hba[ctlr];
+ int status = IO_ERROR;
u64bit buff_dma_handle;
- int status, done = 0;
- if ((c = cmd_alloc(info_p, 1)) == NULL) {
- printk(KERN_WARNING "cciss: unable to get memory");
- return IO_ERROR;
- }
- status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
- log_unit, page_code, scsi3addr, cmd_type);
- if (status != IO_OK) {
- cmd_free(info_p, c, 1);
- return status;
- }
- resend_cmd1:
- /*
- * Disable interrupt
- */
-#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "cciss: turning intr off\n");
-#endif /* CCISS_DEBUG */
- info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
+resend_cmd1:
+
+ /* Disable interrupt on the board. */
+ h->access.set_intr_mask(h, CCISS_INTR_OFF);
/* Make sure there is room in the command FIFO */
/* Actually it should be completely empty at this time */
@@ -2420,21 +2634,15 @@ static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use
/* tape side of the driver. */
for (i = 200000; i > 0; i--) {
/* if fifo isn't full go */
- if (!(info_p->access.fifo_full(info_p))) {
-
+ if (!(h->access.fifo_full(h)))
break;
- }
udelay(10);
printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
- " waiting!\n", ctlr);
+ " waiting!\n", h->ctlr);
}
- /*
- * Send the cmd
- */
- info_p->access.submit_command(info_p, c);
- done = 0;
+ h->access.submit_command(h, c); /* Send the cmd */
do {
- complete = pollcomplete(ctlr);
+ complete = pollcomplete(h->ctlr);
#ifdef CCISS_DEBUG
printk(KERN_DEBUG "cciss: command completed\n");
@@ -2443,97 +2651,102 @@ static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use
if (complete == 1) {
printk(KERN_WARNING
"cciss cciss%d: SendCmd Timeout out, "
- "No command list address returned!\n", ctlr);
+ "No command list address returned!\n", h->ctlr);
status = IO_ERROR;
- done = 1;
break;
}
- /* This will need to change for direct lookup completions */
- if ((complete & CISS_ERROR_BIT)
- && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
- /* if data overrun or underun on Report command
- ignore it
- */
- if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
- (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
- (c->Request.CDB[0] == CISS_INQUIRY)) &&
- ((c->err_info->CommandStatus ==
- CMD_DATA_OVERRUN) ||
- (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
- )) {
- complete = c->busaddr;
- } else {
- if (c->err_info->CommandStatus ==
- CMD_UNSOLICITED_ABORT) {
- printk(KERN_WARNING "cciss%d: "
- "unsolicited abort %p\n",
- ctlr, c);
- if (c->retry_count < MAX_CMD_RETRIES) {
- printk(KERN_WARNING
- "cciss%d: retrying %p\n",
- ctlr, c);
- c->retry_count++;
- /* erase the old error */
- /* information */
- memset(c->err_info, 0,
- sizeof
- (ErrorInfo_struct));
- goto resend_cmd1;
- } else {
- printk(KERN_WARNING
- "cciss%d: retried %p too "
- "many times\n", ctlr, c);
- status = IO_ERROR;
- goto cleanup1;
- }
- } else if (c->err_info->CommandStatus ==
- CMD_UNABORTABLE) {
- printk(KERN_WARNING
- "cciss%d: command could not be aborted.\n",
- ctlr);
- status = IO_ERROR;
- goto cleanup1;
- }
- printk(KERN_WARNING "ciss ciss%d: sendcmd"
- " Error %x \n", ctlr,
- c->err_info->CommandStatus);
- printk(KERN_WARNING "ciss ciss%d: sendcmd"
- " offensive info\n"
- " size %x\n num %x value %x\n",
- ctlr,
- c->err_info->MoreErrInfo.Invalid_Cmd.
- offense_size,
- c->err_info->MoreErrInfo.Invalid_Cmd.
- offense_num,
- c->err_info->MoreErrInfo.Invalid_Cmd.
- offense_value);
- status = IO_ERROR;
- goto cleanup1;
- }
+ /* Make sure it's the command we're expecting. */
+ if ((complete & ~CISS_ERROR_BIT) != c->busaddr) {
+ printk(KERN_WARNING "cciss%d: Unexpected command "
+ "completion.\n", h->ctlr);
+ continue;
+ }
+
+ /* It is our command. If no error, we're done. */
+ if (!(complete & CISS_ERROR_BIT)) {
+ status = IO_OK;
+ break;
}
- /* This will need changing for direct lookup completions */
- if (complete != c->busaddr) {
- if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
- BUG(); /* we are pretty much hosed if we get here. */
+
+ /* There is an error... */
+
+ /* if data overrun or underrun on Report command ignore it */
+ if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
+ (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
+ (c->Request.CDB[0] == CISS_INQUIRY)) &&
+ ((c->err_info->CommandStatus == CMD_DATA_OVERRUN) ||
+ (c->err_info->CommandStatus == CMD_DATA_UNDERRUN))) {
+ complete = c->busaddr;
+ status = IO_OK;
+ break;
+ }
+
+ if (c->err_info->CommandStatus == CMD_UNSOLICITED_ABORT) {
+ printk(KERN_WARNING "cciss%d: unsolicited abort %p\n",
+ h->ctlr, c);
+ if (c->retry_count < MAX_CMD_RETRIES) {
+ printk(KERN_WARNING "cciss%d: retrying %p\n",
+ h->ctlr, c);
+ c->retry_count++;
+ /* erase the old error information */
+ memset(c->err_info, 0, sizeof(ErrorInfo_struct));
+ goto resend_cmd1;
}
- continue;
- } else
- done = 1;
- } while (!done);
+ printk(KERN_WARNING "cciss%d: retried %p too many "
+ "times\n", h->ctlr, c);
+ status = IO_ERROR;
+ break;
+ }
+
+ if (c->err_info->CommandStatus == CMD_UNABORTABLE) {
+ printk(KERN_WARNING "cciss%d: command could not be "
+ "aborted.\n", h->ctlr);
+ status = IO_ERROR;
+ break;
+ }
+
+ if (c->err_info->CommandStatus == CMD_TARGET_STATUS) {
+ status = check_target_status(h, c);
+ break;
+ }
+
+ printk(KERN_WARNING "cciss%d: sendcmd error\n", h->ctlr);
+ printk(KERN_WARNING "cmd = 0x%02x, CommandStatus = 0x%02x\n",
+ c->Request.CDB[0], c->err_info->CommandStatus);
+ status = IO_ERROR;
+ break;
+
+ } while (1);
- cleanup1:
/* unlock the data buffer from DMA */
buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
- pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
+ pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
-#ifdef CONFIG_CISS_SCSI_TAPE
- /* if we saved some commands for later, process them now. */
- if (info_p->scsi_rejects.ncompletions > 0)
- do_cciss_intr(0, info_p);
-#endif
- cmd_free(info_p, c, 1);
+ return status;
+}
+
+/*
+ * Send a command to the controller, and wait for it to complete.
+ * Used at init time, and during SCSI error recovery.
+ */
+static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
+ __u8 page_code, unsigned char *scsi3addr, int cmd_type)
+{
+ CommandList_struct *c;
+ int status;
+
+ c = cmd_alloc(hba[ctlr], 1);
+ if (!c) {
+ printk(KERN_WARNING "cciss: unable to get memory");
+ return IO_ERROR;
+ }
+ status = fill_cmd(c, cmd, ctlr, buff, size, page_code,
+ scsi3addr, cmd_type);
+ if (status == IO_OK)
+ status = sendcmd_core(hba[ctlr], c);
+ cmd_free(hba[ctlr], c, 1);
return status;
}
@@ -2691,7 +2904,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
printk(KERN_WARNING "cciss: cmd %p has"
" completed with data underrun "
"reported\n", cmd);
- cmd->rq->data_len = cmd->err_info->ResidualCnt;
+ cmd->rq->resid_len = cmd->err_info->ResidualCnt;
}
break;
case CMD_DATA_OVERRUN:
@@ -2806,7 +3019,7 @@ static void do_cciss_request(struct request_queue *q)
goto startio;
queue:
- creq = elv_next_request(q);
+ creq = blk_peek_request(q);
if (!creq)
goto startio;
@@ -2815,7 +3028,7 @@ static void do_cciss_request(struct request_queue *q)
if ((c = cmd_alloc(h, 1)) == NULL)
goto full;
- blkdev_dequeue_request(creq);
+ blk_start_request(creq);
spin_unlock_irq(q->queue_lock);
@@ -2840,10 +3053,10 @@ static void do_cciss_request(struct request_queue *q)
c->Request.Timeout = 0; // Don't time out
c->Request.CDB[0] =
(rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
- start_blk = creq->sector;
+ start_blk = blk_rq_pos(creq);
#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
- (int)creq->nr_sectors);
+ printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",
+ (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
#endif /* CCISS_DEBUG */
sg_init_table(tmp_sg, MAXSGENTRIES);
@@ -2869,8 +3082,8 @@ static void do_cciss_request(struct request_queue *q)
h->maxSG = seg;
#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "cciss: Submitting %lu sectors in %d segments\n",
- creq->nr_sectors, seg);
+ printk(KERN_DEBUG "cciss: Submitting %u sectors in %d segments\n",
+ blk_rq_sectors(creq), seg);
#endif /* CCISS_DEBUG */
c->Header.SGList = c->Header.SGTotal = seg;
@@ -2882,8 +3095,8 @@ static void do_cciss_request(struct request_queue *q)
c->Request.CDB[4] = (start_blk >> 8) & 0xff;
c->Request.CDB[5] = start_blk & 0xff;
c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
- c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
- c->Request.CDB[8] = creq->nr_sectors & 0xff;
+ c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff;
+ c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff;
c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
} else {
u32 upper32 = upper_32_bits(start_blk);
@@ -2898,10 +3111,10 @@ static void do_cciss_request(struct request_queue *q)
c->Request.CDB[7]= (start_blk >> 16) & 0xff;
c->Request.CDB[8]= (start_blk >> 8) & 0xff;
c->Request.CDB[9]= start_blk & 0xff;
- c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
- c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
- c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
- c->Request.CDB[13]= creq->nr_sectors & 0xff;
+ c->Request.CDB[10]= (blk_rq_sectors(creq) >> 24) & 0xff;
+ c->Request.CDB[11]= (blk_rq_sectors(creq) >> 16) & 0xff;
+ c->Request.CDB[12]= (blk_rq_sectors(creq) >> 8) & 0xff;
+ c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
c->Request.CDB[14] = c->Request.CDB[15] = 0;
}
} else if (blk_pc_request(creq)) {
@@ -2931,44 +3144,18 @@ startio:
static inline unsigned long get_next_completion(ctlr_info_t *h)
{
-#ifdef CONFIG_CISS_SCSI_TAPE
- /* Any rejects from sendcmd() lying around? Process them first */
- if (h->scsi_rejects.ncompletions == 0)
- return h->access.command_completed(h);
- else {
- struct sendcmd_reject_list *srl;
- int n;
- srl = &h->scsi_rejects;
- n = --srl->ncompletions;
- /* printk("cciss%d: processing saved reject\n", h->ctlr); */
- printk("p");
- return srl->complete[n];
- }
-#else
return h->access.command_completed(h);
-#endif
}
static inline int interrupt_pending(ctlr_info_t *h)
{
-#ifdef CONFIG_CISS_SCSI_TAPE
- return (h->access.intr_pending(h)
- || (h->scsi_rejects.ncompletions > 0));
-#else
return h->access.intr_pending(h);
-#endif
}
static inline long interrupt_not_for_us(ctlr_info_t *h)
{
-#ifdef CONFIG_CISS_SCSI_TAPE
- return (((h->access.intr_pending(h) == 0) ||
- (h->interrupts_enabled == 0))
- && (h->scsi_rejects.ncompletions == 0));
-#else
return (((h->access.intr_pending(h) == 0) ||
(h->interrupts_enabled == 0)));
-#endif
}
static irqreturn_t do_cciss_intr(int irq, void *dev_id)
@@ -3723,12 +3910,15 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
INIT_HLIST_HEAD(&hba[i]->reqQ);
if (cciss_pci_init(hba[i], pdev) != 0)
- goto clean1;
+ goto clean0;
sprintf(hba[i]->devname, "cciss%d", i);
hba[i]->ctlr = i;
hba[i]->pdev = pdev;
+ if (cciss_create_hba_sysfs_entry(hba[i]))
+ goto clean0;
+
/* configure PCI DMA stuff */
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
dac = 1;
@@ -3787,15 +3977,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
printk(KERN_ERR "cciss: out of memory");
goto clean4;
}
-#ifdef CONFIG_CISS_SCSI_TAPE
- hba[i]->scsi_rejects.complete =
- kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
- (hba[i]->nr_cmds + 5), GFP_KERNEL);
- if (hba[i]->scsi_rejects.complete == NULL) {
- printk(KERN_ERR "cciss: out of memory");
- goto clean4;
- }
-#endif
spin_lock_init(&hba[i]->lock);
/* Initialize the pdev driver private data.
@@ -3828,7 +4009,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
}
return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff,
- sizeof(InquiryData_struct), 0, 0 , 0, TYPE_CMD);
+ sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD);
if (return_code == IO_OK) {
hba[i]->firm_ver[0] = inq_buff->data_byte[32];
hba[i]->firm_ver[1] = inq_buff->data_byte[33];
@@ -3855,9 +4036,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
clean4:
kfree(inq_buff);
-#ifdef CONFIG_CISS_SCSI_TAPE
- kfree(hba[i]->scsi_rejects.complete);
-#endif
kfree(hba[i]->cmd_pool_bits);
if (hba[i]->cmd_pool)
pci_free_consistent(hba[i]->pdev,
@@ -3872,6 +4050,8 @@ clean4:
clean2:
unregister_blkdev(hba[i]->major, hba[i]->devname);
clean1:
+ cciss_destroy_hba_sysfs_entry(hba[i]);
+clean0:
hba[i]->busy_initializing = 0;
/* cleanup any queues that may have been initialized */
for (j=0; j <= hba[i]->highest_lun; j++){
@@ -3907,8 +4087,8 @@ static void cciss_shutdown(struct pci_dev *pdev)
/* sendcmd will turn off interrupt, and send the flush...
* To write all data in the battery backed cache to disks */
memset(flush_buf, 0, 4);
- return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
- TYPE_CMD);
+ return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0,
+ CTLR_LUNID, TYPE_CMD);
if (return_code == IO_OK) {
printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
} else {
@@ -3973,15 +4153,13 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
kfree(hba[i]->cmd_pool_bits);
-#ifdef CONFIG_CISS_SCSI_TAPE
- kfree(hba[i]->scsi_rejects.complete);
-#endif
/*
* Deliberately omit pci_disable_device(): it does something nasty to
* Smart Array controllers that pci_enable_device does not undo
*/
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
+ cciss_destroy_hba_sysfs_entry(hba[i]);
free_hba(i);
}
@@ -3999,6 +4177,8 @@ static struct pci_driver cciss_pci_driver = {
*/
static int __init cciss_init(void)
{
+ int err;
+
/*
* The hardware requires that commands are aligned on a 64-bit
* boundary. Given that we use pci_alloc_consistent() to allocate an
@@ -4008,8 +4188,20 @@ static int __init cciss_init(void)
printk(KERN_INFO DRIVER_NAME "\n");
+ err = bus_register(&cciss_bus_type);
+ if (err)
+ return err;
+
/* Register for our PCI devices */
- return pci_register_driver(&cciss_pci_driver);
+ err = pci_register_driver(&cciss_pci_driver);
+ if (err)
+ goto err_bus_register;
+
+ return 0;
+
+err_bus_register:
+ bus_unregister(&cciss_bus_type);
+ return err;
}
static void __exit cciss_cleanup(void)
@@ -4026,6 +4218,7 @@ static void __exit cciss_cleanup(void)
}
}
remove_proc_entry("driver/cciss", NULL);
+ bus_unregister(&cciss_bus_type);
}
static void fail_all_cmds(unsigned long ctlr)
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 703e08038fb..06a5db25b29 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -11,6 +11,11 @@
#define IO_OK 0
#define IO_ERROR 1
+#define IO_NEEDS_RETRY 3
+
+#define VENDOR_LEN 8
+#define MODEL_LEN 16
+#define REV_LEN 4
struct ctlr_info;
typedef struct ctlr_info ctlr_info_t;
@@ -34,23 +39,20 @@ typedef struct _drive_info_struct
int cylinders;
int raid_level; /* set to -1 to indicate that
* the drive is not in use/configured
- */
- int busy_configuring; /*This is set when the drive is being removed
- *to prevent it from being opened or it's queue
- *from being started.
- */
- __u8 serial_no[16]; /* from inquiry page 0x83, */
- /* not necc. null terminated. */
+ */
+ int busy_configuring; /* This is set when a drive is being removed
+ * to prevent it from being opened or it's
+ * queue from being started.
+ */
+ struct device dev;
+ __u8 serial_no[16]; /* from inquiry page 0x83,
+ * not necc. null terminated.
+ */
+ char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */
+ char model[MODEL_LEN + 1]; /* SCSI model string */
+ char rev[REV_LEN + 1]; /* SCSI revision string */
} drive_info_struct;
-#ifdef CONFIG_CISS_SCSI_TAPE
-
-struct sendcmd_reject_list {
- int ncompletions;
- unsigned long *complete; /* array of NR_CMDS tags */
-};
-
-#endif
struct ctlr_info
{
int ctlr;
@@ -118,11 +120,11 @@ struct ctlr_info
void *scsi_ctlr; /* ptr to structure containing scsi related stuff */
/* list of block side commands the scsi error handling sucked up */
/* and saved for later processing */
- struct sendcmd_reject_list scsi_rejects;
#endif
unsigned char alive;
struct completion *rescan_wait;
struct task_struct *cciss_scan_thread;
+ struct device dev;
};
/* Defining the diffent access_menthods */
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index 40b1b92dae7..cd665b00c7c 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -217,6 +217,8 @@ typedef union _LUNAddr_struct {
LogDevAddr_struct LogDev;
} LUNAddr_struct;
+#define CTLR_LUNID "\0\0\0\0\0\0\0\0"
+
typedef struct _CommandListHeader_struct {
BYTE ReplyQueue;
BYTE SGList;
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index a3fd87b4144..3315268b4ec 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -44,20 +44,13 @@
#define CCISS_ABORT_MSG 0x00
#define CCISS_RESET_MSG 0x01
-/* some prototypes... */
-static int sendcmd(
- __u8 cmd,
- int ctlr,
- void *buff,
- size_t size,
- unsigned int use_unit_num, /* 0: address the controller,
- 1: address logical volume log_unit,
- 2: address is in scsi3addr */
- unsigned int log_unit,
- __u8 page_code,
- unsigned char *scsi3addr,
+static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+ size_t size,
+ __u8 page_code, unsigned char *scsi3addr,
int cmd_type);
+static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool);
+static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool);
static int cciss_scsi_proc_info(
struct Scsi_Host *sh,
@@ -1575,6 +1568,75 @@ cciss_seq_tape_report(struct seq_file *seq, int ctlr)
CPQ_TAPE_UNLOCK(ctlr, flags);
}
+static int wait_for_device_to_become_ready(ctlr_info_t *h,
+ unsigned char lunaddr[])
+{
+ int rc;
+ int count = 0;
+ int waittime = HZ;
+ CommandList_struct *c;
+
+ c = cmd_alloc(h, 1);
+ if (!c) {
+ printk(KERN_WARNING "cciss%d: out of memory in "
+ "wait_for_device_to_become_ready.\n", h->ctlr);
+ return IO_ERROR;
+ }
+
+ /* Send test unit ready until device ready, or give up. */
+ while (count < 20) {
+
+ /* Wait for a bit. do this first, because if we send
+ * the TUR right away, the reset will just abort it.
+ */
+ schedule_timeout_uninterruptible(waittime);
+ count++;
+
+ /* Increase wait time with each try, up to a point. */
+ if (waittime < (HZ * 30))
+ waittime = waittime * 2;
+
+ /* Send the Test Unit Ready */
+ rc = fill_cmd(c, TEST_UNIT_READY, h->ctlr, NULL, 0, 0,
+ lunaddr, TYPE_CMD);
+ if (rc == 0)
+ rc = sendcmd_withirq_core(h, c, 0);
+
+ (void) process_sendcmd_error(h, c);
+
+ if (rc != 0)
+ goto retry_tur;
+
+ if (c->err_info->CommandStatus == CMD_SUCCESS)
+ break;
+
+ if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
+ c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
+ if (c->err_info->SenseInfo[2] == NO_SENSE)
+ break;
+ if (c->err_info->SenseInfo[2] == UNIT_ATTENTION) {
+ unsigned char asc;
+ asc = c->err_info->SenseInfo[12];
+ check_for_unit_attention(h, c);
+ if (asc == POWER_OR_RESET)
+ break;
+ }
+ }
+retry_tur:
+ printk(KERN_WARNING "cciss%d: Waiting %d secs "
+ "for device to become ready.\n",
+ h->ctlr, waittime / HZ);
+ rc = 1; /* device not ready. */
+ }
+
+ if (rc)
+ printk("cciss%d: giving up on device.\n", h->ctlr);
+ else
+ printk(KERN_WARNING "cciss%d: device is ready.\n", h->ctlr);
+
+ cmd_free(h, c, 1);
+ return rc;
+}
/* Need at least one of these error handlers to keep ../scsi/hosts.c from
* complaining. Doing a host- or bus-reset can't do anything good here.
@@ -1591,6 +1653,7 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
int rc;
CommandList_struct *cmd_in_trouble;
+ unsigned char lunaddr[8];
ctlr_info_t **c;
int ctlr;
@@ -1600,19 +1663,15 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
return FAILED;
ctlr = (*c)->ctlr;
printk(KERN_WARNING "cciss%d: resetting tape drive or medium changer.\n", ctlr);
-
/* find the command that's giving us trouble */
cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble;
- if (cmd_in_trouble == NULL) { /* paranoia */
+ if (cmd_in_trouble == NULL) /* paranoia */
return FAILED;
- }
+ memcpy(lunaddr, &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 8);
/* send a reset to the SCSI LUN which the command was sent to */
- rc = sendcmd(CCISS_RESET_MSG, ctlr, NULL, 0, 2, 0, 0,
- (unsigned char *) &cmd_in_trouble->Header.LUN.LunAddrBytes[0],
+ rc = sendcmd_withirq(CCISS_RESET_MSG, ctlr, NULL, 0, 0, lunaddr,
TYPE_MSG);
- /* sendcmd turned off interrupts on the board, turn 'em back on. */
- (*c)->access.set_intr_mask(*c, CCISS_INTR_ON);
- if (rc == 0)
+ if (rc == 0 && wait_for_device_to_become_ready(*c, lunaddr) == 0)
return SUCCESS;
printk(KERN_WARNING "cciss%d: resetting device failed.\n", ctlr);
return FAILED;
@@ -1622,6 +1681,7 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
{
int rc;
CommandList_struct *cmd_to_abort;
+ unsigned char lunaddr[8];
ctlr_info_t **c;
int ctlr;
@@ -1636,12 +1696,9 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble;
if (cmd_to_abort == NULL) /* paranoia */
return FAILED;
- rc = sendcmd(CCISS_ABORT_MSG, ctlr, &cmd_to_abort->Header.Tag,
- 0, 2, 0, 0,
- (unsigned char *) &cmd_to_abort->Header.LUN.LunAddrBytes[0],
- TYPE_MSG);
- /* sendcmd turned off interrupts on the board, turn 'em back on. */
- (*c)->access.set_intr_mask(*c, CCISS_INTR_ON);
+ memcpy(lunaddr, &cmd_to_abort->Header.LUN.LunAddrBytes[0], 8);
+ rc = sendcmd_withirq(CCISS_ABORT_MSG, ctlr, &cmd_to_abort->Header.Tag,
+ 0, 0, lunaddr, TYPE_MSG);
if (rc == 0)
return SUCCESS;
return FAILED;
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index ca268ca1115..44fa2018f6b 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -474,7 +474,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
disk->fops = &ida_fops;
if (j && !drv->nr_blks)
continue;
- blk_queue_hardsect_size(hba[i]->queue, drv->blk_size);
+ blk_queue_logical_block_size(hba[i]->queue, drv->blk_size);
set_capacity(disk, drv->nr_blks);
disk->queue = hba[i]->queue;
disk->private_data = drv;
@@ -903,7 +903,7 @@ static void do_ida_request(struct request_queue *q)
goto startio;
queue_next:
- creq = elv_next_request(q);
+ creq = blk_peek_request(q);
if (!creq)
goto startio;
@@ -912,17 +912,18 @@ queue_next:
if ((c = cmd_alloc(h,1)) == NULL)
goto startio;
- blkdev_dequeue_request(creq);
+ blk_start_request(creq);
c->ctlr = h->ctlr;
c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
c->hdr.size = sizeof(rblk_t) >> 2;
c->size += sizeof(rblk_t);
- c->req.hdr.blk = creq->sector;
+ c->req.hdr.blk = blk_rq_pos(creq);
c->rq = creq;
DBGPX(
- printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
+ printk("sector=%d, nr_sectors=%u\n",
+ blk_rq_pos(creq), blk_rq_sectors(creq));
);
sg_init_table(tmp_sg, SG_MAX);
seg = blk_rq_map_sg(q, creq, tmp_sg);
@@ -940,9 +941,9 @@ DBGPX(
tmp_sg[i].offset,
tmp_sg[i].length, dir);
}
-DBGPX( printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
+DBGPX( printk("Submitting %u sectors in %d segments\n", blk_rq_sectors(creq), seg); );
c->req.hdr.sg_cnt = seg;
- c->req.hdr.blk_cnt = creq->nr_sectors;
+ c->req.hdr.blk_cnt = blk_rq_sectors(creq);
c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
c->type = CMD_RWREQ;
@@ -1024,8 +1025,7 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
cmd->req.sg[i].size, ddir);
DBGPX(printk("Done with %p\n", rq););
- if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
- BUG();
+ __blk_end_request_all(rq, error);
}
/*
@@ -1546,7 +1546,7 @@ static int revalidate_allvol(ctlr_info_t *host)
drv_info_t *drv = &host->drv[i];
if (i && !drv->nr_blks)
continue;
- blk_queue_hardsect_size(host->queue, drv->blk_size);
+ blk_queue_logical_block_size(host->queue, drv->blk_size);
set_capacity(disk, drv->nr_blks);
disk->queue = host->queue;
disk->private_data = drv;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 1300df6f164..862b40c9018 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -931,7 +931,7 @@ static inline void unlock_fdc(void)
del_timer(&fd_timeout);
cont = NULL;
clear_bit(0, &fdc_busy);
- if (elv_next_request(floppy_queue))
+ if (current_req || blk_peek_request(floppy_queue))
do_fd_request(floppy_queue);
spin_unlock_irqrestore(&floppy_lock, flags);
wake_up(&fdc_wait);
@@ -2303,7 +2303,7 @@ static void floppy_end_request(struct request *req, int error)
/* current_count_sectors can be zero if transfer failed */
if (error)
- nr_sectors = req->current_nr_sectors;
+ nr_sectors = blk_rq_cur_sectors(req);
if (__blk_end_request(req, error, nr_sectors << 9))
return;
@@ -2332,7 +2332,7 @@ static void request_done(int uptodate)
if (uptodate) {
/* maintain values for invalidation on geometry
* change */
- block = current_count_sectors + req->sector;
+ block = current_count_sectors + blk_rq_pos(req);
INFBOUND(DRS->maxblock, block);
if (block > _floppy->sect)
DRS->maxtrack = 1;
@@ -2346,10 +2346,10 @@ static void request_done(int uptodate)
/* record write error information */
DRWE->write_errors++;
if (DRWE->write_errors == 1) {
- DRWE->first_error_sector = req->sector;
+ DRWE->first_error_sector = blk_rq_pos(req);
DRWE->first_error_generation = DRS->generation;
}
- DRWE->last_error_sector = req->sector;
+ DRWE->last_error_sector = blk_rq_pos(req);
DRWE->last_error_generation = DRS->generation;
}
spin_lock_irqsave(q->queue_lock, flags);
@@ -2503,24 +2503,23 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
max_sector = transfer_size(ssize,
min(max_sector, max_sector_2),
- current_req->nr_sectors);
+ blk_rq_sectors(current_req));
if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
- buffer_max > fsector_t + current_req->nr_sectors)
+ buffer_max > fsector_t + blk_rq_sectors(current_req))
current_count_sectors = min_t(int, buffer_max - fsector_t,
- current_req->nr_sectors);
+ blk_rq_sectors(current_req));
remaining = current_count_sectors << 9;
#ifdef FLOPPY_SANITY_CHECK
- if ((remaining >> 9) > current_req->nr_sectors &&
- CT(COMMAND) == FD_WRITE) {
+ if (remaining > blk_rq_bytes(current_req) && CT(COMMAND) == FD_WRITE) {
DPRINT("in copy buffer\n");
printk("current_count_sectors=%ld\n", current_count_sectors);
printk("remaining=%d\n", remaining >> 9);
- printk("current_req->nr_sectors=%ld\n",
- current_req->nr_sectors);
+ printk("current_req->nr_sectors=%u\n",
+ blk_rq_sectors(current_req));
printk("current_req->current_nr_sectors=%u\n",
- current_req->current_nr_sectors);
+ blk_rq_cur_sectors(current_req));
printk("max_sector=%d\n", max_sector);
printk("ssize=%d\n", ssize);
}
@@ -2530,7 +2529,7 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9);
- size = current_req->current_nr_sectors << 9;
+ size = blk_rq_cur_bytes(current_req);
rq_for_each_segment(bv, current_req, iter) {
if (!remaining)
@@ -2648,10 +2647,10 @@ static int make_raw_rw_request(void)
max_sector = _floppy->sect * _floppy->head;
- TRACK = (int)current_req->sector / max_sector;
- fsector_t = (int)current_req->sector % max_sector;
+ TRACK = (int)blk_rq_pos(current_req) / max_sector;
+ fsector_t = (int)blk_rq_pos(current_req) % max_sector;
if (_floppy->track && TRACK >= _floppy->track) {
- if (current_req->current_nr_sectors & 1) {
+ if (blk_rq_cur_sectors(current_req) & 1) {
current_count_sectors = 1;
return 1;
} else
@@ -2669,7 +2668,7 @@ static int make_raw_rw_request(void)
if (fsector_t >= max_sector) {
current_count_sectors =
min_t(int, _floppy->sect - fsector_t,
- current_req->nr_sectors);
+ blk_rq_sectors(current_req));
return 1;
}
SIZECODE = 2;
@@ -2720,7 +2719,7 @@ static int make_raw_rw_request(void)
in_sector_offset = (fsector_t % _floppy->sect) % ssize;
aligned_sector_t = fsector_t - in_sector_offset;
- max_size = current_req->nr_sectors;
+ max_size = blk_rq_sectors(current_req);
if ((raw_cmd->track == buffer_track) &&
(current_drive == buffer_drive) &&
(fsector_t >= buffer_min) && (fsector_t < buffer_max)) {
@@ -2729,10 +2728,10 @@ static int make_raw_rw_request(void)
copy_buffer(1, max_sector, buffer_max);
return 1;
}
- } else if (in_sector_offset || current_req->nr_sectors < ssize) {
+ } else if (in_sector_offset || blk_rq_sectors(current_req) < ssize) {
if (CT(COMMAND) == FD_WRITE) {
- if (fsector_t + current_req->nr_sectors > ssize &&
- fsector_t + current_req->nr_sectors < ssize + ssize)
+ if (fsector_t + blk_rq_sectors(current_req) > ssize &&
+ fsector_t + blk_rq_sectors(current_req) < ssize + ssize)
max_size = ssize + ssize;
else
max_size = ssize;
@@ -2776,7 +2775,7 @@ static int make_raw_rw_request(void)
(indirect * 2 > direct * 3 &&
*errors < DP->max_errors.read_track && ((!probing
|| (DP->read_track & (1 << DRS->probed_format)))))) {
- max_size = current_req->nr_sectors;
+ max_size = blk_rq_sectors(current_req);
} else {
raw_cmd->kernel_data = current_req->buffer;
raw_cmd->length = current_count_sectors << 9;
@@ -2801,7 +2800,7 @@ static int make_raw_rw_request(void)
fsector_t > buffer_max ||
fsector_t < buffer_min ||
((CT(COMMAND) == FD_READ ||
- (!in_sector_offset && current_req->nr_sectors >= ssize)) &&
+ (!in_sector_offset && blk_rq_sectors(current_req) >= ssize)) &&
max_sector > 2 * max_buffer_sectors + buffer_min &&
max_size + fsector_t > 2 * max_buffer_sectors + buffer_min)
/* not enough space */
@@ -2879,8 +2878,8 @@ static int make_raw_rw_request(void)
printk("write\n");
return 0;
}
- } else if (raw_cmd->length > current_req->nr_sectors << 9 ||
- current_count_sectors > current_req->nr_sectors) {
+ } else if (raw_cmd->length > blk_rq_bytes(current_req) ||
+ current_count_sectors > blk_rq_sectors(current_req)) {
DPRINT("buffer overrun in direct transfer\n");
return 0;
} else if (raw_cmd->length < current_count_sectors << 9) {
@@ -2913,7 +2912,7 @@ static void redo_fd_request(void)
struct request *req;
spin_lock_irq(floppy_queue->queue_lock);
- req = elv_next_request(floppy_queue);
+ req = blk_fetch_request(floppy_queue);
spin_unlock_irq(floppy_queue->queue_lock);
if (!req) {
do_floppy = NULL;
@@ -2990,8 +2989,9 @@ static void do_fd_request(struct request_queue * q)
if (usage_count == 0) {
printk("warning: usage count=0, current_req=%p exiting\n",
current_req);
- printk("sect=%ld type=%x flags=%x\n", (long)current_req->sector,
- current_req->cmd_type, current_req->cmd_flags);
+ printk("sect=%ld type=%x flags=%x\n",
+ (long)blk_rq_pos(current_req), current_req->cmd_type,
+ current_req->cmd_flags);
return;
}
if (test_bit(0, &fdc_busy)) {
@@ -4148,6 +4148,24 @@ static void floppy_device_release(struct device *dev)
{
}
+static int floppy_resume(struct platform_device *dev)
+{
+ int fdc;
+
+ for (fdc = 0; fdc < N_FDC; fdc++)
+ if (FDCS->address != -1)
+ user_reset_fdc(-1, FD_RESET_ALWAYS, 0);
+
+ return 0;
+}
+
+static struct platform_driver floppy_driver = {
+ .resume = floppy_resume,
+ .driver = {
+ .name = "floppy",
+ },
+};
+
static struct platform_device floppy_device[N_DRIVE];
static struct kobject *floppy_find(dev_t dev, int *part, void *data)
@@ -4196,10 +4214,14 @@ static int __init floppy_init(void)
if (err)
goto out_put_disk;
+ err = platform_driver_register(&floppy_driver);
+ if (err)
+ goto out_unreg_blkdev;
+
floppy_queue = blk_init_queue(do_fd_request, &floppy_lock);
if (!floppy_queue) {
err = -ENOMEM;
- goto out_unreg_blkdev;
+ goto out_unreg_driver;
}
blk_queue_max_sectors(floppy_queue, 64);
@@ -4346,6 +4368,8 @@ out_flush_work:
out_unreg_region:
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
blk_cleanup_queue(floppy_queue);
+out_unreg_driver:
+ platform_driver_unregister(&floppy_driver);
out_unreg_blkdev:
unregister_blkdev(FLOPPY_MAJOR, "fd");
out_put_disk:
@@ -4566,6 +4590,7 @@ static void __exit floppy_module_exit(void)
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
unregister_blkdev(FLOPPY_MAJOR, "fd");
+ platform_driver_unregister(&floppy_driver);
for (drive = 0; drive < N_DRIVE; drive++) {
del_timer_sync(&motor_off_timer[drive]);
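The floppy hunks above all follow the same shape: request state is read through the blk_rq_*() accessors instead of the old ->sector/->nr_sectors fields, and the queue is drained with blk_fetch_request(), which dequeues as it returns. A minimal sketch of that shape, using only helpers that appear in this patch (mydrv_request() and mydrv_xfer() are illustrative placeholder names, not part of the patch):

#include <linux/blkdev.h>

/* Called with the queue lock held, as a request_fn always is. */
static void mydrv_request(struct request_queue *q)
{
        struct request *req;
        int err;

        while ((req = blk_fetch_request(q)) != NULL) {
                /* mydrv_xfer() stands in for the driver's actual transfer. */
                err = mydrv_xfer(req, blk_rq_pos(req), blk_rq_sectors(req));

                __blk_end_request_all(req, err);  /* complete the whole request */
        }
}
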
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index baaa9e486e5..f65b3f369eb 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -98,10 +98,9 @@
static DEFINE_SPINLOCK(hd_lock);
static struct request_queue *hd_queue;
+static struct request *hd_req;
#define MAJOR_NR HD_MAJOR
-#define QUEUE (hd_queue)
-#define CURRENT elv_next_request(hd_queue)
#define TIMEOUT_VALUE (6*HZ)
#define HD_DELAY 0
@@ -195,11 +194,24 @@ static void __init hd_setup(char *str, int *ints)
NR_HD = hdind+1;
}
+static bool hd_end_request(int err, unsigned int bytes)
+{
+ if (__blk_end_request(hd_req, err, bytes))
+ return true;
+ hd_req = NULL;
+ return false;
+}
+
+static bool hd_end_request_cur(int err)
+{
+ return hd_end_request(err, blk_rq_cur_bytes(hd_req));
+}
+
static void dump_status(const char *msg, unsigned int stat)
{
char *name = "hd?";
- if (CURRENT)
- name = CURRENT->rq_disk->disk_name;
+ if (hd_req)
+ name = hd_req->rq_disk->disk_name;
#ifdef VERBOSE_ERRORS
printk("%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
@@ -227,8 +239,8 @@ static void dump_status(const char *msg, unsigned int stat)
if (hd_error & (BBD_ERR|ECC_ERR|ID_ERR|MARK_ERR)) {
printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL),
inb(HD_CURRENT) & 0xf, inb(HD_SECTOR));
- if (CURRENT)
- printk(", sector=%ld", CURRENT->sector);
+ if (hd_req)
+ printk(", sector=%ld", blk_rq_pos(hd_req));
}
printk("\n");
}
@@ -406,11 +418,12 @@ static void unexpected_hd_interrupt(void)
*/
static void bad_rw_intr(void)
{
- struct request *req = CURRENT;
+ struct request *req = hd_req;
+
if (req != NULL) {
struct hd_i_struct *disk = req->rq_disk->private_data;
if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) {
- end_request(req, 0);
+ hd_end_request_cur(-EIO);
disk->special_op = disk->recalibrate = 1;
} else if (req->errors % RESET_FREQ == 0)
reset = 1;
@@ -452,37 +465,30 @@ static void read_intr(void)
bad_rw_intr();
hd_request();
return;
+
ok_to_read:
- req = CURRENT;
+ req = hd_req;
insw(HD_DATA, req->buffer, 256);
- req->sector++;
- req->buffer += 512;
- req->errors = 0;
- i = --req->nr_sectors;
- --req->current_nr_sectors;
#ifdef DEBUG
- printk("%s: read: sector %ld, remaining = %ld, buffer=%p\n",
- req->rq_disk->disk_name, req->sector, req->nr_sectors,
- req->buffer+512);
+ printk("%s: read: sector %ld, remaining = %u, buffer=%p\n",
+ req->rq_disk->disk_name, blk_rq_pos(req) + 1,
+ blk_rq_sectors(req) - 1, req->buffer+512);
#endif
- if (req->current_nr_sectors <= 0)
- end_request(req, 1);
- if (i > 0) {
+ if (hd_end_request(0, 512)) {
SET_HANDLER(&read_intr);
return;
}
+
(void) inb_p(HD_STATUS);
#if (HD_DELAY > 0)
last_req = read_timer();
#endif
- if (elv_next_request(QUEUE))
- hd_request();
- return;
+ hd_request();
}
static void write_intr(void)
{
- struct request *req = CURRENT;
+ struct request *req = hd_req;
int i;
int retries = 100000;
@@ -492,30 +498,25 @@ static void write_intr(void)
continue;
if (!OK_STATUS(i))
break;
- if ((req->nr_sectors <= 1) || (i & DRQ_STAT))
+ if ((blk_rq_sectors(req) <= 1) || (i & DRQ_STAT))
goto ok_to_write;
} while (--retries > 0);
dump_status("write_intr", i);
bad_rw_intr();
hd_request();
return;
+
ok_to_write:
- req->sector++;
- i = --req->nr_sectors;
- --req->current_nr_sectors;
- req->buffer += 512;
- if (!i || (req->bio && req->current_nr_sectors <= 0))
- end_request(req, 1);
- if (i > 0) {
+ if (hd_end_request(0, 512)) {
SET_HANDLER(&write_intr);
outsw(HD_DATA, req->buffer, 256);
- } else {
+ return;
+ }
+
#if (HD_DELAY > 0)
- last_req = read_timer();
+ last_req = read_timer();
#endif
- hd_request();
- }
- return;
+ hd_request();
}
static void recal_intr(void)
@@ -537,18 +538,18 @@ static void hd_times_out(unsigned long dummy)
do_hd = NULL;
- if (!CURRENT)
+ if (!hd_req)
return;
spin_lock_irq(hd_queue->queue_lock);
reset = 1;
- name = CURRENT->rq_disk->disk_name;
+ name = hd_req->rq_disk->disk_name;
printk("%s: timeout\n", name);
- if (++CURRENT->errors >= MAX_ERRORS) {
+ if (++hd_req->errors >= MAX_ERRORS) {
#ifdef DEBUG
printk("%s: too many errors\n", name);
#endif
- end_request(CURRENT, 0);
+ hd_end_request_cur(-EIO);
}
hd_request();
spin_unlock_irq(hd_queue->queue_lock);
@@ -563,7 +564,7 @@ static int do_special_op(struct hd_i_struct *disk, struct request *req)
}
if (disk->head > 16) {
printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name);
- end_request(req, 0);
+ hd_end_request_cur(-EIO);
}
disk->special_op = 0;
return 1;
@@ -590,24 +591,27 @@ static void hd_request(void)
repeat:
del_timer(&device_timer);
- req = CURRENT;
- if (!req) {
- do_hd = NULL;
- return;
+ if (!hd_req) {
+ hd_req = blk_fetch_request(hd_queue);
+ if (!hd_req) {
+ do_hd = NULL;
+ return;
+ }
}
+ req = hd_req;
if (reset) {
reset_hd();
return;
}
disk = req->rq_disk->private_data;
- block = req->sector;
- nsect = req->nr_sectors;
+ block = blk_rq_pos(req);
+ nsect = blk_rq_sectors(req);
if (block >= get_capacity(req->rq_disk) ||
((block+nsect) > get_capacity(req->rq_disk))) {
printk("%s: bad access: block=%d, count=%d\n",
req->rq_disk->disk_name, block, nsect);
- end_request(req, 0);
+ hd_end_request_cur(-EIO);
goto repeat;
}
@@ -647,7 +651,7 @@ repeat:
break;
default:
printk("unknown hd-command\n");
- end_request(req, 0);
+ hd_end_request_cur(-EIO);
break;
}
}
@@ -720,7 +724,7 @@ static int __init hd_init(void)
blk_queue_max_sectors(hd_queue, 255);
init_timer(&device_timer);
device_timer.function = hd_times_out;
- blk_queue_hardsect_size(hd_queue, 512);
+ blk_queue_logical_block_size(hd_queue, 512);
if (!NR_HD) {
/*
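hd.c replaces the implicit CURRENT macro with an explicit hd_req pointer and a small completion helper built on __blk_end_request(). Roughly, the helper pattern looks like this (mydrv_* names are illustrative; __blk_end_request() needs the queue lock held, which these drivers take in their interrupt paths):

#include <linux/blkdev.h>

static struct request *mydrv_req;       /* request currently being serviced */

/*
 * Complete 'bytes' of the in-flight request.  Returns true while the
 * request still has data left; once __blk_end_request() reports it is
 * finished, drop the cached pointer so the next one can be fetched.
 */
static bool mydrv_end_request(int err, unsigned int bytes)
{
        if (__blk_end_request(mydrv_req, err, bytes))
                return true;

        mydrv_req = NULL;
        return false;
}
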
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ddae8082589..801f4ab8330 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -511,11 +511,7 @@ out:
*/
static void loop_add_bio(struct loop_device *lo, struct bio *bio)
{
- if (lo->lo_biotail) {
- lo->lo_biotail->bi_next = bio;
- lo->lo_biotail = bio;
- } else
- lo->lo_bio = lo->lo_biotail = bio;
+ bio_list_add(&lo->lo_bio_list, bio);
}
/*
@@ -523,16 +519,7 @@ static void loop_add_bio(struct loop_device *lo, struct bio *bio)
*/
static struct bio *loop_get_bio(struct loop_device *lo)
{
- struct bio *bio;
-
- if ((bio = lo->lo_bio)) {
- if (bio == lo->lo_biotail)
- lo->lo_biotail = NULL;
- lo->lo_bio = bio->bi_next;
- bio->bi_next = NULL;
- }
-
- return bio;
+ return bio_list_pop(&lo->lo_bio_list);
}
static int loop_make_request(struct request_queue *q, struct bio *old_bio)
@@ -609,12 +596,13 @@ static int loop_thread(void *data)
set_user_nice(current, -20);
- while (!kthread_should_stop() || lo->lo_bio) {
+ while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
wait_event_interruptible(lo->lo_event,
- lo->lo_bio || kthread_should_stop());
+ !bio_list_empty(&lo->lo_bio_list) ||
+ kthread_should_stop());
- if (!lo->lo_bio)
+ if (bio_list_empty(&lo->lo_bio_list))
continue;
spin_lock_irq(&lo->lo_lock);
bio = loop_get_bio(lo);
@@ -721,10 +709,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
goto out_putf;
- /* new backing store needs to support loop (eg splice_read) */
- if (!inode->i_fop->splice_read)
- goto out_putf;
-
/* size of the new backing store needs to be the same */
if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
goto out_putf;
@@ -800,12 +784,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
error = -EINVAL;
if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) {
const struct address_space_operations *aops = mapping->a_ops;
- /*
- * If we can't read - sorry. If we only can't write - well,
- * it's going to be read-only.
- */
- if (!file->f_op->splice_read)
- goto out_putf;
+
if (aops->write_begin)
lo_flags |= LO_FLAGS_USE_AOPS;
if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write)
@@ -841,7 +820,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
- lo->lo_bio = lo->lo_biotail = NULL;
+ bio_list_init(&lo->lo_bio_list);
/*
* set queue make_request_fn, and add limits based on lower level
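loop.c drops its hand-rolled lo_bio/lo_biotail list in favour of struct bio_list and the bio_list_*() helpers used above. A self-contained sketch of the same pattern (struct mydev and its functions are illustrative, not from the patch):

#include <linux/bio.h>
#include <linux/spinlock.h>

struct mydev {
        spinlock_t      lock;
        struct bio_list pending;        /* replaces the head/tail pointer pair */
};

static void mydev_init(struct mydev *d)
{
        spin_lock_init(&d->lock);
        bio_list_init(&d->pending);     /* empty list */
}

static void mydev_queue_bio(struct mydev *d, struct bio *bio)
{
        spin_lock_irq(&d->lock);
        bio_list_add(&d->pending, bio); /* append at the tail */
        spin_unlock_irq(&d->lock);
}

static struct bio *mydev_pop_bio(struct mydev *d)
{
        struct bio *bio;

        spin_lock_irq(&d->lock);
        bio = bio_list_pop(&d->pending);        /* NULL when empty */
        spin_unlock_irq(&d->lock);
        return bio;
}
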
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index f3898353d0a..60de5a01e71 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -17,71 +17,220 @@
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
-#include <linux/libata.h>
+#include <linux/ata.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
-#include <linux/mg_disk.h>
#define MG_RES_SEC (CONFIG_MG_DISK_RES << 1)
+/* name for block device */
+#define MG_DISK_NAME "mgd"
+/* name for platform device */
+#define MG_DEV_NAME "mg_disk"
+
+#define MG_DISK_MAJ 0
+#define MG_DISK_MAX_PART 16
+#define MG_SECTOR_SIZE 512
+#define MG_MAX_SECTS 256
+
+/* Register offsets */
+#define MG_BUFF_OFFSET 0x8000
+#define MG_STORAGE_BUFFER_SIZE 0x200
+#define MG_REG_OFFSET 0xC000
+#define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */
+#define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */
+#define MG_REG_SECT_CNT (MG_REG_OFFSET + 4)
+#define MG_REG_SECT_NUM (MG_REG_OFFSET + 6)
+#define MG_REG_CYL_LOW (MG_REG_OFFSET + 8)
+#define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA)
+#define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC)
+#define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */
+#define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */
+#define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10)
+#define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12)
+
+/* handy status */
+#define MG_STAT_READY (ATA_DRDY | ATA_DSC)
+#define MG_READY_OK(s) (((s) & (MG_STAT_READY | (ATA_BUSY | ATA_DF | \
+ ATA_ERR))) == MG_STAT_READY)
+
+/* error code for others */
+#define MG_ERR_NONE 0
+#define MG_ERR_TIMEOUT 0x100
+#define MG_ERR_INIT_STAT 0x101
+#define MG_ERR_TRANSLATION 0x102
+#define MG_ERR_CTRL_RST 0x103
+#define MG_ERR_INV_STAT 0x104
+#define MG_ERR_RSTOUT 0x105
+
+#define MG_MAX_ERRORS 6 /* Max read/write errors */
+
+/* command */
+#define MG_CMD_RD 0x20
+#define MG_CMD_WR 0x30
+#define MG_CMD_SLEEP 0x99
+#define MG_CMD_WAKEUP 0xC3
+#define MG_CMD_ID 0xEC
+#define MG_CMD_WR_CONF 0x3C
+#define MG_CMD_RD_CONF 0x40
+
+/* operation mode */
+#define MG_OP_CASCADE (1 << 0)
+#define MG_OP_CASCADE_SYNC_RD (1 << 1)
+#define MG_OP_CASCADE_SYNC_WR (1 << 2)
+#define MG_OP_INTERLEAVE (1 << 3)
+
+/* synchronous */
+#define MG_BURST_LAT_4 (3 << 4)
+#define MG_BURST_LAT_5 (4 << 4)
+#define MG_BURST_LAT_6 (5 << 4)
+#define MG_BURST_LAT_7 (6 << 4)
+#define MG_BURST_LAT_8 (7 << 4)
+#define MG_BURST_LEN_4 (1 << 1)
+#define MG_BURST_LEN_8 (2 << 1)
+#define MG_BURST_LEN_16 (3 << 1)
+#define MG_BURST_LEN_32 (4 << 1)
+#define MG_BURST_LEN_CONT (0 << 1)
+
+/* timeout value (unit: ms) */
+#define MG_TMAX_CONF_TO_CMD 1
+#define MG_TMAX_WAIT_RD_DRQ 10
+#define MG_TMAX_WAIT_WR_DRQ 500
+#define MG_TMAX_RST_TO_BUSY 10
+#define MG_TMAX_HDRST_TO_RDY 500
+#define MG_TMAX_SWRST_TO_RDY 500
+#define MG_TMAX_RSTOUT 3000
+
+/* device attribution */
+/* use mflash as boot device */
+#define MG_BOOT_DEV (1 << 0)
+/* use mflash as storage device */
+#define MG_STORAGE_DEV (1 << 1)
+/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */
+#define MG_STORAGE_DEV_SKIP_RST (1 << 2)
+
+#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
+
+/* names of GPIO resource */
+#define MG_RST_PIN "mg_rst"
+/* except MG_BOOT_DEV, reset-out pin should be assigned */
+#define MG_RSTOUT_PIN "mg_rstout"
+
+/* private driver data */
+struct mg_drv_data {
+ /* disk resource */
+ u32 use_polling;
+
+ /* device attribution */
+ u32 dev_attr;
+
+ /* internally used */
+ struct mg_host *host;
+};
+
+/* main structure for mflash driver */
+struct mg_host {
+ struct device *dev;
+
+ struct request_queue *breq;
+ struct request *req;
+ spinlock_t lock;
+ struct gendisk *gd;
+
+ struct timer_list timer;
+ void (*mg_do_intr) (struct mg_host *);
+
+ u16 id[ATA_ID_WORDS];
+
+ u16 cyls;
+ u16 heads;
+ u16 sectors;
+ u32 n_sectors;
+ u32 nres_sectors;
+
+ void __iomem *dev_base;
+ unsigned int irq;
+ unsigned int rst;
+ unsigned int rstout;
+
+ u32 major;
+ u32 error;
+};
+
+/*
+ * Debugging macro and defines
+ */
+#undef DO_MG_DEBUG
+#ifdef DO_MG_DEBUG
+# define MG_DBG(fmt, args...) \
+ printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
+#else /* CONFIG_MG_DEBUG */
+# define MG_DBG(fmt, args...) do { } while (0)
+#endif /* CONFIG_MG_DEBUG */
+
static void mg_request(struct request_queue *);
+static bool mg_end_request(struct mg_host *host, int err, unsigned int nr_bytes)
+{
+ if (__blk_end_request(host->req, err, nr_bytes))
+ return true;
+
+ host->req = NULL;
+ return false;
+}
+
+static bool mg_end_request_cur(struct mg_host *host, int err)
+{
+ return mg_end_request(host, err, blk_rq_cur_bytes(host->req));
+}
+
static void mg_dump_status(const char *msg, unsigned int stat,
struct mg_host *host)
{
char *name = MG_DISK_NAME;
- struct request *req;
- if (host->breq) {
- req = elv_next_request(host->breq);
- if (req)
- name = req->rq_disk->disk_name;
- }
+ if (host->req)
+ name = host->req->rq_disk->disk_name;
printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
- if (stat & MG_REG_STATUS_BIT_BUSY)
+ if (stat & ATA_BUSY)
printk("Busy ");
- if (stat & MG_REG_STATUS_BIT_READY)
+ if (stat & ATA_DRDY)
printk("DriveReady ");
- if (stat & MG_REG_STATUS_BIT_WRITE_FAULT)
+ if (stat & ATA_DF)
printk("WriteFault ");
- if (stat & MG_REG_STATUS_BIT_SEEK_DONE)
+ if (stat & ATA_DSC)
printk("SeekComplete ");
- if (stat & MG_REG_STATUS_BIT_DATA_REQ)
+ if (stat & ATA_DRQ)
printk("DataRequest ");
- if (stat & MG_REG_STATUS_BIT_CORRECTED_ERROR)
+ if (stat & ATA_CORR)
printk("CorrectedError ");
- if (stat & MG_REG_STATUS_BIT_ERROR)
+ if (stat & ATA_ERR)
printk("Error ");
printk("}\n");
- if ((stat & MG_REG_STATUS_BIT_ERROR) == 0) {
+ if ((stat & ATA_ERR) == 0) {
host->error = 0;
} else {
host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR);
printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg,
host->error & 0xff);
- if (host->error & MG_REG_ERR_BBK)
+ if (host->error & ATA_BBK)
printk("BadSector ");
- if (host->error & MG_REG_ERR_UNC)
+ if (host->error & ATA_UNC)
printk("UncorrectableError ");
- if (host->error & MG_REG_ERR_IDNF)
+ if (host->error & ATA_IDNF)
printk("SectorIdNotFound ");
- if (host->error & MG_REG_ERR_ABRT)
+ if (host->error & ATA_ABORTED)
printk("DriveStatusError ");
- if (host->error & MG_REG_ERR_AMNF)
+ if (host->error & ATA_AMNF)
printk("AddrMarkNotFound ");
printk("}");
- if (host->error &
- (MG_REG_ERR_BBK | MG_REG_ERR_UNC |
- MG_REG_ERR_IDNF | MG_REG_ERR_AMNF)) {
- if (host->breq) {
- req = elv_next_request(host->breq);
- if (req)
- printk(", sector=%u", (u32)req->sector);
- }
-
+ if (host->error & (ATA_BBK | ATA_UNC | ATA_IDNF | ATA_AMNF)) {
+ if (host->req)
+ printk(", sector=%u",
+ (unsigned int)blk_rq_pos(host->req));
}
printk("\n");
}
@@ -100,12 +249,12 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
do {
cur_jiffies = jiffies;
- if (status & MG_REG_STATUS_BIT_BUSY) {
- if (expect == MG_REG_STATUS_BIT_BUSY)
+ if (status & ATA_BUSY) {
+ if (expect == ATA_BUSY)
break;
} else {
/* Check the error condition! */
- if (status & MG_REG_STATUS_BIT_ERROR) {
+ if (status & ATA_ERR) {
mg_dump_status("mg_wait", status, host);
break;
}
@@ -114,8 +263,8 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
if (MG_READY_OK(status))
break;
- if (expect == MG_REG_STATUS_BIT_DATA_REQ)
- if (status & MG_REG_STATUS_BIT_DATA_REQ)
+ if (expect == ATA_DRQ)
+ if (status & ATA_DRQ)
break;
}
if (!msec) {
@@ -173,6 +322,42 @@ static irqreturn_t mg_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+/* local copy of ata_id_string() */
+static void mg_id_string(const u16 *id, unsigned char *s,
+ unsigned int ofs, unsigned int len)
+{
+ unsigned int c;
+
+ BUG_ON(len & 1);
+
+ while (len > 0) {
+ c = id[ofs] >> 8;
+ *s = c;
+ s++;
+
+ c = id[ofs] & 0xff;
+ *s = c;
+ s++;
+
+ ofs++;
+ len -= 2;
+ }
+}
+
+/* local copy of ata_id_c_string() */
+static void mg_id_c_string(const u16 *id, unsigned char *s,
+ unsigned int ofs, unsigned int len)
+{
+ unsigned char *p;
+
+ mg_id_string(id, s, ofs, len - 1);
+
+ p = s + strnlen(s, len - 1);
+ while (p > s && p[-1] == ' ')
+ p--;
+ *p = '\0';
+}
+
static int mg_get_disk_id(struct mg_host *host)
{
u32 i;
@@ -184,12 +369,10 @@ static int mg_get_disk_id(struct mg_host *host)
char serial[ATA_ID_SERNO_LEN + 1];
if (!prv_data->use_polling)
- outb(MG_REG_CTRL_INTR_DISABLE,
- (unsigned long)host->dev_base +
- MG_REG_DRV_CTRL);
+ outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND);
- err = mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_RD_DRQ);
+ err = mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ);
if (err)
return err;
@@ -219,9 +402,9 @@ static int mg_get_disk_id(struct mg_host *host)
host->n_sectors -= host->nres_sectors;
}
- ata_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
- ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
- ata_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
+ mg_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
+ mg_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
+ mg_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
printk(KERN_INFO "mg_disk: model: %s\n", model);
printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev);
printk(KERN_INFO "mg_disk: serial: %s\n", serial);
@@ -229,8 +412,7 @@ static int mg_get_disk_id(struct mg_host *host)
host->n_sectors, host->nres_sectors);
if (!prv_data->use_polling)
- outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
- MG_REG_DRV_CTRL);
+ outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
return err;
}
@@ -244,7 +426,7 @@ static int mg_disk_init(struct mg_host *host)
/* hdd rst low */
gpio_set_value(host->rst, 0);
- err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY);
+ err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
if (err)
return err;
@@ -255,17 +437,14 @@ static int mg_disk_init(struct mg_host *host)
return err;
/* soft reset on */
- outb(MG_REG_CTRL_RESET |
- (prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
- MG_REG_CTRL_INTR_ENABLE),
+ outb(ATA_SRST | (prv_data->use_polling ? ATA_NIEN : 0),
(unsigned long)host->dev_base + MG_REG_DRV_CTRL);
- err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY);
+ err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
if (err)
return err;
/* soft reset off */
- outb(prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
- MG_REG_CTRL_INTR_ENABLE,
+ outb(prv_data->use_polling ? ATA_NIEN : 0,
(unsigned long)host->dev_base + MG_REG_DRV_CTRL);
err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY);
if (err)
@@ -281,11 +460,10 @@ static int mg_disk_init(struct mg_host *host)
static void mg_bad_rw_intr(struct mg_host *host)
{
- struct request *req = elv_next_request(host->breq);
- if (req != NULL)
- if (++req->errors >= MG_MAX_ERRORS ||
- host->error == MG_ERR_TIMEOUT)
- end_request(req, 0);
+ if (host->req)
+ if (++host->req->errors >= MG_MAX_ERRORS ||
+ host->error == MG_ERR_TIMEOUT)
+ mg_end_request_cur(host, -EIO);
}
static unsigned int mg_out(struct mg_host *host,
@@ -311,7 +489,7 @@ static unsigned int mg_out(struct mg_host *host,
MG_REG_CYL_LOW);
outb((u8)(sect_num >> 16), (unsigned long)host->dev_base +
MG_REG_CYL_HIGH);
- outb((u8)((sect_num >> 24) | MG_REG_HEAD_LBA_MODE),
+ outb((u8)((sect_num >> 24) | ATA_LBA | ATA_DEVICE_OBS),
(unsigned long)host->dev_base + MG_REG_DRV_HEAD);
outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND);
return MG_ERR_NONE;
@@ -319,105 +497,77 @@ static unsigned int mg_out(struct mg_host *host,
static void mg_read(struct request *req)
{
- u32 remains, j;
+ u32 j;
struct mg_host *host = req->rq_disk->private_data;
- remains = req->nr_sectors;
-
- if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_RD, NULL) !=
- MG_ERR_NONE)
+ if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
+ MG_CMD_RD, NULL) != MG_ERR_NONE)
mg_bad_rw_intr(host);
MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
- remains, req->sector, req->buffer);
+ blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
+
+ do {
+ u16 *buff = (u16 *)req->buffer;
- while (remains) {
- if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
- MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
+ if (mg_wait(host, ATA_DRQ,
+ MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
mg_bad_rw_intr(host);
return;
}
- for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) {
- *(u16 *)req->buffer =
- inw((unsigned long)host->dev_base +
- MG_BUFF_OFFSET + (j << 1));
- req->buffer += 2;
- }
-
- req->sector++;
- req->errors = 0;
- remains = --req->nr_sectors;
- --req->current_nr_sectors;
-
- if (req->current_nr_sectors <= 0) {
- MG_DBG("remain : %d sects\n", remains);
- end_request(req, 1);
- if (remains > 0)
- req = elv_next_request(host->breq);
- }
+ for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
+ *buff++ = inw((unsigned long)host->dev_base +
+ MG_BUFF_OFFSET + (j << 1));
outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
MG_REG_COMMAND);
- }
+ } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
}
static void mg_write(struct request *req)
{
- u32 remains, j;
+ u32 j;
struct mg_host *host = req->rq_disk->private_data;
- remains = req->nr_sectors;
-
- if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_WR, NULL) !=
- MG_ERR_NONE) {
+ if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
+ MG_CMD_WR, NULL) != MG_ERR_NONE) {
mg_bad_rw_intr(host);
return;
}
-
MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
- remains, req->sector, req->buffer);
- while (remains) {
- if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
- MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
+ blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
+
+ do {
+ u16 *buff = (u16 *)req->buffer;
+
+ if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
mg_bad_rw_intr(host);
return;
}
- for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) {
- outw(*(u16 *)req->buffer,
- (unsigned long)host->dev_base +
- MG_BUFF_OFFSET + (j << 1));
- req->buffer += 2;
- }
- req->sector++;
- remains = --req->nr_sectors;
- --req->current_nr_sectors;
-
- if (req->current_nr_sectors <= 0) {
- MG_DBG("remain : %d sects\n", remains);
- end_request(req, 1);
- if (remains > 0)
- req = elv_next_request(host->breq);
- }
+ for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
+ outw(*buff++, (unsigned long)host->dev_base +
+ MG_BUFF_OFFSET + (j << 1));
outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
MG_REG_COMMAND);
- }
+ } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
}
static void mg_read_intr(struct mg_host *host)
{
+ struct request *req = host->req;
u32 i;
- struct request *req;
+ u16 *buff;
/* check status */
do {
i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
- if (i & MG_REG_STATUS_BIT_BUSY)
+ if (i & ATA_BUSY)
break;
if (!MG_READY_OK(i))
break;
- if (i & MG_REG_STATUS_BIT_DATA_REQ)
+ if (i & ATA_DRQ)
goto ok_to_read;
} while (0);
mg_dump_status("mg_read_intr", i, host);
@@ -427,60 +577,42 @@ static void mg_read_intr(struct mg_host *host)
ok_to_read:
/* get current segment of request */
- req = elv_next_request(host->breq);
+ buff = (u16 *)req->buffer;
/* read 1 sector */
- for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
- *(u16 *)req->buffer =
- inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
- (i << 1));
- req->buffer += 2;
- }
+ for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
+ *buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
+ (i << 1));
- /* manipulate request */
MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
- req->sector, req->nr_sectors - 1, req->buffer);
-
- req->sector++;
- req->errors = 0;
- i = --req->nr_sectors;
- --req->current_nr_sectors;
-
- /* let know if current segment done */
- if (req->current_nr_sectors <= 0)
- end_request(req, 1);
-
- /* set handler if read remains */
- if (i > 0) {
- host->mg_do_intr = mg_read_intr;
- mod_timer(&host->timer, jiffies + 3 * HZ);
- }
+ blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);
/* send read confirm */
outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
- /* goto next request */
- if (!i)
+ if (mg_end_request(host, 0, MG_SECTOR_SIZE)) {
+ /* set handler if read remains */
+ host->mg_do_intr = mg_read_intr;
+ mod_timer(&host->timer, jiffies + 3 * HZ);
+ } else /* goto next request */
mg_request(host->breq);
}
static void mg_write_intr(struct mg_host *host)
{
+ struct request *req = host->req;
u32 i, j;
u16 *buff;
- struct request *req;
-
- /* get current segment of request */
- req = elv_next_request(host->breq);
+ bool rem;
/* check status */
do {
i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
- if (i & MG_REG_STATUS_BIT_BUSY)
+ if (i & ATA_BUSY)
break;
if (!MG_READY_OK(i))
break;
- if ((req->nr_sectors <= 1) || (i & MG_REG_STATUS_BIT_DATA_REQ))
+ if ((blk_rq_sectors(req) <= 1) || (i & ATA_DRQ))
goto ok_to_write;
} while (0);
mg_dump_status("mg_write_intr", i, host);
@@ -489,18 +621,8 @@ static void mg_write_intr(struct mg_host *host)
return;
ok_to_write:
- /* manipulate request */
- req->sector++;
- i = --req->nr_sectors;
- --req->current_nr_sectors;
- req->buffer += MG_SECTOR_SIZE;
-
- /* let know if current segment or all done */
- if (!i || (req->bio && req->current_nr_sectors <= 0))
- end_request(req, 1);
-
- /* write 1 sector and set handler if remains */
- if (i > 0) {
+ if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) {
+ /* write 1 sector and set handler if remains */
buff = (u16 *)req->buffer;
for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
outw(*buff, (unsigned long)host->dev_base +
@@ -508,7 +630,7 @@ ok_to_write:
buff++;
}
MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
- req->sector, req->nr_sectors, req->buffer);
+ blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
host->mg_do_intr = mg_write_intr;
mod_timer(&host->timer, jiffies + 3 * HZ);
}
@@ -516,7 +638,7 @@ ok_to_write:
/* send write confirm */
outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
- if (!i)
+ if (!rem)
mg_request(host->breq);
}
@@ -524,49 +646,45 @@ void mg_times_out(unsigned long data)
{
struct mg_host *host = (struct mg_host *)data;
char *name;
- struct request *req;
spin_lock_irq(&host->lock);
- req = elv_next_request(host->breq);
- if (!req)
+ if (!host->req)
goto out_unlock;
host->mg_do_intr = NULL;
- name = req->rq_disk->disk_name;
+ name = host->req->rq_disk->disk_name;
printk(KERN_DEBUG "%s: timeout\n", name);
host->error = MG_ERR_TIMEOUT;
mg_bad_rw_intr(host);
- mg_request(host->breq);
out_unlock:
+ mg_request(host->breq);
spin_unlock_irq(&host->lock);
}
static void mg_request_poll(struct request_queue *q)
{
- struct request *req;
- struct mg_host *host;
+ struct mg_host *host = q->queuedata;
- while ((req = elv_next_request(q)) != NULL) {
- host = req->rq_disk->private_data;
- if (blk_fs_request(req)) {
- switch (rq_data_dir(req)) {
- case READ:
- mg_read(req);
- break;
- case WRITE:
- mg_write(req);
- break;
- default:
- printk(KERN_WARNING "%s:%d unknown command\n",
- __func__, __LINE__);
- end_request(req, 0);
+ while (1) {
+ if (!host->req) {
+ host->req = blk_fetch_request(q);
+ if (!host->req)
break;
- }
}
+
+ if (unlikely(!blk_fs_request(host->req))) {
+ mg_end_request_cur(host, -EIO);
+ continue;
+ }
+
+ if (rq_data_dir(host->req) == READ)
+ mg_read(host->req);
+ else
+ mg_write(host->req);
}
}
@@ -588,18 +706,15 @@ static unsigned int mg_issue_req(struct request *req,
break;
case WRITE:
/* TODO : handler */
- outb(MG_REG_CTRL_INTR_DISABLE,
- (unsigned long)host->dev_base +
- MG_REG_DRV_CTRL);
+ outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
!= MG_ERR_NONE) {
mg_bad_rw_intr(host);
return host->error;
}
del_timer(&host->timer);
- mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_WR_DRQ);
- outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
- MG_REG_DRV_CTRL);
+ mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ);
+ outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
if (host->error) {
mg_bad_rw_intr(host);
return host->error;
@@ -614,11 +729,6 @@ static unsigned int mg_issue_req(struct request *req,
outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
MG_REG_COMMAND);
break;
- default:
- printk(KERN_WARNING "%s:%d unknown command\n",
- __func__, __LINE__);
- end_request(req, 0);
- break;
}
return MG_ERR_NONE;
}
@@ -626,16 +736,17 @@ static unsigned int mg_issue_req(struct request *req,
/* This function also called from IRQ context */
static void mg_request(struct request_queue *q)
{
+ struct mg_host *host = q->queuedata;
struct request *req;
- struct mg_host *host;
u32 sect_num, sect_cnt;
while (1) {
- req = elv_next_request(q);
- if (!req)
- return;
-
- host = req->rq_disk->private_data;
+ if (!host->req) {
+ host->req = blk_fetch_request(q);
+ if (!host->req)
+ break;
+ }
+ req = host->req;
/* check unwanted request call */
if (host->mg_do_intr)
@@ -643,9 +754,9 @@ static void mg_request(struct request_queue *q)
del_timer(&host->timer);
- sect_num = req->sector;
+ sect_num = blk_rq_pos(req);
/* deal whole segments */
- sect_cnt = req->nr_sectors;
+ sect_cnt = blk_rq_sectors(req);
/* sanity check */
if (sect_num >= get_capacity(req->rq_disk) ||
@@ -655,12 +766,14 @@ static void mg_request(struct request_queue *q)
"%s: bad access: sector=%d, count=%d\n",
req->rq_disk->disk_name,
sect_num, sect_cnt);
- end_request(req, 0);
+ mg_end_request_cur(host, -EIO);
continue;
}
- if (!blk_fs_request(req))
- return;
+ if (unlikely(!blk_fs_request(req))) {
+ mg_end_request_cur(host, -EIO);
+ continue;
+ }
if (!mg_issue_req(req, host, sect_num, sect_cnt))
return;
@@ -690,9 +803,7 @@ static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
return -EIO;
if (!prv_data->use_polling)
- outb(MG_REG_CTRL_INTR_DISABLE,
- (unsigned long)host->dev_base +
- MG_REG_DRV_CTRL);
+ outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND);
/* wait until mflash deep sleep */
@@ -700,9 +811,7 @@ static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) {
if (!prv_data->use_polling)
- outb(MG_REG_CTRL_INTR_ENABLE,
- (unsigned long)host->dev_base +
- MG_REG_DRV_CTRL);
+ outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
return -EIO;
}
@@ -725,8 +834,7 @@ static int mg_resume(struct platform_device *plat_dev)
return -EIO;
if (!prv_data->use_polling)
- outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
- MG_REG_DRV_CTRL);
+ outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
return 0;
}
@@ -877,6 +985,7 @@ static int mg_probe(struct platform_device *plat_dev)
__func__, __LINE__);
goto probe_err_5;
}
+ host->breq->queuedata = host;
/* mflash is random device, thanx for the noop */
elevator_exit(host->breq->elevator);
@@ -887,7 +996,7 @@ static int mg_probe(struct platform_device *plat_dev)
goto probe_err_6;
}
blk_queue_max_sectors(host->breq, MG_MAX_SECTS);
- blk_queue_hardsect_size(host->breq, MG_SECTOR_SIZE);
+ blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE);
init_timer(&host->timer);
host->timer.function = mg_times_out;
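Besides the register and status constants moving to the <linux/ata.h> definitions, mg_disk now finds its per-device state through q->queuedata rather than req->rq_disk, which is why mg_probe() gains the host->breq->queuedata assignment. A sketch of that hookup with illustrative mydrv_* names:

#include <linux/blkdev.h>

struct mydrv_host {
        struct request_queue *queue;
        struct request *req;            /* request currently in flight */
};

static void mydrv_request_fn(struct request_queue *q)
{
        struct mydrv_host *host = q->queuedata;

        if (!host->req)
                host->req = blk_fetch_request(q);       /* may be NULL */
        /* ... issue host->req to the hardware ... */
}

static int mydrv_setup_queue(struct mydrv_host *host, spinlock_t *lock)
{
        host->queue = blk_init_queue(mydrv_request_fn, lock);
        if (!host->queue)
                return -ENOMEM;

        host->queue->queuedata = host;  /* what mg_probe() now does */
        return 0;
}
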
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 4d6de4f15cc..5d23ffad7c7 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -110,7 +110,7 @@ static void nbd_end_request(struct request *req)
req, error ? "failed" : "done");
spin_lock_irqsave(q->queue_lock, flags);
- __blk_end_request(req, error, req->nr_sectors << 9);
+ __blk_end_request_all(req, error);
spin_unlock_irqrestore(q->queue_lock, flags);
}
@@ -231,19 +231,19 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
{
int result, flags;
struct nbd_request request;
- unsigned long size = req->nr_sectors << 9;
+ unsigned long size = blk_rq_bytes(req);
request.magic = htonl(NBD_REQUEST_MAGIC);
request.type = htonl(nbd_cmd(req));
- request.from = cpu_to_be64((u64) req->sector << 9);
+ request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
request.len = htonl(size);
memcpy(request.handle, &req, sizeof(req));
- dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n",
+ dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
lo->disk->disk_name, req,
nbdcmd_to_ascii(nbd_cmd(req)),
- (unsigned long long)req->sector << 9,
- req->nr_sectors << 9);
+ (unsigned long long)blk_rq_pos(req) << 9,
+ blk_rq_bytes(req));
result = sock_xmit(lo, 1, &request, sizeof(request),
(nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
if (result <= 0) {
@@ -533,11 +533,9 @@ static void do_nbd_request(struct request_queue *q)
{
struct request *req;
- while ((req = elv_next_request(q)) != NULL) {
+ while ((req = blk_fetch_request(q)) != NULL) {
struct nbd_device *lo;
- blkdev_dequeue_request(req);
-
spin_unlock_irq(q->queue_lock);
dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
@@ -580,13 +578,6 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
blk_rq_init(NULL, &sreq);
sreq.cmd_type = REQ_TYPE_SPECIAL;
nbd_cmd(&sreq) = NBD_CMD_DISC;
- /*
- * Set these to sane values in case server implementation
- * fails to check the request type first and also to keep
- * debugging output cleaner.
- */
- sreq.sector = 0;
- sreq.nr_sectors = 0;
if (!lo->sock)
return -EINVAL;
nbd_send_req(lo, &sreq);
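nbd now completes an entire request in one call: __blk_end_request_all(req, error) replaces the old __blk_end_request(req, error, req->nr_sectors << 9), and like the rest of the __blk_end_request family it must run under the queue lock. A minimal sketch (mydrv_complete() is an illustrative name):

#include <linux/blkdev.h>

static void mydrv_complete(struct request_queue *q, struct request *req,
                           int error)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __blk_end_request_all(req, error);      /* error is 0 or a -Exxx code */
        spin_unlock_irqrestore(q->queue_lock, flags);
}
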
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index e91d4b4b014..911dfd98d81 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -719,32 +719,37 @@ static void do_pcd_request(struct request_queue * q)
if (pcd_busy)
return;
while (1) {
- pcd_req = elv_next_request(q);
- if (!pcd_req)
- return;
+ if (!pcd_req) {
+ pcd_req = blk_fetch_request(q);
+ if (!pcd_req)
+ return;
+ }
if (rq_data_dir(pcd_req) == READ) {
struct pcd_unit *cd = pcd_req->rq_disk->private_data;
if (cd != pcd_current)
pcd_bufblk = -1;
pcd_current = cd;
- pcd_sector = pcd_req->sector;
- pcd_count = pcd_req->current_nr_sectors;
+ pcd_sector = blk_rq_pos(pcd_req);
+ pcd_count = blk_rq_cur_sectors(pcd_req);
pcd_buf = pcd_req->buffer;
pcd_busy = 1;
ps_set_intr(do_pcd_read, NULL, 0, nice);
return;
- } else
- end_request(pcd_req, 0);
+ } else {
+ __blk_end_request_all(pcd_req, -EIO);
+ pcd_req = NULL;
+ }
}
}
-static inline void next_request(int success)
+static inline void next_request(int err)
{
unsigned long saved_flags;
spin_lock_irqsave(&pcd_lock, saved_flags);
- end_request(pcd_req, success);
+ if (!__blk_end_request_cur(pcd_req, err))
+ pcd_req = NULL;
pcd_busy = 0;
do_pcd_request(pcd_queue);
spin_unlock_irqrestore(&pcd_lock, saved_flags);
@@ -781,7 +786,7 @@ static void pcd_start(void)
if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) {
pcd_bufblk = -1;
- next_request(0);
+ next_request(-EIO);
return;
}
@@ -796,7 +801,7 @@ static void do_pcd_read(void)
pcd_retries = 0;
pcd_transfer();
if (!pcd_count) {
- next_request(1);
+ next_request(0);
return;
}
@@ -815,7 +820,7 @@ static void do_pcd_read_drq(void)
return;
}
pcd_bufblk = -1;
- next_request(0);
+ next_request(-EIO);
return;
}
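The paride drivers keep one request cached (pcd_req, pd_req, pf_req) and step through it chunk by chunk with __blk_end_request_cur(), which completes just the current segment and reports whether anything is left. The recurring pattern, sketched with illustrative names and assuming the queue lock is held by the caller:

#include <linux/blkdev.h>

static struct request *mydrv_req;       /* cached request, like pcd_req */

static void mydrv_advance(struct request_queue *q, int err)
{
        /* Finish the current chunk; false means the request is fully done. */
        if (!__blk_end_request_cur(mydrv_req, err))
                mydrv_req = blk_fetch_request(q);       /* may be NULL */
}
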
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 9299455b0af..bf5955b3d87 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -410,10 +410,12 @@ static void run_fsm(void)
pd_claimed = 0;
phase = NULL;
spin_lock_irqsave(&pd_lock, saved_flags);
- end_request(pd_req, res);
- pd_req = elv_next_request(pd_queue);
- if (!pd_req)
- stop = 1;
+ if (!__blk_end_request_cur(pd_req,
+ res == Ok ? 0 : -EIO)) {
+ pd_req = blk_fetch_request(pd_queue);
+ if (!pd_req)
+ stop = 1;
+ }
spin_unlock_irqrestore(&pd_lock, saved_flags);
if (stop)
return;
@@ -443,11 +445,11 @@ static enum action do_pd_io_start(void)
pd_cmd = rq_data_dir(pd_req);
if (pd_cmd == READ || pd_cmd == WRITE) {
- pd_block = pd_req->sector;
- pd_count = pd_req->current_nr_sectors;
+ pd_block = blk_rq_pos(pd_req);
+ pd_count = blk_rq_cur_sectors(pd_req);
if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
return Fail;
- pd_run = pd_req->nr_sectors;
+ pd_run = blk_rq_sectors(pd_req);
pd_buf = pd_req->buffer;
pd_retries = 0;
if (pd_cmd == READ)
@@ -477,8 +479,8 @@ static int pd_next_buf(void)
if (pd_count)
return 0;
spin_lock_irqsave(&pd_lock, saved_flags);
- end_request(pd_req, 1);
- pd_count = pd_req->current_nr_sectors;
+ __blk_end_request_cur(pd_req, 0);
+ pd_count = blk_rq_cur_sectors(pd_req);
pd_buf = pd_req->buffer;
spin_unlock_irqrestore(&pd_lock, saved_flags);
return 0;
@@ -702,7 +704,7 @@ static void do_pd_request(struct request_queue * q)
{
if (pd_req)
return;
- pd_req = elv_next_request(q);
+ pd_req = blk_fetch_request(q);
if (!pd_req)
return;
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index bef3b997ba3..68a90834e99 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -750,12 +750,10 @@ static int pf_ready(void)
static struct request_queue *pf_queue;
-static void pf_end_request(int uptodate)
+static void pf_end_request(int err)
{
- if (pf_req) {
- end_request(pf_req, uptodate);
+ if (pf_req && !__blk_end_request_cur(pf_req, err))
pf_req = NULL;
- }
}
static void do_pf_request(struct request_queue * q)
@@ -763,17 +761,19 @@ static void do_pf_request(struct request_queue * q)
if (pf_busy)
return;
repeat:
- pf_req = elv_next_request(q);
- if (!pf_req)
- return;
+ if (!pf_req) {
+ pf_req = blk_fetch_request(q);
+ if (!pf_req)
+ return;
+ }
pf_current = pf_req->rq_disk->private_data;
- pf_block = pf_req->sector;
- pf_run = pf_req->nr_sectors;
- pf_count = pf_req->current_nr_sectors;
+ pf_block = blk_rq_pos(pf_req);
+ pf_run = blk_rq_sectors(pf_req);
+ pf_count = blk_rq_cur_sectors(pf_req);
if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
- pf_end_request(0);
+ pf_end_request(-EIO);
goto repeat;
}
@@ -788,7 +788,7 @@ repeat:
pi_do_claimed(pf_current->pi, do_pf_write);
else {
pf_busy = 0;
- pf_end_request(0);
+ pf_end_request(-EIO);
goto repeat;
}
}
@@ -805,23 +805,22 @@ static int pf_next_buf(void)
return 1;
if (!pf_count) {
spin_lock_irqsave(&pf_spin_lock, saved_flags);
- pf_end_request(1);
- pf_req = elv_next_request(pf_queue);
+ pf_end_request(0);
spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
if (!pf_req)
return 1;
- pf_count = pf_req->current_nr_sectors;
+ pf_count = blk_rq_cur_sectors(pf_req);
pf_buf = pf_req->buffer;
}
return 0;
}
-static inline void next_request(int success)
+static inline void next_request(int err)
{
unsigned long saved_flags;
spin_lock_irqsave(&pf_spin_lock, saved_flags);
- pf_end_request(success);
+ pf_end_request(err);
pf_busy = 0;
do_pf_request(pf_queue);
spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
@@ -844,7 +843,7 @@ static void do_pf_read_start(void)
pi_do_claimed(pf_current->pi, do_pf_read_start);
return;
}
- next_request(0);
+ next_request(-EIO);
return;
}
pf_mask = STAT_DRQ;
@@ -863,7 +862,7 @@ static void do_pf_read_drq(void)
pi_do_claimed(pf_current->pi, do_pf_read_start);
return;
}
- next_request(0);
+ next_request(-EIO);
return;
}
pi_read_block(pf_current->pi, pf_buf, 512);
@@ -871,7 +870,7 @@ static void do_pf_read_drq(void)
break;
}
pi_disconnect(pf_current->pi);
- next_request(1);
+ next_request(0);
}
static void do_pf_write(void)
@@ -890,7 +889,7 @@ static void do_pf_write_start(void)
pi_do_claimed(pf_current->pi, do_pf_write_start);
return;
}
- next_request(0);
+ next_request(-EIO);
return;
}
@@ -903,7 +902,7 @@ static void do_pf_write_start(void)
pi_do_claimed(pf_current->pi, do_pf_write_start);
return;
}
- next_request(0);
+ next_request(-EIO);
return;
}
pi_write_block(pf_current->pi, pf_buf, 512);
@@ -923,11 +922,11 @@ static void do_pf_write_done(void)
pi_do_claimed(pf_current->pi, do_pf_write_start);
return;
}
- next_request(0);
+ next_request(-EIO);
return;
}
pi_disconnect(pf_current->pi);
- next_request(1);
+ next_request(0);
}
static int __init pf_init(void)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index dc7a8c352da..d57f1175948 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -991,13 +991,15 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
*/
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
- if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
+ if ((pd->settings.size << 9) / CD_FRAMESIZE
+ <= queue_max_phys_segments(q)) {
/*
* The cdrom device can handle one segment/frame
*/
clear_bit(PACKET_MERGE_SEGS, &pd->flags);
return 0;
- } else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) {
+ } else if ((pd->settings.size << 9) / PAGE_SIZE
+ <= queue_max_phys_segments(q)) {
/*
* We can handle this case at the expense of some extra memory
* copies during write operations
@@ -2657,7 +2659,7 @@ static void pkt_init_queue(struct pktcdvd_device *pd)
struct request_queue *q = pd->disk->queue;
blk_queue_make_request(q, pkt_make_request);
- blk_queue_hardsect_size(q, CD_FRAMESIZE);
+ blk_queue_logical_block_size(q, CD_FRAMESIZE);
blk_queue_max_sectors(q, PACKET_MAX_SECTORS);
blk_queue_merge_bvec(q, pkt_merge_bvec);
q->queuedata = pd;
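pktcdvd stops reading queue limits out of the request_queue fields directly and uses the accessor and setter calls seen above. A short sketch of the same calls (the limit values are illustrative):

#include <linux/blkdev.h>

static void mydrv_set_limits(struct request_queue *q)
{
        blk_queue_logical_block_size(q, 512);   /* was blk_queue_hardsect_size() */
        blk_queue_max_sectors(q, 256);

        if (queue_max_phys_segments(q) < 1)
                printk(KERN_WARNING "queue reports no usable segments\n");
}
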
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index bccc42bb921..aaeeb544228 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -134,13 +134,12 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
rq_for_each_segment(bv, req, iter)
n++;
dev_dbg(&dev->sbd.core,
- "%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n",
- __func__, __LINE__, op, n, req->nr_sectors,
- req->hard_nr_sectors);
+ "%s:%u: %s req has %u bvecs for %u sectors\n",
+ __func__, __LINE__, op, n, blk_rq_sectors(req));
#endif
- start_sector = req->sector * priv->blocking_factor;
- sectors = req->nr_sectors * priv->blocking_factor;
+ start_sector = blk_rq_pos(req) * priv->blocking_factor;
+ sectors = blk_rq_sectors(req) * priv->blocking_factor;
dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n",
__func__, __LINE__, op, sectors, start_sector);
@@ -158,7 +157,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
if (res) {
dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
__LINE__, op, res);
- end_request(req, 0);
+ __blk_end_request_all(req, -EIO);
return 0;
}
@@ -180,7 +179,7 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
if (res) {
dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
__func__, __LINE__, res);
- end_request(req, 0);
+ __blk_end_request_all(req, -EIO);
return 0;
}
@@ -195,7 +194,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
- while ((req = elv_next_request(q))) {
+ while ((req = blk_fetch_request(q))) {
if (blk_fs_request(req)) {
if (ps3disk_submit_request_sg(dev, req))
break;
@@ -205,7 +204,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
break;
} else {
blk_dump_rq_flags(req, DEVICE_NAME " bad request");
- end_request(req, 0);
+ __blk_end_request_all(req, -EIO);
continue;
}
}
@@ -231,7 +230,6 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
struct request *req;
int res, read, error;
u64 tag, status;
- unsigned long num_sectors;
const char *op;
res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
@@ -261,11 +259,9 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
req->cmd[0] == REQ_LB_OP_FLUSH) {
read = 0;
- num_sectors = req->hard_cur_sectors;
op = "flush";
} else {
read = !rq_data_dir(req);
- num_sectors = req->nr_sectors;
op = read ? "read" : "write";
}
if (status) {
@@ -281,7 +277,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
}
spin_lock(&priv->lock);
- __blk_end_request(req, error, num_sectors << 9);
+ __blk_end_request_all(req, error);
priv->req = NULL;
ps3disk_do_request(dev, priv->queue);
spin_unlock(&priv->lock);
@@ -481,7 +477,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
blk_queue_max_sectors(queue, dev->bounce_size >> 9);
blk_queue_segment_boundary(queue, -1UL);
blk_queue_dma_alignment(queue, dev->blk_size-1);
- blk_queue_hardsect_size(queue, dev->blk_size);
+ blk_queue_logical_block_size(queue, dev->blk_size);
blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
ps3disk_prepare_flush);
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 5861e33efe6..cbfd9c0aef0 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -212,11 +212,6 @@ static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
}
-static void vdc_end_request(struct request *req, int error, int num_sectors)
-{
- __blk_end_request(req, error, num_sectors << 9);
-}
-
static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
unsigned int index)
{
@@ -239,7 +234,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
rqe->req = NULL;
- vdc_end_request(req, (desc->status ? -EIO : 0), desc->size >> 9);
+ __blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
if (blk_queue_stopped(port->disk->queue))
blk_start_queue(port->disk->queue);
@@ -421,7 +416,7 @@ static int __send_request(struct request *req)
desc->slice = 0;
}
desc->status = ~0;
- desc->offset = (req->sector << 9) / port->vdisk_block_size;
+ desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
desc->size = len;
desc->ncookies = err;
@@ -446,14 +441,13 @@ out:
static void do_vdc_request(struct request_queue *q)
{
while (1) {
- struct request *req = elv_next_request(q);
+ struct request *req = blk_fetch_request(q);
if (!req)
break;
- blkdev_dequeue_request(req);
if (__send_request(req) < 0)
- vdc_end_request(req, -EIO, req->hard_nr_sectors);
+ __blk_end_request_all(req, -EIO);
}
}
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index d22cc385693..cf7877fb8a7 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -514,7 +514,7 @@ static int floppy_read_sectors(struct floppy_state *fs,
ret = swim_read_sector(fs, side, track, sector,
buffer);
if (try-- == 0)
- return -1;
+ return -EIO;
} while (ret != 512);
buffer += ret;
@@ -528,45 +528,31 @@ static void redo_fd_request(struct request_queue *q)
struct request *req;
struct floppy_state *fs;
- while ((req = elv_next_request(q))) {
+ req = blk_fetch_request(q);
+ while (req) {
+ int err = -EIO;
fs = req->rq_disk->private_data;
- if (req->sector < 0 || req->sector >= fs->total_secs) {
- end_request(req, 0);
- continue;
- }
- if (req->current_nr_sectors == 0) {
- end_request(req, 1);
- continue;
- }
- if (!fs->disk_in) {
- end_request(req, 0);
- continue;
- }
- if (rq_data_dir(req) == WRITE) {
- if (fs->write_protected) {
- end_request(req, 0);
- continue;
- }
- }
+ if (blk_rq_pos(req) >= fs->total_secs)
+ goto done;
+ if (!fs->disk_in)
+ goto done;
+ if (rq_data_dir(req) == WRITE && fs->write_protected)
+ goto done;
+
switch (rq_data_dir(req)) {
case WRITE:
/* NOT IMPLEMENTED */
- end_request(req, 0);
break;
case READ:
- if (floppy_read_sectors(fs, req->sector,
- req->current_nr_sectors,
- req->buffer)) {
- end_request(req, 0);
- continue;
- }
- req->nr_sectors -= req->current_nr_sectors;
- req->sector += req->current_nr_sectors;
- req->buffer += req->current_nr_sectors * 512;
- end_request(req, 1);
+ err = floppy_read_sectors(fs, blk_rq_pos(req),
+ blk_rq_cur_sectors(req),
+ req->buffer);
break;
}
+ done:
+ if (!__blk_end_request_cur(req, err))
+ req = blk_fetch_request(q);
}
}
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 612965307ba..80df93e3cdd 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -251,6 +251,20 @@ static int floppy_release(struct gendisk *disk, fmode_t mode);
static int floppy_check_change(struct gendisk *disk);
static int floppy_revalidate(struct gendisk *disk);
+static bool swim3_end_request(int err, unsigned int nr_bytes)
+{
+ if (__blk_end_request(fd_req, err, nr_bytes))
+ return true;
+
+ fd_req = NULL;
+ return false;
+}
+
+static bool swim3_end_request_cur(int err)
+{
+ return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
+}
+
static void swim3_select(struct floppy_state *fs, int sel)
{
struct swim3 __iomem *sw = fs->swim3;
@@ -310,25 +324,27 @@ static void start_request(struct floppy_state *fs)
wake_up(&fs->wait);
return;
}
- while (fs->state == idle && (req = elv_next_request(swim3_queue))) {
+ while (fs->state == idle) {
+ if (!fd_req) {
+ fd_req = blk_fetch_request(swim3_queue);
+ if (!fd_req)
+ break;
+ }
+ req = fd_req;
#if 0
- printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n",
+ printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
req->rq_disk->disk_name, req->cmd,
- (long)req->sector, req->nr_sectors, req->buffer);
- printk(" errors=%d current_nr_sectors=%ld\n",
- req->errors, req->current_nr_sectors);
+ (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
+ printk(" errors=%d current_nr_sectors=%u\n",
+ req->errors, blk_rq_cur_sectors(req));
#endif
- if (req->sector < 0 || req->sector >= fs->total_secs) {
- end_request(req, 0);
- continue;
- }
- if (req->current_nr_sectors == 0) {
- end_request(req, 1);
+ if (blk_rq_pos(req) >= fs->total_secs) {
+ swim3_end_request_cur(-EIO);
continue;
}
if (fs->ejected) {
- end_request(req, 0);
+ swim3_end_request_cur(-EIO);
continue;
}
@@ -336,18 +352,19 @@ static void start_request(struct floppy_state *fs)
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot) {
- end_request(req, 0);
+ swim3_end_request_cur(-EIO);
continue;
}
}
- /* Do not remove the cast. req->sector is now a sector_t and
- * can be 64 bits, but it will never go past 32 bits for this
- * driver anyway, so we can safely cast it down and not have
- * to do a 64/32 division
+ /* Do not remove the cast. blk_rq_pos(req) is now a
+ * sector_t and can be 64 bits, but it will never go
+ * past 32 bits for this driver anyway, so we can
+ * safely cast it down and not have to do a 64/32
+ * division
*/
- fs->req_cyl = ((long)req->sector) / fs->secpercyl;
- x = ((long)req->sector) % fs->secpercyl;
+ fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
+ x = ((long)blk_rq_pos(req)) % fs->secpercyl;
fs->head = x / fs->secpertrack;
fs->req_sector = x % fs->secpertrack + 1;
fd_req = req;
@@ -424,7 +441,7 @@ static inline void setup_transfer(struct floppy_state *fs)
struct dbdma_cmd *cp = fs->dma_cmd;
struct dbdma_regs __iomem *dr = fs->dma;
- if (fd_req->current_nr_sectors <= 0) {
+ if (blk_rq_cur_sectors(fd_req) <= 0) {
printk(KERN_ERR "swim3: transfer 0 sectors?\n");
return;
}
@@ -432,8 +449,8 @@ static inline void setup_transfer(struct floppy_state *fs)
n = 1;
else {
n = fs->secpertrack - fs->req_sector + 1;
- if (n > fd_req->current_nr_sectors)
- n = fd_req->current_nr_sectors;
+ if (n > blk_rq_cur_sectors(fd_req))
+ n = blk_rq_cur_sectors(fd_req);
}
fs->scount = n;
swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
@@ -508,7 +525,7 @@ static void act(struct floppy_state *fs)
case do_transfer:
if (fs->cur_cyl != fs->req_cyl) {
if (fs->retries > 5) {
- end_request(fd_req, 0);
+ swim3_end_request_cur(-EIO);
fs->state = idle;
return;
}
@@ -540,7 +557,7 @@ static void scan_timeout(unsigned long data)
out_8(&sw->intr_enable, 0);
fs->cur_cyl = -1;
if (fs->retries > 5) {
- end_request(fd_req, 0);
+ swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
} else {
@@ -559,7 +576,7 @@ static void seek_timeout(unsigned long data)
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
printk(KERN_ERR "swim3: seek timeout\n");
- end_request(fd_req, 0);
+ swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
}
@@ -583,7 +600,7 @@ static void settle_timeout(unsigned long data)
return;
}
printk(KERN_ERR "swim3: seek settle timeout\n");
- end_request(fd_req, 0);
+ swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
}
@@ -593,8 +610,6 @@ static void xfer_timeout(unsigned long data)
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_regs __iomem *dr = fs->dma;
- struct dbdma_cmd *cp = fs->dma_cmd;
- unsigned long s;
int n;
fs->timeout_pending = 0;
@@ -605,17 +620,10 @@ static void xfer_timeout(unsigned long data)
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
out_8(&sw->select, RELAX);
- if (rq_data_dir(fd_req) == WRITE)
- ++cp;
- if (ld_le16(&cp->xfer_status) != 0)
- s = fs->scount - ((ld_le16(&cp->res_count) + 511) >> 9);
- else
- s = 0;
- fd_req->sector += s;
- fd_req->current_nr_sectors -= s;
printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
- (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector);
- end_request(fd_req, 0);
+ (rq_data_dir(fd_req)==WRITE? "writ": "read"),
+ (long)blk_rq_pos(fd_req));
+ swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
}
@@ -646,7 +654,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
fs->cur_cyl = -1;
if (fs->retries > 5) {
- end_request(fd_req, 0);
+ swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
} else {
@@ -719,9 +727,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
if (intr & ERROR_INTR) {
n = fs->scount - 1 - resid / 512;
if (n > 0) {
- fd_req->sector += n;
- fd_req->current_nr_sectors -= n;
- fd_req->buffer += n * 512;
+ blk_update_request(fd_req, 0, n << 9);
fs->req_sector += n;
}
if (fs->retries < 5) {
@@ -730,8 +736,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
} else {
printk("swim3: error %sing block %ld (err=%x)\n",
rq_data_dir(fd_req) == WRITE? "writ": "read",
- (long)fd_req->sector, err);
- end_request(fd_req, 0);
+ (long)blk_rq_pos(fd_req), err);
+ swim3_end_request_cur(-EIO);
fs->state = idle;
}
} else {
@@ -740,18 +746,12 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n",
fs->state, rq_data_dir(fd_req), intr, err);
- end_request(fd_req, 0);
+ swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
break;
}
- fd_req->sector += fs->scount;
- fd_req->current_nr_sectors -= fs->scount;
- fd_req->buffer += fs->scount * 512;
- if (fd_req->current_nr_sectors <= 0) {
- end_request(fd_req, 1);
- fs->state = idle;
- } else {
+ if (swim3_end_request(0, fs->scount << 9)) {
fs->req_sector += fs->scount;
if (fs->req_sector > fs->secpertrack) {
fs->req_sector -= fs->secpertrack;
@@ -761,7 +761,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
}
}
act(fs);
- }
+ } else
+ fs->state = idle;
}
if (fs->state == idle)
start_request(fs);
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index ff0448e4bf0..da403b6a7f4 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -749,8 +749,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
struct request *req = crq->rq;
int rc;
- rc = __blk_end_request(req, error, blk_rq_bytes(req));
- assert(rc == 0);
+ __blk_end_request_all(req, error);
rc = carm_put_request(host, crq);
assert(rc == 0);
@@ -811,12 +810,10 @@ static void carm_oob_rq_fn(struct request_queue *q)
while (1) {
DPRINTK("get req\n");
- rq = elv_next_request(q);
+ rq = blk_fetch_request(q);
if (!rq)
break;
- blkdev_dequeue_request(rq);
-
crq = rq->special;
assert(crq != NULL);
assert(crq->rq == rq);
@@ -847,7 +844,7 @@ static void carm_rq_fn(struct request_queue *q)
queue_one_request:
VPRINTK("get req\n");
- rq = elv_next_request(q);
+ rq = blk_peek_request(q);
if (!rq)
return;
@@ -858,7 +855,7 @@ queue_one_request:
}
crq->rq = rq;
- blkdev_dequeue_request(rq);
+ blk_start_request(rq);
if (rq_data_dir(rq) == WRITE) {
writing = 1;
@@ -904,10 +901,10 @@ queue_one_request:
msg->sg_count = n_elem;
msg->sg_type = SGT_32BIT;
msg->handle = cpu_to_le32(TAG_ENCODE(crq->tag));
- msg->lba = cpu_to_le32(rq->sector & 0xffffffff);
- tmp = (rq->sector >> 16) >> 16;
+ msg->lba = cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
+ tmp = (blk_rq_pos(rq) >> 16) >> 16;
msg->lba_high = cpu_to_le16( (u16) tmp );
- msg->lba_count = cpu_to_le16(rq->nr_sectors);
+ msg->lba_count = cpu_to_le16(blk_rq_sectors(rq));
msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
for (i = 0; i < n_elem; i++) {
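
For reference, a standalone (userspace) sketch of how the sx8 hunk above packs the request's starting sector into the message LBA fields once blk_rq_pos() is used; struct fake_msg and fill_lba() are illustrative stand-ins, not the driver's own types:

#include <stdint.h>
#include <stdio.h>

struct fake_msg {                      /* stand-in for the message layout */
	uint32_t lba;                  /* low 32 bits of the start sector */
	uint16_t lba_high;             /* bits 32..47 of the start sector */
	uint16_t lba_count;            /* transfer length in sectors */
};

static void fill_lba(struct fake_msg *m, uint64_t pos, unsigned int sectors)
{
	m->lba = (uint32_t)(pos & 0xffffffff);
	/* two 16-bit shifts, mirroring the driver's idiom, so the
	 * expression stays well-defined even if the sector type is
	 * only 32 bits wide */
	m->lba_high = (uint16_t)((pos >> 16) >> 16);
	m->lba_count = (uint16_t)sectors;
}

int main(void)
{
	struct fake_msg m;

	fill_lba(&m, 0x123456789aULL, 8);
	printf("lba=%08x high=%04x count=%u\n", m.lba, m.lba_high, m.lba_count);
	return 0;
}
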
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 689cd27ac89..cc54473b8e7 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -360,8 +360,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
-static void ub_end_rq(struct request *rq, unsigned int status,
- unsigned int cmd_len);
+static void ub_end_rq(struct request *rq, unsigned int status);
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
struct ub_request *urq, struct ub_scsi_cmd *cmd);
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
@@ -627,7 +626,7 @@ static void ub_request_fn(struct request_queue *q)
struct ub_lun *lun = q->queuedata;
struct request *rq;
- while ((rq = elv_next_request(q)) != NULL) {
+ while ((rq = blk_peek_request(q)) != NULL) {
if (ub_request_fn_1(lun, rq) != 0) {
blk_stop_queue(q);
break;
@@ -643,14 +642,14 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
int n_elem;
if (atomic_read(&sc->poison)) {
- blkdev_dequeue_request(rq);
- ub_end_rq(rq, DID_NO_CONNECT << 16, blk_rq_bytes(rq));
+ blk_start_request(rq);
+ ub_end_rq(rq, DID_NO_CONNECT << 16);
return 0;
}
if (lun->changed && !blk_pc_request(rq)) {
- blkdev_dequeue_request(rq);
- ub_end_rq(rq, SAM_STAT_CHECK_CONDITION, blk_rq_bytes(rq));
+ blk_start_request(rq);
+ ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
return 0;
}
@@ -660,7 +659,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
return -1;
memset(cmd, 0, sizeof(struct ub_scsi_cmd));
- blkdev_dequeue_request(rq);
+ blk_start_request(rq);
urq = &lun->urq;
memset(urq, 0, sizeof(struct ub_request));
@@ -702,7 +701,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
drop:
ub_put_cmd(lun, cmd);
- ub_end_rq(rq, DID_ERROR << 16, blk_rq_bytes(rq));
+ ub_end_rq(rq, DID_ERROR << 16);
return 0;
}
@@ -723,11 +722,11 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
/*
* build the command
*
- * The call to blk_queue_hardsect_size() guarantees that request
+ * The call to blk_queue_logical_block_size() guarantees that request
* is aligned, but it is given in terms of 512 byte units, always.
*/
- block = rq->sector >> lun->capacity.bshift;
- nblks = rq->nr_sectors >> lun->capacity.bshift;
+ block = blk_rq_pos(rq) >> lun->capacity.bshift;
+ nblks = blk_rq_sectors(rq) >> lun->capacity.bshift;
cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
/* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
@@ -739,7 +738,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
cmd->cdb[8] = nblks;
cmd->cdb_len = 10;
- cmd->len = rq->nr_sectors * 512;
+ cmd->len = blk_rq_bytes(rq);
}
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
@@ -747,7 +746,7 @@ static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
{
struct request *rq = urq->rq;
- if (rq->data_len == 0) {
+ if (blk_rq_bytes(rq) == 0) {
cmd->dir = UB_DIR_NONE;
} else {
if (rq_data_dir(rq) == WRITE)
@@ -762,7 +761,7 @@ static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
cmd->cdb_len = rq->cmd_len;
- cmd->len = rq->data_len;
+ cmd->len = blk_rq_bytes(rq);
/*
* To reapply this to every URB is not as incorrect as it looks.
@@ -777,16 +776,15 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
struct ub_request *urq = cmd->back;
struct request *rq;
unsigned int scsi_status;
- unsigned int cmd_len;
rq = urq->rq;
if (cmd->error == 0) {
if (blk_pc_request(rq)) {
- if (cmd->act_len >= rq->data_len)
- rq->data_len = 0;
+ if (cmd->act_len >= rq->resid_len)
+ rq->resid_len = 0;
else
- rq->data_len -= cmd->act_len;
+ rq->resid_len -= cmd->act_len;
scsi_status = 0;
} else {
if (cmd->act_len != cmd->len) {
@@ -818,17 +816,14 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
urq->rq = NULL;
- cmd_len = cmd->len;
ub_put_cmd(lun, cmd);
- ub_end_rq(rq, scsi_status, cmd_len);
+ ub_end_rq(rq, scsi_status);
blk_start_queue(lun->disk->queue);
}
-static void ub_end_rq(struct request *rq, unsigned int scsi_status,
- unsigned int cmd_len)
+static void ub_end_rq(struct request *rq, unsigned int scsi_status)
{
int error;
- long rqlen;
if (scsi_status == 0) {
error = 0;
@@ -836,12 +831,7 @@ static void ub_end_rq(struct request *rq, unsigned int scsi_status,
error = -EIO;
rq->errors = scsi_status;
}
- rqlen = blk_rq_bytes(rq); /* Oddly enough, this is the residue. */
- if (__blk_end_request(rq, error, cmd_len)) {
- printk(KERN_WARNING DRV_NAME
- ": __blk_end_request blew, %s-cmd total %u rqlen %ld\n",
- blk_pc_request(rq)? "pc": "fs", cmd_len, rqlen);
- }
+ __blk_end_request_all(rq, error);
}
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
@@ -1759,7 +1749,7 @@ static int ub_bd_revalidate(struct gendisk *disk)
ub_revalidate(lun->udev, lun);
/* XXX Support sector size switching like in sr.c */
- blk_queue_hardsect_size(disk->queue, lun->capacity.bsize);
+ blk_queue_logical_block_size(disk->queue, lun->capacity.bsize);
set_capacity(disk, lun->capacity.nsec);
// set_disk_ro(sdkp->disk, lun->readonly);
@@ -2334,7 +2324,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */
blk_queue_max_sectors(q, UB_MAX_SECTORS);
- blk_queue_hardsect_size(q, lun->capacity.bsize);
+ blk_queue_logical_block_size(q, lun->capacity.bsize);
lun->disk = disk;
q->queuedata = lun;
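
As a rough illustration (not driver code) of the unit conversion in ub_cmd_build_block() above: the block layer hands out positions and lengths in 512-byte sectors, and the per-LUN shift turns them into device blocks. The values and names below are made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pos_512 = 2048;          /* start, in 512-byte sectors (as from blk_rq_pos) */
	unsigned int nsect_512 = 16;      /* length, in 512-byte sectors (as from blk_rq_sectors) */
	unsigned int bshift = 2;          /* device block size 2048 = 512 << 2 */

	uint64_t block = pos_512 >> bshift;        /* first device block: 512 */
	unsigned int nblks = nsect_512 >> bshift;  /* device blocks to move: 4 */

	printf("block=%llu nblks=%u\n", (unsigned long long)block, nblks);
	return 0;
}
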
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index ecccf65dce2..390d69bb7c4 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -252,7 +252,7 @@ static int send_request(struct request *req)
struct viodasd_device *d;
unsigned long flags;
- start = (u64)req->sector << 9;
+ start = (u64)blk_rq_pos(req) << 9;
if (rq_data_dir(req) == READ) {
direction = DMA_FROM_DEVICE;
@@ -361,19 +361,17 @@ static void do_viodasd_request(struct request_queue *q)
* back later.
*/
while (num_req_outstanding < VIOMAXREQ) {
- req = elv_next_request(q);
+ req = blk_fetch_request(q);
if (req == NULL)
return;
- /* dequeue the current request from the queue */
- blkdev_dequeue_request(req);
/* check that request contains a valid command */
if (!blk_fs_request(req)) {
- viodasd_end_request(req, -EIO, req->hard_nr_sectors);
+ viodasd_end_request(req, -EIO, blk_rq_sectors(req));
continue;
}
/* Try sending the request */
if (send_request(req) != 0)
- viodasd_end_request(req, -EIO, req->hard_nr_sectors);
+ viodasd_end_request(req, -EIO, blk_rq_sectors(req));
}
}
@@ -590,7 +588,7 @@ static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n",
event->xRc, bevent->sub_result, err->msg);
- num_sect = req->hard_nr_sectors;
+ num_sect = blk_rq_sectors(req);
}
qlock = req->q->queue_lock;
spin_lock_irqsave(qlock, irq_flags);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 5d34764c8a8..c0facaa55cf 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -37,6 +37,7 @@ struct virtblk_req
struct list_head list;
struct request *req;
struct virtio_blk_outhdr out_hdr;
+ struct virtio_scsi_inhdr in_hdr;
u8 status;
};
@@ -50,6 +51,7 @@ static void blk_done(struct virtqueue *vq)
spin_lock_irqsave(&vblk->lock, flags);
while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
int error;
+
switch (vbr->status) {
case VIRTIO_BLK_S_OK:
error = 0;
@@ -62,7 +64,13 @@ static void blk_done(struct virtqueue *vq)
break;
}
- __blk_end_request(vbr->req, error, blk_rq_bytes(vbr->req));
+ if (blk_pc_request(vbr->req)) {
+ vbr->req->resid_len = vbr->in_hdr.residual;
+ vbr->req->sense_len = vbr->in_hdr.sense_len;
+ vbr->req->errors = vbr->in_hdr.errors;
+ }
+
+ __blk_end_request_all(vbr->req, error);
list_del(&vbr->list);
mempool_free(vbr, vblk->pool);
}
@@ -74,7 +82,7 @@ static void blk_done(struct virtqueue *vq)
static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
struct request *req)
{
- unsigned long num, out, in;
+ unsigned long num, out = 0, in = 0;
struct virtblk_req *vbr;
vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
@@ -85,7 +93,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
vbr->req = req;
if (blk_fs_request(vbr->req)) {
vbr->out_hdr.type = 0;
- vbr->out_hdr.sector = vbr->req->sector;
+ vbr->out_hdr.sector = blk_rq_pos(vbr->req);
vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
} else if (blk_pc_request(vbr->req)) {
vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
@@ -99,18 +107,36 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
if (blk_barrier_rq(vbr->req))
vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;
- sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr));
- num = blk_rq_map_sg(q, vbr->req, vblk->sg+1);
- sg_set_buf(&vblk->sg[num+1], &vbr->status, sizeof(vbr->status));
+ sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
- if (rq_data_dir(vbr->req) == WRITE) {
- vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
- out = 1 + num;
- in = 1;
- } else {
- vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
- out = 1;
- in = 1 + num;
+ /*
+ * If this is a packet command we need a couple of additional headers.
+ * Behind the normal outhdr we put a segment with the scsi command
+ * block, and before the normal inhdr we put the sense data and an
+ * inhdr with additional status information.
+ */
+ if (blk_pc_request(vbr->req))
+ sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);
+
+ num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
+
+ if (blk_pc_request(vbr->req)) {
+ sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
+ sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
+ sizeof(vbr->in_hdr));
+ }
+
+ sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
+ sizeof(vbr->status));
+
+ if (num) {
+ if (rq_data_dir(vbr->req) == WRITE) {
+ vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
+ out += num;
+ } else {
+ vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
+ in += num;
+ }
}
if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) {
@@ -124,12 +150,11 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
static void do_virtblk_request(struct request_queue *q)
{
- struct virtio_blk *vblk = NULL;
+ struct virtio_blk *vblk = q->queuedata;
struct request *req;
unsigned int issued = 0;
- while ((req = elv_next_request(q)) != NULL) {
- vblk = req->rq_disk->private_data;
+ while ((req = blk_peek_request(q)) != NULL) {
BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
/* If this request fails, stop queue and wait for something to
@@ -138,7 +163,7 @@ static void do_virtblk_request(struct request_queue *q)
blk_stop_queue(q);
break;
}
- blkdev_dequeue_request(req);
+ blk_start_request(req);
issued++;
}
@@ -146,12 +171,51 @@ static void do_virtblk_request(struct request_queue *q)
vblk->vq->vq_ops->kick(vblk->vq);
}
+/* return ATA identify data */
+static int virtblk_identify(struct gendisk *disk, void *argp)
+{
+ struct virtio_blk *vblk = disk->private_data;
+ void *opaque;
+ int err = -ENOMEM;
+
+ opaque = kmalloc(VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
+ if (!opaque)
+ goto out;
+
+ err = virtio_config_buf(vblk->vdev, VIRTIO_BLK_F_IDENTIFY,
+ offsetof(struct virtio_blk_config, identify), opaque,
+ VIRTIO_BLK_ID_BYTES);
+
+ if (err)
+ goto out_kfree;
+
+ if (copy_to_user(argp, opaque, VIRTIO_BLK_ID_BYTES))
+ err = -EFAULT;
+
+out_kfree:
+ kfree(opaque);
+out:
+ return err;
+}
+
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long data)
{
- return scsi_cmd_ioctl(bdev->bd_disk->queue,
- bdev->bd_disk, mode, cmd,
- (void __user *)data);
+ struct gendisk *disk = bdev->bd_disk;
+ struct virtio_blk *vblk = disk->private_data;
+ void __user *argp = (void __user *)data;
+
+ if (cmd == HDIO_GET_IDENTITY)
+ return virtblk_identify(disk, argp);
+
+ /*
+ * Only allow the generic SCSI ioctls if the host can support them.
+ */
+ if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
+ return -ENOIOCTLCMD;
+
+ return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
}
/* We provide getgeo only to please some old bootloader/partitioning tools */
@@ -249,6 +313,7 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_put_disk;
}
+ vblk->disk->queue->queuedata = vblk;
queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue);
if (index < 26) {
@@ -313,7 +378,7 @@ static int virtblk_probe(struct virtio_device *vdev)
offsetof(struct virtio_blk_config, blk_size),
&blk_size);
if (!err)
- blk_queue_hardsect_size(vblk->disk->queue, blk_size);
+ blk_queue_logical_block_size(vblk->disk->queue, blk_size);
add_disk(vblk->disk);
return 0;
@@ -356,6 +421,7 @@ static struct virtio_device_id id_table[] = {
static unsigned int features[] = {
VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
+ VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY
};
static struct virtio_driver virtio_blk = {
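
A userspace model (not kernel code) of the buffer ordering the virtio_blk hunks above set up per request: out_hdr first, then an optional SCSI command block, the data segments, an optional sense buffer plus in_hdr, and finally the status byte. count_descs() below only tallies how many buffers the device reads (out) versus writes (in); the names are illustrative throughout:

#include <stdbool.h>
#include <stdio.h>

static void count_descs(bool is_packet_cmd, bool is_write,
			unsigned int data_segs,
			unsigned int *out, unsigned int *in)
{
	*out = 1;                  /* out_hdr always leads */
	*in = 0;

	if (is_packet_cmd)
		(*out)++;          /* SCSI command block right after out_hdr */

	if (is_write)
		*out += data_segs; /* data is read by the device */
	else
		*in += data_segs;  /* data is written by the device */

	if (is_packet_cmd)
		*in += 2;          /* sense buffer + in_hdr before the status */

	(*in)++;                   /* status byte always last */
}

int main(void)
{
	unsigned int out, in;

	count_descs(true, false, 3, &out, &in);  /* packet-mode read, 3 data segments */
	printf("out=%u in=%u\n", out, in);       /* prints out=2 in=6 */
	return 0;
}
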
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 64b496fce98..ce242921992 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -305,30 +305,25 @@ static void do_xd_request (struct request_queue * q)
if (xdc_busy)
return;
- while ((req = elv_next_request(q)) != NULL) {
- unsigned block = req->sector;
- unsigned count = req->nr_sectors;
- int rw = rq_data_dir(req);
+ req = blk_fetch_request(q);
+ while (req) {
+ unsigned block = blk_rq_pos(req);
+ unsigned count = blk_rq_cur_sectors(req);
XD_INFO *disk = req->rq_disk->private_data;
- int res = 0;
+ int res = -EIO;
int retry;
- if (!blk_fs_request(req)) {
- end_request(req, 0);
- continue;
- }
- if (block + count > get_capacity(req->rq_disk)) {
- end_request(req, 0);
- continue;
- }
- if (rw != READ && rw != WRITE) {
- printk("do_xd_request: unknown request\n");
- end_request(req, 0);
- continue;
- }
+ if (!blk_fs_request(req))
+ goto done;
+ if (block + count > get_capacity(req->rq_disk))
+ goto done;
for (retry = 0; (retry < XD_RETRIES) && !res; retry++)
- res = xd_readwrite(rw, disk, req->buffer, block, count);
- end_request(req, res); /* wrap up, 0 = fail, 1 = success */
+ res = xd_readwrite(rq_data_dir(req), disk, req->buffer,
+ block, count);
+ done:
+ /* wrap up, 0 = success, -errno = fail */
+ if (!__blk_end_request_cur(req, res))
+ req = blk_fetch_request(q);
}
}
@@ -418,7 +413,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write"));
xd_recalibrate(drive);
spin_lock_irq(&xd_lock);
- return (0);
+ return -EIO;
case 2:
if (sense[0] & 0x30) {
printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing"));
@@ -439,7 +434,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
else
printk(" - no valid disk address\n");
spin_lock_irq(&xd_lock);
- return (0);
+ return -EIO;
}
if (xd_dma_buffer)
for (i=0; i < (temp * 0x200); i++)
@@ -448,7 +443,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
count -= temp, buffer += temp * 0x200, block += temp;
}
spin_lock_irq(&xd_lock);
- return (1);
+ return 0;
}
/* xd_recalibrate: recalibrate a given drive and reset controller if necessary */
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a6cbf7b808e..c1996829d5e 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -122,7 +122,7 @@ static DEFINE_SPINLOCK(blkif_io_lock);
static int get_id_from_freelist(struct blkfront_info *info)
{
unsigned long free = info->shadow_free;
- BUG_ON(free > BLK_RING_SIZE);
+ BUG_ON(free >= BLK_RING_SIZE);
info->shadow_free = info->shadow[free].req.id;
info->shadow[free].req.id = 0x0fffffee; /* debug */
return free;
@@ -231,7 +231,7 @@ static int blkif_queue_request(struct request *req)
info->shadow[id].request = (unsigned long)req;
ring_req->id = id;
- ring_req->sector_number = (blkif_sector_t)req->sector;
+ ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
ring_req->handle = info->handle;
ring_req->operation = rq_data_dir(req) ?
@@ -299,25 +299,25 @@ static void do_blkif_request(struct request_queue *rq)
queued = 0;
- while ((req = elv_next_request(rq)) != NULL) {
+ while ((req = blk_peek_request(rq)) != NULL) {
info = req->rq_disk->private_data;
- if (!blk_fs_request(req)) {
- end_request(req, 0);
- continue;
- }
if (RING_FULL(&info->ring))
goto wait;
- pr_debug("do_blk_req %p: cmd %p, sec %lx, "
- "(%u/%li) buffer:%p [%s]\n",
- req, req->cmd, (unsigned long)req->sector,
- req->current_nr_sectors,
- req->nr_sectors, req->buffer,
- rq_data_dir(req) ? "write" : "read");
+ blk_start_request(req);
+ if (!blk_fs_request(req)) {
+ __blk_end_request_all(req, -EIO);
+ continue;
+ }
+
+ pr_debug("do_blk_req %p: cmd %p, sec %lx, "
+ "(%u/%u) buffer:%p [%s]\n",
+ req, req->cmd, (unsigned long)blk_rq_pos(req),
+ blk_rq_cur_sectors(req), blk_rq_sectors(req),
+ req->buffer, rq_data_dir(req) ? "write" : "read");
- blkdev_dequeue_request(req);
if (blkif_queue_request(req)) {
blk_requeue_request(rq, req);
wait:
@@ -344,7 +344,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
/* Hard sector size and max sectors impersonate the equiv. hardware. */
- blk_queue_hardsect_size(rq, sector_size);
+ blk_queue_logical_block_size(rq, sector_size);
blk_queue_max_sectors(rq, 512);
/* Each segment in a request is up to an aligned page in size. */
@@ -551,7 +551,6 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
for (i = info->ring.rsp_cons; i != rp; i++) {
unsigned long id;
- int ret;
bret = RING_GET_RESPONSE(&info->ring, i);
id = bret->id;
@@ -578,8 +577,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
"request: %x\n", bret->status);
- ret = __blk_end_request(req, error, blk_rq_bytes(req));
- BUG_ON(ret);
+ __blk_end_request_all(req, error);
break;
default:
BUG();
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 4aecf5dc6a9..f08491a3a81 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -463,10 +463,11 @@ struct request *ace_get_next_request(struct request_queue * q)
{
struct request *req;
- while ((req = elv_next_request(q)) != NULL) {
+ while ((req = blk_peek_request(q)) != NULL) {
if (blk_fs_request(req))
break;
- end_request(req, 0);
+ blk_start_request(req);
+ __blk_end_request_all(req, -EIO);
}
return req;
}
@@ -492,9 +493,13 @@ static void ace_fsm_dostate(struct ace_device *ace)
set_capacity(ace->gd, 0);
dev_info(ace->dev, "No CF in slot\n");
- /* Drop all pending requests */
- while ((req = elv_next_request(ace->queue)) != NULL)
- end_request(req, 0);
+ /* Drop all in-flight and pending requests */
+ if (ace->req) {
+ __blk_end_request_all(ace->req, -EIO);
+ ace->req = NULL;
+ }
+ while ((req = blk_fetch_request(ace->queue)) != NULL)
+ __blk_end_request_all(req, -EIO);
/* Drop back to IDLE state and notify waiters */
ace->fsm_state = ACE_FSM_STATE_IDLE;
@@ -642,19 +647,21 @@ static void ace_fsm_dostate(struct ace_device *ace)
ace->fsm_state = ACE_FSM_STATE_IDLE;
break;
}
+ blk_start_request(req);
/* Okay, it's a data request, set it up for transfer */
dev_dbg(ace->dev,
- "request: sec=%llx hcnt=%lx, ccnt=%x, dir=%i\n",
- (unsigned long long) req->sector, req->hard_nr_sectors,
- req->current_nr_sectors, rq_data_dir(req));
+ "request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
+ (unsigned long long)blk_rq_pos(req),
+ blk_rq_sectors(req), blk_rq_cur_sectors(req),
+ rq_data_dir(req));
ace->req = req;
ace->data_ptr = req->buffer;
- ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR;
- ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF);
+ ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
+ ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);
- count = req->hard_nr_sectors;
+ count = blk_rq_sectors(req);
if (rq_data_dir(req)) {
/* Kick off write request */
dev_dbg(ace->dev, "write data\n");
@@ -688,7 +695,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
dev_dbg(ace->dev,
"CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
ace->fsm_task, ace->fsm_iter_num,
- ace->req->current_nr_sectors * 16,
+ blk_rq_cur_sectors(ace->req) * 16,
ace->data_count, ace->in_irq);
ace_fsm_yield(ace); /* need to poll CFBSY bit */
break;
@@ -697,7 +704,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
dev_dbg(ace->dev,
"DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
ace->fsm_task, ace->fsm_iter_num,
- ace->req->current_nr_sectors * 16,
+ blk_rq_cur_sectors(ace->req) * 16,
ace->data_count, ace->in_irq);
ace_fsm_yieldirq(ace);
break;
@@ -717,14 +724,13 @@ static void ace_fsm_dostate(struct ace_device *ace)
}
/* bio finished; is there another one? */
- if (__blk_end_request(ace->req, 0,
- blk_rq_cur_bytes(ace->req))) {
- /* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
- * ace->req->hard_nr_sectors,
- * ace->req->current_nr_sectors);
+ if (__blk_end_request_cur(ace->req, 0)) {
+ /* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
+ * blk_rq_sectors(ace->req),
+ * blk_rq_cur_sectors(ace->req));
*/
ace->data_ptr = ace->req->buffer;
- ace->data_count = ace->req->current_nr_sectors * 16;
+ ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
ace_fsm_yieldirq(ace);
break;
}
@@ -978,7 +984,7 @@ static int __devinit ace_setup(struct ace_device *ace)
ace->queue = blk_init_queue(ace_request, &ace->lock);
if (ace->queue == NULL)
goto err_blk_initq;
- blk_queue_hardsect_size(ace->queue, 512);
+ blk_queue_logical_block_size(ace->queue, 512);
/*
* Allocate and initialize GD structure
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 80754cdd311..4575171e5be 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -70,15 +70,18 @@ static struct gendisk *z2ram_gendisk;
static void do_z2_request(struct request_queue *q)
{
struct request *req;
- while ((req = elv_next_request(q)) != NULL) {
- unsigned long start = req->sector << 9;
- unsigned long len = req->current_nr_sectors << 9;
+
+ req = blk_fetch_request(q);
+ while (req) {
+ unsigned long start = blk_rq_pos(req) << 9;
+ unsigned long len = blk_rq_cur_bytes(req);
+ int err = 0;
if (start + len > z2ram_size) {
printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n",
- req->sector, req->current_nr_sectors);
- end_request(req, 0);
- continue;
+ blk_rq_pos(req), blk_rq_cur_sectors(req));
+ err = -EIO;
+ goto done;
}
while (len) {
unsigned long addr = start & Z2RAM_CHUNKMASK;
@@ -93,7 +96,9 @@ static void do_z2_request(struct request_queue *q)
start += size;
len -= size;
}
- end_request(req, 1);
+ done:
+ if (!__blk_end_request_cur(req, err))
+ req = blk_fetch_request(q);
}
}
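
The xd and z2ram conversions above share the same loop shape: fetch a request, transfer the current chunk, complete just that chunk, and only fetch a new request once nothing is left. A minimal userspace model of that shape follows (error handling omitted; fake_request and end_cur() are stand-ins for the kernel request and __blk_end_request_cur()):

#include <stdbool.h>
#include <stdio.h>

struct fake_request {
	unsigned long pos;     /* next sector to transfer */
	unsigned int left;     /* sectors still outstanding */
	unsigned int chunk;    /* sectors in the current segment */
};

/* Complete the current chunk; return true while sectors remain,
 * mirroring the "request not finished yet" result the loops test. */
static bool end_cur(struct fake_request *rq)
{
	rq->pos += rq->chunk;
	rq->left -= rq->chunk;
	return rq->left != 0;
}

int main(void)
{
	struct fake_request rq = { .pos = 100, .left = 8, .chunk = 4 };

	do {
		printf("transfer %u sectors at %lu\n", rq.chunk, rq.pos);
	} while (end_cur(&rq));    /* a real driver fetches the next request here */

	return 0;
}
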
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index af761dc434f..4895f0e0532 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -277,8 +277,8 @@ static int hci_uart_tty_open(struct tty_struct *tty)
/* FIXME: why is this needed? Note: don't use ldisc_ref here, as the
open path is before the ldisc is referenceable */
- if (tty->ldisc.ops->flush_buffer)
- tty->ldisc.ops->flush_buffer(tty);
+ if (tty->ldisc->ops->flush_buffer)
+ tty->ldisc->ops->flush_buffer(tty);
tty_driver_flush_buffer(tty);
return 0;
@@ -463,7 +463,6 @@ static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file * file,
clear_bit(HCI_UART_PROTO_SET, &hu->flags);
return err;
}
- tty->low_latency = 1;
} else
return -EBUSY;
break;
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index cceace61ef2..71d1b9bab70 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2101,8 +2101,8 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
nr = nframes;
if (cdi->cdda_method == CDDA_BPC_SINGLE)
nr = 1;
- if (nr * CD_FRAMESIZE_RAW > (q->max_sectors << 9))
- nr = (q->max_sectors << 9) / CD_FRAMESIZE_RAW;
+ if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9))
+ nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW;
len = nr * CD_FRAMESIZE_RAW;
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 2eecb779437..b5621f27c4b 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -584,8 +584,8 @@ static void gdrom_readdisk_dma(struct work_struct *work)
list_for_each_safe(elem, next, &gdrom_deferred) {
req = list_entry(elem, struct request, queuelist);
spin_unlock(&gdrom_lock);
- block = req->sector/GD_TO_BLK + GD_SESSION_OFFSET;
- block_cnt = req->nr_sectors/GD_TO_BLK;
+ block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
+ block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
ctrl_outl(PHYSADDR(req->buffer), GDROM_DMA_STARTADDR_REG);
ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
ctrl_outl(1, GDROM_DMA_DIRECTION_REG);
@@ -632,39 +632,35 @@ static void gdrom_readdisk_dma(struct work_struct *work)
* before handling ending the request */
spin_lock(&gdrom_lock);
list_del_init(&req->queuelist);
- __blk_end_request(req, err, blk_rq_bytes(req));
+ __blk_end_request_all(req, err);
}
spin_unlock(&gdrom_lock);
kfree(read_command);
}
-static void gdrom_request_handler_dma(struct request *req)
-{
- /* dequeue, add to list of deferred work
- * and then schedule workqueue */
- blkdev_dequeue_request(req);
- list_add_tail(&req->queuelist, &gdrom_deferred);
- schedule_work(&work);
-}
-
static void gdrom_request(struct request_queue *rq)
{
struct request *req;
- while ((req = elv_next_request(rq)) != NULL) {
+ while ((req = blk_fetch_request(rq)) != NULL) {
if (!blk_fs_request(req)) {
printk(KERN_DEBUG "GDROM: Non-fs request ignored\n");
- end_request(req, 0);
+ __blk_end_request_all(req, -EIO);
+ continue;
}
if (rq_data_dir(req) != READ) {
printk(KERN_NOTICE "GDROM: Read only device -");
printk(" write request ignored\n");
- end_request(req, 0);
+ __blk_end_request_all(req, -EIO);
+ continue;
}
- if (req->nr_sectors)
- gdrom_request_handler_dma(req);
- else
- end_request(req, 0);
+
+ /*
+ * Add to list of deferred work and then schedule
+ * workqueue.
+ */
+ list_add_tail(&req->queuelist, &gdrom_deferred);
+ schedule_work(&work);
}
}
@@ -743,7 +739,7 @@ static void __devinit probe_gdrom_setupdisk(void)
static int __devinit probe_gdrom_setupqueue(void)
{
- blk_queue_hardsect_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
+ blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
/* using DMA so memory will need to be contiguous */
blk_queue_max_hw_segments(gd.gdrom_rq, 1);
/* set a large max size to get most from DMA */
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 9b1624e0dde..0fff646cc2f 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -282,7 +282,7 @@ static int send_request(struct request *req)
viopath_targetinst(viopath_hostLp),
(u64)req, VIOVERSION << 16,
((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr,
- (u64)req->sector * 512, len, 0);
+ (u64)blk_rq_pos(req) * 512, len, 0);
if (hvrc != HvLpEvent_Rc_Good) {
printk(VIOCD_KERN_WARNING "hv error on op %d\n", (int)hvrc);
return -1;
@@ -291,36 +291,19 @@ static int send_request(struct request *req)
return 0;
}
-static void viocd_end_request(struct request *req, int error)
-{
- int nsectors = req->hard_nr_sectors;
-
- /*
- * Make sure it's fully ended, and ensure that we process
- * at least one sector.
- */
- if (blk_pc_request(req))
- nsectors = (req->data_len + 511) >> 9;
- if (!nsectors)
- nsectors = 1;
-
- if (__blk_end_request(req, error, nsectors << 9))
- BUG();
-}
-
static int rwreq;
static void do_viocd_request(struct request_queue *q)
{
struct request *req;
- while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) {
+ while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) {
if (!blk_fs_request(req))
- viocd_end_request(req, -EIO);
+ __blk_end_request_all(req, -EIO);
else if (send_request(req) < 0) {
printk(VIOCD_KERN_WARNING
"unable to send message to OS/400!");
- viocd_end_request(req, -EIO);
+ __blk_end_request_all(req, -EIO);
} else
rwreq++;
}
@@ -486,8 +469,8 @@ static void vio_handle_cd_event(struct HvLpEvent *event)
case viocdopen:
if (event->xRc == 0) {
di = &viocd_diskinfo[bevent->disk];
- blk_queue_hardsect_size(di->viocd_disk->queue,
- bevent->block_size);
+ blk_queue_logical_block_size(di->viocd_disk->queue,
+ bevent->block_size);
set_capacity(di->viocd_disk,
bevent->media_size *
bevent->block_size / 512);
@@ -531,9 +514,9 @@ return_complete:
"with rc %d:0x%04X: %s\n",
req, event->xRc,
bevent->sub_result, err->msg);
- viocd_end_request(req, -EIO);
+ __blk_end_request_all(req, -EIO);
} else
- viocd_end_request(req, 0);
+ __blk_end_request_all(req, 0);
/* restart handling of incoming requests */
spin_unlock_irqrestore(&viocd_reqlock, flags);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 735bbe2be51..02ecfd5fa61 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -97,6 +97,19 @@ config DEVKMEM
kind of kernel debugging operations.
When in doubt, say "N".
+config BFIN_JTAG_COMM
+ tristate "Blackfin JTAG Communication"
+ depends on BLACKFIN
+ help
+ Add support for emulating a TTY device over the Blackfin JTAG.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bfin_jtag_comm.
+
+config BFIN_JTAG_COMM_CONSOLE
+ bool "Console on Blackfin JTAG"
+ depends on BFIN_JTAG_COMM=y
+
config SERIAL_NONSTANDARD
bool "Non-standard serial port support"
depends on HAS_IOMEM
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 9caf5b5ad1c..189efcff08c 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_LEGACY_PTYS) += pty.o
obj-$(CONFIG_UNIX98_PTYS) += pty.o
obj-y += misc.o
obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o selection.o keyboard.o
+obj-$(CONFIG_BFIN_JTAG_COMM) += bfin_jtag_comm.o
obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o
obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o
obj-$(CONFIG_AUDIT) += tty_audit.o
diff --git a/drivers/char/bfin_jtag_comm.c b/drivers/char/bfin_jtag_comm.c
new file mode 100644
index 00000000000..44c113d5604
--- /dev/null
+++ b/drivers/char/bfin_jtag_comm.c
@@ -0,0 +1,365 @@
+/*
+ * TTY over Blackfin JTAG Communication
+ *
+ * Copyright 2008-2009 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <asm/atomic.h>
+
+/* See the Debug/Emulation chapter in the HRM */
+#define EMUDOF 0x00000001 /* EMUDAT_OUT full & valid */
+#define EMUDIF 0x00000002 /* EMUDAT_IN full & valid */
+#define EMUDOOVF 0x00000004 /* EMUDAT_OUT overflow */
+#define EMUDIOVF 0x00000008 /* EMUDAT_IN overflow */
+
+#define DRV_NAME "bfin-jtag-comm"
+#define DEV_NAME "ttyBFJC"
+
+#define pr_init(fmt, args...) ({ static const __initdata char __fmt[] = fmt; printk(__fmt, ## args); })
+#define debug(fmt, args...) pr_debug(DRV_NAME ": " fmt, ## args)
+
+static inline uint32_t bfin_write_emudat(uint32_t emudat)
+{
+ __asm__ __volatile__("emudat = %0;" : : "d"(emudat));
+ return emudat;
+}
+
+static inline uint32_t bfin_read_emudat(void)
+{
+ uint32_t emudat;
+ __asm__ __volatile__("%0 = emudat;" : "=d"(emudat));
+ return emudat;
+}
+
+static inline uint32_t bfin_write_emudat_chars(char a, char b, char c, char d)
+{
+ return bfin_write_emudat((a << 0) | (b << 8) | (c << 16) | (d << 24));
+}
+
+#define CIRC_SIZE 2048 /* see comment in tty_io.c:do_tty_write() */
+#define CIRC_MASK (CIRC_SIZE - 1)
+#define circ_empty(circ) ((circ)->head == (circ)->tail)
+#define circ_free(circ) CIRC_SPACE((circ)->head, (circ)->tail, CIRC_SIZE)
+#define circ_cnt(circ) CIRC_CNT((circ)->head, (circ)->tail, CIRC_SIZE)
+#define circ_byte(circ, idx) ((circ)->buf[(idx) & CIRC_MASK])
+
+static struct tty_driver *bfin_jc_driver;
+static struct task_struct *bfin_jc_kthread;
+static struct tty_struct * volatile bfin_jc_tty;
+static unsigned long bfin_jc_count;
+static DEFINE_MUTEX(bfin_jc_tty_mutex);
+static volatile struct circ_buf bfin_jc_write_buf;
+
+static int
+bfin_jc_emudat_manager(void *arg)
+{
+ uint32_t inbound_len = 0, outbound_len = 0;
+
+ while (!kthread_should_stop()) {
+ /* no one left to give data to, so sleep */
+ if (bfin_jc_tty == NULL && circ_empty(&bfin_jc_write_buf)) {
+ debug("waiting for readers\n");
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ }
+
+ /* no data available, so just chill */
+ if (!(bfin_read_DBGSTAT() & EMUDIF) && circ_empty(&bfin_jc_write_buf)) {
+ debug("waiting for data (in_len = %i) (circ: %i %i)\n",
+ inbound_len, bfin_jc_write_buf.tail, bfin_jc_write_buf.head);
+ if (inbound_len)
+ schedule();
+ else
+ schedule_timeout_interruptible(HZ);
+ continue;
+ }
+
+ /* if incoming data is ready, eat it */
+ if (bfin_read_DBGSTAT() & EMUDIF) {
+ struct tty_struct *tty;
+ mutex_lock(&bfin_jc_tty_mutex);
+ tty = (struct tty_struct *)bfin_jc_tty;
+ if (tty != NULL) {
+ uint32_t emudat = bfin_read_emudat();
+ if (inbound_len == 0) {
+ debug("incoming length: 0x%08x\n", emudat);
+ inbound_len = emudat;
+ } else {
+ size_t num_chars = (4 <= inbound_len ? 4 : inbound_len);
+ debug(" incoming data: 0x%08x (pushing %zu)\n", emudat, num_chars);
+ inbound_len -= num_chars;
+ tty_insert_flip_string(tty, (unsigned char *)&emudat, num_chars);
+ tty_flip_buffer_push(tty);
+ }
+ }
+ mutex_unlock(&bfin_jc_tty_mutex);
+ }
+
+ /* if outgoing data is ready, post it */
+ if (!(bfin_read_DBGSTAT() & EMUDOF) && !circ_empty(&bfin_jc_write_buf)) {
+ if (outbound_len == 0) {
+ outbound_len = circ_cnt(&bfin_jc_write_buf);
+ bfin_write_emudat(outbound_len);
+ debug("outgoing length: 0x%08x\n", outbound_len);
+ } else {
+ struct tty_struct *tty;
+ int tail = bfin_jc_write_buf.tail;
+ size_t ate = (4 <= outbound_len ? 4 : outbound_len);
+ uint32_t emudat =
+ bfin_write_emudat_chars(
+ circ_byte(&bfin_jc_write_buf, tail + 0),
+ circ_byte(&bfin_jc_write_buf, tail + 1),
+ circ_byte(&bfin_jc_write_buf, tail + 2),
+ circ_byte(&bfin_jc_write_buf, tail + 3)
+ );
+ bfin_jc_write_buf.tail += ate;
+ outbound_len -= ate;
+ mutex_lock(&bfin_jc_tty_mutex);
+ tty = (struct tty_struct *)bfin_jc_tty;
+ if (tty)
+ tty_wakeup(tty);
+ mutex_unlock(&bfin_jc_tty_mutex);
+ debug(" outgoing data: 0x%08x (pushing %zu)\n", emudat, ate);
+ }
+ }
+ }
+
+ __set_current_state(TASK_RUNNING);
+ return 0;
+}
+
+static int
+bfin_jc_open(struct tty_struct *tty, struct file *filp)
+{
+ mutex_lock(&bfin_jc_tty_mutex);
+ debug("open %lu\n", bfin_jc_count);
+ ++bfin_jc_count;
+ bfin_jc_tty = tty;
+ wake_up_process(bfin_jc_kthread);
+ mutex_unlock(&bfin_jc_tty_mutex);
+ return 0;
+}
+
+static void
+bfin_jc_close(struct tty_struct *tty, struct file *filp)
+{
+ mutex_lock(&bfin_jc_tty_mutex);
+ debug("close %lu\n", bfin_jc_count);
+ if (--bfin_jc_count == 0)
+ bfin_jc_tty = NULL;
+ wake_up_process(bfin_jc_kthread);
+ mutex_unlock(&bfin_jc_tty_mutex);
+}
+
+/* XXX: we don't handle the put_char() case, where we must handle count = 1 */
+static int
+bfin_jc_circ_write(const unsigned char *buf, int count)
+{
+ int i;
+ count = min(count, circ_free(&bfin_jc_write_buf));
+ debug("going to write chunk of %i bytes\n", count);
+ for (i = 0; i < count; ++i)
+ circ_byte(&bfin_jc_write_buf, bfin_jc_write_buf.head + i) = buf[i];
+ bfin_jc_write_buf.head += i;
+ return i;
+}
+
+#ifndef CONFIG_BFIN_JTAG_COMM_CONSOLE
+# define acquire_console_sem()
+# define release_console_sem()
+#endif
+static int
+bfin_jc_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ int i;
+ acquire_console_sem();
+ i = bfin_jc_circ_write(buf, count);
+ release_console_sem();
+ wake_up_process(bfin_jc_kthread);
+ return i;
+}
+
+static void
+bfin_jc_flush_chars(struct tty_struct *tty)
+{
+ wake_up_process(bfin_jc_kthread);
+}
+
+static int
+bfin_jc_write_room(struct tty_struct *tty)
+{
+ return circ_free(&bfin_jc_write_buf);
+}
+
+static int
+bfin_jc_chars_in_buffer(struct tty_struct *tty)
+{
+ return circ_cnt(&bfin_jc_write_buf);
+}
+
+static void
+bfin_jc_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ unsigned long expire = jiffies + timeout;
+ while (!circ_empty(&bfin_jc_write_buf)) {
+ if (signal_pending(current))
+ break;
+ if (time_after(jiffies, expire))
+ break;
+ }
+}
+
+static struct tty_operations bfin_jc_ops = {
+ .open = bfin_jc_open,
+ .close = bfin_jc_close,
+ .write = bfin_jc_write,
+ /*.put_char = bfin_jc_put_char,*/
+ .flush_chars = bfin_jc_flush_chars,
+ .write_room = bfin_jc_write_room,
+ .chars_in_buffer = bfin_jc_chars_in_buffer,
+ .wait_until_sent = bfin_jc_wait_until_sent,
+};
+
+static int __init bfin_jc_init(void)
+{
+ int ret;
+
+ bfin_jc_kthread = kthread_create(bfin_jc_emudat_manager, NULL, DRV_NAME);
+ if (IS_ERR(bfin_jc_kthread))
+ return PTR_ERR(bfin_jc_kthread);
+
+ ret = -ENOMEM;
+
+ bfin_jc_write_buf.head = bfin_jc_write_buf.tail = 0;
+ bfin_jc_write_buf.buf = kmalloc(CIRC_SIZE, GFP_KERNEL);
+ if (!bfin_jc_write_buf.buf)
+ goto err;
+
+ bfin_jc_driver = alloc_tty_driver(1);
+ if (!bfin_jc_driver)
+ goto err;
+
+ bfin_jc_driver->owner = THIS_MODULE;
+ bfin_jc_driver->driver_name = DRV_NAME;
+ bfin_jc_driver->name = DEV_NAME;
+ bfin_jc_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ bfin_jc_driver->subtype = SERIAL_TYPE_NORMAL;
+ bfin_jc_driver->init_termios = tty_std_termios;
+ tty_set_operations(bfin_jc_driver, &bfin_jc_ops);
+
+ ret = tty_register_driver(bfin_jc_driver);
+ if (ret)
+ goto err;
+
+ pr_init(KERN_INFO DRV_NAME ": initialized\n");
+
+ return 0;
+
+ err:
+ put_tty_driver(bfin_jc_driver);
+ kfree(bfin_jc_write_buf.buf);
+ kthread_stop(bfin_jc_kthread);
+ return ret;
+}
+module_init(bfin_jc_init);
+
+static void __exit bfin_jc_exit(void)
+{
+ kthread_stop(bfin_jc_kthread);
+ kfree(bfin_jc_write_buf.buf);
+ tty_unregister_driver(bfin_jc_driver);
+ put_tty_driver(bfin_jc_driver);
+}
+module_exit(bfin_jc_exit);
+
+#if defined(CONFIG_BFIN_JTAG_COMM_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
+static void
+bfin_jc_straight_buffer_write(const char *buf, unsigned count)
+{
+ unsigned ate = 0;
+ while (bfin_read_DBGSTAT() & EMUDOF)
+ continue;
+ bfin_write_emudat(count);
+ while (ate < count) {
+ while (bfin_read_DBGSTAT() & EMUDOF)
+ continue;
+ bfin_write_emudat_chars(buf[ate], buf[ate+1], buf[ate+2], buf[ate+3]);
+ ate += 4;
+ }
+}
+#endif
+
+#ifdef CONFIG_BFIN_JTAG_COMM_CONSOLE
+static void
+bfin_jc_console_write(struct console *co, const char *buf, unsigned count)
+{
+ if (bfin_jc_kthread == NULL)
+ bfin_jc_straight_buffer_write(buf, count);
+ else
+ bfin_jc_circ_write(buf, count);
+}
+
+static struct tty_driver *
+bfin_jc_console_device(struct console *co, int *index)
+{
+ *index = co->index;
+ return bfin_jc_driver;
+}
+
+static struct console bfin_jc_console = {
+ .name = DEV_NAME,
+ .write = bfin_jc_console_write,
+ .device = bfin_jc_console_device,
+ .flags = CON_ANYTIME | CON_PRINTBUFFER,
+ .index = -1,
+};
+
+static int __init bfin_jc_console_init(void)
+{
+ register_console(&bfin_jc_console);
+ return 0;
+}
+console_initcall(bfin_jc_console_init);
+#endif
+
+#ifdef CONFIG_EARLY_PRINTK
+static void __init
+bfin_jc_early_write(struct console *co, const char *buf, unsigned int count)
+{
+ bfin_jc_straight_buffer_write(buf, count);
+}
+
+static struct __initdata console bfin_jc_early_console = {
+ .name = "early_BFJC",
+ .write = bfin_jc_early_write,
+ .flags = CON_ANYTIME | CON_PRINTBUFFER,
+ .index = -1,
+};
+
+struct console * __init
+bfin_jc_early_init(unsigned int port, unsigned int cflag)
+{
+ return &bfin_jc_early_console;
+}
+#endif
+
+MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>");
+MODULE_DESCRIPTION("TTY over Blackfin JTAG Communication");
+MODULE_LICENSE("GPL");
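
A standalone illustration of the byte packing bfin_write_emudat_chars() performs in the new driver above: four characters folded little-endian into one 32-bit EMUDAT word. pack_emudat() here is just a model compiled for the host, not the Blackfin helper itself:

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_emudat(char a, char b, char c, char d)
{
	/* same shift/or layout as the driver helper */
	return ((uint32_t)(uint8_t)a << 0)  |
	       ((uint32_t)(uint8_t)b << 8)  |
	       ((uint32_t)(uint8_t)c << 16) |
	       ((uint32_t)(uint8_t)d << 24);
}

int main(void)
{
	/* 't','e','s','t' -> 0x74736574 ("test" stored LSB-first) */
	printf("0x%08x\n", pack_emudat('t', 'e', 's', 't'));
	return 0;
}
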
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index 1fdb9f657d8..f3366d3f06c 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -604,7 +604,6 @@
#define NR_PORTS 256
-#define ZE_V1_NPORTS 64
#define ZO_V1 0
#define ZO_V2 1
#define ZE_V1 2
@@ -663,18 +662,6 @@
static void cy_throttle(struct tty_struct *tty);
static void cy_send_xchar(struct tty_struct *tty, char ch);
-#define IS_CYC_Z(card) ((card).num_chips == (unsigned int)-1)
-
-#define Z_FPGA_CHECK(card) \
- ((readl(&((struct RUNTIME_9060 __iomem *) \
- ((card).ctl_addr))->init_ctrl) & (1<<17)) != 0)
-
-#define ISZLOADED(card) (((ZO_V1 == readl(&((struct RUNTIME_9060 __iomem *) \
- ((card).ctl_addr))->mail_box_0)) || \
- Z_FPGA_CHECK(card)) && \
- (ZFIRM_ID == readl(&((struct FIRM_ID __iomem *) \
- ((card).base_addr+ID_ADDRESS))->signature)))
-
#ifndef SERIAL_XMIT_SIZE
#define SERIAL_XMIT_SIZE (min(PAGE_SIZE, 4096))
#endif
@@ -687,8 +674,6 @@ static void cy_send_xchar(struct tty_struct *tty, char ch);
#define DRIVER_VERSION 0x02010203
#define RAM_SIZE 0x80000
-#define Z_FPGA_LOADED(X) ((readl(&(X)->init_ctrl) & (1<<17)) != 0)
-
enum zblock_type {
ZBLOCK_PRG = 0,
ZBLOCK_FPGA = 1
@@ -883,6 +868,29 @@ static void cyz_rx_restart(unsigned long);
static struct timer_list cyz_rx_full_timer[NR_PORTS];
#endif /* CONFIG_CYZ_INTR */
+static inline bool cy_is_Z(struct cyclades_card *card)
+{
+ return card->num_chips == (unsigned int)-1;
+}
+
+static inline bool __cyz_fpga_loaded(struct RUNTIME_9060 __iomem *ctl_addr)
+{
+ return readl(&ctl_addr->init_ctrl) & (1 << 17);
+}
+
+static inline bool cyz_fpga_loaded(struct cyclades_card *card)
+{
+ return __cyz_fpga_loaded(card->ctl_addr.p9060);
+}
+
+static inline bool cyz_is_loaded(struct cyclades_card *card)
+{
+ struct FIRM_ID __iomem *fw_id = card->base_addr + ID_ADDRESS;
+
+ return (card->hw_ver == ZO_V1 || cyz_fpga_loaded(card)) &&
+ readl(&fw_id->signature) == ZFIRM_ID;
+}
+
static inline int serial_paranoia_check(struct cyclades_port *info,
char *name, const char *routine)
{
@@ -1395,19 +1403,15 @@ cyz_fetch_msg(struct cyclades_card *cinfo,
unsigned long loc_doorbell;
firm_id = cinfo->base_addr + ID_ADDRESS;
- if (!ISZLOADED(*cinfo))
- return -1;
zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff);
board_ctrl = &zfw_ctrl->board_ctrl;
- loc_doorbell = readl(&((struct RUNTIME_9060 __iomem *)
- (cinfo->ctl_addr))->loc_doorbell);
+ loc_doorbell = readl(&cinfo->ctl_addr.p9060->loc_doorbell);
if (loc_doorbell) {
*cmd = (char)(0xff & loc_doorbell);
*channel = readl(&board_ctrl->fwcmd_channel);
*param = (__u32) readl(&board_ctrl->fwcmd_param);
- cy_writel(&((struct RUNTIME_9060 __iomem *)(cinfo->ctl_addr))->
- loc_doorbell, 0xffffffff);
+ cy_writel(&cinfo->ctl_addr.p9060->loc_doorbell, 0xffffffff);
return 1;
}
return 0;
@@ -1424,15 +1428,14 @@ cyz_issue_cmd(struct cyclades_card *cinfo,
unsigned int index;
firm_id = cinfo->base_addr + ID_ADDRESS;
- if (!ISZLOADED(*cinfo))
+ if (!cyz_is_loaded(cinfo))
return -1;
zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff);
board_ctrl = &zfw_ctrl->board_ctrl;
index = 0;
- pci_doorbell =
- &((struct RUNTIME_9060 __iomem *)(cinfo->ctl_addr))->pci_doorbell;
+ pci_doorbell = &cinfo->ctl_addr.p9060->pci_doorbell;
while ((readl(pci_doorbell) & 0xff) != 0) {
if (index++ == 1000)
return (int)(readl(pci_doorbell) & 0xff);
@@ -1624,10 +1627,8 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
static struct BOARD_CTRL __iomem *board_ctrl;
static struct CH_CTRL __iomem *ch_ctrl;
static struct BUF_CTRL __iomem *buf_ctrl;
- __u32 channel;
+ __u32 channel, param, fw_ver;
__u8 cmd;
- __u32 param;
- __u32 hw_ver, fw_ver;
int special_count;
int delta_count;
@@ -1635,8 +1636,6 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff);
board_ctrl = &zfw_ctrl->board_ctrl;
fw_ver = readl(&board_ctrl->fw_version);
- hw_ver = readl(&((struct RUNTIME_9060 __iomem *)(cinfo->ctl_addr))->
- mail_box_0);
while (cyz_fetch_msg(cinfo, &channel, &cmd, &param) == 1) {
special_count = 0;
@@ -1737,15 +1736,7 @@ static irqreturn_t cyz_interrupt(int irq, void *dev_id)
{
struct cyclades_card *cinfo = dev_id;
- if (unlikely(cinfo == NULL)) {
-#ifdef CY_DEBUG_INTERRUPTS
- printk(KERN_DEBUG "cyz_interrupt: spurious interrupt %d\n",
- irq);
-#endif
- return IRQ_NONE; /* spurious interrupt */
- }
-
- if (unlikely(!ISZLOADED(*cinfo))) {
+ if (unlikely(!cyz_is_loaded(cinfo))) {
#ifdef CY_DEBUG_INTERRUPTS
printk(KERN_DEBUG "cyz_interrupt: board not yet loaded "
"(IRQ%d).\n", irq);
@@ -1785,7 +1776,6 @@ static void cyz_poll(unsigned long arg)
struct tty_struct *tty;
struct FIRM_ID __iomem *firm_id;
struct ZFW_CTRL __iomem *zfw_ctrl;
- struct BOARD_CTRL __iomem *board_ctrl;
struct BUF_CTRL __iomem *buf_ctrl;
unsigned long expires = jiffies + HZ;
unsigned int port, card;
@@ -1793,19 +1783,17 @@ static void cyz_poll(unsigned long arg)
for (card = 0; card < NR_CARDS; card++) {
cinfo = &cy_card[card];
- if (!IS_CYC_Z(*cinfo))
+ if (!cy_is_Z(cinfo))
continue;
- if (!ISZLOADED(*cinfo))
+ if (!cyz_is_loaded(cinfo))
continue;
firm_id = cinfo->base_addr + ID_ADDRESS;
zfw_ctrl = cinfo->base_addr +
(readl(&firm_id->zfwctrl_addr) & 0xfffff);
- board_ctrl = &(zfw_ctrl->board_ctrl);
/* Skip first polling cycle to avoid racing conditions with the FW */
if (!cinfo->intr_enabled) {
- cinfo->nports = (int)readl(&board_ctrl->n_channel);
cinfo->intr_enabled = 1;
continue;
}
@@ -1874,7 +1862,7 @@ static int startup(struct cyclades_port *info)
set_line_char(info);
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -1931,7 +1919,7 @@ static int startup(struct cyclades_port *info)
base_addr = card->base_addr;
firm_id = base_addr + ID_ADDRESS;
- if (!ISZLOADED(*card))
+ if (!cyz_is_loaded(card))
return -ENODEV;
zfw_ctrl = card->base_addr +
@@ -2026,7 +2014,7 @@ static void start_xmit(struct cyclades_port *info)
card = info->card;
channel = info->line - card->first_line;
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -2070,7 +2058,7 @@ static void shutdown(struct cyclades_port *info)
card = info->card;
channel = info->line - card->first_line;
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -2126,7 +2114,7 @@ static void shutdown(struct cyclades_port *info)
#endif
firm_id = base_addr + ID_ADDRESS;
- if (!ISZLOADED(*card))
+ if (!cyz_is_loaded(card))
return;
zfw_ctrl = card->base_addr +
@@ -2233,7 +2221,7 @@ block_til_ready(struct tty_struct *tty, struct file *filp,
#endif
info->port.blocked_open++;
- if (!IS_CYC_Z(*cinfo)) {
+ if (!cy_is_Z(cinfo)) {
chip = channel >> 2;
channel &= 0x03;
index = cinfo->bus_index;
@@ -2296,7 +2284,7 @@ block_til_ready(struct tty_struct *tty, struct file *filp,
base_addr = cinfo->base_addr;
firm_id = base_addr + ID_ADDRESS;
- if (!ISZLOADED(*cinfo)) {
+ if (!cyz_is_loaded(cinfo)) {
__set_current_state(TASK_RUNNING);
remove_wait_queue(&info->port.open_wait, &wait);
return -EINVAL;
@@ -2397,16 +2385,14 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
treat it as absent from the system. This
will make the user pay attention.
*/
- if (IS_CYC_Z(*info->card)) {
+ if (cy_is_Z(info->card)) {
struct cyclades_card *cinfo = info->card;
struct FIRM_ID __iomem *firm_id = cinfo->base_addr + ID_ADDRESS;
- if (!ISZLOADED(*cinfo)) {
- if (((ZE_V1 == readl(&((struct RUNTIME_9060 __iomem *)
- (cinfo->ctl_addr))->mail_box_0)) &&
- Z_FPGA_CHECK(*cinfo)) &&
- (ZFIRM_HLT == readl(
- &firm_id->signature))) {
+ if (!cyz_is_loaded(cinfo)) {
+ if (cinfo->hw_ver == ZE_V1 && cyz_fpga_loaded(cinfo) &&
+ readl(&firm_id->signature) ==
+ ZFIRM_HLT) {
printk(KERN_ERR "cyc:Cyclades-Z Error: you "
"need an external power supply for "
"this number of ports.\nFirmware "
@@ -2423,18 +2409,13 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
interrupts should be enabled as soon as the first open
happens to one of its ports. */
if (!cinfo->intr_enabled) {
- struct ZFW_CTRL __iomem *zfw_ctrl;
- struct BOARD_CTRL __iomem *board_ctrl;
-
- zfw_ctrl = cinfo->base_addr +
- (readl(&firm_id->zfwctrl_addr) &
- 0xfffff);
-
- board_ctrl = &zfw_ctrl->board_ctrl;
+ u16 intr;
/* Enable interrupts on the PLX chip */
- cy_writew(cinfo->ctl_addr + 0x68,
- readw(cinfo->ctl_addr + 0x68) | 0x0900);
+ intr = readw(&cinfo->ctl_addr.p9060->
+ intr_ctrl_stat) | 0x0900;
+ cy_writew(&cinfo->ctl_addr.p9060->
+ intr_ctrl_stat, intr);
/* Enable interrupts on the FW */
retval = cyz_issue_cmd(cinfo, 0,
C_CM_IRQ_ENBL, 0L);
@@ -2442,8 +2423,6 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
printk(KERN_ERR "cyc:IRQ enable retval "
"was %x\n", retval);
}
- cinfo->nports =
- (int)readl(&board_ctrl->n_channel);
cinfo->intr_enabled = 1;
}
}
@@ -2556,7 +2535,7 @@ static void cy_wait_until_sent(struct tty_struct *tty, int timeout)
#endif
card = info->card;
channel = (info->line) - (card->first_line);
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -2601,7 +2580,7 @@ static void cy_flush_buffer(struct tty_struct *tty)
info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
spin_unlock_irqrestore(&card->card_lock, flags);
- if (IS_CYC_Z(*card)) { /* If it is a Z card, flush the on-board
+ if (cy_is_Z(card)) { /* If it is a Z card, flush the on-board
buffers as well */
spin_lock_irqsave(&card->card_lock, flags);
retval = cyz_issue_cmd(card, channel, C_CM_FLUSH_TX, 0L);
@@ -2682,7 +2661,7 @@ static void cy_close(struct tty_struct *tty, struct file *filp)
spin_lock_irqsave(&card->card_lock, flags);
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
int channel = info->line - card->first_line;
int index = card->bus_index;
void __iomem *base_addr = card->base_addr +
@@ -2902,7 +2881,7 @@ static int cy_chars_in_buffer(struct tty_struct *tty)
channel = (info->line) - (card->first_line);
#ifdef Z_EXT_CHARS_IN_BUFFER
- if (!IS_CYC_Z(cy_card[card])) {
+ if (!cy_is_Z(card)) {
#endif /* Z_EXT_CHARS_IN_BUFFER */
#ifdef CY_DEBUG_IO
printk(KERN_DEBUG "cyc:cy_chars_in_buffer ttyC%d %d\n",
@@ -2984,7 +2963,6 @@ static void set_line_char(struct cyclades_port *info)
void __iomem *base_addr;
int chip, channel, index;
unsigned cflag, iflag;
- unsigned short chip_number;
int baud, baud_rate = 0;
int i;
@@ -3013,9 +2991,8 @@ static void set_line_char(struct cyclades_port *info)
card = info->card;
channel = info->line - card->first_line;
- chip_number = channel / 4;
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
index = card->bus_index;
@@ -3233,21 +3210,17 @@ static void set_line_char(struct cyclades_port *info)
} else {
struct FIRM_ID __iomem *firm_id;
struct ZFW_CTRL __iomem *zfw_ctrl;
- struct BOARD_CTRL __iomem *board_ctrl;
struct CH_CTRL __iomem *ch_ctrl;
- struct BUF_CTRL __iomem *buf_ctrl;
__u32 sw_flow;
int retval;
firm_id = card->base_addr + ID_ADDRESS;
- if (!ISZLOADED(*card))
+ if (!cyz_is_loaded(card))
return;
zfw_ctrl = card->base_addr +
(readl(&firm_id->zfwctrl_addr) & 0xfffff);
- board_ctrl = &zfw_ctrl->board_ctrl;
ch_ctrl = &(zfw_ctrl->ch_ctrl[channel]);
- buf_ctrl = &zfw_ctrl->buf_ctrl[channel];
/* baud rate */
baud = tty_get_baud_rate(info->port.tty);
@@ -3457,7 +3430,7 @@ static int get_lsr_info(struct cyclades_port *info, unsigned int __user *value)
card = info->card;
channel = (info->line) - (card->first_line);
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -3497,7 +3470,7 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file)
card = info->card;
channel = info->line - card->first_line;
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -3523,7 +3496,7 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file)
} else {
base_addr = card->base_addr;
firm_id = card->base_addr + ID_ADDRESS;
- if (ISZLOADED(*card)) {
+ if (cyz_is_loaded(card)) {
zfw_ctrl = card->base_addr +
(readl(&firm_id->zfwctrl_addr) & 0xfffff);
board_ctrl = &zfw_ctrl->board_ctrl;
@@ -3566,7 +3539,7 @@ cy_tiocmset(struct tty_struct *tty, struct file *file,
card = info->card;
channel = (info->line) - (card->first_line);
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -3641,7 +3614,7 @@ cy_tiocmset(struct tty_struct *tty, struct file *file,
base_addr = card->base_addr;
firm_id = card->base_addr + ID_ADDRESS;
- if (ISZLOADED(*card)) {
+ if (cyz_is_loaded(card)) {
zfw_ctrl = card->base_addr +
(readl(&firm_id->zfwctrl_addr) & 0xfffff);
board_ctrl = &zfw_ctrl->board_ctrl;
@@ -3713,7 +3686,7 @@ static int cy_break(struct tty_struct *tty, int break_state)
card = info->card;
spin_lock_irqsave(&card->card_lock, flags);
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
/* Let the transmit ISR take care of this (since it
requires stuffing characters into the output stream).
*/
@@ -3782,7 +3755,7 @@ static int set_threshold(struct cyclades_port *info, unsigned long value)
card = info->card;
channel = info->line - card->first_line;
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -3810,7 +3783,7 @@ static int get_threshold(struct cyclades_port *info,
card = info->card;
channel = info->line - card->first_line;
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -3844,7 +3817,7 @@ static int set_timeout(struct cyclades_port *info, unsigned long value)
card = info->card;
channel = info->line - card->first_line;
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -3867,7 +3840,7 @@ static int get_timeout(struct cyclades_port *info,
card = info->card;
channel = info->line - card->first_line;
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -4121,7 +4094,7 @@ static void cy_send_xchar(struct tty_struct *tty, char ch)
card = info->card;
channel = info->line - card->first_line;
- if (IS_CYC_Z(*card)) {
+ if (cy_is_Z(card)) {
if (ch == STOP_CHAR(tty))
cyz_issue_cmd(card, channel, C_CM_SENDXOFF, 0L);
else if (ch == START_CHAR(tty))
@@ -4154,7 +4127,7 @@ static void cy_throttle(struct tty_struct *tty)
card = info->card;
if (I_IXOFF(tty)) {
- if (!IS_CYC_Z(*card))
+ if (!cy_is_Z(card))
cy_send_xchar(tty, STOP_CHAR(tty));
else
info->throttle = 1;
@@ -4162,7 +4135,7 @@ static void cy_throttle(struct tty_struct *tty)
if (tty->termios->c_cflag & CRTSCTS) {
channel = info->line - card->first_line;
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -4219,7 +4192,7 @@ static void cy_unthrottle(struct tty_struct *tty)
if (tty->termios->c_cflag & CRTSCTS) {
card = info->card;
channel = info->line - card->first_line;
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -4263,7 +4236,7 @@ static void cy_stop(struct tty_struct *tty)
cinfo = info->card;
channel = info->line - cinfo->first_line;
- if (!IS_CYC_Z(*cinfo)) {
+ if (!cy_is_Z(cinfo)) {
index = cinfo->bus_index;
chip = channel >> 2;
channel &= 0x03;
@@ -4296,7 +4269,7 @@ static void cy_start(struct tty_struct *tty)
cinfo = info->card;
channel = info->line - cinfo->first_line;
index = cinfo->bus_index;
- if (!IS_CYC_Z(*cinfo)) {
+ if (!cy_is_Z(cinfo)) {
chip = channel >> 2;
channel &= 0x03;
base_addr = cinfo->base_addr + (cy_chip_offset[chip] << index);
@@ -4347,33 +4320,20 @@ static void cy_hangup(struct tty_struct *tty)
static int __devinit cy_init_card(struct cyclades_card *cinfo)
{
struct cyclades_port *info;
- u32 uninitialized_var(mailbox);
- unsigned int nports, port;
+ unsigned int port;
unsigned short chip_number;
- int uninitialized_var(index);
spin_lock_init(&cinfo->card_lock);
+ cinfo->intr_enabled = 0;
- if (IS_CYC_Z(*cinfo)) { /* Cyclades-Z */
- mailbox = readl(&((struct RUNTIME_9060 __iomem *)
- cinfo->ctl_addr)->mail_box_0);
- nports = (mailbox == ZE_V1) ? ZE_V1_NPORTS : 8;
- cinfo->intr_enabled = 0;
- cinfo->nports = 0; /* Will be correctly set later, after
- Z FW is loaded */
- } else {
- index = cinfo->bus_index;
- nports = cinfo->nports = CyPORTS_PER_CHIP * cinfo->num_chips;
- }
-
- cinfo->ports = kzalloc(sizeof(*cinfo->ports) * nports, GFP_KERNEL);
+ cinfo->ports = kcalloc(cinfo->nports, sizeof(*cinfo->ports),
+ GFP_KERNEL);
if (cinfo->ports == NULL) {
printk(KERN_ERR "Cyclades: cannot allocate ports\n");
- cinfo->nports = 0;
return -ENOMEM;
}
- for (port = cinfo->first_line; port < cinfo->first_line + nports;
+ for (port = cinfo->first_line; port < cinfo->first_line + cinfo->nports;
port++) {
info = &cinfo->ports[port - cinfo->first_line];
tty_port_init(&info->port);
@@ -4387,9 +4347,9 @@ static int __devinit cy_init_card(struct cyclades_card *cinfo)
init_completion(&info->shutdown_wait);
init_waitqueue_head(&info->delta_msr_wait);
- if (IS_CYC_Z(*cinfo)) {
+ if (cy_is_Z(cinfo)) {
info->type = PORT_STARTECH;
- if (mailbox == ZO_V1)
+ if (cinfo->hw_ver == ZO_V1)
info->xmit_fifo_size = CYZ_FIFO_SIZE;
else
info->xmit_fifo_size = 4 * CYZ_FIFO_SIZE;
@@ -4398,6 +4358,7 @@ static int __devinit cy_init_card(struct cyclades_card *cinfo)
cyz_rx_restart, (unsigned long)info);
#endif
} else {
+ int index = cinfo->bus_index;
info->type = PORT_CIRRUS;
info->xmit_fifo_size = CyMAX_CHAR_FIFO;
info->cor1 = CyPARITY_NONE | Cy_1_STOP | Cy_8_BITS;
@@ -4430,7 +4391,7 @@ static int __devinit cy_init_card(struct cyclades_card *cinfo)
}
#ifndef CONFIG_CYZ_INTR
- if (IS_CYC_Z(*cinfo) && !timer_pending(&cyz_timerlist)) {
+ if (cy_is_Z(cinfo) && !timer_pending(&cyz_timerlist)) {
mod_timer(&cyz_timerlist, jiffies + 1);
#ifdef CY_PCI_DEBUG
printk(KERN_DEBUG "Cyclades-Z polling initialized\n");
@@ -4621,11 +4582,12 @@ static int __init cy_detect_isa(void)
/* set cy_card */
cy_card[j].base_addr = cy_isa_address;
- cy_card[j].ctl_addr = NULL;
+ cy_card[j].ctl_addr.p9050 = NULL;
cy_card[j].irq = (int)cy_isa_irq;
cy_card[j].bus_index = 0;
cy_card[j].first_line = cy_next_channel;
- cy_card[j].num_chips = cy_isa_nchan / 4;
+ cy_card[j].num_chips = cy_isa_nchan / CyPORTS_PER_CHIP;
+ cy_card[j].nports = cy_isa_nchan;
if (cy_init_card(&cy_card[j])) {
cy_card[j].base_addr = NULL;
free_irq(cy_isa_irq, &cy_card[j]);
@@ -4781,7 +4743,7 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
struct CUSTOM_REG __iomem *cust = base_addr;
struct ZFW_CTRL __iomem *pt_zfwctrl;
void __iomem *tmp;
- u32 mailbox, status;
+ u32 mailbox, status, nchan;
unsigned int i;
int retval;
@@ -4793,7 +4755,7 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
/* Check whether the firmware is already loaded and running. If
positive, skip this board */
- if (Z_FPGA_LOADED(ctl_addr) && readl(&fid->signature) == ZFIRM_ID) {
+ if (__cyz_fpga_loaded(ctl_addr) && readl(&fid->signature) == ZFIRM_ID) {
u32 cntval = readl(base_addr + 0x190);
udelay(100);
@@ -4812,7 +4774,7 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
mailbox = readl(&ctl_addr->mail_box_0);
- if (mailbox == 0 || Z_FPGA_LOADED(ctl_addr)) {
+ if (mailbox == 0 || __cyz_fpga_loaded(ctl_addr)) {
/* stops CPU and set window to beginning of RAM */
cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
cy_writel(&cust->cpu_stop, 0);
@@ -4828,7 +4790,7 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
base_addr);
if (retval)
goto err_rel;
- if (!Z_FPGA_LOADED(ctl_addr)) {
+ if (!__cyz_fpga_loaded(ctl_addr)) {
dev_err(&pdev->dev, "fw upload successful, but fw is "
"not loaded\n");
goto err_rel;
@@ -4887,7 +4849,7 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
"system before loading the new FW to the "
"Cyclades-Z.\n");
- if (Z_FPGA_LOADED(ctl_addr))
+ if (__cyz_fpga_loaded(ctl_addr))
plx_init(pdev, irq, ctl_addr);
retval = -EIO;
@@ -4902,16 +4864,16 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
base_addr + ID_ADDRESS, readl(&fid->zfwctrl_addr),
base_addr + readl(&fid->zfwctrl_addr));
+ nchan = readl(&pt_zfwctrl->board_ctrl.n_channel);
dev_info(&pdev->dev, "Cyclades-Z FW loaded: version = %x, ports = %u\n",
- readl(&pt_zfwctrl->board_ctrl.fw_version),
- readl(&pt_zfwctrl->board_ctrl.n_channel));
+ readl(&pt_zfwctrl->board_ctrl.fw_version), nchan);
- if (readl(&pt_zfwctrl->board_ctrl.n_channel) == 0) {
+ if (nchan == 0) {
dev_warn(&pdev->dev, "no Cyclades-Z ports were found. Please "
"check the connection between the Z host card and the "
"serial expanders.\n");
- if (Z_FPGA_LOADED(ctl_addr))
+ if (__cyz_fpga_loaded(ctl_addr))
plx_init(pdev, irq, ctl_addr);
dev_info(&pdev->dev, "Null number of ports detected. Board "
@@ -4932,9 +4894,7 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) |
0x00030800UL);
- plx_init(pdev, irq, ctl_addr);
-
- return 0;
+ return nchan;
err_rel:
release_firmware(fw);
err:
@@ -4946,7 +4906,7 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
{
void __iomem *addr0 = NULL, *addr2 = NULL;
char *card_name = NULL;
- u32 mailbox;
+ u32 uninitialized_var(mailbox);
unsigned int device_id, nchan = 0, card_no, i;
unsigned char plx_ver;
int retval, irq;
@@ -5023,11 +4983,12 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
}
/* Disable interrupts on the PLX before resetting it */
- cy_writew(addr0 + 0x68, readw(addr0 + 0x68) & ~0x0900);
+ cy_writew(&ctl_addr->intr_ctrl_stat,
+ readw(&ctl_addr->intr_ctrl_stat) & ~0x0900);
plx_init(pdev, irq, addr0);
- mailbox = (u32)readl(&ctl_addr->mail_box_0);
+ mailbox = readl(&ctl_addr->mail_box_0);
addr2 = ioremap_nocache(pci_resource_start(pdev, 2),
mailbox == ZE_V1 ? CyPCI_Ze_win : CyPCI_Zwin);
@@ -5038,12 +4999,8 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
if (mailbox == ZE_V1) {
card_name = "Cyclades-Ze";
-
- readl(&ctl_addr->mail_box_0);
- nchan = ZE_V1_NPORTS;
} else {
card_name = "Cyclades-8Zo";
-
#ifdef CY_PCI_DEBUG
if (mailbox == ZO_V1) {
cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
@@ -5065,15 +5022,12 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
*/
if ((mailbox == ZO_V1) || (mailbox == ZO_V2))
cy_writel(addr2 + ID_ADDRESS, 0L);
-
- retval = cyz_load_fw(pdev, addr2, addr0, irq);
- if (retval)
- goto err_unmap;
- /* This must be a Cyclades-8Zo/PCI. The extendable
- version will have a different device_id and will
- be allocated its maximum number of ports. */
- nchan = 8;
}
+
+ retval = cyz_load_fw(pdev, addr2, addr0, irq);
+ if (retval <= 0)
+ goto err_unmap;
+ nchan = retval;
}
if ((cy_next_channel + nchan) > NR_PORTS) {
@@ -5103,8 +5057,10 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "could not allocate IRQ\n");
goto err_unmap;
}
- cy_card[card_no].num_chips = nchan / 4;
+ cy_card[card_no].num_chips = nchan / CyPORTS_PER_CHIP;
} else {
+ cy_card[card_no].hw_ver = mailbox;
+ cy_card[card_no].num_chips = (unsigned int)-1;
#ifdef CONFIG_CYZ_INTR
/* allocate IRQ only if board has an IRQ */
if (irq != 0 && irq != 255) {
@@ -5117,15 +5073,15 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
}
}
#endif /* CONFIG_CYZ_INTR */
- cy_card[card_no].num_chips = (unsigned int)-1;
}
/* set cy_card */
cy_card[card_no].base_addr = addr2;
- cy_card[card_no].ctl_addr = addr0;
+ cy_card[card_no].ctl_addr.p9050 = addr0;
cy_card[card_no].irq = irq;
cy_card[card_no].bus_index = 1;
cy_card[card_no].first_line = cy_next_channel;
+ cy_card[card_no].nports = nchan;
retval = cy_init_card(&cy_card[card_no]);
if (retval)
goto err_null;
@@ -5138,17 +5094,20 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
plx_ver = readb(addr2 + CyPLX_VER) & 0x0f;
switch (plx_ver) {
case PLX_9050:
-
cy_writeb(addr0 + 0x4c, 0x43);
break;
case PLX_9060:
case PLX_9080:
default: /* Old boards, use PLX_9060 */
- plx_init(pdev, irq, addr0);
- cy_writew(addr0 + 0x68, readw(addr0 + 0x68) | 0x0900);
+ {
+ struct RUNTIME_9060 __iomem *ctl_addr = addr0;
+ plx_init(pdev, irq, ctl_addr);
+ cy_writew(&ctl_addr->intr_ctrl_stat,
+ readw(&ctl_addr->intr_ctrl_stat) | 0x0900);
break;
}
+ }
}
dev_info(&pdev->dev, "%s/PCI #%d found: %d channels starting from "
@@ -5179,22 +5138,23 @@ static void __devexit cy_pci_remove(struct pci_dev *pdev)
unsigned int i;
/* non-Z with old PLX */
- if (!IS_CYC_Z(*cinfo) && (readb(cinfo->base_addr + CyPLX_VER) & 0x0f) ==
+ if (!cy_is_Z(cinfo) && (readb(cinfo->base_addr + CyPLX_VER) & 0x0f) ==
PLX_9050)
- cy_writeb(cinfo->ctl_addr + 0x4c, 0);
+ cy_writeb(cinfo->ctl_addr.p9050 + 0x4c, 0);
else
#ifndef CONFIG_CYZ_INTR
- if (!IS_CYC_Z(*cinfo))
+ if (!cy_is_Z(cinfo))
#endif
- cy_writew(cinfo->ctl_addr + 0x68,
- readw(cinfo->ctl_addr + 0x68) & ~0x0900);
+ cy_writew(&cinfo->ctl_addr.p9060->intr_ctrl_stat,
+ readw(&cinfo->ctl_addr.p9060->intr_ctrl_stat) &
+ ~0x0900);
iounmap(cinfo->base_addr);
- if (cinfo->ctl_addr)
- iounmap(cinfo->ctl_addr);
+ if (cinfo->ctl_addr.p9050)
+ iounmap(cinfo->ctl_addr.p9050);
if (cinfo->irq
#ifndef CONFIG_CYZ_INTR
- && !IS_CYC_Z(*cinfo)
+ && !cy_is_Z(cinfo)
#endif /* CONFIG_CYZ_INTR */
)
free_irq(cinfo->irq, cinfo);
@@ -5240,7 +5200,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
(cur_jifs - info->idle_stats.recv_idle)/
HZ, info->idle_stats.overruns,
/* FIXME: double check locking */
- (long)info->port.tty->ldisc.ops->num);
+ (long)info->port.tty->ldisc->ops->num);
else
seq_printf(m, "%3d %8lu %10lu %8lu "
"%10lu %8lu %9lu %6ld\n",
@@ -5386,11 +5346,11 @@ static void __exit cy_cleanup_module(void)
/* clear interrupt */
cy_writeb(card->base_addr + Cy_ClrIntr, 0);
iounmap(card->base_addr);
- if (card->ctl_addr)
- iounmap(card->ctl_addr);
+ if (card->ctl_addr.p9050)
+ iounmap(card->ctl_addr.p9050);
if (card->irq
#ifndef CONFIG_CYZ_INTR
- && !IS_CYC_Z(*card)
+ && !cy_is_Z(card)
#endif /* CONFIG_CYZ_INTR */
)
free_irq(card->irq, card);
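
Two calling conventions change in the cyclades hunks above: the IS_CYC_Z(*card) macro becomes a cy_is_Z(card) helper that takes a pointer, and cyz_load_fw() reports the detected channel count (or a negative errno) instead of plain 0, so cy_pci_probe() can treat any value <= 0 as failure and use the positive return as nchan. The sketch below models only that flow; the struct fields and helper bodies are illustrative assumptions, not the driver's definitions.

#include <stdio.h>

struct card {
        int num_chips;          /* the driver stores (unsigned int)-1 here for Z boards */
        int nports;
};

static int card_is_z(const struct card *c)      /* stands in for cy_is_Z(card) */
{
        return c->num_chips == -1;
}

static int load_fw(void)                        /* stands in for cyz_load_fw() */
{
        int nchan = 8;                          /* pretend the firmware reported 8 ports */

        if (nchan == 0)
                return -5;                      /* -EIO style failure */
        return nchan;                           /* success: positive channel count */
}

int main(void)
{
        struct card c = { .num_chips = -1, .nports = 0 };
        int ret = load_fw();

        if (ret <= 0) {                         /* the probe path treats <= 0 as failure */
                fprintf(stderr, "probe failed: %d\n", ret);
                return 1;
        }
        c.nports = ret;
        printf("Z board: %d, ports: %d\n", card_is_z(&c), c.nports);
        return 0;
}
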
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index af7c13ca949..abef1f7d84f 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -745,7 +745,7 @@ static int epca_carrier_raised(struct tty_port *port)
return 0;
}
-static void epca_raise_dtr_rts(struct tty_port *port)
+static void epca_dtr_rts(struct tty_port *port, int onoff)
{
}
@@ -925,7 +925,7 @@ static const struct tty_operations pc_ops = {
static const struct tty_port_operations epca_port_ops = {
.carrier_raised = epca_carrier_raised,
- .raise_dtr_rts = epca_raise_dtr_rts,
+ .dtr_rts = epca_dtr_rts,
};
static int info_open(struct tty_struct *tty, struct file *filp)
@@ -1518,7 +1518,7 @@ static void doevent(int crd)
if (event & MODEMCHG_IND) {
/* A modem signal change has been indicated */
ch->imodem = mstat;
- if (test_bit(ASYNC_CHECK_CD, &ch->port.flags)) {
+ if (test_bit(ASYNCB_CHECK_CD, &ch->port.flags)) {
/* We are now receiving dcd */
if (mstat & ch->dcd)
wake_up_interruptible(&ch->port.open_wait);
@@ -1765,9 +1765,9 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch)
* that the driver will wait on carrier detect.
*/
if (ts->c_cflag & CLOCAL)
- clear_bit(ASYNC_CHECK_CD, &ch->port.flags);
+ clear_bit(ASYNCB_CHECK_CD, &ch->port.flags);
else
- set_bit(ASYNC_CHECK_CD, &ch->port.flags);
+ set_bit(ASYNCB_CHECK_CD, &ch->port.flags);
mval = ch->m_dtr | ch->m_rts;
} /* End CBAUD not detected */
iflag = termios2digi_i(ch, ts->c_iflag);
@@ -2114,8 +2114,8 @@ static int pc_ioctl(struct tty_struct *tty, struct file *file,
tty_wait_until_sent(tty, 0);
} else {
/* ldisc lock already held in ioctl */
- if (tty->ldisc.ops->flush_buffer)
- tty->ldisc.ops->flush_buffer(tty);
+ if (tty->ldisc->ops->flush_buffer)
+ tty->ldisc->ops->flush_buffer(tty);
}
unlock_kernel();
/* Fall Thru */
@@ -2244,7 +2244,8 @@ static void do_softint(struct work_struct *work)
if (test_and_clear_bit(EPCA_EVENT_HANGUP, &ch->event)) {
tty_hangup(tty);
wake_up_interruptible(&ch->port.open_wait);
- clear_bit(ASYNC_NORMAL_ACTIVE, &ch->port.flags);
+ clear_bit(ASYNCB_NORMAL_ACTIVE,
+ &ch->port.flags);
}
}
tty_kref_put(tty);
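
The epca switch from ASYNC_CHECK_CD/ASYNC_NORMAL_ACTIVE to their ASYNCB_ counterparts matters because the atomic bit helpers take bit numbers, while the ASYNC_ constants are the corresponding masks. A minimal userspace model of the distinction follows; the bit value and the simplified test_bit() are illustrative, not the kernel's implementation.

#include <stdio.h>

#define ASYNCB_CHECK_CD  25                      /* bit number (illustrative value) */
#define ASYNC_CHECK_CD   (1U << ASYNCB_CHECK_CD) /* the corresponding mask */

/* simplified stand-in for the kernel's test_bit(): expects a bit number */
static int test_bit(unsigned int nr, const unsigned long *addr)
{
        return (*addr >> nr) & 1;
}

int main(void)
{
        unsigned long flags = ASYNC_CHECK_CD;   /* bit 25 is set */

        printf("test_bit(ASYNCB_CHECK_CD, ...) = %d\n",
               test_bit(ASYNCB_CHECK_CD, &flags));
        /* Passing the ASYNC_CHECK_CD mask here would ask for bit 33554432. */
        return 0;
}
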
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 340ba4f9dc5..4a9f3492b92 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -224,7 +224,7 @@ static void hpet_timer_set_irq(struct hpet_dev *devp)
break;
}
- gsi = acpi_register_gsi(irq, ACPI_LEVEL_SENSITIVE,
+ gsi = acpi_register_gsi(NULL, irq, ACPI_LEVEL_SENSITIVE,
ACPI_ACTIVE_LOW);
if (gsi > 0)
break;
@@ -939,7 +939,7 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
irqp = &res->data.extended_irq;
for (i = 0; i < irqp->interrupt_count; i++) {
- irq = acpi_register_gsi(irqp->interrupts[i],
+ irq = acpi_register_gsi(NULL, irqp->interrupts[i],
irqp->triggering, irqp->polarity);
if (irq < 0)
return AE_ERROR;
diff --git a/drivers/char/ip2/i2lib.c b/drivers/char/ip2/i2lib.c
index 0061e18aff6..0d10b89218e 100644
--- a/drivers/char/ip2/i2lib.c
+++ b/drivers/char/ip2/i2lib.c
@@ -868,11 +868,11 @@ i2Input(i2ChanStrPtr pCh)
amountToMove = count;
}
// Move the first block
- pCh->pTTY->ldisc.ops->receive_buf( pCh->pTTY,
+ pCh->pTTY->ldisc->ops->receive_buf( pCh->pTTY,
&(pCh->Ibuf[stripIndex]), NULL, amountToMove );
// If we needed to wrap, do the second data move
if (count > amountToMove) {
- pCh->pTTY->ldisc.ops->receive_buf( pCh->pTTY,
+ pCh->pTTY->ldisc->ops->receive_buf( pCh->pTTY,
pCh->Ibuf, NULL, count - amountToMove );
}
// Bump and wrap the stripIndex all at once by the amount of data read. This
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index afd9247cf08..517271c762e 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -1315,8 +1315,8 @@ static inline void isig(int sig, struct tty_struct *tty, int flush)
if (tty->pgrp)
kill_pgrp(tty->pgrp, sig, 1);
if (flush || !L_NOFLSH(tty)) {
- if ( tty->ldisc.ops->flush_buffer )
- tty->ldisc.ops->flush_buffer(tty);
+ if ( tty->ldisc->ops->flush_buffer )
+ tty->ldisc->ops->flush_buffer(tty);
i2InputFlush( tty->driver_data );
}
}
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index a59eac584d1..4d745a89504 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -329,7 +329,7 @@ static inline void drop_rts(struct isi_port *port)
/* card->lock MUST NOT be held */
-static void isicom_raise_dtr_rts(struct tty_port *port)
+static void isicom_dtr_rts(struct tty_port *port, int on)
{
struct isi_port *ip = container_of(port, struct isi_port, port);
struct isi_board *card = ip->card;
@@ -339,10 +339,17 @@ static void isicom_raise_dtr_rts(struct tty_port *port)
if (!lock_card(card))
return;
- outw(0x8000 | (channel << card->shift_count) | 0x02, base);
- outw(0x0f04, base);
- InterruptTheCard(base);
- ip->status |= (ISI_DTR | ISI_RTS);
+ if (on) {
+ outw(0x8000 | (channel << card->shift_count) | 0x02, base);
+ outw(0x0f04, base);
+ InterruptTheCard(base);
+ ip->status |= (ISI_DTR | ISI_RTS);
+ } else {
+ outw(0x8000 | (channel << card->shift_count) | 0x02, base);
+ outw(0x0C04, base);
+ InterruptTheCard(base);
+ ip->status &= ~(ISI_DTR | ISI_RTS);
+ }
unlock_card(card);
}
@@ -1339,7 +1346,7 @@ static const struct tty_operations isicom_ops = {
static const struct tty_port_operations isicom_port_ops = {
.carrier_raised = isicom_carrier_raised,
- .raise_dtr_rts = isicom_raise_dtr_rts,
+ .dtr_rts = isicom_dtr_rts,
};
static int __devinit reset_card(struct pci_dev *pdev,
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index fff19f7e29d..e18800c400b 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -1140,14 +1140,14 @@ static int stli_carrier_raised(struct tty_port *port)
return (portp->sigs & TIOCM_CD) ? 1 : 0;
}
-static void stli_raise_dtr_rts(struct tty_port *port)
+static void stli_dtr_rts(struct tty_port *port, int on)
{
struct stliport *portp = container_of(port, struct stliport, port);
struct stlibrd *brdp = stli_brds[portp->brdnr];
- stli_mkasysigs(&portp->asig, 1, 1);
+ stli_mkasysigs(&portp->asig, on, on);
if (stli_cmdwait(brdp, portp, A_SETSIGNALS, &portp->asig,
sizeof(asysigs_t), 0) < 0)
- printk(KERN_WARNING "istallion: dtr raise failed.\n");
+ printk(KERN_WARNING "istallion: dtr set failed.\n");
}
@@ -4417,7 +4417,7 @@ static const struct tty_operations stli_ops = {
static const struct tty_port_operations stli_port_ops = {
.carrier_raised = stli_carrier_raised,
- .raise_dtr_rts = stli_raise_dtr_rts,
+ .dtr_rts = stli_dtr_rts,
};
/*****************************************************************************/
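
Across these serial drivers the patch swaps the assert-only .raise_dtr_rts() hook for a .dtr_rts(port, on) callback that can both raise and drop the lines, so the tty_port core can use one method on open and on shutdown/hangup. The stand-alone sketch below mirrors the shape of that conversion; the structures and flag values are stand-ins, not the kernel's tty_port API.

#include <stdio.h>

struct port;

struct port_operations {
        void (*dtr_rts)(struct port *port, int on);     /* was raise_dtr_rts(port) */
};

struct port {
        unsigned int signals;
        const struct port_operations *ops;
};

#define SIG_DTR 0x1
#define SIG_RTS 0x2

static void demo_dtr_rts(struct port *port, int on)
{
        if (on)
                port->signals |= SIG_DTR | SIG_RTS;     /* open/raise path */
        else
                port->signals &= ~(SIG_DTR | SIG_RTS);  /* shutdown/drop path */
}

static const struct port_operations demo_ops = { .dtr_rts = demo_dtr_rts };

int main(void)
{
        struct port p = { 0, &demo_ops };

        p.ops->dtr_rts(&p, 1);          /* block_til_ready-style assert */
        printf("signals after raise: %#x\n", p.signals);
        p.ops->dtr_rts(&p, 0);          /* hangup/close-style drop */
        printf("signals after drop:  %#x\n", p.signals);
        return 0;
}
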
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 8f05c38c2f0..f96d0bef855 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -694,6 +694,8 @@ static ssize_t read_zero(struct file * file, char __user * buf,
written += chunk - unwritten;
if (unwritten)
break;
+ if (signal_pending(current))
+ return written ? written : -ERESTARTSYS;
buf += chunk;
count -= chunk;
cond_resched();
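
The read_zero() hunk adds the usual convention for long copy loops: if a signal arrives mid-loop, return the bytes already produced, or -ERESTARTSYS when nothing was. A hedged userspace model of that control flow; the chunk size and the fake signal trigger are illustrative.

#include <stdio.h>

#define ERESTARTSYS 512

static int signal_pending(int iteration)
{
        return iteration == 3;                  /* pretend a signal shows up */
}

static long read_zero_model(long count)
{
        long written = 0, chunk = 4096;
        int i = 0;

        while (count > 0) {
                long n = count < chunk ? count : chunk;

                written += n;                   /* model: every byte "copied" */
                count -= n;
                if (signal_pending(++i))
                        return written ? written : -ERESTARTSYS;
        }
        return written;
}

int main(void)
{
        printf("returned %ld\n", read_zero_model(1L << 20));
        return 0;
}
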
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 4a4cab73d0b..65b6ff2442c 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -1184,6 +1184,11 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
return -ENODEV;
}
+ if (port % MAX_PORTS_PER_BOARD >= brd->numPorts) {
+ mutex_unlock(&moxa_openlock);
+ return -ENODEV;
+ }
+
ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
ch->port.count++;
tty->driver_data = ch;
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index a420e8d437d..9533f43a30b 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -547,14 +547,18 @@ static int mxser_carrier_raised(struct tty_port *port)
return (inb(mp->ioaddr + UART_MSR) & UART_MSR_DCD)?1:0;
}
-static void mxser_raise_dtr_rts(struct tty_port *port)
+static void mxser_dtr_rts(struct tty_port *port, int on)
{
struct mxser_port *mp = container_of(port, struct mxser_port, port);
unsigned long flags;
spin_lock_irqsave(&mp->slock, flags);
- outb(inb(mp->ioaddr + UART_MCR) |
- UART_MCR_DTR | UART_MCR_RTS, mp->ioaddr + UART_MCR);
+ if (on)
+ outb(inb(mp->ioaddr + UART_MCR) |
+ UART_MCR_DTR | UART_MCR_RTS, mp->ioaddr + UART_MCR);
+ else
+ outb(inb(mp->ioaddr + UART_MCR)&~(UART_MCR_DTR | UART_MCR_RTS),
+ mp->ioaddr + UART_MCR);
spin_unlock_irqrestore(&mp->slock, flags);
}
@@ -2356,7 +2360,7 @@ static const struct tty_operations mxser_ops = {
struct tty_port_operations mxser_port_ops = {
.carrier_raised = mxser_carrier_raised,
- .raise_dtr_rts = mxser_raise_dtr_rts,
+ .dtr_rts = mxser_dtr_rts,
};
/*
@@ -2711,7 +2715,7 @@ static int __init mxser_module_init(void)
continue;
brd = &mxser_boards[m];
- retval = mxser_get_ISA_conf(!ioaddr[b], brd);
+ retval = mxser_get_ISA_conf(ioaddr[b], brd);
if (retval <= 0) {
brd->info = NULL;
continue;
diff --git a/drivers/char/n_hdlc.c b/drivers/char/n_hdlc.c
index bacb3e2872a..461ece591a5 100644
--- a/drivers/char/n_hdlc.c
+++ b/drivers/char/n_hdlc.c
@@ -342,8 +342,8 @@ static int n_hdlc_tty_open (struct tty_struct *tty)
#endif
/* Flush any pending characters in the driver and discipline. */
- if (tty->ldisc.ops->flush_buffer)
- tty->ldisc.ops->flush_buffer(tty);
+ if (tty->ldisc->ops->flush_buffer)
+ tty->ldisc->ops->flush_buffer(tty);
tty_driver_flush_buffer(tty);
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index f6f0e4ec2b5..94a5d5020ab 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -73,24 +73,6 @@
#define ECHO_OP_SET_CANON_COL 0x81
#define ECHO_OP_ERASE_TAB 0x82
-static inline unsigned char *alloc_buf(void)
-{
- gfp_t prio = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
-
- if (PAGE_SIZE != N_TTY_BUF_SIZE)
- return kmalloc(N_TTY_BUF_SIZE, prio);
- else
- return (unsigned char *)__get_free_page(prio);
-}
-
-static inline void free_buf(unsigned char *buf)
-{
- if (PAGE_SIZE != N_TTY_BUF_SIZE)
- kfree(buf);
- else
- free_page((unsigned long) buf);
-}
-
static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
unsigned char __user *ptr)
{
@@ -1558,11 +1540,11 @@ static void n_tty_close(struct tty_struct *tty)
{
n_tty_flush_buffer(tty);
if (tty->read_buf) {
- free_buf(tty->read_buf);
+ kfree(tty->read_buf);
tty->read_buf = NULL;
}
if (tty->echo_buf) {
- free_buf(tty->echo_buf);
+ kfree(tty->echo_buf);
tty->echo_buf = NULL;
}
}
@@ -1584,17 +1566,16 @@ static int n_tty_open(struct tty_struct *tty)
/* These are ugly. Currently a malloc failure here can panic */
if (!tty->read_buf) {
- tty->read_buf = alloc_buf();
+ tty->read_buf = kzalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
if (!tty->read_buf)
return -ENOMEM;
}
if (!tty->echo_buf) {
- tty->echo_buf = alloc_buf();
+ tty->echo_buf = kzalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
+
if (!tty->echo_buf)
return -ENOMEM;
}
- memset(tty->read_buf, 0, N_TTY_BUF_SIZE);
- memset(tty->echo_buf, 0, N_TTY_BUF_SIZE);
reset_buffer_flags(tty);
tty->column = 0;
n_tty_set_termios(tty, NULL);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 19d79fc5446..77b36488922 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -383,7 +383,7 @@ static void async_mode(MGSLPC_INFO *info);
static void tx_timeout(unsigned long context);
static int carrier_raised(struct tty_port *port);
-static void raise_dtr_rts(struct tty_port *port);
+static void dtr_rts(struct tty_port *port, int onoff);
#if SYNCLINK_GENERIC_HDLC
#define dev_to_port(D) (dev_to_hdlc(D)->priv)
@@ -513,7 +513,7 @@ static void ldisc_receive_buf(struct tty_struct *tty,
static const struct tty_port_operations mgslpc_port_ops = {
.carrier_raised = carrier_raised,
- .raise_dtr_rts = raise_dtr_rts
+ .dtr_rts = dtr_rts
};
static int mgslpc_probe(struct pcmcia_device *link)
@@ -2528,13 +2528,16 @@ static int carrier_raised(struct tty_port *port)
return 0;
}
-static void raise_dtr_rts(struct tty_port *port)
+static void dtr_rts(struct tty_port *port, int onoff)
{
MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
unsigned long flags;
spin_lock_irqsave(&info->lock,flags);
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ if (onoff)
+ info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ else
+ info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
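
The off branch above clears both SerialSignal bits in one statement; the complement has to cover the combined mask, since C binds ~ tighter than +. The toy program below, using stand-in bit values rather than the driver's SerialSignal_* definitions, shows the difference the parentheses make.

#include <stdio.h>

#define SIG_DTR 0x01
#define SIG_RTS 0x02

int main(void)
{
        unsigned char sigs = 0xff;
        unsigned char a = sigs & (unsigned char)(~SIG_RTS + SIG_DTR);   /* wrong grouping: only one bit cleared */
        unsigned char b = sigs & (unsigned char)~(SIG_RTS + SIG_DTR);   /* clears both bits as intended */

        printf("without parens: %#x, with parens: %#x\n", a, b);
        return 0;
}
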
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index 31038a0052a..5acd29e6e04 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -30,7 +30,6 @@
#include <asm/system.h>
-/* These are global because they are accessed in tty_io.c */
#ifdef CONFIG_UNIX98_PTYS
static struct tty_driver *ptm_driver;
static struct tty_driver *pts_driver;
@@ -111,7 +110,7 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf,
c = to->receive_room;
if (c > count)
c = count;
- to->ldisc.ops->receive_buf(to, buf, NULL, c);
+ to->ldisc->ops->receive_buf(to, buf, NULL, c);
return c;
}
@@ -149,11 +148,11 @@ static int pty_chars_in_buffer(struct tty_struct *tty)
int count;
/* We should get the line discipline lock for "tty->link" */
- if (!to || !to->ldisc.ops->chars_in_buffer)
+ if (!to || !to->ldisc->ops->chars_in_buffer)
return 0;
/* The ldisc must report 0 if no characters available to be read */
- count = to->ldisc.ops->chars_in_buffer(to);
+ count = to->ldisc->ops->chars_in_buffer(to);
if (tty->driver->subtype == PTY_TYPE_SLAVE)
return count;
@@ -187,8 +186,8 @@ static void pty_flush_buffer(struct tty_struct *tty)
if (!to)
return;
- if (to->ldisc.ops->flush_buffer)
- to->ldisc.ops->flush_buffer(to);
+ if (to->ldisc->ops->flush_buffer)
+ to->ldisc->ops->flush_buffer(to);
if (to->packet) {
spin_lock_irqsave(&tty->ctrl_lock, flags);
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 20d90e6a6e5..db32f0e4c7d 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -71,7 +71,7 @@ static int raw_open(struct inode *inode, struct file *filp)
err = bd_claim(bdev, raw_open);
if (err)
goto out1;
- err = set_blocksize(bdev, bdev_hardsect_size(bdev));
+ err = set_blocksize(bdev, bdev_logical_block_size(bdev));
if (err)
goto out2;
filp->f_flags |= O_DIRECT;
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
index f59fc5cea06..63d5b628477 100644
--- a/drivers/char/rocket.c
+++ b/drivers/char/rocket.c
@@ -872,11 +872,16 @@ static int carrier_raised(struct tty_port *port)
return (sGetChanStatusLo(&info->channel) & CD_ACT) ? 1 : 0;
}
-static void raise_dtr_rts(struct tty_port *port)
+static void dtr_rts(struct tty_port *port, int on)
{
struct r_port *info = container_of(port, struct r_port, port);
- sSetDTR(&info->channel);
- sSetRTS(&info->channel);
+ if (on) {
+ sSetDTR(&info->channel);
+ sSetRTS(&info->channel);
+ } else {
+ sClrDTR(&info->channel);
+ sClrRTS(&info->channel);
+ }
}
/*
@@ -934,7 +939,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
/*
* Info->count is now 1; so it's safe to sleep now.
*/
- if (!test_bit(ASYNC_INITIALIZED, &port->flags)) {
+ if (!test_bit(ASYNCB_INITIALIZED, &port->flags)) {
cp = &info->channel;
sSetRxTrigger(cp, TRIG_1);
if (sGetChanStatus(cp) & CD_ACT)
@@ -958,7 +963,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
sEnRxFIFO(cp);
sEnTransmit(cp);
- set_bit(ASYNC_INITIALIZED, &info->port.flags);
+ set_bit(ASYNCB_INITIALIZED, &info->port.flags);
/*
* Set up the tty->alt_speed kludge
@@ -1641,7 +1646,7 @@ static int rp_write(struct tty_struct *tty,
/* Write remaining data into the port's xmit_buf */
while (1) {
/* Hung up ? */
- if (!test_bit(ASYNC_NORMAL_ACTIVE, &info->port.flags))
+ if (!test_bit(ASYNCB_NORMAL_ACTIVE, &info->port.flags))
goto end;
c = min(count, XMIT_BUF_SIZE - info->xmit_cnt - 1);
c = min(c, XMIT_BUF_SIZE - info->xmit_head);
@@ -2250,7 +2255,7 @@ static const struct tty_operations rocket_ops = {
static const struct tty_port_operations rocket_port_ops = {
.carrier_raised = carrier_raised,
- .raise_dtr_rts = raise_dtr_rts,
+ .dtr_rts = dtr_rts,
};
/*
diff --git a/drivers/char/selection.c b/drivers/char/selection.c
index cb8ca569896..f97b9e84806 100644
--- a/drivers/char/selection.c
+++ b/drivers/char/selection.c
@@ -327,7 +327,7 @@ int paste_selection(struct tty_struct *tty)
}
count = sel_buffer_lth - pasted;
count = min(count, tty->receive_room);
- tty->ldisc.ops->receive_buf(tty, sel_buffer + pasted,
+ tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted,
NULL, count);
pasted += count;
}
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 2ad813a801d..53e504f41b2 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -772,11 +772,11 @@ static int stl_carrier_raised(struct tty_port *port)
return (portp->sigs & TIOCM_CD) ? 1 : 0;
}
-static void stl_raise_dtr_rts(struct tty_port *port)
+static void stl_dtr_rts(struct tty_port *port, int on)
{
struct stlport *portp = container_of(port, struct stlport, port);
/* Takes brd_lock internally */
- stl_setsignals(portp, 1, 1);
+ stl_setsignals(portp, on, on);
}
/*****************************************************************************/
@@ -2547,7 +2547,7 @@ static const struct tty_operations stl_ops = {
static const struct tty_port_operations stl_port_ops = {
.carrier_raised = stl_carrier_raised,
- .raise_dtr_rts = stl_raise_dtr_rts,
+ .dtr_rts = stl_dtr_rts,
};
/*****************************************************************************/
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index afd0b26ca05..afded3a2379 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -3247,13 +3247,16 @@ static int carrier_raised(struct tty_port *port)
return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
}
-static void raise_dtr_rts(struct tty_port *port)
+static void dtr_rts(struct tty_port *port, int on)
{
struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
unsigned long flags;
spin_lock_irqsave(&info->irq_spinlock,flags);
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ if (on)
+ info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ else
+ info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
usc_set_serial_signals(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
@@ -4258,7 +4261,7 @@ static void mgsl_add_device( struct mgsl_struct *info )
static const struct tty_port_operations mgsl_port_ops = {
.carrier_raised = carrier_raised,
- .raise_dtr_rts = raise_dtr_rts,
+ .dtr_rts = dtr_rts,
};
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 5e256494686..1386625fc4c 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -214,6 +214,7 @@ struct slgt_desc
#define set_desc_next(a,b) (a).next = cpu_to_le32((unsigned int)(b))
#define set_desc_count(a,b)(a).count = cpu_to_le16((unsigned short)(b))
#define set_desc_eof(a,b) (a).status = cpu_to_le16((b) ? (le16_to_cpu((a).status) | BIT0) : (le16_to_cpu((a).status) & ~BIT0))
+#define set_desc_status(a, b) (a).status = cpu_to_le16((unsigned short)(b))
#define desc_count(a) (le16_to_cpu((a).count))
#define desc_status(a) (le16_to_cpu((a).status))
#define desc_complete(a) (le16_to_cpu((a).status) & BIT15)
@@ -297,6 +298,7 @@ struct slgt_info {
u32 max_frame_size; /* as set by device config */
unsigned int rbuf_fill_level;
+ unsigned int rx_pio;
unsigned int if_mode;
unsigned int base_clock;
@@ -331,6 +333,8 @@ struct slgt_info {
struct slgt_desc *rbufs;
unsigned int rbuf_current;
unsigned int rbuf_index;
+ unsigned int rbuf_fill_index;
+ unsigned short rbuf_fill_count;
unsigned int tbuf_count;
struct slgt_desc *tbufs;
@@ -2110,6 +2114,40 @@ static void ri_change(struct slgt_info *info, unsigned short status)
info->pending_bh |= BH_STATUS;
}
+static void isr_rxdata(struct slgt_info *info)
+{
+ unsigned int count = info->rbuf_fill_count;
+ unsigned int i = info->rbuf_fill_index;
+ unsigned short reg;
+
+ while (rd_reg16(info, SSR) & IRQ_RXDATA) {
+ reg = rd_reg16(info, RDR);
+ DBGISR(("isr_rxdata %s RDR=%04X\n", info->device_name, reg));
+ if (desc_complete(info->rbufs[i])) {
+ /* all buffers full */
+ rx_stop(info);
+ info->rx_restart = 1;
+ continue;
+ }
+ info->rbufs[i].buf[count++] = (unsigned char)reg;
+ /* async mode saves status byte to buffer for each data byte */
+ if (info->params.mode == MGSL_MODE_ASYNC)
+ info->rbufs[i].buf[count++] = (unsigned char)(reg >> 8);
+ if (count == info->rbuf_fill_level || (reg & BIT10)) {
+ /* buffer full or end of frame */
+ set_desc_count(info->rbufs[i], count);
+ set_desc_status(info->rbufs[i], BIT15 | (reg >> 8));
+ info->rbuf_fill_count = count = 0;
+ if (++i == info->rbuf_count)
+ i = 0;
+ info->pending_bh |= BH_RECEIVE;
+ }
+ }
+
+ info->rbuf_fill_index = i;
+ info->rbuf_fill_count = count;
+}
+
static void isr_serial(struct slgt_info *info)
{
unsigned short status = rd_reg16(info, SSR);
@@ -2125,6 +2163,8 @@ static void isr_serial(struct slgt_info *info)
if (info->tx_count)
isr_txeom(info, status);
}
+ if (info->rx_pio && (status & IRQ_RXDATA))
+ isr_rxdata(info);
if ((status & IRQ_RXBREAK) && (status & RXBREAK)) {
info->icount.brk++;
/* process break detection if tty control allows */
@@ -2141,7 +2181,8 @@ static void isr_serial(struct slgt_info *info)
} else {
if (status & (IRQ_TXIDLE + IRQ_TXUNDER))
isr_txeom(info, status);
-
+ if (info->rx_pio && (status & IRQ_RXDATA))
+ isr_rxdata(info);
if (status & IRQ_RXIDLE) {
if (status & RXIDLE)
info->icount.rxidle++;
@@ -2642,6 +2683,10 @@ static int rx_enable(struct slgt_info *info, int enable)
return -EINVAL;
}
info->rbuf_fill_level = rbuf_fill_level;
+ if (rbuf_fill_level < 128)
+ info->rx_pio = 1; /* PIO mode */
+ else
+ info->rx_pio = 0; /* DMA mode */
rx_stop(info); /* restart receiver to use new fill level */
}
@@ -3099,13 +3144,16 @@ static int carrier_raised(struct tty_port *port)
return (info->signals & SerialSignal_DCD) ? 1 : 0;
}
-static void raise_dtr_rts(struct tty_port *port)
+static void dtr_rts(struct tty_port *port, int on)
{
unsigned long flags;
struct slgt_info *info = container_of(port, struct slgt_info, port);
spin_lock_irqsave(&info->lock,flags);
- info->signals |= SerialSignal_RTS + SerialSignal_DTR;
+ if (on)
+ info->signals |= SerialSignal_RTS + SerialSignal_DTR;
+ else
+ info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -3419,7 +3467,7 @@ static void add_device(struct slgt_info *info)
static const struct tty_port_operations slgt_port_ops = {
.carrier_raised = carrier_raised,
- .raise_dtr_rts = raise_dtr_rts,
+ .dtr_rts = dtr_rts,
};
/*
@@ -3841,15 +3889,27 @@ static void rx_start(struct slgt_info *info)
rdma_reset(info);
reset_rbufs(info);
- /* set 1st descriptor address */
- wr_reg32(info, RDDAR, info->rbufs[0].pdesc);
-
- if (info->params.mode != MGSL_MODE_ASYNC) {
- /* enable rx DMA and DMA interrupt */
- wr_reg32(info, RDCSR, (BIT2 + BIT0));
+ if (info->rx_pio) {
+ /* rx request when rx FIFO not empty */
+ wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) & ~BIT14));
+ slgt_irq_on(info, IRQ_RXDATA);
+ if (info->params.mode == MGSL_MODE_ASYNC) {
+ /* enable saving of rx status */
+ wr_reg32(info, RDCSR, BIT6);
+ }
} else {
- /* enable saving of rx status, rx DMA and DMA interrupt */
- wr_reg32(info, RDCSR, (BIT6 + BIT2 + BIT0));
+ /* rx request when rx FIFO half full */
+ wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) | BIT14));
+ /* set 1st descriptor address */
+ wr_reg32(info, RDDAR, info->rbufs[0].pdesc);
+
+ if (info->params.mode != MGSL_MODE_ASYNC) {
+ /* enable rx DMA and DMA interrupt */
+ wr_reg32(info, RDCSR, (BIT2 + BIT0));
+ } else {
+ /* enable saving of rx status, rx DMA and DMA interrupt */
+ wr_reg32(info, RDCSR, (BIT6 + BIT2 + BIT0));
+ }
}
slgt_irq_on(info, IRQ_RXOVER);
@@ -4467,6 +4527,8 @@ static void free_rbufs(struct slgt_info *info, unsigned int i, unsigned int last
static void reset_rbufs(struct slgt_info *info)
{
free_rbufs(info, 0, info->rbuf_count - 1);
+ info->rbuf_fill_index = 0;
+ info->rbuf_fill_count = 0;
}
/*
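
A rough model of what the new isr_rxdata() path does when rx_enable() selects PIO (rbuf_fill_level below 128 in the hunk above): drain the FIFO a word at a time into the current ring buffer, and close a buffer once it reaches the fill level or the word carries the end-of-frame bit. The buffer count, fill level and EOF flag below are illustrative, not the device's register layout.

#include <stdio.h>
#include <string.h>

#define RBUF_COUNT      4
#define FILL_LEVEL      8
#define EOF_FLAG        0x0400          /* plays the role of BIT10 in the patch */

struct rbuf {
        unsigned char data[FILL_LEVEL];
        int count;
        int complete;
};

int main(void)
{
        struct rbuf rbufs[RBUF_COUNT];
        unsigned short fifo[] = { 'h', 'e', 'l', 'l', 'o' | EOF_FLAG,
                                  'w', 'o', 'w' | EOF_FLAG };
        unsigned int i = 0, count = 0;  /* rbuf_fill_index / rbuf_fill_count */
        unsigned int n;

        memset(rbufs, 0, sizeof(rbufs));
        for (n = 0; n < sizeof(fifo) / sizeof(fifo[0]); n++) {
                unsigned short reg = fifo[n];

                rbufs[i].data[count++] = (unsigned char)reg;
                if (count == FILL_LEVEL || (reg & EOF_FLAG)) {
                        rbufs[i].count = count;
                        rbufs[i].complete = 1;  /* like setting BIT15 in the status */
                        count = 0;
                        i = (i + 1) % RBUF_COUNT;
                }
        }
        for (n = 0; n < RBUF_COUNT; n++)
                printf("buf %u: complete=%d count=%d\n",
                       n, rbufs[n].complete, rbufs[n].count);
        return 0;
}
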
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index 26de60efe4b..6f727e3c53a 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -3277,13 +3277,16 @@ static int carrier_raised(struct tty_port *port)
return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
}
-static void raise_dtr_rts(struct tty_port *port)
+static void dtr_rts(struct tty_port *port, int on)
{
SLMP_INFO *info = container_of(port, SLMP_INFO, port);
unsigned long flags;
spin_lock_irqsave(&info->lock,flags);
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ if (on)
+ info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ else
+ info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -3746,7 +3749,7 @@ static void add_device(SLMP_INFO *info)
static const struct tty_port_operations port_ops = {
.carrier_raised = carrier_raised,
- .raise_dtr_rts = raise_dtr_rts,
+ .dtr_rts = dtr_rts,
};
/* Allocate and initialize a device instance structure
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index d6a807f4077..39a05b5fa9c 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -25,6 +25,7 @@
#include <linux/kbd_kern.h>
#include <linux/proc_fs.h>
#include <linux/quotaops.h>
+#include <linux/perf_counter.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
@@ -243,6 +244,7 @@ static void sysrq_handle_showregs(int key, struct tty_struct *tty)
struct pt_regs *regs = get_irq_regs();
if (regs)
show_regs(regs);
+ perf_counter_print_debug();
}
static struct sysrq_key_op sysrq_showregs_op = {
.handler = sysrq_handle_showregs,
diff --git a/drivers/char/tty_audit.c b/drivers/char/tty_audit.c
index 55ba6f14288..ac16fbec72d 100644
--- a/drivers/char/tty_audit.c
+++ b/drivers/char/tty_audit.c
@@ -29,10 +29,7 @@ static struct tty_audit_buf *tty_audit_buf_alloc(int major, int minor,
buf = kmalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
goto err;
- if (PAGE_SIZE != N_TTY_BUF_SIZE)
- buf->data = kmalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
- else
- buf->data = (unsigned char *)__get_free_page(GFP_KERNEL);
+ buf->data = kmalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
if (!buf->data)
goto err_buf;
atomic_set(&buf->count, 1);
@@ -52,10 +49,7 @@ err:
static void tty_audit_buf_free(struct tty_audit_buf *buf)
{
WARN_ON(buf->valid != 0);
- if (PAGE_SIZE != N_TTY_BUF_SIZE)
- kfree(buf->data);
- else
- free_page((unsigned long)buf->data);
+ kfree(buf->data);
kfree(buf);
}
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 66b99a2049e..939e198d767 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -295,7 +295,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
struct tty_driver *p, *res = NULL;
int tty_line = 0;
int len;
- char *str;
+ char *str, *stp;
for (str = name; *str; str++)
if ((*str >= '0' && *str <= '9') || *str == ',')
@@ -311,13 +311,14 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
list_for_each_entry(p, &tty_drivers, tty_drivers) {
if (strncmp(name, p->name, len) != 0)
continue;
- if (*str == ',')
- str++;
- if (*str == '\0')
- str = NULL;
+ stp = str;
+ if (*stp == ',')
+ stp++;
+ if (*stp == '\0')
+ stp = NULL;
if (tty_line >= 0 && tty_line <= p->num && p->ops &&
- p->ops->poll_init && !p->ops->poll_init(p, tty_line, str)) {
+ p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) {
res = tty_driver_kref_get(p);
*line = tty_line;
break;
@@ -470,43 +471,6 @@ void tty_wakeup(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_wakeup);
/**
- * tty_ldisc_flush - flush line discipline queue
- * @tty: tty
- *
- * Flush the line discipline queue (if any) for this tty. If there
- * is no line discipline active this is a no-op.
- */
-
-void tty_ldisc_flush(struct tty_struct *tty)
-{
- struct tty_ldisc *ld = tty_ldisc_ref(tty);
- if (ld) {
- if (ld->ops->flush_buffer)
- ld->ops->flush_buffer(tty);
- tty_ldisc_deref(ld);
- }
- tty_buffer_flush(tty);
-}
-
-EXPORT_SYMBOL_GPL(tty_ldisc_flush);
-
-/**
- * tty_reset_termios - reset terminal state
- * @tty: tty to reset
- *
- * Restore a terminal to the driver default state
- */
-
-static void tty_reset_termios(struct tty_struct *tty)
-{
- mutex_lock(&tty->termios_mutex);
- *tty->termios = tty->driver->init_termios;
- tty->termios->c_ispeed = tty_termios_input_baud_rate(tty->termios);
- tty->termios->c_ospeed = tty_termios_baud_rate(tty->termios);
- mutex_unlock(&tty->termios_mutex);
-}
-
-/**
* do_tty_hangup - actual handler for hangup events
* @work: tty device
*
@@ -535,7 +499,6 @@ static void do_tty_hangup(struct work_struct *work)
struct file *cons_filp = NULL;
struct file *filp, *f = NULL;
struct task_struct *p;
- struct tty_ldisc *ld;
int closecount = 0, n;
unsigned long flags;
int refs = 0;
@@ -566,40 +529,8 @@ static void do_tty_hangup(struct work_struct *work)
filp->f_op = &hung_up_tty_fops;
}
file_list_unlock();
- /*
- * FIXME! What are the locking issues here? This may me overdoing
- * things... This question is especially important now that we've
- * removed the irqlock.
- */
- ld = tty_ldisc_ref(tty);
- if (ld != NULL) {
- /* We may have no line discipline at this point */
- if (ld->ops->flush_buffer)
- ld->ops->flush_buffer(tty);
- tty_driver_flush_buffer(tty);
- if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
- ld->ops->write_wakeup)
- ld->ops->write_wakeup(tty);
- if (ld->ops->hangup)
- ld->ops->hangup(tty);
- }
- /*
- * FIXME: Once we trust the LDISC code better we can wait here for
- * ldisc completion and fix the driver call race
- */
- wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
- wake_up_interruptible_poll(&tty->read_wait, POLLIN);
- /*
- * Shutdown the current line discipline, and reset it to
- * N_TTY.
- */
- if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS)
- tty_reset_termios(tty);
- /* Defer ldisc switch */
- /* tty_deferred_ldisc_switch(N_TTY);
- This should get done automatically when the port closes and
- tty_release is called */
+ tty_ldisc_hangup(tty);
read_lock(&tasklist_lock);
if (tty->session) {
@@ -628,12 +559,15 @@ static void do_tty_hangup(struct work_struct *work)
read_unlock(&tasklist_lock);
spin_lock_irqsave(&tty->ctrl_lock, flags);
- tty->flags = 0;
+ clear_bit(TTY_THROTTLED, &tty->flags);
+ clear_bit(TTY_PUSH, &tty->flags);
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
put_pid(tty->session);
put_pid(tty->pgrp);
tty->session = NULL;
tty->pgrp = NULL;
tty->ctrl_status = 0;
+ set_bit(TTY_HUPPED, &tty->flags);
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
/* Account for the p->signal references we killed */
@@ -659,10 +593,7 @@ static void do_tty_hangup(struct work_struct *work)
* can't yet guarantee all that.
*/
set_bit(TTY_HUPPED, &tty->flags);
- if (ld) {
- tty_ldisc_enable(tty);
- tty_ldisc_deref(ld);
- }
+ tty_ldisc_enable(tty);
unlock_kernel();
if (f)
fput(f);
@@ -2480,6 +2411,24 @@ static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int
return tty->ops->tiocmset(tty, file, set, clear);
}
+struct tty_struct *tty_pair_get_tty(struct tty_struct *tty)
+{
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_MASTER)
+ tty = tty->link;
+ return tty;
+}
+EXPORT_SYMBOL(tty_pair_get_tty);
+
+struct tty_struct *tty_pair_get_pty(struct tty_struct *tty)
+{
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_MASTER)
+ return tty;
+ return tty->link;
+}
+EXPORT_SYMBOL(tty_pair_get_pty);
+
/*
* Split this up, as gcc can choke on it otherwise..
*/
@@ -2495,11 +2444,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (tty_paranoia_check(tty, inode, "tty_ioctl"))
return -EINVAL;
- real_tty = tty;
- if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
- tty->driver->subtype == PTY_TYPE_MASTER)
- real_tty = tty->link;
-
+ real_tty = tty_pair_get_tty(tty);
/*
* Factor out some common prep work
@@ -2555,7 +2500,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case TIOCGSID:
return tiocgsid(tty, real_tty, p);
case TIOCGETD:
- return put_user(tty->ldisc.ops->num, (int __user *)p);
+ return put_user(tty->ldisc->ops->num, (int __user *)p);
case TIOCSETD:
return tiocsetd(tty, p);
/*
@@ -2770,6 +2715,7 @@ void initialize_tty_struct(struct tty_struct *tty,
tty->buf.head = tty->buf.tail = NULL;
tty_buffer_init(tty);
mutex_init(&tty->termios_mutex);
+ mutex_init(&tty->ldisc_mutex);
init_waitqueue_head(&tty->write_wait);
init_waitqueue_head(&tty->read_wait);
INIT_WORK(&tty->hangup_work, do_tty_hangup);
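
The two helpers introduced in tty_io.c encapsulate the "which side of a pty pair am I looking at" test that tty_ioctl() used to open-code. A minimal stand-alone model of their behaviour; the struct here is a stand-in, not the kernel's tty_struct.

#include <stdio.h>

struct tty {
        const char *name;
        int is_pty_master;
        struct tty *link;
};

static struct tty *pair_get_tty(struct tty *tty)        /* models tty_pair_get_tty() */
{
        return tty->is_pty_master ? tty->link : tty;
}

static struct tty *pair_get_pty(struct tty *tty)        /* models tty_pair_get_pty() */
{
        return tty->is_pty_master ? tty : tty->link;
}

int main(void)
{
        struct tty slave = { "pts/0", 0, NULL };
        struct tty master = { "ptmx", 1, &slave };

        slave.link = &master;
        printf("ioctl on master acts on: %s\n", pair_get_tty(&master)->name);
        printf("ioctl on slave  acts on: %s\n", pair_get_tty(&slave)->name);
        printf("pty side seen from slave: %s\n", pair_get_pty(&slave)->name);
        return 0;
}
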
diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
index 6f4c7d0a53b..8116bb1c8f8 100644
--- a/drivers/char/tty_ioctl.c
+++ b/drivers/char/tty_ioctl.c
@@ -97,14 +97,19 @@ EXPORT_SYMBOL(tty_driver_flush_buffer);
* @tty: terminal
*
* Indicate that a tty should stop transmitting data down the stack.
+ * Takes the termios mutex to protect against parallel throttle/unthrottle
+ * and also to ensure the driver can consistently reference its own
+ * termios data at this point when implementing software flow control.
*/
void tty_throttle(struct tty_struct *tty)
{
+ mutex_lock(&tty->termios_mutex);
/* check TTY_THROTTLED first so it indicates our state */
if (!test_and_set_bit(TTY_THROTTLED, &tty->flags) &&
tty->ops->throttle)
tty->ops->throttle(tty);
+ mutex_unlock(&tty->termios_mutex);
}
EXPORT_SYMBOL(tty_throttle);
@@ -113,13 +118,21 @@ EXPORT_SYMBOL(tty_throttle);
* @tty: terminal
*
* Indicate that a tty may continue transmitting data down the stack.
+ * Takes the termios mutex to protect against parallel throttle/unthrottle
+ * and also to ensure the driver can consistently reference its own
+ * termios data at this point when implementing software flow control.
+ *
+ * Drivers should however remember that the stack can issue a throttle,
+ * then change flow control method, then unthrottle.
*/
void tty_unthrottle(struct tty_struct *tty)
{
+ mutex_lock(&tty->termios_mutex);
if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) &&
tty->ops->unthrottle)
tty->ops->unthrottle(tty);
+ mutex_unlock(&tty->termios_mutex);
}
EXPORT_SYMBOL(tty_unthrottle);
@@ -613,9 +626,25 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
return 0;
}
+static void copy_termios(struct tty_struct *tty, struct ktermios *kterm)
+{
+ mutex_lock(&tty->termios_mutex);
+ memcpy(kterm, tty->termios, sizeof(struct ktermios));
+ mutex_unlock(&tty->termios_mutex);
+}
+
+static void copy_termios_locked(struct tty_struct *tty, struct ktermios *kterm)
+{
+ mutex_lock(&tty->termios_mutex);
+ memcpy(kterm, tty->termios_locked, sizeof(struct ktermios));
+ mutex_unlock(&tty->termios_mutex);
+}
+
static int get_termio(struct tty_struct *tty, struct termio __user *termio)
{
- if (kernel_termios_to_user_termio(termio, tty->termios))
+ struct ktermios kterm;
+ copy_termios(tty, &kterm);
+ if (kernel_termios_to_user_termio(termio, &kterm))
return -EFAULT;
return 0;
}
@@ -917,6 +946,8 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
struct tty_struct *real_tty;
void __user *p = (void __user *)arg;
int ret = 0;
+ struct ktermios kterm;
+ struct termiox ktermx;
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_MASTER)
@@ -952,23 +983,20 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
return set_termios(real_tty, p, TERMIOS_OLD);
#ifndef TCGETS2
case TCGETS:
- mutex_lock(&real_tty->termios_mutex);
- if (kernel_termios_to_user_termios((struct termios __user *)arg, real_tty->termios))
+ copy_termios(real_tty, &kterm);
+ if (kernel_termios_to_user_termios((struct termios __user *)arg, &kterm))
ret = -EFAULT;
- mutex_unlock(&real_tty->termios_mutex);
return ret;
#else
case TCGETS:
- mutex_lock(&real_tty->termios_mutex);
- if (kernel_termios_to_user_termios_1((struct termios __user *)arg, real_tty->termios))
+ copy_termios(real_tty, &kterm);
+ if (kernel_termios_to_user_termios_1((struct termios __user *)arg, &kterm))
ret = -EFAULT;
- mutex_unlock(&real_tty->termios_mutex);
return ret;
case TCGETS2:
- mutex_lock(&real_tty->termios_mutex);
- if (kernel_termios_to_user_termios((struct termios2 __user *)arg, real_tty->termios))
+ copy_termios(real_tty, &kterm);
+ if (kernel_termios_to_user_termios((struct termios2 __user *)arg, &kterm))
ret = -EFAULT;
- mutex_unlock(&real_tty->termios_mutex);
return ret;
case TCSETSF2:
return set_termios(real_tty, p, TERMIOS_FLUSH | TERMIOS_WAIT);
@@ -987,34 +1015,36 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
return set_termios(real_tty, p, TERMIOS_TERMIO);
#ifndef TCGETS2
case TIOCGLCKTRMIOS:
- mutex_lock(&real_tty->termios_mutex);
- if (kernel_termios_to_user_termios((struct termios __user *)arg, real_tty->termios_locked))
+ copy_termios_locked(real_tty, &kterm);
+ if (kernel_termios_to_user_termios((struct termios __user *)arg, &kterm))
ret = -EFAULT;
- mutex_unlock(&real_tty->termios_mutex);
return ret;
case TIOCSLCKTRMIOS:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- mutex_lock(&real_tty->termios_mutex);
- if (user_termios_to_kernel_termios(real_tty->termios_locked,
+ copy_termios_locked(real_tty, &kterm);
+ if (user_termios_to_kernel_termios(&kterm,
(struct termios __user *) arg))
- ret = -EFAULT;
+ return -EFAULT;
+ mutex_lock(&real_tty->termios_mutex);
+ memcpy(real_tty->termios_locked, &kterm, sizeof(struct ktermios));
mutex_unlock(&real_tty->termios_mutex);
- return ret;
+ return 0;
#else
case TIOCGLCKTRMIOS:
- mutex_lock(&real_tty->termios_mutex);
- if (kernel_termios_to_user_termios_1((struct termios __user *)arg, real_tty->termios_locked))
+ copy_termios_locked(real_tty, &kterm);
+ if (kernel_termios_to_user_termios_1((struct termios __user *)arg, &kterm))
ret = -EFAULT;
- mutex_unlock(&real_tty->termios_mutex);
return ret;
case TIOCSLCKTRMIOS:
if (!capable(CAP_SYS_ADMIN))
- ret = -EPERM;
- mutex_lock(&real_tty->termios_mutex);
- if (user_termios_to_kernel_termios_1(real_tty->termios_locked,
+ return -EPERM;
+ copy_termios_locked(real_tty, &kterm);
+ if (user_termios_to_kernel_termios_1(&kterm,
(struct termios __user *) arg))
- ret = -EFAULT;
+ return -EFAULT;
+ mutex_lock(&real_tty->termios_mutex);
+ memcpy(real_tty->termios_locked, &kterm, sizeof(struct ktermios));
mutex_unlock(&real_tty->termios_mutex);
return ret;
#endif
@@ -1023,9 +1053,10 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
if (real_tty->termiox == NULL)
return -EINVAL;
mutex_lock(&real_tty->termios_mutex);
- if (copy_to_user(p, real_tty->termiox, sizeof(struct termiox)))
- ret = -EFAULT;
+ memcpy(&ktermx, real_tty->termiox, sizeof(struct termiox));
mutex_unlock(&real_tty->termios_mutex);
+ if (copy_to_user(p, &ktermx, sizeof(struct termiox)))
+ ret = -EFAULT;
return ret;
case TCSETX:
return set_termiox(real_tty, p, 0);
@@ -1035,10 +1066,9 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
return set_termiox(real_tty, p, TERMIOS_FLUSH);
#endif
case TIOCGSOFTCAR:
- mutex_lock(&real_tty->termios_mutex);
- ret = put_user(C_CLOCAL(real_tty) ? 1 : 0,
+ copy_termios(real_tty, &kterm);
+ ret = put_user((kterm.c_cflag & CLOCAL) ? 1 : 0,
(int __user *)arg);
- mutex_unlock(&real_tty->termios_mutex);
return ret;
case TIOCSSOFTCAR:
if (get_user(arg, (unsigned int __user *) arg))
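
The TCGETS/TIOCGLCKTRMIOS rework above follows one pattern: snapshot the termios into a stack copy while holding the mutex, then do the possibly faulting copy to userspace without the lock. A hedged userspace analogue using pthreads (build with -pthread); the struct and values are illustrative.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct ktermios { unsigned int c_cflag, c_iflag; };

static struct ktermios current_termios = { 0x1cb2, 0x2 };
static pthread_mutex_t termios_mutex = PTHREAD_MUTEX_INITIALIZER;

static void copy_termios(struct ktermios *kterm)
{
        pthread_mutex_lock(&termios_mutex);
        memcpy(kterm, &current_termios, sizeof(*kterm));        /* short critical section */
        pthread_mutex_unlock(&termios_mutex);
}

static int copy_to_user_model(struct ktermios *dst, const struct ktermios *src)
{
        memcpy(dst, src, sizeof(*src));         /* stands in for copy_to_user() */
        return 0;
}

int main(void)
{
        struct ktermios kterm, user_buf;

        copy_termios(&kterm);                   /* lock held only for the memcpy */
        if (copy_to_user_model(&user_buf, &kterm))      /* lock not held here */
                return 1;
        printf("c_cflag=%#x c_iflag=%#x\n", user_buf.c_cflag, user_buf.c_iflag);
        return 0;
}
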
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index f78f5b0127a..39c8f86dedd 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -115,19 +115,22 @@ EXPORT_SYMBOL(tty_unregister_ldisc);
/**
* tty_ldisc_try_get - try and reference an ldisc
* @disc: ldisc number
- * @ld: tty ldisc structure to complete
*
* Attempt to open and lock a line discipline into place. Return
- * the line discipline refcounted and assigned in ld. On an error
- * report the error code back
+ * the line discipline refcounted or an error.
*/
-static int tty_ldisc_try_get(int disc, struct tty_ldisc *ld)
+static struct tty_ldisc *tty_ldisc_try_get(int disc)
{
unsigned long flags;
+ struct tty_ldisc *ld;
struct tty_ldisc_ops *ldops;
int err = -EINVAL;
-
+
+ ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL);
+ if (ld == NULL)
+ return ERR_PTR(-ENOMEM);
+
spin_lock_irqsave(&tty_ldisc_lock, flags);
ld->ops = NULL;
ldops = tty_ldiscs[disc];
@@ -140,17 +143,19 @@ static int tty_ldisc_try_get(int disc, struct tty_ldisc *ld)
/* lock it */
ldops->refcount++;
ld->ops = ldops;
+ ld->refcount = 0;
err = 0;
}
}
spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- return err;
+ if (err)
+ return ERR_PTR(err);
+ return ld;
}
/**
* tty_ldisc_get - take a reference to an ldisc
* @disc: ldisc number
- * @ld: tty line discipline structure to use
*
* Takes a reference to a line discipline. Deals with refcounts and
* module locking counts. Returns NULL if the discipline is not available.
@@ -161,52 +166,54 @@ static int tty_ldisc_try_get(int disc, struct tty_ldisc *ld)
* takes tty_ldisc_lock to guard against ldisc races
*/
-static int tty_ldisc_get(int disc, struct tty_ldisc *ld)
+static struct tty_ldisc *tty_ldisc_get(int disc)
{
- int err;
+ struct tty_ldisc *ld;
if (disc < N_TTY || disc >= NR_LDISCS)
- return -EINVAL;
- err = tty_ldisc_try_get(disc, ld);
- if (err < 0) {
+ return ERR_PTR(-EINVAL);
+ ld = tty_ldisc_try_get(disc);
+ if (IS_ERR(ld)) {
request_module("tty-ldisc-%d", disc);
- err = tty_ldisc_try_get(disc, ld);
+ ld = tty_ldisc_try_get(disc);
}
- return err;
+ return ld;
}
/**
* tty_ldisc_put - drop ldisc reference
- * @disc: ldisc number
+ * @ld: ldisc
*
* Drop a reference to a line discipline. Manage refcounts and
- * module usage counts
+ * module usage counts. Free the ldisc once the refcount hits zero.
*
* Locking:
* takes tty_ldisc_lock to guard against ldisc races
*/
-static void tty_ldisc_put(struct tty_ldisc_ops *ld)
+static void tty_ldisc_put(struct tty_ldisc *ld)
{
unsigned long flags;
- int disc = ld->num;
+ int disc = ld->ops->num;
+ struct tty_ldisc_ops *ldo;
BUG_ON(disc < N_TTY || disc >= NR_LDISCS);
spin_lock_irqsave(&tty_ldisc_lock, flags);
- ld = tty_ldiscs[disc];
- BUG_ON(ld->refcount == 0);
- ld->refcount--;
- module_put(ld->owner);
+ ldo = tty_ldiscs[disc];
+ BUG_ON(ldo->refcount == 0);
+ ldo->refcount--;
+ module_put(ldo->owner);
spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ kfree(ld);
}
-static void * tty_ldiscs_seq_start(struct seq_file *m, loff_t *pos)
+static void *tty_ldiscs_seq_start(struct seq_file *m, loff_t *pos)
{
return (*pos < NR_LDISCS) ? pos : NULL;
}
-static void * tty_ldiscs_seq_next(struct seq_file *m, void *v, loff_t *pos)
+static void *tty_ldiscs_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
(*pos)++;
return (*pos < NR_LDISCS) ? pos : NULL;
@@ -219,12 +226,13 @@ static void tty_ldiscs_seq_stop(struct seq_file *m, void *v)
static int tty_ldiscs_seq_show(struct seq_file *m, void *v)
{
int i = *(loff_t *)v;
- struct tty_ldisc ld;
-
- if (tty_ldisc_get(i, &ld) < 0)
+ struct tty_ldisc *ld;
+
+ ld = tty_ldisc_try_get(i);
+ if (IS_ERR(ld))
return 0;
- seq_printf(m, "%-10s %2d\n", ld.ops->name ? ld.ops->name : "???", i);
- tty_ldisc_put(ld.ops);
+ seq_printf(m, "%-10s %2d\n", ld->ops->name ? ld->ops->name : "???", i);
+ tty_ldisc_put(ld);
return 0;
}
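
tty_ldisc_try_get()/tty_ldisc_get() now allocate the struct tty_ldisc and hand back either a valid pointer or an encoded error, so callers test IS_ERR() instead of passing in a structure to fill and checking an int. The self-contained sketch below re-implements just enough of the ERR_PTR convention (userspace stand-ins, not <linux/err.h>) to show that calling pattern.

#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define PTR_ERR(ptr)    ((long)(ptr))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct ldisc { int num; };

static struct ldisc *ldisc_get(int disc)
{
        struct ldisc *ld;

        if (disc < 0 || disc >= 30)
                return ERR_PTR(-22);            /* -EINVAL */
        ld = malloc(sizeof(*ld));
        if (!ld)
                return ERR_PTR(-12);            /* -ENOMEM */
        ld->num = disc;
        return ld;
}

int main(void)
{
        struct ldisc *ld = ldisc_get(99);

        if (IS_ERR(ld))
                printf("lookup failed: %ld\n", PTR_ERR(ld));
        ld = ldisc_get(0);
        if (!IS_ERR(ld)) {
                printf("got ldisc %d\n", ld->num);
                free(ld);
        }
        return 0;
}
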
@@ -263,8 +271,7 @@ const struct file_operations tty_ldiscs_proc_fops = {
static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld)
{
- ld->refcount = 0;
- tty->ldisc = *ld;
+ tty->ldisc = ld;
}
/**
@@ -286,7 +293,7 @@ static int tty_ldisc_try(struct tty_struct *tty)
int ret = 0;
spin_lock_irqsave(&tty_ldisc_lock, flags);
- ld = &tty->ldisc;
+ ld = tty->ldisc;
if (test_bit(TTY_LDISC, &tty->flags)) {
ld->refcount++;
ret = 1;
@@ -315,10 +322,9 @@ struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
{
/* wait_event is a macro */
wait_event(tty_ldisc_wait, tty_ldisc_try(tty));
- WARN_ON(tty->ldisc.refcount == 0);
- return &tty->ldisc;
+ WARN_ON(tty->ldisc->refcount == 0);
+ return tty->ldisc;
}
-
EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
/**
@@ -335,10 +341,9 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty)
{
if (tty_ldisc_try(tty))
- return &tty->ldisc;
+ return tty->ldisc;
return NULL;
}
-
EXPORT_SYMBOL_GPL(tty_ldisc_ref);
/**
@@ -366,7 +371,6 @@ void tty_ldisc_deref(struct tty_ldisc *ld)
wake_up(&tty_ldisc_wait);
spin_unlock_irqrestore(&tty_ldisc_lock, flags);
}
-
EXPORT_SYMBOL_GPL(tty_ldisc_deref);
/**
@@ -389,6 +393,26 @@ void tty_ldisc_enable(struct tty_struct *tty)
}
/**
+ * tty_ldisc_flush - flush line discipline queue
+ * @tty: tty
+ *
+ * Flush the line discipline queue (if any) for this tty. If there
+ * is no line discipline active this is a no-op.
+ */
+
+void tty_ldisc_flush(struct tty_struct *tty)
+{
+ struct tty_ldisc *ld = tty_ldisc_ref(tty);
+ if (ld) {
+ if (ld->ops->flush_buffer)
+ ld->ops->flush_buffer(tty);
+ tty_ldisc_deref(ld);
+ }
+ tty_buffer_flush(tty);
+}
+EXPORT_SYMBOL_GPL(tty_ldisc_flush);
+
+/**
* tty_set_termios_ldisc - set ldisc field
* @tty: tty structure
* @num: line discipline number
@@ -407,6 +431,39 @@ static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
mutex_unlock(&tty->termios_mutex);
}
+/**
+ * tty_ldisc_open - open a line discipline
+ * @tty: tty we are opening the ldisc on
+ * @ld: discipline to open
+ *
+ * A helper opening method. Also a convenient debugging and check
+ * point.
+ */
+
+static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
+{
+ WARN_ON(test_and_set_bit(TTY_LDISC_OPEN, &tty->flags));
+ if (ld->ops->open)
+ return ld->ops->open(tty);
+ return 0;
+}
+
+/**
+ * tty_ldisc_close - close a line discipline
+ * @tty: tty we are closing the ldisc on
+ * @ld: discipline to close
+ *
+ * A helper close method. Also a convenient debugging and check
+ * point.
+ */
+
+static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
+{
+ WARN_ON(!test_bit(TTY_LDISC_OPEN, &tty->flags));
+ clear_bit(TTY_LDISC_OPEN, &tty->flags);
+ if (ld->ops->close)
+ ld->ops->close(tty);
+}
/**
* tty_ldisc_restore - helper for tty ldisc change
@@ -420,66 +477,136 @@ static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
{
char buf[64];
- struct tty_ldisc new_ldisc;
+ struct tty_ldisc *new_ldisc;
+ int r;
/* There is an outstanding reference here so this is safe */
- tty_ldisc_get(old->ops->num, old);
+ old = tty_ldisc_get(old->ops->num);
+ WARN_ON(IS_ERR(old));
tty_ldisc_assign(tty, old);
tty_set_termios_ldisc(tty, old->ops->num);
- if (old->ops->open && (old->ops->open(tty) < 0)) {
- tty_ldisc_put(old->ops);
+ if (tty_ldisc_open(tty, old) < 0) {
+ tty_ldisc_put(old);
/* This driver is always present */
- if (tty_ldisc_get(N_TTY, &new_ldisc) < 0)
+ new_ldisc = tty_ldisc_get(N_TTY);
+ if (IS_ERR(new_ldisc))
panic("n_tty: get");
- tty_ldisc_assign(tty, &new_ldisc);
+ tty_ldisc_assign(tty, new_ldisc);
tty_set_termios_ldisc(tty, N_TTY);
- if (new_ldisc.ops->open) {
- int r = new_ldisc.ops->open(tty);
- if (r < 0)
- panic("Couldn't open N_TTY ldisc for "
- "%s --- error %d.",
- tty_name(tty, buf), r);
- }
+ r = tty_ldisc_open(tty, new_ldisc);
+ if (r < 0)
+ panic("Couldn't open N_TTY ldisc for "
+ "%s --- error %d.",
+ tty_name(tty, buf), r);
}
}
/**
+ * tty_ldisc_halt - shut down the line discipline
+ * @tty: tty device
+ *
+ * Shut down the line discipline and work queue for this tty device.
+ * Clearing the TTY_LDISC flag ensures no further references can be
+ * obtained, while halting the delayed work queue ensures that no more
+ * data is fed to the ldisc.
+ *
+ * In order to wait for any existing references to complete, see
+ * tty_ldisc_wait_idle.
+ */
+
+static int tty_ldisc_halt(struct tty_struct *tty)
+{
+ clear_bit(TTY_LDISC, &tty->flags);
+ return cancel_delayed_work(&tty->buf.work);
+}
+
+/**
+ * tty_ldisc_wait_idle - wait for the ldisc to become idle
+ * @tty: tty to wait for
+ *
+ * Wait for the line discipline to become idle. The discipline must
+ * have been halted for this to guarantee it remains idle.
+ *
+ * tty_ldisc_lock protects the ref counts currently.
+ */
+
+static int tty_ldisc_wait_idle(struct tty_struct *tty)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ while (tty->ldisc->refcount) {
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ if (wait_event_timeout(tty_ldisc_wait,
+ tty->ldisc->refcount == 0, 5 * HZ) == 0)
+ return -EBUSY;
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ }
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ return 0;
+}
+
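A condensed sketch of the teardown order these two helpers support (illustration only; example_quiesce_ldisc is not in the patch, it simply mirrors what tty_set_ldisc() does below):

static int example_quiesce_ldisc(struct tty_struct *tty)
{
	int work = tty_ldisc_halt(tty);		/* stop new refs, cancel buf.work */

	flush_scheduled_work();			/* let any queued work drain */
	if (tty_ldisc_wait_idle(tty) < 0)	/* wait out existing reference holders */
		return -EBUSY;
	return work;				/* caller may reschedule buf.work later */
}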
+/**
* tty_set_ldisc - set line discipline
* @tty: the terminal to set
* @ldisc: the line discipline
*
* Set the discipline of a tty line. Must be called from a process
- * context.
+ * context. The ldisc change logic has to protect itself against any
+ * overlapping ldisc change (including on the other end of pty pairs),
+ * the close of one side of a tty/pty pair, and eventually hangup.
*
- * Locking: takes tty_ldisc_lock.
- * called functions take termios_mutex
+ * Locking: takes tty_ldisc_lock, termios_mutex
*/
int tty_set_ldisc(struct tty_struct *tty, int ldisc)
{
int retval;
- struct tty_ldisc o_ldisc, new_ldisc;
- int work;
- unsigned long flags;
+ struct tty_ldisc *o_ldisc, *new_ldisc;
+ int work, o_work = 0;
struct tty_struct *o_tty;
-restart:
- /* This is a bit ugly for now but means we can break the 'ldisc
- is part of the tty struct' assumption later */
- retval = tty_ldisc_get(ldisc, &new_ldisc);
- if (retval)
- return retval;
+ new_ldisc = tty_ldisc_get(ldisc);
+ if (IS_ERR(new_ldisc))
+ return PTR_ERR(new_ldisc);
+
+ /*
+ * We need to look at the tty locking here for pty/tty pairs
+ * when both sides try to change in parallel.
+ */
+
+ o_tty = tty->link; /* o_tty is the pty side or NULL */
+
+
+ /*
+ * Check the no-op case
+ */
+
+ if (tty->ldisc->ops->num == ldisc) {
+ tty_ldisc_put(new_ldisc);
+ return 0;
+ }
/*
* Problem: What do we do if this blocks ?
+ * We could deadlock here
*/
tty_wait_until_sent(tty, 0);
- if (tty->ldisc.ops->num == ldisc) {
- tty_ldisc_put(new_ldisc.ops);
- return 0;
+ mutex_lock(&tty->ldisc_mutex);
+
+ /*
+ * We could be midstream of another ldisc change which has
+ * dropped the lock during processing. If so we need to wait.
+ */
+
+ while (test_bit(TTY_LDISC_CHANGING, &tty->flags)) {
+ mutex_unlock(&tty->ldisc_mutex);
+ wait_event(tty_ldisc_wait,
+ test_bit(TTY_LDISC_CHANGING, &tty->flags) == 0);
+ mutex_lock(&tty->ldisc_mutex);
}
+ set_bit(TTY_LDISC_CHANGING, &tty->flags);
/*
* No more input please, we are switching. The new ldisc
@@ -489,8 +616,6 @@ restart:
tty->receive_room = 0;
o_ldisc = tty->ldisc;
- o_tty = tty->link;
-
/*
* Make sure we don't change while someone holds a
* reference to the line discipline. The TTY_LDISC bit
@@ -501,108 +626,181 @@ restart:
* with a userspace app continually trying to use the tty in
* parallel to the change and re-referencing the tty.
*/
- clear_bit(TTY_LDISC, &tty->flags);
- if (o_tty)
- clear_bit(TTY_LDISC, &o_tty->flags);
- spin_lock_irqsave(&tty_ldisc_lock, flags);
- if (tty->ldisc.refcount || (o_tty && o_tty->ldisc.refcount)) {
- if (tty->ldisc.refcount) {
- /* Free the new ldisc we grabbed. Must drop the lock
- first. */
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- tty_ldisc_put(o_ldisc.ops);
- /*
- * There are several reasons we may be busy, including
- * random momentary I/O traffic. We must therefore
- * retry. We could distinguish between blocking ops
- * and retries if we made tty_ldisc_wait() smarter.
- * That is up for discussion.
- */
- if (wait_event_interruptible(tty_ldisc_wait, tty->ldisc.refcount == 0) < 0)
- return -ERESTARTSYS;
- goto restart;
- }
- if (o_tty && o_tty->ldisc.refcount) {
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- tty_ldisc_put(o_tty->ldisc.ops);
- if (wait_event_interruptible(tty_ldisc_wait, o_tty->ldisc.refcount == 0) < 0)
- return -ERESTARTSYS;
- goto restart;
- }
- }
- /*
- * If the TTY_LDISC bit is set, then we are racing against
- * another ldisc change
- */
- if (test_bit(TTY_LDISC_CHANGING, &tty->flags)) {
- struct tty_ldisc *ld;
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- tty_ldisc_put(new_ldisc.ops);
- ld = tty_ldisc_ref_wait(tty);
- tty_ldisc_deref(ld);
- goto restart;
- }
- /*
- * This flag is used to avoid two parallel ldisc changes. Once
- * open and close are fine grained locked this may work better
- * as a mutex shared with the open/close/hup paths
- */
- set_bit(TTY_LDISC_CHANGING, &tty->flags);
+ work = tty_ldisc_halt(tty);
if (o_tty)
- set_bit(TTY_LDISC_CHANGING, &o_tty->flags);
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
-
- /*
- * From this point on we know nobody has an ldisc
- * usage reference, nor can they obtain one until
- * we say so later on.
- */
+ o_work = tty_ldisc_halt(o_tty);
- work = cancel_delayed_work(&tty->buf.work);
/*
- * Wait for ->hangup_work and ->buf.work handlers to terminate
- * MUST NOT hold locks here.
+ * Wait for ->hangup_work and ->buf.work handlers to terminate.
+ * We must drop the mutex here in case a hangup is also in progress.
*/
+
+ mutex_unlock(&tty->ldisc_mutex);
+
flush_scheduled_work();
+
+ /* Let any existing reference holders finish */
+ retval = tty_ldisc_wait_idle(tty);
+ if (retval < 0) {
+ clear_bit(TTY_LDISC_CHANGING, &tty->flags);
+ tty_ldisc_put(new_ldisc);
+ return retval;
+ }
+
+ mutex_lock(&tty->ldisc_mutex);
+ if (test_bit(TTY_HUPPED, &tty->flags)) {
+ /* We were raced by the hangup method. It will have stomped
+ the ldisc data and closed the ldisc down */
+ clear_bit(TTY_LDISC_CHANGING, &tty->flags);
+ mutex_unlock(&tty->ldisc_mutex);
+ tty_ldisc_put(new_ldisc);
+ return -EIO;
+ }
+
/* Shutdown the current discipline. */
- if (o_ldisc.ops->close)
- (o_ldisc.ops->close)(tty);
+ tty_ldisc_close(tty, o_ldisc);
/* Now set up the new line discipline. */
- tty_ldisc_assign(tty, &new_ldisc);
+ tty_ldisc_assign(tty, new_ldisc);
tty_set_termios_ldisc(tty, ldisc);
- if (new_ldisc.ops->open)
- retval = (new_ldisc.ops->open)(tty);
+
+ retval = tty_ldisc_open(tty, new_ldisc);
if (retval < 0) {
- tty_ldisc_put(new_ldisc.ops);
- tty_ldisc_restore(tty, &o_ldisc);
+ /* Back to the old one or N_TTY if we can't */
+ tty_ldisc_put(new_ldisc);
+ tty_ldisc_restore(tty, o_ldisc);
}
+
/* At this point we hold a reference to the new ldisc and a
reference to the old ldisc. If we ended up flipping back
to the existing ldisc we have two references to it */
- if (tty->ldisc.ops->num != o_ldisc.ops->num && tty->ops->set_ldisc)
+ if (tty->ldisc->ops->num != o_ldisc->ops->num && tty->ops->set_ldisc)
tty->ops->set_ldisc(tty);
- tty_ldisc_put(o_ldisc.ops);
+ tty_ldisc_put(o_ldisc);
/*
- * Allow ldisc referencing to occur as soon as the driver
- * ldisc callback completes.
+ * Allow ldisc referencing to occur again
*/
tty_ldisc_enable(tty);
if (o_tty)
tty_ldisc_enable(o_tty);
- /* Restart it in case no characters kick it off. Safe if
+ /* Restart the work queue in case no characters kick it off. Safe if
already running */
if (work)
schedule_delayed_work(&tty->buf.work, 1);
+ if (o_work)
+ schedule_delayed_work(&o_tty->buf.work, 1);
+ mutex_unlock(&tty->ldisc_mutex);
return retval;
}
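For context, this function is normally reached from userspace through the TIOCSETD ioctl; a minimal userspace sketch (error handling kept to a minimum, N_TTY assumed to be available from <linux/tty.h>):

#include <sys/ioctl.h>
#include <linux/tty.h>		/* N_TTY and the other N_* ldisc numbers */

/* Switch an already-open tty file descriptor to the given line discipline. */
static int set_line_discipline(int fd, int ldisc)
{
	return ioctl(fd, TIOCSETD, &ldisc);	/* ends up in tty_set_ldisc() */
}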
+/**
+ * tty_reset_termios - reset terminal state
+ * @tty: tty to reset
+ *
+ * Restore a terminal to the driver default state.
+ */
+
+static void tty_reset_termios(struct tty_struct *tty)
+{
+ mutex_lock(&tty->termios_mutex);
+ *tty->termios = tty->driver->init_termios;
+ tty->termios->c_ispeed = tty_termios_input_baud_rate(tty->termios);
+ tty->termios->c_ospeed = tty_termios_baud_rate(tty->termios);
+ mutex_unlock(&tty->termios_mutex);
+}
+
+
+/**
+ * tty_ldisc_reinit - reinitialise the tty ldisc
+ * @tty: tty to reinit
+ *
+ * Switch the tty back to N_TTY line discipline and leave the
+ * ldisc state closed
+ */
+
+static void tty_ldisc_reinit(struct tty_struct *tty)
+{
+ struct tty_ldisc *ld;
+
+ tty_ldisc_close(tty, tty->ldisc);
+ tty_ldisc_put(tty->ldisc);
+ tty->ldisc = NULL;
+ /*
+ * Switch the line discipline back
+ */
+ ld = tty_ldisc_get(N_TTY);
+ BUG_ON(IS_ERR(ld));
+ tty_ldisc_assign(tty, ld);
+ tty_set_termios_ldisc(tty, N_TTY);
+}
+
+/**
+ * tty_ldisc_hangup - hangup ldisc reset
+ * @tty: tty being hung up
+ *
+ * Some tty devices reset their termios when they receive a hangup
+ * event. In that situation we must also switch back to N_TTY properly
+ * before we reset the termios data.
+ *
+ * Locking: We can take the ldisc mutex as the rest of the code is
+ * careful to allow for this.
+ *
+ * In the pty pair case this occurs in the close() path of the
+ * tty itself so we must be careful about locking rules.
+ */
+
+void tty_ldisc_hangup(struct tty_struct *tty)
+{
+ struct tty_ldisc *ld;
+
+ /*
+ * FIXME! What are the locking issues here? This may be overdoing
+ * things... This question is especially important now that we've
+ * removed the irqlock.
+ */
+ ld = tty_ldisc_ref(tty);
+ if (ld != NULL) {
+ /* We may have no line discipline at this point */
+ if (ld->ops->flush_buffer)
+ ld->ops->flush_buffer(tty);
+ tty_driver_flush_buffer(tty);
+ if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
+ ld->ops->write_wakeup)
+ ld->ops->write_wakeup(tty);
+ if (ld->ops->hangup)
+ ld->ops->hangup(tty);
+ tty_ldisc_deref(ld);
+ }
+ /*
+ * FIXME: Once we trust the LDISC code better we can wait here for
+ * ldisc completion and fix the driver call race
+ */
+ wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
+ wake_up_interruptible_poll(&tty->read_wait, POLLIN);
+ /*
+ * Shutdown the current line discipline, and reset it to
+ * N_TTY.
+ */
+ if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
+ /* Avoid racing set_ldisc */
+ mutex_lock(&tty->ldisc_mutex);
+ /* Switch back to N_TTY */
+ tty_ldisc_reinit(tty);
+ /* At this point we have a closed ldisc and we want to
+ reopen it. We could defer this to the next open but
+ it means auditing a lot of other paths so this is a FIXME */
+ WARN_ON(tty_ldisc_open(tty, tty->ldisc));
+ tty_ldisc_enable(tty);
+ mutex_unlock(&tty->ldisc_mutex);
+ tty_reset_termios(tty);
+ }
+}
/**
* tty_ldisc_setup - open line discipline
@@ -610,24 +808,23 @@ restart:
* @o_tty: pair tty for pty/tty pairs
*
* Called during the initial open of a tty/pty pair in order to set up the
- * line discplines and bind them to the tty.
+ * line disciplines and bind them to the tty. This has no locking issues
+ * as the device isn't yet active.
*/
int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty)
{
- struct tty_ldisc *ld = &tty->ldisc;
+ struct tty_ldisc *ld = tty->ldisc;
int retval;
- if (ld->ops->open) {
- retval = (ld->ops->open)(tty);
- if (retval)
- return retval;
- }
- if (o_tty && o_tty->ldisc.ops->open) {
- retval = (o_tty->ldisc.ops->open)(o_tty);
+ retval = tty_ldisc_open(tty, ld);
+ if (retval)
+ return retval;
+
+ if (o_tty) {
+ retval = tty_ldisc_open(o_tty, o_tty->ldisc);
if (retval) {
- if (ld->ops->close)
- (ld->ops->close)(tty);
+ tty_ldisc_close(tty, ld);
return retval;
}
tty_ldisc_enable(o_tty);
@@ -635,32 +832,25 @@ int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty)
tty_ldisc_enable(tty);
return 0;
}
-
/**
* tty_ldisc_release - release line discipline
* @tty: tty being shut down
* @o_tty: pair tty for pty/tty pairs
*
- * Called during the final close of a tty/pty pair in order to shut down the
- * line discpline layer.
+ * Called during the final close of a tty/pty pair in order to shut down
+ * the line discipline layer. On exit the ldisc assigned is N_TTY and the
+ * ldisc has not been opened.
*/
void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
{
- unsigned long flags;
- struct tty_ldisc ld;
/*
* Prevent flush_to_ldisc() from rescheduling the work for later. Then
* kill any delayed work. As this is the final close it does not
* race with the set_ldisc code path.
*/
- clear_bit(TTY_LDISC, &tty->flags);
- cancel_delayed_work(&tty->buf.work);
-
- /*
- * Wait for ->hangup_work and ->buf.work handlers to terminate
- */
+ tty_ldisc_halt(tty);
flush_scheduled_work();
/*
@@ -668,38 +858,19 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
* side waiters as the file is closing so user count on the file
* side is zero.
*/
- spin_lock_irqsave(&tty_ldisc_lock, flags);
- while (tty->ldisc.refcount) {
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- wait_event(tty_ldisc_wait, tty->ldisc.refcount == 0);
- spin_lock_irqsave(&tty_ldisc_lock, flags);
- }
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+
+ tty_ldisc_wait_idle(tty);
+
/*
* Shutdown the current line discipline, and reset it to N_TTY.
*
* FIXME: this MUST get fixed for the new reflocking
*/
- if (tty->ldisc.ops->close)
- (tty->ldisc.ops->close)(tty);
- tty_ldisc_put(tty->ldisc.ops);
- /*
- * Switch the line discipline back
- */
- WARN_ON(tty_ldisc_get(N_TTY, &ld));
- tty_ldisc_assign(tty, &ld);
- tty_set_termios_ldisc(tty, N_TTY);
- if (o_tty) {
- /* FIXME: could o_tty be in setldisc here ? */
- clear_bit(TTY_LDISC, &o_tty->flags);
- if (o_tty->ldisc.ops->close)
- (o_tty->ldisc.ops->close)(o_tty);
- tty_ldisc_put(o_tty->ldisc.ops);
- WARN_ON(tty_ldisc_get(N_TTY, &ld));
- tty_ldisc_assign(o_tty, &ld);
- tty_set_termios_ldisc(o_tty, N_TTY);
- }
+ tty_ldisc_reinit(tty);
+ /* This will need doing differently if we need to lock */
+ if (o_tty)
+ tty_ldisc_release(o_tty, NULL);
}
/**
@@ -712,10 +883,10 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
void tty_ldisc_init(struct tty_struct *tty)
{
- struct tty_ldisc ld;
- if (tty_ldisc_get(N_TTY, &ld) < 0)
+ struct tty_ldisc *ld = tty_ldisc_get(N_TTY);
+ if (IS_ERR(ld))
panic("n_tty: init_tty");
- tty_ldisc_assign(tty, &ld);
+ tty_ldisc_assign(tty, ld);
}
void tty_ldisc_begin(void)
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c
index 9b8004c7268..62dadfc95e3 100644
--- a/drivers/char/tty_port.c
+++ b/drivers/char/tty_port.c
@@ -137,7 +137,7 @@ int tty_port_carrier_raised(struct tty_port *port)
EXPORT_SYMBOL(tty_port_carrier_raised);
/**
- * tty_port_raise_dtr_rts - Riase DTR/RTS
+ * tty_port_raise_dtr_rts - Raise DTR/RTS
* @port: tty port
*
* Wrapper for the DTR/RTS raise logic. For the moment this is used
@@ -147,12 +147,28 @@ EXPORT_SYMBOL(tty_port_carrier_raised);
void tty_port_raise_dtr_rts(struct tty_port *port)
{
- if (port->ops->raise_dtr_rts)
- port->ops->raise_dtr_rts(port);
+ if (port->ops->dtr_rts)
+ port->ops->dtr_rts(port, 1);
}
EXPORT_SYMBOL(tty_port_raise_dtr_rts);
/**
+ * tty_port_lower_dtr_rts - Lower DTR/RTS
+ * @port: tty port
+ *
+ * Wrapper for the DTR/RTS lower logic. For the moment this is used
+ * to hide some internal details. This will eventually become entirely
+ * internal to the tty port.
+ */
+
+void tty_port_lower_dtr_rts(struct tty_port *port)
+{
+ if (port->ops->dtr_rts)
+ port->ops->dtr_rts(port, 0);
+}
+EXPORT_SYMBOL(tty_port_lower_dtr_rts);
+
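A hedged sketch of what the new combined hook looks like on the driver side (struct example_uart, example_set_mctrl and example_clear_mctrl are invented names; only the .dtr_rts signature comes from this patch):

struct example_uart {
	struct tty_port port;
	/* device-specific state ... */
};

static void example_set_mctrl(struct example_uart *u, unsigned int bits);
static void example_clear_mctrl(struct example_uart *u, unsigned int bits);

static void example_dtr_rts(struct tty_port *port, int onoff)
{
	struct example_uart *u = container_of(port, struct example_uart, port);

	if (onoff)
		example_set_mctrl(u, TIOCM_DTR | TIOCM_RTS);
	else
		example_clear_mctrl(u, TIOCM_DTR | TIOCM_RTS);
}

static const struct tty_port_operations example_port_ops = {
	.dtr_rts	= example_dtr_rts,
	/* .carrier_raised = ..., other hooks as before */
};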
+/**
* tty_port_block_til_ready - Waiting logic for tty open
* @port: the tty port being opened
* @tty: the tty device being bound
@@ -167,7 +183,7 @@ EXPORT_SYMBOL(tty_port_raise_dtr_rts);
* - port flags and counts
*
* The passed tty_port must implement the carrier_raised method if it can
- * do carrier detect and the raise_dtr_rts method if it supports software
+ * do carrier detect and the dtr_rts method if it supports software
* management of these lines. Note that the dtr/rts raise is done each
* iteration as a hangup may have previously dropped them while we wait.
*/
@@ -182,7 +198,8 @@ int tty_port_block_til_ready(struct tty_port *port,
/* block if port is in the process of being closed */
if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
- interruptible_sleep_on(&port->close_wait);
+ wait_event_interruptible(port->close_wait,
+ !(port->flags & ASYNC_CLOSING));
if (port->flags & ASYNC_HUP_NOTIFY)
return -EAGAIN;
else
@@ -205,7 +222,6 @@ int tty_port_block_til_ready(struct tty_port *port,
before the next open may complete */
retval = 0;
- add_wait_queue(&port->open_wait, &wait);
/* The port lock protects the port counts */
spin_lock_irqsave(&port->lock, flags);
@@ -219,7 +235,7 @@ int tty_port_block_til_ready(struct tty_port *port,
if (tty->termios->c_cflag & CBAUD)
tty_port_raise_dtr_rts(port);
- set_current_state(TASK_INTERRUPTIBLE);
+ prepare_to_wait(&port->open_wait, &wait, TASK_INTERRUPTIBLE);
/* Check for a hangup or uninitialised port. Return accordingly */
if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)) {
if (port->flags & ASYNC_HUP_NOTIFY)
@@ -240,8 +256,7 @@ int tty_port_block_til_ready(struct tty_port *port,
}
schedule();
}
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&port->open_wait, &wait);
+ finish_wait(&port->open_wait, &wait);
/* Update counts. A parallel hangup will have set count to zero and
we must not mess that up further */
@@ -292,6 +307,17 @@ int tty_port_close_start(struct tty_port *port, struct tty_struct *tty, struct f
if (port->flags & ASYNC_INITIALIZED &&
port->closing_wait != ASYNC_CLOSING_WAIT_NONE)
tty_wait_until_sent(tty, port->closing_wait);
+ if (port->drain_delay) {
+ unsigned int bps = tty_get_baud_rate(tty);
+ long timeout;
+
+ if (bps > 1200)
+ timeout = max_t(long, (HZ * 10 * port->drain_delay) / bps,
+ HZ / 10);
+ else
+ timeout = 2 * HZ;
+ schedule_timeout_interruptible(timeout);
+ }
return 1;
}
EXPORT_SYMBOL(tty_port_close_start);
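To make the drain_delay arithmetic above concrete (my own worked example, assuming drain_delay counts character times at roughly 10 bits per character): at 9600 bps with port->drain_delay = 30 the timeout is max(HZ * 10 * 30 / 9600, HZ / 10) = max(HZ / 32, HZ / 10) = HZ / 10, i.e. a 100 ms sleep; at 1200 bps or below the code simply sleeps for a fixed 2 * HZ (two seconds).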
@@ -302,6 +328,9 @@ void tty_port_close_end(struct tty_port *port, struct tty_struct *tty)
tty_ldisc_flush(tty);
+ if (tty->termios->c_cflag & HUPCL)
+ tty_port_lower_dtr_rts(port);
+
spin_lock_irqsave(&port->lock, flags);
tty->closing = 0;
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 08151d4de48..de9ebee8657 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -95,7 +95,6 @@
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
-#include <linux/bootmem.h>
#include <linux/pm.h>
#include <linux/font.h>
#include <linux/bitops.h>
@@ -104,6 +103,7 @@
#include <linux/io.h>
#include <asm/system.h>
#include <linux/uaccess.h>
+#include <linux/kmemleak.h>
#define MAX_NR_CON_DRIVER 16
@@ -2875,14 +2875,11 @@ static int __init con_init(void)
mod_timer(&console_timer, jiffies + blankinterval);
}
- /*
- * kmalloc is not running yet - we use the bootmem allocator.
- */
for (currcons = 0; currcons < MIN_NR_CONSOLES; currcons++) {
- vc_cons[currcons].d = vc = alloc_bootmem(sizeof(struct vc_data));
+ vc_cons[currcons].d = vc = kzalloc(sizeof(struct vc_data), GFP_NOWAIT);
INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
visual_init(vc, currcons, 1);
- vc->vc_screenbuf = (unsigned short *)alloc_bootmem(vc->vc_screenbuf_size);
+ vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT);
vc->vc_kmalloced = 0;
vc_init(vc, vc->vc_rows, vc->vc_cols,
currcons || !vc->vc_sw->con_save_screen);
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 1efb2879a94..eef216f7f61 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -3,3 +3,5 @@ obj-$(CONFIG_X86_CYCLONE_TIMER) += cyclone.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
+obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
+obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 1c92c39a53a..cf56a2af5fe 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -18,7 +18,6 @@
*/
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -29,7 +28,7 @@
#include <linux/err.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
-#include <linux/sh_cmt.h>
+#include <linux/sh_timer.h>
struct sh_cmt_priv {
void __iomem *mapbase;
@@ -47,6 +46,7 @@ struct sh_cmt_priv {
unsigned long rate;
spinlock_t lock;
struct clock_event_device ced;
+ struct clocksource cs;
unsigned long total_cycles;
};
@@ -59,7 +59,7 @@ static DEFINE_SPINLOCK(sh_cmt_lock);
static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr)
{
- struct sh_cmt_config *cfg = p->pdev->dev.platform_data;
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
void __iomem *base = p->mapbase;
unsigned long offs;
@@ -83,7 +83,7 @@ static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr)
static inline void sh_cmt_write(struct sh_cmt_priv *p, int reg_nr,
unsigned long value)
{
- struct sh_cmt_config *cfg = p->pdev->dev.platform_data;
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
void __iomem *base = p->mapbase;
unsigned long offs;
@@ -110,23 +110,28 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
int *has_wrapped)
{
unsigned long v1, v2, v3;
+ int o1, o2;
+
+ o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;
/* Make sure the timer value is stable. Stolen from acpi_pm.c */
do {
+ o2 = o1;
v1 = sh_cmt_read(p, CMCNT);
v2 = sh_cmt_read(p, CMCNT);
v3 = sh_cmt_read(p, CMCNT);
- } while (unlikely((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1)
- || (v3 > v1 && v3 < v2)));
+ o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;
+ } while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
+ || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));
- *has_wrapped = sh_cmt_read(p, CMCSR) & p->overflow_bit;
+ *has_wrapped = o1;
return v2;
}
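The loop above is a variant of a common wrap-safe counter read; a simplified generic form (illustration only, function pointer names invented, and without the triple read the CMT needs for bus synchronisation) is:

static unsigned long read_counter_stable(unsigned long (*read_cnt)(void),
					 unsigned long (*read_ovf)(void),
					 int *wrapped)
{
	unsigned long v, o1, o2;

	do {
		o1 = read_ovf();	/* sample overflow flag before... */
		v = read_cnt();
		o2 = read_ovf();	/* ...and after reading the counter */
	} while (o1 != o2);		/* flag changed: a wrap raced us, retry */

	*wrapped = o1;
	return v;
}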
static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
{
- struct sh_cmt_config *cfg = p->pdev->dev.platform_data;
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
unsigned long flags, value;
/* start stop register shared by multiple timer channels */
@@ -144,7 +149,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
- struct sh_cmt_config *cfg = p->pdev->dev.platform_data;
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
int ret;
/* enable clock */
@@ -153,16 +158,18 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
pr_err("sh_cmt: cannot enable clock \"%s\"\n", cfg->clk);
return ret;
}
- *rate = clk_get_rate(p->clk) / 8;
/* make sure channel is disabled */
sh_cmt_start_stop_ch(p, 0);
/* configure channel, periodic mode and maximum timeout */
- if (p->width == 16)
- sh_cmt_write(p, CMCSR, 0);
- else
+ if (p->width == 16) {
+ *rate = clk_get_rate(p->clk) / 512;
+ sh_cmt_write(p, CMCSR, 0x43);
+ } else {
+ *rate = clk_get_rate(p->clk) / 8;
sh_cmt_write(p, CMCSR, 0x01a4);
+ }
sh_cmt_write(p, CMCOR, 0xffffffff);
sh_cmt_write(p, CMCNT, 0);
@@ -376,6 +383,68 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
spin_unlock_irqrestore(&p->lock, flags);
}
+static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
+{
+ return container_of(cs, struct sh_cmt_priv, cs);
+}
+
+static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
+{
+ struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+ unsigned long flags, raw;
+ unsigned long value;
+ int has_wrapped;
+
+ spin_lock_irqsave(&p->lock, flags);
+ value = p->total_cycles;
+ raw = sh_cmt_get_counter(p, &has_wrapped);
+
+ if (unlikely(has_wrapped))
+ raw += p->match_value;
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ return value + raw;
+}
+
+static int sh_cmt_clocksource_enable(struct clocksource *cs)
+{
+ struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+ int ret;
+
+ p->total_cycles = 0;
+
+ ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
+ if (ret)
+ return ret;
+
+ /* TODO: calculate good shift from rate and counter bit width */
+ cs->shift = 0;
+ cs->mult = clocksource_hz2mult(p->rate, cs->shift);
+ return 0;
+}
+
+static void sh_cmt_clocksource_disable(struct clocksource *cs)
+{
+ sh_cmt_stop(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE);
+}
+
+static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
+ char *name, unsigned long rating)
+{
+ struct clocksource *cs = &p->cs;
+
+ cs->name = name;
+ cs->rating = rating;
+ cs->read = sh_cmt_clocksource_read;
+ cs->enable = sh_cmt_clocksource_enable;
+ cs->disable = sh_cmt_clocksource_disable;
+ cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
+ cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ pr_info("sh_cmt: %s used as clock source\n", cs->name);
+ clocksource_register(cs);
+ return 0;
+}
+
static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced)
{
return container_of(ced, struct sh_cmt_priv, ced);
@@ -468,9 +537,9 @@ static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
clockevents_register_device(ced);
}
-int sh_cmt_register(struct sh_cmt_priv *p, char *name,
- unsigned long clockevent_rating,
- unsigned long clocksource_rating)
+static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
+ unsigned long clockevent_rating,
+ unsigned long clocksource_rating)
{
if (p->width == (sizeof(p->max_match_value) * 8))
p->max_match_value = ~0;
@@ -483,12 +552,15 @@ int sh_cmt_register(struct sh_cmt_priv *p, char *name,
if (clockevent_rating)
sh_cmt_register_clockevent(p, name, clockevent_rating);
+ if (clocksource_rating)
+ sh_cmt_register_clocksource(p, name, clocksource_rating);
+
return 0;
}
static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
{
- struct sh_cmt_config *cfg = pdev->dev.platform_data;
+ struct sh_timer_config *cfg = pdev->dev.platform_data;
struct resource *res;
int irq, ret;
ret = -ENXIO;
@@ -545,7 +617,7 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
if (resource_size(res) == 6) {
p->width = 16;
p->overflow_bit = 0x80;
- p->clear_bits = ~0xc0;
+ p->clear_bits = ~0x80;
} else {
p->width = 32;
p->overflow_bit = 0x8000;
@@ -566,8 +638,14 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
static int __devinit sh_cmt_probe(struct platform_device *pdev)
{
struct sh_cmt_priv *p = platform_get_drvdata(pdev);
+ struct sh_timer_config *cfg = pdev->dev.platform_data;
int ret;
+ if (p) {
+ pr_info("sh_cmt: %s kept as earlytimer\n", cfg->name);
+ return 0;
+ }
+
p = kmalloc(sizeof(*p), GFP_KERNEL);
if (p == NULL) {
dev_err(&pdev->dev, "failed to allocate driver data\n");
@@ -577,7 +655,6 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
ret = sh_cmt_setup(p, pdev);
if (ret) {
kfree(p);
-
platform_set_drvdata(pdev, NULL);
}
return ret;
@@ -606,6 +683,7 @@ static void __exit sh_cmt_exit(void)
platform_driver_unregister(&sh_cmt_device_driver);
}
+early_platform_init("earlytimer", &sh_cmt_device_driver);
module_init(sh_cmt_init);
module_exit(sh_cmt_exit);
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
new file mode 100644
index 00000000000..d1ae75454d1
--- /dev/null
+++ b/drivers/clocksource/sh_mtu2.c
@@ -0,0 +1,357 @@
+/*
+ * SuperH Timer Support - MTU2
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+#include <linux/clockchips.h>
+#include <linux/sh_timer.h>
+
+struct sh_mtu2_priv {
+ void __iomem *mapbase;
+ struct clk *clk;
+ struct irqaction irqaction;
+ struct platform_device *pdev;
+ unsigned long rate;
+ unsigned long periodic;
+ struct clock_event_device ced;
+};
+
+static DEFINE_SPINLOCK(sh_mtu2_lock);
+
+#define TSTR -1 /* shared register */
+#define TCR 0 /* channel register */
+#define TMDR 1 /* channel register */
+#define TIOR 2 /* channel register */
+#define TIER 3 /* channel register */
+#define TSR 4 /* channel register */
+#define TCNT 5 /* channel register */
+#define TGR 6 /* channel register */
+
+static unsigned long mtu2_reg_offs[] = {
+ [TCR] = 0,
+ [TMDR] = 1,
+ [TIOR] = 2,
+ [TIER] = 4,
+ [TSR] = 5,
+ [TCNT] = 6,
+ [TGR] = 8,
+};
+
+static inline unsigned long sh_mtu2_read(struct sh_mtu2_priv *p, int reg_nr)
+{
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
+ void __iomem *base = p->mapbase;
+ unsigned long offs;
+
+ if (reg_nr == TSTR)
+ return ioread8(base + cfg->channel_offset);
+
+ offs = mtu2_reg_offs[reg_nr];
+
+ if ((reg_nr == TCNT) || (reg_nr == TGR))
+ return ioread16(base + offs);
+ else
+ return ioread8(base + offs);
+}
+
+static inline void sh_mtu2_write(struct sh_mtu2_priv *p, int reg_nr,
+ unsigned long value)
+{
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
+ void __iomem *base = p->mapbase;
+ unsigned long offs;
+
+ if (reg_nr == TSTR) {
+ iowrite8(value, base + cfg->channel_offset);
+ return;
+ }
+
+ offs = mtu2_reg_offs[reg_nr];
+
+ if ((reg_nr == TCNT) || (reg_nr == TGR))
+ iowrite16(value, base + offs);
+ else
+ iowrite8(value, base + offs);
+}
+
+static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
+{
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
+ unsigned long flags, value;
+
+ /* start stop register shared by multiple timer channels */
+ spin_lock_irqsave(&sh_mtu2_lock, flags);
+ value = sh_mtu2_read(p, TSTR);
+
+ if (start)
+ value |= 1 << cfg->timer_bit;
+ else
+ value &= ~(1 << cfg->timer_bit);
+
+ sh_mtu2_write(p, TSTR, value);
+ spin_unlock_irqrestore(&sh_mtu2_lock, flags);
+}
+
+static int sh_mtu2_enable(struct sh_mtu2_priv *p)
+{
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
+ int ret;
+
+ /* enable clock */
+ ret = clk_enable(p->clk);
+ if (ret) {
+ pr_err("sh_mtu2: cannot enable clock \"%s\"\n", cfg->clk);
+ return ret;
+ }
+
+ /* make sure channel is disabled */
+ sh_mtu2_start_stop_ch(p, 0);
+
+ p->rate = clk_get_rate(p->clk) / 64;
+ p->periodic = (p->rate + HZ/2) / HZ;
+
+ /* "Periodic Counter Operation" */
+ sh_mtu2_write(p, TCR, 0x23); /* TGRA clear, divide clock by 64 */
+ sh_mtu2_write(p, TIOR, 0);
+ sh_mtu2_write(p, TGR, p->periodic);
+ sh_mtu2_write(p, TCNT, 0);
+ sh_mtu2_write(p, TMDR, 0);
+ sh_mtu2_write(p, TIER, 0x01);
+
+ /* enable channel */
+ sh_mtu2_start_stop_ch(p, 1);
+
+ return 0;
+}
+
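As a worked example of the reload value computed above (the 66.666 MHz peripheral clock is only an assumption for illustration): p->rate = 66666666 / 64 ≈ 1041666 counts per second, so at HZ = 100 the compare value p->periodic = (1041666 + 50) / 100 = 10417, giving one TGRA match interrupt per jiffy.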
+static void sh_mtu2_disable(struct sh_mtu2_priv *p)
+{
+ /* disable channel */
+ sh_mtu2_start_stop_ch(p, 0);
+
+ /* stop clock */
+ clk_disable(p->clk);
+}
+
+static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
+{
+ struct sh_mtu2_priv *p = dev_id;
+
+ /* acknowledge interrupt */
+ sh_mtu2_read(p, TSR);
+ sh_mtu2_write(p, TSR, 0xfe);
+
+ /* notify clockevent layer */
+ p->ced.event_handler(&p->ced);
+ return IRQ_HANDLED;
+}
+
+static struct sh_mtu2_priv *ced_to_sh_mtu2(struct clock_event_device *ced)
+{
+ return container_of(ced, struct sh_mtu2_priv, ced);
+}
+
+static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
+ struct clock_event_device *ced)
+{
+ struct sh_mtu2_priv *p = ced_to_sh_mtu2(ced);
+ int disabled = 0;
+
+ /* deal with old setting first */
+ switch (ced->mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ sh_mtu2_disable(p);
+ disabled = 1;
+ break;
+ default:
+ break;
+ }
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ pr_info("sh_mtu2: %s used for periodic clock events\n",
+ ced->name);
+ sh_mtu2_enable(p);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ if (!disabled)
+ sh_mtu2_disable(p);
+ break;
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ default:
+ break;
+ }
+}
+
+static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
+ char *name, unsigned long rating)
+{
+ struct clock_event_device *ced = &p->ced;
+ int ret;
+
+ memset(ced, 0, sizeof(*ced));
+
+ ced->name = name;
+ ced->features = CLOCK_EVT_FEAT_PERIODIC;
+ ced->rating = rating;
+ ced->cpumask = cpumask_of(0);
+ ced->set_mode = sh_mtu2_clock_event_mode;
+
+ ret = setup_irq(p->irqaction.irq, &p->irqaction);
+ if (ret) {
+ pr_err("sh_mtu2: failed to request irq %d\n",
+ p->irqaction.irq);
+ return;
+ }
+
+ pr_info("sh_mtu2: %s used for clock events\n", ced->name);
+ clockevents_register_device(ced);
+}
+
+static int sh_mtu2_register(struct sh_mtu2_priv *p, char *name,
+ unsigned long clockevent_rating)
+{
+ if (clockevent_rating)
+ sh_mtu2_register_clockevent(p, name, clockevent_rating);
+
+ return 0;
+}
+
+static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
+{
+ struct sh_timer_config *cfg = pdev->dev.platform_data;
+ struct resource *res;
+ int irq, ret;
+ ret = -ENXIO;
+
+ memset(p, 0, sizeof(*p));
+ p->pdev = pdev;
+
+ if (!cfg) {
+ dev_err(&p->pdev->dev, "missing platform data\n");
+ goto err0;
+ }
+
+ platform_set_drvdata(pdev, p);
+
+ res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&p->pdev->dev, "failed to get I/O memory\n");
+ goto err0;
+ }
+
+ irq = platform_get_irq(p->pdev, 0);
+ if (irq < 0) {
+ dev_err(&p->pdev->dev, "failed to get irq\n");
+ goto err0;
+ }
+
+ /* map memory, let mapbase point to our channel */
+ p->mapbase = ioremap_nocache(res->start, resource_size(res));
+ if (p->mapbase == NULL) {
+ pr_err("sh_mtu2: failed to remap I/O memory\n");
+ goto err0;
+ }
+
+ /* setup data for setup_irq() (too early for request_irq()) */
+ p->irqaction.name = cfg->name;
+ p->irqaction.handler = sh_mtu2_interrupt;
+ p->irqaction.dev_id = p;
+ p->irqaction.irq = irq;
+ p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+ p->irqaction.mask = CPU_MASK_NONE;
+
+ /* get hold of clock */
+ p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ if (IS_ERR(p->clk)) {
+ pr_err("sh_mtu2: cannot get clock \"%s\"\n", cfg->clk);
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
+
+ return sh_mtu2_register(p, cfg->name, cfg->clockevent_rating);
+ err1:
+ iounmap(p->mapbase);
+ err0:
+ return ret;
+}
+
+static int __devinit sh_mtu2_probe(struct platform_device *pdev)
+{
+ struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
+ struct sh_timer_config *cfg = pdev->dev.platform_data;
+ int ret;
+
+ if (p) {
+ pr_info("sh_mtu2: %s kept as earlytimer\n", cfg->name);
+ return 0;
+ }
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (p == NULL) {
+ dev_err(&pdev->dev, "failed to allocate driver data\n");
+ return -ENOMEM;
+ }
+
+ ret = sh_mtu2_setup(p, pdev);
+ if (ret) {
+ kfree(p);
+ platform_set_drvdata(pdev, NULL);
+ }
+ return ret;
+}
+
+static int __devexit sh_mtu2_remove(struct platform_device *pdev)
+{
+ return -EBUSY; /* cannot unregister clockevent */
+}
+
+static struct platform_driver sh_mtu2_device_driver = {
+ .probe = sh_mtu2_probe,
+ .remove = __devexit_p(sh_mtu2_remove),
+ .driver = {
+ .name = "sh_mtu2",
+ }
+};
+
+static int __init sh_mtu2_init(void)
+{
+ return platform_driver_register(&sh_mtu2_device_driver);
+}
+
+static void __exit sh_mtu2_exit(void)
+{
+ platform_driver_unregister(&sh_mtu2_device_driver);
+}
+
+early_platform_init("earlytimer", &sh_mtu2_device_driver);
+module_init(sh_mtu2_init);
+module_exit(sh_mtu2_exit);
+
+MODULE_AUTHOR("Magnus Damm");
+MODULE_DESCRIPTION("SuperH MTU2 Timer Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
new file mode 100644
index 00000000000..d6ea4398bf6
--- /dev/null
+++ b/drivers/clocksource/sh_tmu.c
@@ -0,0 +1,461 @@
+/*
+ * SuperH Timer Support - TMU
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/sh_timer.h>
+
+struct sh_tmu_priv {
+ void __iomem *mapbase;
+ struct clk *clk;
+ struct irqaction irqaction;
+ struct platform_device *pdev;
+ unsigned long rate;
+ unsigned long periodic;
+ struct clock_event_device ced;
+ struct clocksource cs;
+};
+
+static DEFINE_SPINLOCK(sh_tmu_lock);
+
+#define TSTR -1 /* shared register */
+#define TCOR 0 /* channel register */
+#define TCNT 1 /* channel register */
+#define TCR 2 /* channel register */
+
+static inline unsigned long sh_tmu_read(struct sh_tmu_priv *p, int reg_nr)
+{
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
+ void __iomem *base = p->mapbase;
+ unsigned long offs;
+
+ if (reg_nr == TSTR)
+ return ioread8(base - cfg->channel_offset);
+
+ offs = reg_nr << 2;
+
+ if (reg_nr == TCR)
+ return ioread16(base + offs);
+ else
+ return ioread32(base + offs);
+}
+
+static inline void sh_tmu_write(struct sh_tmu_priv *p, int reg_nr,
+ unsigned long value)
+{
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
+ void __iomem *base = p->mapbase;
+ unsigned long offs;
+
+ if (reg_nr == TSTR) {
+ iowrite8(value, base - cfg->channel_offset);
+ return;
+ }
+
+ offs = reg_nr << 2;
+
+ if (reg_nr == TCR)
+ iowrite16(value, base + offs);
+ else
+ iowrite32(value, base + offs);
+}
+
+static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
+{
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
+ unsigned long flags, value;
+
+ /* start stop register shared by multiple timer channels */
+ spin_lock_irqsave(&sh_tmu_lock, flags);
+ value = sh_tmu_read(p, TSTR);
+
+ if (start)
+ value |= 1 << cfg->timer_bit;
+ else
+ value &= ~(1 << cfg->timer_bit);
+
+ sh_tmu_write(p, TSTR, value);
+ spin_unlock_irqrestore(&sh_tmu_lock, flags);
+}
+
+static int sh_tmu_enable(struct sh_tmu_priv *p)
+{
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
+ int ret;
+
+ /* enable clock */
+ ret = clk_enable(p->clk);
+ if (ret) {
+ pr_err("sh_tmu: cannot enable clock \"%s\"\n", cfg->clk);
+ return ret;
+ }
+
+ /* make sure channel is disabled */
+ sh_tmu_start_stop_ch(p, 0);
+
+ /* maximum timeout */
+ sh_tmu_write(p, TCOR, 0xffffffff);
+ sh_tmu_write(p, TCNT, 0xffffffff);
+
+ /* configure channel to parent clock / 4, irq off */
+ p->rate = clk_get_rate(p->clk) / 4;
+ sh_tmu_write(p, TCR, 0x0000);
+
+ /* enable channel */
+ sh_tmu_start_stop_ch(p, 1);
+
+ return 0;
+}
+
+static void sh_tmu_disable(struct sh_tmu_priv *p)
+{
+ /* disable channel */
+ sh_tmu_start_stop_ch(p, 0);
+
+ /* stop clock */
+ clk_disable(p->clk);
+}
+
+static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
+ int periodic)
+{
+ /* stop timer */
+ sh_tmu_start_stop_ch(p, 0);
+
+ /* acknowledge interrupt */
+ sh_tmu_read(p, TCR);
+
+ /* enable interrupt */
+ sh_tmu_write(p, TCR, 0x0020);
+
+ /* reload delta value in case of periodic timer */
+ if (periodic)
+ sh_tmu_write(p, TCOR, delta);
+ else
+ sh_tmu_write(p, TCOR, 0);
+
+ sh_tmu_write(p, TCNT, delta);
+
+ /* start timer */
+ sh_tmu_start_stop_ch(p, 1);
+}
+
+static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
+{
+ struct sh_tmu_priv *p = dev_id;
+
+ /* disable or acknowledge interrupt */
+ if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT)
+ sh_tmu_write(p, TCR, 0x0000);
+ else
+ sh_tmu_write(p, TCR, 0x0020);
+
+ /* notify clockevent layer */
+ p->ced.event_handler(&p->ced);
+ return IRQ_HANDLED;
+}
+
+static struct sh_tmu_priv *cs_to_sh_tmu(struct clocksource *cs)
+{
+ return container_of(cs, struct sh_tmu_priv, cs);
+}
+
+static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
+{
+ struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+
+ return sh_tmu_read(p, TCNT) ^ 0xffffffff;
+}
+
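A note on the XOR above (my reading of the code, not stated in the patch): the TMU channel counts down from the 0xffffffff loaded into TCNT, so the number of elapsed cycles is 0xffffffff - TCNT, which for a 32-bit value is exactly TCNT ^ 0xffffffff; the XOR simply avoids the subtraction.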
+static int sh_tmu_clocksource_enable(struct clocksource *cs)
+{
+ struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+ int ret;
+
+ ret = sh_tmu_enable(p);
+ if (ret)
+ return ret;
+
+ /* TODO: calculate good shift from rate and counter bit width */
+ cs->shift = 10;
+ cs->mult = clocksource_hz2mult(p->rate, cs->shift);
+ return 0;
+}
+
+static void sh_tmu_clocksource_disable(struct clocksource *cs)
+{
+ sh_tmu_disable(cs_to_sh_tmu(cs));
+}
+
+static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
+ char *name, unsigned long rating)
+{
+ struct clocksource *cs = &p->cs;
+
+ cs->name = name;
+ cs->rating = rating;
+ cs->read = sh_tmu_clocksource_read;
+ cs->enable = sh_tmu_clocksource_enable;
+ cs->disable = sh_tmu_clocksource_disable;
+ cs->mask = CLOCKSOURCE_MASK(32);
+ cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ pr_info("sh_tmu: %s used as clock source\n", cs->name);
+ clocksource_register(cs);
+ return 0;
+}
+
+static struct sh_tmu_priv *ced_to_sh_tmu(struct clock_event_device *ced)
+{
+ return container_of(ced, struct sh_tmu_priv, ced);
+}
+
+static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic)
+{
+ struct clock_event_device *ced = &p->ced;
+
+ sh_tmu_enable(p);
+
+ /* TODO: calculate good shift from rate and counter bit width */
+
+ ced->shift = 32;
+ ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
+ ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced);
+ ced->min_delta_ns = 5000;
+
+ if (periodic) {
+ p->periodic = (p->rate + HZ/2) / HZ;
+ sh_tmu_set_next(p, p->periodic, 1);
+ }
+}
+
+static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
+ struct clock_event_device *ced)
+{
+ struct sh_tmu_priv *p = ced_to_sh_tmu(ced);
+ int disabled = 0;
+
+ /* deal with old setting first */
+ switch (ced->mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ case CLOCK_EVT_MODE_ONESHOT:
+ sh_tmu_disable(p);
+ disabled = 1;
+ break;
+ default:
+ break;
+ }
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ pr_info("sh_tmu: %s used for periodic clock events\n",
+ ced->name);
+ sh_tmu_clock_event_start(p, 1);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ pr_info("sh_tmu: %s used for oneshot clock events\n",
+ ced->name);
+ sh_tmu_clock_event_start(p, 0);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ if (!disabled)
+ sh_tmu_disable(p);
+ break;
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ default:
+ break;
+ }
+}
+
+static int sh_tmu_clock_event_next(unsigned long delta,
+ struct clock_event_device *ced)
+{
+ struct sh_tmu_priv *p = ced_to_sh_tmu(ced);
+
+ BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
+
+ /* program new delta value */
+ sh_tmu_set_next(p, delta, 0);
+ return 0;
+}
+
+static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
+ char *name, unsigned long rating)
+{
+ struct clock_event_device *ced = &p->ced;
+ int ret;
+
+ memset(ced, 0, sizeof(*ced));
+
+ ced->name = name;
+ ced->features = CLOCK_EVT_FEAT_PERIODIC;
+ ced->features |= CLOCK_EVT_FEAT_ONESHOT;
+ ced->rating = rating;
+ ced->cpumask = cpumask_of(0);
+ ced->set_next_event = sh_tmu_clock_event_next;
+ ced->set_mode = sh_tmu_clock_event_mode;
+
+ ret = setup_irq(p->irqaction.irq, &p->irqaction);
+ if (ret) {
+ pr_err("sh_tmu: failed to request irq %d\n",
+ p->irqaction.irq);
+ return;
+ }
+
+ pr_info("sh_tmu: %s used for clock events\n", ced->name);
+ clockevents_register_device(ced);
+}
+
+static int sh_tmu_register(struct sh_tmu_priv *p, char *name,
+ unsigned long clockevent_rating,
+ unsigned long clocksource_rating)
+{
+ if (clockevent_rating)
+ sh_tmu_register_clockevent(p, name, clockevent_rating);
+ else if (clocksource_rating)
+ sh_tmu_register_clocksource(p, name, clocksource_rating);
+
+ return 0;
+}
+
+static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
+{
+ struct sh_timer_config *cfg = pdev->dev.platform_data;
+ struct resource *res;
+ int irq, ret;
+ ret = -ENXIO;
+
+ memset(p, 0, sizeof(*p));
+ p->pdev = pdev;
+
+ if (!cfg) {
+ dev_err(&p->pdev->dev, "missing platform data\n");
+ goto err0;
+ }
+
+ platform_set_drvdata(pdev, p);
+
+ res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&p->pdev->dev, "failed to get I/O memory\n");
+ goto err0;
+ }
+
+ irq = platform_get_irq(p->pdev, 0);
+ if (irq < 0) {
+ dev_err(&p->pdev->dev, "failed to get irq\n");
+ goto err0;
+ }
+
+ /* map memory, let mapbase point to our channel */
+ p->mapbase = ioremap_nocache(res->start, resource_size(res));
+ if (p->mapbase == NULL) {
+ pr_err("sh_tmu: failed to remap I/O memory\n");
+ goto err0;
+ }
+
+ /* setup data for setup_irq() (too early for request_irq()) */
+ p->irqaction.name = cfg->name;
+ p->irqaction.handler = sh_tmu_interrupt;
+ p->irqaction.dev_id = p;
+ p->irqaction.irq = irq;
+ p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+ p->irqaction.mask = CPU_MASK_NONE;
+
+ /* get hold of clock */
+ p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ if (IS_ERR(p->clk)) {
+ pr_err("sh_tmu: cannot get clock \"%s\"\n", cfg->clk);
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
+
+ return sh_tmu_register(p, cfg->name,
+ cfg->clockevent_rating,
+ cfg->clocksource_rating);
+ err1:
+ iounmap(p->mapbase);
+ err0:
+ return ret;
+}
+
+static int __devinit sh_tmu_probe(struct platform_device *pdev)
+{
+ struct sh_tmu_priv *p = platform_get_drvdata(pdev);
+ struct sh_timer_config *cfg = pdev->dev.platform_data;
+ int ret;
+
+ if (p) {
+ pr_info("sh_tmu: %s kept as earlytimer\n", cfg->name);
+ return 0;
+ }
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (p == NULL) {
+ dev_err(&pdev->dev, "failed to allocate driver data\n");
+ return -ENOMEM;
+ }
+
+ ret = sh_tmu_setup(p, pdev);
+ if (ret) {
+ kfree(p);
+ platform_set_drvdata(pdev, NULL);
+ }
+ return ret;
+}
+
+static int __devexit sh_tmu_remove(struct platform_device *pdev)
+{
+ return -EBUSY; /* cannot unregister clockevent and clocksource */
+}
+
+static struct platform_driver sh_tmu_device_driver = {
+ .probe = sh_tmu_probe,
+ .remove = __devexit_p(sh_tmu_remove),
+ .driver = {
+ .name = "sh_tmu",
+ }
+};
+
+static int __init sh_tmu_init(void)
+{
+ return platform_driver_register(&sh_tmu_device_driver);
+}
+
+static void __exit sh_tmu_exit(void)
+{
+ platform_driver_unregister(&sh_tmu_device_driver);
+}
+
+early_platform_init("earlytimer", &sh_tmu_device_driver);
+module_init(sh_tmu_init);
+module_exit(sh_tmu_exit);
+
+MODULE_AUTHOR("Magnus Damm");
+MODULE_DESCRIPTION("SuperH TMU Timer Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 47d2ad0ae07..6e2ec0b1894 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -808,7 +808,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
ret = -ENOMEM;
goto nomem_out;
}
- if (!alloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
free_cpumask_var(policy->cpus);
kfree(policy);
ret = -ENOMEM;
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 66a7687cca7..f18d1bde043 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -179,9 +179,14 @@ static void dma_halt(struct fsl_dma_chan *fsl_chan)
static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
struct fsl_desc_sw *desc)
{
+ u64 snoop_bits;
+
+ snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+ ? FSL_DMA_SNEN : 0;
+
desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
- DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL,
- 64);
+ DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
+ | snoop_bits, 64);
}
static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
@@ -313,8 +318,8 @@ static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
- struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
+ struct fsl_desc_sw *desc;
unsigned long flags;
dma_cookie_t cookie;
@@ -322,14 +327,17 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_irqsave(&fsl_chan->desc_lock, flags);
cookie = fsl_chan->common.cookie;
- cookie++;
- if (cookie < 0)
- cookie = 1;
- desc->async_tx.cookie = cookie;
- fsl_chan->common.cookie = desc->async_tx.cookie;
+ list_for_each_entry(desc, &tx->tx_list, node) {
+ cookie++;
+ if (cookie < 0)
+ cookie = 1;
- append_ld_queue(fsl_chan, desc);
- list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev);
+ desc->async_tx.cookie = cookie;
+ }
+
+ fsl_chan->common.cookie = cookie;
+ append_ld_queue(fsl_chan, tx_to_fsl_desc(tx));
+ list_splice_init(&tx->tx_list, fsl_chan->ld_queue.prev);
spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
@@ -454,8 +462,8 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
{
struct fsl_dma_chan *fsl_chan;
struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
+ struct list_head *list;
size_t copy;
- LIST_HEAD(link_chain);
if (!chan)
return NULL;
@@ -472,7 +480,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
if (!new) {
dev_err(fsl_chan->dev,
"No free memory for link descriptor\n");
- return NULL;
+ goto fail;
}
#ifdef FSL_DMA_LD_DEBUG
dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
@@ -507,7 +515,19 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
/* Set End-of-link to the last link descriptor of new list*/
set_ld_eol(fsl_chan, new);
- return first ? &first->async_tx : NULL;
+ return &first->async_tx;
+
+fail:
+ if (!first)
+ return NULL;
+
+ list = &first->async_tx.tx_list;
+ list_for_each_entry_safe_reverse(new, prev, list, node) {
+ list_del(&new->node);
+ dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
+ }
+
+ return NULL;
}
/**
@@ -598,15 +618,16 @@ static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
dma_addr_t next_dest_addr;
unsigned long flags;
+ spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+
if (!dma_is_idle(fsl_chan))
- return;
+ goto out_unlock;
dma_halt(fsl_chan);
/* If there are link descriptors in the queue that have
* not been transferred yet, we need to start them.
*/
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
/* Find the first untransferred descriptor */
for (ld_node = fsl_chan->ld_queue.next;
@@ -617,8 +638,6 @@ static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
fsl_chan->common.cookie) == DMA_SUCCESS);
ld_node = ld_node->next);
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
-
if (ld_node != &fsl_chan->ld_queue) {
/* Get the ld start address from ld_queue */
next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
@@ -630,6 +649,9 @@ static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
set_cdar(fsl_chan, 0);
set_ndar(fsl_chan, 0);
}
+
+out_unlock:
+ spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}
/**
@@ -831,7 +853,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
- if (new_fsl_chan->id > FSL_DMA_MAX_CHANS_PER_DEVICE) {
+ if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
dev_err(fdev->dev, "There is no %d channel!\n",
new_fsl_chan->id);
err = -EINVAL;
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 1955ee8d6d2..a600fc0f796 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -173,7 +173,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
- if (i7300_idle_platform_probe(NULL, NULL) == 0) {
+ if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) {
device->common.chancnt--;
}
#endif
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index e5f5c5a8ba6..ab4f3592a11 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -49,7 +49,6 @@ config EDAC_DEBUG_VERBOSE
config EDAC_MM_EDAC
tristate "Main Memory EDAC (Error Detection And Correction) reporting"
- default y
help
Some systems are able to detect and correct errors in main
memory. EDAC can report statistics on memory error
@@ -58,6 +57,31 @@ config EDAC_MM_EDAC
occurred so that a particular failing memory module can be
replaced. If unsure, select 'Y'.
+config EDAC_AMD64
+ tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h"
+ depends on EDAC_MM_EDAC && K8_NB && X86_64 && PCI
+ help
+ Support for error detection and correction on the AMD 64
+ Families of Memory Controllers (K8, F10h and F11h)
+
+config EDAC_AMD64_ERROR_INJECTION
+ bool "Sysfs Error Injection facilities"
+ depends on EDAC_AMD64
+ help
+ Recent Opterons (Family 10h and later) provide for Memory Error
+ Injection into the ECC detection circuits. The amd64_edac module
+ allows the operator/user to inject Uncorrectable and Correctable
+ errors into DRAM.
+
+ When enabled, in each of the respective memory controller directories
+ (/sys/devices/system/edac/mc/mcX), there are 3 input files:
+
+ - inject_section (0..3, 16-byte section of 64-byte cacheline),
+ - inject_word (0..8, 16-bit word of 16-byte section),
+ - inject_ecc_vector (hex ecc vector: select bits of inject word)
+
+ In addition, there are two control files, inject_read and inject_write,
+ which trigger the DRAM ECC Read and Write respectively.
config EDAC_AMD76X
tristate "AMD 76x (760, 762, 768)"
@@ -192,16 +216,20 @@ config EDAC_PPC4XX
config EDAC_AMD8131
tristate "AMD8131 HyperTransport PCI-X Tunnel"
- depends on EDAC_MM_EDAC && PCI
+ depends on EDAC_MM_EDAC && PCI && PPC_MAPLE
help
Support for error detection and correction on the
AMD8131 HyperTransport PCI-X Tunnel chip.
+ Note: add more Kconfig dependencies if this driver is adopted
+ on machines other than Maple.
config EDAC_AMD8111
tristate "AMD8111 HyperTransport I/O Hub"
- depends on EDAC_MM_EDAC && PCI
+ depends on EDAC_MM_EDAC && PCI && PPC_MAPLE
help
Support for error detection and correction on the
AMD8111 HyperTransport I/O Hub chip.
+ Note: add more Kconfig dependencies if this driver is adopted
+ on machines other than Maple.
endif # EDAC
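A hedged usage sketch for the injection files described in the EDAC_AMD64_ERROR_INJECTION help text above (the mc0 instance and the values written are illustrative; only the file names and the /sys/devices/system/edac/mc/mcX base come from the help text):

#include <stdio.h>

/* Program an injection pattern, then trigger the DRAM ECC write. */
int main(void)
{
	const char *base = "/sys/devices/system/edac/mc/mc0";
	char path[256];
	struct { const char *file, *val; } steps[] = {
		{ "inject_section",    "2"   },	/* 16-byte section of the cacheline */
		{ "inject_word",       "0"   },	/* 16-bit word within that section */
		{ "inject_ecc_vector", "0x1" },	/* bits of the word to corrupt */
		{ "inject_write",      "1"   },	/* trigger the DRAM ECC write */
	};
	unsigned int i;

	for (i = 0; i < sizeof(steps) / sizeof(steps[0]); i++) {
		FILE *f;

		snprintf(path, sizeof(path), "%s/%s", base, steps[i].file);
		f = fopen(path, "w");
		if (!f)
			return 1;
		fputs(steps[i].val, f);
		fclose(f);
	}
	return 0;
}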
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index a5fdcf02f59..633dc5604ee 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -30,8 +30,17 @@ obj-$(CONFIG_EDAC_I3000) += i3000_edac.o
obj-$(CONFIG_EDAC_X38) += x38_edac.o
obj-$(CONFIG_EDAC_I82860) += i82860_edac.o
obj-$(CONFIG_EDAC_R82600) += r82600_edac.o
+
+amd64_edac_mod-y := amd64_edac_err_types.o amd64_edac.o
+amd64_edac_mod-$(CONFIG_EDAC_DEBUG) += amd64_edac_dbg.o
+amd64_edac_mod-$(CONFIG_EDAC_AMD64_ERROR_INJECTION) += amd64_edac_inj.o
+
+obj-$(CONFIG_EDAC_AMD64) += amd64_edac_mod.o
+
obj-$(CONFIG_EDAC_PASEMI) += pasemi_edac.o
obj-$(CONFIG_EDAC_MPC85XX) += mpc85xx_edac.o
obj-$(CONFIG_EDAC_MV64X60) += mv64x60_edac.o
obj-$(CONFIG_EDAC_CELL) += cell_edac.o
obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o
+obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o
+obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
new file mode 100644
index 00000000000..c36bf40568c
--- /dev/null
+++ b/drivers/edac/amd64_edac.c
@@ -0,0 +1,3354 @@
+#include "amd64_edac.h"
+#include <asm/k8.h>
+
+static struct edac_pci_ctl_info *amd64_ctl_pci;
+
+static int report_gart_errors;
+module_param(report_gart_errors, int, 0644);
+
+/*
+ * Set by command line parameter. If BIOS has enabled the ECC, this override is
+ * cleared to prevent this driver from re-enabling the hardware.
+ */
+static int ecc_enable_override;
+module_param(ecc_enable_override, int, 0644);
+
+/* Lookup table for all possible MC control instances */
+struct amd64_pvt;
+static struct mem_ctl_info *mci_lookup[MAX_NUMNODES];
+static struct amd64_pvt *pvt_lookup[MAX_NUMNODES];
+
+/*
+ * Memory scrubber control interface. For K8, memory scrubbing is handled by
+ * hardware and can involve L2 cache, dcache as well as the main memory. With
+ * F10, this is extended to L3 cache scrubbing on CPU models sporting that
+ * functionality.
+ *
+ * This causes the "units" for the scrubbing speed to vary from 64-byte blocks
+ * (DRAM) to cache lines. This is nasty, so we will use bandwidth in
+ * bytes/sec for the setting.
+ *
+ * Currently, we only do DRAM scrubbing. If the scrubbing is done in software on
+ * other archs, we might not have access to the caches directly.
+ */
+
+/*
+ * Scan the scrub rate mapping table for a close or matching bandwidth value
+ * to issue. If the requested value is too big, use the last maximum value
+ * found.
+ */
+static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw,
+ u32 min_scrubrate)
+{
+ u32 scrubval;
+ int i;
+
+ /*
+ * map the configured rate (new_bw) to a value specific to the AMD64
+ * memory controller and apply to register. Search for the first
+ * bandwidth entry that is greater than or equal to the requested
+ * setting and program that. If at the last entry, turn off DRAM
+ * scrubbing.
+ */
+ for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
+ /*
+ * skip scrub rates which aren't recommended
+ * (see F10 BKDG, F3x58)
+ */
+ if (scrubrates[i].scrubval < min_scrubrate)
+ continue;
+
+ if (scrubrates[i].bandwidth <= new_bw)
+ break;
+
+ /*
+ * if no suitable bandwidth found, turn off DRAM scrubbing
+ * entirely by falling back to the last element in the
+ * scrubrates array.
+ */
+ }
+
+ scrubval = scrubrates[i].scrubval;
+ if (scrubval)
+ edac_printk(KERN_DEBUG, EDAC_MC,
+ "Setting scrub rate bandwidth: %u\n",
+ scrubrates[i].bandwidth);
+ else
+ edac_printk(KERN_DEBUG, EDAC_MC, "Turning scrubbing off.\n");
+
+ pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
+
+ return 0;
+}
+
+static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 *bandwidth)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ u32 min_scrubrate = 0x0;
+
+ switch (boot_cpu_data.x86) {
+ case 0xf:
+ min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
+ break;
+ case 0x10:
+ min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
+ break;
+ case 0x11:
+ min_scrubrate = F11_MIN_SCRUB_RATE_BITS;
+ break;
+
+ default:
+ amd64_printk(KERN_ERR, "Unsupported family!\n");
+ break;
+ }
+ return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, *bandwidth,
+ min_scrubrate);
+}
+
+static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ u32 scrubval = 0;
+ int status = -1, i, ret = 0;
+
+ ret = pci_read_config_dword(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
+ if (ret)
+ debugf0("Reading K8_SCRCTRL failed\n");
+
+ scrubval = scrubval & 0x001F;
+
+ edac_printk(KERN_DEBUG, EDAC_MC,
+ "pci-read, sdram scrub control value: %d \n", scrubval);
+
+ for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
+ if (scrubrates[i].scrubval == scrubval) {
+ *bw = scrubrates[i].bandwidth;
+ status = 0;
+ break;
+ }
+ }
+
+ return status;
+}
+
+/* Map from a CSROW entry to the mask entry that operates on it */
+static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
+{
+ return csrow >> (pvt->num_dcsm >> 3);
+}
+
+/* return the 'base' address of the i'th CS entry of the 'dct' DRAM controller */
+static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
+{
+ if (dct == 0)
+ return pvt->dcsb0[csrow];
+ else
+ return pvt->dcsb1[csrow];
+}
+
+/*
+ * Return the 'mask' address for the i'th CS entry. This function is needed
+ * because the number of DCSM registers differs between Rev E and prior vs
+ * Rev F and later.
+ */
+static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
+{
+ if (dct == 0)
+ return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
+ else
+ return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
+}
+
+
+/*
+ * In *base and *limit, pass back the full 40-bit base and limit physical
+ * addresses for the node given by node_id. This information is obtained from
+ * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
+ * base and limit addresses are of type SysAddr, as defined at the start of
+ * section 3.4.4 (p. 70). They are the lowest and highest physical addresses
+ * in the address range they represent.
+ */
+static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
+ u64 *base, u64 *limit)
+{
+ *base = pvt->dram_base[node_id];
+ *limit = pvt->dram_limit[node_id];
+}
+
+/*
+ * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
+ * with node_id
+ */
+static int amd64_base_limit_match(struct amd64_pvt *pvt,
+ u64 sys_addr, int node_id)
+{
+ u64 base, limit, addr;
+
+ amd64_get_base_and_limit(pvt, node_id, &base, &limit);
+
+ /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
+ * all ones if the most significant implemented address bit is 1.
+ * Here we discard bits 63-40. See section 3.4.2 of AMD publication
+ * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
+ * Application Programming.
+ */
+ addr = sys_addr & 0x000000ffffffffffull;
+
+ return (addr >= base) && (addr <= limit);
+}
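+
+/*
+ * Illustrative example (not part of the original code): a sign-extended
+ * SysAddr of 0xffffff8012345678 masked with 0x000000ffffffffff yields
+ * 0x8012345678, which is then compared against this node's base/limit pair.
+ */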
+
+/*
+ * Attempt to map a SysAddr to a node. On success, return a pointer to the
+ * mem_ctl_info structure for the node that the SysAddr maps to.
+ *
+ * On failure, return NULL.
+ */
+static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
+ u64 sys_addr)
+{
+ struct amd64_pvt *pvt;
+ int node_id;
+ u32 intlv_en, bits;
+
+ /*
+ * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
+ * 3.4.4.2) registers to map the SysAddr to a node ID.
+ */
+ pvt = mci->pvt_info;
+
+ /*
+ * The value of this field should be the same for all DRAM Base
+ * registers. Therefore we arbitrarily choose to read it from the
+ * register for node 0.
+ */
+ intlv_en = pvt->dram_IntlvEn[0];
+
+ if (intlv_en == 0) {
+ for (node_id = 0; ; ) {
+ if (amd64_base_limit_match(pvt, sys_addr, node_id))
+ break;
+
+ if (++node_id >= DRAM_REG_COUNT)
+ goto err_no_match;
+ }
+ goto found;
+ }
+
+ if (unlikely((intlv_en != (0x01 << 8)) &&
+ (intlv_en != (0x03 << 8)) &&
+ (intlv_en != (0x07 << 8)))) {
+ amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
+ "IntlvEn field of DRAM Base Register for node 0: "
+ "This probably indicates a BIOS bug.\n", intlv_en);
+ return NULL;
+ }
+
+ bits = (((u32) sys_addr) >> 12) & intlv_en;
+
+ for (node_id = 0; ; ) {
+ if ((pvt->dram_limit[node_id] & intlv_en) == bits)
+ break; /* intlv_sel field matches */
+
+ if (++node_id >= DRAM_REG_COUNT)
+ goto err_no_match;
+ }
+
+ /* sanity test for sys_addr */
+ if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
+ amd64_printk(KERN_WARNING,
+ "%s(): sys_addr 0x%lx falls outside base/limit "
+ "address range for node %d with node interleaving "
+ "enabled.\n", __func__, (unsigned long)sys_addr,
+ node_id);
+ return NULL;
+ }
+
+found:
+ return edac_mc_find(node_id);
+
+err_no_match:
+ debugf2("sys_addr 0x%lx doesn't match any node\n",
+ (unsigned long)sys_addr);
+
+ return NULL;
+}
+
+/*
+ * Extract the DRAM CS base address from selected csrow register.
+ */
+static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
+{
+ return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
+ pvt->dcs_shift;
+}
+
+/*
+ * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way.
+ */
+static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
+{
+ u64 dcsm_bits, other_bits;
+ u64 mask;
+
+ /* Extract bits from DRAM CS Mask. */
+ dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;
+
+ other_bits = pvt->dcsm_mask;
+ other_bits = ~(other_bits << pvt->dcs_shift);
+
+ /*
+ * The extracted bits from DCSM belong in the spaces represented by
+ * the cleared bits in other_bits.
+ */
+ mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
+
+ return mask;
+}
+
+/*
+ * @input_addr is an InputAddr associated with the node given by mci. Return the
+ * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
+ */
+static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
+{
+ struct amd64_pvt *pvt;
+ int csrow;
+ u64 base, mask;
+
+ pvt = mci->pvt_info;
+
+ /*
+ * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
+ * base/mask register pair, test the condition shown near the start of
+ * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
+ */
+ for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) {
+
+ /* This DRAM chip select is disabled on this node */
+ if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
+ continue;
+
+ base = base_from_dct_base(pvt, csrow);
+ mask = ~mask_from_dct_mask(pvt, csrow);
+
+ if ((input_addr & mask) == (base & mask)) {
+ debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
+ (unsigned long)input_addr, csrow,
+ pvt->mc_node_id);
+
+ return csrow;
+ }
+ }
+
+ debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
+ (unsigned long)input_addr, pvt->mc_node_id);
+
+ return -1;
+}
+
+/*
+ * Return the base value defined by the DRAM Base register for the node
+ * represented by mci. This function returns the full 40-bit value despite the
+ * fact that the register only stores bits 39-24 of the value. See section
+ * 3.4.4.1 (BKDG #26094, K8, revA-E)
+ */
+static inline u64 get_dram_base(struct mem_ctl_info *mci)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ return pvt->dram_base[pvt->mc_node_id];
+}
+
+/*
+ * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
+ * for the node represented by mci. Info is passed back in *hole_base,
+ * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
+ * info is invalid. Info may be invalid for either of the following reasons:
+ *
+ * - The revision of the node is not E or greater. In this case, the DRAM Hole
+ * Address Register does not exist.
+ *
+ * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
+ * indicating that its contents are not valid.
+ *
+ * The values passed back in *hole_base, *hole_offset, and *hole_size are
+ * complete 32-bit values despite the fact that the bitfields in the DHAR
+ * only represent bits 31-24 of the base and offset values.
+ */
+int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
+ u64 *hole_offset, u64 *hole_size)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ u64 base;
+
+ /* only revE and later have the DRAM Hole Address Register */
+ if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_E) {
+ debugf1(" revision %d for node %d does not support DHAR\n",
+ pvt->ext_model, pvt->mc_node_id);
+ return 1;
+ }
+
+ /* on Fam10h, the DramMemHoistValid bit must also be set */
+ if (boot_cpu_data.x86 == 0x10 &&
+ (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
+ debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
+ return 1;
+ }
+
+ if ((pvt->dhar & DHAR_VALID) == 0) {
+ debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
+ pvt->mc_node_id);
+ return 1;
+ }
+
+ /* This node has Memory Hoisting */
+
+ /* +------------------+--------------------+--------------------+-----
+ * | memory | DRAM hole | relocated |
+ * | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
+ * | | | DRAM hole |
+ * | | | [0x100000000, |
+ * | | | (0x100000000+ |
+ * | | | (0xffffffff-x))] |
+ * +------------------+--------------------+--------------------+-----
+ *
+ * Above is a diagram of physical memory showing the DRAM hole and the
+ * relocated addresses from the DRAM hole. As shown, the DRAM hole
+ * starts at address x (the base address) and extends through address
+ * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
+ * addresses in the hole so that they start at 0x100000000.
+ */
+
+ base = dhar_base(pvt->dhar);
+
+ *hole_base = base;
+ *hole_size = (0x1ull << 32) - base;
+
+ if (boot_cpu_data.x86 > 0xf)
+ *hole_offset = f10_dhar_offset(pvt->dhar);
+ else
+ *hole_offset = k8_dhar_offset(pvt->dhar);
+
+ debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
+ pvt->mc_node_id, (unsigned long)*hole_base,
+ (unsigned long)*hole_offset, (unsigned long)*hole_size);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
+
+/*
+ * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
+ * assumed that sys_addr maps to the node given by mci.
+ *
+ * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
+ * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
+ * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
+ * then it is also involved in translating a SysAddr to a DramAddr. Sections
+ * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
+ * These parts of the documentation are unclear. I interpret them as follows:
+ *
+ * When node n receives a SysAddr, it processes the SysAddr as follows:
+ *
+ * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
+ * Limit registers for node n. If the SysAddr is not within the range
+ * specified by the base and limit values, then node n ignores the Sysaddr
+ * (since it does not map to node n). Otherwise continue to step 2 below.
+ *
+ * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
+ * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
+ * the range of relocated addresses (starting at 0x100000000) from the DRAM
+ * hole. If not, skip to step 3 below. Else get the value of the
+ * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
+ * offset defined by this value from the SysAddr.
+ *
+ * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
+ * Base register for node n. To obtain the DramAddr, subtract the base
+ * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
+ */
+static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
+{
+ u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
+ int ret = 0;
+
+ dram_base = get_dram_base(mci);
+
+ ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
+ &hole_size);
+ if (!ret) {
+ if ((sys_addr >= (1ull << 32)) &&
+ (sys_addr < ((1ull << 32) + hole_size))) {
+ /* use DHAR to translate SysAddr to DramAddr */
+ dram_addr = sys_addr - hole_offset;
+
+ debugf2("using DHAR to translate SysAddr 0x%lx to "
+ "DramAddr 0x%lx\n",
+ (unsigned long)sys_addr,
+ (unsigned long)dram_addr);
+
+ return dram_addr;
+ }
+ }
+
+ /*
+ * Translate the SysAddr to a DramAddr as shown near the start of
+ * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
+ * only deals with 40-bit values. Therefore we discard bits 63-40 of
+ * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
+ * discard are all 1s. Otherwise the bits we discard are all 0s. See
+ * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
+ * Programmer's Manual Volume 1 Application Programming.
+ */
+ dram_addr = (sys_addr & 0xffffffffffull) - dram_base;
+
+ debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
+ "DramAddr 0x%lx\n", (unsigned long)sys_addr,
+ (unsigned long)dram_addr);
+ return dram_addr;
+}
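+
+/*
+ * Worked example with hypothetical values (for illustration only): on a
+ * single-node system with dram_base == 0, a DRAM hole starting at
+ * hole_base == 0xc0000000 and a BIOS-programmed hole_offset == 0x40000000,
+ * a SysAddr of 0x100000000 falls in the relocated range and becomes
+ * DramAddr 0x100000000 - 0x40000000 == 0xc0000000, while a SysAddr of
+ * 0x80000000 lies below the hole and is translated by the DRAM Base path
+ * instead (0x80000000 - dram_base).
+ */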
+
+/*
+ * @intlv_en is the value of the IntlvEn field from a DRAM Base register
+ * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
+ * for node interleaving.
+ */
+static int num_node_interleave_bits(unsigned intlv_en)
+{
+ static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
+ int n;
+
+ BUG_ON(intlv_en > 7);
+ n = intlv_shift_table[intlv_en];
+ return n;
+}
+
+/* Translate the DramAddr given by @dram_addr to an InputAddr. */
+static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
+{
+ struct amd64_pvt *pvt;
+ int intlv_shift;
+ u64 input_addr;
+
+ pvt = mci->pvt_info;
+
+ /*
+ * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
+ * concerning translating a DramAddr to an InputAddr.
+ */
+ intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
+ input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
+ (dram_addr & 0xfff);
+
+ debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
+ intlv_shift, (unsigned long)dram_addr,
+ (unsigned long)input_addr);
+
+ return input_addr;
+}
+
+/*
+ * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
+ * assumed that @sys_addr maps to the node given by mci.
+ */
+static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
+{
+ u64 input_addr;
+
+ input_addr =
+ dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
+
+ debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
+ (unsigned long)sys_addr, (unsigned long)input_addr);
+
+ return input_addr;
+}
+
+
+/*
+ * @input_addr is an InputAddr associated with the node represented by mci.
+ * Translate @input_addr to a DramAddr and return the result.
+ */
+static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
+{
+ struct amd64_pvt *pvt;
+ int node_id, intlv_shift;
+ u64 bits, dram_addr;
+ u32 intlv_sel;
+
+ /*
+ * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
+ * shows how to translate a DramAddr to an InputAddr. Here we reverse
+ * this procedure. When translating from a DramAddr to an InputAddr, the
+ * bits used for node interleaving are discarded. Here we recover these
+ * bits from the IntlvSel field of the DRAM Limit register (section
+ * 3.4.4.2) for the node that input_addr is associated with.
+ */
+ pvt = mci->pvt_info;
+ node_id = pvt->mc_node_id;
+ BUG_ON((node_id < 0) || (node_id > 7));
+
+ intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
+
+ if (intlv_shift == 0) {
+ debugf1(" InputAddr 0x%lx translates to DramAddr of "
+ "same value\n", (unsigned long)input_addr);
+
+ return input_addr;
+ }
+
+ bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
+ (input_addr & 0xfff);
+
+ intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
+ dram_addr = bits + (intlv_sel << 12);
+
+ debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
+ "(%d node interleave bits)\n", (unsigned long)input_addr,
+ (unsigned long)dram_addr, intlv_shift);
+
+ return dram_addr;
+}
+
+/*
+ * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
+ * @dram_addr to a SysAddr.
+ */
+static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;
+ int ret = 0;
+
+ ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
+ &hole_size);
+ if (!ret) {
+ if ((dram_addr >= hole_base) &&
+ (dram_addr < (hole_base + hole_size))) {
+ sys_addr = dram_addr + hole_offset;
+
+ debugf1("using DHAR to translate DramAddr 0x%lx to "
+ "SysAddr 0x%lx\n", (unsigned long)dram_addr,
+ (unsigned long)sys_addr);
+
+ return sys_addr;
+ }
+ }
+
+ amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
+ sys_addr = dram_addr + base;
+
+ /*
+ * The sys_addr we have computed up to this point is a 40-bit value
+ * because the k8 deals with 40-bit values. However, the value we are
+ * supposed to return is a full 64-bit physical address. The AMD
+ * x86-64 architecture specifies that the most significant implemented
+ * address bit through bit 63 of a physical address must be either all
+ * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
+ * 64-bit value below. See section 3.4.2 of AMD publication 24592:
+ * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
+ * Programming.
+ */
+ sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
+
+ debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
+ pvt->mc_node_id, (unsigned long)dram_addr,
+ (unsigned long)sys_addr);
+
+ return sys_addr;
+}
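+
+/*
+ * Illustrative example of the sign extension above (not part of the
+ * original code): a 40-bit sys_addr of 0x8000000000 has bit 39 set, so the
+ * OR above yields 0xffffff8000000000; a sys_addr of 0x0012345678 has bit 39
+ * clear, so ~((0) - 1) == 0 and the value is returned unchanged.
+ */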
+
+/*
+ * @input_addr is an InputAddr associated with the node given by mci. Translate
+ * @input_addr to a SysAddr.
+ */
+static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
+ u64 input_addr)
+{
+ return dram_addr_to_sys_addr(mci,
+ input_addr_to_dram_addr(mci, input_addr));
+}
+
+/*
+ * Find the minimum and maximum InputAddr values that map to the given @csrow.
+ * Pass back these values in *input_addr_min and *input_addr_max.
+ */
+static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
+ u64 *input_addr_min, u64 *input_addr_max)
+{
+ struct amd64_pvt *pvt;
+ u64 base, mask;
+
+ pvt = mci->pvt_info;
+ BUG_ON((csrow < 0) || (csrow >= CHIPSELECT_COUNT));
+
+ base = base_from_dct_base(pvt, csrow);
+ mask = mask_from_dct_mask(pvt, csrow);
+
+ *input_addr_min = base & ~mask;
+ *input_addr_max = base | mask | pvt->dcs_mask_notused;
+}
+
+/*
+ * Extract error address from MCA NB Address Low (section 3.6.4.5) and MCA NB
+ * Address High (section 3.6.4.6) register values and return the result. Address
+ * is located in the info structure (nbeah and nbeal); the encoding is
+ * device-specific.
+ */
+static u64 extract_error_address(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ return pvt->ops->get_error_address(mci, info);
+}
+
+
+/* Map the Error address to a PAGE and PAGE OFFSET. */
+static inline void error_address_to_page_and_offset(u64 error_address,
+ u32 *page, u32 *offset)
+{
+ *page = (u32) (error_address >> PAGE_SHIFT);
+ *offset = ((u32) error_address) & ~PAGE_MASK;
+}
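+
+/*
+ * Example (assuming 4K pages, i.e. PAGE_SHIFT == 12): an error address of
+ * 0x123456789 splits into page 0x123456 and offset 0x789.
+ */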
+
+/*
+ * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
+ * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
+ * of a node that detected an ECC memory error. mci represents the node that
+ * the error address maps to (possibly different from the node that detected
+ * the error). Return the number of the csrow that sys_addr maps to, or -1 on
+ * error.
+ */
+static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
+{
+ int csrow;
+
+ csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
+
+ if (csrow == -1)
+ amd64_mc_printk(mci, KERN_ERR,
+ "Failed to translate InputAddr to csrow for "
+ "address 0x%lx\n", (unsigned long)sys_addr);
+ return csrow;
+}
+
+static int get_channel_from_ecc_syndrome(unsigned short syndrome);
+
+static void amd64_cpu_display_info(struct amd64_pvt *pvt)
+{
+ if (boot_cpu_data.x86 == 0x11)
+ edac_printk(KERN_DEBUG, EDAC_MC, "F11h CPU detected\n");
+ else if (boot_cpu_data.x86 == 0x10)
+ edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n");
+ else if (boot_cpu_data.x86 == 0xf)
+ edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n",
+ (pvt->ext_model >= OPTERON_CPU_REV_F) ?
+ "Rev F or later" : "Rev E or earlier");
+ else
+ /* we'll hardly ever get here */
+ edac_printk(KERN_ERR, EDAC_MC, "Unknown cpu!\n");
+}
+
+/*
+ * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
+ * are ECC capable.
+ */
+static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
+{
+ int bit;
+ enum dev_type edac_cap = EDAC_NONE;
+
+ bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F)
+ ? 19
+ : 17;
+
+ if (pvt->dclr0 & BIT(bit))
+ edac_cap = EDAC_FLAG_SECDED;
+
+ return edac_cap;
+}
+
+
+static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
+ int ganged);
+
+/* Display and decode various NB registers for debug purposes. */
+static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
+{
+ int ganged;
+
+ debugf1(" nbcap:0x%8.08x DctDualCap=%s DualNode=%s 8-Node=%s\n",
+ pvt->nbcap,
+ (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "True" : "False",
+ (pvt->nbcap & K8_NBCAP_DUAL_NODE) ? "True" : "False",
+ (pvt->nbcap & K8_NBCAP_8_NODE) ? "True" : "False");
+ debugf1(" ECC Capable=%s ChipKill Capable=%s\n",
+ (pvt->nbcap & K8_NBCAP_SECDED) ? "True" : "False",
+ (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "True" : "False");
+ debugf1(" DramCfg0-low=0x%08x DIMM-ECC=%s Parity=%s Width=%s\n",
+ pvt->dclr0,
+ (pvt->dclr0 & BIT(19)) ? "Enabled" : "Disabled",
+ (pvt->dclr0 & BIT(8)) ? "Enabled" : "Disabled",
+ (pvt->dclr0 & BIT(11)) ? "128b" : "64b");
+ debugf1(" DIMM x4 Present: L0=%s L1=%s L2=%s L3=%s DIMM Type=%s\n",
+ (pvt->dclr0 & BIT(12)) ? "Y" : "N",
+ (pvt->dclr0 & BIT(13)) ? "Y" : "N",
+ (pvt->dclr0 & BIT(14)) ? "Y" : "N",
+ (pvt->dclr0 & BIT(15)) ? "Y" : "N",
+ (pvt->dclr0 & BIT(16)) ? "UN-Buffered" : "Buffered");
+
+
+ debugf1(" online-spare: 0x%8.08x\n", pvt->online_spare);
+
+ if (boot_cpu_data.x86 == 0xf) {
+ debugf1(" dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
+ pvt->dhar, dhar_base(pvt->dhar),
+ k8_dhar_offset(pvt->dhar));
+ debugf1(" DramHoleValid=%s\n",
+ (pvt->dhar & DHAR_VALID) ? "True" : "False");
+
+ debugf1(" dbam-dkt: 0x%8.08x\n", pvt->dbam0);
+
+ /* everything below this point is Fam10h and above */
+ return;
+
+ } else {
+ debugf1(" dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
+ pvt->dhar, dhar_base(pvt->dhar),
+ f10_dhar_offset(pvt->dhar));
+ debugf1(" DramMemHoistValid=%s DramHoleValid=%s\n",
+ (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) ?
+ "True" : "False",
+ (pvt->dhar & DHAR_VALID) ?
+ "True" : "False");
+ }
+
+ /* Only if NOT ganged does dcl1 have valid info */
+ if (!dct_ganging_enabled(pvt)) {
+ debugf1(" DramCfg1-low=0x%08x DIMM-ECC=%s Parity=%s "
+ "Width=%s\n", pvt->dclr1,
+ (pvt->dclr1 & BIT(19)) ? "Enabled" : "Disabled",
+ (pvt->dclr1 & BIT(8)) ? "Enabled" : "Disabled",
+ (pvt->dclr1 & BIT(11)) ? "128b" : "64b");
+ debugf1(" DIMM x4 Present: L0=%s L1=%s L2=%s L3=%s "
+ "DIMM Type=%s\n",
+ (pvt->dclr1 & BIT(12)) ? "Y" : "N",
+ (pvt->dclr1 & BIT(13)) ? "Y" : "N",
+ (pvt->dclr1 & BIT(14)) ? "Y" : "N",
+ (pvt->dclr1 & BIT(15)) ? "Y" : "N",
+ (pvt->dclr1 & BIT(16)) ? "UN-Buffered" : "Buffered");
+ }
+
+ /*
+ * Determine if ganged and then dump memory sizes for first controller,
+ * and if NOT ganged dump info for 2nd controller.
+ */
+ ganged = dct_ganging_enabled(pvt);
+
+ f10_debug_display_dimm_sizes(0, pvt, ganged);
+
+ if (!ganged)
+ f10_debug_display_dimm_sizes(1, pvt, ganged);
+}
+
+/* Read in both of DBAM registers */
+static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
+{
+ int err = 0;
+ unsigned int reg;
+
+ reg = DBAM0;
+ err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam0);
+ if (err)
+ goto err_reg;
+
+ if (boot_cpu_data.x86 >= 0x10) {
+ reg = DBAM1;
+ err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam1);
+
+ if (err)
+ goto err_reg;
+ }
+
+ return;
+
+err_reg:
+ debugf0("Error reading F2x%03x.\n", reg);
+}
+
+/*
+ * NOTE: CPU Revision Dependent code: Rev E and Rev F
+ *
+ * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
+ * set the shift factor for the DCSB and DCSM values.
+ *
+ * ->dcs_mask_notused, RevE:
+ *
+ * To find the max InputAddr for the csrow, start with the base address and set
+ * all bits that are "don't care" bits in the test at the start of section
+ * 3.5.4 (p. 84).
+ *
+ * The "don't care" bits are all set bits in the mask and all bits in the gaps
+ * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
+ * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
+ * gaps.
+ *
+ * ->dcs_mask_notused, RevF and later:
+ *
+ * To find the max InputAddr for the csrow, start with the base address and set
+ * all bits that are "don't care" bits in the test at the start of NPT section
+ * 4.5.4 (p. 87).
+ *
+ * The "don't care" bits are all set bits in the mask and all bits in the gaps
+ * between bit ranges [36:27] and [21:13].
+ *
+ * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
+ * which are all bits in the above-mentioned gaps.
+ */
+static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
+{
+ if (pvt->ext_model >= OPTERON_CPU_REV_F) {
+ pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
+ pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
+ pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
+ pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
+
+ switch (boot_cpu_data.x86) {
+ case 0xf:
+ pvt->num_dcsm = REV_F_DCSM_COUNT;
+ break;
+
+ case 0x10:
+ pvt->num_dcsm = F10_DCSM_COUNT;
+ break;
+
+ case 0x11:
+ pvt->num_dcsm = F11_DCSM_COUNT;
+ break;
+
+ default:
+ amd64_printk(KERN_ERR, "Unsupported family!\n");
+ break;
+ }
+ } else {
+ pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
+ pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
+ pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
+ pvt->dcs_shift = REV_E_DCS_SHIFT;
+ pvt->num_dcsm = REV_E_DCSM_COUNT;
+ }
+}
+
+/*
+ * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
+ */
+static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
+{
+ int cs, reg, err = 0;
+
+ amd64_set_dct_base_and_mask(pvt);
+
+ for (cs = 0; cs < CHIPSELECT_COUNT; cs++) {
+ reg = K8_DCSB0 + (cs * 4);
+ err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
+ &pvt->dcsb0[cs]);
+ if (unlikely(err))
+ debugf0("Reading K8_DCSB0[%d] failed\n", cs);
+ else
+ debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
+ cs, pvt->dcsb0[cs], reg);
+
+ /* If DCT are NOT ganged, then read in DCT1's base */
+ if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
+ reg = F10_DCSB1 + (cs * 4);
+ err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
+ &pvt->dcsb1[cs]);
+ if (unlikely(err))
+ debugf0("Reading F10_DCSB1[%d] failed\n", cs);
+ else
+ debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
+ cs, pvt->dcsb1[cs], reg);
+ } else {
+ pvt->dcsb1[cs] = 0;
+ }
+ }
+
+ for (cs = 0; cs < pvt->num_dcsm; cs++) {
+ reg = K8_DCSM0 + (cs * 4);
+ err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
+ &pvt->dcsm0[cs]);
+ if (unlikely(err))
+ debugf0("Reading K8_DCSM0 failed\n");
+ else
+ debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
+ cs, pvt->dcsm0[cs], reg);
+
+ /* If DCT are NOT ganged, then read in DCT1's mask */
+ if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
+ reg = F10_DCSM1 + (cs * 4);
+ err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
+ &pvt->dcsm1[cs]);
+ if (unlikely(err))
+ debugf0("Reading F10_DCSM1[%d] failed\n", cs);
+ else
+ debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
+ cs, pvt->dcsm1[cs], reg);
+ } else
+ pvt->dcsm1[cs] = 0;
+ }
+}
+
+static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
+{
+ enum mem_type type;
+
+ if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= OPTERON_CPU_REV_F) {
+ /* Rev F and later */
+ type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
+ } else {
+ /* Rev E and earlier */
+ type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
+ }
+
+ debugf1(" Memory type is: %s\n",
+ (type == MEM_DDR2) ? "MEM_DDR2" :
+ (type == MEM_RDDR2) ? "MEM_RDDR2" :
+ (type == MEM_DDR) ? "MEM_DDR" : "MEM_RDDR");
+
+ return type;
+}
+
+/*
+ * Read the DRAM Configuration Low register. It differs between CG, D & E revs
+ * and the later RevF memory controllers (DDR vs DDR2)
+ *
+ * Return:
+ * number of memory channels in operation
+ * Pass back:
+ * contents of the DCL0_LOW register
+ */
+static int k8_early_channel_count(struct amd64_pvt *pvt)
+{
+ int flag, err = 0;
+
+ err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
+ if (err)
+ return err;
+
+ if ((boot_cpu_data.x86_model >> 4) >= OPTERON_CPU_REV_F) {
+ /* RevF (NPT) and later */
+ flag = pvt->dclr0 & F10_WIDTH_128;
+ } else {
+ /* RevE and earlier */
+ flag = pvt->dclr0 & REVE_WIDTH_128;
+ }
+
+ /* not used */
+ pvt->dclr1 = 0;
+
+ return (flag) ? 2 : 1;
+}
+
+/* extract the ERROR ADDRESS for the K8 CPUs */
+static u64 k8_get_error_address(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info)
+{
+ return (((u64) (info->nbeah & 0xff)) << 32) +
+ (info->nbeal & ~0x03);
+}
+
+/*
+ * Read the Base and Limit registers for K8 based Memory controllers; extract
+ * fields from the 'raw' reg into separate data fields
+ *
+ * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN
+ */
+static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
+{
+ u32 low;
+ u32 off = dram << 3; /* 8 bytes between DRAM entries */
+ int err;
+
+ err = pci_read_config_dword(pvt->addr_f1_ctl,
+ K8_DRAM_BASE_LOW + off, &low);
+ if (err)
+ debugf0("Reading K8_DRAM_BASE_LOW failed\n");
+
+ /* Extract parts into separate data entries */
+ pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
+ pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
+ pvt->dram_rw_en[dram] = (low & 0x3);
+
+ err = pci_read_config_dword(pvt->addr_f1_ctl,
+ K8_DRAM_LIMIT_LOW + off, &low);
+ if (err)
+ debugf0("Reading K8_DRAM_LIMIT_LOW failed\n");
+
+ /*
+ * Extract parts into separate data entries. Limit is the HIGHEST memory
+ * location of the region, so lower 24 bits need to be all ones
+ */
+ pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
+ pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
+ pvt->dram_DstNode[dram] = (low & 0x7);
+}
+
+static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info,
+ u64 SystemAddress)
+{
+ struct mem_ctl_info *src_mci;
+ unsigned short syndrome;
+ int channel, csrow;
+ u32 page, offset;
+
+ /* Extract the syndrome parts and form a 16-bit syndrome */
+ syndrome = EXTRACT_HIGH_SYNDROME(info->nbsl) << 8;
+ syndrome |= EXTRACT_LOW_SYNDROME(info->nbsh);
+
+ /* CHIPKILL enabled */
+ if (info->nbcfg & K8_NBCFG_CHIPKILL) {
+ channel = get_channel_from_ecc_syndrome(syndrome);
+ if (channel < 0) {
+ /*
+ * Syndrome didn't map, so we don't know which of the
+ * 2 DIMMs is in error. So we need to ID 'both' of them
+ * as suspect.
+ */
+ amd64_mc_printk(mci, KERN_WARNING,
+ "unknown syndrome 0x%x - possible error "
+ "reporting race\n", syndrome);
+ edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ return;
+ }
+ } else {
+ /*
+ * non-chipkill ecc mode
+ *
+ * The k8 documentation is unclear about how to determine the
+ * channel number when using non-chipkill memory. This method
+ * was obtained from email communication with someone at AMD.
+ * (Wish the email was placed in this comment - norsk)
+ */
+ channel = ((SystemAddress & BIT(3)) != 0);
+ }
+
+ /*
+ * Find out which node the error address belongs to. This may be
+ * different from the node that detected the error.
+ */
+ src_mci = find_mc_by_sys_addr(mci, SystemAddress);
+ if (!src_mci) {
+ amd64_mc_printk(mci, KERN_ERR,
+ "failed to map error address 0x%lx to a node\n",
+ (unsigned long)SystemAddress);
+ edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ return;
+ }
+
+ /* Now map the SystemAddress to a CSROW */
+ csrow = sys_addr_to_csrow(src_mci, SystemAddress);
+ if (csrow < 0) {
+ edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
+ } else {
+ error_address_to_page_and_offset(SystemAddress, &page, &offset);
+
+ edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
+ channel, EDAC_MOD_STR);
+ }
+}
+
+/*
+ * Determine the number of PAGES for this DIMM's size based on its DRAM
+ * Address Mapping.
+ *
+ * The first step is to calculate the number of bits to shift a value of 1
+ * left to indicate how many pages. Start with the DBAM value as the starting
+ * bits, then adjust those shift bits based on the CPU revision and the table.
+ * See the BKDG on the DBAM.
+ */
+static int k8_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
+{
+ int nr_pages;
+
+ if (pvt->ext_model >= OPTERON_CPU_REV_F) {
+ nr_pages = 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
+ } else {
+ /*
+ * RevE and earlier section; this line is tricky. It collapses
+ * the table used by RevD and later into one that matches
+ * revisions CG and earlier.
+ */
+ dram_map -= (pvt->ext_model >= OPTERON_CPU_REV_D) ?
+ (dram_map > 8 ? 4 : (dram_map > 5 ?
+ 3 : (dram_map > 2 ? 1 : 0))) : 0;
+
+ /* 25 shift is 32MiB minimum DIMM size in RevE and prior */
+ nr_pages = 1 << (dram_map + 25 - PAGE_SHIFT);
+ }
+
+ return nr_pages;
+}
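+
+/*
+ * Example for the RevE-and-earlier path above (assuming 4K pages,
+ * PAGE_SHIFT == 12): a dram_map value of 0 gives 1 << (0 + 25 - 12) == 8192
+ * pages, i.e. the 32MiB minimum DIMM size mentioned in the comment.
+ */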
+
+/*
+ * Get the number of DCT channels in use.
+ *
+ * Return:
+ * number of Memory Channels in operation
+ * Pass back:
+ * contents of the DCL0_LOW register
+ */
+static int f10_early_channel_count(struct amd64_pvt *pvt)
+{
+ int err = 0, channels = 0;
+ u32 dbam;
+
+ err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
+ if (err)
+ goto err_reg;
+
+ err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
+ if (err)
+ goto err_reg;
+
+ /* If we are in 128 bit mode, then we are using 2 channels */
+ if (pvt->dclr0 & F10_WIDTH_128) {
+ debugf0("Data WIDTH is 128 bits - 2 channels\n");
+ channels = 2;
+ return channels;
+ }
+
+ /*
+ * Need to check if in un-ganged mode: in that case, there are 2
+ * channels, but they are NOT in 128-bit mode and thus the above
+ * 'dclr0' status bit will be OFF.
+ *
+ * Need to check DCT0[0] and DCT1[0] to see if only one of them has
+ * their CSEnable bit on. If so, then SINGLE DIMM case.
+ */
+ debugf0("Data WIDTH is NOT 128 bits - need more decoding\n");
+
+ /*
+ * Check DRAM Bank Address Mapping values for each DIMM to see if there
+ * is more than just one DIMM present in unganged mode. Need to check
+ * both controllers since DIMMs can be placed in either one.
+ */
+ channels = 0;
+ err = pci_read_config_dword(pvt->dram_f2_ctl, DBAM0, &dbam);
+ if (err)
+ goto err_reg;
+
+ if (DBAM_DIMM(0, dbam) > 0)
+ channels++;
+ if (DBAM_DIMM(1, dbam) > 0)
+ channels++;
+ if (DBAM_DIMM(2, dbam) > 0)
+ channels++;
+ if (DBAM_DIMM(3, dbam) > 0)
+ channels++;
+
+ /* If more than 2 DIMMs are present, then we have 2 channels */
+ if (channels > 2)
+ channels = 2;
+ else if (channels == 0) {
+ /* No DIMMs on DCT0, so look at DCT1 */
+ err = pci_read_config_dword(pvt->dram_f2_ctl, DBAM1, &dbam);
+ if (err)
+ goto err_reg;
+
+ if (DBAM_DIMM(0, dbam) > 0)
+ channels++;
+ if (DBAM_DIMM(1, dbam) > 0)
+ channels++;
+ if (DBAM_DIMM(2, dbam) > 0)
+ channels++;
+ if (DBAM_DIMM(3, dbam) > 0)
+ channels++;
+
+ if (channels > 2)
+ channels = 2;
+ }
+
+ /* If we found ALL 0 values, then assume just ONE DIMM-ONE Channel */
+ if (channels == 0)
+ channels = 1;
+
+ debugf0("DIMM count= %d\n", channels);
+
+ return channels;
+
+err_reg:
+ return -1;
+
+}
+
+static int f10_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
+{
+ return 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
+}
+
+/* Enable extended configuration access via 0xCF8 feature */
+static void amd64_setup(struct amd64_pvt *pvt)
+{
+ u32 reg;
+
+ pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
+
+ pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG);
+ reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
+ pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
+}
+
+/* Restore the extended configuration access via 0xCF8 feature */
+static void amd64_teardown(struct amd64_pvt *pvt)
+{
+ u32 reg;
+
+ pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
+
+ reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG;
+ if (pvt->flags.cf8_extcfg)
+ reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
+ pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
+}
+
+static u64 f10_get_error_address(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info)
+{
+ return (((u64) (info->nbeah & 0xffff)) << 32) +
+ (info->nbeal & ~0x01);
+}
+
+/*
+ * Read the Base and Limit registers for F10 based Memory controllers. Extract
+ * fields from the 'raw' reg into separate data fields.
+ *
+ * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
+ */
+static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
+{
+ u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit;
+
+ low_offset = K8_DRAM_BASE_LOW + (dram << 3);
+ high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
+
+ /* read the 'raw' DRAM BASE Address register */
+ pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_base);
+
+ /* Read from the ECS data register */
+ pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_base);
+
+ /* Extract parts into separate data entries */
+ pvt->dram_rw_en[dram] = (low_base & 0x3);
+
+ if (pvt->dram_rw_en[dram] == 0)
+ return;
+
+ pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
+
+ pvt->dram_base[dram] = (((((u64) high_base & 0x000000FF) << 32) |
+ ((u64) low_base & 0xFFFF0000))) << 8;
+
+ low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
+ high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
+
+ /* read the 'raw' LIMIT registers */
+ pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_limit);
+
+ /* Read from the ECS data register for the HIGH portion */
+ pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_limit);
+
+ debugf0(" HW Regs: BASE=0x%08x-%08x LIMIT= 0x%08x-%08x\n",
+ high_base, low_base, high_limit, low_limit);
+
+ pvt->dram_DstNode[dram] = (low_limit & 0x7);
+ pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
+
+ /*
+ * Extract address values and form a LIMIT address. Limit is the HIGHEST
+ * memory location of the region, so low 24 bits need to be all ones.
+ */
+ low_limit |= 0x0000FFFF;
+ pvt->dram_limit[dram] =
+ ((((u64) high_limit << 32) + (u64) low_limit) << 8) | (0xFF);
+}
+
+static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
+{
+ int err = 0;
+
+ err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
+ &pvt->dram_ctl_select_low);
+ if (err) {
+ debugf0("Reading F10_DCTL_SEL_LOW failed\n");
+ } else {
+ debugf0("DRAM_DCTL_SEL_LOW=0x%x DctSelBaseAddr=0x%x\n",
+ pvt->dram_ctl_select_low, dct_sel_baseaddr(pvt));
+
+ debugf0(" DRAM DCTs are=%s DRAM Is=%s DRAM-Ctl-"
+ "sel-hi-range=%s\n",
+ (dct_ganging_enabled(pvt) ? "GANGED" : "NOT GANGED"),
+ (dct_dram_enabled(pvt) ? "Enabled" : "Disabled"),
+ (dct_high_range_enabled(pvt) ? "Enabled" : "Disabled"));
+
+ debugf0(" DctDatIntLv=%s MemCleared=%s DctSelIntLvAddr=0x%x\n",
+ (dct_data_intlv_enabled(pvt) ? "Enabled" : "Disabled"),
+ (dct_memory_cleared(pvt) ? "True " : "False "),
+ dct_sel_interleave_addr(pvt));
+ }
+
+ err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
+ &pvt->dram_ctl_select_high);
+ if (err)
+ debugf0("Reading F10_DCTL_SEL_HIGH failed\n");
+}
+
+/*
+ * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
+ * Interleaving Modes.
+ */
+static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
+ int hi_range_sel, u32 intlv_en)
+{
+ u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;
+
+ if (dct_ganging_enabled(pvt))
+ cs = 0;
+ else if (hi_range_sel)
+ cs = dct_sel_high;
+ else if (dct_interleave_enabled(pvt)) {
+ /*
+ * see F2x110[DctSelIntLvAddr] - channel interleave mode
+ */
+ if (dct_sel_interleave_addr(pvt) == 0)
+ cs = sys_addr >> 6 & 1;
+ else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
+ temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
+
+ if (dct_sel_interleave_addr(pvt) & 1)
+ cs = (sys_addr >> 9 & 1) ^ temp;
+ else
+ cs = (sys_addr >> 6 & 1) ^ temp;
+ } else if (intlv_en & 4)
+ cs = sys_addr >> 15 & 1;
+ else if (intlv_en & 2)
+ cs = sys_addr >> 14 & 1;
+ else if (intlv_en & 1)
+ cs = sys_addr >> 13 & 1;
+ else
+ cs = sys_addr >> 12 & 1;
+ } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
+ cs = ~dct_sel_high & 1;
+ else
+ cs = 0;
+
+ return cs;
+}
+
+static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
+{
+ if (intlv_en == 1)
+ return 1;
+ else if (intlv_en == 3)
+ return 2;
+ else if (intlv_en == 7)
+ return 3;
+
+ return 0;
+}
+
+/* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
+static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
+ u32 dct_sel_base_addr,
+ u64 dct_sel_base_off,
+ u32 hole_valid, u32 hole_off,
+ u64 dram_base)
+{
+ u64 chan_off;
+
+ if (hi_range_sel) {
+ if (!(dct_sel_base_addr & 0xFFFFF800) &&
+ hole_valid && (sys_addr >= 0x100000000ULL))
+ chan_off = hole_off << 16;
+ else
+ chan_off = dct_sel_base_off;
+ } else {
+ if (hole_valid && (sys_addr >= 0x100000000ULL))
+ chan_off = hole_off << 16;
+ else
+ chan_off = dram_base & 0xFFFFF8000000ULL;
+ }
+
+ return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
+ (chan_off & 0x0000FFFFFF800000ULL);
+}
+
+/* Hack for the time being - Can we get this from BIOS?? */
+#define CH0SPARE_RANK 0
+#define CH1SPARE_RANK 1
+
+/*
+ * Check if the csrow passed in is marked as SPARED; if so, return the new
+ * spare row.
+ */
+static inline int f10_process_possible_spare(int csrow,
+ u32 cs, struct amd64_pvt *pvt)
+{
+ u32 swap_done;
+ u32 bad_dram_cs;
+
+ /* Depending on channel, isolate respective SPARING info */
+ if (cs) {
+ swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
+ bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
+ if (swap_done && (csrow == bad_dram_cs))
+ csrow = CH1SPARE_RANK;
+ } else {
+ swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
+ bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
+ if (swap_done && (csrow == bad_dram_cs))
+ csrow = CH0SPARE_RANK;
+ }
+ return csrow;
+}
+
+/*
+ * Iterate over the DRAM DCT "base" and "mask" registers looking for a
+ * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
+ *
+ * Return:
+ * -EINVAL: NOT FOUND
+ * 0..csrow = Chip-Select Row
+ */
+static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
+{
+ struct mem_ctl_info *mci;
+ struct amd64_pvt *pvt;
+ u32 cs_base, cs_mask;
+ int cs_found = -EINVAL;
+ int csrow;
+
+ mci = mci_lookup[nid];
+ if (!mci)
+ return cs_found;
+
+ pvt = mci->pvt_info;
+
+ debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);
+
+ for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) {
+
+ cs_base = amd64_get_dct_base(pvt, cs, csrow);
+ if (!(cs_base & K8_DCSB_CS_ENABLE))
+ continue;
+
+ /*
+ * We have an ENABLED CSROW. Isolate just the BASE bits of the
+ * target: [28:19] and [13:5], which map to [36:27] and [21:13]
+ * of the actual address.
+ */
+ cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;
+
+ /*
+ * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and
+ * [4:0] to become ON. Then mask off bits [28:0] ([36:8])
+ */
+ cs_mask = amd64_get_dct_mask(pvt, cs, csrow);
+
+ debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
+ csrow, cs_base, cs_mask);
+
+ cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;
+
+ debugf1(" Final CSMask=0x%x\n", cs_mask);
+ debugf1(" (InputAddr & ~CSMask)=0x%x "
+ "(CSBase & ~CSMask)=0x%x\n",
+ (in_addr & ~cs_mask), (cs_base & ~cs_mask));
+
+ if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
+ cs_found = f10_process_possible_spare(csrow, cs, pvt);
+
+ debugf1(" MATCH csrow=%d\n", cs_found);
+ break;
+ }
+ }
+ return cs_found;
+}
+
+/* For a given @dram_range, check if @sys_addr falls within it. */
+static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
+ u64 sys_addr, int *nid, int *chan_sel)
+{
+ int node_id, cs_found = -EINVAL, high_range = 0;
+ u32 intlv_en, intlv_sel, intlv_shift, hole_off;
+ u32 hole_valid, tmp, dct_sel_base, channel;
+ u64 dram_base, chan_addr, dct_sel_base_off;
+
+ dram_base = pvt->dram_base[dram_range];
+ intlv_en = pvt->dram_IntlvEn[dram_range];
+
+ node_id = pvt->dram_DstNode[dram_range];
+ intlv_sel = pvt->dram_IntlvSel[dram_range];
+
+ debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
+ dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);
+
+ /*
+ * This assumes that one node's DHAR is the same as all the other
+ * nodes' DHAR.
+ */
+ hole_off = (pvt->dhar & 0x0000FF80);
+ hole_valid = (pvt->dhar & 0x1);
+ dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;
+
+ debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n",
+ hole_off, hole_valid, intlv_sel);
+
+ if (intlv_en &&
+ (intlv_sel != ((sys_addr >> 12) & intlv_en)))
+ return -EINVAL;
+
+ dct_sel_base = dct_sel_baseaddr(pvt);
+
+ /*
+ * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
+ * select between DCT0 and DCT1.
+ */
+ if (dct_high_range_enabled(pvt) &&
+ !dct_ganging_enabled(pvt) &&
+ ((sys_addr >> 27) >= (dct_sel_base >> 11)))
+ high_range = 1;
+
+ channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
+
+ chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
+ dct_sel_base_off, hole_valid,
+ hole_off, dram_base);
+
+ intlv_shift = f10_map_intlv_en_to_shift(intlv_en);
+
+ /* remove Node ID (in case of memory interleaving) */
+ tmp = chan_addr & 0xFC0;
+
+ chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;
+
+ /* remove channel interleave and hash */
+ if (dct_interleave_enabled(pvt) &&
+ !dct_high_range_enabled(pvt) &&
+ !dct_ganging_enabled(pvt)) {
+ if (dct_sel_interleave_addr(pvt) != 1)
+ chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
+ else {
+ tmp = chan_addr & 0xFC0;
+ chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
+ | tmp;
+ }
+ }
+
+ debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
+ chan_addr, (u32)(chan_addr >> 8));
+
+ cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);
+
+ if (cs_found >= 0) {
+ *nid = node_id;
+ *chan_sel = channel;
+ }
+ return cs_found;
+}
+
+static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
+ int *node, int *chan_sel)
+{
+ int dram_range, cs_found = -EINVAL;
+ u64 dram_base, dram_limit;
+
+ for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) {
+
+ if (!pvt->dram_rw_en[dram_range])
+ continue;
+
+ dram_base = pvt->dram_base[dram_range];
+ dram_limit = pvt->dram_limit[dram_range];
+
+ if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) {
+
+ cs_found = f10_match_to_this_node(pvt, dram_range,
+ sys_addr, node,
+ chan_sel);
+ if (cs_found >= 0)
+ break;
+ }
+ }
+ return cs_found;
+}
+
+/*
+ * This is the F10h reference code from AMD to map a @sys_addr to NodeID,
+ * CSROW, Channel.
+ *
+ * The @sys_addr is usually an error address received from the hardware.
+ */
+static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info,
+ u64 sys_addr)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ u32 page, offset;
+ unsigned short syndrome;
+ int nid, csrow, chan = 0;
+
+ csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
+
+ if (csrow >= 0) {
+ error_address_to_page_and_offset(sys_addr, &page, &offset);
+
+ syndrome = EXTRACT_HIGH_SYNDROME(info->nbsl) << 8;
+ syndrome |= EXTRACT_LOW_SYNDROME(info->nbsh);
+
+ /*
+ * Is CHIPKILL on? If so, then we can attempt to use the
+ * syndrome to isolate which channel the error was on.
+ */
+ if (pvt->nbcfg & K8_NBCFG_CHIPKILL)
+ chan = get_channel_from_ecc_syndrome(syndrome);
+
+ if (chan >= 0) {
+ edac_mc_handle_ce(mci, page, offset, syndrome,
+ csrow, chan, EDAC_MOD_STR);
+ } else {
+ /*
+ * Channel unknown, report all channels on this
+ * CSROW as failed.
+ */
+ for (chan = 0; chan < mci->csrows[csrow].nr_channels;
+ chan++) {
+ edac_mc_handle_ce(mci, page, offset,
+ syndrome,
+ csrow, chan,
+ EDAC_MOD_STR);
+ }
+ }
+
+ } else {
+ edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ }
+}
+
+/*
+ * Input (@index) is the DBAM DIMM value (1 of 4) used as an index into a shift
+ * table (revf_quad_ddr2_shift) which starts at 128MB DIMM size. Index of 0
+ * indicates an empty DIMM slot, as reported by Hardware on empty slots.
+ *
+ * Normalize to 128MB by subtracting the 27-bit shift.
+ */
+static int map_dbam_to_csrow_size(int index)
+{
+ int mega_bytes = 0;
+
+ if (index > 0 && index <= DBAM_MAX_VALUE)
+ mega_bytes = ((128 << (revf_quad_ddr2_shift[index]-27)));
+
+ return mega_bytes;
+}
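+
+/*
+ * Example (with a purely hypothetical shift-table entry): if
+ * revf_quad_ddr2_shift[i] were 28 for some index i, the csrow size would be
+ * 128 << (28 - 27) == 256MB; an index of 0 always reports 0MB (empty slot).
+ */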
+
+/*
+ * debug routine to display the memory sizes of a DIMM (ganged or not) and its
+ * CSROWs as well
+ */
+static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
+ int ganged)
+{
+ int dimm, size0, size1;
+ u32 dbam;
+ u32 *dcsb;
+
+ debugf1(" dbam%d: 0x%8.08x CSROW is %s\n", ctrl,
+ ctrl ? pvt->dbam1 : pvt->dbam0,
+ ganged ? "GANGED - dbam1 not used" : "NON-GANGED");
+
+ dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
+ dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
+
+ /* Dump memory sizes for DIMM and its CSROWs */
+ for (dimm = 0; dimm < 4; dimm++) {
+
+ size0 = 0;
+ if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
+ size0 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
+
+ size1 = 0;
+ if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
+ size1 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
+
+ debugf1(" CTRL-%d DIMM-%d=%5dMB CSROW-%d=%5dMB "
+ "CSROW-%d=%5dMB\n",
+ ctrl,
+ dimm,
+ size0 + size1,
+ dimm * 2,
+ size0,
+ dimm * 2 + 1,
+ size1);
+ }
+}
+
+/*
+ * Very early hardware probe on pci_probe thread to determine if this module
+ * supports the hardware.
+ *
+ * Return:
+ * 0 for OK
+ * 1 for error
+ */
+static int f10_probe_valid_hardware(struct amd64_pvt *pvt)
+{
+ int ret = 0;
+
+ /*
+ * If we are on a DDR3 machine, we don't know yet if
+ * we support that properly at this time
+ */
+ if ((pvt->dchr0 & F10_DCHR_Ddr3Mode) ||
+ (pvt->dchr1 & F10_DCHR_Ddr3Mode)) {
+
+ amd64_printk(KERN_WARNING,
+ "%s() This machine is running with DDR3 memory. "
+ "This is not currently supported. "
+ "DCHR0=0x%x DCHR1=0x%x\n",
+ __func__, pvt->dchr0, pvt->dchr1);
+
+ amd64_printk(KERN_WARNING,
+ " Contact '%s' module MAINTAINER to help add"
+ " support.\n",
+ EDAC_MOD_STR);
+
+ ret = 1;
+
+ }
+ return ret;
+}
+
+/*
+ * There currently are 3 types of MC devices for AMD Athlon/Opterons
+ * (as per PCI DEVICE_IDs):
+ *
+ * Family K8: That is the Athlon64 and Opteron CPUs. They all have the same PCI
+ * DEVICE ID, even though there are differences between the different revisions
+ * (CG, D, E, F).
+ *
+ * Family F10h and F11h.
+ *
+ */
+static struct amd64_family_type amd64_family_types[] = {
+ [K8_CPUS] = {
+ .ctl_name = "RevF",
+ .addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
+ .misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC,
+ .ops = {
+ .early_channel_count = k8_early_channel_count,
+ .get_error_address = k8_get_error_address,
+ .read_dram_base_limit = k8_read_dram_base_limit,
+ .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
+ .dbam_map_to_pages = k8_dbam_map_to_pages,
+ }
+ },
+ [F10_CPUS] = {
+ .ctl_name = "Family 10h",
+ .addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP,
+ .misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC,
+ .ops = {
+ .probe_valid_hardware = f10_probe_valid_hardware,
+ .early_channel_count = f10_early_channel_count,
+ .get_error_address = f10_get_error_address,
+ .read_dram_base_limit = f10_read_dram_base_limit,
+ .read_dram_ctl_register = f10_read_dram_ctl_register,
+ .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
+ .dbam_map_to_pages = f10_dbam_map_to_pages,
+ }
+ },
+ [F11_CPUS] = {
+ .ctl_name = "Family 11h",
+ .addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP,
+ .misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC,
+ .ops = {
+ .probe_valid_hardware = f10_probe_valid_hardware,
+ .early_channel_count = f10_early_channel_count,
+ .get_error_address = f10_get_error_address,
+ .read_dram_base_limit = f10_read_dram_base_limit,
+ .read_dram_ctl_register = f10_read_dram_ctl_register,
+ .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
+ .dbam_map_to_pages = f10_dbam_map_to_pages,
+ }
+ },
+};
+
+static struct pci_dev *pci_get_related_function(unsigned int vendor,
+ unsigned int device,
+ struct pci_dev *related)
+{
+ struct pci_dev *dev = NULL;
+
+ dev = pci_get_device(vendor, device, dev);
+ while (dev) {
+ if ((dev->bus->number == related->bus->number) &&
+ (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
+ break;
+ dev = pci_get_device(vendor, device, dev);
+ }
+
+ return dev;
+}
+
+/*
+ * syndrome mapping table for ECC ChipKill devices
+ *
+ * The comment in each row is the token (nibble) number that is in error.
+ * The least significant nibble of the syndrome is the mask for the bits
+ * that are in error (need to be toggled) for the particular nibble.
+ *
+ * Each row contains 16 entries.
+ * The first entry (0th) is the channel number for that row of syndromes.
+ * The remaining 15 entries are the syndromes for the respective Error
+ * bit mask index.
+ *
+ * 1st index entry is 0x0001 mask, indicating that the rightmost bit is the
+ * bit in error.
+ * The 2nd index entry is 0x0010, indicating that the second bit is damaged.
+ * The 3rd index entry is 0x0011 indicating that the rightmost 2 bits
+ * are damaged.
+ * And so on until index 15, 0x1111, whose entry has the syndrome
+ * indicating that all 4 bits are damaged.
+ *
+ * A search is performed on this table looking for a given syndrome.
+ *
+ * See the AMD documentation for ECC syndromes. This ECC table is valid
+ * across all the versions of the AMD64 processors.
+ *
+ * A fast lookup is to use the LAST four bits of the 16-bit syndrome as a
+ * COLUMN index, then search all ROWS of that column, looking for a match
+ * with the input syndrome. The ROW value will be the token number.
+ *
+ * The 0'th entry on that row can be returned as the CHANNEL (0 or 1) of this
+ * error.
+ */
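+
+/*
+ * A minimal sketch of the fast lookup described above (the driver's actual
+ * get_channel_from_ecc_syndrome() implementation may differ in details):
+ *
+ * int row, col = syndrome & 0xf;
+ *
+ * if (col == 0)
+ * return -1;
+ * for (row = 0; row < NUMBER_ECC_ROWS; row++)
+ * if (ecc_chipkill_syndromes[row][col] == syndrome)
+ * return ecc_chipkill_syndromes[row][0];
+ * return -1;
+ */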
+#define NUMBER_ECC_ROWS 36
+static const unsigned short ecc_chipkill_syndromes[NUMBER_ECC_ROWS][16] = {
+ /* Channel 0 syndromes */
+ {/*0*/ 0, 0xe821, 0x7c32, 0x9413, 0xbb44, 0x5365, 0xc776, 0x2f57,
+ 0xdd88, 0x35a9, 0xa1ba, 0x499b, 0x66cc, 0x8eed, 0x1afe, 0xf2df },
+ {/*1*/ 0, 0x5d31, 0xa612, 0xfb23, 0x9584, 0xc8b5, 0x3396, 0x6ea7,
+ 0xeac8, 0xb7f9, 0x4cda, 0x11eb, 0x7f4c, 0x227d, 0xd95e, 0x846f },
+ {/*2*/ 0, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
+ 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f },
+ {/*3*/ 0, 0x2021, 0x3032, 0x1013, 0x4044, 0x6065, 0x7076, 0x5057,
+ 0x8088, 0xa0a9, 0xb0ba, 0x909b, 0xc0cc, 0xe0ed, 0xf0fe, 0xd0df },
+ {/*4*/ 0, 0x5041, 0xa082, 0xf0c3, 0x9054, 0xc015, 0x30d6, 0x6097,
+ 0xe0a8, 0xb0e9, 0x402a, 0x106b, 0x70fc, 0x20bd, 0xd07e, 0x803f },
+ {/*5*/ 0, 0xbe21, 0xd732, 0x6913, 0x2144, 0x9f65, 0xf676, 0x4857,
+ 0x3288, 0x8ca9, 0xe5ba, 0x5b9b, 0x13cc, 0xaded, 0xc4fe, 0x7adf },
+ {/*6*/ 0, 0x4951, 0x8ea2, 0xc7f3, 0x5394, 0x1ac5, 0xdd36, 0x9467,
+ 0xa1e8, 0xe8b9, 0x2f4a, 0x661b, 0xf27c, 0xbb2d, 0x7cde, 0x358f },
+ {/*7*/ 0, 0x74e1, 0x9872, 0xec93, 0xd6b4, 0xa255, 0x4ec6, 0x3a27,
+ 0x6bd8, 0x1f39, 0xf3aa, 0x874b, 0xbd6c, 0xc98d, 0x251e, 0x51ff },
+ {/*8*/ 0, 0x15c1, 0x2a42, 0x3f83, 0xcef4, 0xdb35, 0xe4b6, 0xf177,
+ 0x4758, 0x5299, 0x6d1a, 0x78db, 0x89ac, 0x9c6d, 0xa3ee, 0xb62f },
+ {/*9*/ 0, 0x3d01, 0x1602, 0x2b03, 0x8504, 0xb805, 0x9306, 0xae07,
+ 0xca08, 0xf709, 0xdc0a, 0xe10b, 0x4f0c, 0x720d, 0x590e, 0x640f },
+ {/*a*/ 0, 0x9801, 0xec02, 0x7403, 0x6b04, 0xf305, 0x8706, 0x1f07,
+ 0xbd08, 0x2509, 0x510a, 0xc90b, 0xd60c, 0x4e0d, 0x3a0e, 0xa20f },
+ {/*b*/ 0, 0xd131, 0x6212, 0xb323, 0x3884, 0xe9b5, 0x5a96, 0x8ba7,
+ 0x1cc8, 0xcdf9, 0x7eda, 0xafeb, 0x244c, 0xf57d, 0x465e, 0x976f },
+ {/*c*/ 0, 0xe1d1, 0x7262, 0x93b3, 0xb834, 0x59e5, 0xca56, 0x2b87,
+ 0xdc18, 0x3dc9, 0xae7a, 0x4fab, 0x542c, 0x85fd, 0x164e, 0xf79f },
+ {/*d*/ 0, 0x6051, 0xb0a2, 0xd0f3, 0x1094, 0x70c5, 0xa036, 0xc067,
+ 0x20e8, 0x40b9, 0x904a, 0x601b, 0x307c, 0x502d, 0x80de, 0xe08f },
+ {/*e*/ 0, 0xa4c1, 0xf842, 0x5c83, 0xe6f4, 0x4235, 0x1eb6, 0xba77,
+ 0x7b58, 0xdf99, 0x831a, 0x27db, 0x9dac, 0x396d, 0x65ee, 0xc12f },
+ {/*f*/ 0, 0x11c1, 0x2242, 0x3383, 0xc8f4, 0xd935, 0xeab6, 0xfb77,
+ 0x4c58, 0x5d99, 0x6e1a, 0x7fdb, 0x84ac, 0x956d, 0xa6ee, 0xb72f },
+
+ /* Channel 1 syndromes */
+ {/*10*/ 1, 0x45d1, 0x8a62, 0xcfb3, 0x5e34, 0x1be5, 0xd456, 0x9187,
+ 0xa718, 0xe2c9, 0x2d7a, 0x68ab, 0xf92c, 0xbcfd, 0x734e, 0x369f },
+ {/*11*/ 1, 0x63e1, 0xb172, 0xd293, 0x14b4, 0x7755, 0xa5c6, 0xc627,
+ 0x28d8, 0x4b39, 0x99aa, 0xfa4b, 0x3c6c, 0x5f8d, 0x8d1e, 0xeeff },
+ {/*12*/ 1, 0xb741, 0xd982, 0x6ec3, 0x2254, 0x9515, 0xfbd6, 0x4c97,
+ 0x33a8, 0x84e9, 0xea2a, 0x5d6b, 0x11fc, 0xa6bd, 0xc87e, 0x7f3f },
+ {/*13*/ 1, 0xdd41, 0x6682, 0xbbc3, 0x3554, 0xe815, 0x53d6, 0xce97,
+ 0x1aa8, 0xc7e9, 0x7c2a, 0xa1fb, 0x2ffc, 0xf2bd, 0x497e, 0x943f },
+ {/*14*/ 1, 0x2bd1, 0x3d62, 0x16b3, 0x4f34, 0x64e5, 0x7256, 0x5987,
+ 0x8518, 0xaec9, 0xb87a, 0x93ab, 0xca2c, 0xe1fd, 0xf74e, 0xdc9f },
+ {/*15*/ 1, 0x83c1, 0xc142, 0x4283, 0xa4f4, 0x2735, 0x65b6, 0xe677,
+ 0xf858, 0x7b99, 0x391a, 0xbadb, 0x5cac, 0xdf6d, 0x9dee, 0x1e2f },
+ {/*16*/ 1, 0x8fd1, 0xc562, 0x4ab3, 0xa934, 0x26e5, 0x6c56, 0xe387,
+ 0xfe18, 0x71c9, 0x3b7a, 0xb4ab, 0x572c, 0xd8fd, 0x924e, 0x1d9f },
+ {/*17*/ 1, 0x4791, 0x89e2, 0xce73, 0x5264, 0x15f5, 0xdb86, 0x9c17,
+ 0xa3b8, 0xe429, 0x2a5a, 0x6dcb, 0xf1dc, 0xb64d, 0x783e, 0x3faf },
+ {/*18*/ 1, 0x5781, 0xa9c2, 0xfe43, 0x92a4, 0xc525, 0x3b66, 0x6ce7,
+ 0xe3f8, 0xb479, 0x4a3a, 0x1dbb, 0x715c, 0x26dd, 0xd89e, 0x8f1f },
+ {/*19*/ 1, 0xbf41, 0xd582, 0x6ac3, 0x2954, 0x9615, 0xfcd6, 0x4397,
+ 0x3ea8, 0x81e9, 0xeb2a, 0x546b, 0x17fc, 0xa8bd, 0xc27e, 0x7d3f },
+ {/*1a*/ 1, 0x9891, 0xe1e2, 0x7273, 0x6464, 0xf7f5, 0x8586, 0x1617,
+ 0xb8b8, 0x2b29, 0x595a, 0xcacb, 0xdcdc, 0x4f4d, 0x3d3e, 0xaeaf },
+ {/*1b*/ 1, 0xcce1, 0x4472, 0x8893, 0xfdb4, 0x3f55, 0xb9c6, 0x7527,
+ 0x56d8, 0x9a39, 0x12aa, 0xde4b, 0xab6c, 0x678d, 0xef1e, 0x23ff },
+ {/*1c*/ 1, 0xa761, 0xf9b2, 0x5ed3, 0xe214, 0x4575, 0x1ba6, 0xbcc7,
+ 0x7328, 0xd449, 0x8a9a, 0x2dfb, 0x913c, 0x365d, 0x688e, 0xcfef },
+ {/*1d*/ 1, 0xff61, 0x55b2, 0xaad3, 0x7914, 0x8675, 0x2ca6, 0xd3c7,
+ 0x9e28, 0x6149, 0xcb9a, 0x34fb, 0xe73c, 0x185d, 0xb28e, 0x4def },
+ {/*1e*/ 1, 0x5451, 0xa8a2, 0xfcf3, 0x9694, 0xc2c5, 0x3e36, 0x6a67,
+ 0xebe8, 0xbfb9, 0x434a, 0x171b, 0x7d7c, 0x292d, 0xd5de, 0x818f },
+ {/*1f*/ 1, 0x6fc1, 0xb542, 0xda83, 0x19f4, 0x7635, 0xacb6, 0xc377,
+ 0x2e58, 0x4199, 0x9b1a, 0xf4db, 0x37ac, 0x586d, 0x82ee, 0xed2f },
+
+ /* ECC check bits are also part of the token set and they too can go bad:
+ * the first two rows cover channel 0, while the second two cover channel 1.
+ */
+ {/*20*/ 0, 0xbe01, 0xd702, 0x6903, 0x2104, 0x9f05, 0xf606, 0x4807,
+ 0x3208, 0x8c09, 0xe50a, 0x5b0b, 0x130c, 0xad0d, 0xc40e, 0x7a0f },
+ {/*21*/ 0, 0x4101, 0x8202, 0xc303, 0x5804, 0x1905, 0xda06, 0x9b07,
+ 0xac08, 0xed09, 0x2e0a, 0x6f0b, 0x640c, 0xb50d, 0x760e, 0x370f },
+ {/*22*/ 1, 0xc441, 0x4882, 0x8cc3, 0xf654, 0x3215, 0xbed6, 0x7a97,
+ 0x5ba8, 0x9fe9, 0x132a, 0xd76b, 0xadfc, 0x69bd, 0xe57e, 0x213f },
+ {/*23*/ 1, 0x7621, 0x9b32, 0xed13, 0xda44, 0xac65, 0x4176, 0x3757,
+ 0x6f88, 0x19a9, 0xf4ba, 0x829b, 0xb5cc, 0xc3ed, 0x2efe, 0x58df }
+};
+
+/*
+ * Given the syndrome argument, scan each of the channel tables for a syndrome
+ * match. Return the channel number of the table in which it is found.
+ */
+static int get_channel_from_ecc_syndrome(unsigned short syndrome)
+{
+ int row;
+ int column;
+
+ /* Determine column to scan */
+ column = syndrome & 0xF;
+
+ /* Scan all rows, looking for syndrome, or end of table */
+ for (row = 0; row < NUMBER_ECC_ROWS; row++) {
+ if (ecc_chipkill_syndromes[row][column] == syndrome)
+ return ecc_chipkill_syndromes[row][0];
+ }
+
+ debugf0("syndrome(%x) not found\n", syndrome);
+ return -1;
+}
+
+/*
+ * Check for a valid error in the NB Status High register. If so, proceed to
+ * read the NB Status Low, NB Address Low and NB Address High registers and
+ * store the data into the error structure.
+ *
+ * Returns:
+ * - 1: if the hardware regs contain valid error info
+ * - 0: if no valid error is indicated
+ */
+static int amd64_get_error_info_regs(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *regs)
+{
+ struct amd64_pvt *pvt;
+ struct pci_dev *misc_f3_ctl;
+ int err = 0;
+
+ pvt = mci->pvt_info;
+ misc_f3_ctl = pvt->misc_f3_ctl;
+
+ err = pci_read_config_dword(misc_f3_ctl, K8_NBSH, &regs->nbsh);
+ if (err)
+ goto err_reg;
+
+ if (!(regs->nbsh & K8_NBSH_VALID_BIT))
+ return 0;
+
+ /* valid error, read remaining error information registers */
+ err = pci_read_config_dword(misc_f3_ctl, K8_NBSL, &regs->nbsl);
+ if (err)
+ goto err_reg;
+
+ err = pci_read_config_dword(misc_f3_ctl, K8_NBEAL, &regs->nbeal);
+ if (err)
+ goto err_reg;
+
+ err = pci_read_config_dword(misc_f3_ctl, K8_NBEAH, &regs->nbeah);
+ if (err)
+ goto err_reg;
+
+ err = pci_read_config_dword(misc_f3_ctl, K8_NBCFG, &regs->nbcfg);
+ if (err)
+ goto err_reg;
+
+ return 1;
+
+err_reg:
+ debugf0("Reading error info register failed\n");
+ return 0;
+}
+
+/*
+ * This function is called to retrieve the error data from hardware and store it
+ * in the info structure.
+ *
+ * Returns:
+ * - 1: if a valid error is found
+ * - 0: if no error is found
+ */
+static int amd64_get_error_info(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info)
+{
+ struct amd64_pvt *pvt;
+ struct amd64_error_info_regs regs;
+
+ pvt = mci->pvt_info;
+
+ if (!amd64_get_error_info_regs(mci, info))
+ return 0;
+
+ /*
+ * Here's the problem with the K8's EDAC reporting: There are four
+ * registers which report pieces of error information. They are shared
+ * between CEs and UEs. Furthermore, contrary to what is stated in the
+ * BKDG, the overflow bit is never used! Every error always updates the
+ * reporting registers.
+ *
+ * Can you see the race condition? All four error reporting registers
+ * must be read before a new error updates them! There is no way to read
+ * all four registers atomically. The best that can be done is to detect
+ * that a race has occurred and then report the error without any kind of
+ * precision.
+ *
+ * What is still positive is that errors are still reported and thus
+ * problems can still be detected - just not localized because the
+ * syndrome and address are spread out across registers.
+ *
+ * Grrrrr!!!!! Here's hoping that AMD fixes this in some future K8 rev.
+ * UEs and CEs should have separate register sets with proper overflow
+ * bits that are used! At the very least, the problem could be fixed by
+ * honoring the ErrValid bit in 'nbsh' and not updating the registers - just
+ * setting the overflow bit - unless the current error is a CE and the new
+ * error is a UE, which would be the only situation for overwriting the
+ * current values.
+ */
+
+ regs = *info;
+
+ /* Use info from the second read - most current */
+ if (unlikely(!amd64_get_error_info_regs(mci, info)))
+ return 0;
+
+ /* clear the error bits in hardware */
+ pci_write_bits32(pvt->misc_f3_ctl, K8_NBSH, 0, K8_NBSH_VALID_BIT);
+
+ /* Check for the possible race condition */
+ if ((regs.nbsh != info->nbsh) ||
+ (regs.nbsl != info->nbsl) ||
+ (regs.nbeah != info->nbeah) ||
+ (regs.nbeal != info->nbeal)) {
+ amd64_mc_printk(mci, KERN_WARNING,
+ "hardware STATUS read access race condition "
+ "detected!\n");
+ return 0;
+ }
+ return 1;
+}
+
+static inline void amd64_decode_gart_tlb_error(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info)
+{
+ u32 err_code;
+ u32 ec_tt; /* error code transaction type (2b) */
+ u32 ec_ll; /* error code cache level (2b) */
+
+ err_code = EXTRACT_ERROR_CODE(info->nbsl);
+ ec_ll = EXTRACT_LL_CODE(err_code);
+ ec_tt = EXTRACT_TT_CODE(err_code);
+
+ amd64_mc_printk(mci, KERN_ERR,
+ "GART TLB event: transaction type(%s), "
+ "cache level(%s)\n", tt_msgs[ec_tt], ll_msgs[ec_ll]);
+}
+
+static inline void amd64_decode_mem_cache_error(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info)
+{
+ u32 err_code;
+ u32 ec_rrrr; /* error code memory transaction (4b) */
+ u32 ec_tt; /* error code transaction type (2b) */
+ u32 ec_ll; /* error code cache level (2b) */
+
+ err_code = EXTRACT_ERROR_CODE(info->nbsl);
+ ec_ll = EXTRACT_LL_CODE(err_code);
+ ec_tt = EXTRACT_TT_CODE(err_code);
+ ec_rrrr = EXTRACT_RRRR_CODE(err_code);
+
+ amd64_mc_printk(mci, KERN_ERR,
+ "cache hierarchy error: memory transaction type(%s), "
+ "transaction type(%s), cache level(%s)\n",
+ rrrr_msgs[ec_rrrr], tt_msgs[ec_tt], ll_msgs[ec_ll]);
+}
+
+
+/*
+ * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
+ * ADDRESS and process.
+ */
+static void amd64_handle_ce(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ u64 SystemAddress;
+
+ /* Ensure that the Error Address is VALID */
+ if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
+ amd64_mc_printk(mci, KERN_ERR,
+ "HW has no ERROR_ADDRESS available\n");
+ edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ return;
+ }
+
+ SystemAddress = extract_error_address(mci, info);
+
+ amd64_mc_printk(mci, KERN_ERR,
+ "CE ERROR_ADDRESS= 0x%llx\n", SystemAddress);
+
+ pvt->ops->map_sysaddr_to_csrow(mci, info, SystemAddress);
+}
+
+/* Handle any Un-correctable Errors (UEs) */
+static void amd64_handle_ue(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info)
+{
+ int csrow;
+ u64 SystemAddress;
+ u32 page, offset;
+ struct mem_ctl_info *log_mci, *src_mci = NULL;
+
+ log_mci = mci;
+
+ if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
+ amd64_mc_printk(mci, KERN_CRIT,
+ "HW has no ERROR_ADDRESS available\n");
+ edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+ return;
+ }
+
+ SystemAddress = extract_error_address(mci, info);
+
+ /*
+ * Find out which node the error address belongs to. This may be
+ * different from the node that detected the error.
+ */
+ src_mci = find_mc_by_sys_addr(mci, SystemAddress);
+ if (!src_mci) {
+ amd64_mc_printk(mci, KERN_CRIT,
+ "ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n",
+ (unsigned long)SystemAddress);
+ edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+ return;
+ }
+
+ log_mci = src_mci;
+
+ csrow = sys_addr_to_csrow(log_mci, SystemAddress);
+ if (csrow < 0) {
+ amd64_mc_printk(mci, KERN_CRIT,
+ "ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n",
+ (unsigned long)SystemAddress);
+ edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+ } else {
+ error_address_to_page_and_offset(SystemAddress, &page, &offset);
+ edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
+ }
+}
+
+static void amd64_decode_bus_error(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info)
+{
+ u32 err_code, ext_ec;
+ u32 ec_pp; /* error code participating processor (2p) */
+ u32 ec_to; /* error code timed out (1b) */
+ u32 ec_rrrr; /* error code memory transaction (4b) */
+ u32 ec_ii; /* error code memory or I/O (2b) */
+ u32 ec_ll; /* error code cache level (2b) */
+
+ ext_ec = EXTRACT_EXT_ERROR_CODE(info->nbsl);
+ err_code = EXTRACT_ERROR_CODE(info->nbsl);
+
+ ec_ll = EXTRACT_LL_CODE(err_code);
+ ec_ii = EXTRACT_II_CODE(err_code);
+ ec_rrrr = EXTRACT_RRRR_CODE(err_code);
+ ec_to = EXTRACT_TO_CODE(err_code);
+ ec_pp = EXTRACT_PP_CODE(err_code);
+
+ amd64_mc_printk(mci, KERN_ERR,
+ "BUS ERROR:\n"
+ " time-out(%s) mem or i/o(%s)\n"
+ " participating processor(%s)\n"
+ " memory transaction type(%s)\n"
+ " cache level(%s) Error Found by: %s\n",
+ to_msgs[ec_to],
+ ii_msgs[ec_ii],
+ pp_msgs[ec_pp],
+ rrrr_msgs[ec_rrrr],
+ ll_msgs[ec_ll],
+ (info->nbsh & K8_NBSH_ERR_SCRUBER) ?
+ "Scrubber" : "Normal Operation");
+
+ /* If this was an 'observed' error, early out */
+ if (ec_pp == K8_NBSL_PP_OBS)
+ return; /* We aren't the node involved */
+
+ /* Parse out the extended error code for ECC events */
+ switch (ext_ec) {
+ /* F10 changed to one Extended ECC error code */
+ case F10_NBSL_EXT_ERR_RES: /* Reserved field */
+ case F10_NBSL_EXT_ERR_ECC: /* F10 ECC ext err code */
+ break;
+
+ default:
+ amd64_mc_printk(mci, KERN_ERR, "NOT ECC: no special error "
+ "handling for this error\n");
+ return;
+ }
+
+ if (info->nbsh & K8_NBSH_CECC)
+ amd64_handle_ce(mci, info);
+ else if (info->nbsh & K8_NBSH_UECC)
+ amd64_handle_ue(mci, info);
+
+ /*
+ * If the main error is a CE then the overflow must be a CE too. If the
+ * main error is a UE then the overflow is unknown. We'll call the overflow
+ * a CE - if panic_on_ue is set then we've already panicked and won't
+ * arrive here. Otherwise, someone apparently doesn't think that UEs are
+ * catastrophic.
+ */
+ if (info->nbsh & K8_NBSH_OVERFLOW)
+ edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR
+ " Error Overflow set");
+}
+
+int amd64_process_error_info(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info,
+ int handle_errors)
+{
+ struct amd64_pvt *pvt;
+ struct amd64_error_info_regs *regs;
+ u32 err_code, ext_ec;
+ int gart_tlb_error = 0;
+
+ pvt = mci->pvt_info;
+
+ /* If caller doesn't want us to process the error, return */
+ if (!handle_errors)
+ return 1;
+
+ regs = info;
+
+ debugf1("NorthBridge ERROR: mci(0x%p)\n", mci);
+ debugf1(" MC node(%d) Error-Address(0x%.8x-%.8x)\n",
+ pvt->mc_node_id, regs->nbeah, regs->nbeal);
+ debugf1(" nbsh(0x%.8x) nbsl(0x%.8x)\n",
+ regs->nbsh, regs->nbsl);
+ debugf1(" Valid Error=%s Overflow=%s\n",
+ (regs->nbsh & K8_NBSH_VALID_BIT) ? "True" : "False",
+ (regs->nbsh & K8_NBSH_OVERFLOW) ? "True" : "False");
+ debugf1(" Err Uncorrected=%s MCA Error Reporting=%s\n",
+ (regs->nbsh & K8_NBSH_UNCORRECTED_ERR) ?
+ "True" : "False",
+ (regs->nbsh & K8_NBSH_ERR_ENABLE) ?
+ "True" : "False");
+ debugf1(" MiscErr Valid=%s ErrAddr Valid=%s PCC=%s\n",
+ (regs->nbsh & K8_NBSH_MISC_ERR_VALID) ?
+ "True" : "False",
+ (regs->nbsh & K8_NBSH_VALID_ERROR_ADDR) ?
+ "True" : "False",
+ (regs->nbsh & K8_NBSH_PCC) ?
+ "True" : "False");
+ debugf1(" CECC=%s UECC=%s Found by Scrubber=%s\n",
+ (regs->nbsh & K8_NBSH_CECC) ?
+ "True" : "False",
+ (regs->nbsh & K8_NBSH_UECC) ?
+ "True" : "False",
+ (regs->nbsh & K8_NBSH_ERR_SCRUBER) ?
+ "True" : "False");
+ debugf1(" CORE0=%s CORE1=%s CORE2=%s CORE3=%s\n",
+ (regs->nbsh & K8_NBSH_CORE0) ? "True" : "False",
+ (regs->nbsh & K8_NBSH_CORE1) ? "True" : "False",
+ (regs->nbsh & K8_NBSH_CORE2) ? "True" : "False",
+ (regs->nbsh & K8_NBSH_CORE3) ? "True" : "False");
+
+
+ err_code = EXTRACT_ERROR_CODE(regs->nbsl);
+
+ /* Determine which error type:
+ * 1) GART errors - non-fatal, developmental events
+ * 2) MEMORY errors
+ * 3) BUS errors
+ * 4) Unknown error
+ */
+ if (TEST_TLB_ERROR(err_code)) {
+ /*
+ * GART errors are intended to help graphics driver developers
+ * to detect bad GART PTEs. It is recommended by AMD to disable
+ * GART table walk error reporting by default[1] (currently
+ * being disabled in mce_cpu_quirks()) and according to the
+ * comment in mce_cpu_quirks(), such GART errors can be
+ * incorrectly triggered. We may see these errors anyway and
+ * unless requested by the user, they won't be reported.
+ *
+ * [1] section 13.10.1 on BIOS and Kernel Developers Guide for
+ * AMD NPT family 0Fh processors
+ */
+ if (report_gart_errors == 0)
+ return 1;
+
+ /*
+ * Only if GART error reporting is requested should we generate
+ * any logs.
+ */
+ gart_tlb_error = 1;
+
+ debugf1("GART TLB error\n");
+ amd64_decode_gart_tlb_error(mci, info);
+ } else if (TEST_MEM_ERROR(err_code)) {
+ debugf1("Memory/Cache error\n");
+ amd64_decode_mem_cache_error(mci, info);
+ } else if (TEST_BUS_ERROR(err_code)) {
+ debugf1("Bus (Link/DRAM) error\n");
+ amd64_decode_bus_error(mci, info);
+ } else {
+ /* shouldn't reach here! */
+ amd64_mc_printk(mci, KERN_WARNING,
+ "%s(): unknown MCE error 0x%x\n", __func__,
+ err_code);
+ }
+
+ ext_ec = EXTRACT_EXT_ERROR_CODE(regs->nbsl);
+ amd64_mc_printk(mci, KERN_ERR,
+ "ExtErr=(0x%x) %s\n", ext_ec, ext_msgs[ext_ec]);
+
+ if (((ext_ec >= F10_NBSL_EXT_ERR_CRC &&
+ ext_ec <= F10_NBSL_EXT_ERR_TGT) ||
+ (ext_ec == F10_NBSL_EXT_ERR_RMW)) &&
+ EXTRACT_LDT_LINK(info->nbsh)) {
+
+ amd64_mc_printk(mci, KERN_ERR,
+ "Error on hypertransport link: %s\n",
+ htlink_msgs[
+ EXTRACT_LDT_LINK(info->nbsh)]);
+ }
+
+ /*
+ * Check the UE bit of the NB status high register; if it is set, generate
+ * some logs. If it is NOT a GART error, process the event as a NO-INFO
+ * event. If it was a GART error, skip that processing.
+ */
+ if (regs->nbsh & K8_NBSH_UNCORRECTED_ERR) {
+ amd64_mc_printk(mci, KERN_CRIT, "uncorrected error\n");
+ if (!gart_tlb_error)
+ edac_mc_handle_ue_no_info(mci, "UE bit is set\n");
+ }
+
+ if (regs->nbsh & K8_NBSH_PCC)
+ amd64_mc_printk(mci, KERN_CRIT,
+ "PCC (processor context corrupt) set\n");
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(amd64_process_error_info);
+
+/*
+ * The main polling 'check' function, called FROM the edac core to perform the
+ * error checking and if an error is encountered, error processing.
+ */
+static void amd64_check(struct mem_ctl_info *mci)
+{
+ struct amd64_error_info_regs info;
+
+ if (amd64_get_error_info(mci, &info))
+ amd64_process_error_info(mci, &info, 1);
+}
+
+/*
+ * Input:
+ * 1) struct amd64_pvt which contains pvt->dram_f2_ctl pointer
+ * 2) AMD Family index value
+ *
+ * Output:
+ * Upon return of 0, the following are filled in:
+ *
+ * struct pvt->addr_f1_ctl
+ * struct pvt->misc_f3_ctl
+ *
+ * Filled in with the related device functions of 'dram_f2_ctl'.
+ * These devices are "reserved" via pci_get_device().
+ *
+ * Upon return of 1 (error status):
+ *
+ * Nothing reserved
+ */
+static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, int mc_idx)
+{
+ const struct amd64_family_type *amd64_dev = &amd64_family_types[mc_idx];
+
+ /* Reserve the ADDRESS MAP Device */
+ pvt->addr_f1_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
+ amd64_dev->addr_f1_ctl,
+ pvt->dram_f2_ctl);
+
+ if (!pvt->addr_f1_ctl) {
+ amd64_printk(KERN_ERR, "error address map device not found: "
+ "vendor %x device 0x%x (broken BIOS?)\n",
+ PCI_VENDOR_ID_AMD, amd64_dev->addr_f1_ctl);
+ return 1;
+ }
+
+ /* Reserve the MISC Device */
+ pvt->misc_f3_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
+ amd64_dev->misc_f3_ctl,
+ pvt->dram_f2_ctl);
+
+ if (!pvt->misc_f3_ctl) {
+ pci_dev_put(pvt->addr_f1_ctl);
+ pvt->addr_f1_ctl = NULL;
+
+ amd64_printk(KERN_ERR, "error miscellaneous device not found: "
+ "vendor %x device 0x%x (broken BIOS?)\n",
+ PCI_VENDOR_ID_AMD, amd64_dev->misc_f3_ctl);
+ return 1;
+ }
+
+ debugf1(" Addr Map device PCI Bus ID:\t%s\n",
+ pci_name(pvt->addr_f1_ctl));
+ debugf1(" DRAM MEM-CTL PCI Bus ID:\t%s\n",
+ pci_name(pvt->dram_f2_ctl));
+ debugf1(" Misc device PCI Bus ID:\t%s\n",
+ pci_name(pvt->misc_f3_ctl));
+
+ return 0;
+}
+
+static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
+{
+ pci_dev_put(pvt->addr_f1_ctl);
+ pci_dev_put(pvt->misc_f3_ctl);
+}
+
+/*
+ * Retrieve the hardware registers of the memory controller (this includes the
+ * 'Address Map' and 'Misc' device regs)
+ */
+static void amd64_read_mc_registers(struct amd64_pvt *pvt)
+{
+ u64 msr_val;
+ int dram, err = 0;
+
+ /*
+ * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
+ * those are Read-As-Zero
+ */
+ rdmsrl(MSR_K8_TOP_MEM1, msr_val);
+ pvt->top_mem = msr_val >> 23;
+ debugf0(" TOP_MEM=0x%08llx\n", pvt->top_mem);
+
+ /* check first whether TOP_MEM2 is enabled */
+ rdmsrl(MSR_K8_SYSCFG, msr_val);
+ if (msr_val & (1U << 21)) {
+ rdmsrl(MSR_K8_TOP_MEM2, msr_val);
+ pvt->top_mem2 = msr_val >> 23;
+ debugf0(" TOP_MEM2=0x%08llx\n", pvt->top_mem2);
+ } else
+ debugf0(" TOP_MEM2 disabled.\n");
+
+ amd64_cpu_display_info(pvt);
+
+ err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);
+ if (err)
+ goto err_reg;
+
+ if (pvt->ops->read_dram_ctl_register)
+ pvt->ops->read_dram_ctl_register(pvt);
+
+ for (dram = 0; dram < DRAM_REG_COUNT; dram++) {
+ /*
+ * Call CPU specific READ function to get the DRAM Base and
+ * Limit values from the DCT.
+ */
+ pvt->ops->read_dram_base_limit(pvt, dram);
+
+ /*
+ * Only print out debug info on rows with both R and W enabled.
+ * In normal processing, the compiler should optimize this whole 'if'
+ * debug output block away.
+ */
+ if (pvt->dram_rw_en[dram] != 0) {
+ debugf1(" DRAM_BASE[%d]: 0x%8.08x-%8.08x "
+ "DRAM_LIMIT: 0x%8.08x-%8.08x\n",
+ dram,
+ (u32)(pvt->dram_base[dram] >> 32),
+ (u32)(pvt->dram_base[dram] & 0xFFFFFFFF),
+ (u32)(pvt->dram_limit[dram] >> 32),
+ (u32)(pvt->dram_limit[dram] & 0xFFFFFFFF));
+ debugf1(" IntlvEn=%s %s %s "
+ "IntlvSel=%d DstNode=%d\n",
+ pvt->dram_IntlvEn[dram] ?
+ "Enabled" : "Disabled",
+ (pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
+ (pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
+ pvt->dram_IntlvSel[dram],
+ pvt->dram_DstNode[dram]);
+ }
+ }
+
+ amd64_read_dct_base_mask(pvt);
+
+ err = pci_read_config_dword(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
+ if (err)
+ goto err_reg;
+
+ amd64_read_dbam_reg(pvt);
+
+ err = pci_read_config_dword(pvt->misc_f3_ctl,
+ F10_ONLINE_SPARE, &pvt->online_spare);
+ if (err)
+ goto err_reg;
+
+ err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
+ if (err)
+ goto err_reg;
+
+ err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);
+ if (err)
+ goto err_reg;
+
+ if (!dct_ganging_enabled(pvt)) {
+ err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_1,
+ &pvt->dclr1);
+ if (err)
+ goto err_reg;
+
+ err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCHR_1,
+ &pvt->dchr1);
+ if (err)
+ goto err_reg;
+ }
+
+ amd64_dump_misc_regs(pvt);
+
+ return;
+
+err_reg:
+ debugf0("Reading an MC register failed\n");
+
+}
+
+/*
+ * NOTE: CPU Revision Dependent code
+ *
+ * Input:
+ * @csrow_nr ChipSelect Row Number (0..CHIPSELECT_COUNT-1)
+ * k8 private pointer to -->
+ * DRAM Bank Address mapping register
+ * node_id
+ * DCL register where dual_channel_active is
+ *
+ * The DBAM register consists of 4 sets of 4 bits each, with these definitions:
+ *
+ * Bits: CSROWs
+ * 0-3 CSROWs 0 and 1
+ * 4-7 CSROWs 2 and 3
+ * 8-11 CSROWs 4 and 5
+ * 12-15 CSROWs 6 and 7
+ *
+ * Values range from 0 to 15.
+ * The meaning of the values depends on CPU revision and dual-channel state;
+ * see the relevant BKDG for more info.
+ *
+ * The memory controller provides for a total of only 8 CSROWs in its current
+ * architecture. Each "pair" of CSROWs normally represents just one DIMM in
+ * single channel or two (2) DIMMs in dual channel mode.
+ *
+ * The following code logic collapses the various tables for CSROW based on CPU
+ * revision.
+ *
+ * Returns:
+ * The number of PAGE_SIZE pages that the specified CSROW number
+ * encompasses
+ *
+ */
+static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
+{
+ u32 dram_map, nr_pages;
+
+ /*
+ * The math on this doesn't look right on the surface because x/2*4 could
+ * be simplified to x*2, but this expression relies on integer math, where
+ * 1/2 == 0. The intermediate value becomes the number of bits by which to
+ * shift the DBAM register to extract the proper CSROW field.
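+ *
+ * For example, csrow_nr == 3 gives (3/2)*4 == 4, so the DBAM register is
+ * shifted right by 4 bits and the low nibble then holds the map value
+ * shared by csrows 2 and 3.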
+ */
+ dram_map = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
+
+ nr_pages = pvt->ops->dbam_map_to_pages(pvt, dram_map);
+
+ /*
+ * If dual channel then double the memory size of single channel.
+ * Channel count is 1 or 2
+ */
+ nr_pages <<= (pvt->channel_count - 1);
+
+ debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, dram_map);
+ debugf0(" nr_pages= %u channel-count = %d\n",
+ nr_pages, pvt->channel_count);
+
+ return nr_pages;
+}
+
+/*
+ * Initialize the array of csrow attribute instances, based on the values
+ * from pci config hardware registers.
+ */
+static int amd64_init_csrows(struct mem_ctl_info *mci)
+{
+ struct csrow_info *csrow;
+ struct amd64_pvt *pvt;
+ u64 input_addr_min, input_addr_max, sys_addr;
+ int i, err = 0, empty = 1;
+
+ pvt = mci->pvt_info;
+
+ err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);
+ if (err)
+ debugf0("Reading K8_NBCFG failed\n");
+
+ debugf0("NBCFG= 0x%x CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
+ (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
+ (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"
+ );
+
+ for (i = 0; i < CHIPSELECT_COUNT; i++) {
+ csrow = &mci->csrows[i];
+
+ if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
+ debugf1("----CSROW %d EMPTY for node %d\n", i,
+ pvt->mc_node_id);
+ continue;
+ }
+
+ debugf1("----CSROW %d VALID for MC node %d\n",
+ i, pvt->mc_node_id);
+
+ empty = 0;
+ csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
+ find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
+ sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
+ csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
+ sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
+ csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
+ csrow->page_mask = ~mask_from_dct_mask(pvt, i);
+ /* 8 bytes of resolution */
+
+ csrow->mtype = amd64_determine_memory_type(pvt);
+
+ debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
+ debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
+ (unsigned long)input_addr_min,
+ (unsigned long)input_addr_max);
+ debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n",
+ (unsigned long)sys_addr, csrow->page_mask);
+ debugf1(" nr_pages: %u first_page: 0x%lx "
+ "last_page: 0x%lx\n",
+ (unsigned)csrow->nr_pages,
+ csrow->first_page, csrow->last_page);
+
+ /*
+ * determine whether CHIPKILL or JUST ECC or NO ECC is operating
+ */
+ if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
+ csrow->edac_mode =
+ (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
+ EDAC_S4ECD4ED : EDAC_SECDED;
+ else
+ csrow->edac_mode = EDAC_NONE;
+ }
+
+ return empty;
+}
+
+/*
+ * Only if 'ecc_enable_override' is set AND the BIOS had ECC disabled do we
+ * enable it here.
+ */
+static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
+ int cpu, idx = 0, err = 0;
+ struct msr msrs[cpumask_weight(cpumask)];
+ u32 value;
+ u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+
+ if (!ecc_enable_override)
+ return;
+
+ memset(msrs, 0, sizeof(msrs));
+
+ amd64_printk(KERN_WARNING,
+ "'ecc_enable_override' parameter is active, "
+ "Enabling AMD ECC hardware now: CAUTION\n");
+
+ err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
+ if (err)
+ debugf0("Reading K8_NBCTL failed\n");
+
+ /* turn on UECCEn and CECCEn bits */
+ pvt->old_nbctl = value & mask;
+ pvt->nbctl_mcgctl_saved = 1;
+
+ value |= mask;
+ pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
+
+ rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+
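+ /*
+ * Remember, per core, whether MCG_CTL[NBE] was already set so that
+ * amd64_restore_ecc_error_reporting() only clears the bit again on
+ * cores where this driver actually enabled it.
+ */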
+ for_each_cpu(cpu, cpumask) {
+ if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
+ set_bit(idx, &pvt->old_mcgctl);
+
+ msrs[idx].l |= K8_MSR_MCGCTL_NBE;
+ idx++;
+ }
+ wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+
+ err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
+ if (err)
+ debugf0("Reading K8_NBCFG failed\n");
+
+ debugf0("NBCFG(1)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
+ (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
+ (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
+
+ if (!(value & K8_NBCFG_ECC_ENABLE)) {
+ amd64_printk(KERN_WARNING,
+ "This node reports that DRAM ECC is "
+ "currently Disabled; ENABLING now\n");
+
+ /* Attempt to turn on DRAM ECC Enable */
+ value |= K8_NBCFG_ECC_ENABLE;
+ pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
+
+ err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
+ if (err)
+ debugf0("Reading K8_NBCFG failed\n");
+
+ if (!(value & K8_NBCFG_ECC_ENABLE)) {
+ amd64_printk(KERN_WARNING,
+ "Hardware rejects Enabling DRAM ECC checking\n"
+ "Check memory DIMM configuration\n");
+ } else {
+ amd64_printk(KERN_DEBUG,
+ "Hardware accepted DRAM ECC Enable\n");
+ }
+ }
+ debugf0("NBCFG(2)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
+ (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
+ (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
+
+ pvt->ctl_error_info.nbcfg = value;
+}
+
+static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
+{
+ const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
+ int cpu, idx = 0, err = 0;
+ struct msr msrs[cpumask_weight(cpumask)];
+ u32 value;
+ u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+
+ if (!pvt->nbctl_mcgctl_saved)
+ return;
+
+ memset(msrs, 0, sizeof(msrs));
+
+ err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
+ if (err)
+ debugf0("Reading K8_NBCTL failed\n");
+ value &= ~mask;
+ value |= pvt->old_nbctl;
+
+ /* restore the original NBCTL value */
+ pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
+
+ rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+
+ for_each_cpu(cpu, cpumask) {
+ msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
+ msrs[idx].l |= test_bit(idx, &pvt->old_mcgctl) ?
+ K8_MSR_MCGCTL_NBE : 0;
+ idx++;
+ }
+
+ wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+}
+
+static void check_mcg_ctl(void *ret)
+{
+ u64 msr_val = 0;
+ u8 nbe;
+
+ rdmsrl(MSR_IA32_MCG_CTL, msr_val);
+ nbe = msr_val & K8_MSR_MCGCTL_NBE;
+
+ debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
+ raw_smp_processor_id(), msr_val,
+ (nbe ? "enabled" : "disabled"));
+
+ if (!nbe)
+ *(int *)ret = 0;
+}
+
+/* check MCG_CTL on all the cpus on this node */
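+/*
+ * 'ret' starts out as 1; check_mcg_ctl() clears it from any CPU in 'mask'
+ * that finds its MCG_CTL NBE bit disabled, so a return of 1 means NB MCE
+ * reporting is enabled on the polled cores.
+ */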
+static int amd64_mcg_ctl_enabled_on_cpus(const cpumask_t *mask)
+{
+ int ret = 1;
+ preempt_disable();
+ smp_call_function_many(mask, check_mcg_ctl, &ret, 1);
+ preempt_enable();
+
+ return ret;
+}
+
+/*
+ * EDAC requires that the BIOS have ECC enabled before it takes over the
+ * processing of ECC errors, since the BIOS can properly initialize the
+ * memory system completely. A command line option allows hardware ECC to be
+ * force-enabled later, in amd64_enable_ecc_error_reporting().
+ */
+static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
+{
+ u32 value;
+ int err = 0, ret = 0;
+ u8 ecc_enabled = 0;
+
+ err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
+ if (err)
+ debugf0("Reading K8_NBCTL failed\n");
+
+ ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
+
+ ret = amd64_mcg_ctl_enabled_on_cpus(cpumask_of_node(pvt->mc_node_id));
+
+ debugf0("K8_NBCFG=0x%x, DRAM ECC is %s\n", value,
+ (value & K8_NBCFG_ECC_ENABLE ? "enabled" : "disabled"));
+
+ if (!ecc_enabled || !ret) {
+ if (!ecc_enabled) {
+ amd64_printk(KERN_WARNING, "This node reports that "
+ "Memory ECC is currently "
+ "disabled.\n");
+
+ amd64_printk(KERN_WARNING, "bit 0x%lx in register "
+ "F3x%x of the MISC_CONTROL device (%s) "
+ "should be enabled\n", K8_NBCFG_ECC_ENABLE,
+ K8_NBCFG, pci_name(pvt->misc_f3_ctl));
+ }
+ if (!ret) {
+ amd64_printk(KERN_WARNING, "bit 0x%016lx in MSR 0x%08x "
+ "of node %d should be enabled\n",
+ K8_MSR_MCGCTL_NBE, MSR_IA32_MCG_CTL,
+ pvt->mc_node_id);
+ }
+ if (!ecc_enable_override) {
+ amd64_printk(KERN_WARNING, "WARNING: ECC is NOT "
+ "currently enabled by the BIOS. Module "
+ "will NOT be loaded.\n"
+ " Either Enable ECC in the BIOS, "
+ "or use the 'ecc_enable_override' "
+ "parameter.\n"
+ " Might be a BIOS bug, if BIOS says "
+ "ECC is enabled\n"
+ " Use of the override can cause "
+ "unknown side effects.\n");
+ ret = -ENODEV;
+ }
+ } else {
+ amd64_printk(KERN_INFO,
+ "ECC is enabled by BIOS, Proceeding "
+ "with EDAC module initialization\n");
+
+ /* CLEAR the override, since BIOS controlled it */
+ ecc_enable_override = 0;
+ }
+
+ return ret;
+}
+
+struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
+ ARRAY_SIZE(amd64_inj_attrs) +
+ 1];
+
+struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };
+
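+/*
+ * Build the attribute list handed to the EDAC core: copy the debug
+ * attributes, then the injection attributes, and terminate the array with
+ * an entry whose .name is NULL so the core knows where the list ends.
+ */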
+static void amd64_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
+{
+ unsigned int i = 0, j = 0;
+
+ for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
+ sysfs_attrs[i] = amd64_dbg_attrs[i];
+
+ for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
+ sysfs_attrs[i] = amd64_inj_attrs[j];
+
+ sysfs_attrs[i] = terminator;
+
+ mci->mc_driver_sysfs_attributes = sysfs_attrs;
+}
+
+static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE;
+ mci->edac_cap = EDAC_FLAG_NONE;
+
+ if (pvt->nbcap & K8_NBCAP_SECDED)
+ mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
+
+ if (pvt->nbcap & K8_NBCAP_CHIPKILL)
+ mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
+
+ mci->edac_cap = amd64_determine_edac_cap(pvt);
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = EDAC_AMD64_VERSION;
+ mci->ctl_name = get_amd_family_name(pvt->mc_type_index);
+ mci->dev_name = pci_name(pvt->dram_f2_ctl);
+ mci->ctl_page_to_phys = NULL;
+
+ /* IMPORTANT: Set the polling 'check' function in this module */
+ mci->edac_check = amd64_check;
+
+ /* memory scrubber interface */
+ mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
+ mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
+}
+
+/*
+ * Init stuff for this DRAM Controller device.
+ *
+ * Due to a hardware feature on Fam10h CPUs, the Enable Extended Configuration
+ * Space feature MUST be enabled on ALL Processors prior to actually reading
+ * from the ECS registers. The module can be loaded on any 'core', and a core
+ * does not 'see' the other processors' ECS data unless ECS access is enabled
+ * on them as well. Our solution is to first enable ECS access in this routine
+ * on all processors, gather some data in an amd64_pvt structure and later
+ * come back in a finish-setup function to perform the final initialization.
+ * See amd64_init_2nd_stage() for that.
+ */
+static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
+ int mc_type_index)
+{
+ struct amd64_pvt *pvt = NULL;
+ int err = 0, ret;
+
+ ret = -ENOMEM;
+ pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
+ if (!pvt)
+ goto err_exit;
+
+ pvt->mc_node_id = get_mc_node_id_from_pdev(dram_f2_ctl);
+
+ pvt->dram_f2_ctl = dram_f2_ctl;
+ pvt->ext_model = boot_cpu_data.x86_model >> 4;
+ pvt->mc_type_index = mc_type_index;
+ pvt->ops = family_ops(mc_type_index);
+ pvt->old_mcgctl = 0;
+
+ /*
+ * We have the dram_f2_ctl device as an argument, now go reserve its
+ * sibling devices from the PCI system.
+ */
+ ret = -ENODEV;
+ err = amd64_reserve_mc_sibling_devices(pvt, mc_type_index);
+ if (err)
+ goto err_free;
+
+ ret = -EINVAL;
+ err = amd64_check_ecc_enabled(pvt);
+ if (err)
+ goto err_put;
+
+ /*
+ * Key operation here: setup of HW prior to performing ops on it. Some
+ * setup is required to access ECS data. After this is performed, the
+ * 'teardown' function must be called upon error and normal exit paths.
+ */
+ if (boot_cpu_data.x86 >= 0x10)
+ amd64_setup(pvt);
+
+ /*
+ * Save the pointer to the private data for use in 2nd initialization
+ * stage
+ */
+ pvt_lookup[pvt->mc_node_id] = pvt;
+
+ return 0;
+
+err_put:
+ amd64_free_mc_sibling_devices(pvt);
+
+err_free:
+ kfree(pvt);
+
+err_exit:
+ return ret;
+}
+
+/*
+ * This is the finishing stage of the init code. It needs to be performed after
+ * all MCs' hardware has been prepped for accessing extended config space.
+ */
+static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
+{
+ int node_id = pvt->mc_node_id;
+ struct mem_ctl_info *mci;
+ int ret, err = 0;
+
+ amd64_read_mc_registers(pvt);
+
+ ret = -ENODEV;
+ if (pvt->ops->probe_valid_hardware) {
+ err = pvt->ops->probe_valid_hardware(pvt);
+ if (err)
+ goto err_exit;
+ }
+
+ /*
+ * We need to determine how many memory channels there are. Then use
+ * that information for calculating the size of the dynamic instance
+ * tables in the 'mci' structure
+ */
+ pvt->channel_count = pvt->ops->early_channel_count(pvt);
+ if (pvt->channel_count < 0)
+ goto err_exit;
+
+ ret = -ENOMEM;
+ mci = edac_mc_alloc(0, CHIPSELECT_COUNT, pvt->channel_count, node_id);
+ if (!mci)
+ goto err_exit;
+
+ mci->pvt_info = pvt;
+
+ mci->dev = &pvt->dram_f2_ctl->dev;
+ amd64_setup_mci_misc_attributes(mci);
+
+ if (amd64_init_csrows(mci))
+ mci->edac_cap = EDAC_FLAG_NONE;
+
+ amd64_enable_ecc_error_reporting(mci);
+ amd64_set_mc_sysfs_attributes(mci);
+
+ ret = -ENODEV;
+ if (edac_mc_add_mc(mci)) {
+ debugf1("failed edac_mc_add_mc()\n");
+ goto err_add_mc;
+ }
+
+ mci_lookup[node_id] = mci;
+ pvt_lookup[node_id] = NULL;
+ return 0;
+
+err_add_mc:
+ edac_mc_free(mci);
+
+err_exit:
+ debugf0("failure to init 2nd stage: ret=%d\n", ret);
+
+ amd64_restore_ecc_error_reporting(pvt);
+
+ if (boot_cpu_data.x86 > 0xf)
+ amd64_teardown(pvt);
+
+ amd64_free_mc_sibling_devices(pvt);
+
+ kfree(pvt_lookup[pvt->mc_node_id]);
+ pvt_lookup[node_id] = NULL;
+
+ return ret;
+}
+
+
+static int __devinit amd64_init_one_instance(struct pci_dev *pdev,
+ const struct pci_device_id *mc_type)
+{
+ int ret = 0;
+
+ debugf0("(MC node=%d,mc_type='%s')\n",
+ get_mc_node_id_from_pdev(pdev),
+ get_amd_family_name(mc_type->driver_data));
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0)
+ ret = -EIO;
+ else
+ ret = amd64_probe_one_instance(pdev, mc_type->driver_data);
+
+ if (ret < 0)
+ debugf0("ret=%d\n", ret);
+
+ return ret;
+}
+
+static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct amd64_pvt *pvt;
+
+ /* Remove from EDAC CORE tracking list */
+ mci = edac_mc_del_mc(&pdev->dev);
+ if (!mci)
+ return;
+
+ pvt = mci->pvt_info;
+
+ amd64_restore_ecc_error_reporting(pvt);
+
+ if (boot_cpu_data.x86 > 0xf)
+ amd64_teardown(pvt);
+
+ amd64_free_mc_sibling_devices(pvt);
+
+ mci_lookup[pvt->mc_node_id] = NULL;
+
+ kfree(pvt);
+ mci->pvt_info = NULL;
+
+ /* Free the EDAC CORE resources */
+ edac_mc_free(mci);
+}
+
+/*
+ * This table is part of the interface for loading drivers for PCI devices. The
+ * PCI core identifies what devices are on a system during boot, and then
+ * consults this table to see whether this driver handles a given device found.
+ */
+static const struct pci_device_id amd64_pci_table[] __devinitdata = {
+ {
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = 0,
+ .class_mask = 0,
+ .driver_data = K8_CPUS
+ },
+ {
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = 0,
+ .class_mask = 0,
+ .driver_data = F10_CPUS
+ },
+ {
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_11H_NB_DRAM,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = 0,
+ .class_mask = 0,
+ .driver_data = F11_CPUS
+ },
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, amd64_pci_table);
+
+static struct pci_driver amd64_pci_driver = {
+ .name = EDAC_MOD_STR,
+ .probe = amd64_init_one_instance,
+ .remove = __devexit_p(amd64_remove_one_instance),
+ .id_table = amd64_pci_table,
+};
+
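+/*
+ * Create a single, shared EDAC PCI control device for the platform, anchored
+ * to the first memory controller that registered (mci_lookup[0]). It is only
+ * created once; later calls return early.
+ */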
+static void amd64_setup_pci_device(void)
+{
+ struct mem_ctl_info *mci;
+ struct amd64_pvt *pvt;
+
+ if (amd64_ctl_pci)
+ return;
+
+ mci = mci_lookup[0];
+ if (mci) {
+
+ pvt = mci->pvt_info;
+ amd64_ctl_pci =
+ edac_pci_create_generic_ctl(&pvt->dram_f2_ctl->dev,
+ EDAC_MOD_STR);
+
+ if (!amd64_ctl_pci) {
+ pr_warning("%s(): Unable to create PCI control\n",
+ __func__);
+
+ pr_warning("%s(): PCI error report via EDAC not set\n",
+ __func__);
+ }
+ }
+}
+
+static int __init amd64_edac_init(void)
+{
+ int nb, err = -ENODEV;
+
+ edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
+
+ opstate_init();
+
+ if (cache_k8_northbridges() < 0)
+ goto err_exit;
+
+ err = pci_register_driver(&amd64_pci_driver);
+ if (err)
+ return err;
+
+ /*
+ * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
+ * amd64_pvt structs. These will be used in the 2nd stage init function
+ * to finish initialization of the MC instances.
+ */
+ for (nb = 0; nb < num_k8_northbridges; nb++) {
+ if (!pvt_lookup[nb])
+ continue;
+
+ err = amd64_init_2nd_stage(pvt_lookup[nb]);
+ if (err)
+ goto err_exit;
+ }
+
+ amd64_setup_pci_device();
+
+ return 0;
+
+err_exit:
+ debugf0("'finish_setup' stage failed\n");
+ pci_unregister_driver(&amd64_pci_driver);
+
+ return err;
+}
+
+static void __exit amd64_edac_exit(void)
+{
+ if (amd64_ctl_pci)
+ edac_pci_release_generic_ctl(amd64_ctl_pci);
+
+ pci_unregister_driver(&amd64_pci_driver);
+}
+
+module_init(amd64_edac_init);
+module_exit(amd64_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
+ "Dave Peterson, Thayne Harbaugh");
+MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
+ EDAC_AMD64_VERSION);
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
new file mode 100644
index 00000000000..a159957e167
--- /dev/null
+++ b/drivers/edac/amd64_edac.h
@@ -0,0 +1,644 @@
+/*
+ * AMD64 class Memory Controller kernel module
+ *
+ * Copyright (c) 2009 SoftwareBitMaker.
+ * Copyright (c) 2009 Advanced Micro Devices, Inc.
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Originally Written by Thayne Harbaugh
+ *
+ * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
+ * - K8 CPU Revision D and greater support
+ *
+ * Changes by Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>:
+ * - Module largely rewritten, with new (and hopefully correct)
+ * code for dealing with node and chip select interleaving,
+ * various code cleanup, and bug fixes
+ * - Added support for memory hoisting using DRAM hole address
+ * register
+ *
+ * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
+ * -K8 Rev (1207) revision support added, required Revision
+ * specific mini-driver code to support Rev F as well as
+ * prior revisions
+ *
+ * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
+ * -Family 10h revision support added. New PCI Device IDs,
+ * indicating new changes. The actual register changes
+ * were slight, less than the Rev E to Rev F transition,
+ * but changing the PCI Device ID was the proper thing to
+ * do, as it provides for almost automatic family
+ * detection. The mods for Rev F required more family
+ * information detection.
+ *
+ * Changes/Fixes by Borislav Petkov <borislav.petkov@amd.com>:
+ * - misc fixes and code cleanups
+ *
+ * This module is based on the following documents
+ * (available from http://www.amd.com/):
+ *
+ * Title: BIOS and Kernel Developer's Guide for AMD Athlon 64 and AMD
+ * Opteron Processors
+ * AMD publication #: 26094
+ * Revision: 3.26
+ *
+ * Title: BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh
+ * Processors
+ * AMD publication #: 32559
+ * Revision: 3.00
+ * Issue Date: May 2006
+ *
+ * Title: BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h
+ * Processors
+ * AMD publication #: 31116
+ * Revision: 3.00
+ * Issue Date: September 07, 2007
+ *
+ * Sections in the first 2 documents are no longer in sync with each other.
+ * The Family 10h BKDG was totally re-written from scratch with a new
+ * presentation model.
+ * Therefore, comments that refer to a Document section might be off.
+ */
+
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/mmzone.h>
+#include <linux/edac.h>
+#include <asm/msr.h>
+#include "edac_core.h"
+
+#define amd64_printk(level, fmt, arg...) \
+ edac_printk(level, "amd64", fmt, ##arg)
+
+#define amd64_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "amd64", fmt, ##arg)
+
+/*
+ * Throughout the comments in this code, the following terms are used:
+ *
+ * SysAddr, DramAddr, and InputAddr
+ *
+ * These terms come directly from the amd64 documentation
+ * (AMD publication #26094). They are defined as follows:
+ *
+ * SysAddr:
+ * This is a physical address generated by a CPU core or a device
+ * doing DMA. If generated by a CPU core, a SysAddr is the result of
+ * a virtual to physical address translation by the CPU core's address
+ * translation mechanism (MMU).
+ *
+ * DramAddr:
+ * A DramAddr is derived from a SysAddr by subtracting an offset that
+ * depends on which node the SysAddr maps to and whether the SysAddr
+ * is within a range affected by memory hoisting. The DRAM Base
+ * (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers
+ * determine which node a SysAddr maps to.
+ *
+ * If the DRAM Hole Address Register (DHAR) is enabled and the SysAddr
+ * is within the range of addresses specified by this register, then
+ * a value x from the DHAR is subtracted from the SysAddr to produce a
+ * DramAddr. Here, x represents the base address for the node that
+ * the SysAddr maps to plus an offset due to memory hoisting. See
+ * section 3.4.8 and the comments in amd64_get_dram_hole_info() and
+ * sys_addr_to_dram_addr() below for more information.
+ *
+ * If the SysAddr is not affected by the DHAR then a value y is
+ * subtracted from the SysAddr to produce a DramAddr. Here, y is the
+ * base address for the node that the SysAddr maps to. See section
+ * 3.4.4 and the comments in sys_addr_to_dram_addr() below for more
+ * information.
+ *
+ * InputAddr:
+ * A DramAddr is translated to an InputAddr before being passed to the
+ * memory controller for the node that the DramAddr is associated
+ * with. The memory controller then maps the InputAddr to a csrow.
+ * If node interleaving is not in use, then the InputAddr has the same
+ * value as the DramAddr. Otherwise, the InputAddr is produced by
+ * discarding the bits used for node interleaving from the DramAddr.
+ * See section 3.4.4 for more information.
+ *
+ * The memory controller for a given node uses its DRAM CS Base and
+ * DRAM CS Mask registers to map an InputAddr to a csrow. See
+ * sections 3.5.4 and 3.5.5 for more information.
+ */
+
+#define EDAC_AMD64_VERSION " Ver: 3.2.0 " __DATE__
+#define EDAC_MOD_STR "amd64_edac"
+
+/* Extended Model from CPUID, for CPU Revision numbers */
+#define OPTERON_CPU_LE_REV_C 0
+#define OPTERON_CPU_REV_D 1
+#define OPTERON_CPU_REV_E 2
+
+/* NPT processors have the following Extended Models */
+#define OPTERON_CPU_REV_F 4
+#define OPTERON_CPU_REV_FA 5
+
+/* Hardware limit on ChipSelect rows per MC and processors per system */
+#define CHIPSELECT_COUNT 8
+#define DRAM_REG_COUNT 8
+
+
+/*
+ * PCI-defined configuration space registers
+ */
+
+
+/*
+ * Function 1 - Address Map
+ */
+#define K8_DRAM_BASE_LOW 0x40
+#define K8_DRAM_LIMIT_LOW 0x44
+#define K8_DHAR 0xf0
+
+#define DHAR_VALID BIT(0)
+#define F10_DRAM_MEM_HOIST_VALID BIT(1)
+
+#define DHAR_BASE_MASK 0xff000000
+#define dhar_base(dhar) (dhar & DHAR_BASE_MASK)
+
+#define K8_DHAR_OFFSET_MASK 0x0000ff00
+#define k8_dhar_offset(dhar) ((dhar & K8_DHAR_OFFSET_MASK) << 16)
+
+#define F10_DHAR_OFFSET_MASK 0x0000ff80
+ /* NOTE: Extra mask bit vs K8 */
+#define f10_dhar_offset(dhar) ((dhar & F10_DHAR_OFFSET_MASK) << 16)
+
+
+/* F10 High BASE/LIMIT registers */
+#define F10_DRAM_BASE_HIGH 0x140
+#define F10_DRAM_LIMIT_HIGH 0x144
+
+
+/*
+ * Function 2 - DRAM controller
+ */
+#define K8_DCSB0 0x40
+#define F10_DCSB1 0x140
+
+#define K8_DCSB_CS_ENABLE BIT(0)
+#define K8_DCSB_NPT_SPARE BIT(1)
+#define K8_DCSB_NPT_TESTFAIL BIT(2)
+
+/*
+ * REV E: select [31:21] and [15:9] from DCSB and the shift amount to form
+ * the address
+ */
+#define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL)
+#define REV_E_DCS_SHIFT 4
+#define REV_E_DCSM_COUNT 8
+
+#define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL)
+#define REV_F_F1Xh_DCS_SHIFT 8
+
+/*
+ * REV F and later: selects [28:19] and [13:5] from DCSB and the shift amount
+ * to form the address
+ */
+#define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL)
+#define REV_F_DCS_SHIFT 8
+#define REV_F_DCSM_COUNT 4
+#define F10_DCSM_COUNT 4
+#define F11_DCSM_COUNT 2
+
+/* DRAM CS Mask Registers */
+#define K8_DCSM0 0x60
+#define F10_DCSM1 0x160
+
+/* REV E: select [29:21] and [15:9] from DCSM */
+#define REV_E_DCSM_MASK_BITS 0x3FE0FE00
+
+/* unused bits [24:20] and [12:0] */
+#define REV_E_DCS_NOTUSED_BITS 0x01F01FFF
+
+/* REV F and later: select [28:19] and [13:5] from DCSM */
+#define REV_F_F1Xh_DCSM_MASK_BITS 0x1FF83FE0
+
+/* unused bits [26:22] and [12:0] */
+#define REV_F_F1Xh_DCS_NOTUSED_BITS 0x07C01FFF
+
+#define DBAM0 0x80
+#define DBAM1 0x180
+
+/* Extract the 'type' of the i'th DIMM from the DBAM reg value passed */
+#define DBAM_DIMM(i, reg) ((((reg) >> (4*i))) & 0xF)
+
+#define DBAM_MAX_VALUE 11
+
+
+#define F10_DCLR_0 0x90
+#define F10_DCLR_1 0x190
+#define REVE_WIDTH_128 BIT(16)
+#define F10_WIDTH_128 BIT(11)
+
+
+#define F10_DCHR_0 0x94
+#define F10_DCHR_1 0x194
+
+#define F10_DCHR_FOUR_RANK_DIMM BIT(18)
+#define F10_DCHR_Ddr3Mode BIT(8)
+#define F10_DCHR_MblMode BIT(6)
+
+
+#define F10_DCTL_SEL_LOW 0x110
+
+#define dct_sel_baseaddr(pvt) \
+ ((pvt->dram_ctl_select_low) & 0xFFFFF800)
+
+#define dct_sel_interleave_addr(pvt) \
+ (((pvt->dram_ctl_select_low) >> 6) & 0x3)
+
+enum {
+ F10_DCTL_SEL_LOW_DctSelHiRngEn = BIT(0),
+ F10_DCTL_SEL_LOW_DctSelIntLvEn = BIT(2),
+ F10_DCTL_SEL_LOW_DctGangEn = BIT(4),
+ F10_DCTL_SEL_LOW_DctDatIntLv = BIT(5),
+ F10_DCTL_SEL_LOW_DramEnable = BIT(8),
+ F10_DCTL_SEL_LOW_MemCleared = BIT(10),
+};
+
+#define dct_high_range_enabled(pvt) \
+ (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctSelHiRngEn)
+
+#define dct_interleave_enabled(pvt) \
+ (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctSelIntLvEn)
+
+#define dct_ganging_enabled(pvt) \
+ (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctGangEn)
+
+#define dct_data_intlv_enabled(pvt) \
+ (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctDatIntLv)
+
+#define dct_dram_enabled(pvt) \
+ (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DramEnable)
+
+#define dct_memory_cleared(pvt) \
+ (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_MemCleared)
+
+
+#define F10_DCTL_SEL_HIGH 0x114
+
+
+/*
+ * Function 3 - Misc Control
+ */
+#define K8_NBCTL 0x40
+
+/* Correctable ECC error reporting enable */
+#define K8_NBCTL_CECCEn BIT(0)
+
+/* UnCorrectable ECC error reporting enable */
+#define K8_NBCTL_UECCEn BIT(1)
+
+#define K8_NBCFG 0x44
+#define K8_NBCFG_CHIPKILL BIT(23)
+#define K8_NBCFG_ECC_ENABLE BIT(22)
+
+#define K8_NBSL 0x48
+
+
+#define EXTRACT_HIGH_SYNDROME(x) (((x) >> 24) & 0xff)
+#define EXTRACT_EXT_ERROR_CODE(x) (((x) >> 16) & 0x1f)
+
+/* Family F10h: Normalized Extended Error Codes */
+#define F10_NBSL_EXT_ERR_RES 0x0
+#define F10_NBSL_EXT_ERR_CRC 0x1
+#define F10_NBSL_EXT_ERR_SYNC 0x2
+#define F10_NBSL_EXT_ERR_MST 0x3
+#define F10_NBSL_EXT_ERR_TGT 0x4
+#define F10_NBSL_EXT_ERR_GART 0x5
+#define F10_NBSL_EXT_ERR_RMW 0x6
+#define F10_NBSL_EXT_ERR_WDT 0x7
+#define F10_NBSL_EXT_ERR_ECC 0x8
+#define F10_NBSL_EXT_ERR_DEV 0x9
+#define F10_NBSL_EXT_ERR_LINK_DATA 0xA
+
+/* Next two are overloaded values */
+#define F10_NBSL_EXT_ERR_LINK_PROTO 0xB
+#define F10_NBSL_EXT_ERR_L3_PROTO 0xB
+
+#define F10_NBSL_EXT_ERR_NB_ARRAY 0xC
+#define F10_NBSL_EXT_ERR_DRAM_PARITY 0xD
+#define F10_NBSL_EXT_ERR_LINK_RETRY 0xE
+
+/* Next two are overloaded values */
+#define F10_NBSL_EXT_ERR_GART_WALK 0xF
+#define F10_NBSL_EXT_ERR_DEV_WALK 0xF
+
+/* 0x10 to 0x1B: Reserved */
+#define F10_NBSL_EXT_ERR_L3_DATA 0x1C
+#define F10_NBSL_EXT_ERR_L3_TAG 0x1D
+#define F10_NBSL_EXT_ERR_L3_LRU 0x1E
+
+/* K8: Normalized Extended Error Codes */
+#define K8_NBSL_EXT_ERR_ECC 0x0
+#define K8_NBSL_EXT_ERR_CRC 0x1
+#define K8_NBSL_EXT_ERR_SYNC 0x2
+#define K8_NBSL_EXT_ERR_MST 0x3
+#define K8_NBSL_EXT_ERR_TGT 0x4
+#define K8_NBSL_EXT_ERR_GART 0x5
+#define K8_NBSL_EXT_ERR_RMW 0x6
+#define K8_NBSL_EXT_ERR_WDT 0x7
+#define K8_NBSL_EXT_ERR_CHIPKILL_ECC 0x8
+#define K8_NBSL_EXT_ERR_DRAM_PARITY 0xD
+
+#define EXTRACT_ERROR_CODE(x) ((x) & 0xffff)
+#define TEST_TLB_ERROR(x) (((x) & 0xFFF0) == 0x0010)
+#define TEST_MEM_ERROR(x) (((x) & 0xFF00) == 0x0100)
+#define TEST_BUS_ERROR(x) (((x) & 0xF800) == 0x0800)
+#define EXTRACT_TT_CODE(x) (((x) >> 2) & 0x3)
+#define EXTRACT_II_CODE(x) (((x) >> 2) & 0x3)
+#define EXTRACT_LL_CODE(x) (((x) >> 0) & 0x3)
+#define EXTRACT_RRRR_CODE(x) (((x) >> 4) & 0xf)
+#define EXTRACT_TO_CODE(x) (((x) >> 8) & 0x1)
+#define EXTRACT_PP_CODE(x) (((x) >> 9) & 0x3)
+
+/*
+ * The following are for BUS type errors AFTER values have been normalized by
+ * shifting right
+ */
+#define K8_NBSL_PP_SRC 0x0
+#define K8_NBSL_PP_RES 0x1
+#define K8_NBSL_PP_OBS 0x2
+#define K8_NBSL_PP_GENERIC 0x3
+
+
+#define K8_NBSH 0x4C
+
+#define K8_NBSH_VALID_BIT BIT(31)
+#define K8_NBSH_OVERFLOW BIT(30)
+#define K8_NBSH_UNCORRECTED_ERR BIT(29)
+#define K8_NBSH_ERR_ENABLE BIT(28)
+#define K8_NBSH_MISC_ERR_VALID BIT(27)
+#define K8_NBSH_VALID_ERROR_ADDR BIT(26)
+#define K8_NBSH_PCC BIT(25)
+#define K8_NBSH_CECC BIT(14)
+#define K8_NBSH_UECC BIT(13)
+#define K8_NBSH_ERR_SCRUBER BIT(8)
+#define K8_NBSH_CORE3 BIT(3)
+#define K8_NBSH_CORE2 BIT(2)
+#define K8_NBSH_CORE1 BIT(1)
+#define K8_NBSH_CORE0 BIT(0)
+
+#define EXTRACT_LDT_LINK(x) (((x) >> 4) & 0x7)
+#define EXTRACT_ERR_CPU_MAP(x) ((x) & 0xF)
+#define EXTRACT_LOW_SYNDROME(x) (((x) >> 15) & 0xff)
+
+
+#define K8_NBEAL 0x50
+#define K8_NBEAH 0x54
+#define K8_SCRCTRL 0x58
+
+#define F10_NB_CFG_LOW 0x88
+#define F10_NB_CFG_LOW_ENABLE_EXT_CFG BIT(14)
+
+#define F10_NB_CFG_HIGH 0x8C
+
+#define F10_ONLINE_SPARE 0xB0
+#define F10_ONLINE_SPARE_SWAPDONE0(x) ((x) & BIT(1))
+#define F10_ONLINE_SPARE_SWAPDONE1(x) ((x) & BIT(3))
+#define F10_ONLINE_SPARE_BADDRAM_CS0(x) (((x) >> 4) & 0x00000007)
+#define F10_ONLINE_SPARE_BADDRAM_CS1(x) (((x) >> 8) & 0x00000007)
+
+#define F10_NB_ARRAY_ADDR 0xB8
+
+#define F10_NB_ARRAY_DRAM_ECC 0x80000000
+
+/* Bits [2:1] are used to select 16-byte section within a 64-byte cacheline */
+#define SET_NB_ARRAY_ADDRESS(section) (((section) & 0x3) << 1)
+
+#define F10_NB_ARRAY_DATA 0xBC
+
+#define SET_NB_DRAM_INJECTION_WRITE(word, bits) \
+ (BIT(((word) & 0xF) + 20) | \
+ BIT(17) | \
+ ((bits) & 0xF))
+
+#define SET_NB_DRAM_INJECTION_READ(word, bits) \
+ (BIT(((word) & 0xF) + 20) | \
+ BIT(16) | \
+ ((bits) & 0xF))
+
+#define K8_NBCAP 0xE8
+#define K8_NBCAP_CORES (BIT(12)|BIT(13))
+#define K8_NBCAP_CHIPKILL BIT(4)
+#define K8_NBCAP_SECDED BIT(3)
+#define K8_NBCAP_8_NODE BIT(2)
+#define K8_NBCAP_DUAL_NODE BIT(1)
+#define K8_NBCAP_DCT_DUAL BIT(0)
+
+/*
+ * MSR Regs
+ */
+#define K8_MSR_MCGCTL 0x017b
+#define K8_MSR_MCGCTL_NBE BIT(4)
+
+#define K8_MSR_MC4CTL 0x0410
+#define K8_MSR_MC4STAT 0x0411
+#define K8_MSR_MC4ADDR 0x0412
+
+/* AMD sets the first MC device at device ID 0x18. */
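+/* E.g. node 2's MC appears as PCI device 0x1a, so this returns 2 for it. */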
+static inline int get_mc_node_id_from_pdev(struct pci_dev *pdev)
+{
+ return PCI_SLOT(pdev->devfn) - 0x18;
+}
+
+enum amd64_chipset_families {
+ K8_CPUS = 0,
+ F10_CPUS,
+ F11_CPUS,
+};
+
+/*
+ * Structure to hold:
+ *
+ * 1) dynamically read status and error address HW registers
+ * 2) sysfs entered values
+ * 3) MCE values
+ *
+ * Which of these it holds depends on how the module was entered.
+ */
+struct amd64_error_info_regs {
+ u32 nbcfg;
+ u32 nbsh;
+ u32 nbsl;
+ u32 nbeah;
+ u32 nbeal;
+};
+
+/* Error injection control structure */
+struct error_injection {
+ u32 section;
+ u32 word;
+ u32 bit_map;
+};
+
+struct amd64_pvt {
+ /* pci_device handles which we utilize */
+ struct pci_dev *addr_f1_ctl;
+ struct pci_dev *dram_f2_ctl;
+ struct pci_dev *misc_f3_ctl;
+
+ int mc_node_id; /* MC index of this MC node */
+ int ext_model; /* extended model value of this node */
+
+ struct low_ops *ops; /* pointer to per PCI Device ID func table */
+
+ int channel_count;
+
+ /* Raw registers */
+ u32 dclr0; /* DRAM Configuration Low DCT0 reg */
+ u32 dclr1; /* DRAM Configuration Low DCT1 reg */
+ u32 dchr0; /* DRAM Configuration High DCT0 reg */
+ u32 dchr1; /* DRAM Configuration High DCT1 reg */
+ u32 nbcap; /* North Bridge Capabilities */
+ u32 nbcfg; /* F10 North Bridge Configuration */
+ u32 ext_nbcfg; /* Extended F10 North Bridge Configuration */
+ u32 dhar; /* DRAM Hoist reg */
+ u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */
+ u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
+
+ /* DRAM CS Base Address Registers F2x[1,0][5C:40] */
+ u32 dcsb0[CHIPSELECT_COUNT];
+ u32 dcsb1[CHIPSELECT_COUNT];
+
+ /* DRAM CS Mask Registers F2x[1,0][6C:60] */
+ u32 dcsm0[CHIPSELECT_COUNT];
+ u32 dcsm1[CHIPSELECT_COUNT];
+
+ /*
+ * Decoded parts of DRAM BASE and LIMIT Registers
+ * F1x[78,70,68,60,58,50,48,40]
+ */
+ u64 dram_base[DRAM_REG_COUNT];
+ u64 dram_limit[DRAM_REG_COUNT];
+ u8 dram_IntlvSel[DRAM_REG_COUNT];
+ u8 dram_IntlvEn[DRAM_REG_COUNT];
+ u8 dram_DstNode[DRAM_REG_COUNT];
+ u8 dram_rw_en[DRAM_REG_COUNT];
+
+ /*
+ * The following fields are set at (load) run time, after CPU revision
+ * has been determined, since the dct_base and dct_mask registers vary
+ * based on revision
+ */
+ u32 dcsb_base; /* DCSB base bits */
+ u32 dcsm_mask; /* DCSM mask bits */
+ u32 num_dcsm; /* Number of DCSM registers */
+ u32 dcs_mask_notused; /* DCSM notused mask bits */
+ u32 dcs_shift; /* DCSB and DCSM shift value */
+
+ u64 top_mem; /* top of memory below 4GB */
+ u64 top_mem2; /* top of memory above 4GB */
+
+ u32 dram_ctl_select_low; /* DRAM Controller Select Low Reg */
+ u32 dram_ctl_select_high; /* DRAM Controller Select High Reg */
+ u32 online_spare; /* On-Line spare Reg */
+
+ /* temp storage for when input is received from sysfs */
+ struct amd64_error_info_regs ctl_error_info;
+
+ /* place to store error injection parameters prior to issue */
+ struct error_injection injection;
+
+ /* Save old hw registers' values before we modify them */
+ u32 nbctl_mcgctl_saved; /* When true, following 2 are valid */
+ u32 old_nbctl;
+ unsigned long old_mcgctl; /* per core on this node */
+
+ /* MC Type Index value: socket F vs Family 10h */
+ u32 mc_type_index;
+
+ /* misc settings */
+ struct flags {
+ unsigned long cf8_extcfg:1;
+ } flags;
+};
+
+struct scrubrate {
+ u32 scrubval; /* bit pattern for scrub rate */
+ u32 bandwidth; /* bandwidth consumed (bytes/sec) */
+};
+
+extern struct scrubrate scrubrates[23];
+extern u32 revf_quad_ddr2_shift[16];
+extern const char *tt_msgs[4];
+extern const char *ll_msgs[4];
+extern const char *rrrr_msgs[16];
+extern const char *to_msgs[2];
+extern const char *pp_msgs[4];
+extern const char *ii_msgs[4];
+extern const char *ext_msgs[32];
+extern const char *htlink_msgs[8];
+
+#ifdef CONFIG_EDAC_DEBUG
+#define NUM_DBG_ATTRS 9
+#else
+#define NUM_DBG_ATTRS 0
+#endif
+
+#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
+#define NUM_INJ_ATTRS 5
+#else
+#define NUM_INJ_ATTRS 0
+#endif
+
+extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS],
+ amd64_inj_attrs[NUM_INJ_ATTRS];
+
+/*
+ * Each of the PCI Device ID types has its own set of hardware accessor
+ * functions and per-device encoding/decoding logic.
+ */
+struct low_ops {
+ int (*probe_valid_hardware)(struct amd64_pvt *pvt);
+ int (*early_channel_count)(struct amd64_pvt *pvt);
+
+ u64 (*get_error_address)(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info);
+ void (*read_dram_base_limit)(struct amd64_pvt *pvt, int dram);
+ void (*read_dram_ctl_register)(struct amd64_pvt *pvt);
+ void (*map_sysaddr_to_csrow)(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info,
+ u64 SystemAddr);
+ int (*dbam_map_to_pages)(struct amd64_pvt *pvt, int dram_map);
+};
+
+struct amd64_family_type {
+ const char *ctl_name;
+ u16 addr_f1_ctl;
+ u16 misc_f3_ctl;
+ struct low_ops ops;
+};
+
+static struct amd64_family_type amd64_family_types[];
+
+static inline const char *get_amd_family_name(int index)
+{
+ return amd64_family_types[index].ctl_name;
+}
+
+static inline struct low_ops *family_ops(int index)
+{
+ return &amd64_family_types[index].ops;
+}
+
+/*
+ * For future CPU versions, verify these values as new 'slow' scrub rates appear
+ * and adjust the minimum skip values for the supported CPUs accordingly.
+ */
+#define K8_MIN_SCRUB_RATE_BITS 0x0
+#define F10_MIN_SCRUB_RATE_BITS 0x5
+#define F11_MIN_SCRUB_RATE_BITS 0x6
+
+int amd64_process_error_info(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info,
+ int handle_errors);
+int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
+ u64 *hole_offset, u64 *hole_size);
diff --git a/drivers/edac/amd64_edac_dbg.c b/drivers/edac/amd64_edac_dbg.c
new file mode 100644
index 00000000000..0a41b248a4a
--- /dev/null
+++ b/drivers/edac/amd64_edac_dbg.c
@@ -0,0 +1,255 @@
+#include "amd64_edac.h"
+
+/*
+ * Accept a hex value and store it in the virtual error register file fields
+ * nbeal and nbeah. Assumes the virtual error values for NBSL, NBSH and NBCFG
+ * have already been set. Then map the error values to an MC, CSROW and
+ * CHANNEL.
+ */
+static ssize_t amd64_nbea_store(struct mem_ctl_info *mci, const char *data,
+ size_t count)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long long value;
+ int ret = 0;
+
+ ret = strict_strtoull(data, 16, &value);
+ if (ret != -EINVAL) {
+ debugf0("received NBEA= 0x%llx\n", value);
+
+ /* place the value into the virtual error packet */
+ pvt->ctl_error_info.nbeal = (u32) value;
+ value >>= 32;
+ pvt->ctl_error_info.nbeah = (u32) value;
+
+ /* Process the Mapping request */
+ /* TODO: Add race prevention */
+ amd64_process_error_info(mci, &pvt->ctl_error_info, 1);
+
+ return count;
+ }
+ return ret;
+}
+
+/* display the last NBEA (MCA NB Address (MC4_ADDR)) value written */
+static ssize_t amd64_nbea_show(struct mem_ctl_info *mci, char *data)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ u64 value;
+
+ value = pvt->ctl_error_info.nbeah;
+ value <<= 32;
+ value |= pvt->ctl_error_info.nbeal;
+
+ return sprintf(data, "%llx\n", value);
+}
+
+/* store the NBSL (MCA NB Status Low (MC4_STATUS)) value the user desires */
+static ssize_t amd64_nbsl_store(struct mem_ctl_info *mci, const char *data,
+ size_t count)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int ret = 0;
+
+ ret = strict_strtoul(data, 16, &value);
+ if (ret != -EINVAL) {
+ debugf0("received NBSL= 0x%lx\n", value);
+
+ pvt->ctl_error_info.nbsl = (u32) value;
+
+ return count;
+ }
+ return ret;
+}
+
+/* display the last NBSL value written */
+static ssize_t amd64_nbsl_show(struct mem_ctl_info *mci, char *data)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ u32 value;
+
+ value = pvt->ctl_error_info.nbsl;
+
+ return sprintf(data, "%x\n", value);
+}
+
+/* store the NBSH (MCA NB Status High) value the user desires */
+static ssize_t amd64_nbsh_store(struct mem_ctl_info *mci, const char *data,
+ size_t count)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int ret = 0;
+
+ ret = strict_strtoul(data, 16, &value);
+ if (ret != -EINVAL) {
+ debugf0("received NBSH= 0x%lx\n", value);
+
+ pvt->ctl_error_info.nbsh = (u32) value;
+
+ return count;
+ }
+ return ret;
+}
+
+/* display the last NBSH value written */
+static ssize_t amd64_nbsh_show(struct mem_ctl_info *mci, char *data)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ u32 value;
+
+ value = pvt->ctl_error_info.nbsh;
+
+ return sprintf(data, "%x\n", value);
+}
+
+/* accept and store the NBCFG (MCA NB Configuration) value the user desires */
+static ssize_t amd64_nbcfg_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int ret = 0;
+
+ ret = strict_strtoul(data, 16, &value);
+ if (ret != -EINVAL) {
+ debugf0("received NBCFG= 0x%lx\n", value);
+
+ pvt->ctl_error_info.nbcfg = (u32) value;
+
+ return count;
+ }
+ return ret;
+}
+
+/* various show routines for the controls of an MCI */
+static ssize_t amd64_nbcfg_show(struct mem_ctl_info *mci, char *data)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ return sprintf(data, "%x\n", pvt->ctl_error_info.nbcfg);
+}
+
+
+static ssize_t amd64_dhar_show(struct mem_ctl_info *mci, char *data)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ return sprintf(data, "%x\n", pvt->dhar);
+}
+
+
+static ssize_t amd64_dbam_show(struct mem_ctl_info *mci, char *data)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ return sprintf(data, "%x\n", pvt->dbam0);
+}
+
+
+static ssize_t amd64_topmem_show(struct mem_ctl_info *mci, char *data)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ return sprintf(data, "%llx\n", pvt->top_mem);
+}
+
+
+static ssize_t amd64_topmem2_show(struct mem_ctl_info *mci, char *data)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ return sprintf(data, "%llx\n", pvt->top_mem2);
+}
+
+static ssize_t amd64_hole_show(struct mem_ctl_info *mci, char *data)
+{
+ u64 hole_base = 0;
+ u64 hole_offset = 0;
+ u64 hole_size = 0;
+
+ amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
+
+ return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset,
+ hole_size);
+}
+
+/*
+ * update NUM_DBG_ATTRS in case you add new members
+ */
+struct mcidev_sysfs_attribute amd64_dbg_attrs[] = {
+
+ {
+ .attr = {
+ .name = "nbea_ctl",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = amd64_nbea_show,
+ .store = amd64_nbea_store,
+ },
+ {
+ .attr = {
+ .name = "nbsl_ctl",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = amd64_nbsl_show,
+ .store = amd64_nbsl_store,
+ },
+ {
+ .attr = {
+ .name = "nbsh_ctl",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = amd64_nbsh_show,
+ .store = amd64_nbsh_store,
+ },
+ {
+ .attr = {
+ .name = "nbcfg_ctl",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = amd64_nbcfg_show,
+ .store = amd64_nbcfg_store,
+ },
+ {
+ .attr = {
+ .name = "dhar",
+ .mode = (S_IRUGO)
+ },
+ .show = amd64_dhar_show,
+ .store = NULL,
+ },
+ {
+ .attr = {
+ .name = "dbam",
+ .mode = (S_IRUGO)
+ },
+ .show = amd64_dbam_show,
+ .store = NULL,
+ },
+ {
+ .attr = {
+ .name = "topmem",
+ .mode = (S_IRUGO)
+ },
+ .show = amd64_topmem_show,
+ .store = NULL,
+ },
+ {
+ .attr = {
+ .name = "topmem2",
+ .mode = (S_IRUGO)
+ },
+ .show = amd64_topmem2_show,
+ .store = NULL,
+ },
+ {
+ .attr = {
+ .name = "dram_hole",
+ .mode = (S_IRUGO)
+ },
+ .show = amd64_hole_show,
+ .store = NULL,
+ },
+};
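These attributes appear under the EDAC memory-controller sysfs directory. A hedged userspace sketch of reading dram_hole follows; the /sys/devices/system/edac/mc/mc0/ path is an assumption about where the EDAC core places mci attributes:

#include <stdio.h>

int main(void)
{
	/* assumed mci sysfs location; adjust for the running kernel */
	FILE *f = fopen("/sys/devices/system/edac/mc/mc0/dram_hole", "r");
	unsigned long long base, offset, size;

	if (!f)
		return 1;
	if (fscanf(f, "%llx %llx %llx", &base, &offset, &size) == 3)
		printf("hole: base=0x%llx offset=0x%llx size=0x%llx\n",
		       base, offset, size);
	fclose(f);
	return 0;
}
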
diff --git a/drivers/edac/amd64_edac_err_types.c b/drivers/edac/amd64_edac_err_types.c
new file mode 100644
index 00000000000..f212ff12a9d
--- /dev/null
+++ b/drivers/edac/amd64_edac_err_types.c
@@ -0,0 +1,161 @@
+#include "amd64_edac.h"
+
+/*
+ * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
+ * for DDR2 DRAM mapping.
+ */
+u32 revf_quad_ddr2_shift[] = {
+ 0, /* 0000b NULL DIMM (128mb) */
+ 28, /* 0001b 256mb */
+ 29, /* 0010b 512mb */
+ 29, /* 0011b 512mb */
+ 29, /* 0100b 512mb */
+ 30, /* 0101b 1gb */
+ 30, /* 0110b 1gb */
+ 31, /* 0111b 2gb */
+ 31, /* 1000b 2gb */
+ 32, /* 1001b 4gb */
+ 32, /* 1010b 4gb */
+ 33, /* 1011b 8gb */
+ 0, /* 1100b future */
+ 0, /* 1101b future */
+ 0, /* 1110b future */
+ 0 /* 1111b future */
+};
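Each non-zero entry is the log2 of the chip-select size in bytes encoded by that DBAM nibble (28 -> 256MB, 30 -> 1GB, 33 -> 8GB). A minimal standalone sketch of the lookup under that reading; the table is copied conceptually and the helper is illustrative, not a driver function:

#include <stdio.h>

/* values taken from revf_quad_ddr2_shift[] above */
static const unsigned int ddr2_shift[16] = {
	0, 28, 29, 29, 29, 30, 30, 31, 31, 32, 32, 33, 0, 0, 0, 0
};

int main(void)
{
	unsigned int nibble = 5;	/* DBAM code 0101b */
	unsigned long long bytes = ddr2_shift[nibble] ?
				   (1ULL << ddr2_shift[nibble]) : 0;

	/* prints "DBAM nibble 5 -> 1024 MB" */
	printf("DBAM nibble %u -> %llu MB\n", nibble, bytes >> 20);
	return 0;
}
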
+
+/*
+ * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
+ * bandwidth to a valid bit pattern. The 'set' operation finds the matching or
+ * next higher value.
+ *
+ * FIXME: Produce a better mapping/linearisation.
+ */
+
+struct scrubrate scrubrates[] = {
+ { 0x01, 1600000000UL},
+ { 0x02, 800000000UL},
+ { 0x03, 400000000UL},
+ { 0x04, 200000000UL},
+ { 0x05, 100000000UL},
+ { 0x06, 50000000UL},
+ { 0x07, 25000000UL},
+ { 0x08, 12284069UL},
+ { 0x09, 6274509UL},
+ { 0x0A, 3121951UL},
+ { 0x0B, 1560975UL},
+ { 0x0C, 781440UL},
+ { 0x0D, 390720UL},
+ { 0x0E, 195300UL},
+ { 0x0F, 97650UL},
+ { 0x10, 48854UL},
+ { 0x11, 24427UL},
+ { 0x12, 12213UL},
+ { 0x13, 6101UL},
+ { 0x14, 3051UL},
+ { 0x15, 1523UL},
+ { 0x16, 761UL},
+ { 0x00, 0UL}, /* scrubbing off */
+};
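The 'matching or next higher value' semantics can be sketched as a scan for the smallest bandwidth that still meets the request, falling back to 0x00 (scrubbing off). This is an illustration of the table's intent, not the driver's actual set routine, and only a subset of the entries is reproduced:

#include <stdio.h>

struct scrubrate { unsigned int scrubval; unsigned long bandwidth; };

/* a few entries from scrubrates[] above, highest to lowest bandwidth */
static const struct scrubrate rates[] = {
	{ 0x01, 1600000000UL }, { 0x05, 100000000UL },
	{ 0x0A, 3121951UL },    { 0x16, 761UL }, { 0x00, 0UL },
};

/* smallest bandwidth >= request wins; 0x00 means scrubbing off */
static unsigned int pick_scrubval(unsigned long requested_bw)
{
	unsigned int best = 0x00;
	unsigned long best_bw = ~0UL;
	int i;

	for (i = 0; rates[i].scrubval != 0x00; i++)
		if (rates[i].bandwidth >= requested_bw &&
		    rates[i].bandwidth < best_bw) {
			best = rates[i].scrubval;
			best_bw = rates[i].bandwidth;
		}
	return best;
}

int main(void)
{
	/* with the reduced table above this prints 0x05 (100 MB/s) */
	printf("scrubval for 5 MB/s: 0x%02x\n", pick_scrubval(5000000UL));
	return 0;
}
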
+
+/*
+ * String representations for the different MCA-reported error types; see F3x48
+ * or MSR0000_0411.
+ */
+const char *tt_msgs[] = { /* transaction type */
+ "instruction",
+ "data",
+ "generic",
+ "reserved"
+};
+
+const char *ll_msgs[] = { /* cache level */
+ "L0",
+ "L1",
+ "L2",
+ "L3/generic"
+};
+
+const char *rrrr_msgs[] = {
+ "generic",
+ "generic read",
+ "generic write",
+ "data read",
+ "data write",
+ "inst fetch",
+ "prefetch",
+ "evict",
+ "snoop",
+ "reserved RRRR= 9",
+ "reserved RRRR= 10",
+ "reserved RRRR= 11",
+ "reserved RRRR= 12",
+ "reserved RRRR= 13",
+ "reserved RRRR= 14",
+ "reserved RRRR= 15"
+};
+
+const char *pp_msgs[] = { /* participating processor */
+ "local node originated (SRC)",
+ "local node responded to request (RES)",
+ "local node observed as 3rd party (OBS)",
+ "generic"
+};
+
+const char *to_msgs[] = {
+ "no timeout",
+ "timed out"
+};
+
+const char *ii_msgs[] = { /* memory or i/o */
+ "mem access",
+ "reserved",
+ "i/o access",
+ "generic"
+};
+
+/* Map the 5 bits of Extended Error code to the string table. */
+const char *ext_msgs[] = { /* extended error */
+ "K8 ECC error/F10 reserved", /* 0_0000b */
+ "CRC error", /* 0_0001b */
+ "sync error", /* 0_0010b */
+ "mst abort", /* 0_0011b */
+ "tgt abort", /* 0_0100b */
+ "GART error", /* 0_0101b */
+ "RMW error", /* 0_0110b */
+ "Wdog timer error", /* 0_0111b */
+ "F10-ECC/K8-Chipkill error", /* 0_1000b */
+ "DEV Error", /* 0_1001b */
+ "Link Data error", /* 0_1010b */
+ "Link or L3 Protocol error", /* 0_1011b */
+ "NB Array error", /* 0_1100b */
+ "DRAM Parity error", /* 0_1101b */
+ "Link Retry/GART Table Walk/DEV Table Walk error", /* 0_1110b */
+ "Res 0x0ff error", /* 0_1111b */
+ "Res 0x100 error", /* 1_0000b */
+ "Res 0x101 error", /* 1_0001b */
+ "Res 0x102 error", /* 1_0010b */
+ "Res 0x103 error", /* 1_0011b */
+ "Res 0x104 error", /* 1_0100b */
+ "Res 0x105 error", /* 1_0101b */
+ "Res 0x106 error", /* 1_0110b */
+ "Res 0x107 error", /* 1_0111b */
+ "Res 0x108 error", /* 1_1000b */
+ "Res 0x109 error", /* 1_1001b */
+ "Res 0x10A error", /* 1_1010b */
+ "Res 0x10B error", /* 1_1011b */
+ "L3 Cache Data error", /* 1_1100b */
+ "L3 CacheTag error", /* 1_1101b */
+ "L3 Cache LRU error", /* 1_1110b */
+ "Res 0x1FF error" /* 1_1111b */
+};
+
+const char *htlink_msgs[] = {
+ "none",
+ "1",
+ "2",
+ "1 2",
+ "3",
+ "1 3",
+ "2 3",
+ "1 2 3"
+};
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
new file mode 100644
index 00000000000..d3675b76b3a
--- /dev/null
+++ b/drivers/edac/amd64_edac_inj.c
@@ -0,0 +1,185 @@
+#include "amd64_edac.h"
+
+/*
+ * store error injection section value which refers to one of the 4 16-byte
+ * sections within a 64-byte cacheline
+ *
+ * range: 0..3
+ */
+static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int ret = 0;
+
+ ret = strict_strtoul(data, 10, &value);
+ if (ret != -EINVAL) {
+ pvt->injection.section = (u32) value;
+ return count;
+ }
+ return ret;
+}
+
+/*
+ * store error injection word value which refers to one of the 9 16-bit words
+ * of the 16-byte (128-bit + ECC bits) section
+ *
+ * range: 0..8
+ */
+static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int ret = 0;
+
+ ret = strict_strtoul(data, 10, &value);
+ if (ret != -EINVAL) {
+
+ value = (value <= 8) ? value : 0;
+ pvt->injection.word = (u32) value;
+
+ return count;
+ }
+ return ret;
+}
+
+/*
+ * store the 16-bit error injection vector which enables injecting errors into
+ * the corresponding bits within the error injection word above. When used
+ * during a DRAM ECC read, it holds the contents of the DRAM ECC bits.
+ */
+static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int ret = 0;
+
+ ret = strict_strtoul(data, 16, &value);
+ if (ret != -EINVAL) {
+
+ pvt->injection.bit_map = (u32) value & 0xFFFF;
+
+ return count;
+ }
+ return ret;
+}
+
+/*
+ * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
+ * fields needed by the injection registers and read the NB Array Data Port.
+ */
+static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ u32 section, word_bits;
+ int ret = 0;
+
+ ret = strict_strtoul(data, 10, &value);
+ if (ret != -EINVAL) {
+
+ /* Form value to choose 16-byte section of cacheline */
+ section = F10_NB_ARRAY_DRAM_ECC |
+ SET_NB_ARRAY_ADDRESS(pvt->injection.section);
+ pci_write_config_dword(pvt->misc_f3_ctl,
+ F10_NB_ARRAY_ADDR, section);
+
+ word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word,
+ pvt->injection.bit_map);
+
+ /* Issue 'word' and 'bit' along with the READ request */
+ pci_write_config_dword(pvt->misc_f3_ctl,
+ F10_NB_ARRAY_DATA, word_bits);
+
+ debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
+
+ return count;
+ }
+ return ret;
+}
+
+/*
+ * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
+ * fields needed by the injection registers.
+ */
+static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ u32 section, word_bits;
+ int ret = 0;
+
+ ret = strict_strtoul(data, 10, &value);
+ if (ret != -EINVAL) {
+
+ /* Form value to choose 16-byte section of cacheline */
+ section = F10_NB_ARRAY_DRAM_ECC |
+ SET_NB_ARRAY_ADDRESS(pvt->injection.section);
+ pci_write_config_dword(pvt->misc_f3_ctl,
+ F10_NB_ARRAY_ADDR, section);
+
+ word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word,
+ pvt->injection.bit_map);
+
+ /* Issue 'word' and 'bit' along with the WRITE request */
+ pci_write_config_dword(pvt->misc_f3_ctl,
+ F10_NB_ARRAY_DATA, word_bits);
+
+ debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
+
+ return count;
+ }
+ return ret;
+}
+
+/*
+ * update NUM_INJ_ATTRS in case you add new members
+ */
+struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
+
+ {
+ .attr = {
+ .name = "inject_section",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = NULL,
+ .store = amd64_inject_section_store,
+ },
+ {
+ .attr = {
+ .name = "inject_word",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = NULL,
+ .store = amd64_inject_word_store,
+ },
+ {
+ .attr = {
+ .name = "inject_ecc_vector",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = NULL,
+ .store = amd64_inject_ecc_vector_store,
+ },
+ {
+ .attr = {
+ .name = "inject_write",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = NULL,
+ .store = amd64_inject_write_store,
+ },
+ {
+ .attr = {
+ .name = "inject_read",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = NULL,
+ .store = amd64_inject_read_store,
+ },
+};
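An injection run is then a staged sequence of sysfs writes: section, word and ECC vector first, then a poke of inject_write or inject_read. A hedged userspace sketch; the mc0 directory path is an assumption about the EDAC sysfs layout:

#include <stdio.h>

#define MCI_DIR "/sys/devices/system/edac/mc/mc0/"	/* assumed location */

static int put(const char *attr, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), MCI_DIR "%s", attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%s", val);
	return fclose(f);
}

int main(void)
{
	put("inject_section", "1");	/* second 16-byte section of the line */
	put("inject_word", "0");	/* first 16-bit word of that section */
	put("inject_ecc_vector", "1");	/* flip bit 0 */
	put("inject_write", "1");	/* trigger the DRAM ECC write */
	return 0;
}
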
diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c
index 61469218112..2cb58ef743e 100644
--- a/drivers/edac/amd8111_edac.c
+++ b/drivers/edac/amd8111_edac.c
@@ -389,7 +389,7 @@ static int amd8111_dev_probe(struct pci_dev *dev,
dev_info->edac_dev->dev = &dev_info->dev->dev;
dev_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR;
dev_info->edac_dev->ctl_name = dev_info->ctl_name;
- dev_info->edac_dev->dev_name = dev_info->dev->dev.bus_id;
+ dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev);
if (edac_op_state == EDAC_OPSTATE_POLL)
dev_info->edac_dev->edac_check = dev_info->check;
@@ -473,7 +473,7 @@ static int amd8111_pci_probe(struct pci_dev *dev,
pci_info->edac_dev->dev = &pci_info->dev->dev;
pci_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR;
pci_info->edac_dev->ctl_name = pci_info->ctl_name;
- pci_info->edac_dev->dev_name = pci_info->dev->dev.bus_id;
+ pci_info->edac_dev->dev_name = dev_name(&pci_info->dev->dev);
if (edac_op_state == EDAC_OPSTATE_POLL)
pci_info->edac_dev->edac_check = pci_info->check;
diff --git a/drivers/edac/amd8131_edac.c b/drivers/edac/amd8131_edac.c
index c083b31cac5..b432d60c622 100644
--- a/drivers/edac/amd8131_edac.c
+++ b/drivers/edac/amd8131_edac.c
@@ -287,7 +287,7 @@ static int amd8131_probe(struct pci_dev *dev, const struct pci_device_id *id)
dev_info->edac_dev->dev = &dev_info->dev->dev;
dev_info->edac_dev->mod_name = AMD8131_EDAC_MOD_STR;
dev_info->edac_dev->ctl_name = dev_info->ctl_name;
- dev_info->edac_dev->dev_name = dev_info->dev->dev.bus_id;
+ dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev);
if (edac_op_state == EDAC_OPSTATE_POLL)
dev_info->edac_dev->edac_check = amd8131_chipset.check;
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 6ad95c8d636..48d3b140983 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -76,10 +76,11 @@
extern int edac_debug_level;
#ifndef CONFIG_EDAC_DEBUG_VERBOSE
-#define edac_debug_printk(level, fmt, arg...) \
- do { \
- if (level <= edac_debug_level) \
- edac_printk(KERN_DEBUG, EDAC_DEBUG, fmt, ##arg); \
+#define edac_debug_printk(level, fmt, arg...) \
+ do { \
+ if (level <= edac_debug_level) \
+ edac_printk(KERN_DEBUG, EDAC_DEBUG, \
+ "%s: " fmt, __func__, ##arg); \
} while (0)
#else /* CONFIG_EDAC_DEBUG_VERBOSE */
#define edac_debug_printk(level, fmt, arg...) \
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 5f1b5400d96..24c84ae8152 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -596,6 +596,7 @@ int dmi_get_year(int field)
return year;
}
+EXPORT_SYMBOL(dmi_get_year);
/**
* dmi_walk - Walk the DMI table and get called back for every record
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 4cd35d8fd79..f5d46e7199d 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -67,12 +67,18 @@ config DRM_I830
will load the correct one.
config DRM_I915
+ tristate "i915 driver"
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB
select FRAMEBUFFER_CONSOLE if !EMBEDDED
- tristate "i915 driver"
+ # i915 depends on ACPI_VIDEO when ACPI is enabled
+ # but for select to work, need to select ACPI_VIDEO's dependencies, ick
+ select VIDEO_OUTPUT_CONTROL if ACPI
+ select BACKLIGHT_CLASS_DEVICE if ACPI
+ select INPUT if ACPI
+ select ACPI_VIDEO if ACPI
help
Choose this option if you have a system that has Intel 830M, 845G,
852GM, 855GM 865G or 915G integrated graphics. If M is selected, the
@@ -84,12 +90,6 @@ config DRM_I915
config DRM_I915_KMS
bool "Enable modesetting on intel by default"
depends on DRM_I915
- # i915 KMS depends on ACPI_VIDEO when ACPI is enabled
- # but for select to work, need to select ACPI_VIDEO's dependencies, ick
- select VIDEO_OUTPUT_CONTROL if ACPI
- select BACKLIGHT_CLASS_DEVICE if ACPI
- select INPUT if ACPI
- select ACPI_VIDEO if ACPI
help
Choose this option if you want kernel modesetting enabled by default,
and you have a new enough userspace to support this. Running old
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 94a76887173..8fab7890a36 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2294,7 +2294,12 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
}
}
- if (connector->funcs->set_property)
+ /* Do DPMS ourselves */
+ if (property == connector->dev->mode_config.dpms_property) {
+ if (connector->funcs->dpms)
+ (*connector->funcs->dpms)(connector, (int) out_resp->value);
+ ret = 0;
+ } else if (connector->funcs->set_property)
ret = connector->funcs->set_property(connector, property, out_resp->value);
/* store the property value if succesful */
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 45890447fee..a6f73f1e99d 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -199,6 +199,29 @@ static void drm_helper_add_std_modes(struct drm_device *dev,
}
/**
+ * drm_helper_encoder_in_use - check if a given encoder is in use
+ * @encoder: encoder to check
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Walk @encoder's DRM device's mode_config and see if it's in use.
+ *
+ * RETURNS:
+ * True if @encoder is part of the mode_config, false otherwise.
+ */
+bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
+{
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder)
+ return true;
+ return false;
+}
+EXPORT_SYMBOL(drm_helper_encoder_in_use);
+
+/**
* drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
* @crtc: CRTC to check
*
@@ -216,7 +239,7 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
/* FIXME: Locking around list access? */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
- if (encoder->crtc == crtc)
+ if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
return true;
return false;
}
@@ -240,7 +263,7 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
encoder_funcs = encoder->helper_private;
- if (!encoder->crtc)
+ if (!drm_helper_encoder_in_use(encoder))
(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
}
@@ -935,6 +958,88 @@ bool drm_helper_initial_config(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_helper_initial_config);
+static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
+{
+ int dpms = DRM_MODE_DPMS_OFF;
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder)
+ if (connector->dpms < dpms)
+ dpms = connector->dpms;
+ return dpms;
+}
+
+static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
+{
+ int dpms = DRM_MODE_DPMS_OFF;
+ struct drm_connector *connector;
+ struct drm_device *dev = crtc->dev;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder && connector->encoder->crtc == crtc)
+ if (connector->dpms < dpms)
+ dpms = connector->dpms;
+ return dpms;
+}
+
+/**
+ * drm_helper_connector_dpms
+ * @connector: affected connector
+ * @mode: DPMS mode
+ *
+ * Calls the low-level connector DPMS function, then calls the appropriate
+ * encoder and CRTC DPMS functions as well.
+ */
+void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
+{
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
+ int old_dpms;
+
+ if (mode == connector->dpms)
+ return;
+
+ old_dpms = connector->dpms;
+ connector->dpms = mode;
+
+ /* from off to on, do crtc then encoder */
+ if (mode < old_dpms) {
+ if (crtc) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ if (crtc_funcs->dpms)
+ (*crtc_funcs->dpms) (crtc,
+ drm_helper_choose_crtc_dpms(crtc));
+ }
+ if (encoder) {
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->dpms)
+ (*encoder_funcs->dpms) (encoder,
+ drm_helper_choose_encoder_dpms(encoder));
+ }
+ }
+
+ /* from on to off, do encoder then crtc */
+ if (mode > old_dpms) {
+ if (encoder) {
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->dpms)
+ (*encoder_funcs->dpms) (encoder,
+ drm_helper_choose_encoder_dpms(encoder));
+ }
+ if (crtc) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ if (crtc_funcs->dpms)
+ (*crtc_funcs->dpms) (crtc,
+ drm_helper_choose_crtc_dpms(crtc));
+ }
+ }
+
+ return;
+}
+EXPORT_SYMBOL(drm_helper_connector_dpms);
+
/**
* drm_hotplug_stage_two
* @dev DRM device
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ca9c6165671..6f6b26479d8 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -289,6 +289,11 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
struct drm_display_mode *mode;
struct detailed_pixel_timing *pt = &timing->data.pixel_data;
+ /* ignore tiny modes */
+ if (((pt->hactive_hi << 8) | pt->hactive_lo) < 64 ||
+ ((pt->vactive_hi << 8) | pt->vactive_lo) < 64)
+ return NULL;
+
if (pt->stereo) {
printk(KERN_WARNING "stereo mode not supported\n");
return NULL;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 93e677a481f..fc8e5acd9d9 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -196,6 +196,7 @@ int drm_irq_install(struct drm_device *dev)
{
int ret = 0;
unsigned long sh_flags = 0;
+ char *irqname;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
@@ -227,8 +228,13 @@ int drm_irq_install(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
sh_flags = IRQF_SHARED;
+ if (dev->devname)
+ irqname = dev->devname;
+ else
+ irqname = dev->driver->name;
+
ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
- sh_flags, dev->devname, dev);
+ sh_flags, irqname, dev);
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 8f9372921f8..9987ab88083 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -147,7 +147,7 @@ static ssize_t status_show(struct device *device,
enum drm_connector_status status;
status = connector->funcs->detect(connector);
- return snprintf(buf, PAGE_SIZE, "%s",
+ return snprintf(buf, PAGE_SIZE, "%s\n",
drm_get_connector_status_name(status));
}
@@ -166,7 +166,7 @@ static ssize_t dpms_show(struct device *device,
if (ret)
return 0;
- return snprintf(buf, PAGE_SIZE, "%s",
+ return snprintf(buf, PAGE_SIZE, "%s\n",
drm_get_dpms_name((int)dpms_status));
}
@@ -176,7 +176,7 @@ static ssize_t enabled_show(struct device *device,
{
struct drm_connector *connector = to_drm_connector(device);
- return snprintf(buf, PAGE_SIZE, connector->encoder ? "enabled" :
+ return snprintf(buf, PAGE_SIZE, "%s\n", connector->encoder ? "enabled" :
"disabled");
}
@@ -317,6 +317,7 @@ static struct device_attribute connector_attrs_opt1[] = {
static struct bin_attribute edid_attr = {
.attr.name = "edid",
+ .attr.mode = 0444,
.size = 128,
.read = edid_show,
};
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 53d54455262..0ccb63ee50e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -987,12 +987,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
int fb_bar = IS_I9XX(dev) ? 2 : 0;
int ret = 0;
- dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL);
- if (!dev->devname) {
- ret = -ENOMEM;
- goto out;
- }
-
dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
0xff000000;
@@ -1006,7 +1000,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
if (ret)
- goto kfree_devname;
+ goto out;
/* Basic memrange allocator for stolen space (aka vram) */
drm_mm_init(&dev_priv->vram, 0, prealloc_size);
@@ -1024,7 +1018,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
ret = i915_gem_init_ringbuffer(dev);
if (ret)
- goto kfree_devname;
+ goto out;
/* Allow hardware batchbuffers unless told otherwise.
*/
@@ -1056,8 +1050,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
destroy_ringbuffer:
i915_gem_cleanup_ringbuffer(dev);
-kfree_devname:
- kfree(dev->devname);
out:
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 717b6a854bc..39f5c658ef5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1145,6 +1145,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
mutex_unlock(&dev->struct_mutex);
return VM_FAULT_SIGBUS;
}
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return VM_FAULT_SIGBUS;
+ }
+
list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
}
@@ -2128,8 +2135,10 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
return;
}
- pitch_val = (obj_priv->stride / 128) - 1;
- WARN_ON(pitch_val & ~0x0000000f);
+ pitch_val = obj_priv->stride / 128;
+ pitch_val = ffs(pitch_val) - 1;
+ WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
+
val = obj_priv->gtt_offset;
if (obj_priv->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
@@ -2251,9 +2260,6 @@ try_again:
goto try_again;
}
- BUG_ON(old_obj_priv->active ||
- (reg->obj->write_domain & I915_GEM_GPU_DOMAINS));
-
/*
* Zap this virtual mapping so we can set up a fence again
* for this object next time we need it.
@@ -2421,6 +2427,16 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
if (obj_priv->pages == NULL)
return;
+ /* XXX: The 865 in particular appears to be weird in how it handles
+ * cache flushing. We haven't figured it out, but the
+ * clflush+agp_chipset_flush doesn't appear to successfully get the
+ * data visible to the PGU, while wbinvd + agp_chipset_flush does.
+ */
+ if (IS_I865G(obj->dev)) {
+ wbinvd();
+ return;
+ }
+
drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 52a059354e8..540dd336e6e 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -213,7 +213,8 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
if (tiling_mode == I915_TILING_NONE)
return true;
- if (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+ if (!IS_I9XX(dev) ||
+ (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
tile_width = 128;
else
tile_width = 512;
@@ -225,11 +226,18 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
} else if (IS_I9XX(dev)) {
- if (stride / tile_width > I830_FENCE_MAX_PITCH_VAL ||
+ uint32_t pitch_val = ffs(stride / tile_width) - 1;
+
+ /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
+ * instead of 4 (2KB) on 945s.
+ */
+ if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
size > (I830_FENCE_MAX_SIZE_VAL << 20))
return false;
} else {
- if (stride / 128 > I830_FENCE_MAX_PITCH_VAL ||
+ uint32_t pitch_val = ffs(stride / tile_width) - 1;
+
+ if (pitch_val > I830_FENCE_MAX_PITCH_VAL ||
size > (I830_FENCE_MAX_SIZE_VAL << 19))
return false;
}
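A quick worked example of the log2 pitch encoding used above (values chosen only for illustration): a 4096-byte stride with 512-byte wide tiles gives pitch_val = ffs(4096 / 512) - 1 = ffs(8) - 1 = 3, which is then checked against the MAX_PITCH_VAL limits instead of the raw stride/tile_width quotient. A minimal standalone sketch:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int stride = 4096, tile_width = 512;
	int pitch_val = ffs(stride / tile_width) - 1;	/* ffs(8) - 1 == 3 */

	printf("pitch_val = %d\n", pitch_val);
	return 0;
}
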
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9668cc0d7f4..375569d01d0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -190,7 +190,8 @@
#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
#define I830_FENCE_PITCH_SHIFT 4
#define I830_FENCE_REG_VALID (1<<0)
-#define I830_FENCE_MAX_PITCH_VAL 0x10
+#define I915_FENCE_MAX_PITCH_VAL 0x10
+#define I830_FENCE_MAX_PITCH_VAL 6
#define I830_FENCE_MAX_SIZE_VAL (1<<8)
#define I915_FENCE_START_MASK 0x0ff00000
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 640f5158eff..79acc4f4c1f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -381,11 +381,6 @@ static int intel_crt_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
- struct drm_device *dev = connector->dev;
-
- if (property == dev->mode_config.dpms_property && connector->encoder)
- intel_crt_dpms(connector->encoder, (uint32_t)(value & 0xf));
-
return 0;
}
@@ -402,6 +397,7 @@ static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
};
static const struct drm_connector_funcs intel_crt_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
.detect = intel_crt_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_crt_destroy,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 8b8d6e65cd3..1ee3007d6ec 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -316,6 +316,7 @@ static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
};
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
.save = intel_dvo_save,
.restore = intel_dvo_restore,
.detect = intel_dvo_detect,
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index d0983bb93a1..7d6bdd70532 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -219,6 +219,7 @@ static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
};
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
.save = intel_hdmi_save,
.restore = intel_hdmi_restore,
.detect = intel_hdmi_detect,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 53731f0ffcb..53cccfa58b9 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -343,11 +343,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
- struct drm_device *dev = connector->dev;
-
- if (property == dev->mode_config.dpms_property && connector->encoder)
- intel_lvds_dpms(connector->encoder, (uint32_t)(value & 0xf));
-
return 0;
}
@@ -366,6 +361,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
.save = intel_lvds_save,
.restore = intel_lvds_restore,
.detect = intel_lvds_detect,
@@ -391,7 +387,7 @@ static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
}
/* These systems claim to have LVDS, but really don't */
-static const struct dmi_system_id __initdata intel_no_lvds[] = {
+static const struct dmi_system_id intel_no_lvds[] = {
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Apple Mac Mini (Core series)",
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index f3ef6bfd8ff..3093b4d4a4d 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1616,6 +1616,7 @@ static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
};
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
.save = intel_sdvo_save,
.restore = intel_sdvo_restore,
.detect = intel_sdvo_detect,
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d2c32983242..98ac0546b7b 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1626,6 +1626,7 @@ static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
};
static const struct drm_connector_funcs intel_tv_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
.save = intel_tv_save,
.restore = intel_tv_restore,
.detect = intel_tv_detect,
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 77a7a4d8465..aff90bb9648 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2185,9 +2185,9 @@ void radeon_commit_ring(drm_radeon_private_t *dev_priv)
/* check if the ring is padded out to 16-dword alignment */
- tail_aligned = dev_priv->ring.tail & 0xf;
+ tail_aligned = dev_priv->ring.tail & (RADEON_RING_ALIGN-1);
if (tail_aligned) {
- int num_p2 = 16 - tail_aligned;
+ int num_p2 = RADEON_RING_ALIGN - tail_aligned;
ring = dev_priv->ring.start;
/* pad with some CP_PACKET2 */
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 8071d965f14..0c6bfc1de15 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -1964,11 +1964,14 @@ do { \
#define RING_LOCALS int write, _nr, _align_nr; unsigned int mask; u32 *ring;
+#define RADEON_RING_ALIGN 16
+
#define BEGIN_RING( n ) do { \
if ( RADEON_VERBOSE ) { \
DRM_INFO( "BEGIN_RING( %d )\n", (n)); \
} \
- _align_nr = (n + 0xf) & ~0xf; \
+ _align_nr = RADEON_RING_ALIGN - ((dev_priv->ring.tail + n) & (RADEON_RING_ALIGN-1)); \
+ _align_nr += n; \
if (dev_priv->ring.space <= (_align_nr * sizeof(u32))) { \
COMMIT_RING(); \
radeon_wait_ring( dev_priv, _align_nr * sizeof(u32)); \
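A quick arithmetic check of the reworked reservation (a worked example, not driver code): with RADEON_RING_ALIGN == 16, a ring tail of 5 and n == 4, _align_nr = 16 - ((5 + 4) & 15) = 7, and _align_nr += 4 gives 11 dwords reserved: the 4 command dwords plus the 7 CP_PACKET2 dwords that radeon_commit_ring() will need to pad tail 9 back to a 16-dword boundary.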
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index b5e3b285169..a1787fdf5b9 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -182,7 +182,7 @@ static struct platform_driver lm78_isa_driver = {
.name = "lm78",
},
.probe = lm78_isa_probe,
- .remove = lm78_isa_remove,
+ .remove = __devexit_p(lm78_isa_remove),
};
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index baa28b73ae4..b9680f50f54 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -396,7 +396,7 @@ static int __devinit calc_CCR(unsigned long scl_hz)
signed char cdf, cdfm;
int scgd, scgdm, scgds;
- mclk = clk_get(NULL, "module_clk");
+ mclk = clk_get(NULL, "peripheral_clk");
if (IS_ERR(mclk)) {
return PTR_ERR(mclk);
} else {
diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
index 537da1cde16..e59b6dee9ae 100644
--- a/drivers/ide/alim15x3.c
+++ b/drivers/ide/alim15x3.c
@@ -402,27 +402,23 @@ static u8 ali_cable_detect(ide_hwif_t *hwif)
return cbl;
}
-#if !defined(CONFIG_SPARC64) && !defined(CONFIG_PPC)
+#ifndef CONFIG_SPARC64
/**
* init_hwif_ali15x3 - Initialize the ALI IDE x86 stuff
* @hwif: interface to configure
*
* Obtain the IRQ tables for an ALi based IDE solution on the PC
* class platforms. This part of the code isn't applicable to the
- * Sparc and PowerPC systems.
+ * Sparc systems.
*/
static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif)
{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 ideic, inmir;
s8 irq_routing_table[] = { -1, 9, 3, 10, 4, 5, 7, 6,
1, 11, 0, 12, 0, 14, 0, 15 };
int irq = -1;
- if (dev->device == PCI_DEVICE_ID_AL_M5229)
- hwif->irq = hwif->channel ? 15 : 14;
-
if (isa_dev) {
/*
* read IDE interface control
@@ -455,7 +451,7 @@ static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif)
}
#else
#define init_hwif_ali15x3 NULL
-#endif /* !defined(CONFIG_SPARC64) && !defined(CONFIG_PPC) */
+#endif /* CONFIG_SPARC64 */
/**
* init_dma_ali15x3 - set up DMA on ALi15x3
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 7201b176d75..757e5956b13 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -80,34 +80,6 @@ void ide_init_pc(struct ide_atapi_pc *pc)
EXPORT_SYMBOL_GPL(ide_init_pc);
/*
- * Generate a new packet command request in front of the request queue, before
- * the current request, so that it will be processed immediately, on the next
- * pass through the driver.
- */
-static void ide_queue_pc_head(ide_drive_t *drive, struct gendisk *disk,
- struct ide_atapi_pc *pc, struct request *rq)
-{
- blk_rq_init(NULL, rq);
- rq->cmd_type = REQ_TYPE_SPECIAL;
- rq->cmd_flags |= REQ_PREEMPT;
- rq->buffer = (char *)pc;
- rq->rq_disk = disk;
-
- if (pc->req_xfer) {
- rq->data = pc->buf;
- rq->data_len = pc->req_xfer;
- }
-
- memcpy(rq->cmd, pc->c, 12);
- if (drive->media == ide_tape)
- rq->cmd[13] = REQ_IDETAPE_PC1;
-
- drive->hwif->rq = NULL;
-
- elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
-}
-
-/*
* Add a special packet command request to the tail of the request queue,
* and wait for it to be serviced.
*/
@@ -119,19 +91,21 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_SPECIAL;
- rq->buffer = (char *)pc;
+ rq->special = (char *)pc;
if (pc->req_xfer) {
- rq->data = pc->buf;
- rq->data_len = pc->req_xfer;
+ error = blk_rq_map_kern(drive->queue, rq, pc->buf, pc->req_xfer,
+ GFP_NOIO);
+ if (error)
+ goto put_req;
}
memcpy(rq->cmd, pc->c, 12);
if (drive->media == ide_tape)
rq->cmd[13] = REQ_IDETAPE_PC1;
error = blk_execute_rq(drive->queue, disk, rq, 0);
+put_req:
blk_put_request(rq);
-
return error;
}
EXPORT_SYMBOL_GPL(ide_queue_pc_tail);
@@ -191,20 +165,113 @@ void ide_create_request_sense_cmd(ide_drive_t *drive, struct ide_atapi_pc *pc)
}
EXPORT_SYMBOL_GPL(ide_create_request_sense_cmd);
+void ide_prep_sense(ide_drive_t *drive, struct request *rq)
+{
+ struct request_sense *sense = &drive->sense_data;
+ struct request *sense_rq = &drive->sense_rq;
+ unsigned int cmd_len, sense_len;
+ int err;
+
+ debug_log("%s: enter\n", __func__);
+
+ switch (drive->media) {
+ case ide_floppy:
+ cmd_len = 255;
+ sense_len = 18;
+ break;
+ case ide_tape:
+ cmd_len = 20;
+ sense_len = 20;
+ break;
+ default:
+ cmd_len = 18;
+ sense_len = 18;
+ }
+
+ BUG_ON(sense_len > sizeof(*sense));
+
+ if (blk_sense_request(rq) || drive->sense_rq_armed)
+ return;
+
+ memset(sense, 0, sizeof(*sense));
+
+ blk_rq_init(rq->q, sense_rq);
+
+ err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len,
+ GFP_NOIO);
+ if (unlikely(err)) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING "%s: failed to map sense buffer\n",
+ drive->name);
+ return;
+ }
+
+ sense_rq->rq_disk = rq->rq_disk;
+ sense_rq->cmd[0] = GPCMD_REQUEST_SENSE;
+ sense_rq->cmd[4] = cmd_len;
+ sense_rq->cmd_type = REQ_TYPE_SENSE;
+ sense_rq->cmd_flags |= REQ_PREEMPT;
+
+ if (drive->media == ide_tape)
+ sense_rq->cmd[13] = REQ_IDETAPE_PC1;
+
+ drive->sense_rq_armed = true;
+}
+EXPORT_SYMBOL_GPL(ide_prep_sense);
+
+int ide_queue_sense_rq(ide_drive_t *drive, void *special)
+{
+ /* deferred failure from ide_prep_sense() */
+ if (!drive->sense_rq_armed) {
+ printk(KERN_WARNING "%s: failed queue sense request\n",
+ drive->name);
+ return -ENOMEM;
+ }
+
+ drive->sense_rq.special = special;
+ drive->sense_rq_armed = false;
+
+ drive->hwif->rq = NULL;
+
+ elv_add_request(drive->queue, &drive->sense_rq,
+ ELEVATOR_INSERT_FRONT, 0);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
+
/*
* Called when an error was detected during the last packet command.
- * We queue a request sense packet command in the head of the request list.
+ * We queue a request sense packet command at the head of the request
+ * queue.
*/
-void ide_retry_pc(ide_drive_t *drive, struct gendisk *disk)
+void ide_retry_pc(ide_drive_t *drive)
{
- struct request *rq = &drive->request_sense_rq;
+ struct request *failed_rq = drive->hwif->rq;
+ struct request *sense_rq = &drive->sense_rq;
struct ide_atapi_pc *pc = &drive->request_sense_pc;
(void)ide_read_error(drive);
- ide_create_request_sense_cmd(drive, pc);
+
+ /* init pc from sense_rq */
+ ide_init_pc(pc);
+ memcpy(pc->c, sense_rq->cmd, 12);
+ pc->buf = bio_data(sense_rq->bio); /* pointer to mapped address */
+ pc->req_xfer = blk_rq_bytes(sense_rq);
+
if (drive->media == ide_tape)
set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
- ide_queue_pc_head(drive, disk, pc, rq);
+
+ /*
+ * Push back the failed request and put request sense on top
+ * of it. The failed command will be retried after sense data
+ * is acquired.
+ */
+ blk_requeue_request(failed_rq->q, failed_rq);
+ drive->hwif->rq = NULL;
+ if (ide_queue_sense_rq(drive, pc)) {
+ blk_start_request(failed_rq);
+ ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq));
+ }
}
EXPORT_SYMBOL_GPL(ide_retry_pc);
@@ -246,7 +313,7 @@ int ide_cd_get_xferlen(struct request *rq)
return 32768;
else if (blk_sense_request(rq) || blk_pc_request(rq) ||
rq->cmd_type == REQ_TYPE_ATA_PC)
- return rq->data_len;
+ return blk_rq_bytes(rq);
else
return 0;
}
@@ -276,7 +343,6 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
struct ide_cmd *cmd = &hwif->cmd;
struct request *rq = hwif->rq;
const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- xfer_func_t *xferfunc;
unsigned int timeout, done;
u16 bcount;
u8 stat, ireason, dsc = 0;
@@ -303,18 +369,14 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
drive->name, rq_data_dir(pc->rq)
? "write" : "read");
pc->flags |= PC_FLAG_DMA_ERROR;
- } else {
+ } else
pc->xferred = pc->req_xfer;
- if (drive->pc_update_buffers)
- drive->pc_update_buffers(drive, pc);
- }
debug_log("%s: DMA finished\n", drive->name);
}
/* No more interrupts */
if ((stat & ATA_DRQ) == 0) {
int uptodate, error;
- unsigned int done;
debug_log("Packet command completed, %d bytes transferred\n",
pc->xferred);
@@ -343,7 +405,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
debug_log("[cmd %x]: check condition\n", rq->cmd[0]);
/* Retry operation */
- ide_retry_pc(drive, rq->rq_disk);
+ ide_retry_pc(drive);
/* queued, but not started */
return ide_stopped;
@@ -353,6 +415,12 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
if ((pc->flags & PC_FLAG_WAIT_FOR_DSC) && (stat & ATA_DSC) == 0)
dsc = 1;
+ /*
+ * ->pc_callback() might change rq->data_len for
+ * residual count, cache total length.
+ */
+ done = blk_rq_bytes(rq);
+
/* Command finished - Call the callback function */
uptodate = drive->pc_callback(drive, dsc);
@@ -361,7 +429,6 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
if (blk_special_request(rq)) {
rq->errors = 0;
- done = blk_rq_bytes(rq);
error = 0;
} else {
@@ -370,15 +437,10 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
rq->errors = -EIO;
}
- if (drive->media == ide_tape)
- done = ide_rq_bytes(rq); /* FIXME */
- else
- done = blk_rq_bytes(rq);
-
error = uptodate ? 0 : -EIO;
}
- ide_complete_rq(drive, error, done);
+ ide_complete_rq(drive, error, blk_rq_bytes(rq));
return ide_stopped;
}
@@ -407,21 +469,11 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
return ide_do_reset(drive);
}
- xferfunc = write ? tp_ops->output_data : tp_ops->input_data;
-
- if (drive->media == ide_floppy && pc->buf == NULL) {
- done = min_t(unsigned int, bcount, cmd->nleft);
- ide_pio_bytes(drive, cmd, write, done);
- } else if (drive->media == ide_tape && pc->bh) {
- done = drive->pc_io_buffers(drive, pc, bcount, write);
- } else {
- done = min_t(unsigned int, bcount, pc->req_xfer - pc->xferred);
- xferfunc(drive, NULL, pc->cur_pos, done);
- }
+ done = min_t(unsigned int, bcount, cmd->nleft);
+ ide_pio_bytes(drive, cmd, write, done);
- /* Update the current position */
+ /* Update transferred byte count */
pc->xferred += done;
- pc->cur_pos += done;
bcount -= done;
@@ -599,7 +651,6 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd)
/* We haven't transferred any data yet */
pc->xferred = 0;
- pc->cur_pos = pc->buf;
valid_tf = IDE_VALID_DEVICE;
bcount = ((drive->media == ide_tape) ?
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 925eb9e245d..424140c6c40 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -182,7 +182,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
(sense->information[2] << 8) |
(sense->information[3]);
- if (drive->queue->hardsect_size == 2048)
+ if (queue_logical_block_size(drive->queue) == 2048)
/* device sector size is 2K */
sector <<= 2;
@@ -206,54 +206,25 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
ide_cd_log_error(drive->name, failed_command, sense);
}
-static void cdrom_queue_request_sense(ide_drive_t *drive, void *sense,
- struct request *failed_command)
-{
- struct cdrom_info *info = drive->driver_data;
- struct request *rq = &drive->request_sense_rq;
-
- ide_debug_log(IDE_DBG_SENSE, "enter");
-
- if (sense == NULL)
- sense = &info->sense_data;
-
- /* stuff the sense request in front of our current request */
- blk_rq_init(NULL, rq);
- rq->cmd_type = REQ_TYPE_ATA_PC;
- rq->rq_disk = info->disk;
-
- rq->data = sense;
- rq->cmd[0] = GPCMD_REQUEST_SENSE;
- rq->cmd[4] = 18;
- rq->data_len = 18;
-
- rq->cmd_type = REQ_TYPE_SENSE;
- rq->cmd_flags |= REQ_PREEMPT;
-
- /* NOTE! Save the failed command in "rq->buffer" */
- rq->buffer = (void *) failed_command;
-
- if (failed_command)
- ide_debug_log(IDE_DBG_SENSE, "failed_cmd: 0x%x",
- failed_command->cmd[0]);
-
- drive->hwif->rq = NULL;
-
- elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
-}
-
static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
{
/*
- * For REQ_TYPE_SENSE, "rq->buffer" points to the original
- * failed request
+ * For REQ_TYPE_SENSE, "rq->special" points to the original
+ * failed request. Also, the sense data should be read
+ * directly from rq which might be different from the original
+ * sense buffer if it got copied during mapping.
*/
- struct request *failed = (struct request *)rq->buffer;
- struct cdrom_info *info = drive->driver_data;
- void *sense = &info->sense_data;
+ struct request *failed = (struct request *)rq->special;
+ void *sense = bio_data(rq->bio);
if (failed) {
if (failed->sense) {
+ /*
+ * Sense is always read into drive->sense_data.
+ * Copy back if the failed request has its
+ * sense pointer set.
+ */
+ memcpy(failed->sense, sense, 18);
sense = failed->sense;
failed->sense_len = rq->sense_len;
}
@@ -428,22 +399,13 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
/* if we got a CHECK_CONDITION status, queue a request sense command */
if (stat & ATA_ERR)
- cdrom_queue_request_sense(drive, NULL, NULL);
+ return ide_queue_sense_rq(drive, NULL) ? 2 : 1;
return 1;
end_request:
if (stat & ATA_ERR) {
- struct request_queue *q = drive->queue;
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- blkdev_dequeue_request(rq);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
hwif->rq = NULL;
-
- cdrom_queue_request_sense(drive, rq->sense, rq);
- return 1;
+ return ide_queue_sense_rq(drive, rq) ? 2 : 1;
} else
return 2;
}
@@ -503,14 +465,8 @@ static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd)
* and some drives don't send them. Sigh.
*/
if (rq->cmd[0] == GPCMD_REQUEST_SENSE &&
- cmd->nleft > 0 && cmd->nleft <= 5) {
- unsigned int ofs = cmd->nbytes - cmd->nleft;
-
- while (cmd->nleft > 0) {
- *((u8 *)rq->data + ofs++) = 0;
- cmd->nleft--;
- }
- }
+ cmd->nleft > 0 && cmd->nleft <= 5)
+ cmd->nleft = 0;
}
int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
@@ -543,14 +499,18 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
rq->cmd_flags |= cmd_flags;
rq->timeout = timeout;
if (buffer) {
- rq->data = buffer;
- rq->data_len = *bufflen;
+ error = blk_rq_map_kern(drive->queue, rq, buffer,
+ *bufflen, GFP_NOIO);
+ if (error) {
+ blk_put_request(rq);
+ return error;
+ }
}
error = blk_execute_rq(drive->queue, info->disk, rq, 0);
if (buffer)
- *bufflen = rq->data_len;
+ *bufflen = rq->resid_len;
flags = rq->cmd_flags;
blk_put_request(rq);
@@ -608,7 +568,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
struct request *rq = hwif->rq;
ide_expiry_t *expiry = NULL;
int dma_error = 0, dma, thislen, uptodate = 0;
- int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0, nsectors;
+ int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
int sense = blk_sense_request(rq);
unsigned int timeout;
u16 len;
@@ -738,13 +698,8 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
out_end:
if (blk_pc_request(rq) && rc == 0) {
- unsigned int dlen = rq->data_len;
-
- rq->data_len = 0;
-
- if (blk_end_request(rq, 0, dlen))
- BUG();
-
+ rq->resid_len = 0;
+ blk_end_request_all(rq, 0);
hwif->rq = NULL;
} else {
if (sense && uptodate)
@@ -762,21 +717,13 @@ out_end:
ide_cd_error_cmd(drive, cmd);
/* make sure it's fully ended */
- if (blk_pc_request(rq))
- nsectors = (rq->data_len + 511) >> 9;
- else
- nsectors = rq->hard_nr_sectors;
-
- if (nsectors == 0)
- nsectors = 1;
-
if (blk_fs_request(rq) == 0) {
- rq->data_len -= (cmd->nbytes - cmd->nleft);
+ rq->resid_len -= cmd->nbytes - cmd->nleft;
if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
- rq->data_len += cmd->last_xfer_len;
+ rq->resid_len += cmd->last_xfer_len;
}
- ide_complete_rq(drive, uptodate ? 0 : -EIO, nsectors << 9);
+ ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq));
if (sense && rc == 2)
ide_error(drive, "request sense failure", stat);
@@ -790,7 +737,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
struct request_queue *q = drive->queue;
int write = rq_data_dir(rq) == WRITE;
unsigned short sectors_per_frame =
- queue_hardsect_size(q) >> SECTOR_BITS;
+ queue_logical_block_size(q) >> SECTOR_BITS;
ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, "
"secs_per_frame: %u",
@@ -809,8 +756,8 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
}
/* fs requests *must* be hardware frame aligned */
- if ((rq->nr_sectors & (sectors_per_frame - 1)) ||
- (rq->sector & (sectors_per_frame - 1)))
+ if ((blk_rq_sectors(rq) & (sectors_per_frame - 1)) ||
+ (blk_rq_pos(rq) & (sectors_per_frame - 1)))
return ide_stopped;
/* use DMA, if possible */
@@ -838,15 +785,10 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
drive->dma = 0;
/* sg request */
- if (rq->bio || ((rq->cmd_type == REQ_TYPE_ATA_PC) && rq->data_len)) {
+ if (rq->bio) {
struct request_queue *q = drive->queue;
+ char *buf = bio_data(rq->bio);
unsigned int alignment;
- char *buf;
-
- if (rq->bio)
- buf = bio_data(rq->bio);
- else
- buf = rq->data;
drive->dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
@@ -858,7 +800,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
*/
alignment = queue_dma_alignment(q) | q->dma_pad_mask;
if ((unsigned long)buf & alignment
- || rq->data_len & q->dma_pad_mask
+ || blk_rq_bytes(rq) & q->dma_pad_mask
|| object_is_on_stack(buf))
drive->dma = 0;
}
@@ -896,6 +838,9 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
goto out_end;
}
+ /* prepare sense request for this command */
+ ide_prep_sense(drive, rq);
+
memset(&cmd, 0, sizeof(cmd));
if (rq_data_dir(rq))
@@ -903,15 +848,14 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
cmd.rq = rq;
- if (blk_fs_request(rq) || rq->data_len) {
- ide_init_sg_cmd(&cmd, blk_fs_request(rq) ? (rq->nr_sectors << 9)
- : rq->data_len);
+ if (blk_fs_request(rq) || blk_rq_bytes(rq)) {
+ ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
ide_map_sg(drive, &cmd);
}
return ide_issue_pc(drive, &cmd);
out_end:
- nsectors = rq->hard_nr_sectors;
+ nsectors = blk_rq_sectors(rq);
if (nsectors == 0)
nsectors = 1;
@@ -1077,8 +1021,8 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
/* save a private copy of the TOC capacity for error handling */
drive->probed_capacity = toc->capacity * sectors_per_frame;
- blk_queue_hardsect_size(drive->queue,
- sectors_per_frame << SECTOR_BITS);
+ blk_queue_logical_block_size(drive->queue,
+ sectors_per_frame << SECTOR_BITS);
/* first read just the header, so we know how long the TOC is */
stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
@@ -1394,9 +1338,9 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
/* standard prep_rq_fn that builds 10 byte cmds */
static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
{
- int hard_sect = queue_hardsect_size(q);
- long block = (long)rq->hard_sector / (hard_sect >> 9);
- unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9);
+ int hard_sect = queue_logical_block_size(q);
+ long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
+ unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
memset(rq->cmd, 0, BLK_MAX_CDB);
@@ -1599,7 +1543,7 @@ static int ide_cdrom_setup(ide_drive_t *drive)
nslots = ide_cdrom_probe_capabilities(drive);
- blk_queue_hardsect_size(q, CD_FRAMESIZE);
+ blk_queue_logical_block_size(q, CD_FRAMESIZE);
if (ide_cdrom_register(drive, nslots)) {
printk(KERN_ERR PFX "%s: %s failed to register device with the"
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index 1d97101099c..93a3cf1b0f3 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -87,10 +87,6 @@ struct cdrom_info {
struct atapi_toc *toc;
- /* The result of the last successful request sense command
- on this device. */
- struct request_sense sense_data;
-
u8 max_speed; /* Max speed of the drive. */
u8 current_speed; /* Current speed of the drive. */
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index a9fbe2c3121..c6f7fcfb9d6 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -82,7 +82,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
sector_t block)
{
ide_hwif_t *hwif = drive->hwif;
- u16 nsectors = (u16)rq->nr_sectors;
+ u16 nsectors = (u16)blk_rq_sectors(rq);
u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48);
u8 dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
struct ide_cmd cmd;
@@ -90,7 +90,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
ide_startstop_t rc;
if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
- if (block + rq->nr_sectors > 1ULL << 28)
+ if (block + blk_rq_sectors(rq) > 1ULL << 28)
dma = 0;
else
lba48 = 0;
@@ -195,9 +195,9 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
ledtrig_ide_activity();
- pr_debug("%s: %sing: block=%llu, sectors=%lu, buffer=0x%08lx\n",
+ pr_debug("%s: %sing: block=%llu, sectors=%u, buffer=0x%08lx\n",
drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
- (unsigned long long)block, rq->nr_sectors,
+ (unsigned long long)block, blk_rq_sectors(rq),
(unsigned long)rq->buffer);
if (hwif->rw_disk)
@@ -411,7 +411,6 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
cmd->protocol = ATA_PROT_NODATA;
rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
- rq->cmd_flags |= REQ_SOFTBARRIER;
rq->special = cmd;
}
@@ -640,7 +639,7 @@ static void ide_disk_setup(ide_drive_t *drive)
}
printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
- q->max_sectors / 2);
+ queue_max_sectors(q) / 2);
if (ata_id_is_ssd(id))
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index a0b8cab1d9a..001f68f0bb2 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -103,7 +103,7 @@ ide_startstop_t ide_dma_intr(ide_drive_t *drive)
ide_finish_cmd(drive, cmd, stat);
else
ide_complete_rq(drive, 0,
- cmd->rq->nr_sectors << 9);
+ blk_rq_sectors(cmd->rq) << 9);
return ide_stopped;
}
printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
@@ -510,23 +510,11 @@ ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
/*
* un-busy drive etc and make sure request is sane
*/
-
rq = hwif->rq;
- if (!rq)
- goto out;
-
- hwif->rq = NULL;
-
- rq->errors = 0;
-
- if (!rq->bio)
- goto out;
-
- rq->sector = rq->bio->bi_sector;
- rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
- rq->hard_cur_sectors = rq->current_nr_sectors;
- rq->buffer = bio_data(rq->bio);
-out:
+ if (rq) {
+ hwif->rq = NULL;
+ rq->errors = 0;
+ }
return ret;
}
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 2b4868d95f8..650981758f1 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -134,13 +134,17 @@ static ide_startstop_t ide_floppy_issue_pc(ide_drive_t *drive,
drive->pc = pc;
if (pc->retries > IDEFLOPPY_MAX_PC_RETRIES) {
+ unsigned int done = blk_rq_bytes(drive->hwif->rq);
+
if (!(pc->flags & PC_FLAG_SUPPRESS_ERROR))
ide_floppy_report_error(floppy, pc);
+
/* Giving up */
pc->error = IDE_DRV_ERROR_GENERAL;
drive->failed_pc = NULL;
drive->pc_callback(drive, 0);
+ ide_complete_rq(drive, -EIO, done);
return ide_stopped;
}
@@ -190,7 +194,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
{
struct ide_disk_obj *floppy = drive->driver_data;
int block = sector / floppy->bs_factor;
- int blocks = rq->nr_sectors / floppy->bs_factor;
+ int blocks = blk_rq_sectors(rq) / floppy->bs_factor;
int cmd = rq_data_dir(rq);
ide_debug_log(IDE_DBG_FUNC, "block: %d, blocks: %d", block, blocks);
@@ -216,16 +220,14 @@ static void idefloppy_blockpc_cmd(struct ide_disk_obj *floppy,
ide_init_pc(pc);
memcpy(pc->c, rq->cmd, sizeof(pc->c));
pc->rq = rq;
- if (rq->data_len && rq_data_dir(rq) == WRITE)
- pc->flags |= PC_FLAG_WRITING;
- pc->buf = rq->data;
- if (rq->bio)
+ if (blk_rq_bytes(rq)) {
pc->flags |= PC_FLAG_DMA_OK;
- /*
- * possibly problematic, doesn't look like ide-floppy correctly
- * handled scattered requests if dma fails...
- */
- pc->req_xfer = pc->buf_size = rq->data_len;
+ if (rq_data_dir(rq) == WRITE)
+ pc->flags |= PC_FLAG_WRITING;
+ }
+ /* pio will be performed by ide_pio_bytes() which handles sg fine */
+ pc->buf = NULL;
+ pc->req_xfer = pc->buf_size = blk_rq_bytes(rq);
}
static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
@@ -257,16 +259,16 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
goto out_end;
}
if (blk_fs_request(rq)) {
- if (((long)rq->sector % floppy->bs_factor) ||
- (rq->nr_sectors % floppy->bs_factor)) {
+ if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
+ (blk_rq_sectors(rq) % floppy->bs_factor)) {
printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
drive->name);
goto out_end;
}
pc = &floppy->queued_pc;
idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
- } else if (blk_special_request(rq)) {
- pc = (struct ide_atapi_pc *) rq->buffer;
+ } else if (blk_special_request(rq) || blk_sense_request(rq)) {
+ pc = (struct ide_atapi_pc *)rq->special;
} else if (blk_pc_request(rq)) {
pc = &floppy->queued_pc;
idefloppy_blockpc_cmd(floppy, pc, rq);
@@ -275,6 +277,8 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
goto out_end;
}
+ ide_prep_sense(drive, rq);
+
memset(&cmd, 0, sizeof(cmd));
if (rq_data_dir(rq))
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 6415a2e2ba8..bba4297f2f0 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -116,9 +116,9 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
unsigned int ide_rq_bytes(struct request *rq)
{
if (blk_pc_request(rq))
- return rq->data_len;
+ return blk_rq_bytes(rq);
else
- return rq->hard_cur_sectors << 9;
+ return blk_rq_cur_sectors(rq) << 9;
}
EXPORT_SYMBOL_GPL(ide_rq_bytes);
@@ -133,7 +133,7 @@ int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
* and complete the whole request right now
*/
if (blk_noretry_request(rq) && error <= 0)
- nr_bytes = rq->hard_nr_sectors << 9;
+ nr_bytes = blk_rq_sectors(rq) << 9;
rc = ide_end_rq(drive, rq, error, nr_bytes);
if (rc == 0)
@@ -248,14 +248,7 @@ void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
struct scatterlist *sg = hwif->sg_table;
struct request *rq = cmd->rq;
- if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
- sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
- cmd->sg_nents = 1;
- } else if (!rq->bio) {
- sg_init_one(sg, rq->data, rq->data_len);
- cmd->sg_nents = 1;
- } else
- cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
+ cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
}
EXPORT_SYMBOL_GPL(ide_map_sg);
@@ -286,7 +279,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
if (cmd) {
if (cmd->protocol == ATA_PROT_PIO) {
- ide_init_sg_cmd(cmd, rq->nr_sectors << 9);
+ ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
ide_map_sg(drive, cmd);
}
@@ -371,7 +364,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
return execute_drive_cmd(drive, rq);
else if (blk_pm_request(rq)) {
- struct request_pm_state *pm = rq->data;
+ struct request_pm_state *pm = rq->special;
#ifdef DEBUG_PM
printk("%s: start_power_step(step: %d)\n",
drive->name, pm->pm_step);
@@ -394,7 +387,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
drv = *(struct ide_driver **)rq->rq_disk->private_data;
- return drv->do_request(drive, rq, rq->sector);
+ return drv->do_request(drive, rq, blk_rq_pos(rq));
}
return do_special(drive);
kill_rq:
@@ -484,6 +477,9 @@ void do_ide_request(struct request_queue *q)
spin_unlock_irq(q->queue_lock);
+ /* HLD do_request() callback might sleep, make sure it's okay */
+ might_sleep();
+
if (ide_lock_host(host, hwif))
goto plug_device_2;
@@ -491,10 +487,10 @@ void do_ide_request(struct request_queue *q)
if (!ide_lock_port(hwif)) {
ide_hwif_t *prev_port;
+
+ WARN_ON_ONCE(hwif->rq);
repeat:
prev_port = hwif->host->cur_port;
- hwif->rq = NULL;
-
if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
time_after(drive->sleep, jiffies)) {
ide_unlock_port(hwif);
@@ -523,7 +519,9 @@ repeat:
* we know that the queue isn't empty, but this can happen
* if the q->prep_rq_fn() decides to kill a request
*/
- rq = elv_next_request(drive->queue);
+ if (!rq)
+ rq = blk_fetch_request(drive->queue);
+
spin_unlock_irq(q->queue_lock);
spin_lock_irq(&hwif->lock);
@@ -535,7 +533,7 @@ repeat:
/*
* Sanity: don't accept a request that isn't a PM request
* if we are currently power managed. This is very important as
- * blk_stop_queue() doesn't prevent the elv_next_request()
+ * blk_stop_queue() doesn't prevent the blk_fetch_request()
* above from returning whatever is in the queue. Since we call
* ide_do_request() ourselves, we end up taking requests while
* the queue is blocked...
@@ -559,8 +557,11 @@ repeat:
startstop = start_request(drive, rq);
spin_lock_irq(&hwif->lock);
- if (startstop == ide_stopped)
+ if (startstop == ide_stopped) {
+ rq = hwif->rq;
+ hwif->rq = NULL;
goto repeat;
+ }
} else
goto plug_device;
out:
@@ -576,18 +577,24 @@ plug_device:
plug_device_2:
spin_lock_irq(q->queue_lock);
+ if (rq)
+ blk_requeue_request(q, rq);
if (!elv_queue_empty(q))
blk_plug_device(q);
}
-static void ide_plug_device(ide_drive_t *drive)
+static void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
{
struct request_queue *q = drive->queue;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
+
+ if (rq)
+ blk_requeue_request(q, rq);
if (!elv_queue_empty(q))
blk_plug_device(q);
+
spin_unlock_irqrestore(q->queue_lock, flags);
}
@@ -636,6 +643,7 @@ void ide_timer_expiry (unsigned long data)
unsigned long flags;
int wait = -1;
int plug_device = 0;
+ struct request *uninitialized_var(rq_in_flight);
spin_lock_irqsave(&hwif->lock, flags);
@@ -697,6 +705,8 @@ void ide_timer_expiry (unsigned long data)
spin_lock_irq(&hwif->lock);
enable_irq(hwif->irq);
if (startstop == ide_stopped && hwif->polling == 0) {
+ rq_in_flight = hwif->rq;
+ hwif->rq = NULL;
ide_unlock_port(hwif);
plug_device = 1;
}
@@ -705,7 +715,7 @@ void ide_timer_expiry (unsigned long data)
if (plug_device) {
ide_unlock_host(hwif->host);
- ide_plug_device(drive);
+ ide_requeue_and_plug(drive, rq_in_flight);
}
}
@@ -791,6 +801,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
ide_startstop_t startstop;
irqreturn_t irq_ret = IRQ_NONE;
int plug_device = 0;
+ struct request *uninitialized_var(rq_in_flight);
if (host->host_flags & IDE_HFLAG_SERIALIZE) {
if (hwif != host->cur_port)
@@ -870,6 +881,8 @@ irqreturn_t ide_intr (int irq, void *dev_id)
*/
if (startstop == ide_stopped && hwif->polling == 0) {
BUG_ON(hwif->handler);
+ rq_in_flight = hwif->rq;
+ hwif->rq = NULL;
ide_unlock_port(hwif);
plug_device = 1;
}
@@ -879,7 +892,7 @@ out:
out_early:
if (plug_device) {
ide_unlock_host(hwif->host);
- ide_plug_device(drive);
+ ide_requeue_and_plug(drive, rq_in_flight);
}
return irq_ret;
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index c1c25ebbaa1..5991b23793f 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -231,7 +231,6 @@ static int generic_drive_reset(ide_drive_t *drive)
rq->cmd_type = REQ_TYPE_SPECIAL;
rq->cmd_len = 1;
rq->cmd[0] = REQ_DRIVE_RESET;
- rq->cmd_flags |= REQ_SOFTBARRIER;
if (blk_execute_rq(drive->queue, NULL, rq, 1))
ret = rq->errors;
blk_put_request(rq);
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 2148df836ce..e386a32dc9b 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -96,7 +96,7 @@ static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
if (rq)
printk(KERN_CONT ", sector=%llu",
- (unsigned long long)rq->sector);
+ (unsigned long long)blk_rq_pos(rq));
}
printk(KERN_CONT "\n");
}
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 310d03f2b5b..a914023d6d0 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -24,11 +24,8 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
start_queue = 1;
spin_unlock_irq(&hwif->lock);
- if (start_queue) {
- spin_lock_irq(q->queue_lock);
- blk_start_queueing(q);
- spin_unlock_irq(q->queue_lock);
- }
+ if (start_queue)
+ blk_run_queue(q);
return;
}
spin_unlock_irq(&hwif->lock);
diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
index 61111fd2713..39d4e01f5c9 100644
--- a/drivers/ide/ide-pci-generic.c
+++ b/drivers/ide/ide-pci-generic.c
@@ -33,6 +33,16 @@ static int ide_generic_all; /* Set to claim all devices */
module_param_named(all_generic_ide, ide_generic_all, bool, 0444);
MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE storage controllers.");
+static void netcell_quirkproc(ide_drive_t *drive)
+{
+ /* mark words 85-87 as valid */
+ drive->id[ATA_ID_CSF_DEFAULT] |= 0x4000;
+}
+
+static const struct ide_port_ops netcell_port_ops = {
+ .quirkproc = netcell_quirkproc,
+};
+
#define DECLARE_GENERIC_PCI_DEV(extra_flags) \
{ \
.name = DRV_NAME, \
@@ -74,6 +84,7 @@ static const struct ide_port_info generic_chipsets[] __devinitdata = {
{ /* 6: Revolution */
.name = DRV_NAME,
+ .port_ops = &netcell_port_ops,
.host_flags = IDE_HFLAG_CLEAR_SIMPLEX |
IDE_HFLAG_TRUST_BIOS_FOR_DMA |
IDE_HFLAG_OFF_BOARD,
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 0d8a151c0a0..ba1488bd843 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -7,7 +7,6 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
ide_hwif_t *hwif = drive->hwif;
struct request *rq;
struct request_pm_state rqpm;
- struct ide_cmd cmd;
int ret;
/* call ACPI _GTM only once */
@@ -15,11 +14,9 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
ide_acpi_get_timing(hwif);
memset(&rqpm, 0, sizeof(rqpm));
- memset(&cmd, 0, sizeof(cmd));
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_PM_SUSPEND;
- rq->special = &cmd;
- rq->data = &rqpm;
+ rq->special = &rqpm;
rqpm.pm_step = IDE_PM_START_SUSPEND;
if (mesg.event == PM_EVENT_PRETHAW)
mesg.event = PM_EVENT_FREEZE;
@@ -41,7 +38,6 @@ int generic_ide_resume(struct device *dev)
ide_hwif_t *hwif = drive->hwif;
struct request *rq;
struct request_pm_state rqpm;
- struct ide_cmd cmd;
int err;
/* call ACPI _PS0 / _STM only once */
@@ -53,12 +49,10 @@ int generic_ide_resume(struct device *dev)
ide_acpi_exec_tfs(drive);
memset(&rqpm, 0, sizeof(rqpm));
- memset(&cmd, 0, sizeof(cmd));
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_PM_RESUME;
rq->cmd_flags |= REQ_PREEMPT;
- rq->special = &cmd;
- rq->data = &rqpm;
+ rq->special = &rqpm;
rqpm.pm_step = IDE_PM_START_RESUME;
rqpm.pm_state = PM_EVENT_ON;
@@ -77,7 +71,7 @@ int generic_ide_resume(struct device *dev)
void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
{
- struct request_pm_state *pm = rq->data;
+ struct request_pm_state *pm = rq->special;
#ifdef DEBUG_PM
printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
@@ -107,10 +101,8 @@ void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
- struct request_pm_state *pm = rq->data;
- struct ide_cmd *cmd = rq->special;
-
- memset(cmd, 0, sizeof(*cmd));
+ struct request_pm_state *pm = rq->special;
+ struct ide_cmd cmd = { };
switch (pm->pm_step) {
case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
@@ -123,12 +115,12 @@ ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
return ide_stopped;
}
if (ata_id_flush_ext_enabled(drive->id))
- cmd->tf.command = ATA_CMD_FLUSH_EXT;
+ cmd.tf.command = ATA_CMD_FLUSH_EXT;
else
- cmd->tf.command = ATA_CMD_FLUSH;
+ cmd.tf.command = ATA_CMD_FLUSH;
goto out_do_tf;
case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
- cmd->tf.command = ATA_CMD_STANDBYNOW1;
+ cmd.tf.command = ATA_CMD_STANDBYNOW1;
goto out_do_tf;
case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
ide_set_max_pio(drive);
@@ -141,7 +133,7 @@ ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
ide_complete_power_step(drive, rq);
return ide_stopped;
case IDE_PM_IDLE: /* Resume step 2 (idle) */
- cmd->tf.command = ATA_CMD_IDLEIMMEDIATE;
+ cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
goto out_do_tf;
case IDE_PM_RESTORE_DMA: /* Resume step 3 (restore DMA) */
/*
@@ -163,11 +155,11 @@ ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
return ide_stopped;
out_do_tf:
- cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd->valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
- cmd->protocol = ATA_PROT_NODATA;
+ cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
+ cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
+ cmd.protocol = ATA_PROT_NODATA;
- return do_rw_taskfile(drive, cmd);
+ return do_rw_taskfile(drive, &cmd);
}
/**
@@ -181,7 +173,7 @@ out_do_tf:
void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
{
struct request_queue *q = drive->queue;
- struct request_pm_state *pm = rq->data;
+ struct request_pm_state *pm = rq->special;
unsigned long flags;
ide_complete_power_step(drive, rq);
@@ -207,7 +199,7 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
- struct request_pm_state *pm = rq->data;
+ struct request_pm_state *pm = rq->special;
if (blk_pm_suspend_request(rq) &&
pm->pm_step == IDE_PM_START_SUSPEND)
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 3a53e0834cf..d9764f0bc82 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -131,13 +131,6 @@ enum {
IDETAPE_DIR_WRITE = (1 << 2),
};
-struct idetape_bh {
- u32 b_size;
- atomic_t b_count;
- struct idetape_bh *b_reqnext;
- char *b_data;
-};
-
/* Tape door status */
#define DOOR_UNLOCKED 0
#define DOOR_LOCKED 1
@@ -219,18 +212,12 @@ typedef struct ide_tape_obj {
/* Data buffer size chosen based on the tape's recommendation */
int buffer_size;
- /* merge buffer */
- struct idetape_bh *merge_bh;
- /* size of the merge buffer */
- int merge_bh_size;
- /* pointer to current buffer head within the merge buffer */
- struct idetape_bh *bh;
- char *b_data;
- int b_count;
-
- int pages_per_buffer;
- /* Wasted space in each stage */
- int excess_bh_size;
+ /* Staging buffer of buffer_size bytes */
+ void *buf;
+ /* The read/write cursor */
+ void *cur;
+ /* The number of valid bytes in buf */
+ size_t valid;
/* Measures average tape speed */
unsigned long avg_time;
@@ -297,84 +284,6 @@ static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
return tape;
}
-static int idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
- unsigned int bcount)
-{
- struct idetape_bh *bh = pc->bh;
- int count;
-
- while (bcount) {
- if (bh == NULL)
- break;
- count = min(
- (unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
- bcount);
- drive->hwif->tp_ops->input_data(drive, NULL, bh->b_data +
- atomic_read(&bh->b_count), count);
- bcount -= count;
- atomic_add(count, &bh->b_count);
- if (atomic_read(&bh->b_count) == bh->b_size) {
- bh = bh->b_reqnext;
- if (bh)
- atomic_set(&bh->b_count, 0);
- }
- }
-
- pc->bh = bh;
-
- return bcount;
-}
-
-static int idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
- unsigned int bcount)
-{
- struct idetape_bh *bh = pc->bh;
- int count;
-
- while (bcount) {
- if (bh == NULL)
- break;
- count = min((unsigned int)pc->b_count, (unsigned int)bcount);
- drive->hwif->tp_ops->output_data(drive, NULL, pc->b_data, count);
- bcount -= count;
- pc->b_data += count;
- pc->b_count -= count;
- if (!pc->b_count) {
- bh = bh->b_reqnext;
- pc->bh = bh;
- if (bh) {
- pc->b_data = bh->b_data;
- pc->b_count = atomic_read(&bh->b_count);
- }
- }
- }
-
- return bcount;
-}
-
-static void idetape_update_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc)
-{
- struct idetape_bh *bh = pc->bh;
- int count;
- unsigned int bcount = pc->xferred;
-
- if (pc->flags & PC_FLAG_WRITING)
- return;
- while (bcount) {
- if (bh == NULL) {
- printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
- __func__);
- return;
- }
- count = min((unsigned int)bh->b_size, (unsigned int)bcount);
- atomic_set(&bh->b_count, count);
- if (atomic_read(&bh->b_count) == bh->b_size)
- bh = bh->b_reqnext;
- bcount -= count;
- }
- pc->bh = bh;
-}
-
/*
* called on each failed packet command retry to analyze the request sense. We
* currently do not utilize this information.
@@ -392,12 +301,10 @@ static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
pc->c[0], tape->sense_key, tape->asc, tape->ascq);
/* Correct pc->xferred by asking the tape. */
- if (pc->flags & PC_FLAG_DMA_ERROR) {
+ if (pc->flags & PC_FLAG_DMA_ERROR)
pc->xferred = pc->req_xfer -
tape->blk_size *
get_unaligned_be32(&sense[3]);
- idetape_update_buffers(drive, pc);
- }
/*
* If error was the result of a zero-length read or write command,
@@ -436,29 +343,6 @@ static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
}
}
-/* Free data buffers completely. */
-static void ide_tape_kfree_buffer(idetape_tape_t *tape)
-{
- struct idetape_bh *prev_bh, *bh = tape->merge_bh;
-
- while (bh) {
- u32 size = bh->b_size;
-
- while (size) {
- unsigned int order = fls(size >> PAGE_SHIFT)-1;
-
- if (bh->b_data)
- free_pages((unsigned long)bh->b_data, order);
-
- size &= (order-1);
- bh->b_data += (1 << order) * PAGE_SIZE;
- }
- prev_bh = bh;
- bh = bh->b_reqnext;
- kfree(prev_bh);
- }
-}
-
static void ide_tape_handle_dsc(ide_drive_t *);
static int ide_tape_callback(ide_drive_t *drive, int dsc)
@@ -496,7 +380,7 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc)
}
tape->first_frame += blocks;
- rq->current_nr_sectors -= blocks;
+ rq->resid_len -= blocks * tape->blk_size;
if (pc->error) {
uptodate = 0;
@@ -558,19 +442,6 @@ static void ide_tape_handle_dsc(ide_drive_t *drive)
idetape_postpone_request(drive);
}
-static int ide_tape_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
- unsigned int bcount, int write)
-{
- unsigned int bleft;
-
- if (write)
- bleft = idetape_output_buffers(drive, pc, bcount);
- else
- bleft = idetape_input_buffers(drive, pc, bcount);
-
- return bcount - bleft;
-}
-
/*
* Packet Command Interface
*
@@ -622,6 +493,8 @@ static ide_startstop_t ide_tape_issue_pc(ide_drive_t *drive,
if (pc->retries > IDETAPE_MAX_PC_RETRIES ||
(pc->flags & PC_FLAG_ABORT)) {
+ unsigned int done = blk_rq_bytes(drive->hwif->rq);
+
/*
* We will "abort" retrying a packet command in case legitimate
* error code was received (crossing a filemark, or end of the
@@ -641,8 +514,10 @@ static ide_startstop_t ide_tape_issue_pc(ide_drive_t *drive,
/* Giving up */
pc->error = IDE_DRV_ERROR_GENERAL;
}
+
drive->failed_pc = NULL;
drive->pc_callback(drive, 0);
+ ide_complete_rq(drive, -EIO, done);
return ide_stopped;
}
debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
@@ -695,7 +570,7 @@ static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
printk(KERN_ERR "ide-tape: %s: I/O error, ",
tape->name);
/* Retry operation */
- ide_retry_pc(drive, tape->disk);
+ ide_retry_pc(drive);
return ide_stopped;
}
pc->error = 0;
@@ -711,27 +586,22 @@ static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
struct ide_atapi_pc *pc, struct request *rq,
u8 opcode)
{
- struct idetape_bh *bh = (struct idetape_bh *)rq->special;
- unsigned int length = rq->current_nr_sectors;
+ unsigned int length = blk_rq_sectors(rq);
ide_init_pc(pc);
put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
pc->c[1] = 1;
- pc->bh = bh;
pc->buf = NULL;
pc->buf_size = length * tape->blk_size;
pc->req_xfer = pc->buf_size;
if (pc->req_xfer == tape->buffer_size)
pc->flags |= PC_FLAG_DMA_OK;
- if (opcode == READ_6) {
+ if (opcode == READ_6)
pc->c[0] = READ_6;
- atomic_set(&bh->b_count, 0);
- } else if (opcode == WRITE_6) {
+ else if (opcode == WRITE_6) {
pc->c[0] = WRITE_6;
pc->flags |= PC_FLAG_WRITING;
- pc->b_data = bh->b_data;
- pc->b_count = atomic_read(&bh->b_count);
}
memcpy(rq->cmd, pc->c, 12);
@@ -747,12 +617,10 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
struct ide_cmd cmd;
u8 stat;
- debug_log(DBG_SENSE, "sector: %llu, nr_sectors: %lu,"
- " current_nr_sectors: %u\n",
- (unsigned long long)rq->sector, rq->nr_sectors,
- rq->current_nr_sectors);
+ debug_log(DBG_SENSE, "sector: %llu, nr_sectors: %u\n"
+ (unsigned long long)blk_rq_pos(rq), blk_rq_sectors(rq));
- if (!blk_special_request(rq)) {
+ if (!(blk_special_request(rq) || blk_sense_request(rq))) {
/* We do not support buffer cache originated requests. */
printk(KERN_NOTICE "ide-tape: %s: Unsupported request in "
"request queue (%d)\n", drive->name, rq->cmd_type);
@@ -828,7 +696,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
goto out;
}
if (rq->cmd[13] & REQ_IDETAPE_PC1) {
- pc = (struct ide_atapi_pc *) rq->buffer;
+ pc = (struct ide_atapi_pc *)rq->special;
rq->cmd[13] &= ~(REQ_IDETAPE_PC1);
rq->cmd[13] |= REQ_IDETAPE_PC2;
goto out;
@@ -840,6 +708,9 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
BUG();
out:
+ /* prepare sense request for this command */
+ ide_prep_sense(drive, rq);
+
memset(&cmd, 0, sizeof(cmd));
if (rq_data_dir(rq))
@@ -847,167 +718,10 @@ out:
cmd.rq = rq;
- return ide_tape_issue_pc(drive, &cmd, pc);
-}
-
-/*
- * The function below uses __get_free_pages to allocate a data buffer of size
- * tape->buffer_size (or a bit more). We attempt to combine sequential pages as
- * much as possible.
- *
- * It returns a pointer to the newly allocated buffer, or NULL in case of
- * failure.
- */
-static struct idetape_bh *ide_tape_kmalloc_buffer(idetape_tape_t *tape,
- int full, int clear)
-{
- struct idetape_bh *prev_bh, *bh, *merge_bh;
- int pages = tape->pages_per_buffer;
- unsigned int order, b_allocd;
- char *b_data = NULL;
-
- merge_bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
- bh = merge_bh;
- if (bh == NULL)
- goto abort;
-
- order = fls(pages) - 1;
- bh->b_data = (char *) __get_free_pages(GFP_KERNEL, order);
- if (!bh->b_data)
- goto abort;
- b_allocd = (1 << order) * PAGE_SIZE;
- pages &= (order-1);
-
- if (clear)
- memset(bh->b_data, 0, b_allocd);
- bh->b_reqnext = NULL;
- bh->b_size = b_allocd;
- atomic_set(&bh->b_count, full ? bh->b_size : 0);
-
- while (pages) {
- order = fls(pages) - 1;
- b_data = (char *) __get_free_pages(GFP_KERNEL, order);
- if (!b_data)
- goto abort;
- b_allocd = (1 << order) * PAGE_SIZE;
-
- if (clear)
- memset(b_data, 0, b_allocd);
-
- /* newly allocated page frames below buffer header or ...*/
- if (bh->b_data == b_data + b_allocd) {
- bh->b_size += b_allocd;
- bh->b_data -= b_allocd;
- if (full)
- atomic_add(b_allocd, &bh->b_count);
- continue;
- }
- /* they are above the header */
- if (b_data == bh->b_data + bh->b_size) {
- bh->b_size += b_allocd;
- if (full)
- atomic_add(b_allocd, &bh->b_count);
- continue;
- }
- prev_bh = bh;
- bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
- if (!bh) {
- free_pages((unsigned long) b_data, order);
- goto abort;
- }
- bh->b_reqnext = NULL;
- bh->b_data = b_data;
- bh->b_size = b_allocd;
- atomic_set(&bh->b_count, full ? bh->b_size : 0);
- prev_bh->b_reqnext = bh;
-
- pages &= (order-1);
- }
-
- bh->b_size -= tape->excess_bh_size;
- if (full)
- atomic_sub(tape->excess_bh_size, &bh->b_count);
- return merge_bh;
-abort:
- ide_tape_kfree_buffer(tape);
- return NULL;
-}
+ ide_init_sg_cmd(&cmd, pc->req_xfer);
+ ide_map_sg(drive, &cmd);
-static int idetape_copy_stage_from_user(idetape_tape_t *tape,
- const char __user *buf, int n)
-{
- struct idetape_bh *bh = tape->bh;
- int count;
- int ret = 0;
-
- while (n) {
- if (bh == NULL) {
- printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
- __func__);
- return 1;
- }
- count = min((unsigned int)
- (bh->b_size - atomic_read(&bh->b_count)),
- (unsigned int)n);
- if (copy_from_user(bh->b_data + atomic_read(&bh->b_count), buf,
- count))
- ret = 1;
- n -= count;
- atomic_add(count, &bh->b_count);
- buf += count;
- if (atomic_read(&bh->b_count) == bh->b_size) {
- bh = bh->b_reqnext;
- if (bh)
- atomic_set(&bh->b_count, 0);
- }
- }
- tape->bh = bh;
- return ret;
-}
-
-static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
- int n)
-{
- struct idetape_bh *bh = tape->bh;
- int count;
- int ret = 0;
-
- while (n) {
- if (bh == NULL) {
- printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
- __func__);
- return 1;
- }
- count = min(tape->b_count, n);
- if (copy_to_user(buf, tape->b_data, count))
- ret = 1;
- n -= count;
- tape->b_data += count;
- tape->b_count -= count;
- buf += count;
- if (!tape->b_count) {
- bh = bh->b_reqnext;
- tape->bh = bh;
- if (bh) {
- tape->b_data = bh->b_data;
- tape->b_count = atomic_read(&bh->b_count);
- }
- }
- }
- return ret;
-}
-
-static void idetape_init_merge_buffer(idetape_tape_t *tape)
-{
- struct idetape_bh *bh = tape->merge_bh;
- tape->bh = tape->merge_bh;
-
- if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
- atomic_set(&bh->b_count, 0);
- else {
- tape->b_data = bh->b_data;
- tape->b_count = atomic_read(&bh->b_count);
- }
+ return ide_tape_issue_pc(drive, &cmd, pc);
}
/*
@@ -1107,10 +821,10 @@ static void __ide_tape_discard_merge_buffer(ide_drive_t *drive)
return;
clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags);
- tape->merge_bh_size = 0;
- if (tape->merge_bh != NULL) {
- ide_tape_kfree_buffer(tape);
- tape->merge_bh = NULL;
+ tape->valid = 0;
+ if (tape->buf != NULL) {
+ kfree(tape->buf);
+ tape->buf = NULL;
}
tape->chrdev_dir = IDETAPE_DIR_NONE;
@@ -1164,36 +878,44 @@ static void ide_tape_discard_merge_buffer(ide_drive_t *drive,
* Generate a read/write request for the block device interface and wait for it
* to be serviced.
*/
-static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
- struct idetape_bh *bh)
+static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
{
idetape_tape_t *tape = drive->driver_data;
struct request *rq;
- int ret, errors;
+ int ret;
debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
+ BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE);
+ BUG_ON(size < 0 || size % tape->blk_size);
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_SPECIAL;
rq->cmd[13] = cmd;
rq->rq_disk = tape->disk;
- rq->special = (void *)bh;
- rq->sector = tape->first_frame;
- rq->nr_sectors = blocks;
- rq->current_nr_sectors = blocks;
- blk_execute_rq(drive->queue, tape->disk, rq, 0);
+ rq->__sector = tape->first_frame;
- errors = rq->errors;
- ret = tape->blk_size * (blocks - rq->current_nr_sectors);
- blk_put_request(rq);
+ if (size) {
+ ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size,
+ __GFP_WAIT);
+ if (ret)
+ goto out_put;
+ }
- if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0)
- return 0;
+ blk_execute_rq(drive->queue, tape->disk, rq, 0);
- if (tape->merge_bh)
- idetape_init_merge_buffer(tape);
- if (errors == IDE_DRV_ERROR_GENERAL)
- return -EIO;
+ /* calculate the number of transferred bytes and update buffer state */
+ size -= rq->resid_len;
+ tape->cur = tape->buf;
+ if (cmd == REQ_IDETAPE_READ)
+ tape->valid = size;
+ else
+ tape->valid = 0;
+
+ ret = size;
+ if (rq->errors == IDE_DRV_ERROR_GENERAL)
+ ret = -EIO;
+out_put:
+ blk_put_request(rq);
return ret;
}
@@ -1230,153 +952,87 @@ static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
pc->flags |= PC_FLAG_WAIT_FOR_DSC;
}
-/* Queue up a character device originated write request. */
-static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
-{
- idetape_tape_t *tape = drive->driver_data;
-
- debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
-
- return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
- blocks, tape->merge_bh);
-}
-
static void ide_tape_flush_merge_buffer(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
- int blocks, min;
- struct idetape_bh *bh;
if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
printk(KERN_ERR "ide-tape: bug: Trying to empty merge buffer"
" but we are not writing.\n");
return;
}
- if (tape->merge_bh_size > tape->buffer_size) {
- printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n");
- tape->merge_bh_size = tape->buffer_size;
- }
- if (tape->merge_bh_size) {
- blocks = tape->merge_bh_size / tape->blk_size;
- if (tape->merge_bh_size % tape->blk_size) {
- unsigned int i;
-
- blocks++;
- i = tape->blk_size - tape->merge_bh_size %
- tape->blk_size;
- bh = tape->bh->b_reqnext;
- while (bh) {
- atomic_set(&bh->b_count, 0);
- bh = bh->b_reqnext;
- }
- bh = tape->bh;
- while (i) {
- if (bh == NULL) {
- printk(KERN_INFO "ide-tape: bug,"
- " bh NULL\n");
- break;
- }
- min = min(i, (unsigned int)(bh->b_size -
- atomic_read(&bh->b_count)));
- memset(bh->b_data + atomic_read(&bh->b_count),
- 0, min);
- atomic_add(min, &bh->b_count);
- i -= min;
- bh = bh->b_reqnext;
- }
- }
- (void) idetape_add_chrdev_write_request(drive, blocks);
- tape->merge_bh_size = 0;
- }
- if (tape->merge_bh != NULL) {
- ide_tape_kfree_buffer(tape);
- tape->merge_bh = NULL;
+ if (tape->buf) {
+ size_t aligned = roundup(tape->valid, tape->blk_size);
+
+ memset(tape->cur, 0, aligned - tape->valid);
+ idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, aligned);
+ kfree(tape->buf);
+ tape->buf = NULL;
}
tape->chrdev_dir = IDETAPE_DIR_NONE;
}
-static int idetape_init_read(ide_drive_t *drive)
+static int idetape_init_rw(ide_drive_t *drive, int dir)
{
idetape_tape_t *tape = drive->driver_data;
- int bytes_read;
+ int rc;
- /* Initialize read operation */
- if (tape->chrdev_dir != IDETAPE_DIR_READ) {
- if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
- ide_tape_flush_merge_buffer(drive);
- idetape_flush_tape_buffers(drive);
- }
- if (tape->merge_bh || tape->merge_bh_size) {
- printk(KERN_ERR "ide-tape: merge_bh_size should be"
- " 0 now\n");
- tape->merge_bh_size = 0;
- }
- tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0);
- if (!tape->merge_bh)
- return -ENOMEM;
- tape->chrdev_dir = IDETAPE_DIR_READ;
+ BUG_ON(dir != IDETAPE_DIR_READ && dir != IDETAPE_DIR_WRITE);
- /*
- * Issue a read 0 command to ensure that DSC handshake is
- * switched from completion mode to buffer available mode.
- * No point in issuing this if DSC overlap isn't supported, some
- * drives (Seagate STT3401A) will return an error.
- */
- if (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) {
- bytes_read = idetape_queue_rw_tail(drive,
- REQ_IDETAPE_READ, 0,
- tape->merge_bh);
- if (bytes_read < 0) {
- ide_tape_kfree_buffer(tape);
- tape->merge_bh = NULL;
- tape->chrdev_dir = IDETAPE_DIR_NONE;
- return bytes_read;
- }
- }
- }
+ if (tape->chrdev_dir == dir)
+ return 0;
- return 0;
-}
+ if (tape->chrdev_dir == IDETAPE_DIR_READ)
+ ide_tape_discard_merge_buffer(drive, 1);
+ else if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
+ ide_tape_flush_merge_buffer(drive);
+ idetape_flush_tape_buffers(drive);
+ }
-/* called from idetape_chrdev_read() to service a chrdev read request. */
-static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
-{
- idetape_tape_t *tape = drive->driver_data;
+ if (tape->buf || tape->valid) {
+ printk(KERN_ERR "ide-tape: valid should be 0 now\n");
+ tape->valid = 0;
+ }
- debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
+ tape->buf = kmalloc(tape->buffer_size, GFP_KERNEL);
+ if (!tape->buf)
+ return -ENOMEM;
+ tape->chrdev_dir = dir;
+ tape->cur = tape->buf;
- /* If we are at a filemark, return a read length of 0 */
- if (test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
- return 0;
-
- idetape_init_read(drive);
+ /*
+ * Issue a 0 rw command to ensure that DSC handshake is
+ * switched from completion mode to buffer available mode. No
+ * point in issuing this if DSC overlap isn't supported, some
+ * drives (Seagate STT3401A) will return an error.
+ */
+ if (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) {
+ int cmd = dir == IDETAPE_DIR_READ ? REQ_IDETAPE_READ
+ : REQ_IDETAPE_WRITE;
+
+ rc = idetape_queue_rw_tail(drive, cmd, 0);
+ if (rc < 0) {
+ kfree(tape->buf);
+ tape->buf = NULL;
+ tape->chrdev_dir = IDETAPE_DIR_NONE;
+ return rc;
+ }
+ }
- return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
- tape->merge_bh);
+ return 0;
}
static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
{
idetape_tape_t *tape = drive->driver_data;
- struct idetape_bh *bh;
- int blocks;
+
+ memset(tape->buf, 0, tape->buffer_size);
while (bcount) {
- unsigned int count;
+ unsigned int count = min(tape->buffer_size, bcount);
- bh = tape->merge_bh;
- count = min(tape->buffer_size, bcount);
+ idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, count);
bcount -= count;
- blocks = count / tape->blk_size;
- while (count) {
- atomic_set(&bh->b_count,
- min(count, (unsigned int)bh->b_size));
- memset(bh->b_data, 0, atomic_read(&bh->b_count));
- count -= atomic_read(&bh->b_count);
- bh = bh->b_reqnext;
- }
- idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
- tape->merge_bh);
}
}
@@ -1456,7 +1112,7 @@ static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
}
if (tape->chrdev_dir == IDETAPE_DIR_READ) {
- tape->merge_bh_size = 0;
+ tape->valid = 0;
if (test_and_clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
++count;
ide_tape_discard_merge_buffer(drive, 0);
@@ -1505,9 +1161,9 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
{
struct ide_tape_obj *tape = file->private_data;
ide_drive_t *drive = tape->drive;
- ssize_t bytes_read, temp, actually_read = 0, rc;
+ size_t done = 0;
ssize_t ret = 0;
- u16 ctl = *(u16 *)&tape->caps[12];
+ int rc;
debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
@@ -1517,49 +1173,43 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
(count % tape->blk_size) == 0)
tape->user_bs_factor = count / tape->blk_size;
}
- rc = idetape_init_read(drive);
+
+ rc = idetape_init_rw(drive, IDETAPE_DIR_READ);
if (rc < 0)
return rc;
- if (count == 0)
- return (0);
- if (tape->merge_bh_size) {
- actually_read = min((unsigned int)(tape->merge_bh_size),
- (unsigned int)count);
- if (idetape_copy_stage_to_user(tape, buf, actually_read))
- ret = -EFAULT;
- buf += actually_read;
- tape->merge_bh_size -= actually_read;
- count -= actually_read;
- }
- while (count >= tape->buffer_size) {
- bytes_read = idetape_add_chrdev_read_request(drive, ctl);
- if (bytes_read <= 0)
- goto finish;
- if (idetape_copy_stage_to_user(tape, buf, bytes_read))
- ret = -EFAULT;
- buf += bytes_read;
- count -= bytes_read;
- actually_read += bytes_read;
- }
- if (count) {
- bytes_read = idetape_add_chrdev_read_request(drive, ctl);
- if (bytes_read <= 0)
- goto finish;
- temp = min((unsigned long)count, (unsigned long)bytes_read);
- if (idetape_copy_stage_to_user(tape, buf, temp))
+
+ while (done < count) {
+ size_t todo;
+
+ /* refill if staging buffer is empty */
+ if (!tape->valid) {
+ /* If we are at a filemark, nothing more to read */
+ if (test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
+ break;
+ /* read */
+ if (idetape_queue_rw_tail(drive, REQ_IDETAPE_READ,
+ tape->buffer_size) <= 0)
+ break;
+ }
+
+ /* copy out */
+ todo = min_t(size_t, count - done, tape->valid);
+ if (copy_to_user(buf + done, tape->cur, todo))
ret = -EFAULT;
- actually_read += temp;
- tape->merge_bh_size = bytes_read-temp;
+
+ tape->cur += todo;
+ tape->valid -= todo;
+ done += todo;
}
-finish:
- if (!actually_read && test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) {
+
+ if (!done && test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) {
debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
idetape_space_over_filemarks(drive, MTFSF, 1);
return 0;
}
- return ret ? ret : actually_read;
+ return ret ? ret : done;
}
static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
@@ -1567,9 +1217,9 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
{
struct ide_tape_obj *tape = file->private_data;
ide_drive_t *drive = tape->drive;
- ssize_t actually_written = 0;
+ size_t done = 0;
ssize_t ret = 0;
- u16 ctl = *(u16 *)&tape->caps[12];
+ int rc;
/* The drive is write protected. */
if (tape->write_prot)
@@ -1578,80 +1228,31 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
/* Initialize write operation */
- if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
- if (tape->chrdev_dir == IDETAPE_DIR_READ)
- ide_tape_discard_merge_buffer(drive, 1);
- if (tape->merge_bh || tape->merge_bh_size) {
- printk(KERN_ERR "ide-tape: merge_bh_size "
- "should be 0 now\n");
- tape->merge_bh_size = 0;
- }
- tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0);
- if (!tape->merge_bh)
- return -ENOMEM;
- tape->chrdev_dir = IDETAPE_DIR_WRITE;
- idetape_init_merge_buffer(tape);
+ rc = idetape_init_rw(drive, IDETAPE_DIR_WRITE);
+ if (rc < 0)
+ return rc;
- /*
- * Issue a write 0 command to ensure that DSC handshake is
- * switched from completion mode to buffer available mode. No
- * point in issuing this if DSC overlap isn't supported, some
- * drives (Seagate STT3401A) will return an error.
- */
- if (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) {
- ssize_t retval = idetape_queue_rw_tail(drive,
- REQ_IDETAPE_WRITE, 0,
- tape->merge_bh);
- if (retval < 0) {
- ide_tape_kfree_buffer(tape);
- tape->merge_bh = NULL;
- tape->chrdev_dir = IDETAPE_DIR_NONE;
- return retval;
- }
- }
- }
- if (count == 0)
- return (0);
- if (tape->merge_bh_size) {
- if (tape->merge_bh_size >= tape->buffer_size) {
- printk(KERN_ERR "ide-tape: bug: merge buf too big\n");
- tape->merge_bh_size = 0;
- }
- actually_written = min((unsigned int)
- (tape->buffer_size - tape->merge_bh_size),
- (unsigned int)count);
- if (idetape_copy_stage_from_user(tape, buf, actually_written))
- ret = -EFAULT;
- buf += actually_written;
- tape->merge_bh_size += actually_written;
- count -= actually_written;
-
- if (tape->merge_bh_size == tape->buffer_size) {
- ssize_t retval;
- tape->merge_bh_size = 0;
- retval = idetape_add_chrdev_write_request(drive, ctl);
- if (retval <= 0)
- return (retval);
- }
- }
- while (count >= tape->buffer_size) {
- ssize_t retval;
- if (idetape_copy_stage_from_user(tape, buf, tape->buffer_size))
- ret = -EFAULT;
- buf += tape->buffer_size;
- count -= tape->buffer_size;
- retval = idetape_add_chrdev_write_request(drive, ctl);
- actually_written += tape->buffer_size;
- if (retval <= 0)
- return (retval);
- }
- if (count) {
- actually_written += count;
- if (idetape_copy_stage_from_user(tape, buf, count))
+ while (done < count) {
+ size_t todo;
+
+ /* flush if staging buffer is full */
+ if (tape->valid == tape->buffer_size &&
+ idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
+ tape->buffer_size) <= 0)
+ return rc;
+
+ /* copy in */
+ todo = min_t(size_t, count - done,
+ tape->buffer_size - tape->valid);
+ if (copy_from_user(tape->cur, buf + done, todo))
ret = -EFAULT;
- tape->merge_bh_size += count;
+
+ tape->cur += todo;
+ tape->valid += todo;
+ done += todo;
}
- return ret ? ret : actually_written;
+
+ return ret ? ret : done;
}
static int idetape_write_filemark(ide_drive_t *drive)
@@ -1812,7 +1413,7 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
idetape_flush_tape_buffers(drive);
}
if (cmd == MTIOCGET || cmd == MTIOCPOS) {
- block_offset = tape->merge_bh_size /
+ block_offset = tape->valid /
(tape->blk_size * tape->user_bs_factor);
position = idetape_read_position(drive);
if (position < 0)
@@ -1960,12 +1561,12 @@ static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
idetape_tape_t *tape = drive->driver_data;
ide_tape_flush_merge_buffer(drive);
- tape->merge_bh = ide_tape_kmalloc_buffer(tape, 1, 0);
- if (tape->merge_bh != NULL) {
+ tape->buf = kmalloc(tape->buffer_size, GFP_KERNEL);
+ if (tape->buf != NULL) {
idetape_pad_zeros(drive, tape->blk_size *
(tape->user_bs_factor - 1));
- ide_tape_kfree_buffer(tape);
- tape->merge_bh = NULL;
+ kfree(tape->buf);
+ tape->buf = NULL;
}
idetape_write_filemark(drive);
idetape_flush_tape_buffers(drive);
@@ -2159,8 +1760,6 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
u16 *ctl = (u16 *)&tape->caps[12];
drive->pc_callback = ide_tape_callback;
- drive->pc_update_buffers = idetape_update_buffers;
- drive->pc_io_buffers = ide_tape_io_buffers;
drive->dev_flags |= IDE_DFLAG_DSC_OVERLAP;
@@ -2191,11 +1790,6 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
tape->buffer_size = *ctl * tape->blk_size;
}
buffer_size = tape->buffer_size;
- tape->pages_per_buffer = buffer_size / PAGE_SIZE;
- if (buffer_size % PAGE_SIZE) {
- tape->pages_per_buffer++;
- tape->excess_bh_size = PAGE_SIZE - buffer_size % PAGE_SIZE;
- }
/* select the "best" DSC read/write polling freq */
speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);
@@ -2238,7 +1832,7 @@ static void ide_tape_release(struct device *dev)
ide_drive_t *drive = tape->drive;
struct gendisk *g = tape->disk;
- BUG_ON(tape->merge_bh_size);
+ BUG_ON(tape->valid);
drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
drive->driver_data = NULL;
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 4aa6223c11b..a0c3e1b2f73 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -385,7 +385,7 @@ out_end:
if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
ide_finish_cmd(drive, cmd, stat);
else
- ide_complete_rq(drive, 0, cmd->rq->nr_sectors << 9);
+ ide_complete_rq(drive, 0, blk_rq_sectors(cmd->rq) << 9);
return ide_stopped;
out_err:
ide_error_cmd(drive, cmd);
@@ -424,7 +424,9 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
- rq->buffer = buf;
+
+ if (cmd->tf_flags & IDE_TFLAG_WRITE)
+ rq->cmd_flags |= REQ_RW;
/*
* (ks) We transfer currently only whole sectors.
@@ -432,18 +434,20 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
* if we would find a solution to transfer any size.
* To support special commands like READ LONG.
*/
- rq->hard_nr_sectors = rq->nr_sectors = nsect;
- rq->hard_cur_sectors = rq->current_nr_sectors = nsect;
-
- if (cmd->tf_flags & IDE_TFLAG_WRITE)
- rq->cmd_flags |= REQ_RW;
+ if (nsect) {
+ error = blk_rq_map_kern(drive->queue, rq, buf,
+ nsect * SECTOR_SIZE, __GFP_WAIT);
+ if (error)
+ goto put_req;
+ }
rq->special = cmd;
cmd->rq = rq;
error = blk_execute_rq(drive->queue, NULL, rq, 0);
- blk_put_request(rq);
+put_req:
+ blk_put_request(rq);
return error;
}
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index 248a54bd238..e24ecc87a9b 100644
--- a/drivers/ide/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2006-2007 MontaVista Software, Inc.
+ * Copyright (C) 2006-2007, 2009 MontaVista Software, Inc.
* Copyright (C) 2007 Bartlomiej Zolnierkiewicz
*
* Portions Copyright (C) 1999 Promise Technology, Inc.
@@ -177,7 +177,7 @@ static void pdc202xx_dma_start(ide_drive_t *drive)
u8 clock = inb(high_16 + 0x11);
outb(clock | (hwif->channel ? 0x08 : 0x02), high_16 + 0x11);
- word_count = (rq->nr_sectors << 8);
+ word_count = (blk_rq_sectors(rq) << 8);
word_count = (rq_data_dir(rq) == READ) ?
word_count | 0x05000000 :
word_count | 0x06000000;
@@ -227,28 +227,19 @@ somebody_else:
return (dma_stat & 4) == 4; /* return 1 if INTR asserted */
}
-static void pdc202xx_reset_host (ide_hwif_t *hwif)
+static void pdc202xx_reset(ide_drive_t *drive)
{
+ ide_hwif_t *hwif = drive->hwif;
unsigned long high_16 = hwif->extra_base - 16;
u8 udma_speed_flag = inb(high_16 | 0x001f);
+ printk(KERN_WARNING "PDC202xx: software reset...\n");
+
outb(udma_speed_flag | 0x10, high_16 | 0x001f);
mdelay(100);
outb(udma_speed_flag & ~0x10, high_16 | 0x001f);
mdelay(2000); /* 2 seconds ?! */
- printk(KERN_WARNING "PDC202XX: %s channel reset.\n",
- hwif->channel ? "Secondary" : "Primary");
-}
-
-static void pdc202xx_reset (ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- ide_hwif_t *mate = hwif->mate;
-
- pdc202xx_reset_host(hwif);
- pdc202xx_reset_host(mate);
-
ide_set_max_pio(drive);
}
@@ -328,9 +319,8 @@ static const struct ide_dma_ops pdc20246_dma_ops = {
.dma_start = ide_dma_start,
.dma_end = ide_dma_end,
.dma_test_irq = pdc202xx_dma_test_irq,
- .dma_lost_irq = pdc202xx_dma_lost_irq,
+ .dma_lost_irq = ide_dma_lost_irq,
.dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_clear = pdc202xx_reset,
.dma_sff_read_status = ide_dma_sff_read_status,
};
diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
index b4cf42dc8a6..05a93d6baec 100644
--- a/drivers/ide/tc86c001.c
+++ b/drivers/ide/tc86c001.c
@@ -112,7 +112,7 @@ static void tc86c001_dma_start(ide_drive_t *drive)
ide_hwif_t *hwif = drive->hwif;
unsigned long sc_base = hwif->config_data;
unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
- unsigned long nsectors = hwif->rq->nr_sectors;
+ unsigned long nsectors = blk_rq_sectors(hwif->rq);
/*
* We have to manually load the sector count and size into
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index 564422d2397..5ca76224f6d 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -307,7 +307,7 @@ static int tx4939ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
tx4939ide_writew(SECTOR_SIZE / 2, base, drive->dn ?
TX4939IDE_Xfer_Cnt_2 : TX4939IDE_Xfer_Cnt_1);
- tx4939ide_writew(cmd->rq->nr_sectors, base, TX4939IDE_Sec_Cnt);
+ tx4939ide_writew(blk_rq_sectors(cmd->rq), base, TX4939IDE_Sec_Cnt);
return 0;
}
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
index bf740394d70..949c97ff57e 100644
--- a/drivers/idle/i7300_idle.c
+++ b/drivers/idle/i7300_idle.c
@@ -41,6 +41,10 @@ static int debug;
module_param_named(debug, debug, uint, 0644);
MODULE_PARM_DESC(debug, "Enable debug printks in this driver");
+static int forceload;
+module_param_named(forceload, forceload, uint, 0644);
+MODULE_PARM_DESC(forceload, "Enable driver testing on unvalidated i5000");
+
#define dprintk(fmt, arg...) \
do { if (debug) printk(KERN_INFO I7300_PRINT fmt, ##arg); } while (0)
@@ -552,7 +556,7 @@ static int __init i7300_idle_init(void)
cpus_clear(idle_cpumask);
total_us = 0;
- if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev))
+ if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
return -ENODEV;
if (i7300_idle_thrt_save())
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 823a6297a1a..2cd00b5b45b 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -1789,12 +1789,13 @@ static int dv1394_open(struct inode *inode, struct file *file)
} else {
/* look up the card by ID */
unsigned long flags;
+ int idx = ieee1394_file_to_instance(file);
spin_lock_irqsave(&dv1394_cards_lock, flags);
if (!list_empty(&dv1394_cards)) {
struct video_card *p;
list_for_each_entry(p, &dv1394_cards, list) {
- if ((p->id) == ieee1394_file_to_instance(file)) {
+ if ((p->id) == idx) {
video = p;
break;
}
@@ -1803,7 +1804,7 @@ static int dv1394_open(struct inode *inode, struct file *file)
spin_unlock_irqrestore(&dv1394_cards_lock, flags);
if (!video) {
- debug_printk("dv1394: OHCI card %d not found", ieee1394_file_to_instance(file));
+ debug_printk("dv1394: OHCI card %d not found", idx);
return -ENODEV;
}
diff --git a/drivers/ieee1394/ieee1394_core.h b/drivers/ieee1394/ieee1394_core.h
index 21d50f73a21..28b9f58bafd 100644
--- a/drivers/ieee1394/ieee1394_core.h
+++ b/drivers/ieee1394/ieee1394_core.h
@@ -5,6 +5,7 @@
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/types.h>
+#include <linux/cdev.h>
#include <asm/atomic.h>
#include "hosts.h"
@@ -155,7 +156,10 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
*/
static inline unsigned char ieee1394_file_to_instance(struct file *file)
{
- return file->f_path.dentry->d_inode->i_cindex;
+ int idx = cdev_index(file->f_path.dentry->d_inode);
+ if (idx < 0)
+ idx = 0;
+ return idx;
}
extern int hpsb_disable_irm;
diff --git a/drivers/input/input.c b/drivers/input/input.c
index e54e002665b..5d445f48789 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -42,6 +42,7 @@ static unsigned int input_abs_bypass_init_data[] __initdata = {
ABS_MT_POSITION_Y,
ABS_MT_TOOL_TYPE,
ABS_MT_BLOB_ID,
+ ABS_MT_TRACKING_ID,
0
};
static unsigned long input_abs_bypass[BITS_TO_LONGS(ABS_CNT)];
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 67248c31e19..be5bbbb8ae4 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -210,7 +210,7 @@ int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
timeout = wait_event_timeout(ps2dev->wait,
!(ps2dev->flags & PS2_FLAG_CMD1), timeout);
- if (ps2dev->cmdcnt && timeout > 0) {
+ if (ps2dev->cmdcnt && !(ps2dev->flags & PS2_FLAG_CMD1)) {
timeout = ps2_adjust_timeout(ps2dev, command, timeout);
wait_event_timeout(ps2dev->wait,
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index f100c7f4c1d..6954f550010 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -419,7 +419,7 @@ static int ucb1400_ts_remove(struct platform_device *dev)
#ifdef CONFIG_PM
static int ucb1400_ts_resume(struct platform_device *dev)
{
- struct ucb1400_ts *ucb = platform_get_drvdata(dev);
+ struct ucb1400_ts *ucb = dev->dev.platform_data;
if (ucb->ts_task) {
/*
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 56df1cee8fb..3319c2fec28 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -232,7 +232,7 @@ static struct page *read_sb_page(mddev_t *mddev, long offset,
target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
if (sync_page_io(rdev->bdev, target,
- roundup(size, bdev_hardsect_size(rdev->bdev)),
+ roundup(size, bdev_logical_block_size(rdev->bdev)),
page, READ)) {
page->index = index;
attach_page_buffers(page, NULL); /* so that free_buffer will
@@ -287,7 +287,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
int size = PAGE_SIZE;
if (page->index == bitmap->file_pages-1)
size = roundup(bitmap->last_page_size,
- bdev_hardsect_size(rdev->bdev));
+ bdev_logical_block_size(rdev->bdev));
/* Just make sure we aren't corrupting data or
* metadata
*/
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index a2e26c24214..75d8081a904 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -178,7 +178,7 @@ static int set_chunk_size(struct dm_exception_store *store,
}
/* Validate the chunk size against the device block size */
- if (chunk_size_ulong % (bdev_hardsect_size(store->cow->bdev) >> 9)) {
+ if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
*error = "Chunk size is not a multiple of device blocksize";
return -EINVAL;
}
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index be233bc4d91..6fa8ccf91c7 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -413,7 +413,8 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
* Buffer holds both header and bitset.
*/
buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
- bitset_size, ti->limits.hardsect_size);
+ bitset_size,
+ ti->limits.logical_block_size);
if (buf_size > dev->bdev->bd_inode->i_size) {
DMWARN("log device %s too small: need %llu bytes",
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index e75c6dd76a9..2662a41337e 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -282,7 +282,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
*/
if (!ps->store->chunk_size) {
ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
- bdev_hardsect_size(ps->store->cow->bdev) >> 9);
+ bdev_logical_block_size(ps->store->cow->bdev) >> 9);
ps->store->chunk_mask = ps->store->chunk_size - 1;
ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
chunk_size_supplied = 0;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 429b50b975d..e9a73bb242b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -108,7 +108,8 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
lhs->max_hw_segments =
min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);
- lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);
+ lhs->logical_block_size = max(lhs->logical_block_size,
+ rhs->logical_block_size);
lhs->max_segment_size =
min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
@@ -509,7 +510,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
* combine_restrictions_low()
*/
rs->max_sectors =
- min_not_zero(rs->max_sectors, q->max_sectors);
+ min_not_zero(rs->max_sectors, queue_max_sectors(q));
/*
* Check if merge fn is supported.
@@ -524,24 +525,25 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
rs->max_phys_segments =
min_not_zero(rs->max_phys_segments,
- q->max_phys_segments);
+ queue_max_phys_segments(q));
rs->max_hw_segments =
- min_not_zero(rs->max_hw_segments, q->max_hw_segments);
+ min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));
- rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);
+ rs->logical_block_size = max(rs->logical_block_size,
+ queue_logical_block_size(q));
rs->max_segment_size =
- min_not_zero(rs->max_segment_size, q->max_segment_size);
+ min_not_zero(rs->max_segment_size, queue_max_segment_size(q));
rs->max_hw_sectors =
- min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
+ min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));
rs->seg_boundary_mask =
min_not_zero(rs->seg_boundary_mask,
- q->seg_boundary_mask);
+ queue_segment_boundary(q));
- rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
+ rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));
rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
}
@@ -683,8 +685,8 @@ static void check_for_valid_limits(struct io_restrictions *rs)
rs->max_phys_segments = MAX_PHYS_SEGMENTS;
if (!rs->max_hw_segments)
rs->max_hw_segments = MAX_HW_SEGMENTS;
- if (!rs->hardsect_size)
- rs->hardsect_size = 1 << SECTOR_SHIFT;
+ if (!rs->logical_block_size)
+ rs->logical_block_size = 1 << SECTOR_SHIFT;
if (!rs->max_segment_size)
rs->max_segment_size = MAX_SEGMENT_SIZE;
if (!rs->seg_boundary_mask)
@@ -912,13 +914,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
* restrictions.
*/
blk_queue_max_sectors(q, t->limits.max_sectors);
- q->max_phys_segments = t->limits.max_phys_segments;
- q->max_hw_segments = t->limits.max_hw_segments;
- q->hardsect_size = t->limits.hardsect_size;
- q->max_segment_size = t->limits.max_segment_size;
- q->max_hw_sectors = t->limits.max_hw_sectors;
- q->seg_boundary_mask = t->limits.seg_boundary_mask;
- q->bounce_pfn = t->limits.bounce_pfn;
+ blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
+ blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
+ blk_queue_logical_block_size(q, t->limits.logical_block_size);
+ blk_queue_max_segment_size(q, t->limits.max_segment_size);
+ blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
+ blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
+ blk_queue_bounce_limit(q, t->limits.bounce_pfn);
if (t->limits.no_cluster)
queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 424f7b048c3..3fd8b1e6548 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -20,7 +20,8 @@
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
-#include <trace/block.h>
+
+#include <trace/events/block.h>
#define DM_MSG_PREFIX "core"
@@ -53,8 +54,6 @@ struct dm_target_io {
union map_info info;
};
-DEFINE_TRACE(block_bio_complete);
-
/*
* For request-based dm.
* One of these is allocated per request.
@@ -656,8 +655,7 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
/* the bio has been remapped so dispatch it */
trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
- tio->io->bio->bi_bdev->bd_dev,
- clone->bi_sector, sector);
+ tio->io->bio->bi_bdev->bd_dev, sector);
generic_make_request(clone);
} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 7a36e38393a..64f1f3e046e 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -146,7 +146,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
disk->num_sectors = rdev->sectors;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 641b211fe3f..20f6ac33834 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1202,7 +1202,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
- bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
+ bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
if (rdev->sb_size & bmask)
rdev->sb_size = (rdev->sb_size | bmask) + 1;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 41ced0cbe82..4ee31aa13c4 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -303,7 +303,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
* merge_bvec_fn will be involved in multipath.)
*/
if (q->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(q) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
conf->working_disks++;
@@ -467,7 +467,7 @@ static int multipath_run (mddev_t *mddev)
* violating it, not that we ever expect a device with
* a merge_bvec_fn to be involved in multipath */
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
if (!test_bit(Faulty, &rdev->flags))
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c08d7559be5..925507e7d67 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -144,7 +144,7 @@ static int create_strip_zones (mddev_t *mddev)
*/
if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
if (!smallest || (rdev1->sectors < smallest->sectors))
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 36df9109cde..e23758b4a34 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1130,7 +1130,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
p->head_position = 0;
@@ -1996,7 +1996,7 @@ static int run(mddev_t *mddev)
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
disk->head_position = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 499620afb44..750550c1166 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1158,8 +1158,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
- mddev->queue->max_sectors = (PAGE_SIZE>>9);
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+ blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
p->head_position = 0;
rdev->raid_disk = mirror;
@@ -2145,8 +2145,8 @@ static int run(mddev_t *mddev)
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
- mddev->queue->max_sectors = (PAGE_SIZE>>9);
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+ blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
disk->head_position = 0;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5d400aef8d9..bef87669823 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -362,7 +362,7 @@ static void raid5_unplug_device(struct request_queue *q);
static struct stripe_head *
get_active_stripe(raid5_conf_t *conf, sector_t sector,
- int previous, int noblock)
+ int previous, int noblock, int noquiesce)
{
struct stripe_head *sh;
@@ -372,7 +372,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
do {
wait_event_lock_irq(conf->wait_for_stripe,
- conf->quiesce == 0,
+ conf->quiesce == 0 || noquiesce,
conf->device_lock, /* nothing */);
sh = __find_stripe(conf, sector, conf->generation - previous);
if (!sh) {
@@ -2671,7 +2671,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
sector_t bn = compute_blocknr(sh, i, 1);
sector_t s = raid5_compute_sector(conf, bn, 0,
&dd_idx, NULL);
- sh2 = get_active_stripe(conf, s, 0, 1);
+ sh2 = get_active_stripe(conf, s, 0, 1, 1);
if (sh2 == NULL)
/* so far only the early blocks of this stripe
* have been requested. When later blocks
@@ -2944,7 +2944,7 @@ static bool handle_stripe5(struct stripe_head *sh)
/* Finish reconstruct operations initiated by the expansion process */
if (sh->reconstruct_state == reconstruct_state_result) {
struct stripe_head *sh2
- = get_active_stripe(conf, sh->sector, 1, 1);
+ = get_active_stripe(conf, sh->sector, 1, 1, 1);
if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
/* sh cannot be written until sh2 has been read.
* so arrange for sh to be delayed a little
@@ -3189,7 +3189,7 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
struct stripe_head *sh2
- = get_active_stripe(conf, sh->sector, 1, 1);
+ = get_active_stripe(conf, sh->sector, 1, 1, 1);
if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
/* sh cannot be written until sh2 has been read.
* so arrange for sh to be delayed a little
@@ -3288,7 +3288,7 @@ static void unplug_slaves(mddev_t *mddev)
int i;
rcu_read_lock();
- for (i=0; i<mddev->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
@@ -3463,10 +3463,10 @@ static int bio_fits_rdev(struct bio *bi)
{
struct request_queue *q = bdev_get_queue(bi->bi_bdev);
- if ((bi->bi_size>>9) > q->max_sectors)
+ if ((bi->bi_size>>9) > queue_max_sectors(q))
return 0;
blk_recount_segments(q, bi);
- if (bi->bi_phys_segments > q->max_phys_segments)
+ if (bi->bi_phys_segments > queue_max_phys_segments(q))
return 0;
if (q->merge_bvec_fn)
@@ -3675,7 +3675,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
(unsigned long long)logical_sector);
sh = get_active_stripe(conf, new_sector, previous,
- (bi->bi_rw&RWA_MASK));
+ (bi->bi_rw&RWA_MASK), 0);
if (sh) {
if (unlikely(previous)) {
/* expansion might have moved on while waiting for a
@@ -3873,7 +3873,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
int j;
int skipped = 0;
- sh = get_active_stripe(conf, stripe_addr+i, 0, 0);
+ sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
set_bit(STRIPE_EXPANDING, &sh->state);
atomic_inc(&conf->reshape_stripes);
/* If any of this stripe is beyond the end of the old
@@ -3916,13 +3916,13 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
raid5_compute_sector(conf, stripe_addr*(new_data_disks),
1, &dd_idx, NULL);
last_sector =
- raid5_compute_sector(conf, ((stripe_addr+conf->chunk_size/512)
+ raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
*(new_data_disks) - 1),
1, &dd_idx, NULL);
if (last_sector >= mddev->dev_sectors)
last_sector = mddev->dev_sectors - 1;
while (first_sector <= last_sector) {
- sh = get_active_stripe(conf, first_sector, 1, 0);
+ sh = get_active_stripe(conf, first_sector, 1, 0, 1);
set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
@@ -4022,9 +4022,9 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
bitmap_cond_end_sync(mddev->bitmap, sector_nr);
- sh = get_active_stripe(conf, sector_nr, 0, 1);
+ sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
if (sh == NULL) {
- sh = get_active_stripe(conf, sector_nr, 0, 0);
+ sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
/* make sure we don't swamp the stripe cache if someone else
* is trying to get access
*/
@@ -4034,7 +4034,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
* We don't need to check the 'failed' flag as when that gets set,
* recovery aborts.
*/
- for (i=0; i<mddev->raid_disks; i++)
+ for (i = 0; i < conf->raid_disks; i++)
if (conf->disks[i].rdev == NULL)
still_degraded = 1;
@@ -4086,7 +4086,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
/* already done this stripe */
continue;
- sh = get_active_stripe(conf, sector, 0, 1);
+ sh = get_active_stripe(conf, sector, 0, 1, 0);
if (!sh) {
/* failed to get a stripe - must wait */
diff --git a/drivers/media/video/ivtv/ivtv-queue.c b/drivers/media/video/ivtv/ivtv-queue.c
index ff7b7deded4..7fde36e6d22 100644
--- a/drivers/media/video/ivtv/ivtv-queue.c
+++ b/drivers/media/video/ivtv/ivtv-queue.c
@@ -230,7 +230,8 @@ int ivtv_stream_alloc(struct ivtv_stream *s)
return -ENOMEM;
}
if (ivtv_might_use_dma(s)) {
- s->sg_handle = pci_map_single(itv->pdev, s->sg_dma, sizeof(struct ivtv_sg_element), s->dma);
+ s->sg_handle = pci_map_single(itv->pdev, s->sg_dma,
+ sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
ivtv_stream_sync_for_cpu(s);
}
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index de143deb06f..7847bbc1440 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -672,15 +672,14 @@ try_again:
msb->req_sg);
if (!msb->seg_count) {
- chunk = __blk_end_request(msb->block_req, -ENOMEM,
- blk_rq_cur_bytes(msb->block_req));
+ chunk = __blk_end_request_cur(msb->block_req, -ENOMEM);
continue;
}
- t_sec = msb->block_req->sector << 9;
+ t_sec = blk_rq_pos(msb->block_req) << 9;
sector_div(t_sec, msb->page_size);
- count = msb->block_req->nr_sectors << 9;
+ count = blk_rq_bytes(msb->block_req);
count /= msb->page_size;
param.system = msb->system;
@@ -705,8 +704,8 @@ try_again:
return 0;
}
- dev_dbg(&card->dev, "elv_next\n");
- msb->block_req = elv_next_request(msb->queue);
+ dev_dbg(&card->dev, "blk_fetch\n");
+ msb->block_req = blk_fetch_request(msb->queue);
if (!msb->block_req) {
dev_dbg(&card->dev, "issue end\n");
return -EAGAIN;
@@ -745,7 +744,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
t_len *= msb->page_size;
}
} else
- t_len = msb->block_req->nr_sectors << 9;
+ t_len = blk_rq_bytes(msb->block_req);
dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error);
@@ -825,8 +824,8 @@ static void mspro_block_submit_req(struct request_queue *q)
return;
if (msb->eject) {
- while ((req = elv_next_request(q)) != NULL)
- __blk_end_request(req, -ENODEV, blk_rq_bytes(req));
+ while ((req = blk_fetch_request(q)) != NULL)
+ __blk_end_request_all(req, -ENODEV);
return;
}
@@ -1243,7 +1242,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)
sprintf(msb->disk->disk_name, "mspblk%d", disk_id);
- blk_queue_hardsect_size(msb->queue, msb->page_size);
+ blk_queue_logical_block_size(msb->queue, msb->page_size);
capacity = be16_to_cpu(sys_info->user_block_count);
capacity *= be16_to_cpu(sys_info->block_size);
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index a9019f081b9..79f5433359f 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1277,8 +1277,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
/* do we need to support multiple segments? */
if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
- ioc->name, __func__, req->bio->bi_vcnt, req->data_len,
- rsp->bio->bi_vcnt, rsp->data_len);
+ ioc->name, __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
+ rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
return -EINVAL;
}
@@ -1295,7 +1295,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
smpreq = (SmpPassthroughRequest_t *)mf;
memset(smpreq, 0, sizeof(*smpreq));
- smpreq->RequestDataLength = cpu_to_le16(req->data_len - 4);
+ smpreq->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
if (rphy)
@@ -1321,10 +1321,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
MPI_SGE_FLAGS_END_OF_BUFFER |
MPI_SGE_FLAGS_DIRECTION |
mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT;
- flagsLength |= (req->data_len - 4);
+ flagsLength |= (blk_rq_bytes(req) - 4);
dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
- req->data_len, PCI_DMA_BIDIRECTIONAL);
+ blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
if (!dma_addr_out)
goto put_mf;
mpt_add_sge(psge, flagsLength, dma_addr_out);
@@ -1332,9 +1332,9 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
/* response */
flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
- flagsLength |= rsp->data_len + 4;
+ flagsLength |= blk_rq_bytes(rsp) + 4;
dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio),
- rsp->data_len, PCI_DMA_BIDIRECTIONAL);
+ blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
if (!dma_addr_in)
goto unmap;
mpt_add_sge(psge, flagsLength, dma_addr_in);
@@ -1357,8 +1357,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
memcpy(req->sense, smprep, sizeof(*smprep));
req->sense_len = sizeof(*smprep);
- req->data_len = 0;
- rsp->data_len -= smprep->ResponseDataLength;
+ req->resid_len = 0;
+ rsp->resid_len -= smprep->ResponseDataLength;
} else {
printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n",
ioc->name, __func__);
@@ -1366,10 +1366,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
}
unmap:
if (dma_addr_out)
- pci_unmap_single(ioc->pcidev, dma_addr_out, req->data_len,
+ pci_unmap_single(ioc->pcidev, dma_addr_out, blk_rq_bytes(req),
PCI_DMA_BIDIRECTIONAL);
if (dma_addr_in)
- pci_unmap_single(ioc->pcidev, dma_addr_in, rsp->data_len,
+ pci_unmap_single(ioc->pcidev, dma_addr_in, blk_rq_bytes(rsp),
PCI_DMA_BIDIRECTIONAL);
put_mf:
if (mf)
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index a443e136dc4..335d4c78a77 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -426,15 +426,9 @@ static void i2o_block_end_request(struct request *req, int error,
struct request_queue *q = req->q;
unsigned long flags;
- if (blk_end_request(req, error, nr_bytes)) {
- int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);
-
- if (blk_pc_request(req))
- leftover = req->data_len;
-
+ if (blk_end_request(req, error, nr_bytes))
if (error)
- blk_end_request(req, -EIO, leftover);
- }
+ blk_end_request_all(req, -EIO);
spin_lock_irqsave(q->queue_lock, flags);
@@ -761,7 +755,7 @@ static int i2o_block_transfer(struct request *req)
break;
case CACHE_SMARTFETCH:
- if (req->nr_sectors > 16)
+ if (blk_rq_sectors(req) > 16)
ctl_flags = 0x201F0008;
else
ctl_flags = 0x001F0000;
@@ -781,13 +775,13 @@ static int i2o_block_transfer(struct request *req)
ctl_flags = 0x001F0010;
break;
case CACHE_SMARTBACK:
- if (req->nr_sectors > 16)
+ if (blk_rq_sectors(req) > 16)
ctl_flags = 0x001F0004;
else
ctl_flags = 0x001F0010;
break;
case CACHE_SMARTTHROUGH:
- if (req->nr_sectors > 16)
+ if (blk_rq_sectors(req) > 16)
ctl_flags = 0x001F0004;
else
ctl_flags = 0x001F0010;
@@ -800,8 +794,9 @@ static int i2o_block_transfer(struct request *req)
if (c->adaptec) {
u8 cmd[10];
u32 scsi_flags;
- u16 hwsec = queue_hardsect_size(req->q) >> KERNEL_SECTOR_SHIFT;
+ u16 hwsec;
+ hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT;
memset(cmd, 0, 10);
sgl_offset = SGL_OFFSET_12;
@@ -827,22 +822,22 @@ static int i2o_block_transfer(struct request *req)
*mptr++ = cpu_to_le32(scsi_flags);
- *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec);
- *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);
+ *((u32 *) & cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec);
+ *((u16 *) & cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec);
memcpy(mptr, cmd, 10);
mptr += 4;
- *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
+ *mptr++ = cpu_to_le32(blk_rq_bytes(req));
} else
#endif
{
msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
*mptr++ = cpu_to_le32(ctl_flags);
- *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
+ *mptr++ = cpu_to_le32(blk_rq_bytes(req));
*mptr++ =
- cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT));
+ cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));
*mptr++ =
- cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT));
+ cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT));
}
if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
@@ -883,7 +878,7 @@ static void i2o_block_request_fn(struct request_queue *q)
struct request *req;
while (!blk_queue_plugged(q)) {
- req = elv_next_request(q);
+ req = blk_peek_request(q);
if (!req)
break;
@@ -896,7 +891,7 @@ static void i2o_block_request_fn(struct request_queue *q)
if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
if (!i2o_block_transfer(req)) {
- blkdev_dequeue_request(req);
+ blk_start_request(req);
continue;
} else
osm_info("transfer error\n");
@@ -922,8 +917,10 @@ static void i2o_block_request_fn(struct request_queue *q)
blk_stop_queue(q);
break;
}
- } else
- end_request(req, 0);
+ } else {
+ blk_start_request(req);
+ __blk_end_request_all(req, -EIO);
+ }
}
};
@@ -1082,7 +1079,7 @@ static int i2o_block_probe(struct device *dev)
*/
if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
!i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
- blk_queue_hardsect_size(queue, le32_to_cpu(blocksize));
+ blk_queue_logical_block_size(queue, le32_to_cpu(blocksize));
} else
osm_warn("unable to get blocksize of %s\n", gd->disk_name);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index b25e9b6516a..98ffc41eaf2 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -243,7 +243,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.mrq.cmd = &brq.cmd;
brq.mrq.data = &brq.data;
- brq.cmd.arg = req->sector;
+ brq.cmd.arg = blk_rq_pos(req);
if (!mmc_card_blockaddr(card))
brq.cmd.arg <<= 9;
brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
@@ -251,7 +251,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.stop.opcode = MMC_STOP_TRANSMISSION;
brq.stop.arg = 0;
brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
- brq.data.blocks = req->nr_sectors;
+ brq.data.blocks = blk_rq_sectors(req);
/*
* The block layer doesn't support all sector count
@@ -301,7 +301,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
* Adjust the sg list so it is the same size as the
* request.
*/
- if (brq.data.blocks != req->nr_sectors) {
+ if (brq.data.blocks != blk_rq_sectors(req)) {
int i, data_size = brq.data.blocks << 9;
struct scatterlist *sg;
@@ -352,8 +352,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
printk(KERN_ERR "%s: error %d transferring data,"
" sector %u, nr %u, card status %#x\n",
req->rq_disk->disk_name, brq.data.error,
- (unsigned)req->sector,
- (unsigned)req->nr_sectors, status);
+ (unsigned)blk_rq_pos(req),
+ (unsigned)blk_rq_sectors(req), status);
}
if (brq.stop.error) {
@@ -521,7 +521,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
sprintf(md->disk->disk_name, "mmcblk%d", devidx);
- blk_queue_hardsect_size(md->queue.queue, 512);
+ blk_queue_logical_block_size(md->queue.queue, 512);
if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
/*
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 7a72e75d5c6..49e582356c6 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -55,7 +55,7 @@ static int mmc_queue_thread(void *d)
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
if (!blk_queue_plugged(q))
- req = elv_next_request(q);
+ req = blk_fetch_request(q);
mq->req = req;
spin_unlock_irq(q->queue_lock);
@@ -88,16 +88,11 @@ static void mmc_request(struct request_queue *q)
{
struct mmc_queue *mq = q->queuedata;
struct request *req;
- int ret;
if (!mq) {
printk(KERN_ERR "MMC: killing requests for dead queue\n");
- while ((req = elv_next_request(q)) != NULL) {
- do {
- ret = __blk_end_request(req, -EIO,
- blk_rq_cur_bytes(req));
- } while (ret);
- }
+ while ((req = blk_fetch_request(q)) != NULL)
+ __blk_end_request_all(req, -EIO);
return;
}
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index c643d0fe118..b56d72ff06e 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -64,6 +64,31 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
unsigned int tmout;
int tmout_index;
+ /*
+ * Hardware weirdness. The FIFO_EMPTY bit of the HW_STATE
+ * register is sometimes not set before a while when some
+ * "unusual" data block sizes are used (such as with the SWITCH
+ * command), even despite the fact that the XFER_DONE interrupt
+ * was raised. And if another data transfer starts before
+ * this bit comes to good sense (which eventually happens by
+ * itself) then the new transfer simply fails with a timeout.
+ */
+ if (!(mvsd_read(MVSD_HW_STATE) & (1 << 13))) {
+ unsigned long t = jiffies + HZ;
+ unsigned int hw_state, count = 0;
+ do {
+ if (time_after(jiffies, t)) {
+ dev_warn(host->dev, "FIFO_EMPTY bit missing\n");
+ break;
+ }
+ hw_state = mvsd_read(MVSD_HW_STATE);
+ count++;
+ } while (!(hw_state & (1 << 13)));
+ dev_dbg(host->dev, "*** wait for FIFO_EMPTY bit "
+ "(hw=0x%04x, count=%d, jiffies=%ld)\n",
+ hw_state, count, jiffies - (t - HZ));
+ }
+
/* If timeout=0 then maximum timeout index is used. */
tmout = DIV_ROUND_UP(data->timeout_ns, host->ns_per_clk);
tmout += data->timeout_clks;
@@ -620,9 +645,18 @@ static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->bus_width == MMC_BUS_WIDTH_4)
ctrl_reg |= MVSD_HOST_CTRL_DATA_WIDTH_4_BITS;
+ /*
+ * The HI_SPEED_EN bit is causing trouble with many (but not all)
+ * high speed SD, SDHC and SDIO cards. Not enabling that bit
+ * makes all cards work. So let's just ignore that bit for now
+ * and revisit this issue if problems for not enabling this bit
+ * are ever reported.
+ */
+#if 0
if (ios->timing == MMC_TIMING_MMC_HS ||
ios->timing == MMC_TIMING_SD_HS)
ctrl_reg |= MVSD_HOST_CTRL_HI_SPEED_EN;
+#endif
host->ctrl = ctrl_reg;
mvsd_write(MVSD_HOST_CTRL, ctrl_reg);
@@ -882,3 +916,4 @@ module_param(nodma, int, 0);
MODULE_AUTHOR("Maen Suleiman, Nicolas Pitre");
MODULE_DESCRIPTION("Marvell MMC,SD,SDIO Host Controller driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mvsdio");
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index b4a615c55f2..f4cbe473670 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -140,6 +140,8 @@ struct mxcmci_host {
struct work_struct datawork;
};
+static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
+
static inline int mxcmci_use_dma(struct mxcmci_host *host)
{
return host->do_dma;
@@ -160,7 +162,7 @@ static void mxcmci_softreset(struct mxcmci_host *host)
writew(0xff, host->base + MMC_REG_RES_TO);
}
-static void mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
+static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
{
unsigned int nob = data->blocks;
unsigned int blksz = data->blksz;
@@ -168,6 +170,7 @@ static void mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
#ifdef HAS_DMA
struct scatterlist *sg;
int i;
+ int ret;
#endif
if (data->flags & MMC_DATA_STREAM)
nob = 0xffff;
@@ -183,7 +186,7 @@ static void mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
for_each_sg(data->sg, sg, data->sg_len, i) {
if (sg->offset & 3 || sg->length & 3) {
host->do_dma = 0;
- return;
+ return 0;
}
}
@@ -192,23 +195,30 @@ static void mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
data->sg_len, host->dma_dir);
- imx_dma_setup_sg(host->dma, data->sg, host->dma_nents, datasize,
- host->res->start + MMC_REG_BUFFER_ACCESS,
- DMA_MODE_READ);
+ ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents,
+ datasize,
+ host->res->start + MMC_REG_BUFFER_ACCESS,
+ DMA_MODE_READ);
} else {
host->dma_dir = DMA_TO_DEVICE;
host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
data->sg_len, host->dma_dir);
- imx_dma_setup_sg(host->dma, data->sg, host->dma_nents, datasize,
- host->res->start + MMC_REG_BUFFER_ACCESS,
- DMA_MODE_WRITE);
+ ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents,
+ datasize,
+ host->res->start + MMC_REG_BUFFER_ACCESS,
+ DMA_MODE_WRITE);
}
+ if (ret) {
+ dev_err(mmc_dev(host->mmc), "failed to setup DMA : %d\n", ret);
+ return ret;
+ }
wmb();
imx_dma_enable(host->dma);
#endif /* HAS_DMA */
+ return 0;
}
static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
@@ -345,8 +355,11 @@ static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
stat = readl(host->base + MMC_REG_STATUS);
if (stat & STATUS_ERR_MASK)
return stat;
- if (time_after(jiffies, timeout))
+ if (time_after(jiffies, timeout)) {
+ mxcmci_softreset(host);
+ mxcmci_set_clk_rate(host, host->clock);
return STATUS_TIME_OUT_READ;
+ }
if (stat & mask)
return 0;
cpu_relax();
@@ -531,6 +544,7 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
{
struct mxcmci_host *host = mmc_priv(mmc);
unsigned int cmdat = host->cmdat;
+ int error;
WARN_ON(host->req != NULL);
@@ -540,7 +554,12 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
host->do_dma = 1;
#endif
if (req->data) {
- mxcmci_setup_data(host, req->data);
+ error = mxcmci_setup_data(host, req->data);
+ if (error) {
+ req->cmd->error = error;
+ goto out;
+ }
+
cmdat |= CMD_DAT_CONT_DATA_ENABLE;
@@ -548,7 +567,9 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
cmdat |= CMD_DAT_CONT_WRITE;
}
- if (mxcmci_start_cmd(host, req->cmd, cmdat))
+ error = mxcmci_start_cmd(host, req->cmd, cmdat);
+out:
+ if (error)
mxcmci_finish_request(host, req);
}
@@ -724,7 +745,9 @@ static int mxcmci_probe(struct platform_device *pdev)
goto out_clk_put;
}
- mmc->f_min = clk_get_rate(host->clk) >> 7;
+ mmc->f_min = clk_get_rate(host->clk) >> 16;
+ if (mmc->f_min < 400000)
+ mmc->f_min = 400000;
mmc->f_max = clk_get_rate(host->clk) >> 1;
/* recommended in data sheet */
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index bfa25c01c87..dceb5ee3bda 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -822,7 +822,7 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
del_timer(&host->cmd_abort_timer);
host->abort = 1;
OMAP_MMC_WRITE(host, IE, 0);
- disable_irq(host->irq);
+ disable_irq_nosync(host->irq);
schedule_work(&host->cmd_abort_work);
return IRQ_HANDLED;
}
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index e62a22a7f00..c40cb96255a 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -680,7 +680,7 @@ static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
host->dma_ch = -1;
/*
* DMA Callback: run in interrupt context.
- * mutex_unlock will through a kernel warning if used.
+ * mutex_unlock will throw a kernel warning if used.
*/
up(&host->sem);
}
diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of.c
index 3ff4ac3abe8..128c614d11a 100644
--- a/drivers/mmc/host/sdhci-of.c
+++ b/drivers/mmc/host/sdhci-of.c
@@ -55,7 +55,13 @@ static u32 esdhc_readl(struct sdhci_host *host, int reg)
static u16 esdhc_readw(struct sdhci_host *host, int reg)
{
- return in_be16(host->ioaddr + (reg ^ 0x2));
+ u16 ret;
+
+ if (unlikely(reg == SDHCI_HOST_VERSION))
+ ret = in_be16(host->ioaddr + reg);
+ else
+ ret = in_be16(host->ioaddr + (reg ^ 0x2));
+ return ret;
}
static u8 esdhc_readb(struct sdhci_host *host, int reg)
@@ -277,6 +283,7 @@ static int __devexit sdhci_of_remove(struct of_device *ofdev)
static const struct of_device_id sdhci_of_match[] = {
{ .compatible = "fsl,mpc8379-esdhc", .data = &sdhci_esdhc, },
{ .compatible = "fsl,mpc8536-esdhc", .data = &sdhci_esdhc, },
+ { .compatible = "fsl,esdhc", .data = &sdhci_esdhc, },
{ .compatible = "generic-sdhci", },
{},
};
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index a49a9c8f2cb..aaac3b6800b 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -47,40 +47,41 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
unsigned long block, nsect;
char *buf;
- block = req->sector << 9 >> tr->blkshift;
- nsect = req->current_nr_sectors << 9 >> tr->blkshift;
+ block = blk_rq_pos(req) << 9 >> tr->blkshift;
+ nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
buf = req->buffer;
if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
req->cmd[0] == REQ_LB_OP_DISCARD)
- return !tr->discard(dev, block, nsect);
+ return tr->discard(dev, block, nsect);
if (!blk_fs_request(req))
- return 0;
+ return -EIO;
- if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
- return 0;
+ if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
+ get_capacity(req->rq_disk))
+ return -EIO;
switch(rq_data_dir(req)) {
case READ:
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
if (tr->readsect(dev, block, buf))
- return 0;
- return 1;
+ return -EIO;
+ return 0;
case WRITE:
if (!tr->writesect)
- return 0;
+ return -EIO;
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
if (tr->writesect(dev, block, buf))
- return 0;
- return 1;
+ return -EIO;
+ return 0;
default:
printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
- return 0;
+ return -EIO;
}
}
@@ -88,19 +89,18 @@ static int mtd_blktrans_thread(void *arg)
{
struct mtd_blktrans_ops *tr = arg;
struct request_queue *rq = tr->blkcore_priv->rq;
+ struct request *req = NULL;
/* we might get involved when memory gets low, so use PF_MEMALLOC */
current->flags |= PF_MEMALLOC;
spin_lock_irq(rq->queue_lock);
+
while (!kthread_should_stop()) {
- struct request *req;
struct mtd_blktrans_dev *dev;
- int res = 0;
-
- req = elv_next_request(rq);
+ int res;
- if (!req) {
+ if (!req && !(req = blk_fetch_request(rq))) {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(rq->queue_lock);
schedule();
@@ -119,8 +119,13 @@ static int mtd_blktrans_thread(void *arg)
spin_lock_irq(rq->queue_lock);
- end_request(req, res);
+ if (!__blk_end_request_cur(req, res))
+ req = NULL;
}
+
+ if (req)
+ __blk_end_request_all(req, -EIO);
+
spin_unlock_irq(rq->queue_lock);
return 0;
@@ -373,7 +378,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
}
tr->blkcore_priv->rq->queuedata = tr;
- blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
+ blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize);
if (tr->discard)
blk_queue_set_discard(tr->blkcore_priv->rq,
blktrans_discard_request);
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 0119220de7d..02700f769b8 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -407,16 +407,17 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
}
info->chip.ecc.mode = ecc_mode;
- info->clk = clk_get(&pdev->dev, "AEMIFCLK");
+ info->clk = clk_get(&pdev->dev, "aemif");
if (IS_ERR(info->clk)) {
ret = PTR_ERR(info->clk);
- dev_dbg(&pdev->dev, "unable to get AEMIFCLK, err %d\n", ret);
+ dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
goto err_clk;
}
ret = clk_enable(info->clk);
if (ret < 0) {
- dev_dbg(&pdev->dev, "unable to enable AEMIFCLK, err %d\n", ret);
+ dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
+ ret);
goto err_clk_enable;
}
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index f3548d04801..40c26080ecd 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -831,6 +831,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
break;
case NAND_CMD_READID:
+ host->col_addr = 0;
send_read_id(host);
break;
@@ -867,6 +868,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
mtd->priv = this;
mtd->owner = THIS_MODULE;
mtd->dev.parent = &pdev->dev;
+ mtd->name = "mxc_nand";
/* 50 us command delay time */
this->chip_delay = 5;
@@ -882,8 +884,10 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->verify_buf = mxc_nand_verify_buf;
host->clk = clk_get(&pdev->dev, "nfc");
- if (IS_ERR(host->clk))
+ if (IS_ERR(host->clk)) {
+ err = PTR_ERR(host->clk);
goto eclk;
+ }
clk_enable(host->clk);
host->clk_act = 1;
@@ -896,7 +900,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
host->regs = ioremap(res->start, res->end - res->start + 1);
if (!host->regs) {
- err = -EIO;
+ err = -ENOMEM;
goto eres;
}
@@ -1011,30 +1015,35 @@ static int __devexit mxcnd_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int mxcnd_suspend(struct platform_device *pdev, pm_message_t state)
{
- struct mtd_info *info = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
int ret = 0;
DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n");
- if (info)
- ret = info->suspend(info);
-
- /* Disable the NFC clock */
- clk_disable(nfc_clk); /* FIXME */
+ if (mtd) {
+ ret = mtd->suspend(mtd);
+ /* Disable the NFC clock */
+ clk_disable(host->clk);
+ }
return ret;
}
static int mxcnd_resume(struct platform_device *pdev)
{
- struct mtd_info *info = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
int ret = 0;
DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n");
- /* Enable the NFC clock */
- clk_enable(nfc_clk); /* FIXME */
- if (info)
- info->resume(info);
+ if (mtd) {
+ /* Enable the NFC clock */
+ clk_enable(host->clk);
+ mtd->resume(mtd);
+ }
return ret;
}
@@ -1055,13 +1064,7 @@ static struct platform_driver mxcnd_driver = {
static int __init mxc_nd_init(void)
{
- /* Register the device driver structure. */
- pr_info("MXC MTD nand Driver\n");
- if (platform_driver_probe(&mxcnd_driver, mxcnd_probe) != 0) {
- printk(KERN_ERR "Driver register failed for mxcnd_driver\n");
- return -ENODEV;
- }
- return 0;
+ return platform_driver_probe(&mxcnd_driver, mxcnd_probe);
}
static void __exit mxc_nd_cleanup(void)
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index fbb37192199..682aad89708 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -480,9 +480,13 @@ static int pnp_registered;
#ifdef CONFIG_EISA
static struct eisa_device_id el3_eisa_ids[] = {
+ { "TCM5090" },
+ { "TCM5091" },
{ "TCM5092" },
{ "TCM5093" },
+ { "TCM5094" },
{ "TCM5095" },
+ { "TCM5098" },
{ "" }
};
MODULE_DEVICE_TABLE(eisa, el3_eisa_ids);
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index fb57b750866..1342418fb20 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -37,6 +37,7 @@ char atl1e_driver_version[] = DRV_VERSION;
*/
static struct pci_device_id atl1e_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)},
/* required last entry */
{ 0 }
};
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 0ab22540bf5..4e817126e28 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -82,6 +82,12 @@
#include "atl1.h"
+#define ATLX_DRIVER_VERSION "2.1.3"
+MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
+ Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ATLX_DRIVER_VERSION);
+
/* Temporary hack for merging atl1 and atl2 */
#include "atlx.c"
diff --git a/drivers/net/atlx/atlx.h b/drivers/net/atlx/atlx.h
index 297a03da6b7..14054b75aa6 100644
--- a/drivers/net/atlx/atlx.h
+++ b/drivers/net/atlx/atlx.h
@@ -29,12 +29,6 @@
#include <linux/module.h>
#include <linux/types.h>
-#define ATLX_DRIVER_VERSION "2.1.3"
-MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
- Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(ATLX_DRIVER_VERSION);
-
#define ATLX_ERR_PHY 2
#define ATLX_ERR_PHY_SPEED 7
#define ATLX_ERR_PHY_RES 8
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 9f971ed6b58..b4da1821332 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -979,22 +979,7 @@ static int bfin_mac_open(struct net_device *dev)
return 0;
}
-static const struct net_device_ops bfin_mac_netdev_ops = {
- .ndo_open = bfin_mac_open,
- .ndo_stop = bfin_mac_close,
- .ndo_start_xmit = bfin_mac_hard_start_xmit,
- .ndo_set_mac_address = bfin_mac_set_mac_address,
- .ndo_tx_timeout = bfin_mac_timeout,
- .ndo_set_multicast_list = bfin_mac_set_multicast_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = bfin_mac_poll,
-#endif
-};
-
/*
- *
* this makes the board clean up everything that it can
* and not talk to the outside world. Caused by
* an 'ifconfig ethX down'
@@ -1019,6 +1004,20 @@ static int bfin_mac_close(struct net_device *dev)
return 0;
}
+static const struct net_device_ops bfin_mac_netdev_ops = {
+ .ndo_open = bfin_mac_open,
+ .ndo_stop = bfin_mac_close,
+ .ndo_start_xmit = bfin_mac_hard_start_xmit,
+ .ndo_set_mac_address = bfin_mac_set_mac_address,
+ .ndo_tx_timeout = bfin_mac_timeout,
+ .ndo_set_multicast_list = bfin_mac_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = bfin_mac_poll,
+#endif
+};
+
static int __devinit bfin_mac_probe(struct platform_device *pdev)
{
struct net_device *ndev;
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 714df2b675e..c888e97c967 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -85,8 +85,8 @@ struct fl_pg_chunk {
struct page *page;
void *va;
unsigned int offset;
- u64 *p_cnt;
- DECLARE_PCI_UNMAP_ADDR(mapping);
+ unsigned long *p_cnt;
+ dma_addr_t mapping;
};
struct rx_desc;
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 7ea48414c6c..17858b9a583 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -2496,14 +2496,16 @@ static void check_link_status(struct adapter *adapter)
for_each_port(adapter, i) {
struct net_device *dev = adapter->port[i];
struct port_info *p = netdev_priv(dev);
+ int link_fault;
spin_lock_irq(&adapter->work_lock);
- if (p->link_fault) {
+ link_fault = p->link_fault;
+ spin_unlock_irq(&adapter->work_lock);
+
+ if (link_fault) {
t3_link_fault(adapter, i);
- spin_unlock_irq(&adapter->work_lock);
continue;
}
- spin_unlock_irq(&adapter->work_lock);
if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
t3_xgm_intr_disable(adapter, i);
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 26d3587f339..b3ee2bc1a00 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -355,7 +355,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
(*d->pg_chunk.p_cnt)--;
if (!*d->pg_chunk.p_cnt)
pci_unmap_page(pdev,
- pci_unmap_addr(&d->pg_chunk, mapping),
+ d->pg_chunk.mapping,
q->alloc_size, PCI_DMA_FROMDEVICE);
put_page(d->pg_chunk.page);
@@ -454,7 +454,7 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
q->pg_chunk.offset = 0;
mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
0, q->alloc_size, PCI_DMA_FROMDEVICE);
- pci_unmap_addr_set(&q->pg_chunk, mapping, mapping);
+ q->pg_chunk.mapping = mapping;
}
sd->pg_chunk = q->pg_chunk;
@@ -511,8 +511,7 @@ static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
nomem: q->alloc_failed++;
break;
}
- mapping = pci_unmap_addr(&sd->pg_chunk, mapping) +
- sd->pg_chunk.offset;
+ mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
pci_unmap_addr_set(sd, dma_addr, mapping);
add_one_rx_chunk(mapping, d, q->gen);
@@ -881,7 +880,7 @@ recycle:
(*sd->pg_chunk.p_cnt)--;
if (!*sd->pg_chunk.p_cnt)
pci_unmap_page(adap->pdev,
- pci_unmap_addr(&sd->pg_chunk, mapping),
+ sd->pg_chunk.mapping,
fl->alloc_size,
PCI_DMA_FROMDEVICE);
if (!skb) {
@@ -2096,7 +2095,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
(*sd->pg_chunk.p_cnt)--;
if (!*sd->pg_chunk.p_cnt)
pci_unmap_page(adap->pdev,
- pci_unmap_addr(&sd->pg_chunk, mapping),
+ sd->pg_chunk.mapping,
fl->alloc_size,
PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 4f68aeb2679..4950d5d789a 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1274,6 +1274,11 @@ void t3_link_fault(struct adapter *adapter, int port_id)
A_XGM_INT_STATUS + mac->offset);
link_fault &= F_LINKFAULTCHANGE;
+ link_ok = lc->link_ok;
+ speed = lc->speed;
+ duplex = lc->duplex;
+ fc = lc->fc;
+
phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
if (link_fault) {
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index b1419e21b46..fffb006b7d9 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -4027,8 +4027,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
PCI_DMA_FROMDEVICE);
length = le16_to_cpu(rx_desc->length);
-
- if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
+ /* !EOP means multiple descriptors were used to store a single
+ * packet, also make sure the frame isn't just CRC only */
+ if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
/* All receives must fit into a single buffer */
E1000_DBG("%s: Receive packet consumed multiple"
" buffers\n", netdev->name);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index f9a846b1b92..9f6a68fb7b4 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -897,6 +897,12 @@ enum {
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
+/*
+ * Power down phy when interface is down (persists through reboot;
+ * older Linux and other OSes may not power it up again)
+ */
+static int phy_power_down = 0;
+
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
return netdev_priv(dev);
@@ -1485,7 +1491,10 @@ static int phy_init(struct net_device *dev)
/* restart auto negotiation, power down phy */
mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
- mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE | BMCR_PDOWN);
+ mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
+ if (phy_power_down) {
+ mii_control |= BMCR_PDOWN;
+ }
if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
return PHY_ERROR;
}
@@ -5513,7 +5522,7 @@ static int nv_close(struct net_device *dev)
nv_drain_rxtx(dev);
- if (np->wolenabled) {
+ if (np->wolenabled || !phy_power_down) {
writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
nv_start_rx(dev);
} else {
@@ -6367,6 +6376,8 @@ module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
+module_param(phy_power_down, int, 0);
+MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 0642d52aef5..cf352961ae9 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -259,7 +259,7 @@ extern const char gfar_driver_version[];
(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \
IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \
| IEVENT_CRL | IEVENT_XFUN | IEVENT_DPE | IEVENT_PERR \
- | IEVENT_MAG)
+ | IEVENT_MAG | IEVENT_BABR)
#define IMASK_INIT_CLEAR 0x00000000
#define IMASK_BABR 0x80000000
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index f26667d5eaa..22e74a0e036 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -489,7 +489,7 @@ static const struct net_device_ops mac8390_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ei_poll,
+ .ndo_poll_controller = __ei_poll,
#endif
};
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index ac6fc499b28..e5c98a98ad3 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -426,7 +426,7 @@ void mlx4_en_poll_tx_cq(unsigned long data)
INC_PERF_COUNTER(priv->pstats.tx_poll);
- if (!spin_trylock(&ring->comp_lock)) {
+ if (!spin_trylock_irq(&ring->comp_lock)) {
mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
return;
}
@@ -439,7 +439,7 @@ void mlx4_en_poll_tx_cq(unsigned long data)
if (inflight && priv->port_up)
mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
- spin_unlock(&ring->comp_lock);
+ spin_unlock_irq(&ring->comp_lock);
}
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
@@ -482,9 +482,9 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
- if (spin_trylock(&ring->comp_lock)) {
+ if (spin_trylock_irq(&ring->comp_lock)) {
mlx4_en_process_tx_cq(priv->dev, cq);
- spin_unlock(&ring->comp_lock);
+ spin_unlock_irq(&ring->comp_lock);
}
}
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 8247a945a1d..3b19e0ce290 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -66,7 +66,6 @@ static const int multicast_filter_limit = 32;
#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
#define EarlyTxThld 0x3F /* 0x3F means NO early transmit */
-#define RxPacketMaxSize 0x3FE8 /* 16K - 1 - ETH_HLEN - VLAN - CRC... */
#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
@@ -2357,10 +2356,10 @@ static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
return cmd;
}
-static void rtl_set_rx_max_size(void __iomem *ioaddr)
+static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
{
/* Low hurts. Let's disable the filtering. */
- RTL_W16(RxMaxSize, 16383);
+ RTL_W16(RxMaxSize, rx_buf_sz);
}
static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
@@ -2407,7 +2406,7 @@ static void rtl_hw_start_8169(struct net_device *dev)
RTL_W8(EarlyTxThres, EarlyTxThld);
- rtl_set_rx_max_size(ioaddr);
+ rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);
if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
(tp->mac_version == RTL_GIGA_MAC_VER_02) ||
@@ -2668,7 +2667,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
RTL_W8(EarlyTxThres, EarlyTxThld);
- rtl_set_rx_max_size(ioaddr);
+ rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);
tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
@@ -2846,7 +2845,7 @@ static void rtl_hw_start_8101(struct net_device *dev)
RTL_W8(EarlyTxThres, EarlyTxThld);
- rtl_set_rx_max_size(ioaddr);
+ rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);
tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 8a0823588c5..3d94e7dfea6 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -430,6 +430,7 @@ config RTL8187
ASUS P5B Deluxe
Toshiba Satellite Pro series of laptops
Asus Wireless Link
+ Linksys WUSB54GC-EU
Thanks to Realtek for their support!
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 744f4f4dd3d..8d93ca4651b 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1873,18 +1873,18 @@ static void at76_dwork_hw_scan(struct work_struct *work)
if (ret != CMD_STATUS_COMPLETE) {
queue_delayed_work(priv->hw->workqueue, &priv->dwork_hw_scan,
SCAN_POLL_INTERVAL);
- goto exit;
+ mutex_unlock(&priv->mtx);
+ return;
}
- ieee80211_scan_completed(priv->hw, false);
-
if (is_valid_ether_addr(priv->bssid))
at76_join(priv);
- ieee80211_wake_queues(priv->hw);
-
-exit:
mutex_unlock(&priv->mtx);
+
+ ieee80211_scan_completed(priv->hw, false);
+
+ ieee80211_wake_queues(priv->hw);
}
static int at76_hw_scan(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index bac6cfba6ab..d51ba0a88c2 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -71,6 +71,8 @@ static struct usb_device_id rtl8187_table[] __devinitdata = {
{USB_DEVICE(0x18E8, 0x6232), .driver_info = DEVICE_RTL8187},
/* AirLive */
{USB_DEVICE(0x1b75, 0x8187), .driver_info = DEVICE_RTL8187},
+ /* Linksys */
+ {USB_DEVICE(0x1737, 0x0073), .driver_info = DEVICE_RTL8187B},
{}
};
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 73348c4047e..4a9cc92d4d1 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -702,7 +702,7 @@ static unsigned int iosapic_startup_irq(unsigned int irq)
}
#ifdef CONFIG_SMP
-static void iosapic_set_affinity_irq(unsigned int irq,
+static int iosapic_set_affinity_irq(unsigned int irq,
const struct cpumask *dest)
{
struct vector_info *vi = iosapic_get_vector(irq);
@@ -712,7 +712,7 @@ static void iosapic_set_affinity_irq(unsigned int irq,
dest_cpu = cpu_check_affinity(irq, dest);
if (dest_cpu < 0)
- return;
+ return -1;
cpumask_copy(irq_desc[irq].affinity, cpumask_of(dest_cpu));
vi->txn_addr = txn_affinity_addr(irq, dest_cpu);
@@ -724,6 +724,8 @@ static void iosapic_set_affinity_irq(unsigned int irq,
iosapic_set_irt_data(vi, &dummy_d0, &d1);
iosapic_wr_irt_entry(vi, d0, d1);
spin_unlock_irqrestore(&iosapic_lock, flags);
+
+ return 0;
}
#endif
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index e6a7e847ee8..ea31a452b15 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -352,8 +352,8 @@ static int __devinit parport_init_chip(struct parisc_device *dev)
unsigned long port;
if (!dev->irq) {
- printk(KERN_WARNING "IRQ not found for parallel device at 0x%lx\n",
- dev->hpa.start);
+ printk(KERN_WARNING "IRQ not found for parallel device at 0x%llx\n",
+ (unsigned long long)dev->hpa.start);
return -ENODEV;
}
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 4e63cc9e277..151bf5bc8af 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1,5 +1,5 @@
/* Low-level parallel-port routines for 8255-based PC-style hardware.
- *
+ *
* Authors: Phil Blundell <philb@gnu.org>
* Tim Waugh <tim@cyberelk.demon.co.uk>
* Jose Renau <renau@acm.org>
@@ -11,7 +11,7 @@
* Cleaned up include files - Russell King <linux@arm.uk.linux.org>
* DMA support - Bert De Jonghe <bert@sophis.be>
* Many ECP bugs fixed. Fred Barnes & Jamie Lokier, 1999
- * More PCI support now conditional on CONFIG_PCI, 03/2001, Paul G.
+ * More PCI support now conditional on CONFIG_PCI, 03/2001, Paul G.
* Various hacks, Fred Barnes, 04/2001
* Updated probing logic - Adam Belay <ambx1@neo.rr.com>
*/
@@ -56,10 +56,10 @@
#include <linux/pnp.h>
#include <linux/platform_device.h>
#include <linux/sysctl.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
-#include <asm/io.h>
#include <asm/dma.h>
-#include <asm/uaccess.h>
#include <linux/parport.h>
#include <linux/parport_pc.h>
@@ -82,7 +82,7 @@
#define ECR_TST 06
#define ECR_CNF 07
#define ECR_MODE_MASK 0xe0
-#define ECR_WRITE(p,v) frob_econtrol((p),0xff,(v))
+#define ECR_WRITE(p, v) frob_econtrol((p), 0xff, (v))
#undef DEBUG
@@ -109,27 +109,27 @@ static int pci_registered_parport;
static int pnp_registered_parport;
/* frob_control, but for ECR */
-static void frob_econtrol (struct parport *pb, unsigned char m,
+static void frob_econtrol(struct parport *pb, unsigned char m,
unsigned char v)
{
unsigned char ectr = 0;
if (m != 0xff)
- ectr = inb (ECONTROL (pb));
+ ectr = inb(ECONTROL(pb));
- DPRINTK (KERN_DEBUG "frob_econtrol(%02x,%02x): %02x -> %02x\n",
+ DPRINTK(KERN_DEBUG "frob_econtrol(%02x,%02x): %02x -> %02x\n",
m, v, ectr, (ectr & ~m) ^ v);
- outb ((ectr & ~m) ^ v, ECONTROL (pb));
+ outb((ectr & ~m) ^ v, ECONTROL(pb));
}
-static __inline__ void frob_set_mode (struct parport *p, int mode)
+static inline void frob_set_mode(struct parport *p, int mode)
{
- frob_econtrol (p, ECR_MODE_MASK, mode << 5);
+ frob_econtrol(p, ECR_MODE_MASK, mode << 5);
}
#ifdef CONFIG_PARPORT_PC_FIFO
-/* Safely change the mode bits in the ECR
+/* Safely change the mode bits in the ECR
Returns:
0 : Success
-EBUSY: Could not drain FIFO in some finite amount of time,
@@ -141,17 +141,18 @@ static int change_mode(struct parport *p, int m)
unsigned char oecr;
int mode;
- DPRINTK(KERN_INFO "parport change_mode ECP-ISA to mode 0x%02x\n",m);
+ DPRINTK(KERN_INFO "parport change_mode ECP-ISA to mode 0x%02x\n", m);
if (!priv->ecr) {
- printk (KERN_DEBUG "change_mode: but there's no ECR!\n");
+ printk(KERN_DEBUG "change_mode: but there's no ECR!\n");
return 0;
}
/* Bits <7:5> contain the mode. */
- oecr = inb (ECONTROL (p));
+ oecr = inb(ECONTROL(p));
mode = (oecr >> 5) & 0x7;
- if (mode == m) return 0;
+ if (mode == m)
+ return 0;
if (mode >= 2 && !(priv->ctr & 0x20)) {
/* This mode resets the FIFO, so we may
@@ -163,19 +164,21 @@ static int change_mode(struct parport *p, int m)
case ECR_ECP: /* ECP Parallel Port mode */
/* Busy wait for 200us */
for (counter = 0; counter < 40; counter++) {
- if (inb (ECONTROL (p)) & 0x01)
+ if (inb(ECONTROL(p)) & 0x01)
+ break;
+ if (signal_pending(current))
break;
- if (signal_pending (current)) break;
- udelay (5);
+ udelay(5);
}
/* Poll slowly. */
- while (!(inb (ECONTROL (p)) & 0x01)) {
- if (time_after_eq (jiffies, expire))
+ while (!(inb(ECONTROL(p)) & 0x01)) {
+ if (time_after_eq(jiffies, expire))
/* The FIFO is stuck. */
return -EBUSY;
- schedule_timeout_interruptible(msecs_to_jiffies(10));
- if (signal_pending (current))
+ schedule_timeout_interruptible(
+ msecs_to_jiffies(10));
+ if (signal_pending(current))
break;
}
}
@@ -185,20 +188,20 @@ static int change_mode(struct parport *p, int m)
/* We have to go through mode 001 */
oecr &= ~(7 << 5);
oecr |= ECR_PS2 << 5;
- ECR_WRITE (p, oecr);
+ ECR_WRITE(p, oecr);
}
/* Set the mode. */
oecr &= ~(7 << 5);
oecr |= m << 5;
- ECR_WRITE (p, oecr);
+ ECR_WRITE(p, oecr);
return 0;
}
#ifdef CONFIG_PARPORT_1284
/* Find FIFO lossage; FIFO is reset */
#if 0
-static int get_fifo_residue (struct parport *p)
+static int get_fifo_residue(struct parport *p)
{
int residue;
int cnfga;
@@ -206,26 +209,26 @@ static int get_fifo_residue (struct parport *p)
/* Adjust for the contents of the FIFO. */
for (residue = priv->fifo_depth; ; residue--) {
- if (inb (ECONTROL (p)) & 0x2)
+ if (inb(ECONTROL(p)) & 0x2)
/* Full up. */
break;
- outb (0, FIFO (p));
+ outb(0, FIFO(p));
}
- printk (KERN_DEBUG "%s: %d PWords were left in FIFO\n", p->name,
+ printk(KERN_DEBUG "%s: %d PWords were left in FIFO\n", p->name,
residue);
/* Reset the FIFO. */
- frob_set_mode (p, ECR_PS2);
+ frob_set_mode(p, ECR_PS2);
/* Now change to config mode and clean up. FIXME */
- frob_set_mode (p, ECR_CNF);
- cnfga = inb (CONFIGA (p));
- printk (KERN_DEBUG "%s: cnfgA contains 0x%02x\n", p->name, cnfga);
+ frob_set_mode(p, ECR_CNF);
+ cnfga = inb(CONFIGA(p));
+ printk(KERN_DEBUG "%s: cnfgA contains 0x%02x\n", p->name, cnfga);
if (!(cnfga & (1<<2))) {
- printk (KERN_DEBUG "%s: Accounting for extra byte\n", p->name);
+ printk(KERN_DEBUG "%s: Accounting for extra byte\n", p->name);
residue++;
}
@@ -233,9 +236,11 @@ static int get_fifo_residue (struct parport *p)
* PWord != 1 byte. */
/* Back to PS2 mode. */
- frob_set_mode (p, ECR_PS2);
+ frob_set_mode(p, ECR_PS2);
- DPRINTK (KERN_DEBUG "*** get_fifo_residue: done residue collecting (ecr = 0x%2.2x)\n", inb (ECONTROL (p)));
+ DPRINTK(KERN_DEBUG
+ "*** get_fifo_residue: done residue collecting (ecr = 0x%2.2x)\n",
+ inb(ECONTROL(p)));
return residue;
}
#endif /* 0 */
@@ -257,8 +262,8 @@ static int clear_epp_timeout(struct parport *pb)
/* To clear timeout some chips require double read */
parport_pc_read_status(pb);
r = parport_pc_read_status(pb);
- outb (r | 0x01, STATUS (pb)); /* Some reset by writing 1 */
- outb (r & 0xfe, STATUS (pb)); /* Others by writing 0 */
+ outb(r | 0x01, STATUS(pb)); /* Some reset by writing 1 */
+ outb(r & 0xfe, STATUS(pb)); /* Others by writing 0 */
r = parport_pc_read_status(pb);
return !(r & 0x01);
@@ -272,7 +277,8 @@ static int clear_epp_timeout(struct parport *pb)
* of these are in parport_pc.h.
*/
-static void parport_pc_init_state(struct pardevice *dev, struct parport_state *s)
+static void parport_pc_init_state(struct pardevice *dev,
+ struct parport_state *s)
{
s->u.pc.ctr = 0xc;
if (dev->irq_func &&
@@ -289,22 +295,23 @@ static void parport_pc_save_state(struct parport *p, struct parport_state *s)
const struct parport_pc_private *priv = p->physport->private_data;
s->u.pc.ctr = priv->ctr;
if (priv->ecr)
- s->u.pc.ecr = inb (ECONTROL (p));
+ s->u.pc.ecr = inb(ECONTROL(p));
}
-static void parport_pc_restore_state(struct parport *p, struct parport_state *s)
+static void parport_pc_restore_state(struct parport *p,
+ struct parport_state *s)
{
struct parport_pc_private *priv = p->physport->private_data;
register unsigned char c = s->u.pc.ctr & priv->ctr_writable;
- outb (c, CONTROL (p));
+ outb(c, CONTROL(p));
priv->ctr = c;
if (priv->ecr)
- ECR_WRITE (p, s->u.pc.ecr);
+ ECR_WRITE(p, s->u.pc.ecr);
}
#ifdef CONFIG_PARPORT_1284
-static size_t parport_pc_epp_read_data (struct parport *port, void *buf,
- size_t length, int flags)
+static size_t parport_pc_epp_read_data(struct parport *port, void *buf,
+ size_t length, int flags)
{
size_t got = 0;
@@ -316,54 +323,52 @@ static size_t parport_pc_epp_read_data (struct parport *port, void *buf,
* nFault is 0 if there is at least 1 byte in the Warp's FIFO
* pError is 1 if there are 16 bytes in the Warp's FIFO
*/
- status = inb (STATUS (port));
+ status = inb(STATUS(port));
- while (!(status & 0x08) && (got < length)) {
- if ((left >= 16) && (status & 0x20) && !(status & 0x08)) {
+ while (!(status & 0x08) && got < length) {
+ if (left >= 16 && (status & 0x20) && !(status & 0x08)) {
/* can grab 16 bytes from warp fifo */
- if (!((long)buf & 0x03)) {
- insl (EPPDATA (port), buf, 4);
- } else {
- insb (EPPDATA (port), buf, 16);
- }
+ if (!((long)buf & 0x03))
+ insl(EPPDATA(port), buf, 4);
+ else
+ insb(EPPDATA(port), buf, 16);
buf += 16;
got += 16;
left -= 16;
} else {
/* grab single byte from the warp fifo */
- *((char *)buf) = inb (EPPDATA (port));
+ *((char *)buf) = inb(EPPDATA(port));
buf++;
got++;
left--;
}
- status = inb (STATUS (port));
+ status = inb(STATUS(port));
if (status & 0x01) {
/* EPP timeout should never occur... */
- printk (KERN_DEBUG "%s: EPP timeout occurred while talking to "
- "w91284pic (should not have done)\n", port->name);
- clear_epp_timeout (port);
+ printk(KERN_DEBUG
+"%s: EPP timeout occurred while talking to w91284pic (should not have done)\n", port->name);
+ clear_epp_timeout(port);
}
}
return got;
}
if ((flags & PARPORT_EPP_FAST) && (length > 1)) {
- if (!(((long)buf | length) & 0x03)) {
- insl (EPPDATA (port), buf, (length >> 2));
- } else {
- insb (EPPDATA (port), buf, length);
- }
- if (inb (STATUS (port)) & 0x01) {
- clear_epp_timeout (port);
+ if (!(((long)buf | length) & 0x03))
+ insl(EPPDATA(port), buf, (length >> 2));
+ else
+ insb(EPPDATA(port), buf, length);
+ if (inb(STATUS(port)) & 0x01) {
+ clear_epp_timeout(port);
return -EIO;
}
return length;
}
for (; got < length; got++) {
- *((char*)buf) = inb (EPPDATA(port));
+ *((char *)buf) = inb(EPPDATA(port));
buf++;
- if (inb (STATUS (port)) & 0x01) {
+ if (inb(STATUS(port)) & 0x01) {
/* EPP timeout */
- clear_epp_timeout (port);
+ clear_epp_timeout(port);
break;
}
}
@@ -371,28 +376,27 @@ static size_t parport_pc_epp_read_data (struct parport *port, void *buf,
return got;
}
-static size_t parport_pc_epp_write_data (struct parport *port, const void *buf,
- size_t length, int flags)
+static size_t parport_pc_epp_write_data(struct parport *port, const void *buf,
+ size_t length, int flags)
{
size_t written = 0;
if ((flags & PARPORT_EPP_FAST) && (length > 1)) {
- if (!(((long)buf | length) & 0x03)) {
- outsl (EPPDATA (port), buf, (length >> 2));
- } else {
- outsb (EPPDATA (port), buf, length);
- }
- if (inb (STATUS (port)) & 0x01) {
- clear_epp_timeout (port);
+ if (!(((long)buf | length) & 0x03))
+ outsl(EPPDATA(port), buf, (length >> 2));
+ else
+ outsb(EPPDATA(port), buf, length);
+ if (inb(STATUS(port)) & 0x01) {
+ clear_epp_timeout(port);
return -EIO;
}
return length;
}
for (; written < length; written++) {
- outb (*((char*)buf), EPPDATA(port));
+ outb(*((char *)buf), EPPDATA(port));
buf++;
- if (inb (STATUS(port)) & 0x01) {
- clear_epp_timeout (port);
+ if (inb(STATUS(port)) & 0x01) {
+ clear_epp_timeout(port);
break;
}
}
@@ -400,24 +404,24 @@ static size_t parport_pc_epp_write_data (struct parport *port, const void *buf,
return written;
}
-static size_t parport_pc_epp_read_addr (struct parport *port, void *buf,
+static size_t parport_pc_epp_read_addr(struct parport *port, void *buf,
size_t length, int flags)
{
size_t got = 0;
if ((flags & PARPORT_EPP_FAST) && (length > 1)) {
- insb (EPPADDR (port), buf, length);
- if (inb (STATUS (port)) & 0x01) {
- clear_epp_timeout (port);
+ insb(EPPADDR(port), buf, length);
+ if (inb(STATUS(port)) & 0x01) {
+ clear_epp_timeout(port);
return -EIO;
}
return length;
}
for (; got < length; got++) {
- *((char*)buf) = inb (EPPADDR (port));
+ *((char *)buf) = inb(EPPADDR(port));
buf++;
- if (inb (STATUS (port)) & 0x01) {
- clear_epp_timeout (port);
+ if (inb(STATUS(port)) & 0x01) {
+ clear_epp_timeout(port);
break;
}
}
@@ -425,25 +429,25 @@ static size_t parport_pc_epp_read_addr (struct parport *port, void *buf,
return got;
}
-static size_t parport_pc_epp_write_addr (struct parport *port,
+static size_t parport_pc_epp_write_addr(struct parport *port,
const void *buf, size_t length,
int flags)
{
size_t written = 0;
if ((flags & PARPORT_EPP_FAST) && (length > 1)) {
- outsb (EPPADDR (port), buf, length);
- if (inb (STATUS (port)) & 0x01) {
- clear_epp_timeout (port);
+ outsb(EPPADDR(port), buf, length);
+ if (inb(STATUS(port)) & 0x01) {
+ clear_epp_timeout(port);
return -EIO;
}
return length;
}
for (; written < length; written++) {
- outb (*((char*)buf), EPPADDR (port));
+ outb(*((char *)buf), EPPADDR(port));
buf++;
- if (inb (STATUS (port)) & 0x01) {
- clear_epp_timeout (port);
+ if (inb(STATUS(port)) & 0x01) {
+ clear_epp_timeout(port);
break;
}
}
@@ -451,74 +455,74 @@ static size_t parport_pc_epp_write_addr (struct parport *port,
return written;
}
-static size_t parport_pc_ecpepp_read_data (struct parport *port, void *buf,
- size_t length, int flags)
+static size_t parport_pc_ecpepp_read_data(struct parport *port, void *buf,
+ size_t length, int flags)
{
size_t got;
- frob_set_mode (port, ECR_EPP);
- parport_pc_data_reverse (port);
- parport_pc_write_control (port, 0x4);
- got = parport_pc_epp_read_data (port, buf, length, flags);
- frob_set_mode (port, ECR_PS2);
+ frob_set_mode(port, ECR_EPP);
+ parport_pc_data_reverse(port);
+ parport_pc_write_control(port, 0x4);
+ got = parport_pc_epp_read_data(port, buf, length, flags);
+ frob_set_mode(port, ECR_PS2);
return got;
}
-static size_t parport_pc_ecpepp_write_data (struct parport *port,
- const void *buf, size_t length,
- int flags)
+static size_t parport_pc_ecpepp_write_data(struct parport *port,
+ const void *buf, size_t length,
+ int flags)
{
size_t written;
- frob_set_mode (port, ECR_EPP);
- parport_pc_write_control (port, 0x4);
- parport_pc_data_forward (port);
- written = parport_pc_epp_write_data (port, buf, length, flags);
- frob_set_mode (port, ECR_PS2);
+ frob_set_mode(port, ECR_EPP);
+ parport_pc_write_control(port, 0x4);
+ parport_pc_data_forward(port);
+ written = parport_pc_epp_write_data(port, buf, length, flags);
+ frob_set_mode(port, ECR_PS2);
return written;
}
-static size_t parport_pc_ecpepp_read_addr (struct parport *port, void *buf,
- size_t length, int flags)
+static size_t parport_pc_ecpepp_read_addr(struct parport *port, void *buf,
+ size_t length, int flags)
{
size_t got;
- frob_set_mode (port, ECR_EPP);
- parport_pc_data_reverse (port);
- parport_pc_write_control (port, 0x4);
- got = parport_pc_epp_read_addr (port, buf, length, flags);
- frob_set_mode (port, ECR_PS2);
+ frob_set_mode(port, ECR_EPP);
+ parport_pc_data_reverse(port);
+ parport_pc_write_control(port, 0x4);
+ got = parport_pc_epp_read_addr(port, buf, length, flags);
+ frob_set_mode(port, ECR_PS2);
return got;
}
-static size_t parport_pc_ecpepp_write_addr (struct parport *port,
+static size_t parport_pc_ecpepp_write_addr(struct parport *port,
const void *buf, size_t length,
int flags)
{
size_t written;
- frob_set_mode (port, ECR_EPP);
- parport_pc_write_control (port, 0x4);
- parport_pc_data_forward (port);
- written = parport_pc_epp_write_addr (port, buf, length, flags);
- frob_set_mode (port, ECR_PS2);
+ frob_set_mode(port, ECR_EPP);
+ parport_pc_write_control(port, 0x4);
+ parport_pc_data_forward(port);
+ written = parport_pc_epp_write_addr(port, buf, length, flags);
+ frob_set_mode(port, ECR_PS2);
return written;
}
#endif /* IEEE 1284 support */
#ifdef CONFIG_PARPORT_PC_FIFO
-static size_t parport_pc_fifo_write_block_pio (struct parport *port,
+static size_t parport_pc_fifo_write_block_pio(struct parport *port,
const void *buf, size_t length)
{
int ret = 0;
const unsigned char *bufp = buf;
size_t left = length;
unsigned long expire = jiffies + port->physport->cad->timeout;
- const int fifo = FIFO (port);
+ const int fifo = FIFO(port);
int poll_for = 8; /* 80 usecs */
const struct parport_pc_private *priv = port->physport->private_data;
const int fifo_depth = priv->fifo_depth;
@@ -526,25 +530,25 @@ static size_t parport_pc_fifo_write_block_pio (struct parport *port,
port = port->physport;
/* We don't want to be interrupted every character. */
- parport_pc_disable_irq (port);
+ parport_pc_disable_irq(port);
/* set nErrIntrEn and serviceIntr */
- frob_econtrol (port, (1<<4) | (1<<2), (1<<4) | (1<<2));
+ frob_econtrol(port, (1<<4) | (1<<2), (1<<4) | (1<<2));
/* Forward mode. */
- parport_pc_data_forward (port); /* Must be in PS2 mode */
+ parport_pc_data_forward(port); /* Must be in PS2 mode */
while (left) {
unsigned char byte;
- unsigned char ecrval = inb (ECONTROL (port));
+ unsigned char ecrval = inb(ECONTROL(port));
int i = 0;
- if (need_resched() && time_before (jiffies, expire))
+ if (need_resched() && time_before(jiffies, expire))
/* Can't yield the port. */
- schedule ();
+ schedule();
/* Anyone else waiting for the port? */
if (port->waithead) {
- printk (KERN_DEBUG "Somebody wants the port\n");
+ printk(KERN_DEBUG "Somebody wants the port\n");
break;
}
@@ -552,21 +556,22 @@ static size_t parport_pc_fifo_write_block_pio (struct parport *port,
/* FIFO is full. Wait for interrupt. */
/* Clear serviceIntr */
- ECR_WRITE (port, ecrval & ~(1<<2));
- false_alarm:
- ret = parport_wait_event (port, HZ);
- if (ret < 0) break;
+ ECR_WRITE(port, ecrval & ~(1<<2));
+false_alarm:
+ ret = parport_wait_event(port, HZ);
+ if (ret < 0)
+ break;
ret = 0;
- if (!time_before (jiffies, expire)) {
+ if (!time_before(jiffies, expire)) {
/* Timed out. */
- printk (KERN_DEBUG "FIFO write timed out\n");
+ printk(KERN_DEBUG "FIFO write timed out\n");
break;
}
- ecrval = inb (ECONTROL (port));
+ ecrval = inb(ECONTROL(port));
if (!(ecrval & (1<<2))) {
if (need_resched() &&
- time_before (jiffies, expire))
- schedule ();
+ time_before(jiffies, expire))
+ schedule();
goto false_alarm;
}
@@ -577,38 +582,38 @@ static size_t parport_pc_fifo_write_block_pio (struct parport *port,
/* Can't fail now. */
expire = jiffies + port->cad->timeout;
- poll:
- if (signal_pending (current))
+poll:
+ if (signal_pending(current))
break;
if (ecrval & 0x01) {
/* FIFO is empty. Blast it full. */
const int n = left < fifo_depth ? left : fifo_depth;
- outsb (fifo, bufp, n);
+ outsb(fifo, bufp, n);
bufp += n;
left -= n;
/* Adjust the poll time. */
- if (i < (poll_for - 2)) poll_for--;
+ if (i < (poll_for - 2))
+ poll_for--;
continue;
} else if (i++ < poll_for) {
- udelay (10);
- ecrval = inb (ECONTROL (port));
+ udelay(10);
+ ecrval = inb(ECONTROL(port));
goto poll;
}
- /* Half-full (call me an optimist) */
+ /* Half-full (call me an optimist) */
byte = *bufp++;
- outb (byte, fifo);
+ outb(byte, fifo);
left--;
- }
-
-dump_parport_state ("leave fifo_write_block_pio", port);
+ }
+ dump_parport_state("leave fifo_write_block_pio", port);
return length - left;
}
#ifdef HAS_DMA
-static size_t parport_pc_fifo_write_block_dma (struct parport *port,
+static size_t parport_pc_fifo_write_block_dma(struct parport *port,
const void *buf, size_t length)
{
int ret = 0;
@@ -621,7 +626,7 @@ static size_t parport_pc_fifo_write_block_dma (struct parport *port,
unsigned long start = (unsigned long) buf;
unsigned long end = (unsigned long) buf + length - 1;
-dump_parport_state ("enter fifo_write_block_dma", port);
+ dump_parport_state("enter fifo_write_block_dma", port);
if (end < MAX_DMA_ADDRESS) {
/* If it would cross a 64k boundary, cap it at the end. */
if ((start ^ end) & ~0xffffUL)
@@ -629,8 +634,9 @@ dump_parport_state ("enter fifo_write_block_dma", port);
dma_addr = dma_handle = dma_map_single(dev, (void *)buf, length,
DMA_TO_DEVICE);
- } else {
- /* above 16 MB we use a bounce buffer as ISA-DMA is not possible */
+ } else {
+ /* above 16 MB we use a bounce buffer as ISA-DMA
+ is not possible */
maxlen = PAGE_SIZE; /* sizeof(priv->dma_buf) */
dma_addr = priv->dma_handle;
dma_handle = 0;
@@ -639,12 +645,12 @@ dump_parport_state ("enter fifo_write_block_dma", port);
port = port->physport;
/* We don't want to be interrupted every character. */
- parport_pc_disable_irq (port);
+ parport_pc_disable_irq(port);
/* set nErrIntrEn and serviceIntr */
- frob_econtrol (port, (1<<4) | (1<<2), (1<<4) | (1<<2));
+ frob_econtrol(port, (1<<4) | (1<<2), (1<<4) | (1<<2));
/* Forward mode. */
- parport_pc_data_forward (port); /* Must be in PS2 mode */
+ parport_pc_data_forward(port); /* Must be in PS2 mode */
while (left) {
unsigned long expire = jiffies + port->physport->cad->timeout;
@@ -665,10 +671,10 @@ dump_parport_state ("enter fifo_write_block_dma", port);
set_dma_count(port->dma, count);
/* Set DMA mode */
- frob_econtrol (port, 1<<3, 1<<3);
+ frob_econtrol(port, 1<<3, 1<<3);
/* Clear serviceIntr */
- frob_econtrol (port, 1<<2, 0);
+ frob_econtrol(port, 1<<2, 0);
enable_dma(port->dma);
release_dma_lock(dmaflag);
@@ -676,20 +682,22 @@ dump_parport_state ("enter fifo_write_block_dma", port);
/* assume DMA will be successful */
left -= count;
buf += count;
- if (dma_handle) dma_addr += count;
+ if (dma_handle)
+ dma_addr += count;
/* Wait for interrupt. */
- false_alarm:
- ret = parport_wait_event (port, HZ);
- if (ret < 0) break;
+false_alarm:
+ ret = parport_wait_event(port, HZ);
+ if (ret < 0)
+ break;
ret = 0;
- if (!time_before (jiffies, expire)) {
+ if (!time_before(jiffies, expire)) {
/* Timed out. */
- printk (KERN_DEBUG "DMA write timed out\n");
+ printk(KERN_DEBUG "DMA write timed out\n");
break;
}
/* Is serviceIntr set? */
- if (!(inb (ECONTROL (port)) & (1<<2))) {
+ if (!(inb(ECONTROL(port)) & (1<<2))) {
cond_resched();
goto false_alarm;
@@ -705,14 +713,15 @@ dump_parport_state ("enter fifo_write_block_dma", port);
/* Anyone else waiting for the port? */
if (port->waithead) {
- printk (KERN_DEBUG "Somebody wants the port\n");
+ printk(KERN_DEBUG "Somebody wants the port\n");
break;
}
/* update for possible DMA residue ! */
buf -= count;
left += count;
- if (dma_handle) dma_addr -= count;
+ if (dma_handle)
+ dma_addr -= count;
}
/* Maybe got here through break, so adjust for DMA residue! */
@@ -723,12 +732,12 @@ dump_parport_state ("enter fifo_write_block_dma", port);
release_dma_lock(dmaflag);
/* Turn off DMA mode */
- frob_econtrol (port, 1<<3, 0);
+ frob_econtrol(port, 1<<3, 0);
if (dma_handle)
dma_unmap_single(dev, dma_handle, length, DMA_TO_DEVICE);
-dump_parport_state ("leave fifo_write_block_dma", port);
+ dump_parport_state("leave fifo_write_block_dma", port);
return length - left;
}
#endif
@@ -738,13 +747,13 @@ static inline size_t parport_pc_fifo_write_block(struct parport *port,
{
#ifdef HAS_DMA
if (port->dma != PARPORT_DMA_NONE)
- return parport_pc_fifo_write_block_dma (port, buf, length);
+ return parport_pc_fifo_write_block_dma(port, buf, length);
#endif
- return parport_pc_fifo_write_block_pio (port, buf, length);
+ return parport_pc_fifo_write_block_pio(port, buf, length);
}
/* Parallel Port FIFO mode (ECP chipsets) */
-static size_t parport_pc_compat_write_block_pio (struct parport *port,
+static size_t parport_pc_compat_write_block_pio(struct parport *port,
const void *buf, size_t length,
int flags)
{
@@ -756,14 +765,16 @@ static size_t parport_pc_compat_write_block_pio (struct parport *port,
/* Special case: a timeout of zero means we cannot call schedule().
* Also if O_NONBLOCK is set then use the default implementation. */
if (port->physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
- return parport_ieee1284_write_compat (port, buf,
+ return parport_ieee1284_write_compat(port, buf,
length, flags);
/* Set up parallel port FIFO mode.*/
- parport_pc_data_forward (port); /* Must be in PS2 mode */
- parport_pc_frob_control (port, PARPORT_CONTROL_STROBE, 0);
- r = change_mode (port, ECR_PPF); /* Parallel port FIFO */
- if (r) printk (KERN_DEBUG "%s: Warning change_mode ECR_PPF failed\n", port->name);
+ parport_pc_data_forward(port); /* Must be in PS2 mode */
+ parport_pc_frob_control(port, PARPORT_CONTROL_STROBE, 0);
+ r = change_mode(port, ECR_PPF); /* Parallel port FIFO */
+ if (r)
+ printk(KERN_DEBUG "%s: Warning change_mode ECR_PPF failed\n",
+ port->name);
port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
@@ -775,40 +786,39 @@ static size_t parport_pc_compat_write_block_pio (struct parport *port,
* the FIFO is empty, so allow 4 seconds for each position
* in the fifo.
*/
- expire = jiffies + (priv->fifo_depth * HZ * 4);
+ expire = jiffies + (priv->fifo_depth * HZ * 4);
do {
/* Wait for the FIFO to empty */
- r = change_mode (port, ECR_PS2);
- if (r != -EBUSY) {
+ r = change_mode(port, ECR_PS2);
+ if (r != -EBUSY)
break;
- }
- } while (time_before (jiffies, expire));
+ } while (time_before(jiffies, expire));
if (r == -EBUSY) {
- printk (KERN_DEBUG "%s: FIFO is stuck\n", port->name);
+ printk(KERN_DEBUG "%s: FIFO is stuck\n", port->name);
/* Prevent further data transfer. */
- frob_set_mode (port, ECR_TST);
+ frob_set_mode(port, ECR_TST);
/* Adjust for the contents of the FIFO. */
for (written -= priv->fifo_depth; ; written++) {
- if (inb (ECONTROL (port)) & 0x2) {
+ if (inb(ECONTROL(port)) & 0x2) {
/* Full up. */
break;
}
- outb (0, FIFO (port));
+ outb(0, FIFO(port));
}
/* Reset the FIFO and return to PS2 mode. */
- frob_set_mode (port, ECR_PS2);
+ frob_set_mode(port, ECR_PS2);
}
- r = parport_wait_peripheral (port,
+ r = parport_wait_peripheral(port,
PARPORT_STATUS_BUSY,
PARPORT_STATUS_BUSY);
if (r)
- printk (KERN_DEBUG
- "%s: BUSY timeout (%d) in compat_write_block_pio\n",
+ printk(KERN_DEBUG
+ "%s: BUSY timeout (%d) in compat_write_block_pio\n",
port->name, r);
port->physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
@@ -818,7 +828,7 @@ static size_t parport_pc_compat_write_block_pio (struct parport *port,
/* ECP */
#ifdef CONFIG_PARPORT_1284
-static size_t parport_pc_ecp_write_block_pio (struct parport *port,
+static size_t parport_pc_ecp_write_block_pio(struct parport *port,
const void *buf, size_t length,
int flags)
{
@@ -830,36 +840,38 @@ static size_t parport_pc_ecp_write_block_pio (struct parport *port,
/* Special case: a timeout of zero means we cannot call schedule().
* Also if O_NONBLOCK is set then use the default implementation. */
if (port->physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
- return parport_ieee1284_ecp_write_data (port, buf,
+ return parport_ieee1284_ecp_write_data(port, buf,
length, flags);
/* Switch to forward mode if necessary. */
if (port->physport->ieee1284.phase != IEEE1284_PH_FWD_IDLE) {
/* Event 47: Set nInit high. */
- parport_frob_control (port,
+ parport_frob_control(port,
PARPORT_CONTROL_INIT
| PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_INIT
| PARPORT_CONTROL_AUTOFD);
/* Event 49: PError goes high. */
- r = parport_wait_peripheral (port,
+ r = parport_wait_peripheral(port,
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (r) {
- printk (KERN_DEBUG "%s: PError timeout (%d) "
+ printk(KERN_DEBUG "%s: PError timeout (%d) "
"in ecp_write_block_pio\n", port->name, r);
}
}
/* Set up ECP parallel port mode.*/
- parport_pc_data_forward (port); /* Must be in PS2 mode */
- parport_pc_frob_control (port,
+ parport_pc_data_forward(port); /* Must be in PS2 mode */
+ parport_pc_frob_control(port,
PARPORT_CONTROL_STROBE |
PARPORT_CONTROL_AUTOFD,
0);
- r = change_mode (port, ECR_ECP); /* ECP FIFO */
- if (r) printk (KERN_DEBUG "%s: Warning change_mode ECR_ECP failed\n", port->name);
+ r = change_mode(port, ECR_ECP); /* ECP FIFO */
+ if (r)
+ printk(KERN_DEBUG "%s: Warning change_mode ECR_ECP failed\n",
+ port->name);
port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
/* Write the data to the FIFO. */
@@ -873,55 +885,54 @@ static size_t parport_pc_ecp_write_block_pio (struct parport *port,
expire = jiffies + (priv->fifo_depth * (HZ * 4));
do {
/* Wait for the FIFO to empty */
- r = change_mode (port, ECR_PS2);
- if (r != -EBUSY) {
+ r = change_mode(port, ECR_PS2);
+ if (r != -EBUSY)
break;
- }
- } while (time_before (jiffies, expire));
+ } while (time_before(jiffies, expire));
if (r == -EBUSY) {
- printk (KERN_DEBUG "%s: FIFO is stuck\n", port->name);
+ printk(KERN_DEBUG "%s: FIFO is stuck\n", port->name);
/* Prevent further data transfer. */
- frob_set_mode (port, ECR_TST);
+ frob_set_mode(port, ECR_TST);
/* Adjust for the contents of the FIFO. */
for (written -= priv->fifo_depth; ; written++) {
- if (inb (ECONTROL (port)) & 0x2) {
+ if (inb(ECONTROL(port)) & 0x2) {
/* Full up. */
break;
}
- outb (0, FIFO (port));
+ outb(0, FIFO(port));
}
/* Reset the FIFO and return to PS2 mode. */
- frob_set_mode (port, ECR_PS2);
+ frob_set_mode(port, ECR_PS2);
/* Host transfer recovery. */
- parport_pc_data_reverse (port); /* Must be in PS2 mode */
- udelay (5);
- parport_frob_control (port, PARPORT_CONTROL_INIT, 0);
- r = parport_wait_peripheral (port, PARPORT_STATUS_PAPEROUT, 0);
+ parport_pc_data_reverse(port); /* Must be in PS2 mode */
+ udelay(5);
+ parport_frob_control(port, PARPORT_CONTROL_INIT, 0);
+ r = parport_wait_peripheral(port, PARPORT_STATUS_PAPEROUT, 0);
if (r)
- printk (KERN_DEBUG "%s: PE,1 timeout (%d) "
+ printk(KERN_DEBUG "%s: PE,1 timeout (%d) "
"in ecp_write_block_pio\n", port->name, r);
- parport_frob_control (port,
+ parport_frob_control(port,
PARPORT_CONTROL_INIT,
PARPORT_CONTROL_INIT);
- r = parport_wait_peripheral (port,
+ r = parport_wait_peripheral(port,
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
- if (r)
- printk (KERN_DEBUG "%s: PE,2 timeout (%d) "
+ if (r)
+ printk(KERN_DEBUG "%s: PE,2 timeout (%d) "
"in ecp_write_block_pio\n", port->name, r);
}
- r = parport_wait_peripheral (port,
- PARPORT_STATUS_BUSY,
+ r = parport_wait_peripheral(port,
+ PARPORT_STATUS_BUSY,
PARPORT_STATUS_BUSY);
- if(r)
- printk (KERN_DEBUG
+ if (r)
+ printk(KERN_DEBUG
"%s: BUSY timeout (%d) in ecp_write_block_pio\n",
port->name, r);
@@ -931,7 +942,7 @@ static size_t parport_pc_ecp_write_block_pio (struct parport *port,
}
#if 0
-static size_t parport_pc_ecp_read_block_pio (struct parport *port,
+static size_t parport_pc_ecp_read_block_pio(struct parport *port,
void *buf, size_t length,
int flags)
{
@@ -944,13 +955,13 @@ static size_t parport_pc_ecp_read_block_pio (struct parport *port,
char *bufp = buf;
port = port->physport;
-DPRINTK (KERN_DEBUG "parport_pc: parport_pc_ecp_read_block_pio\n");
-dump_parport_state ("enter fcn", port);
+ DPRINTK(KERN_DEBUG "parport_pc: parport_pc_ecp_read_block_pio\n");
+ dump_parport_state("enter fcn", port);
/* Special case: a timeout of zero means we cannot call schedule().
* Also if O_NONBLOCK is set then use the default implementation. */
if (port->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
- return parport_ieee1284_ecp_read_data (port, buf,
+ return parport_ieee1284_ecp_read_data(port, buf,
length, flags);
if (port->ieee1284.mode == IEEE1284_MODE_ECPRLE) {
@@ -966,173 +977,178 @@ dump_parport_state ("enter fcn", port);
* go through software emulation. Otherwise we may have to throw
* away data. */
if (length < fifofull)
- return parport_ieee1284_ecp_read_data (port, buf,
+ return parport_ieee1284_ecp_read_data(port, buf,
length, flags);
if (port->ieee1284.phase != IEEE1284_PH_REV_IDLE) {
/* change to reverse-idle phase (must be in forward-idle) */
/* Event 38: Set nAutoFd low (also make sure nStrobe is high) */
- parport_frob_control (port,
+ parport_frob_control(port,
PARPORT_CONTROL_AUTOFD
| PARPORT_CONTROL_STROBE,
PARPORT_CONTROL_AUTOFD);
- parport_pc_data_reverse (port); /* Must be in PS2 mode */
- udelay (5);
+ parport_pc_data_reverse(port); /* Must be in PS2 mode */
+ udelay(5);
/* Event 39: Set nInit low to initiate bus reversal */
- parport_frob_control (port,
+ parport_frob_control(port,
PARPORT_CONTROL_INIT,
0);
/* Event 40: Wait for nAckReverse (PError) to go low */
- r = parport_wait_peripheral (port, PARPORT_STATUS_PAPEROUT, 0);
- if (r) {
- printk (KERN_DEBUG "%s: PE timeout Event 40 (%d) "
+ r = parport_wait_peripheral(port, PARPORT_STATUS_PAPEROUT, 0);
+ if (r) {
+ printk(KERN_DEBUG "%s: PE timeout Event 40 (%d) "
"in ecp_read_block_pio\n", port->name, r);
return 0;
}
}
/* Set up ECP FIFO mode.*/
-/* parport_pc_frob_control (port,
+/* parport_pc_frob_control(port,
PARPORT_CONTROL_STROBE |
PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_AUTOFD); */
- r = change_mode (port, ECR_ECP); /* ECP FIFO */
- if (r) printk (KERN_DEBUG "%s: Warning change_mode ECR_ECP failed\n", port->name);
+ r = change_mode(port, ECR_ECP); /* ECP FIFO */
+ if (r)
+ printk(KERN_DEBUG "%s: Warning change_mode ECR_ECP failed\n",
+ port->name);
port->ieee1284.phase = IEEE1284_PH_REV_DATA;
/* the first byte must be collected manually */
-dump_parport_state ("pre 43", port);
+ dump_parport_state("pre 43", port);
/* Event 43: Wait for nAck to go low */
- r = parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0);
+ r = parport_wait_peripheral(port, PARPORT_STATUS_ACK, 0);
if (r) {
/* timed out while reading -- no data */
- printk (KERN_DEBUG "PIO read timed out (initial byte)\n");
+ printk(KERN_DEBUG "PIO read timed out (initial byte)\n");
goto out_no_data;
}
/* read byte */
- *bufp++ = inb (DATA (port));
+ *bufp++ = inb(DATA(port));
left--;
-dump_parport_state ("43-44", port);
+ dump_parport_state("43-44", port);
/* Event 44: nAutoFd (HostAck) goes high to acknowledge */
- parport_pc_frob_control (port,
+ parport_pc_frob_control(port,
PARPORT_CONTROL_AUTOFD,
0);
-dump_parport_state ("pre 45", port);
+ dump_parport_state("pre 45", port);
/* Event 45: Wait for nAck to go high */
-/* r = parport_wait_peripheral (port, PARPORT_STATUS_ACK, PARPORT_STATUS_ACK); */
-dump_parport_state ("post 45", port);
-r = 0;
+ /* r = parport_wait_peripheral(port, PARPORT_STATUS_ACK,
+ PARPORT_STATUS_ACK); */
+ dump_parport_state("post 45", port);
+ r = 0;
if (r) {
/* timed out while waiting for peripheral to respond to ack */
- printk (KERN_DEBUG "ECP PIO read timed out (waiting for nAck)\n");
+ printk(KERN_DEBUG "ECP PIO read timed out (waiting for nAck)\n");
/* keep hold of the byte we've got already */
goto out_no_data;
}
/* Event 46: nAutoFd (HostAck) goes low to accept more data */
- parport_pc_frob_control (port,
+ parport_pc_frob_control(port,
PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_AUTOFD);
-dump_parport_state ("rev idle", port);
+ dump_parport_state("rev idle", port);
/* Do the transfer. */
while (left > fifofull) {
int ret;
unsigned long expire = jiffies + port->cad->timeout;
- unsigned char ecrval = inb (ECONTROL (port));
+ unsigned char ecrval = inb(ECONTROL(port));
- if (need_resched() && time_before (jiffies, expire))
+ if (need_resched() && time_before(jiffies, expire))
/* Can't yield the port. */
- schedule ();
+ schedule();
/* At this point, the FIFO may already be full. In
- * that case ECP is already holding back the
- * peripheral (assuming proper design) with a delayed
- * handshake. Work fast to avoid a peripheral
- * timeout. */
+ * that case ECP is already holding back the
+ * peripheral (assuming proper design) with a delayed
+ * handshake. Work fast to avoid a peripheral
+ * timeout. */
if (ecrval & 0x01) {
/* FIFO is empty. Wait for interrupt. */
-dump_parport_state ("FIFO empty", port);
+ dump_parport_state("FIFO empty", port);
/* Anyone else waiting for the port? */
if (port->waithead) {
- printk (KERN_DEBUG "Somebody wants the port\n");
+ printk(KERN_DEBUG "Somebody wants the port\n");
break;
}
/* Clear serviceIntr */
- ECR_WRITE (port, ecrval & ~(1<<2));
- false_alarm:
-dump_parport_state ("waiting", port);
- ret = parport_wait_event (port, HZ);
-DPRINTK (KERN_DEBUG "parport_wait_event returned %d\n", ret);
+ ECR_WRITE(port, ecrval & ~(1<<2));
+false_alarm:
+ dump_parport_state("waiting", port);
+ ret = parport_wait_event(port, HZ);
+ DPRINTK(KERN_DEBUG "parport_wait_event returned %d\n",
+ ret);
if (ret < 0)
break;
ret = 0;
- if (!time_before (jiffies, expire)) {
+ if (!time_before(jiffies, expire)) {
/* Timed out. */
-dump_parport_state ("timeout", port);
- printk (KERN_DEBUG "PIO read timed out\n");
+ dump_parport_state("timeout", port);
+ printk(KERN_DEBUG "PIO read timed out\n");
break;
}
- ecrval = inb (ECONTROL (port));
+ ecrval = inb(ECONTROL(port));
if (!(ecrval & (1<<2))) {
if (need_resched() &&
- time_before (jiffies, expire)) {
- schedule ();
+ time_before(jiffies, expire)) {
+ schedule();
}
goto false_alarm;
}
/* Depending on how the FIFO threshold was
- * set, how long interrupt service took, and
- * how fast the peripheral is, we might be
- * lucky and have a just filled FIFO. */
+ * set, how long interrupt service took, and
+ * how fast the peripheral is, we might be
+ * lucky and have a just filled FIFO. */
continue;
}
if (ecrval & 0x02) {
/* FIFO is full. */
-dump_parport_state ("FIFO full", port);
- insb (fifo, bufp, fifo_depth);
+ dump_parport_state("FIFO full", port);
+ insb(fifo, bufp, fifo_depth);
bufp += fifo_depth;
left -= fifo_depth;
continue;
}
-DPRINTK (KERN_DEBUG "*** ecp_read_block_pio: reading one byte from the FIFO\n");
+ DPRINTK(KERN_DEBUG
+ "*** ecp_read_block_pio: reading one byte from the FIFO\n");
/* FIFO not filled. We will cycle this loop for a while
- * and either the peripheral will fill it faster,
- * tripping a fast empty with insb, or we empty it. */
- *bufp++ = inb (fifo);
+ * and either the peripheral will fill it faster,
+ * tripping a fast empty with insb, or we empty it. */
+ *bufp++ = inb(fifo);
left--;
}
/* scoop up anything left in the FIFO */
- while (left && !(inb (ECONTROL (port) & 0x01))) {
- *bufp++ = inb (fifo);
+ while (left && !(inb(ECONTROL(port)) & 0x01)) {
+ *bufp++ = inb(fifo);
left--;
}
port->ieee1284.phase = IEEE1284_PH_REV_IDLE;
-dump_parport_state ("rev idle2", port);
+ dump_parport_state("rev idle2", port);
out_no_data:
/* Go to forward idle mode to shut the peripheral up (event 47). */
- parport_frob_control (port, PARPORT_CONTROL_INIT, PARPORT_CONTROL_INIT);
+ parport_frob_control(port, PARPORT_CONTROL_INIT, PARPORT_CONTROL_INIT);
/* event 49: PError goes high */
- r = parport_wait_peripheral (port,
+ r = parport_wait_peripheral(port,
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (r) {
- printk (KERN_DEBUG
+ printk(KERN_DEBUG
"%s: PE timeout FWDIDLE (%d) in ecp_read_block_pio\n",
port->name, r);
}
@@ -1141,14 +1157,14 @@ out_no_data:
/* Finish up. */
{
- int lost = get_fifo_residue (port);
+ int lost = get_fifo_residue(port);
if (lost)
/* Shouldn't happen with compliant peripherals. */
- printk (KERN_DEBUG "%s: DATA LOSS (%d bytes)!\n",
+ printk(KERN_DEBUG "%s: DATA LOSS (%d bytes)!\n",
port->name, lost);
}
-dump_parport_state ("fwd idle", port);
+ dump_parport_state("fwd idle", port);
return length - left;
}
#endif /* 0 */
@@ -1164,8 +1180,7 @@ dump_parport_state ("fwd idle", port);
/* GCC is not inlining extern inline function later overwriten to non-inline,
so we use outlined_ variants here. */
-static const struct parport_operations parport_pc_ops =
-{
+static const struct parport_operations parport_pc_ops = {
.write_data = parport_pc_write_data,
.read_data = parport_pc_read_data,
@@ -1202,88 +1217,107 @@ static const struct parport_operations parport_pc_ops =
};
#ifdef CONFIG_PARPORT_PC_SUPERIO
+
+static struct superio_struct *find_free_superio(void)
+{
+ int i;
+ for (i = 0; i < NR_SUPERIOS; i++)
+ if (superios[i].io == 0)
+ return &superios[i];
+ return NULL;
+}
+
+
/* Super-IO chipset detection, Winbond, SMSC */
static void __devinit show_parconfig_smsc37c669(int io, int key)
{
- int cr1,cr4,cra,cr23,cr26,cr27,i=0;
- static const char *const modes[]={
+ int cr1, cr4, cra, cr23, cr26, cr27;
+ struct superio_struct *s;
+
+ static const char *const modes[] = {
"SPP and Bidirectional (PS/2)",
"EPP and SPP",
"ECP",
"ECP and EPP" };
- outb(key,io);
- outb(key,io);
- outb(1,io);
- cr1=inb(io+1);
- outb(4,io);
- cr4=inb(io+1);
- outb(0x0a,io);
- cra=inb(io+1);
- outb(0x23,io);
- cr23=inb(io+1);
- outb(0x26,io);
- cr26=inb(io+1);
- outb(0x27,io);
- cr27=inb(io+1);
- outb(0xaa,io);
+ outb(key, io);
+ outb(key, io);
+ outb(1, io);
+ cr1 = inb(io + 1);
+ outb(4, io);
+ cr4 = inb(io + 1);
+ outb(0x0a, io);
+ cra = inb(io + 1);
+ outb(0x23, io);
+ cr23 = inb(io + 1);
+ outb(0x26, io);
+ cr26 = inb(io + 1);
+ outb(0x27, io);
+ cr27 = inb(io + 1);
+ outb(0xaa, io);
if (verbose_probing) {
- printk (KERN_INFO "SMSC 37c669 LPT Config: cr_1=0x%02x, 4=0x%02x, "
+ printk(KERN_INFO
+ "SMSC 37c669 LPT Config: cr_1=0x%02x, 4=0x%02x, "
"A=0x%2x, 23=0x%02x, 26=0x%02x, 27=0x%02x\n",
- cr1,cr4,cra,cr23,cr26,cr27);
-
+ cr1, cr4, cra, cr23, cr26, cr27);
+
/* The documentation calls DMA and IRQ-Lines by letters, so
the board maker can/will wire them
appropriately/randomly... G=reserved H=IDE-irq, */
- printk (KERN_INFO "SMSC LPT Config: io=0x%04x, irq=%c, dma=%c, "
- "fifo threshold=%d\n", cr23*4,
- (cr27 &0x0f) ? 'A'-1+(cr27 &0x0f): '-',
- (cr26 &0x0f) ? 'A'-1+(cr26 &0x0f): '-', cra & 0x0f);
+ printk(KERN_INFO
+ "SMSC LPT Config: io=0x%04x, irq=%c, dma=%c, fifo threshold=%d\n",
+ cr23 * 4,
+ (cr27 & 0x0f) ? 'A' - 1 + (cr27 & 0x0f) : '-',
+ (cr26 & 0x0f) ? 'A' - 1 + (cr26 & 0x0f) : '-',
+ cra & 0x0f);
printk(KERN_INFO "SMSC LPT Config: enabled=%s power=%s\n",
- (cr23*4 >=0x100) ?"yes":"no", (cr1 & 4) ? "yes" : "no");
- printk(KERN_INFO "SMSC LPT Config: Port mode=%s, EPP version =%s\n",
- (cr1 & 0x08 ) ? "Standard mode only (SPP)" : modes[cr4 & 0x03],
- (cr4 & 0x40) ? "1.7" : "1.9");
+ (cr23 * 4 >= 0x100) ? "yes" : "no",
+ (cr1 & 4) ? "yes" : "no");
+ printk(KERN_INFO
+ "SMSC LPT Config: Port mode=%s, EPP version =%s\n",
+ (cr1 & 0x08) ? "Standard mode only (SPP)"
+ : modes[cr4 & 0x03],
+ (cr4 & 0x40) ? "1.7" : "1.9");
}
-
+
/* Heuristics ! BIOS setup for this mainboard device limits
the choices to standard settings, i.e. io-address and IRQ
are related, however DMA can be 1 or 3, assume DMA_A=DMA1,
DMA_C=DMA3 (this is true e.g. for TYAN 1564D Tomcat IV) */
- if(cr23*4 >=0x100) { /* if active */
- while((superios[i].io!= 0) && (i<NR_SUPERIOS))
- i++;
- if(i==NR_SUPERIOS)
+ if (cr23 * 4 >= 0x100) { /* if active */
+ s = find_free_superio();
+ if (s == NULL)
printk(KERN_INFO "Super-IO: too many chips!\n");
else {
int d;
- switch (cr23*4) {
- case 0x3bc:
- superios[i].io = 0x3bc;
- superios[i].irq = 7;
- break;
- case 0x378:
- superios[i].io = 0x378;
- superios[i].irq = 7;
- break;
- case 0x278:
- superios[i].io = 0x278;
- superios[i].irq = 5;
+ switch (cr23 * 4) {
+ case 0x3bc:
+ s->io = 0x3bc;
+ s->irq = 7;
+ break;
+ case 0x378:
+ s->io = 0x378;
+ s->irq = 7;
+ break;
+ case 0x278:
+ s->io = 0x278;
+ s->irq = 5;
}
- d=(cr26 &0x0f);
- if((d==1) || (d==3))
- superios[i].dma= d;
+ d = (cr26 & 0x0f);
+ if (d == 1 || d == 3)
+ s->dma = d;
else
- superios[i].dma= PARPORT_DMA_NONE;
+ s->dma = PARPORT_DMA_NONE;
}
- }
+ }
}
static void __devinit show_parconfig_winbond(int io, int key)
{
- int cr30,cr60,cr61,cr70,cr74,crf0,i=0;
+ int cr30, cr60, cr61, cr70, cr74, crf0;
+ struct superio_struct *s;
static const char *const modes[] = {
"Standard (SPP) and Bidirectional(PS/2)", /* 0 */
"EPP-1.9 and SPP",
@@ -1296,110 +1330,134 @@ static void __devinit show_parconfig_winbond(int io, int key)
static char *const irqtypes[] = {
"pulsed low, high-Z",
"follows nACK" };
-
+
/* The registers are called compatible-PnP because the
- register layout is modelled after ISA-PnP, the access
- method is just another ... */
- outb(key,io);
- outb(key,io);
- outb(0x07,io); /* Register 7: Select Logical Device */
- outb(0x01,io+1); /* LD1 is Parallel Port */
- outb(0x30,io);
- cr30=inb(io+1);
- outb(0x60,io);
- cr60=inb(io+1);
- outb(0x61,io);
- cr61=inb(io+1);
- outb(0x70,io);
- cr70=inb(io+1);
- outb(0x74,io);
- cr74=inb(io+1);
- outb(0xf0,io);
- crf0=inb(io+1);
- outb(0xaa,io);
+ register layout is modelled after ISA-PnP, the access
+ method is just another ... */
+ outb(key, io);
+ outb(key, io);
+ outb(0x07, io); /* Register 7: Select Logical Device */
+ outb(0x01, io + 1); /* LD1 is Parallel Port */
+ outb(0x30, io);
+ cr30 = inb(io + 1);
+ outb(0x60, io);
+ cr60 = inb(io + 1);
+ outb(0x61, io);
+ cr61 = inb(io + 1);
+ outb(0x70, io);
+ cr70 = inb(io + 1);
+ outb(0x74, io);
+ cr74 = inb(io + 1);
+ outb(0xf0, io);
+ crf0 = inb(io + 1);
+ outb(0xaa, io);
if (verbose_probing) {
- printk(KERN_INFO "Winbond LPT Config: cr_30=%02x 60,61=%02x%02x "
- "70=%02x 74=%02x, f0=%02x\n", cr30,cr60,cr61,cr70,cr74,crf0);
- printk(KERN_INFO "Winbond LPT Config: active=%s, io=0x%02x%02x irq=%d, ",
- (cr30 & 0x01) ? "yes":"no", cr60,cr61,cr70&0x0f );
+ printk(KERN_INFO
+ "Winbond LPT Config: cr_30=%02x 60,61=%02x%02x 70=%02x 74=%02x, f0=%02x\n",
+ cr30, cr60, cr61, cr70, cr74, crf0);
+ printk(KERN_INFO "Winbond LPT Config: active=%s, io=0x%02x%02x irq=%d, ",
+ (cr30 & 0x01) ? "yes" : "no", cr60, cr61, cr70 & 0x0f);
if ((cr74 & 0x07) > 3)
printk("dma=none\n");
else
- printk("dma=%d\n",cr74 & 0x07);
- printk(KERN_INFO "Winbond LPT Config: irqtype=%s, ECP fifo threshold=%d\n",
- irqtypes[crf0>>7], (crf0>>3)&0x0f);
- printk(KERN_INFO "Winbond LPT Config: Port mode=%s\n", modes[crf0 & 0x07]);
+ printk("dma=%d\n", cr74 & 0x07);
+ printk(KERN_INFO
+ "Winbond LPT Config: irqtype=%s, ECP fifo threshold=%d\n",
+ irqtypes[crf0>>7], (crf0>>3)&0x0f);
+ printk(KERN_INFO "Winbond LPT Config: Port mode=%s\n",
+ modes[crf0 & 0x07]);
}
- if(cr30 & 0x01) { /* the settings can be interrogated later ... */
- while((superios[i].io!= 0) && (i<NR_SUPERIOS))
- i++;
- if(i==NR_SUPERIOS)
+ if (cr30 & 0x01) { /* the settings can be interrogated later ... */
+ s = find_free_superio();
+ if (s == NULL)
printk(KERN_INFO "Super-IO: too many chips!\n");
else {
- superios[i].io = (cr60<<8)|cr61;
- superios[i].irq = cr70&0x0f;
- superios[i].dma = (((cr74 & 0x07) > 3) ?
+ s->io = (cr60 << 8) | cr61;
+ s->irq = cr70 & 0x0f;
+ s->dma = (((cr74 & 0x07) > 3) ?
PARPORT_DMA_NONE : (cr74 & 0x07));
}
}
}
-static void __devinit decode_winbond(int efer, int key, int devid, int devrev, int oldid)
+static void __devinit decode_winbond(int efer, int key, int devid,
+ int devrev, int oldid)
{
const char *type = "unknown";
- int id,progif=2;
+ int id, progif = 2;
if (devid == devrev)
/* simple heuristics, we happened to read some
- non-winbond register */
+ non-winbond register */
return;
- id=(devid<<8) | devrev;
+ id = (devid << 8) | devrev;
/* Values are from public data sheets pdf files, I can just
- confirm 83977TF is correct :-) */
- if (id == 0x9771) type="83977F/AF";
- else if (id == 0x9773) type="83977TF / SMSC 97w33x/97w34x";
- else if (id == 0x9774) type="83977ATF";
- else if ((id & ~0x0f) == 0x5270) type="83977CTF / SMSC 97w36x";
- else if ((id & ~0x0f) == 0x52f0) type="83977EF / SMSC 97w35x";
- else if ((id & ~0x0f) == 0x5210) type="83627";
- else if ((id & ~0x0f) == 0x6010) type="83697HF";
- else if ((oldid &0x0f ) == 0x0a) { type="83877F"; progif=1;}
- else if ((oldid &0x0f ) == 0x0b) { type="83877AF"; progif=1;}
- else if ((oldid &0x0f ) == 0x0c) { type="83877TF"; progif=1;}
- else if ((oldid &0x0f ) == 0x0d) { type="83877ATF"; progif=1;}
- else progif=0;
+ confirm 83977TF is correct :-) */
+ if (id == 0x9771)
+ type = "83977F/AF";
+ else if (id == 0x9773)
+ type = "83977TF / SMSC 97w33x/97w34x";
+ else if (id == 0x9774)
+ type = "83977ATF";
+ else if ((id & ~0x0f) == 0x5270)
+ type = "83977CTF / SMSC 97w36x";
+ else if ((id & ~0x0f) == 0x52f0)
+ type = "83977EF / SMSC 97w35x";
+ else if ((id & ~0x0f) == 0x5210)
+ type = "83627";
+ else if ((id & ~0x0f) == 0x6010)
+ type = "83697HF";
+ else if ((oldid & 0x0f) == 0x0a) {
+ type = "83877F";
+ progif = 1;
+ } else if ((oldid & 0x0f) == 0x0b) {
+ type = "83877AF";
+ progif = 1;
+ } else if ((oldid & 0x0f) == 0x0c) {
+ type = "83877TF";
+ progif = 1;
+ } else if ((oldid & 0x0f) == 0x0d) {
+ type = "83877ATF";
+ progif = 1;
+ } else
+ progif = 0;
if (verbose_probing)
printk(KERN_INFO "Winbond chip at EFER=0x%x key=0x%02x "
- "devid=%02x devrev=%02x oldid=%02x type=%s\n",
+ "devid=%02x devrev=%02x oldid=%02x type=%s\n",
efer, key, devid, devrev, oldid, type);
if (progif == 2)
- show_parconfig_winbond(efer,key);
+ show_parconfig_winbond(efer, key);
}
static void __devinit decode_smsc(int efer, int key, int devid, int devrev)
{
- const char *type = "unknown";
+ const char *type = "unknown";
void (*func)(int io, int key);
- int id;
+ int id;
- if (devid == devrev)
+ if (devid == devrev)
/* simple heuristics, we happened to read some
- non-smsc register */
+ non-smsc register */
return;
- func=NULL;
- id=(devid<<8) | devrev;
+ func = NULL;
+ id = (devid << 8) | devrev;
- if (id==0x0302) {type="37c669"; func=show_parconfig_smsc37c669;}
- else if (id==0x6582) type="37c665IR";
- else if (devid==0x65) type="37c665GT";
- else if (devid==0x66) type="37c666GT";
+ if (id == 0x0302) {
+ type = "37c669";
+ func = show_parconfig_smsc37c669;
+ } else if (id == 0x6582)
+ type = "37c665IR";
+ else if (devid == 0x65)
+ type = "37c665GT";
+ else if (devid == 0x66)
+ type = "37c666GT";
if (verbose_probing)
printk(KERN_INFO "SMSC chip at EFER=0x%x "
@@ -1407,138 +1465,138 @@ static void __devinit decode_smsc(int efer, int key, int devid, int devrev)
efer, key, devid, devrev, type);
if (func)
- func(efer,key);
+ func(efer, key);
}
static void __devinit winbond_check(int io, int key)
{
- int devid,devrev,oldid,x_devid,x_devrev,x_oldid;
+ int devid, devrev, oldid, x_devid, x_devrev, x_oldid;
if (!request_region(io, 3, __func__))
return;
/* First probe without key */
- outb(0x20,io);
- x_devid=inb(io+1);
- outb(0x21,io);
- x_devrev=inb(io+1);
- outb(0x09,io);
- x_oldid=inb(io+1);
-
- outb(key,io);
- outb(key,io); /* Write Magic Sequence to EFER, extended
- funtion enable register */
- outb(0x20,io); /* Write EFIR, extended function index register */
- devid=inb(io+1); /* Read EFDR, extended function data register */
- outb(0x21,io);
- devrev=inb(io+1);
- outb(0x09,io);
- oldid=inb(io+1);
- outb(0xaa,io); /* Magic Seal */
+ outb(0x20, io);
+ x_devid = inb(io + 1);
+ outb(0x21, io);
+ x_devrev = inb(io + 1);
+ outb(0x09, io);
+ x_oldid = inb(io + 1);
+
+ outb(key, io);
+ outb(key, io); /* Write Magic Sequence to EFER, extended
+ function enable register */
+ outb(0x20, io); /* Write EFIR, extended function index register */
+ devid = inb(io + 1); /* Read EFDR, extended function data register */
+ outb(0x21, io);
+ devrev = inb(io + 1);
+ outb(0x09, io);
+ oldid = inb(io + 1);
+ outb(0xaa, io); /* Magic Seal */
if ((x_devid == devid) && (x_devrev == devrev) && (x_oldid == oldid))
goto out; /* protection against false positives */
- decode_winbond(io,key,devid,devrev,oldid);
+ decode_winbond(io, key, devid, devrev, oldid);
out:
release_region(io, 3);
}
-static void __devinit winbond_check2(int io,int key)
+static void __devinit winbond_check2(int io, int key)
{
- int devid,devrev,oldid,x_devid,x_devrev,x_oldid;
+ int devid, devrev, oldid, x_devid, x_devrev, x_oldid;
if (!request_region(io, 3, __func__))
return;
/* First probe without the key */
- outb(0x20,io+2);
- x_devid=inb(io+2);
- outb(0x21,io+1);
- x_devrev=inb(io+2);
- outb(0x09,io+1);
- x_oldid=inb(io+2);
-
- outb(key,io); /* Write Magic Byte to EFER, extended
- funtion enable register */
- outb(0x20,io+2); /* Write EFIR, extended function index register */
- devid=inb(io+2); /* Read EFDR, extended function data register */
- outb(0x21,io+1);
- devrev=inb(io+2);
- outb(0x09,io+1);
- oldid=inb(io+2);
- outb(0xaa,io); /* Magic Seal */
-
- if ((x_devid == devid) && (x_devrev == devrev) && (x_oldid == oldid))
+ outb(0x20, io + 2);
+ x_devid = inb(io + 2);
+ outb(0x21, io + 1);
+ x_devrev = inb(io + 2);
+ outb(0x09, io + 1);
+ x_oldid = inb(io + 2);
+
+ outb(key, io); /* Write Magic Byte to EFER, extended
+ function enable register */
+ outb(0x20, io + 2); /* Write EFIR, extended function index register */
+ devid = inb(io + 2); /* Read EFDR, extended function data register */
+ outb(0x21, io + 1);
+ devrev = inb(io + 2);
+ outb(0x09, io + 1);
+ oldid = inb(io + 2);
+ outb(0xaa, io); /* Magic Seal */
+
+ if (x_devid == devid && x_devrev == devrev && x_oldid == oldid)
goto out; /* protection against false positives */
- decode_winbond(io,key,devid,devrev,oldid);
+ decode_winbond(io, key, devid, devrev, oldid);
out:
release_region(io, 3);
}
static void __devinit smsc_check(int io, int key)
{
- int id,rev,oldid,oldrev,x_id,x_rev,x_oldid,x_oldrev;
+ int id, rev, oldid, oldrev, x_id, x_rev, x_oldid, x_oldrev;
if (!request_region(io, 3, __func__))
return;
/* First probe without the key */
- outb(0x0d,io);
- x_oldid=inb(io+1);
- outb(0x0e,io);
- x_oldrev=inb(io+1);
- outb(0x20,io);
- x_id=inb(io+1);
- outb(0x21,io);
- x_rev=inb(io+1);
-
- outb(key,io);
- outb(key,io); /* Write Magic Sequence to EFER, extended
- funtion enable register */
- outb(0x0d,io); /* Write EFIR, extended function index register */
- oldid=inb(io+1); /* Read EFDR, extended function data register */
- outb(0x0e,io);
- oldrev=inb(io+1);
- outb(0x20,io);
- id=inb(io+1);
- outb(0x21,io);
- rev=inb(io+1);
- outb(0xaa,io); /* Magic Seal */
-
- if ((x_id == id) && (x_oldrev == oldrev) &&
- (x_oldid == oldid) && (x_rev == rev))
+ outb(0x0d, io);
+ x_oldid = inb(io + 1);
+ outb(0x0e, io);
+ x_oldrev = inb(io + 1);
+ outb(0x20, io);
+ x_id = inb(io + 1);
+ outb(0x21, io);
+ x_rev = inb(io + 1);
+
+ outb(key, io);
+ outb(key, io); /* Write Magic Sequence to EFER, extended
+ function enable register */
+ outb(0x0d, io); /* Write EFIR, extended function index register */
+ oldid = inb(io + 1); /* Read EFDR, extended function data register */
+ outb(0x0e, io);
+ oldrev = inb(io + 1);
+ outb(0x20, io);
+ id = inb(io + 1);
+ outb(0x21, io);
+ rev = inb(io + 1);
+ outb(0xaa, io); /* Magic Seal */
+
+ if (x_id == id && x_oldrev == oldrev &&
+ x_oldid == oldid && x_rev == rev)
goto out; /* protection against false positives */
- decode_smsc(io,key,oldid,oldrev);
+ decode_smsc(io, key, oldid, oldrev);
out:
release_region(io, 3);
}
-static void __devinit detect_and_report_winbond (void)
-{
+static void __devinit detect_and_report_winbond(void)
+{
if (verbose_probing)
printk(KERN_DEBUG "Winbond Super-IO detection, now testing ports 3F0,370,250,4E,2E ...\n");
- winbond_check(0x3f0,0x87);
- winbond_check(0x370,0x87);
- winbond_check(0x2e ,0x87);
- winbond_check(0x4e ,0x87);
- winbond_check(0x3f0,0x86);
- winbond_check2(0x250,0x88);
- winbond_check2(0x250,0x89);
+ winbond_check(0x3f0, 0x87);
+ winbond_check(0x370, 0x87);
+ winbond_check(0x2e, 0x87);
+ winbond_check(0x4e, 0x87);
+ winbond_check(0x3f0, 0x86);
+ winbond_check2(0x250, 0x88);
+ winbond_check2(0x250, 0x89);
}
-static void __devinit detect_and_report_smsc (void)
+static void __devinit detect_and_report_smsc(void)
{
if (verbose_probing)
printk(KERN_DEBUG "SMSC Super-IO detection, now testing Ports 2F0, 370 ...\n");
- smsc_check(0x3f0,0x55);
- smsc_check(0x370,0x55);
- smsc_check(0x3f0,0x44);
- smsc_check(0x370,0x44);
+ smsc_check(0x3f0, 0x55);
+ smsc_check(0x370, 0x55);
+ smsc_check(0x3f0, 0x44);
+ smsc_check(0x370, 0x44);
}
static void __devinit detect_and_report_it87(void)
@@ -1573,34 +1631,39 @@ static void __devinit detect_and_report_it87(void)
}
#endif /* CONFIG_PARPORT_PC_SUPERIO */
-static int get_superio_dma (struct parport *p)
+static struct superio_struct *find_superio(struct parport *p)
{
- int i=0;
- while( (superios[i].io != p->base) && (i<NR_SUPERIOS))
- i++;
- if (i!=NR_SUPERIOS)
- return superios[i].dma;
+ int i;
+ for (i = 0; i < NR_SUPERIOS; i++)
+ if (superios[i].io == p->base)
+ return &superios[i];
+ return NULL;
+}
+
+static int get_superio_dma(struct parport *p)
+{
+ struct superio_struct *s = find_superio(p);
+ if (s)
+ return s->dma;
return PARPORT_DMA_NONE;
}
-static int get_superio_irq (struct parport *p)
+static int get_superio_irq(struct parport *p)
{
- int i=0;
- while( (superios[i].io != p->base) && (i<NR_SUPERIOS))
- i++;
- if (i!=NR_SUPERIOS)
- return superios[i].irq;
- return PARPORT_IRQ_NONE;
+ struct superio_struct *s = find_superio(p);
+ if (s)
+ return s->irq;
+ return PARPORT_IRQ_NONE;
}
-
+
/* --- Mode detection ------------------------------------- */
/*
* Checks for port existence, all ports support SPP MODE
- * Returns:
+ * Returns:
* 0 : No parallel port at this address
- * PARPORT_MODE_PCSPP : SPP port detected
+ * PARPORT_MODE_PCSPP : SPP port detected
* (if the user specified an ioport himself,
* this shall always be the case!)
*
@@ -1610,7 +1673,7 @@ static int parport_SPP_supported(struct parport *pb)
unsigned char r, w;
/*
- * first clear an eventually pending EPP timeout
+ * first clear an eventually pending EPP timeout
* I (sailer@ife.ee.ethz.ch) have an SMSC chipset
* that does not even respond to SPP cycles if an EPP
* timeout is pending
@@ -1619,19 +1682,19 @@ static int parport_SPP_supported(struct parport *pb)
/* Do a simple read-write test to make sure the port exists. */
w = 0xc;
- outb (w, CONTROL (pb));
+ outb(w, CONTROL(pb));
/* Is there a control register that we can read from? Some
* ports don't allow reads, so read_control just returns a
* software copy. Some ports _do_ allow reads, so bypass the
* software copy here. In addition, some bits aren't
* writable. */
- r = inb (CONTROL (pb));
+ r = inb(CONTROL(pb));
if ((r & 0xf) == w) {
w = 0xe;
- outb (w, CONTROL (pb));
- r = inb (CONTROL (pb));
- outb (0xc, CONTROL (pb));
+ outb(w, CONTROL(pb));
+ r = inb(CONTROL(pb));
+ outb(0xc, CONTROL(pb));
if ((r & 0xf) == w)
return PARPORT_MODE_PCSPP;
}
@@ -1639,18 +1702,18 @@ static int parport_SPP_supported(struct parport *pb)
if (user_specified)
/* That didn't work, but the user thinks there's a
* port here. */
- printk (KERN_INFO "parport 0x%lx (WARNING): CTR: "
+ printk(KERN_INFO "parport 0x%lx (WARNING): CTR: "
"wrote 0x%02x, read 0x%02x\n", pb->base, w, r);
/* Try the data register. The data lines aren't tri-stated at
* this stage, so we expect back what we wrote. */
w = 0xaa;
- parport_pc_write_data (pb, w);
- r = parport_pc_read_data (pb);
+ parport_pc_write_data(pb, w);
+ r = parport_pc_read_data(pb);
if (r == w) {
w = 0x55;
- parport_pc_write_data (pb, w);
- r = parport_pc_read_data (pb);
+ parport_pc_write_data(pb, w);
+ r = parport_pc_read_data(pb);
if (r == w)
return PARPORT_MODE_PCSPP;
}
@@ -1658,9 +1721,9 @@ static int parport_SPP_supported(struct parport *pb)
if (user_specified) {
/* Didn't work, but the user is convinced this is the
* place. */
- printk (KERN_INFO "parport 0x%lx (WARNING): DATA: "
+ printk(KERN_INFO "parport 0x%lx (WARNING): DATA: "
"wrote 0x%02x, read 0x%02x\n", pb->base, w, r);
- printk (KERN_INFO "parport 0x%lx: You gave this address, "
+ printk(KERN_INFO "parport 0x%lx: You gave this address, "
"but there is probably no parallel port there!\n",
pb->base);
}
@@ -1691,33 +1754,33 @@ static int parport_ECR_present(struct parport *pb)
struct parport_pc_private *priv = pb->private_data;
unsigned char r = 0xc;
- outb (r, CONTROL (pb));
- if ((inb (ECONTROL (pb)) & 0x3) == (r & 0x3)) {
- outb (r ^ 0x2, CONTROL (pb)); /* Toggle bit 1 */
+ outb(r, CONTROL(pb));
+ if ((inb(ECONTROL(pb)) & 0x3) == (r & 0x3)) {
+ outb(r ^ 0x2, CONTROL(pb)); /* Toggle bit 1 */
- r = inb (CONTROL (pb));
- if ((inb (ECONTROL (pb)) & 0x2) == (r & 0x2))
+ r = inb(CONTROL(pb));
+ if ((inb(ECONTROL(pb)) & 0x2) == (r & 0x2))
goto no_reg; /* Sure that no ECR register exists */
}
-
- if ((inb (ECONTROL (pb)) & 0x3 ) != 0x1)
+
+ if ((inb(ECONTROL(pb)) & 0x3) != 0x1)
goto no_reg;
- ECR_WRITE (pb, 0x34);
- if (inb (ECONTROL (pb)) != 0x35)
+ ECR_WRITE(pb, 0x34);
+ if (inb(ECONTROL(pb)) != 0x35)
goto no_reg;
priv->ecr = 1;
- outb (0xc, CONTROL (pb));
-
+ outb(0xc, CONTROL(pb));
+
/* Go to mode 000 */
- frob_set_mode (pb, ECR_SPP);
+ frob_set_mode(pb, ECR_SPP);
return 1;
no_reg:
- outb (0xc, CONTROL (pb));
- return 0;
+ outb(0xc, CONTROL(pb));
+ return 0;
}
#ifdef CONFIG_PARPORT_1284
@@ -1727,7 +1790,7 @@ static int parport_ECR_present(struct parport *pb)
* allows us to read data from the data lines. In theory we would get back
* 0xff but any peripheral attached to the port may drag some or all of the
* lines down to zero. So if we get back anything that isn't the contents
- * of the data register we deem PS/2 support to be present.
+ * of the data register we deem PS/2 support to be present.
*
* Some SPP ports have "half PS/2" ability - you can't turn off the line
* drivers, but an external peripheral with sufficiently beefy drivers of
@@ -1735,26 +1798,28 @@ static int parport_ECR_present(struct parport *pb)
* where they can then be read back as normal. Ports with this property
* and the right type of device attached are likely to fail the SPP test,
* (as they will appear to have stuck bits) and so the fact that they might
- * be misdetected here is rather academic.
+ * be misdetected here is rather academic.
*/
static int parport_PS2_supported(struct parport *pb)
{
int ok = 0;
-
+
clear_epp_timeout(pb);
/* try to tri-state the buffer */
- parport_pc_data_reverse (pb);
-
+ parport_pc_data_reverse(pb);
+
parport_pc_write_data(pb, 0x55);
- if (parport_pc_read_data(pb) != 0x55) ok++;
+ if (parport_pc_read_data(pb) != 0x55)
+ ok++;
parport_pc_write_data(pb, 0xaa);
- if (parport_pc_read_data(pb) != 0xaa) ok++;
+ if (parport_pc_read_data(pb) != 0xaa)
+ ok++;
/* cancel input mode */
- parport_pc_data_forward (pb);
+ parport_pc_data_forward(pb);
if (ok) {
pb->modes |= PARPORT_MODE_TRISTATE;
@@ -1773,68 +1838,68 @@ static int parport_ECP_supported(struct parport *pb)
int config, configb;
int pword;
struct parport_pc_private *priv = pb->private_data;
- /* Translate ECP intrLine to ISA irq value */
- static const int intrline[]= { 0, 7, 9, 10, 11, 14, 15, 5 };
+ /* Translate ECP intrLine to ISA irq value */
+ static const int intrline[] = { 0, 7, 9, 10, 11, 14, 15, 5 };
/* If there is no ECR, we have no hope of supporting ECP. */
if (!priv->ecr)
return 0;
/* Find out FIFO depth */
- ECR_WRITE (pb, ECR_SPP << 5); /* Reset FIFO */
- ECR_WRITE (pb, ECR_TST << 5); /* TEST FIFO */
- for (i=0; i < 1024 && !(inb (ECONTROL (pb)) & 0x02); i++)
- outb (0xaa, FIFO (pb));
+ ECR_WRITE(pb, ECR_SPP << 5); /* Reset FIFO */
+ ECR_WRITE(pb, ECR_TST << 5); /* TEST FIFO */
+ for (i = 0; i < 1024 && !(inb(ECONTROL(pb)) & 0x02); i++)
+ outb(0xaa, FIFO(pb));
/*
* Using LGS chipset it uses ECR register, but
* it doesn't support ECP or FIFO MODE
*/
if (i == 1024) {
- ECR_WRITE (pb, ECR_SPP << 5);
+ ECR_WRITE(pb, ECR_SPP << 5);
return 0;
}
priv->fifo_depth = i;
if (verbose_probing)
- printk (KERN_DEBUG "0x%lx: FIFO is %d bytes\n", pb->base, i);
+ printk(KERN_DEBUG "0x%lx: FIFO is %d bytes\n", pb->base, i);
/* Find out writeIntrThreshold */
- frob_econtrol (pb, 1<<2, 1<<2);
- frob_econtrol (pb, 1<<2, 0);
+ frob_econtrol(pb, 1<<2, 1<<2);
+ frob_econtrol(pb, 1<<2, 0);
for (i = 1; i <= priv->fifo_depth; i++) {
- inb (FIFO (pb));
- udelay (50);
- if (inb (ECONTROL (pb)) & (1<<2))
+ inb(FIFO(pb));
+ udelay(50);
+ if (inb(ECONTROL(pb)) & (1<<2))
break;
}
if (i <= priv->fifo_depth) {
if (verbose_probing)
- printk (KERN_DEBUG "0x%lx: writeIntrThreshold is %d\n",
+ printk(KERN_DEBUG "0x%lx: writeIntrThreshold is %d\n",
pb->base, i);
} else
/* Number of bytes we know we can write if we get an
- interrupt. */
+ interrupt. */
i = 0;
priv->writeIntrThreshold = i;
/* Find out readIntrThreshold */
- frob_set_mode (pb, ECR_PS2); /* Reset FIFO and enable PS2 */
- parport_pc_data_reverse (pb); /* Must be in PS2 mode */
- frob_set_mode (pb, ECR_TST); /* Test FIFO */
- frob_econtrol (pb, 1<<2, 1<<2);
- frob_econtrol (pb, 1<<2, 0);
+ frob_set_mode(pb, ECR_PS2); /* Reset FIFO and enable PS2 */
+ parport_pc_data_reverse(pb); /* Must be in PS2 mode */
+ frob_set_mode(pb, ECR_TST); /* Test FIFO */
+ frob_econtrol(pb, 1<<2, 1<<2);
+ frob_econtrol(pb, 1<<2, 0);
for (i = 1; i <= priv->fifo_depth; i++) {
- outb (0xaa, FIFO (pb));
- if (inb (ECONTROL (pb)) & (1<<2))
+ outb(0xaa, FIFO(pb));
+ if (inb(ECONTROL(pb)) & (1<<2))
break;
}
if (i <= priv->fifo_depth) {
if (verbose_probing)
- printk (KERN_INFO "0x%lx: readIntrThreshold is %d\n",
+ printk(KERN_INFO "0x%lx: readIntrThreshold is %d\n",
pb->base, i);
} else
/* Number of bytes we can read if we get an interrupt. */
@@ -1842,23 +1907,23 @@ static int parport_ECP_supported(struct parport *pb)
priv->readIntrThreshold = i;
- ECR_WRITE (pb, ECR_SPP << 5); /* Reset FIFO */
- ECR_WRITE (pb, 0xf4); /* Configuration mode */
- config = inb (CONFIGA (pb));
+ ECR_WRITE(pb, ECR_SPP << 5); /* Reset FIFO */
+ ECR_WRITE(pb, 0xf4); /* Configuration mode */
+ config = inb(CONFIGA(pb));
pword = (config >> 4) & 0x7;
switch (pword) {
case 0:
pword = 2;
- printk (KERN_WARNING "0x%lx: Unsupported pword size!\n",
+ printk(KERN_WARNING "0x%lx: Unsupported pword size!\n",
pb->base);
break;
case 2:
pword = 4;
- printk (KERN_WARNING "0x%lx: Unsupported pword size!\n",
+ printk(KERN_WARNING "0x%lx: Unsupported pword size!\n",
pb->base);
break;
default:
- printk (KERN_WARNING "0x%lx: Unknown implementation ID\n",
+ printk(KERN_WARNING "0x%lx: Unknown implementation ID\n",
pb->base);
/* Assume 1 */
case 1:
@@ -1867,28 +1932,29 @@ static int parport_ECP_supported(struct parport *pb)
priv->pword = pword;
if (verbose_probing) {
- printk (KERN_DEBUG "0x%lx: PWord is %d bits\n", pb->base, 8 * pword);
-
- printk (KERN_DEBUG "0x%lx: Interrupts are ISA-%s\n", pb->base,
+ printk(KERN_DEBUG "0x%lx: PWord is %d bits\n",
+ pb->base, 8 * pword);
+
+ printk(KERN_DEBUG "0x%lx: Interrupts are ISA-%s\n", pb->base,
config & 0x80 ? "Level" : "Pulses");
- configb = inb (CONFIGB (pb));
- printk (KERN_DEBUG "0x%lx: ECP port cfgA=0x%02x cfgB=0x%02x\n",
+ configb = inb(CONFIGB(pb));
+ printk(KERN_DEBUG "0x%lx: ECP port cfgA=0x%02x cfgB=0x%02x\n",
pb->base, config, configb);
- printk (KERN_DEBUG "0x%lx: ECP settings irq=", pb->base);
- if ((configb >>3) & 0x07)
- printk("%d",intrline[(configb >>3) & 0x07]);
+ printk(KERN_DEBUG "0x%lx: ECP settings irq=", pb->base);
+ if ((configb >> 3) & 0x07)
+ printk("%d", intrline[(configb >> 3) & 0x07]);
else
printk("<none or set by other means>");
- printk (" dma=");
- if( (configb & 0x03 ) == 0x00)
+ printk(" dma=");
+ if ((configb & 0x03) == 0x00)
printk("<none or set by other means>\n");
else
- printk("%d\n",configb & 0x07);
+ printk("%d\n", configb & 0x07);
}
/* Go back to mode 000 */
- frob_set_mode (pb, ECR_SPP);
+ frob_set_mode(pb, ECR_SPP);
return 1;
}
@@ -1903,10 +1969,10 @@ static int parport_ECPPS2_supported(struct parport *pb)
if (!priv->ecr)
return 0;
- oecr = inb (ECONTROL (pb));
- ECR_WRITE (pb, ECR_PS2 << 5);
+ oecr = inb(ECONTROL(pb));
+ ECR_WRITE(pb, ECR_PS2 << 5);
result = parport_PS2_supported(pb);
- ECR_WRITE (pb, oecr);
+ ECR_WRITE(pb, oecr);
return result;
}
@@ -1930,16 +1996,15 @@ static int parport_EPP_supported(struct parport *pb)
*/
/* If EPP timeout bit clear then EPP available */
- if (!clear_epp_timeout(pb)) {
+ if (!clear_epp_timeout(pb))
return 0; /* No way to clear timeout */
- }
/* Check for Intel bug. */
if (priv->ecr) {
unsigned char i;
for (i = 0x00; i < 0x80; i += 0x20) {
- ECR_WRITE (pb, i);
- if (clear_epp_timeout (pb)) {
+ ECR_WRITE(pb, i);
+ if (clear_epp_timeout(pb)) {
/* Phony EPP in ECP. */
return 0;
}
@@ -1963,17 +2028,16 @@ static int parport_ECPEPP_supported(struct parport *pb)
int result;
unsigned char oecr;
- if (!priv->ecr) {
+ if (!priv->ecr)
return 0;
- }
- oecr = inb (ECONTROL (pb));
+ oecr = inb(ECONTROL(pb));
/* Search for SMC style EPP+ECP mode */
- ECR_WRITE (pb, 0x80);
- outb (0x04, CONTROL (pb));
+ ECR_WRITE(pb, 0x80);
+ outb(0x04, CONTROL(pb));
result = parport_EPP_supported(pb);
- ECR_WRITE (pb, oecr);
+ ECR_WRITE(pb, oecr);
if (result) {
/* Set up access functions to use ECP+EPP hardware. */
@@ -1991,11 +2055,25 @@ static int parport_ECPEPP_supported(struct parport *pb)
/* Don't bother probing for modes we know we won't use. */
static int __devinit parport_PS2_supported(struct parport *pb) { return 0; }
#ifdef CONFIG_PARPORT_PC_FIFO
-static int parport_ECP_supported(struct parport *pb) { return 0; }
+static int parport_ECP_supported(struct parport *pb)
+{
+ return 0;
+}
#endif
-static int __devinit parport_EPP_supported(struct parport *pb) { return 0; }
-static int __devinit parport_ECPEPP_supported(struct parport *pb){return 0;}
-static int __devinit parport_ECPPS2_supported(struct parport *pb){return 0;}
+static int __devinit parport_EPP_supported(struct parport *pb)
+{
+ return 0;
+}
+
+static int __devinit parport_ECPEPP_supported(struct parport *pb)
+{
+ return 0;
+}
+
+static int __devinit parport_ECPPS2_supported(struct parport *pb)
+{
+ return 0;
+}
#endif /* No IEEE 1284 support */
@@ -2005,17 +2083,17 @@ static int __devinit parport_ECPPS2_supported(struct parport *pb){return 0;}
static int programmable_irq_support(struct parport *pb)
{
int irq, intrLine;
- unsigned char oecr = inb (ECONTROL (pb));
+ unsigned char oecr = inb(ECONTROL(pb));
static const int lookup[8] = {
PARPORT_IRQ_NONE, 7, 9, 10, 11, 14, 15, 5
};
- ECR_WRITE (pb, ECR_CNF << 5); /* Configuration MODE */
+ ECR_WRITE(pb, ECR_CNF << 5); /* Configuration MODE */
- intrLine = (inb (CONFIGB (pb)) >> 3) & 0x07;
+ intrLine = (inb(CONFIGB(pb)) >> 3) & 0x07;
irq = lookup[intrLine];
- ECR_WRITE (pb, oecr);
+ ECR_WRITE(pb, oecr);
return irq;
}
@@ -2025,17 +2103,17 @@ static int irq_probe_ECP(struct parport *pb)
unsigned long irqs;
irqs = probe_irq_on();
-
- ECR_WRITE (pb, ECR_SPP << 5); /* Reset FIFO */
- ECR_WRITE (pb, (ECR_TST << 5) | 0x04);
- ECR_WRITE (pb, ECR_TST << 5);
+
+ ECR_WRITE(pb, ECR_SPP << 5); /* Reset FIFO */
+ ECR_WRITE(pb, (ECR_TST << 5) | 0x04);
+ ECR_WRITE(pb, ECR_TST << 5);
/* If Full FIFO sure that writeIntrThreshold is generated */
- for (i=0; i < 1024 && !(inb (ECONTROL (pb)) & 0x02) ; i++)
- outb (0xaa, FIFO (pb));
-
+ for (i = 0; i < 1024 && !(inb(ECONTROL(pb)) & 0x02) ; i++)
+ outb(0xaa, FIFO(pb));
+
pb->irq = probe_irq_off(irqs);
- ECR_WRITE (pb, ECR_SPP << 5);
+ ECR_WRITE(pb, ECR_SPP << 5);
if (pb->irq <= 0)
pb->irq = PARPORT_IRQ_NONE;
@@ -2045,7 +2123,7 @@ static int irq_probe_ECP(struct parport *pb)
/*
* This detection seems that only works in National Semiconductors
- * This doesn't work in SMC, LGS, and Winbond
+ * This doesn't work in SMC, LGS, and Winbond
*/
static int irq_probe_EPP(struct parport *pb)
{
@@ -2056,16 +2134,16 @@ static int irq_probe_EPP(struct parport *pb)
unsigned char oecr;
if (pb->modes & PARPORT_MODE_PCECR)
- oecr = inb (ECONTROL (pb));
+ oecr = inb(ECONTROL(pb));
irqs = probe_irq_on();
if (pb->modes & PARPORT_MODE_PCECR)
- frob_econtrol (pb, 0x10, 0x10);
-
+ frob_econtrol(pb, 0x10, 0x10);
+
clear_epp_timeout(pb);
- parport_pc_frob_control (pb, 0x20, 0x20);
- parport_pc_frob_control (pb, 0x10, 0x10);
+ parport_pc_frob_control(pb, 0x20, 0x20);
+ parport_pc_frob_control(pb, 0x10, 0x10);
clear_epp_timeout(pb);
/* Device isn't expecting an EPP read
@@ -2074,9 +2152,9 @@ static int irq_probe_EPP(struct parport *pb)
parport_pc_read_epp(pb);
udelay(20);
- pb->irq = probe_irq_off (irqs);
+ pb->irq = probe_irq_off(irqs);
if (pb->modes & PARPORT_MODE_PCECR)
- ECR_WRITE (pb, oecr);
+ ECR_WRITE(pb, oecr);
parport_pc_write_control(pb, 0xc);
if (pb->irq <= 0)
@@ -2133,28 +2211,28 @@ static int parport_irq_probe(struct parport *pb)
/* --- DMA detection -------------------------------------- */
/* Only if chipset conforms to ECP ISA Interface Standard */
-static int programmable_dma_support (struct parport *p)
+static int programmable_dma_support(struct parport *p)
{
- unsigned char oecr = inb (ECONTROL (p));
+ unsigned char oecr = inb(ECONTROL(p));
int dma;
- frob_set_mode (p, ECR_CNF);
-
- dma = inb (CONFIGB(p)) & 0x07;
+ frob_set_mode(p, ECR_CNF);
+
+ dma = inb(CONFIGB(p)) & 0x07;
/* 000: Indicates jumpered 8-bit DMA if read-only.
100: Indicates jumpered 16-bit DMA if read-only. */
if ((dma & 0x03) == 0)
dma = PARPORT_DMA_NONE;
- ECR_WRITE (p, oecr);
+ ECR_WRITE(p, oecr);
return dma;
}
-static int parport_dma_probe (struct parport *p)
+static int parport_dma_probe(struct parport *p)
{
const struct parport_pc_private *priv = p->private_data;
- if (priv->ecr)
- p->dma = programmable_dma_support(p); /* ask ECP chipset first */
+ if (priv->ecr) /* ask ECP chipset first */
+ p->dma = programmable_dma_support(p);
if (p->dma == PARPORT_DMA_NONE) {
/* ask known Super-IO chips proper, although these
claim ECP compatible, some don't report their DMA
@@ -2212,7 +2290,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
if (!base_res)
goto out4;
- memcpy(ops, &parport_pc_ops, sizeof (struct parport_operations));
+ memcpy(ops, &parport_pc_ops, sizeof(struct parport_operations));
priv->ctr = 0xc;
priv->ctr_writable = ~0x10;
priv->ecr = 0;
@@ -2239,7 +2317,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
if (!parport_EPP_supported(p))
parport_ECPEPP_supported(p);
}
- if (!parport_SPP_supported (p))
+ if (!parport_SPP_supported(p))
/* No port. */
goto out5;
if (priv->ecr)
@@ -2247,7 +2325,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
else
parport_PS2_supported(p);
- p->size = (p->modes & PARPORT_MODE_EPP)?8:3;
+ p->size = (p->modes & PARPORT_MODE_EPP) ? 8 : 3;
printk(KERN_INFO "%s: PC-style at 0x%lx", p->name, p->base);
if (p->base_hi && priv->ecr)
@@ -2271,7 +2349,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
}
}
if (p->dma == PARPORT_DMA_AUTO) /* To use DMA, giving the irq
- is mandatory (see above) */
+ is mandatory (see above) */
p->dma = PARPORT_DMA_NONE;
#ifdef CONFIG_PARPORT_PC_FIFO
@@ -2288,16 +2366,23 @@ struct parport *parport_pc_probe_port(unsigned long int base,
if (p->dma != PARPORT_DMA_NONE) {
printk(", dma %d", p->dma);
p->modes |= PARPORT_MODE_DMA;
- }
- else printk(", using FIFO");
- }
- else
+ } else
+ printk(", using FIFO");
+ } else
/* We can't use the DMA channel after all. */
p->dma = PARPORT_DMA_NONE;
#endif /* Allowed to use FIFO/DMA */
printk(" [");
-#define printmode(x) {if(p->modes&PARPORT_MODE_##x){printk("%s%s",f?",":"",#x);f++;}}
+
+#define printmode(x) \
+ {\
+ if (p->modes & PARPORT_MODE_##x) {\
+ printk("%s%s", f ? "," : "", #x);\
+ f++;\
+ } \
+ }
+
{
int f = 0;
printmode(PCSPP);
@@ -2309,10 +2394,10 @@ struct parport *parport_pc_probe_port(unsigned long int base,
}
#undef printmode
#ifndef CONFIG_PARPORT_1284
- printk ("(,...)");
+ printk("(,...)");
#endif /* CONFIG_PARPORT_1284 */
printk("]\n");
- if (probedirq != PARPORT_IRQ_NONE)
+ if (probedirq != PARPORT_IRQ_NONE)
printk(KERN_INFO "%s: irq %d detected\n", p->name, probedirq);
/* If No ECP release the ports grabbed above. */
@@ -2328,7 +2413,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
if (p->irq != PARPORT_IRQ_NONE) {
if (request_irq(p->irq, parport_irq_handler,
irqflags, p->name, p)) {
- printk (KERN_WARNING "%s: irq %d in use, "
+ printk(KERN_WARNING "%s: irq %d in use, "
"resorting to polled operation\n",
p->name, p->irq);
p->irq = PARPORT_IRQ_NONE;
@@ -2338,8 +2423,8 @@ struct parport *parport_pc_probe_port(unsigned long int base,
#ifdef CONFIG_PARPORT_PC_FIFO
#ifdef HAS_DMA
if (p->dma != PARPORT_DMA_NONE) {
- if (request_dma (p->dma, p->name)) {
- printk (KERN_WARNING "%s: dma %d in use, "
+ if (request_dma(p->dma, p->name)) {
+ printk(KERN_WARNING "%s: dma %d in use, "
"resorting to PIO operation\n",
p->name, p->dma);
p->dma = PARPORT_DMA_NONE;
@@ -2349,8 +2434,8 @@ struct parport *parport_pc_probe_port(unsigned long int base,
PAGE_SIZE,
&priv->dma_handle,
GFP_KERNEL);
- if (! priv->dma_buf) {
- printk (KERN_WARNING "%s: "
+ if (!priv->dma_buf) {
+ printk(KERN_WARNING "%s: "
"cannot get buffer for DMA, "
"resorting to PIO operation\n",
p->name);
@@ -2369,10 +2454,10 @@ struct parport *parport_pc_probe_port(unsigned long int base,
* Put the ECP detected port in PS2 mode.
* Do this also for ports that have ECR but don't do ECP.
*/
- ECR_WRITE (p, 0x34);
+ ECR_WRITE(p, 0x34);
parport_pc_write_data(p, 0);
- parport_pc_data_forward (p);
+ parport_pc_data_forward(p);
/* Now that we've told the sharing engine about the port, and
found out its characteristics, let the high-level drivers
@@ -2380,7 +2465,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
spin_lock(&ports_lock);
list_add(&priv->list, &ports_list);
spin_unlock(&ports_lock);
- parport_announce_port (p);
+ parport_announce_port(p);
return p;
@@ -2393,18 +2478,17 @@ out5:
out4:
parport_put_port(p);
out3:
- kfree (priv);
+ kfree(priv);
out2:
- kfree (ops);
+ kfree(ops);
out1:
if (pdev)
platform_device_unregister(pdev);
return NULL;
}
+EXPORT_SYMBOL(parport_pc_probe_port);
-EXPORT_SYMBOL (parport_pc_probe_port);
-
-void parport_pc_unregister_port (struct parport *p)
+void parport_pc_unregister_port(struct parport *p)
{
struct parport_pc_private *priv = p->private_data;
struct parport_operations *ops = p->ops;
@@ -2430,17 +2514,16 @@ void parport_pc_unregister_port (struct parport *p)
priv->dma_buf,
priv->dma_handle);
#endif
- kfree (p->private_data);
+ kfree(p->private_data);
parport_put_port(p);
- kfree (ops); /* hope no-one cached it */
+ kfree(ops); /* hope no-one cached it */
}
-
-EXPORT_SYMBOL (parport_pc_unregister_port);
+EXPORT_SYMBOL(parport_pc_unregister_port);
#ifdef CONFIG_PCI
/* ITE support maintained by Rich Liu <richliu@poorman.org> */
-static int __devinit sio_ite_8872_probe (struct pci_dev *pdev, int autoirq,
+static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq,
int autodma,
const struct parport_pc_via_data *via)
{
@@ -2452,73 +2535,74 @@ static int __devinit sio_ite_8872_probe (struct pci_dev *pdev, int autoirq,
int irq;
int i;
- DPRINTK (KERN_DEBUG "sio_ite_8872_probe()\n");
-
- // make sure which one chip
- for(i = 0; i < 5; i++) {
+ DPRINTK(KERN_DEBUG "sio_ite_8872_probe()\n");
+
+ /* make sure which one chip */
+ for (i = 0; i < 5; i++) {
base_res = request_region(inta_addr[i], 32, "it887x");
if (base_res) {
int test;
- pci_write_config_dword (pdev, 0x60,
+ pci_write_config_dword(pdev, 0x60,
0xe5000000 | inta_addr[i]);
- pci_write_config_dword (pdev, 0x78,
+ pci_write_config_dword(pdev, 0x78,
0x00000000 | inta_addr[i]);
- test = inb (inta_addr[i]);
- if (test != 0xff) break;
+ test = inb(inta_addr[i]);
+ if (test != 0xff)
+ break;
release_region(inta_addr[i], 0x8);
}
}
- if(i >= 5) {
- printk (KERN_INFO "parport_pc: cannot find ITE8872 INTA\n");
+ if (i >= 5) {
+ printk(KERN_INFO "parport_pc: cannot find ITE8872 INTA\n");
return 0;
}
- type = inb (inta_addr[i] + 0x18);
+ type = inb(inta_addr[i] + 0x18);
type &= 0x0f;
switch (type) {
case 0x2:
- printk (KERN_INFO "parport_pc: ITE8871 found (1P)\n");
+ printk(KERN_INFO "parport_pc: ITE8871 found (1P)\n");
ite8872set = 0x64200000;
break;
case 0xa:
- printk (KERN_INFO "parport_pc: ITE8875 found (1P)\n");
+ printk(KERN_INFO "parport_pc: ITE8875 found (1P)\n");
ite8872set = 0x64200000;
break;
case 0xe:
- printk (KERN_INFO "parport_pc: ITE8872 found (2S1P)\n");
+ printk(KERN_INFO "parport_pc: ITE8872 found (2S1P)\n");
ite8872set = 0x64e00000;
break;
case 0x6:
- printk (KERN_INFO "parport_pc: ITE8873 found (1S)\n");
+ printk(KERN_INFO "parport_pc: ITE8873 found (1S)\n");
return 0;
case 0x8:
- DPRINTK (KERN_DEBUG "parport_pc: ITE8874 found (2S)\n");
+ DPRINTK(KERN_DEBUG "parport_pc: ITE8874 found (2S)\n");
return 0;
default:
- printk (KERN_INFO "parport_pc: unknown ITE887x\n");
- printk (KERN_INFO "parport_pc: please mail 'lspci -nvv' "
+ printk(KERN_INFO "parport_pc: unknown ITE887x\n");
+ printk(KERN_INFO "parport_pc: please mail 'lspci -nvv' "
"output to Rich.Liu@ite.com.tw\n");
return 0;
}
- pci_read_config_byte (pdev, 0x3c, &ite8872_irq);
- pci_read_config_dword (pdev, 0x1c, &ite8872_lpt);
+ pci_read_config_byte(pdev, 0x3c, &ite8872_irq);
+ pci_read_config_dword(pdev, 0x1c, &ite8872_lpt);
ite8872_lpt &= 0x0000ff00;
- pci_read_config_dword (pdev, 0x20, &ite8872_lpthi);
+ pci_read_config_dword(pdev, 0x20, &ite8872_lpthi);
ite8872_lpthi &= 0x0000ff00;
- pci_write_config_dword (pdev, 0x6c, 0xe3000000 | ite8872_lpt);
- pci_write_config_dword (pdev, 0x70, 0xe3000000 | ite8872_lpthi);
- pci_write_config_dword (pdev, 0x80, (ite8872_lpthi<<16) | ite8872_lpt);
- // SET SPP&EPP , Parallel Port NO DMA , Enable All Function
- // SET Parallel IRQ
- pci_write_config_dword (pdev, 0x9c,
+ pci_write_config_dword(pdev, 0x6c, 0xe3000000 | ite8872_lpt);
+ pci_write_config_dword(pdev, 0x70, 0xe3000000 | ite8872_lpthi);
+ pci_write_config_dword(pdev, 0x80, (ite8872_lpthi<<16) | ite8872_lpt);
+ /* SET SPP&EPP , Parallel Port NO DMA , Enable All Function */
+ /* SET Parallel IRQ */
+ pci_write_config_dword(pdev, 0x9c,
ite8872set | (ite8872_irq * 0x11111));
- DPRINTK (KERN_DEBUG "ITE887x: The IRQ is %d.\n", ite8872_irq);
- DPRINTK (KERN_DEBUG "ITE887x: The PARALLEL I/O port is 0x%x.\n",
+ DPRINTK(KERN_DEBUG "ITE887x: The IRQ is %d.\n", ite8872_irq);
+ DPRINTK(KERN_DEBUG "ITE887x: The PARALLEL I/O port is 0x%x.\n",
ite8872_lpt);
- DPRINTK (KERN_DEBUG "ITE887x: The PARALLEL I/O porthi is 0x%x.\n",
+ DPRINTK(KERN_DEBUG "ITE887x: The PARALLEL I/O porthi is 0x%x.\n",
ite8872_lpthi);
/* Let the user (or defaults) steer us away from interrupts */
@@ -2530,14 +2614,14 @@ static int __devinit sio_ite_8872_probe (struct pci_dev *pdev, int autoirq,
* Release the resource so that parport_pc_probe_port can get it.
*/
release_resource(base_res);
- if (parport_pc_probe_port (ite8872_lpt, ite8872_lpthi,
+ if (parport_pc_probe_port(ite8872_lpt, ite8872_lpthi,
irq, PARPORT_DMA_NONE, &pdev->dev, 0)) {
- printk (KERN_INFO
+ printk(KERN_INFO
"parport_pc: ITE 8872 parallel port: io=0x%X",
- ite8872_lpt);
+ ite8872_lpt);
if (irq != PARPORT_IRQ_NONE)
- printk (", irq=%d", irq);
- printk ("\n");
+ printk(", irq=%d", irq);
+ printk("\n");
return 1;
}
@@ -2546,7 +2630,7 @@ static int __devinit sio_ite_8872_probe (struct pci_dev *pdev, int autoirq,
/* VIA 8231 support by Pavel Fedin <sonic_amiga@rambler.ru>
based on VIA 686a support code by Jeff Garzik <jgarzik@pobox.com> */
-static int __devinitdata parport_init_mode = 0;
+static int __devinitdata parport_init_mode;
/* Data for two known VIA chips */
static struct parport_pc_via_data via_686a_data __devinitdata = {
@@ -2568,7 +2652,7 @@ static struct parport_pc_via_data via_8231_data __devinitdata = {
0xF6
};
-static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq,
+static int __devinit sio_via_probe(struct pci_dev *pdev, int autoirq,
int autodma,
const struct parport_pc_via_data *via)
{
@@ -2580,38 +2664,38 @@ static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq,
printk(KERN_DEBUG "parport_pc: VIA 686A/8231 detected\n");
- switch(parport_init_mode)
- {
+ switch (parport_init_mode) {
case 1:
- printk(KERN_DEBUG "parport_pc: setting SPP mode\n");
- siofunc = VIA_FUNCTION_PARPORT_SPP;
- break;
+ printk(KERN_DEBUG "parport_pc: setting SPP mode\n");
+ siofunc = VIA_FUNCTION_PARPORT_SPP;
+ break;
case 2:
- printk(KERN_DEBUG "parport_pc: setting PS/2 mode\n");
- siofunc = VIA_FUNCTION_PARPORT_SPP;
- ppcontrol = VIA_PARPORT_BIDIR;
- break;
+ printk(KERN_DEBUG "parport_pc: setting PS/2 mode\n");
+ siofunc = VIA_FUNCTION_PARPORT_SPP;
+ ppcontrol = VIA_PARPORT_BIDIR;
+ break;
case 3:
- printk(KERN_DEBUG "parport_pc: setting EPP mode\n");
- siofunc = VIA_FUNCTION_PARPORT_EPP;
- ppcontrol = VIA_PARPORT_BIDIR;
- have_epp = 1;
- break;
+ printk(KERN_DEBUG "parport_pc: setting EPP mode\n");
+ siofunc = VIA_FUNCTION_PARPORT_EPP;
+ ppcontrol = VIA_PARPORT_BIDIR;
+ have_epp = 1;
+ break;
case 4:
- printk(KERN_DEBUG "parport_pc: setting ECP mode\n");
- siofunc = VIA_FUNCTION_PARPORT_ECP;
- ppcontrol = VIA_PARPORT_BIDIR;
- break;
+ printk(KERN_DEBUG "parport_pc: setting ECP mode\n");
+ siofunc = VIA_FUNCTION_PARPORT_ECP;
+ ppcontrol = VIA_PARPORT_BIDIR;
+ break;
case 5:
- printk(KERN_DEBUG "parport_pc: setting EPP+ECP mode\n");
- siofunc = VIA_FUNCTION_PARPORT_ECP;
- ppcontrol = VIA_PARPORT_BIDIR|VIA_PARPORT_ECPEPP;
- have_epp = 1;
- break;
- default:
- printk(KERN_DEBUG "parport_pc: probing current configuration\n");
- siofunc = VIA_FUNCTION_PROBE;
- break;
+ printk(KERN_DEBUG "parport_pc: setting EPP+ECP mode\n");
+ siofunc = VIA_FUNCTION_PARPORT_ECP;
+ ppcontrol = VIA_PARPORT_BIDIR|VIA_PARPORT_ECPEPP;
+ have_epp = 1;
+ break;
+ default:
+ printk(KERN_DEBUG
+ "parport_pc: probing current configuration\n");
+ siofunc = VIA_FUNCTION_PROBE;
+ break;
}
/*
* unlock super i/o configuration
@@ -2622,38 +2706,36 @@ static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq,
/* Bits 1-0: Parallel Port Mode / Enable */
outb(via->viacfg_function, VIA_CONFIG_INDEX);
- tmp = inb (VIA_CONFIG_DATA);
+ tmp = inb(VIA_CONFIG_DATA);
/* Bit 5: EPP+ECP enable; bit 7: PS/2 bidirectional port enable */
outb(via->viacfg_parport_control, VIA_CONFIG_INDEX);
- tmp2 = inb (VIA_CONFIG_DATA);
- if (siofunc == VIA_FUNCTION_PROBE)
- {
- siofunc = tmp & VIA_FUNCTION_PARPORT_DISABLE;
- ppcontrol = tmp2;
+ tmp2 = inb(VIA_CONFIG_DATA);
+ if (siofunc == VIA_FUNCTION_PROBE) {
+ siofunc = tmp & VIA_FUNCTION_PARPORT_DISABLE;
+ ppcontrol = tmp2;
+ } else {
+ tmp &= ~VIA_FUNCTION_PARPORT_DISABLE;
+ tmp |= siofunc;
+ outb(via->viacfg_function, VIA_CONFIG_INDEX);
+ outb(tmp, VIA_CONFIG_DATA);
+ tmp2 &= ~(VIA_PARPORT_BIDIR|VIA_PARPORT_ECPEPP);
+ tmp2 |= ppcontrol;
+ outb(via->viacfg_parport_control, VIA_CONFIG_INDEX);
+ outb(tmp2, VIA_CONFIG_DATA);
}
- else
- {
- tmp &= ~VIA_FUNCTION_PARPORT_DISABLE;
- tmp |= siofunc;
- outb(via->viacfg_function, VIA_CONFIG_INDEX);
- outb(tmp, VIA_CONFIG_DATA);
- tmp2 &= ~(VIA_PARPORT_BIDIR|VIA_PARPORT_ECPEPP);
- tmp2 |= ppcontrol;
- outb(via->viacfg_parport_control, VIA_CONFIG_INDEX);
- outb(tmp2, VIA_CONFIG_DATA);
- }
-
+
/* Parallel Port I/O Base Address, bits 9-2 */
outb(via->viacfg_parport_base, VIA_CONFIG_INDEX);
port1 = inb(VIA_CONFIG_DATA) << 2;
-
- printk (KERN_DEBUG "parport_pc: Current parallel port base: 0x%X\n",port1);
- if ((port1 == 0x3BC) && have_epp)
- {
- outb(via->viacfg_parport_base, VIA_CONFIG_INDEX);
- outb((0x378 >> 2), VIA_CONFIG_DATA);
- printk(KERN_DEBUG "parport_pc: Parallel port base changed to 0x378\n");
- port1 = 0x378;
+
+ printk(KERN_DEBUG "parport_pc: Current parallel port base: 0x%X\n",
+ port1);
+ if (port1 == 0x3BC && have_epp) {
+ outb(via->viacfg_parport_base, VIA_CONFIG_INDEX);
+ outb((0x378 >> 2), VIA_CONFIG_DATA);
+ printk(KERN_DEBUG
+ "parport_pc: Parallel port base changed to 0x378\n");
+ port1 = 0x378;
}
/*
@@ -2667,36 +2749,39 @@ static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq,
printk(KERN_INFO "parport_pc: VIA parallel port disabled in BIOS\n");
return 0;
}
-
+
/* Bits 7-4: PnP Routing for Parallel Port IRQ */
pci_read_config_byte(pdev, via->via_pci_parport_irq_reg, &tmp);
irq = ((tmp & VIA_IRQCONTROL_PARALLEL) >> 4);
- if (siofunc == VIA_FUNCTION_PARPORT_ECP)
- {
- /* Bits 3-2: PnP Routing for Parallel Port DMA */
- pci_read_config_byte(pdev, via->via_pci_parport_dma_reg, &tmp);
- dma = ((tmp & VIA_DMACONTROL_PARALLEL) >> 2);
- }
- else
- /* if ECP not enabled, DMA is not enabled, assumed bogus 'dma' value */
- dma = PARPORT_DMA_NONE;
+ if (siofunc == VIA_FUNCTION_PARPORT_ECP) {
+ /* Bits 3-2: PnP Routing for Parallel Port DMA */
+ pci_read_config_byte(pdev, via->via_pci_parport_dma_reg, &tmp);
+ dma = ((tmp & VIA_DMACONTROL_PARALLEL) >> 2);
+ } else
+ /* if ECP not enabled, DMA is not enabled, assumed
+ bogus 'dma' value */
+ dma = PARPORT_DMA_NONE;
/* Let the user (or defaults) steer us away from interrupts and DMA */
if (autoirq == PARPORT_IRQ_NONE) {
- irq = PARPORT_IRQ_NONE;
- dma = PARPORT_DMA_NONE;
+ irq = PARPORT_IRQ_NONE;
+ dma = PARPORT_DMA_NONE;
}
if (autodma == PARPORT_DMA_NONE)
- dma = PARPORT_DMA_NONE;
+ dma = PARPORT_DMA_NONE;
switch (port1) {
- case 0x3bc: port2 = 0x7bc; break;
- case 0x378: port2 = 0x778; break;
- case 0x278: port2 = 0x678; break;
+ case 0x3bc:
+ port2 = 0x7bc; break;
+ case 0x378:
+ port2 = 0x778; break;
+ case 0x278:
+ port2 = 0x678; break;
default:
- printk(KERN_INFO "parport_pc: Weird VIA parport base 0x%X, ignoring\n",
- port1);
+ printk(KERN_INFO
+ "parport_pc: Weird VIA parport base 0x%X, ignoring\n",
+ port1);
return 0;
}
@@ -2714,17 +2799,17 @@ static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq,
}
/* finally, do the probe with values obtained */
- if (parport_pc_probe_port (port1, port2, irq, dma, &pdev->dev, 0)) {
- printk (KERN_INFO
+ if (parport_pc_probe_port(port1, port2, irq, dma, &pdev->dev, 0)) {
+ printk(KERN_INFO
"parport_pc: VIA parallel port: io=0x%X", port1);
if (irq != PARPORT_IRQ_NONE)
- printk (", irq=%d", irq);
+ printk(", irq=%d", irq);
if (dma != PARPORT_DMA_NONE)
- printk (", dma=%d", dma);
- printk ("\n");
+ printk(", dma=%d", dma);
+ printk("\n");
return 1;
}
-
+
printk(KERN_WARNING "parport_pc: Strange, can't probe VIA parallel port: io=0x%X, irq=%d, dma=%d\n",
port1, irq, dma);
return 0;
@@ -2732,8 +2817,8 @@ static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq,
enum parport_pc_sio_types {
- sio_via_686a = 0, /* Via VT82C686A motherboard Super I/O */
- sio_via_8231, /* Via VT8231 south bridge integrated Super IO */
+ sio_via_686a = 0, /* Via VT82C686A motherboard Super I/O */
+ sio_via_8231, /* Via VT8231 south bridge integrated Super IO */
sio_ite_8872,
last_sio
};
@@ -2804,15 +2889,15 @@ enum parport_pc_pci_cards {
};
-/* each element directly indexed from enum list, above
+/* each element directly indexed from enum list, above
* (but offset by last_sio) */
static struct parport_pc_pci {
int numports;
struct { /* BAR (base address registers) numbers in the config
- space header */
+ space header */
int lo;
- int hi; /* -1 if not there, >6 for offset-method (max
- BAR is 6) */
+ int hi;
+ /* -1 if not there, >6 for offset-method (max BAR is 6) */
} addr[4];
/* If set, this is called immediately after pci_enable_device.
@@ -2857,7 +2942,7 @@ static struct parport_pc_pci {
/* timedia_4018 */ { 2, { { 0, 1 }, { 2, 3 }, } },
/* timedia_9018a */ { 2, { { 0, 1 }, { 2, 3 }, } },
/* SYBA uses fixed offsets in
- a 1K io window */
+ a 1K io window */
/* syba_2p_epp AP138B */ { 2, { { 0, 0x078 }, { 0, 0x178 }, } },
/* syba_1p_ecp W83787 */ { 1, { { 0, 0x078 }, } },
/* titan_010l */ { 1, { { 3, -1 }, } },
@@ -2873,11 +2958,14 @@ static struct parport_pc_pci {
/* oxsemi_pcie_pport */ { 1, { { 0, 1 }, } },
/* aks_0100 */ { 1, { { 0, -1 }, } },
/* mobility_pp */ { 1, { { 0, 1 }, } },
- /* netmos_9705 */ { 1, { { 0, -1 }, } }, /* untested */
- /* netmos_9715 */ { 2, { { 0, 1 }, { 2, 3 },} }, /* untested */
- /* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} }, /* untested */
- /* netmos_9805 */ { 1, { { 0, -1 }, } }, /* untested */
- /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } }, /* untested */
+
+ /* The netmos entries below are untested */
+ /* netmos_9705 */ { 1, { { 0, -1 }, } },
+ /* netmos_9715 */ { 2, { { 0, 1 }, { 2, 3 },} },
+ /* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} },
+ /* netmos_9805 */ { 1, { { 0, -1 }, } },
+ /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } },
+
/* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
};
@@ -2906,7 +2994,7 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
{ PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_BOCA_IOPPAR,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, boca_ioppar },
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
- PCI_SUBVENDOR_ID_EXSYS, PCI_SUBDEVICE_ID_EXSYS_4014, 0,0, plx_9050 },
+ PCI_SUBVENDOR_ID_EXSYS, PCI_SUBDEVICE_ID_EXSYS_4014, 0, 0, plx_9050 },
/* PCI_VENDOR_ID_TIMEDIA/SUNIX has many differing cards ...*/
{ 0x1409, 0x7168, 0x1409, 0x4078, 0, 0, timedia_4078a },
{ 0x1409, 0x7168, 0x1409, 0x4079, 0, 0, timedia_4079h },
@@ -2940,7 +3028,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
{ 0x9710, 0x9805, 0x1000, 0x0010, 0, 0, titan_1284p1 },
{ 0x9710, 0x9815, 0x1000, 0x0020, 0, 0, titan_1284p2 },
/* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/
- { 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p}, /* AFAVLAB_TK9902 */
+ /* AFAVLAB_TK9902 */
+ { 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p},
{ 0x14db, 0x2121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_2p},
{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI952PP,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_952 },
@@ -2983,14 +3072,14 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
{ 0, } /* terminate list */
};
-MODULE_DEVICE_TABLE(pci,parport_pc_pci_tbl);
+MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl);
struct pci_parport_data {
int num;
struct parport *ports[2];
};
-static int parport_pc_pci_probe (struct pci_dev *dev,
+static int parport_pc_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
int err, count, n, i = id->driver_data;
@@ -3003,7 +3092,8 @@ static int parport_pc_pci_probe (struct pci_dev *dev,
/* This is a PCI card */
i -= last_sio;
count = 0;
- if ((err = pci_enable_device (dev)) != 0)
+ err = pci_enable_device(dev);
+ if (err)
return err;
data = kmalloc(sizeof(struct pci_parport_data), GFP_KERNEL);
@@ -3011,7 +3101,7 @@ static int parport_pc_pci_probe (struct pci_dev *dev,
return -ENOMEM;
if (cards[i].preinit_hook &&
- cards[i].preinit_hook (dev, PARPORT_IRQ_NONE, PARPORT_DMA_NONE)) {
+ cards[i].preinit_hook(dev, PARPORT_IRQ_NONE, PARPORT_DMA_NONE)) {
kfree(data);
return -ENODEV;
}
@@ -3021,25 +3111,25 @@ static int parport_pc_pci_probe (struct pci_dev *dev,
int hi = cards[i].addr[n].hi;
int irq;
unsigned long io_lo, io_hi;
- io_lo = pci_resource_start (dev, lo);
+ io_lo = pci_resource_start(dev, lo);
io_hi = 0;
if ((hi >= 0) && (hi <= 6))
- io_hi = pci_resource_start (dev, hi);
+ io_hi = pci_resource_start(dev, hi);
else if (hi > 6)
io_lo += hi; /* Reinterpret the meaning of
- "hi" as an offset (see SYBA
- def.) */
+ "hi" as an offset (see SYBA
+ def.) */
/* TODO: test if sharing interrupts works */
irq = dev->irq;
if (irq == IRQ_NONE) {
- printk (KERN_DEBUG
+ printk(KERN_DEBUG
"PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx)\n",
parport_pc_pci_tbl[i + last_sio].vendor,
parport_pc_pci_tbl[i + last_sio].device,
io_lo, io_hi);
irq = PARPORT_IRQ_NONE;
} else {
- printk (KERN_DEBUG
+ printk(KERN_DEBUG
"PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx), IRQ %d\n",
parport_pc_pci_tbl[i + last_sio].vendor,
parport_pc_pci_tbl[i + last_sio].device,
@@ -3056,7 +3146,7 @@ static int parport_pc_pci_probe (struct pci_dev *dev,
data->num = count;
if (cards[i].postinit_hook)
- cards[i].postinit_hook (dev, count == 0);
+ cards[i].postinit_hook(dev, count == 0);
if (count) {
pci_set_drvdata(dev, data);
@@ -3090,7 +3180,7 @@ static struct pci_driver parport_pc_pci_driver = {
.remove = __devexit_p(parport_pc_pci_remove),
};
-static int __init parport_pc_init_superio (int autoirq, int autodma)
+static int __init parport_pc_init_superio(int autoirq, int autodma)
{
const struct pci_device_id *id;
struct pci_dev *pdev = NULL;
@@ -3101,8 +3191,9 @@ static int __init parport_pc_init_superio (int autoirq, int autodma)
if (id == NULL || id->driver_data >= last_sio)
continue;
- if (parport_pc_superio_info[id->driver_data].probe
- (pdev, autoirq, autodma,parport_pc_superio_info[id->driver_data].via)) {
+ if (parport_pc_superio_info[id->driver_data].probe(
+ pdev, autoirq, autodma,
+ parport_pc_superio_info[id->driver_data].via)) {
ret++;
}
}
@@ -3111,7 +3202,10 @@ static int __init parport_pc_init_superio (int autoirq, int autodma)
}
#else
static struct pci_driver parport_pc_pci_driver;
-static int __init parport_pc_init_superio(int autoirq, int autodma) {return 0;}
+static int __init parport_pc_init_superio(int autoirq, int autodma)
+{
+ return 0;
+}
#endif /* CONFIG_PCI */
#ifdef CONFIG_PNP
@@ -3124,44 +3218,45 @@ static const struct pnp_device_id parport_pc_pnp_tbl[] = {
{ }
};
-MODULE_DEVICE_TABLE(pnp,parport_pc_pnp_tbl);
+MODULE_DEVICE_TABLE(pnp, parport_pc_pnp_tbl);
-static int parport_pc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id)
+static int parport_pc_pnp_probe(struct pnp_dev *dev,
+ const struct pnp_device_id *id)
{
struct parport *pdata;
unsigned long io_lo, io_hi;
int dma, irq;
- if (pnp_port_valid(dev,0) &&
- !(pnp_port_flags(dev,0) & IORESOURCE_DISABLED)) {
- io_lo = pnp_port_start(dev,0);
+ if (pnp_port_valid(dev, 0) &&
+ !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
+ io_lo = pnp_port_start(dev, 0);
} else
return -EINVAL;
- if (pnp_port_valid(dev,1) &&
- !(pnp_port_flags(dev,1) & IORESOURCE_DISABLED)) {
- io_hi = pnp_port_start(dev,1);
+ if (pnp_port_valid(dev, 1) &&
+ !(pnp_port_flags(dev, 1) & IORESOURCE_DISABLED)) {
+ io_hi = pnp_port_start(dev, 1);
} else
io_hi = 0;
- if (pnp_irq_valid(dev,0) &&
- !(pnp_irq_flags(dev,0) & IORESOURCE_DISABLED)) {
- irq = pnp_irq(dev,0);
+ if (pnp_irq_valid(dev, 0) &&
+ !(pnp_irq_flags(dev, 0) & IORESOURCE_DISABLED)) {
+ irq = pnp_irq(dev, 0);
} else
irq = PARPORT_IRQ_NONE;
- if (pnp_dma_valid(dev,0) &&
- !(pnp_dma_flags(dev,0) & IORESOURCE_DISABLED)) {
- dma = pnp_dma(dev,0);
+ if (pnp_dma_valid(dev, 0) &&
+ !(pnp_dma_flags(dev, 0) & IORESOURCE_DISABLED)) {
+ dma = pnp_dma(dev, 0);
} else
dma = PARPORT_DMA_NONE;
dev_info(&dev->dev, "reported by %s\n", dev->protocol->name);
- if (!(pdata = parport_pc_probe_port(io_lo, io_hi,
- irq, dma, &dev->dev, 0)))
+ pdata = parport_pc_probe_port(io_lo, io_hi, irq, dma, &dev->dev, 0);
+ if (pdata == NULL)
return -ENODEV;
- pnp_set_drvdata(dev,pdata);
+ pnp_set_drvdata(dev, pdata);
return 0;
}
@@ -3203,7 +3298,7 @@ static struct platform_driver parport_pc_platform_driver = {
/* This is called by parport_pc_find_nonpci_ports (in asm/parport.h) */
static int __devinit __attribute__((unused))
-parport_pc_find_isa_ports (int autoirq, int autodma)
+parport_pc_find_isa_ports(int autoirq, int autodma)
{
int count = 0;
@@ -3227,7 +3322,7 @@ parport_pc_find_isa_ports (int autoirq, int autodma)
* autoirq is PARPORT_IRQ_NONE, PARPORT_IRQ_AUTO, or PARPORT_IRQ_PROBEONLY
* autodma is PARPORT_DMA_NONE or PARPORT_DMA_AUTO
*/
-static void __init parport_pc_find_ports (int autoirq, int autodma)
+static void __init parport_pc_find_ports(int autoirq, int autodma)
{
int count = 0, err;
@@ -3261,11 +3356,18 @@ static void __init parport_pc_find_ports (int autoirq, int autodma)
* syntax and keep in mind that code below is a cleaned up version.
*/
-static int __initdata io[PARPORT_PC_MAX_PORTS+1] = { [0 ... PARPORT_PC_MAX_PORTS] = 0 };
-static int __initdata io_hi[PARPORT_PC_MAX_PORTS+1] =
- { [0 ... PARPORT_PC_MAX_PORTS] = PARPORT_IOHI_AUTO };
-static int __initdata dmaval[PARPORT_PC_MAX_PORTS] = { [0 ... PARPORT_PC_MAX_PORTS-1] = PARPORT_DMA_NONE };
-static int __initdata irqval[PARPORT_PC_MAX_PORTS] = { [0 ... PARPORT_PC_MAX_PORTS-1] = PARPORT_IRQ_PROBEONLY };
+static int __initdata io[PARPORT_PC_MAX_PORTS+1] = {
+ [0 ... PARPORT_PC_MAX_PORTS] = 0
+};
+static int __initdata io_hi[PARPORT_PC_MAX_PORTS+1] = {
+ [0 ... PARPORT_PC_MAX_PORTS] = PARPORT_IOHI_AUTO
+};
+static int __initdata dmaval[PARPORT_PC_MAX_PORTS] = {
+ [0 ... PARPORT_PC_MAX_PORTS-1] = PARPORT_DMA_NONE
+};
+static int __initdata irqval[PARPORT_PC_MAX_PORTS] = {
+ [0 ... PARPORT_PC_MAX_PORTS-1] = PARPORT_IRQ_PROBEONLY
+};
static int __init parport_parse_param(const char *s, int *val,
int automatic, int none, int nofifo)
@@ -3306,18 +3408,19 @@ static int __init parport_parse_dma(const char *dmastr, int *val)
#ifdef CONFIG_PCI
static int __init parport_init_mode_setup(char *str)
{
- printk(KERN_DEBUG "parport_pc.c: Specified parameter parport_init_mode=%s\n", str);
-
- if (!strcmp (str, "spp"))
- parport_init_mode=1;
- if (!strcmp (str, "ps2"))
- parport_init_mode=2;
- if (!strcmp (str, "epp"))
- parport_init_mode=3;
- if (!strcmp (str, "ecp"))
- parport_init_mode=4;
- if (!strcmp (str, "ecpepp"))
- parport_init_mode=5;
+ printk(KERN_DEBUG
+ "parport_pc.c: Specified parameter parport_init_mode=%s\n", str);
+
+ if (!strcmp(str, "spp"))
+ parport_init_mode = 1;
+ if (!strcmp(str, "ps2"))
+ parport_init_mode = 2;
+ if (!strcmp(str, "epp"))
+ parport_init_mode = 3;
+ if (!strcmp(str, "ecp"))
+ parport_init_mode = 4;
+ if (!strcmp(str, "ecpepp"))
+ parport_init_mode = 5;
return 1;
}
#endif
@@ -3341,7 +3444,8 @@ module_param(verbose_probing, int, 0644);
#endif
#ifdef CONFIG_PCI
static char *init_mode;
-MODULE_PARM_DESC(init_mode, "Initialise mode for VIA VT8231 port (spp, ps2, epp, ecp or ecpepp)");
+MODULE_PARM_DESC(init_mode,
+ "Initialise mode for VIA VT8231 port (spp, ps2, epp, ecp or ecpepp)");
module_param(init_mode, charp, 0);
#endif
@@ -3372,7 +3476,7 @@ static int __init parse_parport_params(void)
irqval[0] = val;
break;
default:
- printk (KERN_WARNING
+ printk(KERN_WARNING
"parport_pc: irq specified "
"without base address. Use 'io=' "
"to specify one\n");
@@ -3385,7 +3489,7 @@ static int __init parse_parport_params(void)
dmaval[0] = val;
break;
default:
- printk (KERN_WARNING
+ printk(KERN_WARNING
"parport_pc: dma specified "
"without base address. Use 'io=' "
"to specify one\n");
@@ -3396,7 +3500,7 @@ static int __init parse_parport_params(void)
#else
-static int parport_setup_ptr __initdata = 0;
+static int parport_setup_ptr __initdata;
/*
* Acceptable parameters:
@@ -3407,7 +3511,7 @@ static int parport_setup_ptr __initdata = 0;
*
* IRQ/DMA may be numeric or 'auto' or 'none'
*/
-static int __init parport_setup (char *str)
+static int __init parport_setup(char *str)
{
char *endptr;
char *sep;
@@ -3419,15 +3523,15 @@ static int __init parport_setup (char *str)
return 1;
}
- if (!strncmp (str, "auto", 4)) {
+ if (!strncmp(str, "auto", 4)) {
irqval[0] = PARPORT_IRQ_AUTO;
dmaval[0] = PARPORT_DMA_AUTO;
return 1;
}
- val = simple_strtoul (str, &endptr, 0);
+ val = simple_strtoul(str, &endptr, 0);
if (endptr == str) {
- printk (KERN_WARNING "parport=%s not understood\n", str);
+ printk(KERN_WARNING "parport=%s not understood\n", str);
return 1;
}
@@ -3461,7 +3565,7 @@ static int __init parse_parport_params(void)
return io[0] == PARPORT_DISABLE;
}
-__setup ("parport=", parport_setup);
+__setup("parport=", parport_setup);
/*
* Acceptable parameters:
@@ -3469,7 +3573,7 @@ __setup ("parport=", parport_setup);
* parport_init_mode=[spp|ps2|epp|ecp|ecpepp]
*/
#ifdef CONFIG_PCI
-__setup("parport_init_mode=",parport_init_mode_setup);
+__setup("parport_init_mode=", parport_init_mode_setup);
#endif
#endif
@@ -3493,13 +3597,13 @@ static int __init parport_pc_init(void)
for (i = 0; i < PARPORT_PC_MAX_PORTS; i++) {
if (!io[i])
break;
- if ((io_hi[i]) == PARPORT_IOHI_AUTO)
- io_hi[i] = 0x400 + io[i];
+ if (io_hi[i] == PARPORT_IOHI_AUTO)
+ io_hi[i] = 0x400 + io[i];
parport_pc_probe_port(io[i], io_hi[i],
- irqval[i], dmaval[i], NULL, 0);
+ irqval[i], dmaval[i], NULL, 0);
}
} else
- parport_pc_find_ports (irqval[0], dmaval[0]);
+ parport_pc_find_ports(irqval[0], dmaval[0]);
return 0;
}
@@ -3507,9 +3611,9 @@ static int __init parport_pc_init(void)
static void __exit parport_pc_exit(void)
{
if (pci_registered_parport)
- pci_unregister_driver (&parport_pc_pci_driver);
+ pci_unregister_driver(&parport_pc_pci_driver);
if (pnp_registered_parport)
- pnp_unregister_driver (&parport_pc_pnp_driver);
+ pnp_unregister_driver(&parport_pc_pnp_driver);
platform_driver_unregister(&parport_pc_platform_driver);
while (!list_empty(&ports_list)) {
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 0ebca450ed2..dffa5d4fb29 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -614,7 +614,10 @@ parport_register_device(struct parport *port, const char *name,
* pardevice fields. -arca
*/
port->ops->init_state(tmp, tmp->state);
- parport_device_proc_register(tmp);
+ if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
+ port->proc_device = tmp;
+ parport_device_proc_register(tmp);
+ }
return tmp;
out_free_all:
@@ -646,10 +649,14 @@ void parport_unregister_device(struct pardevice *dev)
}
#endif
- parport_device_proc_unregister(dev);
-
port = dev->port->physport;
+ if (port->proc_device == dev) {
+ port->proc_device = NULL;
+ clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
+ parport_device_proc_unregister(dev);
+ }
+
if (port->cad == dev) {
printk(KERN_DEBUG "%s: %s forgot to release port\n",
port->name, dev->name);
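The share.c hunk above guards proc registration with an atomic claim flag: the first pardevice to set the bit owns the port's proc entry, and only that owner clears it again on unregister. A minimal sketch of the same test_and_set_bit() idiom follows; the owner/device types and the bit name are made up for illustration and are not the real parport structures.

/* Sketch only: atomic claim-flag idiom with hypothetical types. */
#include <linux/bitops.h>

#define MY_PROC_REGISTERED	0	/* bit number inside owner->flags */

struct my_owner {
	unsigned long flags;
	void *proc_device;
};

static void my_claim_proc(struct my_owner *owner, void *dev)
{
	/* Only the first caller to flip the bit registers the proc entry. */
	if (!test_and_set_bit(MY_PROC_REGISTERED, &owner->flags))
		owner->proc_device = dev;
}

static void my_release_proc(struct my_owner *owner, void *dev)
{
	/* Only the current owner may drop the claim. */
	if (owner->proc_device == dev) {
		owner->proc_device = NULL;
		clear_bit(MY_PROC_REGISTERED, &owner->flags);
	}
}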
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 4fc168b7009..e68d5f20ffb 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -129,7 +129,6 @@ struct acpiphp_func {
struct acpiphp_bridge *bridge; /* Ejectable PCI-to-PCI bridge */
struct list_head sibling;
- struct pci_dev *pci_dev;
struct notifier_block nb;
acpi_handle handle;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index a33794d9e0d..3a6064bce56 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -32,9 +32,6 @@
/*
* Lifetime rules for pci_dev:
- * - The one in acpiphp_func has its refcount elevated by pci_get_slot()
- * when the driver is loaded or when an insertion event occurs. It loses
- * a refcount when its ejected or the driver unloads.
* - The one in acpiphp_bridge has its refcount elevated by pci_get_slot()
* when the bridge is scanned and it loses a refcount when the bridge
* is removed.
@@ -130,6 +127,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
unsigned long long adr, sun;
int device, function, retval;
struct pci_bus *pbus = bridge->pci_bus;
+ struct pci_dev *pdev;
if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
return AE_OK;
@@ -213,10 +211,10 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
newfunc->slot = slot;
list_add_tail(&newfunc->sibling, &slot->funcs);
- /* associate corresponding pci_dev */
- newfunc->pci_dev = pci_get_slot(pbus, PCI_DEVFN(device, function));
- if (newfunc->pci_dev) {
+ pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
+ if (pdev) {
slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
+ pci_dev_put(pdev);
}
if (is_dock_device(handle)) {
@@ -617,7 +615,6 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
if (ACPI_FAILURE(status))
err("failed to remove notify handler\n");
}
- pci_dev_put(func->pci_dev);
list_del(list);
kfree(func);
}
@@ -1101,22 +1098,24 @@ static int __ref enable_device(struct acpiphp_slot *slot)
pci_enable_bridges(bus);
pci_bus_add_devices(bus);
- /* associate pci_dev to our representation */
list_for_each (l, &slot->funcs) {
func = list_entry(l, struct acpiphp_func, sibling);
- func->pci_dev = pci_get_slot(bus, PCI_DEVFN(slot->device,
- func->function));
- if (!func->pci_dev)
+ dev = pci_get_slot(bus, PCI_DEVFN(slot->device,
+ func->function));
+ if (!dev)
continue;
- if (func->pci_dev->hdr_type != PCI_HEADER_TYPE_BRIDGE &&
- func->pci_dev->hdr_type != PCI_HEADER_TYPE_CARDBUS)
+ if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE &&
+ dev->hdr_type != PCI_HEADER_TYPE_CARDBUS) {
+ pci_dev_put(dev);
continue;
+ }
status = find_p2p_bridge(func->handle, (u32)1, bus, NULL);
if (ACPI_FAILURE(status))
warn("find_p2p_bridge failed (error code = 0x%x)\n",
status);
+ pci_dev_put(dev);
}
slot->flags |= SLOT_ENABLED;
@@ -1142,17 +1141,14 @@ static void disable_bridges(struct pci_bus *bus)
*/
static int disable_device(struct acpiphp_slot *slot)
{
- int retval = 0;
struct acpiphp_func *func;
- struct list_head *l;
+ struct pci_dev *pdev;
/* is this slot already disabled? */
if (!(slot->flags & SLOT_ENABLED))
goto err_exit;
- list_for_each (l, &slot->funcs) {
- func = list_entry(l, struct acpiphp_func, sibling);
-
+ list_for_each_entry(func, &slot->funcs, sibling) {
if (func->bridge) {
/* cleanup p2p bridges under this P2P bridge */
cleanup_p2p_bridge(func->bridge->handle,
@@ -1160,35 +1156,28 @@ static int disable_device(struct acpiphp_slot *slot)
func->bridge = NULL;
}
- if (func->pci_dev) {
- pci_stop_bus_device(func->pci_dev);
- if (func->pci_dev->subordinate) {
- disable_bridges(func->pci_dev->subordinate);
- pci_disable_device(func->pci_dev);
+ pdev = pci_get_slot(slot->bridge->pci_bus,
+ PCI_DEVFN(slot->device, func->function));
+ if (pdev) {
+ pci_stop_bus_device(pdev);
+ if (pdev->subordinate) {
+ disable_bridges(pdev->subordinate);
+ pci_disable_device(pdev);
}
+ pci_remove_bus_device(pdev);
+ pci_dev_put(pdev);
}
}
- list_for_each (l, &slot->funcs) {
- func = list_entry(l, struct acpiphp_func, sibling);
-
+ list_for_each_entry(func, &slot->funcs, sibling) {
acpiphp_unconfigure_ioapics(func->handle);
acpiphp_bus_trim(func->handle);
- /* try to remove anyway.
- * acpiphp_bus_add might have been failed */
-
- if (!func->pci_dev)
- continue;
-
- pci_remove_bus_device(func->pci_dev);
- pci_dev_put(func->pci_dev);
- func->pci_dev = NULL;
}
slot->flags &= (~SLOT_ENABLED);
- err_exit:
- return retval;
+err_exit:
+ return 0;
}
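The acpiphp_glue.c hunks above drop the cached func->pci_dev and look the device up where it is needed; the rule being applied is that every successful pci_get_slot() takes a reference that must be released with pci_dev_put() on every exit path. A minimal sketch of that get/use/put pattern, with a hypothetical walker function rather than the driver's real code:

/* Sketch only: balance each pci_get_slot() with pci_dev_put(). */
#include <linux/pci.h>

static void sketch_walk_slot(struct pci_bus *bus, unsigned int device, int nfuncs)
{
	int fn;

	for (fn = 0; fn < nfuncs; fn++) {
		struct pci_dev *pdev = pci_get_slot(bus, PCI_DEVFN(device, fn));

		if (!pdev)
			continue;	/* nothing present at this function */
		/* ... use pdev while the reference is held ... */
		pci_dev_put(pdev);	/* drop the reference on every path */
	}
}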
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index dd18f857dfb..42e4260c3b1 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -153,45 +153,47 @@ int ibmphp_init_devno(struct slot **cur_slot)
return -1;
}
for (loop = 0; loop < len; loop++) {
- if ((*cur_slot)->number == rtable->slots[loop].slot) {
- if ((*cur_slot)->bus == rtable->slots[loop].bus) {
+ if ((*cur_slot)->number == rtable->slots[loop].slot &&
+ (*cur_slot)->bus == rtable->slots[loop].bus) {
+ struct io_apic_irq_attr irq_attr;
+
(*cur_slot)->device = PCI_SLOT(rtable->slots[loop].devfn);
for (i = 0; i < 4; i++)
(*cur_slot)->irq[i] = IO_APIC_get_PCI_irq_vector((int) (*cur_slot)->bus,
- (int) (*cur_slot)->device, i);
-
- debug("(*cur_slot)->irq[0] = %x\n",
- (*cur_slot)->irq[0]);
- debug("(*cur_slot)->irq[1] = %x\n",
- (*cur_slot)->irq[1]);
- debug("(*cur_slot)->irq[2] = %x\n",
- (*cur_slot)->irq[2]);
- debug("(*cur_slot)->irq[3] = %x\n",
- (*cur_slot)->irq[3]);
-
- debug("rtable->exlusive_irqs = %x\n",
+ (int) (*cur_slot)->device, i,
+ &irq_attr);
+
+ debug("(*cur_slot)->irq[0] = %x\n",
+ (*cur_slot)->irq[0]);
+ debug("(*cur_slot)->irq[1] = %x\n",
+ (*cur_slot)->irq[1]);
+ debug("(*cur_slot)->irq[2] = %x\n",
+ (*cur_slot)->irq[2]);
+ debug("(*cur_slot)->irq[3] = %x\n",
+ (*cur_slot)->irq[3]);
+
+ debug("rtable->exlusive_irqs = %x\n",
rtable->exclusive_irqs);
- debug("rtable->slots[loop].irq[0].bitmap = %x\n",
+ debug("rtable->slots[loop].irq[0].bitmap = %x\n",
rtable->slots[loop].irq[0].bitmap);
- debug("rtable->slots[loop].irq[1].bitmap = %x\n",
+ debug("rtable->slots[loop].irq[1].bitmap = %x\n",
rtable->slots[loop].irq[1].bitmap);
- debug("rtable->slots[loop].irq[2].bitmap = %x\n",
+ debug("rtable->slots[loop].irq[2].bitmap = %x\n",
rtable->slots[loop].irq[2].bitmap);
- debug("rtable->slots[loop].irq[3].bitmap = %x\n",
+ debug("rtable->slots[loop].irq[3].bitmap = %x\n",
rtable->slots[loop].irq[3].bitmap);
- debug("rtable->slots[loop].irq[0].link = %x\n",
+ debug("rtable->slots[loop].irq[0].link = %x\n",
rtable->slots[loop].irq[0].link);
- debug("rtable->slots[loop].irq[1].link = %x\n",
+ debug("rtable->slots[loop].irq[1].link = %x\n",
rtable->slots[loop].irq[1].link);
- debug("rtable->slots[loop].irq[2].link = %x\n",
+ debug("rtable->slots[loop].irq[2].link = %x\n",
rtable->slots[loop].irq[2].link);
- debug("rtable->slots[loop].irq[3].link = %x\n",
+ debug("rtable->slots[loop].irq[3].link = %x\n",
rtable->slots[loop].irq[3].link);
- debug("end of init_devno\n");
- kfree(rtable);
- return 0;
- }
+ debug("end of init_devno\n");
+ kfree(rtable);
+ return 0;
}
}
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
index 6808d8333ec..737a1c44b07 100644
--- a/drivers/pci/htirq.c
+++ b/drivers/pci/htirq.c
@@ -98,6 +98,7 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
int max_irq;
int pos;
int irq;
+ int node;
pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ);
if (!pos)
@@ -125,7 +126,8 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
cfg->msg.address_lo = 0xffffffff;
cfg->msg.address_hi = 0xffffffff;
- irq = create_irq();
+ node = dev_to_node(&dev->dev);
+ irq = create_irq_nr(0, node);
if (irq <= 0) {
kfree(cfg);
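The htirq.c change above, like the kzalloc_node() call in intr_remapping.c below, follows the same pattern: derive the NUMA node from the device and allocate or create resources on that node. A minimal sketch, with a hypothetical helper name:

/* Sketch only: allocate per-device data on the device's NUMA node. */
#include <linux/pci.h>
#include <linux/slab.h>

static void *sketch_alloc_on_dev_node(struct pci_dev *dev, size_t size)
{
	int node = dev_to_node(&dev->dev);	/* NUMA node of the device, or -1 */

	return kzalloc_node(size, GFP_KERNEL, node);
}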
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index a563fbe559d..cd389162735 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1972,15 +1972,6 @@ static int __init init_dmars(void)
}
}
-#ifdef CONFIG_INTR_REMAP
- if (!intr_remapping_enabled) {
- ret = enable_intr_remapping(0);
- if (ret)
- printk(KERN_ERR
- "IOMMU: enable interrupt remapping failed\n");
- }
-#endif
-
/*
* For each rmrr
* for each dev attached to rmrr
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index f5e0ea724a6..3a0cb0bb059 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -15,6 +15,14 @@ static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;
+static int disable_intremap;
+static __init int setup_nointremap(char *str)
+{
+ disable_intremap = 1;
+ return 0;
+}
+early_param("nointremap", setup_nointremap);
+
struct irq_2_iommu {
struct intel_iommu *iommu;
u16 irte_index;
@@ -23,15 +31,12 @@ struct irq_2_iommu {
};
#ifdef CONFIG_GENERIC_HARDIRQS
-static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
+static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
{
struct irq_2_iommu *iommu;
- int node;
-
- node = cpu_to_node(cpu);
iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
- printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node);
+ printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);
return iommu;
}
@@ -48,7 +53,7 @@ static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
return desc->irq_2_iommu;
}
-static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
+static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node)
{
struct irq_desc *desc;
struct irq_2_iommu *irq_iommu;
@@ -56,7 +61,7 @@ static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
/*
* alloc irq desc if not allocated already.
*/
- desc = irq_to_desc_alloc_cpu(irq, cpu);
+ desc = irq_to_desc_alloc_node(irq, node);
if (!desc) {
printk(KERN_INFO "can not get irq_desc for %d\n", irq);
return NULL;
@@ -65,14 +70,14 @@ static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
irq_iommu = desc->irq_2_iommu;
if (!irq_iommu)
- desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu);
+ desc->irq_2_iommu = get_one_free_irq_2_iommu(node);
return desc->irq_2_iommu;
}
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
- return irq_2_iommu_alloc_cpu(irq, boot_cpu_id);
+ return irq_2_iommu_alloc_node(irq, cpu_to_node(boot_cpu_id));
}
#else /* !CONFIG_SPARSE_IRQ */
@@ -423,20 +428,6 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
readl, (sts & DMA_GSTS_IRTPS), sts);
spin_unlock_irqrestore(&iommu->register_lock, flags);
- if (mode == 0) {
- spin_lock_irqsave(&iommu->register_lock, flags);
-
- /* enable comaptiblity format interrupt pass through */
- cmd = iommu->gcmd | DMA_GCMD_CFI;
- iommu->gcmd |= DMA_GCMD_CFI;
- writel(cmd, iommu->reg + DMAR_GCMD_REG);
-
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_CFIS), sts);
-
- spin_unlock_irqrestore(&iommu->register_lock, flags);
- }
-
/*
* global invalidation of interrupt entry cache before enabling
* interrupt-remapping.
@@ -516,6 +507,23 @@ end:
spin_unlock_irqrestore(&iommu->register_lock, flags);
}
+int __init intr_remapping_supported(void)
+{
+ struct dmar_drhd_unit *drhd;
+
+ if (disable_intremap)
+ return 0;
+
+ for_each_drhd_unit(drhd) {
+ struct intel_iommu *iommu = drhd->iommu;
+
+ if (!ecap_ir_support(iommu->ecap))
+ return 0;
+ }
+
+ return 1;
+}
+
int __init enable_intr_remapping(int eim)
{
struct dmar_drhd_unit *drhd;
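The intr_remapping.c hunk above wires a boot-time kill switch through early_param(); the handler simply sets a flag that later capability checks consult. A minimal sketch of that shape, using a hypothetical "noexample" option rather than the real "nointremap" parameter:

/* Sketch only: boot-time disable flag via early_param(). */
#include <linux/init.h>

static int example_disabled;		/* hypothetical flag */

static int __init setup_noexample(char *str)
{
	example_disabled = 1;
	return 0;			/* parameter handled */
}
early_param("noexample", setup_noexample);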
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index e3c3e081b83..f1ae2475fff 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -745,6 +745,8 @@ int pci_setup_device(struct pci_dev *dev)
/* Early fixups, before probing the BARs */
pci_fixup_device(pci_fixup_early, dev);
+ /* device class may be changed after fixup */
+ class = dev->class >> 8;
switch (dev->hdr_type) { /* header type */
case PCI_HEADER_TYPE_NORMAL: /* standard header */
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index adf17856bac..7f207f335be 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -123,7 +123,7 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_dev *dev,
}
flags = irq_flags(triggering, polarity, shareable);
- irq = acpi_register_gsi(gsi, triggering, polarity);
+ irq = acpi_register_gsi(&dev->dev, gsi, triggering, polarity);
if (irq >= 0)
pcibios_penalize_isa_irq(irq, 1);
else
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4e9851fc174..277d35d232f 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -692,7 +692,7 @@ config RTC_DRV_GENERIC
tristate "Generic RTC support"
# Please consider writing a new RTC driver instead of using the generic
# RTC abstraction
- depends on PARISC || M68K || PPC
+ depends on PARISC || M68K || PPC || SUPERH32
help
Say Y or M here to enable RTC support on systems using the generic
RTC abstraction. If you do not know what you are doing, you should
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index d1815272c43..27a1be0cd4d 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -603,7 +603,7 @@ static void dasd_profile_end(struct dasd_block *block,
if (dasd_profile_level != DASD_PROFILE_ON)
return;
- sectors = req->nr_sectors;
+ sectors = blk_rq_sectors(req);
if (!cqr->buildclk || !cqr->startclk ||
!cqr->stopclk || !cqr->endclk ||
!sectors)
@@ -1614,15 +1614,6 @@ void dasd_block_clear_timer(struct dasd_block *block)
}
/*
- * posts the buffer_cache about a finalized request
- */
-static inline void dasd_end_request(struct request *req, int error)
-{
- if (__blk_end_request(req, error, blk_rq_bytes(req)))
- BUG();
-}
-
-/*
* Process finished error recovery ccw.
*/
static inline void __dasd_block_process_erp(struct dasd_block *block,
@@ -1665,18 +1656,14 @@ static void __dasd_process_request_queue(struct dasd_block *block)
if (basedev->state < DASD_STATE_READY)
return;
/* Now we try to fetch requests from the request queue */
- while (!blk_queue_plugged(queue) &&
- elv_next_request(queue)) {
-
- req = elv_next_request(queue);
-
+ while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
if (basedev->features & DASD_FEATURE_READONLY &&
rq_data_dir(req) == WRITE) {
DBF_DEV_EVENT(DBF_ERR, basedev,
"Rejecting write request %p",
req);
- blkdev_dequeue_request(req);
- dasd_end_request(req, -EIO);
+ blk_start_request(req);
+ __blk_end_request_all(req, -EIO);
continue;
}
cqr = basedev->discipline->build_cp(basedev, block, req);
@@ -1704,8 +1691,8 @@ static void __dasd_process_request_queue(struct dasd_block *block)
"CCW creation failed (rc=%ld) "
"on request %p",
PTR_ERR(cqr), req);
- blkdev_dequeue_request(req);
- dasd_end_request(req, -EIO);
+ blk_start_request(req);
+ __blk_end_request_all(req, -EIO);
continue;
}
/*
@@ -1714,7 +1701,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
*/
cqr->callback_data = (void *) req;
cqr->status = DASD_CQR_FILLED;
- blkdev_dequeue_request(req);
+ blk_start_request(req);
list_add_tail(&cqr->blocklist, &block->ccw_queue);
dasd_profile_start(block, cqr, req);
}
@@ -1731,7 +1718,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
status = cqr->block->base->discipline->free_cp(cqr, req);
if (status <= 0)
error = status ? status : -EIO;
- dasd_end_request(req, error);
+ __blk_end_request_all(req, error);
}
/*
@@ -2003,7 +1990,7 @@ static void dasd_setup_queue(struct dasd_block *block)
{
int max;
- blk_queue_hardsect_size(block->request_queue, block->bp_block);
+ blk_queue_logical_block_size(block->request_queue, block->bp_block);
max = block->base->discipline->max_blocks << block->s2b_shift;
blk_queue_max_sectors(block->request_queue, max);
blk_queue_max_phys_segments(block->request_queue, -1L);
@@ -2038,10 +2025,8 @@ static void dasd_flush_request_queue(struct dasd_block *block)
return;
spin_lock_irq(&block->request_queue_lock);
- while ((req = elv_next_request(block->request_queue))) {
- blkdev_dequeue_request(req);
- dasd_end_request(req, -EIO);
- }
+ while ((req = blk_fetch_request(block->request_queue)))
+ __blk_end_request_all(req, -EIO);
spin_unlock_irq(&block->request_queue_lock);
}
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index b9a7f773344..2efaddfae56 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -505,8 +505,9 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
return ERR_PTR(-EINVAL);
blksize = block->bp_block;
/* Calculate record id of first and last block. */
- first_rec = req->sector >> block->s2b_shift;
- last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
+ first_rec = blk_rq_pos(req) >> block->s2b_shift;
+ last_rec =
+ (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
/* Check struct bio and count the number of blocks for the request. */
count = 0;
rq_for_each_segment(bv, req, iter) {
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index cb52da033f0..a41c94053e6 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2354,10 +2354,10 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
blksize = block->bp_block;
blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
/* Calculate record id of first and last block. */
- first_rec = first_trk = req->sector >> block->s2b_shift;
+ first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
first_offs = sector_div(first_trk, blk_per_trk);
last_rec = last_trk =
- (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
+ (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
last_offs = sector_div(last_trk, blk_per_trk);
cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
@@ -2420,7 +2420,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
private = (struct dasd_eckd_private *) cqr->block->base->private;
blksize = cqr->block->bp_block;
blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
- recid = req->sector >> cqr->block->s2b_shift;
+ recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
ccw = cqr->cpaddr;
/* Skip over define extent & locate record. */
ccw++;
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index a3eb6fd1467..8912358daa2 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -270,8 +270,9 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
return ERR_PTR(-EINVAL);
blksize = block->bp_block;
/* Calculate record id of first and last block. */
- first_rec = req->sector >> block->s2b_shift;
- last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
+ first_rec = blk_rq_pos(req) >> block->s2b_shift;
+ last_rec =
+ (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
/* Check struct bio and count the number of blocks for the request. */
count = 0;
cidaw = 0;
@@ -309,7 +310,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
ccw = cqr->cpaddr;
/* First ccw is define extent. */
define_extent(ccw++, cqr->data, rq_data_dir(req),
- block->bp_block, req->sector, req->nr_sectors);
+ block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
/* Build locate_record + read/write ccws. */
idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
LO_data = (struct LO_fba_data *) (idaws + cidaw);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index cfdcf1aed33..a4c7ffcd998 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -602,7 +602,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->gd->private_data = dev_info;
dev_info->gd->driverfs_dev = &dev_info->dev;
blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
- blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096);
+ blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
seg_byte_size = (dev_info->end - dev_info->start + 1);
set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 76814f3e898..0ae0c83ef87 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -343,7 +343,7 @@ static int __init xpram_setup_blkdev(void)
goto out;
}
blk_queue_make_request(xpram_queues[i], xpram_make_request);
- blk_queue_hardsect_size(xpram_queues[i], 4096);
+ blk_queue_logical_block_size(xpram_queues[i], 4096);
}
/*
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 5f8e8ef43dd..2d00a383a47 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -1134,7 +1134,7 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
/* Setup ccws. */
request->op = TO_BLOCK;
start_block = (struct tape_34xx_block_id *) request->cpdata;
- start_block->block = req->sector >> TAPEBLOCK_HSEC_S2B;
+ start_block->block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
DBF_EVENT(6, "start_block = %i\n", start_block->block);
ccw = request->cpaddr;
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 823b05bd0dd..c453b2f3e9f 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -633,7 +633,7 @@ tape_3590_bread(struct tape_device *device, struct request *req)
struct req_iterator iter;
DBF_EVENT(6, "xBREDid:");
- start_block = req->sector >> TAPEBLOCK_HSEC_S2B;
+ start_block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
DBF_EVENT(6, "start_block = %i\n", start_block);
rq_for_each_segment(bv, req, iter)
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index f32e89e7c4f..47ff695255e 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -74,13 +74,6 @@ tapeblock_trigger_requeue(struct tape_device *device)
* Post finished request.
*/
static void
-tapeblock_end_request(struct request *req, int error)
-{
- if (blk_end_request(req, error, blk_rq_bytes(req)))
- BUG();
-}
-
-static void
__tapeblock_end_request(struct tape_request *ccw_req, void *data)
{
struct tape_device *device;
@@ -90,17 +83,17 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data)
device = ccw_req->device;
req = (struct request *) data;
- tapeblock_end_request(req, (ccw_req->rc == 0) ? 0 : -EIO);
+ blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO);
if (ccw_req->rc == 0)
/* Update position. */
device->blk_data.block_position =
- (req->sector + req->nr_sectors) >> TAPEBLOCK_HSEC_S2B;
+ (blk_rq_pos(req) + blk_rq_sectors(req)) >> TAPEBLOCK_HSEC_S2B;
else
/* We lost the position information due to an error. */
device->blk_data.block_position = -1;
device->discipline->free_bread(ccw_req);
if (!list_empty(&device->req_queue) ||
- elv_next_request(device->blk_data.request_queue))
+ blk_peek_request(device->blk_data.request_queue))
tapeblock_trigger_requeue(device);
}
@@ -118,7 +111,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
ccw_req = device->discipline->bread(device, req);
if (IS_ERR(ccw_req)) {
DBF_EVENT(1, "TBLOCK: bread failed\n");
- tapeblock_end_request(req, -EIO);
+ blk_end_request_all(req, -EIO);
return PTR_ERR(ccw_req);
}
ccw_req->callback = __tapeblock_end_request;
@@ -131,7 +124,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
* Start/enqueueing failed. No retries in
* this case.
*/
- tapeblock_end_request(req, -EIO);
+ blk_end_request_all(req, -EIO);
device->discipline->free_bread(ccw_req);
}
@@ -169,19 +162,16 @@ tapeblock_requeue(struct work_struct *work) {
spin_lock_irq(&device->blk_data.request_queue_lock);
while (
!blk_queue_plugged(queue) &&
- elv_next_request(queue) &&
+ (req = blk_fetch_request(queue)) &&
nr_queued < TAPEBLOCK_MIN_REQUEUE
) {
- req = elv_next_request(queue);
if (rq_data_dir(req) == WRITE) {
DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
- blkdev_dequeue_request(req);
spin_unlock_irq(&device->blk_data.request_queue_lock);
- tapeblock_end_request(req, -EIO);
+ blk_end_request_all(req, -EIO);
spin_lock_irq(&device->blk_data.request_queue_lock);
continue;
}
- blkdev_dequeue_request(req);
nr_queued++;
spin_unlock_irq(&device->blk_data.request_queue_lock);
rc = tapeblock_start_request(device, req);
@@ -232,7 +222,7 @@ tapeblock_setup_device(struct tape_device * device)
if (rc)
goto cleanup_queue;
- blk_queue_hardsect_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
+ blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
blk_queue_max_phys_segments(blkdat->request_queue, -1L);
blk_queue_max_hw_segments(blkdat->request_queue, -1L);
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index a85ad05e854..6d465168468 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -186,31 +186,31 @@ static void jsfd_do_request(struct request_queue *q)
{
struct request *req;
- while ((req = elv_next_request(q)) != NULL) {
+ req = blk_fetch_request(q);
+ while (req) {
struct jsfd_part *jdp = req->rq_disk->private_data;
- unsigned long offset = req->sector << 9;
- size_t len = req->current_nr_sectors << 9;
+ unsigned long offset = blk_rq_pos(req) << 9;
+ size_t len = blk_rq_cur_bytes(req);
+ int err = -EIO;
- if ((offset + len) > jdp->dsize) {
- end_request(req, 0);
- continue;
- }
+ if ((offset + len) > jdp->dsize)
+ goto end;
if (rq_data_dir(req) != READ) {
printk(KERN_ERR "jsfd: write\n");
- end_request(req, 0);
- continue;
+ goto end;
}
if ((jdp->dbase & 0xff000000) != 0x20000000) {
printk(KERN_ERR "jsfd: bad base %x\n", (int)jdp->dbase);
- end_request(req, 0);
- continue;
+ goto end;
}
jsfd_read(req->buffer, jdp->dbase + offset, len);
-
- end_request(req, 1);
+ err = 0;
+ end:
+ if (!__blk_end_request_cur(req, err))
+ req = blk_fetch_request(q);
}
}
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index be5099dd94b..c7076ce25e2 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1825,7 +1825,7 @@ static int eata2x_queuecommand(struct scsi_cmnd *SCpnt,
if (linked_comm && SCpnt->device->queue_depth > 2
&& TLDEV(SCpnt->device->type)) {
ha->cp_stat[i] = READY;
- flush_dev(SCpnt->device, SCpnt->request->sector, ha, 0);
+ flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 0);
return 0;
}
@@ -2144,13 +2144,13 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
if (!cpp->din)
input_only = 0;
- if (SCpnt->request->sector < minsec)
- minsec = SCpnt->request->sector;
- if (SCpnt->request->sector > maxsec)
- maxsec = SCpnt->request->sector;
+ if (blk_rq_pos(SCpnt->request) < minsec)
+ minsec = blk_rq_pos(SCpnt->request);
+ if (blk_rq_pos(SCpnt->request) > maxsec)
+ maxsec = blk_rq_pos(SCpnt->request);
- sl[n] = SCpnt->request->sector;
- ioseek += SCpnt->request->nr_sectors;
+ sl[n] = blk_rq_pos(SCpnt->request);
+ ioseek += blk_rq_sectors(SCpnt->request);
if (!n)
continue;
@@ -2190,7 +2190,7 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
k = il[n];
cpp = &ha->cp[k];
SCpnt = cpp->SCpnt;
- ll[n] = SCpnt->request->nr_sectors;
+ ll[n] = blk_rq_sectors(SCpnt->request);
pl[n] = SCpnt->serial_number;
if (!n)
@@ -2236,12 +2236,12 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
cpp = &ha->cp[k];
SCpnt = cpp->SCpnt;
scmd_printk(KERN_INFO, SCpnt,
- "%s pid %ld mb %d fc %d nr %d sec %ld ns %ld"
+ "%s pid %ld mb %d fc %d nr %d sec %ld ns %u"
" cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
(ihdlr ? "ihdlr" : "qcomm"),
SCpnt->serial_number, k, flushcount,
- n_ready, SCpnt->request->sector,
- SCpnt->request->nr_sectors, cursec, YESNO(s),
+ n_ready, blk_rq_pos(SCpnt->request),
+ blk_rq_sectors(SCpnt->request), cursec, YESNO(s),
YESNO(r), YESNO(rev), YESNO(input_only),
YESNO(overlap), cpp->din);
}
@@ -2408,7 +2408,7 @@ static irqreturn_t ihdlr(struct Scsi_Host *shost)
if (linked_comm && SCpnt->device->queue_depth > 2
&& TLDEV(SCpnt->device->type))
- flush_dev(SCpnt->device, SCpnt->request->sector, ha, 1);
+ flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 1);
tstatus = status_byte(spp->target_status);
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 3da02e43678..54fa1e42dc4 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1927,21 +1927,21 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
/* do we need to support multiple segments? */
if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
printk("%s: multiple segments req %u %u, rsp %u %u\n",
- __func__, req->bio->bi_vcnt, req->data_len,
- rsp->bio->bi_vcnt, rsp->data_len);
+ __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
+ rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
return -EINVAL;
}
- ret = smp_execute_task(dev, bio_data(req->bio), req->data_len,
- bio_data(rsp->bio), rsp->data_len);
+ ret = smp_execute_task(dev, bio_data(req->bio), blk_rq_bytes(req),
+ bio_data(rsp->bio), blk_rq_bytes(rsp));
if (ret > 0) {
/* positive number is the untransferred residual */
- rsp->data_len = ret;
- req->data_len = 0;
+ rsp->resid_len = ret;
+ req->resid_len = 0;
ret = 0;
} else if (ret == 0) {
- rsp->data_len = 0;
- req->data_len = 0;
+ rsp->resid_len = 0;
+ req->resid_len = 0;
}
return ret;
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index d110a366c48..1bc3b756799 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -134,24 +134,24 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
{
u8 *req_data = NULL, *resp_data = NULL, *buf;
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
- int error = -EINVAL, resp_data_len = rsp->data_len;
+ int error = -EINVAL;
/* eight is the minimum size for request and response frames */
- if (req->data_len < 8 || rsp->data_len < 8)
+ if (blk_rq_bytes(req) < 8 || blk_rq_bytes(rsp) < 8)
goto out;
- if (bio_offset(req->bio) + req->data_len > PAGE_SIZE ||
- bio_offset(rsp->bio) + rsp->data_len > PAGE_SIZE) {
+ if (bio_offset(req->bio) + blk_rq_bytes(req) > PAGE_SIZE ||
+ bio_offset(rsp->bio) + blk_rq_bytes(rsp) > PAGE_SIZE) {
shost_printk(KERN_ERR, shost,
"SMP request/response frame crosses page boundary");
goto out;
}
- req_data = kzalloc(req->data_len, GFP_KERNEL);
+ req_data = kzalloc(blk_rq_bytes(req), GFP_KERNEL);
/* make sure frame can always be built ... we copy
* back only the requested length */
- resp_data = kzalloc(max(rsp->data_len, 128U), GFP_KERNEL);
+ resp_data = kzalloc(max(blk_rq_bytes(rsp), 128U), GFP_KERNEL);
if (!req_data || !resp_data) {
error = -ENOMEM;
@@ -160,7 +160,7 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
local_irq_disable();
buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio);
- memcpy(req_data, buf, req->data_len);
+ memcpy(req_data, buf, blk_rq_bytes(req));
kunmap_atomic(buf - bio_offset(req->bio), KM_USER0);
local_irq_enable();
@@ -178,15 +178,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
switch (req_data[1]) {
case SMP_REPORT_GENERAL:
- req->data_len -= 8;
- resp_data_len -= 32;
+ req->resid_len -= 8;
+ rsp->resid_len -= 32;
resp_data[2] = SMP_RESP_FUNC_ACC;
resp_data[9] = sas_ha->num_phys;
break;
case SMP_REPORT_MANUF_INFO:
- req->data_len -= 8;
- resp_data_len -= 64;
+ req->resid_len -= 8;
+ rsp->resid_len -= 64;
resp_data[2] = SMP_RESP_FUNC_ACC;
memcpy(resp_data + 12, shost->hostt->name,
SAS_EXPANDER_VENDOR_ID_LEN);
@@ -199,13 +199,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
break;
case SMP_DISCOVER:
- req->data_len -= 16;
- if ((int)req->data_len < 0) {
- req->data_len = 0;
+ req->resid_len -= 16;
+ if ((int)req->resid_len < 0) {
+ req->resid_len = 0;
error = -EINVAL;
goto out;
}
- resp_data_len -= 56;
+ rsp->resid_len -= 56;
sas_host_smp_discover(sas_ha, resp_data, req_data[9]);
break;
@@ -215,13 +215,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
break;
case SMP_REPORT_PHY_SATA:
- req->data_len -= 16;
- if ((int)req->data_len < 0) {
- req->data_len = 0;
+ req->resid_len -= 16;
+ if ((int)req->resid_len < 0) {
+ req->resid_len = 0;
error = -EINVAL;
goto out;
}
- resp_data_len -= 60;
+ rsp->resid_len -= 60;
sas_report_phy_sata(sas_ha, resp_data, req_data[9]);
break;
@@ -238,13 +238,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
break;
case SMP_PHY_CONTROL:
- req->data_len -= 44;
- if ((int)req->data_len < 0) {
- req->data_len = 0;
+ req->resid_len -= 44;
+ if ((int)req->resid_len < 0) {
+ req->resid_len = 0;
error = -EINVAL;
goto out;
}
- resp_data_len -= 8;
+ rsp->resid_len -= 8;
sas_phy_control(sas_ha, req_data[9], req_data[10],
req_data[32] >> 4, req_data[33] >> 4,
resp_data);
@@ -261,11 +261,10 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
local_irq_disable();
buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio);
- memcpy(buf, resp_data, rsp->data_len);
+ memcpy(buf, resp_data, blk_rq_bytes(rsp));
flush_kernel_dcache_page(bio_page(rsp->bio));
kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0);
local_irq_enable();
- rsp->data_len = resp_data_len;
out:
kfree(req_data);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 167b66dd34c..8032c5adb6a 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1312,10 +1312,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
uint32_t bgstat = bgf->bgstat;
uint64_t failing_sector = 0;
- printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx "
+ printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x "
"bgstat=0x%x bghm=0x%x\n",
cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
- cmd->request->nr_sectors, bgstat, bghm);
+ blk_rq_sectors(cmd->request), bgstat, bghm);
spin_lock(&_dump_buf_lock);
if (!_dump_buf_done) {
@@ -2378,15 +2378,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
if (cmnd->cmnd[0] == READ_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9035 BLKGRD: READ @ sector %llu, "
- "count %lu\n",
- (unsigned long long)scsi_get_lba(cmnd),
- cmnd->request->nr_sectors);
+ "count %u\n",
+ (unsigned long long)scsi_get_lba(cmnd),
+ blk_rq_sectors(cmnd->request));
else if (cmnd->cmnd[0] == WRITE_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9036 BLKGRD: WRITE @ sector %llu, "
- "count %lu cmd=%p\n",
+ "count %u cmd=%p\n",
(unsigned long long)scsi_get_lba(cmnd),
- cmnd->request->nr_sectors,
+ blk_rq_sectors(cmnd->request),
cmnd);
err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
@@ -2406,15 +2406,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
if (cmnd->cmnd[0] == READ_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9040 dbg: READ @ sector %llu, "
- "count %lu\n",
+ "count %u\n",
(unsigned long long)scsi_get_lba(cmnd),
- cmnd->request->nr_sectors);
+ blk_rq_sectors(cmnd->request));
else if (cmnd->cmnd[0] == WRITE_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9041 dbg: WRITE @ sector %llu, "
- "count %lu cmd=%p\n",
+ "count %u cmd=%p\n",
(unsigned long long)scsi_get_lba(cmnd),
- cmnd->request->nr_sectors, cmnd);
+ blk_rq_sectors(cmnd->request), cmnd);
else
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9042 dbg: parser not implemented\n");
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index e03dc0b1e1a..5c65da519e3 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -1041,7 +1041,7 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, "
"rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt,
- req->data_len, rsp->bio->bi_vcnt, rsp->data_len);
+ blk_rq_bytes(req), rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
return -EINVAL;
}
@@ -1104,7 +1104,7 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
*((u64 *)&mpi_request->SASAddress) = (rphy) ?
cpu_to_le64(rphy->identify.sas_address) :
cpu_to_le64(ioc->sas_hba.sas_address);
- mpi_request->RequestDataLength = cpu_to_le16(req->data_len - 4);
+ mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
psge = &mpi_request->SGL;
/* WRITE sgel first */
@@ -1112,13 +1112,13 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
- req->data_len, PCI_DMA_BIDIRECTIONAL);
+ blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
if (!dma_addr_out) {
mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
goto unmap;
}
- ioc->base_add_sg_single(psge, sgl_flags | (req->data_len - 4),
+ ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(req) - 4),
dma_addr_out);
/* incr sgel */
@@ -1129,14 +1129,14 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
MPI2_SGE_FLAGS_END_OF_LIST);
sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
- dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
- rsp->data_len, PCI_DMA_BIDIRECTIONAL);
+ dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
+ blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
if (!dma_addr_in) {
mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
goto unmap;
}
- ioc->base_add_sg_single(psge, sgl_flags | (rsp->data_len + 4),
+ ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(rsp) + 4),
dma_addr_in);
dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - "
@@ -1170,9 +1170,8 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
req->sense_len = sizeof(*mpi_reply);
- req->data_len = 0;
- rsp->data_len -= mpi_reply->ResponseDataLength;
-
+ req->resid_len = 0;
+ rsp->resid_len -= mpi_reply->ResponseDataLength;
} else {
dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT
"%s - no reply\n", ioc->name, __func__));
@@ -1188,10 +1187,10 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
unmap:
if (dma_addr_out)
- pci_unmap_single(ioc->pdev, dma_addr_out, req->data_len,
+ pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req),
PCI_DMA_BIDIRECTIONAL);
if (dma_addr_in)
- pci_unmap_single(ioc->pdev, dma_addr_in, rsp->data_len,
+ pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
PCI_DMA_BIDIRECTIONAL);
out:
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 1ce6b24abab..5776b2ab6b1 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -889,26 +889,6 @@ int osd_req_add_set_attr_list(struct osd_request *or,
}
EXPORT_SYMBOL(osd_req_add_set_attr_list);
-static int _append_map_kern(struct request *req,
- void *buff, unsigned len, gfp_t flags)
-{
- struct bio *bio;
- int ret;
-
- bio = bio_map_kern(req->q, buff, len, flags);
- if (IS_ERR(bio)) {
- OSD_ERR("Failed bio_map_kern(%p, %d) => %ld\n", buff, len,
- PTR_ERR(bio));
- return PTR_ERR(bio);
- }
- ret = blk_rq_append_bio(req->q, req, bio);
- if (ret) {
- OSD_ERR("Failed blk_rq_append_bio(%p) => %d\n", bio, ret);
- bio_put(bio);
- }
- return ret;
-}
-
static int _req_append_segment(struct osd_request *or,
unsigned padding, struct _osd_req_data_segment *seg,
struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
@@ -924,14 +904,14 @@ static int _req_append_segment(struct osd_request *or,
else
pad_buff = io->pad_buff;
- ret = _append_map_kern(io->req, pad_buff, padding,
+ ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
or->alloc_flags);
if (ret)
return ret;
io->total_bytes += padding;
}
- ret = _append_map_kern(io->req, seg->buff, seg->total_bytes,
+ ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
or->alloc_flags);
if (ret)
return ret;
@@ -1293,6 +1273,21 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
/*
* osd_finalize_request and helpers
*/
+static struct request *_make_request(struct request_queue *q, bool has_write,
+ struct _osd_io_info *oii, gfp_t flags)
+{
+ if (oii->bio)
+ return blk_make_request(q, oii->bio, flags);
+ else {
+ struct request *req;
+
+ req = blk_get_request(q, has_write ? WRITE : READ, flags);
+ if (unlikely(!req))
+ return ERR_PTR(-ENOMEM);
+
+ return req;
+ }
+}
static int _init_blk_request(struct osd_request *or,
bool has_in, bool has_out)
@@ -1301,11 +1296,13 @@ static int _init_blk_request(struct osd_request *or,
struct scsi_device *scsi_device = or->osd_dev->scsi_device;
struct request_queue *q = scsi_device->request_queue;
struct request *req;
- int ret = -ENOMEM;
+ int ret;
- req = blk_get_request(q, has_out, flags);
- if (!req)
+ req = _make_request(q, has_out, has_out ? &or->out : &or->in, flags);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
goto out;
+ }
or->request = req;
req->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -1318,9 +1315,10 @@ static int _init_blk_request(struct osd_request *or,
or->out.req = req;
if (has_in) {
/* allocate bidi request */
- req = blk_get_request(q, READ, flags);
- if (!req) {
+ req = _make_request(q, false, &or->in, flags);
+ if (IS_ERR(req)) {
OSD_DEBUG("blk_get_request for bidi failed\n");
+ ret = PTR_ERR(req);
goto out;
}
req->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -1364,26 +1362,6 @@ int osd_finalize_request(struct osd_request *or,
return ret;
}
- if (or->out.bio) {
- ret = blk_rq_append_bio(or->request->q, or->out.req,
- or->out.bio);
- if (ret) {
- OSD_DEBUG("blk_rq_append_bio out failed\n");
- return ret;
- }
- OSD_DEBUG("out bytes=%llu (bytes_req=%u)\n",
- _LLU(or->out.total_bytes), or->out.req->data_len);
- }
- if (or->in.bio) {
- ret = blk_rq_append_bio(or->request->q, or->in.req, or->in.bio);
- if (ret) {
- OSD_DEBUG("blk_rq_append_bio in failed\n");
- return ret;
- }
- OSD_DEBUG("in bytes=%llu (bytes_req=%u)\n",
- _LLU(or->in.total_bytes), or->in.req->data_len);
- }
-
or->out.pad_buff = sg_out_pad_buffer;
or->in.pad_buff = sg_in_pad_buffer;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index bb218c8b6e9..dd3f9d2b99f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -240,11 +240,11 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
* is invalid. Prevent the garbage from being misinterpreted
* and prevent security leaks by zeroing out the excess data.
*/
- if (unlikely(req->data_len > 0 && req->data_len <= bufflen))
- memset(buffer + (bufflen - req->data_len), 0, req->data_len);
+ if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
+ memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
if (resid)
- *resid = req->data_len;
+ *resid = req->resid_len;
ret = req->errors;
out:
blk_put_request(req);
@@ -546,14 +546,9 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
* to queue the remainder of them.
*/
if (blk_end_request(req, error, bytes)) {
- int leftover = (req->hard_nr_sectors << 9);
-
- if (blk_pc_request(req))
- leftover = req->data_len;
-
/* kill remainder if no retrys */
if (error && scsi_noretry_cmd(cmd))
- blk_end_request(req, error, leftover);
+ blk_end_request_all(req, error);
else {
if (requeue) {
/*
@@ -673,34 +668,6 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
EXPORT_SYMBOL(scsi_release_buffers);
/*
- * Bidi commands Must be complete as a whole, both sides at once.
- * If part of the bytes were written and lld returned
- * scsi_in()->resid and/or scsi_out()->resid this information will be left
- * in req->data_len and req->next_rq->data_len. The upper-layer driver can
- * decide what to do with this information.
- */
-static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
-{
- struct request *req = cmd->request;
- unsigned int dlen = req->data_len;
- unsigned int next_dlen = req->next_rq->data_len;
-
- req->data_len = scsi_out(cmd)->resid;
- req->next_rq->data_len = scsi_in(cmd)->resid;
-
- /* The req and req->next_rq have not been completed */
- BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));
-
- scsi_release_buffers(cmd);
-
- /*
- * This will goose the queue request function at the end, so we don't
- * need to worry about launching another command.
- */
- scsi_next_command(cmd);
-}
-
-/*
* Function: scsi_io_completion()
*
* Purpose: Completion processing for block device I/O requests.
@@ -739,7 +706,6 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
int result = cmd->result;
- int this_count;
struct request_queue *q = cmd->device->request_queue;
struct request *req = cmd->request;
int error = 0;
@@ -773,12 +739,22 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
if (!sense_deferred)
error = -EIO;
}
+
+ req->resid_len = scsi_get_resid(cmd);
+
if (scsi_bidi_cmnd(cmd)) {
- /* will also release_buffers */
- scsi_end_bidi_request(cmd);
+ /*
+ * Bidi commands Must be complete as a whole,
+ * both sides at once.
+ */
+ req->next_rq->resid_len = scsi_in(cmd)->resid;
+
+ blk_end_request_all(req, 0);
+
+ scsi_release_buffers(cmd);
+ scsi_next_command(cmd);
return;
}
- req->data_len = scsi_get_resid(cmd);
}
BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */
@@ -787,9 +763,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* Next deal with any sectors which we were able to correctly
* handle.
*/
- SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
+ SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
"%d bytes done.\n",
- req->nr_sectors, good_bytes));
+ blk_rq_sectors(req), good_bytes));
/*
* Recovered errors need reporting, but they're always treated
@@ -812,7 +788,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
*/
if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
return;
- this_count = blk_rq_bytes(req);
error = -EIO;
@@ -922,7 +897,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
if (driver_byte(result) & DRIVER_SENSE)
scsi_print_sense("", cmd);
}
- blk_end_request(req, -EIO, blk_rq_bytes(req));
+ blk_end_request_all(req, -EIO);
scsi_next_command(cmd);
break;
case ACTION_REPREP:
@@ -965,10 +940,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
BUG_ON(count > sdb->table.nents);
sdb->table.nents = count;
- if (blk_pc_request(req))
- sdb->length = req->data_len;
- else
- sdb->length = req->nr_sectors << 9;
+ sdb->length = blk_rq_bytes(req);
return BLKPREP_OK;
}
@@ -1087,22 +1059,21 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
if (unlikely(ret))
return ret;
} else {
- BUG_ON(req->data_len);
- BUG_ON(req->data);
+ BUG_ON(blk_rq_bytes(req));
memset(&cmd->sdb, 0, sizeof(cmd->sdb));
req->buffer = NULL;
}
cmd->cmd_len = req->cmd_len;
- if (!req->data_len)
+ if (!blk_rq_bytes(req))
cmd->sc_data_direction = DMA_NONE;
else if (rq_data_dir(req) == WRITE)
cmd->sc_data_direction = DMA_TO_DEVICE;
else
cmd->sc_data_direction = DMA_FROM_DEVICE;
- cmd->transfersize = req->data_len;
+ cmd->transfersize = blk_rq_bytes(req);
cmd->allowed = req->retries;
return BLKPREP_OK;
}
@@ -1212,7 +1183,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
break;
case BLKPREP_DEFER:
/*
- * If we defer, the elv_next_request() returns NULL, but the
+ * If we defer, the blk_peek_request() returns NULL, but the
* queue must be restarted, so we plug here if no returning
* command will automatically do that.
*/
@@ -1388,7 +1359,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
struct scsi_target *starget = scsi_target(sdev);
struct Scsi_Host *shost = sdev->host;
- blkdev_dequeue_request(req);
+ blk_start_request(req);
if (unlikely(cmd == NULL)) {
printk(KERN_CRIT "impossible request in %s.\n",
@@ -1480,7 +1451,7 @@ static void scsi_request_fn(struct request_queue *q)
if (!sdev) {
printk("scsi: killing requests for dead queue\n");
- while ((req = elv_next_request(q)) != NULL)
+ while ((req = blk_peek_request(q)) != NULL)
scsi_kill_request(req, q);
return;
}
@@ -1501,7 +1472,7 @@ static void scsi_request_fn(struct request_queue *q)
* that the request is fully prepared even if we cannot
* accept it.
*/
- req = elv_next_request(q);
+ req = blk_peek_request(q);
if (!req || !scsi_dev_queue_ready(q, sdev))
break;
@@ -1517,7 +1488,7 @@ static void scsi_request_fn(struct request_queue *q)
* Remove the request from the request list.
*/
if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
- blkdev_dequeue_request(req);
+ blk_start_request(req);
sdev->device_busy++;
spin_unlock(q->queue_lock);
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 48ba413f7f6..10303272ba4 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -387,7 +387,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
* we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the
* length for us.
*/
- cmd->sdb.length = rq->data_len;
+ cmd->sdb.length = blk_rq_bytes(rq);
return 0;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 50988cbf7b2..d606452297c 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -163,12 +163,10 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
while (!blk_queue_plugged(q)) {
- req = elv_next_request(q);
+ req = blk_fetch_request(q);
if (!req)
break;
- blkdev_dequeue_request(req);
-
spin_unlock_irq(q->queue_lock);
handler = to_sas_internal(shost->transportt)->f->smp_handler;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 84044233b63..bcf3bd40bbd 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -384,9 +384,9 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
struct scsi_device *sdp = q->queuedata;
struct gendisk *disk = rq->rq_disk;
struct scsi_disk *sdkp;
- sector_t block = rq->sector;
+ sector_t block = blk_rq_pos(rq);
sector_t threshold;
- unsigned int this_count = rq->nr_sectors;
+ unsigned int this_count = blk_rq_sectors(rq);
int ret, host_dif;
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -413,10 +413,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
this_count));
if (!sdp || !scsi_device_online(sdp) ||
- block + rq->nr_sectors > get_capacity(disk)) {
+ block + blk_rq_sectors(rq) > get_capacity(disk)) {
SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
- "Finishing %ld sectors\n",
- rq->nr_sectors));
+ "Finishing %u sectors\n",
+ blk_rq_sectors(rq)));
SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
"Retry with 0x%p\n", SCpnt));
goto out;
@@ -463,7 +463,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
* for this.
*/
if (sdp->sector_size == 1024) {
- if ((block & 1) || (rq->nr_sectors & 1)) {
+ if ((block & 1) || (blk_rq_sectors(rq) & 1)) {
scmd_printk(KERN_ERR, SCpnt,
"Bad block number requested\n");
goto out;
@@ -473,7 +473,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
}
}
if (sdp->sector_size == 2048) {
- if ((block & 3) || (rq->nr_sectors & 3)) {
+ if ((block & 3) || (blk_rq_sectors(rq) & 3)) {
scmd_printk(KERN_ERR, SCpnt,
"Bad block number requested\n");
goto out;
@@ -483,7 +483,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
}
}
if (sdp->sector_size == 4096) {
- if ((block & 7) || (rq->nr_sectors & 7)) {
+ if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
scmd_printk(KERN_ERR, SCpnt,
"Bad block number requested\n");
goto out;
@@ -512,10 +512,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
}
SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
- "%s %d/%ld 512 byte blocks.\n",
+ "%s %d/%u 512 byte blocks.\n",
(rq_data_dir(rq) == WRITE) ?
"writing" : "reading", this_count,
- rq->nr_sectors));
+ blk_rq_sectors(rq)));
/* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
@@ -971,8 +971,8 @@ static struct block_device_operations sd_fops = {
static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
{
- u64 start_lba = scmd->request->sector;
- u64 end_lba = scmd->request->sector + (scsi_bufflen(scmd) / 512);
+ u64 start_lba = blk_rq_pos(scmd->request);
+ u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
u64 bad_lba;
int info_valid;
@@ -1510,7 +1510,7 @@ got_data:
*/
sector_size = 512;
}
- blk_queue_hardsect_size(sdp->request_queue, sector_size);
+ blk_queue_logical_block_size(sdp->request_queue, sector_size);
{
char cap_str_2[10], cap_str_10[10];
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 184dff49279..82f14a9482d 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -507,7 +507,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
sector_sz = scmd->device->sector_size;
sectors = good_bytes / sector_sz;
- phys = scmd->request->sector & 0xffffffff;
+ phys = blk_rq_pos(scmd->request) & 0xffffffff;
if (sector_sz == 4096)
phys >>= 3;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index e1716f14cd4..8201387b4da 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -289,8 +289,8 @@ sg_open(struct inode *inode, struct file *filp)
if (list_empty(&sdp->sfds)) { /* no existing opens on this device */
sdp->sgdebug = 0;
q = sdp->device->request_queue;
- sdp->sg_tablesize = min(q->max_hw_segments,
- q->max_phys_segments);
+ sdp->sg_tablesize = min(queue_max_hw_segments(q),
+ queue_max_phys_segments(q));
}
if ((sfp = sg_add_sfp(sdp, dev)))
filp->private_data = sfp;
@@ -909,7 +909,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
if (val < 0)
return -EINVAL;
val = min_t(int, val,
- sdp->device->request_queue->max_sectors * 512);
+ queue_max_sectors(sdp->device->request_queue) * 512);
if (val != sfp->reserve.bufflen) {
if (sg_res_in_use(sfp) || sfp->mmap_called)
return -EBUSY;
@@ -919,7 +919,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
return 0;
case SG_GET_RESERVED_SIZE:
val = min_t(int, sfp->reserve.bufflen,
- sdp->device->request_queue->max_sectors * 512);
+ queue_max_sectors(sdp->device->request_queue) * 512);
return put_user(val, ip);
case SG_SET_COMMAND_Q:
result = get_user(val, ip);
@@ -1059,12 +1059,13 @@ sg_ioctl(struct inode *inode, struct file *filp,
return -ENODEV;
return scsi_ioctl(sdp->device, cmd_in, p);
case BLKSECTGET:
- return put_user(sdp->device->request_queue->max_sectors * 512,
+ return put_user(queue_max_sectors(sdp->device->request_queue) * 512,
ip);
case BLKTRACESETUP:
return blk_trace_setup(sdp->device->request_queue,
sdp->disk->disk_name,
MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
+ NULL,
(char *)arg);
case BLKTRACESTART:
return blk_trace_startstop(sdp->device->request_queue, 1);
@@ -1260,7 +1261,7 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
sense = rq->sense;
result = rq->errors;
- resid = rq->data_len;
+ resid = rq->resid_len;
SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
sdp->disk->disk_name, srp->header.pack_id, result));
@@ -1377,7 +1378,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
sdp->device = scsidp;
INIT_LIST_HEAD(&sdp->sfds);
init_waitqueue_head(&sdp->o_excl_wait);
- sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments);
+ sdp->sg_tablesize = min(queue_max_hw_segments(q),
+ queue_max_phys_segments(q));
sdp->index = k;
kref_init(&sdp->d_ref);
@@ -2055,7 +2057,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
sg_big_buff = def_reserved_size;
bufflen = min_t(int, sg_big_buff,
- sdp->device->request_queue->max_sectors * 512);
+ queue_max_sectors(sdp->device->request_queue) * 512);
sg_build_reserve(sfp, bufflen);
SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
sfp->reserve.bufflen, sfp->reserve.k_use_sg));
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0e1a0f2d2ad..cd350dfc121 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -292,7 +292,8 @@ static int sr_done(struct scsi_cmnd *SCpnt)
if (cd->device->sector_size == 2048)
error_sector <<= 2;
error_sector &= ~(block_sectors - 1);
- good_bytes = (error_sector - SCpnt->request->sector) << 9;
+ good_bytes = (error_sector -
+ blk_rq_pos(SCpnt->request)) << 9;
if (good_bytes < 0 || good_bytes >= this_count)
good_bytes = 0;
/*
@@ -349,8 +350,8 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
cd->disk->disk_name, block));
if (!cd->device || !scsi_device_online(cd->device)) {
- SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n",
- rq->nr_sectors));
+ SCSI_LOG_HLQUEUE(2, printk("Finishing %u sectors\n",
+ blk_rq_sectors(rq)));
SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
goto out;
}
@@ -413,7 +414,7 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
/*
* request doesn't start on hw block boundary, add scatter pads
*/
- if (((unsigned int)rq->sector % (s_size >> 9)) ||
+ if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) ||
(scsi_bufflen(SCpnt) % s_size)) {
scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n");
goto out;
@@ -422,14 +423,14 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9);
- SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n",
+ SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%u 512 byte blocks.\n",
cd->cdi.name,
(rq_data_dir(rq) == WRITE) ?
"writing" : "reading",
- this_count, rq->nr_sectors));
+ this_count, blk_rq_sectors(rq)));
SCpnt->cmnd[1] = 0;
- block = (unsigned int)rq->sector / (s_size >> 9);
+ block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9);
if (this_count > 0xffff) {
this_count = 0xffff;
@@ -726,7 +727,7 @@ static void get_sectorsize(struct scsi_cd *cd)
}
queue = cd->device->request_queue;
- blk_queue_hardsect_size(queue, sector_size);
+ blk_queue_logical_block_size(queue, sector_size);
return;
}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index eb24efea8f1..89bd438e1fe 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -463,7 +463,7 @@ static void st_scsi_execute_end(struct request *req, int uptodate)
struct scsi_tape *STp = SRpnt->stp;
STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
- STp->buffer->cmdstat.residual = req->data_len;
+ STp->buffer->cmdstat.residual = req->resid_len;
if (SRpnt->waiting)
complete(SRpnt->waiting);
@@ -3983,8 +3983,8 @@ static int st_probe(struct device *dev)
return -ENODEV;
}
- i = min(SDp->request_queue->max_hw_segments,
- SDp->request_queue->max_phys_segments);
+ i = min(queue_max_hw_segments(SDp->request_queue),
+ queue_max_phys_segments(SDp->request_queue));
if (st_max_sg_segs < i)
i = st_max_sg_segs;
buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 601e95141cb..54023d41fd1 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1306,7 +1306,7 @@ static int u14_34f_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scs
if (linked_comm && SCpnt->device->queue_depth > 2
&& TLDEV(SCpnt->device->type)) {
HD(j)->cp_stat[i] = READY;
- flush_dev(SCpnt->device, SCpnt->request->sector, j, FALSE);
+ flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, FALSE);
return 0;
}
@@ -1610,11 +1610,13 @@ static int reorder(unsigned int j, unsigned long cursec,
if (!(cpp->xdir == DTD_IN)) input_only = FALSE;
- if (SCpnt->request->sector < minsec) minsec = SCpnt->request->sector;
- if (SCpnt->request->sector > maxsec) maxsec = SCpnt->request->sector;
+ if (blk_rq_pos(SCpnt->request) < minsec)
+ minsec = blk_rq_pos(SCpnt->request);
+ if (blk_rq_pos(SCpnt->request) > maxsec)
+ maxsec = blk_rq_pos(SCpnt->request);
- sl[n] = SCpnt->request->sector;
- ioseek += SCpnt->request->nr_sectors;
+ sl[n] = blk_rq_pos(SCpnt->request);
+ ioseek += blk_rq_sectors(SCpnt->request);
if (!n) continue;
@@ -1642,7 +1644,7 @@ static int reorder(unsigned int j, unsigned long cursec,
if (!input_only) for (n = 0; n < n_ready; n++) {
k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
- ll[n] = SCpnt->request->nr_sectors; pl[n] = SCpnt->serial_number;
+ ll[n] = blk_rq_sectors(SCpnt->request); pl[n] = SCpnt->serial_number;
if (!n) continue;
@@ -1666,12 +1668,12 @@ static int reorder(unsigned int j, unsigned long cursec,
if (link_statistics && (overlap || !(flushcount % link_statistics)))
for (n = 0; n < n_ready; n++) {
k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
- printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %ld"\
+ printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %u"\
" cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
(ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready,
- SCpnt->request->sector, SCpnt->request->nr_sectors, cursec,
- YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
+ blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request),
+ cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
YESNO(overlap), cpp->xdir);
}
#endif
@@ -1799,7 +1801,7 @@ static irqreturn_t ihdlr(unsigned int j)
if (linked_comm && SCpnt->device->queue_depth > 2
&& TLDEV(SCpnt->device->type))
- flush_dev(SCpnt->device, SCpnt->request->sector, j, TRUE);
+ flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, TRUE);
tstatus = status_byte(spp->target_status);
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index b4b39811b44..fb867a9f55e 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -137,6 +137,7 @@ struct uart_8250_port {
unsigned char mcr;
unsigned char mcr_mask; /* mask of user bits */
unsigned char mcr_force; /* mask of forced bits */
+ unsigned char cur_iotype; /* Running I/O type */
/*
* Some bits in registers are cleared on a read, so they must
@@ -286,6 +287,13 @@ static const struct serial8250_config uart_config[] = {
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
.flags = UART_CAP_FIFO,
},
+ [PORT_AR7] = {
+ .name = "AR7",
+ .fifo_size = 16,
+ .tx_loadsz = 16,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00,
+ .flags = UART_CAP_FIFO | UART_CAP_AFE,
+ },
};
#if defined (CONFIG_SERIAL_8250_AU1X00)
@@ -471,6 +479,7 @@ static void io_serial_out(struct uart_port *p, int offset, int value)
static void set_io_from_upio(struct uart_port *p)
{
+ struct uart_8250_port *up = (struct uart_8250_port *)p;
switch (p->iotype) {
case UPIO_HUB6:
p->serial_in = hub6_serial_in;
@@ -509,6 +518,8 @@ static void set_io_from_upio(struct uart_port *p)
p->serial_out = io_serial_out;
break;
}
+ /* Remember loaded iotype */
+ up->cur_iotype = p->iotype;
}
static void
@@ -1937,6 +1948,9 @@ static int serial8250_startup(struct uart_port *port)
up->capabilities = uart_config[up->port.type].flags;
up->mcr = 0;
+ if (up->port.iotype != up->cur_iotype)
+ set_io_from_upio(port);
+
if (up->port.type == PORT_16C950) {
/* Wake up and initialize UART */
up->acr = 0;
@@ -2563,6 +2577,9 @@ static void serial8250_config_port(struct uart_port *port, int flags)
if (ret < 0)
probeflags &= ~PROBE_RSA;
+ if (up->port.iotype != up->cur_iotype)
+ set_io_from_upio(port);
+
if (flags & UART_CONFIG_TYPE)
autoconfig(up, probeflags);
if (up->port.type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
@@ -2671,6 +2688,11 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
{
int i;
+ for (i = 0; i < nr_uarts; i++) {
+ struct uart_8250_port *up = &serial8250_ports[i];
+ up->cur_iotype = 0xFF;
+ }
+
serial8250_isa_init_ports();
for (i = 0; i < nr_uarts; i++) {
diff --git a/drivers/serial/8250_gsc.c b/drivers/serial/8250_gsc.c
index 418b4fe9a0a..33149d982e8 100644
--- a/drivers/serial/8250_gsc.c
+++ b/drivers/serial/8250_gsc.c
@@ -39,9 +39,9 @@ static int __init serial_init_chip(struct parisc_device *dev)
*/
if (parisc_parent(dev)->id.hw_type != HPHW_IOA)
printk(KERN_INFO
- "Serial: device 0x%lx not configured.\n"
+ "Serial: device 0x%llx not configured.\n"
"Enable support for Wax, Lasi, Asp or Dino.\n",
- dev->hpa.start);
+ (unsigned long long)dev->hpa.start);
return -ENODEV;
}
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 938bc1b6c3f..e371a9c1534 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -2776,6 +2776,9 @@ static struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_OXSEMI, 0x950a,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b0_2_1130000 },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_C950,
+ PCI_VENDOR_ID_OXSEMI, PCI_SUBDEVICE_ID_OXSEMI_C950, 0, 0,
+ pbn_b0_1_921600 },
{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b0_4_115200 },
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 343e3a35b6a..641e800ed69 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -833,6 +833,7 @@ config SERIAL_IMX
bool "IMX serial port support"
depends on ARM && (ARCH_IMX || ARCH_MXC)
select SERIAL_CORE
+ select RATIONAL
help
If you have a machine based on a Motorola IMX CPU you
can enable its onboard serial port by enabling this option.
@@ -1433,4 +1434,11 @@ config SPORT_BAUD_RATE
default 19200 if (SERIAL_SPORT_BAUD_RATE_19200)
default 9600 if (SERIAL_SPORT_BAUD_RATE_9600)
+config SERIAL_TIMBERDALE
+ tristate "Support for timberdale UART"
+ depends on MFD_TIMBERDALE
+ select SERIAL_CORE
+ ---help---
+ Add support for UART controller on timberdale.
+
endmenu
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index d438eb2a73d..45a8658f54d 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -77,3 +77,4 @@ obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o
obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
+obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index d86123e0339..e2f6b1bfac9 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -330,6 +330,11 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
/* Clear TFI bit */
UART_PUT_LSR(uart, TFI);
#endif
+ /* Anomaly notes:
+ * 05000215 - we always clear ETBEI within the last UART TX
+ * interrupt to end a string. It is always set
+ * when starting a new TX.
+ */
UART_CLEAR_IER(uart, ETBEI);
return;
}
@@ -415,6 +420,7 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
set_dma_start_addr(uart->tx_dma_channel, (unsigned long)(xmit->buf+xmit->tail));
set_dma_x_count(uart->tx_dma_channel, uart->tx_count);
set_dma_x_modify(uart->tx_dma_channel, 1);
+ SSYNC();
enable_dma(uart->tx_dma_channel);
UART_SET_IER(uart, ETBEI);
@@ -473,27 +479,41 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
{
int x_pos, pos;
- unsigned long flags;
-
- spin_lock_irqsave(&uart->port.lock, flags);
+ dma_disable_irq(uart->rx_dma_channel);
+ spin_lock_bh(&uart->port.lock);
+
+ /* A 2D DMA RX buffer ring is used. Because curr_y_count and
+ * curr_x_count cannot be read as one atomic operation,
+ * curr_y_count should be read before curr_x_count. When
+ * curr_x_count is read, curr_y_count may already point to the
+ * next buffer line, while the position calculated here still
+ * refers to the old line. Such a stale position can be smaller
+ * than the current buffer tail and, if not rejected, would let
+ * garbage data be received.
+ */
uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
- if (uart->rx_dma_nrows == DMA_RX_YCOUNT)
+ if (uart->rx_dma_nrows == DMA_RX_YCOUNT || x_pos == 0)
uart->rx_dma_nrows = 0;
x_pos = DMA_RX_XCOUNT - x_pos;
if (x_pos == DMA_RX_XCOUNT)
x_pos = 0;
pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos;
- if (pos != uart->rx_dma_buf.tail) {
+ /* Ignore received data if the new position is in the same line
+ * as the current buffer tail but smaller than it.
+ */
+ if (pos > uart->rx_dma_buf.tail ||
+ uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) {
uart->rx_dma_buf.head = pos;
bfin_serial_dma_rx_chars(uart);
uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
}
- spin_unlock_irqrestore(&uart->port.lock, flags);
+ spin_unlock_bh(&uart->port.lock);
+ dma_enable_irq(uart->rx_dma_channel);
mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
}
@@ -514,6 +534,11 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id)
if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) {
disable_dma(uart->tx_dma_channel);
clear_dma_irqstat(uart->tx_dma_channel);
+ /* Anomaly notes:
+ * 05000215 - we always clear ETBEI within the last UART TX
+ * interrupt to end a string. It is always set
+ * when starting a new TX.
+ */
UART_CLEAR_IER(uart, ETBEI);
xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1);
uart->port.icount.tx += uart->tx_count;
@@ -532,11 +557,26 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id)
{
struct bfin_serial_port *uart = dev_id;
unsigned short irqstat;
+ int x_pos, pos;
spin_lock(&uart->port.lock);
irqstat = get_dma_curr_irqstat(uart->rx_dma_channel);
clear_dma_irqstat(uart->rx_dma_channel);
- bfin_serial_dma_rx_chars(uart);
+
+ uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
+ x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
+ uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
+ if (uart->rx_dma_nrows == DMA_RX_YCOUNT || x_pos == 0)
+ uart->rx_dma_nrows = 0;
+
+ pos = uart->rx_dma_nrows * DMA_RX_XCOUNT;
+ if (pos > uart->rx_dma_buf.tail ||
+ uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) {
+ uart->rx_dma_buf.head = pos;
+ bfin_serial_dma_rx_chars(uart);
+ uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
+ }
+
spin_unlock(&uart->port.lock);
return IRQ_HANDLED;
@@ -789,8 +829,16 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
__func__);
}
- if (termios->c_cflag & CSTOPB)
- lcr |= STB;
+ /* Anomaly notes:
+ * 05000231 - the STOP bit is always set to 1, whatever the user sets.
+ */
+ if (termios->c_cflag & CSTOPB) {
+ if (ANOMALY_05000231)
+ printk(KERN_WARNING "STOP bits other than 1 are not "
+ "supported due to anomaly 05000231.\n");
+ else
+ lcr |= STB;
+ }
if (termios->c_cflag & PARENB)
lcr |= PEN;
if (!(termios->c_cflag & PARODD))
@@ -940,6 +988,10 @@ static void bfin_serial_reset_irda(struct uart_port *port)
}
#ifdef CONFIG_CONSOLE_POLL
+/* Anomaly notes:
+ * 05000099 - Because we only use THRE in poll_put and DR in poll_get,
+ * losing other bits of UART_LSR is not a problem here.
+ */
static void bfin_serial_poll_put_char(struct uart_port *port, unsigned char chr)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
@@ -1245,12 +1297,17 @@ static __init void early_serial_write(struct console *con, const char *s,
}
}
+/*
+ * This should have a .setup or .early_setup in it, but then things get called
+ * without the command line options, and the baud rate gets messed up - so
+ * don't let the common infrastructure play with things. (see calls to setup
+ * & earlysetup in ./kernel/printk.c:register_console().)
+ */
static struct __initdata console bfin_early_serial_console = {
.name = "early_BFuart",
.write = early_serial_write,
.device = uart_console_device,
.flags = CON_PRINTBUFFER,
- .setup = bfin_serial_console_setup,
.index = -1,
.data = &bfin_serial_reg,
};
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c
index 529c0ff7952..34b4ae0fe76 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/serial/bfin_sport_uart.c
@@ -101,15 +101,16 @@ static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value)
{
pr_debug("%s value:%x\n", __func__, value);
/* Place a Start and Stop bit */
- __asm__ volatile (
- "R2 = b#01111111100;\n\t"
- "R3 = b#10000000001;\n\t"
- "%0 <<= 2;\n\t"
- "%0 = %0 & R2;\n\t"
- "%0 = %0 | R3;\n\t"
- :"=r"(value)
- :"0"(value)
- :"R2", "R3");
+ __asm__ __volatile__ (
+ "R2 = b#01111111100;"
+ "R3 = b#10000000001;"
+ "%0 <<= 2;"
+ "%0 = %0 & R2;"
+ "%0 = %0 | R3;"
+ : "=d"(value)
+ : "d"(value)
+ : "ASTAT", "R2", "R3"
+ );
pr_debug("%s value:%x\n", __func__, value);
SPORT_PUT_TX(up, value);
@@ -118,27 +119,30 @@ static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value)
static inline unsigned int rx_one_byte(struct sport_uart_port *up)
{
unsigned int value, extract;
+ u32 tmp_mask1, tmp_mask2, tmp_shift, tmp;
value = SPORT_GET_RX32(up);
pr_debug("%s value:%x\n", __func__, value);
/* Extract 8 bits data */
- __asm__ volatile (
- "R5 = 0;\n\t"
- "P0 = 8;\n\t"
- "R1 = 0x1801(Z);\n\t"
- "R3 = 0x0300(Z);\n\t"
- "R4 = 0;\n\t"
- "LSETUP(loop_s, loop_e) LC0 = P0;\nloop_s:\t"
- "R2 = extract(%1, R1.L)(Z);\n\t"
- "R2 <<= R4;\n\t"
- "R5 = R5 | R2;\n\t"
- "R1 = R1 - R3;\nloop_e:\t"
- "R4 += 1;\n\t"
- "%0 = R5;\n\t"
- :"=r"(extract)
- :"r"(value)
- :"P0", "R1", "R2","R3","R4", "R5");
+ __asm__ __volatile__ (
+ "%[extr] = 0;"
+ "%[mask1] = 0x1801(Z);"
+ "%[mask2] = 0x0300(Z);"
+ "%[shift] = 0;"
+ "LSETUP(.Lloop_s, .Lloop_e) LC0 = %[lc];"
+ ".Lloop_s:"
+ "%[tmp] = extract(%[val], %[mask1].L)(Z);"
+ "%[tmp] <<= %[shift];"
+ "%[extr] = %[extr] | %[tmp];"
+ "%[mask1] = %[mask1] - %[mask2];"
+ ".Lloop_e:"
+ "%[shift] += 1;"
+ : [val]"=d"(value), [extr]"=d"(extract), [shift]"=d"(tmp_shift), [tmp]"=d"(tmp),
+ [mask1]"=d"(tmp_mask1), [mask2]"=d"(tmp_mask2)
+ : "d"(value), [lc]"a"(8)
+ : "ASTAT", "LB0", "LC0", "LT0"
+ );
pr_debug(" extract:%x\n", extract);
return extract;
@@ -149,7 +153,7 @@ static int sport_uart_setup(struct sport_uart_port *up, int sclk, int baud_rate)
int tclkdiv, tfsdiv, rclkdiv;
/* Set TCR1 and TCR2 */
- SPORT_PUT_TCR1(up, (LTFS | ITFS | TFSR | TLSBIT | ITCLK));
+ SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK));
SPORT_PUT_TCR2(up, 10);
pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up));
@@ -419,7 +423,7 @@ static void sport_shutdown(struct uart_port *port)
}
static void sport_set_termios(struct uart_port *port,
- struct termios *termios, struct termios *old)
+ struct ktermios *termios, struct ktermios *old)
{
pr_debug("%s enter, c_cflag:%08x\n", __func__, termios->c_cflag);
uart_update_timeout(port, CS8 ,port->uartclk);
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index a461b3b2c72..9f2891c2c4a 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -137,7 +137,12 @@ static LIST_HEAD(icom_adapter_head);
static spinlock_t icom_lock;
#ifdef ICOM_TRACE
-static inline void trace(struct icom_port *, char *, unsigned long) {};
+static inline void trace(struct icom_port *icom_port, char *trace_pt,
+ unsigned long trace_data)
+{
+ dev_info(&icom_port->adapter->pci_dev->dev, ":%d:%s - %lx\n",
+ icom_port->port, trace_pt, trace_data);
+}
#else
static inline void trace(struct icom_port *icom_port, char *trace_pt, unsigned long trace_data) {};
#endif
@@ -408,7 +413,7 @@ static void load_code(struct icom_port *icom_port)
release_firmware(fw);
/* Set Hardware level */
- if ((icom_port->adapter->version | ADAPTER_V2) == ADAPTER_V2)
+ if (icom_port->adapter->version == ADAPTER_V2)
writeb(V2_HARDWARE, &(icom_port->dram->misc_flags));
/* Start the processor in Adapter */
@@ -861,7 +866,7 @@ static irqreturn_t icom_interrupt(int irq, void *dev_id)
/* find icom_port for this interrupt */
icom_adapter = (struct icom_adapter *) dev_id;
- if ((icom_adapter->version | ADAPTER_V2) == ADAPTER_V2) {
+ if (icom_adapter->version == ADAPTER_V2) {
int_reg = icom_adapter->base_addr + 0x8024;
adapter_interrupts = readl(int_reg);
@@ -1647,15 +1652,6 @@ static void __exit icom_exit(void)
module_init(icom_init);
module_exit(icom_exit);
-#ifdef ICOM_TRACE
-static inline void trace(struct icom_port *icom_port, char *trace_pt,
- unsigned long trace_data)
-{
- dev_info(&icom_port->adapter->pci_dev->dev, ":%d:%s - %lx\n",
- icom_port->port, trace_pt, trace_data);
-}
-#endif
-
MODULE_AUTHOR("Michael Anderson <mjanders@us.ibm.com>");
MODULE_DESCRIPTION("IBM iSeries Serial IOA driver");
MODULE_SUPPORTED_DEVICE
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index 9f460b175c5..7b5d1de9cfe 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -8,6 +8,9 @@
* Author: Sascha Hauer <sascha@saschahauer.de>
* Copyright (C) 2004 Pengutronix
*
+ * Copyright (C) 2009 emlix GmbH
+ * Author: Fabian Godehardt (added IrDA support for iMX)
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -41,6 +44,8 @@
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/rational.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -148,6 +153,7 @@
#define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */
#define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */
#define UFCR_RFDIV (7<<7) /* Reference freq divider mask */
+#define UFCR_RFDIV_REG(x) (((x) < 7 ? 6 - (x) : 6) << 7)
#define UFCR_TXTL_SHF 10 /* Transmitter trigger level shift */
#define USR1_PARITYERR (1<<15) /* Parity error interrupt flag */
#define USR1_RTSS (1<<14) /* RTS pin status */
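
As a quick sanity check of the new UFCR_RFDIV_REG() macro, a tiny standalone program; the macro is copied from the hunk above, the printed values are only illustrative.

/* Sketch: the i.MX UFCR RFDIV field as encoded by UFCR_RFDIV_REG().
 * A requested divide-by value of 1..6 becomes field value 6 - x,
 * and divide-by 7 becomes field value 6, placed at bits 7..9. */
#include <stdio.h>

#define UFCR_RFDIV_REG(x) (((x) < 7 ? 6 - (x) : 6) << 7)

int main(void)
{
	int div;

	for (div = 1; div <= 7; div++)
		printf("div %d -> UFCR bits 0x%03x\n", div, UFCR_RFDIV_REG(div));
	return 0;
}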
@@ -211,10 +217,20 @@ struct imx_port {
struct timer_list timer;
unsigned int old_status;
int txirq,rxirq,rtsirq;
- int have_rtscts:1;
+ unsigned int have_rtscts:1;
+ unsigned int use_irda:1;
+ unsigned int irda_inv_rx:1;
+ unsigned int irda_inv_tx:1;
+ unsigned short trcv_delay; /* transceiver delay */
struct clk *clk;
};
+#ifdef CONFIG_IRDA
+#define USE_IRDA(sport) ((sport)->use_irda)
+#else
+#define USE_IRDA(sport) (0)
+#endif
+
/*
* Handle any change of modem status signal since we were last called.
*/
@@ -268,6 +284,48 @@ static void imx_stop_tx(struct uart_port *port)
struct imx_port *sport = (struct imx_port *)port;
unsigned long temp;
+ if (USE_IRDA(sport)) {
+ /* half duplex - wait for end of transmission */
+ int n = 256;
+ while ((--n > 0) &&
+ !(readl(sport->port.membase + USR2) & USR2_TXDC)) {
+ udelay(5);
+ barrier();
+ }
+ /*
+ * irda transceiver - wait a bit more to avoid
+ * cutoff, hardware dependent
+ */
+ udelay(sport->trcv_delay);
+
+ /*
+ * half duplex - reactivate receive mode,
+ * flush receive pipe echo crap
+ */
+ if (readl(sport->port.membase + USR2) & USR2_TXDC) {
+ temp = readl(sport->port.membase + UCR1);
+ temp &= ~(UCR1_TXMPTYEN | UCR1_TRDYEN);
+ writel(temp, sport->port.membase + UCR1);
+
+ temp = readl(sport->port.membase + UCR4);
+ temp &= ~(UCR4_TCEN);
+ writel(temp, sport->port.membase + UCR4);
+
+ while (readl(sport->port.membase + URXD0) &
+ URXD_CHARRDY)
+ barrier();
+
+ temp = readl(sport->port.membase + UCR1);
+ temp |= UCR1_RRDYEN;
+ writel(temp, sport->port.membase + UCR1);
+
+ temp = readl(sport->port.membase + UCR4);
+ temp |= UCR4_DREN;
+ writel(temp, sport->port.membase + UCR4);
+ }
+ return;
+ }
+
temp = readl(sport->port.membase + UCR1);
writel(temp & ~UCR1_TXMPTYEN, sport->port.membase + UCR1);
}
@@ -302,13 +360,15 @@ static inline void imx_transmit_buffer(struct imx_port *sport)
/* send xmit->buf[xmit->tail]
* out the port here */
writel(xmit->buf[xmit->tail], sport->port.membase + URTX0);
- xmit->tail = (xmit->tail + 1) &
- (UART_XMIT_SIZE - 1);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
sport->port.icount.tx++;
if (uart_circ_empty(xmit))
break;
}
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&sport->port);
+
if (uart_circ_empty(xmit))
imx_stop_tx(&sport->port);
}
@@ -321,9 +381,30 @@ static void imx_start_tx(struct uart_port *port)
struct imx_port *sport = (struct imx_port *)port;
unsigned long temp;
+ if (USE_IRDA(sport)) {
+ /* half duplex in IrDA mode; have to disable receive mode */
+ temp = readl(sport->port.membase + UCR4);
+ temp &= ~(UCR4_DREN);
+ writel(temp, sport->port.membase + UCR4);
+
+ temp = readl(sport->port.membase + UCR1);
+ temp &= ~(UCR1_RRDYEN);
+ writel(temp, sport->port.membase + UCR1);
+ }
+
temp = readl(sport->port.membase + UCR1);
writel(temp | UCR1_TXMPTYEN, sport->port.membase + UCR1);
+ if (USE_IRDA(sport)) {
+ temp = readl(sport->port.membase + UCR1);
+ temp |= UCR1_TRDYEN;
+ writel(temp, sport->port.membase + UCR1);
+
+ temp = readl(sport->port.membase + UCR4);
+ temp |= UCR4_TCEN;
+ writel(temp, sport->port.membase + UCR4);
+ }
+
if (readl(sport->port.membase + UTS) & UTS_TXEMPTY)
imx_transmit_buffer(sport);
}
@@ -395,8 +476,7 @@ static irqreturn_t imx_rxint(int irq, void *dev_id)
continue;
}
- if (uart_handle_sysrq_char
- (&sport->port, (unsigned char)rx))
+ if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
continue;
if (rx & (URXD_PRERR | URXD_OVRRUN | URXD_FRMERR) ) {
@@ -471,26 +551,26 @@ static unsigned int imx_tx_empty(struct uart_port *port)
*/
static unsigned int imx_get_mctrl(struct uart_port *port)
{
- struct imx_port *sport = (struct imx_port *)port;
- unsigned int tmp = TIOCM_DSR | TIOCM_CAR;
+ struct imx_port *sport = (struct imx_port *)port;
+ unsigned int tmp = TIOCM_DSR | TIOCM_CAR;
- if (readl(sport->port.membase + USR1) & USR1_RTSS)
- tmp |= TIOCM_CTS;
+ if (readl(sport->port.membase + USR1) & USR1_RTSS)
+ tmp |= TIOCM_CTS;
- if (readl(sport->port.membase + UCR2) & UCR2_CTS)
- tmp |= TIOCM_RTS;
+ if (readl(sport->port.membase + UCR2) & UCR2_CTS)
+ tmp |= TIOCM_RTS;
- return tmp;
+ return tmp;
}
static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- struct imx_port *sport = (struct imx_port *)port;
+ struct imx_port *sport = (struct imx_port *)port;
unsigned long temp;
temp = readl(sport->port.membase + UCR2) & ~UCR2_CTS;
- if (mctrl & TIOCM_RTS)
+ if (mctrl & TIOCM_RTS)
temp |= UCR2_CTS;
writel(temp, sport->port.membase + UCR2);
@@ -534,12 +614,7 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
if(!ufcr_rfdiv)
ufcr_rfdiv = 1;
- if(ufcr_rfdiv >= 7)
- ufcr_rfdiv = 6;
- else
- ufcr_rfdiv = 6 - ufcr_rfdiv;
-
- val |= UFCR_RFDIV & (ufcr_rfdiv << 7);
+ val |= UFCR_RFDIV_REG(ufcr_rfdiv);
writel(val, sport->port.membase + UFCR);
@@ -558,8 +633,24 @@ static int imx_startup(struct uart_port *port)
* requesting IRQs
*/
temp = readl(sport->port.membase + UCR4);
+
+ if (USE_IRDA(sport))
+ temp |= UCR4_IRSC;
+
writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
+ if (USE_IRDA(sport)) {
+ /* reset fifo's and state machines */
+ int i = 100;
+ temp = readl(sport->port.membase + UCR2);
+ temp &= ~UCR2_SRST;
+ writel(temp, sport->port.membase + UCR2);
+ while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) &&
+ (--i > 0)) {
+ udelay(1);
+ }
+ }
+
/*
* Allocate the IRQ(s) i.MX1 has three interrupts whereas later
* chips only have one interrupt.
@@ -575,12 +666,16 @@ static int imx_startup(struct uart_port *port)
if (retval)
goto error_out2;
- retval = request_irq(sport->rtsirq, imx_rtsint,
- (sport->rtsirq < MAX_INTERNAL_IRQ) ? 0 :
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- DRIVER_NAME, sport);
- if (retval)
- goto error_out3;
+ /* do not use RTS IRQ on IrDA */
+ if (!USE_IRDA(sport)) {
+ retval = request_irq(sport->rtsirq, imx_rtsint,
+ (sport->rtsirq < MAX_INTERNAL_IRQ) ? 0 :
+ IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING,
+ DRIVER_NAME, sport);
+ if (retval)
+ goto error_out3;
+ }
} else {
retval = request_irq(sport->port.irq, imx_int, 0,
DRIVER_NAME, sport);
@@ -597,18 +692,49 @@ static int imx_startup(struct uart_port *port)
temp = readl(sport->port.membase + UCR1);
temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
+
+ if (USE_IRDA(sport)) {
+ temp |= UCR1_IREN;
+ temp &= ~(UCR1_RTSDEN);
+ }
+
writel(temp, sport->port.membase + UCR1);
temp = readl(sport->port.membase + UCR2);
temp |= (UCR2_RXEN | UCR2_TXEN);
writel(temp, sport->port.membase + UCR2);
+ if (USE_IRDA(sport)) {
+ /* clear RX-FIFO */
+ int i = 64;
+ while ((--i > 0) &&
+ (readl(sport->port.membase + URXD0) & URXD_CHARRDY)) {
+ barrier();
+ }
+ }
+
#if defined CONFIG_ARCH_MX2 || defined CONFIG_ARCH_MX3
temp = readl(sport->port.membase + UCR3);
temp |= UCR3_RXDMUXSEL;
writel(temp, sport->port.membase + UCR3);
#endif
+ if (USE_IRDA(sport)) {
+ temp = readl(sport->port.membase + UCR4);
+ if (sport->irda_inv_rx)
+ temp |= UCR4_INVR;
+ else
+ temp &= ~(UCR4_INVR);
+ writel(temp | UCR4_DREN, sport->port.membase + UCR4);
+
+ temp = readl(sport->port.membase + UCR3);
+ if (sport->irda_inv_tx)
+ temp |= UCR3_INVT;
+ else
+ temp &= ~(UCR3_INVT);
+ writel(temp, sport->port.membase + UCR3);
+ }
+
/*
* Enable modem status interrupts
*/
@@ -616,6 +742,16 @@ static int imx_startup(struct uart_port *port)
imx_enable_ms(&sport->port);
spin_unlock_irqrestore(&sport->port.lock,flags);
+ if (USE_IRDA(sport)) {
+ struct imxuart_platform_data *pdata;
+ pdata = sport->port.dev->platform_data;
+ sport->irda_inv_rx = pdata->irda_inv_rx;
+ sport->irda_inv_tx = pdata->irda_inv_tx;
+ sport->trcv_delay = pdata->transceiver_delay;
+ if (pdata->irda_enable)
+ pdata->irda_enable(1);
+ }
+
return 0;
error_out3:
@@ -633,6 +769,17 @@ static void imx_shutdown(struct uart_port *port)
struct imx_port *sport = (struct imx_port *)port;
unsigned long temp;
+ temp = readl(sport->port.membase + UCR2);
+ temp &= ~(UCR2_TXEN);
+ writel(temp, sport->port.membase + UCR2);
+
+ if (USE_IRDA(sport)) {
+ struct imxuart_platform_data *pdata;
+ pdata = sport->port.dev->platform_data;
+ if (pdata->irda_enable)
+ pdata->irda_enable(0);
+ }
+
/*
* Stop our timer.
*/
@@ -642,7 +789,8 @@ static void imx_shutdown(struct uart_port *port)
* Free the interrupts
*/
if (sport->txirq > 0) {
- free_irq(sport->rtsirq, sport);
+ if (!USE_IRDA(sport))
+ free_irq(sport->rtsirq, sport);
free_irq(sport->txirq, sport);
free_irq(sport->rxirq, sport);
} else
@@ -654,6 +802,9 @@ static void imx_shutdown(struct uart_port *port)
temp = readl(sport->port.membase + UCR1);
temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
+ if (USE_IRDA(sport))
+ temp &= ~(UCR1_IREN);
+
writel(temp, sport->port.membase + UCR1);
}
@@ -665,7 +816,9 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
unsigned long flags;
unsigned int ucr2, old_ucr1, old_txrxen, baud, quot;
unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
- unsigned int div, num, denom, ufcr;
+ unsigned int div, ufcr;
+ unsigned long num, denom;
+ uint64_t tdiv64;
/*
* If we don't support modem control lines, don't allow
@@ -761,38 +914,39 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
sport->port.membase + UCR2);
old_txrxen &= (UCR2_TXEN | UCR2_RXEN);
- div = sport->port.uartclk / (baud * 16);
- if (div > 7)
- div = 7;
- if (!div)
+ if (USE_IRDA(sport)) {
+ /*
+ * use maximum available submodule frequency to
+ * avoid missing short pulses due to low sampling rate
+ */
div = 1;
-
- num = baud;
- denom = port->uartclk / div / 16;
-
- /* shift num and denom right until they fit into 16 bits */
- while (num > 0x10000 || denom > 0x10000) {
- num >>= 1;
- denom >>= 1;
+ } else {
+ div = sport->port.uartclk / (baud * 16);
+ if (div > 7)
+ div = 7;
+ if (!div)
+ div = 1;
}
- if (num > 0)
- num -= 1;
- if (denom > 0)
- denom -= 1;
- writel(num, sport->port.membase + UBIR);
- writel(denom, sport->port.membase + UBMR);
+ rational_best_approximation(16 * div * baud, sport->port.uartclk,
+ 1 << 16, 1 << 16, &num, &denom);
- if (div == 7)
- div = 6; /* 6 in RFDIV means divide by 7 */
- else
- div = 6 - div;
+ tdiv64 = sport->port.uartclk;
+ tdiv64 *= num;
+ do_div(tdiv64, denom * 16 * div);
+ tty_encode_baud_rate(sport->port.info->port.tty,
+ (speed_t)tdiv64, (speed_t)tdiv64);
+
+ num -= 1;
+ denom -= 1;
ufcr = readl(sport->port.membase + UFCR);
- ufcr = (ufcr & (~UFCR_RFDIV)) |
- (div << 7);
+ ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div);
writel(ufcr, sport->port.membase + UFCR);
+ writel(num, sport->port.membase + UBIR);
+ writel(denom, sport->port.membase + UBMR);
+
#ifdef ONEMS
writel(sport->port.uartclk / div / 1000, sport->port.membase + ONEMS);
#endif
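
The baud-rate rework above leans on rational_best_approximation(); the following standalone sketch approximates the same num/denom search with a naive loop to show how UBIR/UBMR and the reported rate fall out. The clock and baud figures are made-up examples, and best_ratio() is not the kernel helper.

/* Hedged sketch, not lib/rational.c: pick num/denom (each <= 16 bits)
 * closest to (16 * div * baud) / uartclk, then show the UBIR/UBMR
 * programming and the effective baud rate as the hunk above computes it. */
#include <stdio.h>
#include <stdint.h>

static void best_ratio(uint32_t given_num, uint32_t given_den,
		       uint32_t max_n, uint32_t max_d,
		       uint32_t *best_n, uint32_t *best_d)
{
	double target = (double)given_num / given_den, best_err = 1e9;
	uint32_t d;

	for (d = 1; d <= max_d; d++) {
		uint32_t n = (uint32_t)(target * d + 0.5);
		double err;

		if (n == 0 || n > max_n)
			continue;
		err = target - (double)n / d;
		if (err < 0)
			err = -err;
		if (err < best_err) {
			best_err = err;
			*best_n = n;
			*best_d = d;
		}
	}
}

int main(void)
{
	uint32_t uartclk = 66500000, baud = 115200, div = 1, num = 1, denom = 1;
	uint64_t actual;

	best_ratio(16 * div * baud, uartclk, 1 << 16, 1 << 16, &num, &denom);
	actual = (uint64_t)uartclk * num / ((uint64_t)denom * 16 * div);
	printf("UBIR=%u UBMR=%u actual baud=%llu\n",
	       num - 1, denom - 1, (unsigned long long)actual);
	return 0;
}

In the driver itself the 64-bit division goes through do_div(), since kernel code cannot assume 64-bit division helpers on every architecture.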
@@ -1031,6 +1185,8 @@ imx_console_setup(struct console *co, char *options)
if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports))
co->index = 0;
sport = imx_ports[co->index];
+ if (sport == NULL)
+ return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -1070,22 +1226,22 @@ static struct uart_driver imx_reg = {
static int serial_imx_suspend(struct platform_device *dev, pm_message_t state)
{
- struct imx_port *sport = platform_get_drvdata(dev);
+ struct imx_port *sport = platform_get_drvdata(dev);
- if (sport)
- uart_suspend_port(&imx_reg, &sport->port);
+ if (sport)
+ uart_suspend_port(&imx_reg, &sport->port);
- return 0;
+ return 0;
}
static int serial_imx_resume(struct platform_device *dev)
{
- struct imx_port *sport = platform_get_drvdata(dev);
+ struct imx_port *sport = platform_get_drvdata(dev);
- if (sport)
- uart_resume_port(&imx_reg, &sport->port);
+ if (sport)
+ uart_resume_port(&imx_reg, &sport->port);
- return 0;
+ return 0;
}
static int serial_imx_probe(struct platform_device *pdev)
@@ -1141,19 +1297,29 @@ static int serial_imx_probe(struct platform_device *pdev)
imx_ports[pdev->id] = sport;
pdata = pdev->dev.platform_data;
- if(pdata && (pdata->flags & IMXUART_HAVE_RTSCTS))
+ if (pdata && (pdata->flags & IMXUART_HAVE_RTSCTS))
sport->have_rtscts = 1;
+#ifdef CONFIG_IRDA
+ if (pdata && (pdata->flags & IMXUART_IRDA))
+ sport->use_irda = 1;
+#endif
+
if (pdata->init) {
ret = pdata->init(pdev);
if (ret)
goto clkput;
}
- uart_add_one_port(&imx_reg, &sport->port);
+ ret = uart_add_one_port(&imx_reg, &sport->port);
+ if (ret)
+ goto deinit;
platform_set_drvdata(pdev, &sport->port);
return 0;
+deinit:
+ if (pdata->exit)
+ pdata->exit(pdev);
clkput:
clk_put(sport->clk);
clk_disable(sport->clk);
@@ -1191,13 +1357,13 @@ static int serial_imx_remove(struct platform_device *pdev)
}
static struct platform_driver serial_imx_driver = {
- .probe = serial_imx_probe,
- .remove = serial_imx_remove,
+ .probe = serial_imx_probe,
+ .remove = serial_imx_remove,
.suspend = serial_imx_suspend,
.resume = serial_imx_resume,
.driver = {
- .name = "imx-uart",
+ .name = "imx-uart",
.owner = THIS_MODULE,
},
};
diff --git a/drivers/serial/jsm/jsm.h b/drivers/serial/jsm/jsm.h
index c0a3e2734e2..4e5f3bde046 100644
--- a/drivers/serial/jsm/jsm.h
+++ b/drivers/serial/jsm/jsm.h
@@ -61,6 +61,7 @@ enum {
if ((DBG_##nlevel & jsm_debug)) \
dev_printk(KERN_##klevel, pdev->dev, fmt, ## args)
+#define MAXLINES 256
#define MAXPORTS 8
#define MAX_STOPS_SENT 5
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c
index 31496dc0a0d..107ce2e187b 100644
--- a/drivers/serial/jsm/jsm_tty.c
+++ b/drivers/serial/jsm/jsm_tty.c
@@ -33,6 +33,8 @@
#include "jsm.h"
+static DECLARE_BITMAP(linemap, MAXLINES);
+
static void jsm_carrier(struct jsm_channel *ch);
static inline int jsm_get_mstat(struct jsm_channel *ch)
@@ -433,6 +435,7 @@ int __devinit jsm_tty_init(struct jsm_board *brd)
int __devinit jsm_uart_port_init(struct jsm_board *brd)
{
int i;
+ unsigned int line;
struct jsm_channel *ch;
if (!brd)
@@ -459,9 +462,15 @@ int __devinit jsm_uart_port_init(struct jsm_board *brd)
brd->channels[i]->uart_port.membase = brd->re_map_membase;
brd->channels[i]->uart_port.fifosize = 16;
brd->channels[i]->uart_port.ops = &jsm_ops;
- brd->channels[i]->uart_port.line = brd->channels[i]->ch_portnum + brd->boardnum * 2;
+ line = find_first_zero_bit(linemap, MAXLINES);
+ if (line >= MAXLINES) {
+ printk(KERN_INFO "jsm: linemap is full, cannot add device\n");
+ continue;
+ } else
+ set_bit((int)line, linemap);
+ brd->channels[i]->uart_port.line = line;
if (uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port))
- printk(KERN_INFO "Added device failed\n");
+ printk(KERN_INFO "jsm: add device failed\n");
else
printk(KERN_INFO "Added device \n");
}
@@ -494,6 +503,7 @@ int jsm_remove_uart_port(struct jsm_board *brd)
ch = brd->channels[i];
+ clear_bit((int)(ch->uart_port.line), linemap);
uart_remove_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port);
}
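
The linemap change above is the usual lowest-free-index bitmap allocator; here is a hedged plain-C sketch of that pattern outside the kernel. alloc_line() and free_line() are illustrative names, not jsm functions.

/* Sketch of the line allocator used above, in plain C rather than the
 * kernel's DECLARE_BITMAP/find_first_zero_bit helpers.
 * MAXLINES matches the new jsm.h definition. */
#include <stdio.h>

#define MAXLINES 256

static unsigned char linemap[MAXLINES];	/* 0 = free, 1 = in use */

static int alloc_line(void)
{
	int i;

	for (i = 0; i < MAXLINES; i++)
		if (!linemap[i]) {
			linemap[i] = 1;
			return i;	/* lowest free line number */
		}
	return -1;			/* map full: registration fails */
}

static void free_line(int line)
{
	if (line >= 0 && line < MAXLINES)
		linemap[line] = 0;	/* released on port removal */
}

int main(void)
{
	int a = alloc_line(), b = alloc_line();

	free_line(a);
	printf("first=%d second=%d reused=%d\n", a, b, alloc_line());
	return 0;
}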
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 7f72f8ceaa6..b3feb6198d5 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -988,7 +988,7 @@ mpc52xx_console_setup(struct console *co, char *options)
pr_debug("mpc52xx_console_setup co=%p, co->index=%i, options=%s\n",
co, co->index, options);
- if ((co->index < 0) || (co->index > MPC52xx_PSC_MAXNUM)) {
+ if ((co->index < 0) || (co->index >= MPC52xx_PSC_MAXNUM)) {
pr_debug("PSC%x out of range\n", co->index);
return -EINVAL;
}
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index dbf5357a77b..a4cf1079b31 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -47,12 +47,17 @@
#include <linux/clk.h>
#include <linux/ctype.h>
#include <linux/err.h>
+#include <linux/list.h>
#ifdef CONFIG_SUPERH
#include <asm/clock.h>
#include <asm/sh_bios.h>
#endif
+#ifdef CONFIG_H8300
+#include <asm/gpio.h>
+#endif
+
#include "sh-sci.h"
struct sci_port {
@@ -75,14 +80,22 @@ struct sci_port {
int break_flag;
#ifdef CONFIG_HAVE_CLK
- /* Port clock */
- struct clk *clk;
+ /* Interface clock */
+ struct clk *iclk;
+ /* Data clock */
+ struct clk *dclk;
#endif
+ struct list_head node;
};
-#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
-static struct sci_port *serial_console_port;
+struct sh_sci_priv {
+ spinlock_t lock;
+ struct list_head ports;
+
+#ifdef CONFIG_HAVE_CLK
+ struct notifier_block clk_nb;
#endif
+};
/* Function prototypes */
static void sci_stop_tx(struct uart_port *port);
@@ -138,9 +151,8 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c)
status = sci_in(port, SCxSR);
} while (!(status & SCxSR_TDxE(port)));
- sci_in(port, SCxSR); /* Dummy read */
- sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
sci_out(port, SCxTDR, c);
+ sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
}
#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
@@ -159,12 +171,12 @@ static void h8300_sci_config(struct uart_port *port, unsigned int ctrl)
*mstpcrl &= ~mask;
}
-static inline void h8300_sci_enable(struct uart_port *port)
+static void h8300_sci_enable(struct uart_port *port)
{
h8300_sci_config(port, sci_enable);
}
-static inline void h8300_sci_disable(struct uart_port *port)
+static void h8300_sci_disable(struct uart_port *port)
{
h8300_sci_config(port, sci_disable);
}
@@ -611,7 +623,7 @@ static inline int sci_handle_breaks(struct uart_port *port)
int copied = 0;
unsigned short status = sci_in(port, SCxSR);
struct tty_struct *tty = port->info->port.tty;
- struct sci_port *s = &sci_ports[port->line];
+ struct sci_port *s = to_sci_port(port);
if (uart_handle_break(port))
return 0;
@@ -726,19 +738,43 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
static int sci_notifier(struct notifier_block *self,
unsigned long phase, void *p)
{
- int i;
+ struct sh_sci_priv *priv = container_of(self,
+ struct sh_sci_priv, clk_nb);
+ struct sci_port *sci_port;
+ unsigned long flags;
if ((phase == CPUFREQ_POSTCHANGE) ||
- (phase == CPUFREQ_RESUMECHANGE))
- for (i = 0; i < SCI_NPORTS; i++) {
- struct sci_port *s = &sci_ports[i];
- s->port.uartclk = clk_get_rate(s->clk);
- }
+ (phase == CPUFREQ_RESUMECHANGE)) {
+ spin_lock_irqsave(&priv->lock, flags);
+ list_for_each_entry(sci_port, &priv->ports, node)
+ sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
return NOTIFY_OK;
}
-static struct notifier_block sci_nb = { &sci_notifier, NULL, 0 };
+static void sci_clk_enable(struct uart_port *port)
+{
+ struct sci_port *sci_port = to_sci_port(port);
+
+ clk_enable(sci_port->dclk);
+ sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
+
+ if (sci_port->iclk)
+ clk_enable(sci_port->iclk);
+}
+
+static void sci_clk_disable(struct uart_port *port)
+{
+ struct sci_port *sci_port = to_sci_port(port);
+
+ if (sci_port->iclk)
+ clk_disable(sci_port->iclk);
+
+ clk_disable(sci_port->dclk);
+}
#endif
static int sci_request_irq(struct sci_port *port)
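
The sh-sci notifier rework replaces a fixed array scan with a per-device port list walked under a lock; a small standalone sketch of that pattern using pthreads. All names here are illustrative, not kernel API.

/* Sketch: refresh every registered port's cached clock rate from a
 * notifier-style callback, walking a private list under a lock, as the
 * hunk above now does.  Plain pthread/list code, not kernel code. */
#include <pthread.h>
#include <stdio.h>

struct port {
	unsigned long uartclk;
	struct port *next;
};

struct priv {
	pthread_mutex_t lock;
	struct port *ports;
};

static void clk_rate_changed(struct priv *priv, unsigned long new_rate)
{
	struct port *p;

	pthread_mutex_lock(&priv->lock);
	for (p = priv->ports; p; p = p->next)
		p->uartclk = new_rate;	/* refresh cached rate */
	pthread_mutex_unlock(&priv->lock);
}

int main(void)
{
	struct port a = { 0, NULL }, b = { 0, &a };
	struct priv priv = { PTHREAD_MUTEX_INITIALIZER, &b };

	clk_rate_changed(&priv, 33333333);
	printf("a=%lu b=%lu\n", a.uartclk, b.uartclk);
	return 0;
}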
@@ -865,15 +901,11 @@ static void sci_break_ctl(struct uart_port *port, int break_state)
static int sci_startup(struct uart_port *port)
{
- struct sci_port *s = &sci_ports[port->line];
+ struct sci_port *s = to_sci_port(port);
if (s->enable)
s->enable(port);
-#ifdef CONFIG_HAVE_CLK
- s->clk = clk_get(NULL, "module_clk");
-#endif
-
sci_request_irq(s);
sci_start_tx(port);
sci_start_rx(port, 1);
@@ -883,7 +915,7 @@ static int sci_startup(struct uart_port *port)
static void sci_shutdown(struct uart_port *port)
{
- struct sci_port *s = &sci_ports[port->line];
+ struct sci_port *s = to_sci_port(port);
sci_stop_rx(port);
sci_stop_tx(port);
@@ -891,11 +923,6 @@ static void sci_shutdown(struct uart_port *port)
if (s->disable)
s->disable(port);
-
-#ifdef CONFIG_HAVE_CLK
- clk_put(s->clk);
- s->clk = NULL;
-#endif
}
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
@@ -980,25 +1007,31 @@ static int sci_request_port(struct uart_port *port)
static void sci_config_port(struct uart_port *port, int flags)
{
- struct sci_port *s = &sci_ports[port->line];
+ struct sci_port *s = to_sci_port(port);
port->type = s->type;
- if (port->flags & UPF_IOREMAP && !port->membase) {
-#if defined(CONFIG_SUPERH64)
- port->mapbase = onchip_remap(SCIF_ADDR_SH5, 1024, "SCIF");
- port->membase = (void __iomem *)port->mapbase;
-#else
+ if (port->membase)
+ return;
+
+ if (port->flags & UPF_IOREMAP) {
port->membase = ioremap_nocache(port->mapbase, 0x40);
-#endif
- dev_err(port->dev, "can't remap port#%d\n", port->line);
+ if (!port->membase)
+ dev_err(port->dev, "can't remap port#%d\n", port->line);
+ } else {
+ /*
+ * For the simple (and majority of) cases where we don't
+ * need to do any remapping, just cast the cookie
+ * directly.
+ */
+ port->membase = (void __iomem *)port->mapbase;
}
}
static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
{
- struct sci_port *s = &sci_ports[port->line];
+ struct sci_port *s = to_sci_port(port);
if (ser->irq != s->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
return -EINVAL;
@@ -1032,63 +1065,60 @@ static struct uart_ops sci_uart_ops = {
#endif
};
-static void __init sci_init_ports(void)
+static void __devinit sci_init_single(struct platform_device *dev,
+ struct sci_port *sci_port,
+ unsigned int index,
+ struct plat_sci_port *p)
{
- static int first = 1;
- int i;
-
- if (!first)
- return;
-
- first = 0;
-
- for (i = 0; i < SCI_NPORTS; i++) {
- sci_ports[i].port.ops = &sci_uart_ops;
- sci_ports[i].port.iotype = UPIO_MEM;
- sci_ports[i].port.line = i;
- sci_ports[i].port.fifosize = 1;
+ sci_port->port.ops = &sci_uart_ops;
+ sci_port->port.iotype = UPIO_MEM;
+ sci_port->port.line = index;
+ sci_port->port.fifosize = 1;
#if defined(__H8300H__) || defined(__H8300S__)
#ifdef __H8300S__
- sci_ports[i].enable = h8300_sci_enable;
- sci_ports[i].disable = h8300_sci_disable;
+ sci_port->enable = h8300_sci_enable;
+ sci_port->disable = h8300_sci_disable;
#endif
- sci_ports[i].port.uartclk = CONFIG_CPU_CLOCK;
+ sci_port->port.uartclk = CONFIG_CPU_CLOCK;
#elif defined(CONFIG_HAVE_CLK)
- /*
- * XXX: We should use a proper SCI/SCIF clock
- */
- {
- struct clk *clk = clk_get(NULL, "module_clk");
- sci_ports[i].port.uartclk = clk_get_rate(clk);
- clk_put(clk);
- }
+ sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL;
+ sci_port->dclk = clk_get(&dev->dev, "peripheral_clk");
+ sci_port->enable = sci_clk_enable;
+ sci_port->disable = sci_clk_disable;
#else
#error "Need a valid uartclk"
#endif
- sci_ports[i].break_timer.data = (unsigned long)&sci_ports[i];
- sci_ports[i].break_timer.function = sci_break_timer;
+ sci_port->break_timer.data = (unsigned long)sci_port;
+ sci_port->break_timer.function = sci_break_timer;
+ init_timer(&sci_port->break_timer);
- init_timer(&sci_ports[i].break_timer);
- }
-}
-
-int __init early_sci_setup(struct uart_port *port)
-{
- if (unlikely(port->line > SCI_NPORTS))
- return -ENODEV;
+ sci_port->port.mapbase = p->mapbase;
+ sci_port->port.membase = p->membase;
- sci_init_ports();
+ sci_port->port.irq = p->irqs[SCIx_TXI_IRQ];
+ sci_port->port.flags = p->flags;
+ sci_port->port.dev = &dev->dev;
+ sci_port->type = sci_port->port.type = p->type;
- sci_ports[port->line].port.membase = port->membase;
- sci_ports[port->line].port.mapbase = port->mapbase;
- sci_ports[port->line].port.type = port->type;
+ memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
- return 0;
}
#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
+static struct tty_driver *serial_console_device(struct console *co, int *index)
+{
+ struct uart_driver *p = &sci_uart_driver;
+ *index = co->index;
+ return p->tty_driver;
+}
+
+static void serial_console_putchar(struct uart_port *port, int ch)
+{
+ sci_poll_put_char(port, ch);
+}
+
/*
* Print a string to the serial port trying not to disturb
* any possible real use of the port...
@@ -1096,25 +1126,27 @@ int __init early_sci_setup(struct uart_port *port)
static void serial_console_write(struct console *co, const char *s,
unsigned count)
{
- struct uart_port *port = &serial_console_port->port;
+ struct uart_port *port = co->data;
+ struct sci_port *sci_port = to_sci_port(port);
unsigned short bits;
- int i;
- for (i = 0; i < count; i++) {
- if (*s == 10)
- sci_poll_put_char(port, '\r');
+ if (sci_port->enable)
+ sci_port->enable(port);
- sci_poll_put_char(port, *s++);
- }
+ uart_console_write(port, s, count, serial_console_putchar);
/* wait until fifo is empty and last bit has been transmitted */
bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
while ((sci_in(port, SCxSR) & bits) != bits)
cpu_relax();
+
+ if (sci_port->disable)
+ sci_port->disable(port);
}
static int __init serial_console_setup(struct console *co, char *options)
{
+ struct sci_port *sci_port;
struct uart_port *port;
int baud = 115200;
int bits = 8;
@@ -1130,8 +1162,9 @@ static int __init serial_console_setup(struct console *co, char *options)
if (co->index >= SCI_NPORTS)
co->index = 0;
- serial_console_port = &sci_ports[co->index];
- port = &serial_console_port->port;
+ sci_port = &sci_ports[co->index];
+ port = &sci_port->port;
+ co->data = port;
/*
* Also need to check port->type, we don't actually have any
@@ -1141,21 +1174,11 @@ static int __init serial_console_setup(struct console *co, char *options)
*/
if (!port->type)
return -ENODEV;
- if (!port->membase || !port->mapbase)
- return -ENODEV;
-
- port->type = serial_console_port->type;
-
-#ifdef CONFIG_HAVE_CLK
- if (!serial_console_port->clk)
- serial_console_port->clk = clk_get(NULL, "module_clk");
-#endif
- if (port->flags & UPF_IOREMAP)
- sci_config_port(port, 0);
+ sci_config_port(port, 0);
- if (serial_console_port->enable)
- serial_console_port->enable(port);
+ if (sci_port->enable)
+ sci_port->enable(port);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -1166,22 +1189,21 @@ static int __init serial_console_setup(struct console *co, char *options)
if (ret == 0)
sci_stop_rx(port);
#endif
+ /* TODO: disable clock */
return ret;
}
static struct console serial_console = {
.name = "ttySC",
- .device = uart_console_device,
+ .device = serial_console_device,
.write = serial_console_write,
.setup = serial_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
- .data = &sci_uart_driver,
};
static int __init sci_console_init(void)
{
- sci_init_ports();
register_console(&serial_console);
return 0;
}
@@ -1207,6 +1229,61 @@ static struct uart_driver sci_uart_driver = {
.cons = SCI_CONSOLE,
};
+
+static int sci_remove(struct platform_device *dev)
+{
+ struct sh_sci_priv *priv = platform_get_drvdata(dev);
+ struct sci_port *p;
+ unsigned long flags;
+
+#ifdef CONFIG_HAVE_CLK
+ cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+
+ spin_lock_irqsave(&priv->lock, flags);
+ list_for_each_entry(p, &priv->ports, node)
+ uart_remove_one_port(&sci_uart_driver, &p->port);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ kfree(priv);
+ return 0;
+}
+
+static int __devinit sci_probe_single(struct platform_device *dev,
+ unsigned int index,
+ struct plat_sci_port *p,
+ struct sci_port *sciport)
+{
+ struct sh_sci_priv *priv = platform_get_drvdata(dev);
+ unsigned long flags;
+ int ret;
+
+ /* Sanity check */
+ if (unlikely(index >= SCI_NPORTS)) {
+ dev_notice(&dev->dev, "Attempting to register port "
+ "%d when only %d are available.\n",
+ index+1, SCI_NPORTS);
+ dev_notice(&dev->dev, "Consider bumping "
+ "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
+ return 0;
+ }
+
+ sci_init_single(dev, sciport, index, p);
+
+ ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&sciport->node);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ list_add(&sciport->node, &priv->ports);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
/*
* Register a set of serial devices attached to a platform device. The
* list is terminated with a zero flags entry, which means we expect
@@ -1216,57 +1293,34 @@ static struct uart_driver sci_uart_driver = {
static int __devinit sci_probe(struct platform_device *dev)
{
struct plat_sci_port *p = dev->dev.platform_data;
+ struct sh_sci_priv *priv;
int i, ret = -EINVAL;
- for (i = 0; p && p->flags != 0; p++, i++) {
- struct sci_port *sciport = &sci_ports[i];
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- /* Sanity check */
- if (unlikely(i == SCI_NPORTS)) {
- dev_notice(&dev->dev, "Attempting to register port "
- "%d when only %d are available.\n",
- i+1, SCI_NPORTS);
- dev_notice(&dev->dev, "Consider bumping "
- "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
- break;
- }
+ INIT_LIST_HEAD(&priv->ports);
+ spin_lock_init(&priv->lock);
+ platform_set_drvdata(dev, priv);
- sciport->port.mapbase = p->mapbase;
+#ifdef CONFIG_HAVE_CLK
+ priv->clk_nb.notifier_call = sci_notifier;
+ cpufreq_register_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
+#endif
- if (p->mapbase && !p->membase) {
- if (p->flags & UPF_IOREMAP) {
- p->membase = ioremap_nocache(p->mapbase, 0x40);
- if (IS_ERR(p->membase)) {
- ret = PTR_ERR(p->membase);
- goto err_unreg;
- }
- } else {
- /*
- * For the simple (and majority of) cases
- * where we don't need to do any remapping,
- * just cast the cookie directly.
- */
- p->membase = (void __iomem *)p->mapbase;
- }
+ if (dev->id != -1) {
+ ret = sci_probe_single(dev, dev->id, p, &sci_ports[dev->id]);
+ if (ret)
+ goto err_unreg;
+ } else {
+ for (i = 0; p && p->flags != 0; p++, i++) {
+ ret = sci_probe_single(dev, i, p, &sci_ports[i]);
+ if (ret)
+ goto err_unreg;
}
-
- sciport->port.membase = p->membase;
-
- sciport->port.irq = p->irqs[SCIx_TXI_IRQ];
- sciport->port.flags = p->flags;
- sciport->port.dev = &dev->dev;
-
- sciport->type = sciport->port.type = p->type;
-
- memcpy(&sciport->irqs, &p->irqs, sizeof(p->irqs));
-
- uart_add_one_port(&sci_uart_driver, &sciport->port);
}
-#ifdef CONFIG_HAVE_CLK
- cpufreq_register_notifier(&sci_nb, CPUFREQ_TRANSITION_NOTIFIER);
-#endif
-
#ifdef CONFIG_SH_STANDARD_BIOS
sh_bios_gdb_detach();
#endif
@@ -1274,50 +1328,36 @@ static int __devinit sci_probe(struct platform_device *dev)
return 0;
err_unreg:
- for (i = i - 1; i >= 0; i--)
- uart_remove_one_port(&sci_uart_driver, &sci_ports[i].port);
-
+ sci_remove(dev);
return ret;
}
-static int __devexit sci_remove(struct platform_device *dev)
-{
- int i;
-
-#ifdef CONFIG_HAVE_CLK
- cpufreq_unregister_notifier(&sci_nb, CPUFREQ_TRANSITION_NOTIFIER);
-#endif
-
- for (i = 0; i < SCI_NPORTS; i++)
- uart_remove_one_port(&sci_uart_driver, &sci_ports[i].port);
-
- return 0;
-}
-
static int sci_suspend(struct platform_device *dev, pm_message_t state)
{
- int i;
+ struct sh_sci_priv *priv = platform_get_drvdata(dev);
+ struct sci_port *p;
+ unsigned long flags;
- for (i = 0; i < SCI_NPORTS; i++) {
- struct sci_port *p = &sci_ports[i];
+ spin_lock_irqsave(&priv->lock, flags);
+ list_for_each_entry(p, &priv->ports, node)
+ uart_suspend_port(&sci_uart_driver, &p->port);
- if (p->type != PORT_UNKNOWN && p->port.dev == &dev->dev)
- uart_suspend_port(&sci_uart_driver, &p->port);
- }
+ spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
static int sci_resume(struct platform_device *dev)
{
- int i;
+ struct sh_sci_priv *priv = platform_get_drvdata(dev);
+ struct sci_port *p;
+ unsigned long flags;
- for (i = 0; i < SCI_NPORTS; i++) {
- struct sci_port *p = &sci_ports[i];
+ spin_lock_irqsave(&priv->lock, flags);
+ list_for_each_entry(p, &priv->ports, node)
+ uart_resume_port(&sci_uart_driver, &p->port);
- if (p->type != PORT_UNKNOWN && p->port.dev == &dev->dev)
- uart_resume_port(&sci_uart_driver, &p->port);
- }
+ spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
@@ -1339,8 +1379,6 @@ static int __init sci_init(void)
printk(banner);
- sci_init_ports();
-
ret = uart_register_driver(&sci_uart_driver);
if (likely(ret == 0)) {
ret = platform_driver_register(&sci_driver);
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index d0aa82d7fce..38072c15b84 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -91,6 +91,9 @@
# define SCSPTR5 0xa4050128
# define SCIF_ORER 0x0001 /* overrun error bit */
# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
+#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
+# define SCIF_ORER 0x0001 /* overrun error bit */
+# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
# define SCSPTR2 0xffe80020 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* overrun error bit */
@@ -314,7 +317,18 @@
} \
}
-#define CPU_SCIF_FNS(name, scif_offset, scif_size) \
+#ifdef CONFIG_H8300
+/* h8300 don't have SCIF */
+#define CPU_SCIF_FNS(name) \
+ static inline unsigned int sci_##name##_in(struct uart_port *port) \
+ { \
+ return 0; \
+ } \
+ static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \
+ { \
+ }
+#else
+#define CPU_SCIF_FNS(name, scif_offset, scif_size) \
static inline unsigned int sci_##name##_in(struct uart_port *port) \
{ \
SCI_IN(scif_size, scif_offset); \
@@ -323,6 +337,7 @@
{ \
SCI_OUT(scif_size, scif_offset, value); \
}
+#endif
#define CPU_SCI_FNS(name, sci_offset, sci_size) \
static inline unsigned int sci_##name##_in(struct uart_port* port) \
@@ -360,8 +375,10 @@
sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
h8_sci_offset, h8_sci_size) \
CPU_SCI_FNS(name, h8_sci_offset, h8_sci_size)
-#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
+#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
+ CPU_SCIF_FNS(name)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
+ defined(CONFIG_CPU_SUBTYPE_SH7724)
#define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size) \
CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size)
#define SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) \
@@ -390,7 +407,8 @@ SCIF_FNS(SCFDR, 0x1c, 16)
SCIF_FNS(SCxTDR, 0x20, 8)
SCIF_FNS(SCxRDR, 0x24, 8)
SCIF_FNS(SCLSR, 0x24, 16)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
+ defined(CONFIG_CPU_SUBTYPE_SH7724)
SCIx_FNS(SCSMR, 0x00, 16, 0x00, 16)
SCIx_FNS(SCBRR, 0x04, 8, 0x04, 8)
SCIx_FNS(SCSCR, 0x08, 16, 0x08, 16)
@@ -604,10 +622,21 @@ static inline int sci_rxd_in(struct uart_port *port)
return ctrl_inb(SCSPTR5) & 0x0008 ? 1 : 0; /* SCIF5 */
return 1;
}
+#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
+# define SCFSR 0x0010
+# define SCASSR 0x0014
+static inline int sci_rxd_in(struct uart_port *port)
+{
+ if (port->type == PORT_SCIF)
+ return ctrl_inw((port->mapbase + SCFSR)) & SCIF_BRK ? 1 : 0;
+ if (port->type == PORT_SCIFA)
+ return ctrl_inw((port->mapbase + SCASSR)) & SCIF_BRK ? 1 : 0;
+ return 1;
+}
#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
static inline int sci_rxd_in(struct uart_port *port)
{
- return sci_in(port, SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */
+ return sci_in(port, SCSPTR)&0x0001 ? 1 : 0; /* SCIF */
}
#elif defined(__H8300H__) || defined(__H8300S__)
static inline int sci_rxd_in(struct uart_port *port)
@@ -757,7 +786,8 @@ static inline int sci_rxd_in(struct uart_port *port)
defined(CONFIG_CPU_SUBTYPE_SH7720) || \
defined(CONFIG_CPU_SUBTYPE_SH7721)
#define SCBRR_VALUE(bps, clk) (((clk*2)+16*bps)/(32*bps)-1)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
+ defined(CONFIG_CPU_SUBTYPE_SH7724)
static inline int scbrr_calc(struct uart_port *port, int bps, int clk)
{
if (port->type == PORT_SCIF)
diff --git a/drivers/serial/timbuart.c b/drivers/serial/timbuart.c
new file mode 100644
index 00000000000..ac9e5d5f742
--- /dev/null
+++ b/drivers/serial/timbuart.c
@@ -0,0 +1,526 @@
+/*
+ * timbuart.c timberdale FPGA UART driver
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Timberdale FPGA UART
+ */
+
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/serial_core.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+
+#include "timbuart.h"
+
+struct timbuart_port {
+ struct uart_port port;
+ struct tasklet_struct tasklet;
+ int usedma;
+ u8 last_ier;
+ struct platform_device *dev;
+};
+
+static int baudrates[] = {9600, 19200, 38400, 57600, 115200, 230400, 460800,
+ 921600, 1843200, 3250000};
+
+static void timbuart_mctrl_check(struct uart_port *port, u8 isr, u8 *ier);
+
+static irqreturn_t timbuart_handleinterrupt(int irq, void *devid);
+
+static void timbuart_stop_rx(struct uart_port *port)
+{
+ /* spin lock held by upper layer, disable all RX interrupts */
+ u8 ier = ioread8(port->membase + TIMBUART_IER) & ~RXFLAGS;
+ iowrite8(ier, port->membase + TIMBUART_IER);
+}
+
+static void timbuart_stop_tx(struct uart_port *port)
+{
+ /* spinlock held by upper layer, disable TX interrupt */
+ u8 ier = ioread8(port->membase + TIMBUART_IER) & ~TXBAE;
+ iowrite8(ier, port->membase + TIMBUART_IER);
+}
+
+static void timbuart_start_tx(struct uart_port *port)
+{
+ struct timbuart_port *uart =
+ container_of(port, struct timbuart_port, port);
+
+ /* do not transfer anything here -> fire off the tasklet */
+ tasklet_schedule(&uart->tasklet);
+}
+
+static void timbuart_flush_buffer(struct uart_port *port)
+{
+ u8 ctl = ioread8(port->membase + TIMBUART_CTRL) | TIMBUART_CTRL_FLSHTX;
+
+ iowrite8(ctl, port->membase + TIMBUART_CTRL);
+ iowrite8(TXBF, port->membase + TIMBUART_ISR);
+}
+
+static void timbuart_rx_chars(struct uart_port *port)
+{
+ struct tty_struct *tty = port->info->port.tty;
+
+ while (ioread8(port->membase + TIMBUART_ISR) & RXDP) {
+ u8 ch = ioread8(port->membase + TIMBUART_RXFIFO);
+ port->icount.rx++;
+ tty_insert_flip_char(tty, ch, TTY_NORMAL);
+ }
+
+ spin_unlock(&port->lock);
+ tty_flip_buffer_push(port->info->port.tty);
+ spin_lock(&port->lock);
+
+ dev_dbg(port->dev, "%s - total read %d bytes\n",
+ __func__, port->icount.rx);
+}
+
+static void timbuart_tx_chars(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->info->xmit;
+
+ while (!(ioread8(port->membase + TIMBUART_ISR) & TXBF) &&
+ !uart_circ_empty(xmit)) {
+ iowrite8(xmit->buf[xmit->tail],
+ port->membase + TIMBUART_TXFIFO);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+
+ dev_dbg(port->dev,
+ "%s - total written %d bytes, CTL: %x, RTS: %x, baud: %x\n",
+ __func__,
+ port->icount.tx,
+ ioread8(port->membase + TIMBUART_CTRL),
+ port->mctrl & TIOCM_RTS,
+ ioread8(port->membase + TIMBUART_BAUDRATE));
+}
+
+static void timbuart_handle_tx_port(struct uart_port *port, u8 isr, u8 *ier)
+{
+ struct timbuart_port *uart =
+ container_of(port, struct timbuart_port, port);
+ struct circ_buf *xmit = &port->info->xmit;
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port))
+ return;
+
+ if (port->x_char)
+ return;
+
+ if (isr & TXFLAGS) {
+ timbuart_tx_chars(port);
+ /* clear all TX interrupts */
+ iowrite8(TXFLAGS, port->membase + TIMBUART_ISR);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+ } else
+ /* Re-enable any tx interrupt */
+ *ier |= uart->last_ier & TXFLAGS;
+
+ /* Enable the interrupt if there are chars in the transmit buffer,
+ * or if we delivered some bytes and want the almost-empty interrupt:
+ * we wake up the upper layer later, when we get that interrupt,
+ * to give it some time to go out...
+ */
+ if (!uart_circ_empty(xmit))
+ *ier |= TXBAE;
+
+ dev_dbg(port->dev, "%s - leaving\n", __func__);
+}
+
+void timbuart_handle_rx_port(struct uart_port *port, u8 isr, u8 *ier)
+{
+ if (isr & RXFLAGS) {
+ /* Some RX status is set */
+ if (isr & RXBF) {
+ u8 ctl = ioread8(port->membase + TIMBUART_CTRL) |
+ TIMBUART_CTRL_FLSHRX;
+ iowrite8(ctl, port->membase + TIMBUART_CTRL);
+ port->icount.overrun++;
+ } else if (isr & (RXDP))
+ timbuart_rx_chars(port);
+
+ /* ack all RX interrupts */
+ iowrite8(RXFLAGS, port->membase + TIMBUART_ISR);
+ }
+
+ /* always have the RX interrupts enabled */
+ *ier |= RXBAF | RXBF | RXTT;
+
+ dev_dbg(port->dev, "%s - leaving\n", __func__);
+}
+
+void timbuart_tasklet(unsigned long arg)
+{
+ struct timbuart_port *uart = (struct timbuart_port *)arg;
+ u8 isr, ier = 0;
+
+ spin_lock(&uart->port.lock);
+
+ isr = ioread8(uart->port.membase + TIMBUART_ISR);
+ dev_dbg(uart->port.dev, "%s ISR: %x\n", __func__, isr);
+
+ if (!uart->usedma)
+ timbuart_handle_tx_port(&uart->port, isr, &ier);
+
+ timbuart_mctrl_check(&uart->port, isr, &ier);
+
+ if (!uart->usedma)
+ timbuart_handle_rx_port(&uart->port, isr, &ier);
+
+ iowrite8(ier, uart->port.membase + TIMBUART_IER);
+
+ spin_unlock(&uart->port.lock);
+ dev_dbg(uart->port.dev, "%s leaving\n", __func__);
+}
+
+static unsigned int timbuart_tx_empty(struct uart_port *port)
+{
+ u8 isr = ioread8(port->membase + TIMBUART_ISR);
+
+ return (isr & TXBAE) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int timbuart_get_mctrl(struct uart_port *port)
+{
+ u8 cts = ioread8(port->membase + TIMBUART_CTRL);
+ dev_dbg(port->dev, "%s - cts %x\n", __func__, cts);
+
+ if (cts & TIMBUART_CTRL_CTS)
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+ else
+ return TIOCM_DSR | TIOCM_CAR;
+}
+
+static void timbuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ dev_dbg(port->dev, "%s - %x\n", __func__, mctrl);
+
+ if (mctrl & TIOCM_RTS)
+ iowrite8(TIMBUART_CTRL_RTS, port->membase + TIMBUART_CTRL);
+ else
+ iowrite8(0, port->membase + TIMBUART_CTRL);
+}
+
+static void timbuart_mctrl_check(struct uart_port *port, u8 isr, u8 *ier)
+{
+ unsigned int cts;
+
+ if (isr & CTS_DELTA) {
+ /* ack */
+ iowrite8(CTS_DELTA, port->membase + TIMBUART_ISR);
+ cts = timbuart_get_mctrl(port);
+ uart_handle_cts_change(port, cts & TIOCM_CTS);
+ wake_up_interruptible(&port->info->delta_msr_wait);
+ }
+
+ *ier |= CTS_DELTA;
+}
+
+static void timbuart_enable_ms(struct uart_port *port)
+{
+ /* N/A */
+}
+
+static void timbuart_break_ctl(struct uart_port *port, int ctl)
+{
+ /* N/A */
+}
+
+static int timbuart_startup(struct uart_port *port)
+{
+ struct timbuart_port *uart =
+ container_of(port, struct timbuart_port, port);
+
+ dev_dbg(port->dev, "%s\n", __func__);
+
+ iowrite8(TIMBUART_CTRL_FLSHRX, port->membase + TIMBUART_CTRL);
+ iowrite8(0xff, port->membase + TIMBUART_ISR);
+ /* Enable all but TX interrupts */
+ iowrite8(RXBAF | RXBF | RXTT | CTS_DELTA,
+ port->membase + TIMBUART_IER);
+
+ return request_irq(port->irq, timbuart_handleinterrupt, IRQF_SHARED,
+ "timb-uart", uart);
+}
+
+static void timbuart_shutdown(struct uart_port *port)
+{
+ struct timbuart_port *uart =
+ container_of(port, struct timbuart_port, port);
+ dev_dbg(port->dev, "%s\n", __func__);
+ free_irq(port->irq, uart);
+ iowrite8(0, port->membase + TIMBUART_IER);
+}
+
+static int get_bindex(int baud)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(baudrates); i++)
+ if (baud <= baudrates[i])
+ return i;
+
+ return -1;
+}
+
+static void timbuart_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned int baud;
+ short bindex;
+ unsigned long flags;
+
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
+ bindex = get_bindex(baud);
+ dev_dbg(port->dev, "%s - bindex %d\n", __func__, bindex);
+
+ if (bindex < 0)
+ bindex = 0;
+ baud = baudrates[bindex];
+
+ /* The serial layer calls into this once with old = NULL when
+ * setting up initially.
+ */
+ if (old)
+ tty_termios_copy_hw(termios, old);
+ tty_termios_encode_baud_rate(termios, baud, baud);
+
+ spin_lock_irqsave(&port->lock, flags);
+ iowrite8((u8)bindex, port->membase + TIMBUART_BAUDRATE);
+ uart_update_timeout(port, termios->c_cflag, baud);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *timbuart_type(struct uart_port *port)
+{
+ return port->type == PORT_UNKNOWN ? "timbuart" : NULL;
+}
+
+/* We do not request/release mappings of the registers here;
+ * currently that is done in the probe function.
+ */
+static void timbuart_release_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ int size =
+ resource_size(platform_get_resource(pdev, IORESOURCE_MEM, 0));
+
+ if (port->flags & UPF_IOREMAP) {
+ iounmap(port->membase);
+ port->membase = NULL;
+ }
+
+ release_mem_region(port->mapbase, size);
+}
+
+static int timbuart_request_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ int size =
+ resource_size(platform_get_resource(pdev, IORESOURCE_MEM, 0));
+
+ if (!request_mem_region(port->mapbase, size, "timb-uart"))
+ return -EBUSY;
+
+ if (port->flags & UPF_IOREMAP) {
+ port->membase = ioremap(port->mapbase, size);
+ if (port->membase == NULL) {
+ release_mem_region(port->mapbase, size);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static irqreturn_t timbuart_handleinterrupt(int irq, void *devid)
+{
+ struct timbuart_port *uart = (struct timbuart_port *)devid;
+
+ if (ioread8(uart->port.membase + TIMBUART_IPR)) {
+ uart->last_ier = ioread8(uart->port.membase + TIMBUART_IER);
+
+ /* disable interrupts, the tasklet enables them again */
+ iowrite8(0, uart->port.membase + TIMBUART_IER);
+
+ /* fire off bottom half */
+ tasklet_schedule(&uart->tasklet);
+
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
+}
+
+/*
+ * Configure/autoconfigure the port.
+ */
+static void timbuart_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = PORT_TIMBUART;
+ timbuart_request_port(port);
+ }
+}
+
+static int timbuart_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ /* we don't want the core code to modify any port params */
+ return -EINVAL;
+}
+
+static struct uart_ops timbuart_ops = {
+ .tx_empty = timbuart_tx_empty,
+ .set_mctrl = timbuart_set_mctrl,
+ .get_mctrl = timbuart_get_mctrl,
+ .stop_tx = timbuart_stop_tx,
+ .start_tx = timbuart_start_tx,
+ .flush_buffer = timbuart_flush_buffer,
+ .stop_rx = timbuart_stop_rx,
+ .enable_ms = timbuart_enable_ms,
+ .break_ctl = timbuart_break_ctl,
+ .startup = timbuart_startup,
+ .shutdown = timbuart_shutdown,
+ .set_termios = timbuart_set_termios,
+ .type = timbuart_type,
+ .release_port = timbuart_release_port,
+ .request_port = timbuart_request_port,
+ .config_port = timbuart_config_port,
+ .verify_port = timbuart_verify_port
+};
+
+static struct uart_driver timbuart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "timberdale_uart",
+ .dev_name = "ttyTU",
+ .major = TIMBUART_MAJOR,
+ .minor = TIMBUART_MINOR,
+ .nr = 1
+};
+
+static int timbuart_probe(struct platform_device *dev)
+{
+ int err;
+ struct timbuart_port *uart;
+ struct resource *iomem;
+
+ dev_dbg(&dev->dev, "%s\n", __func__);
+
+ uart = kzalloc(sizeof(*uart), GFP_KERNEL);
+ if (!uart) {
+ err = -ENOMEM;
+ goto err_mem;
+ }
+
+ uart->usedma = 0;
+
+ uart->port.uartclk = 3250000 * 16;
+ uart->port.fifosize = TIMBUART_FIFO_SIZE;
+ uart->port.regshift = 2;
+ uart->port.iotype = UPIO_MEM;
+ uart->port.ops = &timbuart_ops;
+ uart->port.irq = 0;
+ uart->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
+ uart->port.line = 0;
+ uart->port.dev = &dev->dev;
+
+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!iomem) {
+ err = -ENOMEM;
+ goto err_register;
+ }
+ uart->port.mapbase = iomem->start;
+ uart->port.membase = NULL;
+
+ uart->port.irq = platform_get_irq(dev, 0);
+ if (uart->port.irq < 0) {
+ err = -EINVAL;
+ goto err_register;
+ }
+
+ tasklet_init(&uart->tasklet, timbuart_tasklet, (unsigned long)uart);
+
+ err = uart_register_driver(&timbuart_driver);
+ if (err)
+ goto err_register;
+
+ err = uart_add_one_port(&timbuart_driver, &uart->port);
+ if (err)
+ goto err_add_port;
+
+ platform_set_drvdata(dev, uart);
+
+ return 0;
+
+err_add_port:
+ uart_unregister_driver(&timbuart_driver);
+err_register:
+ kfree(uart);
+err_mem:
+ printk(KERN_ERR "timberdale: Failed to register Timberdale UART: %d\n",
+ err);
+
+ return err;
+}
+
+static int timbuart_remove(struct platform_device *dev)
+{
+ struct timbuart_port *uart = platform_get_drvdata(dev);
+
+ tasklet_kill(&uart->tasklet);
+ uart_remove_one_port(&timbuart_driver, &uart->port);
+ uart_unregister_driver(&timbuart_driver);
+ kfree(uart);
+
+ return 0;
+}
+
+static struct platform_driver timbuart_platform_driver = {
+ .driver = {
+ .name = "timb-uart",
+ .owner = THIS_MODULE,
+ },
+ .probe = timbuart_probe,
+ .remove = timbuart_remove,
+};
+
+/*--------------------------------------------------------------------------*/
+
+static int __init timbuart_init(void)
+{
+ return platform_driver_register(&timbuart_platform_driver);
+}
+
+static void __exit timbuart_exit(void)
+{
+ platform_driver_unregister(&timbuart_platform_driver);
+}
+
+module_init(timbuart_init);
+module_exit(timbuart_exit);
+
+MODULE_DESCRIPTION("Timberdale UART driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:timb-uart");
+
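The interrupt handler in the new driver above masks the UART's interrupt-enable register and defers the actual FIFO work to a tasklet, which re-enables interrupts when it is done. The fragment below is a minimal, stand-alone sketch of that top-half/bottom-half split; the my_uart structure and the MY_IER/MY_IPR offsets are hypothetical stand-ins, and only the tasklet and IRQ-handler API usage mirrors the driver.

/* Sketch only: hypothetical registers, real <linux/interrupt.h> tasklet API */
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/types.h>

#define MY_IER	0x10	/* hypothetical interrupt-enable register offset */
#define MY_IPR	0x14	/* hypothetical interrupt-pending register offset */

struct my_uart {
	void __iomem *membase;
	u8 last_ier;
	struct tasklet_struct tasklet;
};

/* bottom half: runs in softirq context, does the slow FIFO work, then unmasks */
static void my_uart_tasklet(unsigned long data)
{
	struct my_uart *u = (struct my_uart *)data;

	/* ... drain the RX FIFO, refill the TX FIFO ... */

	iowrite8(u->last_ier, u->membase + MY_IER);
}

/* top half: note the pending interrupt, mask, and defer to the tasklet */
static irqreturn_t my_uart_isr(int irq, void *dev_id)
{
	struct my_uart *u = dev_id;

	if (!ioread8(u->membase + MY_IPR))
		return IRQ_NONE;	/* not ours on a shared line */

	u->last_ier = ioread8(u->membase + MY_IER);
	iowrite8(0, u->membase + MY_IER);	/* masked until the tasklet runs */
	tasklet_schedule(&u->tasklet);
	return IRQ_HANDLED;
}

As in the probe routine above, the tasklet would be set up once with tasklet_init(&u->tasklet, my_uart_tasklet, (unsigned long)u) before the interrupt line is requested.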
diff --git a/drivers/serial/timbuart.h b/drivers/serial/timbuart.h
new file mode 100644
index 00000000000..7e566766bc4
--- /dev/null
+++ b/drivers/serial/timbuart.h
@@ -0,0 +1,58 @@
+/*
+ * timbuart.h timberdale FPGA UART driver
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Timberdale FPGA UART
+ */
+
+#ifndef _TIMBUART_H
+#define _TIMBUART_H
+
+#define TIMBUART_FIFO_SIZE 2048
+
+#define TIMBUART_RXFIFO 0x08
+#define TIMBUART_TXFIFO 0x0c
+#define TIMBUART_IER 0x10
+#define TIMBUART_IPR 0x14
+#define TIMBUART_ISR 0x18
+#define TIMBUART_CTRL 0x1c
+#define TIMBUART_BAUDRATE 0x20
+
+#define TIMBUART_CTRL_RTS 0x01
+#define TIMBUART_CTRL_CTS 0x02
+#define TIMBUART_CTRL_FLSHTX 0x40
+#define TIMBUART_CTRL_FLSHRX 0x80
+
+#define TXBF 0x01
+#define TXBAE 0x02
+#define CTS_DELTA 0x04
+#define RXDP 0x08
+#define RXBAF 0x10
+#define RXBF 0x20
+#define RXTT 0x40
+#define RXBNAE 0x80
+#define TXBE 0x100
+
+#define RXFLAGS (RXDP | RXBAF | RXBF | RXTT | RXBNAE)
+#define TXFLAGS (TXBF | TXBAE)
+
+#define TIMBUART_MAJOR 204
+#define TIMBUART_MINOR 192
+
+#endif /* _TIMBUART_H */
+
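The header groups the status bits into RXFLAGS and TXFLAGS so a handler can dispatch on whole classes of events at once. Below is a small sketch of that dispatch, assuming the header above is included and that the status word has already been read from TIMBUART_ISR; the read path and register width are assumptions, not taken from the driver.

/* Sketch: split a Timberdale-style status word using the masks above */
#include <linux/types.h>
#include "timbuart.h"	/* RXFLAGS, TXFLAGS, CTS_DELTA */

static void my_handle_status(u32 isr)
{
	if (isr & RXFLAGS) {
		/* receive events: data present, almost full, full, timeout */
	}
	if (isr & TXFLAGS) {
		/* transmit events: buffer full / buffer almost empty */
	}
	if (isr & CTS_DELTA) {
		/* modem-status change: report the CTS transition upward */
	}
}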
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index 12d13d99b6f..d687a9b93d0 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -24,6 +24,7 @@
#include <linux/sh_intc.h>
#include <linux/sysdev.h>
#include <linux/list.h>
+#include <linux/topology.h>
#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
@@ -770,11 +771,19 @@ void __init register_intc_controller(struct intc_desc *desc)
/* register the vectors one by one */
for (i = 0; i < desc->nr_vectors; i++) {
struct intc_vect *vect = desc->vectors + i;
+ unsigned int irq = evt2irq(vect->vect);
+ struct irq_desc *irq_desc;
if (!vect->enum_id)
continue;
- intc_register_irq(desc, d, vect->enum_id, evt2irq(vect->vect));
+ irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
+ if (unlikely(!irq_desc)) {
+ printk(KERN_INFO "cannot get irq_desc for %d\n", irq);
+ continue;
+ }
+
+ intc_register_irq(desc, d, vect->enum_id, irq);
}
}
diff --git a/drivers/ssb/embedded.c b/drivers/ssb/embedded.c
index 7dc3a6b4139..a0e0d246b59 100644
--- a/drivers/ssb/embedded.c
+++ b/drivers/ssb/embedded.c
@@ -29,6 +29,7 @@ int ssb_watchdog_timer_set(struct ssb_bus *bus, u32 ticks)
}
return -ENODEV;
}
+EXPORT_SYMBOL(ssb_watchdog_timer_set);
u32 ssb_gpio_in(struct ssb_bus *bus, u32 mask)
{
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 0716cdb44cd..0a3dc5ece63 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -11,7 +11,6 @@ obj-$(CONFIG_USB_MON) += mon/
obj-$(CONFIG_PCI) += host/
obj-$(CONFIG_USB_EHCI_HCD) += host/
obj-$(CONFIG_USB_ISP116X_HCD) += host/
-obj-$(CONFIG_USB_ISP1760_HCD) += host/
obj-$(CONFIG_USB_OHCI_HCD) += host/
obj-$(CONFIG_USB_UHCI_HCD) += host/
obj-$(CONFIG_USB_FHCI_HCD) += host/
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 0a69c0977e3..ddeb6919253 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -16,7 +16,8 @@
* v0.9 - thorough cleaning, URBification, almost a rewrite
* v0.10 - some more cleanups
* v0.11 - fixed flow control, read error doesn't stop reads
- * v0.12 - added TIOCM ioctls, added break handling, made struct acm kmalloced
+ * v0.12 - added TIOCM ioctls, added break handling, made struct acm
+ * kmalloced
* v0.13 - added termios, added hangup
* v0.14 - sized down struct acm
* v0.15 - fixed flow control again - characters could be lost
@@ -62,7 +63,7 @@
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <asm/byteorder.h>
@@ -87,7 +88,10 @@ static struct acm *acm_table[ACM_TTY_MINORS];
static DEFINE_MUTEX(open_mutex);
-#define ACM_READY(acm) (acm && acm->dev && acm->used)
+#define ACM_READY(acm) (acm && acm->dev && acm->port.count)
+
+static const struct tty_port_operations acm_port_ops = {
+};
#ifdef VERBOSE_DEBUG
#define verbose 1
@@ -99,13 +103,15 @@ static DEFINE_MUTEX(open_mutex);
* Functions for ACM control messages.
*/
-static int acm_ctrl_msg(struct acm *acm, int request, int value, void *buf, int len)
+static int acm_ctrl_msg(struct acm *acm, int request, int value,
+ void *buf, int len)
{
int retval = usb_control_msg(acm->dev, usb_sndctrlpipe(acm->dev, 0),
request, USB_RT_ACM, value,
acm->control->altsetting[0].desc.bInterfaceNumber,
buf, len, 5000);
- dbg("acm_control_msg: rq: 0x%02x val: %#x len: %#x result: %d", request, value, len, retval);
+ dbg("acm_control_msg: rq: 0x%02x val: %#x len: %#x result: %d",
+ request, value, len, retval);
return retval < 0 ? retval : 0;
}
@@ -150,9 +156,8 @@ static int acm_wb_is_avail(struct acm *acm)
n = ACM_NW;
spin_lock_irqsave(&acm->write_lock, flags);
- for (i = 0; i < ACM_NW; i++) {
+ for (i = 0; i < ACM_NW; i++)
n -= acm->wb[i].use;
- }
spin_unlock_irqrestore(&acm->write_lock, flags);
return n;
}
@@ -183,7 +188,8 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
wb->urb->transfer_buffer_length = wb->len;
wb->urb->dev = acm->dev;
- if ((rc = usb_submit_urb(wb->urb, GFP_ATOMIC)) < 0) {
+ rc = usb_submit_urb(wb->urb, GFP_ATOMIC);
+ if (rc < 0) {
dbg("usb_submit_urb(write bulk) failed: %d", rc);
acm_write_done(acm, wb);
}
@@ -262,6 +268,7 @@ static void acm_ctrl_irq(struct urb *urb)
{
struct acm *acm = urb->context;
struct usb_cdc_notification *dr = urb->transfer_buffer;
+ struct tty_struct *tty;
unsigned char *data;
int newctrl;
int retval;
@@ -287,40 +294,45 @@ static void acm_ctrl_irq(struct urb *urb)
data = (unsigned char *)(dr + 1);
switch (dr->bNotificationType) {
+ case USB_CDC_NOTIFY_NETWORK_CONNECTION:
+ dbg("%s network", dr->wValue ?
+ "connected to" : "disconnected from");
+ break;
- case USB_CDC_NOTIFY_NETWORK_CONNECTION:
-
- dbg("%s network", dr->wValue ? "connected to" : "disconnected from");
- break;
-
- case USB_CDC_NOTIFY_SERIAL_STATE:
-
- newctrl = get_unaligned_le16(data);
+ case USB_CDC_NOTIFY_SERIAL_STATE:
+ tty = tty_port_tty_get(&acm->port);
+ newctrl = get_unaligned_le16(data);
- if (acm->tty && !acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) {
+ if (tty) {
+ if (!acm->clocal &&
+ (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) {
dbg("calling hangup");
- tty_hangup(acm->tty);
+ tty_hangup(tty);
}
+ tty_kref_put(tty);
+ }
- acm->ctrlin = newctrl;
-
- dbg("input control lines: dcd%c dsr%c break%c ring%c framing%c parity%c overrun%c",
- acm->ctrlin & ACM_CTRL_DCD ? '+' : '-', acm->ctrlin & ACM_CTRL_DSR ? '+' : '-',
- acm->ctrlin & ACM_CTRL_BRK ? '+' : '-', acm->ctrlin & ACM_CTRL_RI ? '+' : '-',
- acm->ctrlin & ACM_CTRL_FRAMING ? '+' : '-', acm->ctrlin & ACM_CTRL_PARITY ? '+' : '-',
- acm->ctrlin & ACM_CTRL_OVERRUN ? '+' : '-');
+ acm->ctrlin = newctrl;
+ dbg("input control lines: dcd%c dsr%c break%c ring%c framing%c parity%c overrun%c",
+ acm->ctrlin & ACM_CTRL_DCD ? '+' : '-',
+ acm->ctrlin & ACM_CTRL_DSR ? '+' : '-',
+ acm->ctrlin & ACM_CTRL_BRK ? '+' : '-',
+ acm->ctrlin & ACM_CTRL_RI ? '+' : '-',
+ acm->ctrlin & ACM_CTRL_FRAMING ? '+' : '-',
+ acm->ctrlin & ACM_CTRL_PARITY ? '+' : '-',
+ acm->ctrlin & ACM_CTRL_OVERRUN ? '+' : '-');
break;
- default:
- dbg("unknown notification %d received: index %d len %d data0 %d data1 %d",
- dr->bNotificationType, dr->wIndex,
- dr->wLength, data[0], data[1]);
- break;
+ default:
+ dbg("unknown notification %d received: index %d len %d data0 %d data1 %d",
+ dr->bNotificationType, dr->wIndex,
+ dr->wLength, data[0], data[1]);
+ break;
}
exit:
usb_mark_last_busy(acm->dev);
- retval = usb_submit_urb (urb, GFP_ATOMIC);
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&urb->dev->dev, "%s - usb_submit_urb failed with "
"result %d", __func__, retval);
@@ -371,15 +383,14 @@ static void acm_rx_tasklet(unsigned long _acm)
{
struct acm *acm = (void *)_acm;
struct acm_rb *buf;
- struct tty_struct *tty = acm->tty;
+ struct tty_struct *tty;
struct acm_ru *rcv;
unsigned long flags;
unsigned char throttled;
dbg("Entering acm_rx_tasklet");
- if (!ACM_READY(acm))
- {
+ if (!ACM_READY(acm)) {
dbg("acm_rx_tasklet: ACM not ready");
return;
}
@@ -387,12 +398,13 @@ static void acm_rx_tasklet(unsigned long _acm)
spin_lock_irqsave(&acm->throttle_lock, flags);
throttled = acm->throttle;
spin_unlock_irqrestore(&acm->throttle_lock, flags);
- if (throttled)
- {
+ if (throttled) {
dbg("acm_rx_tasklet: throttled");
return;
}
+ tty = tty_port_tty_get(&acm->port);
+
next_buffer:
spin_lock_irqsave(&acm->read_lock, flags);
if (list_empty(&acm->filled_read_bufs)) {
@@ -406,20 +418,22 @@ next_buffer:
dbg("acm_rx_tasklet: procesing buf 0x%p, size = %d", buf, buf->size);
- tty_buffer_request_room(tty, buf->size);
- spin_lock_irqsave(&acm->throttle_lock, flags);
- throttled = acm->throttle;
- spin_unlock_irqrestore(&acm->throttle_lock, flags);
- if (!throttled)
- tty_insert_flip_string(tty, buf->base, buf->size);
- tty_flip_buffer_push(tty);
-
- if (throttled) {
- dbg("Throttling noticed");
- spin_lock_irqsave(&acm->read_lock, flags);
- list_add(&buf->list, &acm->filled_read_bufs);
- spin_unlock_irqrestore(&acm->read_lock, flags);
- return;
+ if (tty) {
+ spin_lock_irqsave(&acm->throttle_lock, flags);
+ throttled = acm->throttle;
+ spin_unlock_irqrestore(&acm->throttle_lock, flags);
+ if (!throttled) {
+ tty_buffer_request_room(tty, buf->size);
+ tty_insert_flip_string(tty, buf->base, buf->size);
+ tty_flip_buffer_push(tty);
+ } else {
+ tty_kref_put(tty);
+ dbg("Throttling noticed");
+ spin_lock_irqsave(&acm->read_lock, flags);
+ list_add(&buf->list, &acm->filled_read_bufs);
+ spin_unlock_irqrestore(&acm->read_lock, flags);
+ return;
+ }
}
spin_lock_irqsave(&acm->read_lock, flags);
@@ -428,6 +442,8 @@ next_buffer:
goto next_buffer;
urbs:
+ tty_kref_put(tty);
+
while (!list_empty(&acm->spare_read_bufs)) {
spin_lock_irqsave(&acm->read_lock, flags);
if (list_empty(&acm->spare_read_urbs)) {
@@ -454,10 +470,11 @@ urbs:
rcv->urb->transfer_dma = buf->dma;
rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- /* This shouldn't kill the driver as unsuccessful URBs are returned to the
- free-urbs-pool and resubmited ASAP */
+ /* This shouldn't kill the driver as unsuccessful URBs are
+ returned to the free-urbs-pool and resubmitted ASAP */
spin_lock_irqsave(&acm->read_lock, flags);
- if (acm->susp_count || usb_submit_urb(rcv->urb, GFP_ATOMIC) < 0) {
+ if (acm->susp_count ||
+ usb_submit_urb(rcv->urb, GFP_ATOMIC) < 0) {
list_add(&buf->list, &acm->spare_read_bufs);
list_add(&rcv->list, &acm->spare_read_urbs);
acm->processing = 0;
@@ -499,11 +516,14 @@ static void acm_write_bulk(struct urb *urb)
static void acm_softint(struct work_struct *work)
{
struct acm *acm = container_of(work, struct acm, work);
+ struct tty_struct *tty;
dev_vdbg(&acm->data->dev, "tx work\n");
if (!ACM_READY(acm))
return;
- tty_wakeup(acm->tty);
+ tty = tty_port_tty_get(&acm->port);
+ tty_wakeup(tty);
+ tty_kref_put(tty);
}
static void acm_waker(struct work_struct *waker)
@@ -543,8 +563,9 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
rv = 0;
set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
+
tty->driver_data = acm;
- acm->tty = tty;
+ tty_port_tty_set(&acm->port, tty);
if (usb_autopm_get_interface(acm->control) < 0)
goto early_bail;
@@ -552,11 +573,10 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
acm->control->needs_remote_wakeup = 1;
mutex_lock(&acm->mutex);
- if (acm->used++) {
+ if (acm->port.count++) {
usb_autopm_put_interface(acm->control);
goto done;
- }
-
+ }
acm->ctrlurb->dev = acm->dev;
if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
@@ -567,22 +587,22 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
if (0 > acm_set_control(acm, acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS) &&
(acm->ctrl_caps & USB_CDC_CAP_LINE))
goto full_bailout;
+
usb_autopm_put_interface(acm->control);
INIT_LIST_HEAD(&acm->spare_read_urbs);
INIT_LIST_HEAD(&acm->spare_read_bufs);
INIT_LIST_HEAD(&acm->filled_read_bufs);
- for (i = 0; i < acm->rx_buflimit; i++) {
+
+ for (i = 0; i < acm->rx_buflimit; i++)
list_add(&(acm->ru[i].list), &acm->spare_read_urbs);
- }
- for (i = 0; i < acm->rx_buflimit; i++) {
+ for (i = 0; i < acm->rx_buflimit; i++)
list_add(&(acm->rb[i].list), &acm->spare_read_bufs);
- }
acm->throttle = 0;
tasklet_schedule(&acm->urb_task);
-
+ rv = tty_port_block_til_ready(&acm->port, tty, filp);
done:
mutex_unlock(&acm->mutex);
err_out:
@@ -593,16 +613,17 @@ full_bailout:
usb_kill_urb(acm->ctrlurb);
bail_out:
usb_autopm_put_interface(acm->control);
- acm->used--;
+ acm->port.count--;
mutex_unlock(&acm->mutex);
early_bail:
mutex_unlock(&open_mutex);
+ tty_port_tty_set(&acm->port, NULL);
return -EIO;
}
static void acm_tty_unregister(struct acm *acm)
{
- int i,nr;
+ int i, nr;
nr = acm->rx_buflimit;
tty_unregister_device(acm_tty_driver, acm->minor);
@@ -619,41 +640,56 @@ static void acm_tty_unregister(struct acm *acm)
static int acm_tty_chars_in_buffer(struct tty_struct *tty);
+static void acm_port_down(struct acm *acm, int drain)
+{
+ int i, nr = acm->rx_buflimit;
+ mutex_lock(&open_mutex);
+ if (acm->dev) {
+ usb_autopm_get_interface(acm->control);
+ acm_set_control(acm, acm->ctrlout = 0);
+ /* try letting the last writes drain naturally */
+ if (drain) {
+ wait_event_interruptible_timeout(acm->drain_wait,
+ (ACM_NW == acm_wb_is_avail(acm)) || !acm->dev,
+ ACM_CLOSE_TIMEOUT * HZ);
+ }
+ usb_kill_urb(acm->ctrlurb);
+ for (i = 0; i < ACM_NW; i++)
+ usb_kill_urb(acm->wb[i].urb);
+ for (i = 0; i < nr; i++)
+ usb_kill_urb(acm->ru[i].urb);
+ acm->control->needs_remote_wakeup = 0;
+ usb_autopm_put_interface(acm->control);
+ }
+ mutex_unlock(&open_mutex);
+}
+
+static void acm_tty_hangup(struct tty_struct *tty)
+{
+ struct acm *acm = tty->driver_data;
+ tty_port_hangup(&acm->port);
+ acm_port_down(acm, 0);
+}
+
static void acm_tty_close(struct tty_struct *tty, struct file *filp)
{
struct acm *acm = tty->driver_data;
- int i,nr;
- if (!acm || !acm->used)
+ /* Perform the closing process and see if we need to do the hardware
+ shutdown */
+ if (tty_port_close_start(&acm->port, tty, filp) == 0)
return;
-
- nr = acm->rx_buflimit;
+ acm_port_down(acm, 0);
+ tty_port_close_end(&acm->port, tty);
mutex_lock(&open_mutex);
- if (!--acm->used) {
- if (acm->dev) {
- usb_autopm_get_interface(acm->control);
- acm_set_control(acm, acm->ctrlout = 0);
-
- /* try letting the last writes drain naturally */
- wait_event_interruptible_timeout(acm->drain_wait,
- (ACM_NW == acm_wb_is_avail(acm))
- || !acm->dev,
- ACM_CLOSE_TIMEOUT * HZ);
-
- usb_kill_urb(acm->ctrlurb);
- for (i = 0; i < ACM_NW; i++)
- usb_kill_urb(acm->wb[i].urb);
- for (i = 0; i < nr; i++)
- usb_kill_urb(acm->ru[i].urb);
- acm->control->needs_remote_wakeup = 0;
- usb_autopm_put_interface(acm->control);
- } else
- acm_tty_unregister(acm);
- }
+ tty_port_tty_set(&acm->port, NULL);
+ if (!acm->dev)
+ acm_tty_unregister(acm);
mutex_unlock(&open_mutex);
}
-static int acm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+static int acm_tty_write(struct tty_struct *tty,
+ const unsigned char *buf, int count)
{
struct acm *acm = tty->driver_data;
int stat;
@@ -669,7 +705,8 @@ static int acm_tty_write(struct tty_struct *tty, const unsigned char *buf, int c
return 0;
spin_lock_irqsave(&acm->write_lock, flags);
- if ((wbn = acm_wb_alloc(acm)) < 0) {
+ wbn = acm_wb_alloc(acm);
+ if (wbn < 0) {
spin_unlock_irqrestore(&acm->write_lock, flags);
return 0;
}
@@ -681,7 +718,8 @@ static int acm_tty_write(struct tty_struct *tty, const unsigned char *buf, int c
wb->len = count;
spin_unlock_irqrestore(&acm->write_lock, flags);
- if ((stat = acm_write_start(acm, wbn)) < 0)
+ stat = acm_write_start(acm, wbn);
+ if (stat < 0)
return stat;
return count;
}
@@ -767,8 +805,10 @@ static int acm_tty_tiocmset(struct tty_struct *tty, struct file *file,
return -EINVAL;
newctrl = acm->ctrlout;
- set = (set & TIOCM_DTR ? ACM_CTRL_DTR : 0) | (set & TIOCM_RTS ? ACM_CTRL_RTS : 0);
- clear = (clear & TIOCM_DTR ? ACM_CTRL_DTR : 0) | (clear & TIOCM_RTS ? ACM_CTRL_RTS : 0);
+ set = (set & TIOCM_DTR ? ACM_CTRL_DTR : 0) |
+ (set & TIOCM_RTS ? ACM_CTRL_RTS : 0);
+ clear = (clear & TIOCM_DTR ? ACM_CTRL_DTR : 0) |
+ (clear & TIOCM_RTS ? ACM_CTRL_RTS : 0);
newctrl = (newctrl & ~clear) | set;
@@ -777,7 +817,8 @@ static int acm_tty_tiocmset(struct tty_struct *tty, struct file *file,
return acm_set_control(acm, acm->ctrlout = newctrl);
}
-static int acm_tty_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg)
+static int acm_tty_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
{
struct acm *acm = tty->driver_data;
@@ -799,7 +840,8 @@ static const __u8 acm_tty_size[] = {
5, 6, 7, 8
};
-static void acm_tty_set_termios(struct tty_struct *tty, struct ktermios *termios_old)
+static void acm_tty_set_termios(struct tty_struct *tty,
+ struct ktermios *termios_old)
{
struct acm *acm = tty->driver_data;
struct ktermios *termios = tty->termios;
@@ -809,19 +851,23 @@ static void acm_tty_set_termios(struct tty_struct *tty, struct ktermios *termios
if (!ACM_READY(acm))
return;
+ /* FIXME: Needs to support the tty_baud interface */
+ /* FIXME: Broken on sparc */
newline.dwDTERate = cpu_to_le32p(acm_tty_speed +
(termios->c_cflag & CBAUD & ~CBAUDEX) + (termios->c_cflag & CBAUDEX ? 15 : 0));
newline.bCharFormat = termios->c_cflag & CSTOPB ? 2 : 0;
newline.bParityType = termios->c_cflag & PARENB ?
- (termios->c_cflag & PARODD ? 1 : 2) + (termios->c_cflag & CMSPAR ? 2 : 0) : 0;
+ (termios->c_cflag & PARODD ? 1 : 2) +
+ (termios->c_cflag & CMSPAR ? 2 : 0) : 0;
newline.bDataBits = acm_tty_size[(termios->c_cflag & CSIZE) >> 4];
-
+ /* FIXME: Needs to clear unsupported bits in the termios */
acm->clocal = ((termios->c_cflag & CLOCAL) != 0);
if (!newline.dwDTERate) {
newline.dwDTERate = acm->line.dwDTERate;
newctrl &= ~ACM_CTRL_DTR;
- } else newctrl |= ACM_CTRL_DTR;
+ } else
+ newctrl |= ACM_CTRL_DTR;
if (newctrl != acm->ctrlout)
acm_set_control(acm, acm->ctrlout = newctrl);
@@ -846,9 +892,8 @@ static void acm_write_buffers_free(struct acm *acm)
struct acm_wb *wb;
struct usb_device *usb_dev = interface_to_usbdev(acm->control);
- for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++) {
+ for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++)
usb_buffer_free(usb_dev, acm->writesize, wb->buf, wb->dmah);
- }
}
static void acm_read_buffers_free(struct acm *acm)
@@ -857,7 +902,8 @@ static void acm_read_buffers_free(struct acm *acm)
int i, n = acm->rx_buflimit;
for (i = 0; i < n; i++)
- usb_buffer_free(usb_dev, acm->readsize, acm->rb[i].base, acm->rb[i].dma);
+ usb_buffer_free(usb_dev, acm->readsize,
+ acm->rb[i].base, acm->rb[i].dma);
}
/* Little helper: write buffers allocate */
@@ -882,8 +928,8 @@ static int acm_write_buffers_alloc(struct acm *acm)
return 0;
}
-static int acm_probe (struct usb_interface *intf,
- const struct usb_device_id *id)
+static int acm_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
struct usb_cdc_union_desc *union_header = NULL;
struct usb_cdc_country_functional_desc *cfd = NULL;
@@ -897,7 +943,7 @@ static int acm_probe (struct usb_interface *intf,
struct usb_device *usb_dev = interface_to_usbdev(intf);
struct acm *acm;
int minor;
- int ctrlsize,readsize;
+ int ctrlsize, readsize;
u8 *buf;
u8 ac_management_function = 0;
u8 call_management_function = 0;
@@ -917,7 +963,7 @@ static int acm_probe (struct usb_interface *intf,
control_interface = usb_ifnum_to_if(usb_dev, 0);
goto skip_normal_probe;
}
-
+
/* normal probing*/
if (!buffer) {
dev_err(&intf->dev, "Weird descriptor references\n");
@@ -925,8 +971,10 @@ static int acm_probe (struct usb_interface *intf,
}
if (!buflen) {
- if (intf->cur_altsetting->endpoint->extralen && intf->cur_altsetting->endpoint->extra) {
- dev_dbg(&intf->dev,"Seeking extra descriptors on endpoint\n");
+ if (intf->cur_altsetting->endpoint->extralen &&
+ intf->cur_altsetting->endpoint->extra) {
+ dev_dbg(&intf->dev,
+ "Seeking extra descriptors on endpoint\n");
buflen = intf->cur_altsetting->endpoint->extralen;
buffer = intf->cur_altsetting->endpoint->extra;
} else {
@@ -937,47 +985,43 @@ static int acm_probe (struct usb_interface *intf,
}
while (buflen > 0) {
- if (buffer [1] != USB_DT_CS_INTERFACE) {
+ if (buffer[1] != USB_DT_CS_INTERFACE) {
dev_err(&intf->dev, "skipping garbage\n");
goto next_desc;
}
- switch (buffer [2]) {
- case USB_CDC_UNION_TYPE: /* we've found it */
- if (union_header) {
- dev_err(&intf->dev, "More than one "
- "union descriptor, "
- "skipping ...\n");
- goto next_desc;
- }
- union_header = (struct usb_cdc_union_desc *)
- buffer;
- break;
- case USB_CDC_COUNTRY_TYPE: /* export through sysfs*/
- cfd = (struct usb_cdc_country_functional_desc *)buffer;
- break;
- case USB_CDC_HEADER_TYPE: /* maybe check version */
- break; /* for now we ignore it */
- case USB_CDC_ACM_TYPE:
- ac_management_function = buffer[3];
- break;
- case USB_CDC_CALL_MANAGEMENT_TYPE:
- call_management_function = buffer[3];
- call_interface_num = buffer[4];
- if ((call_management_function & 3) != 3)
- dev_err(&intf->dev, "This device "
- "cannot do calls on its own. "
- "It is no modem.\n");
- break;
- default:
- /* there are LOTS more CDC descriptors that
- * could legitimately be found here.
- */
- dev_dbg(&intf->dev, "Ignoring descriptor: "
- "type %02x, length %d\n",
- buffer[2], buffer[0]);
- break;
+ switch (buffer[2]) {
+ case USB_CDC_UNION_TYPE: /* we've found it */
+ if (union_header) {
+ dev_err(&intf->dev, "More than one "
+ "union descriptor, skipping ...\n");
+ goto next_desc;
}
+ union_header = (struct usb_cdc_union_desc *)buffer;
+ break;
+ case USB_CDC_COUNTRY_TYPE: /* export through sysfs */
+ cfd = (struct usb_cdc_country_functional_desc *)buffer;
+ break;
+ case USB_CDC_HEADER_TYPE: /* maybe check version */
+ break; /* for now we ignore it */
+ case USB_CDC_ACM_TYPE:
+ ac_management_function = buffer[3];
+ break;
+ case USB_CDC_CALL_MANAGEMENT_TYPE:
+ call_management_function = buffer[3];
+ call_interface_num = buffer[4];
+ if ((call_management_function & 3) != 3)
+ dev_err(&intf->dev, "This device cannot do calls on its own. It is not a modem.\n");
+ break;
+ default:
+ /* there are LOTS more CDC descriptors that
+ * could legitimately be found here.
+ */
+ dev_dbg(&intf->dev, "Ignoring descriptor: "
+ "type %02x, length %d\n",
+ buffer[2], buffer[0]);
+ break;
+ }
next_desc:
buflen -= buffer[0];
buffer += buffer[0];
@@ -985,33 +1029,36 @@ next_desc:
if (!union_header) {
if (call_interface_num > 0) {
- dev_dbg(&intf->dev,"No union descriptor, using call management descriptor\n");
+ dev_dbg(&intf->dev, "No union descriptor, using call management descriptor\n");
data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = call_interface_num));
control_interface = intf;
} else {
- dev_dbg(&intf->dev,"No union descriptor, giving up\n");
+ dev_dbg(&intf->dev,
+ "No union descriptor, giving up\n");
return -ENODEV;
}
} else {
control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = union_header->bSlaveInterface0));
if (!control_interface || !data_interface) {
- dev_dbg(&intf->dev,"no interfaces\n");
+ dev_dbg(&intf->dev, "no interfaces\n");
return -ENODEV;
}
}
-
+
if (data_interface_num != call_interface_num)
- dev_dbg(&intf->dev,"Separate call control interface. That is not fully supported.\n");
+ dev_dbg(&intf->dev, "Separate call control interface. That is not fully supported.\n");
skip_normal_probe:
/*workaround for switched interfaces */
- if (data_interface->cur_altsetting->desc.bInterfaceClass != CDC_DATA_INTERFACE_TYPE) {
- if (control_interface->cur_altsetting->desc.bInterfaceClass == CDC_DATA_INTERFACE_TYPE) {
+ if (data_interface->cur_altsetting->desc.bInterfaceClass
+ != CDC_DATA_INTERFACE_TYPE) {
+ if (control_interface->cur_altsetting->desc.bInterfaceClass
+ == CDC_DATA_INTERFACE_TYPE) {
struct usb_interface *t;
- dev_dbg(&intf->dev,"Your device has switched interfaces.\n");
-
+ dev_dbg(&intf->dev,
+ "Your device has switched interfaces.\n");
t = control_interface;
control_interface = data_interface;
data_interface = t;
@@ -1023,9 +1070,9 @@ skip_normal_probe:
/* Accept probe requests only for the control interface */
if (intf != control_interface)
return -ENODEV;
-
+
if (usb_interface_claimed(data_interface)) { /* valid in this context */
- dev_dbg(&intf->dev,"The data interface isn't available\n");
+ dev_dbg(&intf->dev, "The data interface isn't available\n");
return -EBUSY;
}
@@ -1042,8 +1089,8 @@ skip_normal_probe:
if (!usb_endpoint_dir_in(epread)) {
/* descriptors are swapped */
struct usb_endpoint_descriptor *t;
- dev_dbg(&intf->dev,"The data interface has switched endpoints\n");
-
+ dev_dbg(&intf->dev,
+ "The data interface has switched endpoints\n");
t = epread;
epread = epwrite;
epwrite = t;
@@ -1056,13 +1103,15 @@ skip_normal_probe:
return -ENODEV;
}
- if (!(acm = kzalloc(sizeof(struct acm), GFP_KERNEL))) {
+ acm = kzalloc(sizeof(struct acm), GFP_KERNEL);
+ if (acm == NULL) {
dev_dbg(&intf->dev, "out of memory (acm kzalloc)\n");
goto alloc_fail;
}
ctrlsize = le16_to_cpu(epctrl->wMaxPacketSize);
- readsize = le16_to_cpu(epread->wMaxPacketSize)* ( quirks == SINGLE_RX_URB ? 1 : 2);
+ readsize = le16_to_cpu(epread->wMaxPacketSize) *
+ (quirks == SINGLE_RX_URB ? 1 : 2);
acm->writesize = le16_to_cpu(epwrite->wMaxPacketSize) * 20;
acm->control = control_interface;
acm->data = data_interface;
@@ -1082,6 +1131,8 @@ skip_normal_probe:
spin_lock_init(&acm->read_lock);
mutex_init(&acm->mutex);
acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
+ tty_port_init(&acm->port);
+ acm->port.ops = &acm_port_ops;
buf = usb_buffer_alloc(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
if (!buf) {
@@ -1103,8 +1154,10 @@ skip_normal_probe:
for (i = 0; i < num_rx_buf; i++) {
struct acm_ru *rcv = &(acm->ru[i]);
- if (!(rcv->urb = usb_alloc_urb(0, GFP_KERNEL))) {
- dev_dbg(&intf->dev, "out of memory (read urbs usb_alloc_urb)\n");
+ rcv->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (rcv->urb == NULL) {
+ dev_dbg(&intf->dev,
+ "out of memory (read urbs usb_alloc_urb)\n");
goto alloc_fail7;
}
@@ -1117,26 +1170,29 @@ skip_normal_probe:
rb->base = usb_buffer_alloc(acm->dev, readsize,
GFP_KERNEL, &rb->dma);
if (!rb->base) {
- dev_dbg(&intf->dev, "out of memory (read bufs usb_buffer_alloc)\n");
+ dev_dbg(&intf->dev,
+ "out of memory (read bufs usb_buffer_alloc)\n");
goto alloc_fail7;
}
}
- for(i = 0; i < ACM_NW; i++)
- {
+ for (i = 0; i < ACM_NW; i++) {
struct acm_wb *snd = &(acm->wb[i]);
- if (!(snd->urb = usb_alloc_urb(0, GFP_KERNEL))) {
- dev_dbg(&intf->dev, "out of memory (write urbs usb_alloc_urb)");
+ snd->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (snd->urb == NULL) {
+ dev_dbg(&intf->dev,
+ "out of memory (write urbs usb_alloc_urb)");
goto alloc_fail7;
}
- usb_fill_bulk_urb(snd->urb, usb_dev, usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
- NULL, acm->writesize, acm_write_bulk, snd);
+ usb_fill_bulk_urb(snd->urb, usb_dev,
+ usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
+ NULL, acm->writesize, acm_write_bulk, snd);
snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
snd->instance = acm;
}
- usb_set_intfdata (intf, acm);
+ usb_set_intfdata(intf, acm);
i = device_create_file(&intf->dev, &dev_attr_bmCapabilities);
if (i < 0)
@@ -1147,7 +1203,8 @@ skip_normal_probe:
if (!acm->country_codes)
goto skip_countries;
acm->country_code_size = cfd->bLength - 4;
- memcpy(acm->country_codes, (u8 *)&cfd->wCountyCode0, cfd->bLength - 4);
+ memcpy(acm->country_codes, (u8 *)&cfd->wCountyCode0,
+ cfd->bLength - 4);
acm->country_rel_date = cfd->iCountryCodeRelDate;
i = device_create_file(&intf->dev, &dev_attr_wCountryCodes);
@@ -1156,7 +1213,8 @@ skip_normal_probe:
goto skip_countries;
}
- i = device_create_file(&intf->dev, &dev_attr_iCountryCodeRelDate);
+ i = device_create_file(&intf->dev,
+ &dev_attr_iCountryCodeRelDate);
if (i < 0) {
kfree(acm->country_codes);
goto skip_countries;
@@ -1164,8 +1222,10 @@ skip_normal_probe:
}
skip_countries:
- usb_fill_int_urb(acm->ctrlurb, usb_dev, usb_rcvintpipe(usb_dev, epctrl->bEndpointAddress),
- acm->ctrl_buffer, ctrlsize, acm_ctrl_irq, acm, epctrl->bInterval);
+ usb_fill_int_urb(acm->ctrlurb, usb_dev,
+ usb_rcvintpipe(usb_dev, epctrl->bEndpointAddress),
+ acm->ctrl_buffer, ctrlsize, acm_ctrl_irq, acm,
+ epctrl->bInterval);
acm->ctrlurb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
acm->ctrlurb->transfer_dma = acm->ctrl_dma;
@@ -1212,7 +1272,7 @@ static void stop_data_traffic(struct acm *acm)
tasklet_disable(&acm->urb_task);
usb_kill_urb(acm->ctrlurb);
- for(i = 0; i < ACM_NW; i++)
+ for (i = 0; i < ACM_NW; i++)
usb_kill_urb(acm->wb[i].urb);
for (i = 0; i < acm->rx_buflimit; i++)
usb_kill_urb(acm->ru[i].urb);
@@ -1227,13 +1287,14 @@ static void acm_disconnect(struct usb_interface *intf)
{
struct acm *acm = usb_get_intfdata(intf);
struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct tty_struct *tty;
/* sibling interface is already cleaning up */
if (!acm)
return;
mutex_lock(&open_mutex);
- if (acm->country_codes){
+ if (acm->country_codes) {
device_remove_file(&acm->control->dev,
&dev_attr_wCountryCodes);
device_remove_file(&acm->control->dev,
@@ -1247,22 +1308,25 @@ static void acm_disconnect(struct usb_interface *intf)
stop_data_traffic(acm);
acm_write_buffers_free(acm);
- usb_buffer_free(usb_dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
+ usb_buffer_free(usb_dev, acm->ctrlsize, acm->ctrl_buffer,
+ acm->ctrl_dma);
acm_read_buffers_free(acm);
usb_driver_release_interface(&acm_driver, intf == acm->control ?
acm->data : acm->control);
- if (!acm->used) {
+ if (acm->port.count == 0) {
acm_tty_unregister(acm);
mutex_unlock(&open_mutex);
return;
}
mutex_unlock(&open_mutex);
-
- if (acm->tty)
- tty_hangup(acm->tty);
+ tty = tty_port_tty_get(&acm->port);
+ if (tty) {
+ tty_hangup(tty);
+ tty_kref_put(tty);
+ }
}
#ifdef CONFIG_PM
@@ -1297,7 +1361,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
*/
mutex_lock(&acm->mutex);
- if (acm->used)
+ if (acm->port.count)
stop_data_traffic(acm);
mutex_unlock(&acm->mutex);
@@ -1319,7 +1383,7 @@ static int acm_resume(struct usb_interface *intf)
return 0;
mutex_lock(&acm->mutex);
- if (acm->used) {
+ if (acm->port.count) {
rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
if (rv < 0)
goto err_out;
@@ -1375,6 +1439,9 @@ static struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0572, 0x1324), /* Conexant USB MODEM RD02-D400 */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
+ { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ },
{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
},
{ USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
@@ -1395,7 +1462,7 @@ static struct usb_device_id acm_ids[] = {
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_GSM) },
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
- USB_CDC_ACM_PROTO_AT_3G ) },
+ USB_CDC_ACM_PROTO_AT_3G) },
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_CDMA) },
@@ -1403,7 +1470,7 @@ static struct usb_device_id acm_ids[] = {
{ }
};
-MODULE_DEVICE_TABLE (usb, acm_ids);
+MODULE_DEVICE_TABLE(usb, acm_ids);
static struct usb_driver acm_driver = {
.name = "cdc_acm",
@@ -1426,6 +1493,7 @@ static struct usb_driver acm_driver = {
static const struct tty_operations acm_ops = {
.open = acm_tty_open,
.close = acm_tty_close,
+ .hangup = acm_tty_hangup,
.write = acm_tty_write,
.write_room = acm_tty_write_room,
.ioctl = acm_tty_ioctl,
@@ -1457,7 +1525,8 @@ static int __init acm_init(void)
	acm_tty_driver->subtype = SERIAL_TYPE_NORMAL;
acm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
acm_tty_driver->init_termios = tty_std_termios;
- acm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ acm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD |
+ HUPCL | CLOCAL;
tty_set_operations(acm_tty_driver, &acm_ops);
retval = tty_register_driver(acm_tty_driver);
@@ -1489,7 +1558,7 @@ static void __exit acm_exit(void)
module_init(acm_init);
module_exit(acm_exit);
-MODULE_AUTHOR( DRIVER_AUTHOR );
-MODULE_DESCRIPTION( DRIVER_DESC );
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(ACM_TTY_MAJOR);
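Most of the cdc-acm churn above replaces the driver's private acm->tty pointer and 'used' counter with the generic tty_port helpers: code that needs the tty now takes a reference with tty_port_tty_get() and drops it with tty_kref_put(). Below is a minimal sketch of that pattern, with a hypothetical my_dev structure standing in for struct acm; the tty calls are the same ones the diff uses.

/* Sketch of the refcounted tty access the patch converts cdc-acm to */
#include <linux/tty.h>
#include <linux/tty_flip.h>

struct my_dev {
	struct tty_port port;	/* replaces the raw tty pointer + open count */
};

static void my_dev_push_rx(struct my_dev *dev, const unsigned char *buf, int len)
{
	struct tty_struct *tty = tty_port_tty_get(&dev->port);

	if (!tty)
		return;		/* port not open, or already hung up */

	tty_insert_flip_string(tty, buf, len);
	tty_flip_buffer_push(tty);
	tty_kref_put(tty);	/* drop the reference taken above */
}

static void my_dev_carrier_lost(struct my_dev *dev)
{
	struct tty_struct *tty = tty_port_tty_get(&dev->port);

	if (tty) {
		tty_hangup(tty);
		tty_kref_put(tty);
	}
}

Open and close accounting is delegated the same way: tty_port_block_til_ready(), tty_port_close_start() and tty_port_close_end() maintain port.count, which is why acm->used disappears from the structure in the header change that follows.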
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 1f95e7aa1b6..4c3856420ad 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -89,8 +89,8 @@ struct acm {
struct usb_device *dev; /* the corresponding usb device */
struct usb_interface *control; /* control interface */
struct usb_interface *data; /* data interface */
- struct tty_struct *tty; /* the corresponding tty */
- struct urb *ctrlurb; /* urbs */
+ struct tty_port port; /* our tty port data */
+ struct urb *ctrlurb; /* urbs */
u8 *ctrl_buffer; /* buffers of urbs */
dma_addr_t ctrl_dma; /* dma handles of buffers */
u8 *country_codes; /* country codes from device */
@@ -120,7 +120,6 @@ struct acm {
unsigned int ctrlout; /* output control lines (DTR, RTS) */
unsigned int writesize; /* max packet size for the output bulk endpoint */
unsigned int readsize,ctrlsize; /* buffer sizes for freeing */
- unsigned int used; /* someone has this acm's device open */
unsigned int minor; /* acm minor number */
unsigned char throttle; /* throttled by tty layer */
unsigned char clocal; /* termios CLOCAL */
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index dff5760a37f..ffe75e83787 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -39,6 +39,7 @@
#include <linux/parser.h>
#include <linux/notifier.h>
#include <linux/seq_file.h>
+#include <linux/smp_lock.h>
#include <asm/byteorder.h>
#include "usb.h"
#include "hcd.h"
@@ -265,9 +266,13 @@ static int remount(struct super_block *sb, int *flags, char *data)
return -EINVAL;
}
+ lock_kernel();
+
if (usbfs_mount && usbfs_mount->mnt_sb)
update_sb(usbfs_mount->mnt_sb);
+ unlock_kernel();
+
return 0;
}
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 563d5727544..05c913cc365 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -794,7 +794,8 @@ usba_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
if (ep->desc) {
list_add_tail(&req->queue, &ep->queue);
- if (ep->is_in || (ep_is_control(ep)
+ if ((!ep_is_control(ep) && ep->is_in) ||
+ (ep_is_control(ep)
&& (ep->state == DATA_STAGE_IN
|| ep->state == STATUS_STAGE_IN)))
usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
@@ -1940,7 +1941,7 @@ static int __init usba_udc_probe(struct platform_device *pdev)
usba_writel(udc, CTRL, USBA_DISABLE_MASK);
clk_disable(pclk);
- usba_ep = kmalloc(sizeof(struct usba_ep) * pdata->num_ep,
+ usba_ep = kzalloc(sizeof(struct usba_ep) * pdata->num_ep,
GFP_KERNEL);
if (!usba_ep)
goto err_alloc_ep;
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index cd07ea3f0c6..15438469f21 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -1658,6 +1658,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
u32 reg_base, or_reg, skip_reg;
unsigned long flags;
struct ptd ptd;
+ packet_enqueue *pe;
switch (usb_pipetype(urb->pipe)) {
case PIPE_ISOCHRONOUS:
@@ -1669,6 +1670,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
reg_base = INT_REGS_OFFSET;
or_reg = HC_INT_IRQ_MASK_OR_REG;
skip_reg = HC_INT_PTD_SKIPMAP_REG;
+ pe = enqueue_an_INT_packet;
break;
default:
@@ -1676,6 +1678,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
reg_base = ATL_REGS_OFFSET;
or_reg = HC_ATL_IRQ_MASK_OR_REG;
skip_reg = HC_ATL_PTD_SKIPMAP_REG;
+ pe = enqueue_an_ATL_packet;
break;
}
@@ -1687,6 +1690,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
u32 skip_map;
u32 or_map;
struct isp1760_qtd *qtd;
+ struct isp1760_qh *qh = ints->qh;
skip_map = isp1760_readl(hcd->regs + skip_reg);
skip_map |= 1 << i;
@@ -1699,8 +1703,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
priv_write_copy(priv, (u32 *)&ptd, hcd->regs + reg_base
+ i * sizeof(ptd), sizeof(ptd));
qtd = ints->qtd;
-
- clean_up_qtdlist(qtd);
+ qtd = clean_up_qtdlist(qtd);
free_mem(priv, ints->payload);
@@ -1711,7 +1714,24 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
ints->payload = 0;
isp1760_urb_done(priv, urb, status);
+ if (qtd)
+ pe(hcd, qh, qtd);
break;
+
+ } else if (ints->qtd) {
+ struct isp1760_qtd *qtd, *prev_qtd = ints->qtd;
+
+ for (qtd = ints->qtd->hw_next; qtd; qtd = qtd->hw_next) {
+ if (qtd->urb == urb) {
+ prev_qtd->hw_next = clean_up_qtdlist(qtd);
+ isp1760_urb_done(priv, urb, status);
+ break;
+ }
+ prev_qtd = qtd;
+ }
+ /* we found the urb before the end of the list */
+ if (qtd)
+ break;
}
ints++;
}
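The dequeue fix walks the queue-head's qtd chain looking for the cancelled URB and splices its descriptors out by pointing the previous element's hw_next past them. Stripped of the HCD details, the operation is an unlink from a singly linked list; here is a self-contained sketch under hypothetical type and field names.

/* Sketch: unlink the first node whose key matches from a singly linked list.
 * 'struct node' and the key lookup are illustrative, not isp1760 types. */
#include <stddef.h>

struct node {
	int key;
	struct node *next;
};

static struct node *unlink_key(struct node *head, int key, struct node **removed)
{
	struct node *prev = NULL, *cur;

	*removed = NULL;
	for (cur = head; cur; prev = cur, cur = cur->next) {
		if (cur->key != key)
			continue;
		if (prev)
			prev->next = cur->next;	/* splice out mid-list */
		else
			head = cur->next;	/* unlinking the head itself */
		cur->next = NULL;
		*removed = cur;
		break;
	}
	return head;
}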
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index b7eacad4d48..2bfd6dd85b5 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -93,8 +93,7 @@ static int belkin_sa_startup(struct usb_serial *serial);
static void belkin_sa_shutdown(struct usb_serial *serial);
static int belkin_sa_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
-static void belkin_sa_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp);
+static void belkin_sa_close(struct usb_serial_port *port);
static void belkin_sa_read_int_callback(struct urb *urb);
static void belkin_sa_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios * old);
@@ -244,8 +243,7 @@ exit:
} /* belkin_sa_open */
-static void belkin_sa_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void belkin_sa_close(struct usb_serial_port *port)
{
dbg("%s port %d", __func__, port->number);
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index ab4cc277aa6..2830766f5b3 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -262,32 +262,40 @@ error: kfree(priv);
return r;
}
-static void ch341_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp)
+static int ch341_carrier_raised(struct usb_serial_port *port)
+{
+ struct ch341_private *priv = usb_get_serial_port_data(port);
+ if (priv->line_status & CH341_BIT_DCD)
+ return 1;
+ return 0;
+}
+
+static void ch341_dtr_rts(struct usb_serial_port *port, int on)
{
struct ch341_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
- unsigned int c_cflag;
dbg("%s - port %d", __func__, port->number);
+ /* raise or drop DTR and RTS according to 'on' */
+ spin_lock_irqsave(&priv->lock, flags);
+ if (on)
+ priv->line_control |= CH341_BIT_RTS | CH341_BIT_DTR;
+ else
+ priv->line_control &= ~(CH341_BIT_RTS | CH341_BIT_DTR);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ ch341_set_handshake(port->serial->dev, priv->line_control);
+ wake_up_interruptible(&priv->delta_msr_wait);
+}
+
+static void ch341_close(struct usb_serial_port *port)
+{
+ dbg("%s - port %d", __func__, port->number);
/* shutdown our urbs */
dbg("%s - shutting down urbs", __func__);
usb_kill_urb(port->write_urb);
usb_kill_urb(port->read_urb);
usb_kill_urb(port->interrupt_in_urb);
-
- if (tty) {
- c_cflag = tty->termios->c_cflag;
- if (c_cflag & HUPCL) {
- /* drop DTR and RTS */
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_control = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
- ch341_set_handshake(port->serial->dev, 0);
- }
- }
- wake_up_interruptible(&priv->delta_msr_wait);
}
@@ -302,7 +310,6 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port,
dbg("ch341_open()");
priv->baud_rate = DEFAULT_BAUD_RATE;
- priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;
r = ch341_configure(serial->dev, priv);
if (r)
@@ -322,7 +329,7 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port,
if (r) {
dev_err(&port->dev, "%s - failed submitting interrupt urb,"
" error %d\n", __func__, r);
- ch341_close(tty, port, NULL);
+ ch341_close(port);
return -EPROTO;
}
@@ -343,9 +350,6 @@ static void ch341_set_termios(struct tty_struct *tty,
dbg("ch341_set_termios()");
- if (!tty || !tty->termios)
- return;
-
baud_rate = tty_get_baud_rate(tty);
priv->baud_rate = baud_rate;
@@ -568,6 +572,8 @@ static struct usb_serial_driver ch341_device = {
.usb_driver = &ch341_driver,
.num_ports = 1,
.open = ch341_open,
+ .dtr_rts = ch341_dtr_rts,
+ .carrier_raised = ch341_carrier_raised,
.close = ch341_close,
.ioctl = ch341_ioctl,
.set_termios = ch341_set_termios,
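The ch341 conversion moves DTR/RTS and carrier handling out of open()/close() and into the usb-serial core's dtr_rts() and carrier_raised() hooks, so the core can apply them at the right points of the open, close and hangup sequences. Below is a sketch of the same hookup for a hypothetical driver; the two callback fields and their signatures are the ones the diff itself uses, while the mydrv_* helpers are placeholders.

/* Sketch only: mydrv_* is hypothetical; .dtr_rts / .carrier_raised are the
 * usb_serial_driver hooks the ch341 diff wires up */
#include <linux/module.h>
#include <linux/usb/serial.h>

static void mydrv_set_lines(struct usb_serial_port *port, int dtr, int rts)
{
	/* the device-specific control request would go here */
}

static void mydrv_dtr_rts(struct usb_serial_port *port, int on)
{
	/* called by the core on open (on = 1) and on final close/hangup (on = 0) */
	mydrv_set_lines(port, on, on);
}

static int mydrv_carrier_raised(struct usb_serial_port *port)
{
	/* return non-zero when DCD is asserted; reporting 1 unconditionally
	 * keeps blocking opens from waiting on carrier */
	return 1;
}

static struct usb_serial_driver mydrv_device = {
	.driver		= { .owner = THIS_MODULE, .name = "mydrv" },
	.num_ports	= 1,
	.dtr_rts	= mydrv_dtr_rts,
	.carrier_raised	= mydrv_carrier_raised,
	/* .open/.close/.id_table etc. omitted from this sketch */
};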
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 19e24045b13..247b61bfb7f 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -169,7 +169,9 @@ static int usb_console_setup(struct console *co, char *options)
kfree(tty);
}
}
-
+ /* So we know not to kill the hardware on a hangup on this
+ port. We have also bumped the use count by one so it won't go
+ idle */
port->console = 1;
retval = 0;
@@ -182,7 +184,7 @@ free_tty:
kfree(tty);
reset_open_count:
port->port.count = 0;
-goto out;
+ goto out;
}
static void usb_console_write(struct console *co,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index e8d5133ce9c..16a154d3b2f 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -1,5 +1,5 @@
/*
- * Silicon Laboratories CP2101/CP2102 USB to RS232 serial adaptor driver
+ * Silicon Laboratories CP210x USB to RS232 serial adaptor driver
*
* Copyright (C) 2005 Craig Shelley (craig@microtron.org.uk)
*
@@ -27,44 +27,46 @@
/*
* Version Information
*/
-#define DRIVER_VERSION "v0.08"
-#define DRIVER_DESC "Silicon Labs CP2101/CP2102 RS232 serial adaptor driver"
+#define DRIVER_VERSION "v0.09"
+#define DRIVER_DESC "Silicon Labs CP210x RS232 serial adaptor driver"
/*
* Function Prototypes
*/
-static int cp2101_open(struct tty_struct *, struct usb_serial_port *,
+static int cp210x_open(struct tty_struct *, struct usb_serial_port *,
struct file *);
-static void cp2101_cleanup(struct usb_serial_port *);
-static void cp2101_close(struct tty_struct *, struct usb_serial_port *,
- struct file*);
-static void cp2101_get_termios(struct tty_struct *,
+static void cp210x_cleanup(struct usb_serial_port *);
+static void cp210x_close(struct usb_serial_port *);
+static void cp210x_get_termios(struct tty_struct *,
struct usb_serial_port *port);
-static void cp2101_get_termios_port(struct usb_serial_port *port,
+static void cp210x_get_termios_port(struct usb_serial_port *port,
unsigned int *cflagp, unsigned int *baudp);
-static void cp2101_set_termios(struct tty_struct *, struct usb_serial_port *,
+static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *,
struct ktermios*);
-static int cp2101_tiocmget(struct tty_struct *, struct file *);
-static int cp2101_tiocmset(struct tty_struct *, struct file *,
+static int cp210x_tiocmget(struct tty_struct *, struct file *);
+static int cp210x_tiocmset(struct tty_struct *, struct file *,
unsigned int, unsigned int);
-static int cp2101_tiocmset_port(struct usb_serial_port *port, struct file *,
+static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *,
unsigned int, unsigned int);
-static void cp2101_break_ctl(struct tty_struct *, int);
-static int cp2101_startup(struct usb_serial *);
-static void cp2101_shutdown(struct usb_serial *);
+static void cp210x_break_ctl(struct tty_struct *, int);
+static int cp210x_startup(struct usb_serial *);
+static void cp210x_shutdown(struct usb_serial *);
static int debug;
static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
+ { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
+ { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
{ USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */
{ USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */
{ USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */
{ USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */
{ USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */
+ { USB_DEVICE(0x10C4, 0x0F91) }, /* Vstabi */
{ USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */
{ USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */
{ USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
@@ -85,10 +87,12 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
{ USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
{ USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
+ { USB_DEVICE(0x10C4, 0x81F2) }, /* C1007 HF band RFID controller */
{ USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */
{ USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */
	{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
{ USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */
+ { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
@@ -99,7 +103,9 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
{ USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
{ USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
+ { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
+ { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
@@ -108,53 +114,70 @@ static struct usb_device_id id_table [] = {
MODULE_DEVICE_TABLE(usb, id_table);
-static struct usb_driver cp2101_driver = {
- .name = "cp2101",
+static struct usb_driver cp210x_driver = {
+ .name = "cp210x",
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = id_table,
.no_dynamic_id = 1,
};
-static struct usb_serial_driver cp2101_device = {
+static struct usb_serial_driver cp210x_device = {
.driver = {
.owner = THIS_MODULE,
- .name = "cp2101",
+ .name = "cp210x",
},
- .usb_driver = &cp2101_driver,
+ .usb_driver = &cp210x_driver,
.id_table = id_table,
.num_ports = 1,
- .open = cp2101_open,
- .close = cp2101_close,
- .break_ctl = cp2101_break_ctl,
- .set_termios = cp2101_set_termios,
- .tiocmget = cp2101_tiocmget,
- .tiocmset = cp2101_tiocmset,
- .attach = cp2101_startup,
- .shutdown = cp2101_shutdown,
+ .open = cp210x_open,
+ .close = cp210x_close,
+ .break_ctl = cp210x_break_ctl,
+ .set_termios = cp210x_set_termios,
+ .tiocmget = cp210x_tiocmget,
+ .tiocmset = cp210x_tiocmset,
+ .attach = cp210x_startup,
+ .shutdown = cp210x_shutdown,
};
/* Config request types */
#define REQTYPE_HOST_TO_DEVICE 0x41
#define REQTYPE_DEVICE_TO_HOST 0xc1
-/* Config SET requests. To GET, add 1 to the request number */
-#define CP2101_UART 0x00 /* Enable / Disable */
-#define CP2101_BAUDRATE 0x01 /* (BAUD_RATE_GEN_FREQ / baudrate) */
-#define CP2101_BITS 0x03 /* 0x(0)(databits)(parity)(stopbits) */
-#define CP2101_BREAK 0x05 /* On / Off */
-#define CP2101_CONTROL 0x07 /* Flow control line states */
-#define CP2101_MODEMCTL 0x13 /* Modem controls */
-#define CP2101_CONFIG_6 0x19 /* 6 bytes of config data ??? */
-
-/* CP2101_UART */
+/* Config request codes */
+#define CP210X_IFC_ENABLE 0x00
+#define CP210X_SET_BAUDDIV 0x01
+#define CP210X_GET_BAUDDIV 0x02
+#define CP210X_SET_LINE_CTL 0x03
+#define CP210X_GET_LINE_CTL 0x04
+#define CP210X_SET_BREAK 0x05
+#define CP210X_IMM_CHAR 0x06
+#define CP210X_SET_MHS 0x07
+#define CP210X_GET_MDMSTS 0x08
+#define CP210X_SET_XON 0x09
+#define CP210X_SET_XOFF 0x0A
+#define CP210X_SET_EVENTMASK 0x0B
+#define CP210X_GET_EVENTMASK 0x0C
+#define CP210X_SET_CHAR 0x0D
+#define CP210X_GET_CHARS 0x0E
+#define CP210X_GET_PROPS 0x0F
+#define CP210X_GET_COMM_STATUS 0x10
+#define CP210X_RESET 0x11
+#define CP210X_PURGE 0x12
+#define CP210X_SET_FLOW 0x13
+#define CP210X_GET_FLOW 0x14
+#define CP210X_EMBED_EVENTS 0x15
+#define CP210X_GET_EVENTSTATE 0x16
+#define CP210X_SET_CHARS 0x19
+
+/* CP210X_IFC_ENABLE */
#define UART_ENABLE 0x0001
#define UART_DISABLE 0x0000
-/* CP2101_BAUDRATE */
+/* CP210X_(SET|GET)_BAUDDIV */
#define BAUD_RATE_GEN_FREQ 0x384000
-/* CP2101_BITS */
+/* CP210X_(SET|GET)_LINE_CTL */
#define BITS_DATA_MASK 0X0f00
#define BITS_DATA_5 0X0500
#define BITS_DATA_6 0X0600
@@ -174,11 +197,11 @@ static struct usb_serial_driver cp2101_device = {
#define BITS_STOP_1_5 0x0001
#define BITS_STOP_2 0x0002
-/* CP2101_BREAK */
+/* CP210X_SET_BREAK */
#define BREAK_ON 0x0000
#define BREAK_OFF 0x0001
-/* CP2101_CONTROL */
+/* CP210X_(SET_MHS|GET_MDMSTS) */
#define CONTROL_DTR 0x0001
#define CONTROL_RTS 0x0002
#define CONTROL_CTS 0x0010
@@ -189,13 +212,13 @@ static struct usb_serial_driver cp2101_device = {
#define CONTROL_WRITE_RTS 0x0200
/*
- * cp2101_get_config
- * Reads from the CP2101 configuration registers
+ * cp210x_get_config
+ * Reads from the CP210x configuration registers
* 'size' is specified in bytes.
* 'data' is a pointer to a pre-allocated array of integers large
* enough to hold 'size' bytes (with 4 bytes to each integer)
*/
-static int cp2101_get_config(struct usb_serial_port *port, u8 request,
+static int cp210x_get_config(struct usb_serial_port *port, u8 request,
unsigned int *data, int size)
{
struct usb_serial *serial = port->serial;
@@ -211,9 +234,6 @@ static int cp2101_get_config(struct usb_serial_port *port, u8 request,
return -ENOMEM;
}
- /* For get requests, the request number must be incremented */
- request++;
-
/* Issue the request, attempting to read 'size' bytes */
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
request, REQTYPE_DEVICE_TO_HOST, 0x0000,
@@ -236,12 +256,12 @@ static int cp2101_get_config(struct usb_serial_port *port, u8 request,
}
/*
- * cp2101_set_config
- * Writes to the CP2101 configuration registers
+ * cp210x_set_config
+ * Writes to the CP210x configuration registers
* Values less than 16 bits wide are sent directly
* 'size' is specified in bytes.
*/
-static int cp2101_set_config(struct usb_serial_port *port, u8 request,
+static int cp210x_set_config(struct usb_serial_port *port, u8 request,
unsigned int *data, int size)
{
struct usb_serial *serial = port->serial;
@@ -292,21 +312,21 @@ static int cp2101_set_config(struct usb_serial_port *port, u8 request,
}
/*
- * cp2101_set_config_single
- * Convenience function for calling cp2101_set_config on single data values
+ * cp210x_set_config_single
+ * Convenience function for calling cp210x_set_config on single data values
* without requiring an integer pointer
*/
-static inline int cp2101_set_config_single(struct usb_serial_port *port,
+static inline int cp210x_set_config_single(struct usb_serial_port *port,
u8 request, unsigned int data)
{
- return cp2101_set_config(port, request, &data, 2);
+ return cp210x_set_config(port, request, &data, 2);
}
/*
- * cp2101_quantise_baudrate
+ * cp210x_quantise_baudrate
* Quantises the baud rate as per AN205 Table 1
*/
-static unsigned int cp2101_quantise_baudrate(unsigned int baud) {
+static unsigned int cp210x_quantise_baudrate(unsigned int baud) {
if (baud <= 56) baud = 0;
else if (baud <= 300) baud = 300;
else if (baud <= 600) baud = 600;
@@ -343,7 +363,7 @@ static unsigned int cp2101_quantise_baudrate(unsigned int baud) {
return baud;
}
-static int cp2101_open(struct tty_struct *tty, struct usb_serial_port *port,
+static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port,
struct file *filp)
{
struct usb_serial *serial = port->serial;
@@ -351,7 +371,7 @@ static int cp2101_open(struct tty_struct *tty, struct usb_serial_port *port,
dbg("%s - port %d", __func__, port->number);
- if (cp2101_set_config_single(port, CP2101_UART, UART_ENABLE)) {
+ if (cp210x_set_config_single(port, CP210X_IFC_ENABLE, UART_ENABLE)) {
dev_err(&port->dev, "%s - Unable to enable UART\n",
__func__);
return -EPROTO;
@@ -373,17 +393,17 @@ static int cp2101_open(struct tty_struct *tty, struct usb_serial_port *port,
}
/* Configure the termios structure */
- cp2101_get_termios(tty, port);
+ cp210x_get_termios(tty, port);
/* Set the DTR and RTS pins low */
- cp2101_tiocmset_port(tty ? (struct usb_serial_port *) tty->driver_data
+ cp210x_tiocmset_port(tty ? (struct usb_serial_port *) tty->driver_data
: port,
NULL, TIOCM_DTR | TIOCM_RTS, 0);
return 0;
}
-static void cp2101_cleanup(struct usb_serial_port *port)
+static void cp210x_cleanup(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
@@ -398,8 +418,7 @@ static void cp2101_cleanup(struct usb_serial_port *port)
}
}
-static void cp2101_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp)
+static void cp210x_close(struct usb_serial_port *port)
{
dbg("%s - port %d", __func__, port->number);
@@ -410,23 +429,23 @@ static void cp2101_close(struct tty_struct *tty, struct usb_serial_port *port,
mutex_lock(&port->serial->disc_mutex);
if (!port->serial->disconnected)
- cp2101_set_config_single(port, CP2101_UART, UART_DISABLE);
+ cp210x_set_config_single(port, CP210X_IFC_ENABLE, UART_DISABLE);
mutex_unlock(&port->serial->disc_mutex);
}
/*
- * cp2101_get_termios
+ * cp210x_get_termios
* Reads the baud rate, data bits, parity, stop bits and flow control mode
* from the device, corrects any unsupported values, and configures the
* termios structure to reflect the state of the device
*/
-static void cp2101_get_termios(struct tty_struct *tty,
+static void cp210x_get_termios(struct tty_struct *tty,
struct usb_serial_port *port)
{
unsigned int baud;
if (tty) {
- cp2101_get_termios_port(tty->driver_data,
+ cp210x_get_termios_port(tty->driver_data,
&tty->termios->c_cflag, &baud);
tty_encode_baud_rate(tty, baud, baud);
}
@@ -434,15 +453,15 @@ static void cp2101_get_termios(struct tty_struct *tty,
else {
unsigned int cflag;
cflag = 0;
- cp2101_get_termios_port(port, &cflag, &baud);
+ cp210x_get_termios_port(port, &cflag, &baud);
}
}
/*
- * cp2101_get_termios_port
- * This is the heart of cp2101_get_termios which always uses a &usb_serial_port.
+ * cp210x_get_termios_port
+ * This is the heart of cp210x_get_termios which always uses a &usb_serial_port.
*/
-static void cp2101_get_termios_port(struct usb_serial_port *port,
+static void cp210x_get_termios_port(struct usb_serial_port *port,
unsigned int *cflagp, unsigned int *baudp)
{
unsigned int cflag, modem_ctl[4];
@@ -451,17 +470,17 @@ static void cp2101_get_termios_port(struct usb_serial_port *port,
dbg("%s - port %d", __func__, port->number);
- cp2101_get_config(port, CP2101_BAUDRATE, &baud, 2);
+ cp210x_get_config(port, CP210X_GET_BAUDDIV, &baud, 2);
/* Convert to baudrate */
if (baud)
- baud = cp2101_quantise_baudrate((BAUD_RATE_GEN_FREQ + baud/2)/ baud);
+ baud = cp210x_quantise_baudrate((BAUD_RATE_GEN_FREQ + baud/2)/ baud);
dbg("%s - baud rate = %d", __func__, baud);
*baudp = baud;
cflag = *cflagp;
- cp2101_get_config(port, CP2101_BITS, &bits, 2);
+ cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2);
cflag &= ~CSIZE;
switch (bits & BITS_DATA_MASK) {
case BITS_DATA_5:
@@ -486,14 +505,14 @@ static void cp2101_get_termios_port(struct usb_serial_port *port,
cflag |= CS8;
bits &= ~BITS_DATA_MASK;
bits |= BITS_DATA_8;
- cp2101_set_config(port, CP2101_BITS, &bits, 2);
+ cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
break;
default:
dbg("%s - Unknown number of data bits, using 8", __func__);
cflag |= CS8;
bits &= ~BITS_DATA_MASK;
bits |= BITS_DATA_8;
- cp2101_set_config(port, CP2101_BITS, &bits, 2);
+ cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
break;
}
@@ -516,20 +535,20 @@ static void cp2101_get_termios_port(struct usb_serial_port *port,
__func__);
cflag &= ~PARENB;
bits &= ~BITS_PARITY_MASK;
- cp2101_set_config(port, CP2101_BITS, &bits, 2);
+ cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
break;
case BITS_PARITY_SPACE:
dbg("%s - parity = SPACE (not supported, disabling parity)",
__func__);
cflag &= ~PARENB;
bits &= ~BITS_PARITY_MASK;
- cp2101_set_config(port, CP2101_BITS, &bits, 2);
+ cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
break;
default:
dbg("%s - Unknown parity mode, disabling parity", __func__);
cflag &= ~PARENB;
bits &= ~BITS_PARITY_MASK;
- cp2101_set_config(port, CP2101_BITS, &bits, 2);
+ cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
break;
}
@@ -542,7 +561,7 @@ static void cp2101_get_termios_port(struct usb_serial_port *port,
dbg("%s - stop bits = 1.5 (not supported, using 1 stop bit)",
__func__);
bits &= ~BITS_STOP_MASK;
- cp2101_set_config(port, CP2101_BITS, &bits, 2);
+ cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
break;
case BITS_STOP_2:
dbg("%s - stop bits = 2", __func__);
@@ -552,11 +571,11 @@ static void cp2101_get_termios_port(struct usb_serial_port *port,
dbg("%s - Unknown number of stop bits, using 1 stop bit",
__func__);
bits &= ~BITS_STOP_MASK;
- cp2101_set_config(port, CP2101_BITS, &bits, 2);
+ cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
break;
}
- cp2101_get_config(port, CP2101_MODEMCTL, modem_ctl, 16);
+ cp210x_get_config(port, CP210X_GET_FLOW, modem_ctl, 16);
if (modem_ctl[0] & 0x0008) {
dbg("%s - flow control = CRTSCTS", __func__);
cflag |= CRTSCTS;
@@ -568,7 +587,7 @@ static void cp2101_get_termios_port(struct usb_serial_port *port,
*cflagp = cflag;
}
-static void cp2101_set_termios(struct tty_struct *tty,
+static void cp210x_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
unsigned int cflag, old_cflag;
@@ -583,13 +602,13 @@ static void cp2101_set_termios(struct tty_struct *tty,
tty->termios->c_cflag &= ~CMSPAR;
cflag = tty->termios->c_cflag;
old_cflag = old_termios->c_cflag;
- baud = cp2101_quantise_baudrate(tty_get_baud_rate(tty));
+ baud = cp210x_quantise_baudrate(tty_get_baud_rate(tty));
/* If the baud rate is to be updated */
if (baud != tty_termios_baud_rate(old_termios) && baud != 0) {
dbg("%s - Setting baud rate to %d baud", __func__,
baud);
- if (cp2101_set_config_single(port, CP2101_BAUDRATE,
+ if (cp210x_set_config_single(port, CP210X_SET_BAUDDIV,
((BAUD_RATE_GEN_FREQ + baud/2) / baud))) {
dbg("Baud rate requested not supported by device\n");
baud = tty_termios_baud_rate(old_termios);
@@ -600,7 +619,7 @@ static void cp2101_set_termios(struct tty_struct *tty,
/* If the number of data bits is to be updated */
if ((cflag & CSIZE) != (old_cflag & CSIZE)) {
- cp2101_get_config(port, CP2101_BITS, &bits, 2);
+ cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2);
bits &= ~BITS_DATA_MASK;
switch (cflag & CSIZE) {
case CS5:
@@ -624,19 +643,19 @@ static void cp2101_set_termios(struct tty_struct *tty,
dbg("%s - data bits = 9", __func__);
break;*/
default:
- dbg("cp2101 driver does not "
+ dbg("cp210x driver does not "
"support the number of bits requested,"
" using 8 bit mode\n");
bits |= BITS_DATA_8;
break;
}
- if (cp2101_set_config(port, CP2101_BITS, &bits, 2))
+ if (cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2))
dbg("Number of data bits requested "
"not supported by device\n");
}
if ((cflag & (PARENB|PARODD)) != (old_cflag & (PARENB|PARODD))) {
- cp2101_get_config(port, CP2101_BITS, &bits, 2);
+ cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2);
bits &= ~BITS_PARITY_MASK;
if (cflag & PARENB) {
if (cflag & PARODD) {
@@ -647,13 +666,13 @@ static void cp2101_set_termios(struct tty_struct *tty,
dbg("%s - parity = EVEN", __func__);
}
}
- if (cp2101_set_config(port, CP2101_BITS, &bits, 2))
+ if (cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2))
dbg("Parity mode not supported "
"by device\n");
}
if ((cflag & CSTOPB) != (old_cflag & CSTOPB)) {
- cp2101_get_config(port, CP2101_BITS, &bits, 2);
+ cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2);
bits &= ~BITS_STOP_MASK;
if (cflag & CSTOPB) {
bits |= BITS_STOP_2;
@@ -662,13 +681,13 @@ static void cp2101_set_termios(struct tty_struct *tty,
bits |= BITS_STOP_1;
dbg("%s - stop bits = 1", __func__);
}
- if (cp2101_set_config(port, CP2101_BITS, &bits, 2))
+ if (cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2))
dbg("Number of stop bits requested "
"not supported by device\n");
}
if ((cflag & CRTSCTS) != (old_cflag & CRTSCTS)) {
- cp2101_get_config(port, CP2101_MODEMCTL, modem_ctl, 16);
+ cp210x_get_config(port, CP210X_GET_FLOW, modem_ctl, 16);
dbg("%s - read modem controls = 0x%.4x 0x%.4x 0x%.4x 0x%.4x",
__func__, modem_ctl[0], modem_ctl[1],
modem_ctl[2], modem_ctl[3]);
@@ -688,19 +707,19 @@ static void cp2101_set_termios(struct tty_struct *tty,
dbg("%s - write modem controls = 0x%.4x 0x%.4x 0x%.4x 0x%.4x",
__func__, modem_ctl[0], modem_ctl[1],
modem_ctl[2], modem_ctl[3]);
- cp2101_set_config(port, CP2101_MODEMCTL, modem_ctl, 16);
+ cp210x_set_config(port, CP210X_SET_FLOW, modem_ctl, 16);
}
}
-static int cp2101_tiocmset (struct tty_struct *tty, struct file *file,
+static int cp210x_tiocmset (struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
- return cp2101_tiocmset_port(port, file, set, clear);
+ return cp210x_tiocmset_port(port, file, set, clear);
}
-static int cp2101_tiocmset_port(struct usb_serial_port *port, struct file *file,
+static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *file,
unsigned int set, unsigned int clear)
{
unsigned int control = 0;
@@ -726,10 +745,10 @@ static int cp2101_tiocmset_port(struct usb_serial_port *port, struct file *file,
dbg("%s - control = 0x%.4x", __func__, control);
- return cp2101_set_config(port, CP2101_CONTROL, &control, 2);
+ return cp210x_set_config(port, CP210X_SET_MHS, &control, 2);
}
-static int cp2101_tiocmget (struct tty_struct *tty, struct file *file)
+static int cp210x_tiocmget (struct tty_struct *tty, struct file *file)
{
struct usb_serial_port *port = tty->driver_data;
unsigned int control;
@@ -737,7 +756,7 @@ static int cp2101_tiocmget (struct tty_struct *tty, struct file *file)
dbg("%s - port %d", __func__, port->number);
- cp2101_get_config(port, CP2101_CONTROL, &control, 1);
+ cp210x_get_config(port, CP210X_GET_MDMSTS, &control, 1);
result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0)
|((control & CONTROL_RTS) ? TIOCM_RTS : 0)
@@ -751,7 +770,7 @@ static int cp2101_tiocmget (struct tty_struct *tty, struct file *file)
return result;
}
-static void cp2101_break_ctl (struct tty_struct *tty, int break_state)
+static void cp210x_break_ctl (struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
unsigned int state;
@@ -763,17 +782,17 @@ static void cp2101_break_ctl (struct tty_struct *tty, int break_state)
state = BREAK_ON;
dbg("%s - turning break %s", __func__,
state == BREAK_OFF ? "off" : "on");
- cp2101_set_config(port, CP2101_BREAK, &state, 2);
+ cp210x_set_config(port, CP210X_SET_BREAK, &state, 2);
}
-static int cp2101_startup(struct usb_serial *serial)
+static int cp210x_startup(struct usb_serial *serial)
{
- /* CP2101 buffers behave strangely unless device is reset */
+ /* cp210x buffers behave strangely unless device is reset */
usb_reset_device(serial->dev);
return 0;
}
-static void cp2101_shutdown(struct usb_serial *serial)
+static void cp210x_shutdown(struct usb_serial *serial)
{
int i;
@@ -781,21 +800,21 @@ static void cp2101_shutdown(struct usb_serial *serial)
/* Stop reads and writes on all ports */
for (i = 0; i < serial->num_ports; ++i)
- cp2101_cleanup(serial->port[i]);
+ cp210x_cleanup(serial->port[i]);
}
-static int __init cp2101_init(void)
+static int __init cp210x_init(void)
{
int retval;
- retval = usb_serial_register(&cp2101_device);
+ retval = usb_serial_register(&cp210x_device);
if (retval)
return retval; /* Failed to register */
- retval = usb_register(&cp2101_driver);
+ retval = usb_register(&cp210x_driver);
if (retval) {
/* Failed to register */
- usb_serial_deregister(&cp2101_device);
+ usb_serial_deregister(&cp210x_device);
return retval;
}
@@ -805,14 +824,14 @@ static int __init cp2101_init(void)
return 0;
}
-static void __exit cp2101_exit(void)
+static void __exit cp210x_exit(void)
{
- usb_deregister(&cp2101_driver);
- usb_serial_deregister(&cp2101_device);
+ usb_deregister(&cp210x_driver);
+ usb_serial_deregister(&cp210x_device);
}
-module_init(cp2101_init);
-module_exit(cp2101_exit);
+module_init(cp210x_init);
+module_exit(cp210x_exit);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_VERSION(DRIVER_VERSION);
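A quick note on the divisor arithmetic the cp210x hunks above keep repeating: the device stores a baud-rate divisor, and the driver converts in both directions with the same round-to-nearest integer division. Below is a minimal userspace sketch of that conversion, assuming BAUD_RATE_GEN_FREQ is 0x384000 (3,686,400 Hz) as defined elsewhere in this driver; the helper names are illustrative, not the driver's own.

#include <stdio.h>

#define BAUD_RATE_GEN_FREQ 0x384000	/* 3,686,400 Hz; assumed, defined elsewhere in the driver */

/* Round-to-nearest divisor for a requested baud rate, as in cp210x_set_termios() */
static unsigned int baud_to_divisor(unsigned int baud)
{
	return (BAUD_RATE_GEN_FREQ + baud / 2) / baud;
}

/* Inverse conversion, as in cp210x_get_termios_port() */
static unsigned int divisor_to_baud(unsigned int div)
{
	return (BAUD_RATE_GEN_FREQ + div / 2) / div;
}

int main(void)
{
	/* 115200 baud maps to divisor 32, and divisor 32 reads back as 115200 */
	printf("div(115200) = %u\n", baud_to_divisor(115200));
	printf("baud(32)    = %u\n", divisor_to_baud(32));
	return 0;
}

The 115200 -> 32 -> 115200 round trip is exactly what cp210x_get_termios_port() performs when it reads CP210X_GET_BAUDDIV back from the device.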
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index dd501bb63ed..933ba913e66 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -61,8 +61,7 @@ static int cyberjack_startup(struct usb_serial *serial);
static void cyberjack_shutdown(struct usb_serial *serial);
static int cyberjack_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
-static void cyberjack_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp);
+static void cyberjack_close(struct usb_serial_port *port);
static int cyberjack_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count);
static int cyberjack_write_room(struct tty_struct *tty);
@@ -185,8 +184,7 @@ static int cyberjack_open(struct tty_struct *tty,
return result;
}
-static void cyberjack_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void cyberjack_close(struct usb_serial_port *port)
{
dbg("%s - port %d", __func__, port->number);
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index e568710b263..669f9384853 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -174,8 +174,8 @@ static int cypress_ca42v2_startup(struct usb_serial *serial);
static void cypress_shutdown(struct usb_serial *serial);
static int cypress_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
-static void cypress_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp);
+static void cypress_close(struct usb_serial_port *port);
+static void cypress_dtr_rts(struct usb_serial_port *port, int on);
static int cypress_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static void cypress_send(struct usb_serial_port *port);
@@ -218,6 +218,7 @@ static struct usb_serial_driver cypress_earthmate_device = {
.shutdown = cypress_shutdown,
.open = cypress_open,
.close = cypress_close,
+ .dtr_rts = cypress_dtr_rts,
.write = cypress_write,
.write_room = cypress_write_room,
.ioctl = cypress_ioctl,
@@ -244,6 +245,7 @@ static struct usb_serial_driver cypress_hidcom_device = {
.shutdown = cypress_shutdown,
.open = cypress_open,
.close = cypress_close,
+ .dtr_rts = cypress_dtr_rts,
.write = cypress_write,
.write_room = cypress_write_room,
.ioctl = cypress_ioctl,
@@ -270,6 +272,7 @@ static struct usb_serial_driver cypress_ca42v2_device = {
.shutdown = cypress_shutdown,
.open = cypress_open,
.close = cypress_close,
+ .dtr_rts = cypress_dtr_rts,
.write = cypress_write,
.write_room = cypress_write_room,
.ioctl = cypress_ioctl,
@@ -656,11 +659,7 @@ static int cypress_open(struct tty_struct *tty,
priv->rx_flags = 0;
spin_unlock_irqrestore(&priv->lock, flags);
- /* raise both lines and set termios */
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_control = CONTROL_DTR | CONTROL_RTS;
- priv->cmd_ctrl = 1;
- spin_unlock_irqrestore(&priv->lock, flags);
+ /* Set termios */
result = cypress_write(tty, port, NULL, 0);
if (result) {
@@ -694,76 +693,42 @@ static int cypress_open(struct tty_struct *tty,
__func__, result);
cypress_set_dead(port);
}
-
+ port->port.drain_delay = 256;
return result;
} /* cypress_open */
+static void cypress_dtr_rts(struct usb_serial_port *port, int on)
+{
+ struct cypress_private *priv = usb_get_serial_port_data(port);
+ /* raise or drop DTR and RTS depending on 'on' */
+ spin_lock_irq(&priv->lock);
+ if (on == 0)
+ priv->line_control = 0;
+ else
+ priv->line_control = CONTROL_DTR | CONTROL_RTS;
+ priv->cmd_ctrl = 1;
+ spin_unlock_irq(&priv->lock);
+ cypress_write(NULL, port, NULL, 0);
+}
-static void cypress_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void cypress_close(struct usb_serial_port *port)
{
struct cypress_private *priv = usb_get_serial_port_data(port);
- unsigned int c_cflag;
- int bps;
- long timeout;
- wait_queue_t wait;
dbg("%s - port %d", __func__, port->number);
- /* wait for data to drain from buffer */
- spin_lock_irq(&priv->lock);
- timeout = CYPRESS_CLOSING_WAIT;
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&tty->write_wait, &wait);
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (cypress_buf_data_avail(priv->buf) == 0
- || timeout == 0 || signal_pending(current)
- /* without mutex, allowed due to harmless failure mode */
- || port->serial->disconnected)
- break;
- spin_unlock_irq(&priv->lock);
- timeout = schedule_timeout(timeout);
- spin_lock_irq(&priv->lock);
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&tty->write_wait, &wait);
- /* clear out any remaining data in the buffer */
- cypress_buf_clear(priv->buf);
- spin_unlock_irq(&priv->lock);
-
/* writing is potentially harmful, lock must be taken */
mutex_lock(&port->serial->disc_mutex);
if (port->serial->disconnected) {
mutex_unlock(&port->serial->disc_mutex);
return;
}
- /* wait for characters to drain from device */
- if (tty) {
- bps = tty_get_baud_rate(tty);
- if (bps > 1200)
- timeout = max((HZ * 2560) / bps, HZ / 10);
- else
- timeout = 2 * HZ;
- schedule_timeout_interruptible(timeout);
- }
-
+ cypress_buf_clear(priv->buf);
dbg("%s - stopping urbs", __func__);
usb_kill_urb(port->interrupt_in_urb);
usb_kill_urb(port->interrupt_out_urb);
- if (tty) {
- c_cflag = tty->termios->c_cflag;
- if (c_cflag & HUPCL) {
- /* drop dtr and rts */
- priv = usb_get_serial_port_data(port);
- spin_lock_irq(&priv->lock);
- priv->line_control = 0;
- priv->cmd_ctrl = 1;
- spin_unlock_irq(&priv->lock);
- cypress_write(tty, port, NULL, 0);
- }
- }
if (stats)
dev_info(&port->dev, "Statistics: %d Bytes In | %d Bytes Out | %d Commands Issued\n",
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 38ba4ea8b6b..30f5140eff0 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -422,7 +422,6 @@ struct digi_port {
int dp_throttled;
int dp_throttle_restart;
wait_queue_head_t dp_flush_wait;
- int dp_in_close; /* close in progress */
wait_queue_head_t dp_close_wait; /* wait queue for close */
struct work_struct dp_wakeup_work;
struct usb_serial_port *dp_port;
@@ -456,8 +455,9 @@ static int digi_write_room(struct tty_struct *tty);
static int digi_chars_in_buffer(struct tty_struct *tty);
static int digi_open(struct tty_struct *tty, struct usb_serial_port *port,
struct file *filp);
-static void digi_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp);
+static void digi_close(struct usb_serial_port *port);
+static int digi_carrier_raised(struct usb_serial_port *port);
+static void digi_dtr_rts(struct usb_serial_port *port, int on);
static int digi_startup_device(struct usb_serial *serial);
static int digi_startup(struct usb_serial *serial);
static void digi_shutdown(struct usb_serial *serial);
@@ -510,6 +510,8 @@ static struct usb_serial_driver digi_acceleport_2_device = {
.num_ports = 3,
.open = digi_open,
.close = digi_close,
+ .dtr_rts = digi_dtr_rts,
+ .carrier_raised = digi_carrier_raised,
.write = digi_write,
.write_room = digi_write_room,
.write_bulk_callback = digi_write_bulk_callback,
@@ -1328,6 +1330,19 @@ static int digi_chars_in_buffer(struct tty_struct *tty)
}
+static void digi_dtr_rts(struct usb_serial_port *port, int on)
+{
+ /* Adjust DTR and RTS */
+ digi_set_modem_signals(port, on * (TIOCM_DTR|TIOCM_RTS), 1);
+}
+
+static int digi_carrier_raised(struct usb_serial_port *port)
+{
+ struct digi_port *priv = usb_get_serial_port_data(port);
+ if (priv->dp_modem_signals & TIOCM_CD)
+ return 1;
+ return 0;
+}
static int digi_open(struct tty_struct *tty, struct usb_serial_port *port,
struct file *filp)
@@ -1336,7 +1351,6 @@ static int digi_open(struct tty_struct *tty, struct usb_serial_port *port,
unsigned char buf[32];
struct digi_port *priv = usb_get_serial_port_data(port);
struct ktermios not_termios;
- unsigned long flags = 0;
dbg("digi_open: TOP: port=%d, open_count=%d",
priv->dp_port_num, port->port.count);
@@ -1345,26 +1359,6 @@ static int digi_open(struct tty_struct *tty, struct usb_serial_port *port,
if (digi_startup_device(port->serial) != 0)
return -ENXIO;
- spin_lock_irqsave(&priv->dp_port_lock, flags);
-
- /* don't wait on a close in progress for non-blocking opens */
- if (priv->dp_in_close && (filp->f_flags&(O_NDELAY|O_NONBLOCK)) == 0) {
- spin_unlock_irqrestore(&priv->dp_port_lock, flags);
- return -EAGAIN;
- }
-
- /* wait for a close in progress to finish */
- while (priv->dp_in_close) {
- cond_wait_interruptible_timeout_irqrestore(
- &priv->dp_close_wait, DIGI_RETRY_TIMEOUT,
- &priv->dp_port_lock, flags);
- if (signal_pending(current))
- return -EINTR;
- spin_lock_irqsave(&priv->dp_port_lock, flags);
- }
-
- spin_unlock_irqrestore(&priv->dp_port_lock, flags);
-
/* read modem signals automatically whenever they change */
buf[0] = DIGI_CMD_READ_INPUT_SIGNALS;
buf[1] = priv->dp_port_num;
@@ -1387,16 +1381,11 @@ static int digi_open(struct tty_struct *tty, struct usb_serial_port *port,
not_termios.c_iflag = ~tty->termios->c_iflag;
digi_set_termios(tty, port, &not_termios);
}
-
- /* set DTR and RTS */
- digi_set_modem_signals(port, TIOCM_DTR|TIOCM_RTS, 1);
-
return 0;
}
-static void digi_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp)
+static void digi_close(struct usb_serial_port *port)
{
DEFINE_WAIT(wait);
int ret;
@@ -1411,28 +1400,9 @@ static void digi_close(struct tty_struct *tty, struct usb_serial_port *port,
if (port->serial->disconnected)
goto exit;
- /* do cleanup only after final close on this port */
- spin_lock_irq(&priv->dp_port_lock);
- priv->dp_in_close = 1;
- spin_unlock_irq(&priv->dp_port_lock);
-
- /* tell line discipline to process only XON/XOFF */
- tty->closing = 1;
-
- /* wait for output to drain */
- if ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) == 0)
- tty_wait_until_sent(tty, DIGI_CLOSE_TIMEOUT);
-
- /* flush driver and line discipline buffers */
- tty_driver_flush_buffer(tty);
- tty_ldisc_flush(tty);
-
if (port->serial->dev) {
- /* wait for transmit idle */
- if ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) == 0)
- digi_transmit_idle(port, DIGI_CLOSE_TIMEOUT);
- /* drop DTR and RTS */
- digi_set_modem_signals(port, 0, 0);
+ /* FIXME: Transmit idle belongs in the wait_until_sent path */
+ digi_transmit_idle(port, DIGI_CLOSE_TIMEOUT);
/* disable input flow control */
buf[0] = DIGI_CMD_SET_INPUT_FLOW_CONTROL;
@@ -1477,11 +1447,9 @@ static void digi_close(struct tty_struct *tty, struct usb_serial_port *port,
/* shutdown any outstanding bulk writes */
usb_kill_urb(port->write_urb);
}
- tty->closing = 0;
exit:
spin_lock_irq(&priv->dp_port_lock);
priv->dp_write_urb_in_use = 0;
- priv->dp_in_close = 0;
wake_up_interruptible(&priv->dp_close_wait);
spin_unlock_irq(&priv->dp_port_lock);
mutex_unlock(&port->serial->disc_mutex);
@@ -1560,7 +1528,6 @@ static int digi_startup(struct usb_serial *serial)
priv->dp_throttled = 0;
priv->dp_throttle_restart = 0;
init_waitqueue_head(&priv->dp_flush_wait);
- priv->dp_in_close = 0;
init_waitqueue_head(&priv->dp_close_wait);
INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
priv->dp_port = serial->port[i];
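The digi_acceleport changes above show the full shape of the conversion this series applies everywhere: close() loses its tty and file arguments, and DTR/RTS plus carrier handling move into two new usb_serial_driver hooks. The sketch below shows what a converted driver ends up looking like; the foo_* driver and its helpers (foo_set_modem_signals, foo_dcd_asserted) are hypothetical placeholders, and only the callback signatures and the meaning of the 'on' argument are taken from the diffs themselves.

/* Sketch only: the foo_* driver and its helpers are hypothetical. */
#include <linux/tty.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

static void foo_dtr_rts(struct usb_serial_port *port, int on)
{
	/* on != 0: raise DTR and RTS (open); on == 0: drop them (hangup/close) */
	foo_set_modem_signals(port, on ? (TIOCM_DTR | TIOCM_RTS) : 0);
}

static int foo_carrier_raised(struct usb_serial_port *port)
{
	/* return 1 while DCD is asserted so blocking opens can complete */
	return foo_dcd_asserted(port) ? 1 : 0;
}

static void foo_close(struct usb_serial_port *port)
{
	/* new prototype: no tty or file pointer; just quiesce the hardware */
	usb_kill_urb(port->read_urb);
	usb_kill_urb(port->write_urb);
}

static struct usb_serial_driver foo_device = {
	/* ...id table, open, write, etc. as before... */
	.close		= foo_close,
	.dtr_rts	= foo_dtr_rts,
	.carrier_raised	= foo_carrier_raised,
};

The 'on * (TIOCM_DTR|TIOCM_RTS)' form used in the digi hunk is equivalent to the ternary used here.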
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index c709ec474a8..2b141ccb0cd 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -81,8 +81,7 @@ static int debug;
/* function prototypes for an empeg-car player */
static int empeg_open(struct tty_struct *tty, struct usb_serial_port *port,
struct file *filp);
-static void empeg_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp);
+static void empeg_close(struct usb_serial_port *port);
static int empeg_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf,
int count);
@@ -181,8 +180,7 @@ static int empeg_open(struct tty_struct *tty, struct usb_serial_port *port,
}
-static void empeg_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp)
+static void empeg_close(struct usb_serial_port *port)
{
dbg("%s - port %d", __func__, port->number);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index d9fcdaedf38..683304d6061 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -89,6 +89,7 @@ struct ftdi_private {
int force_rtscts; /* if non-zero, force RTS-CTS to always
be enabled */
+ unsigned int latency; /* latency setting in use */
spinlock_t tx_lock; /* spinlock for transmit state */
unsigned long tx_bytes;
unsigned long tx_outstanding_bytes;
@@ -719,8 +720,8 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port);
static int ftdi_sio_port_remove(struct usb_serial_port *port);
static int ftdi_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
-static void ftdi_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp);
+static void ftdi_close(struct usb_serial_port *port);
+static void ftdi_dtr_rts(struct usb_serial_port *port, int on);
static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static int ftdi_write_room(struct tty_struct *tty);
@@ -758,6 +759,7 @@ static struct usb_serial_driver ftdi_sio_device = {
.port_remove = ftdi_sio_port_remove,
.open = ftdi_open,
.close = ftdi_close,
+ .dtr_rts = ftdi_dtr_rts,
.throttle = ftdi_throttle,
.unthrottle = ftdi_unthrottle,
.write = ftdi_write,
@@ -1037,7 +1039,54 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
return rv;
}
+static int write_latency_timer(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ struct usb_device *udev = port->serial->dev;
+ char buf[1];
+ int rv = 0;
+ int l = priv->latency;
+
+ if (priv->flags & ASYNC_LOW_LATENCY)
+ l = 1;
+
+ dbg("%s: setting latency timer = %i", __func__, l);
+
+ rv = usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ FTDI_SIO_SET_LATENCY_TIMER_REQUEST,
+ FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE,
+ l, priv->interface,
+ buf, 0, WDR_TIMEOUT);
+
+ if (rv < 0)
+ dev_err(&port->dev, "Unable to write latency timer: %i\n", rv);
+ return rv;
+}
+
+static int read_latency_timer(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ struct usb_device *udev = port->serial->dev;
+ unsigned short latency = 0;
+ int rv = 0;
+
+ dbg("%s", __func__);
+
+ rv = usb_control_msg(udev,
+ usb_rcvctrlpipe(udev, 0),
+ FTDI_SIO_GET_LATENCY_TIMER_REQUEST,
+ FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE,
+ 0, priv->interface,
+ (char *) &latency, 1, WDR_TIMEOUT);
+
+ if (rv < 0) {
+ dev_err(&port->dev, "Unable to read latency timer: %i\n", rv);
+ return -EIO;
+ }
+ return latency;
+}
static int get_serial_info(struct usb_serial_port *port,
struct serial_struct __user *retinfo)
@@ -1097,6 +1146,7 @@ static int set_serial_info(struct tty_struct *tty,
priv->custom_divisor = new_serial.custom_divisor;
tty->low_latency = (priv->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ write_latency_timer(port);
check_and_exit:
if ((old_priv.flags & ASYNC_SPD_MASK) !=
@@ -1192,27 +1242,13 @@ static ssize_t show_latency_timer(struct device *dev,
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct ftdi_private *priv = usb_get_serial_port_data(port);
- struct usb_device *udev = port->serial->dev;
- unsigned short latency = 0;
- int rv = 0;
-
-
- dbg("%s", __func__);
-
- rv = usb_control_msg(udev,
- usb_rcvctrlpipe(udev, 0),
- FTDI_SIO_GET_LATENCY_TIMER_REQUEST,
- FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE,
- 0, priv->interface,
- (char *) &latency, 1, WDR_TIMEOUT);
-
- if (rv < 0) {
- dev_err(dev, "Unable to read latency timer: %i\n", rv);
- return -EIO;
- }
- return sprintf(buf, "%i\n", latency);
+ if (priv->flags & ASYNC_LOW_LATENCY)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "%i\n", priv->latency);
}
+
/* Write a new value of the latency timer, in units of milliseconds. */
static ssize_t store_latency_timer(struct device *dev,
struct device_attribute *attr, const char *valbuf,
@@ -1220,25 +1256,13 @@ static ssize_t store_latency_timer(struct device *dev,
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct ftdi_private *priv = usb_get_serial_port_data(port);
- struct usb_device *udev = port->serial->dev;
- char buf[1];
int v = simple_strtoul(valbuf, NULL, 10);
int rv = 0;
- dbg("%s: setting latency timer = %i", __func__, v);
-
- rv = usb_control_msg(udev,
- usb_sndctrlpipe(udev, 0),
- FTDI_SIO_SET_LATENCY_TIMER_REQUEST,
- FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE,
- v, priv->interface,
- buf, 0, WDR_TIMEOUT);
-
- if (rv < 0) {
- dev_err(dev, "Unable to write latency timer: %i\n", rv);
+ priv->latency = v;
+ rv = write_latency_timer(port);
+ if (rv < 0)
return -EIO;
- }
-
return count;
}
@@ -1392,6 +1416,7 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
usb_set_serial_port_data(port, priv);
ftdi_determine_type(port);
+ read_latency_timer(port);
create_sysfs_attrs(port);
return 0;
}
@@ -1514,6 +1539,8 @@ static int ftdi_open(struct tty_struct *tty,
if (tty)
tty->low_latency = (priv->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ write_latency_timer(port);
+
/* No error checking for this (will get errors later anyway) */
/* See ftdi_sio.h for description of what is reset */
usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
@@ -1529,11 +1556,6 @@ static int ftdi_open(struct tty_struct *tty,
if (tty)
ftdi_set_termios(tty, port, tty->termios);
- /* FIXME: Flow control might be enabled, so it should be checked -
- we have no control of defaults! */
- /* Turn on RTS and DTR since we are not flow controlling by default */
- set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
-
/* Not throttled */
spin_lock_irqsave(&priv->rx_lock, flags);
priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED);
@@ -1558,6 +1580,30 @@ static int ftdi_open(struct tty_struct *tty,
} /* ftdi_open */
+static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ char buf[1];
+
+ mutex_lock(&port->serial->disc_mutex);
+ if (!port->serial->disconnected) {
+ /* Disable flow control */
+ if (!on && usb_control_msg(port->serial->dev,
+ usb_sndctrlpipe(port->serial->dev, 0),
+ FTDI_SIO_SET_FLOW_CTRL_REQUEST,
+ FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
+ 0, priv->interface, buf, 0,
+ WDR_TIMEOUT) < 0) {
+ dev_err(&port->dev, "error from flowcontrol urb\n");
+ }
+ /* raise or drop RTS and DTR depending on 'on' */
+ if (on)
+ set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+ else
+ clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+ }
+ mutex_unlock(&port->serial->disc_mutex);
+}
/*
* usbserial:__serial_close only calls ftdi_close if the port is open
@@ -1567,31 +1613,12 @@ static int ftdi_open(struct tty_struct *tty,
*
*/
-static void ftdi_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void ftdi_close(struct usb_serial_port *port)
{ /* ftdi_close */
- unsigned int c_cflag = tty->termios->c_cflag;
struct ftdi_private *priv = usb_get_serial_port_data(port);
- char buf[1];
dbg("%s", __func__);
- mutex_lock(&port->serial->disc_mutex);
- if (c_cflag & HUPCL && !port->serial->disconnected) {
- /* Disable flow control */
- if (usb_control_msg(port->serial->dev,
- usb_sndctrlpipe(port->serial->dev, 0),
- FTDI_SIO_SET_FLOW_CTRL_REQUEST,
- FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
- 0, priv->interface, buf, 0,
- WDR_TIMEOUT) < 0) {
- dev_err(&port->dev, "error from flowcontrol urb\n");
- }
-
- /* drop RTS and DTR */
- clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
- } /* Note change no line if hupcl is off */
- mutex_unlock(&port->serial->disc_mutex);
/* cancel any scheduled reading */
cancel_delayed_work_sync(&priv->rx_work);
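What the ftdi_sio hunks above rely on, but do not show, is that the HUPCL check and the final drop of DTR/RTS are now the core's job: it is expected to call the driver's dtr_rts() hook with on = 0 once the last user closes a port with HUPCL set. The snippet below is only a hypothetical illustration of that calling order, not the real usb-serial core code.

/* Hypothetical illustration of the core-side ordering; not the real usb-serial core. */
#include <linux/tty.h>
#include <linux/usb/serial.h>

static void example_final_close(struct usb_serial_driver *drv,
				struct usb_serial_port *port,
				struct tty_struct *tty)
{
	/* driver-specific close: stop URBs, flush internal buffers */
	drv->close(port);

	/* hangup-on-close policy now lives here, not in every driver */
	if (tty && (tty->termios->c_cflag & HUPCL) && drv->dtr_rts)
		drv->dtr_rts(port, 0);
}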
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 586d30ff450..ee25a3fe3b0 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -993,8 +993,7 @@ static int garmin_open(struct tty_struct *tty,
}
-static void garmin_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void garmin_close(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 4cec9906ccf..be82ea95672 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -184,8 +184,7 @@ int usb_serial_generic_resume(struct usb_serial *serial)
}
EXPORT_SYMBOL_GPL(usb_serial_generic_resume);
-void usb_serial_generic_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+void usb_serial_generic_close(struct usb_serial_port *port)
{
dbg("%s - port %d", __func__, port->number);
generic_cleanup(port);
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index fb4a73d090f..53ef5996e33 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -207,8 +207,7 @@ static void edge_bulk_out_cmd_callback(struct urb *urb);
/* function prototypes for the usbserial callbacks */
static int edge_open(struct tty_struct *tty, struct usb_serial_port *port,
struct file *filp);
-static void edge_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp);
+static void edge_close(struct usb_serial_port *port);
static int edge_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static int edge_write_room(struct tty_struct *tty);
@@ -965,7 +964,7 @@ static int edge_open(struct tty_struct *tty,
if (!edge_port->txfifo.fifo) {
dbg("%s - no memory", __func__);
- edge_close(tty, port, filp);
+ edge_close(port);
return -ENOMEM;
}
@@ -975,7 +974,7 @@ static int edge_open(struct tty_struct *tty,
if (!edge_port->write_urb) {
dbg("%s - no memory", __func__);
- edge_close(tty, port, filp);
+ edge_close(port);
return -ENOMEM;
}
@@ -1099,8 +1098,7 @@ static void block_until_tx_empty(struct edgeport_port *edge_port)
* edge_close
* this function is called by the tty driver when a port is closed
*****************************************************************************/
-static void edge_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void edge_close(struct usb_serial_port *port)
{
struct edgeport_serial *edge_serial;
struct edgeport_port *edge_port;
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 513b25e044c..eabf20eeb37 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2009,8 +2009,7 @@ release_es_lock:
return status;
}
-static void edge_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void edge_close(struct usb_serial_port *port)
{
struct edgeport_serial *edge_serial;
struct edgeport_port *edge_port;
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index cd62825a9ac..c610a99fa47 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -76,8 +76,7 @@ static int initial_wait;
/* Function prototypes for an ipaq */
static int ipaq_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
-static void ipaq_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp);
+static void ipaq_close(struct usb_serial_port *port);
static int ipaq_calc_num_ports(struct usb_serial *serial);
static int ipaq_startup(struct usb_serial *serial);
static void ipaq_shutdown(struct usb_serial *serial);
@@ -714,8 +713,7 @@ error:
}
-static void ipaq_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void ipaq_close(struct usb_serial_port *port)
{
struct ipaq_private *priv = usb_get_serial_port_data(port);
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index da2a2b46644..29ad038b9c8 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -302,23 +302,17 @@ static int ipw_open(struct tty_struct *tty,
return 0;
}
-static void ipw_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void ipw_dtr_rts(struct usb_serial_port *port, int on)
{
struct usb_device *dev = port->serial->dev;
int result;
- if (tty_hung_up_p(filp)) {
- dbg("%s: tty_hung_up_p ...", __func__);
- return;
- }
-
/*--1: drop the dtr */
dbg("%s:dropping dtr", __func__);
result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
IPW_SIO_SET_PIN,
USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
- IPW_PIN_CLRDTR,
+ on ? IPW_PIN_SETDTR : IPW_PIN_CLRDTR,
0,
NULL,
0,
@@ -332,7 +326,7 @@ static void ipw_close(struct tty_struct *tty,
result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
IPW_SIO_SET_PIN, USB_TYPE_VENDOR |
USB_RECIP_INTERFACE | USB_DIR_OUT,
- IPW_PIN_CLRRTS,
+ on ? IPW_PIN_SETRTS : IPW_PIN_CLRRTS,
0,
NULL,
0,
@@ -340,7 +334,12 @@ static void ipw_close(struct tty_struct *tty,
if (result < 0)
dev_err(&port->dev,
"dropping rts failed (error = %d)\n", result);
+}
+static void ipw_close(struct usb_serial_port *port)
+{
+ struct usb_device *dev = port->serial->dev;
+ int result;
/*--3: purge */
dbg("%s:sending purge", __func__);
@@ -461,6 +460,7 @@ static struct usb_serial_driver ipw_device = {
.num_ports = 1,
.open = ipw_open,
.close = ipw_close,
+ .dtr_rts = ipw_dtr_rts,
.port_probe = ipw_probe,
.port_remove = ipw_disconnect,
.write = ipw_write,
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 4e2cda93da5..66009b6b763 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -88,8 +88,7 @@ static int xbof = -1;
static int ir_startup (struct usb_serial *serial);
static int ir_open(struct tty_struct *tty, struct usb_serial_port *port,
struct file *filep);
-static void ir_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filep);
+static void ir_close(struct usb_serial_port *port);
static int ir_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static void ir_write_bulk_callback (struct urb *urb);
@@ -346,8 +345,7 @@ static int ir_open(struct tty_struct *tty,
return result;
}
-static void ir_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file * filp)
+static void ir_close(struct usb_serial_port *port)
{
dbg("%s - port %d", __func__, port->number);
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 4473d442b2a..76a3cc327bb 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -40,7 +40,7 @@ static int debug;
/*
* Version Information
*/
-#define DRIVER_VERSION "v0.5"
+#define DRIVER_VERSION "v0.10"
#define DRIVER_DESC "Infinity USB Unlimited Phoenix driver"
static struct usb_device_id id_table[] = {
@@ -70,7 +70,6 @@ static void read_rxcmd_callback(struct urb *urb);
struct iuu_private {
spinlock_t lock; /* store irq state */
wait_queue_head_t delta_msr_wait;
- u8 line_control;
u8 line_status;
u8 termios_initialized;
int tiostatus; /* store IUART SIGNAL for tiocmget call */
@@ -651,32 +650,33 @@ static int iuu_bulk_write(struct usb_serial_port *port)
unsigned long flags;
int result;
int i;
+ int buf_len;
char *buf_ptr = port->write_urb->transfer_buffer;
dbg("%s - enter", __func__);
+ spin_lock_irqsave(&priv->lock, flags);
*buf_ptr++ = IUU_UART_ESC;
*buf_ptr++ = IUU_UART_TX;
*buf_ptr++ = priv->writelen;
- memcpy(buf_ptr, priv->writebuf,
- priv->writelen);
+ memcpy(buf_ptr, priv->writebuf, priv->writelen);
+ buf_len = priv->writelen;
+ priv->writelen = 0;
+ spin_unlock_irqrestore(&priv->lock, flags);
if (debug == 1) {
- for (i = 0; i < priv->writelen; i++)
+ for (i = 0; i < buf_len; i++)
sprintf(priv->dbgbuf + i*2 ,
"%02X", priv->writebuf[i]);
- priv->dbgbuf[priv->writelen+i*2] = 0;
+ priv->dbgbuf[buf_len+i*2] = 0;
dbg("%s - writing %i chars : %s", __func__,
- priv->writelen, priv->dbgbuf);
+ buf_len, priv->dbgbuf);
}
usb_fill_bulk_urb(port->write_urb, port->serial->dev,
usb_sndbulkpipe(port->serial->dev,
port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer, priv->writelen + 3,
+ port->write_urb->transfer_buffer, buf_len + 3,
iuu_rxcmd, port);
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
- spin_lock_irqsave(&priv->lock, flags);
- priv->writelen = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
usb_serial_port_softint(port);
return result;
}
@@ -770,14 +770,10 @@ static int iuu_uart_write(struct tty_struct *tty, struct usb_serial_port *port,
return -ENOMEM;
spin_lock_irqsave(&priv->lock, flags);
- if (priv->writelen > 0) {
- /* buffer already filled but not commited */
- spin_unlock_irqrestore(&priv->lock, flags);
- return 0;
- }
+
/* fill the buffer */
- memcpy(priv->writebuf, buf, count);
- priv->writelen = count;
+ memcpy(priv->writebuf + priv->writelen, buf, count);
+ priv->writelen += count;
spin_unlock_irqrestore(&priv->lock, flags);
return count;
@@ -819,7 +815,7 @@ static int iuu_uart_on(struct usb_serial_port *port)
buf[0] = IUU_UART_ENABLE;
buf[1] = (u8) ((IUU_BAUD_9600 >> 8) & 0x00FF);
buf[2] = (u8) (0x00FF & IUU_BAUD_9600);
- buf[3] = (u8) (0x0F0 & IUU_TWO_STOP_BITS) | (0x07 & IUU_PARITY_EVEN);
+ buf[3] = (u8) (0x0F0 & IUU_ONE_STOP_BIT) | (0x07 & IUU_PARITY_EVEN);
status = bulk_immediate(port, buf, 4);
if (status != IUU_OPERATION_OK) {
@@ -946,19 +942,59 @@ static int iuu_uart_baud(struct usb_serial_port *port, u32 baud,
return status;
}
-static int set_control_lines(struct usb_device *dev, u8 value)
+static void iuu_set_termios(struct tty_struct *tty,
+ struct usb_serial_port *port, struct ktermios *old_termios)
{
- return 0;
+ const u32 supported_mask = CMSPAR|PARENB|PARODD;
+
+ unsigned int cflag = tty->termios->c_cflag;
+ int status;
+ u32 actual;
+ u32 parity;
+ int csize = CS7;
+ int baud = 9600; /* Fixed for the moment */
+ u32 newval = cflag & supported_mask;
+
+ /* compute the parity parameter */
+ parity = 0;
+ if (cflag & CMSPAR) { /* Using mark space */
+ if (cflag & PARODD)
+ parity |= IUU_PARITY_SPACE;
+ else
+ parity |= IUU_PARITY_MARK;
+ } else if (!(cflag & PARENB)) {
+ parity |= IUU_PARITY_NONE;
+ csize = CS8;
+ } else if (cflag & PARODD)
+ parity |= IUU_PARITY_ODD;
+ else
+ parity |= IUU_PARITY_EVEN;
+
+ parity |= (cflag & CSTOPB ? IUU_TWO_STOP_BITS : IUU_ONE_STOP_BIT);
+
+ /* set it */
+ status = iuu_uart_baud(port,
+ (clockmode == 2) ? 16457 : 9600 * boost / 100,
+ &actual, parity);
+
+ /* set the termios value to the real one, so the user knows what has
+ * changed. We support few fields, so it is easiest to copy the old hw
+ * settings back over and then adjust them
+ */
+ if (old_termios)
+ tty_termios_copy_hw(tty->termios, old_termios);
+ if (status != 0) /* Set failed - return old bits */
+ return;
+ /* Re-encode speed, parity and csize */
+ tty_encode_baud_rate(tty, baud, baud);
+ tty->termios->c_cflag &= ~(supported_mask|CSIZE);
+ tty->termios->c_cflag |= newval | csize;
}
-static void iuu_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void iuu_close(struct usb_serial_port *port)
{
/* iuu_led (port,255,0,0,0); */
struct usb_serial *serial;
- struct iuu_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
- unsigned int c_cflag;
serial = port->serial;
if (!serial)
@@ -968,17 +1004,6 @@ static void iuu_close(struct tty_struct *tty,
iuu_uart_off(port);
if (serial->dev) {
- if (tty) {
- c_cflag = tty->termios->c_cflag;
- if (c_cflag & HUPCL) {
- /* drop DTR and RTS */
- priv = usb_get_serial_port_data(port);
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_control = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
- set_control_lines(port->serial->dev, 0);
- }
- }
/* free writebuf */
/* shutdown our urbs */
dbg("%s - shutting down urbs", __func__);
@@ -1154,7 +1179,7 @@ static int iuu_open(struct tty_struct *tty,
if (result) {
dev_err(&port->dev, "%s - failed submitting read urb,"
" error %d\n", __func__, result);
- iuu_close(tty, port, NULL);
+ iuu_close(port);
return -EPROTO;
} else {
dbg("%s - rxcmd OK", __func__);
@@ -1175,6 +1200,7 @@ static struct usb_serial_driver iuu_device = {
.read_bulk_callback = iuu_uart_read_callback,
.tiocmget = iuu_tiocmget,
.tiocmset = iuu_tiocmset,
+ .set_termios = iuu_set_termios,
.attach = iuu_startup,
.shutdown = iuu_shutdown,
};
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 00daa8f7759..f1195a98f31 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -1298,8 +1298,16 @@ static inline void stop_urb(struct urb *urb)
usb_kill_urb(urb);
}
-static void keyspan_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void keyspan_dtr_rts(struct usb_serial_port *port, int on)
+{
+ struct keyspan_port_private *p_priv = usb_get_serial_port_data(port);
+
+ p_priv->rts_state = on;
+ p_priv->dtr_state = on;
+ keyspan_send_setup(port, 0);
+}
+
+static void keyspan_close(struct usb_serial_port *port)
{
int i;
struct usb_serial *serial = port->serial;
@@ -1336,7 +1344,6 @@ static void keyspan_close(struct tty_struct *tty,
stop_urb(p_priv->out_urbs[i]);
}
}
- tty_port_tty_set(&port->port, NULL);
}
/* download the firmware to a pre-renumeration device */
diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
index 38b4582e073..0d4569b6076 100644
--- a/drivers/usb/serial/keyspan.h
+++ b/drivers/usb/serial/keyspan.h
@@ -38,9 +38,8 @@
static int keyspan_open (struct tty_struct *tty,
struct usb_serial_port *port,
struct file *filp);
-static void keyspan_close (struct tty_struct *tty,
- struct usb_serial_port *port,
- struct file *filp);
+static void keyspan_close (struct usb_serial_port *port);
+static void keyspan_dtr_rts (struct usb_serial_port *port, int on);
static int keyspan_startup (struct usb_serial *serial);
static void keyspan_shutdown (struct usb_serial *serial);
static int keyspan_write_room (struct tty_struct *tty);
@@ -562,6 +561,7 @@ static struct usb_serial_driver keyspan_1port_device = {
.num_ports = 1,
.open = keyspan_open,
.close = keyspan_close,
+ .dtr_rts = keyspan_dtr_rts,
.write = keyspan_write,
.write_room = keyspan_write_room,
.set_termios = keyspan_set_termios,
@@ -582,6 +582,7 @@ static struct usb_serial_driver keyspan_2port_device = {
.num_ports = 2,
.open = keyspan_open,
.close = keyspan_close,
+ .dtr_rts = keyspan_dtr_rts,
.write = keyspan_write,
.write_room = keyspan_write_room,
.set_termios = keyspan_set_termios,
@@ -602,6 +603,7 @@ static struct usb_serial_driver keyspan_4port_device = {
.num_ports = 4,
.open = keyspan_open,
.close = keyspan_close,
+ .dtr_rts = keyspan_dtr_rts,
.write = keyspan_write,
.write_room = keyspan_write_room,
.set_termios = keyspan_set_termios,
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index bf1ae247da6..ab769dbea1b 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -651,6 +651,35 @@ static int keyspan_pda_chars_in_buffer(struct tty_struct *tty)
}
+static void keyspan_pda_dtr_rts(struct usb_serial_port *port, int on)
+{
+ struct usb_serial *serial = port->serial;
+
+ if (serial->dev) {
+ if (on)
+ keyspan_pda_set_modem_info(serial, (1<<7) | (1<<2));
+ else
+ keyspan_pda_set_modem_info(serial, 0);
+ }
+}
+
+static int keyspan_pda_carrier_raised(struct usb_serial_port *port)
+{
+ struct usb_serial *serial = port->serial;
+ unsigned char modembits;
+
+ /* If we can read the modem status and the DCD is low then
+ carrier is not raised yet */
+ if (keyspan_pda_get_modem_info(serial, &modembits) >= 0) {
+ if (!(modembits & (1<<6)))
+ return 0;
+ }
+ /* Carrier raised, or we failed (e.g. disconnected), so
+ proceed accordingly */
+ return 1;
+}
+
+
static int keyspan_pda_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp)
{
@@ -682,13 +711,6 @@ static int keyspan_pda_open(struct tty_struct *tty,
priv->tx_room = room;
priv->tx_throttled = room ? 0 : 1;
- /* the normal serial device seems to always turn on DTR and RTS here,
- so do the same */
- if (tty && (tty->termios->c_cflag & CBAUD))
- keyspan_pda_set_modem_info(serial, (1<<7) | (1<<2));
- else
- keyspan_pda_set_modem_info(serial, 0);
-
/*Start reading from the device*/
port->interrupt_in_urb->dev = serial->dev;
rc = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
@@ -700,19 +722,11 @@ static int keyspan_pda_open(struct tty_struct *tty,
error:
return rc;
}
-
-
-static void keyspan_pda_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void keyspan_pda_close(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
if (serial->dev) {
- /* the normal serial device seems to always shut
- off DTR and RTS now */
- if (tty->termios->c_cflag & HUPCL)
- keyspan_pda_set_modem_info(serial, 0);
-
/* shutdown our bulk reads and writes */
usb_kill_urb(port->write_urb);
usb_kill_urb(port->interrupt_in_urb);
@@ -839,6 +853,8 @@ static struct usb_serial_driver keyspan_pda_device = {
.usb_driver = &keyspan_pda_driver,
.id_table = id_table_std,
.num_ports = 1,
+ .dtr_rts = keyspan_pda_dtr_rts,
+ .carrier_raised = keyspan_pda_carrier_raised,
.open = keyspan_pda_open,
.close = keyspan_pda_close,
.write = keyspan_pda_write,
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index fcd9082f3e7..fa817c66b3e 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -76,8 +76,7 @@ static int klsi_105_startup(struct usb_serial *serial);
static void klsi_105_shutdown(struct usb_serial *serial);
static int klsi_105_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
-static void klsi_105_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp);
+static void klsi_105_close(struct usb_serial_port *port);
static int klsi_105_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count);
static void klsi_105_write_bulk_callback(struct urb *urb);
@@ -447,8 +446,7 @@ exit:
} /* klsi_105_open */
-static void klsi_105_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void klsi_105_close(struct usb_serial_port *port)
{
struct klsi_105_private *priv = usb_get_serial_port_data(port);
int rc;
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index c148544953b..6b570498287 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -72,8 +72,7 @@ static int kobil_startup(struct usb_serial *serial);
static void kobil_shutdown(struct usb_serial *serial);
static int kobil_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
-static void kobil_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp);
+static void kobil_close(struct usb_serial_port *port);
static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static int kobil_write_room(struct tty_struct *tty);
@@ -209,7 +208,7 @@ static void kobil_shutdown(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; ++i) {
while (serial->port[i]->port.count > 0)
- kobil_close(NULL, serial->port[i], NULL);
+ kobil_close(serial->port[i]);
kfree(usb_get_serial_port_data(serial->port[i]));
usb_set_serial_port_data(serial->port[i], NULL);
}
@@ -346,11 +345,11 @@ static int kobil_open(struct tty_struct *tty,
}
-static void kobil_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void kobil_close(struct usb_serial_port *port)
{
dbg("%s - port %d", __func__, port->number);
+ /* FIXME: Add rts/dtr methods */
if (port->write_urb) {
usb_kill_urb(port->write_urb);
usb_free_urb(port->write_urb);
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 82930a7d509..873795548fc 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -95,8 +95,8 @@ static int mct_u232_startup(struct usb_serial *serial);
static void mct_u232_shutdown(struct usb_serial *serial);
static int mct_u232_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
-static void mct_u232_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp);
+static void mct_u232_close(struct usb_serial_port *port);
+static void mct_u232_dtr_rts(struct usb_serial_port *port, int on);
static void mct_u232_read_int_callback(struct urb *urb);
static void mct_u232_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old);
@@ -140,6 +140,7 @@ static struct usb_serial_driver mct_u232_device = {
.num_ports = 1,
.open = mct_u232_open,
.close = mct_u232_close,
+ .dtr_rts = mct_u232_dtr_rts,
.throttle = mct_u232_throttle,
.unthrottle = mct_u232_unthrottle,
.read_int_callback = mct_u232_read_int_callback,
@@ -496,29 +497,29 @@ error:
return retval;
} /* mct_u232_open */
-
-static void mct_u232_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void mct_u232_dtr_rts(struct usb_serial_port *port, int on)
{
- unsigned int c_cflag;
unsigned int control_state;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
- dbg("%s port %d", __func__, port->number);
- if (tty) {
- c_cflag = tty->termios->c_cflag;
- mutex_lock(&port->serial->disc_mutex);
- if (c_cflag & HUPCL && !port->serial->disconnected) {
- /* drop DTR and RTS */
- spin_lock_irq(&priv->lock);
+ mutex_lock(&port->serial->disc_mutex);
+ if (!port->serial->disconnected) {
+ /* raise or drop DTR and RTS depending on 'on' */
+ spin_lock_irq(&priv->lock);
+ if (on)
+ priv->control_state |= TIOCM_DTR | TIOCM_RTS;
+ else
priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
- control_state = priv->control_state;
- spin_unlock_irq(&priv->lock);
- mct_u232_set_modem_ctrl(port->serial, control_state);
- }
- mutex_unlock(&port->serial->disc_mutex);
+ control_state = priv->control_state;
+ spin_unlock_irq(&priv->lock);
+ mct_u232_set_modem_ctrl(port->serial, control_state);
}
+ mutex_unlock(&port->serial->disc_mutex);
+}
+static void mct_u232_close(struct usb_serial_port *port)
+{
+ dbg("%s port %d", __func__, port->number);
if (port->serial->dev) {
/* shutdown our urbs */
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 24e3b5d4b4d..9e1a013ee7f 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -533,8 +533,7 @@ static int mos7720_chars_in_buffer(struct tty_struct *tty)
return chars;
}
-static void mos7720_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void mos7720_close(struct usb_serial_port *port)
{
struct usb_serial *serial;
struct moschip_port *mos7720_port;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 84fb1dcd30d..10b78a37214 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1135,54 +1135,12 @@ static int mos7840_chars_in_buffer(struct tty_struct *tty)
}
-/************************************************************************
- *
- * mos7840_block_until_tx_empty
- *
- * This function will block the close until one of the following:
- * 1. TX count are 0
- * 2. The mos7840 has stopped
- * 3. A timeout of 3 seconds without activity has expired
- *
- ************************************************************************/
-static void mos7840_block_until_tx_empty(struct tty_struct *tty,
- struct moschip_port *mos7840_port)
-{
- int timeout = HZ / 10;
- int wait = 30;
- int count;
-
- while (1) {
-
- count = mos7840_chars_in_buffer(tty);
-
- /* Check for Buffer status */
- if (count <= 0)
- return;
-
- /* Block the thread for a while */
- interruptible_sleep_on_timeout(&mos7840_port->wait_chase,
- timeout);
-
- /* No activity.. count down section */
- wait--;
- if (wait == 0) {
- dbg("%s - TIMEOUT", __func__);
- return;
- } else {
- /* Reset timeout value back to seconds */
- wait = 30;
- }
- }
-}
-
/*****************************************************************************
* mos7840_close
* this function is called by the tty driver when a port is closed
*****************************************************************************/
-static void mos7840_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void mos7840_close(struct usb_serial_port *port)
{
struct usb_serial *serial;
struct moschip_port *mos7840_port;
@@ -1223,10 +1181,6 @@ static void mos7840_close(struct tty_struct *tty,
}
}
- if (serial->dev)
- /* flush and block until tx is empty */
- mos7840_block_until_tx_empty(tty, mos7840_port);
-
/* While closing port, shutdown all bulk read, write *
* and interrupt read if they exists */
if (serial->dev) {
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index bcdcbb82270..f5f3751a888 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -98,8 +98,7 @@ static int navman_open(struct tty_struct *tty,
return result;
}
-static void navman_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void navman_close(struct usb_serial_port *port)
{
dbg("%s - port %d", __func__, port->number);
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index df653971272..1104617334f 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -66,8 +66,7 @@ static int debug;
/* function prototypes */
static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port,
struct file *filp);
-static void omninet_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp);
+static void omninet_close(struct usb_serial_port *port);
static void omninet_read_bulk_callback(struct urb *urb);
static void omninet_write_bulk_callback(struct urb *urb);
static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
@@ -189,8 +188,7 @@ static int omninet_open(struct tty_struct *tty,
return result;
}
-static void omninet_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void omninet_close(struct usb_serial_port *port)
{
dbg("%s - port %d", __func__, port->number);
usb_kill_urb(port->read_urb);
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index b500ad10b75..c20480aa975 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -173,8 +173,7 @@ static int opticon_open(struct tty_struct *tty, struct usb_serial_port *port,
return result;
}
-static void opticon_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp)
+static void opticon_close(struct usb_serial_port *port)
{
struct opticon_private *priv = usb_get_serial_data(port->serial);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 7817b82889c..a16d69fadba 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -45,8 +45,9 @@
/* Function prototypes */
static int option_open(struct tty_struct *tty, struct usb_serial_port *port,
struct file *filp);
-static void option_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp);
+static void option_close(struct usb_serial_port *port);
+static void option_dtr_rts(struct usb_serial_port *port, int on);
+
static int option_startup(struct usb_serial *serial);
static void option_shutdown(struct usb_serial *serial);
static int option_write_room(struct tty_struct *tty);
@@ -61,7 +62,7 @@ static void option_set_termios(struct tty_struct *tty,
static int option_tiocmget(struct tty_struct *tty, struct file *file);
static int option_tiocmset(struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear);
-static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *port);
+static int option_send_setup(struct usb_serial_port *port);
static int option_suspend(struct usb_serial *serial, pm_message_t message);
static int option_resume(struct usb_serial *serial);
@@ -551,6 +552,7 @@ static struct usb_serial_driver option_1port_device = {
.num_ports = 1,
.open = option_open,
.close = option_close,
+ .dtr_rts = option_dtr_rts,
.write = option_write,
.write_room = option_write_room,
.chars_in_buffer = option_chars_in_buffer,
@@ -630,7 +632,7 @@ static void option_set_termios(struct tty_struct *tty,
dbg("%s", __func__);
/* Doesn't support option setting */
tty_termios_copy_hw(tty->termios, old_termios);
- option_send_setup(tty, port);
+ option_send_setup(port);
}
static int option_tiocmget(struct tty_struct *tty, struct file *file)
@@ -669,7 +671,7 @@ static int option_tiocmset(struct tty_struct *tty, struct file *file,
portdata->rts_state = 0;
if (clear & TIOCM_DTR)
portdata->dtr_state = 0;
- return option_send_setup(tty, port);
+ return option_send_setup(port);
}
/* Write */
@@ -897,10 +899,6 @@ static int option_open(struct tty_struct *tty,
dbg("%s", __func__);
- /* Set some sane defaults */
- portdata->rts_state = 1;
- portdata->dtr_state = 1;
-
/* Reset low level data toggle and start reading from endpoints */
for (i = 0; i < N_IN_URB; i++) {
urb = portdata->in_urbs[i];
@@ -936,37 +934,43 @@ static int option_open(struct tty_struct *tty,
usb_pipeout(urb->pipe), 0); */
}
- option_send_setup(tty, port);
+ option_send_setup(port);
return 0;
}
-static void option_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void option_dtr_rts(struct usb_serial_port *port, int on)
{
- int i;
struct usb_serial *serial = port->serial;
struct option_port_private *portdata;
dbg("%s", __func__);
portdata = usb_get_serial_port_data(port);
+ mutex_lock(&serial->disc_mutex);
+ portdata->rts_state = on;
+ portdata->dtr_state = on;
+ if (serial->dev)
+ option_send_setup(port);
+ mutex_unlock(&serial->disc_mutex);
+}
- portdata->rts_state = 0;
- portdata->dtr_state = 0;
- if (serial->dev) {
- mutex_lock(&serial->disc_mutex);
- if (!serial->disconnected)
- option_send_setup(tty, port);
- mutex_unlock(&serial->disc_mutex);
+static void option_close(struct usb_serial_port *port)
+{
+ int i;
+ struct usb_serial *serial = port->serial;
+ struct option_port_private *portdata;
+
+ dbg("%s", __func__);
+ portdata = usb_get_serial_port_data(port);
+ if (serial->dev) {
/* Stop reading/writing urbs */
for (i = 0; i < N_IN_URB; i++)
usb_kill_urb(portdata->in_urbs[i]);
for (i = 0; i < N_OUT_URB; i++)
usb_kill_urb(portdata->out_urbs[i]);
}
- tty_port_tty_set(&port->port, NULL);
}
/* Helper functions used by option_setup_urbs */
@@ -1032,28 +1036,24 @@ static void option_setup_urbs(struct usb_serial *serial)
* This is exactly the same as SET_CONTROL_LINE_STATE from the PSTN
* CDC.
*/
-static int option_send_setup(struct tty_struct *tty,
- struct usb_serial_port *port)
+static int option_send_setup(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct option_port_private *portdata;
int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
+ int val = 0;
dbg("%s", __func__);
portdata = usb_get_serial_port_data(port);
- if (tty) {
- int val = 0;
- if (portdata->dtr_state)
- val |= 0x01;
- if (portdata->rts_state)
- val |= 0x02;
+ if (portdata->dtr_state)
+ val |= 0x01;
+ if (portdata->rts_state)
+ val |= 0x02;
- return usb_control_msg(serial->dev,
- usb_rcvctrlpipe(serial->dev, 0),
- 0x22, 0x21, val, ifNum, NULL, 0, USB_CTRL_SET_TIMEOUT);
- }
- return 0;
+ return usb_control_msg(serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0),
+ 0x22, 0x21, val, ifNum, NULL, 0, USB_CTRL_SET_TIMEOUT);
}
static int option_startup(struct usb_serial *serial)
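The option_send_setup() rewrite above no longer needs a tty: the DTR/RTS state now lives entirely in portdata and is packed into the wValue of the 0x22 control request (bit 0 = DTR, bit 1 = RTS). A minimal standalone sketch of that packing; the pack_ctrl_lines() helper and the printf output are illustrative only, not part of the driver:

#include <stdio.h>

/* Pack DTR/RTS state into the wValue used by the 0x22 control request,
 * mirroring option_send_setup(): bit 0 = DTR, bit 1 = RTS. */
static unsigned int pack_ctrl_lines(int dtr_state, int rts_state)
{
	unsigned int val = 0;

	if (dtr_state)
		val |= 0x01;
	if (rts_state)
		val |= 0x02;
	return val;
}

int main(void)
{
	printf("DTR only : 0x%02x\n", pack_ctrl_lines(1, 0));
	printf("RTS only : 0x%02x\n", pack_ctrl_lines(0, 1));
	printf("both up  : 0x%02x\n", pack_ctrl_lines(1, 1));
	return 0;
}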
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index ba551f00f16..7de54781fe6 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -143,8 +143,7 @@ struct oti6858_control_pkt {
/* function prototypes */
static int oti6858_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
-static void oti6858_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp);
+static void oti6858_close(struct usb_serial_port *port);
static void oti6858_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old);
static int oti6858_ioctl(struct tty_struct *tty, struct file *file,
@@ -622,67 +621,30 @@ static int oti6858_open(struct tty_struct *tty,
if (result != 0) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed"
" with error %d\n", __func__, result);
- oti6858_close(tty, port, NULL);
+ oti6858_close(port);
return -EPROTO;
}
/* setup termios */
if (tty)
oti6858_set_termios(tty, port, &tmp_termios);
-
+ port->port.drain_delay = 256; /* FIXME: check the FIFO length */
return 0;
}
-static void oti6858_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void oti6858_close(struct usb_serial_port *port)
{
struct oti6858_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
- long timeout;
- wait_queue_t wait;
dbg("%s(port = %d)", __func__, port->number);
- /* wait for data to drain from the buffer */
spin_lock_irqsave(&priv->lock, flags);
- timeout = 30 * HZ; /* PL2303_CLOSING_WAIT */
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&tty->write_wait, &wait);
- dbg("%s(): entering wait loop", __func__);
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (oti6858_buf_data_avail(priv->buf) == 0
- || timeout == 0 || signal_pending(current)
- || port->serial->disconnected)
- break;
- spin_unlock_irqrestore(&priv->lock, flags);
- timeout = schedule_timeout(timeout);
- spin_lock_irqsave(&priv->lock, flags);
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&tty->write_wait, &wait);
- dbg("%s(): after wait loop", __func__);
-
/* clear out any remaining data in the buffer */
oti6858_buf_clear(priv->buf);
spin_unlock_irqrestore(&priv->lock, flags);
- /* wait for characters to drain from the device */
- /* (this is long enough for the entire 256 byte */
- /* pl2303 hardware buffer to drain with no flow */
- /* control for data rates of 1200 bps or more, */
- /* for lower rates we should really know how much */
- /* data is in the buffer to compute a delay */
- /* that is not unnecessarily long) */
- /* FIXME
- bps = tty_get_baud_rate(tty);
- if (bps > 1200)
- timeout = max((HZ*2560)/bps,HZ/10);
- else
- */
- timeout = 2*HZ;
- schedule_timeout_interruptible(timeout);
- dbg("%s(): after schedule_timeout_interruptible()", __func__);
+ dbg("%s(): after buf_clear()", __func__);
/* cancel scheduled setup */
cancel_delayed_work(&priv->delayed_setup_work);
@@ -694,15 +656,6 @@ static void oti6858_close(struct tty_struct *tty,
usb_kill_urb(port->write_urb);
usb_kill_urb(port->read_urb);
usb_kill_urb(port->interrupt_in_urb);
-
- /*
- if (tty && (tty->termios->c_cflag) & HUPCL) {
- // drop DTR and RTS
- spin_lock_irqsave(&priv->lock, flags);
- priv->pending_setup.control &= ~CONTROL_MASK;
- spin_unlock_irqrestore(&priv->lock, flags);
- }
- */
}
static int oti6858_tiocmset(struct tty_struct *tty, struct file *file,
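The oti6858 close path above drops its open-coded drain loop; open() now sets port->port.drain_delay = 256 and lets the tty_port close code do the waiting. The removed code sized its own wait from the baud rate as max((HZ*2560)/bps, HZ/10), falling back to 2*HZ at or below 1200 bps. A small standalone sketch of that heuristic, assuming HZ is 100 purely for illustration:

#include <stdio.h>

#define HZ 100	/* illustrative tick rate; the real value is kernel-config dependent */

/* Drain timeout heuristic from the removed close() code: long enough for a
 * 256-byte FIFO to empty at the given rate, never less than HZ/10 ticks,
 * with a flat 2*HZ fallback for slow rates. */
static long drain_timeout(int bps)
{
	long t;

	if (bps > 1200) {
		t = (HZ * 2560) / bps;
		if (t < HZ / 10)
			t = HZ / 10;
		return t;
	}
	return 2 * HZ;
}

int main(void)
{
	int rates[] = { 300, 1200, 9600, 115200 };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%6d bps -> %ld ticks\n", rates[i], drain_timeout(rates[i]));
	return 0;
}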
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 751a533a434..e02dc3d643c 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -652,69 +652,41 @@ static void pl2303_set_termios(struct tty_struct *tty,
kfree(buf);
}
-static void pl2303_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void pl2303_dtr_rts(struct usb_serial_port *port, int on)
+{
+ struct pl2303_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+ u8 control;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ /* Change DTR and RTS */
+ if (on)
+ priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
+ else
+ priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
+ control = priv->line_control;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ set_control_lines(port->serial->dev, control);
+}
+
+static void pl2303_close(struct usb_serial_port *port)
{
struct pl2303_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
- unsigned int c_cflag;
- int bps;
- long timeout;
- wait_queue_t wait;
dbg("%s - port %d", __func__, port->number);
- /* wait for data to drain from the buffer */
spin_lock_irqsave(&priv->lock, flags);
- timeout = PL2303_CLOSING_WAIT;
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&tty->write_wait, &wait);
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (pl2303_buf_data_avail(priv->buf) == 0 ||
- timeout == 0 || signal_pending(current) ||
- port->serial->disconnected)
- break;
- spin_unlock_irqrestore(&priv->lock, flags);
- timeout = schedule_timeout(timeout);
- spin_lock_irqsave(&priv->lock, flags);
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&tty->write_wait, &wait);
/* clear out any remaining data in the buffer */
pl2303_buf_clear(priv->buf);
spin_unlock_irqrestore(&priv->lock, flags);
- /* wait for characters to drain from the device */
- /* (this is long enough for the entire 256 byte */
- /* pl2303 hardware buffer to drain with no flow */
- /* control for data rates of 1200 bps or more, */
- /* for lower rates we should really know how much */
- /* data is in the buffer to compute a delay */
- /* that is not unnecessarily long) */
- bps = tty_get_baud_rate(tty);
- if (bps > 1200)
- timeout = max((HZ*2560)/bps, HZ/10);
- else
- timeout = 2*HZ;
- schedule_timeout_interruptible(timeout);
-
/* shutdown our urbs */
dbg("%s - shutting down urbs", __func__);
usb_kill_urb(port->write_urb);
usb_kill_urb(port->read_urb);
usb_kill_urb(port->interrupt_in_urb);
- if (tty) {
- c_cflag = tty->termios->c_cflag;
- if (c_cflag & HUPCL) {
- /* drop DTR and RTS */
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_control = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
- set_control_lines(port->serial->dev, 0);
- }
- }
}
static int pl2303_open(struct tty_struct *tty,
@@ -748,7 +720,7 @@ static int pl2303_open(struct tty_struct *tty,
if (result) {
dev_err(&port->dev, "%s - failed submitting read urb,"
" error %d\n", __func__, result);
- pl2303_close(tty, port, NULL);
+ pl2303_close(port);
return -EPROTO;
}
@@ -758,9 +730,10 @@ static int pl2303_open(struct tty_struct *tty,
if (result) {
dev_err(&port->dev, "%s - failed submitting interrupt urb,"
" error %d\n", __func__, result);
- pl2303_close(tty, port, NULL);
+ pl2303_close(port);
return -EPROTO;
}
+ port->port.drain_delay = 256;
return 0;
}
@@ -821,6 +794,14 @@ static int pl2303_tiocmget(struct tty_struct *tty, struct file *file)
return result;
}
+static int pl2303_carrier_raised(struct usb_serial_port *port)
+{
+ struct pl2303_private *priv = usb_get_serial_port_data(port);
+ if (priv->line_status & UART_DCD)
+ return 1;
+ return 0;
+}
+
static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
{
struct pl2303_private *priv = usb_get_serial_port_data(port);
@@ -1125,6 +1106,8 @@ static struct usb_serial_driver pl2303_device = {
.num_ports = 1,
.open = pl2303_open,
.close = pl2303_close,
+ .dtr_rts = pl2303_dtr_rts,
+ .carrier_raised = pl2303_carrier_raised,
.write = pl2303_write,
.ioctl = pl2303_ioctl,
.break_ctl = pl2303_break_ctl,
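pl2303_dtr_rts() above replaces the HUPCL handling that used to live in close(): the DTR/RTS bits are flipped under the private spinlock, and only the snapshot taken inside the critical section is handed to set_control_lines(). A rough userspace model of that pattern, substituting a pthread mutex for the spinlock and a printf stub for the USB request (both purely illustrative; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

/* Bit values match the pl2303 driver's line_control byte. */
#define CONTROL_DTR 0x01
#define CONTROL_RTS 0x02

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char line_control;

/* Stand-in for the vendor control request sent by the real driver. */
static void set_control_lines(unsigned char value)
{
	printf("control lines -> 0x%02x\n", value);
}

static void dtr_rts(int on)
{
	unsigned char control;

	pthread_mutex_lock(&lock);
	if (on)
		line_control |= CONTROL_DTR | CONTROL_RTS;
	else
		line_control &= ~(CONTROL_DTR | CONTROL_RTS);
	control = line_control;	/* snapshot taken under the lock */
	pthread_mutex_unlock(&lock);
	set_control_lines(control);
}

int main(void)
{
	dtr_rts(1);
	dtr_rts(0);
	return 0;
}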
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 913225c6161..17ac34f4d66 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -26,12 +26,10 @@
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
-#include <linux/usb/ch9.h>
#define SWIMS_USB_REQUEST_SetPower 0x00
#define SWIMS_USB_REQUEST_SetNmea 0x07
-/* per port private data */
#define N_IN_URB 4
#define N_OUT_URB 4
#define IN_BUFLEN 4096
@@ -39,6 +37,12 @@
static int debug;
static int nmea;
+/* Used in interface blacklisting */
+struct sierra_iface_info {
+ const u32 infolen; /* number of interface numbers on blacklist */
+ const u8 *ifaceinfo; /* pointer to the array holding the numbers */
+};
+
static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
{
int result;
@@ -85,6 +89,23 @@ static int sierra_calc_num_ports(struct usb_serial *serial)
return result;
}
+static int is_blacklisted(const u8 ifnum,
+ const struct sierra_iface_info *blacklist)
+{
+ const u8 *info;
+ int i;
+
+ if (blacklist) {
+ info = blacklist->ifaceinfo;
+
+ for (i = 0; i < blacklist->infolen; i++) {
+ if (info[i] == ifnum)
+ return 1;
+ }
+ }
+ return 0;
+}
+
static int sierra_calc_interface(struct usb_serial *serial)
{
int interface;
@@ -153,9 +174,25 @@ static int sierra_probe(struct usb_serial *serial,
*/
usb_set_serial_data(serial, (void *)num_ports);
+ /* ifnum could have changed - by calling usb_set_interface */
+ ifnum = sierra_calc_interface(serial);
+
+ if (is_blacklisted(ifnum,
+ (struct sierra_iface_info *)id->driver_info)) {
+ dev_dbg(&serial->dev->dev,
+ "Ignoring blacklisted interface #%d\n", ifnum);
+ return -ENODEV;
+ }
+
return result;
}
+static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11 };
+static const struct sierra_iface_info direct_ip_interface_blacklist = {
+ .infolen = ARRAY_SIZE(direct_ip_non_serial_ifaces),
+ .ifaceinfo = direct_ip_non_serial_ifaces,
+};
+
static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
{ USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */
@@ -188,9 +225,11 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */
{ USB_DEVICE(0x1199, 0x683A) }, /* Sierra Wireless MC8785 */
{ USB_DEVICE(0x1199, 0x683B) }, /* Sierra Wireless MC8785 Composite */
- { USB_DEVICE(0x1199, 0x683C) }, /* Sierra Wireless MC8790 */
- { USB_DEVICE(0x1199, 0x683D) }, /* Sierra Wireless MC8790 */
- { USB_DEVICE(0x1199, 0x683E) }, /* Sierra Wireless MC8790 */
+ /* Sierra Wireless MC8790, MC8791, MC8792 Composite */
+ { USB_DEVICE(0x1199, 0x683C) },
+ { USB_DEVICE(0x1199, 0x683D) }, /* Sierra Wireless MC8791 Composite */
+ /* Sierra Wireless MC8790, MC8791, MC8792 */
+ { USB_DEVICE(0x1199, 0x683E) },
{ USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */
{ USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */
{ USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880 E */
@@ -211,6 +250,10 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */
{ USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
+ { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
+ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ },
+
{ }
};
MODULE_DEVICE_TABLE(usb, id_table);
@@ -229,7 +272,6 @@ struct sierra_port_private {
/* Input endpoints and buffers for this port */
struct urb *in_urbs[N_IN_URB];
- char *in_buffer[N_IN_URB];
/* Settings for the port */
int rts_state; /* Handshaking pins (outputs) */
@@ -240,57 +282,50 @@ struct sierra_port_private {
int ri_state;
};
-static int sierra_send_setup(struct tty_struct *tty,
- struct usb_serial_port *port)
+static int sierra_send_setup(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct sierra_port_private *portdata;
__u16 interface = 0;
+ int val = 0;
dev_dbg(&port->dev, "%s", __func__);
portdata = usb_get_serial_port_data(port);
- if (tty) {
- int val = 0;
- if (portdata->dtr_state)
- val |= 0x01;
- if (portdata->rts_state)
- val |= 0x02;
-
- /* If composite device then properly report interface */
- if (serial->num_ports == 1) {
- interface = sierra_calc_interface(serial);
-
- /* Control message is sent only to interfaces with
- * interrupt_in endpoints
- */
- if (port->interrupt_in_urb) {
- /* send control message */
- return usb_control_msg(serial->dev,
- usb_rcvctrlpipe(serial->dev, 0),
- 0x22, 0x21, val, interface,
- NULL, 0, USB_CTRL_SET_TIMEOUT);
- }
- }
-
- /* Otherwise the need to do non-composite mapping */
- else {
- if (port->bulk_out_endpointAddress == 2)
- interface = 0;
- else if (port->bulk_out_endpointAddress == 4)
- interface = 1;
- else if (port->bulk_out_endpointAddress == 5)
- interface = 2;
+ if (portdata->dtr_state)
+ val |= 0x01;
+ if (portdata->rts_state)
+ val |= 0x02;
+ /* If composite device then properly report interface */
+ if (serial->num_ports == 1) {
+ interface = sierra_calc_interface(serial);
+ /* Control message is sent only to interfaces with
+ * interrupt_in endpoints
+ */
+ if (port->interrupt_in_urb) {
+ /* send control message */
return usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
0x22, 0x21, val, interface,
NULL, 0, USB_CTRL_SET_TIMEOUT);
-
}
}
+ /* Otherwise we need to do non-composite mapping */
+ else {
+ if (port->bulk_out_endpointAddress == 2)
+ interface = 0;
+ else if (port->bulk_out_endpointAddress == 4)
+ interface = 1;
+ else if (port->bulk_out_endpointAddress == 5)
+ interface = 2;
+ return usb_control_msg(serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0),
+ 0x22, 0x21, val, interface,
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
+ }
return 0;
}
@@ -299,7 +334,7 @@ static void sierra_set_termios(struct tty_struct *tty,
{
dev_dbg(&port->dev, "%s", __func__);
tty_termios_copy_hw(tty->termios, old_termios);
- sierra_send_setup(tty, port);
+ sierra_send_setup(port);
}
static int sierra_tiocmget(struct tty_struct *tty, struct file *file)
@@ -338,7 +373,18 @@ static int sierra_tiocmset(struct tty_struct *tty, struct file *file,
portdata->rts_state = 0;
if (clear & TIOCM_DTR)
portdata->dtr_state = 0;
- return sierra_send_setup(tty, port);
+ return sierra_send_setup(port);
+}
+
+static void sierra_release_urb(struct urb *urb)
+{
+ struct usb_serial_port *port;
+ if (urb) {
+ port = urb->context;
+ dev_dbg(&port->dev, "%s: %p\n", __func__, urb);
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ }
}
static void sierra_outdat_callback(struct urb *urb)
@@ -465,7 +511,7 @@ static void sierra_indat_callback(struct urb *urb)
" received", __func__);
/* Resubmit urb so we continue receiving */
- if (port->port.count && status != -ESHUTDOWN) {
+ if (port->port.count && status != -ESHUTDOWN && status != -EPERM) {
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err)
dev_err(&port->dev, "resubmit read urb failed."
@@ -557,67 +603,99 @@ static int sierra_write_room(struct tty_struct *tty)
return 2048;
}
-static int sierra_open(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void sierra_stop_rx_urbs(struct usb_serial_port *port)
{
- struct sierra_port_private *portdata;
- struct usb_serial *serial = port->serial;
int i;
- struct urb *urb;
- int result;
+ struct sierra_port_private *portdata = usb_get_serial_port_data(port);
- portdata = usb_get_serial_port_data(port);
+ for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++)
+ usb_kill_urb(portdata->in_urbs[i]);
- dev_dbg(&port->dev, "%s", __func__);
+ usb_kill_urb(port->interrupt_in_urb);
+}
- /* Set some sane defaults */
- portdata->rts_state = 1;
- portdata->dtr_state = 1;
+static int sierra_submit_rx_urbs(struct usb_serial_port *port, gfp_t mem_flags)
+{
+ int ok_cnt;
+ int err = -EINVAL;
+ int i;
+ struct urb *urb;
+ struct sierra_port_private *portdata = usb_get_serial_port_data(port);
- /* Reset low level data toggle and start reading from endpoints */
- for (i = 0; i < N_IN_URB; i++) {
+ ok_cnt = 0;
+ for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++) {
urb = portdata->in_urbs[i];
if (!urb)
continue;
- if (urb->dev != serial->dev) {
- dev_dbg(&port->dev, "%s: dev %p != %p",
- __func__, urb->dev, serial->dev);
- continue;
+ err = usb_submit_urb(urb, mem_flags);
+ if (err) {
+ dev_err(&port->dev, "%s: submit urb failed: %d\n",
+ __func__, err);
+ } else {
+ ok_cnt++;
}
+ }
- /*
- * make sure endpoint data toggle is synchronized with the
- * device
- */
- usb_clear_halt(urb->dev, urb->pipe);
-
- result = usb_submit_urb(urb, GFP_KERNEL);
- if (result) {
- dev_err(&port->dev, "submit urb %d failed (%d) %d\n",
- i, result, urb->transfer_buffer_length);
+ if (ok_cnt && port->interrupt_in_urb) {
+ err = usb_submit_urb(port->interrupt_in_urb, mem_flags);
+ if (err) {
+ dev_err(&port->dev, "%s: submit intr urb failed: %d\n",
+ __func__, err);
}
}
- sierra_send_setup(tty, port);
+ if (ok_cnt > 0) /* at least one rx urb submitted */
+ return 0;
+ else
+ return err;
+}
+
+static struct urb *sierra_setup_urb(struct usb_serial *serial, int endpoint,
+ int dir, void *ctx, int len,
+ gfp_t mem_flags,
+ usb_complete_t callback)
+{
+ struct urb *urb;
+ u8 *buf;
+
+ if (endpoint == -1)
+ return NULL;
- /* start up the interrupt endpoint if we have one */
- if (port->interrupt_in_urb) {
- result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
- if (result)
- dev_err(&port->dev, "submit irq_in urb failed %d\n",
- result);
+ urb = usb_alloc_urb(0, mem_flags);
+ if (urb == NULL) {
+ dev_dbg(&serial->dev->dev, "%s: alloc for endpoint %d failed\n",
+ __func__, endpoint);
+ return NULL;
}
- return 0;
+
+ buf = kmalloc(len, mem_flags);
+ if (buf) {
+ /* Fill URB using supplied data */
+ usb_fill_bulk_urb(urb, serial->dev,
+ usb_sndbulkpipe(serial->dev, endpoint) | dir,
+ buf, len, callback, ctx);
+
+ /* debug */
+ dev_dbg(&serial->dev->dev, "%s %c u : %p d:%p\n", __func__,
+ dir == USB_DIR_IN ? 'i' : 'o', urb, buf);
+ } else {
+ dev_dbg(&serial->dev->dev, "%s %c u:%p d:%p\n", __func__,
+ dir == USB_DIR_IN ? 'i' : 'o', urb, buf);
+
+ sierra_release_urb(urb);
+ urb = NULL;
+ }
+
+ return urb;
}
-static void sierra_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void sierra_close(struct usb_serial_port *port)
{
int i;
struct usb_serial *serial = port->serial;
struct sierra_port_private *portdata;
- dev_dbg(&port->dev, "%s", __func__);
+ dev_dbg(&port->dev, "%s\n", __func__);
portdata = usb_get_serial_port_data(port);
portdata->rts_state = 0;
@@ -626,25 +704,83 @@ static void sierra_close(struct tty_struct *tty,
if (serial->dev) {
mutex_lock(&serial->disc_mutex);
if (!serial->disconnected)
- sierra_send_setup(tty, port);
+ sierra_send_setup(port);
mutex_unlock(&serial->disc_mutex);
- /* Stop reading/writing urbs */
- for (i = 0; i < N_IN_URB; i++)
- usb_kill_urb(portdata->in_urbs[i]);
+ /* Stop reading urbs */
+ sierra_stop_rx_urbs(port);
+ /* .. and release them */
+ for (i = 0; i < N_IN_URB; i++) {
+ sierra_release_urb(portdata->in_urbs[i]);
+ portdata->in_urbs[i] = NULL;
+ }
}
+}
- usb_kill_urb(port->interrupt_in_urb);
- tty_port_tty_set(&port->port, NULL);
+static int sierra_open(struct tty_struct *tty,
+ struct usb_serial_port *port, struct file *filp)
+{
+ struct sierra_port_private *portdata;
+ struct usb_serial *serial = port->serial;
+ int i;
+ int err;
+ int endpoint;
+ struct urb *urb;
+
+ portdata = usb_get_serial_port_data(port);
+
+ dev_dbg(&port->dev, "%s", __func__);
+
+ /* Set some sane defaults */
+ portdata->rts_state = 1;
+ portdata->dtr_state = 1;
+
+
+ endpoint = port->bulk_in_endpointAddress;
+ for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++) {
+ urb = sierra_setup_urb(serial, endpoint, USB_DIR_IN, port,
+ IN_BUFLEN, GFP_KERNEL,
+ sierra_indat_callback);
+ portdata->in_urbs[i] = urb;
+ }
+ /* clear halt condition */
+ usb_clear_halt(serial->dev,
+ usb_sndbulkpipe(serial->dev, endpoint) | USB_DIR_IN);
+
+ err = sierra_submit_rx_urbs(port, GFP_KERNEL);
+ if (err) {
+ /* get rid of everything as in close */
+ sierra_close(port);
+ return err;
+ }
+ sierra_send_setup(port);
+
+ return 0;
+}
+
+
+static void sierra_dtr_rts(struct usb_serial_port *port, int on)
+{
+ struct usb_serial *serial = port->serial;
+ struct sierra_port_private *portdata;
+
+ portdata = usb_get_serial_port_data(port);
+ portdata->rts_state = on;
+ portdata->dtr_state = on;
+
+ if (serial->dev) {
+ mutex_lock(&serial->disc_mutex);
+ if (!serial->disconnected)
+ sierra_send_setup(port);
+ mutex_unlock(&serial->disc_mutex);
+ }
}
static int sierra_startup(struct usb_serial *serial)
{
struct usb_serial_port *port;
struct sierra_port_private *portdata;
- struct urb *urb;
int i;
- int j;
dev_dbg(&serial->dev->dev, "%s", __func__);
@@ -666,34 +802,8 @@ static int sierra_startup(struct usb_serial *serial)
return -ENOMEM;
}
spin_lock_init(&portdata->lock);
- for (j = 0; j < N_IN_URB; j++) {
- portdata->in_buffer[j] = kmalloc(IN_BUFLEN, GFP_KERNEL);
- if (!portdata->in_buffer[j]) {
- for (--j; j >= 0; j--)
- kfree(portdata->in_buffer[j]);
- kfree(portdata);
- return -ENOMEM;
- }
- }
-
+ /* Set the port private data pointer */
usb_set_serial_port_data(port, portdata);
-
- /* initialize the in urbs */
- for (j = 0; j < N_IN_URB; ++j) {
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (urb == NULL) {
- dev_dbg(&port->dev, "%s: alloc for in "
- "port failed.", __func__);
- continue;
- }
- /* Fill URB using supplied data. */
- usb_fill_bulk_urb(urb, serial->dev,
- usb_rcvbulkpipe(serial->dev,
- port->bulk_in_endpointAddress),
- portdata->in_buffer[j], IN_BUFLEN,
- sierra_indat_callback, port);
- portdata->in_urbs[j] = urb;
- }
}
return 0;
@@ -701,7 +811,7 @@ static int sierra_startup(struct usb_serial *serial)
static void sierra_shutdown(struct usb_serial *serial)
{
- int i, j;
+ int i;
struct usb_serial_port *port;
struct sierra_port_private *portdata;
@@ -714,12 +824,6 @@ static void sierra_shutdown(struct usb_serial *serial)
portdata = usb_get_serial_port_data(port);
if (!portdata)
continue;
-
- for (j = 0; j < N_IN_URB; j++) {
- usb_kill_urb(portdata->in_urbs[j]);
- usb_free_urb(portdata->in_urbs[j]);
- kfree(portdata->in_buffer[j]);
- }
kfree(portdata);
usb_set_serial_port_data(port, NULL);
}
@@ -737,6 +841,7 @@ static struct usb_serial_driver sierra_device = {
.probe = sierra_probe,
.open = sierra_open,
.close = sierra_close,
+ .dtr_rts = sierra_dtr_rts,
.write = sierra_write,
.write_room = sierra_write_room,
.set_termios = sierra_set_termios,
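The sierra_probe() change above refuses to bind the non-serial interfaces of Direct IP devices by looking the interface number up in a per-device blacklist attached via driver_info. A self-contained sketch of that lookup, reusing the interface numbers from the diff; the struct and function names here are simplified stand-ins:

#include <stdio.h>

/* Simplified mirror of sierra_iface_info: a table of interface numbers
 * that must not be bound as serial ports. */
struct iface_info {
	unsigned int infolen;
	const unsigned char *ifaceinfo;
};

static const unsigned char direct_ip_non_serial[] = { 7, 8, 9, 10, 11 };
static const struct iface_info direct_ip_blacklist = {
	.infolen = sizeof(direct_ip_non_serial),
	.ifaceinfo = direct_ip_non_serial,
};

static int is_blacklisted(unsigned char ifnum, const struct iface_info *bl)
{
	unsigned int i;

	if (!bl)
		return 0;
	for (i = 0; i < bl->infolen; i++)
		if (bl->ifaceinfo[i] == ifnum)
			return 1;
	return 0;
}

int main(void)
{
	unsigned char ifnum;

	for (ifnum = 0; ifnum < 12; ifnum++)
		printf("interface %2u: %s\n", ifnum,
		       is_blacklisted(ifnum, &direct_ip_blacklist) ?
		       "skipped" : "bound");
	return 0;
}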
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 5e7528cc81a..8f7ed8f1399 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -446,66 +446,47 @@ static void spcp8x5_set_workMode(struct usb_device *dev, u16 value,
"RTSCTS usb_control_msg(enable flowctrl) = %d\n", ret);
}
+static int spcp8x5_carrier_raised(struct usb_serial_port *port)
+{
+ struct spcp8x5_private *priv = usb_get_serial_port_data(port);
+ if (priv->line_status & MSR_STATUS_LINE_DCD)
+ return 1;
+ return 0;
+}
+
+static void spcp8x5_dtr_rts(struct usb_serial_port *port, int on)
+{
+ struct spcp8x5_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+ u8 control;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (on)
+ priv->line_control = MCR_CONTROL_LINE_DTR
+ | MCR_CONTROL_LINE_RTS;
+ else
+ priv->line_control &= ~ (MCR_CONTROL_LINE_DTR
+ | MCR_CONTROL_LINE_RTS);
+ control = priv->line_control;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ spcp8x5_set_ctrlLine(port->serial->dev, control , priv->type);
+}
+
/* close the serial port. We should wait for data sending to device 1st and
* then kill all urb. */
-static void spcp8x5_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void spcp8x5_close(struct usb_serial_port *port)
{
struct spcp8x5_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
- unsigned int c_cflag;
- int bps;
- long timeout;
- wait_queue_t wait;
int result;
dbg("%s - port %d", __func__, port->number);
- /* wait for data to drain from the buffer */
spin_lock_irqsave(&priv->lock, flags);
- timeout = SPCP8x5_CLOSING_WAIT;
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&tty->write_wait, &wait);
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (ringbuf_avail_data(priv->buf) == 0 ||
- timeout == 0 || signal_pending(current))
- break;
- spin_unlock_irqrestore(&priv->lock, flags);
- timeout = schedule_timeout(timeout);
- spin_lock_irqsave(&priv->lock, flags);
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&tty->write_wait, &wait);
-
/* clear out any remaining data in the buffer */
clear_ringbuf(priv->buf);
spin_unlock_irqrestore(&priv->lock, flags);
- /* wait for characters to drain from the device (this is long enough
- * for the entire all byte spcp8x5 hardware buffer to drain with no
- * flow control for data rates of 1200 bps or more, for lower rates we
- * should really know how much data is in the buffer to compute a delay
- * that is not unnecessarily long) */
- bps = tty_get_baud_rate(tty);
- if (bps > 1200)
- timeout = max((HZ*2560) / bps, HZ/10);
- else
- timeout = 2*HZ;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(timeout);
-
- /* clear control lines */
- if (tty) {
- c_cflag = tty->termios->c_cflag;
- if (c_cflag & HUPCL) {
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_control = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
- spcp8x5_set_ctrlLine(port->serial->dev, 0 , priv->type);
- }
- }
-
/* kill urb */
if (port->write_urb != NULL) {
result = usb_unlink_urb(port->write_urb);
@@ -665,13 +646,6 @@ static int spcp8x5_open(struct tty_struct *tty,
if (ret)
return ret;
- spin_lock_irqsave(&priv->lock, flags);
- if (tty && (tty->termios->c_cflag & CBAUD))
- priv->line_control = MCR_DTR | MCR_RTS;
- else
- priv->line_control = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
-
spcp8x5_set_ctrlLine(serial->dev, priv->line_control , priv->type);
/* Setup termios */
@@ -691,9 +665,10 @@ static int spcp8x5_open(struct tty_struct *tty,
port->read_urb->dev = serial->dev;
ret = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (ret) {
- spcp8x5_close(tty, port, NULL);
+ spcp8x5_close(port);
return -EPROTO;
}
+ port->port.drain_delay = 256;
return 0;
}
@@ -1033,6 +1008,8 @@ static struct usb_serial_driver spcp8x5_device = {
.num_ports = 1,
.open = spcp8x5_open,
.close = spcp8x5_close,
+ .dtr_rts = spcp8x5_dtr_rts,
+ .carrier_raised = spcp8x5_carrier_raised,
.write = spcp8x5_write,
.set_termios = spcp8x5_set_termios,
.ioctl = spcp8x5_ioctl,
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index 69879e43794..8b07ebc6bae 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -152,8 +152,7 @@ static int symbol_open(struct tty_struct *tty, struct usb_serial_port *port,
return result;
}
-static void symbol_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp)
+static void symbol_close(struct usb_serial_port *port)
{
struct symbol_private *priv = usb_get_serial_data(port->serial);
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 0a64bac306e..42cb04c403b 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -100,8 +100,7 @@ static int ti_startup(struct usb_serial *serial);
static void ti_shutdown(struct usb_serial *serial);
static int ti_open(struct tty_struct *tty, struct usb_serial_port *port,
struct file *file);
-static void ti_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *file);
+static void ti_close(struct usb_serial_port *port);
static int ti_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *data, int count);
static int ti_write_room(struct tty_struct *tty);
@@ -647,8 +646,7 @@ release_lock:
}
-static void ti_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *file)
+static void ti_close(struct usb_serial_port *port)
{
struct ti_device *tdev;
struct ti_port *tport;
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 0a566eea49c..1967a7edc10 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -238,9 +238,11 @@ static int serial_open (struct tty_struct *tty, struct file *filp)
goto bailout_interface_put;
mutex_unlock(&serial->disc_mutex);
}
-
mutex_unlock(&port->mutex);
- return 0;
+ /* Now do the correct tty layer semantics */
+ retval = tty_port_block_til_ready(&port->port, tty, filp);
+ if (retval == 0)
+ return 0;
bailout_interface_put:
usb_autopm_put_interface(serial->interface);
@@ -259,64 +261,89 @@ bailout_serial_put:
return retval;
}
-static void serial_close(struct tty_struct *tty, struct file *filp)
+/**
+ * serial_do_down - shut down hardware
+ * @port: port to shut down
+ *
+ * Shut down a USB port unless it is the console. We never shut down the
+ * console hardware as it will always be in use.
+ *
+ * Don't free any resources at this point
+ */
+static void serial_do_down(struct usb_serial_port *port)
{
- struct usb_serial_port *port = tty->driver_data;
+ struct usb_serial_driver *drv = port->serial->type;
struct usb_serial *serial;
struct module *owner;
- int count;
- if (!port)
+ /* The console is magical, do not hang up the console hardware
+ or there will be tears */
+ if (port->console)
return;
- dbg("%s - port %d", __func__, port->number);
-
mutex_lock(&port->mutex);
serial = port->serial;
owner = serial->type->driver.owner;
- if (port->port.count == 0) {
- mutex_unlock(&port->mutex);
- return;
- }
-
- if (port->port.count == 1)
- /* only call the device specific close if this
- * port is being closed by the last owner. Ensure we do
- * this before we drop the port count. The call is protected
- * by the port mutex
- */
- serial->type->close(tty, port, filp);
-
- if (port->port.count == (port->console ? 2 : 1)) {
- struct tty_struct *tty = tty_port_tty_get(&port->port);
- if (tty) {
- /* We must do this before we drop the port count to
- zero. */
- if (tty->driver_data)
- tty->driver_data = NULL;
- tty_port_tty_set(&port->port, NULL);
- tty_kref_put(tty);
- }
- }
+ if (drv->close)
+ drv->close(port);
- --port->port.count;
- count = port->port.count;
mutex_unlock(&port->mutex);
- put_device(&port->dev);
+}
+
+/**
+ * serial_do_free - free resources post close/hangup
+ * @port: port to free up
+ *
+ * Do the resource freeing and refcount dropping for the port. We must
+ * be careful about ordering and we must avoid freeing up the console.
+ */
+static void serial_do_free(struct usb_serial_port *port)
+{
+ struct usb_serial *serial;
+ struct module *owner;
+
+ /* The console is magical, do not hang up the console hardware
+ or there will be tears */
+ if (port->console)
+ return;
+
+ serial = port->serial;
+ owner = serial->type->driver.owner;
+ put_device(&port->dev);
/* Mustn't dereference port any more */
- if (count == 0) {
- mutex_lock(&serial->disc_mutex);
- if (!serial->disconnected)
- usb_autopm_put_interface(serial->interface);
- mutex_unlock(&serial->disc_mutex);
- }
+ mutex_lock(&serial->disc_mutex);
+ if (!serial->disconnected)
+ usb_autopm_put_interface(serial->interface);
+ mutex_unlock(&serial->disc_mutex);
usb_serial_put(serial);
-
/* Mustn't dereference serial any more */
- if (count == 0)
- module_put(owner);
+ module_put(owner);
+}
+
+static void serial_close(struct tty_struct *tty, struct file *filp)
+{
+ struct usb_serial_port *port = tty->driver_data;
+
+ dbg("%s - port %d", __func__, port->number);
+
+
+ if (tty_port_close_start(&port->port, tty, filp) == 0)
+ return;
+
+ serial_do_down(port);
+ tty_port_close_end(&port->port, tty);
+ tty_port_tty_set(&port->port, NULL);
+ serial_do_free(port);
+}
+
+static void serial_hangup(struct tty_struct *tty)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ serial_do_down(port);
+ tty_port_hangup(&port->port);
+ serial_do_free(port);
}
static int serial_write(struct tty_struct *tty, const unsigned char *buf,
@@ -648,6 +675,29 @@ static struct usb_serial_driver *search_serial_device(
return NULL;
}
+static int serial_carrier_raised(struct tty_port *port)
+{
+ struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
+ struct usb_serial_driver *drv = p->serial->type;
+ if (drv->carrier_raised)
+ return drv->carrier_raised(p);
+ /* No carrier control - don't block */
+ return 1;
+}
+
+static void serial_dtr_rts(struct tty_port *port, int on)
+{
+ struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
+ struct usb_serial_driver *drv = p->serial->type;
+ if (drv->dtr_rts)
+ drv->dtr_rts(p, on);
+}
+
+static const struct tty_port_operations serial_port_ops = {
+ .carrier_raised = serial_carrier_raised,
+ .dtr_rts = serial_dtr_rts,
+};
+
int usb_serial_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
@@ -841,6 +891,7 @@ int usb_serial_probe(struct usb_interface *interface,
if (!port)
goto probe_error;
tty_port_init(&port->port);
+ port->port.ops = &serial_port_ops;
port->serial = serial;
spin_lock_init(&port->lock);
mutex_init(&port->mutex);
@@ -974,6 +1025,7 @@ int usb_serial_probe(struct usb_interface *interface,
if (retval > 0) {
/* quietly accept this device, but don't bind to a
serial port as it's about to disappear */
+ serial->num_ports = 0;
goto exit;
}
}
@@ -1070,6 +1122,9 @@ void usb_serial_disconnect(struct usb_interface *interface)
if (port) {
struct tty_struct *tty = tty_port_tty_get(&port->port);
if (tty) {
+ /* The hangup will occur asynchronously but
+ the object refcounts will sort out all the
+ cleanup */
tty_hangup(tty);
tty_kref_put(tty);
}
@@ -1134,6 +1189,7 @@ static const struct tty_operations serial_ops = {
.open = serial_open,
.close = serial_close,
.write = serial_write,
+ .hangup = serial_hangup,
.write_room = serial_write_room,
.ioctl = serial_ioctl,
.set_termios = serial_set_termios,
@@ -1146,6 +1202,7 @@ static const struct tty_operations serial_ops = {
.proc_fops = &serial_proc_fops,
};
+
struct tty_driver *usb_serial_tty_driver;
static int __init usb_serial_init(void)
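The usb-serial core now routes tty_port carrier and DTR/RTS handling through optional per-driver hooks: serial_carrier_raised() reports the carrier as present when a driver supplies no callback, so such ports never block in open, and serial_dtr_rts() is simply a no-op in that case. A userspace sketch of this delegation pattern under those assumptions; the struct names and the fake_dcd() stub are illustrative only:

#include <stdio.h>

struct port;	/* opaque in this sketch */

struct serial_driver {
	const char *name;
	int  (*carrier_raised)(struct port *p);
	void (*dtr_rts)(struct port *p, int on);
};

/* Core-side wrappers: forward to the driver hook when it exists. */
static int core_carrier_raised(const struct serial_driver *drv, struct port *p)
{
	if (drv->carrier_raised)
		return drv->carrier_raised(p);
	return 1;	/* no carrier control - don't block */
}

static void core_dtr_rts(const struct serial_driver *drv, struct port *p, int on)
{
	if (drv->dtr_rts)
		drv->dtr_rts(p, on);
}

static int fake_dcd(struct port *p)
{
	(void)p;
	return 0;	/* pretend DCD is down */
}

int main(void)
{
	struct serial_driver plain = { .name = "plain" };
	struct serial_driver modem = { .name = "modem", .carrier_raised = fake_dcd };

	printf("%s: carrier=%d\n", plain.name, core_carrier_raised(&plain, NULL));
	printf("%s: carrier=%d\n", modem.name, core_carrier_raised(&modem, NULL));
	core_dtr_rts(&plain, NULL, 1);	/* silently a no-op */
	return 0;
}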
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 5ac414bda71..b15f1c0e1d4 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -38,8 +38,7 @@
/* function prototypes for a handspring visor */
static int visor_open(struct tty_struct *tty, struct usb_serial_port *port,
struct file *filp);
-static void visor_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp);
+static void visor_close(struct usb_serial_port *port);
static int visor_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static int visor_write_room(struct tty_struct *tty);
@@ -324,8 +323,7 @@ exit:
}
-static void visor_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void visor_close(struct usb_serial_port *port)
{
struct visor_private *priv = usb_get_serial_port_data(port);
unsigned char *transfer_buffer;
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 5335d3211c0..7c7295d09f3 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -147,8 +147,7 @@ static int whiteheat_attach(struct usb_serial *serial);
static void whiteheat_shutdown(struct usb_serial *serial);
static int whiteheat_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
-static void whiteheat_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp);
+static void whiteheat_close(struct usb_serial_port *port);
static int whiteheat_write(struct tty_struct *tty,
struct usb_serial_port *port,
const unsigned char *buf, int count);
@@ -712,8 +711,7 @@ exit:
}
-static void whiteheat_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void whiteheat_close(struct usb_serial_port *port)
{
struct whiteheat_private *info = usb_get_serial_port_data(port);
struct whiteheat_urb_wrap *wrap;
@@ -723,31 +721,7 @@ static void whiteheat_close(struct tty_struct *tty,
dbg("%s - port %d", __func__, port->number);
- mutex_lock(&port->serial->disc_mutex);
- /* filp is NULL when called from usb_serial_disconnect */
- if ((filp && (tty_hung_up_p(filp))) || port->serial->disconnected) {
- mutex_unlock(&port->serial->disc_mutex);
- return;
- }
- mutex_unlock(&port->serial->disc_mutex);
-
- tty->closing = 1;
-
-/*
- * Not currently in use; tty_wait_until_sent() calls
- * serial_chars_in_buffer() which deadlocks on the second semaphore
- * acquisition. This should be fixed at some point. Greg's been
- * notified.
- if ((filp->f_flags & (O_NDELAY | O_NONBLOCK)) == 0) {
- tty_wait_until_sent(tty, CLOSING_DELAY);
- }
-*/
-
- tty_driver_flush_buffer(tty);
- tty_ldisc_flush(tty);
-
firm_report_tx_done(port);
-
firm_close(port);
/* shutdown our bulk reads and writes */
@@ -775,10 +749,7 @@ static void whiteheat_close(struct tty_struct *tty,
}
spin_unlock_irq(&info->lock);
mutex_unlock(&info->deathwarrant);
-
stop_command_port(port->serial);
-
- tty->closing = 0;
}
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 4ca3b586064..cfa26d56ce6 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -132,7 +132,7 @@ static int slave_configure(struct scsi_device *sdev)
if (us->fflags & US_FL_MAX_SECTORS_MIN)
max_sectors = PAGE_CACHE_SIZE >> 9;
- if (sdev->request_queue->max_sectors > max_sectors)
+ if (queue_max_sectors(sdev->request_queue) > max_sectors)
blk_queue_max_sectors(sdev->request_queue,
max_sectors);
} else if (sdev->type == TYPE_TAPE) {
@@ -483,7 +483,7 @@ static ssize_t show_max_sectors(struct device *dev, struct device_attribute *att
{
struct scsi_device *sdev = to_scsi_device(dev);
- return sprintf(buf, "%u\n", sdev->request_queue->max_sectors);
+ return sprintf(buf, "%u\n", queue_max_sectors(sdev->request_queue));
}
/* Input routine for the sysfs max_sectors file */
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 9a577a800db..2fb63f6ea2f 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -29,14 +29,8 @@
/* configurable parameters */
#define ATMEL_LCDC_CVAL_DEFAULT 0xc8
-#define ATMEL_LCDC_DMA_BURST_LEN 8
-
-#if defined(CONFIG_ARCH_AT91SAM9263) || defined(CONFIG_ARCH_AT91CAP9) || \
- defined(CONFIG_ARCH_AT91SAM9RL)
-#define ATMEL_LCDC_FIFO_SIZE 2048
-#else
-#define ATMEL_LCDC_FIFO_SIZE 512
-#endif
+#define ATMEL_LCDC_DMA_BURST_LEN 8 /* words */
+#define ATMEL_LCDC_FIFO_SIZE 512 /* words */
#if defined(CONFIG_ARCH_AT91)
#define ATMEL_LCDFB_FBINFO_DEFAULT (FBINFO_DEFAULT \
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 38e86b84dce..59d7d5ec17a 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -180,7 +180,7 @@ static inline void vga_set_mem_top(struct vc_data *c)
}
#ifdef CONFIG_VGACON_SOFT_SCROLLBACK
-#include <linux/bootmem.h>
+#include <linux/slab.h>
/* software scrollback */
static void *vgacon_scrollback;
static int vgacon_scrollback_tail;
@@ -210,8 +210,7 @@ static void vgacon_scrollback_init(int pitch)
*/
static void __init_refok vgacon_scrollback_startup(void)
{
- vgacon_scrollback = alloc_bootmem(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE
- * 1024);
+ vgacon_scrollback = kcalloc(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024, GFP_NOWAIT);
vgacon_scrollback_init(vga_video_num_columns * 2);
}
diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c
index e6467cf9f19..020db7fc915 100644
--- a/drivers/video/hitfb.c
+++ b/drivers/video/hitfb.c
@@ -335,9 +335,9 @@ static int __init hitfb_probe(struct platform_device *dev)
if (fb_get_options("hitfb", NULL))
return -ENODEV;
- hitfb_fix.mmio_start = CONFIG_HD64461_IOBASE+0x1000;
+ hitfb_fix.mmio_start = HD64461_IO_OFFSET(0x1000);
hitfb_fix.mmio_len = 0x1000;
- hitfb_fix.smem_start = CONFIG_HD64461_IOBASE + 0x02000000;
+ hitfb_fix.smem_start = HD64461_IO_OFFSET(0x02000000);
hitfb_fix.smem_len = 512 * 1024;
lcdclor = fb_readw(HD64461_LCDCLOR);
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 5e9c6302433..d3a568e6b16 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -947,7 +947,8 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev)
int win;
for (win = 0; win <= S3C_FB_MAX_WIN; win++)
- s3c_fb_release_win(sfb, sfb->windows[win]);
+ if (sfb->windows[win])
+ s3c_fb_release_win(sfb, sfb->windows[win]);
iounmap(sfb->regs);
@@ -985,11 +986,20 @@ static int s3c_fb_suspend(struct platform_device *pdev, pm_message_t state)
static int s3c_fb_resume(struct platform_device *pdev)
{
struct s3c_fb *sfb = platform_get_drvdata(pdev);
+ struct s3c_fb_platdata *pd = sfb->pdata;
struct s3c_fb_win *win;
int win_no;
clk_enable(sfb->bus_clk);
+ /* setup registers */
+ writel(pd->vidcon1, sfb->regs + VIDCON1);
+
+ /* zero all windows before we do anything */
+ for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++)
+ s3c_fb_clear_win(sfb, win_no);
+
+ /* restore framebuffers */
for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++) {
win = sfb->windows[win_no];
if (!win)
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 8ac9cddac57..cab100acf98 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -18,6 +18,16 @@ config XEN_SCRUB_PAGES
secure, but slightly less efficient.
If in doubt, say yes.
+config XEN_DEV_EVTCHN
+ tristate "Xen /dev/xen/evtchn device"
+ depends on XEN
+ default y
+ help
+ The evtchn driver allows a userspace process to trigger event
+ channels and to receive notification of an event channel
+ firing.
+ If in doubt, say yes.
+
config XENFS
tristate "Xen filesystem"
depends on XEN
@@ -41,3 +51,13 @@ config XEN_COMPAT_XENFS
a xen platform.
If in doubt, say yes.
+config XEN_SYS_HYPERVISOR
+ bool "Create xen entries under /sys/hypervisor"
+ depends on XEN && SYSFS
+ select SYS_HYPERVISOR
+ default y
+ help
+ Create entries under /sys/hypervisor describing the Xen
+ hypervisor environment. When running native or in another
+ virtual environment, /sys/hypervisor will still be present,
+ but will have no xen contents.
\ No newline at end of file
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index ff8accc9e10..ec2a39b1e26 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -4,4 +4,6 @@ obj-y += xenbus/
obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
obj-$(CONFIG_XEN_BALLOON) += balloon.o
-obj-$(CONFIG_XENFS) += xenfs/
\ No newline at end of file
+obj-$(CONFIG_XEN_DEV_EVTCHN) += evtchn.o
+obj-$(CONFIG_XENFS) += xenfs/
+obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
\ No newline at end of file
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 30963af5dba..891d2e90753 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -151,6 +151,12 @@ static unsigned int evtchn_from_irq(unsigned irq)
return info_for_irq(irq)->evtchn;
}
+unsigned irq_from_evtchn(unsigned int evtchn)
+{
+ return evtchn_to_irq[evtchn];
+}
+EXPORT_SYMBOL_GPL(irq_from_evtchn);
+
static enum ipi_vector ipi_from_irq(unsigned irq)
{
struct irq_info *info = info_for_irq(irq);
@@ -335,7 +341,7 @@ static int find_unbound_irq(void)
if (irq == nr_irqs)
panic("No available IRQ to bind to: increase nr_irqs!\n");
- desc = irq_to_desc_alloc_cpu(irq, 0);
+ desc = irq_to_desc_alloc_node(irq, 0);
if (WARN_ON(desc == NULL))
return -1;
@@ -688,13 +694,13 @@ void rebind_evtchn_irq(int evtchn, int irq)
}
/* Rebind an evtchn so that it gets delivered to a specific cpu */
-static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
+static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
struct evtchn_bind_vcpu bind_vcpu;
int evtchn = evtchn_from_irq(irq);
if (!VALID_EVTCHN(evtchn))
- return;
+ return -1;
/* Send future instances of this interrupt to other vcpu. */
bind_vcpu.port = evtchn;
@@ -707,13 +713,15 @@ static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
*/
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
bind_evtchn_to_cpu(evtchn, tcpu);
-}
+ return 0;
+}
-static void set_affinity_irq(unsigned irq, const struct cpumask *dest)
+static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
unsigned tcpu = cpumask_first(dest);
- rebind_irq_to_cpu(irq, tcpu);
+
+ return rebind_irq_to_cpu(irq, tcpu);
}
int resend_irq_on_evtchn(unsigned int irq)
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
new file mode 100644
index 00000000000..af031950f9b
--- /dev/null
+++ b/drivers/xen/evtchn.c
@@ -0,0 +1,507 @@
+/******************************************************************************
+ * evtchn.c
+ *
+ * Driver for receiving and demuxing event-channel signals.
+ *
+ * Copyright (c) 2004-2005, K A Fraser
+ * Multi-process extensions Copyright (c) 2004, Steven Smith
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/major.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/poll.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/gfp.h>
+#include <linux/mutex.h>
+#include <linux/cpu.h>
+#include <xen/events.h>
+#include <xen/evtchn.h>
+#include <asm/xen/hypervisor.h>
+
+struct per_user_data {
+ struct mutex bind_mutex; /* serialize bind/unbind operations */
+
+ /* Notification ring, accessed via /dev/xen/evtchn. */
+#define EVTCHN_RING_SIZE (PAGE_SIZE / sizeof(evtchn_port_t))
+#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
+ evtchn_port_t *ring;
+ unsigned int ring_cons, ring_prod, ring_overflow;
+ struct mutex ring_cons_mutex; /* protect against concurrent readers */
+
+ /* Processes wait on this queue when ring is empty. */
+ wait_queue_head_t evtchn_wait;
+ struct fasync_struct *evtchn_async_queue;
+ const char *name;
+};
+
+/* Who's bound to each port? */
+static struct per_user_data *port_user[NR_EVENT_CHANNELS];
+static DEFINE_SPINLOCK(port_user_lock); /* protects port_user[] and ring_prod */
+
+irqreturn_t evtchn_interrupt(int irq, void *data)
+{
+ unsigned int port = (unsigned long)data;
+ struct per_user_data *u;
+
+ spin_lock(&port_user_lock);
+
+ u = port_user[port];
+
+ disable_irq_nosync(irq);
+
+ if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
+ u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
+ wmb(); /* Ensure ring contents visible */
+ if (u->ring_cons == u->ring_prod++) {
+ wake_up_interruptible(&u->evtchn_wait);
+ kill_fasync(&u->evtchn_async_queue,
+ SIGIO, POLL_IN);
+ }
+ } else {
+ u->ring_overflow = 1;
+ }
+
+ spin_unlock(&port_user_lock);
+
+ return IRQ_HANDLED;
+}
+
+static ssize_t evtchn_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int rc;
+ unsigned int c, p, bytes1 = 0, bytes2 = 0;
+ struct per_user_data *u = file->private_data;
+
+ /* Whole number of ports. */
+ count &= ~(sizeof(evtchn_port_t)-1);
+
+ if (count == 0)
+ return 0;
+
+ if (count > PAGE_SIZE)
+ count = PAGE_SIZE;
+
+ for (;;) {
+ mutex_lock(&u->ring_cons_mutex);
+
+ rc = -EFBIG;
+ if (u->ring_overflow)
+ goto unlock_out;
+
+ c = u->ring_cons;
+ p = u->ring_prod;
+ if (c != p)
+ break;
+
+ mutex_unlock(&u->ring_cons_mutex);
+
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ rc = wait_event_interruptible(u->evtchn_wait,
+ u->ring_cons != u->ring_prod);
+ if (rc)
+ return rc;
+ }
+
+ /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
+ if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
+ bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
+ sizeof(evtchn_port_t);
+ bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
+ } else {
+ bytes1 = (p - c) * sizeof(evtchn_port_t);
+ bytes2 = 0;
+ }
+
+ /* Truncate chunks according to caller's maximum byte count. */
+ if (bytes1 > count) {
+ bytes1 = count;
+ bytes2 = 0;
+ } else if ((bytes1 + bytes2) > count) {
+ bytes2 = count - bytes1;
+ }
+
+ rc = -EFAULT;
+ rmb(); /* Ensure that we see the port before we copy it. */
+ if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
+ ((bytes2 != 0) &&
+ copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
+ goto unlock_out;
+
+ u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
+ rc = bytes1 + bytes2;
+
+ unlock_out:
+ mutex_unlock(&u->ring_cons_mutex);
+ return rc;
+}
+
+static ssize_t evtchn_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int rc, i;
+ evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
+ struct per_user_data *u = file->private_data;
+
+ if (kbuf == NULL)
+ return -ENOMEM;
+
+ /* Whole number of ports. */
+ count &= ~(sizeof(evtchn_port_t)-1);
+
+ rc = 0;
+ if (count == 0)
+ goto out;
+
+ if (count > PAGE_SIZE)
+ count = PAGE_SIZE;
+
+ rc = -EFAULT;
+ if (copy_from_user(kbuf, buf, count) != 0)
+ goto out;
+
+ spin_lock_irq(&port_user_lock);
+ for (i = 0; i < (count/sizeof(evtchn_port_t)); i++)
+ if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
+ enable_irq(irq_from_evtchn(kbuf[i]));
+ spin_unlock_irq(&port_user_lock);
+
+ rc = count;
+
+ out:
+ free_page((unsigned long)kbuf);
+ return rc;
+}
+
+static int evtchn_bind_to_user(struct per_user_data *u, int port)
+{
+ int rc = 0;
+
+ /*
+ * Ports are never reused, so every caller should pass in a
+ * unique port.
+ *
+ * (Locking not necessary because we haven't registered the
+ * interrupt handler yet, and our caller has already
+ * serialized bind operations.)
+ */
+ BUG_ON(port_user[port] != NULL);
+ port_user[port] = u;
+
+ rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
+ u->name, (void *)(unsigned long)port);
+ if (rc >= 0)
+ rc = 0;
+
+ return rc;
+}
+
+static void evtchn_unbind_from_user(struct per_user_data *u, int port)
+{
+ int irq = irq_from_evtchn(port);
+
+ unbind_from_irqhandler(irq, (void *)(unsigned long)port);
+
+ /* make sure we unbind the irq handler before clearing the port */
+ barrier();
+
+ port_user[port] = NULL;
+}
+
+static long evtchn_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int rc;
+ struct per_user_data *u = file->private_data;
+ void __user *uarg = (void __user *) arg;
+
+ /* Prevent bind from racing with unbind */
+ mutex_lock(&u->bind_mutex);
+
+ switch (cmd) {
+ case IOCTL_EVTCHN_BIND_VIRQ: {
+ struct ioctl_evtchn_bind_virq bind;
+ struct evtchn_bind_virq bind_virq;
+
+ rc = -EFAULT;
+ if (copy_from_user(&bind, uarg, sizeof(bind)))
+ break;
+
+ bind_virq.virq = bind.virq;
+ bind_virq.vcpu = 0;
+ rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
+ &bind_virq);
+ if (rc != 0)
+ break;
+
+ rc = evtchn_bind_to_user(u, bind_virq.port);
+ if (rc == 0)
+ rc = bind_virq.port;
+ break;
+ }
+
+ case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
+ struct ioctl_evtchn_bind_interdomain bind;
+ struct evtchn_bind_interdomain bind_interdomain;
+
+ rc = -EFAULT;
+ if (copy_from_user(&bind, uarg, sizeof(bind)))
+ break;
+
+ bind_interdomain.remote_dom = bind.remote_domain;
+ bind_interdomain.remote_port = bind.remote_port;
+ rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
+ &bind_interdomain);
+ if (rc != 0)
+ break;
+
+ rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
+ if (rc == 0)
+ rc = bind_interdomain.local_port;
+ break;
+ }
+
+ case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
+ struct ioctl_evtchn_bind_unbound_port bind;
+ struct evtchn_alloc_unbound alloc_unbound;
+
+ rc = -EFAULT;
+ if (copy_from_user(&bind, uarg, sizeof(bind)))
+ break;
+
+ alloc_unbound.dom = DOMID_SELF;
+ alloc_unbound.remote_dom = bind.remote_domain;
+ rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
+ &alloc_unbound);
+ if (rc != 0)
+ break;
+
+ rc = evtchn_bind_to_user(u, alloc_unbound.port);
+ if (rc == 0)
+ rc = alloc_unbound.port;
+ break;
+ }
+
+ case IOCTL_EVTCHN_UNBIND: {
+ struct ioctl_evtchn_unbind unbind;
+
+ rc = -EFAULT;
+ if (copy_from_user(&unbind, uarg, sizeof(unbind)))
+ break;
+
+ rc = -EINVAL;
+ if (unbind.port >= NR_EVENT_CHANNELS)
+ break;
+
+ spin_lock_irq(&port_user_lock);
+
+ rc = -ENOTCONN;
+ if (port_user[unbind.port] != u) {
+ spin_unlock_irq(&port_user_lock);
+ break;
+ }
+
+ evtchn_unbind_from_user(u, unbind.port);
+
+ spin_unlock_irq(&port_user_lock);
+
+ rc = 0;
+ break;
+ }
+
+ case IOCTL_EVTCHN_NOTIFY: {
+ struct ioctl_evtchn_notify notify;
+
+ rc = -EFAULT;
+ if (copy_from_user(&notify, uarg, sizeof(notify)))
+ break;
+
+ if (notify.port >= NR_EVENT_CHANNELS) {
+ rc = -EINVAL;
+ } else if (port_user[notify.port] != u) {
+ rc = -ENOTCONN;
+ } else {
+ notify_remote_via_evtchn(notify.port);
+ rc = 0;
+ }
+ break;
+ }
+
+ case IOCTL_EVTCHN_RESET: {
+ /* Initialise the ring to empty. Clear errors. */
+ mutex_lock(&u->ring_cons_mutex);
+ spin_lock_irq(&port_user_lock);
+ u->ring_cons = u->ring_prod = u->ring_overflow = 0;
+ spin_unlock_irq(&port_user_lock);
+ mutex_unlock(&u->ring_cons_mutex);
+ rc = 0;
+ break;
+ }
+
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+ mutex_unlock(&u->bind_mutex);
+
+ return rc;
+}
+
+static unsigned int evtchn_poll(struct file *file, poll_table *wait)
+{
+ unsigned int mask = POLLOUT | POLLWRNORM;
+ struct per_user_data *u = file->private_data;
+
+ poll_wait(file, &u->evtchn_wait, wait);
+ if (u->ring_cons != u->ring_prod)
+ mask |= POLLIN | POLLRDNORM;
+ if (u->ring_overflow)
+ mask = POLLERR;
+ return mask;
+}
+
+static int evtchn_fasync(int fd, struct file *filp, int on)
+{
+ struct per_user_data *u = filp->private_data;
+ return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
+}
+
+static int evtchn_open(struct inode *inode, struct file *filp)
+{
+ struct per_user_data *u;
+
+ u = kzalloc(sizeof(*u), GFP_KERNEL);
+ if (u == NULL)
+ return -ENOMEM;
+
+ u->name = kasprintf(GFP_KERNEL, "evtchn:%s", current->comm);
+ if (u->name == NULL) {
+ kfree(u);
+ return -ENOMEM;
+ }
+
+ init_waitqueue_head(&u->evtchn_wait);
+
+ u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
+ if (u->ring == NULL) {
+ kfree(u->name);
+ kfree(u);
+ return -ENOMEM;
+ }
+
+ mutex_init(&u->bind_mutex);
+ mutex_init(&u->ring_cons_mutex);
+
+ filp->private_data = u;
+
+ return 0;
+}
+
+static int evtchn_release(struct inode *inode, struct file *filp)
+{
+ int i;
+ struct per_user_data *u = filp->private_data;
+
+ spin_lock_irq(&port_user_lock);
+
+ free_page((unsigned long)u->ring);
+
+ for (i = 0; i < NR_EVENT_CHANNELS; i++) {
+ if (port_user[i] != u)
+ continue;
+
+ evtchn_unbind_from_user(port_user[i], i);
+ }
+
+ spin_unlock_irq(&port_user_lock);
+
+ kfree(u->name);
+ kfree(u);
+
+ return 0;
+}
+
+static const struct file_operations evtchn_fops = {
+ .owner = THIS_MODULE,
+ .read = evtchn_read,
+ .write = evtchn_write,
+ .unlocked_ioctl = evtchn_ioctl,
+ .poll = evtchn_poll,
+ .fasync = evtchn_fasync,
+ .open = evtchn_open,
+ .release = evtchn_release,
+};
+
+static struct miscdevice evtchn_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "evtchn",
+ .fops = &evtchn_fops,
+};
+
+static int __init evtchn_init(void)
+{
+ int err;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ spin_lock_init(&port_user_lock);
+ memset(port_user, 0, sizeof(port_user));
+
+ /* Create '/dev/misc/evtchn'. */
+ err = misc_register(&evtchn_miscdev);
+ if (err != 0) {
+ printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
+ return err;
+ }
+
+ printk(KERN_INFO "Event-channel device installed.\n");
+
+ return 0;
+}
+
+static void __exit evtchn_cleanup(void)
+{
+ misc_deregister(&evtchn_miscdev);
+}
+
+module_init(evtchn_init);
+module_exit(evtchn_cleanup);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 4b5b84837ee..fddc2025dec 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -98,9 +98,8 @@ static void do_suspend(void)
goto out;
}
- printk("suspending xenbus...\n");
- /* XXX use normal device tree? */
- xenbus_suspend();
+ printk(KERN_DEBUG "suspending xenstore...\n");
+ xs_suspend();
err = device_power_down(PMSG_SUSPEND);
if (err) {
@@ -116,9 +115,9 @@ static void do_suspend(void)
if (!cancelled) {
xen_arch_resume();
- xenbus_resume();
+ xs_resume();
} else
- xenbus_suspend_cancel();
+ xs_suspend_cancel();
device_power_up(PMSG_RESUME);
diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
new file mode 100644
index 00000000000..88a60e03ccf
--- /dev/null
+++ b/drivers/xen/sys-hypervisor.c
@@ -0,0 +1,445 @@
+/*
+ * copyright (c) 2006 IBM Corporation
+ * Authored by: Mike D. Day <ncmike@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kobject.h>
+
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+
+#include <xen/xenbus.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/version.h>
+
+#define HYPERVISOR_ATTR_RO(_name) \
+static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name)
+
+#define HYPERVISOR_ATTR_RW(_name) \
+static struct hyp_sysfs_attr _name##_attr = \
+ __ATTR(_name, 0644, _name##_show, _name##_store)
+
+struct hyp_sysfs_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct hyp_sysfs_attr *, char *);
+ ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t);
+ void *hyp_attr_data;
+};
+
+static ssize_t type_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ return sprintf(buffer, "xen\n");
+}
+
+HYPERVISOR_ATTR_RO(type);
+
+static int __init xen_sysfs_type_init(void)
+{
+ return sysfs_create_file(hypervisor_kobj, &type_attr.attr);
+}
+
+static void xen_sysfs_type_destroy(void)
+{
+ sysfs_remove_file(hypervisor_kobj, &type_attr.attr);
+}
+
+/* xen version attributes */
+static ssize_t major_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ int version = HYPERVISOR_xen_version(XENVER_version, NULL);
+ if (version)
+ return sprintf(buffer, "%d\n", version >> 16);
+ return -ENODEV;
+}
+
+HYPERVISOR_ATTR_RO(major);
+
+static ssize_t minor_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ int version = HYPERVISOR_xen_version(XENVER_version, NULL);
+ if (version)
+ return sprintf(buffer, "%d\n", version & 0xff);
+ return -ENODEV;
+}
+
+HYPERVISOR_ATTR_RO(minor);
+
+static ssize_t extra_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ int ret = -ENOMEM;
+ char *extra;
+
+ extra = kmalloc(XEN_EXTRAVERSION_LEN, GFP_KERNEL);
+ if (extra) {
+ ret = HYPERVISOR_xen_version(XENVER_extraversion, extra);
+ if (!ret)
+ ret = sprintf(buffer, "%s\n", extra);
+ kfree(extra);
+ }
+
+ return ret;
+}
+
+HYPERVISOR_ATTR_RO(extra);
+
+static struct attribute *version_attrs[] = {
+ &major_attr.attr,
+ &minor_attr.attr,
+ &extra_attr.attr,
+ NULL
+};
+
+static struct attribute_group version_group = {
+ .name = "version",
+ .attrs = version_attrs,
+};
+
+static int __init xen_sysfs_version_init(void)
+{
+ return sysfs_create_group(hypervisor_kobj, &version_group);
+}
+
+static void xen_sysfs_version_destroy(void)
+{
+ sysfs_remove_group(hypervisor_kobj, &version_group);
+}
+
+/* UUID */
+
+static ssize_t uuid_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ char *vm, *val;
+ int ret;
+ extern int xenstored_ready;
+
+ if (!xenstored_ready)
+ return -EBUSY;
+
+ vm = xenbus_read(XBT_NIL, "vm", "", NULL);
+ if (IS_ERR(vm))
+ return PTR_ERR(vm);
+ val = xenbus_read(XBT_NIL, vm, "uuid", NULL);
+ kfree(vm);
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+ ret = sprintf(buffer, "%s\n", val);
+ kfree(val);
+ return ret;
+}
+
+HYPERVISOR_ATTR_RO(uuid);
+
+static int __init xen_sysfs_uuid_init(void)
+{
+ return sysfs_create_file(hypervisor_kobj, &uuid_attr.attr);
+}
+
+static void xen_sysfs_uuid_destroy(void)
+{
+ sysfs_remove_file(hypervisor_kobj, &uuid_attr.attr);
+}
+
+/* xen compilation attributes */
+
+static ssize_t compiler_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ int ret = -ENOMEM;
+ struct xen_compile_info *info;
+
+ info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
+ if (info) {
+ ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
+ if (!ret)
+ ret = sprintf(buffer, "%s\n", info->compiler);
+ kfree(info);
+ }
+
+ return ret;
+}
+
+HYPERVISOR_ATTR_RO(compiler);
+
+static ssize_t compiled_by_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ int ret = -ENOMEM;
+ struct xen_compile_info *info;
+
+ info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
+ if (info) {
+ ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
+ if (!ret)
+ ret = sprintf(buffer, "%s\n", info->compile_by);
+ kfree(info);
+ }
+
+ return ret;
+}
+
+HYPERVISOR_ATTR_RO(compiled_by);
+
+static ssize_t compile_date_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ int ret = -ENOMEM;
+ struct xen_compile_info *info;
+
+ info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
+ if (info) {
+ ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
+ if (!ret)
+ ret = sprintf(buffer, "%s\n", info->compile_date);
+ kfree(info);
+ }
+
+ return ret;
+}
+
+HYPERVISOR_ATTR_RO(compile_date);
+
+static struct attribute *xen_compile_attrs[] = {
+ &compiler_attr.attr,
+ &compiled_by_attr.attr,
+ &compile_date_attr.attr,
+ NULL
+};
+
+static struct attribute_group xen_compilation_group = {
+ .name = "compilation",
+ .attrs = xen_compile_attrs,
+};
+
+static int __init xen_compilation_init(void)
+{
+ return sysfs_create_group(hypervisor_kobj, &xen_compilation_group);
+}
+
+static void xen_compilation_destroy(void)
+{
+ sysfs_remove_group(hypervisor_kobj, &xen_compilation_group);
+}
+
+/* xen properties info */
+
+static ssize_t capabilities_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ int ret = -ENOMEM;
+ char *caps;
+
+ caps = kmalloc(XEN_CAPABILITIES_INFO_LEN, GFP_KERNEL);
+ if (caps) {
+ ret = HYPERVISOR_xen_version(XENVER_capabilities, caps);
+ if (!ret)
+ ret = sprintf(buffer, "%s\n", caps);
+ kfree(caps);
+ }
+
+ return ret;
+}
+
+HYPERVISOR_ATTR_RO(capabilities);
+
+static ssize_t changeset_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ int ret = -ENOMEM;
+ char *cset;
+
+ cset = kmalloc(XEN_CHANGESET_INFO_LEN, GFP_KERNEL);
+ if (cset) {
+ ret = HYPERVISOR_xen_version(XENVER_changeset, cset);
+ if (!ret)
+ ret = sprintf(buffer, "%s\n", cset);
+ kfree(cset);
+ }
+
+ return ret;
+}
+
+HYPERVISOR_ATTR_RO(changeset);
+
+static ssize_t virtual_start_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ int ret = -ENOMEM;
+ struct xen_platform_parameters *parms;
+
+ parms = kmalloc(sizeof(struct xen_platform_parameters), GFP_KERNEL);
+ if (parms) {
+ ret = HYPERVISOR_xen_version(XENVER_platform_parameters,
+ parms);
+ if (!ret)
+ ret = sprintf(buffer, "%lx\n", parms->virt_start);
+ kfree(parms);
+ }
+
+ return ret;
+}
+
+HYPERVISOR_ATTR_RO(virtual_start);
+
+static ssize_t pagesize_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ int ret;
+
+ ret = HYPERVISOR_xen_version(XENVER_pagesize, NULL);
+ if (ret > 0)
+ ret = sprintf(buffer, "%x\n", ret);
+
+ return ret;
+}
+
+HYPERVISOR_ATTR_RO(pagesize);
+
+static ssize_t xen_feature_show(int index, char *buffer)
+{
+ ssize_t ret;
+ struct xen_feature_info info;
+
+ info.submap_idx = index;
+ ret = HYPERVISOR_xen_version(XENVER_get_features, &info);
+ if (!ret)
+ ret = sprintf(buffer, "%08x", info.submap);
+
+ return ret;
+}
+
+static ssize_t features_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ ssize_t len;
+ int i;
+
+ len = 0;
+ for (i = XENFEAT_NR_SUBMAPS-1; i >= 0; i--) {
+ int ret = xen_feature_show(i, buffer + len);
+ if (ret < 0) {
+ if (len == 0)
+ len = ret;
+ break;
+ }
+ len += ret;
+ }
+ if (len > 0)
+ buffer[len++] = '\n';
+
+ return len;
+}
+
+HYPERVISOR_ATTR_RO(features);
+
+static struct attribute *xen_properties_attrs[] = {
+ &capabilities_attr.attr,
+ &changeset_attr.attr,
+ &virtual_start_attr.attr,
+ &pagesize_attr.attr,
+ &features_attr.attr,
+ NULL
+};
+
+static struct attribute_group xen_properties_group = {
+ .name = "properties",
+ .attrs = xen_properties_attrs,
+};
+
+static int __init xen_properties_init(void)
+{
+ return sysfs_create_group(hypervisor_kobj, &xen_properties_group);
+}
+
+static void xen_properties_destroy(void)
+{
+ sysfs_remove_group(hypervisor_kobj, &xen_properties_group);
+}
+
+static int __init hyper_sysfs_init(void)
+{
+ int ret;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ ret = xen_sysfs_type_init();
+ if (ret)
+ goto out;
+ ret = xen_sysfs_version_init();
+ if (ret)
+ goto version_out;
+ ret = xen_compilation_init();
+ if (ret)
+ goto comp_out;
+ ret = xen_sysfs_uuid_init();
+ if (ret)
+ goto uuid_out;
+ ret = xen_properties_init();
+ if (ret)
+ goto prop_out;
+
+ goto out;
+
+prop_out:
+ xen_sysfs_uuid_destroy();
+uuid_out:
+ xen_compilation_destroy();
+comp_out:
+ xen_sysfs_version_destroy();
+version_out:
+ xen_sysfs_type_destroy();
+out:
+ return ret;
+}
+
+static void __exit hyper_sysfs_exit(void)
+{
+ xen_properties_destroy();
+ xen_compilation_destroy();
+ xen_sysfs_uuid_destroy();
+ xen_sysfs_version_destroy();
+ xen_sysfs_type_destroy();
+}
+
+module_init(hyper_sysfs_init);
+module_exit(hyper_sysfs_exit);
+
+static ssize_t hyp_sysfs_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buffer)
+{
+ struct hyp_sysfs_attr *hyp_attr;
+ hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr);
+ if (hyp_attr->show)
+ return hyp_attr->show(hyp_attr, buffer);
+ return 0;
+}
+
+static ssize_t hyp_sysfs_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buffer,
+ size_t len)
+{
+ struct hyp_sysfs_attr *hyp_attr;
+ hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr);
+ if (hyp_attr->store)
+ return hyp_attr->store(hyp_attr, buffer, len);
+ return 0;
+}
+
+static struct sysfs_ops hyp_sysfs_ops = {
+ .show = hyp_sysfs_show,
+ .store = hyp_sysfs_store,
+};
+
+static struct kobj_type hyp_sysfs_kobj_type = {
+ .sysfs_ops = &hyp_sysfs_ops,
+};
+
+static int __init hypervisor_subsys_init(void)
+{
+ if (!xen_domain())
+ return -ENODEV;
+
+ hypervisor_kobj->ktype = &hyp_sysfs_kobj_type;
+ return 0;
+}
+device_initcall(hypervisor_subsys_init);
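The groups registered above hang off hypervisor_kobj, which sysfs exposes as /sys/hypervisor, so the attributes can be consumed with ordinary file reads. A small sketch, assuming a Xen guest with sysfs mounted at /sys; the demo program and its name are hypothetical, and individual attributes may be absent depending on hypervisor support:

/* hyp_sysfs_demo.c - dump a few of the attributes created above */
#include <stdio.h>

static void dump(const char *path)
{
	char buf[128];
	FILE *f = fopen(path, "r");

	if (!f)
		return;			/* attribute not present on this system */
	if (fgets(buf, sizeof(buf), f))
		printf("%-42s %s", path, buf);
	fclose(f);
}

int main(void)
{
	dump("/sys/hypervisor/type");
	dump("/sys/hypervisor/uuid");
	dump("/sys/hypervisor/version/major");
	dump("/sys/hypervisor/version/minor");
	dump("/sys/hypervisor/version/extra");
	dump("/sys/hypervisor/properties/capabilities");
	dump("/sys/hypervisor/properties/features");
	return 0;
}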
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 773d1cf2328..d42e25d5968 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -71,6 +71,9 @@ static int xenbus_probe_frontend(const char *type, const char *name);
static void xenbus_dev_shutdown(struct device *_dev);
+static int xenbus_dev_suspend(struct device *dev, pm_message_t state);
+static int xenbus_dev_resume(struct device *dev);
+
/* If something in array of ids matches this device, return it. */
static const struct xenbus_device_id *
match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
@@ -188,6 +191,9 @@ static struct xen_bus_type xenbus_frontend = {
.remove = xenbus_dev_remove,
.shutdown = xenbus_dev_shutdown,
.dev_attrs = xenbus_dev_attrs,
+
+ .suspend = xenbus_dev_suspend,
+ .resume = xenbus_dev_resume,
},
};
@@ -654,6 +660,7 @@ void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
kfree(root);
}
+EXPORT_SYMBOL_GPL(xenbus_dev_changed);
static void frontend_changed(struct xenbus_watch *watch,
const char **vec, unsigned int len)
@@ -669,7 +676,7 @@ static struct xenbus_watch fe_watch = {
.callback = frontend_changed,
};
-static int suspend_dev(struct device *dev, void *data)
+static int xenbus_dev_suspend(struct device *dev, pm_message_t state)
{
int err = 0;
struct xenbus_driver *drv;
@@ -682,35 +689,14 @@ static int suspend_dev(struct device *dev, void *data)
drv = to_xenbus_driver(dev->driver);
xdev = container_of(dev, struct xenbus_device, dev);
if (drv->suspend)
- err = drv->suspend(xdev);
+ err = drv->suspend(xdev, state);
if (err)
printk(KERN_WARNING
"xenbus: suspend %s failed: %i\n", dev_name(dev), err);
return 0;
}
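With suspend/resume now routed through the bus type, a frontend's .suspend callback receives the pm_message_t that the driver core passes down, matching the drv->suspend(xdev, state) call above. A hedged sketch of a frontend adapting to the changed prototype; "examplefront" and its info structure are invented for illustration:

/* Hypothetical frontend callbacks under the new prototype. */
#include <linux/device.h>
#include <xen/xenbus.h>

struct examplefront_info {
	int suspended;		/* imaginary per-device state */
};

static int examplefront_suspend(struct xenbus_device *dev, pm_message_t state)
{
	struct examplefront_info *info = dev_get_drvdata(&dev->dev);

	/* Quiesce the (imaginary) device before shared state goes away. */
	info->suspended = 1;
	return 0;
}

static int examplefront_resume(struct xenbus_device *dev)
{
	struct examplefront_info *info = dev_get_drvdata(&dev->dev);

	info->suspended = 0;
	return 0;
}

These would be wired up through the driver's .suspend and .resume members, the hooks xenbus_dev_suspend() and xenbus_dev_resume() invoke.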
-static int suspend_cancel_dev(struct device *dev, void *data)
-{
- int err = 0;
- struct xenbus_driver *drv;
- struct xenbus_device *xdev;
-
- DPRINTK("");
-
- if (dev->driver == NULL)
- return 0;
- drv = to_xenbus_driver(dev->driver);
- xdev = container_of(dev, struct xenbus_device, dev);
- if (drv->suspend_cancel)
- err = drv->suspend_cancel(xdev);
- if (err)
- printk(KERN_WARNING
- "xenbus: suspend_cancel %s failed: %i\n",
- dev_name(dev), err);
- return 0;
-}
-
-static int resume_dev(struct device *dev, void *data)
+static int xenbus_dev_resume(struct device *dev)
{
int err;
struct xenbus_driver *drv;
@@ -755,33 +741,6 @@ static int resume_dev(struct device *dev, void *data)
return 0;
}
-void xenbus_suspend(void)
-{
- DPRINTK("");
-
- bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
- xenbus_backend_suspend(suspend_dev);
- xs_suspend();
-}
-EXPORT_SYMBOL_GPL(xenbus_suspend);
-
-void xenbus_resume(void)
-{
- xb_init_comms();
- xs_resume();
- bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
- xenbus_backend_resume(resume_dev);
-}
-EXPORT_SYMBOL_GPL(xenbus_resume);
-
-void xenbus_suspend_cancel(void)
-{
- xs_suspend_cancel();
- bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev);
- xenbus_backend_resume(suspend_cancel_dev);
-}
-EXPORT_SYMBOL_GPL(xenbus_suspend_cancel);
-
/* A flag to determine if xenstored is 'ready' (i.e. has started) */
int xenstored_ready = 0;
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index e325eab4724..eab33f1dbdf 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -673,6 +673,8 @@ void xs_resume(void)
struct xenbus_watch *watch;
char token[sizeof(watch) * 2 + 1];
+ xb_init_comms();
+
mutex_unlock(&xs_state.response_mutex);
mutex_unlock(&xs_state.request_mutex);
up_write(&xs_state.transaction_mutex);
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 515741a8e6b..6559e0c752c 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -20,10 +20,27 @@
MODULE_DESCRIPTION("Xen filesystem");
MODULE_LICENSE("GPL");
+static ssize_t capabilities_read(struct file *file, char __user *buf,
+ size_t size, loff_t *off)
+{
+ char *tmp = "";
+
+ if (xen_initial_domain())
+ tmp = "control_d\n";
+
+ return simple_read_from_buffer(buf, size, off, tmp, strlen(tmp));
+}
+
+static const struct file_operations capabilities_file_ops = {
+ .read = capabilities_read,
+};
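The new "capabilities" node (registered in the xenfs root directory below) lets user space distinguish the control domain from an unprivileged guest by a plain read; xenfs is conventionally mounted at /proc/xen, though this patch does not enforce that. A hypothetical check:

/* is_dom0.c - sketch; assumes xenfs is mounted at /proc/xen */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[64] = "";
	FILE *f = fopen("/proc/xen/capabilities", "r");

	if (f) {
		if (!fgets(buf, sizeof(buf), f))
			buf[0] = '\0';	/* empty file: not the control domain */
		fclose(f);
	}

	/* The file reads "control_d" only in the initial (control) domain. */
	printf("%s\n", strstr(buf, "control_d") ? "dom0" : "domU (or not Xen)");
	return 0;
}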
+
static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
{
static struct tree_descr xenfs_files[] = {
- [2] = {"xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR},
+ [1] = {},
+ { "xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR },
+ { "capabilities", &capabilities_file_ops, S_IRUGO },
{""},
};