Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/ata_piix.c | 7
-rw-r--r--  drivers/ata/libata-core.c | 2
-rw-r--r--  drivers/ata/libata-transport.c | 1
-rw-r--r--  drivers/ata/pata_bf54x.c | 167
-rw-r--r--  drivers/ata/sata_fsl.c | 11
-rw-r--r--  drivers/bcma/bcma_private.h | 1
-rw-r--r--  drivers/bcma/host_pci.c | 43
-rw-r--r--  drivers/bcma/main.c | 24
-rw-r--r--  drivers/block/Kconfig | 11
-rw-r--r--  drivers/block/Makefile | 1
-rw-r--r--  drivers/block/nvme.c | 1745
-rw-r--r--  drivers/dma/Kconfig | 27
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/amba-pl08x.c | 41
-rw-r--r--  drivers/dma/at_hdmac.c | 103
-rw-r--r--  drivers/dma/at_hdmac_regs.h | 1
-rw-r--r--  drivers/dma/coh901318.c | 12
-rw-r--r--  drivers/dma/coh901318_lli.c | 23
-rw-r--r--  drivers/dma/coh901318_lli.h | 4
-rw-r--r--  drivers/dma/dmaengine.c | 4
-rw-r--r--  drivers/dma/dw_dmac.c | 83
-rw-r--r--  drivers/dma/dw_dmac_regs.h | 1
-rw-r--r--  drivers/dma/ep93xx_dma.c | 90
-rw-r--r--  drivers/dma/fsldma.c | 4
-rw-r--r--  drivers/dma/imx-dma.c | 10
-rw-r--r--  drivers/dma/imx-sdma.c | 27
-rw-r--r--  drivers/dma/intel_mid_dma.c | 39
-rw-r--r--  drivers/dma/intel_mid_dma_regs.h | 4
-rw-r--r--  drivers/dma/iop-adma.c | 16
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c | 29
-rw-r--r--  drivers/dma/mpc512x_dma.c | 12
-rw-r--r--  drivers/dma/mxs-dma.c | 53
-rw-r--r--  drivers/dma/pch_dma.c | 20
-rw-r--r--  drivers/dma/pl330.c | 31
-rw-r--r--  drivers/dma/shdma.c | 72
-rw-r--r--  drivers/dma/sirf-dma.c | 707
-rw-r--r--  drivers/dma/ste_dma40.c | 441
-rw-r--r--  drivers/dma/ste_dma40_ll.h | 11
-rw-r--r--  drivers/dma/timb_dma.c | 30
-rw-r--r--  drivers/dma/txx9dmac.c | 12
-rw-r--r--  drivers/media/common/tuners/tuner-xc2028.c | 27
-rw-r--r--  drivers/media/common/tuners/xc4000.c | 86
-rw-r--r--  drivers/media/dvb/dvb-core/dvb_frontend.c | 41
-rw-r--r--  drivers/media/dvb/dvb-usb/anysee.c | 20
-rw-r--r--  drivers/media/dvb/dvb-usb/dib0700.h | 2
-rw-r--r--  drivers/media/dvb/dvb-usb/dib0700_core.c | 1
-rw-r--r--  drivers/media/dvb/dvb-usb/dib0700_devices.c | 150
-rw-r--r--  drivers/media/dvb/frontends/cxd2820r_core.c | 10
-rw-r--r--  drivers/media/dvb/frontends/ds3000.c | 2
-rw-r--r--  drivers/media/dvb/frontends/mb86a20s.c | 8
-rw-r--r--  drivers/media/dvb/frontends/tda18271c2dd.c | 1
-rw-r--r--  drivers/media/video/as3645a.c | 1
-rw-r--r--  drivers/media/video/cx18/cx18-fileops.c | 41
-rw-r--r--  drivers/media/video/cx231xx/cx231xx-cards.c | 2
-rw-r--r--  drivers/media/video/cx23885/cx23885-cards.c | 4
-rw-r--r--  drivers/media/video/cx23885/cx23885-dvb.c | 5
-rw-r--r--  drivers/media/video/cx23885/cx23885-video.c | 7
-rw-r--r--  drivers/media/video/cx88/cx88-cards.c | 24
-rw-r--r--  drivers/media/video/ivtv/ivtv-driver.c | 3
-rw-r--r--  drivers/media/video/ivtv/ivtv-driver.h | 3
-rw-r--r--  drivers/media/video/ivtv/ivtv-fileops.c | 118
-rw-r--r--  drivers/media/video/ivtv/ivtv-ioctl.c | 22
-rw-r--r--  drivers/media/video/ivtv/ivtv-irq.c | 4
-rw-r--r--  drivers/media/video/ivtv/ivtv-streams.c | 2
-rw-r--r--  drivers/media/video/ivtv/ivtv-yuv.c | 22
-rw-r--r--  drivers/media/video/mx3_camera.c | 2
-rw-r--r--  drivers/media/video/omap/omap_vout.c | 7
-rw-r--r--  drivers/media/video/pwc/pwc-ctrl.c | 239
-rw-r--r--  drivers/media/video/pwc/pwc-dec1.c | 16
-rw-r--r--  drivers/media/video/pwc/pwc-dec1.h | 6
-rw-r--r--  drivers/media/video/pwc/pwc-dec23.c | 41
-rw-r--r--  drivers/media/video/pwc/pwc-dec23.h | 9
-rw-r--r--  drivers/media/video/pwc/pwc-if.c | 175
-rw-r--r--  drivers/media/video/pwc/pwc-misc.c | 1
-rw-r--r--  drivers/media/video/pwc/pwc-v4l.c | 90
-rw-r--r--  drivers/media/video/pwc/pwc.h | 14
-rw-r--r--  drivers/media/video/s5p-fimc/fimc-capture.c | 7
-rw-r--r--  drivers/media/video/s5p-fimc/fimc-core.c | 6
-rw-r--r--  drivers/media/video/s5p-fimc/fimc-mdevice.c | 1
-rw-r--r--  drivers/media/video/s5p-g2d/g2d.c | 1
-rw-r--r--  drivers/media/video/s5p-jpeg/jpeg-core.c | 7
-rw-r--r--  drivers/media/video/s5p-mfc/s5p_mfc.c | 3
-rw-r--r--  drivers/media/video/s5p-mfc/s5p_mfc_dec.c | 2
-rw-r--r--  drivers/media/video/saa7164/saa7164-cards.c | 4
-rw-r--r--  drivers/media/video/timblogiw.c | 2
-rw-r--r--  drivers/media/video/tlg2300/pd-main.c | 4
-rw-r--r--  drivers/media/video/v4l2-ctrls.c | 54
-rw-r--r--  drivers/media/video/v4l2-ioctl.c | 8
-rw-r--r--  drivers/media/video/zoran/zoran_driver.c | 1
-rw-r--r--  drivers/misc/carma/carma-fpga-program.c | 2
-rw-r--r--  drivers/mmc/host/atmel-mci.c | 10
-rw-r--r--  drivers/mmc/host/mmci.c | 11
-rw-r--r--  drivers/mmc/host/mxcmmc.c | 10
-rw-r--r--  drivers/mmc/host/mxs-mmc.c | 10
-rw-r--r--  drivers/mmc/host/sh_mmcif.c | 4
-rw-r--r--  drivers/mmc/host/tmio_mmc_dma.c | 4
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-lib.c | 22
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 298
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 27
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 32
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_qmr.c | 14
-rw-r--r--  drivers/net/ethernet/micrel/ks8842.c | 4
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_mac.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.c | 5
-rw-r--r--  drivers/net/wireless/b43/main.c | 5
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c | 40
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2200.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-scan.c | 2
-rw-r--r--  drivers/net/wireless/mwl8k.c | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800pci.c | 28
-rw-r--r--  drivers/scsi/Kconfig | 5
-rw-r--r--  drivers/scsi/bfa/bfa_defs_svc.h | 7
-rw-r--r--  drivers/scsi/bfa/bfa_fc.h | 155
-rw-r--r--  drivers/scsi/bfa/bfa_fcpim.c | 416
-rw-r--r--  drivers/scsi/bfa/bfa_fcpim.h | 7
-rw-r--r--  drivers/scsi/bfa/bfa_svc.h | 5
-rw-r--r--  drivers/scsi/bfa/bfad.c | 2
-rw-r--r--  drivers/scsi/bfa/bfad_attr.c | 2
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.c | 27
-rw-r--r--  drivers/scsi/bfa/bfad_drv.h | 2
-rw-r--r--  drivers/scsi/bfa/bfad_im.c | 56
-rw-r--r--  drivers/scsi/bfa/bfad_im.h | 27
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 5
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 5
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 2
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 46
-rw-r--r--  drivers/scsi/fcoe/fcoe.h | 4
-rw-r--r--  drivers/scsi/hpsa.c | 2
-rw-r--r--  drivers/scsi/isci/firmware/Makefile | 19
-rw-r--r--  drivers/scsi/isci/firmware/README | 36
-rw-r--r--  drivers/scsi/isci/firmware/create_fw.c | 99
-rw-r--r--  drivers/scsi/isci/firmware/create_fw.h | 77
-rw-r--r--  drivers/scsi/isci/host.c | 340
-rw-r--r--  drivers/scsi/isci/host.h | 27
-rw-r--r--  drivers/scsi/isci/init.c | 25
-rw-r--r--  drivers/scsi/isci/isci.h | 1
-rw-r--r--  drivers/scsi/isci/phy.c | 172
-rw-r--r--  drivers/scsi/isci/port.c | 104
-rw-r--r--  drivers/scsi/isci/port.h | 10
-rw-r--r--  drivers/scsi/isci/port_config.c | 35
-rw-r--r--  drivers/scsi/isci/probe_roms.c | 2
-rw-r--r--  drivers/scsi/isci/probe_roms.h | 89
-rw-r--r--  drivers/scsi/isci/remote_device.c | 10
-rw-r--r--  drivers/scsi/isci/task.c | 2
-rw-r--r--  drivers/scsi/isci/task.h | 7
-rw-r--r--  drivers/scsi/libfc/fc_disc.c | 6
-rw-r--r--  drivers/scsi/libfc/fc_elsct.c | 1
-rw-r--r--  drivers/scsi/libfc/fc_exch.c | 2
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 4
-rw-r--r--  drivers/scsi/libfc/fc_lport.c | 5
-rw-r--r--  drivers/scsi/libfc/fc_rport.c | 10
-rw-r--r--  drivers/scsi/megaraid.c | 13
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 8
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 145
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fp.c | 4
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h | 3
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 3
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c | 7
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c | 5
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.h | 22
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 511
-rw-r--r--  drivers/scsi/qla4xxx/ql4_version.h | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 7
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 3
-rw-r--r--  drivers/scsi/sg.c | 25
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c | 4
-rw-r--r--  drivers/spi/spi-dw-mid.c | 8
-rw-r--r--  drivers/spi/spi-ep93xx.c | 9
-rw-r--r--  drivers/spi/spi-pl022.c | 8
-rw-r--r--  drivers/spi/spi-topcliff-pch.c | 4
-rw-r--r--  drivers/tty/serial/amba-pl011.c | 8
-rw-r--r--  drivers/tty/serial/pch_uart.c | 4
-rw-r--r--  drivers/tty/serial/sh-sci.c | 4
-rw-r--r--  drivers/usb/host/ehci-xilinx-of.c | 2
-rw-r--r--  drivers/usb/musb/ux500_dma.c | 4
-rw-r--r--  drivers/usb/renesas_usbhs/fifo.c | 4
-rw-r--r--  drivers/vhost/net.c | 8
-rw-r--r--  drivers/video/mx3fb.c | 65
-rw-r--r--  drivers/xen/biomerge.c | 1
-rw-r--r--  drivers/xen/xen-balloon.c | 2
185 files changed, 5639 insertions(+), 2894 deletions(-)
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 69ac373c72a..fdf27b9fce4 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1117,6 +1117,13 @@ static int piix_broken_suspend(void)
},
},
{
+ .ident = "Satellite Pro A120",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite Pro A120"),
+ },
+ },
+ {
.ident = "Portege M500",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 11c9aea4f4f..c06e0ec1155 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4125,6 +4125,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
* device and controller are SATA.
*/
{ "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
+ { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
+ { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 9a7f0ea565d..74aaee30e26 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -291,6 +291,7 @@ int ata_tport_add(struct device *parent,
goto tport_err;
}
+ device_enable_async_suspend(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index d6a4677fdf7..1e65842e2ca 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -251,6 +251,8 @@ static const u32 udma_tenvmin = 20;
static const u32 udma_tackmin = 20;
static const u32 udma_tssmin = 50;
+#define BFIN_MAX_SG_SEGMENTS 4
+
/**
*
* Function: num_clocks_min
@@ -829,79 +831,61 @@ static void bfin_set_devctl(struct ata_port *ap, u8 ctl)
static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
{
- unsigned short config = WDSIZE_16;
+ struct ata_port *ap = qc->ap;
+ struct dma_desc_array *dma_desc_cpu = (struct dma_desc_array *)ap->bmdma_prd;
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+ unsigned short config = DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_16 | DMAEN;
struct scatterlist *sg;
unsigned int si;
+ unsigned int channel;
+ unsigned int dir;
+ unsigned int size = 0;
dev_dbg(qc->ap->dev, "in atapi dma setup\n");
/* Program the ATA_CTRL register with dir */
if (qc->tf.flags & ATA_TFLAG_WRITE) {
- /* fill the ATAPI DMA controller */
- set_dma_config(CH_ATAPI_TX, config);
- set_dma_x_modify(CH_ATAPI_TX, 2);
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg));
- set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1);
- }
+ channel = CH_ATAPI_TX;
+ dir = DMA_TO_DEVICE;
} else {
+ channel = CH_ATAPI_RX;
+ dir = DMA_FROM_DEVICE;
config |= WNR;
- /* fill the ATAPI DMA controller */
- set_dma_config(CH_ATAPI_RX, config);
- set_dma_x_modify(CH_ATAPI_RX, 2);
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg));
- set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1);
- }
}
-}
-/**
- * bfin_bmdma_start - Start an IDE DMA transaction
- * @qc: Info associated with this ATA transaction.
- *
- * Note: Original code is ata_bmdma_start().
- */
+ dma_map_sg(ap->dev, qc->sg, qc->n_elem, dir);
-static void bfin_bmdma_start(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
- struct scatterlist *sg;
- unsigned int si;
+ /* fill the ATAPI DMA controller */
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ dma_desc_cpu[si].start_addr = sg_dma_address(sg);
+ dma_desc_cpu[si].cfg = config;
+ dma_desc_cpu[si].x_count = sg_dma_len(sg) >> 1;
+ dma_desc_cpu[si].x_modify = 2;
+ size += sg_dma_len(sg);
+ }
- dev_dbg(qc->ap->dev, "in atapi dma start\n");
- if (!(ap->udma_mask || ap->mwdma_mask))
- return;
+ /* Set the last descriptor to stop mode */
+ dma_desc_cpu[qc->n_elem - 1].cfg &= ~(DMAFLOW | NDSIZE);
- /* start ATAPI DMA controller*/
- if (qc->tf.flags & ATA_TFLAG_WRITE) {
- /*
- * On blackfin arch, uncacheable memory is not
- * allocated with flag GFP_DMA. DMA buffer from
- * common kenel code should be flushed if WB
- * data cache is enabled. Otherwise, this loop
- * is an empty loop and optimized out.
- */
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- flush_dcache_range(sg_dma_address(sg),
- sg_dma_address(sg) + sg_dma_len(sg));
- }
- enable_dma(CH_ATAPI_TX);
- dev_dbg(qc->ap->dev, "enable udma write\n");
+ flush_dcache_range((unsigned int)dma_desc_cpu,
+ (unsigned int)dma_desc_cpu +
+ qc->n_elem * sizeof(struct dma_desc_array));
- /* Send ATA DMA write command */
- bfin_exec_command(ap, &qc->tf);
+ /* Enable ATA DMA operation*/
+ set_dma_curr_desc_addr(channel, (unsigned long *)ap->bmdma_prd_dma);
+ set_dma_x_count(channel, 0);
+ set_dma_x_modify(channel, 0);
+ set_dma_config(channel, config);
+
+ SSYNC();
+
+ /* Send ATA DMA command */
+ bfin_exec_command(ap, &qc->tf);
+ if (qc->tf.flags & ATA_TFLAG_WRITE) {
/* set ATA DMA write direction */
ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
| XFER_DIR));
} else {
- enable_dma(CH_ATAPI_RX);
- dev_dbg(qc->ap->dev, "enable udma read\n");
-
- /* Send ATA DMA read command */
- bfin_exec_command(ap, &qc->tf);
-
/* set ATA DMA read direction */
ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
& ~XFER_DIR));
@@ -913,12 +897,28 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
/* Set ATAPI state machine control in terminate sequence */
ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | END_ON_TERM);
- /* Set transfer length to buffer len */
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1));
- }
+ /* Set transfer length to the total size of sg buffers */
+ ATAPI_SET_XFER_LEN(base, size >> 1);
+}
- /* Enable ATA DMA operation*/
+/**
+ * bfin_bmdma_start - Start an IDE DMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * Note: Original code is ata_bmdma_start().
+ */
+
+static void bfin_bmdma_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+
+ dev_dbg(qc->ap->dev, "in atapi dma start\n");
+
+ if (!(ap->udma_mask || ap->mwdma_mask))
+ return;
+
+ /* start ATAPI transfer*/
if (ap->udma_mask)
ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
| ULTRA_START);
@@ -935,34 +935,23 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- struct scatterlist *sg;
- unsigned int si;
+ unsigned int dir;
dev_dbg(qc->ap->dev, "in atapi dma stop\n");
+
if (!(ap->udma_mask || ap->mwdma_mask))
return;
/* stop ATAPI DMA controller*/
- if (qc->tf.flags & ATA_TFLAG_WRITE)
+ if (qc->tf.flags & ATA_TFLAG_WRITE) {
+ dir = DMA_TO_DEVICE;
disable_dma(CH_ATAPI_TX);
- else {
+ } else {
+ dir = DMA_FROM_DEVICE;
disable_dma(CH_ATAPI_RX);
- if (ap->hsm_task_state & HSM_ST_LAST) {
- /*
- * On blackfin arch, uncacheable memory is not
- * allocated with flag GFP_DMA. DMA buffer from
- * common kernel code should be invalidated if
- * data cache is enabled. Otherwise, this loop
- * is an empty loop and optimized out.
- */
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- invalidate_dcache_range(
- sg_dma_address(sg),
- sg_dma_address(sg)
- + sg_dma_len(sg));
- }
- }
}
+
+ dma_unmap_sg(ap->dev, qc->sg, qc->n_elem, dir);
}
/**
@@ -1260,6 +1249,11 @@ static void bfin_port_stop(struct ata_port *ap)
{
dev_dbg(ap->dev, "in atapi port stop\n");
if (ap->udma_mask != 0 || ap->mwdma_mask != 0) {
+ dma_free_coherent(ap->dev,
+ BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
+ ap->bmdma_prd,
+ ap->bmdma_prd_dma);
+
free_dma(CH_ATAPI_RX);
free_dma(CH_ATAPI_TX);
}
@@ -1271,14 +1265,29 @@ static int bfin_port_start(struct ata_port *ap)
if (!(ap->udma_mask || ap->mwdma_mask))
return 0;
+ ap->bmdma_prd = dma_alloc_coherent(ap->dev,
+ BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
+ &ap->bmdma_prd_dma,
+ GFP_KERNEL);
+
+ if (ap->bmdma_prd == NULL) {
+ dev_info(ap->dev, "Unable to allocate DMA descriptor array.\n");
+ goto out;
+ }
+
if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) {
if (request_dma(CH_ATAPI_TX,
"BFIN ATAPI TX DMA") >= 0)
return 0;
free_dma(CH_ATAPI_RX);
+ dma_free_coherent(ap->dev,
+ BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
+ ap->bmdma_prd,
+ ap->bmdma_prd_dma);
}
+out:
ap->udma_mask = 0;
ap->mwdma_mask = 0;
dev_err(ap->dev, "Unable to request ATAPI DMA!"
@@ -1400,7 +1409,7 @@ static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance)
static struct scsi_host_template bfin_sht = {
ATA_BASE_SHT(DRV_NAME),
- .sg_tablesize = SG_NONE,
+ .sg_tablesize = BFIN_MAX_SG_SEGMENTS,
.dma_boundary = ATA_DMA_BOUNDARY,
};
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 5a2c95ba050..0120b0d1e9a 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -140,6 +140,7 @@ enum {
*/
HCONTROL_ONLINE_PHY_RST = (1 << 31),
HCONTROL_FORCE_OFFLINE = (1 << 30),
+ HCONTROL_LEGACY = (1 << 28),
HCONTROL_PARITY_PROT_MOD = (1 << 14),
HCONTROL_DPATH_PARITY = (1 << 12),
HCONTROL_SNOOP_ENABLE = (1 << 10),
@@ -1223,6 +1224,10 @@ static int sata_fsl_init_controller(struct ata_host *host)
* part of the port_start() callback
*/
+ /* Set the SATA controller to operate in enterprise mode */
+ temp = ioread32(hcr_base + HCONTROL);
+ iowrite32(temp & ~HCONTROL_LEGACY, hcr_base + HCONTROL);
+
/* ack. any pending IRQs for this controller/port */
temp = ioread32(hcr_base + HSTATUS);
if (temp & 0x3F)
@@ -1421,6 +1426,12 @@ static int sata_fsl_resume(struct platform_device *op)
/* Recover the CHBA register in host controller cmd register set */
iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
+ iowrite32((ioread32(hcr_base + HCONTROL)
+ | HCONTROL_ONLINE_PHY_RST
+ | HCONTROL_SNOOP_ENABLE
+ | HCONTROL_PMP_ATTACHED),
+ hcr_base + HCONTROL);
+
ata_host_resume(host);
return 0;
}
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index fda56bde36b..0def898a1d1 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -19,6 +19,7 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
struct bcma_device *core_cc,
struct bcma_device *core_mips);
#ifdef CONFIG_PM
+int bcma_bus_suspend(struct bcma_bus *bus);
int bcma_bus_resume(struct bcma_bus *bus);
#endif
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 443b83a2fd7..f59244e3397 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -235,38 +235,32 @@ static void bcma_host_pci_remove(struct pci_dev *dev)
}
#ifdef CONFIG_PM
-static int bcma_host_pci_suspend(struct pci_dev *dev, pm_message_t state)
+static int bcma_host_pci_suspend(struct device *dev)
{
- /* Host specific */
- pci_save_state(dev);
- pci_disable_device(dev);
- pci_set_power_state(dev, pci_choose_state(dev, state));
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct bcma_bus *bus = pci_get_drvdata(pdev);
- return 0;
+ bus->mapped_core = NULL;
+
+ return bcma_bus_suspend(bus);
}
-static int bcma_host_pci_resume(struct pci_dev *dev)
+static int bcma_host_pci_resume(struct device *dev)
{
- struct bcma_bus *bus = pci_get_drvdata(dev);
- int err;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct bcma_bus *bus = pci_get_drvdata(pdev);
- /* Host specific */
- pci_set_power_state(dev, 0);
- err = pci_enable_device(dev);
- if (err)
- return err;
- pci_restore_state(dev);
+ return bcma_bus_resume(bus);
+}
- /* Bus specific */
- err = bcma_bus_resume(bus);
- if (err)
- return err;
+static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
+ bcma_host_pci_resume);
+#define BCMA_PM_OPS (&bcma_pm_ops)
- return 0;
-}
#else /* CONFIG_PM */
-# define bcma_host_pci_suspend NULL
-# define bcma_host_pci_resume NULL
+
+#define BCMA_PM_OPS NULL
+
#endif /* CONFIG_PM */
static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
@@ -284,8 +278,7 @@ static struct pci_driver bcma_pci_bridge_driver = {
.id_table = bcma_pci_bridge_tbl,
.probe = bcma_host_pci_probe,
.remove = bcma_host_pci_remove,
- .suspend = bcma_host_pci_suspend,
- .resume = bcma_host_pci_resume,
+ .driver.pm = BCMA_PM_OPS,
};
int __init bcma_host_pci_init(void)
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 10f92b371e5..febbc0a1222 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -241,6 +241,21 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
}
#ifdef CONFIG_PM
+int bcma_bus_suspend(struct bcma_bus *bus)
+{
+ struct bcma_device *core;
+
+ list_for_each_entry(core, &bus->cores, list) {
+ struct device_driver *drv = core->dev.driver;
+ if (drv) {
+ struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
+ if (adrv->suspend)
+ adrv->suspend(core);
+ }
+ }
+ return 0;
+}
+
int bcma_bus_resume(struct bcma_bus *bus)
{
struct bcma_device *core;
@@ -252,6 +267,15 @@ int bcma_bus_resume(struct bcma_bus *bus)
bcma_core_chipcommon_init(&bus->drv_cc);
}
+ list_for_each_entry(core, &bus->cores, list) {
+ struct device_driver *drv = core->dev.driver;
+ if (drv) {
+ struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
+ if (adrv->resume)
+ adrv->resume(core);
+ }
+ }
+
return 0;
}
#endif
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index a30aa103f95..4e4c8a4a5fd 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -317,6 +317,17 @@ config BLK_DEV_NBD
If unsure, say N.
+config BLK_DEV_NVME
+ tristate "NVM Express block device"
+ depends on PCI
+ ---help---
+ The NVM Express driver is for solid state drives directly
+ connected to the PCI or PCI Express bus. If you know you
+ don't have one of these, it is safe to answer N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called nvme.
+
config BLK_DEV_OSD
tristate "OSD object-as-blkdev support"
depends on SCSI_OSD_ULD
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index ad7b74a44ef..5b795059f8f 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_MG_DISK) += mg_disk.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
+obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
obj-$(CONFIG_BLK_DEV_OSD) += osdblk.o
obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
new file mode 100644
index 00000000000..f4996b0e4b1
--- /dev/null
+++ b/drivers/block/nvme.c
@@ -0,0 +1,1745 @@
+/*
+ * NVM Express device driver
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/nvme.h>
+#include <linux/bio.h>
+#include <linux/bitops.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kdev_t.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/poison.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+#define NVME_Q_DEPTH 1024
+#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
+#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
+#define NVME_MINORS 64
+#define NVME_IO_TIMEOUT (5 * HZ)
+#define ADMIN_TIMEOUT (60 * HZ)
+
+static int nvme_major;
+module_param(nvme_major, int, 0);
+
+static int use_threaded_interrupts;
+module_param(use_threaded_interrupts, int, 0);
+
+static DEFINE_SPINLOCK(dev_list_lock);
+static LIST_HEAD(dev_list);
+static struct task_struct *nvme_thread;
+
+/*
+ * Represents an NVM Express device. Each nvme_dev is a PCI function.
+ */
+struct nvme_dev {
+ struct list_head node;
+ struct nvme_queue **queues;
+ u32 __iomem *dbs;
+ struct pci_dev *pci_dev;
+ struct dma_pool *prp_page_pool;
+ struct dma_pool *prp_small_pool;
+ int instance;
+ int queue_count;
+ int db_stride;
+ u32 ctrl_config;
+ struct msix_entry *entry;
+ struct nvme_bar __iomem *bar;
+ struct list_head namespaces;
+ char serial[20];
+ char model[40];
+ char firmware_rev[8];
+};
+
+/*
+ * An NVM Express namespace is equivalent to a SCSI LUN
+ */
+struct nvme_ns {
+ struct list_head list;
+
+ struct nvme_dev *dev;
+ struct request_queue *queue;
+ struct gendisk *disk;
+
+ int ns_id;
+ int lba_shift;
+};
+
+/*
+ * An NVM Express queue. Each device has at least two (one for admin
+ * commands and one for I/O commands).
+ */
+struct nvme_queue {
+ struct device *q_dmadev;
+ struct nvme_dev *dev;
+ spinlock_t q_lock;
+ struct nvme_command *sq_cmds;
+ volatile struct nvme_completion *cqes;
+ dma_addr_t sq_dma_addr;
+ dma_addr_t cq_dma_addr;
+ wait_queue_head_t sq_full;
+ wait_queue_t sq_cong_wait;
+ struct bio_list sq_cong;
+ u32 __iomem *q_db;
+ u16 q_depth;
+ u16 cq_vector;
+ u16 sq_head;
+ u16 sq_tail;
+ u16 cq_head;
+ u16 cq_phase;
+ unsigned long cmdid_data[];
+};
+
+/*
+ * Check we didin't inadvertently grow the command struct
+ */
+static inline void _nvme_check_size(void)
+{
+ BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
+ BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
+}
+
+typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
+ struct nvme_completion *);
+
+struct nvme_cmd_info {
+ nvme_completion_fn fn;
+ void *ctx;
+ unsigned long timeout;
+};
+
+static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
+{
+ return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
+}
+
+/**
+ * alloc_cmdid() - Allocate a Command ID
+ * @nvmeq: The queue that will be used for this command
+ * @ctx: A pointer that will be passed to the handler
+ * @handler: The function to call on completion
+ *
+ * Allocate a Command ID for a queue. The data passed in will
+ * be passed to the completion handler. This is implemented by using
+ * the bottom two bits of the ctx pointer to store the handler ID.
+ * Passing in a pointer that's not 4-byte aligned will cause a BUG.
+ * We can change this if it becomes a problem.
+ *
+ * May be called with local interrupts disabled and the q_lock held,
+ * or with interrupts enabled and no locks held.
+ */
+static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
+ nvme_completion_fn handler, unsigned timeout)
+{
+ int depth = nvmeq->q_depth - 1;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+ int cmdid;
+
+ do {
+ cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
+ if (cmdid >= depth)
+ return -EBUSY;
+ } while (test_and_set_bit(cmdid, nvmeq->cmdid_data));
+
+ info[cmdid].fn = handler;
+ info[cmdid].ctx = ctx;
+ info[cmdid].timeout = jiffies + timeout;
+ return cmdid;
+}
+
+static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
+ nvme_completion_fn handler, unsigned timeout)
+{
+ int cmdid;
+ wait_event_killable(nvmeq->sq_full,
+ (cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
+ return (cmdid < 0) ? -EINTR : cmdid;
+}
+
+/* Special values must be less than 0x1000 */
+#define CMD_CTX_BASE ((void *)POISON_POINTER_DELTA)
+#define CMD_CTX_CANCELLED (0x30C + CMD_CTX_BASE)
+#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE)
+#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE)
+#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE)
+
+static void special_completion(struct nvme_dev *dev, void *ctx,
+ struct nvme_completion *cqe)
+{
+ if (ctx == CMD_CTX_CANCELLED)
+ return;
+ if (ctx == CMD_CTX_FLUSH)
+ return;
+ if (ctx == CMD_CTX_COMPLETED) {
+ dev_warn(&dev->pci_dev->dev,
+ "completed id %d twice on queue %d\n",
+ cqe->command_id, le16_to_cpup(&cqe->sq_id));
+ return;
+ }
+ if (ctx == CMD_CTX_INVALID) {
+ dev_warn(&dev->pci_dev->dev,
+ "invalid id %d completed on queue %d\n",
+ cqe->command_id, le16_to_cpup(&cqe->sq_id));
+ return;
+ }
+
+ dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
+}
+
+/*
+ * Called with local interrupts disabled and the q_lock held. May not sleep.
+ */
+static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
+ nvme_completion_fn *fn)
+{
+ void *ctx;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+
+ if (cmdid >= nvmeq->q_depth) {
+ *fn = special_completion;
+ return CMD_CTX_INVALID;
+ }
+ *fn = info[cmdid].fn;
+ ctx = info[cmdid].ctx;
+ info[cmdid].fn = special_completion;
+ info[cmdid].ctx = CMD_CTX_COMPLETED;
+ clear_bit(cmdid, nvmeq->cmdid_data);
+ wake_up(&nvmeq->sq_full);
+ return ctx;
+}
+
+static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
+ nvme_completion_fn *fn)
+{
+ void *ctx;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+ if (fn)
+ *fn = info[cmdid].fn;
+ ctx = info[cmdid].ctx;
+ info[cmdid].fn = special_completion;
+ info[cmdid].ctx = CMD_CTX_CANCELLED;
+ return ctx;
+}
+
+static struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
+{
+ return dev->queues[get_cpu() + 1];
+}
+
+static void put_nvmeq(struct nvme_queue *nvmeq)
+{
+ put_cpu();
+}
+
+/**
+ * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
+ * @nvmeq: The queue to use
+ * @cmd: The command to send
+ *
+ * Safe to use from interrupt context
+ */
+static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
+{
+ unsigned long flags;
+ u16 tail;
+ spin_lock_irqsave(&nvmeq->q_lock, flags);
+ tail = nvmeq->sq_tail;
+ memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
+ if (++tail == nvmeq->q_depth)
+ tail = 0;
+ writel(tail, nvmeq->q_db);
+ nvmeq->sq_tail = tail;
+ spin_unlock_irqrestore(&nvmeq->q_lock, flags);
+
+ return 0;
+}
+
+/*
+ * The nvme_iod describes the data in an I/O, including the list of PRP
+ * entries. You can't see it in this data structure because C doesn't let
+ * me express that. Use nvme_alloc_iod to ensure there's enough space
+ * allocated to store the PRP list.
+ */
+struct nvme_iod {
+ void *private; /* For the use of the submitter of the I/O */
+ int npages; /* In the PRP list. 0 means small pool in use */
+ int offset; /* Of PRP list */
+ int nents; /* Used in scatterlist */
+ int length; /* Of data, in bytes */
+ dma_addr_t first_dma;
+ struct scatterlist sg[0];
+};
+
+static __le64 **iod_list(struct nvme_iod *iod)
+{
+ return ((void *)iod) + iod->offset;
+}
+
+/*
+ * Will slightly overestimate the number of pages needed. This is OK
+ * as it only leads to a small amount of wasted memory for the lifetime of
+ * the I/O.
+ */
+static int nvme_npages(unsigned size)
+{
+ unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
+ return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
+}
+
+static struct nvme_iod *
+nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
+{
+ struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
+ sizeof(__le64 *) * nvme_npages(nbytes) +
+ sizeof(struct scatterlist) * nseg, gfp);
+
+ if (iod) {
+ iod->offset = offsetof(struct nvme_iod, sg[nseg]);
+ iod->npages = -1;
+ iod->length = nbytes;
+ }
+
+ return iod;
+}
+
+static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
+{
+ const int last_prp = PAGE_SIZE / 8 - 1;
+ int i;
+ __le64 **list = iod_list(iod);
+ dma_addr_t prp_dma = iod->first_dma;
+
+ if (iod->npages == 0)
+ dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
+ for (i = 0; i < iod->npages; i++) {
+ __le64 *prp_list = list[i];
+ dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
+ dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
+ prp_dma = next_prp_dma;
+ }
+ kfree(iod);
+}
+
+static void requeue_bio(struct nvme_dev *dev, struct bio *bio)
+{
+ struct nvme_queue *nvmeq = get_nvmeq(dev);
+ if (bio_list_empty(&nvmeq->sq_cong))
+ add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+ bio_list_add(&nvmeq->sq_cong, bio);
+ put_nvmeq(nvmeq);
+ wake_up_process(nvme_thread);
+}
+
+static void bio_completion(struct nvme_dev *dev, void *ctx,
+ struct nvme_completion *cqe)
+{
+ struct nvme_iod *iod = ctx;
+ struct bio *bio = iod->private;
+ u16 status = le16_to_cpup(&cqe->status) >> 1;
+
+ dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+ bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ nvme_free_iod(dev, iod);
+ if (status) {
+ bio_endio(bio, -EIO);
+ } else if (bio->bi_vcnt > bio->bi_idx) {
+ requeue_bio(dev, bio);
+ } else {
+ bio_endio(bio, 0);
+ }
+}
+
+/* length is in bytes. gfp flags indicates whether we may sleep. */
+static int nvme_setup_prps(struct nvme_dev *dev,
+ struct nvme_common_command *cmd, struct nvme_iod *iod,
+ int total_len, gfp_t gfp)
+{
+ struct dma_pool *pool;
+ int length = total_len;
+ struct scatterlist *sg = iod->sg;
+ int dma_len = sg_dma_len(sg);
+ u64 dma_addr = sg_dma_address(sg);
+ int offset = offset_in_page(dma_addr);
+ __le64 *prp_list;
+ __le64 **list = iod_list(iod);
+ dma_addr_t prp_dma;
+ int nprps, i;
+
+ cmd->prp1 = cpu_to_le64(dma_addr);
+ length -= (PAGE_SIZE - offset);
+ if (length <= 0)
+ return total_len;
+
+ dma_len -= (PAGE_SIZE - offset);
+ if (dma_len) {
+ dma_addr += (PAGE_SIZE - offset);
+ } else {
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ if (length <= PAGE_SIZE) {
+ cmd->prp2 = cpu_to_le64(dma_addr);
+ return total_len;
+ }
+
+ nprps = DIV_ROUND_UP(length, PAGE_SIZE);
+ if (nprps <= (256 / 8)) {
+ pool = dev->prp_small_pool;
+ iod->npages = 0;
+ } else {
+ pool = dev->prp_page_pool;
+ iod->npages = 1;
+ }
+
+ prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+ if (!prp_list) {
+ cmd->prp2 = cpu_to_le64(dma_addr);
+ iod->npages = -1;
+ return (total_len - length) + PAGE_SIZE;
+ }
+ list[0] = prp_list;
+ iod->first_dma = prp_dma;
+ cmd->prp2 = cpu_to_le64(prp_dma);
+ i = 0;
+ for (;;) {
+ if (i == PAGE_SIZE / 8) {
+ __le64 *old_prp_list = prp_list;
+ prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+ if (!prp_list)
+ return total_len - length;
+ list[iod->npages++] = prp_list;
+ prp_list[0] = old_prp_list[i - 1];
+ old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+ i = 1;
+ }
+ prp_list[i++] = cpu_to_le64(dma_addr);
+ dma_len -= PAGE_SIZE;
+ dma_addr += PAGE_SIZE;
+ length -= PAGE_SIZE;
+ if (length <= 0)
+ break;
+ if (dma_len > 0)
+ continue;
+ BUG_ON(dma_len < 0);
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ return total_len;
+}
+
+/* NVMe scatterlists require no holes in the virtual address */
+#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2) ((vec2)->bv_offset || \
+ (((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))
+
+static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
+ struct bio *bio, enum dma_data_direction dma_dir, int psegs)
+{
+ struct bio_vec *bvec, *bvprv = NULL;
+ struct scatterlist *sg = NULL;
+ int i, old_idx, length = 0, nsegs = 0;
+
+ sg_init_table(iod->sg, psegs);
+ old_idx = bio->bi_idx;
+ bio_for_each_segment(bvec, bio, i) {
+ if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
+ sg->length += bvec->bv_len;
+ } else {
+ if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
+ break;
+ sg = sg ? sg + 1 : iod->sg;
+ sg_set_page(sg, bvec->bv_page, bvec->bv_len,
+ bvec->bv_offset);
+ nsegs++;
+ }
+ length += bvec->bv_len;
+ bvprv = bvec;
+ }
+ bio->bi_idx = i;
+ iod->nents = nsegs;
+ sg_mark_end(sg);
+ if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) {
+ bio->bi_idx = old_idx;
+ return -ENOMEM;
+ }
+ return length;
+}
+
+static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+ int cmdid)
+{
+ struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+ memset(cmnd, 0, sizeof(*cmnd));
+ cmnd->common.opcode = nvme_cmd_flush;
+ cmnd->common.command_id = cmdid;
+ cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+
+ if (++nvmeq->sq_tail == nvmeq->q_depth)
+ nvmeq->sq_tail = 0;
+ writel(nvmeq->sq_tail, nvmeq->q_db);
+
+ return 0;
+}
+
+static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
+{
+ int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
+ special_completion, NVME_IO_TIMEOUT);
+ if (unlikely(cmdid < 0))
+ return cmdid;
+
+ return nvme_submit_flush(nvmeq, ns, cmdid);
+}
+
+/*
+ * Called with local interrupts disabled and the q_lock held. May not sleep.
+ */
+static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+ struct bio *bio)
+{
+ struct nvme_command *cmnd;
+ struct nvme_iod *iod;
+ enum dma_data_direction dma_dir;
+ int cmdid, length, result = -ENOMEM;
+ u16 control;
+ u32 dsmgmt;
+ int psegs = bio_phys_segments(ns->queue, bio);
+
+ if ((bio->bi_rw & REQ_FLUSH) && psegs) {
+ result = nvme_submit_flush_data(nvmeq, ns);
+ if (result)
+ return result;
+ }
+
+ iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
+ if (!iod)
+ goto nomem;
+ iod->private = bio;
+
+ result = -EBUSY;
+ cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
+ if (unlikely(cmdid < 0))
+ goto free_iod;
+
+ if ((bio->bi_rw & REQ_FLUSH) && !psegs)
+ return nvme_submit_flush(nvmeq, ns, cmdid);
+
+ control = 0;
+ if (bio->bi_rw & REQ_FUA)
+ control |= NVME_RW_FUA;
+ if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
+ control |= NVME_RW_LR;
+
+ dsmgmt = 0;
+ if (bio->bi_rw & REQ_RAHEAD)
+ dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
+
+ cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+ memset(cmnd, 0, sizeof(*cmnd));
+ if (bio_data_dir(bio)) {
+ cmnd->rw.opcode = nvme_cmd_write;
+ dma_dir = DMA_TO_DEVICE;
+ } else {
+ cmnd->rw.opcode = nvme_cmd_read;
+ dma_dir = DMA_FROM_DEVICE;
+ }
+
+ result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
+ if (result < 0)
+ goto free_iod;
+ length = result;
+
+ cmnd->rw.command_id = cmdid;
+ cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
+ length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
+ GFP_ATOMIC);
+ cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
+ cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
+ cmnd->rw.control = cpu_to_le16(control);
+ cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+
+ bio->bi_sector += length >> 9;
+
+ if (++nvmeq->sq_tail == nvmeq->q_depth)
+ nvmeq->sq_tail = 0;
+ writel(nvmeq->sq_tail, nvmeq->q_db);
+
+ return 0;
+
+ free_iod:
+ nvme_free_iod(nvmeq->dev, iod);
+ nomem:
+ return result;
+}
+
+/*
+ * NB: return value of non-zero would mean that we were a stacking driver.
+ * make_request must always succeed.
+ */
+static int nvme_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct nvme_ns *ns = q->queuedata;
+ struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
+ int result = -EBUSY;
+
+ spin_lock_irq(&nvmeq->q_lock);
+ if (bio_list_empty(&nvmeq->sq_cong))
+ result = nvme_submit_bio_queue(nvmeq, ns, bio);
+ if (unlikely(result)) {
+ if (bio_list_empty(&nvmeq->sq_cong))
+ add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+ bio_list_add(&nvmeq->sq_cong, bio);
+ }
+
+ spin_unlock_irq(&nvmeq->q_lock);
+ put_nvmeq(nvmeq);
+
+ return 0;
+}
+
+static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
+{
+ u16 head, phase;
+
+ head = nvmeq->cq_head;
+ phase = nvmeq->cq_phase;
+
+ for (;;) {
+ void *ctx;
+ nvme_completion_fn fn;
+ struct nvme_completion cqe = nvmeq->cqes[head];
+ if ((le16_to_cpu(cqe.status) & 1) != phase)
+ break;
+ nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
+ if (++head == nvmeq->q_depth) {
+ head = 0;
+ phase = !phase;
+ }
+
+ ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
+ fn(nvmeq->dev, ctx, &cqe);
+ }
+
+ /* If the controller ignores the cq head doorbell and continuously
+ * writes to the queue, it is theoretically possible to wrap around
+ * the queue twice and mistakenly return IRQ_NONE. Linux only
+ * requires that 0.1% of your interrupts are handled, so this isn't
+ * a big problem.
+ */
+ if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
+ return IRQ_NONE;
+
+ writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
+ nvmeq->cq_head = head;
+ nvmeq->cq_phase = phase;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nvme_irq(int irq, void *data)
+{
+ irqreturn_t result;
+ struct nvme_queue *nvmeq = data;
+ spin_lock(&nvmeq->q_lock);
+ result = nvme_process_cq(nvmeq);
+ spin_unlock(&nvmeq->q_lock);
+ return result;
+}
+
+static irqreturn_t nvme_irq_check(int irq, void *data)
+{
+ struct nvme_queue *nvmeq = data;
+ struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
+ if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
+ return IRQ_NONE;
+ return IRQ_WAKE_THREAD;
+}
+
+static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
+{
+ spin_lock_irq(&nvmeq->q_lock);
+ cancel_cmdid(nvmeq, cmdid, NULL);
+ spin_unlock_irq(&nvmeq->q_lock);
+}
+
+struct sync_cmd_info {
+ struct task_struct *task;
+ u32 result;
+ int status;
+};
+
+static void sync_completion(struct nvme_dev *dev, void *ctx,
+ struct nvme_completion *cqe)
+{
+ struct sync_cmd_info *cmdinfo = ctx;
+ cmdinfo->result = le32_to_cpup(&cqe->result);
+ cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
+ wake_up_process(cmdinfo->task);
+}
+
+/*
+ * Returns 0 on success. If the result is negative, it's a Linux error code;
+ * if the result is positive, it's an NVM Express status code
+ */
+static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
+ struct nvme_command *cmd, u32 *result, unsigned timeout)
+{
+ int cmdid;
+ struct sync_cmd_info cmdinfo;
+
+ cmdinfo.task = current;
+ cmdinfo.status = -EINTR;
+
+ cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
+ timeout);
+ if (cmdid < 0)
+ return cmdid;
+ cmd->common.command_id = cmdid;
+
+ set_current_state(TASK_KILLABLE);
+ nvme_submit_cmd(nvmeq, cmd);
+ schedule();
+
+ if (cmdinfo.status == -EINTR) {
+ nvme_abort_command(nvmeq, cmdid);
+ return -EINTR;
+ }
+
+ if (result)
+ *result = cmdinfo.result;
+
+ return cmdinfo.status;
+}
+
+static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
+ u32 *result)
+{
+ return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
+}
+
+static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
+{
+ int status;
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.delete_queue.opcode = opcode;
+ c.delete_queue.qid = cpu_to_le16(id);
+
+ status = nvme_submit_admin_cmd(dev, &c, NULL);
+ if (status)
+ return -EIO;
+ return 0;
+}
+
+static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
+ struct nvme_queue *nvmeq)
+{
+ int status;
+ struct nvme_command c;
+ int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
+
+ memset(&c, 0, sizeof(c));
+ c.create_cq.opcode = nvme_admin_create_cq;
+ c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
+ c.create_cq.cqid = cpu_to_le16(qid);
+ c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
+ c.create_cq.cq_flags = cpu_to_le16(flags);
+ c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
+
+ status = nvme_submit_admin_cmd(dev, &c, NULL);
+ if (status)
+ return -EIO;
+ return 0;
+}
+
+static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
+ struct nvme_queue *nvmeq)
+{
+ int status;
+ struct nvme_command c;
+ int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
+
+ memset(&c, 0, sizeof(c));
+ c.create_sq.opcode = nvme_admin_create_sq;
+ c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
+ c.create_sq.sqid = cpu_to_le16(qid);
+ c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
+ c.create_sq.sq_flags = cpu_to_le16(flags);
+ c.create_sq.cqid = cpu_to_le16(qid);
+
+ status = nvme_submit_admin_cmd(dev, &c, NULL);
+ if (status)
+ return -EIO;
+ return 0;
+}
+
+static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
+{
+ return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
+}
+
+static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
+{
+ return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
+}
+
+static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
+ dma_addr_t dma_addr)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.nsid = cpu_to_le32(nsid);
+ c.identify.prp1 = cpu_to_le64(dma_addr);
+ c.identify.cns = cpu_to_le32(cns);
+
+ return nvme_submit_admin_cmd(dev, &c, NULL);
+}
+
+static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
+ unsigned dword11, dma_addr_t dma_addr)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.features.opcode = nvme_admin_get_features;
+ c.features.prp1 = cpu_to_le64(dma_addr);
+ c.features.fid = cpu_to_le32(fid);
+ c.features.dword11 = cpu_to_le32(dword11);
+
+ return nvme_submit_admin_cmd(dev, &c, NULL);
+}
+
+static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
+ unsigned dword11, dma_addr_t dma_addr, u32 *result)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.features.opcode = nvme_admin_set_features;
+ c.features.prp1 = cpu_to_le64(dma_addr);
+ c.features.fid = cpu_to_le32(fid);
+ c.features.dword11 = cpu_to_le32(dword11);
+
+ return nvme_submit_admin_cmd(dev, &c, result);
+}
+
+static void nvme_free_queue(struct nvme_dev *dev, int qid)
+{
+ struct nvme_queue *nvmeq = dev->queues[qid];
+ int vector = dev->entry[nvmeq->cq_vector].vector;
+
+ irq_set_affinity_hint(vector, NULL);
+ free_irq(vector, nvmeq);
+
+ /* Don't tell the adapter to delete the admin queue */
+ if (qid) {
+ adapter_delete_sq(dev, qid);
+ adapter_delete_cq(dev, qid);
+ }
+
+ dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
+ (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
+ dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+ nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+ kfree(nvmeq);
+}
+
+static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
+ int depth, int vector)
+{
+ struct device *dmadev = &dev->pci_dev->dev;
+ unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
+ struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
+ if (!nvmeq)
+ return NULL;
+
+ nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
+ &nvmeq->cq_dma_addr, GFP_KERNEL);
+ if (!nvmeq->cqes)
+ goto free_nvmeq;
+ memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));
+
+ nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
+ &nvmeq->sq_dma_addr, GFP_KERNEL);
+ if (!nvmeq->sq_cmds)
+ goto free_cqdma;
+
+ nvmeq->q_dmadev = dmadev;
+ nvmeq->dev = dev;
+ spin_lock_init(&nvmeq->q_lock);
+ nvmeq->cq_head = 0;
+ nvmeq->cq_phase = 1;
+ init_waitqueue_head(&nvmeq->sq_full);
+ init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
+ bio_list_init(&nvmeq->sq_cong);
+ nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+ nvmeq->q_depth = depth;
+ nvmeq->cq_vector = vector;
+
+ return nvmeq;
+
+ free_cqdma:
+ dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
+ nvmeq->cq_dma_addr);
+ free_nvmeq:
+ kfree(nvmeq);
+ return NULL;
+}
+
+static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
+ const char *name)
+{
+ if (use_threaded_interrupts)
+ return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
+ nvme_irq_check, nvme_irq,
+ IRQF_DISABLED | IRQF_SHARED,
+ name, nvmeq);
+ return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
+ IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
+}
+
+static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
+ int qid, int cq_size, int vector)
+{
+ int result;
+ struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);
+
+ if (!nvmeq)
+ return ERR_PTR(-ENOMEM);
+
+ result = adapter_alloc_cq(dev, qid, nvmeq);
+ if (result < 0)
+ goto free_nvmeq;
+
+ result = adapter_alloc_sq(dev, qid, nvmeq);
+ if (result < 0)
+ goto release_cq;
+
+ result = queue_request_irq(dev, nvmeq, "nvme");
+ if (result < 0)
+ goto release_sq;
+
+ return nvmeq;
+
+ release_sq:
+ adapter_delete_sq(dev, qid);
+ release_cq:
+ adapter_delete_cq(dev, qid);
+ free_nvmeq:
+ dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
+ (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
+ dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+ nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+ kfree(nvmeq);
+ return ERR_PTR(result);
+}
+
+static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
+{
+ int result;
+ u32 aqa;
+ u64 cap;
+ unsigned long timeout;
+ struct nvme_queue *nvmeq;
+
+ dev->dbs = ((void __iomem *)dev->bar) + 4096;
+
+ nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
+ if (!nvmeq)
+ return -ENOMEM;
+
+ aqa = nvmeq->q_depth - 1;
+ aqa |= aqa << 16;
+
+ dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
+ dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
+ dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
+ dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
+
+ writel(0, &dev->bar->cc);
+ writel(aqa, &dev->bar->aqa);
+ writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
+ writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
+ writel(dev->ctrl_config, &dev->bar->cc);
+
+ cap = readq(&dev->bar->cap);
+ timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
+ dev->db_stride = NVME_CAP_STRIDE(cap);
+
+ while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
+ msleep(100);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ if (time_after(jiffies, timeout)) {
+ dev_err(&dev->pci_dev->dev,
+ "Device not ready; aborting initialisation\n");
+ return -ENODEV;
+ }
+ }
+
+ result = queue_request_irq(dev, nvmeq, "nvme admin");
+ dev->queues[0] = nvmeq;
+ return result;
+}
+
+static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
+ unsigned long addr, unsigned length)
+{
+ int i, err, count, nents, offset;
+ struct scatterlist *sg;
+ struct page **pages;
+ struct nvme_iod *iod;
+
+ if (addr & 3)
+ return ERR_PTR(-EINVAL);
+ if (!length)
+ return ERR_PTR(-EINVAL);
+
+ offset = offset_in_page(addr);
+ count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
+ pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
+
+ err = get_user_pages_fast(addr, count, 1, pages);
+ if (err < count) {
+ count = err;
+ err = -EFAULT;
+ goto put_pages;
+ }
+
+ iod = nvme_alloc_iod(count, length, GFP_KERNEL);
+ sg = iod->sg;
+ sg_init_table(sg, count);
+ for (i = 0; i < count; i++) {
+ sg_set_page(&sg[i], pages[i],
+ min_t(int, length, PAGE_SIZE - offset), offset);
+ length -= (PAGE_SIZE - offset);
+ offset = 0;
+ }
+ sg_mark_end(&sg[i - 1]);
+ iod->nents = count;
+
+ err = -ENOMEM;
+ nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
+ write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (!nents)
+ goto free_iod;
+
+ kfree(pages);
+ return iod;
+
+ free_iod:
+ kfree(iod);
+ put_pages:
+ for (i = 0; i < count; i++)
+ put_page(pages[i]);
+ kfree(pages);
+ return ERR_PTR(err);
+}
+
+static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
+ struct nvme_iod *iod)
+{
+ int i;
+
+ dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+ write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ for (i = 0; i < iod->nents; i++)
+ put_page(sg_page(&iod->sg[i]));
+}
+
+static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
+{
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_queue *nvmeq;
+ struct nvme_user_io io;
+ struct nvme_command c;
+ unsigned length;
+ int status;
+ struct nvme_iod *iod;
+
+ if (copy_from_user(&io, uio, sizeof(io)))
+ return -EFAULT;
+ length = (io.nblocks + 1) << ns->lba_shift;
+
+ switch (io.opcode) {
+ case nvme_cmd_write:
+ case nvme_cmd_read:
+ case nvme_cmd_compare:
+ iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (IS_ERR(iod))
+ return PTR_ERR(iod);
+
+ memset(&c, 0, sizeof(c));
+ c.rw.opcode = io.opcode;
+ c.rw.flags = io.flags;
+ c.rw.nsid = cpu_to_le32(ns->ns_id);
+ c.rw.slba = cpu_to_le64(io.slba);
+ c.rw.length = cpu_to_le16(io.nblocks);
+ c.rw.control = cpu_to_le16(io.control);
+ c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
+ c.rw.reftag = io.reftag;
+ c.rw.apptag = io.apptag;
+ c.rw.appmask = io.appmask;
+ /* XXX: metadata */
+ length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
+
+ nvmeq = get_nvmeq(dev);
+ /*
+ * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
+ * disabled. We may be preempted at any point, and be rescheduled
+ * to a different CPU. That will cause cacheline bouncing, but no
+ * additional races since q_lock already protects against other CPUs.
+ */
+ put_nvmeq(nvmeq);
+ if (length != (io.nblocks + 1) << ns->lba_shift)
+ status = -ENOMEM;
+ else
+ status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+
+ nvme_unmap_user_pages(dev, io.opcode & 1, iod);
+ nvme_free_iod(dev, iod);
+ return status;
+}
+
+static int nvme_user_admin_cmd(struct nvme_ns *ns,
+ struct nvme_admin_cmd __user *ucmd)
+{
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_admin_cmd cmd;
+ struct nvme_command c;
+ int status, length;
+ struct nvme_iod *iod;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
+ return -EFAULT;
+
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = cmd.opcode;
+ c.common.flags = cmd.flags;
+ c.common.nsid = cpu_to_le32(cmd.nsid);
+ c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
+ c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
+ c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
+ c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
+ c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
+ c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
+ c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
+ c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
+
+ length = cmd.data_len;
+ if (cmd.data_len) {
+ iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
+ length);
+ if (IS_ERR(iod))
+ return PTR_ERR(iod);
+ length = nvme_setup_prps(dev, &c.common, iod, length,
+ GFP_KERNEL);
+ }
+
+ if (length != cmd.data_len)
+ status = -ENOMEM;
+ else
+ status = nvme_submit_admin_cmd(dev, &c, NULL);
+
+ if (cmd.data_len) {
+ nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
+ nvme_free_iod(dev, iod);
+ }
+ return status;
+}
+
+static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
+ unsigned long arg)
+{
+ struct nvme_ns *ns = bdev->bd_disk->private_data;
+
+ switch (cmd) {
+ case NVME_IOCTL_ID:
+ return ns->ns_id;
+ case NVME_IOCTL_ADMIN_CMD:
+ return nvme_user_admin_cmd(ns, (void __user *)arg);
+ case NVME_IOCTL_SUBMIT_IO:
+ return nvme_submit_io(ns, (void __user *)arg);
+ default:
+ return -ENOTTY;
+ }
+}
+
+static const struct block_device_operations nvme_fops = {
+ .owner = THIS_MODULE,
+ .ioctl = nvme_ioctl,
+ .compat_ioctl = nvme_ioctl,
+};
+
+static void nvme_timeout_ios(struct nvme_queue *nvmeq)
+{
+ int depth = nvmeq->q_depth - 1;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+ unsigned long now = jiffies;
+ int cmdid;
+
+ for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
+ void *ctx;
+ nvme_completion_fn fn;
+ static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };
+
+ if (!time_after(now, info[cmdid].timeout))
+ continue;
+ dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
+ ctx = cancel_cmdid(nvmeq, cmdid, &fn);
+ fn(nvmeq->dev, ctx, &cqe);
+ }
+}
+
+static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
+{
+ while (bio_list_peek(&nvmeq->sq_cong)) {
+ struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
+ struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
+ if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
+ bio_list_add_head(&nvmeq->sq_cong, bio);
+ break;
+ }
+ if (bio_list_empty(&nvmeq->sq_cong))
+ remove_wait_queue(&nvmeq->sq_full,
+ &nvmeq->sq_cong_wait);
+ }
+}
+
+static int nvme_kthread(void *data)
+{
+ struct nvme_dev *dev;
+
+ while (!kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
+ spin_lock(&dev_list_lock);
+ list_for_each_entry(dev, &dev_list, node) {
+ int i;
+ for (i = 0; i < dev->queue_count; i++) {
+ struct nvme_queue *nvmeq = dev->queues[i];
+ if (!nvmeq)
+ continue;
+ spin_lock_irq(&nvmeq->q_lock);
+ if (nvme_process_cq(nvmeq))
+ printk("process_cq did something\n");
+ nvme_timeout_ios(nvmeq);
+ nvme_resubmit_bios(nvmeq);
+ spin_unlock_irq(&nvmeq->q_lock);
+ }
+ }
+ spin_unlock(&dev_list_lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
+ }
+ return 0;
+}
+
+static DEFINE_IDA(nvme_index_ida);
+
+static int nvme_get_ns_idx(void)
+{
+ int index, error;
+
+ do {
+ if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
+ return -1;
+
+ spin_lock(&dev_list_lock);
+ error = ida_get_new(&nvme_index_ida, &index);
+ spin_unlock(&dev_list_lock);
+ } while (error == -EAGAIN);
+
+ if (error)
+ index = -1;
+ return index;
+}
+
+static void nvme_put_ns_idx(int index)
+{
+ spin_lock(&dev_list_lock);
+ ida_remove(&nvme_index_ida, index);
+ spin_unlock(&dev_list_lock);
+}
+
+static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
+ struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
+{
+ struct nvme_ns *ns;
+ struct gendisk *disk;
+ int lbaf;
+
+ if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
+ return NULL;
+
+ ns = kzalloc(sizeof(*ns), GFP_KERNEL);
+ if (!ns)
+ return NULL;
+ ns->queue = blk_alloc_queue(GFP_KERNEL);
+ if (!ns->queue)
+ goto out_free_ns;
+ ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
+ queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
+/* queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */
+ blk_queue_make_request(ns->queue, nvme_make_request);
+ ns->dev = dev;
+ ns->queue->queuedata = ns;
+
+ disk = alloc_disk(NVME_MINORS);
+ if (!disk)
+ goto out_free_queue;
+ ns->ns_id = nsid;
+ ns->disk = disk;
+ lbaf = id->flbas & 0xf;
+ ns->lba_shift = id->lbaf[lbaf].ds;
+
+ disk->major = nvme_major;
+ disk->minors = NVME_MINORS;
+ disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
+ disk->fops = &nvme_fops;
+ disk->private_data = ns;
+ disk->queue = ns->queue;
+ disk->driverfs_dev = &dev->pci_dev->dev;
+ sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
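+ /*
+ * set_capacity() takes 512-byte sectors: a namespace formatted with
+ * 4k LBAs (ds = 12), for example, exposes nsze << 3 sectors.
+ */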
+ set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
+
+ return ns;
+
+ out_free_queue:
+ blk_cleanup_queue(ns->queue);
+ out_free_ns:
+ kfree(ns);
+ return NULL;
+}
+
+static void nvme_ns_free(struct nvme_ns *ns)
+{
+ int index = ns->disk->first_minor / NVME_MINORS;
+ put_disk(ns->disk);
+ nvme_put_ns_idx(index);
+ blk_cleanup_queue(ns->queue);
+ kfree(ns);
+}
+
+static int set_queue_count(struct nvme_dev *dev, int count)
+{
+ int status;
+ u32 result;
+ u32 q_count = (count - 1) | ((count - 1) << 16);
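+ /*
+ * NSQR/NCQR in the Set Features dword are zero-based: count = 4
+ * encodes q_count as 0x00030003. The controller may grant different
+ * numbers of submission and completion queues, so min() of the two
+ * result halves below gives the usable count.
+ */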
+
+ status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
+ &result);
+ if (status)
+ return -EIO;
+ return min(result & 0xffff, result >> 16) + 1;
+}
+
+static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
+{
+ int result, cpu, i, nr_io_queues, db_bar_size;
+
+ nr_io_queues = num_online_cpus();
+ result = set_queue_count(dev, nr_io_queues);
+ if (result < 0)
+ return result;
+ if (result < nr_io_queues)
+ nr_io_queues = result;
+
+ /* Deregister the admin queue's interrupt */
+ free_irq(dev->entry[0].vector, dev->queues[0]);
+
+ db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
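+ /*
+ * Each queue needs an SQ tail and a CQ head doorbell, each 4 bytes
+ * scaled by the stride: 2 * (4 << db_stride) = 1 << (db_stride + 3)
+ * bytes per queue pair, on top of the 4k register page. Remap the
+ * BAR only if this exceeds the 8k mapped at probe time.
+ */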
+ if (db_bar_size > 8192) {
+ iounmap(dev->bar);
+ dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
+ db_bar_size);
+ dev->dbs = ((void __iomem *)dev->bar) + 4096;
+ dev->queues[0]->q_db = dev->dbs;
+ }
+
+ for (i = 0; i < nr_io_queues; i++)
+ dev->entry[i].entry = i;
+ for (;;) {
+ result = pci_enable_msix(dev->pci_dev, dev->entry,
+ nr_io_queues);
+ if (result == 0) {
+ break;
+ } else if (result > 0) {
+ nr_io_queues = result;
+ continue;
+ } else {
+ nr_io_queues = 1;
+ break;
+ }
+ }
+
+ result = queue_request_irq(dev, dev->queues[0], "nvme admin");
+ /* XXX: handle failure here */
+
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < nr_io_queues; i++) {
+ irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+
+ for (i = 0; i < nr_io_queues; i++) {
+ dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
+ NVME_Q_DEPTH, i);
+ if (IS_ERR(dev->queues[i + 1]))
+ return PTR_ERR(dev->queues[i + 1]);
+ dev->queue_count++;
+ }
+
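+ /*
+ * Spread any remaining possible CPUs over the queues that were
+ * actually created: with 3 I/O queues, for example,
+ * rounddown_pow_of_two() gives 2, so extra CPUs alternate between
+ * queues 1 and 2.
+ */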
+ for (; i < num_possible_cpus(); i++) {
+ int target = i % rounddown_pow_of_two(dev->queue_count - 1);
+ dev->queues[i + 1] = dev->queues[target + 1];
+ }
+
+ return 0;
+}
+
+static void nvme_free_queues(struct nvme_dev *dev)
+{
+ int i;
+
+ for (i = dev->queue_count - 1; i >= 0; i--)
+ nvme_free_queue(dev, i);
+}
+
+static int __devinit nvme_dev_add(struct nvme_dev *dev)
+{
+ int res, nn, i;
+ struct nvme_ns *ns, *next;
+ struct nvme_id_ctrl *ctrl;
+ struct nvme_id_ns *id_ns;
+ void *mem;
+ dma_addr_t dma_addr;
+
+ res = nvme_setup_io_queues(dev);
+ if (res)
+ return res;
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
+ GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
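+ /*
+ * The 8k buffer does double duty: the first 4k receives Identify
+ * data and the second 4k (dma_addr + 4096) the LBA Range Type
+ * feature below.
+ */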
+ res = nvme_identify(dev, 0, 1, dma_addr);
+ if (res) {
+ res = -EIO;
+ goto out_free;
+ }
+
+ ctrl = mem;
+ nn = le32_to_cpup(&ctrl->nn);
+ memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
+ memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
+ memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
+
+ id_ns = mem;
+ for (i = 1; i <= nn; i++) {
+ res = nvme_identify(dev, i, 0, dma_addr);
+ if (res)
+ continue;
+
+ if (id_ns->ncap == 0)
+ continue;
+
+ res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
+ dma_addr + 4096);
+ if (res)
+ continue;
+
+ ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
+ if (ns)
+ list_add_tail(&ns->list, &dev->namespaces);
+ }
+ list_for_each_entry(ns, &dev->namespaces, list)
+ add_disk(ns->disk);
+
+ goto out;
+
+ out_free:
+ list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+ list_del(&ns->list);
+ nvme_ns_free(ns);
+ }
+
+ out:
+ dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
+ return res;
+}
+
+static int nvme_dev_remove(struct nvme_dev *dev)
+{
+ struct nvme_ns *ns, *next;
+
+ spin_lock(&dev_list_lock);
+ list_del(&dev->node);
+ spin_unlock(&dev_list_lock);
+
+ /* TODO: wait until all outstanding I/O has finished, or cancel it */
+
+ list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+ list_del(&ns->list);
+ del_gendisk(ns->disk);
+ nvme_ns_free(ns);
+ }
+
+ nvme_free_queues(dev);
+
+ return 0;
+}
+
+static int nvme_setup_prp_pools(struct nvme_dev *dev)
+{
+ struct device *dmadev = &dev->pci_dev->dev;
+ dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
+ PAGE_SIZE, PAGE_SIZE, 0);
+ if (!dev->prp_page_pool)
+ return -ENOMEM;
+
+ /* Optimisation for I/Os between 4k and 128k */
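+ /*
+ * A 256-byte entry holds 32 8-byte PRP entries, i.e. up to 32 pages
+ * (128k with 4k pages); larger transfers fall back to the page-sized
+ * pool above.
+ */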
+ dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
+ 256, 256, 0);
+ if (!dev->prp_small_pool) {
+ dma_pool_destroy(dev->prp_page_pool);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void nvme_release_prp_pools(struct nvme_dev *dev)
+{
+ dma_pool_destroy(dev->prp_page_pool);
+ dma_pool_destroy(dev->prp_small_pool);
+}
+
+/* XXX: Use an ida or something to let remove / add work correctly */
+static void nvme_set_instance(struct nvme_dev *dev)
+{
+ static int instance;
+ dev->instance = instance++;
+}
+
+static void nvme_release_instance(struct nvme_dev *dev)
+{
+}
+
+static int __devinit nvme_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int bars, result = -ENOMEM;
+ struct nvme_dev *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
+ GFP_KERNEL);
+ if (!dev->entry)
+ goto free;
+ dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
+ GFP_KERNEL);
+ if (!dev->queues)
+ goto free;
+
+ if (pci_enable_device_mem(pdev))
+ goto free;
+ pci_set_master(pdev);
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (pci_request_selected_regions(pdev, bars, "nvme"))
+ goto disable;
+
+ INIT_LIST_HEAD(&dev->namespaces);
+ dev->pci_dev = pdev;
+ pci_set_drvdata(pdev, dev);
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ nvme_set_instance(dev);
+ dev->entry[0].vector = pdev->irq;
+
+ result = nvme_setup_prp_pools(dev);
+ if (result)
+ goto disable_msix;
+
+ dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
+ if (!dev->bar) {
+ result = -ENOMEM;
+ goto disable_msix;
+ }
+
+ result = nvme_configure_admin_queue(dev);
+ if (result)
+ goto unmap;
+ dev->queue_count++;
+
+ spin_lock(&dev_list_lock);
+ list_add(&dev->node, &dev_list);
+ spin_unlock(&dev_list_lock);
+
+ result = nvme_dev_add(dev);
+ if (result)
+ goto delete;
+
+ return 0;
+
+ delete:
+ spin_lock(&dev_list_lock);
+ list_del(&dev->node);
+ spin_unlock(&dev_list_lock);
+
+ nvme_free_queues(dev);
+ unmap:
+ iounmap(dev->bar);
+ disable_msix:
+ pci_disable_msix(pdev);
+ nvme_release_instance(dev);
+ nvme_release_prp_pools(dev);
+ disable:
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+ free:
+ kfree(dev->queues);
+ kfree(dev->entry);
+ kfree(dev);
+ return result;
+}
+
+static void __devexit nvme_remove(struct pci_dev *pdev)
+{
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
+ nvme_dev_remove(dev);
+ pci_disable_msix(pdev);
+ iounmap(dev->bar);
+ nvme_release_instance(dev);
+ nvme_release_prp_pools(dev);
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+ kfree(dev->queues);
+ kfree(dev->entry);
+ kfree(dev);
+}
+
+/* These functions are yet to be implemented */
+#define nvme_error_detected NULL
+#define nvme_dump_registers NULL
+#define nvme_link_reset NULL
+#define nvme_slot_reset NULL
+#define nvme_error_resume NULL
+#define nvme_suspend NULL
+#define nvme_resume NULL
+
+static struct pci_error_handlers nvme_err_handler = {
+ .error_detected = nvme_error_detected,
+ .mmio_enabled = nvme_dump_registers,
+ .link_reset = nvme_link_reset,
+ .slot_reset = nvme_slot_reset,
+ .resume = nvme_error_resume,
+};
+
+/* Move to pci_ids.h later */
+#define PCI_CLASS_STORAGE_EXPRESS 0x010802
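+/*
+ * 0x010802 decodes as base class 01h (mass storage), subclass 08h
+ * (non-volatile memory) and programming interface 02h (NVM Express).
+ */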
+
+static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
+ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, nvme_id_table);
+
+static struct pci_driver nvme_driver = {
+ .name = "nvme",
+ .id_table = nvme_id_table,
+ .probe = nvme_probe,
+ .remove = __devexit_p(nvme_remove),
+ .suspend = nvme_suspend,
+ .resume = nvme_resume,
+ .err_handler = &nvme_err_handler,
+};
+
+static int __init nvme_init(void)
+{
+ int result;
+
+ nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
+ if (IS_ERR(nvme_thread))
+ return PTR_ERR(nvme_thread);
+
+ result = register_blkdev(nvme_major, "nvme");
+ if (result < 0)
+ goto kill_kthread;
+ else if (result > 0)
+ nvme_major = result;
+
+ result = pci_register_driver(&nvme_driver);
+ if (result)
+ goto unregister_blkdev;
+ return 0;
+
+ unregister_blkdev:
+ unregister_blkdev(nvme_major, "nvme");
+ kill_kthread:
+ kthread_stop(nvme_thread);
+ return result;
+}
+
+static void __exit nvme_exit(void)
+{
+ pci_unregister_driver(&nvme_driver);
+ unregister_blkdev(nvme_major, "nvme");
+ kthread_stop(nvme_thread);
+}
+
+MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.8");
+module_init(nvme_init);
+module_exit(nvme_exit);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 5a99bb3f255..f1a274994bb 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -124,7 +124,7 @@ config MV_XOR
config MX3_IPU
bool "MX3x Image Processing Unit support"
- depends on SOC_IMX31 || SOC_IMX35
+ depends on ARCH_MXC
select DMA_ENGINE
default y
help
@@ -187,6 +187,13 @@ config TIMB_DMA
help
Enable support for the Timberdale FPGA DMA engine.
+config SIRF_DMA
+ tristate "CSR SiRFprimaII DMA support"
+ depends on ARCH_PRIMA2
+ select DMA_ENGINE
+ help
+ Enable support for the CSR SiRFprimaII DMA engine.
+
config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
bool
@@ -201,26 +208,26 @@ config PL330_DMA
platform_data for a dma-pl330 device.
config PCH_DMA
- tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support"
+ tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
depends on PCI && X86
select DMA_ENGINE
help
Enable support for Intel EG20T PCH DMA engine.
- This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
- Output Hub), ML7213 and ML7223.
- ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
- for MP(Media Phone) use.
- ML7213/ML7223 is companion chip for Intel Atom E6xx series.
- ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+ This driver can also be used for the LAPIS Semiconductor IOH (Input/
+ Output Hub) devices ML7213, ML7223 and ML7831.
+ ML7213 IOH is for IVI (In-Vehicle Infotainment) use, ML7223 IOH is
+ for MP (Media Phone) use and ML7831 IOH is for general-purpose use.
+ ML7213/ML7223/ML7831 are companion chips for the Intel Atom E6xx
+ series and are fully compatible with the Intel EG20T PCH.
config IMX_SDMA
tristate "i.MX SDMA support"
- depends on ARCH_MX25 || SOC_IMX31 || SOC_IMX35 || ARCH_MX5
+ depends on ARCH_MXC
select DMA_ENGINE
help
Support the i.MX SDMA engine. This engine is integrated into
- Freescale i.MX25/31/35/51 chips.
+ Freescale i.MX25/31/35/51/53 chips.
config IMX_DMA
tristate "i.MX DMA support"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 30cf3b1f0c5..009a222e828 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
obj-$(CONFIG_IMX_DMA) += imx-dma.o
obj-$(CONFIG_MXS_DMA) += mxs-dma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 0698695e8bf..8a281584458 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -854,8 +854,10 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
int ret;
/* Check if we already have a channel */
- if (plchan->phychan)
- return 0;
+ if (plchan->phychan) {
+ ch = plchan->phychan;
+ goto got_channel;
+ }
ch = pl08x_get_phy_channel(pl08x, plchan);
if (!ch) {
@@ -880,21 +882,22 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
return -EBUSY;
}
ch->signal = ret;
-
- /* Assign the flow control signal to this channel */
- if (txd->direction == DMA_TO_DEVICE)
- txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
- else if (txd->direction == DMA_FROM_DEVICE)
- txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
}
+ plchan->phychan = ch;
dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
ch->id,
ch->signal,
plchan->name);
+got_channel:
+ /* Assign the flow control signal to this channel */
+ if (txd->direction == DMA_MEM_TO_DEV)
+ txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
+ else if (txd->direction == DMA_DEV_TO_MEM)
+ txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
+
plchan->phychan_hold++;
- plchan->phychan = ch;
return 0;
}
@@ -1102,10 +1105,10 @@ static int dma_set_runtime_config(struct dma_chan *chan,
/* Transfer direction */
plchan->runtime_direction = config->direction;
- if (config->direction == DMA_TO_DEVICE) {
+ if (config->direction == DMA_MEM_TO_DEV) {
addr_width = config->dst_addr_width;
maxburst = config->dst_maxburst;
- } else if (config->direction == DMA_FROM_DEVICE) {
+ } else if (config->direction == DMA_DEV_TO_MEM) {
addr_width = config->src_addr_width;
maxburst = config->src_maxburst;
} else {
@@ -1136,7 +1139,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
- if (plchan->runtime_direction == DMA_FROM_DEVICE) {
+ if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
plchan->src_addr = config->src_addr;
plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
pl08x_select_bus(plchan->cd->periph_buses,
@@ -1152,7 +1155,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
"configured channel %s (%s) for %s, data width %d, "
"maxburst %d words, LE, CCTL=0x%08x\n",
dma_chan_name(chan), plchan->name,
- (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
+ (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
addr_width,
maxburst,
cctl);
@@ -1322,7 +1325,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
@@ -1354,10 +1357,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
*/
txd->direction = direction;
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
txd->cctl = plchan->dst_cctl;
slave_addr = plchan->dst_addr;
- } else if (direction == DMA_FROM_DEVICE) {
+ } else if (direction == DMA_DEV_TO_MEM) {
txd->cctl = plchan->src_cctl;
slave_addr = plchan->src_addr;
} else {
@@ -1368,10 +1371,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
}
if (plchan->cd->device_fc)
- tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER :
+ tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
PL080_FLOW_PER2MEM_PER;
else
- tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER :
+ tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
PL080_FLOW_PER2MEM;
txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
@@ -1387,7 +1390,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
list_add_tail(&dsg->node, &txd->dsg_list);
dsg->len = sg_dma_len(sg);
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
dsg->src_addr = sg_phys(sg);
dsg->dst_addr = slave_addr;
} else {
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index fcfa0a8b5c5..97f87b29b9f 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -23,6 +23,8 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include "at_hdmac_regs.h"
@@ -660,7 +662,7 @@ err_desc_get:
*/
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
@@ -678,7 +680,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
sg_len,
- direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+ direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
flags);
if (unlikely(!atslave || !sg_len)) {
@@ -692,7 +694,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctrlb = ATC_IEN;
switch (direction) {
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
ctrla |= ATC_DST_WIDTH(reg_width);
ctrlb |= ATC_DST_ADDR_MODE_FIXED
| ATC_SRC_ADDR_MODE_INCR
@@ -725,7 +727,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
total_len += len;
}
break;
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
ctrla |= ATC_SRC_WIDTH(reg_width);
ctrlb |= ATC_DST_ADDR_MODE_INCR
| ATC_SRC_ADDR_MODE_FIXED
@@ -787,7 +789,7 @@ err_desc_get:
*/
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
if (period_len > (ATC_BTSIZE_MAX << reg_width))
goto err_out;
@@ -795,7 +797,7 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
goto err_out;
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
goto err_out;
- if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+ if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
goto err_out;
return 0;
@@ -810,7 +812,7 @@ err_out:
static int
atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
unsigned int period_index, dma_addr_t buf_addr,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
u32 ctrla;
unsigned int reg_width = atslave->reg_width;
@@ -822,7 +824,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
| period_len >> reg_width;
switch (direction) {
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
desc->lli.saddr = buf_addr + (period_len * period_index);
desc->lli.daddr = atslave->tx_reg;
desc->lli.ctrla = ctrla;
@@ -833,7 +835,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
| ATC_DIF(AT_DMA_PER_IF);
break;
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
desc->lli.saddr = atslave->rx_reg;
desc->lli.daddr = buf_addr + (period_len * period_index);
desc->lli.ctrla = ctrla;
@@ -861,7 +863,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
*/
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma_slave *atslave = chan->private;
@@ -872,7 +874,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
unsigned int i;
dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
- direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+ direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
buf_addr,
periods, buf_len, period_len);
@@ -1175,6 +1177,56 @@ static void atc_free_chan_resources(struct dma_chan *chan)
/*-- Module Management -----------------------------------------------*/
+/*
+ * cap_mask is a multi-u32 bitfield and cannot be initialized statically;
+ * it is filled with dma_cap_set() at probe time instead.
+ */
+static struct at_dma_platform_data at91sam9rl_config = {
+ .nr_channels = 2,
+};
+static struct at_dma_platform_data at91sam9g45_config = {
+ .nr_channels = 8,
+};
+
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_dma_dt_ids[] = {
+ {
+ .compatible = "atmel,at91sam9rl-dma",
+ .data = &at91sam9rl_config,
+ }, {
+ .compatible = "atmel,at91sam9g45-dma",
+ .data = &at91sam9g45_config,
+ }, {
+ /* sentinel */
+ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
+#endif
+
+static const struct platform_device_id atdma_devtypes[] = {
+ {
+ .name = "at91sam9rl_dma",
+ .driver_data = (unsigned long) &at91sam9rl_config,
+ }, {
+ .name = "at91sam9g45_dma",
+ .driver_data = (unsigned long) &at91sam9g45_config,
+ }, {
+ /* sentinel */
+ }
+};
+
+static inline struct at_dma_platform_data * __init at_dma_get_driver_data(
+ struct platform_device *pdev)
+{
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
+ if (match == NULL)
+ return NULL;
+ return match->data;
+ }
+ return (struct at_dma_platform_data *)
+ platform_get_device_id(pdev)->driver_data;
+}
+
/**
* at_dma_off - disable DMA controller
* @atdma: the Atmel HDAMC device
@@ -1193,18 +1245,23 @@ static void at_dma_off(struct at_dma *atdma)
static int __init at_dma_probe(struct platform_device *pdev)
{
- struct at_dma_platform_data *pdata;
struct resource *io;
struct at_dma *atdma;
size_t size;
int irq;
int err;
int i;
+ struct at_dma_platform_data *plat_dat;
- /* get DMA Controller parameters from platform */
- pdata = pdev->dev.platform_data;
- if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
- return -EINVAL;
+ /* setup platform data for each SoC */
+ dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
+ dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
+ dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
+
+ /* get DMA parameters from controller type */
+ plat_dat = at_dma_get_driver_data(pdev);
+ if (!plat_dat)
+ return -ENODEV;
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!io)
@@ -1215,14 +1272,14 @@ static int __init at_dma_probe(struct platform_device *pdev)
return irq;
size = sizeof(struct at_dma);
- size += pdata->nr_channels * sizeof(struct at_dma_chan);
+ size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
atdma = kzalloc(size, GFP_KERNEL);
if (!atdma)
return -ENOMEM;
- /* discover transaction capabilites from the platform data */
- atdma->dma_common.cap_mask = pdata->cap_mask;
- atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
+ /* discover transaction capabilities */
+ atdma->dma_common.cap_mask = plat_dat->cap_mask;
+ atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
size = resource_size(io);
if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
@@ -1268,7 +1325,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
/* initialize channels related values */
INIT_LIST_HEAD(&atdma->dma_common.channels);
- for (i = 0; i < pdata->nr_channels; i++) {
+ for (i = 0; i < plat_dat->nr_channels; i++) {
struct at_dma_chan *atchan = &atdma->chan[i];
atchan->chan_common.device = &atdma->dma_common;
@@ -1313,7 +1370,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
- pdata->nr_channels);
+ plat_dat->nr_channels);
dma_async_device_register(&atdma->dma_common);
@@ -1495,9 +1552,11 @@ static const struct dev_pm_ops at_dma_dev_pm_ops = {
static struct platform_driver at_dma_driver = {
.remove = __exit_p(at_dma_remove),
.shutdown = at_dma_shutdown,
+ .id_table = atdma_devtypes,
.driver = {
.name = "at_hdmac",
.pm = &at_dma_dev_pm_ops,
+ .of_match_table = of_match_ptr(atmel_dma_dt_ids),
},
};
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index aa4c9aebab7..dcaedfc181c 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -251,6 +251,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
/**
* struct at_dma - internal representation of an Atmel HDMA Controller
* @chan_common: common dmaengine dma_device object members
+ * @atdma_devtype: identifier of DMA controller compatibility
* @ch_regs: memory mapped register base
* @clk: dma controller clock
* @save_imr: interrupt mask register that is saved on suspend/resume cycle
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 4234f416ef1..d65a718c0f9 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -39,7 +39,7 @@ struct coh901318_desc {
struct scatterlist *sg;
unsigned int sg_len;
struct coh901318_lli *lli;
- enum dma_data_direction dir;
+ enum dma_transfer_direction dir;
unsigned long flags;
u32 head_config;
u32 head_ctrl;
@@ -1034,7 +1034,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
static struct dma_async_tx_descriptor *
coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
@@ -1077,7 +1077,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctrl_last |= cohc->runtime_ctrl;
ctrl |= cohc->runtime_ctrl;
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
@@ -1085,7 +1085,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctrl_chained |= tx_flags;
ctrl_last |= tx_flags;
ctrl |= tx_flags;
- } else if (direction == DMA_FROM_DEVICE) {
+ } else if (direction == DMA_DEV_TO_MEM) {
u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
@@ -1274,11 +1274,11 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
int i = 0;
/* We only support mem to per or per to mem transfers */
- if (config->direction == DMA_FROM_DEVICE) {
+ if (config->direction == DMA_DEV_TO_MEM) {
addr = config->src_addr;
addr_width = config->src_addr_width;
maxburst = config->src_maxburst;
- } else if (config->direction == DMA_TO_DEVICE) {
+ } else if (config->direction == DMA_MEM_TO_DEV) {
addr = config->dst_addr;
addr_width = config->dst_addr_width;
maxburst = config->dst_maxburst;
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 9f7e0e6a7ee..6c0e2d4c668 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -7,11 +7,10 @@
* Author: Per Friden <per.friden@stericsson.com>
*/
-#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
-#include <linux/dmapool.h>
#include <linux/memory.h>
#include <linux/gfp.h>
+#include <linux/dmapool.h>
#include <mach/coh901318.h>
#include "coh901318_lli.h"
@@ -177,18 +176,18 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
struct coh901318_lli *lli,
dma_addr_t buf, unsigned int size,
dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
- enum dma_data_direction dir)
+ enum dma_transfer_direction dir)
{
int s = size;
dma_addr_t src;
dma_addr_t dst;
- if (dir == DMA_TO_DEVICE) {
+ if (dir == DMA_MEM_TO_DEV) {
src = buf;
dst = dev_addr;
- } else if (dir == DMA_FROM_DEVICE) {
+ } else if (dir == DMA_DEV_TO_MEM) {
src = dev_addr;
dst = buf;
@@ -215,9 +214,9 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
lli = coh901318_lli_next(lli);
- if (dir == DMA_TO_DEVICE)
+ if (dir == DMA_MEM_TO_DEV)
src += block_size;
- else if (dir == DMA_FROM_DEVICE)
+ else if (dir == DMA_DEV_TO_MEM)
dst += block_size;
}
@@ -234,7 +233,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
struct scatterlist *sgl, unsigned int nents,
dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
u32 ctrl_last,
- enum dma_data_direction dir, u32 ctrl_irq_mask)
+ enum dma_transfer_direction dir, u32 ctrl_irq_mask)
{
int i;
struct scatterlist *sg;
@@ -249,9 +248,9 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
spin_lock(&pool->lock);
- if (dir == DMA_TO_DEVICE)
+ if (dir == DMA_MEM_TO_DEV)
dst = dev_addr;
- else if (dir == DMA_FROM_DEVICE)
+ else if (dir == DMA_DEV_TO_MEM)
src = dev_addr;
else
goto err;
@@ -269,7 +268,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
ctrl_sg = ctrl ? ctrl : ctrl_last;
- if (dir == DMA_TO_DEVICE)
+ if (dir == DMA_MEM_TO_DEV)
/* increment source address */
src = sg_phys(sg);
else
@@ -293,7 +292,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
lli->src_addr = src;
lli->dst_addr = dst;
- if (dir == DMA_FROM_DEVICE)
+ if (dir == DMA_DEV_TO_MEM)
dst += elem_size;
else
src += elem_size;
diff --git a/drivers/dma/coh901318_lli.h b/drivers/dma/coh901318_lli.h
index 7a5c80990e9..abff3714fdd 100644
--- a/drivers/dma/coh901318_lli.h
+++ b/drivers/dma/coh901318_lli.h
@@ -97,7 +97,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
struct coh901318_lli *lli,
dma_addr_t buf, unsigned int size,
dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last,
- enum dma_data_direction dir);
+ enum dma_transfer_direction dir);
/**
 * coh901318_lli_fill_sg() - Prepares the lli:s for dma scatter list transfer
@@ -119,6 +119,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
struct scatterlist *sg, unsigned int nents,
dma_addr_t dev_addr, u32 ctrl_chained,
u32 ctrl, u32 ctrl_last,
- enum dma_data_direction dir, u32 ctrl_irq_mask);
+ enum dma_transfer_direction dir, u32 ctrl_irq_mask);
#endif /* COH901318_LLI_H */
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index b48967b499d..a6c6051ec85 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -693,12 +693,12 @@ int dma_async_device_register(struct dma_device *device)
!device->device_prep_dma_interrupt);
BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
!device->device_prep_dma_sg);
- BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
- !device->device_prep_slave_sg);
BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
!device->device_prep_dma_cyclic);
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
!device->device_control);
+ BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
+ !device->device_prep_interleaved_dma);
BUG_ON(!device->device_alloc_chan_resources);
BUG_ON(!device->device_free_chan_resources);
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 9bfd6d36071..9b592b02b5f 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -166,6 +166,38 @@ dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
return cookie;
}
+static void dwc_initialize(struct dw_dma_chan *dwc)
+{
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ struct dw_dma_slave *dws = dwc->chan.private;
+ u32 cfghi = DWC_CFGH_FIFO_MODE;
+ u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+
+ if (dwc->initialized == true)
+ return;
+
+ if (dws) {
+ /*
+ * We need controller-specific data to set up slave
+ * transfers.
+ */
+ BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
+
+ cfghi = dws->cfg_hi;
+ cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
+ }
+
+ channel_writel(dwc, CFG_LO, cfglo);
+ channel_writel(dwc, CFG_HI, cfghi);
+
+ /* Enable interrupts */
+ channel_set_bit(dw, MASK.XFER, dwc->mask);
+ channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+ channel_set_bit(dw, MASK.ERROR, dwc->mask);
+
+ dwc->initialized = true;
+}
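+
+/*
+ * Design note: per-channel CFG programming and interrupt unmasking are
+ * deferred to the first dwc_dostart() and guarded by ->initialized,
+ * which dw_dma_off() and dwc_free_chan_resources() clear so that
+ * channels are reprogrammed after a suspend/resume cycle.
+ */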
+
/*----------------------------------------------------------------------*/
/* Called with dwc->lock held and bh disabled */
@@ -189,6 +221,8 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
return;
}
+ dwc_initialize(dwc);
+
channel_writel(dwc, LLP, first->txd.phys);
channel_writel(dwc, CTL_LO,
DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
@@ -696,7 +730,7 @@ err_desc_get:
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
@@ -720,7 +754,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
prev = first = NULL;
switch (direction) {
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
ctllo = (DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_DST_WIDTH(reg_width)
| DWC_CTLL_DST_FIX
@@ -777,7 +811,7 @@ slave_sg_todev_fill_desc:
goto slave_sg_todev_fill_desc;
}
break;
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
ctllo = (DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_SRC_WIDTH(reg_width)
| DWC_CTLL_DST_INC
@@ -959,10 +993,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc;
- struct dw_dma_slave *dws;
int i;
- u32 cfghi;
- u32 cfglo;
unsigned long flags;
dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
@@ -975,26 +1006,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
dwc->completed = chan->cookie = 1;
- cfghi = DWC_CFGH_FIFO_MODE;
- cfglo = 0;
-
- dws = chan->private;
- if (dws) {
- /*
- * We need controller-specific data to set up slave
- * transfers.
- */
- BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
- cfghi = dws->cfg_hi;
- cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
- }
-
- cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);
-
- channel_writel(dwc, CFG_LO, cfglo);
- channel_writel(dwc, CFG_HI, cfghi);
-
/*
* NOTE: some controllers may have additional features that we
* need to initialize here, like "scatter-gather" (which
@@ -1026,11 +1037,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
i = ++dwc->descs_allocated;
}
- /* Enable interrupts */
- channel_set_bit(dw, MASK.XFER, dwc->mask);
- channel_set_bit(dw, MASK.BLOCK, dwc->mask);
- channel_set_bit(dw, MASK.ERROR, dwc->mask);
-
spin_unlock_irqrestore(&dwc->lock, flags);
dev_dbg(chan2dev(chan),
@@ -1058,6 +1064,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;
+ dwc->initialized = false;
/* Disable interrupts */
channel_clear_bit(dw, MASK.XFER, dwc->mask);
@@ -1165,7 +1172,7 @@ EXPORT_SYMBOL(dw_dma_cyclic_stop);
*/
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
dma_addr_t buf_addr, size_t buf_len, size_t period_len,
- enum dma_data_direction direction)
+ enum dma_transfer_direction direction)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_cyclic_desc *cdesc;
@@ -1206,7 +1213,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
goto out_err;
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
goto out_err;
- if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+ if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
goto out_err;
retval = ERR_PTR(-ENOMEM);
@@ -1228,7 +1235,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
goto out_err_desc_get;
switch (direction) {
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
desc->lli.dar = dws->tx_reg;
desc->lli.sar = buf_addr + (period_len * i);
desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
@@ -1239,7 +1246,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
| DWC_CTLL_FC(dws->fc)
| DWC_CTLL_INT_EN);
break;
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
desc->lli.dar = buf_addr + (period_len * i);
desc->lli.sar = dws->rx_reg;
desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
@@ -1335,6 +1342,8 @@ EXPORT_SYMBOL(dw_dma_cyclic_free);
static void dw_dma_off(struct dw_dma *dw)
{
+ int i;
+
dma_writel(dw, CFG, 0);
channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
@@ -1345,6 +1354,9 @@ static void dw_dma_off(struct dw_dma *dw)
while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
cpu_relax();
+
+ for (i = 0; i < dw->dma.chancnt; i++)
+ dw->chan[i].initialized = false;
}
static int __init dw_probe(struct platform_device *pdev)
@@ -1533,6 +1545,7 @@ static int dw_suspend_noirq(struct device *dev)
dw_dma_off(platform_get_drvdata(pdev));
clk_disable(dw->clk);
+
return 0;
}
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index c3419518d70..5eef6946a36 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -140,6 +140,7 @@ struct dw_dma_chan {
u8 mask;
u8 priority;
bool paused;
+ bool initialized;
spinlock_t lock;
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index b47e2b803fa..59e7a965772 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -246,6 +246,9 @@ static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
+ if (list_empty(&edmac->active))
+ return NULL;
+
return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
}
@@ -263,16 +266,22 @@ ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
*/
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
+ struct ep93xx_dma_desc *desc;
+
list_rotate_left(&edmac->active);
if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
return true;
+ desc = ep93xx_dma_get_active(edmac);
+ if (!desc)
+ return false;
+
/*
* If txd.cookie is set it means that we are back in the first
* descriptor in the chain and hence done with it.
*/
- return !ep93xx_dma_get_active(edmac)->txd.cookie;
+ return !desc->txd.cookie;
}
/*
@@ -327,10 +336,16 @@ static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
- struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+ struct ep93xx_dma_desc *desc;
u32 bus_addr;
- if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE)
+ desc = ep93xx_dma_get_active(edmac);
+ if (!desc) {
+ dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
+ return;
+ }
+
+ if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
bus_addr = desc->src_addr;
else
bus_addr = desc->dst_addr;
@@ -443,7 +458,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
control = (5 << M2M_CONTROL_PWSC_SHIFT);
control |= M2M_CONTROL_NO_HDSK;
- if (data->direction == DMA_TO_DEVICE) {
+ if (data->direction == DMA_MEM_TO_DEV) {
control |= M2M_CONTROL_DAH;
control |= M2M_CONTROL_TM_TX;
control |= M2M_CONTROL_RSS_SSPTX;
@@ -459,11 +474,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
* This IDE part is totally untested. Values below are taken
 * from the EP93xx User's Guide and might not be correct.
*/
- control |= M2M_CONTROL_NO_HDSK;
- control |= M2M_CONTROL_RSS_IDE;
- control |= M2M_CONTROL_PW_16;
-
- if (data->direction == DMA_TO_DEVICE) {
+ if (data->direction == DMA_MEM_TO_DEV) {
/* Worst case from the UG */
control = (3 << M2M_CONTROL_PWSC_SHIFT);
control |= M2M_CONTROL_DAH;
@@ -473,6 +484,10 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
control |= M2M_CONTROL_SAH;
control |= M2M_CONTROL_TM_RX;
}
+
+ control |= M2M_CONTROL_NO_HDSK;
+ control |= M2M_CONTROL_RSS_IDE;
+ control |= M2M_CONTROL_PW_16;
break;
default:
@@ -491,7 +506,13 @@ static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
- struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+ struct ep93xx_dma_desc *desc;
+
+ desc = ep93xx_dma_get_active(edmac);
+ if (!desc) {
+ dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
+ return;
+ }
if (edmac->buffer == 0) {
writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
@@ -669,24 +690,30 @@ static void ep93xx_dma_tasklet(unsigned long data)
{
struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
struct ep93xx_dma_desc *desc, *d;
- dma_async_tx_callback callback;
- void *callback_param;
+ dma_async_tx_callback callback = NULL;
+ void *callback_param = NULL;
LIST_HEAD(list);
spin_lock_irq(&edmac->lock);
+ /*
+ * If dma_terminate_all() was called before we get to run, the active
+ * list has become empty. If that happens we aren't supposed to do
+ * anything more than call ep93xx_dma_advance_work().
+ */
desc = ep93xx_dma_get_active(edmac);
- if (desc->complete) {
- edmac->last_completed = desc->txd.cookie;
- list_splice_init(&edmac->active, &list);
+ if (desc) {
+ if (desc->complete) {
+ edmac->last_completed = desc->txd.cookie;
+ list_splice_init(&edmac->active, &list);
+ }
+ callback = desc->txd.callback;
+ callback_param = desc->txd.callback_param;
}
spin_unlock_irq(&edmac->lock);
/* Pick up the next descriptor from the queue */
ep93xx_dma_advance_work(edmac);
- callback = desc->txd.callback;
- callback_param = desc->txd.callback_param;
-
/* Now we can release all the chained descriptors */
list_for_each_entry_safe(desc, d, &list, node) {
/*
@@ -706,13 +733,22 @@ static void ep93xx_dma_tasklet(unsigned long data)
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
struct ep93xx_dma_chan *edmac = dev_id;
+ struct ep93xx_dma_desc *desc;
irqreturn_t ret = IRQ_HANDLED;
spin_lock(&edmac->lock);
+ desc = ep93xx_dma_get_active(edmac);
+ if (!desc) {
+ dev_warn(chan2dev(edmac),
+ "got interrupt while active list is empty\n");
+ spin_unlock(&edmac->lock);
+ return IRQ_NONE;
+ }
+
switch (edmac->edma->hw_interrupt(edmac)) {
case INTERRUPT_DONE:
- ep93xx_dma_get_active(edmac)->complete = true;
+ desc->complete = true;
tasklet_schedule(&edmac->tasklet);
break;
@@ -803,8 +839,8 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
switch (data->port) {
case EP93XX_DMA_SSP:
case EP93XX_DMA_IDE:
- if (data->direction != DMA_TO_DEVICE &&
- data->direction != DMA_FROM_DEVICE)
+ if (data->direction != DMA_MEM_TO_DEV &&
+ data->direction != DMA_DEV_TO_MEM)
return -EINVAL;
break;
default:
@@ -952,7 +988,7 @@ fail:
*/
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction dir,
+ unsigned int sg_len, enum dma_transfer_direction dir,
unsigned long flags)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
@@ -988,7 +1024,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
goto fail;
}
- if (dir == DMA_TO_DEVICE) {
+ if (dir == DMA_MEM_TO_DEV) {
desc->src_addr = sg_dma_address(sg);
desc->dst_addr = edmac->runtime_addr;
} else {
@@ -1032,7 +1068,7 @@ fail:
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
size_t buf_len, size_t period_len,
- enum dma_data_direction dir)
+ enum dma_transfer_direction dir)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
struct ep93xx_dma_desc *desc, *first;
@@ -1065,7 +1101,7 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
goto fail;
}
- if (dir == DMA_TO_DEVICE) {
+ if (dir == DMA_MEM_TO_DEV) {
desc->src_addr = dma_addr + offset;
desc->dst_addr = edmac->runtime_addr;
} else {
@@ -1133,12 +1169,12 @@ static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
return -EINVAL;
switch (config->direction) {
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
width = config->src_addr_width;
addr = config->src_addr;
break;
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
width = config->dst_addr_width;
addr = config->dst_addr;
break;
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 8a781540590..b98070c33ca 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -772,7 +772,7 @@ fail:
*/
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags)
{
/*
* This operation is not supported on the Freescale DMA controller
@@ -819,7 +819,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
return -ENXIO;
/* we set the controller burst size depending on direction */
- if (config->direction == DMA_TO_DEVICE)
+ if (config->direction == DMA_MEM_TO_DEV)
size = config->dst_addr_width * config->dst_maxburst;
else
size = config->src_addr_width * config->src_maxburst;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 4be55f9bb6c..e4383ee2c9a 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -107,7 +107,7 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
imx_dma_disable(imxdmac->imxdma_channel);
return 0;
case DMA_SLAVE_CONFIG:
- if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
+ if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
imxdmac->per_address = dmaengine_cfg->src_addr;
imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
imxdmac->word_size = dmaengine_cfg->src_addr_width;
@@ -224,7 +224,7 @@ static void imxdma_free_chan_resources(struct dma_chan *chan)
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
@@ -241,7 +241,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
dma_length += sg->length;
}
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
dmamode = DMA_MODE_READ;
else
dmamode = DMA_MODE_WRITE;
@@ -271,7 +271,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
struct imxdma_engine *imxdma = imxdmac->imxdma;
@@ -317,7 +317,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
imxdmac->sg_list[periods].page_link =
((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
dmamode = DMA_MODE_READ;
else
dmamode = DMA_MODE_WRITE;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f993955a640..a8af379680c 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -247,7 +247,7 @@ struct sdma_engine;
struct sdma_channel {
struct sdma_engine *sdma;
unsigned int channel;
- enum dma_data_direction direction;
+ enum dma_transfer_direction direction;
enum sdma_peripheral_type peripheral_type;
unsigned int event_id0;
unsigned int event_id1;
@@ -268,6 +268,8 @@ struct sdma_channel {
struct dma_async_tx_descriptor desc;
dma_cookie_t last_completed;
enum dma_status status;
+ unsigned int chn_count;
+ unsigned int chn_real_count;
};
#define IMX_DMA_SG_LOOP (1 << 0)
@@ -503,6 +505,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
struct sdma_buffer_descriptor *bd;
int i, error = 0;
+ sdmac->chn_real_count = 0;
/*
* non loop mode. Iterate over all descriptors, collect
* errors and call callback function
@@ -512,6 +515,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
if (bd->mode.status & (BD_DONE | BD_RROR))
error = -EIO;
+ sdmac->chn_real_count += bd->mode.count;
}
if (error)
@@ -519,9 +523,9 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
else
sdmac->status = DMA_SUCCESS;
+ sdmac->last_completed = sdmac->desc.cookie;
if (sdmac->desc.callback)
sdmac->desc.callback(sdmac->desc.callback_param);
- sdmac->last_completed = sdmac->desc.cookie;
}
static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
@@ -650,7 +654,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
int ret;
- if (sdmac->direction == DMA_FROM_DEVICE) {
+ if (sdmac->direction == DMA_DEV_TO_MEM) {
load_address = sdmac->pc_from_device;
} else {
load_address = sdmac->pc_to_device;
@@ -832,17 +836,18 @@ static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
+ unsigned long flags;
struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
struct sdma_engine *sdma = sdmac->sdma;
dma_cookie_t cookie;
- spin_lock_irq(&sdmac->lock);
+ spin_lock_irqsave(&sdmac->lock, flags);
cookie = sdma_assign_cookie(sdmac);
sdma_enable_channel(sdma, sdmac->channel);
- spin_unlock_irq(&sdmac->lock);
+ spin_unlock_irqrestore(&sdmac->lock, flags);
return cookie;
}
@@ -911,7 +916,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
@@ -941,6 +946,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
goto err_out;
}
+ sdmac->chn_count = 0;
for_each_sg(sgl, sg, sg_len, i) {
struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
int param;
@@ -957,6 +963,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
}
bd->mode.count = count;
+ sdmac->chn_count += count;
if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
ret = -EINVAL;
@@ -1008,7 +1015,7 @@ err_out:
static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
@@ -1093,7 +1100,7 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
sdma_disable_channel(sdmac);
return 0;
case DMA_SLAVE_CONFIG:
- if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
+ if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
sdmac->per_address = dmaengine_cfg->src_addr;
sdmac->watermark_level = dmaengine_cfg->src_maxburst;
sdmac->word_size = dmaengine_cfg->src_addr_width;
@@ -1102,6 +1109,7 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
sdmac->watermark_level = dmaengine_cfg->dst_maxburst;
sdmac->word_size = dmaengine_cfg->dst_addr_width;
}
+ sdmac->direction = dmaengine_cfg->direction;
return sdma_config_channel(sdmac);
default:
return -ENOSYS;
@@ -1119,7 +1127,8 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
last_used = chan->cookie;
- dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
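+ /*
+ * Residue is the byte count programmed for the transfer minus
+ * what the buffer descriptors report as actually transferred.
+ */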
+ dma_set_tx_state(txstate, sdmac->last_completed, last_used,
+ sdmac->chn_count - sdmac->chn_real_count);
return sdmac->status;
}
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 19a0c64d45d..74f70aadf9e 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -280,7 +280,8 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
* callbacks but must be called with the lock held.
*/
static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
- struct intel_mid_dma_desc *desc)
+ struct intel_mid_dma_desc *desc)
+ __releases(&midc->lock) __acquires(&midc->lock)
{
struct dma_async_tx_descriptor *txd = &desc->txd;
dma_async_tx_callback callback_txd = NULL;
@@ -311,6 +312,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
pci_pool_free(desc->lli_pool, desc->lli,
desc->lli_phys);
pci_pool_destroy(desc->lli_pool);
+ desc->lli = NULL;
}
list_move(&desc->desc_node, &midc->free_list);
midc->busy = false;
@@ -395,10 +397,10 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
midc->dma->block_size);
/*Populate SAR and DAR values*/
sg_phy_addr = sg_phys(sg);
- if (desc->dirn == DMA_TO_DEVICE) {
+ if (desc->dirn == DMA_MEM_TO_DEV) {
lli_bloc_desc->sar = sg_phy_addr;
lli_bloc_desc->dar = mids->dma_slave.dst_addr;
- } else if (desc->dirn == DMA_FROM_DEVICE) {
+ } else if (desc->dirn == DMA_DEV_TO_MEM) {
lli_bloc_desc->sar = mids->dma_slave.src_addr;
lli_bloc_desc->dar = sg_phy_addr;
}
@@ -490,7 +492,9 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret != DMA_SUCCESS) {
+ spin_lock_bh(&midc->lock);
midc_scan_descriptors(to_middma_device(chan->device), midc);
+ spin_unlock_bh(&midc->lock);
last_complete = midc->completed;
last_used = chan->cookie;
@@ -566,6 +570,7 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
pci_pool_free(desc->lli_pool, desc->lli,
desc->lli_phys);
pci_pool_destroy(desc->lli_pool);
+ desc->lli = NULL;
}
list_move(&desc->desc_node, &midc->free_list);
}
@@ -632,13 +637,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
if (midc->dma->pimr_mask) {
cfg_hi.cfgx.protctl = 0x0; /*default value*/
cfg_hi.cfgx.fifo_mode = 1;
- if (mids->dma_slave.direction == DMA_TO_DEVICE) {
+ if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
cfg_hi.cfgx.src_per = 0;
if (mids->device_instance == 0)
cfg_hi.cfgx.dst_per = 3;
if (mids->device_instance == 1)
cfg_hi.cfgx.dst_per = 1;
- } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
+ } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
if (mids->device_instance == 0)
cfg_hi.cfgx.src_per = 2;
if (mids->device_instance == 1)
@@ -682,11 +687,11 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
ctl_lo.ctlx.sinc = 0;
ctl_lo.ctlx.dinc = 0;
} else {
- if (mids->dma_slave.direction == DMA_TO_DEVICE) {
+ if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
ctl_lo.ctlx.sinc = 0;
ctl_lo.ctlx.dinc = 2;
ctl_lo.ctlx.tt_fc = 1;
- } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
+ } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
ctl_lo.ctlx.sinc = 2;
ctl_lo.ctlx.dinc = 0;
ctl_lo.ctlx.tt_fc = 2;
@@ -732,7 +737,7 @@ err_desc_get:
*/
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct intel_mid_dma_chan *midc = NULL;
@@ -868,7 +873,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
pm_runtime_get_sync(&mid->pdev->dev);
if (mid->state == SUSPENDED) {
- if (dma_resume(mid->pdev)) {
+ if (dma_resume(&mid->pdev->dev)) {
pr_err("ERR_MDMA: resume failed");
return -EFAULT;
}
@@ -1099,7 +1104,8 @@ static int mid_setup_dma(struct pci_dev *pdev)
LNW_PERIPHRAL_MASK_SIZE);
if (dma->mask_reg == NULL) {
 pr_err("ERR_MDMA:Can't map peripheral intr space !!\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_ioremap;
}
} else
dma->mask_reg = NULL;
@@ -1196,6 +1202,9 @@ static int mid_setup_dma(struct pci_dev *pdev)
err_engine:
free_irq(pdev->irq, dma);
err_irq:
+ if (dma->mask_reg)
+ iounmap(dma->mask_reg);
+err_ioremap:
pci_pool_destroy(dma->dma_pool);
err_dma_pool:
pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
@@ -1337,8 +1346,9 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
*
* This function is called by OS when a power event occurs
*/
-int dma_suspend(struct pci_dev *pci, pm_message_t state)
+static int dma_suspend(struct device *dev)
{
+ struct pci_dev *pci = to_pci_dev(dev);
int i;
struct middma_device *device = pci_get_drvdata(pci);
pr_debug("MDMA: dma_suspend called\n");
@@ -1362,8 +1372,9 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state)
*
* This function is called by OS when a power event occurs
*/
-int dma_resume(struct pci_dev *pci)
+int dma_resume(struct device *dev)
{
+ struct pci_dev *pci = to_pci_dev(dev);
int ret;
struct middma_device *device = pci_get_drvdata(pci);
@@ -1429,6 +1440,8 @@ static const struct dev_pm_ops intel_mid_dma_pm = {
.runtime_suspend = dma_runtime_suspend,
.runtime_resume = dma_runtime_resume,
.runtime_idle = dma_runtime_idle,
+ .suspend = dma_suspend,
+ .resume = dma_resume,
};
static struct pci_driver intel_mid_dma_pci_driver = {
@@ -1437,8 +1450,6 @@ static struct pci_driver intel_mid_dma_pci_driver = {
.probe = intel_mid_dma_probe,
.remove = __devexit_p(intel_mid_dma_remove),
#ifdef CONFIG_PM
- .suspend = dma_suspend,
- .resume = dma_resume,
.driver = {
.pm = &intel_mid_dma_pm,
},
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h
index aea5ee88ce0..c83d35b97bd 100644
--- a/drivers/dma/intel_mid_dma_regs.h
+++ b/drivers/dma/intel_mid_dma_regs.h
@@ -262,7 +262,7 @@ struct intel_mid_dma_desc {
unsigned int lli_length;
unsigned int current_lli;
dma_addr_t next;
- enum dma_data_direction dirn;
+ enum dma_transfer_direction dirn;
enum dma_status status;
enum dma_slave_buswidth width; /*width of DMA txn*/
enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
@@ -296,6 +296,6 @@ static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave
}
-int dma_resume(struct pci_dev *pci);
+int dma_resume(struct device *dev);
#endif /*__INTEL_MID_DMAC_REGS_H__*/
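Taken together, the intel_mid_dma changes above move system sleep handling from the legacy struct pci_driver .suspend/.resume hooks, which receive a struct pci_dev *, to dev_pm_ops callbacks, which receive a struct device * and live alongside the runtime-PM methods. A hedged sketch of the pattern, using a hypothetical foo driver:

	#include <linux/pci.h>
	#include <linux/pm.h>

	static int foo_suspend(struct device *dev)
	{
		struct pci_dev *pdev = to_pci_dev(dev);	/* what the old hook got directly */

		pci_save_state(pdev);	/* typical: save config space before D3 */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		pci_restore_state(to_pci_dev(dev));
		return 0;
	}

	static const struct dev_pm_ops foo_pm = {
		.suspend = foo_suspend,
		.resume  = foo_resume,
	};

	static struct pci_driver foo_driver = {
		/* no .suspend/.resume here any more; PM goes through .driver.pm */
		.driver = { .pm = &foo_pm },
	};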
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index e03f811a83d..04be90b645b 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1735,8 +1735,6 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
spin_unlock_bh(&iop_chan->lock);
}
-MODULE_ALIAS("platform:iop-adma");
-
static struct platform_driver iop_adma_driver = {
.probe = iop_adma_probe,
.remove = __devexit_p(iop_adma_remove),
@@ -1746,19 +1744,9 @@ static struct platform_driver iop_adma_driver = {
},
};
-static int __init iop_adma_init (void)
-{
- return platform_driver_register(&iop_adma_driver);
-}
-
-static void __exit iop_adma_exit (void)
-{
- platform_driver_unregister(&iop_adma_driver);
- return;
-}
-module_exit(iop_adma_exit);
-module_init(iop_adma_init);
+module_platform_driver(iop_adma_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("IOP ADMA Engine Driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:iop-adma");
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 0e5ef33f90a..6212b16e8cf 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -312,7 +312,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
case IPU_PIX_FMT_RGB565:
params->ip.bpp = 2;
params->ip.pfs = 4;
- params->ip.npb = 7;
+ params->ip.npb = 15;
params->ip.sat = 2; /* SAT = 32-bit access */
params->ip.ofs0 = 0; /* Red bit offset */
params->ip.ofs1 = 5; /* Green bit offset */
@@ -422,12 +422,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
params->pp.nsb = 1;
}
-static void ipu_ch_param_set_burst_size(union chan_param_mem *params,
- uint16_t burst_pixels)
-{
- params->pp.npb = burst_pixels - 1;
-}
-
static void ipu_ch_param_set_buffer(union chan_param_mem *params,
dma_addr_t buf0, dma_addr_t buf1)
{
@@ -690,23 +684,6 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan,
ipu_ch_param_set_size(&params, pixel_fmt, width, height, stride_bytes);
ipu_ch_param_set_buffer(&params, phyaddr_0, phyaddr_1);
ipu_ch_param_set_rotation(&params, rot_mode);
- /* Some channels (rotation) have restriction on burst length */
- switch (channel) {
- case IDMAC_IC_7: /* Hangs with burst 8, 16, other values
- invalid - Table 44-30 */
-/*
- ipu_ch_param_set_burst_size(&params, 8);
- */
- break;
- case IDMAC_SDC_0:
- case IDMAC_SDC_1:
- /* In original code only IPU_PIX_FMT_RGB565 was setting burst */
- ipu_ch_param_set_burst_size(&params, 16);
- break;
- case IDMAC_IC_0:
- default:
- break;
- }
spin_lock_irqsave(&ipu->lock, flags);
@@ -1364,7 +1341,7 @@ static void ipu_gc_tasklet(unsigned long arg)
/* Allocate and initialise a transfer descriptor. */
static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long tx_flags)
+ enum dma_transfer_direction direction, unsigned long tx_flags)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
struct idmac_tx_desc *desc = NULL;
@@ -1376,7 +1353,7 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan
chan->chan_id != IDMAC_IC_7)
return NULL;
- if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) {
+ if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) {
dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction);
return NULL;
}
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 8ba4edc6185..4d6d4cf6694 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -835,17 +835,7 @@ static struct platform_driver mpc_dma_driver = {
},
};
-static int __init mpc_dma_init(void)
-{
- return platform_driver_register(&mpc_dma_driver);
-}
-module_init(mpc_dma_init);
-
-static void __exit mpc_dma_exit(void)
-{
- platform_driver_unregister(&mpc_dma_driver);
-}
-module_exit(mpc_dma_exit);
+module_platform_driver(mpc_dma_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index fc903c0ed23..b06cd4ca626 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -44,7 +44,6 @@
#define HW_APBHX_CTRL0 0x000
#define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29)
#define BM_APBH_CTRL0_APB_BURST_EN (1 << 28)
-#define BP_APBH_CTRL0_CLKGATE_CHANNEL 8
#define BP_APBH_CTRL0_RESET_CHANNEL 16
#define HW_APBHX_CTRL1 0x010
#define HW_APBHX_CTRL2 0x020
@@ -111,6 +110,7 @@ struct mxs_dma_chan {
int chan_irq;
struct mxs_dma_ccw *ccw;
dma_addr_t ccw_phys;
+ int desc_count;
dma_cookie_t last_completed;
enum dma_status status;
unsigned int flags;
@@ -130,23 +130,6 @@ struct mxs_dma_engine {
struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
};
-static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable)
-{
- struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
- int chan_id = mxs_chan->chan.chan_id;
- int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;
-
- /* enable apbh channel clock */
- if (dma_is_apbh()) {
- if (apbh_is_old())
- writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
- mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
- else
- writel(1 << chan_id,
- mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
- }
-}
-
static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
{
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -165,9 +148,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int chan_id = mxs_chan->chan.chan_id;
- /* clkgate needs to be enabled before writing other registers */
- mxs_dma_clkgate(mxs_chan, 1);
-
/* set cmd_addr up */
writel(mxs_chan->ccw_phys,
mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
@@ -178,9 +158,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
{
- /* disable apbh channel clock */
- mxs_dma_clkgate(mxs_chan, 0);
-
mxs_chan->status = DMA_SUCCESS;
}
@@ -268,7 +245,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
/*
* When both completion and error of termination bits set at the
* same time, we do not take it as an error. IOW, it only becomes
- * an error we need to handler here in case of ether it's (1) an bus
+ * an error we need to handle here in case of either it's (1) a bus
* error or (2) a termination error with no completion.
*/
stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
@@ -338,10 +315,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
if (ret)
goto err_clk;
- /* clkgate needs to be enabled for reset to finish */
- mxs_dma_clkgate(mxs_chan, 1);
mxs_dma_reset_chan(mxs_chan);
- mxs_dma_clkgate(mxs_chan, 0);
dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
@@ -377,7 +351,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long append)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
@@ -386,7 +360,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
struct scatterlist *sg;
int i, j;
u32 *pio;
- static int idx;
+ int idx = append ? mxs_chan->desc_count : 0;
if (mxs_chan->status == DMA_IN_PROGRESS && !append)
return NULL;
@@ -417,7 +391,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
idx = 0;
}
- if (direction == DMA_NONE) {
+ if (direction == DMA_TRANS_NONE) {
ccw = &mxs_chan->ccw[idx++];
pio = (u32 *) sgl;
@@ -450,7 +424,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->bits |= CCW_CHAIN;
ccw->bits |= CCW_HALT_ON_TERM;
ccw->bits |= CCW_TERM_FLUSH;
- ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
+ ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
COMMAND);
@@ -462,6 +436,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
}
}
}
+ mxs_chan->desc_count = idx;
return &mxs_chan->desc;
@@ -472,7 +447,7 @@ err_out:
static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -515,7 +490,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
ccw->bits |= CCW_IRQ;
ccw->bits |= CCW_HALT_ON_TERM;
ccw->bits |= CCW_TERM_FLUSH;
- ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
+ ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
dma_addr += period_len;
@@ -523,6 +498,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
i++;
}
+ mxs_chan->desc_count = i;
return &mxs_chan->desc;
@@ -539,8 +515,8 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
switch (cmd) {
case DMA_TERMINATE_ALL:
- mxs_dma_disable_chan(mxs_chan);
mxs_dma_reset_chan(mxs_chan);
+ mxs_dma_disable_chan(mxs_chan);
break;
case DMA_PAUSE:
mxs_dma_pause_chan(mxs_chan);
@@ -580,7 +556,7 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
ret = clk_prepare_enable(mxs_dma->clk);
if (ret)
- goto err_out;
+ return ret;
ret = mxs_reset_block(mxs_dma->base);
if (ret)
@@ -604,11 +580,8 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);
- clk_disable_unprepare(mxs_dma->clk);
-
- return 0;
-
err_out:
+ clk_disable_unprepare(mxs_dma->clk);
return ret;
}
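The mxs_dma_init() hunks above straighten out the clock handling: if clk_prepare_enable() itself fails there is nothing to undo, so the function returns directly, and the success path now falls through the same err_out label as the failure paths, so the clock is disabled exactly once on every exit (channels re-enable it in alloc_chan_resources). A sketch of the resulting flow, with the register writes elided:

	static int mxs_dma_init_flow(struct mxs_dma_engine *mxs_dma)
	{
		int ret = clk_prepare_enable(mxs_dma->clk);
		if (ret)
			return ret;		/* clock never ran, nothing to undo */

		ret = mxs_reset_block(mxs_dma->base);
		if (ret)
			goto err_out;

		/* ... enable burst bits, unmask channel interrupts ... */

	err_out:
		clk_disable_unprepare(mxs_dma->clk);	/* shared by success and failure */
		return ret;
	}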
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index a6d0e3dbed0..823f58179f9 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -1,7 +1,7 @@
/*
* Topcliff PCH DMA controller driver
* Copyright (c) 2010 Intel Corporation
- * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -99,7 +99,7 @@ struct pch_dma_desc {
struct pch_dma_chan {
struct dma_chan chan;
void __iomem *membase;
- enum dma_data_direction dir;
+ enum dma_transfer_direction dir;
struct tasklet_struct tasklet;
unsigned long err_status;
@@ -224,7 +224,7 @@ static void pdc_set_dir(struct dma_chan *chan)
mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
(DMA_CTL0_BITS_PER_CH * chan->chan_id));
val &= mask_mode;
- if (pd_chan->dir == DMA_TO_DEVICE)
+ if (pd_chan->dir == DMA_MEM_TO_DEV)
val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
DMA_CTL0_DIR_SHIFT_BITS);
else
@@ -242,7 +242,7 @@ static void pdc_set_dir(struct dma_chan *chan)
mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
(DMA_CTL0_BITS_PER_CH * ch));
val &= mask_mode;
- if (pd_chan->dir == DMA_TO_DEVICE)
+ if (pd_chan->dir == DMA_MEM_TO_DEV)
val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
DMA_CTL0_DIR_SHIFT_BITS);
else
@@ -607,7 +607,7 @@ static void pd_issue_pending(struct dma_chan *chan)
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags)
{
struct pch_dma_chan *pd_chan = to_pd_chan(chan);
struct pch_dma_slave *pd_slave = chan->private;
@@ -623,9 +623,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
return NULL;
}
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
reg = pd_slave->rx_reg;
- else if (direction == DMA_TO_DEVICE)
+ else if (direction == DMA_MEM_TO_DEV)
reg = pd_slave->tx_reg;
else
return NULL;
@@ -1018,6 +1018,8 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
#define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E
#define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017
#define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B
+#define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810
+#define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815
DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
@@ -1030,6 +1032,8 @@ DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */
{ 0, },
};
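The two ML7831 entries follow the table's existing convention: the trailing number lands in pci_device_id.driver_data and is read back in probe as the channel count. A hedged sketch of how that value is typically consumed (the probe body shown is illustrative):

	static int pch_dma_probe_sketch(struct pci_dev *pdev,
					const struct pci_device_id *id)
	{
		int nr_channels = id->driver_data;	/* 8 for ML7831 DMA1, 4 for DMA2 */

		/* ... allocate and register nr_channels DMA channels ... */
		return 0;
	}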
@@ -1057,7 +1061,7 @@ static void __exit pch_dma_exit(void)
module_init(pch_dma_init);
module_exit(pch_dma_exit);
-MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH "
+MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
"DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 09adcfcd953..b8ec03ee8e2 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -350,14 +350,14 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
case DMA_SLAVE_CONFIG:
slave_config = (struct dma_slave_config *)arg;
- if (slave_config->direction == DMA_TO_DEVICE) {
+ if (slave_config->direction == DMA_MEM_TO_DEV) {
if (slave_config->dst_addr)
pch->fifo_addr = slave_config->dst_addr;
if (slave_config->dst_addr_width)
pch->burst_sz = __ffs(slave_config->dst_addr_width);
if (slave_config->dst_maxburst)
pch->burst_len = slave_config->dst_maxburst;
- } else if (slave_config->direction == DMA_FROM_DEVICE) {
+ } else if (slave_config->direction == DMA_DEV_TO_MEM) {
if (slave_config->src_addr)
pch->fifo_addr = slave_config->src_addr;
if (slave_config->src_addr_width)
@@ -621,7 +621,7 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
struct dma_pl330_desc *desc;
struct dma_pl330_chan *pch = to_pchan(chan);
@@ -636,14 +636,14 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
}
switch (direction) {
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 0;
desc->req.rqtype = MEMTODEV;
src = dma_addr;
dst = pch->fifo_addr;
break;
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
desc->rqcfg.src_inc = 0;
desc->rqcfg.dst_inc = 1;
desc->req.rqtype = DEVTOMEM;
@@ -710,7 +710,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flg)
{
struct dma_pl330_desc *first, *desc = NULL;
@@ -759,7 +759,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
else
list_add_tail(&desc->node, &first->node);
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 0;
desc->req.rqtype = MEMTODEV;
@@ -834,17 +834,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
amba_set_drvdata(adev, pdmac);
-#ifdef CONFIG_PM_RUNTIME
- /* to use the runtime PM helper functions */
- pm_runtime_enable(&adev->dev);
-
- /* enable the power domain */
- if (pm_runtime_get_sync(&adev->dev)) {
- dev_err(&adev->dev, "failed to get runtime pm\n");
- ret = -ENODEV;
- goto probe_err1;
- }
-#else
+#ifndef CONFIG_PM_RUNTIME
/* enable dma clk */
clk_enable(pdmac->clk);
#endif
@@ -977,10 +967,7 @@ static int __devexit pl330_remove(struct amba_device *adev)
res = &adev->res;
release_mem_region(res->start, resource_size(res));
-#ifdef CONFIG_PM_RUNTIME
- pm_runtime_put(&adev->dev);
- pm_runtime_disable(&adev->dev);
-#else
+#ifndef CONFIG_PM_RUNTIME
clk_disable(pdmac->clk);
#endif
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 81809c2b46a..54043cd831c 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -23,7 +23,6 @@
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
-#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
@@ -57,6 +56,15 @@ static LIST_HEAD(sh_dmae_devices);
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
+static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
+
+static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ __raw_writel(data, shdev->chan_reg +
+ shdev->pdata->channel[sh_dc->id].chclr_offset);
+}
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
@@ -129,6 +137,15 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
+ if (shdev->pdata->chclr_present) {
+ int i;
+ for (i = 0; i < shdev->pdata->channel_num; i++) {
+ struct sh_dmae_chan *sh_chan = shdev->chan[i];
+ if (sh_chan)
+ chclr_write(sh_chan, 0);
+ }
+ }
+
dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
dmaor = dmaor_read(shdev);
@@ -139,6 +156,10 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
return -EIO;
}
+ if (shdev->pdata->dmaor_init & ~dmaor)
+ dev_warn(shdev->common.dev,
+ "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
+ dmaor, shdev->pdata->dmaor_init);
return 0;
}
@@ -259,8 +280,6 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
return 0;
}
-static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
-
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
@@ -340,6 +359,8 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
sh_chan_xfer_ld_queue(sh_chan);
sh_chan->pm_state = DMAE_PM_ESTABLISHED;
}
+ } else {
+ sh_chan->pm_state = DMAE_PM_PENDING;
}
spin_unlock_irq(&sh_chan->desc_lock);
@@ -479,19 +500,19 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
* @sh_chan: DMA channel
* @flags: DMA transfer flags
* @dest: destination DMA address, incremented when direction equals
- * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
+ * DMA_DEV_TO_MEM
* @src: source DMA address, incremented when direction equals
- * DMA_TO_DEVICE or DMA_BIDIRECTIONAL
+ * DMA_MEM_TO_DEV
* @len: DMA transfer length
* @first: if NULL, set to the current descriptor and cookie set to -EBUSY
* @direction: needed for slave DMA to decide which address to keep constant,
- * equals DMA_BIDIRECTIONAL for MEMCPY
+ * equals DMA_MEM_TO_MEM for MEMCPY
* Returns 0 or an error
* Locks: called with desc_lock held
*/
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
- struct sh_desc **first, enum dma_data_direction direction)
+ struct sh_desc **first, enum dma_transfer_direction direction)
{
struct sh_desc *new;
size_t copy_size;
@@ -531,9 +552,9 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
new->direction = direction;
*len -= copy_size;
- if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
+ if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
*src += copy_size;
- if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
+ if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
*dest += copy_size;
return new;
@@ -546,12 +567,12 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
* converted to scatter-gather to guarantee consistent locking and a correct
* list manipulation. For slave DMA direction carries the usual meaning, and,
* logically, the SG list is RAM and the addr variable contains slave address,
- * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL
+ * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
* and the SG list contains only one element and points at the source buffer.
*/
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
- enum dma_data_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags)
{
struct scatterlist *sg;
struct sh_desc *first = NULL, *new = NULL /* compiler... */;
@@ -592,7 +613,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
i, sg, len, (unsigned long long)sg_addr);
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
new = sh_dmae_add_desc(sh_chan, flags,
&sg_addr, addr, &len, &first,
direction);
@@ -646,13 +667,13 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
sg_dma_address(&sg) = dma_src;
sg_dma_len(&sg) = len;
- return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
+ return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
flags);
}
static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags)
{
struct sh_dmae_slave *param;
struct sh_dmae_chan *sh_chan;
@@ -996,7 +1017,7 @@ static void dmae_do_tasklet(unsigned long data)
spin_lock_irq(&sh_chan->desc_lock);
list_for_each_entry(desc, &sh_chan->ld_queue, node) {
if (desc->mark == DESC_SUBMITTED &&
- ((desc->direction == DMA_FROM_DEVICE &&
+ ((desc->direction == DMA_DEV_TO_MEM &&
(desc->hw.dar + desc->hw.tcr) == dar_buf) ||
(desc->hw.sar + desc->hw.tcr) == sar_buf)) {
dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
@@ -1225,6 +1246,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, shdev);
+ shdev->common.dev = &pdev->dev;
+
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
@@ -1254,7 +1277,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
shdev->common.device_control = sh_dmae_control;
- shdev->common.dev = &pdev->dev;
/* Default transfer size of 32 bytes requires 32-byte alignment */
shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
@@ -1435,22 +1457,17 @@ static int sh_dmae_runtime_resume(struct device *dev)
#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
- struct sh_dmae_device *shdev = dev_get_drvdata(dev);
- int i;
-
- for (i = 0; i < shdev->pdata->channel_num; i++) {
- struct sh_dmae_chan *sh_chan = shdev->chan[i];
- if (sh_chan->descs_allocated)
- sh_chan->pm_error = pm_runtime_put_sync(dev);
- }
-
return 0;
}
static int sh_dmae_resume(struct device *dev)
{
struct sh_dmae_device *shdev = dev_get_drvdata(dev);
- int i;
+ int i, ret;
+
+ ret = sh_dmae_rst(shdev);
+ if (ret < 0)
+ dev_err(dev, "Failed to reset!\n");
for (i = 0; i < shdev->pdata->channel_num; i++) {
struct sh_dmae_chan *sh_chan = shdev->chan[i];
@@ -1459,9 +1476,6 @@ static int sh_dmae_resume(struct device *dev)
if (!sh_chan->descs_allocated)
continue;
- if (!sh_chan->pm_error)
- pm_runtime_get_sync(dev);
-
if (param) {
const struct sh_dmae_slave_config *cfg = param->config;
dmae_set_dmars(sh_chan, cfg->mid_rid);
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
new file mode 100644
index 00000000000..2333810d168
--- /dev/null
+++ b/drivers/dma/sirf-dma.c
@@ -0,0 +1,707 @@
+/*
+ * DMA controller driver for CSR SiRFprimaII
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/sirfsoc_dma.h>
+
+#define SIRFSOC_DMA_DESCRIPTORS 16
+#define SIRFSOC_DMA_CHANNELS 16
+
+#define SIRFSOC_DMA_CH_ADDR 0x00
+#define SIRFSOC_DMA_CH_XLEN 0x04
+#define SIRFSOC_DMA_CH_YLEN 0x08
+#define SIRFSOC_DMA_CH_CTRL 0x0C
+
+#define SIRFSOC_DMA_WIDTH_0 0x100
+#define SIRFSOC_DMA_CH_VALID 0x140
+#define SIRFSOC_DMA_CH_INT 0x144
+#define SIRFSOC_DMA_INT_EN 0x148
+#define SIRFSOC_DMA_CH_LOOP_CTRL 0x150
+
+#define SIRFSOC_DMA_MODE_CTRL_BIT 4
+#define SIRFSOC_DMA_DIR_CTRL_BIT 5
+
+/* the xlen and dma_width registers are expressed in 4-byte words */
+#define SIRFSOC_DMA_WORD_LEN 4
+
+struct sirfsoc_dma_desc {
+ struct dma_async_tx_descriptor desc;
+ struct list_head node;
+
+ /* SiRFprimaII 2D-DMA parameters */
+
+ int xlen; /* DMA xlen */
+ int ylen; /* DMA ylen */
+ int width; /* DMA width */
+ int dir;
+ bool cyclic; /* is loop DMA? */
+ u32 addr; /* DMA buffer address */
+};
+
+struct sirfsoc_dma_chan {
+ struct dma_chan chan;
+ struct list_head free;
+ struct list_head prepared;
+ struct list_head queued;
+ struct list_head active;
+ struct list_head completed;
+ dma_cookie_t completed_cookie;
+ unsigned long happened_cyclic;
+ unsigned long completed_cyclic;
+
+ /* Lock for this structure */
+ spinlock_t lock;
+
+ int mode;
+};
+
+struct sirfsoc_dma {
+ struct dma_device dma;
+ struct tasklet_struct tasklet;
+ struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS];
+ void __iomem *base;
+ int irq;
+};
+
+#define DRV_NAME "sirfsoc_dma"
+
+/* Convert struct dma_chan to struct sirfsoc_dma_chan */
+static inline
+struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct sirfsoc_dma_chan, chan);
+}
+
+/* Convert struct dma_chan to struct sirfsoc_dma */
+static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
+ return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
+}
+
+/* Execute all queued DMA descriptors */
+static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+ int cid = schan->chan.chan_id;
+ struct sirfsoc_dma_desc *sdesc = NULL;
+
+ /*
+ * the lock has already been taken by the functions calling this, so
+ * we don't take it again
+ */
+
+ sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
+ node);
+ /* Move the first queued descriptor to active list */
+ list_move_tail(&schan->queued, &schan->active);
+
+ /* Start the DMA transfer */
+ writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
+ cid * 4);
+ writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
+ (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
+ sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
+ writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
+ SIRFSOC_DMA_CH_XLEN);
+ writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
+ SIRFSOC_DMA_CH_YLEN);
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
+ (1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
+
+ /*
+ * writel has an implicit memory write barrier to make sure data is
+ * flushed into memory before starting DMA
+ */
+ writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
+
+ if (sdesc->cyclic) {
+ writel((1 << cid) | 1 << (cid + 16) |
+ readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+ schan->happened_cyclic = schan->completed_cyclic = 0;
+ }
+}
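Each of the 16 channels owns a 0x10-byte register window, which is why the per-channel ADDR/XLEN/YLEN/CTRL registers above are addressed as base + cid * 0x10 + offset, while the width registers form a separate word array at SIRFSOC_DMA_WIDTH_0 + cid * 4. A hypothetical helper (not part of the driver) makes the arithmetic explicit:

	/* Hypothetical: address of a per-channel register. */
	static void __iomem *sirfsoc_chan_reg(struct sirfsoc_dma *sdma,
					      int cid, u32 offset)
	{
		return sdma->base + cid * 0x10 + offset;  /* 0x10 bytes per channel */
	}

	/* e.g.: writel_relaxed(sdesc->xlen,
	 *		sirfsoc_chan_reg(sdma, cid, SIRFSOC_DMA_CH_XLEN)); */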
+
+/* Interrupt handler */
+static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
+{
+ struct sirfsoc_dma *sdma = data;
+ struct sirfsoc_dma_chan *schan;
+ struct sirfsoc_dma_desc *sdesc = NULL;
+ u32 is;
+ int ch;
+
+ is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
+ while ((ch = fls(is) - 1) >= 0) {
+ is &= ~(1 << ch);
+ writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
+ schan = &sdma->channels[ch];
+
+ spin_lock(&schan->lock);
+
+ sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
+ node);
+ if (!sdesc->cyclic) {
+ /* Execute queued descriptors */
+ list_splice_tail_init(&schan->active, &schan->completed);
+ if (!list_empty(&schan->queued))
+ sirfsoc_dma_execute(schan);
+ } else
+ schan->happened_cyclic++;
+
+ spin_unlock(&schan->lock);
+ }
+
+ /* Schedule tasklet */
+ tasklet_schedule(&sdma->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/* process completed descriptors */
+static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
+{
+ dma_cookie_t last_cookie = 0;
+ struct sirfsoc_dma_chan *schan;
+ struct sirfsoc_dma_desc *sdesc;
+ struct dma_async_tx_descriptor *desc;
+ unsigned long flags;
+ unsigned long happened_cyclic;
+ LIST_HEAD(list);
+ int i;
+
+ for (i = 0; i < sdma->dma.chancnt; i++) {
+ schan = &sdma->channels[i];
+
+ /* Get all completed descriptors */
+ spin_lock_irqsave(&schan->lock, flags);
+ if (!list_empty(&schan->completed)) {
+ list_splice_tail_init(&schan->completed, &list);
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ /* Execute callbacks and run dependencies */
+ list_for_each_entry(sdesc, &list, node) {
+ desc = &sdesc->desc;
+
+ if (desc->callback)
+ desc->callback(desc->callback_param);
+
+ last_cookie = desc->cookie;
+ dma_run_dependencies(desc);
+ }
+
+ /* Free descriptors */
+ spin_lock_irqsave(&schan->lock, flags);
+ list_splice_tail_init(&list, &schan->free);
+ schan->completed_cookie = last_cookie;
+ spin_unlock_irqrestore(&schan->lock, flags);
+ } else {
+ /* for cyclic channel, desc is always in active list */
+ sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
+ node);
+
+ if (!sdesc || !sdesc->cyclic) {
+ /* without active cyclic DMA */
+ spin_unlock_irqrestore(&schan->lock, flags);
+ continue;
+ }
+
+ /* cyclic DMA */
+ happened_cyclic = schan->happened_cyclic;
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ desc = &sdesc->desc;
+ while (happened_cyclic != schan->completed_cyclic) {
+ if (desc->callback)
+ desc->callback(desc->callback_param);
+ schan->completed_cyclic++;
+ }
+ }
+ }
+}
+
+/* DMA Tasklet */
+static void sirfsoc_dma_tasklet(unsigned long data)
+{
+ struct sirfsoc_dma *sdma = (void *)data;
+
+ sirfsoc_dma_process_completed(sdma);
+}
+
+/* Submit descriptor to hardware */
+static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
+ struct sirfsoc_dma_desc *sdesc;
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ /* Move descriptor to queue */
+ list_move_tail(&sdesc->node, &schan->queued);
+
+ /* Update cookie */
+ cookie = schan->chan.cookie + 1;
+ if (cookie <= 0)
+ cookie = 1;
+
+ schan->chan.cookie = cookie;
+ sdesc->desc.cookie = cookie;
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return cookie;
+}
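The submit path hand-rolls the dmaengine cookie scheme of this era: cookies increase monotonically per channel and skip non-positive values on wrap, so dma_async_is_complete() can order them against the channel's completed cookie. The core of it, isolated as a sketch (later kernels wrap this in a dma_cookie_assign() helper):

	/* Sketch: hand out the next valid cookie for a descriptor. */
	static dma_cookie_t assign_cookie(struct dma_chan *chan,
					  struct dma_async_tx_descriptor *txd)
	{
		dma_cookie_t cookie = chan->cookie + 1;

		if (cookie <= 0)	/* wrapped: zero and negatives are reserved */
			cookie = 1;

		chan->cookie = cookie;
		txd->cookie  = cookie;
		return cookie;
	}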
+
+static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
+ struct dma_slave_config *config)
+{
+ unsigned long flags;
+
+ if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+ (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
+ return -EINVAL;
+
+ spin_lock_irqsave(&schan->lock, flags);
+ schan->mode = (config->src_maxburst == 4 ? 1 : 0);
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return 0;
+}
+
+static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+ int cid = schan->chan.chan_id;
+ unsigned long flags;
+
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
+ ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
+ writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
+
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
+ & ~((1 << cid) | 1 << (cid + 16)),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+
+ spin_lock_irqsave(&schan->lock, flags);
+ list_splice_tail_init(&schan->active, &schan->free);
+ list_splice_tail_init(&schan->queued, &schan->free);
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return 0;
+}
+
+static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct dma_slave_config *config;
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ return sirfsoc_dma_terminate_all(schan);
+ case DMA_SLAVE_CONFIG:
+ config = (struct dma_slave_config *)arg;
+ return sirfsoc_dma_slave_config(schan, config);
+
+ default:
+ break;
+ }
+
+ return -ENOSYS;
+}
+
+/* Alloc channel resources */
+static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma_desc *sdesc;
+ unsigned long flags;
+ LIST_HEAD(descs);
+ int i;
+
+ /* Alloc descriptors for this channel */
+ for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
+ sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
+ if (!sdesc) {
+ dev_notice(sdma->dma.dev, "Memory allocation error. "
+ "Allocated only %u descriptors\n", i);
+ break;
+ }
+
+ dma_async_tx_descriptor_init(&sdesc->desc, chan);
+ sdesc->desc.flags = DMA_CTRL_ACK;
+ sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;
+
+ list_add_tail(&sdesc->node, &descs);
+ }
+
+ /* Return error only if no descriptors were allocated */
+ if (i == 0)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ list_splice_tail_init(&descs, &schan->free);
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return i;
+}
+
+/* Free channel resources */
+static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma_desc *sdesc, *tmp;
+ unsigned long flags;
+ LIST_HEAD(descs);
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ /* Channel must be idle */
+ BUG_ON(!list_empty(&schan->prepared));
+ BUG_ON(!list_empty(&schan->queued));
+ BUG_ON(!list_empty(&schan->active));
+ BUG_ON(!list_empty(&schan->completed));
+
+ /* Move data */
+ list_splice_tail_init(&schan->free, &descs);
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ /* Free descriptors */
+ list_for_each_entry_safe(sdesc, tmp, &descs, node)
+ kfree(sdesc);
+}
+
+/* Send pending descriptor to hardware */
+static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ if (list_empty(&schan->active) && !list_empty(&schan->queued))
+ sirfsoc_dma_execute(schan);
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+}
+
+/* Check request completion status */
+static enum dma_status
+sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ unsigned long flags;
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+
+ spin_lock_irqsave(&schan->lock, flags);
+ last_used = schan->chan.cookie;
+ last_complete = schan->completed_cookie;
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
+ return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
+ struct dma_chan *chan, struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma_desc *sdesc = NULL;
+ unsigned long iflags;
+ int ret;
+
+ if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
+ ret = -EINVAL;
+ goto err_dir;
+ }
+
+ /* Get free descriptor */
+ spin_lock_irqsave(&schan->lock, iflags);
+ if (!list_empty(&schan->free)) {
+ sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
+ node);
+ list_del(&sdesc->node);
+ }
+ spin_unlock_irqrestore(&schan->lock, iflags);
+
+ if (!sdesc) {
+ /* try to free completed descriptors */
+ sirfsoc_dma_process_completed(sdma);
+ ret = 0;
+ goto no_desc;
+ }
+
+ /* Place descriptor in prepared list */
+ spin_lock_irqsave(&schan->lock, iflags);
+
+ /*
+ * The number of chunks in a frame can only be 1 for prima2,
+ * and ylen (the number of frames minus 1) must be at least 0
+ */
+ if ((xt->frame_size == 1) && (xt->numf > 0)) {
+ sdesc->cyclic = 0;
+ sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
+ sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
+ SIRFSOC_DMA_WORD_LEN;
+ sdesc->ylen = xt->numf - 1;
+ if (xt->dir == DMA_MEM_TO_DEV) {
+ sdesc->addr = xt->src_start;
+ sdesc->dir = 1;
+ } else {
+ sdesc->addr = xt->dst_start;
+ sdesc->dir = 0;
+ }
+
+ list_add_tail(&sdesc->node, &schan->prepared);
+ } else {
+ pr_err("sirfsoc DMA Invalid xfer\n");
+ ret = -EINVAL;
+ goto err_xfer;
+ }
+ spin_unlock_irqrestore(&schan->lock, iflags);
+
+ return &sdesc->desc;
+err_xfer:
+ spin_unlock_irqrestore(&schan->lock, iflags);
+no_desc:
+err_dir:
+ return ERR_PTR(ret);
+}
+
+static struct dma_async_tx_descriptor *
+sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction direction)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma_desc *sdesc = NULL;
+ unsigned long iflags;
+
+ /*
+ * We only support cyclic transfers with 2 periods.
+ * If the X-length is set to 0, the channel runs in loop mode:
+ * the DMA address keeps increasing until it reaches the end of a
+ * loop area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)),
+ * then wraps back to the beginning of that area. In loop mode the
+ * data region is divided into two halves, BUFA and BUFB, and the
+ * controller raises an interrupt twice per loop: when the DMA
+ * address reaches the end of BUFA and when it reaches the end of
+ * BUFB.
+ */
+ if (buf_len != 2 * period_len)
+ return ERR_PTR(-EINVAL);
+
+ /* Get free descriptor */
+ spin_lock_irqsave(&schan->lock, iflags);
+ if (!list_empty(&schan->free)) {
+ sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
+ node);
+ list_del(&sdesc->node);
+ }
+ spin_unlock_irqrestore(&schan->lock, iflags);
+
+ if (!sdesc)
+ return NULL;
+
+ /* Place descriptor in prepared list */
+ spin_lock_irqsave(&schan->lock, iflags);
+ sdesc->addr = addr;
+ sdesc->cyclic = 1;
+ sdesc->xlen = 0;
+ sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
+ sdesc->width = 1;
+ list_add_tail(&sdesc->node, &schan->prepared);
+ spin_unlock_irqrestore(&schan->lock, iflags);
+
+ return &sdesc->desc;
+}
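A quick worked example of the loop-mode arithmetic above: for an 8192-byte ring split into two 4096-byte periods, buf_len == 2 * period_len holds, xlen = 0 selects loop mode, and ylen counts 4-byte words over the whole area minus one. Illustrative values:

	size_t buf_len    = 8192;			/* whole loop area (BUFA + BUFB) */
	size_t period_len = buf_len / 2;		/* 4096 bytes per half */
	int xlen  = 0;					/* 0 selects loop mode */
	int ylen  = buf_len / SIRFSOC_DMA_WORD_LEN - 1;	/* 8192 / 4 - 1 = 2047 */
	int width = 1;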
+
+/*
+ * The DMA controller consists of 16 independent DMA channels.
+ * Each channel is allocated to a different function
+ */
+bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
+{
+ unsigned int ch_nr = (unsigned int) chan_id;
+
+ if (ch_nr == chan->chan_id +
+ chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(sirfsoc_dma_filter_id);
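Clients would pass sirfsoc_dma_filter_id() to dma_request_channel() with the global channel number encoded as dev_id * SIRFSOC_DMA_CHANNELS + chan_id. A hedged usage sketch (channel 12 of controller 0 is an arbitrary choice):

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* channel 12 of DMAC 0: 0 * SIRFSOC_DMA_CHANNELS + 12 */
	chan = dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)12);
	if (!chan)
		pr_err("no matching sirfsoc DMA channel\n");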
+
+static int __devinit sirfsoc_dma_probe(struct platform_device *op)
+{
+ struct device_node *dn = op->dev.of_node;
+ struct device *dev = &op->dev;
+ struct dma_device *dma;
+ struct sirfsoc_dma *sdma;
+ struct sirfsoc_dma_chan *schan;
+ struct resource res;
+ ulong regs_start, regs_size;
+ u32 id;
+ int ret, i;
+
+ sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
+ if (!sdma) {
+ dev_err(dev, "Memory exhausted!\n");
+ return -ENOMEM;
+ }
+
+ if (of_property_read_u32(dn, "cell-index", &id)) {
+ dev_err(dev, "Fail to get DMAC index\n");
+ ret = -ENODEV;
+ goto free_mem;
+ }
+
+ sdma->irq = irq_of_parse_and_map(dn, 0);
+ if (sdma->irq == NO_IRQ) {
+ dev_err(dev, "Error mapping IRQ!\n");
+ ret = -EINVAL;
+ goto free_mem;
+ }
+
+ ret = of_address_to_resource(dn, 0, &res);
+ if (ret) {
+ dev_err(dev, "Error parsing memory region!\n");
+ goto free_mem;
+ }
+
+ regs_start = res.start;
+ regs_size = resource_size(&res);
+
+ sdma->base = devm_ioremap(dev, regs_start, regs_size);
+ if (!sdma->base) {
+ dev_err(dev, "Error mapping memory region!\n");
+ ret = -ENOMEM;
+ goto irq_dispose;
+ }
+
+ ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
+ sdma);
+ if (ret) {
+ dev_err(dev, "Error requesting IRQ!\n");
+ ret = -EINVAL;
+ goto unmap_mem;
+ }
+
+ dma = &sdma->dma;
+ dma->dev = dev;
+ dma->chancnt = SIRFSOC_DMA_CHANNELS;
+
+ dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
+ dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
+ dma->device_issue_pending = sirfsoc_dma_issue_pending;
+ dma->device_control = sirfsoc_dma_control;
+ dma->device_tx_status = sirfsoc_dma_tx_status;
+ dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
+ dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
+
+ INIT_LIST_HEAD(&dma->channels);
+ dma_cap_set(DMA_SLAVE, dma->cap_mask);
+ dma_cap_set(DMA_CYCLIC, dma->cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
+ dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+
+ for (i = 0; i < dma->chancnt; i++) {
+ schan = &sdma->channels[i];
+
+ schan->chan.device = dma;
+ schan->chan.cookie = 1;
+ schan->completed_cookie = schan->chan.cookie;
+
+ INIT_LIST_HEAD(&schan->free);
+ INIT_LIST_HEAD(&schan->prepared);
+ INIT_LIST_HEAD(&schan->queued);
+ INIT_LIST_HEAD(&schan->active);
+ INIT_LIST_HEAD(&schan->completed);
+
+ spin_lock_init(&schan->lock);
+ list_add_tail(&schan->chan.device_node, &dma->channels);
+ }
+
+ tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
+
+ /* Register DMA engine */
+ dev_set_drvdata(dev, sdma);
+ ret = dma_async_device_register(dma);
+ if (ret)
+ goto free_irq;
+
+ dev_info(dev, "initialized SIRFSOC DMAC driver\n");
+
+ return 0;
+
+free_irq:
+ devm_free_irq(dev, sdma->irq, sdma);
+irq_dispose:
+ irq_dispose_mapping(sdma->irq);
+unmap_mem:
+ iounmap(sdma->base);
+free_mem:
+ devm_kfree(dev, sdma);
+ return ret;
+}
+
+static int __devexit sirfsoc_dma_remove(struct platform_device *op)
+{
+ struct device *dev = &op->dev;
+ struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+
+ dma_async_device_unregister(&sdma->dma);
+ devm_free_irq(dev, sdma->irq, sdma);
+ irq_dispose_mapping(sdma->irq);
+ iounmap(sdma->base);
+ devm_kfree(dev, sdma);
+ return 0;
+}
+
+static struct of_device_id sirfsoc_dma_match[] = {
+ { .compatible = "sirf,prima2-dmac", },
+ {},
+};
+
+static struct platform_driver sirfsoc_dma_driver = {
+ .probe = sirfsoc_dma_probe,
+ .remove = __devexit_p(sirfsoc_dma_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = sirfsoc_dma_match,
+ },
+};
+
+module_platform_driver(sirfsoc_dma_driver);
+
+MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
+ "Barry Song <baohua.song@csr.com>");
+MODULE_DESCRIPTION("SIRFSOC DMA control driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 13259cad0ce..cc5ecbc067a 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -14,6 +14,8 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/amba/bus.h>
@@ -32,6 +34,9 @@
/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500
+/* Milliseconds */
+#define DMA40_AUTOSUSPEND_DELAY 100
+
/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000
@@ -62,6 +67,55 @@ enum d40_command {
D40_DMA_SUSPENDED = 3
};
+/*
+ * These are the registers that have to be saved and later restored
+ * when the DMA hw is powered off.
+ * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
+ */
+static u32 d40_backup_regs[] = {
+ D40_DREG_LCPA,
+ D40_DREG_LCLA,
+ D40_DREG_PRMSE,
+ D40_DREG_PRMSO,
+ D40_DREG_PRMOE,
+ D40_DREG_PRMOO,
+};
+
+#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
+
+/* TODO: Check if all these registers have to be saved/restored on dma40 v3 */
+static u32 d40_backup_regs_v3[] = {
+ D40_DREG_PSEG1,
+ D40_DREG_PSEG2,
+ D40_DREG_PSEG3,
+ D40_DREG_PSEG4,
+ D40_DREG_PCEG1,
+ D40_DREG_PCEG2,
+ D40_DREG_PCEG3,
+ D40_DREG_PCEG4,
+ D40_DREG_RSEG1,
+ D40_DREG_RSEG2,
+ D40_DREG_RSEG3,
+ D40_DREG_RSEG4,
+ D40_DREG_RCEG1,
+ D40_DREG_RCEG2,
+ D40_DREG_RCEG3,
+ D40_DREG_RCEG4,
+};
+
+#define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)
+
+static u32 d40_backup_regs_chan[] = {
+ D40_CHAN_REG_SSCFG,
+ D40_CHAN_REG_SSELT,
+ D40_CHAN_REG_SSPTR,
+ D40_CHAN_REG_SSLNK,
+ D40_CHAN_REG_SDCFG,
+ D40_CHAN_REG_SDELT,
+ D40_CHAN_REG_SDPTR,
+ D40_CHAN_REG_SDLNK,
+};
+
/**
* struct d40_lli_pool - Structure for keeping LLIs in memory
*
@@ -96,7 +150,7 @@ struct d40_lli_pool {
* during a transfer.
* @node: List entry.
* @is_in_client_list: true if the client owns this descriptor.
- * the previous one.
+ * @cyclic: true if this is a cyclic job
*
* This descriptor is used for both logical and physical transfers.
*/
@@ -143,6 +197,7 @@ struct d40_lcla_pool {
* channels.
*
* @lock: A lock protecting this entity.
+ * @reserved: True if the channel is reserved, e.g. used by the secure world.
* @num: The physical channel number of this entity.
* @allocated_src: Bit mapped to show which src event line's are mapped to
* this physical channel. Can also be free or physically allocated.
@@ -152,6 +207,7 @@ struct d40_lcla_pool {
*/
struct d40_phy_res {
spinlock_t lock;
+ bool reserved;
int num;
u32 allocated_src;
u32 allocated_dst;
@@ -185,7 +241,6 @@ struct d40_base;
* @src_def_cfg: Default cfg register setting for src.
* @dst_def_cfg: Default cfg register setting for dst.
* @log_def: Default logical channel settings.
- * @lcla: Space for one dst src pair for logical channel transfers.
* @lcpa: Pointer to dst and src lcpa settings.
* @runtime_addr: runtime configured address.
* @runtime_direction: runtime configured direction.
@@ -217,7 +272,7 @@ struct d40_chan {
struct d40_log_lli_full *lcpa;
/* Runtime reconfiguration */
dma_addr_t runtime_addr;
- enum dma_data_direction runtime_direction;
+ enum dma_transfer_direction runtime_direction;
};
/**
@@ -241,6 +296,7 @@ struct d40_chan {
* @dma_both: dma_device channels that can do both memcpy and slave transfers.
* @dma_slave: dma_device channels that can only do slave transfers.
* @dma_memcpy: dma_device channels that can only do memcpy transfers.
+ * @phy_chans: Room for all possible physical channels in system.
* @log_chans: Room for all possible logical channels in system.
* @lookup_log_chans: Used to map interrupt number to logical channel. Points
* to log_chans entries.
@@ -248,12 +304,20 @@ struct d40_chan {
* to phy_chans entries.
* @plat_data: Pointer to provided platform_data which is the driver
* configuration.
+ * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
* @phy_res: Vector containing all physical channels.
* @lcla_pool: lcla pool settings and data.
* @lcpa_base: The virtual mapped address of LCPA.
* @phy_lcpa: The physical address of the LCPA.
* @lcpa_size: The size of the LCPA area.
* @desc_slab: cache for descriptors.
+ * @reg_val_backup: Here the values of some hardware registers are stored
+ * before the DMA is powered off. They are restored when the power is back on.
+ * @reg_val_backup_v3: Backup of registers that only exist on dma40 v3 and
+ * later.
+ * @reg_val_backup_chan: Backup data for standard channel parameter registers.
+ * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
+ * @initialized: true if the dma has been initialized
*/
struct d40_base {
spinlock_t interrupt_lock;
@@ -275,6 +339,7 @@ struct d40_base {
struct d40_chan **lookup_log_chans;
struct d40_chan **lookup_phy_chans;
struct stedma40_platform_data *plat_data;
+ struct regulator *lcpa_regulator;
/* Physical half channels */
struct d40_phy_res *phy_res;
struct d40_lcla_pool lcla_pool;
@@ -282,6 +347,11 @@ struct d40_base {
dma_addr_t phy_lcpa;
resource_size_t lcpa_size;
struct kmem_cache *desc_slab;
+ u32 reg_val_backup[BACKUP_REGS_SZ];
+ u32 reg_val_backup_v3[BACKUP_REGS_SZ_V3];
+ u32 *reg_val_backup_chan;
+ u16 gcc_pwr_off_mask;
+ bool initialized;
};
/**
@@ -479,13 +549,14 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
struct d40_desc *d;
struct d40_desc *_d;
- list_for_each_entry_safe(d, _d, &d40c->client, node)
+ list_for_each_entry_safe(d, _d, &d40c->client, node) {
if (async_tx_test_ack(&d->txd)) {
d40_desc_remove(d);
desc = d;
memset(desc, 0, sizeof(*desc));
break;
}
+ }
}
if (!desc)
@@ -536,6 +607,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
bool cyclic = desc->cyclic;
int curr_lcla = -EINVAL;
int first_lcla = 0;
+ bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
bool linkback;
/*
@@ -608,11 +680,16 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
&lli->src[lli_current],
next_lcla, flags);
- dma_sync_single_range_for_device(chan->base->dev,
- pool->dma_addr, lcla_offset,
- 2 * sizeof(struct d40_log_lli),
- DMA_TO_DEVICE);
-
+ /*
+ * Cache maintenance is not needed if lcla is
+ * mapped in esram
+ */
+ if (!use_esram_lcla) {
+ dma_sync_single_range_for_device(chan->base->dev,
+ pool->dma_addr, lcla_offset,
+ 2 * sizeof(struct d40_log_lli),
+ DMA_TO_DEVICE);
+ }
curr_lcla = next_lcla;
if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
@@ -740,7 +817,61 @@ static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
return len;
}
-/* Support functions for logical channels */
+
+#ifdef CONFIG_PM
+static void dma40_backup(void __iomem *baseaddr, u32 *backup,
+ u32 *regaddr, int num, bool save)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ void __iomem *addr = baseaddr + regaddr[i];
+
+ if (save)
+ backup[i] = readl_relaxed(addr);
+ else
+ writel_relaxed(backup[i], addr);
+ }
+}
+
+static void d40_save_restore_registers(struct d40_base *base, bool save)
+{
+ int i;
+
+ /* Save/Restore channel specific registers */
+ for (i = 0; i < base->num_phy_chans; i++) {
+ void __iomem *addr;
+ int idx;
+
+ if (base->phy_res[i].reserved)
+ continue;
+
+ addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
+ idx = i * ARRAY_SIZE(d40_backup_regs_chan);
+
+ dma40_backup(addr, &base->reg_val_backup_chan[idx],
+ d40_backup_regs_chan,
+ ARRAY_SIZE(d40_backup_regs_chan),
+ save);
+ }
+
+ /* Save/Restore global registers */
+ dma40_backup(base->virtbase, base->reg_val_backup,
+ d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
+ save);
+
+ /* Save/Restore registers only existing on dma40 v3 and later */
+ if (base->rev >= 3)
+ dma40_backup(base->virtbase, base->reg_val_backup_v3,
+ d40_backup_regs_v3,
+ ARRAY_SIZE(d40_backup_regs_v3),
+ save);
+}
+#else
+static void d40_save_restore_registers(struct d40_base *base, bool save)
+{
+}
+#endif
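dma40_backup() is deliberately symmetric: one register list drives both directions, with the save flag selecting readl_relaxed() versus writel_relaxed(). The PM paths would then bracket the power transition as sketched below (the call sites are an assumption based on d40_save_restore_registers() above):

	/* Going down: capture register state before power is cut. */
	d40_save_restore_registers(base, true);
	/* ... power off the DMA hardware ... */

	/* Coming up: replay the captured values into the live registers. */
	d40_save_restore_registers(base, false);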
static int d40_channel_execute_command(struct d40_chan *d40c,
enum d40_command command)
@@ -973,6 +1104,10 @@ static void d40_config_write(struct d40_chan *d40c)
/* Set LIDX for lcla */
writel(lidx, chanbase + D40_CHAN_REG_SSELT);
writel(lidx, chanbase + D40_CHAN_REG_SDELT);
+
+ /* Clear LNK which will be used by d40_chan_has_events() */
+ writel(0, chanbase + D40_CHAN_REG_SSLNK);
+ writel(0, chanbase + D40_CHAN_REG_SDLNK);
}
}
@@ -1013,6 +1148,7 @@ static int d40_pause(struct d40_chan *d40c)
if (!d40c->busy)
return 0;
+ pm_runtime_get_sync(d40c->base->dev);
spin_lock_irqsave(&d40c->lock, flags);
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
@@ -1025,7 +1161,8 @@ static int d40_pause(struct d40_chan *d40c)
D40_DMA_RUN);
}
}
-
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
spin_unlock_irqrestore(&d40c->lock, flags);
return res;
}
@@ -1039,7 +1176,7 @@ static int d40_resume(struct d40_chan *d40c)
return 0;
spin_lock_irqsave(&d40c->lock, flags);
-
+ pm_runtime_get_sync(d40c->base->dev);
if (d40c->base->rev == 0)
if (chan_is_logical(d40c)) {
res = d40_channel_execute_command(d40c,
@@ -1057,6 +1194,8 @@ static int d40_resume(struct d40_chan *d40c)
}
no_suspend:
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
spin_unlock_irqrestore(&d40c->lock, flags);
return res;
}
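d40_pause() and d40_resume() now bracket every hardware access with the runtime-PM autosuspend idiom: pm_runtime_get_sync() powers the controller up (resuming it first if necessary), and instead of a plain put, pm_runtime_mark_last_busy() plus pm_runtime_put_autosuspend() arm the DMA40_AUTOSUSPEND_DELAY timer so that back-to-back channel operations don't bounce the power state. The skeleton of the pattern:

	pm_runtime_get_sync(d40c->base->dev);	/* device is powered from here on */

	/* ... poke channel registers ... */

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);	/* may suspend after the delay */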
@@ -1129,7 +1268,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
d40d = d40_first_queued(d40c);
if (d40d != NULL) {
- d40c->busy = true;
+ if (!d40c->busy)
+ d40c->busy = true;
+
+ pm_runtime_get_sync(d40c->base->dev);
/* Remove from queue */
d40_desc_remove(d40d);
@@ -1190,6 +1332,8 @@ static void dma_tc_handle(struct d40_chan *d40c)
if (d40_queue_start(d40c) == NULL)
d40c->busy = false;
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
}
d40c->pending_tx++;
@@ -1405,11 +1549,16 @@ static int d40_validate_conf(struct d40_chan *d40c,
return res;
}
-static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
- int log_event_line, bool is_log)
+static bool d40_alloc_mask_set(struct d40_phy_res *phy,
+ bool is_src, int log_event_line, bool is_log,
+ bool *first_user)
{
unsigned long flags;
spin_lock_irqsave(&phy->lock, flags);
+
+ *first_user = ((phy->allocated_src | phy->allocated_dst)
+ == D40_ALLOC_FREE);
+
if (!is_log) {
/* Physical interrupts are masked per physical full channel */
if (phy->allocated_src == D40_ALLOC_FREE &&
@@ -1490,7 +1639,7 @@ out:
return is_free;
}
-static int d40_allocate_channel(struct d40_chan *d40c)
+static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
{
int dev_type;
int event_group;
@@ -1526,7 +1675,8 @@ static int d40_allocate_channel(struct d40_chan *d40c)
for (i = 0; i < d40c->base->num_phy_chans; i++) {
if (d40_alloc_mask_set(&phys[i], is_src,
- 0, is_log))
+ 0, is_log,
+ first_phy_user))
goto found_phy;
}
} else
@@ -1536,7 +1686,8 @@ static int d40_allocate_channel(struct d40_chan *d40c)
if (d40_alloc_mask_set(&phys[i],
is_src,
0,
- is_log))
+ is_log,
+ first_phy_user))
goto found_phy;
}
}
@@ -1552,6 +1703,25 @@ found_phy:
/* Find logical channel */
for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
int phy_num = j + event_group * 2;
+
+ if (d40c->dma_cfg.use_fixed_channel) {
+ i = d40c->dma_cfg.phy_channel;
+
+ if ((i != phy_num) && (i != phy_num + 1)) {
+ dev_err(chan2dev(d40c),
+ "invalid fixed phy channel %d\n", i);
+ return -EINVAL;
+ }
+
+ if (d40_alloc_mask_set(&phys[i], is_src, event_line,
+ is_log, first_phy_user))
+ goto found_log;
+
+ dev_err(chan2dev(d40c),
+ "could not allocate fixed phy channel %d\n", i);
+ return -EINVAL;
+ }
+
/*
* Spread logical channels across all available physical rather
* than pack every logical channel at the first available phy
@@ -1560,13 +1730,15 @@ found_phy:
if (is_src) {
for (i = phy_num; i < phy_num + 2; i++) {
if (d40_alloc_mask_set(&phys[i], is_src,
- event_line, is_log))
+ event_line, is_log,
+ first_phy_user))
goto found_log;
}
} else {
for (i = phy_num + 1; i >= phy_num; i--) {
if (d40_alloc_mask_set(&phys[i], is_src,
- event_line, is_log))
+ event_line, is_log,
+ first_phy_user))
goto found_log;
}
}
@@ -1643,10 +1815,11 @@ static int d40_free_dma(struct d40_chan *d40c)
return -EINVAL;
}
+ pm_runtime_get_sync(d40c->base->dev);
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
if (res) {
chan_err(d40c, "suspend failed\n");
- return res;
+ goto out;
}
if (chan_is_logical(d40c)) {
@@ -1664,13 +1837,11 @@ static int d40_free_dma(struct d40_chan *d40c)
if (d40_chan_has_events(d40c)) {
res = d40_channel_execute_command(d40c,
D40_DMA_RUN);
- if (res) {
+ if (res)
chan_err(d40c,
"Executing RUN command\n");
- return res;
- }
}
- return 0;
+ goto out;
}
} else {
(void) d40_alloc_mask_free(phy, is_src, 0);
@@ -1680,13 +1851,23 @@ static int d40_free_dma(struct d40_chan *d40c)
res = d40_channel_execute_command(d40c, D40_DMA_STOP);
if (res) {
chan_err(d40c, "Failed to stop channel\n");
- return res;
+ goto out;
}
+
+ if (d40c->busy) {
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
+ }
+
+ d40c->busy = false;
d40c->phy_chan = NULL;
d40c->configured = false;
d40c->base->lookup_phy_chans[phy->num] = NULL;
+out:
- return 0;
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
+ return res;
}
static bool d40_is_paused(struct d40_chan *d40c)
@@ -1855,7 +2036,7 @@ err:
}
static dma_addr_t
-d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
+d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
{
struct stedma40_platform_data *plat = chan->base->plat_data;
struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
@@ -1864,9 +2045,9 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
if (chan->runtime_addr)
return chan->runtime_addr;
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
addr = plat->dev_rx[cfg->src_dev_type];
- else if (direction == DMA_TO_DEVICE)
+ else if (direction == DMA_MEM_TO_DEV)
addr = plat->dev_tx[cfg->dst_dev_type];
return addr;
@@ -1875,7 +2056,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
static struct dma_async_tx_descriptor *
d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
struct scatterlist *sg_dst, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long dma_flags)
+ enum dma_transfer_direction direction, unsigned long dma_flags)
{
struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
dma_addr_t src_dev_addr = 0;
@@ -1902,9 +2083,9 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
if (direction != DMA_NONE) {
dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
src_dev_addr = dev_addr;
- else if (direction == DMA_TO_DEVICE)
+ else if (direction == DMA_MEM_TO_DEV)
dst_dev_addr = dev_addr;
}
@@ -2011,14 +2192,15 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
goto fail;
}
}
- is_free_phy = (d40c->phy_chan == NULL);
- err = d40_allocate_channel(d40c);
+ err = d40_allocate_channel(d40c, &is_free_phy);
if (err) {
chan_err(d40c, "Failed to allocate channel\n");
+ d40c->configured = false;
goto fail;
}
+ pm_runtime_get_sync(d40c->base->dev);
/* Fill in basic CFG register values */
d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
&d40c->dst_def_cfg, chan_is_logical(d40c));
@@ -2038,6 +2220,12 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
}
+ dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
+ chan_is_logical(d40c) ? "logical" : "physical",
+ d40c->phy_chan->num,
+ d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
+
+
/*
* Only write channel configuration to the DMA if the physical
* resource is free. In case of multiple logical channels
@@ -2046,6 +2234,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
if (is_free_phy)
d40_config_write(d40c);
fail:
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
spin_unlock_irqrestore(&d40c->lock, flags);
return err;
}
@@ -2108,10 +2298,10 @@ d40_prep_memcpy_sg(struct dma_chan *chan,
static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl,
unsigned int sg_len,
- enum dma_data_direction direction,
+ enum dma_transfer_direction direction,
unsigned long dma_flags)
{
- if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
+ if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV)
return NULL;
return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
@@ -2120,7 +2310,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
static struct dma_async_tx_descriptor *
dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
size_t buf_len, size_t period_len,
- enum dma_data_direction direction)
+ enum dma_transfer_direction direction)
{
unsigned int periods = buf_len / period_len;
struct dma_async_tx_descriptor *txd;
@@ -2269,7 +2459,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
dst_addr_width = config->dst_addr_width;
dst_maxburst = config->dst_maxburst;
- if (config->direction == DMA_FROM_DEVICE) {
+ if (config->direction == DMA_DEV_TO_MEM) {
dma_addr_t dev_addr_rx =
d40c->base->plat_data->dev_rx[cfg->src_dev_type];
@@ -2292,7 +2482,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
if (dst_maxburst == 0)
dst_maxburst = src_maxburst;
- } else if (config->direction == DMA_TO_DEVICE) {
+ } else if (config->direction == DMA_MEM_TO_DEV) {
dma_addr_t dev_addr_tx =
d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
@@ -2357,7 +2547,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
"configured channel %s for %s, data width %d/%d, "
"maxburst %d/%d elements, LE, no flow control\n",
dma_chan_name(chan),
- (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
+ (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
src_addr_width, dst_addr_width,
src_maxburst, dst_maxburst);
@@ -2519,6 +2709,72 @@ failure1:
return err;
}
+/* Suspend resume functionality */
+#ifdef CONFIG_PM
+static int dma40_pm_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct d40_base *base = platform_get_drvdata(pdev);
+ int ret = 0;
+ if (!pm_runtime_suspended(dev))
+ return -EBUSY;
+
+ if (base->lcpa_regulator)
+ ret = regulator_disable(base->lcpa_regulator);
+ return ret;
+}
+
+static int dma40_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct d40_base *base = platform_get_drvdata(pdev);
+
+ d40_save_restore_registers(base, true);
+
+ /* Don't disable/enable clocks for v1 due to HW bugs */
+ if (base->rev != 1)
+ writel_relaxed(base->gcc_pwr_off_mask,
+ base->virtbase + D40_DREG_GCC);
+
+ return 0;
+}
+
+static int dma40_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct d40_base *base = platform_get_drvdata(pdev);
+
+ if (base->initialized)
+ d40_save_restore_registers(base, false);
+
+ writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
+ base->virtbase + D40_DREG_GCC);
+ return 0;
+}
+
+static int dma40_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct d40_base *base = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ if (base->lcpa_regulator)
+ ret = regulator_enable(base->lcpa_regulator);
+
+ return ret;
+}
+
+static const struct dev_pm_ops dma40_pm_ops = {
+ .suspend = dma40_pm_suspend,
+ .runtime_suspend = dma40_runtime_suspend,
+ .runtime_resume = dma40_runtime_resume,
+ .resume = dma40_resume,
+};
+#define DMA40_PM_OPS (&dma40_pm_ops)
+#else
+#define DMA40_PM_OPS NULL
+#endif
+
/* Initialization functions. */
static int __init d40_phy_res_init(struct d40_base *base)
@@ -2527,6 +2783,7 @@ static int __init d40_phy_res_init(struct d40_base *base)
int num_phy_chans_avail = 0;
u32 val[2];
int odd_even_bit = -2;
+ int gcc = D40_DREG_GCC_ENA;
val[0] = readl(base->virtbase + D40_DREG_PRSME);
val[1] = readl(base->virtbase + D40_DREG_PRSMO);
@@ -2538,9 +2795,17 @@ static int __init d40_phy_res_init(struct d40_base *base)
/* Mark security only channels as occupied */
base->phy_res[i].allocated_src = D40_ALLOC_PHY;
base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
+ base->phy_res[i].reserved = true;
+ gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
+ D40_DREG_GCC_SRC);
+ gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
+ D40_DREG_GCC_DST);
+
+
} else {
base->phy_res[i].allocated_src = D40_ALLOC_FREE;
base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
+ base->phy_res[i].reserved = false;
num_phy_chans_avail++;
}
spin_lock_init(&base->phy_res[i].lock);
@@ -2552,6 +2817,11 @@ static int __init d40_phy_res_init(struct d40_base *base)
base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
+ base->phy_res[chan].reserved = true;
+ gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
+ D40_DREG_GCC_SRC);
+ gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
+ D40_DREG_GCC_DST);
num_phy_chans_avail--;
}
@@ -2572,6 +2842,15 @@ static int __init d40_phy_res_init(struct d40_base *base)
val[0] = val[0] >> 2;
}
+ /*
+ * To keep things simple, enable all clocks initially.
+ * The clocks will be managed later, after channel allocation.
+ * The clocks for the event lines on which reserved channels exist
+ * are not managed here.
+ */
+ writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
+ base->gcc_pwr_off_mask = gcc;
+
return num_phy_chans_avail;
}
@@ -2699,10 +2978,15 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
goto failure;
}
- base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
- sizeof(struct d40_desc *) *
- D40_LCLA_LINK_PER_EVENT_GRP,
+ base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
+ sizeof(d40_backup_regs_chan),
GFP_KERNEL);
+ if (!base->reg_val_backup_chan)
+ goto failure;
+
+ base->lcla_pool.alloc_map =
+ kzalloc(num_phy_chans * sizeof(struct d40_desc *)
+ * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
if (!base->lcla_pool.alloc_map)
goto failure;
@@ -2741,9 +3025,9 @@ failure:
static void __init d40_hw_init(struct d40_base *base)
{
- static const struct d40_reg_val dma_init_reg[] = {
+ static struct d40_reg_val dma_init_reg[] = {
/* Clock every part of the DMA block from start */
- { .reg = D40_DREG_GCC, .val = 0x0000ff01},
+ { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
/* Interrupts on all logical channels */
{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
@@ -2943,11 +3227,31 @@ static int __init d40_probe(struct platform_device *pdev)
d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
goto failure;
}
+ /* If the LCLA has to be located in ESRAM we don't need to allocate it */
+ if (base->plat_data->use_esram_lcla) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "lcla_esram");
+ if (!res) {
+ ret = -ENOENT;
+ d40_err(&pdev->dev,
+ "No \"lcla_esram\" memory resource\n");
+ goto failure;
+ }
+ base->lcla_pool.base = ioremap(res->start,
+ resource_size(res));
+ if (!base->lcla_pool.base) {
+ ret = -ENOMEM;
+ d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
+ goto failure;
+ }
+ writel(res->start, base->virtbase + D40_DREG_LCLA);
- ret = d40_lcla_allocate(base);
- if (ret) {
- d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
- goto failure;
+ } else {
+ ret = d40_lcla_allocate(base);
+ if (ret) {
+ d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
+ goto failure;
+ }
}
spin_lock_init(&base->lcla_pool.lock);
@@ -2960,6 +3264,32 @@ static int __init d40_probe(struct platform_device *pdev)
goto failure;
}
+ pm_runtime_irq_safe(base->dev);
+ pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(base->dev);
+ pm_runtime_enable(base->dev);
+ pm_runtime_resume(base->dev);
+
+ if (base->plat_data->use_esram_lcla) {
+
+ base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
+ if (IS_ERR(base->lcpa_regulator)) {
+ d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
+ base->lcpa_regulator = NULL;
+ goto failure;
+ }
+
+ ret = regulator_enable(base->lcpa_regulator);
+ if (ret) {
+ d40_err(&pdev->dev,
+ "Failed to enable lcpa_regulator\n");
+ regulator_put(base->lcpa_regulator);
+ base->lcpa_regulator = NULL;
+ goto failure;
+ }
+ }
+
+ base->initialized = true;
err = d40_dmaengine_init(base, num_reserved_chans);
if (err)
goto failure;
@@ -2976,6 +3306,11 @@ failure:
if (base->virtbase)
iounmap(base->virtbase);
+ if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
+ iounmap(base->lcla_pool.base);
+ base->lcla_pool.base = NULL;
+ }
+
if (base->lcla_pool.dma_addr)
dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
SZ_1K * base->num_phy_chans,
@@ -2998,6 +3333,11 @@ failure:
clk_put(base->clk);
}
+ if (base->lcpa_regulator) {
+ regulator_disable(base->lcpa_regulator);
+ regulator_put(base->lcpa_regulator);
+ }
+
kfree(base->lcla_pool.alloc_map);
kfree(base->lookup_log_chans);
kfree(base->lookup_phy_chans);
@@ -3013,6 +3353,7 @@ static struct platform_driver d40_driver = {
.driver = {
.owner = THIS_MODULE,
.name = D40_NAME,
+ .pm = DMA40_PM_OPS,
},
};
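
The ste_dma40 changes above all follow the standard runtime-PM reference pattern: take a reference while a channel is active, and drop it with autosuspend once it goes idle. A minimal sketch of that pairing, assuming only the pm_runtime_* calls that appear in the patch (the helper names here are hypothetical):

/* Sketch: runtime-PM pairing as used in the patch above (illustrative). */
#include <linux/pm_runtime.h>

static void chan_activate(struct device *dev)
{
	/* Resume the controller if it was autosuspended. */
	pm_runtime_get_sync(dev);
	/* ... program and start the channel ... */
}

static void chan_idle(struct device *dev)
{
	/* Re-arm the autosuspend timer rather than suspending at once. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

static void setup_runtime_pm(struct device *dev)
{
	pm_runtime_irq_safe(dev);	/* allow runtime-PM calls from atomic context */
	pm_runtime_set_autosuspend_delay(dev, 100);	/* ms, illustrative; the driver uses DMA40_AUTOSUSPEND_DELAY */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}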
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index b44c455158d..8d3d490968a 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -16,6 +16,8 @@
#define D40_TYPE_TO_GROUP(type) (type / 16)
#define D40_TYPE_TO_EVENT(type) (type % 16)
+#define D40_GROUP_SIZE 8
+#define D40_PHYS_TO_GROUP(phys) ((phys & (D40_GROUP_SIZE - 1)) / 2)
/* Most bits of the CFG register are the same in log as in phy mode */
#define D40_SREG_CFG_MST_POS 15
@@ -123,6 +125,15 @@
/* DMA Register Offsets */
#define D40_DREG_GCC 0x000
+#define D40_DREG_GCC_ENA 0x1
+/* This assumes that there are only 4 event groups */
+#define D40_DREG_GCC_ENABLE_ALL 0xff01
+#define D40_DREG_GCC_EVTGRP_POS 8
+#define D40_DREG_GCC_SRC 0
+#define D40_DREG_GCC_DST 1
+#define D40_DREG_GCC_EVTGRP_ENA(x, y) \
+ (1 << (D40_DREG_GCC_EVTGRP_POS + 2 * x + y))
+
#define D40_DREG_PRTYP 0x004
#define D40_DREG_PRSME 0x008
#define D40_DREG_PRSMO 0x00C
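
As a worked example of the new clock-gating helpers: physical channel 5 belongs to event group (5 & 7) / 2 = 2, so enabling its source-side clock sets bit 8 + 2*2 + 0 = 12. The values below follow directly from the defines in the hunk above:

/* Worked example for the GCC bit helpers (values derived by hand).   */
/* D40_PHYS_TO_GROUP(5)                         == (5 & 7) / 2 == 2   */
/* D40_DREG_GCC_EVTGRP_ENA(2, D40_DREG_GCC_SRC) == 1 << 12 == 0x1000  */
/* D40_DREG_GCC_EVTGRP_ENA(2, D40_DREG_GCC_DST) == 1 << 13 == 0x2000  */
/* D40_DREG_GCC_ENABLE_ALL == 0xff01: all eight event-group bits      */
/* (four groups x src/dst, bits 8..15) plus D40_DREG_GCC_ENA (bit 0). */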
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index a4a398f2ef6..a6f9c1684a0 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -90,7 +90,7 @@ struct timb_dma_chan {
struct list_head queue;
struct list_head free_list;
unsigned int bytes_per_line;
- enum dma_data_direction direction;
+ enum dma_transfer_direction direction;
unsigned int descs; /* Descriptors to allocate */
unsigned int desc_elems; /* number of elems per descriptor */
};
@@ -166,10 +166,10 @@ static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
if (single)
dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
- td_chan->direction);
+ DMA_TO_DEVICE);
else
dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
- td_chan->direction);
+ DMA_TO_DEVICE);
}
static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
@@ -235,7 +235,7 @@ static void __td_start_dma(struct timb_dma_chan *td_chan)
"td_chan: %p, chan: %d, membase: %p\n",
td_chan, td_chan->chan.chan_id, td_chan->membase);
- if (td_chan->direction == DMA_FROM_DEVICE) {
+ if (td_chan->direction == DMA_DEV_TO_MEM) {
/* descriptor address */
iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
@@ -278,7 +278,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
txd->cookie);
/* make sure to stop the transfer */
- if (td_chan->direction == DMA_FROM_DEVICE)
+ if (td_chan->direction == DMA_DEV_TO_MEM)
iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
/* Currently no support for stopping DMA transfers
else
@@ -558,7 +558,7 @@ static void td_issue_pending(struct dma_chan *chan)
static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags)
{
struct timb_dma_chan *td_chan =
container_of(chan, struct timb_dma_chan, chan);
@@ -606,7 +606,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
}
dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
- td_desc->desc_list_len, DMA_TO_DEVICE);
+ td_desc->desc_list_len, DMA_MEM_TO_DEV);
return &td_desc->txd;
}
@@ -775,8 +775,8 @@ static int __devinit td_probe(struct platform_device *pdev)
td_chan->descs = pchan->descriptors;
td_chan->desc_elems = pchan->descriptor_elements;
td_chan->bytes_per_line = pchan->bytes_per_line;
- td_chan->direction = pchan->rx ? DMA_FROM_DEVICE :
- DMA_TO_DEVICE;
+ td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
+ DMA_MEM_TO_DEV;
td_chan->membase = td->membase +
(i / 2) * TIMBDMA_INSTANCE_OFFSET +
@@ -841,17 +841,7 @@ static struct platform_driver td_driver = {
.remove = __exit_p(td_remove),
};
-static int __init td_init(void)
-{
- return platform_driver_register(&td_driver);
-}
-module_init(td_init);
-
-static void __exit td_exit(void)
-{
- platform_driver_unregister(&td_driver);
-}
-module_exit(td_exit);
+module_platform_driver(td_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index cbd83e362b5..6122c364cf1 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -845,7 +845,7 @@ txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
static struct dma_async_tx_descriptor *
txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
@@ -860,9 +860,9 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
BUG_ON(!ds || !ds->reg_width);
if (ds->tx_reg)
- BUG_ON(direction != DMA_TO_DEVICE);
+ BUG_ON(direction != DMA_MEM_TO_DEV);
else
- BUG_ON(direction != DMA_FROM_DEVICE);
+ BUG_ON(direction != DMA_DEV_TO_MEM);
if (unlikely(!sg_len))
return NULL;
@@ -882,7 +882,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
mem = sg_dma_address(sg);
if (__is_dmac64(ddev)) {
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
desc->hwdesc.SAR = mem;
desc->hwdesc.DAR = ds->tx_reg;
} else {
@@ -891,7 +891,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
desc->hwdesc.CNTR = sg_dma_len(sg);
} else {
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
desc->hwdesc32.SAR = mem;
desc->hwdesc32.DAR = ds->tx_reg;
} else {
@@ -900,7 +900,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
desc->hwdesc32.CNTR = sg_dma_len(sg);
}
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
sai = ds->reg_width;
dai = 0;
} else {
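
The remaining dmaengine hunks in this section are the same mechanical conversion: prep callbacks and per-channel state move from the DMA-mapping enum to the dedicated transfer-direction enum. The mapping, with the numeric values the two kernel enums happen to share for these members (which is why mixed usage kept working during the transition):

/* enum dma_data_direction            ->  enum dma_transfer_direction */
/* DMA_TO_DEVICE   (== 1, mem to dev) ->  DMA_MEM_TO_DEV (== 1)       */
/* DMA_FROM_DEVICE (== 2, dev to mem) ->  DMA_DEV_TO_MEM (== 2)       */
/* The dma_map/unmap and dma_sync helpers still take                  */
/* dma_data_direction, which is why __td_unmap_desc() above now       */
/* passes DMA_TO_DEVICE explicitly instead of the channel direction.  */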
diff --git a/drivers/media/common/tuners/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c
index 27555995f7e..b5ee3ebfcfc 100644
--- a/drivers/media/common/tuners/tuner-xc2028.c
+++ b/drivers/media/common/tuners/tuner-xc2028.c
@@ -24,6 +24,21 @@
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
+/* Registers (Write-only) */
+#define XREG_INIT 0x00
+#define XREG_RF_FREQ 0x02
+#define XREG_POWER_DOWN 0x08
+
+/* Registers (Read-only) */
+#define XREG_FREQ_ERROR 0x01
+#define XREG_LOCK 0x02
+#define XREG_VERSION 0x04
+#define XREG_PRODUCT_ID 0x08
+#define XREG_HSYNC_FREQ 0x10
+#define XREG_FRAME_LINES 0x20
+#define XREG_SNR 0x40
+
+#define XREG_ADC_ENV 0x0100
static int debug;
module_param(debug, int, 0644);
@@ -885,7 +900,7 @@ static int xc2028_signal(struct dvb_frontend *fe, u16 *strength)
mutex_lock(&priv->lock);
/* Sync Lock Indicator */
- rc = xc2028_get_reg(priv, 0x0002, &frq_lock);
+ rc = xc2028_get_reg(priv, XREG_LOCK, &frq_lock);
if (rc < 0)
goto ret;
@@ -894,7 +909,7 @@ static int xc2028_signal(struct dvb_frontend *fe, u16 *strength)
signal = 1 << 11;
/* Get SNR of the video signal */
- rc = xc2028_get_reg(priv, 0x0040, &signal);
+ rc = xc2028_get_reg(priv, XREG_SNR, &signal);
if (rc < 0)
goto ret;
@@ -1019,9 +1034,9 @@ static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
/* CMD= Set frequency */
if (priv->firm_version < 0x0202)
- rc = send_seq(priv, {0x00, 0x02, 0x00, 0x00});
+ rc = send_seq(priv, {0x00, XREG_RF_FREQ, 0x00, 0x00});
else
- rc = send_seq(priv, {0x80, 0x02, 0x00, 0x00});
+ rc = send_seq(priv, {0x80, XREG_RF_FREQ, 0x00, 0x00});
if (rc < 0)
goto ret;
@@ -1201,9 +1216,9 @@ static int xc2028_sleep(struct dvb_frontend *fe)
mutex_lock(&priv->lock);
if (priv->firm_version < 0x0202)
- rc = send_seq(priv, {0x00, 0x08, 0x00, 0x00});
+ rc = send_seq(priv, {0x00, XREG_POWER_DOWN, 0x00, 0x00});
else
- rc = send_seq(priv, {0x80, 0x08, 0x00, 0x00});
+ rc = send_seq(priv, {0x80, XREG_POWER_DOWN, 0x00, 0x00});
priv->cur_fw.type = 0; /* need firmware reload */
diff --git a/drivers/media/common/tuners/xc4000.c b/drivers/media/common/tuners/xc4000.c
index d218c1d68c3..68397110b7d 100644
--- a/drivers/media/common/tuners/xc4000.c
+++ b/drivers/media/common/tuners/xc4000.c
@@ -154,6 +154,8 @@ struct xc4000_priv {
#define XREG_SNR 0x06
#define XREG_VERSION 0x07
#define XREG_PRODUCT_ID 0x08
+#define XREG_SIGNAL_LEVEL 0x0A
+#define XREG_NOISE_LEVEL 0x0B
/*
Basic firmware description. This will remain with
@@ -486,6 +488,16 @@ static int xc_get_quality(struct xc4000_priv *priv, u16 *quality)
return xc4000_readreg(priv, XREG_QUALITY, quality);
}
+static int xc_get_signal_level(struct xc4000_priv *priv, u16 *signal)
+{
+ return xc4000_readreg(priv, XREG_SIGNAL_LEVEL, signal);
+}
+
+static int xc_get_noise_level(struct xc4000_priv *priv, u16 *noise)
+{
+ return xc4000_readreg(priv, XREG_NOISE_LEVEL, noise);
+}
+
static u16 xc_wait_for_lock(struct xc4000_priv *priv)
{
u16 lock_state = 0;
@@ -1089,6 +1101,8 @@ static void xc_debug_dump(struct xc4000_priv *priv)
u32 hsync_freq_hz = 0;
u16 frame_lines;
u16 quality;
+ u16 signal = 0;
+ u16 noise = 0;
u8 hw_majorversion = 0, hw_minorversion = 0;
u8 fw_majorversion = 0, fw_minorversion = 0;
@@ -1119,6 +1133,12 @@ static void xc_debug_dump(struct xc4000_priv *priv)
xc_get_quality(priv, &quality);
dprintk(1, "*** Quality (0:<8dB, 7:>56dB) = %d\n", quality);
+
+ xc_get_signal_level(priv, &signal);
+ dprintk(1, "*** Signal level = -%ddB (%d)\n", signal >> 8, signal);
+
+ xc_get_noise_level(priv, &noise);
+ dprintk(1, "*** Noise level = %ddB (%d)\n", noise >> 8, noise);
}
static int xc4000_set_params(struct dvb_frontend *fe)
@@ -1432,6 +1452,71 @@ fail:
return ret;
}
+static int xc4000_get_signal(struct dvb_frontend *fe, u16 *strength)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ u16 value = 0;
+ int rc;
+
+ mutex_lock(&priv->lock);
+ rc = xc4000_readreg(priv, XREG_SIGNAL_LEVEL, &value);
+ mutex_unlock(&priv->lock);
+
+ if (rc < 0)
+ goto ret;
+
+ /* Information from real testing of the DVB-T and radio parts:
+    the coefficient for one dB is 0xff.
+ */
+ tuner_dbg("Signal strength: -%ddB (%05d)\n", value >> 8, value);
+
+ /* all known digital modes */
+ if ((priv->video_standard == XC4000_DTV6) ||
+ (priv->video_standard == XC4000_DTV7) ||
+ (priv->video_standard == XC4000_DTV7_8) ||
+ (priv->video_standard == XC4000_DTV8))
+ goto digital;
+
+ /* In analog mode the NOISE LEVEL is what matters: the signal
+    level depends only on the gain of the antenna and amplifiers,
+    so it says nothing about the real quality of reception.
+ */
+ mutex_lock(&priv->lock);
+ rc = xc4000_readreg(priv, XREG_NOISE_LEVEL, &value);
+ mutex_unlock(&priv->lock);
+
+ tuner_dbg("Noise level: %ddB (%05d)\n", value >> 8, value);
+
+ /* highest noise level: 32dB */
+ if (value >= 0x2000) {
+ value = 0;
+ } else {
+ value = ~value << 3;
+ }
+
+ goto ret;
+
+ /* In digital mode the SIGNAL LEVEL is what matters; the real
+    noise level is stored in the demodulator registers.
+ */
+digital:
+ /* best signal: -50dB */
+ if (value <= 0x3200) {
+ value = 0xffff;
+ /* minimum: -114dB - should be 0x7200 but real zero is 0x713A */
+ } else if (value >= 0x713A) {
+ value = 0;
+ } else {
+ value = ~(value - 0x3200) << 2;
+ }
+
+ret:
+ *strength = value;
+
+ return rc;
+}
+
static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
{
struct xc4000_priv *priv = fe->tuner_priv;
@@ -1559,6 +1644,7 @@ static const struct dvb_tuner_ops xc4000_tuner_ops = {
.set_params = xc4000_set_params,
.set_analog_params = xc4000_set_analog_params,
.get_frequency = xc4000_get_frequency,
+ .get_rf_strength = xc4000_get_signal,
.get_bandwidth = xc4000_get_bandwidth,
.get_status = xc4000_get_status
};
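
The scaling in xc4000_get_signal() can be checked by hand: the register reads roughly 0xff per dB (so value >> 8 approximates the dB figure), and the digital branch maps the usable range from -50dB (0x3200) down to the measured zero point (0x713A) onto 0xffff..0. A standalone sketch mirroring that branch, assuming the same raw register semantics as the driver:

#include <stdint.h>

/* Digital-mode strength mapping, as in xc4000_get_signal() above. */
static uint16_t scale_digital(uint16_t value)
{
	if (value <= 0x3200)	/* best signal: -50dB or stronger */
		return 0xffff;
	if (value >= 0x713A)	/* nominal -114dB; measured zero point */
		return 0;
	return ~(value - 0x3200) << 2;	/* truncates to 16 bits, as in the driver */
}

/* Example: value = 0x3300 (about -51dB) -> ~(0x0100) << 2 -> 0xfbfc. */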
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index b15db4fe347..fbbe545a74c 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -904,8 +904,11 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int i;
+ u32 delsys;
+ delsys = c->delivery_system;
memset(c, 0, sizeof(struct dtv_frontend_properties));
+ c->delivery_system = delsys;
c->state = DTV_CLEAR;
@@ -1009,25 +1012,6 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
_DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 1, 0),
_DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 1, 0),
- _DTV_CMD(DTV_ISDBT_PARTIAL_RECEPTION, 0, 0),
- _DTV_CMD(DTV_ISDBT_SOUND_BROADCASTING, 0, 0),
- _DTV_CMD(DTV_ISDBT_SB_SUBCHANNEL_ID, 0, 0),
- _DTV_CMD(DTV_ISDBT_SB_SEGMENT_IDX, 0, 0),
- _DTV_CMD(DTV_ISDBT_SB_SEGMENT_COUNT, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYER_ENABLED, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERA_FEC, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERA_MODULATION, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERA_SEGMENT_COUNT, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERA_TIME_INTERLEAVING, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERB_FEC, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERB_MODULATION, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERB_SEGMENT_COUNT, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERB_TIME_INTERLEAVING, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERC_FEC, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERC_MODULATION, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 0, 0),
-
_DTV_CMD(DTV_ISDBS_TS_ID, 1, 0),
_DTV_CMD(DTV_DVBT2_PLP_ID, 1, 0),
@@ -1413,6 +1397,15 @@ static int set_delivery_system(struct dvb_frontend *fe, u32 desired_system)
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
enum dvbv3_emulation_type type;
+ /*
+ * It was reported that some old DVBv5 applications were
+ * filling delivery_system with SYS_UNDEFINED. If this happens,
+ * assume that the application wants to use the first supported
+ * delivery system.
+ */
+ if (c->delivery_system == SYS_UNDEFINED)
+ c->delivery_system = fe->ops.delsys[0];
+
if (desired_system == SYS_UNDEFINED) {
/*
* A DVBv3 call doesn't know what's the desired system.
@@ -1732,6 +1725,7 @@ static int dvb_frontend_ioctl_properties(struct file *file,
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_frontend *fe = dvbdev->priv;
+ struct dvb_frontend_private *fepriv = fe->frontend_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int err = 0;
@@ -1798,9 +1792,14 @@ static int dvb_frontend_ioctl_properties(struct file *file,
/*
* Fills the cache out struct with the cache contents, plus
- * the data retrieved from get_frontend.
+ * the data retrieved from get_frontend, if the frontend
+ * is not idle. Otherwise, returns the cached content
*/
- dtv_get_frontend(fe, NULL);
+ if (fepriv->state != FESTATE_IDLE) {
+ err = dtv_get_frontend(fe, NULL);
+ if (err < 0)
+ goto out;
+ }
for (i = 0; i < tvps->num; i++) {
err = dtv_property_process_get(fe, c, tvp + i, file);
if (err < 0)
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index d66192974d6..1455e2644ab 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -877,24 +877,18 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
case ANYSEE_HW_508T2C: /* 20 */
/* E7 T2C */
+ if (state->fe_id)
+ break;
+
/* enable DVB-T/T2/C demod on IOE[5] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 5), 0x20);
if (ret)
goto error;
- if (state->fe_id == 0) {
- /* DVB-T/T2 */
- adap->fe_adap[state->fe_id].fe =
- dvb_attach(cxd2820r_attach,
- &anysee_cxd2820r_config,
- &adap->dev->i2c_adap, NULL);
- } else {
- /* DVB-C */
- adap->fe_adap[state->fe_id].fe =
- dvb_attach(cxd2820r_attach,
- &anysee_cxd2820r_config,
- &adap->dev->i2c_adap, adap->fe_adap[0].fe);
- }
+ /* attach demod */
+ adap->fe_adap[state->fe_id].fe = dvb_attach(cxd2820r_attach,
+ &anysee_cxd2820r_config, &adap->dev->i2c_adap,
+ NULL);
state->has_ci = true;
diff --git a/drivers/media/dvb/dvb-usb/dib0700.h b/drivers/media/dvb/dvb-usb/dib0700.h
index 9bd6d51b3b9..7de125c0b36 100644
--- a/drivers/media/dvb/dvb-usb/dib0700.h
+++ b/drivers/media/dvb/dvb-usb/dib0700.h
@@ -48,6 +48,8 @@ struct dib0700_state {
u8 disable_streaming_master_mode;
u32 fw_version;
u32 nb_packet_buffer_size;
+ int (*read_status)(struct dvb_frontend *, fe_status_t *);
+ int (*sleep)(struct dvb_frontend* fe);
u8 buf[255];
};
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index 206999476f0..070e82aa53f 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -834,6 +834,7 @@ static struct usb_driver dib0700_driver = {
module_usb_driver(dib0700_driver);
+MODULE_FIRMWARE("dvb-usb-dib0700-1.20.fw");
MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
MODULE_DESCRIPTION("Driver for devices based on DiBcom DiB0700 - USB bridge");
MODULE_VERSION("1.0");
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index 81ef4b46f79..f9e966aa26e 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -3066,19 +3066,25 @@ static struct dib7000p_config stk7070pd_dib7000p_config[2] = {
}
};
-static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap)
+static void stk7070pd_init(struct dvb_usb_device *dev)
{
- dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
+ dib0700_set_gpio(dev, GPIO6, GPIO_OUT, 1);
msleep(10);
- dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
- dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1);
- dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1);
- dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
+ dib0700_set_gpio(dev, GPIO9, GPIO_OUT, 1);
+ dib0700_set_gpio(dev, GPIO4, GPIO_OUT, 1);
+ dib0700_set_gpio(dev, GPIO7, GPIO_OUT, 1);
+ dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 0);
- dib0700_ctrl_clock(adap->dev, 72, 1);
+ dib0700_ctrl_clock(dev, 72, 1);
msleep(10);
- dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
+ dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 1);
+}
+
+static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap)
+{
+ stk7070pd_init(adap->dev);
+
msleep(10);
dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
@@ -3099,6 +3105,77 @@ static int stk7070pd_frontend_attach1(struct dvb_usb_adapter *adap)
return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
+static int novatd_read_status_override(struct dvb_frontend *fe,
+ fe_status_t *stat)
+{
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct dvb_usb_device *dev = adap->dev;
+ struct dib0700_state *state = dev->priv;
+ int ret;
+
+ ret = state->read_status(fe, stat);
+
+ if (!ret)
+ dib0700_set_gpio(dev, adap->id == 0 ? GPIO1 : GPIO0, GPIO_OUT,
+ !!(*stat & FE_HAS_LOCK));
+
+ return ret;
+}
+
+static int novatd_sleep_override(struct dvb_frontend* fe)
+{
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct dvb_usb_device *dev = adap->dev;
+ struct dib0700_state *state = dev->priv;
+
+ /* turn off LED */
+ dib0700_set_gpio(dev, adap->id == 0 ? GPIO1 : GPIO0, GPIO_OUT, 0);
+
+ return state->sleep(fe);
+}
+
+/**
+ * novatd_frontend_attach - Nova-TD specific attach
+ *
+ * Nova-TD uses GPIO0, 1 and 2 for LEDs, so do not touch them except to
+ * signal status information.
+ */
+static int novatd_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct dvb_usb_device *dev = adap->dev;
+ struct dib0700_state *st = dev->priv;
+
+ if (adap->id == 0) {
+ stk7070pd_init(dev);
+
+ /* turn the power LED on, the other two off (just in case) */
+ dib0700_set_gpio(dev, GPIO0, GPIO_OUT, 0);
+ dib0700_set_gpio(dev, GPIO1, GPIO_OUT, 0);
+ dib0700_set_gpio(dev, GPIO2, GPIO_OUT, 1);
+
+ if (dib7000p_i2c_enumeration(&dev->i2c_adap, 2, 18,
+ stk7070pd_dib7000p_config) != 0) {
+ err("%s: dib7000p_i2c_enumeration failed. Cannot continue\n",
+ __func__);
+ return -ENODEV;
+ }
+ }
+
+ adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, &dev->i2c_adap,
+ adap->id == 0 ? 0x80 : 0x82,
+ &stk7070pd_dib7000p_config[adap->id]);
+
+ if (adap->fe_adap[0].fe == NULL)
+ return -ENODEV;
+
+ st->read_status = adap->fe_adap[0].fe->ops.read_status;
+ adap->fe_adap[0].fe->ops.read_status = novatd_read_status_override;
+ st->sleep = adap->fe_adap[0].fe->ops.sleep;
+ adap->fe_adap[0].fe->ops.sleep = novatd_sleep_override;
+
+ return 0;
+}
+
/* S5H1411 */
static struct s5h1411_config pinnacle_801e_config = {
.output_mode = S5H1411_PARALLEL_OUTPUT,
@@ -3870,6 +3947,57 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.pid_filter_count = 32,
.pid_filter = stk70x0p_pid_filter,
.pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
+ .frontend_attach = novatd_frontend_attach,
+ .tuner_attach = dib7070p_tuner_attach,
+
+ DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+ }},
+ .size_of_priv = sizeof(struct dib0700_adapter_state),
+ }, {
+ .num_frontends = 1,
+ .fe = {{
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+ .pid_filter_count = 32,
+ .pid_filter = stk70x0p_pid_filter,
+ .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
+ .frontend_attach = novatd_frontend_attach,
+ .tuner_attach = dib7070p_tuner_attach,
+
+ DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
+ }},
+ .size_of_priv = sizeof(struct dib0700_adapter_state),
+ }
+ },
+
+ .num_device_descs = 1,
+ .devices = {
+ { "Hauppauge Nova-TD Stick (52009)",
+ { &dib0700_usb_id_table[35], NULL },
+ { NULL },
+ },
+ },
+
+ .rc.core = {
+ .rc_interval = DEFAULT_RC_INTERVAL,
+ .rc_codes = RC_MAP_DIB0700_RC5_TABLE,
+ .module_name = "dib0700",
+ .rc_query = dib0700_rc_query_old_firmware,
+ .allowed_protos = RC_TYPE_RC5 |
+ RC_TYPE_RC6 |
+ RC_TYPE_NEC,
+ .change_protocol = dib0700_change_protocol,
+ },
+ }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 2,
+ .adapter = {
+ {
+ .num_frontends = 1,
+ .fe = {{
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+ .pid_filter_count = 32,
+ .pid_filter = stk70x0p_pid_filter,
+ .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
.frontend_attach = stk7070pd_frontend_attach0,
.tuner_attach = dib7070p_tuner_attach,
@@ -3892,7 +4020,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
}
},
- .num_device_descs = 6,
+ .num_device_descs = 5,
.devices = {
{ "DiBcom STK7070PD reference design",
{ &dib0700_usb_id_table[17], NULL },
@@ -3902,10 +4030,6 @@ struct dvb_usb_device_properties dib0700_devices[] = {
{ &dib0700_usb_id_table[18], NULL },
{ NULL },
},
- { "Hauppauge Nova-TD Stick (52009)",
- { &dib0700_usb_id_table[35], NULL },
- { NULL },
- },
{ "Hauppauge Nova-TD-500 (84xxx)",
{ &dib0700_usb_id_table[36], NULL },
{ NULL },
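
The Nova-TD support added above relies on a common frontend technique: save the demod's original ops callbacks and interpose wrappers that call through and add the LED handling. In isolation the pattern looks like this (update_lock_led() is a hypothetical stand-in for the GPIO call):

/* Save-and-wrap pattern from novatd_frontend_attach() (sketch). */
static int (*orig_read_status)(struct dvb_frontend *, fe_status_t *);

static int wrapped_read_status(struct dvb_frontend *fe, fe_status_t *stat)
{
	int ret = orig_read_status(fe, stat);

	if (!ret)
		update_lock_led(fe, !!(*stat & FE_HAS_LOCK));	/* hypothetical helper */
	return ret;
}

/* At attach time:
 *	orig_read_status = fe->ops.read_status;
 *	fe->ops.read_status = wrapped_read_status;
 */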
diff --git a/drivers/media/dvb/frontends/cxd2820r_core.c b/drivers/media/dvb/frontends/cxd2820r_core.c
index 93e1b12e790..caae7f79c83 100644
--- a/drivers/media/dvb/frontends/cxd2820r_core.c
+++ b/drivers/media/dvb/frontends/cxd2820r_core.c
@@ -309,9 +309,14 @@ static int cxd2820r_read_status(struct dvb_frontend *fe, fe_status_t *status)
static int cxd2820r_get_frontend(struct dvb_frontend *fe)
{
+ struct cxd2820r_priv *priv = fe->demodulator_priv;
int ret;
dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
+
+ if (priv->delivery_system == SYS_UNDEFINED)
+ return 0;
+
switch (fe->dtv_property_cache.delivery_system) {
case SYS_DVBT:
ret = cxd2820r_get_frontend_t(fe);
@@ -476,10 +481,10 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
/* switch between DVB-T and DVB-T2 when tune fails */
- if (priv->last_tune_failed && (priv->delivery_system != SYS_DVBC_ANNEX_A)) {
+ if (priv->last_tune_failed) {
if (priv->delivery_system == SYS_DVBT)
c->delivery_system = SYS_DVBT2;
- else
+ else if (priv->delivery_system == SYS_DVBT2)
c->delivery_system = SYS_DVBT;
}
@@ -492,6 +497,7 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
/* frontend lock wait loop count */
switch (priv->delivery_system) {
case SYS_DVBT:
+ case SYS_DVBC_ANNEX_A:
i = 20;
break;
case SYS_DVBT2:
diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
index 938777065de..af65d013db1 100644
--- a/drivers/media/dvb/frontends/ds3000.c
+++ b/drivers/media/dvb/frontends/ds3000.c
@@ -1195,7 +1195,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe)
for (i = 0; i < 30 ; i++) {
ds3000_read_status(fe, &status);
- if (status && FE_HAS_LOCK)
+ if (status & FE_HAS_LOCK)
break;
msleep(10);
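
The ds3000 hunk fixes a classic operator slip: logical && only tests that both operands are non-zero, so the old check passed for any non-zero status and the lock-wait loop exited on the first partial status bit. A bitwise & isolates the flag that was actually meant:

fe_status_t status = FE_HAS_SIGNAL;	/* carrier seen, but no lock yet */

if (status && FE_HAS_LOCK)		/* buggy: true for ANY non-zero status */
	/* would break out of the wait loop too early */;

if (status & FE_HAS_LOCK)		/* fixed: true only once the lock bit is set */
	/* correct exit condition */;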
diff --git a/drivers/media/dvb/frontends/mb86a20s.c b/drivers/media/dvb/frontends/mb86a20s.c
index 7fa3e472cdc..fade566927c 100644
--- a/drivers/media/dvb/frontends/mb86a20s.c
+++ b/drivers/media/dvb/frontends/mb86a20s.c
@@ -402,7 +402,7 @@ static int mb86a20s_get_modulation(struct mb86a20s_state *state,
[2] = 0x8e, /* Layer C */
};
- if (layer > ARRAY_SIZE(reg))
+ if (layer >= ARRAY_SIZE(reg))
return -EINVAL;
rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
if (rc < 0)
@@ -435,7 +435,7 @@ static int mb86a20s_get_fec(struct mb86a20s_state *state,
[2] = 0x8f, /* Layer C */
};
- if (layer > ARRAY_SIZE(reg))
+ if (layer >= ARRAY_SIZE(reg))
return -EINVAL;
rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
if (rc < 0)
@@ -470,7 +470,7 @@ static int mb86a20s_get_interleaving(struct mb86a20s_state *state,
[2] = 0x90, /* Layer C */
};
- if (layer > ARRAY_SIZE(reg))
+ if (layer >= ARRAY_SIZE(reg))
return -EINVAL;
rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
if (rc < 0)
@@ -494,7 +494,7 @@ static int mb86a20s_get_segment_count(struct mb86a20s_state *state,
[2] = 0x91, /* Layer C */
};
- if (layer > ARRAY_SIZE(reg))
+ if (layer >= ARRAY_SIZE(reg))
return -EINVAL;
rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
if (rc < 0)
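
All four mb86a20s hunks fix the same off-by-one: with a three-entry table, layer == ARRAY_SIZE(reg) slipped past the old > test and indexed one element beyond the array. Illustrated with placeholder register values:

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))	/* as in <linux/kernel.h> */

static const u8 reg[3] = { 0x86, 0x8e, 0x8f };	/* layers A..C, values illustrative */

if (layer > ARRAY_SIZE(reg))	/* buggy: layer == 3 passes, reg[3] is out of bounds */
	return -EINVAL;
if (layer >= ARRAY_SIZE(reg))	/* fixed: valid indexes are 0..2 */
	return -EINVAL;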
diff --git a/drivers/media/dvb/frontends/tda18271c2dd.c b/drivers/media/dvb/frontends/tda18271c2dd.c
index 86da3d81649..ad7c72e8f51 100644
--- a/drivers/media/dvb/frontends/tda18271c2dd.c
+++ b/drivers/media/dvb/frontends/tda18271c2dd.c
@@ -29,7 +29,6 @@
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
-#include <linux/version.h>
#include <asm/div64.h>
#include "dvb_frontend.h"
diff --git a/drivers/media/video/as3645a.c b/drivers/media/video/as3645a.c
index ec859a58065..f241702a0f3 100644
--- a/drivers/media/video/as3645a.c
+++ b/drivers/media/video/as3645a.c
@@ -29,6 +29,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/slab.h>
#include <media/as3645a.h>
#include <media/v4l2-ctrls.h>
diff --git a/drivers/media/video/cx18/cx18-fileops.c b/drivers/media/video/cx18/cx18-fileops.c
index 14cb961c22b..4bfd865a410 100644
--- a/drivers/media/video/cx18/cx18-fileops.c
+++ b/drivers/media/video/cx18/cx18-fileops.c
@@ -751,20 +751,10 @@ int cx18_v4l2_close(struct file *filp)
CX18_DEBUG_IOCTL("close() of %s\n", s->name);
- v4l2_fh_del(fh);
- v4l2_fh_exit(fh);
-
- /* Easy case first: this stream was never claimed by us */
- if (s->id != id->open_id) {
- kfree(id);
- return 0;
- }
-
- /* 'Unclaim' this stream */
-
- /* Stop radio */
mutex_lock(&cx->serialize_lock);
- if (id->type == CX18_ENC_STREAM_TYPE_RAD) {
+ /* Stop radio */
+ if (id->type == CX18_ENC_STREAM_TYPE_RAD &&
+ v4l2_fh_is_singular_file(filp)) {
/* Closing radio device, return to TV mode */
cx18_mute(cx);
/* Mark that the radio is no longer in use */
@@ -781,10 +771,14 @@ int cx18_v4l2_close(struct file *filp)
}
/* Done! Unmute and continue. */
cx18_unmute(cx);
- cx18_release_stream(s);
- } else {
- cx18_stop_capture(id, 0);
}
+
+ v4l2_fh_del(fh);
+ v4l2_fh_exit(fh);
+
+ /* 'Unclaim' this stream */
+ if (s->id == id->open_id)
+ cx18_stop_capture(id, 0);
kfree(id);
mutex_unlock(&cx->serialize_lock);
return 0;
@@ -810,21 +804,15 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
item->open_id = cx->open_id++;
filp->private_data = &item->fh;
+ v4l2_fh_add(&item->fh);
- if (item->type == CX18_ENC_STREAM_TYPE_RAD) {
- /* Try to claim this stream */
- if (cx18_claim_stream(item, item->type)) {
- /* No, it's already in use */
- v4l2_fh_exit(&item->fh);
- kfree(item);
- return -EBUSY;
- }
-
+ if (item->type == CX18_ENC_STREAM_TYPE_RAD &&
+ v4l2_fh_is_singular_file(filp)) {
if (!test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) {
if (atomic_read(&cx->ana_capturing) > 0) {
/* switching to radio while capture is
in progress is not polite */
- cx18_release_stream(s);
+ v4l2_fh_del(&item->fh);
v4l2_fh_exit(&item->fh);
kfree(item);
return -EBUSY;
@@ -842,7 +830,6 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
/* Done! Unmute and continue. */
cx18_unmute(cx);
}
- v4l2_fh_add(&item->fh);
return 0;
}
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index 919ed77b32f..875a7ce9473 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -1052,7 +1052,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
cx231xx_err(DRIVER_NAME ": out of memory!\n");
- clear_bit(dev->devno, &cx231xx_devused);
+ clear_bit(nr, &cx231xx_devused);
return -ENOMEM;
}
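
The cx231xx change is more than cosmetic: in this branch kzalloc() has just failed, so dev is NULL and dev->devno would oops. The index nr saved earlier must be used to release the device slot instead:

dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
	/* dev is NULL here: dev->devno dereferences a NULL pointer. */
	clear_bit(nr, &cx231xx_devused);	/* use the saved index instead */
	return -ENOMEM;
}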
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index 3c01be999e3..19b5499d262 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -213,8 +213,8 @@ struct cx23885_board cx23885_boards[] = {
.portc = CX23885_MPEG_DVB,
.tuner_type = TUNER_XC4000,
.tuner_addr = 0x61,
- .radio_type = TUNER_XC4000,
- .radio_addr = 0x61,
+ .radio_type = UNSET,
+ .radio_addr = ADDR_UNSET,
.input = {{
.type = CX23885_VMUX_TELEVISION,
.vmux = CX25840_VIN2_CH1 |
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index af8a225763d..6835eb1fc09 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -943,6 +943,11 @@ static int dvb_register(struct cx23885_tsport *port)
fe = dvb_attach(xc4000_attach, fe0->dvb.frontend,
&dev->i2c_bus[1].i2c_adap, &cfg);
+ if (!fe) {
+ printk(KERN_ERR "%s/2: xc4000 attach failed\n",
+ dev->name);
+ goto frontend_detach;
+ }
}
break;
case CX23885_BOARD_TBS_6920:
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index 4bbf9bb97bd..c654bdc7ccb 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -1550,7 +1550,6 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
struct v4l2_control ctrl;
struct videobuf_dvb_frontend *vfe;
struct dvb_frontend *fe;
- int err = 0;
struct analog_parameters params = {
.mode = V4L2_TUNER_ANALOG_TV,
@@ -1572,8 +1571,10 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
params.frequency, f->tuner, params.std);
vfe = videobuf_dvb_get_frontend(&dev->ts2.frontends, 1);
- if (!vfe)
- err = -EINVAL;
+ if (!vfe) {
+ mutex_unlock(&dev->lock);
+ return -EINVAL;
+ }
fe = vfe->dvb.frontend;
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 62c7ad050f9..cbd5d119a2c 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -1573,8 +1573,8 @@ static const struct cx88_board cx88_boards[] = {
.name = "Pinnacle Hybrid PCTV",
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
- .radio_type = TUNER_XC2028,
- .radio_addr = 0x61,
+ .radio_type = UNSET,
+ .radio_addr = ADDR_UNSET,
.input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
@@ -1611,8 +1611,8 @@ static const struct cx88_board cx88_boards[] = {
.name = "Leadtek TV2000 XP Global",
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
- .radio_type = TUNER_XC2028,
- .radio_addr = 0x61,
+ .radio_type = UNSET,
+ .radio_addr = ADDR_UNSET,
.input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
@@ -2115,8 +2115,8 @@ static const struct cx88_board cx88_boards[] = {
.name = "Terratec Cinergy HT PCI MKII",
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
- .radio_type = TUNER_XC2028,
- .radio_addr = 0x61,
+ .radio_type = UNSET,
+ .radio_addr = ADDR_UNSET,
.input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
@@ -2154,9 +2154,9 @@ static const struct cx88_board cx88_boards[] = {
[CX88_BOARD_WINFAST_DTV1800H] = {
.name = "Leadtek WinFast DTV1800 Hybrid",
.tuner_type = TUNER_XC2028,
- .radio_type = TUNER_XC2028,
+ .radio_type = UNSET,
.tuner_addr = 0x61,
- .radio_addr = 0x61,
+ .radio_addr = ADDR_UNSET,
/*
* GPIO setting
*
@@ -2195,9 +2195,9 @@ static const struct cx88_board cx88_boards[] = {
[CX88_BOARD_WINFAST_DTV1800H_XC4000] = {
.name = "Leadtek WinFast DTV1800 H (XC4000)",
.tuner_type = TUNER_XC4000,
- .radio_type = TUNER_XC4000,
+ .radio_type = UNSET,
.tuner_addr = 0x61,
- .radio_addr = 0x61,
+ .radio_addr = ADDR_UNSET,
/*
* GPIO setting
*
@@ -2236,9 +2236,9 @@ static const struct cx88_board cx88_boards[] = {
[CX88_BOARD_WINFAST_DTV2000H_PLUS] = {
.name = "Leadtek WinFast DTV2000 H PLUS",
.tuner_type = TUNER_XC4000,
- .radio_type = TUNER_XC4000,
+ .radio_type = UNSET,
.tuner_addr = 0x61,
- .radio_addr = 0x61,
+ .radio_addr = ADDR_UNSET,
/*
* GPIO
* 2: 1: mute audio
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index 544af91cbdc..3949b7dc236 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -731,9 +731,6 @@ static int __devinit ivtv_init_struct1(struct ivtv *itv)
init_kthread_work(&itv->irq_work, ivtv_irq_work_handler);
- /* start counting open_id at 1 */
- itv->open_id = 1;
-
/* Initial settings */
itv->cxhdl.port = CX2341X_PORT_MEMORY;
itv->cxhdl.capabilities = CX2341X_CAP_HAS_SLICED_VBI;
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index 8f9cc17b518..06f3d78389b 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -332,7 +332,7 @@ struct ivtv_stream {
const char *name; /* name of the stream */
int type; /* stream type */
- u32 id;
+ struct v4l2_fh *fh; /* pointer to the streaming filehandle */
spinlock_t qlock; /* locks access to the queues */
unsigned long s_flags; /* status flags, see above */
int dma; /* can be PCI_DMA_TODEVICE, PCI_DMA_FROMDEVICE or PCI_DMA_NONE */
@@ -379,7 +379,6 @@ struct ivtv_stream {
struct ivtv_open_id {
struct v4l2_fh fh;
- u32 open_id; /* unique ID for this file descriptor */
int type; /* stream type */
int yuv_frames; /* 1: started OUT_UDMA_YUV output mode */
struct ivtv *itv;
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index 38f052257f4..2cd6c89b7d9 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -50,16 +50,16 @@ static int ivtv_claim_stream(struct ivtv_open_id *id, int type)
if (test_and_set_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
/* someone already claimed this stream */
- if (s->id == id->open_id) {
+ if (s->fh == &id->fh) {
/* yes, this file descriptor did. So that's OK. */
return 0;
}
- if (s->id == -1 && (type == IVTV_DEC_STREAM_TYPE_VBI ||
+ if (s->fh == NULL && (type == IVTV_DEC_STREAM_TYPE_VBI ||
type == IVTV_ENC_STREAM_TYPE_VBI)) {
/* VBI is handled already internally, now also assign
the file descriptor to this stream for external
reading of the stream. */
- s->id = id->open_id;
+ s->fh = &id->fh;
IVTV_DEBUG_INFO("Start Read VBI\n");
return 0;
}
@@ -67,7 +67,7 @@ static int ivtv_claim_stream(struct ivtv_open_id *id, int type)
IVTV_DEBUG_INFO("Stream %d is busy\n", type);
return -EBUSY;
}
- s->id = id->open_id;
+ s->fh = &id->fh;
if (type == IVTV_DEC_STREAM_TYPE_VBI) {
/* Enable reinsertion interrupt */
ivtv_clear_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT);
@@ -104,7 +104,7 @@ void ivtv_release_stream(struct ivtv_stream *s)
struct ivtv *itv = s->itv;
struct ivtv_stream *s_vbi;
- s->id = -1;
+ s->fh = NULL;
if ((s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type == IVTV_ENC_STREAM_TYPE_VBI) &&
test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
/* this stream is still in use internally */
@@ -136,7 +136,7 @@ void ivtv_release_stream(struct ivtv_stream *s)
/* was already cleared */
return;
}
- if (s_vbi->id != -1) {
+ if (s_vbi->fh) {
/* VBI stream still claimed by a file descriptor */
return;
}
@@ -268,11 +268,13 @@ static struct ivtv_buffer *ivtv_get_buffer(struct ivtv_stream *s, int non_block,
}
/* wait for more data to arrive */
+ mutex_unlock(&itv->serialize_lock);
prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
/* New buffers might have become available before we were added to the waitqueue */
if (!s->q_full.buffers)
schedule();
finish_wait(&s->waitq, &wait);
+ mutex_lock(&itv->serialize_lock);
if (signal_pending(current)) {
/* return if a signal was received */
IVTV_DEBUG_INFO("User stopped %s\n", s->name);
@@ -357,7 +359,7 @@ static ssize_t ivtv_read(struct ivtv_stream *s, char __user *ubuf, size_t tot_co
size_t tot_written = 0;
int single_frame = 0;
- if (atomic_read(&itv->capturing) == 0 && s->id == -1) {
+ if (atomic_read(&itv->capturing) == 0 && s->fh == NULL) {
/* shouldn't happen */
IVTV_DEBUG_WARN("Stream %s not initialized before read\n", s->name);
return -EIO;
@@ -507,9 +509,7 @@ ssize_t ivtv_v4l2_read(struct file * filp, char __user *buf, size_t count, loff_
IVTV_DEBUG_HI_FILE("read %zd bytes from %s\n", count, s->name);
- mutex_lock(&itv->serialize_lock);
rc = ivtv_start_capture(id);
- mutex_unlock(&itv->serialize_lock);
if (rc)
return rc;
return ivtv_read_pos(s, buf, count, pos, filp->f_flags & O_NONBLOCK);
@@ -584,9 +584,7 @@ ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t c
set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
/* Start decoder (returns 0 if already started) */
- mutex_lock(&itv->serialize_lock);
rc = ivtv_start_decoding(id, itv->speed);
- mutex_unlock(&itv->serialize_lock);
if (rc) {
IVTV_DEBUG_WARN("Failed start decode stream %s\n", s->name);
@@ -627,11 +625,13 @@ retry:
break;
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
+ mutex_unlock(&itv->serialize_lock);
prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
/* New buffers might have become free before we were added to the waitqueue */
if (!s->q_free.buffers)
schedule();
finish_wait(&s->waitq, &wait);
+ mutex_lock(&itv->serialize_lock);
if (signal_pending(current)) {
IVTV_DEBUG_INFO("User stopped %s\n", s->name);
return -EINTR;
@@ -686,12 +686,14 @@ retry:
if (mode == OUT_YUV)
ivtv_yuv_setup_stream_frame(itv);
+ mutex_unlock(&itv->serialize_lock);
prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
while (!(got_sig = signal_pending(current)) &&
test_bit(IVTV_F_S_DMA_PENDING, &s->s_flags)) {
schedule();
}
finish_wait(&itv->dma_waitq, &wait);
+ mutex_lock(&itv->serialize_lock);
if (got_sig) {
IVTV_DEBUG_INFO("User interrupted %s\n", s->name);
return -EINTR;
@@ -756,9 +758,7 @@ unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
int rc;
- mutex_lock(&itv->serialize_lock);
rc = ivtv_start_capture(id);
- mutex_unlock(&itv->serialize_lock);
if (rc) {
IVTV_DEBUG_INFO("Could not start capture for %s (%d)\n",
s->name, rc);
@@ -808,7 +808,7 @@ void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end)
id->type == IVTV_ENC_STREAM_TYPE_VBI) &&
test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
/* Also used internally, don't stop capturing */
- s->id = -1;
+ s->fh = NULL;
}
else {
ivtv_stop_v4l2_encode_stream(s, gop_end);
@@ -861,20 +861,9 @@ int ivtv_v4l2_close(struct file *filp)
IVTV_DEBUG_FILE("close %s\n", s->name);
- v4l2_fh_del(fh);
- v4l2_fh_exit(fh);
-
- /* Easy case first: this stream was never claimed by us */
- if (s->id != id->open_id) {
- kfree(id);
- return 0;
- }
-
- /* 'Unclaim' this stream */
-
/* Stop radio */
- mutex_lock(&itv->serialize_lock);
- if (id->type == IVTV_ENC_STREAM_TYPE_RAD) {
+ if (id->type == IVTV_ENC_STREAM_TYPE_RAD &&
+ v4l2_fh_is_singular_file(filp)) {
/* Closing radio device, return to TV mode */
ivtv_mute(itv);
/* Mark that the radio is no longer in use */
@@ -890,13 +879,25 @@ int ivtv_v4l2_close(struct file *filp)
if (atomic_read(&itv->capturing) > 0) {
/* Undo video mute */
ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1,
- v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute) |
- (v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8));
+ v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute) |
+ (v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8));
}
/* Done! Unmute and continue. */
ivtv_unmute(itv);
- ivtv_release_stream(s);
- } else if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
+ }
+
+ v4l2_fh_del(fh);
+ v4l2_fh_exit(fh);
+
+ /* Easy case first: this stream was never claimed by us */
+ if (s->fh != &id->fh) {
+ kfree(id);
+ return 0;
+ }
+
+ /* 'Unclaim' this stream */
+
+ if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
struct ivtv_stream *s_vout = &itv->streams[IVTV_DEC_STREAM_TYPE_VOUT];
ivtv_stop_decoding(id, VIDEO_CMD_STOP_TO_BLACK | VIDEO_CMD_STOP_IMMEDIATELY, 0);
@@ -911,21 +912,25 @@ int ivtv_v4l2_close(struct file *filp)
ivtv_stop_capture(id, 0);
}
kfree(id);
- mutex_unlock(&itv->serialize_lock);
return 0;
}
-static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
+int ivtv_v4l2_open(struct file *filp)
{
-#ifdef CONFIG_VIDEO_ADV_DEBUG
struct video_device *vdev = video_devdata(filp);
-#endif
+ struct ivtv_stream *s = video_get_drvdata(vdev);
struct ivtv *itv = s->itv;
struct ivtv_open_id *item;
int res = 0;
IVTV_DEBUG_FILE("open %s\n", s->name);
+ if (ivtv_init_on_first_open(itv)) {
+ IVTV_ERR("Failed to initialize on device %s\n",
+ video_device_node_name(vdev));
+ return -ENXIO;
+ }
+
#ifdef CONFIG_VIDEO_ADV_DEBUG
/* Unless ivtv_fw_debug is set, error out if firmware dead. */
if (ivtv_fw_debug) {
@@ -966,31 +971,19 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
return -ENOMEM;
}
v4l2_fh_init(&item->fh, s->vdev);
- if (res < 0) {
- v4l2_fh_exit(&item->fh);
- kfree(item);
- return res;
- }
item->itv = itv;
item->type = s->type;
- item->open_id = itv->open_id++;
filp->private_data = &item->fh;
+ v4l2_fh_add(&item->fh);
- if (item->type == IVTV_ENC_STREAM_TYPE_RAD) {
- /* Try to claim this stream */
- if (ivtv_claim_stream(item, item->type)) {
- /* No, it's already in use */
- v4l2_fh_exit(&item->fh);
- kfree(item);
- return -EBUSY;
- }
-
+ if (item->type == IVTV_ENC_STREAM_TYPE_RAD &&
+ v4l2_fh_is_singular_file(filp)) {
if (!test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) {
if (atomic_read(&itv->capturing) > 0) {
/* switching to radio while capture is
in progress is not polite */
- ivtv_release_stream(s);
+ v4l2_fh_del(&item->fh);
v4l2_fh_exit(&item->fh);
kfree(item);
return -EBUSY;
@@ -1022,32 +1015,9 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
itv->yuv_info.stream_size = 0;
}
- v4l2_fh_add(&item->fh);
return 0;
}
-int ivtv_v4l2_open(struct file *filp)
-{
- int res;
- struct ivtv *itv = NULL;
- struct ivtv_stream *s = NULL;
- struct video_device *vdev = video_devdata(filp);
-
- s = video_get_drvdata(vdev);
- itv = s->itv;
-
- mutex_lock(&itv->serialize_lock);
- if (ivtv_init_on_first_open(itv)) {
- IVTV_ERR("Failed to initialize on device %s\n",
- video_device_node_name(vdev));
- mutex_unlock(&itv->serialize_lock);
- return -ENXIO;
- }
- res = ivtv_serialized_open(s, filp);
- mutex_unlock(&itv->serialize_lock);
- return res;
-}
-
void ivtv_mute(struct ivtv *itv)
{
if (atomic_read(&itv->capturing))
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index ecafa697326..c4bc4814309 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -179,6 +179,7 @@ int ivtv_set_speed(struct ivtv *itv, int speed)
ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1, 0);
/* Wait for any DMA to finish */
+ mutex_unlock(&itv->serialize_lock);
prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
while (test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
got_sig = signal_pending(current);
@@ -188,6 +189,7 @@ int ivtv_set_speed(struct ivtv *itv, int speed)
schedule();
}
finish_wait(&itv->dma_waitq, &wait);
+ mutex_lock(&itv->serialize_lock);
if (got_sig)
return -EINTR;
@@ -1107,6 +1109,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
* happens within the first 100 lines of the top field.
* Make 4 attempts to sync to the decoder before giving up.
*/
+ mutex_unlock(&itv->serialize_lock);
for (f = 0; f < 4; f++) {
prepare_to_wait(&itv->vsync_waitq, &wait,
TASK_UNINTERRUPTIBLE);
@@ -1115,6 +1118,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
schedule_timeout(msecs_to_jiffies(25));
}
finish_wait(&itv->vsync_waitq, &wait);
+ mutex_lock(&itv->serialize_lock);
if (f == 4)
IVTV_WARN("Mode change failed to sync to decoder\n");
@@ -1842,8 +1846,7 @@ static long ivtv_default(struct file *file, void *fh, bool valid_prio,
return 0;
}
-static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
- unsigned int cmd, unsigned long arg)
+long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct video_device *vfd = video_devdata(filp);
long ret;
@@ -1855,21 +1858,6 @@ static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
return ret;
}
-long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- struct ivtv_open_id *id = fh2id(filp->private_data);
- struct ivtv *itv = id->itv;
- long res;
-
- /* DQEVENT can block, so this should not run with the serialize lock */
- if (cmd == VIDIOC_DQEVENT)
- return ivtv_serialized_ioctl(itv, filp, cmd, arg);
- mutex_lock(&itv->serialize_lock);
- res = ivtv_serialized_ioctl(itv, filp, cmd, arg);
- mutex_unlock(&itv->serialize_lock);
- return res;
-}
-
static const struct v4l2_ioctl_ops ivtv_ioctl_ops = {
.vidioc_querycap = ivtv_querycap,
.vidioc_s_audio = ivtv_s_audio,
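
With the core holding serialize_lock across every ioctl, handlers that sleep for a long time (DMA completion, vsync) must drop the mutex around the wait, as the hunks above do. A condensed sketch of that pattern using the canonical prepare_to_wait loop (the wait condition and names mirror ivtv_set_speed() but are illustrative):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/mutex.h>

/* Drop the serializing mutex around a long sleep so other file
 * operations can proceed, then re-take it before returning. */
static int wait_for_dma_done(struct ivtv *itv)
{
	DEFINE_WAIT(wait);

	mutex_unlock(&itv->serialize_lock);
	for (;;) {
		prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
		if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
			break;
		if (signal_pending(current))
			break;
		schedule();
	}
	finish_wait(&itv->dma_waitq, &wait);
	mutex_lock(&itv->serialize_lock);
	return signal_pending(current) ? -EINTR : 0;
}
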
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
index 9c29e964d40..1b3b9578bf4 100644
--- a/drivers/media/video/ivtv/ivtv-irq.c
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -288,13 +288,13 @@ static void dma_post(struct ivtv_stream *s)
ivtv_process_vbi_data(itv, buf, 0, s->type);
s->q_dma.bytesused += buf->bytesused;
}
- if (s->id == -1) {
+ if (s->fh == NULL) {
ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
return;
}
}
ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
- if (s->id != -1)
+ if (s->fh)
wake_up(&s->waitq);
}
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index e7794dc1330..c6e28b4ebbe 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -159,7 +159,6 @@ static void ivtv_stream_init(struct ivtv *itv, int type)
s->buffers = (itv->options.kilobytes[type] * 1024 + s->buf_size - 1) / s->buf_size;
spin_lock_init(&s->qlock);
init_waitqueue_head(&s->waitq);
- s->id = -1;
s->sg_handle = IVTV_DMA_UNMAPPED;
ivtv_queue_init(&s->q_free);
ivtv_queue_init(&s->q_full);
@@ -214,6 +213,7 @@ static int ivtv_prep_dev(struct ivtv *itv, int type)
s->vdev->fops = ivtv_stream_info[type].fops;
s->vdev->release = video_device_release;
s->vdev->tvnorms = V4L2_STD_ALL;
+ s->vdev->lock = &itv->serialize_lock;
set_bit(V4L2_FL_USE_FH_PRIO, &s->vdev->flags);
ivtv_set_funcs(s->vdev);
return 0;
diff --git a/drivers/media/video/ivtv/ivtv-yuv.c b/drivers/media/video/ivtv/ivtv-yuv.c
index dcbab6ad4c2..2ad65eb2983 100644
--- a/drivers/media/video/ivtv/ivtv-yuv.c
+++ b/drivers/media/video/ivtv/ivtv-yuv.c
@@ -1149,23 +1149,37 @@ int ivtv_yuv_udma_stream_frame(struct ivtv *itv, void __user *src)
{
struct yuv_playback_info *yi = &itv->yuv_info;
struct ivtv_dma_frame dma_args;
+ int res;
ivtv_yuv_setup_stream_frame(itv);
/* We only need to supply source addresses for this */
dma_args.y_source = src;
dma_args.uv_source = src + 720 * ((yi->v4l2_src_h + 31) & ~31);
- return ivtv_yuv_udma_frame(itv, &dma_args);
+ /* Wait for frame DMA. serialize_lock is held, so unlock it
+    first to let other processes access the driver while we
+    wait, then lock it again afterwards. */
+ mutex_unlock(&itv->serialize_lock);
+ res = ivtv_yuv_udma_frame(itv, &dma_args);
+ mutex_lock(&itv->serialize_lock);
+ return res;
}
/* IVTV_IOC_DMA_FRAME ioctl handler */
int ivtv_yuv_prep_frame(struct ivtv *itv, struct ivtv_dma_frame *args)
{
-/* IVTV_DEBUG_INFO("yuv_prep_frame\n"); */
+ int res;
+/* IVTV_DEBUG_INFO("yuv_prep_frame\n"); */
ivtv_yuv_next_free(itv);
ivtv_yuv_setup_frame(itv, args);
- return ivtv_yuv_udma_frame(itv, args);
+ /* Wait for frame DMA. serialize_lock is held, so unlock it
+    first to let other processes access the driver while we
+    wait, then lock it again afterwards. */
+ mutex_unlock(&itv->serialize_lock);
+ res = ivtv_yuv_udma_frame(itv, args);
+ mutex_lock(&itv->serialize_lock);
+ return res;
}
void ivtv_yuv_close(struct ivtv *itv)
@@ -1174,7 +1188,9 @@ void ivtv_yuv_close(struct ivtv *itv)
int h_filter, v_filter_1, v_filter_2;
IVTV_DEBUG_YUV("ivtv_yuv_close\n");
+ mutex_unlock(&itv->serialize_lock);
ivtv_waitq(&itv->vsync_waitq);
+ mutex_lock(&itv->serialize_lock);
yi->running = 0;
atomic_set(&yi->next_dma_frame, -1);
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index 0cb461dd396..74522773e93 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -287,7 +287,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
sg_dma_len(sg) = new_size;
txd = ichan->dma_chan.device->device_prep_slave_sg(
- &ichan->dma_chan, sg, 1, DMA_FROM_DEVICE,
+ &ichan->dma_chan, sg, 1, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
if (!txd)
goto error;
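
DMA_FROM_DEVICE belongs to enum dma_data_direction, which describes how a buffer is mapped; slave descriptors now take enum dma_transfer_direction instead, hence DMA_DEV_TO_MEM here and in the other drivers below. A sketch of how the two relate (the helper name is illustrative):

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>

/* dma_data_direction is for dma_map_sg(); dma_transfer_direction
 * is for device_prep_slave_sg(). Bidirectional mappings have no
 * slave equivalent, so only the two unidirectional cases map. */
static enum dma_transfer_direction to_xfer_dir(enum dma_data_direction dir)
{
	return dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
}
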
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index a277f95091e..1fb7d5bd5ec 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -1042,7 +1042,8 @@ static int vidioc_querycap(struct file *file, void *fh,
strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));
cap->bus_info[0] = '\0';
- cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT;
+ cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
return 0;
}
@@ -1825,7 +1826,9 @@ static int vidioc_g_fbuf(struct file *file, void *fh,
ovid = &vout->vid_info;
ovl = ovid->overlays[0];
- a->flags = 0x0;
+ /* The video overlay must stay within the framebuffer and can't be
+ positioned independently. */
+ a->flags = V4L2_FBUF_FLAG_OVERLAY;
a->capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_CHROMAKEY
| V4L2_FBUF_CAP_SRC_CHROMAKEY;
diff --git a/drivers/media/video/pwc/pwc-ctrl.c b/drivers/media/video/pwc/pwc-ctrl.c
index 905d41d90c6..1f506fde97d 100644
--- a/drivers/media/video/pwc/pwc-ctrl.c
+++ b/drivers/media/video/pwc/pwc-ctrl.c
@@ -104,47 +104,16 @@ static struct Nala_table_entry Nala_table[PSZ_MAX][PWC_FPS_MAX_NALA] =
/****************************************************************************/
-static int _send_control_msg(struct pwc_device *pdev,
- u8 request, u16 value, int index, void *buf, int buflen)
-{
- int rc;
- void *kbuf = NULL;
-
- if (buflen) {
- kbuf = kmemdup(buf, buflen, GFP_KERNEL); /* not allowed on stack */
- if (kbuf == NULL)
- return -ENOMEM;
- }
-
- rc = usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
- request,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value,
- index,
- kbuf, buflen, USB_CTRL_SET_TIMEOUT);
-
- kfree(kbuf);
- return rc;
-}
-
static int recv_control_msg(struct pwc_device *pdev,
- u8 request, u16 value, void *buf, int buflen)
+ u8 request, u16 value, int recv_count)
{
int rc;
- void *kbuf = kmalloc(buflen, GFP_KERNEL); /* not allowed on stack */
-
- if (kbuf == NULL)
- return -ENOMEM;
rc = usb_control_msg(pdev->udev, usb_rcvctrlpipe(pdev->udev, 0),
request,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value,
- pdev->vcinterface,
- kbuf, buflen, USB_CTRL_GET_TIMEOUT);
- memcpy(buf, kbuf, buflen);
- kfree(kbuf);
-
+ value, pdev->vcinterface,
+ pdev->ctrl_buf, recv_count, USB_CTRL_GET_TIMEOUT);
if (rc < 0)
PWC_ERROR("recv_control_msg error %d req %02x val %04x\n",
rc, request, value);
@@ -152,27 +121,39 @@ static int recv_control_msg(struct pwc_device *pdev,
}
static inline int send_video_command(struct pwc_device *pdev,
- int index, void *buf, int buflen)
+ int index, const unsigned char *buf, int buflen)
{
- return _send_control_msg(pdev,
- SET_EP_STREAM_CTL,
- VIDEO_OUTPUT_CONTROL_FORMATTER,
- index,
- buf, buflen);
+ int rc;
+
+ memcpy(pdev->ctrl_buf, buf, buflen);
+
+ rc = usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
+ SET_EP_STREAM_CTL,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ VIDEO_OUTPUT_CONTROL_FORMATTER, index,
+ pdev->ctrl_buf, buflen, USB_CTRL_SET_TIMEOUT);
+ if (rc >= 0)
+ memcpy(pdev->cmd_buf, buf, buflen);
+ else
+ PWC_ERROR("send_video_command error %d\n", rc);
+
+ return rc;
}
int send_control_msg(struct pwc_device *pdev,
u8 request, u16 value, void *buf, int buflen)
{
- return _send_control_msg(pdev,
- request, value, pdev->vcinterface, buf, buflen);
+ return usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
+ request,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, pdev->vcinterface,
+ buf, buflen, USB_CTRL_SET_TIMEOUT);
}
-static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames,
- int *compression)
+static int set_video_mode_Nala(struct pwc_device *pdev, int size, int pixfmt,
+ int frames, int *compression, int send_to_cam)
{
- unsigned char buf[3];
- int ret, fps;
+ int fps, ret = 0;
struct Nala_table_entry *pEntry;
int frames2frames[31] =
{ /* closest match of framerate */
@@ -194,30 +175,29 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames,
7 /* 30 */
};
- if (size < 0 || size > PSZ_CIF || frames < 4 || frames > 25)
+ if (size < 0 || size > PSZ_CIF)
return -EINVAL;
+ if (frames < 4)
+ frames = 4;
+ else if (frames > 25)
+ frames = 25;
frames = frames2frames[frames];
fps = frames2table[frames];
pEntry = &Nala_table[size][fps];
if (pEntry->alternate == 0)
return -EINVAL;
- memcpy(buf, pEntry->mode, 3);
- ret = send_video_command(pdev, pdev->vendpoint, buf, 3);
- if (ret < 0) {
- PWC_DEBUG_MODULE("Failed to send video command... %d\n", ret);
+ if (send_to_cam)
+ ret = send_video_command(pdev, pdev->vendpoint,
+ pEntry->mode, 3);
+ if (ret < 0)
return ret;
- }
- if (pEntry->compressed && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
- ret = pwc_dec1_init(pdev, pdev->type, pdev->release, buf);
- if (ret < 0)
- return ret;
- }
- pdev->cmd_len = 3;
- memcpy(pdev->cmd_buf, buf, 3);
+ if (pEntry->compressed && pixfmt == V4L2_PIX_FMT_YUV420)
+ pwc_dec1_init(pdev, pEntry->mode);
/* Set various parameters */
+ pdev->pixfmt = pixfmt;
pdev->vframes = frames;
pdev->valternate = pEntry->alternate;
pdev->width = pwc_image_sizes[size][0];
@@ -243,18 +223,20 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames,
}
-static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames,
- int *compression)
+static int set_video_mode_Timon(struct pwc_device *pdev, int size, int pixfmt,
+ int frames, int *compression, int send_to_cam)
{
- unsigned char buf[13];
const struct Timon_table_entry *pChoose;
- int ret, fps;
+ int fps, ret = 0;
- if (size >= PSZ_MAX || frames < 5 || frames > 30 ||
- *compression < 0 || *compression > 3)
- return -EINVAL;
- if (size == PSZ_VGA && frames > 15)
+ if (size >= PSZ_MAX || *compression < 0 || *compression > 3)
return -EINVAL;
+ if (frames < 5)
+ frames = 5;
+ else if (size == PSZ_VGA && frames > 15)
+ frames = 15;
+ else if (frames > 30)
+ frames = 30;
fps = (frames / 5) - 1;
/* Find a supported framerate with progressively higher compression */
@@ -268,22 +250,18 @@ static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames,
if (pChoose == NULL || pChoose->alternate == 0)
return -ENOENT; /* Not supported. */
- memcpy(buf, pChoose->mode, 13);
- ret = send_video_command(pdev, pdev->vendpoint, buf, 13);
+ if (send_to_cam)
+ ret = send_video_command(pdev, pdev->vendpoint,
+ pChoose->mode, 13);
if (ret < 0)
return ret;
- if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
- ret = pwc_dec23_init(pdev, pdev->type, buf);
- if (ret < 0)
- return ret;
- }
-
- pdev->cmd_len = 13;
- memcpy(pdev->cmd_buf, buf, 13);
+ if (pChoose->bandlength > 0 && pixfmt == V4L2_PIX_FMT_YUV420)
+ pwc_dec23_init(pdev, pChoose->mode);
/* Set various parameters */
- pdev->vframes = frames;
+ pdev->pixfmt = pixfmt;
+ pdev->vframes = (fps + 1) * 5;
pdev->valternate = pChoose->alternate;
pdev->width = pwc_image_sizes[size][0];
pdev->height = pwc_image_sizes[size][1];
@@ -296,18 +274,20 @@ static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames,
}
-static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames,
- int *compression)
+static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int pixfmt,
+ int frames, int *compression, int send_to_cam)
{
const struct Kiara_table_entry *pChoose = NULL;
- int fps, ret;
- unsigned char buf[12];
+ int fps, ret = 0;
- if (size >= PSZ_MAX || frames < 5 || frames > 30 ||
- *compression < 0 || *compression > 3)
- return -EINVAL;
- if (size == PSZ_VGA && frames > 15)
+ if (size >= PSZ_MAX || *compression < 0 || *compression > 3)
return -EINVAL;
+ if (frames < 5)
+ frames = 5;
+ else if (size == PSZ_VGA && frames > 15)
+ frames = 15;
+ else if (frames > 30)
+ frames = 30;
fps = (frames / 5) - 1;
/* Find a supported framerate with progressively higher compression */
@@ -320,26 +300,18 @@ static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames,
if (pChoose == NULL || pChoose->alternate == 0)
return -ENOENT; /* Not supported. */
- PWC_TRACE("Using alternate setting %d.\n", pChoose->alternate);
-
- /* usb_control_msg won't take staticly allocated arrays as argument?? */
- memcpy(buf, pChoose->mode, 12);
-
/* Firmware bug: video endpoint is 5, but commands are sent to endpoint 4 */
- ret = send_video_command(pdev, 4 /* pdev->vendpoint */, buf, 12);
+ if (send_to_cam)
+ ret = send_video_command(pdev, 4, pChoose->mode, 12);
if (ret < 0)
return ret;
- if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
- ret = pwc_dec23_init(pdev, pdev->type, buf);
- if (ret < 0)
- return ret;
- }
+ if (pChoose->bandlength > 0 && pixfmt == V4L2_PIX_FMT_YUV420)
+ pwc_dec23_init(pdev, pChoose->mode);
- pdev->cmd_len = 12;
- memcpy(pdev->cmd_buf, buf, 12);
/* All set and go */
- pdev->vframes = frames;
+ pdev->pixfmt = pixfmt;
+ pdev->vframes = (fps + 1) * 5;
pdev->valternate = pChoose->alternate;
pdev->width = pwc_image_sizes[size][0];
pdev->height = pwc_image_sizes[size][1];
@@ -354,22 +326,24 @@ static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames,
}
int pwc_set_video_mode(struct pwc_device *pdev, int width, int height,
- int frames, int *compression)
+ int pixfmt, int frames, int *compression, int send_to_cam)
{
int ret, size;
- PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n", width, height, frames, pdev->pixfmt);
+ PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n",
+ width, height, frames, pixfmt);
size = pwc_get_size(pdev, width, height);
PWC_TRACE("decode_size = %d.\n", size);
if (DEVICE_USE_CODEC1(pdev->type)) {
- ret = set_video_mode_Nala(pdev, size, frames, compression);
-
+ ret = set_video_mode_Nala(pdev, size, pixfmt, frames,
+ compression, send_to_cam);
} else if (DEVICE_USE_CODEC3(pdev->type)) {
- ret = set_video_mode_Kiara(pdev, size, frames, compression);
-
+ ret = set_video_mode_Kiara(pdev, size, pixfmt, frames,
+ compression, send_to_cam);
} else {
- ret = set_video_mode_Timon(pdev, size, frames, compression);
+ ret = set_video_mode_Timon(pdev, size, pixfmt, frames,
+ compression, send_to_cam);
}
if (ret < 0) {
PWC_ERROR("Failed to set video mode %s@%d fps; return code = %d\n", size2name[size], frames, ret);
@@ -436,13 +410,12 @@ unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned i
int pwc_get_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
{
int ret;
- u8 buf;
- ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf));
+ ret = recv_control_msg(pdev, request, value, 1);
if (ret < 0)
return ret;
- *data = buf;
+ *data = pdev->ctrl_buf[0];
return 0;
}
@@ -450,7 +423,8 @@ int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data)
{
int ret;
- ret = send_control_msg(pdev, request, value, &data, sizeof(data));
+ pdev->ctrl_buf[0] = data;
+ ret = send_control_msg(pdev, request, value, pdev->ctrl_buf, 1);
if (ret < 0)
return ret;
@@ -460,37 +434,34 @@ int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data)
int pwc_get_s8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
{
int ret;
- s8 buf;
- ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf));
+ ret = recv_control_msg(pdev, request, value, 1);
if (ret < 0)
return ret;
- *data = buf;
+ *data = ((s8 *)pdev->ctrl_buf)[0];
return 0;
}
int pwc_get_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
{
int ret;
- u8 buf[2];
- ret = recv_control_msg(pdev, request, value, buf, sizeof(buf));
+ ret = recv_control_msg(pdev, request, value, 2);
if (ret < 0)
return ret;
- *data = (buf[1] << 8) | buf[0];
+ *data = (pdev->ctrl_buf[1] << 8) | pdev->ctrl_buf[0];
return 0;
}
int pwc_set_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, u16 data)
{
int ret;
- u8 buf[2];
- buf[0] = data & 0xff;
- buf[1] = data >> 8;
- ret = send_control_msg(pdev, request, value, buf, sizeof(buf));
+ pdev->ctrl_buf[0] = data & 0xff;
+ pdev->ctrl_buf[1] = data >> 8;
+ ret = send_control_msg(pdev, request, value, pdev->ctrl_buf, 2);
if (ret < 0)
return ret;
@@ -511,7 +482,6 @@ int pwc_button_ctrl(struct pwc_device *pdev, u16 value)
/* POWER */
void pwc_camera_power(struct pwc_device *pdev, int power)
{
- char buf;
int r;
if (!pdev->power_save)
@@ -521,13 +491,11 @@ void pwc_camera_power(struct pwc_device *pdev, int power)
return; /* Not supported by Nala or Timon < release 6 */
if (power)
- buf = 0x00; /* active */
+ pdev->ctrl_buf[0] = 0x00; /* active */
else
- buf = 0xFF; /* power save */
- r = send_control_msg(pdev,
- SET_STATUS_CTL, SET_POWER_SAVE_MODE_FORMATTER,
- &buf, sizeof(buf));
-
+ pdev->ctrl_buf[0] = 0xFF; /* power save */
+ r = send_control_msg(pdev, SET_STATUS_CTL,
+ SET_POWER_SAVE_MODE_FORMATTER, pdev->ctrl_buf, 1);
if (r < 0)
PWC_ERROR("Failed to power %s camera (%d)\n",
power ? "on" : "off", r);
@@ -535,7 +503,6 @@ void pwc_camera_power(struct pwc_device *pdev, int power)
int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
{
- unsigned char buf[2];
int r;
if (pdev->type < 730)
@@ -551,11 +518,11 @@ int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
if (off_value > 0xff)
off_value = 0xff;
- buf[0] = on_value;
- buf[1] = off_value;
+ pdev->ctrl_buf[0] = on_value;
+ pdev->ctrl_buf[1] = off_value;
r = send_control_msg(pdev,
- SET_STATUS_CTL, LED_FORMATTER, &buf, sizeof(buf));
+ SET_STATUS_CTL, LED_FORMATTER, pdev->ctrl_buf, 2);
if (r < 0)
PWC_ERROR("Failed to set LED on/off time (%d)\n", r);
@@ -565,7 +532,6 @@ int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
#ifdef CONFIG_USB_PWC_DEBUG
int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
{
- unsigned char buf;
int ret = -1, request;
if (pdev->type < 675)
@@ -575,14 +541,13 @@ int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
else
request = SENSOR_TYPE_FORMATTER2;
- ret = recv_control_msg(pdev,
- GET_STATUS_CTL, request, &buf, sizeof(buf));
+ ret = recv_control_msg(pdev, GET_STATUS_CTL, request, 1);
if (ret < 0)
return ret;
if (pdev->type < 675)
- *sensor = buf | 0x100;
+ *sensor = pdev->ctrl_buf[0] | 0x100;
else
- *sensor = buf;
+ *sensor = pdev->ctrl_buf[0];
return 0;
}
#endif
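
The stack buffers above become pdev->ctrl_buf because usb_control_msg() hands its buffer to the host controller for DMA, and DMA to stack memory is not allowed; one kmalloc'ed per-device buffer also avoids the old kmemdup()/kmalloc() on every request. A minimal sketch of the rule (the request and value constants are made up):

#include <linux/usb.h>

/* The data buffer for a control transfer must be heap memory,
 * never a local array. */
static int read_one_byte(struct usb_device *udev, u8 *heap_buf)
{
	return usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			       0x01,	/* bRequest, illustrative */
			       USB_DIR_IN | USB_TYPE_VENDOR |
			       USB_RECIP_DEVICE,
			       0, 0,	/* wValue, wIndex */
			       heap_buf, 1, USB_CTRL_GET_TIMEOUT);
}
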
diff --git a/drivers/media/video/pwc/pwc-dec1.c b/drivers/media/video/pwc/pwc-dec1.c
index be0e02cb487..e899036aadf 100644
--- a/drivers/media/video/pwc/pwc-dec1.c
+++ b/drivers/media/video/pwc/pwc-dec1.c
@@ -22,19 +22,11 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include "pwc-dec1.h"
+#include "pwc.h"
-int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer)
+void pwc_dec1_init(struct pwc_device *pdev, const unsigned char *cmd)
{
- struct pwc_dec1_private *pdec;
+ struct pwc_dec1_private *pdec = &pdev->dec1;
- if (pwc->decompress_data == NULL) {
- pdec = kmalloc(sizeof(struct pwc_dec1_private), GFP_KERNEL);
- if (pdec == NULL)
- return -ENOMEM;
- pwc->decompress_data = pdec;
- }
- pdec = pwc->decompress_data;
-
- return 0;
+ pdec->version = pdev->release;
}
diff --git a/drivers/media/video/pwc/pwc-dec1.h b/drivers/media/video/pwc/pwc-dec1.h
index a57d8601080..c565ef8f52f 100644
--- a/drivers/media/video/pwc/pwc-dec1.h
+++ b/drivers/media/video/pwc/pwc-dec1.h
@@ -25,13 +25,15 @@
#ifndef PWC_DEC1_H
#define PWC_DEC1_H
-#include "pwc.h"
+#include <linux/mutex.h>
+
+struct pwc_device;
struct pwc_dec1_private
{
int version;
};
-int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer);
+void pwc_dec1_init(struct pwc_device *pdev, const unsigned char *cmd);
#endif
diff --git a/drivers/media/video/pwc/pwc-dec23.c b/drivers/media/video/pwc/pwc-dec23.c
index 2c6709112b2..3792fedff95 100644
--- a/drivers/media/video/pwc/pwc-dec23.c
+++ b/drivers/media/video/pwc/pwc-dec23.c
@@ -294,22 +294,17 @@ static unsigned char pwc_crop_table[256 + 2*MAX_OUTER_CROP_VALUE];
/* If the type or the command change, we rebuild the lookup table */
-int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd)
+void pwc_dec23_init(struct pwc_device *pdev, const unsigned char *cmd)
{
int flags, version, shift, i;
- struct pwc_dec23_private *pdec;
-
- if (pwc->decompress_data == NULL) {
- pdec = kmalloc(sizeof(struct pwc_dec23_private), GFP_KERNEL);
- if (pdec == NULL)
- return -ENOMEM;
- pwc->decompress_data = pdec;
- }
- pdec = pwc->decompress_data;
+ struct pwc_dec23_private *pdec = &pdev->dec23;
mutex_init(&pdec->lock);
- if (DEVICE_USE_CODEC3(type)) {
+ if (pdec->last_cmd_valid && pdec->last_cmd == cmd[2])
+ return;
+
+ if (DEVICE_USE_CODEC3(pdev->type)) {
flags = cmd[2] & 0x18;
if (flags == 8)
pdec->nbits = 7; /* More bits mean a larger encoded stream, but better quality */
@@ -356,7 +351,8 @@ int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd)
pwc_crop_table[MAX_OUTER_CROP_VALUE+256+i] = 255;
#endif
- return 0;
+ pdec->last_cmd = cmd[2];
+ pdec->last_cmd_valid = 1;
}
/*
@@ -659,12 +655,12 @@ static void DecompressBand23(struct pwc_dec23_private *pdec,
* src: raw data
* dst: image output
*/
-void pwc_dec23_decompress(const struct pwc_device *pwc,
+void pwc_dec23_decompress(struct pwc_device *pdev,
const void *src,
void *dst)
{
int bandlines_left, bytes_per_block;
- struct pwc_dec23_private *pdec = pwc->decompress_data;
+ struct pwc_dec23_private *pdec = &pdev->dec23;
/* YUV420P image format */
unsigned char *pout_planar_y;
@@ -674,23 +670,22 @@ void pwc_dec23_decompress(const struct pwc_device *pwc,
mutex_lock(&pdec->lock);
- bandlines_left = pwc->height / 4;
- bytes_per_block = pwc->width * 4;
- plane_size = pwc->height * pwc->width;
+ bandlines_left = pdev->height / 4;
+ bytes_per_block = pdev->width * 4;
+ plane_size = pdev->height * pdev->width;
pout_planar_y = dst;
pout_planar_u = dst + plane_size;
pout_planar_v = dst + plane_size + plane_size / 4;
while (bandlines_left--) {
- DecompressBand23(pwc->decompress_data,
- src,
+ DecompressBand23(pdec, src,
pout_planar_y, pout_planar_u, pout_planar_v,
- pwc->width, pwc->width);
- src += pwc->vbandlength;
+ pdev->width, pdev->width);
+ src += pdev->vbandlength;
pout_planar_y += bytes_per_block;
- pout_planar_u += pwc->width;
- pout_planar_v += pwc->width;
+ pout_planar_u += pdev->width;
+ pout_planar_v += pdev->width;
}
mutex_unlock(&pdec->lock);
}
diff --git a/drivers/media/video/pwc/pwc-dec23.h b/drivers/media/video/pwc/pwc-dec23.h
index d64a3c281af..c655b1c1e6a 100644
--- a/drivers/media/video/pwc/pwc-dec23.h
+++ b/drivers/media/video/pwc/pwc-dec23.h
@@ -25,17 +25,20 @@
#ifndef PWC_DEC23_H
#define PWC_DEC23_H
-#include "pwc.h"
+struct pwc_device;
struct pwc_dec23_private
{
struct mutex lock;
+ unsigned char last_cmd, last_cmd_valid;
+
unsigned int scalebits;
unsigned int nbitsmask, nbits; /* Number of bits of a color in the compressed stream */
unsigned int reservoir;
unsigned int nbits_in_reservoir;
+
const unsigned char *stream;
int temp_colors[16];
@@ -51,8 +54,8 @@ struct pwc_dec23_private
};
-int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd);
-void pwc_dec23_decompress(const struct pwc_device *pwc,
+void pwc_dec23_init(struct pwc_device *pdev, const unsigned char *cmd);
+void pwc_dec23_decompress(struct pwc_device *pdev,
const void *src,
void *dst);
#endif
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 943d37ad0d3..122fbd0081e 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -128,18 +128,11 @@ static struct usb_driver pwc_driver = {
#define MAX_DEV_HINTS 20
#define MAX_ISOC_ERRORS 20
-static int default_fps = 10;
#ifdef CONFIG_USB_PWC_DEBUG
int pwc_trace = PWC_DEBUG_LEVEL;
#endif
static int power_save = -1;
-static int led_on = 100, led_off; /* defaults to LED that is on while in use */
-static struct {
- int type;
- char serial_number[30];
- int device_node;
- struct pwc_device *pdev;
-} device_hint[MAX_DEV_HINTS];
+static int leds[2] = { 100, 0 };
/***/
@@ -386,8 +379,8 @@ static int pwc_isoc_init(struct pwc_device *pdev)
retry:
/* We first try with low compression and then retry with a higher
compression setting if there is not enough bandwidth. */
- ret = pwc_set_video_mode(pdev, pdev->width, pdev->height,
- pdev->vframes, &compression);
+ ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, pdev->pixfmt,
+ pdev->vframes, &compression, 1);
/* Get the current alternate interface, adjust packet size */
intf = usb_ifnum_to_if(udev, 0);
@@ -597,23 +590,9 @@ leave:
static void pwc_video_release(struct v4l2_device *v)
{
struct pwc_device *pdev = container_of(v, struct pwc_device, v4l2_dev);
- int hint;
-
- /* search device_hint[] table if we occupy a slot, by any chance */
- for (hint = 0; hint < MAX_DEV_HINTS; hint++)
- if (device_hint[hint].pdev == pdev)
- device_hint[hint].pdev = NULL;
-
- /* Free intermediate decompression buffer & tables */
- if (pdev->decompress_data != NULL) {
- PWC_DEBUG_MEMORY("Freeing decompression buffer at %p.\n",
- pdev->decompress_data);
- kfree(pdev->decompress_data);
- pdev->decompress_data = NULL;
- }
v4l2_ctrl_handler_free(&pdev->ctrl_handler);
-
+ kfree(pdev->ctrl_buf);
kfree(pdev);
}
@@ -758,7 +737,7 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
/* Turn on camera and set LEDS on */
pwc_camera_power(pdev, 1);
- pwc_set_leds(pdev, led_on, led_off);
+ pwc_set_leds(pdev, leds[0], leds[1]);
r = pwc_isoc_init(pdev);
if (r) {
@@ -813,10 +792,9 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
struct usb_device *udev = interface_to_usbdev(intf);
struct pwc_device *pdev = NULL;
int vendor_id, product_id, type_id;
- int hint, rc;
+ int rc;
int features = 0;
int compression = 0;
- int video_nr = -1; /* default: use next available device */
int my_power_save = power_save;
char serial_number[30], *name;
@@ -1076,7 +1054,6 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
return -ENOMEM;
}
pdev->type = type_id;
- pdev->vframes = default_fps;
pdev->features = features;
pwc_construct(pdev); /* set min/max sizes correct */
@@ -1107,24 +1084,14 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pdev->release = le16_to_cpu(udev->descriptor.bcdDevice);
PWC_DEBUG_PROBE("Release: %04x\n", pdev->release);
- /* Now search device_hint[] table for a match, so we can hint a node number. */
- for (hint = 0; hint < MAX_DEV_HINTS; hint++) {
- if (((device_hint[hint].type == -1) || (device_hint[hint].type == pdev->type)) &&
- (device_hint[hint].pdev == NULL)) {
- /* so far, so good... try serial number */
- if ((device_hint[hint].serial_number[0] == '*') || !strcmp(device_hint[hint].serial_number, serial_number)) {
- /* match! */
- video_nr = device_hint[hint].device_node;
- PWC_DEBUG_PROBE("Found hint, will try to register as /dev/video%d\n", video_nr);
- break;
- }
- }
+ /* Allocate USB command buffers */
+ pdev->ctrl_buf = kmalloc(sizeof(pdev->cmd_buf), GFP_KERNEL);
+ if (!pdev->ctrl_buf) {
+ PWC_ERROR("Oops, could not allocate memory for pwc_device.\n");
+ rc = -ENOMEM;
+ goto err_free_mem;
}
- /* occupy slot */
- if (hint < MAX_DEV_HINTS)
- device_hint[hint].pdev = pdev;
-
#ifdef CONFIG_USB_PWC_DEBUG
/* Query sensor type */
if (pwc_get_cmos_sensor(pdev, &rc) >= 0) {
@@ -1138,8 +1105,8 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pwc_set_leds(pdev, 0, 0);
/* Set up initial video mode */
- rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT, pdev->vframes,
- &compression);
+ rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT,
+ V4L2_PIX_FMT_YUV420, 30, &compression, 1);
if (rc)
goto err_free_mem;
@@ -1164,7 +1131,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pdev->v4l2_dev.ctrl_handler = &pdev->ctrl_handler;
pdev->vdev.v4l2_dev = &pdev->v4l2_dev;
- rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, video_nr);
+ rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, -1);
if (rc < 0) {
PWC_ERROR("Failed to register as video device (%d).\n", rc);
goto err_unregister_v4l2_dev;
@@ -1207,8 +1174,7 @@ err_unregister_v4l2_dev:
err_free_controls:
v4l2_ctrl_handler_free(&pdev->ctrl_handler);
err_free_mem:
- if (hint < MAX_DEV_HINTS)
- device_hint[hint].pdev = NULL;
+ kfree(pdev->ctrl_buf);
kfree(pdev);
return rc;
}
@@ -1243,27 +1209,19 @@ static void usb_pwc_disconnect(struct usb_interface *intf)
* Initialization code & module stuff
*/
-static int fps;
-static int leds[2] = { -1, -1 };
static unsigned int leds_nargs;
-static char *dev_hint[MAX_DEV_HINTS];
-static unsigned int dev_hint_nargs;
-module_param(fps, int, 0444);
#ifdef CONFIG_USB_PWC_DEBUG
module_param_named(trace, pwc_trace, int, 0644);
#endif
module_param(power_save, int, 0644);
module_param_array(leds, int, &leds_nargs, 0444);
-module_param_array(dev_hint, charp, &dev_hint_nargs, 0444);
-MODULE_PARM_DESC(fps, "Initial frames per second. Varies with model, useful range 5-30");
#ifdef CONFIG_USB_PWC_DEBUG
MODULE_PARM_DESC(trace, "For debugging purposes");
#endif
MODULE_PARM_DESC(power_save, "Turn power saving for new cameras on or off");
MODULE_PARM_DESC(leds, "LED on,off time in milliseconds");
-MODULE_PARM_DESC(dev_hint, "Device node hints");
MODULE_DESCRIPTION("Philips & OEM USB webcam driver");
MODULE_AUTHOR("Luc Saillard <luc@saillard.org>");
@@ -1273,114 +1231,13 @@ MODULE_VERSION( PWC_VERSION );
static int __init usb_pwc_init(void)
{
- int i;
-
-#ifdef CONFIG_USB_PWC_DEBUG
- PWC_INFO("Philips webcam module version " PWC_VERSION " loaded.\n");
- PWC_INFO("Supports Philips PCA645/646, PCVC675/680/690, PCVC720[40]/730/740/750 & PCVC830/840.\n");
- PWC_INFO("Also supports the Askey VC010, various Logitech Quickcams, Samsung MPC-C10 and MPC-C30,\n");
- PWC_INFO("the Creative WebCam 5 & Pro Ex, SOTEC Afina Eye and Visionite VCS-UC300 and VCS-UM100.\n");
-
- if (pwc_trace >= 0) {
- PWC_DEBUG_MODULE("Trace options: 0x%04x\n", pwc_trace);
- }
-#endif
-
- if (fps) {
- if (fps < 4 || fps > 30) {
- PWC_ERROR("Framerate out of bounds (4-30).\n");
- return -EINVAL;
- }
- default_fps = fps;
- PWC_DEBUG_MODULE("Default framerate set to %d.\n", default_fps);
- }
-
- if (leds[0] >= 0)
- led_on = leds[0];
- if (leds[1] >= 0)
- led_off = leds[1];
-
- /* Big device node whoopla. Basically, it allows you to assign a
- device node (/dev/videoX) to a camera, based on its type
- & serial number. The format is [type[.serialnumber]:]node.
-
- Any camera that isn't matched by these rules gets the next
- available free device node.
- */
- for (i = 0; i < MAX_DEV_HINTS; i++) {
- char *s, *colon, *dot;
-
- /* This loop also initializes the array */
- device_hint[i].pdev = NULL;
- s = dev_hint[i];
- if (s != NULL && *s != '\0') {
- device_hint[i].type = -1; /* wildcard */
- strcpy(device_hint[i].serial_number, "*");
-
- /* parse string: chop at ':' & '/' */
- colon = dot = s;
- while (*colon != '\0' && *colon != ':')
- colon++;
- while (*dot != '\0' && *dot != '.')
- dot++;
- /* Few sanity checks */
- if (*dot != '\0' && dot > colon) {
- PWC_ERROR("Malformed camera hint: the colon must be after the dot.\n");
- return -EINVAL;
- }
-
- if (*colon == '\0') {
- /* No colon */
- if (*dot != '\0') {
- PWC_ERROR("Malformed camera hint: no colon + device node given.\n");
- return -EINVAL;
- }
- else {
- /* No type or serial number specified, just a number. */
- device_hint[i].device_node =
- simple_strtol(s, NULL, 10);
- }
- }
- else {
- /* There's a colon, so we have at least a type and a device node */
- device_hint[i].type =
- simple_strtol(s, NULL, 10);
- device_hint[i].device_node =
- simple_strtol(colon + 1, NULL, 10);
- if (*dot != '\0') {
- /* There's a serial number as well */
- int k;
-
- dot++;
- k = 0;
- while (*dot != ':' && k < 29) {
- device_hint[i].serial_number[k++] = *dot;
- dot++;
- }
- device_hint[i].serial_number[k] = '\0';
- }
- }
- PWC_TRACE("device_hint[%d]:\n", i);
- PWC_TRACE(" type : %d\n", device_hint[i].type);
- PWC_TRACE(" serial# : %s\n", device_hint[i].serial_number);
- PWC_TRACE(" node : %d\n", device_hint[i].device_node);
- }
- else
- device_hint[i].type = 0; /* not filled */
- } /* ..for MAX_DEV_HINTS */
-
- PWC_DEBUG_PROBE("Registering driver at address 0x%p.\n", &pwc_driver);
return usb_register(&pwc_driver);
}
static void __exit usb_pwc_exit(void)
{
- PWC_DEBUG_MODULE("Deregistering driver.\n");
usb_deregister(&pwc_driver);
- PWC_INFO("Philips webcam module removed.\n");
}
module_init(usb_pwc_init);
module_exit(usb_pwc_exit);
-
-/* vim: set cino= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
diff --git a/drivers/media/video/pwc/pwc-misc.c b/drivers/media/video/pwc/pwc-misc.c
index 23a55b5814f..9be5adffa87 100644
--- a/drivers/media/video/pwc/pwc-misc.c
+++ b/drivers/media/video/pwc/pwc-misc.c
@@ -90,5 +90,4 @@ void pwc_construct(struct pwc_device *pdev)
pdev->frame_header_size = 0;
pdev->frame_trailer_size = 0;
}
- pdev->pixfmt = V4L2_PIX_FMT_YUV420; /* default */
}
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c
index 80e25842e84..f495eeb5403 100644
--- a/drivers/media/video/pwc/pwc-v4l.c
+++ b/drivers/media/video/pwc/pwc-v4l.c
@@ -493,16 +493,11 @@ static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
(pixelformat>>24)&255);
ret = pwc_set_video_mode(pdev, f->fmt.pix.width, f->fmt.pix.height,
- pdev->vframes, &compression);
+ pixelformat, 30, &compression, 0);
PWC_DEBUG_IOCTL("pwc_set_video_mode(), return=%d\n", ret);
- if (ret == 0) {
- pdev->pixfmt = pixelformat;
- pwc_vidioc_fill_fmt(f, pdev->width, pdev->height,
- pdev->pixfmt);
- }
-
+ pwc_vidioc_fill_fmt(f, pdev->width, pdev->height, pdev->pixfmt);
leave:
mutex_unlock(&pdev->udevlock);
return ret;
@@ -777,33 +772,33 @@ static int pwc_set_autogain_expo(struct pwc_device *pdev)
static int pwc_set_motor(struct pwc_device *pdev)
{
int ret;
- u8 buf[4];
- buf[0] = 0;
+ pdev->ctrl_buf[0] = 0;
if (pdev->motor_pan_reset->is_new)
- buf[0] |= 0x01;
+ pdev->ctrl_buf[0] |= 0x01;
if (pdev->motor_tilt_reset->is_new)
- buf[0] |= 0x02;
+ pdev->ctrl_buf[0] |= 0x02;
if (pdev->motor_pan_reset->is_new || pdev->motor_tilt_reset->is_new) {
ret = send_control_msg(pdev, SET_MPT_CTL,
- PT_RESET_CONTROL_FORMATTER, buf, 1);
+ PT_RESET_CONTROL_FORMATTER,
+ pdev->ctrl_buf, 1);
if (ret < 0)
return ret;
}
- memset(buf, 0, sizeof(buf));
+ memset(pdev->ctrl_buf, 0, 4);
if (pdev->motor_pan->is_new) {
- buf[0] = pdev->motor_pan->val & 0xFF;
- buf[1] = (pdev->motor_pan->val >> 8);
+ pdev->ctrl_buf[0] = pdev->motor_pan->val & 0xFF;
+ pdev->ctrl_buf[1] = (pdev->motor_pan->val >> 8);
}
if (pdev->motor_tilt->is_new) {
- buf[2] = pdev->motor_tilt->val & 0xFF;
- buf[3] = (pdev->motor_tilt->val >> 8);
+ pdev->ctrl_buf[2] = pdev->motor_tilt->val & 0xFF;
+ pdev->ctrl_buf[3] = (pdev->motor_tilt->val >> 8);
}
if (pdev->motor_pan->is_new || pdev->motor_tilt->is_new) {
ret = send_control_msg(pdev, SET_MPT_CTL,
PT_RELATIVE_CONTROL_FORMATTER,
- buf, sizeof(buf));
+ pdev->ctrl_buf, 4);
if (ret < 0)
return ret;
}
@@ -1094,6 +1089,63 @@ static int pwc_enum_frameintervals(struct file *file, void *fh,
return 0;
}
+static int pwc_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
+{
+ struct pwc_device *pdev = video_drvdata(file);
+
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ memset(parm, 0, sizeof(*parm));
+
+ parm->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ parm->parm.capture.readbuffers = MIN_FRAMES;
+ parm->parm.capture.capability |= V4L2_CAP_TIMEPERFRAME;
+ parm->parm.capture.timeperframe.denominator = pdev->vframes;
+ parm->parm.capture.timeperframe.numerator = 1;
+
+ return 0;
+}
+
+static int pwc_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
+{
+ struct pwc_device *pdev = video_drvdata(file);
+ int compression = 0;
+ int ret, fps;
+
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ parm->parm.capture.timeperframe.numerator == 0)
+ return -EINVAL;
+
+ if (pwc_test_n_set_capt_file(pdev, file))
+ return -EBUSY;
+
+ fps = parm->parm.capture.timeperframe.denominator /
+ parm->parm.capture.timeperframe.numerator;
+
+ mutex_lock(&pdev->udevlock);
+ if (!pdev->udev) {
+ ret = -ENODEV;
+ goto leave;
+ }
+
+ if (pdev->iso_init) {
+ ret = -EBUSY;
+ goto leave;
+ }
+
+ ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, pdev->pixfmt,
+ fps, &compression, 0);
+
+ pwc_g_parm(file, fh, parm);
+
+leave:
+ mutex_unlock(&pdev->udevlock);
+ return ret;
+}
+
static int pwc_log_status(struct file *file, void *priv)
{
struct pwc_device *pdev = video_drvdata(file);
@@ -1120,4 +1172,6 @@ const struct v4l2_ioctl_ops pwc_ioctl_ops = {
.vidioc_log_status = pwc_log_status,
.vidioc_enum_framesizes = pwc_enum_framesizes,
.vidioc_enum_frameintervals = pwc_enum_frameintervals,
+ .vidioc_g_parm = pwc_g_parm,
+ .vidioc_s_parm = pwc_s_parm,
};
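
The fps handed to pwc_set_video_mode() in pwc_s_parm() is the integer quotient of the V4L2 fraction: a timeperframe of 1/25 requests 25 fps, while 2/15 requests 7 fps; the per-type set_video_mode_*() helpers above then clamp that to the nearest rate the sensor tables support, and pwc_g_parm() reports the resulting pdev->vframes back.
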
diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
index 47c518fef17..e4d4d711dd1 100644
--- a/drivers/media/video/pwc/pwc.h
+++ b/drivers/media/video/pwc/pwc.h
@@ -44,6 +44,8 @@
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
#include <linux/input.h>
#endif
+#include "pwc-dec1.h"
+#include "pwc-dec23.h"
/* Version block */
#define PWC_VERSION "10.0.15"
@@ -132,9 +134,6 @@
#define DEVICE_USE_CODEC3(x) ((x)>=700)
#define DEVICE_USE_CODEC23(x) ((x)>=675)
-/* from pwc-dec.h */
-#define PWCX_FLAG_PLANAR 0x0001
-
/* Request types: video */
#define SET_LUM_CTL 0x01
#define GET_LUM_CTL 0x02
@@ -248,8 +247,8 @@ struct pwc_device
char vmirror; /* for ToUCaM series */
char power_save; /* Do powersaving for this cam */
- int cmd_len;
unsigned char cmd_buf[13];
+ unsigned char *ctrl_buf;
struct urb *urbs[MAX_ISO_BUFS];
char iso_init;
@@ -272,7 +271,10 @@ struct pwc_device
int frame_total_size; /* including header & trailer */
int drop_frames;
- void *decompress_data; /* private data for decompression engine */
+ union { /* private data for decompression engine */
+ struct pwc_dec1_private dec1;
+ struct pwc_dec23_private dec23;
+ };
/*
* We have an 'image' and a 'view', where 'image' is the fixed-size img
@@ -364,7 +366,7 @@ void pwc_construct(struct pwc_device *pdev);
/** Functions in pwc-ctrl.c */
/* Request a certain video mode. Returns < 0 if not possible */
extern int pwc_set_video_mode(struct pwc_device *pdev, int width, int height,
- int frames, int *compression);
+ int pixfmt, int frames, int *compression, int send_to_cam);
extern unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned int size);
extern int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value);
extern int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor);
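
A device uses either the codec1 or the codec23 decompressor, never both, so the anonymous union above overlays their private state directly inside pwc_device and removes the separately kmalloc'ed decompress_data blob along with its allocation-failure path. The shape of the trick, reduced to essentials:

/* Only one member is ever live, so the union costs max(), not the
 * sum, of the two sizes, and no separate allocation can fail. */
struct demo_device {
	int type;
	union {
		struct pwc_dec1_private dec1;
		struct pwc_dec23_private dec23;
	};
};
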
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index 510cfab477f..a9e9653beeb 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -693,7 +693,7 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
mf->code = 0;
continue;
}
- if (mf->width != tfmt->width || mf->width != tfmt->width) {
+ if (mf->width != tfmt->width || mf->height != tfmt->height) {
u32 fcc = ffmt->fourcc;
tfmt->width = mf->width;
tfmt->height = mf->height;
@@ -702,7 +702,8 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
NULL, &fcc, FIMC_SD_PAD_SOURCE);
if (ffmt && ffmt->mbus_code)
mf->code = ffmt->mbus_code;
- if (mf->width != tfmt->width || mf->width != tfmt->width)
+ if (mf->width != tfmt->width ||
+ mf->height != tfmt->height)
continue;
tfmt->code = mf->code;
}
@@ -710,7 +711,7 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
ret = v4l2_subdev_call(csis, pad, set_fmt, NULL, &sfmt);
if (mf->code == tfmt->code &&
- mf->width == tfmt->width && mf->width == tfmt->width)
+ mf->width == tfmt->width && mf->height == tfmt->height)
break;
}
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index f5cbb8a4c54..81bcbb9492e 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -848,11 +848,11 @@ int fimc_ctrls_create(struct fimc_ctx *ctx)
v4l2_ctrl_handler_init(&ctx->ctrl_handler, 4);
ctx->ctrl_rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
- V4L2_CID_HFLIP, 0, 1, 1, 0);
+ V4L2_CID_ROTATE, 0, 270, 90, 0);
ctx->ctrl_hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
- V4L2_CID_VFLIP, 0, 1, 1, 0);
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
ctx->ctrl_vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
- V4L2_CID_ROTATE, 0, 270, 90, 0);
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
if (variant->has_alpha)
ctx->ctrl_alpha = v4l2_ctrl_new_std(&ctx->ctrl_handler,
&fimc_ctrl_ops, V4L2_CID_ALPHA_COMPONENT,
diff --git a/drivers/media/video/s5p-fimc/fimc-mdevice.c b/drivers/media/video/s5p-fimc/fimc-mdevice.c
index 615c862f036..8ea4ee116e4 100644
--- a/drivers/media/video/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/video/s5p-fimc/fimc-mdevice.c
@@ -21,7 +21,6 @@
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <media/v4l2-ctrls.h>
#include <media/media-device.h>
diff --git a/drivers/media/video/s5p-g2d/g2d.c b/drivers/media/video/s5p-g2d/g2d.c
index c40b0dde188..febaa673d36 100644
--- a/drivers/media/video/s5p-g2d/g2d.c
+++ b/drivers/media/video/s5p-g2d/g2d.c
@@ -184,6 +184,7 @@ static int g2d_s_ctrl(struct v4l2_ctrl *ctrl)
ctx->rop = ROP4_INVERT;
else
ctx->rop = ROP4_COPY;
+ break;
default:
v4l2_err(&ctx->dev->v4l2_dev, "unknown control\n");
return -EINVAL;
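
Without the added break, the ROP4 assignment falls through into the default branch, so a control that was handled correctly would still log "unknown control" and return -EINVAL.
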
diff --git a/drivers/media/video/s5p-jpeg/jpeg-core.c b/drivers/media/video/s5p-jpeg/jpeg-core.c
index f841a3e9845..1105a8749c8 100644
--- a/drivers/media/video/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/video/s5p-jpeg/jpeg-core.c
@@ -989,9 +989,10 @@ static struct v4l2_m2m_ops s5p_jpeg_m2m_ops = {
* ============================================================================
*/
-static int s5p_jpeg_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
- unsigned int *nplanes, unsigned int sizes[],
- void *alloc_ctxs[])
+static int s5p_jpeg_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
struct s5p_jpeg_q_data *q_data = NULL;
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc.c b/drivers/media/video/s5p-mfc/s5p_mfc.c
index e43e128baf5..83fe461af26 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc.c
@@ -18,7 +18,6 @@
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <linux/workqueue.h>
#include <media/videobuf2-core.h>
@@ -475,7 +474,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
ctx->mv_size = 0;
}
ctx->dpb_count = s5p_mfc_get_dpb_count();
- if (ctx->img_width == 0 || ctx->img_width == 0)
+ if (ctx->img_width == 0 || ctx->img_height == 0)
ctx->state = MFCINST_ERROR;
else
ctx->state = MFCINST_HEAD_PARSED;
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
index 844a4d7797b..c25ec022d26 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
@@ -165,7 +165,7 @@ static struct mfc_control controls[] = {
.maximum = 32,
.step = 1,
.default_value = 1,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
+ .is_volatile = 1,
},
};
diff --git a/drivers/media/video/saa7164/saa7164-cards.c b/drivers/media/video/saa7164/saa7164-cards.c
index 971591d6450..5b72da5ce41 100644
--- a/drivers/media/video/saa7164/saa7164-cards.c
+++ b/drivers/media/video/saa7164/saa7164-cards.c
@@ -269,8 +269,6 @@ struct saa7164_board saa7164_boards[] = {
.portb = SAA7164_MPEG_DVB,
.portc = SAA7164_MPEG_ENCODER,
.portd = SAA7164_MPEG_ENCODER,
- .portc = SAA7164_MPEG_ENCODER,
- .portd = SAA7164_MPEG_ENCODER,
.porte = SAA7164_MPEG_VBI,
.portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV3,
@@ -333,8 +331,6 @@ struct saa7164_board saa7164_boards[] = {
.portd = SAA7164_MPEG_ENCODER,
.porte = SAA7164_MPEG_VBI,
.portf = SAA7164_MPEG_VBI,
- .porte = SAA7164_MPEG_VBI,
- .portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV3,
.unit = {{
.id = 0x28,
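
Both hunks above remove duplicated designated initializers. Repeating a field in a C initializer list is legal and the last value silently wins (GCC only warns with -Woverride-init), so the duplicates were harmless but misleading. For example:

/* Legal C: the second .a overrides the first without an error. */
struct demo { int a; };
struct demo d = { .a = 1, .a = 2 };	/* d.a == 2 */
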
diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
index 0a2d75f0406..4ed1c7c28ae 100644
--- a/drivers/media/video/timblogiw.c
+++ b/drivers/media/video/timblogiw.c
@@ -565,7 +565,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
spin_unlock_irq(&fh->queue_lock);
desc = fh->chan->device->device_prep_slave_sg(fh->chan,
- buf->sg, sg_elems, DMA_FROM_DEVICE,
+ buf->sg, sg_elems, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
if (!desc) {
spin_lock_irq(&fh->queue_lock);
diff --git a/drivers/media/video/tlg2300/pd-main.c b/drivers/media/video/tlg2300/pd-main.c
index 129f135d5a5..c096b3f7420 100644
--- a/drivers/media/video/tlg2300/pd-main.c
+++ b/drivers/media/video/tlg2300/pd-main.c
@@ -374,7 +374,7 @@ static inline void set_map_flags(struct poseidon *pd, struct usb_device *udev)
}
#endif
-static bool check_firmware(struct usb_device *udev, int *down_firmware)
+static int check_firmware(struct usb_device *udev, int *down_firmware)
{
void *buf;
int ret;
@@ -398,7 +398,7 @@ static bool check_firmware(struct usb_device *udev, int *down_firmware)
*down_firmware = 1;
return firmware_download(udev);
}
- return ret;
+ return 0;
}
static int poseidon_probe(struct usb_interface *interface,
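
Declaring check_firmware() as bool meant a negative errno from firmware_download() collapsed to true on return; with an int return type the caller sees the real error code, and the success path now returns an explicit 0 instead of the stale ret.
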
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index da1f4c2d2d4..cccd42be718 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -465,8 +465,8 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_CHROMA_GAIN: return "Chroma Gain";
case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1";
case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2";
- case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Minimum Number of Capture Buffers";
- case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Minimum Number of Output Buffers";
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Min Number of Capture Buffers";
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Min Number of Output Buffers";
case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component";
/* MPEG controls */
@@ -506,25 +506,25 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV";
case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: return "Decoder Slice Interface";
case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: return "MPEG4 Loop Filter Enable";
- case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "The Number of Intra Refresh MBs";
+ case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "Number of Intra Refresh MBs";
case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: return "Frame Level Rate Control Enable";
case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control";
case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return "Sequence Header Mode";
- case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "The Max Number of Reference Picture";
+ case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "Max Number of Reference Pics";
case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: return "H263 I-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_H263_MIN_QP: return "H263 Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_H263_MAX_QP: return "H263 Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: return "H264 I-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: return "H264 Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: return "H264 Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: return "H264 8x8 Transform Enable";
case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE: return "H264 CPB Buffer Size";
- case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entorpy Mode";
- case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I Period";
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entropy Mode";
+ case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I-Frame Period";
case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return "H264 Level";
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: return "H264 Loop Filter Alpha Offset";
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: return "H264 Loop Filter Beta Offset";
@@ -535,16 +535,16 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable";
case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC";
case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P frame QP Value";
- case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP: return "MPEG4 Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP: return "MPEG4 Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return "MPEG4 Level";
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return "MPEG4 Profile";
case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: return "Quarter Pixel Search Enable";
- case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "The Maximum Bytes Per Slice";
- case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "The Number of MB in a Slice";
- case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "The Slice Partitioning Method";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "Maximum Bytes in a Slice";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "Number of MBs in a Slice";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "Slice Partitioning Method";
case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size";
/* CAMERA controls */
@@ -580,7 +580,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_AUDIO_LIMITER_ENABLED: return "Audio Limiter Feature Enabled";
case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time";
case V4L2_CID_AUDIO_LIMITER_DEVIATION: return "Audio Limiter Deviation";
- case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Feature Enabled";
+ case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Enabled";
case V4L2_CID_AUDIO_COMPRESSION_GAIN: return "Audio Compression Gain";
case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold";
case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time";
@@ -588,24 +588,24 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_PILOT_TONE_ENABLED: return "Pilot Tone Feature Enabled";
case V4L2_CID_PILOT_TONE_DEVIATION: return "Pilot Tone Deviation";
case V4L2_CID_PILOT_TONE_FREQUENCY: return "Pilot Tone Frequency";
- case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-emphasis settings";
+ case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-Emphasis";
case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level";
case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor";
/* Flash controls */
- case V4L2_CID_FLASH_CLASS: return "Flash controls";
- case V4L2_CID_FLASH_LED_MODE: return "LED mode";
- case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe source";
+ case V4L2_CID_FLASH_CLASS: return "Flash Controls";
+ case V4L2_CID_FLASH_LED_MODE: return "LED Mode";
+ case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe Source";
case V4L2_CID_FLASH_STROBE: return "Strobe";
- case V4L2_CID_FLASH_STROBE_STOP: return "Stop strobe";
- case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe status";
- case V4L2_CID_FLASH_TIMEOUT: return "Strobe timeout";
- case V4L2_CID_FLASH_INTENSITY: return "Intensity, flash mode";
- case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, torch mode";
- case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, indicator";
+ case V4L2_CID_FLASH_STROBE_STOP: return "Stop Strobe";
+ case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe Status";
+ case V4L2_CID_FLASH_TIMEOUT: return "Strobe Timeout";
+ case V4L2_CID_FLASH_INTENSITY: return "Intensity, Flash Mode";
+ case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, Torch Mode";
+ case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, Indicator";
case V4L2_CID_FLASH_FAULT: return "Faults";
case V4L2_CID_FLASH_CHARGE: return "Charge";
- case V4L2_CID_FLASH_READY: return "Ready to strobe";
+ case V4L2_CID_FLASH_READY: return "Ready to Strobe";
default:
return NULL;
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 77feeb67e2d..3f623859a33 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -1871,6 +1871,7 @@ static long __video_do_ioctl(struct file *file,
case VIDIOC_S_FREQUENCY:
{
struct v4l2_frequency *p = arg;
+ enum v4l2_tuner_type type;
if (!ops->vidioc_s_frequency)
break;
@@ -1878,9 +1879,14 @@ static long __video_do_ioctl(struct file *file,
ret = ret_prio;
break;
}
+ type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
dbgarg(cmd, "tuner=%d, type=%d, frequency=%d\n",
p->tuner, p->type, p->frequency);
- ret = ops->vidioc_s_frequency(file, fh, p);
+ if (p->type != type)
+ ret = -EINVAL;
+ else
+ ret = ops->vidioc_s_frequency(file, fh, p);
break;
}
case VIDIOC_G_SLICED_VBI_CAP:
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index f6d26419445..4c09ab781ec 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -1958,7 +1958,6 @@ static int zoran_g_fbuf(struct file *file, void *__fh,
mutex_unlock(&zr->resource_lock);
fb->fmt.colorspace = V4L2_COLORSPACE_SRGB;
fb->fmt.field = V4L2_FIELD_INTERLACED;
- fb->flags = V4L2_FBUF_FLAG_OVERLAY;
fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING;
return 0;
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
index eb5cd28bc6d..a2d25e4857e 100644
--- a/drivers/misc/carma/carma-fpga-program.c
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -513,7 +513,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
* transaction, and then put it under external control
*/
memset(&config, 0, sizeof(config));
- config.direction = DMA_TO_DEVICE;
+ config.direction = DMA_MEM_TO_DEV;
config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
config.dst_maxburst = fpga_fifo_size(priv->regs) / 2 / 4;
ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index a7ee5027146..fcfe1eb5acc 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -823,6 +823,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
struct scatterlist *sg;
unsigned int i;
enum dma_data_direction direction;
+ enum dma_transfer_direction slave_dirn;
unsigned int sglen;
u32 iflags;
@@ -860,16 +861,19 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
if (host->caps.has_dma)
atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN);
- if (data->flags & MMC_DATA_READ)
+ if (data->flags & MMC_DATA_READ) {
direction = DMA_FROM_DEVICE;
- else
+ slave_dirn = DMA_DEV_TO_MEM;
+ } else {
direction = DMA_TO_DEVICE;
+ slave_dirn = DMA_MEM_TO_DEV;
+ }
sglen = dma_map_sg(chan->device->dev, data->sg,
data->sg_len, direction);
desc = chan->device->device_prep_slave_sg(chan,
- data->sg, sglen, direction,
+ data->sg, sglen, slave_dirn,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
goto unmap_exit;
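Note: drivers that both map buffers and prepare slave descriptors now carry two direction variables, because the two APIs deliberately take different enums after this change. A kernel-context sketch of the pattern the MMC hunks here (atmel-mci, mmci, mxcmmc, mxs-mmc) all follow; my_prep_data is a hypothetical helper:

    static struct dma_async_tx_descriptor *
    my_prep_data(struct dma_chan *chan, struct device *dev,
                 struct scatterlist *sgl, unsigned int sg_len, bool is_read)
    {
            enum dma_data_direction     map_dir;   /* for dma_map_sg() */
            enum dma_transfer_direction xfer_dir;  /* for prep_slave_sg() */
            int nents;

            if (is_read) {
                    map_dir  = DMA_FROM_DEVICE;
                    xfer_dir = DMA_DEV_TO_MEM;
            } else {
                    map_dir  = DMA_TO_DEVICE;
                    xfer_dir = DMA_MEM_TO_DEV;
            }

            nents = dma_map_sg(dev, sgl, sg_len, map_dir);
            if (nents == 0)
                    return NULL;

            return chan->device->device_prep_slave_sg(chan, sgl, nents,
                            xfer_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    }
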
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index ece03b491c7..0d955ffaf44 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -374,6 +374,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
struct dma_chan *chan;
struct dma_device *device;
struct dma_async_tx_descriptor *desc;
+ enum dma_data_direction buffer_dirn;
int nr_sg;
/* Check if next job is already prepared */
@@ -387,10 +388,12 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
}
if (data->flags & MMC_DATA_READ) {
- conf.direction = DMA_FROM_DEVICE;
+ conf.direction = DMA_DEV_TO_MEM;
+ buffer_dirn = DMA_FROM_DEVICE;
chan = host->dma_rx_channel;
} else {
- conf.direction = DMA_TO_DEVICE;
+ conf.direction = DMA_MEM_TO_DEV;
+ buffer_dirn = DMA_TO_DEVICE;
chan = host->dma_tx_channel;
}
@@ -403,7 +406,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
return -EINVAL;
device = chan->device;
- nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
+ nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
if (nr_sg == 0)
return -EINVAL;
@@ -426,7 +429,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
unmap_exit:
if (!next)
dmaengine_terminate_all(chan);
- dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+ dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
return -ENOMEM;
}
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 7088b40f957..4184b7946bb 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -218,6 +218,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
unsigned int blksz = data->blksz;
unsigned int datasize = nob * blksz;
struct scatterlist *sg;
+ enum dma_transfer_direction slave_dirn;
int i, nents;
if (data->flags & MMC_DATA_STREAM)
@@ -240,10 +241,13 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
}
}
- if (data->flags & MMC_DATA_READ)
+ if (data->flags & MMC_DATA_READ) {
host->dma_dir = DMA_FROM_DEVICE;
- else
+ slave_dirn = DMA_DEV_TO_MEM;
+ } else {
host->dma_dir = DMA_TO_DEVICE;
+ slave_dirn = DMA_MEM_TO_DEV;
+ }
nents = dma_map_sg(host->dma->device->dev, data->sg,
data->sg_len, host->dma_dir);
@@ -251,7 +255,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
return -EINVAL;
host->desc = host->dma->device->device_prep_slave_sg(host->dma,
- data->sg, data->sg_len, host->dma_dir,
+ data->sg, data->sg_len, slave_dirn,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!host->desc) {
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 4e2e019dd5c..382c835d217 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -154,6 +154,7 @@ struct mxs_mmc_host {
struct dma_chan *dmach;
struct mxs_dma_data dma_data;
unsigned int dma_dir;
+ enum dma_transfer_direction slave_dirn;
u32 ssp_pio_words[SSP_PIO_NUM];
unsigned int version;
@@ -324,7 +325,7 @@ static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
}
desc = host->dmach->device->device_prep_slave_sg(host->dmach,
- sgl, sg_len, host->dma_dir, append);
+ sgl, sg_len, host->slave_dirn, append);
if (desc) {
desc->callback = mxs_mmc_dma_irq_callback;
desc->callback_param = host;
@@ -356,6 +357,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host)
host->ssp_pio_words[1] = cmd0;
host->ssp_pio_words[2] = cmd1;
host->dma_dir = DMA_NONE;
+ host->slave_dirn = DMA_TRANS_NONE;
desc = mxs_mmc_prep_dma(host, 0);
if (!desc)
goto out;
@@ -395,6 +397,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
host->ssp_pio_words[1] = cmd0;
host->ssp_pio_words[2] = cmd1;
host->dma_dir = DMA_NONE;
+ host->slave_dirn = DMA_TRANS_NONE;
desc = mxs_mmc_prep_dma(host, 0);
if (!desc)
goto out;
@@ -433,6 +436,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
int i;
unsigned short dma_data_dir, timeout;
+ enum dma_transfer_direction slave_dirn;
unsigned int data_size = 0, log2_blksz;
unsigned int blocks = data->blocks;
@@ -448,9 +452,11 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
if (data->flags & MMC_DATA_WRITE) {
dma_data_dir = DMA_TO_DEVICE;
+ slave_dirn = DMA_MEM_TO_DEV;
read = 0;
} else {
dma_data_dir = DMA_FROM_DEVICE;
+ slave_dirn = DMA_DEV_TO_MEM;
read = BM_SSP_CTRL0_READ;
}
@@ -510,6 +516,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
host->ssp_pio_words[1] = cmd0;
host->ssp_pio_words[2] = cmd1;
host->dma_dir = DMA_NONE;
+ host->slave_dirn = DMA_TRANS_NONE;
desc = mxs_mmc_prep_dma(host, 0);
if (!desc)
goto out;
@@ -518,6 +525,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
WARN_ON(host->data != NULL);
host->data = data;
host->dma_dir = dma_data_dir;
+ host->slave_dirn = slave_dirn;
desc = mxs_mmc_prep_dma(host, 1);
if (!desc)
goto out;
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 4a2c5b2355f..f5d8b53be33 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -286,7 +286,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
if (ret > 0) {
host->dma_active = true;
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
- DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
if (desc) {
@@ -335,7 +335,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
if (ret > 0) {
host->dma_active = true;
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
- DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
if (desc) {
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 86f259cdfcb..7a6e6cc8f8b 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -77,7 +77,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
if (ret > 0)
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
- DMA_FROM_DEVICE, DMA_CTRL_ACK);
+ DMA_DEV_TO_MEM, DMA_CTRL_ACK);
if (desc) {
cookie = dmaengine_submit(desc);
@@ -158,7 +158,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
if (ret > 0)
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
- DMA_TO_DEVICE, DMA_CTRL_ACK);
+ DMA_MEM_TO_DEV, DMA_CTRL_ACK);
if (desc) {
cookie = dmaengine_submit(desc);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 2a56fc6f399..7f680420bfa 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -827,7 +827,7 @@ int gpmi_send_command(struct gpmi_nand_data *this)
pio[1] = pio[2] = 0;
desc = channel->device->device_prep_slave_sg(channel,
(struct scatterlist *)pio,
- ARRAY_SIZE(pio), DMA_NONE, 0);
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
if (!desc) {
pr_err("step 1 error\n");
return -1;
@@ -839,7 +839,7 @@ int gpmi_send_command(struct gpmi_nand_data *this)
sg_init_one(sgl, this->cmd_buffer, this->command_length);
dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
desc = channel->device->device_prep_slave_sg(channel,
- sgl, 1, DMA_TO_DEVICE, 1);
+ sgl, 1, DMA_MEM_TO_DEV, 1);
if (!desc) {
pr_err("step 2 error\n");
return -1;
@@ -872,7 +872,7 @@ int gpmi_send_data(struct gpmi_nand_data *this)
pio[1] = 0;
desc = channel->device->device_prep_slave_sg(channel,
(struct scatterlist *)pio,
- ARRAY_SIZE(pio), DMA_NONE, 0);
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
if (!desc) {
pr_err("step 1 error\n");
return -1;
@@ -881,7 +881,7 @@ int gpmi_send_data(struct gpmi_nand_data *this)
/* [2] send DMA request */
prepare_data_dma(this, DMA_TO_DEVICE);
desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
- 1, DMA_TO_DEVICE, 1);
+ 1, DMA_MEM_TO_DEV, 1);
if (!desc) {
pr_err("step 2 error\n");
return -1;
@@ -908,7 +908,7 @@ int gpmi_read_data(struct gpmi_nand_data *this)
pio[1] = 0;
desc = channel->device->device_prep_slave_sg(channel,
(struct scatterlist *)pio,
- ARRAY_SIZE(pio), DMA_NONE, 0);
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
if (!desc) {
pr_err("step 1 error\n");
return -1;
@@ -917,7 +917,7 @@ int gpmi_read_data(struct gpmi_nand_data *this)
/* [2] : send DMA request */
prepare_data_dma(this, DMA_FROM_DEVICE);
desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
- 1, DMA_FROM_DEVICE, 1);
+ 1, DMA_DEV_TO_MEM, 1);
if (!desc) {
pr_err("step 2 error\n");
return -1;
@@ -964,7 +964,7 @@ int gpmi_send_page(struct gpmi_nand_data *this,
desc = channel->device->device_prep_slave_sg(channel,
(struct scatterlist *)pio,
- ARRAY_SIZE(pio), DMA_NONE, 0);
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
if (!desc) {
pr_err("step 2 error\n");
return -1;
@@ -998,7 +998,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
| BF_GPMI_CTRL0_XFER_COUNT(0);
pio[1] = 0;
desc = channel->device->device_prep_slave_sg(channel,
- (struct scatterlist *)pio, 2, DMA_NONE, 0);
+ (struct scatterlist *)pio, 2,
+ DMA_TRANS_NONE, 0);
if (!desc) {
pr_err("step 1 error\n");
return -1;
@@ -1027,7 +1028,7 @@ int gpmi_read_page(struct gpmi_nand_data *this,
pio[5] = auxiliary;
desc = channel->device->device_prep_slave_sg(channel,
(struct scatterlist *)pio,
- ARRAY_SIZE(pio), DMA_NONE, 1);
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 1);
if (!desc) {
pr_err("step 2 error\n");
return -1;
@@ -1045,7 +1046,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
pio[1] = 0;
desc = channel->device->device_prep_slave_sg(channel,
- (struct scatterlist *)pio, 2, DMA_NONE, 1);
+ (struct scatterlist *)pio, 2,
+ DMA_TRANS_NONE, 1);
if (!desc) {
pr_err("step 3 error\n");
return -1;
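Note: gpmi-nand pushes PIO register words through device_prep_slave_sg(), so its "no data transfer" case needs the new DMA_TRANS_NONE from enum dma_transfer_direction rather than the mapping-API DMA_NONE. A kernel-context sketch; my_send_pio and ctrl0 are placeholders:

    /* The mxs-dma engine treats a prep_slave_sg() call with
     * DMA_TRANS_NONE as "these are PIO register words", not a
     * real scatterlist. */
    static struct dma_async_tx_descriptor *
    my_send_pio(struct dma_chan *chan, u32 ctrl0)
    {
            u32 pio[2] = { ctrl0, 0 };

            return chan->device->device_prep_slave_sg(chan,
                            (struct scatterlist *)pio, ARRAY_SIZE(pio),
                            DMA_TRANS_NONE, 0);
    }
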
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index a688b9d975a..f99c6e312a5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -365,13 +365,18 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx);
if (cmd->autoneg == AUTONEG_ENABLE) {
+ u32 an_supported_speed = bp->port.supported[cfg_idx];
+ if (bp->link_params.phy[EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ an_supported_speed |= (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full);
if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
DP(NETIF_MSG_LINK, "Autoneg not supported\n");
return -EINVAL;
}
/* advertise the requested speed and duplex if supported */
- if (cmd->advertising & ~(bp->port.supported[cfg_idx])) {
+ if (cmd->advertising & ~an_supported_speed) {
DP(NETIF_MSG_LINK, "Advertisement parameters "
"are not supported\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 4df9505b67b..2091e5dbbcd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -2502,7 +2502,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
struct bnx2x_nig_brb_pfc_port_params *nig_params)
{
u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0;
- u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0;
+ u32 llfc_enable = 0, xcm_out_en = 0, hwpfc_enable = 0;
u32 pkt_priority_to_cos = 0;
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -2516,9 +2516,8 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
* MAC control frames (that are not pause packets)
* will be forwarded to the XCM.
*/
- xcm_mask = REG_RD(bp,
- port ? NIG_REG_LLH1_XCM_MASK :
- NIG_REG_LLH0_XCM_MASK);
+ xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK :
+ NIG_REG_LLH0_XCM_MASK);
/*
* nig params will override non PFC params, since it's possible to
* do transition from PFC to SAFC
@@ -2533,8 +2532,8 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
ppp_enable = 1;
xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
- xcm0_out_en = 0;
- p0_hwpfc_enable = 1;
+ xcm_out_en = 0;
+ hwpfc_enable = 1;
} else {
if (nig_params) {
llfc_out_en = nig_params->llfc_out_en;
@@ -2545,7 +2544,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
- xcm0_out_en = 1;
+ xcm_out_en = 1;
}
if (CHIP_IS_E3(bp))
@@ -2564,13 +2563,16 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK :
NIG_REG_LLH0_XCM_MASK, xcm_mask);
- REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
+ REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 :
+ NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
/* output enable for RX_XCM # IF */
- REG_WR(bp, NIG_REG_XCM0_OUT_EN, xcm0_out_en);
+ REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN :
+ NIG_REG_XCM0_OUT_EN, xcm_out_en);
/* HW PFC TX enable */
- REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable);
+ REG_WR(bp, port ? NIG_REG_P1_HWPFC_ENABLE :
+ NIG_REG_P0_HWPFC_ENABLE, hwpfc_enable);
if (nig_params) {
u8 i = 0;
@@ -3761,7 +3763,15 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
/* Advertise pause */
bnx2x_ext_phy_set_pause(params, phy, vars);
- vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+ /*
+ * Set KR Autoneg Work-Around flag for Warpcore version older than D108
+ */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_UC_INFO_B1_VERSION, &val16);
+ if (val16 < 0xd108) {
+ DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
+ vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+ }
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_MISC7, &val16);
@@ -9266,62 +9276,68 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
/* BCM8481/BCM84823/BCM84833 PHY SECTION */
/******************************************************************/
static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
- struct link_params *params)
+ struct bnx2x *bp,
+ u8 port)
{
u16 val, fw_ver1, fw_ver2, cnt;
- u8 port;
- struct bnx2x *bp = params->bp;
- port = params->port;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
+ bnx2x_save_spirom_version(bp, port,
+ ((fw_ver1 & 0xf000)>>5) | (fw_ver1 & 0x7f),
+ phy->ver_addr);
+ } else {
+ /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
+ /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+
+ for (cnt = 0; cnt < 100; cnt++) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ if (val & 1)
+ break;
+ udelay(5);
+ }
+ if (cnt == 100) {
+ DP(NETIF_MSG_LINK, "Unable to read 848xx "
+ "phy fw version(1)\n");
+ bnx2x_save_spirom_version(bp, port, 0,
+ phy->ver_addr);
+ return;
+ }
- /* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/
- /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
- for (cnt = 0; cnt < 100; cnt++) {
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
- if (val & 1)
- break;
- udelay(5);
- }
- if (cnt == 100) {
- DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n");
- bnx2x_save_spirom_version(bp, port, 0,
- phy->ver_addr);
- return;
- }
+ /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+ for (cnt = 0; cnt < 100; cnt++) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ if (val & 1)
+ break;
+ udelay(5);
+ }
+ if (cnt == 100) {
+ DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw "
+ "version(2)\n");
+ bnx2x_save_spirom_version(bp, port, 0,
+ phy->ver_addr);
+ return;
+ }
+ /* lower 16 bits of the register SPI_FW_STATUS */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+ /* upper 16 bits of register SPI_FW_STATUS */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
- /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
- for (cnt = 0; cnt < 100; cnt++) {
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
- if (val & 1)
- break;
- udelay(5);
- }
- if (cnt == 100) {
- DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n");
- bnx2x_save_spirom_version(bp, port, 0,
+ bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1,
phy->ver_addr);
- return;
}
- /* lower 16 bits of the register SPI_FW_STATUS */
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
- /* upper 16 bits of register SPI_FW_STATUS */
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
-
- bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1,
- phy->ver_addr);
}
-
static void bnx2x_848xx_set_led(struct bnx2x *bp,
struct bnx2x_phy *phy)
{
@@ -9392,10 +9408,13 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
u16 tmp_req_line_speed;
tmp_req_line_speed = phy->req_line_speed;
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
if (phy->req_line_speed == SPEED_10000)
phy->req_line_speed = SPEED_AUTO_NEG;
-
+ } else {
+ /* Save spirom version */
+ bnx2x_save_848xx_spirom_version(phy, bp, params->port);
+ }
/*
* This phy uses the NIG latch mechanism since link indication
* arrives through its LED4 and not via its LASI signal, so we
@@ -9443,13 +9462,10 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
an_1000_val);
/* set 100 speed advertisement */
- if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask &
(PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) &&
- (phy->supported &
- (SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full)))) {
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))) {
an_10_100_val |= (1<<7);
/* Enable autoneg and restart autoneg for legacy speeds */
autoneg_val |= (1<<9 | 1<<12);
@@ -9539,9 +9555,6 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
1);
- /* Save spirom version */
- bnx2x_save_848xx_spirom_version(phy, params);
-
phy->req_line_speed = tmp_req_line_speed;
return 0;
@@ -9749,17 +9762,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
/* Wait for GPHY to come out of reset */
msleep(50);
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
- /* Bring PHY out of super isolate mode */
- bnx2x_cl45_read(bp, phy,
- MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
- val &= ~MDIO_84833_SUPER_ISOLATE;
- bnx2x_cl45_write(bp, phy,
- MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
- bnx2x_84833_pair_swap_cfg(phy, params, vars);
- } else {
+ if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
/*
* BCM84823 requires that XGXS links up first @ 10G for normal
* behavior.
@@ -9816,24 +9819,23 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
params->multi_phy_config, val);
- /* AutogrEEEn */
- if (params->feature_config_flags &
- FEATURE_CONFIG_AUTOGREEEN_ENABLED)
- cmd_args[0] = 0x2;
- else
- cmd_args[0] = 0x0;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ bnx2x_84833_pair_swap_cfg(phy, params, vars);
- cmd_args[1] = 0x0;
- cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
- cmd_args[3] = PHY84833_CONSTANT_LATENCY;
- rc = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_EEE_MODE, cmd_args);
- if (rc != 0)
- DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
+ /* Keep AutogrEEEn disabled. */
+ cmd_args[0] = 0x0;
+ cmd_args[1] = 0x0;
+ cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
+ cmd_args[3] = PHY84833_CONSTANT_LATENCY;
+ rc = bnx2x_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_EEE_MODE, cmd_args);
+ if (rc != 0)
+ DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
+ }
if (initialize)
rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
else
- bnx2x_save_848xx_spirom_version(phy, params);
+ bnx2x_save_848xx_spirom_version(phy, bp, params->port);
/* 84833 PHY has a better feature and doesn't need to support this. */
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
cms_enable = REG_RD(bp, params->shmem_base +
@@ -9851,6 +9853,16 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
MDIO_CTL_REG_84823_USER_CTRL_REG, val);
}
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ /* Bring PHY out of super isolate mode as the final step. */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
+ val &= ~MDIO_84833_SUPER_ISOLATE;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+ }
return rc;
}
@@ -9988,10 +10000,11 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
} else {
bnx2x_cl45_read(bp, phy,
MDIO_CTL_DEVAD,
- 0x400f, &val16);
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val16);
+ val16 |= MDIO_84833_SUPER_ISOLATE;
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, 0x800);
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, val16);
}
}
@@ -11516,6 +11529,19 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
}
phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
+ (phy->ver_addr)) {
+ /*
+ * Remove 100Mb link support for BCM84833 when the phy fw
+ * version is lower than or equal to 1.39
+ */
+ u32 raw_ver = REG_RD(bp, phy->ver_addr);
+ if (((raw_ver & 0x7F) <= 39) &&
+ (((raw_ver & 0xF80) >> 7) <= 1))
+ phy->supported &= ~(SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full);
+ }
+
/*
* In case mdc/mdio_access of the external phy is different than the
* mdc/mdio access of the XGXS, a HW lock must be taken in each access
@@ -12333,55 +12359,69 @@ static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
u32 chip_id)
{
u8 reset_gpios;
- struct bnx2x_phy phy;
- u32 shmem_base, shmem2_base, cnt;
- s8 port = 0;
- u16 val;
-
reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
udelay(10);
bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
reset_gpios);
- for (port = PORT_MAX - 1; port >= PORT_0; port--) {
- /* This PHY is for E2 and E3. */
- shmem_base = shmem_base_path[port];
- shmem2_base = shmem2_base_path[port];
- /* Extract the ext phy address for the port */
- if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
- 0, &phy) !=
- 0) {
- DP(NETIF_MSG_LINK, "populate_phy failed\n");
- return -EINVAL;
- }
+ return 0;
+}
- /* Wait for FW completing its initialization. */
- for (cnt = 0; cnt < 1000; cnt++) {
- bnx2x_cl45_read(bp, &phy,
+static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
+ struct bnx2x_phy *phy)
+{
+ u16 val, cnt;
+ /* Wait for FW to complete its initialization. */
+ for (cnt = 0; cnt < 1500; cnt++) {
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_CTRL, &val);
- if (!(val & (1<<15)))
- break;
- msleep(1);
- }
- if (cnt >= 1000)
- DP(NETIF_MSG_LINK,
- "84833 Cmn reset timeout (%d)\n", port);
-
- /* Put the port in super isolate mode. */
- bnx2x_cl45_read(bp, &phy,
- MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
- val |= MDIO_84833_SUPER_ISOLATE;
- bnx2x_cl45_write(bp, &phy,
- MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+ if (!(val & (1<<15)))
+ break;
+ msleep(1);
+ }
+ if (cnt >= 1500) {
+ DP(NETIF_MSG_LINK, "84833 reset timeout\n");
+ return -EINVAL;
}
+ /* Put the port in super isolate mode. */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
+ val |= MDIO_84833_SUPER_ISOLATE;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+
+ /* Save spirom version */
+ bnx2x_save_848xx_spirom_version(phy, bp, PORT_0);
return 0;
}
+int bnx2x_pre_init_phy(struct bnx2x *bp,
+ u32 shmem_base,
+ u32 shmem2_base,
+ u32 chip_id)
+{
+ int rc = 0;
+ struct bnx2x_phy phy;
+ bnx2x_set_mdio_clk(bp, chip_id, PORT_0);
+ if (bnx2x_populate_phy(bp, EXT_PHY1, shmem_base, shmem2_base,
+ PORT_0, &phy)) {
+ DP(NETIF_MSG_LINK, "populate_phy failed\n");
+ return -EINVAL;
+ }
+ switch (phy.type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+ rc = bnx2x_84833_pre_init_phy(bp, &phy);
+ break;
+ default:
+ break;
+ }
+ return rc;
+}
static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
u32 shmem2_base_path[], u8 phy_index,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 44609de4e5d..dddbcf6e154 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -2176,6 +2176,7 @@
* set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
* accommodate the 9 input clients to ETS arbiter. */
#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684
+#define NIG_REG_P1_HWPFC_ENABLE 0x181d0
#define NIG_REG_P1_MAC_IN_EN 0x185c0
/* [RW 1] Output enable for TX MAC interface */
#define NIG_REG_P1_MAC_OUT_EN 0x185c4
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 076e02a415a..d529af99157 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8846,9 +8846,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
udelay(100);
- if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
+ if (tg3_flag(tp, USING_MSIX)) {
val = tr32(MSGINT_MODE);
- val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
+ val |= MSGINT_MODE_ENABLE;
+ if (tp->irq_cnt > 1)
+ val |= MSGINT_MODE_MULTIVEC_EN;
if (!tg3_flag(tp, 1SHOT_MSI))
val |= MSGINT_MODE_ONE_SHOT_DISABLE;
tw32(MSGINT_MODE, val);
@@ -9548,19 +9550,18 @@ static int tg3_request_firmware(struct tg3 *tp)
static bool tg3_enable_msix(struct tg3 *tp)
{
- int i, rc, cpus = num_online_cpus();
+ int i, rc;
struct msix_entry msix_ent[tp->irq_max];
- if (cpus == 1)
- /* Just fallback to the simpler MSI mode. */
- return false;
-
- /*
- * We want as many rx rings enabled as there are cpus.
- * The first MSIX vector only deals with link interrupts, etc,
- * so we add one to the number of vectors we are requesting.
- */
- tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
+ tp->irq_cnt = num_online_cpus();
+ if (tp->irq_cnt > 1) {
+ /* We want as many rx rings enabled as there are cpus.
+ * In multiqueue MSI-X mode, the first MSI-X vector
+ * only deals with link interrupts, etc, so we add
+ * one to the number of vectors we are requesting.
+ */
+ tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
+ }
for (i = 0; i < tp->irq_max; i++) {
msix_ent[i].entry = i;
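Note: the fragmented tg3 hunk above is easier to read as the resulting code; a consolidation sketch, not new logic:

    tp->irq_cnt = num_online_cpus();
    if (tp->irq_cnt > 1) {
            /* In multiqueue MSI-X mode, vector 0 carries only
             * link/error interrupts, so request one extra. */
            tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
    }

Single-CPU systems now still attempt MSI-X with one vector instead of immediately falling back to MSI.
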
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
index 05b7359bde8..6bdd8e36e56 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -263,7 +263,7 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
data[i++] = atomic_read(&port->port_res[k].swqe_avail);
}
-const struct ethtool_ops ehea_ethtool_ops = {
+static const struct ethtool_ops ehea_ethtool_ops = {
.get_settings = ehea_get_settings,
.get_drvinfo = ehea_get_drvinfo,
.get_msglevel = ehea_get_msglevel,
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 3554414eb5e..5d5fb262718 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -94,8 +94,8 @@ static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
static DEFINE_MUTEX(dlpar_mem_lock);
-struct ehea_fw_handle_array ehea_fw_handles;
-struct ehea_bcmc_reg_array ehea_bcmc_regs;
+static struct ehea_fw_handle_array ehea_fw_handles;
+static struct ehea_bcmc_reg_array ehea_bcmc_regs;
static int __devinit ehea_probe_adapter(struct platform_device *dev,
@@ -133,7 +133,7 @@ void ehea_dump(void *adr, int len, char *msg)
}
}
-void ehea_schedule_port_reset(struct ehea_port *port)
+static void ehea_schedule_port_reset(struct ehea_port *port)
{
if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
schedule_work(&port->reset_task);
@@ -1404,7 +1404,7 @@ out:
return ret;
}
-int ehea_gen_smrs(struct ehea_port_res *pr)
+static int ehea_gen_smrs(struct ehea_port_res *pr)
{
int ret;
struct ehea_adapter *adapter = pr->port->adapter;
@@ -1426,7 +1426,7 @@ out:
return -EIO;
}
-int ehea_rem_smrs(struct ehea_port_res *pr)
+static int ehea_rem_smrs(struct ehea_port_res *pr)
{
if ((ehea_rem_mr(&pr->send_mr)) ||
(ehea_rem_mr(&pr->recv_mr)))
@@ -2190,7 +2190,7 @@ out:
return err;
}
-int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
+static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
int ret = -EIO;
u64 hret;
@@ -2531,7 +2531,7 @@ static void ehea_flush_sq(struct ehea_port *port)
}
}
-int ehea_stop_qps(struct net_device *dev)
+static int ehea_stop_qps(struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
@@ -2600,7 +2600,7 @@ out:
return ret;
}
-void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
+static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
struct ehea_qp qp = *orig_qp;
struct ehea_qp_init_attr *init_attr = &qp.init_attr;
@@ -2633,7 +2633,7 @@ void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
}
}
-int ehea_restart_qps(struct net_device *dev)
+static int ehea_restart_qps(struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
@@ -2824,7 +2824,7 @@ static void ehea_tx_watchdog(struct net_device *dev)
ehea_schedule_port_reset(port);
}
-int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
+static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
struct hcp_query_ehea *cb;
u64 hret;
@@ -2852,7 +2852,7 @@ out:
return ret;
}
-int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
+static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
struct hcp_ehea_port_cb4 *cb4;
u64 hret;
@@ -2966,7 +2966,7 @@ static const struct net_device_ops ehea_netdev_ops = {
.ndo_tx_timeout = ehea_tx_watchdog,
};
-struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
+static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
u32 logical_port_id,
struct device_node *dn)
{
@@ -3237,7 +3237,7 @@ static ssize_t ehea_remove_port(struct device *dev,
static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
-int ehea_create_device_sysfs(struct platform_device *dev)
+static int ehea_create_device_sysfs(struct platform_device *dev)
{
int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
if (ret)
@@ -3248,7 +3248,7 @@ out:
return ret;
}
-void ehea_remove_device_sysfs(struct platform_device *dev)
+static void ehea_remove_device_sysfs(struct platform_device *dev)
{
device_remove_file(&dev->dev, &dev_attr_probe_port);
device_remove_file(&dev->dev, &dev_attr_remove_port);
@@ -3379,7 +3379,7 @@ static int __devexit ehea_remove(struct platform_device *dev)
return 0;
}
-void ehea_crash_handler(void)
+static void ehea_crash_handler(void)
{
int i;
@@ -3491,7 +3491,7 @@ static ssize_t ehea_show_capabilities(struct device_driver *drv,
static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
ehea_show_capabilities, NULL);
-int __init ehea_module_init(void)
+static int __init ehea_module_init(void)
{
int ret;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 95b9f4fa811..c25b05b94da 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -34,9 +34,7 @@
#include "ehea_phyp.h"
#include "ehea_qmr.h"
-struct ehea_bmap *ehea_bmap = NULL;
-
-
+static struct ehea_bmap *ehea_bmap;
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
@@ -212,7 +210,7 @@ out_nomem:
return NULL;
}
-u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
+static u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
{
u64 hret;
u64 adapter_handle = cq->adapter->handle;
@@ -337,7 +335,7 @@ struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
return eqe;
}
-u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
+static u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
{
u64 hret;
unsigned long flags;
@@ -381,7 +379,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
/**
* allocates memory for a queue and registers pages in phyp
*/
-int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
+static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
int nr_pages, int wqe_size, int act_nr_sges,
struct ehea_adapter *adapter, int h_call_q_selector)
{
@@ -516,7 +514,7 @@ out_freemem:
return NULL;
}
-u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
+static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
{
u64 hret;
struct ehea_qp_init_attr *qp_attr = &qp->init_attr;
@@ -976,7 +974,7 @@ int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
return 0;
}
-void print_error_data(u64 *data)
+static void print_error_data(u64 *data)
{
int length;
u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index 75ec87a822b..0a85690a132 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -459,7 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
- &ctl->sg, 1, DMA_TO_DEVICE,
+ &ctl->sg, 1, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
if (!ctl->adesc)
return NETDEV_TX_BUSY;
@@ -571,7 +571,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev)
sg_dma_len(sg) = DMA_BUFFER_SIZE;
ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
- sg, 1, DMA_FROM_DEVICE,
+ sg, 1, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
if (!ctl->adesc)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 6ece4295d78..813d41c4a84 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1703,7 +1703,7 @@ static int sh_mdio_init(struct net_device *ndev, int id,
mdp->mii_bus->name = "sh_mii";
mdp->mii_bus->parent = &ndev->dev;
snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- mdp->pdev->name, pdid);
+ mdp->pdev->name, id);
/* PHY IRQ */
mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 88c81c5706b..09b8c9dbf78 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -557,10 +557,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
rxs->rs_status |= ATH9K_RXERR_DECRYPT;
else if (rxsp->status11 & AR_MichaelErr)
rxs->rs_status |= ATH9K_RXERR_MIC;
- if (rxsp->status11 & AR_KeyMiss)
- rxs->rs_status |= ATH9K_RXERR_KEYMISS;
}
+ if (rxsp->status11 & AR_KeyMiss)
+ rxs->rs_status |= ATH9K_RXERR_KEYMISS;
+
return 0;
}
EXPORT_SYMBOL(ath9k_hw_process_rxdesc_edma);
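Note: in both ath9k hunks (this file and mac.c below) the key-miss check moves out of the error branch, so ATH9K_RXERR_KEYMISS is now reported even for frames that carry no CRC/PHY/decrypt error. A consolidation sketch of the resulting flow; status_has_errors stands in for the surrounding status checks:

    if (status_has_errors) {
            /* CRC / PHY / decrypt / MIC handling, as before */
    }

    if (rxsp->status11 & AR_KeyMiss)
            rxs->rs_status |= ATH9K_RXERR_KEYMISS;
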
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index fd3f19c2e55..e196aba77ac 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -618,10 +618,11 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
rs->rs_status |= ATH9K_RXERR_DECRYPT;
else if (ads.ds_rxstatus8 & AR_MichaelErr)
rs->rs_status |= ATH9K_RXERR_MIC;
- if (ads.ds_rxstatus8 & AR_KeyMiss)
- rs->rs_status |= ATH9K_RXERR_KEYMISS;
}
+ if (ads.ds_rxstatus8 & AR_KeyMiss)
+ rs->rs_status |= ATH9K_RXERR_KEYMISS;
+
return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 1c6f19393ef..b91f28ef103 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4852,6 +4852,9 @@ static void b43_op_stop(struct ieee80211_hw *hw)
cancel_work_sync(&(wl->beacon_update_trigger));
+ if (!dev)
+ goto out;
+
mutex_lock(&wl->mutex);
if (b43_status(dev) >= B43_STAT_STARTED) {
dev = b43_wireless_core_stop(dev);
@@ -4863,7 +4866,7 @@ static void b43_op_stop(struct ieee80211_hw *hw)
out_unlock:
mutex_unlock(&wl->mutex);
-
+out:
cancel_work_sync(&(wl->txpower_adjust_work));
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index f23b0c3e4ea..bf11850a20f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -2475,7 +2475,7 @@ static s32 brcmf_init_iscan(struct brcmf_cfg80211_priv *cfg_priv)
return err;
}
-static void brcmf_delay(u32 ms)
+static __always_inline void brcmf_delay(u32 ms)
{
if (ms < 1000 / HZ) {
cond_resched();
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index d106576ce33..448ab9c4eb4 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -1128,14 +1128,7 @@ static int __devinit brcms_bcma_probe(struct bcma_device *pdev)
return 0;
}
-static int brcms_pci_suspend(struct pci_dev *pdev)
-{
- pci_save_state(pdev);
- pci_disable_device(pdev);
- return pci_set_power_state(pdev, PCI_D3hot);
-}
-
-static int brcms_suspend(struct bcma_device *pdev, pm_message_t state)
+static int brcms_suspend(struct bcma_device *pdev)
{
struct brcms_info *wl;
struct ieee80211_hw *hw;
@@ -1153,40 +1146,15 @@ static int brcms_suspend(struct bcma_device *pdev, pm_message_t state)
wl->pub->hw_up = false;
spin_unlock_bh(&wl->lock);
- /* temporarily do suspend ourselves */
- return brcms_pci_suspend(pdev->bus->host_pci);
-}
-
-static int brcms_pci_resume(struct pci_dev *pdev)
-{
- int err = 0;
- uint val;
-
- err = pci_set_power_state(pdev, PCI_D0);
- if (err)
- return err;
-
- pci_restore_state(pdev);
-
- err = pci_enable_device(pdev);
- if (err)
- return err;
-
- pci_set_master(pdev);
-
- pci_read_config_dword(pdev, 0x40, &val);
- if ((val & 0x0000ff00) != 0)
- pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+ pr_debug("brcms_suspend ok\n");
return 0;
}
static int brcms_resume(struct bcma_device *pdev)
{
- /*
- * just do pci resume for now until bcma supports it.
- */
- return brcms_pci_resume(pdev->bus->host_pci);
+ pr_debug("brcms_resume ok\n");
+ return 0;
}
static struct bcma_driver brcms_bcma_driver = {
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 018a8deb88a..4fcdac63a30 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -7848,7 +7848,7 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
* more efficiently than we can parse it. ORDER MATTERS HERE */
struct ipw_rt_hdr *ipw_rt;
- short len = le16_to_cpu(pkt->u.frame.length);
+ unsigned short len = le16_to_cpu(pkt->u.frame.length);
/* We received data from the HW, so stop the watchdog */
dev->trans_start = jiffies;
@@ -8023,7 +8023,7 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
s8 noise = (s8) le16_to_cpu(frame->noise);
u8 rate = frame->rate;
- short len = le16_to_cpu(pkt->u.frame.length);
+ unsigned short len = le16_to_cpu(pkt->u.frame.length);
struct sk_buff *skb;
int hdr_only = 0;
u16 filter = priv->prom_priv->filter;
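Note: the type change in both ipw2200 hunks matters because le16_to_cpu() yields a full 16-bit value; storing it in a signed short makes any frame length >= 0x8000 negative, breaking the subsequent length arithmetic. A standalone illustration (plain C, not driver code):

    #include <stdio.h>

    int main(void)
    {
            unsigned short raw = 0x8004;  /* frame length from the hardware */
            short s = raw;                /* old type: typically -32764 */
            unsigned short u = raw;       /* new type: 32772 */

            printf("signed: %d, unsigned: %u\n", s, u);
            return 0;
    }
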
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 084aa2c4ccf..a6454726737 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -569,7 +569,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
struct iwl_scan_cmd *scan;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
u32 rate_flags = 0;
- u16 cmd_len;
+ u16 cmd_len = 0;
u16 rx_chain = 0;
enum ieee80211_band band;
u8 n_probes = 0;
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 7becea3dec6..dd5aeaff44b 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -2777,7 +2777,7 @@ static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw,
else if (channel->band == IEEE80211_BAND_5GHZ)
cmd->band = cpu_to_le16(0x4);
- cmd->channel = channel->hw_value;
+ cmd->channel = cpu_to_le16(channel->hw_value);
if (conf->channel_type == NL80211_CHAN_NO_HT ||
conf->channel_type == NL80211_CHAN_HT20) {
@@ -4066,7 +4066,7 @@ static int mwl8k_cmd_encryption_remove_key(struct ieee80211_hw *hw,
goto done;
if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- WLAN_CIPHER_SUITE_WEP104)
+ key->cipher == WLAN_CIPHER_SUITE_WEP104)
mwl8k_vif->wep_key_conf[key->keyidx].enabled = 0;
cmd->action = cpu_to_le32(MWL8K_ENCR_REMOVE_KEY);
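Note: the removed condition contained a classic precedence bug: "key->cipher == WLAN_CIPHER_SUITE_WEP40 || WLAN_CIPHER_SUITE_WEP104" parses as (a == B) || C, and since C is a nonzero constant the test was always true, clearing the WEP key slot for every cipher. A standalone illustration (plain C; suite values as in ieee80211.h):

    #include <stdio.h>

    #define WLAN_CIPHER_SUITE_WEP40  0x000FAC01
    #define WLAN_CIPHER_SUITE_WEP104 0x000FAC05
    #define WLAN_CIPHER_SUITE_CCMP   0x000FAC04

    int main(void)
    {
            unsigned cipher = WLAN_CIPHER_SUITE_CCMP;  /* neither WEP suite */

            if (cipher == WLAN_CIPHER_SUITE_WEP40 || WLAN_CIPHER_SUITE_WEP104)
                    printf("buggy test matches\n");    /* always prints */
            if (cipher == WLAN_CIPHER_SUITE_WEP40 ||
                cipher == WLAN_CIPHER_SUITE_WEP104)
                    printf("fixed test matches\n");    /* does not print */
            return 0;
    }
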
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 4941a1a2321..dc88baefa72 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -422,7 +422,6 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
- int mask = (state == STATE_RADIO_IRQ_ON);
u32 reg;
unsigned long flags;
@@ -436,25 +435,14 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
}
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
- rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
- rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask);
- rt2x00_set_field32(&reg, INT_MASK_CSR_AC0_DMA_DONE, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_AC1_DMA_DONE, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_AC2_DMA_DONE, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_AC3_DMA_DONE, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_HCCA_DMA_DONE, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_MGMT_DMA_DONE, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_MCU_COMMAND, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_RXTX_COHERENT, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, mask);
- rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, mask);
- rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, mask);
- rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, mask);
- rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
+ reg = 0;
+ if (state == STATE_RADIO_IRQ_ON) {
+ rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
+ }
rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 06ea3bcfdd2..16570aa84aa 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -830,16 +830,11 @@ config SCSI_ISCI
tristate "Intel(R) C600 Series Chipset SAS Controller"
depends on PCI && SCSI
depends on X86
- # (temporary): known alpha quality driver
- depends on EXPERIMENTAL
select SCSI_SAS_LIBSAS
- select SCSI_SAS_HOST_SMP
---help---
This driver supports the 6Gb/s SAS capabilities of the storage
control unit found in the Intel(R) C600 series chipset.
- The experimental tag will be removed after the driver exits alpha
-
config SCSI_GENERIC_NCR5380
tristate "Generic NCR5380/53c400 SCSI PIO support"
depends on ISA && SCSI
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 78963be2c4f..cb07c628b2f 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -673,12 +673,7 @@ struct bfa_itnim_iostats_s {
u32 tm_iocdowns; /* TM cleaned-up due to IOC down */
u32 tm_cleanups; /* TM cleanup requests */
u32 tm_cleanup_comps; /* TM cleanup completions */
- u32 lm_lun_across_sg; /* LM lun is across sg data buf */
- u32 lm_lun_not_sup; /* LM lun not supported */
- u32 lm_rpl_data_changed; /* LM report-lun data changed */
- u32 lm_wire_residue_changed; /* LM report-lun rsp residue changed */
- u32 lm_small_buf_addresidue; /* LM buf smaller than reported cnt */
- u32 lm_lun_not_rdy; /* LM lun not ready */
+ u32 rsvd[6];
};
/* Modify char* port_stt[] in bfal_port.c if a new state was added */
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index 50b6a1c8619..8d0b88f67a3 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -56,161 +56,6 @@ struct scsi_cdb_s {
#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocation length */
-#define SCSI_SENSE_CUR_ERR 0x70
-#define SCSI_SENSE_DEF_ERR 0x71
-
-/*
- * SCSI additional sense codes
- */
-#define SCSI_ASC_LUN_NOT_READY 0x04
-#define SCSI_ASC_LUN_NOT_SUPPORTED 0x25
-#define SCSI_ASC_TOCC 0x3F
-
-/*
- * SCSI additional sense code qualifiers
- */
-#define SCSI_ASCQ_MAN_INTR_REQ 0x03 /* manual intervention req */
-#define SCSI_ASCQ_RL_DATA_CHANGED 0x0E /* report luns data changed */
-
-/*
- * Methods of reporting informational exceptions
- */
-#define SCSI_MP_IEC_UNIT_ATTN 0x2 /* generate unit attention */
-
-struct scsi_report_luns_data_s {
- u32 lun_list_length; /* length of LUN list length */
- u32 reserved;
- struct scsi_lun lun[1]; /* first LUN in lun list */
-};
-
-struct scsi_inquiry_vendor_s {
- u8 vendor_id[8];
-};
-
-struct scsi_inquiry_prodid_s {
- u8 product_id[16];
-};
-
-struct scsi_inquiry_prodrev_s {
- u8 product_rev[4];
-};
-
-struct scsi_inquiry_data_s {
-#ifdef __BIG_ENDIAN
- u8 peripheral_qual:3; /* peripheral qualifier */
- u8 device_type:5; /* peripheral device type */
- u8 rmb:1; /* removable medium bit */
- u8 device_type_mod:7; /* device type modifier */
- u8 version;
- u8 aenc:1; /* async evt notification capability */
- u8 trm_iop:1; /* terminate I/O process */
- u8 norm_aca:1; /* normal ACA supported */
- u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
- u8 rsp_data_format:4;
- u8 additional_len;
- u8 sccs:1;
- u8 reserved1:7;
- u8 reserved2:1;
- u8 enc_serv:1; /* enclosure service component */
- u8 reserved3:1;
- u8 multi_port:1; /* multi-port device */
- u8 m_chngr:1; /* device in medium transport element */
- u8 ack_req_q:1; /* SIP specific bit */
- u8 addr32:1; /* SIP specific bit */
- u8 addr16:1; /* SIP specific bit */
- u8 rel_adr:1; /* relative address */
- u8 w_bus32:1;
- u8 w_bus16:1;
- u8 synchronous:1;
- u8 linked_commands:1;
- u8 trans_dis:1;
- u8 cmd_queue:1; /* command queueing supported */
- u8 soft_reset:1; /* soft reset alternative (VS) */
-#else
- u8 device_type:5; /* peripheral device type */
- u8 peripheral_qual:3; /* peripheral qualifier */
- u8 device_type_mod:7; /* device type modifier */
- u8 rmb:1; /* removable medium bit */
- u8 version;
- u8 rsp_data_format:4;
- u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
- u8 norm_aca:1; /* normal ACA supported */
- u8 terminate_iop:1;/* terminate I/O process */
- u8 aenc:1; /* async evt notification capability */
- u8 additional_len;
- u8 reserved1:7;
- u8 sccs:1;
- u8 addr16:1; /* SIP specific bit */
- u8 addr32:1; /* SIP specific bit */
- u8 ack_req_q:1; /* SIP specific bit */
- u8 m_chngr:1; /* device in medium transport element */
- u8 multi_port:1; /* multi-port device */
- u8 reserved3:1; /* TBD - Vendor Specific */
- u8 enc_serv:1; /* enclosure service component */
- u8 reserved2:1;
- u8 soft_seset:1; /* soft reset alternative (VS) */
- u8 cmd_queue:1; /* command queueing supported */
- u8 trans_dis:1;
- u8 linked_commands:1;
- u8 synchronous:1;
- u8 w_bus16:1;
- u8 w_bus32:1;
- u8 rel_adr:1; /* relative address */
-#endif
- struct scsi_inquiry_vendor_s vendor_id;
- struct scsi_inquiry_prodid_s product_id;
- struct scsi_inquiry_prodrev_s product_rev;
- u8 vendor_specific[20];
- u8 reserved4[40];
-};
-
-/*
- * SCSI sense data format
- */
-struct scsi_sense_s {
-#ifdef __BIG_ENDIAN
- u8 valid:1;
- u8 rsp_code:7;
-#else
- u8 rsp_code:7;
- u8 valid:1;
-#endif
- u8 seg_num;
-#ifdef __BIG_ENDIAN
- u8 file_mark:1;
- u8 eom:1; /* end of media */
- u8 ili:1; /* incorrect length indicator */
- u8 reserved:1;
- u8 sense_key:4;
-#else
- u8 sense_key:4;
- u8 reserved:1;
- u8 ili:1; /* incorrect length indicator */
- u8 eom:1; /* end of media */
- u8 file_mark:1;
-#endif
- u8 information[4]; /* device-type or cmd specific info */
- u8 add_sense_length; /* additional sense length */
- u8 command_info[4];/* command specific information */
- u8 asc; /* additional sense code */
- u8 ascq; /* additional sense code qualifier */
- u8 fru_code; /* field replaceable unit code */
-#ifdef __BIG_ENDIAN
- u8 sksv:1; /* sense key specific valid */
- u8 c_d:1; /* command/data bit */
- u8 res1:2;
- u8 bpv:1; /* bit pointer valid */
- u8 bpointer:3; /* bit pointer */
-#else
- u8 bpointer:3; /* bit pointer */
- u8 bpv:1; /* bit pointer valid */
- u8 res1:2;
- u8 c_d:1; /* command/data bit */
- u8 sksv:1; /* sense key specific valid */
-#endif
- u8 fpointer[2]; /* field pointer */
-};
-
/*
* Fibre Channel Header Structure (FCHS) definition
*/
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index e07bd4745d8..f0f80e282e3 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -24,8 +24,6 @@ BFA_TRC_FILE(HAL, FCPIM);
* BFA ITNIM Related definitions
*/
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
-static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim);
-static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim);
static void bfa_ioim_lm_init(struct bfa_s *bfa);
#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
@@ -60,14 +58,6 @@ static void bfa_ioim_lm_init(struct bfa_s *bfa);
} \
} while (0)
-#define bfa_ioim_rp_wwn(__ioim) \
- (((struct bfa_fcs_rport_s *) \
- (__ioim)->itnim->rport->rport_drv)->pwwn)
-
-#define bfa_ioim_lp_wwn(__ioim) \
- ((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa), \
- (__ioim)->itnim->rport->rport_info.lp_tag))->pwwn) \
-
#define bfa_itnim_sler_cb(__itnim) do { \
if ((__itnim)->bfa->fcs) \
bfa_cb_itnim_sler((__itnim)->ditn); \
@@ -77,13 +67,6 @@ static void bfa_ioim_lm_init(struct bfa_s *bfa);
} \
} while (0)
-enum bfa_ioim_lm_status {
- BFA_IOIM_LM_PRESENT = 1,
- BFA_IOIM_LM_LUN_NOT_SUP = 2,
- BFA_IOIM_LM_RPL_DATA_CHANGED = 3,
- BFA_IOIM_LM_LUN_NOT_RDY = 4,
-};
-
enum bfa_ioim_lm_ua_status {
BFA_IOIM_LM_UA_RESET = 0,
BFA_IOIM_LM_UA_SET = 1,
@@ -145,9 +128,6 @@ enum bfa_ioim_event {
BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
- BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/* lunmask lun not supported */
- BFA_IOIM_SM_LM_RPL_DC = 20, /* lunmask report-lun data changed */
- BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/* lunmask lun not ready */
};
@@ -245,9 +225,6 @@ static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
-static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete);
/*
* forward declaration of BFA IO state machine
@@ -445,12 +422,6 @@ bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
- bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg);
- bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup);
- bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed);
- bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed);
- bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue);
- bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy);
}
bfa_status_t
@@ -1580,27 +1551,6 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
__bfa_cb_ioim_abort, ioim);
break;
- case BFA_IOIM_SM_LM_LUN_NOT_SUP:
- bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
- bfa_ioim_move_to_comp_q(ioim);
- bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
- __bfa_cb_ioim_lm_lun_not_sup, ioim);
- break;
-
- case BFA_IOIM_SM_LM_RPL_DC:
- bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
- bfa_ioim_move_to_comp_q(ioim);
- bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
- __bfa_cb_ioim_lm_rpl_dc, ioim);
- break;
-
- case BFA_IOIM_SM_LM_LUN_NOT_RDY:
- bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
- bfa_ioim_move_to_comp_q(ioim);
- bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
- __bfa_cb_ioim_lm_lun_not_rdy, ioim);
- break;
-
default:
bfa_sm_fault(ioim->bfa, event);
}
@@ -2160,243 +2110,6 @@ bfa_ioim_lm_init(struct bfa_s *bfa)
}
}
-/*
- * Validate LUN for LUN masking
- */
-static enum bfa_ioim_lm_status
-bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps,
- struct bfa_rport_s *rp, struct scsi_lun lun)
-{
- u8 i;
- struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
- struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
- struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd;
-
- if ((cdb->scsi_cdb[0] == REPORT_LUNS) &&
- (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
- return BFA_IOIM_LM_PRESENT;
- }
-
- for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
-
- if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
- continue;
-
- if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) ==
- scsilun_to_int((struct scsi_lun *)&lun))
- && (rp->rport_tag == lun_list[i].rp_tag)
- && ((u8)ioim->itnim->rport->rport_info.lp_tag ==
- lun_list[i].lp_tag)) {
- bfa_trc(ioim->bfa, lun_list[i].rp_tag);
- bfa_trc(ioim->bfa, lun_list[i].lp_tag);
- bfa_trc(ioim->bfa, scsilun_to_int(
- (struct scsi_lun *)&lun_list[i].lun));
-
- if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) &&
- ((cdb->scsi_cdb[0] != INQUIRY) ||
- (cdb->scsi_cdb[0] != REPORT_LUNS))) {
- lun_list[i].ua = BFA_IOIM_LM_UA_RESET;
- return BFA_IOIM_LM_RPL_DATA_CHANGED;
- }
-
- if (cdb->scsi_cdb[0] == REPORT_LUNS)
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
-
- return BFA_IOIM_LM_PRESENT;
- }
- }
-
- if ((cdb->scsi_cdb[0] == INQUIRY) &&
- (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
- ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data;
- return BFA_IOIM_LM_PRESENT;
- }
-
- if (cdb->scsi_cdb[0] == TEST_UNIT_READY)
- return BFA_IOIM_LM_LUN_NOT_RDY;
-
- return BFA_IOIM_LM_LUN_NOT_SUP;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim)
-{
- return BFA_TRUE;
-}
-
-static void
-bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset,
- int buf_lun_cnt)
-{
- struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
- struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset);
- struct scsi_lun lun;
- int i, j;
-
- bfa_trc(ioim->bfa, buf_lun_cnt);
- for (j = 0; j < buf_lun_cnt; j++) {
- lun = *((struct scsi_lun *)(lun_data + j));
- for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
- if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
- continue;
- if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) &&
- (lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) &&
- (scsilun_to_int((struct scsi_lun *)&lun_list[i].lun)
- == scsilun_to_int((struct scsi_lun *)&lun))) {
- lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED;
- break;
- }
- } /* next lun in mask DB */
- } /* next lun in buf */
-}
-
-static int
-bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen,
- struct scsi_report_luns_data_s *rl)
-{
- struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
- struct scatterlist *sg = scsi_sglist(cmnd);
- struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
- struct scsi_lun *prev_rl_data = NULL, *base_rl_data;
- int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count;
- int lun_across_sg_bytes, bytes_from_next_buf;
- u64 last_lun, temp_last_lun;
-
- /* fetch luns from the first sg element */
- bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0,
- (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1);
-
- /* fetch luns from multiple sg elements */
- scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) {
- if (sgeid == 0) {
- prev_sg_len = sg_dma_len(sg);
- prev_rl_data = (struct scsi_lun *)
- phys_to_virt(sg_dma_address(sg));
- continue;
- }
-
- /* if the buf is having more data */
- lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun);
- if (lun_across_sg_bytes) {
- bfa_trc(ioim->bfa, lun_across_sg_bytes);
- bfa_stats(ioim->itnim, lm_lun_across_sg);
- bytes_from_next_buf = sizeof(struct scsi_lun) -
- lun_across_sg_bytes;
-
- /* from next buf take higher bytes */
- temp_last_lun = *((u64 *)
- phys_to_virt(sg_dma_address(sg)));
- last_lun |= temp_last_lun >>
- (lun_across_sg_bytes * BITS_PER_BYTE);
-
- /* from prev buf take higher bytes */
- temp_last_lun = *((u64 *)(prev_rl_data +
- (prev_sg_len - lun_across_sg_bytes)));
- temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE;
- last_lun = last_lun | (temp_last_lun <<
- (bytes_from_next_buf * BITS_PER_BYTE));
-
- bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1);
- } else
- bytes_from_next_buf = 0;
-
- *pgdlen += sg_dma_len(sg);
- prev_sg_len = sg_dma_len(sg);
- prev_rl_data = (struct scsi_lun *)
- phys_to_virt(sg_dma_address(sg));
- bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data,
- bytes_from_next_buf,
- sg_dma_len(sg) / sizeof(struct scsi_lun));
- }
-
- /* update the report luns data - based on fetched luns */
- sg = scsi_sglist(cmnd);
- base_rl_data = (struct scsi_lun *)rl->lun;
- base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1;
- for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) {
- if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) {
- base_rl_data[j] = lun_list[i].lun;
- lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE;
- j++;
- lun_fetched_cnt++;
- }
-
- if (j > base_count) {
- j = 0;
- sg = sg_next(sg);
- base_rl_data = (struct scsi_lun *)
- phys_to_virt(sg_dma_address(sg));
- base_count = sg_dma_len(sg) / sizeof(struct scsi_lun);
- }
- }
-
- bfa_trc(ioim->bfa, lun_fetched_cnt);
- return lun_fetched_cnt;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim)
-{
- struct scsi_inquiry_data_s *inq;
- struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio);
-
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
- inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg));
-
- bfa_trc(ioim->bfa, inq->device_type);
- inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON;
- return 0;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim)
-{
- struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
- struct scatterlist *sg = scsi_sglist(cmnd);
- struct bfi_ioim_rsp_s *m;
- struct scsi_report_luns_data_s *rl = NULL;
- int lun_count = 0, lun_fetched_cnt = 0;
- u32 residue, pgdlen = 0;
-
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
- if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED)
- return BFA_TRUE;
-
- m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
- if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION)
- return BFA_TRUE;
-
- pgdlen = sg_dma_len(sg);
- bfa_trc(ioim->bfa, pgdlen);
- rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg));
- lun_count = cpu_to_be32(rl->lun_list_length) / sizeof(struct scsi_lun);
- lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl);
-
- if (lun_count == lun_fetched_cnt)
- return BFA_TRUE;
-
- bfa_trc(ioim->bfa, lun_count);
- bfa_trc(ioim->bfa, lun_fetched_cnt);
- bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
-
- if (be32_to_cpu(rl->lun_list_length) <= pgdlen)
- rl->lun_list_length = be32_to_cpu(lun_fetched_cnt) *
- sizeof(struct scsi_lun);
- else
- bfa_stats(ioim->itnim, lm_small_buf_addresidue);
-
- bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
- bfa_trc(ioim->bfa, be32_to_cpu(m->residue));
-
- residue = be32_to_cpu(m->residue);
- residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun);
- bfa_stats(ioim->itnim, lm_wire_residue_changed);
- m->residue = be32_to_cpu(residue);
- bfa_trc(ioim->bfa, ioim->nsges);
- return BFA_FALSE;
-}
-
static void
__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
{
@@ -2454,83 +2167,6 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
m->scsi_status, sns_len, snsinfo, residue);
}
-static void
-__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete)
-{
- struct bfa_ioim_s *ioim = cbarg;
- int sns_len = 0xD;
- u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
- struct scsi_sense_s *snsinfo;
-
- if (!complete) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
- return;
- }
-
- snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
- ioim->fcpim->fcp, ioim->iotag);
- snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
- snsinfo->add_sense_length = 0xa;
- snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
- snsinfo->sense_key = ILLEGAL_REQUEST;
- bfa_trc(ioim->bfa, residue);
- bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
- SCSI_STATUS_CHECK_CONDITION, sns_len,
- (u8 *)snsinfo, residue);
-}
-
-static void
-__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete)
-{
- struct bfa_ioim_s *ioim = cbarg;
- int sns_len = 0xD;
- u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
- struct scsi_sense_s *snsinfo;
-
- if (!complete) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
- return;
- }
-
- snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
- ioim->iotag);
- snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
- snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN;
- snsinfo->asc = SCSI_ASC_TOCC;
- snsinfo->add_sense_length = 0x6;
- snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED;
- bfa_trc(ioim->bfa, residue);
- bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
- SCSI_STATUS_CHECK_CONDITION, sns_len,
- (u8 *)snsinfo, residue);
-}
-
-static void
-__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete)
-{
- struct bfa_ioim_s *ioim = cbarg;
- int sns_len = 0xD;
- u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
- struct scsi_sense_s *snsinfo;
-
- if (!complete) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
- return;
- }
-
- snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
- ioim->fcpim->fcp, ioim->iotag);
- snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
- snsinfo->add_sense_length = 0xa;
- snsinfo->sense_key = NOT_READY;
- snsinfo->asc = SCSI_ASC_LUN_NOT_READY;
- snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ;
- bfa_trc(ioim->bfa, residue);
- bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
- SCSI_STATUS_CHECK_CONDITION, sns_len,
- (u8 *)snsinfo, residue);
-}
-
void
bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
u16 rp_tag, u8 lp_tag)
@@ -2647,7 +2283,8 @@ bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
if (port) {
*pwwn = port->port_cfg.pwwn;
rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
- rp = rp_fcs->bfa_rport;
+ if (rp_fcs)
+ rp = rp_fcs->bfa_rport;
}
lunm_list = bfa_get_lun_mask_list(bfa);
@@ -2715,7 +2352,8 @@ bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
if (port) {
*pwwn = port->port_cfg.pwwn;
rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
- rp = rp_fcs->bfa_rport;
+ if (rp_fcs)
+ rp = rp_fcs->bfa_rport;
}
}
@@ -2757,7 +2395,6 @@ __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
return;
}
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
0, 0, NULL, 0);
}
@@ -2773,7 +2410,6 @@ __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
return;
}
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
0, 0, NULL, 0);
}
@@ -2788,7 +2424,6 @@ __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
return;
}
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
}
@@ -3132,7 +2767,6 @@ bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
ioim->bfa = fcpim->bfa;
ioim->fcpim = fcpim;
ioim->iosp = iosp;
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
INIT_LIST_HEAD(&ioim->sgpg_q);
bfa_reqq_winit(&ioim->iosp->reqq_wait,
bfa_ioim_qresume, ioim);
@@ -3170,7 +2804,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
evt = BFA_IOIM_SM_DONE;
else
evt = BFA_IOIM_SM_COMP;
- ioim->proc_rsp_data(ioim);
break;
case BFI_IOIM_STS_TIMEDOUT:
@@ -3206,7 +2839,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
if (rsp->abort_tag != ioim->abort_tag) {
bfa_trc(ioim->bfa, rsp->abort_tag);
bfa_trc(ioim->bfa, ioim->abort_tag);
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
return;
}
@@ -3225,7 +2857,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
WARN_ON(1);
}
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_sm_send_event(ioim, evt);
}
@@ -3244,15 +2875,7 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
bfa_ioim_cb_profile_comp(fcpim, ioim);
- if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
- return;
- }
-
- if (ioim->proc_rsp_data(ioim) == BFA_TRUE)
- bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
- else
- bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP);
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
}
/*
@@ -3364,35 +2987,6 @@ bfa_ioim_free(struct bfa_ioim_s *ioim)
void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
- struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
- struct bfa_lps_s *lps;
- enum bfa_ioim_lm_status status;
- struct scsi_lun scsilun;
-
- if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) {
- lps = BFA_IOIM_TO_LPS(ioim);
- int_to_scsilun(cmnd->device->lun, &scsilun);
- status = bfa_ioim_lm_check(ioim, lps,
- ioim->itnim->rport, scsilun);
- if (status == BFA_IOIM_LM_LUN_NOT_RDY) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY);
- bfa_stats(ioim->itnim, lm_lun_not_rdy);
- return;
- }
-
- if (status == BFA_IOIM_LM_LUN_NOT_SUP) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP);
- bfa_stats(ioim->itnim, lm_lun_not_sup);
- return;
- }
-
- if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC);
- bfa_stats(ioim->itnim, lm_rpl_data_changed);
- return;
- }
- }
-
bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
/*
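The removed bfa_ioim_lm_check() filtered every outgoing I/O against the LUN-mask table; with this patch that filtering moves to scan time (see bfad_im_slave_alloc() further down). A self-contained sketch of the core lookup, using illustrative types and limits rather than the driver's own:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_LUN_MASK_CFG 16
#define MASK_ACTIVE 1

struct lun_mask_entry {
	int state;
	uint8_t lp_tag;		/* local port tag */
	uint16_t rp_tag;	/* remote port tag */
	uint64_t lun;
};

static bool lun_is_visible(const struct lun_mask_entry *tbl,
			   uint8_t lp_tag, uint16_t rp_tag, uint64_t lun)
{
	for (int i = 0; i < MAX_LUN_MASK_CFG; i++) {
		if (tbl[i].state != MASK_ACTIVE)
			continue;
		if (tbl[i].lp_tag == lp_tag && tbl[i].rp_tag == rp_tag &&
		    tbl[i].lun == lun)
			return true;	/* LUN is in the mask: expose it */
	}
	return false;			/* not configured: keep it hidden */
}

int main(void)
{
	struct lun_mask_entry tbl[MAX_LUN_MASK_CFG] = {
		{ MASK_ACTIVE, 1, 7, 0 },
	};
	printf("%d\n", lun_is_visible(tbl, 1, 7, 0));	/* 1 */
	printf("%d\n", lun_is_visible(tbl, 1, 7, 3));	/* 0 */
	return 0;
}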
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index 1080bcb81cb..36f26da80f7 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -110,7 +110,6 @@ struct bfad_ioim_s;
struct bfad_tskim_s;
typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
-typedef bfa_boolean_t (*bfa_ioim_lm_proc_rsp_data_t) (struct bfa_ioim_s *ioim);
struct bfa_fcpim_s {
struct bfa_s *bfa;
@@ -124,7 +123,6 @@ struct bfa_fcpim_s {
u32 path_tov;
u16 q_depth;
u8 reqq; /* Request queue to be used */
- u8 lun_masking_pending;
struct list_head itnim_q; /* queue of active itnim */
struct list_head ioim_resfree_q; /* IOs waiting for f/w */
struct list_head ioim_comp_q; /* IO global comp Q */
@@ -181,7 +179,6 @@ struct bfa_ioim_s {
u8 reqq; /* Request queue for I/O */
u8 mode; /* IO is passthrough or not */
u64 start_time; /* IO's Profile start val */
- bfa_ioim_lm_proc_rsp_data_t proc_rsp_data; /* RSP data adjust */
};
struct bfa_ioim_sp_s {
@@ -261,10 +258,6 @@ struct bfa_itnim_s {
(__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET; \
} while (0)
-#define BFA_IOIM_TO_LPS(__ioim) \
- BFA_LPS_FROM_TAG(BFA_LPS_MOD(__ioim->bfa), \
- __ioim->itnim->rport->rport_info.lp_tag)
-
static inline bfa_boolean_t
bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
{
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index 95adb86d376..b52cbb6bcd5 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -582,11 +582,6 @@ void bfa_cb_rport_qos_scn_prio(void *rport,
#define BFA_LP_TAG_INVALID 0xff
void bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
void bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
-bfa_boolean_t bfa_rport_lunmask_active(struct bfa_rport_s *rp);
-wwn_t bfa_rport_get_pwwn(struct bfa_s *bfa, struct bfa_rport_s *rp);
-struct bfa_rport_s *bfa_rport_get_by_wwn(struct bfa_s *bfa, u16 vf_id,
- wwn_t *lpwwn, wwn_t rpwwn);
-void *bfa_cb_get_rp_by_wwn(void *arg, u16 vf_id, wwn_t *lpwwn, wwn_t rpwwn);
/*
* bfa fcxp API functions
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 66fb72531b3..404fd10ddb2 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -674,6 +674,7 @@ bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfa_fcs_vport_start(&vport->fcs_vport);
+ list_add_tail(&vport->list_entry, &bfad->vport_list);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
return BFA_STATUS_OK;
@@ -1404,6 +1405,7 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
bfad->ref_count = 0;
bfad->pport.bfad = bfad;
INIT_LIST_HEAD(&bfad->pbc_vport_list);
+ INIT_LIST_HEAD(&bfad->vport_list);
/* Setup the debugfs node for this bfad */
if (bfa_debugfs_enable)
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 9d95844ab46..1938fe0473e 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -491,7 +491,7 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
free_scsi_host:
bfad_scsi_host_free(bfad, im_port);
-
+ list_del(&vport->list_entry);
kfree(vport);
return 0;
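The list_del() added above pairs with the list_add_tail() in bfad_vport_create(): a vport must be unlinked from the host's vport list before it is freed, or the list walk in the LUN-scan reset below would touch freed memory. A compact sketch of that add/unlink/free discipline, with a hand-rolled list standing in for the kernel's:

#include <stdio.h>
#include <stdlib.h>

struct vport {
	struct vport *next, **pprev;
	int id;
};

static struct vport *vport_list;

static void vport_add(struct vport *v)
{
	v->next = vport_list;
	v->pprev = &vport_list;
	if (vport_list)
		vport_list->pprev = &v->next;
	vport_list = v;
}

static void vport_del(struct vport *v)
{
	*v->pprev = v->next;
	if (v->next)
		v->next->pprev = v->pprev;
}

int main(void)
{
	struct vport *v = calloc(1, sizeof(*v));
	v->id = 1;
	vport_add(v);
	for (struct vport *p = vport_list; p; p = p->next)
		printf("vport %d\n", p->id);
	vport_del(v);	/* unlink first... */
	free(v);	/* ...then free */
	return 0;
}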
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 06fc00caeb4..530de2b1200 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -2394,6 +2394,21 @@ out:
return 0;
}
+/* Function to reset the LUN SCAN mode */
+static void
+bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
+{
+ struct bfad_im_port_s *pport_im = bfad->pport.im_port;
+ struct bfad_vport_s *vport = NULL;
+
+ /* Set the scsi device LUN SCAN flags for base port */
+ bfad_reset_sdev_bflags(pport_im, lunmask_cfg);
+
+ /* Set the scsi device LUN SCAN flags for the vports */
+ list_for_each_entry(vport, &bfad->vport_list, list_entry)
+ bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
+}
+
int
bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
{
@@ -2401,11 +2416,17 @@ bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
unsigned long flags;
spin_lock_irqsave(&bfad->bfad_lock, flags);
- if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE)
+ if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
- else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE)
+ /* Set the LUN Scanning mode to be Sequential scan */
+ if (iocmd->status == BFA_STATUS_OK)
+ bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
+ } else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
- else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
+ /* Set the LUN Scanning mode to default REPORT_LUNS scan */
+ if (iocmd->status == BFA_STATUS_OK)
+ bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
+ } else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
return 0;
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 5e19a5f820e..dc5b9d99c45 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -43,6 +43,7 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_bsg_fc.h>
+#include <scsi/scsi_devinfo.h>
#include "bfa_modules.h"
#include "bfa_fcs.h"
@@ -227,6 +228,7 @@ struct bfad_s {
struct list_head active_aen_q;
struct bfa_aen_entry_s aen_list[BFA_AEN_MAX_ENTRY];
spinlock_t bfad_aen_spinlock;
+ struct list_head vport_list;
};
/* BFAD state machine events */
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index e5db649e8eb..3153923f5b6 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -918,16 +918,70 @@ bfad_get_itnim(struct bfad_im_port_s *im_port, int id)
}
/*
+ * Function is invoked from the SCSI Host Template slave_alloc() entry point.
+ * Has the logic to query the LUN Mask database to check if this LUN needs to
+ * be made visible to the SCSI mid-layer or not.
+ *
+ * Returns BFA_STATUS_OK if this LUN needs to be added to the OS stack.
+ * Returns -ENXIO to notify the SCSI mid-layer not to add this LUN to the
+ * OS stack.
+ */
+static int
+bfad_im_check_if_make_lun_visible(struct scsi_device *sdev,
+ struct fc_rport *rport)
+{
+ struct bfad_itnim_data_s *itnim_data =
+ (struct bfad_itnim_data_s *) rport->dd_data;
+ struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
+ struct bfa_rport_s *bfa_rport = itnim_data->itnim->bfa_itnim->rport;
+ struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(bfa);
+ int i = 0, ret = -ENXIO;
+
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE &&
+ scsilun_to_int(&lun_list[i].lun) == sdev->lun &&
+ lun_list[i].rp_tag == bfa_rport->rport_tag &&
+ lun_list[i].lp_tag == (u8)bfa_rport->rport_info.lp_tag) {
+ ret = BFA_STATUS_OK;
+ break;
+ }
+ }
+ return ret;
+}
+
+/*
* Scsi_Host template entry slave_alloc
*/
static int
bfad_im_slave_alloc(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ struct bfad_itnim_data_s *itnim_data =
+ (struct bfad_itnim_data_s *) rport->dd_data;
+ struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) {
+ /*
+ * We should not mask LUN 0 - masking it would translate
+ * to no LUN / TARGET for the SCSI mid-layer, resulting in no scan.
+ */
+ if (sdev->lun == 0) {
+ sdev->sdev_bflags |= BLIST_NOREPORTLUN |
+ BLIST_SPARSELUN;
+ goto done;
+ }
+
+ /*
+ * Query the LUN Mask configuration to decide whether to expose
+ * this LUN to the SCSI mid-layer or to mask it.
+ */
+ if (bfad_im_check_if_make_lun_visible(sdev, rport) !=
+ BFA_STATUS_OK)
+ return -ENXIO;
+ }
+done:
sdev->hostdata = rport->dd_data;
return 0;
@@ -1037,6 +1091,8 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
&& (fc_rport->scsi_target_id < MAX_FCP_TARGET))
itnim->scsi_tgt_id = fc_rport->scsi_target_id;
+ itnim->channel = fc_rport->channel;
+
return;
}
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 004b6cf848d..0814367ef10 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -91,6 +91,7 @@ struct bfad_itnim_s {
struct fc_rport *fc_rport;
struct bfa_itnim_s *bfa_itnim;
u16 scsi_tgt_id;
+ u16 channel;
u16 queue_work;
unsigned long last_ramp_up_time;
unsigned long last_queue_full_time;
@@ -166,4 +167,30 @@ irqreturn_t bfad_intx(int irq, void *dev_id);
int bfad_im_bsg_request(struct fc_bsg_job *job);
int bfad_im_bsg_timeout(struct fc_bsg_job *job);
+/*
+ * Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the
+ * SCSI mid-layer to choose LUN Scanning mode REPORT_LUNS vs. Sequential Scan
+ *
+ * Internally iterates over all the ITNIMs that are part of the im_port and
+ * sets the sdev_bflags for the scsi_device associated with LUN #0.
+ */
+#define bfad_reset_sdev_bflags(__im_port, __lunmask_cfg) do { \
+ struct scsi_device *__sdev = NULL; \
+ struct bfad_itnim_s *__itnim = NULL; \
+ u32 scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN; \
+ list_for_each_entry(__itnim, &((__im_port)->itnim_mapped_list), \
+ list_entry) { \
+ __sdev = scsi_device_lookup((__im_port)->shost, \
+ __itnim->channel, \
+ __itnim->scsi_tgt_id, 0); \
+ if (__sdev) { \
+ if ((__lunmask_cfg) == BFA_TRUE) \
+ __sdev->sdev_bflags |= scan_flags; \
+ else \
+ __sdev->sdev_bflags &= ~scan_flags; \
+ scsi_device_put(__sdev); \
+ } \
+ } \
+} while (0)
+
#endif
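The macro above sets or clears BLIST_NOREPORTLUN | BLIST_SPARSELUN on LUN 0's scsi_device: with LUN masking enabled the pair forces sequential scanning, and disabling it restores the default REPORT_LUNS scan. A minimal sketch of what the flag manipulation amounts to, with placeholder flag values rather than the kernel's BLIST_* constants:

#include <stdbool.h>
#include <stdio.h>

#define BLIST_NOREPORTLUN (1u << 0)	/* placeholder values */
#define BLIST_SPARSELUN   (1u << 1)

static unsigned int reset_scan_flags(unsigned int bflags, bool lunmask_on)
{
	const unsigned int scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN;

	return lunmask_on ? (bflags | scan_flags) : (bflags & ~scan_flags);
}

int main(void)
{
	unsigned int f = 0;
	f = reset_scan_flags(f, true);
	printf("enabled:  %#x\n", f);	/* 0x3 */
	f = reset_scan_flags(f, false);
	printf("disabled: %#x\n", f);	/* 0x0 */
	return 0;
}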
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index c5360ffb4be..d3ff9cd4023 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1868,8 +1868,9 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
if (!tdata->skb) {
- pr_warn("alloc skb %u+%u, opcode 0x%x failed.\n",
- cdev->skb_tx_rsvd, headroom, opcode);
+ struct cxgbi_sock *csk = cconn->cep->csk;
+ struct net_device *ndev = cdev->ports[csk->port_id];
+ ndev->stats.tx_dropped++;
return -ENOMEM;
}
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 4ef021291a4..04c5cea47a2 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -466,6 +466,11 @@ static int alua_check_sense(struct scsi_device *sdev,
* Power On, Reset, or Bus Device Reset, just retry.
*/
return ADD_TO_MLQUEUE;
+ if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)
+ /*
+ * Mode Parameters Changed
+ */
+ return ADD_TO_MLQUEUE;
if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06)
/*
* ALUA state changed
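The new branch treats Unit Attention 0x2a/0x01 (Mode Parameters Changed) like the existing 0x2a/0x06 (ALUA state changed) case: requeue instead of fail. A small sketch of the (ASC, ASCQ) dispatch, with an illustrative disposition enum in place of the SCSI mid-layer's return codes:

#include <stdio.h>

enum disposition { RETRY_NOW, FAIL_IO };

static enum disposition check_unit_attention(unsigned char asc,
					     unsigned char ascq)
{
	if (asc == 0x29)			/* power on / reset */
		return RETRY_NOW;
	if (asc == 0x2a && ascq == 0x01)	/* mode parameters changed */
		return RETRY_NOW;
	if (asc == 0x2a && ascq == 0x06)	/* ALUA state changed */
		return RETRY_NOW;
	return FAIL_IO;
}

int main(void)
{
	printf("%d\n", check_unit_attention(0x2a, 0x01));	/* 0: retry */
	printf("%d\n", check_unit_attention(0x44, 0x00));	/* 1: fail  */
	return 0;
}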
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 841ebf4a678..53a31c753cb 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -953,6 +953,8 @@ static int __init rdac_init(void)
if (!kmpath_rdacd) {
scsi_unregister_device_handler(&rdac_dh);
printk(KERN_ERR "kmpath_rdacd creation failed.\n");
+
+ r = -EINVAL;
}
done:
return r;
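The fix above makes the failure path actually report failure: detecting that the workqueue could not be created is not enough if r stays 0, since the module would then load half-initialised. A tiny sketch of the corrected init-path shape, with a stub standing in for the workqueue creation:

#include <stdio.h>

static void *create_workqueue_stub(void) { return NULL; } /* simulate failure */

static int rdac_like_init(void)
{
	int r = 0;
	void *wq = create_workqueue_stub();

	if (!wq) {
		/* unregister the device handler here ... */
		fprintf(stderr, "workqueue creation failed\n");
		r = -22;	/* -EINVAL, as in the patch */
	}
	return r;
}

int main(void)
{
	printf("init -> %d\n", rdac_like_init());
	return 0;
}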
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 8d67467dd9c..e9599600aa2 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -58,7 +58,11 @@ module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \
"Direct Data Placement (DDP).");
-DEFINE_MUTEX(fcoe_config_mutex);
+unsigned int fcoe_debug_logging;
+module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+static DEFINE_MUTEX(fcoe_config_mutex);
static struct workqueue_struct *fcoe_wq;
@@ -67,8 +71,8 @@ static DECLARE_COMPLETION(fcoe_flush_completion);
/* fcoe host list */
/* must only be accessed under the RTNL mutex */
-LIST_HEAD(fcoe_hostlist);
-DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
+static LIST_HEAD(fcoe_hostlist);
+static DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
/* Function Prototypes */
static int fcoe_reset(struct Scsi_Host *);
@@ -157,7 +161,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
.lport_set_port_id = fcoe_set_port_id,
};
-struct fc_function_template fcoe_nport_fc_functions = {
+static struct fc_function_template fcoe_nport_fc_functions = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
@@ -197,7 +201,7 @@ struct fc_function_template fcoe_nport_fc_functions = {
.bsg_request = fc_lport_bsg_request,
};
-struct fc_function_template fcoe_vport_fc_functions = {
+static struct fc_function_template fcoe_vport_fc_functions = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
@@ -433,7 +437,7 @@ static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
*
* Caller must be holding the RTNL mutex
*/
-void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
+static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
{
struct net_device *netdev = fcoe->netdev;
struct fcoe_ctlr *fip = &fcoe->ctlr;
@@ -748,7 +752,7 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
*
* Returns: True for read types I/O, otherwise returns false.
*/
-bool fcoe_oem_match(struct fc_frame *fp)
+static bool fcoe_oem_match(struct fc_frame *fp)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
struct fcp_cmnd *fcp;
@@ -756,11 +760,12 @@ bool fcoe_oem_match(struct fc_frame *fp)
if (fc_fcp_is_read(fr_fsp(fp)) &&
(fr_fsp(fp)->data_len > fcoe_ddp_min))
return true;
- else if (!(ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)) {
+ else if ((fr_fsp(fp) == NULL) &&
+ (fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) &&
+ (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) {
fcp = fc_frame_payload_get(fp, sizeof(*fcp));
- if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN &&
- fcp && (ntohl(fcp->fc_dl) > fcoe_ddp_min) &&
- (fcp->fc_flags & FCP_CFL_WRDATA))
+ if ((fcp->fc_flags & FCP_CFL_WRDATA) &&
+ (ntohl(fcp->fc_dl) > fcoe_ddp_min))
return true;
}
return false;
@@ -1106,7 +1111,7 @@ static int __init fcoe_if_init(void)
*
* Returns: 0 on success
*/
-int __exit fcoe_if_exit(void)
+static int __exit fcoe_if_exit(void)
{
fc_release_transport(fcoe_nport_scsi_transport);
fc_release_transport(fcoe_vport_scsi_transport);
@@ -1295,7 +1300,7 @@ static inline unsigned int fcoe_select_cpu(void)
*
* Returns: 0 for success
*/
-int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
+static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
struct packet_type *ptype, struct net_device *olddev)
{
struct fc_lport *lport;
@@ -1451,7 +1456,7 @@ static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
*
* Return: 0 for success
*/
-int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
+static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
{
int wlen;
u32 crc;
@@ -1671,8 +1676,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
skb->dev ? skb->dev->name : "<NULL>");
port = lport_priv(lport);
- if (skb_is_nonlinear(skb))
- skb_linearize(skb); /* not ideal */
+ skb_linearize(skb); /* the skb_is_nonlinear() check is done inside skb_linearize() */
/*
* Frame length checks and setting up the header pointers
@@ -1728,7 +1732,7 @@ drop:
*
* Return: 0 for success
*/
-int fcoe_percpu_receive_thread(void *arg)
+static int fcoe_percpu_receive_thread(void *arg)
{
struct fcoe_percpu_s *p = arg;
struct sk_buff *skb;
@@ -2146,7 +2150,7 @@ out_nortnl:
* Returns: 0 if the ethtool query was successful
* -1 if the ethtool query failed
*/
-int fcoe_link_speed_update(struct fc_lport *lport)
+static int fcoe_link_speed_update(struct fc_lport *lport)
{
struct net_device *netdev = fcoe_netdev(lport);
struct ethtool_cmd ecmd;
@@ -2180,7 +2184,7 @@ int fcoe_link_speed_update(struct fc_lport *lport)
* Returns: 0 if link is UP and OK, -1 if not
*
*/
-int fcoe_link_ok(struct fc_lport *lport)
+static int fcoe_link_ok(struct fc_lport *lport)
{
struct net_device *netdev = fcoe_netdev(lport);
@@ -2200,7 +2204,7 @@ int fcoe_link_ok(struct fc_lport *lport)
* there no packets that will be handled by the lport, but also that any
* threads already handling packet have returned.
*/
-void fcoe_percpu_clean(struct fc_lport *lport)
+static void fcoe_percpu_clean(struct fc_lport *lport)
{
struct fcoe_percpu_s *pp;
struct fcoe_rcv_info *fr;
@@ -2251,7 +2255,7 @@ void fcoe_percpu_clean(struct fc_lport *lport)
*
* Returns: Always 0 (return value required by FC transport template)
*/
-int fcoe_reset(struct Scsi_Host *shost)
+static int fcoe_reset(struct Scsi_Host *shost)
{
struct fc_lport *lport = shost_priv(shost);
struct fcoe_port *port = lport_priv(lport);
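The reworked fcoe_oem_match() offloads two cases: read-type I/O above the DDP threshold, and unsolicited write commands whose burst length exceeds it, now checked only when no fsp is attached and the exchange ID is unknown. A condensed sketch of that decision, with illustrative field names rather than libfc's:

#include <stdbool.h>
#include <stdio.h>

#define DDP_MIN 4096u

struct frame {
	bool is_read;		/* read-type I/O with an attached fsp */
	bool unsol_cmd;		/* unsolicited command, no fsp yet */
	bool write_data;	/* FCP_CFL_WRDATA set in the payload */
	unsigned int data_len;	/* burst length in bytes */
};

static bool oem_match(const struct frame *fp)
{
	if (fp->is_read && fp->data_len > DDP_MIN)
		return true;
	if (fp->unsol_cmd && fp->write_data && fp->data_len > DDP_MIN)
		return true;
	return false;
}

int main(void)
{
	struct frame rd = { .is_read = true, .data_len = 8192 };
	struct frame wr = { .unsol_cmd = true, .write_data = true,
			    .data_len = 512 };
	printf("%d %d\n", oem_match(&rd), oem_match(&wr));	/* 1 0 */
	return 0;
}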
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 6c6884bcf84..bcc89e63949 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -40,9 +40,7 @@
#define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */
#define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */
-unsigned int fcoe_debug_logging;
-module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+extern unsigned int fcoe_debug_logging;
#define FCOE_LOGGING 0x01 /* General logging, not categorized */
#define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 5140f5d0fd6..b96962c3944 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -4271,7 +4271,9 @@ static void stop_controller_lockup_detector(struct ctlr_info *h)
remove_ctlr_from_lockup_detector_list(h);
/* If the list of ctlr's to monitor is empty, stop the thread */
if (list_empty(&hpsa_ctlr_list)) {
+ spin_unlock_irqrestore(&lockup_detector_lock, flags);
kthread_stop(hpsa_lockup_detector);
+ spin_lock_irqsave(&lockup_detector_lock, flags);
hpsa_lockup_detector = NULL;
}
spin_unlock_irqrestore(&lockup_detector_lock, flags);
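The hpsa change drops the lockup-detector lock around kthread_stop(): joining a thread while holding a lock that the thread itself takes can deadlock. An analogous user-space sketch of the drop-join-retake ordering, using pthreads:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int stop;

static void *detector(void *arg)
{
	pthread_mutex_lock(&lock);	/* the monitored list's lock */
	/* ... scan the list, observe 'stop' ... */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, detector, NULL);

	pthread_mutex_lock(&lock);
	stop = 1;
	pthread_mutex_unlock(&lock);	/* drop before joining, as in the patch */
	pthread_join(t, NULL);
	pthread_mutex_lock(&lock);
	/* ... clear the detector pointer ... */
	pthread_mutex_unlock(&lock);
	printf("stopped cleanly\n");
	return 0;
}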
diff --git a/drivers/scsi/isci/firmware/Makefile b/drivers/scsi/isci/firmware/Makefile
deleted file mode 100644
index 5f54461cabc..00000000000
--- a/drivers/scsi/isci/firmware/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-# Makefile for create_fw
-#
-CC=gcc
-CFLAGS=-c -Wall -O2 -g
-LDFLAGS=
-SOURCES=create_fw.c
-OBJECTS=$(SOURCES:.cpp=.o)
-EXECUTABLE=create_fw
-
-all: $(SOURCES) $(EXECUTABLE)
-
-$(EXECUTABLE): $(OBJECTS)
- $(CC) $(LDFLAGS) $(OBJECTS) -o $@
-
-.c.o:
- $(CC) $(CFLAGS) $< -O $@
-
-clean:
- rm -f *.o $(EXECUTABLE)
diff --git a/drivers/scsi/isci/firmware/README b/drivers/scsi/isci/firmware/README
deleted file mode 100644
index 8056d2bd233..00000000000
--- a/drivers/scsi/isci/firmware/README
+++ /dev/null
@@ -1,36 +0,0 @@
-This defines the temporary binary blob we are to pass to the SCU
-driver to emulate the binary firmware that we will eventually be
-able to access via NVRAM on the SCU controller.
-
-The current size of the binary blob is expected to be 149 bytes or larger
-
-Header Types:
-0x1: Phy Masks
-0x2: Phy Gens
-0x3: SAS Addrs
-0xff: End of Data
-
-ID string - u8[12]: "#SCU MAGIC#\0"
-Version - u8: 1
-SubVersion - u8: 0
-
-Header Type - u8: 0x1
-Size - u8: 8
-Phy Mask - u32[8]
-
-Header Type - u8: 0x2
-Size - u8: 8
-Phy Gen - u32[8]
-
-Header Type - u8: 0x3
-Size - u8: 8
-Sas Addr - u64[8]
-
-Header Type - u8: 0xf
-
-
-==============================================================================
-
-Place isci_firmware.bin in /lib/firmware
-Be sure to recreate the initramfs image to include the firmware.
-
diff --git a/drivers/scsi/isci/firmware/create_fw.c b/drivers/scsi/isci/firmware/create_fw.c
deleted file mode 100644
index c7a2887a7e9..00000000000
--- a/drivers/scsi/isci/firmware/create_fw.c
+++ /dev/null
@@ -1,99 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <string.h>
-#include <errno.h>
-#include <asm/types.h>
-#include <strings.h>
-#include <stdint.h>
-
-#include "create_fw.h"
-#include "../probe_roms.h"
-
-int write_blob(struct isci_orom *isci_orom)
-{
- FILE *fd;
- int err;
- size_t count;
-
- fd = fopen(blob_name, "w+");
- if (!fd) {
- perror("Open file for write failed");
- fclose(fd);
- return -EIO;
- }
-
- count = fwrite(isci_orom, sizeof(struct isci_orom), 1, fd);
- if (count != 1) {
- perror("Write data failed");
- fclose(fd);
- return -EIO;
- }
-
- fclose(fd);
-
- return 0;
-}
-
-void set_binary_values(struct isci_orom *isci_orom)
-{
- int ctrl_idx, phy_idx, port_idx;
-
- /* setting OROM signature */
- strncpy(isci_orom->hdr.signature, sig, strlen(sig));
- isci_orom->hdr.version = version;
- isci_orom->hdr.total_block_length = sizeof(struct isci_orom);
- isci_orom->hdr.hdr_length = sizeof(struct sci_bios_oem_param_block_hdr);
- isci_orom->hdr.num_elements = num_elements;
-
- for (ctrl_idx = 0; ctrl_idx < 2; ctrl_idx++) {
- isci_orom->ctrl[ctrl_idx].controller.mode_type = mode_type;
- isci_orom->ctrl[ctrl_idx].controller.max_concurrent_dev_spin_up =
- max_num_concurrent_dev_spin_up;
- isci_orom->ctrl[ctrl_idx].controller.do_enable_ssc =
- enable_ssc;
-
- for (port_idx = 0; port_idx < 4; port_idx++)
- isci_orom->ctrl[ctrl_idx].ports[port_idx].phy_mask =
- phy_mask[ctrl_idx][port_idx];
-
- for (phy_idx = 0; phy_idx < 4; phy_idx++) {
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.high =
- (__u32)(sas_addr[ctrl_idx][phy_idx] >> 32);
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.low =
- (__u32)(sas_addr[ctrl_idx][phy_idx]);
-
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control0 =
- afe_tx_amp_control0;
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control1 =
- afe_tx_amp_control1;
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control2 =
- afe_tx_amp_control2;
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control3 =
- afe_tx_amp_control3;
- }
- }
-}
-
-int main(void)
-{
- int err;
- struct isci_orom *isci_orom;
-
- isci_orom = malloc(sizeof(struct isci_orom));
- memset(isci_orom, 0, sizeof(struct isci_orom));
-
- set_binary_values(isci_orom);
-
- err = write_blob(isci_orom);
- if (err < 0) {
- free(isci_orom);
- return err;
- }
-
- free(isci_orom);
- return 0;
-}
diff --git a/drivers/scsi/isci/firmware/create_fw.h b/drivers/scsi/isci/firmware/create_fw.h
deleted file mode 100644
index 5f298828d22..00000000000
--- a/drivers/scsi/isci/firmware/create_fw.h
+++ /dev/null
@@ -1,77 +0,0 @@
-#ifndef _CREATE_FW_H_
-#define _CREATE_FW_H_
-#include "../probe_roms.h"
-
-
-/* we are configuring for 2 SCUs */
-static const int num_elements = 2;
-
-/*
- * For all defined arrays:
- * elements 0-3 are for SCU0, ports 0-3
- * elements 4-7 are for SCU1, ports 0-3
- *
- * valid configurations for one SCU are:
- * P0 P1 P2 P3
- * ----------------
- * 0xF,0x0,0x0,0x0 # 1 x4 port
- * 0x3,0x0,0x4,0x8 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are each x1
- * # ports
- * 0x1,0x2,0xC,0x0 # Phys 0 and 1 are each x1 ports, phy 2 and phy 3 are a x2
- * # port
- * 0x3,0x0,0xC,0x0 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are a x2 port
- * 0x1,0x2,0x4,0x8 # Each phy is a x1 port (this is the default configuration)
- *
- * if there is a port/phy on which you do not wish to override the default
- * values, use the value assigned to UNINIT_PARAM (255).
- */
-
-/* discovery mode type (port auto config mode by default ) */
-
-/*
- * if there is a port/phy on which you do not wish to override the default
- * values, use the value "0000000000000000". SAS address of zero's is
- * considered invalid and will not be used.
- */
-#ifdef MPC
-static const int mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
-static const __u8 phy_mask[2][4] = { {1, 2, 4, 8},
- {1, 2, 4, 8} };
-static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFFF0000001ULL,
- 0x5FCFFFFFF0000002ULL,
- 0x5FCFFFFFF0000003ULL,
- 0x5FCFFFFFF0000004ULL },
- { 0x5FCFFFFFF0000005ULL,
- 0x5FCFFFFFF0000006ULL,
- 0x5FCFFFFFF0000007ULL,
- 0x5FCFFFFFF0000008ULL } };
-#else /* APC (default) */
-static const int mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
-static const __u8 phy_mask[2][4];
-static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFF00000001ULL,
- 0x5FCFFFFF00000001ULL,
- 0x5FCFFFFF00000001ULL,
- 0x5FCFFFFF00000001ULL },
- { 0x5FCFFFFF00000002ULL,
- 0x5FCFFFFF00000002ULL,
- 0x5FCFFFFF00000002ULL,
- 0x5FCFFFFF00000002ULL } };
-#endif
-
-/* Maximum number of concurrent device spin up */
-static const int max_num_concurrent_dev_spin_up = 1;
-
-/* enable of ssc operation */
-static const int enable_ssc;
-
-/* AFE_TX_AMP_CONTROL */
-static const unsigned int afe_tx_amp_control0 = 0x000bdd08;
-static const unsigned int afe_tx_amp_control1 = 0x000ffc00;
-static const unsigned int afe_tx_amp_control2 = 0x000b7c09;
-static const unsigned int afe_tx_amp_control3 = 0x000afc6e;
-
-static const char blob_name[] = "isci_firmware.bin";
-static const char sig[] = "ISCUOEMB";
-static const unsigned char version = 0x10;
-
-#endif
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index e7fe9c4c85b..1a65d651423 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -899,7 +899,8 @@ static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
*/
if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
(iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
- (iphy->is_in_link_training == true && is_phy_starting(iphy))) {
+ (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
+ (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) {
is_controller_start_complete = false;
break;
}
@@ -1666,6 +1667,9 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
/* Default to no SSC operation. */
ihost->oem_parameters.controller.do_enable_ssc = false;
+ /* Default to short cables on all phys. */
+ ihost->oem_parameters.controller.cable_selection_mask = 0;
+
/* Initialize all of the port parameter information to narrow ports. */
for (index = 0; index < SCI_MAX_PORTS; index++) {
ihost->oem_parameters.ports[index].phy_mask = 0;
@@ -1673,8 +1677,9 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
/* Initialize all of the phy parameter information. */
for (index = 0; index < SCI_MAX_PHYS; index++) {
- /* Default to 6G (i.e. Gen 3) for now. */
- ihost->user_parameters.phys[index].max_speed_generation = 3;
+ /* Default to 3G (i.e. Gen 2). */
+ ihost->user_parameters.phys[index].max_speed_generation =
+ SCIC_SDS_PARM_GEN2_SPEED;
/* the frequencies cannot be 0 */
ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
@@ -1694,7 +1699,7 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
ihost->user_parameters.ssp_inactivity_timeout = 5;
ihost->user_parameters.stp_max_occupancy_timeout = 5;
ihost->user_parameters.ssp_max_occupancy_timeout = 20;
- ihost->user_parameters.no_outbound_task_timeout = 20;
+ ihost->user_parameters.no_outbound_task_timeout = 2;
}
static void controller_timeout(unsigned long data)
@@ -1759,7 +1764,7 @@ static enum sci_status sci_controller_construct(struct isci_host *ihost,
return sci_controller_reset(ihost);
}
-int sci_oem_parameters_validate(struct sci_oem_params *oem)
+int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
{
int i;
@@ -1791,18 +1796,61 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem)
oem->controller.max_concurr_spin_up < 1)
return -EINVAL;
+ if (oem->controller.do_enable_ssc) {
+ if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1)
+ return -EINVAL;
+
+ if (version >= ISCI_ROM_VER_1_1) {
+ u8 test = oem->controller.ssc_sata_tx_spread_level;
+
+ switch (test) {
+ case 0:
+ case 2:
+ case 3:
+ case 6:
+ case 7:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ test = oem->controller.ssc_sas_tx_spread_level;
+ if (oem->controller.ssc_sas_tx_type == 0) {
+ switch (test) {
+ case 0:
+ case 2:
+ case 3:
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (oem->controller.ssc_sas_tx_type == 1) {
+ switch (test) {
+ case 0:
+ case 3:
+ case 6:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ }
+ }
+
return 0;
}
static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
{
u32 state = ihost->sm.current_state_id;
+ struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
if (state == SCIC_RESET ||
state == SCIC_INITIALIZING ||
state == SCIC_INITIALIZED) {
- if (sci_oem_parameters_validate(&ihost->oem_parameters))
+ if (sci_oem_parameters_validate(&ihost->oem_parameters,
+ pci_info->orom->hdr.version))
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
return SCI_SUCCESS;
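For OEM blocks at or above ROM version 1.1, the validation added above admits only fixed sets of SSC spread levels, with the legal SAS set depending on the transmitter type. A stripped-down sketch of those checks, with constants mirroring the switch statements and an illustrative version encoding:

#include <stdbool.h>
#include <stdio.h>

static bool sata_level_ok(unsigned char lvl)
{
	switch (lvl) {
	case 0: case 2: case 3: case 6: case 7:
		return true;
	default:
		return false;
	}
}

static bool sas_level_ok(int tx_type, unsigned char lvl)
{
	if (tx_type == 0)
		return lvl == 0 || lvl == 2 || lvl == 3;
	if (tx_type == 1)
		return lvl == 0 || lvl == 3 || lvl == 6;
	return false;
}

int main(void)
{
	printf("%d %d\n", sata_level_ok(6), sas_level_ok(0, 6));	/* 1 0 */
	return 0;
}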
@@ -1857,6 +1905,31 @@ static void power_control_timeout(unsigned long data)
ihost->power_control.phys_waiting--;
ihost->power_control.phys_granted_power++;
sci_phy_consume_power_handler(iphy);
+
+ if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+ u8 j;
+
+ for (j = 0; j < SCI_MAX_PHYS; j++) {
+ struct isci_phy *requester = ihost->power_control.requesters[j];
+
+ /*
+ * Search the power_control queue to see if there are other phys
+ * attached to the same remote device. If found, take all of
+ * them out of await_sas_power state.
+ */
+ if (requester != NULL && requester != iphy) {
+ u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr,
+ iphy->frame_rcvd.iaf.sas_addr,
+ sizeof(requester->frame_rcvd.iaf.sas_addr));
+
+ if (other == 0) {
+ ihost->power_control.requesters[j] = NULL;
+ ihost->power_control.phys_waiting--;
+ sci_phy_consume_power_handler(requester);
+ }
+ }
+ }
+ }
}
/*
@@ -1891,9 +1964,34 @@ void sci_controller_power_control_queue_insert(struct isci_host *ihost,
ihost->power_control.timer_started = true;
} else {
- /* Add the phy in the waiting list */
- ihost->power_control.requesters[iphy->phy_index] = iphy;
- ihost->power_control.phys_waiting++;
+ /*
+ * If a phy attached to the same SAS address as this phy is
+ * already in the READY state, this phy does not need to wait.
+ */
+ u8 i;
+ struct isci_phy *current_phy;
+
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ u8 other;
+ current_phy = &ihost->phys[i];
+
+ other = memcmp(current_phy->frame_rcvd.iaf.sas_addr,
+ iphy->frame_rcvd.iaf.sas_addr,
+ sizeof(current_phy->frame_rcvd.iaf.sas_addr));
+
+ if (current_phy->sm.current_state_id == SCI_PHY_READY &&
+ current_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS &&
+ other == 0) {
+ sci_phy_consume_power_handler(iphy);
+ break;
+ }
+ }
+
+ if (i == SCI_MAX_PHYS) {
+ /* Add the phy in the waiting list */
+ ihost->power_control.requesters[iphy->phy_index] = iphy;
+ ihost->power_control.phys_waiting++;
+ }
}
}
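The insertion path now grants power immediately when another phy reporting the same attached SAS address is already READY, instead of queueing behind the spin-up timer. A sketch of that sibling check, with illustrative structures in place of the driver's:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_PHYS 4

struct phy {
	unsigned char sas_addr[8];
	bool ready;
};

static bool sibling_ready(const struct phy *phys, const struct phy *p)
{
	for (int i = 0; i < MAX_PHYS; i++) {
		if (&phys[i] == p)
			continue;
		if (phys[i].ready &&
		    !memcmp(phys[i].sas_addr, p->sas_addr,
			    sizeof(p->sas_addr)))
			return true;	/* same device already powered */
	}
	return false;
}

int main(void)
{
	struct phy phys[MAX_PHYS] = {0};
	memcpy(phys[0].sas_addr, "\x50\x01\x02\x03\x04\x05\x06\x07", 8);
	phys[0].ready = true;
	memcpy(phys[1].sas_addr, "\x50\x01\x02\x03\x04\x05\x06\x07", 8);
	printf("%d\n", sibling_ready(phys, &phys[1]));	/* 1: grant now */
	return 0;
}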
@@ -1908,162 +2006,250 @@ void sci_controller_power_control_queue_remove(struct isci_host *ihost,
ihost->power_control.requesters[iphy->phy_index] = NULL;
}
+static int is_long_cable(int phy, unsigned char selection_byte)
+{
+ return !!(selection_byte & (1 << phy));
+}
+
+static int is_medium_cable(int phy, unsigned char selection_byte)
+{
+ return !!(selection_byte & (1 << (phy + 4)));
+}
+
+static enum cable_selections decode_selection_byte(
+ int phy,
+ unsigned char selection_byte)
+{
+ return ((selection_byte & (1 << phy)) ? 1 : 0)
+ + (selection_byte & (1 << (phy + 4)) ? 2 : 0);
+}
+
+static unsigned char *to_cable_select(struct isci_host *ihost)
+{
+ if (is_cable_select_overridden())
+ return ((unsigned char *)&cable_selection_override)
+ + ihost->id;
+ else
+ return &ihost->oem_parameters.controller.cable_selection_mask;
+}
+
+enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy)
+{
+ return decode_selection_byte(phy, *to_cable_select(ihost));
+}
+
+char *lookup_cable_names(enum cable_selections selection)
+{
+ static char *cable_names[] = {
+ [short_cable] = "short",
+ [long_cable] = "long",
+ [medium_cable] = "medium",
+ [undefined_cable] = "<undefined, assumed long>" /* bit 0==1 */
+ };
+ return (selection <= undefined_cable) ? cable_names[selection]
+ : cable_names[undefined_cable];
+}
+
#define AFE_REGISTER_WRITE_DELAY 10
-/* Initialize the AFE for this phy index. We need to read the AFE setup from
- * the OEM parameters
- */
static void sci_controller_afe_initialization(struct isci_host *ihost)
{
+ struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
const struct sci_oem_params *oem = &ihost->oem_parameters;
struct pci_dev *pdev = ihost->pdev;
u32 afe_status;
u32 phy_id;
+ unsigned char cable_selection_mask = *to_cable_select(ihost);
/* Clear DFX Status registers */
- writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0);
+ writel(0x0081000f, &afe->afe_dfx_master_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
- if (is_b0(pdev)) {
+ if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) {
/* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
- * Timer, PM Stagger Timer */
- writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2);
+ * Timer, PM Stagger Timer
+ */
+ writel(0x0007FFFF, &afe->afe_pmsn_master_control2);
udelay(AFE_REGISTER_WRITE_DELAY);
}
/* Configure bias currents to normal */
if (is_a2(pdev))
- writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control);
+ writel(0x00005A00, &afe->afe_bias_control);
else if (is_b0(pdev) || is_c0(pdev))
- writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control);
+ writel(0x00005F00, &afe->afe_bias_control);
+ else if (is_c1(pdev))
+ writel(0x00005500, &afe->afe_bias_control);
udelay(AFE_REGISTER_WRITE_DELAY);
/* Enable PLL */
- if (is_b0(pdev) || is_c0(pdev))
- writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0);
- else
- writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0);
+ if (is_a2(pdev))
+ writel(0x80040908, &afe->afe_pll_control0);
+ else if (is_b0(pdev) || is_c0(pdev))
+ writel(0x80040A08, &afe->afe_pll_control0);
+ else if (is_c1(pdev)) {
+ writel(0x80000B08, &afe->afe_pll_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ writel(0x00000B08, &afe->afe_pll_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ writel(0x80000B08, &afe->afe_pll_control0);
+ }
udelay(AFE_REGISTER_WRITE_DELAY);
/* Wait for the PLL to lock */
do {
- afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status);
+ afe_status = readl(&afe->afe_common_block_status);
udelay(AFE_REGISTER_WRITE_DELAY);
} while ((afe_status & 0x00001000) == 0);
if (is_a2(pdev)) {
- /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
- writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0);
+ /* Shorten SAS SNW lock time (RxLock timer value from 76
+ * us to 50 us)
+ */
+ writel(0x7bcc96ad, &afe->afe_pmsn_master_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
}
for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
+ struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_id];
const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
+ int cable_length_long =
+ is_long_cable(phy_id, cable_selection_mask);
+ int cable_length_medium =
+ is_medium_cable(phy_id, cable_selection_mask);
- if (is_b0(pdev)) {
- /* Configure transmitter SSC parameters */
- writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+ if (is_a2(pdev)) {
+ /* All defaults, except the Receive Word
+ * Alignment/Comma Detect Enable....(0xe800)
+ */
+ writel(0x00004512, &xcvr->afe_xcvr_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x0050100F, &xcvr->afe_xcvr_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ } else if (is_b0(pdev)) {
+ /* Configure transmitter SSC parameters */
+ writel(0x00030000, &xcvr->afe_tx_ssc_control);
udelay(AFE_REGISTER_WRITE_DELAY);
} else if (is_c0(pdev)) {
- /* Configure transmitter SSC parameters */
- writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+ /* Configure transmitter SSC parameters */
+ writel(0x00010202, &xcvr->afe_tx_ssc_control);
udelay(AFE_REGISTER_WRITE_DELAY);
- /*
- * All defaults, except the Receive Word Alignament/Comma Detect
- * Enable....(0xe800) */
- writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+ /* All defaults, except the Receive Word
+ * Alignment/Comma Detect Enable....(0xe800)
+ */
+ writel(0x00014500, &xcvr->afe_xcvr_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
- } else {
- /*
- * All defaults, except the Receive Word Alignament/Comma Detect
- * Enable....(0xe800) */
- writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+ } else if (is_c1(pdev)) {
+ /* Configure transmitter SSC parameters */
+ writel(0x00010202, &xcvr->afe_tx_ssc_control);
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
+ /* All defaults, except the Receive Word
+ * Alignment/Comma Detect Enable....(0xe800)
+ */
+ writel(0x0001C500, &xcvr->afe_xcvr_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
}
- /*
- * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
- * & increase TX int & ext bias 20%....(0xe85c) */
+ /* Power up TX and RX out from power down (PWRDNTX and
+ * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c)
+ */
if (is_a2(pdev))
- writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ writel(0x000003F0, &xcvr->afe_channel_control);
else if (is_b0(pdev)) {
- /* Power down TX and RX (PWRDNTX and PWRDNRX) */
- writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ writel(0x000003D7, &xcvr->afe_channel_control);
udelay(AFE_REGISTER_WRITE_DELAY);
- /*
- * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
- * & increase TX int & ext bias 20%....(0xe85c) */
- writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
- } else {
- writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ writel(0x000003D4, &xcvr->afe_channel_control);
+ } else if (is_c0(pdev)) {
+ writel(0x000001E7, &xcvr->afe_channel_control);
udelay(AFE_REGISTER_WRITE_DELAY);
- /*
- * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
- * & increase TX int & ext bias 20%....(0xe85c) */
- writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ writel(0x000001E4, &xcvr->afe_channel_control);
+ } else if (is_c1(pdev)) {
+ writel(cable_length_long ? 0x000002F7 : 0x000001F7,
+ &xcvr->afe_channel_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(cable_length_long ? 0x000002F4 : 0x000001F4,
+ &xcvr->afe_channel_control);
}
udelay(AFE_REGISTER_WRITE_DELAY);
if (is_a2(pdev)) {
/* Enable TX equalization (0xe824) */
- writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+ writel(0x00040000, &xcvr->afe_tx_control);
udelay(AFE_REGISTER_WRITE_DELAY);
}
- /*
- * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
- * RDD=0x0(RX Detect Enabled) ....(0xe800) */
- writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+ if (is_a2(pdev) || is_b0(pdev))
+ /* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0,
+ * TPD=0x0(TX Power On), RDD=0x0(RX Detect
+ * Enabled) ....(0xe800)
+ */
+ writel(0x00004100, &xcvr->afe_xcvr_control0);
+ else if (is_c0(pdev))
+ writel(0x00014100, &xcvr->afe_xcvr_control0);
+ else if (is_c1(pdev))
+ writel(0x0001C100, &xcvr->afe_xcvr_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
/* Leave DFE/FFE on */
if (is_a2(pdev))
- writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+ writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
else if (is_b0(pdev)) {
- writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+ writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
/* Enable TX equalization (0xe824) */
- writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
- } else {
- writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1);
+ writel(0x00040000, &xcvr->afe_tx_control);
+ } else if (is_c0(pdev)) {
+ writel(0x01400C0F, &xcvr->afe_rx_ssc_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* Enable TX equalization (0xe824) */
+ writel(0x00040000, &xcvr->afe_tx_control);
+ } else if (is_c1(pdev)) {
+ writel(cable_length_long ? 0x01500C0C :
+ cable_length_medium ? 0x01400C0D : 0x02400C0D,
+ &xcvr->afe_xcvr_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x000003E0, &xcvr->afe_dfx_rx_control1);
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+ writel(cable_length_long ? 0x33091C1F :
+ cable_length_medium ? 0x3315181F : 0x2B17161F,
+ &xcvr->afe_rx_ssc_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
/* Enable TX equalization (0xe824) */
- writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+ writel(0x00040000, &xcvr->afe_tx_control);
}
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(oem_phy->afe_tx_amp_control0,
- &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
+ writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(oem_phy->afe_tx_amp_control1,
- &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
+ writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1);
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(oem_phy->afe_tx_amp_control2,
- &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
+ writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2);
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(oem_phy->afe_tx_amp_control3,
- &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
+ writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3);
udelay(AFE_REGISTER_WRITE_DELAY);
}
/* Transfer control to the PEs */
- writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0);
+ writel(0x00010f00, &afe->afe_dfx_master_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
}
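The cable-selection byte introduced here encodes two bits per phy: bit 'phy' marks a long cable and bit 'phy + 4' a medium one, so each phy decodes to one of four states (both bits set is "undefined, assumed long"). A runnable sketch of the decode_selection_byte() arithmetic:

#include <stdio.h>

enum cable { SHORT_C = 0, LONG_C = 1, MEDIUM_C = 2, UNDEFINED_C = 3 };

static enum cable decode(int phy, unsigned char sel)
{
	return (enum cable)(((sel >> phy) & 1) |
			    (((sel >> (phy + 4)) & 1) << 1));
}

int main(void)
{
	unsigned char sel = 0x21;	/* phy0 long, phy1 medium */
	static const char *names[] = { "short", "long", "medium", "undefined" };

	for (int phy = 0; phy < 4; phy++)
		printf("phy%d: %s\n", phy, names[decode(phy, sel)]);
	return 0;
}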
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 646051afd3c..5477f0fa823 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -435,11 +435,36 @@ static inline bool is_b0(struct pci_dev *pdev)
static inline bool is_c0(struct pci_dev *pdev)
{
- if (pdev->revision >= 5)
+ if (pdev->revision == 5)
return true;
return false;
}
+static inline bool is_c1(struct pci_dev *pdev)
+{
+ if (pdev->revision >= 6)
+ return true;
+ return false;
+}
+
+enum cable_selections {
+ short_cable = 0,
+ long_cable = 1,
+ medium_cable = 2,
+ undefined_cable = 3
+};
+
+#define CABLE_OVERRIDE_DISABLED (0x10000)
+
+static inline int is_cable_select_overridden(void)
+{
+ return cable_selection_override < CABLE_OVERRIDE_DISABLED;
+}
+
+enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy);
+void validate_cable_selections(struct isci_host *ihost);
+char *lookup_cable_names(enum cable_selections);
+
/* set hw control for 'activity', even though active enclosures seem to drive
* the activity led on their own. Skip setting FSENG control on 'status' due
* to unexpected operation and 'error' due to not being a supported automatic
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index a97edabcb85..17c4c2c89c2 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -65,7 +65,7 @@
#include "probe_roms.h"
#define MAJ 1
-#define MIN 0
+#define MIN 1
#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD)
@@ -94,7 +94,7 @@ MODULE_DEVICE_TABLE(pci, isci_id_table);
/* linux isci specific settings */
-unsigned char no_outbound_task_to = 20;
+unsigned char no_outbound_task_to = 2;
module_param(no_outbound_task_to, byte, 0);
MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
@@ -114,7 +114,7 @@ u16 stp_inactive_to = 5;
module_param(stp_inactive_to, ushort, 0);
MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
-unsigned char phy_gen = 3;
+unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED;
module_param(phy_gen, byte, 0);
MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
@@ -122,6 +122,14 @@ unsigned char max_concurr_spinup;
module_param(max_concurr_spinup, byte, 0);
MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
+uint cable_selection_override = CABLE_OVERRIDE_DISABLED;
+module_param(cable_selection_override, uint, 0);
+
+MODULE_PARM_DESC(cable_selection_override,
+ "This field indicates length of the SAS/SATA cable between "
+ "host and device. If any bits > 15 are set (default) "
+ "indicates \"use platform defaults\"");
+
static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
@@ -412,6 +420,14 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
return NULL;
isci_host->shost = shost;
+ dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: "
+ "{%s, %s, %s, %s}\n",
+ (is_cable_select_overridden() ? "* " : ""), isci_host->id,
+ lookup_cable_names(decode_cable_selection(isci_host, 3)),
+ lookup_cable_names(decode_cable_selection(isci_host, 2)),
+ lookup_cable_names(decode_cable_selection(isci_host, 1)),
+ lookup_cable_names(decode_cable_selection(isci_host, 0)));
+
err = isci_host_init(isci_host);
if (err)
goto err_shost;
@@ -466,7 +482,8 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
orom = isci_request_oprom(pdev);
for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
- if (sci_oem_parameters_validate(&orom->ctrl[i])) {
+ if (sci_oem_parameters_validate(&orom->ctrl[i],
+ orom->hdr.version)) {
dev_warn(&pdev->dev,
"[%d]: invalid oem parameters detected, falling back to firmware\n", i);
devm_kfree(&pdev->dev, orom);
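cable_selection_override packs one selection byte per controller into a uint; any value of 0x10000 or above (the default) means "use platform defaults". A sketch of the override test and per-host byte extraction, which, like the driver's pointer arithmetic, assumes little-endian byte order:

#include <stdio.h>

#define CABLE_OVERRIDE_DISABLED 0x10000u

static unsigned int cable_selection_override = CABLE_OVERRIDE_DISABLED;

static int overridden(void)
{
	return cable_selection_override < CABLE_OVERRIDE_DISABLED;
}

static unsigned char select_byte(int host_id)
{
	return ((unsigned char *)&cable_selection_override)[host_id];
}

int main(void)
{
	printf("default overridden? %d\n", overridden());	/* 0 */
	cable_selection_override = 0x0221;	/* host0=0x21, host1=0x02 */
	printf("now overridden? %d, host0=%#x host1=%#x\n",
	       overridden(), select_byte(0), select_byte(1));
	return 0;
}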
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
index 8efeb6b0832..234ab46fce3 100644
--- a/drivers/scsi/isci/isci.h
+++ b/drivers/scsi/isci/isci.h
@@ -480,6 +480,7 @@ extern u16 ssp_inactive_to;
extern u16 stp_inactive_to;
extern unsigned char phy_gen;
extern unsigned char max_concurr_spinup;
+extern uint cable_selection_override;
irqreturn_t isci_msix_isr(int vec, void *data);
irqreturn_t isci_intx_isr(int vec, void *data);
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 35f50c2183e..fe18acfd6eb 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -91,22 +91,23 @@ sci_phy_transport_layer_initialization(struct isci_phy *iphy,
static enum sci_status
sci_phy_link_layer_initialization(struct isci_phy *iphy,
- struct scu_link_layer_registers __iomem *reg)
+ struct scu_link_layer_registers __iomem *llr)
{
struct isci_host *ihost = iphy->owning_port->owning_controller;
+ struct sci_phy_user_params *phy_user;
+ struct sci_phy_oem_params *phy_oem;
int phy_idx = iphy->phy_index;
- struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx];
- struct sci_phy_oem_params *phy_oem =
- &ihost->oem_parameters.phys[phy_idx];
- u32 phy_configuration;
struct sci_phy_cap phy_cap;
+ u32 phy_configuration;
u32 parity_check = 0;
u32 parity_count = 0;
u32 llctl, link_rate;
u32 clksm_value = 0;
u32 sp_timeouts = 0;
- iphy->link_layer_registers = reg;
+ phy_user = &ihost->user_parameters.phys[phy_idx];
+ phy_oem = &ihost->oem_parameters.phys[phy_idx];
+ iphy->link_layer_registers = llr;
/* Set our IDENTIFY frame data */
#define SCI_END_DEVICE 0x01
@@ -116,32 +117,26 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) |
SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) |
SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE),
- &iphy->link_layer_registers->transmit_identification);
+ &llr->transmit_identification);
/* Write the device SAS Address */
- writel(0xFEDCBA98,
- &iphy->link_layer_registers->sas_device_name_high);
- writel(phy_idx, &iphy->link_layer_registers->sas_device_name_low);
+ writel(0xFEDCBA98, &llr->sas_device_name_high);
+ writel(phy_idx, &llr->sas_device_name_low);
/* Write the source SAS Address */
- writel(phy_oem->sas_address.high,
- &iphy->link_layer_registers->source_sas_address_high);
- writel(phy_oem->sas_address.low,
- &iphy->link_layer_registers->source_sas_address_low);
+ writel(phy_oem->sas_address.high, &llr->source_sas_address_high);
+ writel(phy_oem->sas_address.low, &llr->source_sas_address_low);
/* Clear and Set the PHY Identifier */
- writel(0, &iphy->link_layer_registers->identify_frame_phy_id);
- writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx),
- &iphy->link_layer_registers->identify_frame_phy_id);
+ writel(0, &llr->identify_frame_phy_id);
+ writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx), &llr->identify_frame_phy_id);
/* Change the initial state of the phy configuration register */
- phy_configuration =
- readl(&iphy->link_layer_registers->phy_configuration);
+ phy_configuration = readl(&llr->phy_configuration);
/* Hold OOB state machine in reset */
phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
- writel(phy_configuration,
- &iphy->link_layer_registers->phy_configuration);
+ writel(phy_configuration, &llr->phy_configuration);
/* Configure the SNW capabilities */
phy_cap.all = 0;
@@ -149,15 +144,64 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
phy_cap.gen3_no_ssc = 1;
phy_cap.gen2_no_ssc = 1;
phy_cap.gen1_no_ssc = 1;
- if (ihost->oem_parameters.controller.do_enable_ssc == true) {
+ if (ihost->oem_parameters.controller.do_enable_ssc) {
+ struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
+ struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_idx];
+ struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
+ bool en_sas = false;
+ bool en_sata = false;
+ u32 sas_type = 0;
+ u32 sata_spread = 0x2;
+ u32 sas_spread = 0x2;
+
phy_cap.gen3_ssc = 1;
phy_cap.gen2_ssc = 1;
phy_cap.gen1_ssc = 1;
+
+ if (pci_info->orom->hdr.version < ISCI_ROM_VER_1_1)
+ en_sas = en_sata = true;
+ else {
+ sata_spread = ihost->oem_parameters.controller.ssc_sata_tx_spread_level;
+ sas_spread = ihost->oem_parameters.controller.ssc_sas_tx_spread_level;
+
+ if (sata_spread)
+ en_sata = true;
+
+ if (sas_spread) {
+ en_sas = true;
+ sas_type = ihost->oem_parameters.controller.ssc_sas_tx_type;
+ }
+ }
+
+ if (en_sas) {
+ u32 reg;
+
+ reg = readl(&xcvr->afe_xcvr_control0);
+ reg |= (0x00100000 | (sas_type << 19));
+ writel(reg, &xcvr->afe_xcvr_control0);
+
+ reg = readl(&xcvr->afe_tx_ssc_control);
+ reg |= sas_spread << 8;
+ writel(reg, &xcvr->afe_tx_ssc_control);
+ }
+
+ if (en_sata) {
+ u32 reg;
+
+ reg = readl(&xcvr->afe_tx_ssc_control);
+ reg |= sata_spread;
+ writel(reg, &xcvr->afe_tx_ssc_control);
+
+ reg = readl(&llr->stp_control);
+ reg |= 1 << 12;
+ writel(reg, &llr->stp_control);
+ }
}
- /*
- * The SAS specification indicates that the phy_capabilities that
- * are transmitted shall have an even parity. Calculate the parity. */
+ /* The SAS specification indicates that the phy_capabilities that
+ * are transmitted shall have an even parity. Calculate the parity.
+ */
parity_check = phy_cap.all;
while (parity_check != 0) {
if (parity_check & 0x1)
@@ -165,20 +209,20 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
parity_check >>= 1;
}
- /*
- * If parity indicates there are an odd number of bits set, then
- * set the parity bit to 1 in the phy capabilities. */
+ /* If parity indicates there are an odd number of bits set, then
+ * set the parity bit to 1 in the phy capabilities.
+ */
if ((parity_count % 2) != 0)
phy_cap.parity = 1;
- writel(phy_cap.all, &iphy->link_layer_registers->phy_capabilities);
+ writel(phy_cap.all, &llr->phy_capabilities);
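/* Editorial sketch, not part of the patch: the even-parity rule above
 * can be expressed as a small helper. sci_phy_cap_parity() is a
 * hypothetical name used only for illustration.
 */
static inline u32 sci_phy_cap_parity(u32 caps)
{
	u32 ones = 0;

	while (caps) {
		ones += caps & 1;	/* count set bits */
		caps >>= 1;
	}
	/* returns 1 when the count is odd, i.e. the parity bit must be set */
	return ones & 1;
}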
/* Set the enable spinup period but disable the ability to send
* notify enable spinup
*/
writel(SCU_ENSPINUP_GEN_VAL(COUNT,
phy_user->notify_enable_spin_up_insertion_frequency),
- &iphy->link_layer_registers->notify_enable_spinup_control);
+ &llr->notify_enable_spinup_control);
/* Write the ALIGN Insertion Frequency for connected phy and
* independent of connected state
@@ -189,11 +233,13 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL,
phy_user->align_insertion_frequency);
- writel(clksm_value, &iphy->link_layer_registers->clock_skew_management);
+ writel(clksm_value, &llr->clock_skew_management);
- /* @todo Provide a way to write this register correctly */
- writel(0x02108421,
- &iphy->link_layer_registers->afe_lookup_table_control);
+ if (is_c0(ihost->pdev) || is_c1(ihost->pdev)) {
+ writel(0x04210400, &llr->afe_lookup_table_control);
+ writel(0x020A7C05, &llr->sas_primitive_timeout);
+ } else
+ writel(0x02108421, &llr->afe_lookup_table_control);
llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
(u8)ihost->user_parameters.no_outbound_task_timeout);
@@ -210,9 +256,9 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
break;
}
llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
- writel(llctl, &iphy->link_layer_registers->link_layer_control);
+ writel(llctl, &llr->link_layer_control);
- sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts);
+ sp_timeouts = readl(&llr->sas_phy_timeouts);
/* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */
sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF);
@@ -222,20 +268,23 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
*/
sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B);
- writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts);
+ writel(sp_timeouts, &llr->sas_phy_timeouts);
if (is_a2(ihost->pdev)) {
- /* Program the max ARB time for the PHY to 700us so we inter-operate with
- * the PMC expander which shuts down PHYs if the expander PHY generates too
- * many breaks. This time value will guarantee that the initiator PHY will
- * generate the break.
+ /* Program the max ARB time for the PHY to 700us so we
+ * inter-operate with the PMC expander which shuts down
+ * PHYs if the expander PHY generates too many breaks.
+ * This time value will guarantee that the initiator PHY
+ * will generate the break.
*/
writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME,
- &iphy->link_layer_registers->maximum_arbitration_wait_timer_timeout);
+ &llr->maximum_arbitration_wait_timer_timeout);
}
- /* Disable link layer hang detection, rely on the OS timeout for I/O timeouts. */
- writel(0, &iphy->link_layer_registers->link_layer_hang_detection_timeout);
+ /* Disable link layer hang detection, rely on the OS timeout for
+ * I/O timeouts.
+ */
+ writel(0, &llr->link_layer_hang_detection_timeout);
/* We can exit the initial state to the stopped state */
sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
@@ -1049,24 +1098,25 @@ static void scu_link_layer_stop_protocol_engine(
writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control);
}
-/**
- *
- *
- * This method will start the OOB/SN state machine for this struct isci_phy object.
- */
-static void scu_link_layer_start_oob(
- struct isci_phy *iphy)
+static void scu_link_layer_start_oob(struct isci_phy *iphy)
{
- u32 scu_sas_pcfg_value;
-
- scu_sas_pcfg_value =
- readl(&iphy->link_layer_registers->phy_configuration);
- scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
- scu_sas_pcfg_value &=
- ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
- SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
- writel(scu_sas_pcfg_value,
- &iphy->link_layer_registers->phy_configuration);
+ struct scu_link_layer_registers __iomem *ll = iphy->link_layer_registers;
+ u32 val;
+
+ /* Reset OOB sequence - start */
+ val = readl(&ll->phy_configuration);
+ val &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+ SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
+ writel(val, &ll->phy_configuration);
+ readl(&ll->phy_configuration); /* flush */
+ /* Reset OOB sequence - end */
+
+ /* Start OOB sequence - start */
+ val = readl(&ll->phy_configuration);
+ val |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+ writel(val, &ll->phy_configuration);
+ readl(&ll->phy_configuration); /* flush */
+ /* Start OOB sequence - end */
}
/**
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index ac7f27749f9..7c6ac58a5c4 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -114,7 +114,7 @@ static u32 sci_port_get_phys(struct isci_port *iport)
* value is returned if the specified port is not valid. When this value is
* returned, no data is copied to the properties output parameter.
*/
-static enum sci_status sci_port_get_properties(struct isci_port *iport,
+enum sci_status sci_port_get_properties(struct isci_port *iport,
struct sci_port_properties *prop)
{
if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
@@ -647,19 +647,26 @@ void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
}
}
-static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy,
- bool do_notify_user)
+static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+ sci_phy_resume(iphy);
+ iport->enabled_phy_mask |= 1 << iphy->phy_index;
+}
+
+static void sci_port_activate_phy(struct isci_port *iport,
+ struct isci_phy *iphy,
+ u8 flags)
{
struct isci_host *ihost = iport->owning_controller;
- if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA)
+ if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA && (flags & PF_RESUME))
sci_phy_resume(iphy);
iport->active_phy_mask |= 1 << iphy->phy_index;
sci_controller_clear_invalid_phy(ihost, iphy);
- if (do_notify_user == true)
+ if (flags & PF_NOTIFY)
isci_port_link_up(ihost, iport, iphy);
}
@@ -669,14 +676,19 @@ void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
struct isci_host *ihost = iport->owning_controller;
iport->active_phy_mask &= ~(1 << iphy->phy_index);
+ iport->enabled_phy_mask &= ~(1 << iphy->phy_index);
if (!iport->active_phy_mask)
iport->last_active_phy = iphy->phy_index;
iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
- /* Re-assign the phy back to the LP as if it were a narrow port */
- writel(iphy->phy_index,
- &iport->port_pe_configuration_register[iphy->phy_index]);
+ /* Re-assign the phy back to the LP as if it were a narrow port for APC
+ * mode. For MPC mode, the phy will remain in the port.
+ */
+ if (iport->owning_controller->oem_parameters.controller.mode_type ==
+ SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE)
+ writel(iphy->phy_index,
+ &iport->port_pe_configuration_register[iphy->phy_index]);
if (do_notify_user == true)
isci_port_link_down(ihost, iphy, iport);
@@ -701,18 +713,16 @@ static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *i
* sci_port_general_link_up_handler - phy can be assigned to port?
* @sci_port: This is the port object which has a phy that has gone link up.
* @sci_phy: This is the struct isci_phy object that has gone link up.
- * @do_notify_user: This parameter specifies whether to inform the user (via
- * sci_port_link_up()) as to the fact that a new phy as become ready.
+ * @flags: PF_RESUME and/or PF_NOTIFY, passed through to sci_port_activate_phy()
*
- * Determine if this phy can be assigned to this
- * port . If the phy is not a valid PHY for
- * this port then the function will notify the user. A PHY can only be
- * part of a port if it's attached SAS ADDRESS is the same as all other PHYs in
- * the same port. none
+ * Determine if this phy can be assigned to this port. If the phy is
+ * not a valid PHY for this port then the function will notify the user.
+ * A PHY can only be part of a port if its attached SAS address is the
+ * same as that of all other PHYs in the same port.
*/
static void sci_port_general_link_up_handler(struct isci_port *iport,
- struct isci_phy *iphy,
- bool do_notify_user)
+ struct isci_phy *iphy,
+ u8 flags)
{
struct sci_sas_address port_sas_address;
struct sci_sas_address phy_sas_address;
@@ -730,7 +740,7 @@ static void sci_port_general_link_up_handler(struct isci_port *iport,
iport->active_phy_mask == 0) {
struct sci_base_state_machine *sm = &iport->sm;
- sci_port_activate_phy(iport, iphy, do_notify_user);
+ sci_port_activate_phy(iport, iphy, flags);
if (sm->current_state_id == SCI_PORT_RESETTING)
port_state_machine_change(iport, SCI_PORT_READY);
} else
@@ -781,11 +791,16 @@ bool sci_port_link_detected(
struct isci_phy *iphy)
{
if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
- (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) &&
- sci_port_is_wide(iport)) {
- sci_port_invalid_link_up(iport, iphy);
-
- return false;
+ (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA)) {
+ if (sci_port_is_wide(iport)) {
+ sci_port_invalid_link_up(iport, iphy);
+ return false;
+ } else {
+ struct isci_host *ihost = iport->owning_controller;
+ struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]);
+ writel(iphy->phy_index,
+ &dst_port->port_pe_configuration_register[iphy->phy_index]);
+ }
}
return true;
@@ -975,6 +990,13 @@ static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine
}
}
+static void scic_sds_port_ready_substate_waiting_exit(
+ struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+ sci_port_resume_port_task_scheduler(iport);
+}
+
static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
{
u32 index;
@@ -988,13 +1010,13 @@ static void sci_port_ready_substate_operational_enter(struct sci_base_state_mach
writel(iport->physical_port_index,
&iport->port_pe_configuration_register[
iport->phy_table[index]->phy_index]);
+ if (((iport->active_phy_mask ^ iport->enabled_phy_mask) & (1 << index)) != 0)
+ sci_port_resume_phy(iport, iport->phy_table[index]);
}
}
sci_port_update_viit_entry(iport);
- sci_port_resume_port_task_scheduler(iport);
-
/*
* Post the dummy task for the port so the hardware can schedule
* io correctly
@@ -1061,20 +1083,9 @@ static void sci_port_ready_substate_configuring_enter(struct sci_base_state_mach
if (iport->active_phy_mask == 0) {
isci_port_not_ready(ihost, iport);
- port_state_machine_change(iport,
- SCI_PORT_SUB_WAITING);
- } else if (iport->started_request_count == 0)
- port_state_machine_change(iport,
- SCI_PORT_SUB_OPERATIONAL);
-}
-
-static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm)
-{
- struct isci_port *iport = container_of(sm, typeof(*iport), sm);
-
- sci_port_suspend_port_task_scheduler(iport);
- if (iport->ready_exit)
- sci_port_invalidate_dummy_remote_node(iport);
+ port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
+ } else
+ port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
}
enum sci_status sci_port_start(struct isci_port *iport)
@@ -1252,7 +1263,7 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
if (status != SCI_SUCCESS)
return status;
- sci_port_general_link_up_handler(iport, iphy, true);
+ sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
@@ -1262,7 +1273,7 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
if (status != SCI_SUCCESS)
return status;
- sci_port_general_link_up_handler(iport, iphy, true);
+ sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY);
/* Re-enter the configuring state since this may be the last phy in
* the port.
@@ -1338,13 +1349,13 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
/* Since this is the first phy going link up for the port we
* can just enable it and continue
*/
- sci_port_activate_phy(iport, iphy, true);
+ sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME);
port_state_machine_change(iport,
SCI_PORT_SUB_OPERATIONAL);
return SCI_SUCCESS;
case SCI_PORT_SUB_OPERATIONAL:
- sci_port_general_link_up_handler(iport, iphy, true);
+ sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
return SCI_SUCCESS;
case SCI_PORT_RESETTING:
/* TODO We should make sure that the phy that has gone
@@ -1361,7 +1372,7 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
/* In the resetting state we don't notify the user regarding
* link up and link down notifications.
*/
- sci_port_general_link_up_handler(iport, iphy, false);
+ sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
return SCI_SUCCESS;
default:
dev_warn(sciport_to_dev(iport),
@@ -1584,14 +1595,14 @@ static const struct sci_base_state sci_port_state_table[] = {
},
[SCI_PORT_SUB_WAITING] = {
.enter_state = sci_port_ready_substate_waiting_enter,
+ .exit_state = scic_sds_port_ready_substate_waiting_exit,
},
[SCI_PORT_SUB_OPERATIONAL] = {
.enter_state = sci_port_ready_substate_operational_enter,
.exit_state = sci_port_ready_substate_operational_exit
},
[SCI_PORT_SUB_CONFIGURING] = {
- .enter_state = sci_port_ready_substate_configuring_enter,
- .exit_state = sci_port_ready_substate_configuring_exit
+ .enter_state = sci_port_ready_substate_configuring_enter
},
[SCI_PORT_RESETTING] = {
.exit_state = sci_port_resetting_state_exit
@@ -1609,6 +1620,7 @@ void sci_port_construct(struct isci_port *iport, u8 index,
iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
iport->physical_port_index = index;
iport->active_phy_mask = 0;
+ iport->enabled_phy_mask = 0;
iport->last_active_phy = 0;
iport->ready_exit = false;
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
index cb5ffbc3860..08116090eb7 100644
--- a/drivers/scsi/isci/port.h
+++ b/drivers/scsi/isci/port.h
@@ -63,6 +63,9 @@
#define SCIC_SDS_DUMMY_PORT 0xFF
+#define PF_NOTIFY (1 << 0)
+#define PF_RESUME (1 << 1)
+
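/* Editorial note, not part of the patch: these flags replace the old
 * do_notify_user bool, so callers in port.c can now request phy resume
 * and user notification independently, e.g.
 *
 *   sci_port_activate_phy(iport, iphy, PF_NOTIFY | PF_RESUME);
 */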
struct isci_phy;
struct isci_host;
@@ -83,6 +86,8 @@ enum isci_status {
* @logical_port_index: software port index
* @physical_port_index: hardware port index
* @active_phy_mask: identifies phy members
+ * @enabled_phy_mask: mask of phys that have been
+ * enabled and resumed in this port
* @reserved_tag: reserved tag for the port task scheduler workaround
* @reserved_rni: reserved remote node index for the port task scheduler workaround
* @started_request_count: reference count for outstanding commands
@@ -104,6 +109,7 @@ struct isci_port {
u8 logical_port_index;
u8 physical_port_index;
u8 active_phy_mask;
+ u8 enabled_phy_mask;
u8 last_active_phy;
u16 reserved_rni;
u16 reserved_tag;
@@ -250,6 +256,10 @@ bool sci_port_link_detected(
struct isci_port *iport,
struct isci_phy *iphy);
+enum sci_status sci_port_get_properties(
+ struct isci_port *iport,
+ struct sci_port_properties *prop);
+
enum sci_status sci_port_link_up(struct isci_port *iport,
struct isci_phy *iphy);
enum sci_status sci_port_link_down(struct isci_port *iport,
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 38a99d28114..6d1e9544cbe 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -57,7 +57,7 @@
#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10)
#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10)
-#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (100)
+#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (250)
enum SCIC_SDS_APC_ACTIVITY {
SCIC_SDS_APC_SKIP_PHY,
@@ -466,6 +466,23 @@ sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
return sci_port_configuration_agent_validate_ports(ihost, port_agent);
}
+/*
+ * This routine restarts the automatic port configuration timeout
+ * timer for the next time period. This could be caused by either a
+ * link down event or a link up event where we cannot yet tell to
+ * which port a phy belongs.
+ */
+static void sci_apc_agent_start_timer(
+ struct sci_port_configuration_agent *port_agent,
+ u32 timeout)
+{
+ if (port_agent->timer_pending)
+ sci_del_timer(&port_agent->timer);
+
+ port_agent->timer_pending = true;
+ sci_mod_timer(&port_agent->timer, timeout);
+}
+
static void sci_apc_agent_configure_ports(struct isci_host *ihost,
struct sci_port_configuration_agent *port_agent,
struct isci_phy *iphy,
@@ -565,17 +582,8 @@ static void sci_apc_agent_configure_ports(struct isci_host *ihost,
break;
case SCIC_SDS_APC_START_TIMER:
- /*
- * This can occur for either a link down event, or a link
- * up event where we cannot yet tell the port to which a
- * phy belongs.
- */
- if (port_agent->timer_pending)
- sci_del_timer(&port_agent->timer);
-
- port_agent->timer_pending = true;
- sci_mod_timer(&port_agent->timer,
- SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
+ sci_apc_agent_start_timer(port_agent,
+ SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
break;
case SCIC_SDS_APC_SKIP_PHY:
@@ -607,7 +615,8 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
if (!iport) {
/* the phy is not part of this port */
port_agent->phy_ready_mask |= 1 << phy_index;
- sci_apc_agent_configure_ports(ihost, port_agent, iphy, true);
+ sci_apc_agent_start_timer(port_agent,
+ SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
} else {
/* the phy is already part of the port */
u32 port_state = iport->sm.current_state_id;
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
index b5f4341de24..9b8117b9d75 100644
--- a/drivers/scsi/isci/probe_roms.c
+++ b/drivers/scsi/isci/probe_roms.c
@@ -147,7 +147,7 @@ struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmw
memcpy(orom, fw->data, fw->size);
- if (is_c0(pdev))
+ if (is_c0(pdev) || is_c1(pdev))
goto out;
/*
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
index 2c75248ca32..bb0e9d4d97c 100644
--- a/drivers/scsi/isci/probe_roms.h
+++ b/drivers/scsi/isci/probe_roms.h
@@ -152,7 +152,7 @@ struct sci_user_parameters {
#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4
struct sci_oem_params;
-int sci_oem_parameters_validate(struct sci_oem_params *oem);
+int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version);
struct isci_orom;
struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
@@ -191,6 +191,11 @@ struct isci_oem_hdr {
0x1a, 0x04, 0xc6)
#define ISCI_EFI_VAR_NAME "RstScuO"
+#define ISCI_ROM_VER_1_0 0x10
+#define ISCI_ROM_VER_1_1 0x11
+#define ISCI_ROM_VER_1_3 0x13
+#define ISCI_ROM_VER_LATEST ISCI_ROM_VER_1_3
+
/* Allowed PORT configuration modes APC Automatic PORT configuration mode is
* defined by the OEM configuration parameters providing no PHY_MASK parameters
* for any PORT. i.e. There are no phys assigned to any of the ports at start.
@@ -220,8 +225,86 @@ struct sci_oem_params {
struct {
uint8_t mode_type;
uint8_t max_concurr_spin_up;
- uint8_t do_enable_ssc;
- uint8_t reserved;
+ /*
+ * This bitfield indicates the OEM's desired default Tx
+ * Spread Spectrum Clocking (SSC) settings for SATA and SAS.
+ * NOTE: Default SSC Modulation Frequency is 31.5 kHz.
+ */
+ union {
+ struct {
+ /*
+ * NOTE: Max spread for SATA is +0 / -5000 PPM.
+ * Down-spreading SSC (only method allowed for SATA):
+ * SATA SSC Tx Disabled = 0x0
+ * SATA SSC Tx at +0 / -1419 PPM Spread = 0x2
+ * SATA SSC Tx at +0 / -2129 PPM Spread = 0x3
+ * SATA SSC Tx at +0 / -4257 PPM Spread = 0x6
+ * SATA SSC Tx at +0 / -4967 PPM Spread = 0x7
+ */
+ uint8_t ssc_sata_tx_spread_level:4;
+ /*
+ * SAS SSC Tx Disabled = 0x0
+ *
+ * NOTE: Max spread for SAS down-spreading +0 /
+ * -2300 PPM
+ * Down-spreading SSC:
+ * SAS SSC Tx at +0 / -1419 PPM Spread = 0x2
+ * SAS SSC Tx at +0 / -2129 PPM Spread = 0x3
+ *
+ * NOTE: Max spread for SAS center-spreading +2300 /
+ * -2300 PPM
+ * Center-spreading SSC:
+ * SAS SSC Tx at +1064 / -1064 PPM Spread = 0x3
+ * SAS SSC Tx at +2129 / -2129 PPM Spread = 0x6
+ */
+ uint8_t ssc_sas_tx_spread_level:3;
+ /*
+ * NOTE: Refer to the SSC section of the SAS 2.x
+ * Specification for proper setting of this field.
+ * For standard SAS Initiator SAS PHY operation it
+ * should be 0 for Down-spreading.
+ * SAS SSC Tx spread type:
+ * Down-spreading SSC = 0
+ * Center-spreading SSC = 1
+ */
+ uint8_t ssc_sas_tx_type:1;
+ };
+ uint8_t do_enable_ssc;
+ };
+ /*
+ * This field indicates the length of the SAS/SATA cable between
+ * host and device. It is used to relate the analog parameters
+ * of the phy in the silicon to the length of the cable.
+ * Supported cable attenuation levels:
+ * "short" - up to 3m, "medium" - 3m to 6m, and "long" - more
+ * than 6m.
+ *
+ * This is a bit mask field:
+ *
+ * BIT: (MSB) 7 6 5 4
+ * ASSIGNMENT: <phy3><phy2><phy1><phy0> - Medium cable
+ * length assignment
+ * BIT: 3 2 1 0 (LSB)
+ * ASSIGNMENT: <phy3><phy2><phy1><phy0> - Long cable length
+ * assignment
+ *
+ * BITS 7-4 are set when the cable length is assigned to medium
+ * BITS 3-0 are set when the cable length is assigned to long
+ *
+ * The BIT positions are clear when the cable length is
+ * assigned to short.
+ *
+ * Setting the bits for both long and medium cable length is
+ * undefined.
+ *
+ * A value of 0x84 would assign
+ * phy3 - medium
+ * phy2 - long
+ * phy1 - short
+ * phy0 - short
+ */
+ uint8_t cable_selection_mask;
} controller;
struct {
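/* Editorial sketch, not part of the patch: decoding the 0x84 example
 * from the cable_selection_mask comment above. decode_cable() and the
 * cable_len enum are hypothetical names used only for illustration;
 * the driver's real decode lives in decode_cable_selection().
 */
enum cable_len { CABLE_SHORT, CABLE_MEDIUM, CABLE_LONG };

static enum cable_len decode_cable(uint8_t mask, int phy)
{
	if ((mask >> (phy + 4)) & 1)	/* bits 7-4: medium */
		return CABLE_MEDIUM;
	if ((mask >> phy) & 1)		/* bits 3-0: long */
		return CABLE_LONG;
	return CABLE_SHORT;		/* both bits clear: short */
}

/* 0x84 = 10000100b: phy3 -> medium, phy2 -> long, phy1/phy0 -> short */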
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index b207cd3b15a..dd74b6ceeb8 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -53,6 +53,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <scsi/sas.h>
+#include <linux/bitops.h>
#include "isci.h"
#include "port.h"
#include "remote_device.h"
@@ -1101,6 +1102,7 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
struct isci_remote_device *idev)
{
enum sci_status status;
+ struct sci_port_properties properties;
struct domain_device *dev = idev->domain_dev;
sci_remote_device_construct(iport, idev);
@@ -1110,6 +1112,11 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
* entries will be needed to store the remote node.
*/
idev->is_direct_attached = true;
+
+ sci_port_get_properties(iport, &properties);
+ /* Get accurate port width from port's phy mask for a DA device. */
+ idev->device_port_width = hweight32(properties.phy_mask);
+
status = sci_controller_allocate_remote_node_context(iport->owning_controller,
idev,
&idev->rnc.remote_node_index);
@@ -1125,9 +1132,6 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
idev->connection_rate = sci_port_get_max_allowed_speed(iport);
- /* / @todo Should I assign the port width by reading all of the phys on the port? */
- idev->device_port_width = 1;
-
return SCI_SUCCESS;
}
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 66ad3dc8949..f5a3f7d2bda 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -496,7 +496,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
}
}
- isci_print_tmf(tmf);
+ isci_print_tmf(ihost, tmf);
if (tmf->status == SCI_SUCCESS)
ret = TMF_RESP_FUNC_COMPLETE;
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index bc78c0a41d5..1b27b3797c6 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -106,7 +106,6 @@ struct isci_tmf {
} resp;
unsigned char lun[8];
u16 io_tag;
- struct isci_remote_device *device;
enum isci_tmf_function_codes tmf_code;
int status;
@@ -120,10 +119,10 @@ struct isci_tmf {
};
-static inline void isci_print_tmf(struct isci_tmf *tmf)
+static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
{
if (SAS_PROTOCOL_SATA == tmf->proto)
- dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+ dev_dbg(&ihost->pdev->dev,
"%s: status = %x\n"
"tmf->resp.d2h_fis.status = %x\n"
"tmf->resp.d2h_fis.error = %x\n",
@@ -132,7 +131,7 @@ static inline void isci_print_tmf(struct isci_tmf *tmf)
tmf->resp.d2h_fis.status,
tmf->resp.d2h_fis.error);
else
- dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+ dev_dbg(&ihost->pdev->dev,
"%s: status = %x\n"
"tmf->resp.resp_iu.data_present = %x\n"
"tmf->resp.resp_iu.status = %x\n"
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 7269e928824..1d1b0c9da29 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -61,7 +61,7 @@ static void fc_disc_restart(struct fc_disc *);
* Locking Note: This function expects that the lport mutex is locked before
* calling it.
*/
-void fc_disc_stop_rports(struct fc_disc *disc)
+static void fc_disc_stop_rports(struct fc_disc *disc)
{
struct fc_lport *lport;
struct fc_rport_priv *rdata;
@@ -682,7 +682,7 @@ static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
* fc_disc_stop() - Stop discovery for a given lport
* @lport: The local port that discovery should stop on
*/
-void fc_disc_stop(struct fc_lport *lport)
+static void fc_disc_stop(struct fc_lport *lport)
{
struct fc_disc *disc = &lport->disc;
@@ -698,7 +698,7 @@ void fc_disc_stop(struct fc_lport *lport)
* This function will block until discovery has been
* completely stopped and all rports have been deleted.
*/
-void fc_disc_stop_final(struct fc_lport *lport)
+static void fc_disc_stop_final(struct fc_lport *lport)
{
fc_disc_stop(lport);
lport->tt.rport_flush_queue();
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index fb9161dc4ca..e17a28d324d 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -28,6 +28,7 @@
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
+#include "fc_libfc.h"
/**
* fc_elsct_send() - Send an ELS or CT frame
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 9de9db27e87..4d70d96fa5d 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -91,7 +91,7 @@ struct fc_exch_pool {
* It manages the allocation of exchange IDs.
*/
struct fc_exch_mgr {
- struct fc_exch_pool *pool;
+ struct fc_exch_pool __percpu *pool;
mempool_t *ep_pool;
enum fc_class class;
struct kref kref;
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 221875ec3d7..f607314810a 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -155,6 +155,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
fsp->xfer_ddp = FC_XID_UNKNOWN;
atomic_set(&fsp->ref_cnt, 1);
init_timer(&fsp->timer);
+ fsp->timer.data = (unsigned long)fsp;
INIT_LIST_HEAD(&fsp->list);
spin_lock_init(&fsp->scsi_pkt_lock);
}
@@ -1850,9 +1851,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
}
put_cpu();
- init_timer(&fsp->timer);
- fsp->timer.data = (unsigned long)fsp;
-
/*
* send it to the lower layer
* if we get -1 return then put the request in the pending
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index e77094a587e..83750ebb527 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -677,7 +677,8 @@ EXPORT_SYMBOL(fc_set_mfs);
* @lport: The local port receiving the event
* @event: The discovery event
*/
-void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
+static void fc_lport_disc_callback(struct fc_lport *lport,
+ enum fc_disc_event event)
{
switch (event) {
case DISC_EV_SUCCESS:
@@ -1568,7 +1569,7 @@ EXPORT_SYMBOL(fc_lport_flogi_resp);
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
-void fc_lport_enter_flogi(struct fc_lport *lport)
+static void fc_lport_enter_flogi(struct fc_lport *lport)
{
struct fc_frame *fp;
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index b9e434844a6..83aa1efec87 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -391,7 +391,7 @@ static void fc_rport_work(struct work_struct *work)
* If it appears we are already logged in, ADISC is used to verify
* the setup.
*/
-int fc_rport_login(struct fc_rport_priv *rdata)
+static int fc_rport_login(struct fc_rport_priv *rdata)
{
mutex_lock(&rdata->rp_mutex);
@@ -451,7 +451,7 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
* function will hold the rport lock, call an _enter_*
* function and then unlock the rport.
*/
-int fc_rport_logoff(struct fc_rport_priv *rdata)
+static int fc_rport_logoff(struct fc_rport_priv *rdata)
{
mutex_lock(&rdata->rp_mutex);
@@ -653,8 +653,8 @@ static int fc_rport_login_complete(struct fc_rport_priv *rdata,
* @fp: The FLOGI response frame
* @rp_arg: The remote port that received the FLOGI response
*/
-void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
- void *rp_arg)
+static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rp_arg)
{
struct fc_rport_priv *rdata = rp_arg;
struct fc_lport *lport = rdata->local_port;
@@ -1520,7 +1520,7 @@ reject:
*
* Locking Note: Called with the lport lock held.
*/
-void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_seq_els_data els_data;
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 5c1776406c9..15eefa1d61f 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -306,19 +306,22 @@ mega_query_adapter(adapter_t *adapter)
adapter->host->sg_tablesize = adapter->sglen;
- /* use HP firmware and bios version encoding */
+ /* use HP firmware and bios version encoding
+ Note: fw_version[0|1] and bios_version[0|1] were originally
+ shifted right by 8 bits, making them zero. This 0 value was
+ hardcoded to fix sparse warnings. */
if (adapter->product_info.subsysvid == HP_SUBSYS_VID) {
sprintf (adapter->fw_version, "%c%d%d.%d%d",
adapter->product_info.fw_version[2],
- adapter->product_info.fw_version[1] >> 8,
+ 0,
adapter->product_info.fw_version[1] & 0x0f,
- adapter->product_info.fw_version[0] >> 8,
+ 0,
adapter->product_info.fw_version[0] & 0x0f);
sprintf (adapter->bios_version, "%c%d%d.%d%d",
adapter->product_info.bios_version[2],
- adapter->product_info.bios_version[1] >> 8,
+ 0,
adapter->product_info.bios_version[1] & 0x0f,
- adapter->product_info.bios_version[0] >> 8,
+ 0,
adapter->product_info.bios_version[0] & 0x0f);
} else {
memcpy(adapter->fw_version,
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index dd94c7d574f..e5f416f8042 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.06.12-rc1"
-#define MEGASAS_RELDATE "Oct. 5, 2011"
-#define MEGASAS_EXT_VERSION "Wed. Oct. 5 17:00:00 PDT 2011"
+#define MEGASAS_VERSION "00.00.06.14-rc1"
+#define MEGASAS_RELDATE "Jan. 6, 2012"
+#define MEGASAS_EXT_VERSION "Fri. Jan. 6 17:00:00 PDT 2012"
/*
* Device IDs
@@ -773,7 +773,6 @@ struct megasas_ctrl_info {
#define MFI_OB_INTR_STATUS_MASK 0x00000002
#define MFI_POLL_TIMEOUT_SECS 60
-#define MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10)
#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001
@@ -1353,7 +1352,6 @@ struct megasas_instance {
u32 mfiStatus;
u32 last_seq_num;
- struct timer_list io_completion_timer;
struct list_head internal_reset_pending_q;
/* Ptr to hba specific information */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 29a994f9c4f..8b300be4428 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : v00.00.06.12-rc1
+ * Version : v00.00.06.14-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
@@ -59,14 +59,6 @@
#include "megaraid_sas.h"
/*
- * poll_mode_io:1- schedule complete completion from q cmd
- */
-static unsigned int poll_mode_io;
-module_param_named(poll_mode_io, poll_mode_io, int, 0);
-MODULE_PARM_DESC(poll_mode_io,
- "Complete cmds from IO path, (default=0)");
-
-/*
* Number of sectors per IO command
* Will be set in megasas_init_mfi if user does not provide
*/
@@ -1439,11 +1431,6 @@ megasas_build_and_issue_cmd(struct megasas_instance *instance,
instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
cmd->frame_count-1, instance->reg_set);
- /*
- * Check if we have pend cmds to be completed
- */
- if (poll_mode_io && atomic_read(&instance->fw_outstanding))
- tasklet_schedule(&instance->isr_tasklet);
return 0;
out_return_cmd:
@@ -3370,47 +3357,6 @@ fail_fw_init:
return -EINVAL;
}
-/**
- * megasas_start_timer - Initializes a timer object
- * @instance: Adapter soft state
- * @timer: timer object to be initialized
- * @fn: timer function
- * @interval: time interval between timer function call
- */
-static inline void
-megasas_start_timer(struct megasas_instance *instance,
- struct timer_list *timer,
- void *fn, unsigned long interval)
-{
- init_timer(timer);
- timer->expires = jiffies + interval;
- timer->data = (unsigned long)instance;
- timer->function = fn;
- add_timer(timer);
-}
-
-/**
- * megasas_io_completion_timer - Timer fn
- * @instance_addr: Address of adapter soft state
- *
- * Schedules tasklet for cmd completion
- * if poll_mode_io is set
- */
-static void
-megasas_io_completion_timer(unsigned long instance_addr)
-{
- struct megasas_instance *instance =
- (struct megasas_instance *)instance_addr;
-
- if (atomic_read(&instance->fw_outstanding))
- tasklet_schedule(&instance->isr_tasklet);
-
- /* Restart timer */
- if (poll_mode_io)
- mod_timer(&instance->io_completion_timer,
- jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL);
-}
-
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance)
{
@@ -3638,11 +3584,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
- /* Initialize the cmd completion timer */
- if (poll_mode_io)
- megasas_start_timer(instance, &instance->io_completion_timer,
- megasas_io_completion_timer,
- MEGASAS_COMPLETION_TIMER_INTERVAL);
return 0;
fail_init_adapter:
@@ -4369,9 +4310,6 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
host = instance->host;
instance->unload = 1;
- if (poll_mode_io)
- del_timer_sync(&instance->io_completion_timer);
-
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
@@ -4511,12 +4449,6 @@ megasas_resume(struct pci_dev *pdev)
}
instance->instancet->enable_intr(instance->reg_set);
-
- /* Initialize the cmd completion timer */
- if (poll_mode_io)
- megasas_start_timer(instance, &instance->io_completion_timer,
- megasas_io_completion_timer,
- MEGASAS_COMPLETION_TIMER_INTERVAL);
instance->unload = 0;
/*
@@ -4570,9 +4502,6 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
host = instance->host;
fusion = instance->ctrl_context;
- if (poll_mode_io)
- del_timer_sync(&instance->io_completion_timer);
-
scsi_remove_host(instance->host);
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
@@ -4773,6 +4702,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
cmd->frame->hdr.context = cmd->index;
cmd->frame->hdr.pad_0 = 0;
+ cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
+ MFI_FRAME_SENSE64);
/*
* The management interface between applications and the fw uses
@@ -5219,60 +5150,6 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun
static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
megasas_sysfs_set_dbg_lvl);
-static ssize_t
-megasas_sysfs_show_poll_mode_io(struct device_driver *dd, char *buf)
-{
- return sprintf(buf, "%u\n", poll_mode_io);
-}
-
-static ssize_t
-megasas_sysfs_set_poll_mode_io(struct device_driver *dd,
- const char *buf, size_t count)
-{
- int retval = count;
- int tmp = poll_mode_io;
- int i;
- struct megasas_instance *instance;
-
- if (sscanf(buf, "%u", &poll_mode_io) < 1) {
- printk(KERN_ERR "megasas: could not set poll_mode_io\n");
- retval = -EINVAL;
- }
-
- /*
- * Check if poll_mode_io is already set or is same as previous value
- */
- if ((tmp && poll_mode_io) || (tmp == poll_mode_io))
- goto out;
-
- if (poll_mode_io) {
- /*
- * Start timers for all adapters
- */
- for (i = 0; i < megasas_mgmt_info.max_index; i++) {
- instance = megasas_mgmt_info.instance[i];
- if (instance) {
- megasas_start_timer(instance,
- &instance->io_completion_timer,
- megasas_io_completion_timer,
- MEGASAS_COMPLETION_TIMER_INTERVAL);
- }
- }
- } else {
- /*
- * Delete timers for all adapters
- */
- for (i = 0; i < megasas_mgmt_info.max_index; i++) {
- instance = megasas_mgmt_info.instance[i];
- if (instance)
- del_timer_sync(&instance->io_completion_timer);
- }
- }
-
-out:
- return retval;
-}
-
static void
megasas_aen_polling(struct work_struct *work)
{
@@ -5502,11 +5379,6 @@ megasas_aen_polling(struct work_struct *work)
kfree(ev);
}
-
-static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR,
- megasas_sysfs_show_poll_mode_io,
- megasas_sysfs_set_poll_mode_io);
-
/**
* megasas_init - Driver load entry point
*/
@@ -5566,11 +5438,6 @@ static int __init megasas_init(void)
if (rval)
goto err_dcf_dbg_lvl;
rval = driver_create_file(&megasas_pci_driver.driver,
- &driver_attr_poll_mode_io);
- if (rval)
- goto err_dcf_poll_mode_io;
-
- rval = driver_create_file(&megasas_pci_driver.driver,
&driver_attr_support_device_change);
if (rval)
goto err_dcf_support_device_change;
@@ -5579,10 +5446,6 @@ static int __init megasas_init(void)
err_dcf_support_device_change:
driver_remove_file(&megasas_pci_driver.driver,
- &driver_attr_poll_mode_io);
-
-err_dcf_poll_mode_io:
- driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_dbg_lvl);
err_dcf_dbg_lvl:
driver_remove_file(&megasas_pci_driver.driver,
@@ -5607,8 +5470,6 @@ err_pcidrv:
static void __exit megasas_exit(void)
{
driver_remove_file(&megasas_pci_driver.driver,
- &driver_attr_poll_mode_io);
- driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_dbg_lvl);
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_support_poll_for_event);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 5255dd688ac..294abb0defa 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -282,7 +282,9 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
else {
*pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
if ((raid->level >= 5) &&
- (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER))
+ ((instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER &&
+ raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {
/* Get alternate Pd. */
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 22a3ff02e48..bfe68545203 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -150,6 +150,8 @@
#define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */
/* recovery timeout */
+#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
+#define LSW(x) ((uint16_t)(x))
#define LSDW(x) ((u32)((u64)(x)))
#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
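/* Editorial worked example, not part of the patch: for the firmware
 * abort case handled in ql4_os.c later in this patch,
 * halt_status == 0x00006700, so (uint16_t)halt_status == 0x6700,
 * MSB(0x6700) == 0x67, and LSW(MSB(halt_status)) == 0x67 matches
 * the check.
 */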
@@ -671,6 +673,7 @@ struct scsi_qla_host {
uint16_t pri_ddb_idx;
uint16_t sec_ddb_idx;
int is_reset;
+ uint16_t temperature;
};
struct ql4_task_data {
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 1bdfa8120ac..90614f38b55 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -697,6 +697,9 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
&ha->reg->ctrl_status);
readl(&ha->reg->ctrl_status);
+ writel(set_rmask(CSR_SCSI_COMPLETION_INTR),
+ &ha->reg->ctrl_status);
+ readl(&ha->reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: Get firmware "
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index c2593782fbb..e1e66a45e4d 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -219,6 +219,13 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
ha->mailbox_timeout_count++;
mbx_sts[0] = (-1);
set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ if (is_qla8022(ha)) {
+ ql4_printk(KERN_INFO, ha,
+ "disabling pause transmit on port 0 & 1.\n");
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0 |
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ }
goto mbox_exit;
}
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 8d6bc1b2ff1..78f1111158d 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1875,6 +1875,11 @@ exit:
int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
{
int retval;
+
+ /* clear the interrupt */
+ writel(0, &ha->qla4_8xxx_reg->host_int);
+ readl(&ha->qla4_8xxx_reg->host_int);
+
retval = qla4_8xxx_device_state_handler(ha);
if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags))
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index 35376a1c3f1..dc45ac92369 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -19,12 +19,28 @@
#define PHAN_PEG_RCV_INITIALIZED 0xff01
/*CRB_RELATED*/
-#define QLA82XX_CRB_BASE QLA82XX_CAM_RAM(0x200)
-#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X))
-
+#define QLA82XX_CRB_BASE (QLA82XX_CAM_RAM(0x200))
+#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X))
#define CRB_CMDPEG_STATE QLA82XX_REG(0x50)
#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
+#define CRB_TEMP_STATE QLA82XX_REG(0x1b4)
+
+#define qla82xx_get_temp_val(x) ((x) >> 16)
+#define qla82xx_get_temp_state(x) ((x) & 0xffff)
+#define qla82xx_encode_temp(val, state) (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+ QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */
+ QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */
+ QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */
+};
+
+#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
+#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E
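/* Editorial worked example, not part of the patch: CRB_TEMP_STATE
 * packs the temperature value in the upper 16 bits and the state in
 * the lower 16. For a hypothetical reading of 55 degrees C in the
 * normal range:
 *
 *   u32 temp = qla82xx_encode_temp(55, QLA82XX_TEMP_NORMAL);
 *   qla82xx_get_temp_val(temp)   == 55
 *   qla82xx_get_temp_state(temp) == QLA82XX_TEMP_NORMAL
 */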
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index ec393a00c03..ce6d3b7f0c6 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -35,43 +35,44 @@ static struct kmem_cache *srb_cachep;
int ql4xdisablesysfsboot = 1;
module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdisablesysfsboot,
- "Set to disable exporting boot targets to sysfs\n"
- " 0 - Export boot targets\n"
- " 1 - Do not export boot targets (Default)");
+ " Set to disable exporting boot targets to sysfs.\n"
+ "\t\t 0 - Export boot targets\n"
+ "\t\t 1 - Do not export boot targets (Default)");
int ql4xdontresethba = 0;
module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdontresethba,
- "Don't reset the HBA for driver recovery \n"
- " 0 - It will reset HBA (Default)\n"
- " 1 - It will NOT reset HBA");
+ " Don't reset the HBA for driver recovery.\n"
+ "\t\t 0 - It will reset HBA (Default)\n"
+ "\t\t 1 - It will NOT reset HBA");
-int ql4xextended_error_logging = 0; /* 0 = off, 1 = log errors */
+int ql4xextended_error_logging;
module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xextended_error_logging,
- "Option to enable extended error logging, "
- "Default is 0 - no logging, 1 - debug logging");
+ " Option to enable extended error logging.\n"
+ "\t\t 0 - no logging (Default)\n"
+ "\t\t 2 - debug logging");
int ql4xenablemsix = 1;
module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql4xenablemsix,
- "Set to enable MSI or MSI-X interrupt mechanism.\n"
- " 0 = enable INTx interrupt mechanism.\n"
- " 1 = enable MSI-X interrupt mechanism (Default).\n"
- " 2 = enable MSI interrupt mechanism.");
+ " Set to enable MSI or MSI-X interrupt mechanism.\n"
+ "\t\t 0 = enable INTx interrupt mechanism.\n"
+ "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
+ "\t\t 2 = enable MSI interrupt mechanism.");
#define QL4_DEF_QDEPTH 32
static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xmaxqdepth,
- "Maximum queue depth to report for target devices.\n"
- " Default: 32.");
+ " Maximum queue depth to report for target devices.\n"
+ "\t\t Default: 32.");
static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
MODULE_PARM_DESC(ql4xsess_recovery_tmo,
"Target Session Recovery Timeout.\n"
- " Default: 120 sec.");
+ "\t\t Default: 120 sec.");
static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
/*
@@ -1630,7 +1631,9 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
/* Update timers after login */
ddb_entry->default_relogin_timeout =
- le16_to_cpu(fw_ddb_entry->def_timeout);
+ (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
+ (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
+ le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
ddb_entry->default_time2wait =
le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
@@ -1970,6 +1973,42 @@ mem_alloc_error_exit:
}
/**
+ * qla4_8xxx_check_temp - Check the ISP82XX temperature.
+ * @ha: adapter block pointer.
+ *
+ * Note: The caller should not hold the idc lock.
+ **/
+static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
+{
+ uint32_t temp, temp_state, temp_val;
+ int status = QLA_SUCCESS;
+
+ temp = qla4_8xxx_rd_32(ha, CRB_TEMP_STATE);
+
+ temp_state = qla82xx_get_temp_state(temp);
+ temp_val = qla82xx_get_temp_val(temp);
+
+ if (temp_state == QLA82XX_TEMP_PANIC) {
+ ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
+ " exceeds maximum allowed. Hardware has been shut"
+ " down.\n", temp_val);
+ status = QLA_ERROR;
+ } else if (temp_state == QLA82XX_TEMP_WARN) {
+ if (ha->temperature == QLA82XX_TEMP_NORMAL)
+ ql4_printk(KERN_WARNING, ha, "Device temperature %d"
+ " degrees C exceeds operating range."
+ " Immediate action needed.\n", temp_val);
+ } else {
+ if (ha->temperature == QLA82XX_TEMP_WARN)
+ ql4_printk(KERN_INFO, ha, "Device temperature is"
+ " now %d degrees C in normal range.\n",
+ temp_val);
+ }
+ ha->temperature = temp_state;
+ return status;
+}
+
+/**
* qla4_8xxx_check_fw_alive - Check firmware health
* @ha: Pointer to host adapter structure.
*
@@ -2040,7 +2079,16 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- if (dev_state == QLA82XX_DEV_NEED_RESET &&
+
+ if (qla4_8xxx_check_temp(ha)) {
+ ql4_printk(KERN_INFO, ha, "disabling pause"
+ " transmit on port 0 & 1.\n");
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0 |
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
+ qla4xxx_wake_dpc(ha);
+ } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
if (!ql4xdontresethba) {
ql4_printk(KERN_INFO, ha, "%s: HW State: "
@@ -2057,9 +2105,21 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
} else {
/* Check firmware health */
if (qla4_8xxx_check_fw_alive(ha)) {
+ ql4_printk(KERN_INFO, ha, "disabling pause"
+ " transmit on port 0 & 1.\n");
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0 |
+ CRB_NIU_XG_PAUSE_CTL_P1);
halt_status = qla4_8xxx_rd_32(ha,
QLA82XX_PEG_HALT_STATUS1);
+ if (LSW(MSB(halt_status)) == 0x67)
+ ql4_printk(KERN_ERR, ha, "%s:"
+ " Firmware aborted with"
+ " error code 0x00006700."
+ " Device is being reset\n",
+ __func__);
+
/* Since we cannot change dev_state in interrupt
* context, set appropriate DPC flag then wakeup
* DPC */
@@ -2078,7 +2138,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
}
}
-void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
{
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
@@ -3826,16 +3886,14 @@ exit_check:
return ret;
}
-static void qla4xxx_free_nt_list(struct list_head *list_nt)
+static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
{
- struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
+ struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
- /* Free up the normaltargets list */
- list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
- list_del_init(&nt_ddb_idx->list);
- vfree(nt_ddb_idx);
+ list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+ list_del_init(&ddb_idx->list);
+ vfree(ddb_idx);
}
-
}
static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
@@ -3884,6 +3942,8 @@ static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry)
{
+ uint16_t def_timeout;
+
ddb_entry->ddb_type = FLASH_DDB;
ddb_entry->fw_ddb_index = INVALID_ENTRY;
ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
@@ -3894,9 +3954,10 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
atomic_set(&ddb_entry->relogin_timer, 0);
atomic_set(&ddb_entry->relogin_retry_count, 0);
-
+ def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
ddb_entry->default_relogin_timeout =
- le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+ (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
+ def_timeout : LOGIN_TOV;
ddb_entry->default_time2wait =
le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
}
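/* Editorial sketch, not part of the patch: both this call site and the
 * one in qla4xxx_update_session_conn_param() clamp the firmware-supplied
 * default timeout the same way. A hypothetical helper expressing the rule:
 */
static inline uint16_t qla4xxx_clamp_def_timeout(uint16_t def_timeout)
{
	/* accept only values strictly between LOGIN_TOV and 10 * LOGIN_TOV */
	if (def_timeout > LOGIN_TOV && def_timeout < LOGIN_TOV * 10)
		return def_timeout;
	return LOGIN_TOV;
}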
@@ -3934,7 +3995,6 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
ip_state == IP_ADDRSTATE_DEPRICATED ||
ip_state == IP_ADDRSTATE_DISABLING)
ip_idx[idx] = -1;
-
}
/* Break if all IP states checked */
@@ -3947,58 +4007,37 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
} while (time_after(wtime, jiffies));
}
-void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
+ struct list_head *list_st)
{
+ struct qla_ddb_index *st_ddb_idx;
int max_ddbs;
+ int fw_idx_size;
+ struct dev_db_entry *fw_ddb_entry;
+ dma_addr_t fw_ddb_dma;
int ret;
uint32_t idx = 0, next_idx = 0;
uint32_t state = 0, conn_err = 0;
- uint16_t conn_id;
- struct dev_db_entry *fw_ddb_entry;
- struct ddb_entry *ddb_entry = NULL;
- dma_addr_t fw_ddb_dma;
- struct iscsi_cls_session *cls_sess;
- struct iscsi_session *sess;
- struct iscsi_cls_conn *cls_conn;
- struct iscsi_endpoint *ep;
- uint16_t cmds_max = 32, tmo = 0;
- uint32_t initial_cmdsn = 0;
- struct list_head list_st, list_nt; /* List of sendtargets */
- struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
- int fw_idx_size;
- unsigned long wtime;
- struct qla_ddb_index *nt_ddb_idx;
-
- if (!test_bit(AF_LINK_UP, &ha->flags)) {
- set_bit(AF_BUILD_DDB_LIST, &ha->flags);
- ha->is_reset = is_reset;
- return;
- }
- max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
- MAX_DEV_DB_ENTRIES;
+ uint16_t conn_id = 0;
fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
&fw_ddb_dma);
if (fw_ddb_entry == NULL) {
DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
- goto exit_ddb_list;
+ goto exit_st_list;
}
- INIT_LIST_HEAD(&list_st);
- INIT_LIST_HEAD(&list_nt);
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
fw_idx_size = sizeof(struct qla_ddb_index);
for (idx = 0; idx < max_ddbs; idx = next_idx) {
- ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
- fw_ddb_dma, NULL,
- &next_idx, &state, &conn_err,
- NULL, &conn_id);
+ ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
+ NULL, &next_idx, &state,
+ &conn_err, NULL, &conn_id);
if (ret == QLA_ERROR)
break;
- if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
- goto continue_next_st;
-
/* Check if ST, add to the list_st */
if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
goto continue_next_st;
@@ -4009,59 +4048,155 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
st_ddb_idx->fw_ddb_idx = idx;
- list_add_tail(&st_ddb_idx->list, &list_st);
+ list_add_tail(&st_ddb_idx->list, list_st);
continue_next_st:
if (next_idx == 0)
break;
}
- /* Before issuing conn open mbox, ensure all IPs states are configured
- * Note, conn open fails if IPs are not configured
+exit_st_list:
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
+/**
+ * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
+ * @ha: pointer to adapter structure
+ * @list_ddb: list from which failed DDBs are to be removed
+ *
+ * Iterate over the list of DDBs and remove those that are in either the
+ * no-connection-active or the session-failed state.
+ **/
+static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
+ struct list_head *list_ddb)
+{
+ struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
+ uint32_t next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+ int ret;
+
+ list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+ ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
+ NULL, 0, NULL, &next_idx, &state,
+ &conn_err, NULL, NULL);
+ if (ret == QLA_ERROR)
+ continue;
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED) {
+ list_del_init(&ddb_idx->list);
+ vfree(ddb_idx);
+ }
+ }
+}
+
+static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ int is_reset)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_session *sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_endpoint *ep;
+ uint16_t cmds_max = 32;
+ uint16_t conn_id = 0;
+ uint32_t initial_cmdsn = 0;
+ int ret = QLA_SUCCESS;
+
+ struct ddb_entry *ddb_entry = NULL;
+
+	/* Create session object with INVALID_ENTRY;
+	 * the target_id gets set when we issue the login
*/
- qla4xxx_wait_for_ip_configuration(ha);
+ cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
+ cmds_max, sizeof(struct ddb_entry),
+ sizeof(struct ql4_task_data),
+ initial_cmdsn, INVALID_ENTRY);
+ if (!cls_sess) {
+ ret = QLA_ERROR;
+ goto exit_setup;
+ }
- /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
- list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
- qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+	/*
+	 * iscsi_session_setup increments the driver reference
+	 * count, which would prevent the driver from being
+	 * unloaded; call module_put here to drop that reference.
+	 **/
+ module_put(qla4xxx_iscsi_transport.owner);
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ddb_entry->sess = cls_sess;
+
+ cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
+ memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+
+ qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
+
+ cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
+
+ if (!cls_conn) {
+ ret = QLA_ERROR;
+ goto exit_setup;
}
- /* Wait to ensure all sendtargets are done for min 12 sec wait */
- tmo = ((ha->def_timeout < LOGIN_TOV) ? LOGIN_TOV : ha->def_timeout);
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Default time to wait for build ddb %d\n", tmo));
+ ddb_entry->conn = cls_conn;
- wtime = jiffies + (HZ * tmo);
- do {
- list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st,
- list) {
- ret = qla4xxx_get_fwddb_entry(ha,
- st_ddb_idx->fw_ddb_idx,
- NULL, 0, NULL, &next_idx,
- &state, &conn_err, NULL,
- NULL);
- if (ret == QLA_ERROR)
- continue;
+	/* Set up the ep for displaying attributes in sysfs */
+ ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
+ if (ep) {
+ ep->conn = cls_conn;
+ cls_conn->ep = ep;
+ } else {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
+ ret = QLA_ERROR;
+ goto exit_setup;
+ }
- if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
- state == DDB_DS_SESSION_FAILED) {
- list_del_init(&st_ddb_idx->list);
- vfree(st_ddb_idx);
- }
- }
- schedule_timeout_uninterruptible(HZ / 10);
- } while (time_after(wtime, jiffies));
+ /* Update sess/conn params */
+ qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
- /* Free up the sendtargets list */
- list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
- list_del_init(&st_ddb_idx->list);
- vfree(st_ddb_idx);
+ if (is_reset == RESET_ADAPTER) {
+ iscsi_block_session(cls_sess);
+		/* Use the relogin path to discover new devices by
+		 * short-circuiting the relogin timer logic: instead of
+		 * arming the timer, set the flags to initiate login
+		 * right away.
+		 */
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ set_bit(DF_RELOGIN, &ddb_entry->flags);
}
+exit_setup:
+ return ret;
+}
+
+static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
+ struct list_head *list_nt, int is_reset)
+{
+ struct dev_db_entry *fw_ddb_entry;
+ dma_addr_t fw_ddb_dma;
+ int max_ddbs;
+ int fw_idx_size;
+ int ret;
+ uint32_t idx = 0, next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+ uint16_t conn_id = 0;
+ struct qla_ddb_index *nt_ddb_idx;
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+ goto exit_nt_list;
+ }
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+ fw_idx_size = sizeof(struct qla_ddb_index);
+
for (idx = 0; idx < max_ddbs; idx = next_idx) {
- ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
- fw_ddb_dma, NULL,
- &next_idx, &state, &conn_err,
- NULL, &conn_id);
+ ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
+ NULL, &next_idx, &state,
+ &conn_err, NULL, &conn_id);
if (ret == QLA_ERROR)
break;
@@ -4072,107 +4207,113 @@ continue_next_st:
if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
goto continue_next_nt;
- if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
- state == DDB_DS_SESSION_FAILED) {
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Adding DDB to session = 0x%x\n",
- idx));
- if (is_reset == INIT_ADAPTER) {
- nt_ddb_idx = vmalloc(fw_idx_size);
- if (!nt_ddb_idx)
- break;
-
- nt_ddb_idx->fw_ddb_idx = idx;
-
- memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
- sizeof(struct dev_db_entry));
-
- if (qla4xxx_is_flash_ddb_exists(ha, &list_nt,
- fw_ddb_entry) == QLA_SUCCESS) {
- vfree(nt_ddb_idx);
- goto continue_next_nt;
- }
- list_add_tail(&nt_ddb_idx->list, &list_nt);
- } else if (is_reset == RESET_ADAPTER) {
- if (qla4xxx_is_session_exists(ha,
- fw_ddb_entry) == QLA_SUCCESS)
- goto continue_next_nt;
- }
+ if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED))
+ goto continue_next_nt;
- /* Create session object, with INVALID_ENTRY,
- * the targer_id would get set when we issue the login
- */
- cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport,
- ha->host, cmds_max,
- sizeof(struct ddb_entry),
- sizeof(struct ql4_task_data),
- initial_cmdsn, INVALID_ENTRY);
- if (!cls_sess)
- goto exit_ddb_list;
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Adding DDB to session = 0x%x\n", idx));
+ if (is_reset == INIT_ADAPTER) {
+ nt_ddb_idx = vmalloc(fw_idx_size);
+ if (!nt_ddb_idx)
+ break;
- /*
- * iscsi_session_setup increments the driver reference
- * count which wouldn't let the driver to be unloaded.
- * so calling module_put function to decrement the
- * reference count.
- **/
- module_put(qla4xxx_iscsi_transport.owner);
- sess = cls_sess->dd_data;
- ddb_entry = sess->dd_data;
- ddb_entry->sess = cls_sess;
+ nt_ddb_idx->fw_ddb_idx = idx;
- cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
- memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+ memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
sizeof(struct dev_db_entry));
- qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
-
- cls_conn = iscsi_conn_setup(cls_sess,
- sizeof(struct qla_conn),
- conn_id);
- if (!cls_conn)
- goto exit_ddb_list;
-
- ddb_entry->conn = cls_conn;
-
- /* Setup ep, for displaying attributes in sysfs */
- ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
- if (ep) {
- ep->conn = cls_conn;
- cls_conn->ep = ep;
- } else {
- DEBUG2(ql4_printk(KERN_ERR, ha,
- "Unable to get ep\n"));
- }
-
- /* Update sess/conn params */
- qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess,
- cls_conn);
-
- if (is_reset == RESET_ADAPTER) {
- iscsi_block_session(cls_sess);
- /* Use the relogin path to discover new devices
- * by short-circuting the logic of setting
- * timer to relogin - instead set the flags
- * to initiate login right away.
- */
- set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
- set_bit(DF_RELOGIN, &ddb_entry->flags);
+ if (qla4xxx_is_flash_ddb_exists(ha, list_nt,
+ fw_ddb_entry) == QLA_SUCCESS) {
+ vfree(nt_ddb_idx);
+ goto continue_next_nt;
}
+ list_add_tail(&nt_ddb_idx->list, list_nt);
+ } else if (is_reset == RESET_ADAPTER) {
+ if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
+ QLA_SUCCESS)
+ goto continue_next_nt;
}
+
+ ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset);
+ if (ret == QLA_ERROR)
+ goto exit_nt_list;
+
continue_next_nt:
if (next_idx == 0)
break;
}
-exit_ddb_list:
- qla4xxx_free_nt_list(&list_nt);
+
+exit_nt_list:
if (fw_ddb_entry)
dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
+/**
+ * qla4xxx_build_ddb_list - Build ddb list and setup sessions
+ * @ha: pointer to adapter structure
+ * @is_reset: Is this the init path or the reset path
+ *
+ * Create a list of sendtargets (st) from the firmware DDBs, fire the
+ * sendtargets by issuing connection open, then create the list of normal
+ * targets (nt) from the firmware DDBs. Finally, set up session and
+ * connection objects based on the nt list.
+ **/
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+{
+ uint16_t tmo = 0;
+ struct list_head list_st, list_nt;
+ struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
+ unsigned long wtime;
+
+ if (!test_bit(AF_LINK_UP, &ha->flags)) {
+ set_bit(AF_BUILD_DDB_LIST, &ha->flags);
+ ha->is_reset = is_reset;
+ return;
+ }
+
+ INIT_LIST_HEAD(&list_st);
+ INIT_LIST_HEAD(&list_nt);
+
+ qla4xxx_build_st_list(ha, &list_st);
+
+	/* Before issuing the conn open mbox, ensure all IP states are
+	 * configured; note that conn open fails if the IPs are not configured.
+	 */
+ qla4xxx_wait_for_ip_configuration(ha);
+
+	/* Go through the STs and fire the sendtargets by issuing conn open mbx */
+ list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+ qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+ }
+
+	/* Wait for all sendtargets to complete; minimum wait of 12 sec */
+ tmo = ((ha->def_timeout > LOGIN_TOV) &&
+ (ha->def_timeout < LOGIN_TOV * 10) ?
+ ha->def_timeout : LOGIN_TOV);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Default time to wait for build ddb %d\n", tmo));
+
+ wtime = jiffies + (HZ * tmo);
+ do {
+ if (list_empty(&list_st))
+ break;
+
+ qla4xxx_remove_failed_ddb(ha, &list_st);
+ schedule_timeout_uninterruptible(HZ / 10);
+ } while (time_after(wtime, jiffies));
+
+ /* Free up the sendtargets list */
+ qla4xxx_free_ddb_list(&list_st);
+
+ qla4xxx_build_nt_list(ha, &list_nt, is_reset);
+
+ qla4xxx_free_ddb_list(&list_nt);
qla4xxx_free_ddb_index(ha);
}
-
/**
* qla4xxx_probe_adapter - callback function to probe HBA
* @pdev: pointer to pci_dev structure
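
qla4xxx_build_ddb_list() above waits on the sendtarget list with a deadline-based poll. A minimal sketch of that idiom, with an illustrative done flag: compute an absolute deadline in jiffies, then re-check the condition with a short uninterruptible sleep until it holds or the deadline passes.

#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/types.h>

static void poll_until_done(bool *done, unsigned int timeout_secs)
{
	unsigned long deadline = jiffies + HZ * timeout_secs;

	do {
		if (*done)
			break;
		/* Sleep ~100 ms between checks instead of busy-waiting. */
		schedule_timeout_uninterruptible(HZ / 10);
	} while (time_after(deadline, jiffies));
}
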
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 26a3fa34a33..133989b3a9f 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k10"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k12"
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f85cfa6c47b..b2c95dbe9d6 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1316,15 +1316,10 @@ static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
}
if (scsi_target_is_busy(starget)) {
- if (list_empty(&sdev->starved_entry))
- list_add_tail(&sdev->starved_entry,
- &shost->starved_list);
+ list_move_tail(&sdev->starved_entry, &shost->starved_list);
return 0;
}
- /* We're OK to process the command, so we can't be starved */
- if (!list_empty(&sdev->starved_entry))
- list_del_init(&sdev->starved_entry);
return 1;
}
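
The scsi_lib.c hunk above folds the list_empty() check and list_add_tail() into a single list_move_tail(). A minimal sketch of the idiom with illustrative types: list_move_tail() unlinks the node from whatever list it is on (a harmless self-unlink for a node initialized with INIT_LIST_HEAD) and appends it to the destination tail.

#include <linux/list.h>

struct item {
	struct list_head entry;
};

static void requeue_item(struct item *it, struct list_head *queue)
{
	/* Equivalent to list_del() followed by list_add_tail(), so the
	 * caller no longer needs a list_empty() guard. */
	list_move_tail(&it->entry, queue);
}
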
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 1b214910b71..f59d4a05ecd 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3048,7 +3048,8 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
spin_lock_irqsave(shost->host_lock, flags);
rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
- FC_RPORT_DEVLOSS_PENDING);
+ FC_RPORT_DEVLOSS_PENDING |
+ FC_RPORT_DEVLOSS_CALLBK_DONE);
spin_unlock_irqrestore(shost->host_lock, flags);
/* ensure any stgt delete functions are done */
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 02d99982a74..eacd46bb36b 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2368,16 +2368,15 @@ static ssize_t
sg_proc_write_adio(struct file *filp, const char __user *buffer,
size_t count, loff_t *off)
{
- int num;
- char buff[11];
+ int err;
+ unsigned long num;
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
- num = (count < 10) ? count : 10;
- if (copy_from_user(buff, buffer, num))
- return -EFAULT;
- buff[num] = '\0';
- sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
+ err = kstrtoul_from_user(buffer, count, 0, &num);
+ if (err)
+ return err;
+ sg_allow_dio = num ? 1 : 0;
return count;
}
@@ -2390,17 +2389,15 @@ static ssize_t
sg_proc_write_dressz(struct file *filp, const char __user *buffer,
size_t count, loff_t *off)
{
- int num;
+ int err;
unsigned long k = ULONG_MAX;
- char buff[11];
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
- num = (count < 10) ? count : 10;
- if (copy_from_user(buff, buffer, num))
- return -EFAULT;
- buff[num] = '\0';
- k = simple_strtoul(buff, NULL, 10);
+
+ err = kstrtoul_from_user(buffer, count, 0, &k);
+ if (err)
+ return err;
if (k <= 1048576) { /* limit "big buff" to 1 MB */
sg_big_buff = k;
return count;
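
Both sg.c hunks replace the bounded copy_from_user() plus simple_strtoul() pair with kstrtoul_from_user(), which copies, NUL-terminates, and parses in one call and, unlike simple_strtoul(), reports malformed input as an error. A sketch of a proc write handler using it; the handler and flag names are illustrative.

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static int example_flag;

static ssize_t example_proc_write(struct file *filp, const char __user *buffer,
				  size_t count, loff_t *off)
{
	unsigned long val;
	int err;

	/* Base 0 accepts decimal, 0x-prefixed hex, and 0-prefixed octal. */
	err = kstrtoul_from_user(buffer, count, 0, &val);
	if (err)
		return err;

	example_flag = val ? 1 : 0;
	return count;
}
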
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index b4543f575f4..36d1ed7817e 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -839,6 +839,10 @@ static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
struct sym_lcb *lp = sym_lp(tp, sdev->lun);
unsigned long flags;
+ /* if slave_alloc returned before allocating a sym_lcb, return */
+ if (!lp)
+ return;
+
spin_lock_irqsave(np->s.host->host_lock, flags);
if (lp->busy_itlq || lp->busy_itl) {
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index e743a45ee92..8418eb03665 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -131,7 +131,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
rxchan = dws->rxchan;
/* 2. Prepare the TX dma transfer */
- txconf.direction = DMA_TO_DEVICE;
+ txconf.direction = DMA_MEM_TO_DEV;
txconf.dst_addr = dws->dma_addr;
txconf.dst_maxburst = LNW_DMA_MSIZE_16;
txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -147,13 +147,13 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
txdesc = txchan->device->device_prep_slave_sg(txchan,
&dws->tx_sgl,
1,
- DMA_TO_DEVICE,
+ DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
txdesc->callback = dw_spi_dma_done;
txdesc->callback_param = dws;
/* 3. Prepare the RX dma transfer */
- rxconf.direction = DMA_FROM_DEVICE;
+ rxconf.direction = DMA_DEV_TO_MEM;
rxconf.src_addr = dws->dma_addr;
rxconf.src_maxburst = LNW_DMA_MSIZE_16;
rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -169,7 +169,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
&dws->rx_sgl,
1,
- DMA_FROM_DEVICE,
+ DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
rxdesc->callback = dw_spi_dma_done;
rxdesc->callback_param = dws;
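
This and the following conversions track the dmaengine API split: buffer mapping keeps enum dma_data_direction (DMA_TO_DEVICE/DMA_FROM_DEVICE), while slave-transfer preparation now takes enum dma_transfer_direction (DMA_MEM_TO_DEV/DMA_DEV_TO_MEM). A sketch of a TX prepare path under that 3.3-era API; the function name is illustrative.

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int example_prep_tx(struct dma_chan *chan, struct scatterlist *sg)
{
	struct dma_async_tx_descriptor *desc;

	/* Mapping still uses the dma_data_direction enum ... */
	if (dma_map_sg(chan->device->dev, sg, 1, DMA_TO_DEVICE) != 1)
		return -ENOMEM;

	/* ... but slave prep now takes a dma_transfer_direction. */
	desc = chan->device->device_prep_slave_sg(chan, sg, 1, DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(chan->device->dev, sg, 1, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	return 0;
}
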
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 0a282e5fcc9..d46e55c720b 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -551,6 +551,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
struct dma_async_tx_descriptor *txd;
enum dma_slave_buswidth buswidth;
struct dma_slave_config conf;
+ enum dma_transfer_direction slave_dirn;
struct scatterlist *sg;
struct sg_table *sgt;
struct dma_chan *chan;
@@ -573,6 +574,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
conf.src_addr = espi->sspdr_phys;
conf.src_addr_width = buswidth;
+ slave_dirn = DMA_DEV_TO_MEM;
} else {
chan = espi->dma_tx;
buf = t->tx_buf;
@@ -580,6 +582,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
conf.dst_addr = espi->sspdr_phys;
conf.dst_addr_width = buswidth;
+ slave_dirn = DMA_MEM_TO_DEV;
}
ret = dmaengine_slave_config(chan, &conf);
@@ -631,7 +634,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
return ERR_PTR(-ENOMEM);
txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents,
- dir, DMA_CTRL_ACK);
+ slave_dirn, DMA_CTRL_ACK);
if (!txd) {
dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
return ERR_PTR(-ENOMEM);
@@ -979,7 +982,7 @@ static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
dma_cap_set(DMA_SLAVE, mask);
espi->dma_rx_data.port = EP93XX_DMA_SSP;
- espi->dma_rx_data.direction = DMA_FROM_DEVICE;
+ espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
espi->dma_rx_data.name = "ep93xx-spi-rx";
espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
@@ -990,7 +993,7 @@ static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
}
espi->dma_tx_data.port = EP93XX_DMA_SSP;
- espi->dma_tx_data.direction = DMA_TO_DEVICE;
+ espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
espi->dma_tx_data.name = "ep93xx-spi-tx";
espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index f1f5efbc340..2f9cb43a239 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -900,11 +900,11 @@ static int configure_dma(struct pl022 *pl022)
{
struct dma_slave_config rx_conf = {
.src_addr = SSP_DR(pl022->phybase),
- .direction = DMA_FROM_DEVICE,
+ .direction = DMA_DEV_TO_MEM,
};
struct dma_slave_config tx_conf = {
.dst_addr = SSP_DR(pl022->phybase),
- .direction = DMA_TO_DEVICE,
+ .direction = DMA_MEM_TO_DEV,
};
unsigned int pages;
int ret;
@@ -1041,7 +1041,7 @@ static int configure_dma(struct pl022 *pl022)
rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
pl022->sgt_rx.sgl,
rx_sglen,
- DMA_FROM_DEVICE,
+ DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!rxdesc)
goto err_rxdesc;
@@ -1049,7 +1049,7 @@ static int configure_dma(struct pl022 *pl022)
txdesc = txchan->device->device_prep_slave_sg(txchan,
pl022->sgt_tx.sgl,
tx_sglen,
- DMA_TO_DEVICE,
+ DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!txdesc)
goto err_txdesc;
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 7086583b910..2a6429d8c36 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1079,7 +1079,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
}
sg = dma->sg_rx_p;
desc_rx = dma->chan_rx->device->device_prep_slave_sg(dma->chan_rx, sg,
- num, DMA_FROM_DEVICE,
+ num, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx) {
dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
@@ -1124,7 +1124,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
}
sg = dma->sg_tx_p;
desc_tx = dma->chan_tx->device->device_prep_slave_sg(dma->chan_tx,
- sg, num, DMA_TO_DEVICE,
+ sg, num, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 6958594f2fc..9ae024025ff 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -268,7 +268,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
struct dma_slave_config tx_conf = {
.dst_addr = uap->port.mapbase + UART01x_DR,
.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
- .direction = DMA_TO_DEVICE,
+ .direction = DMA_MEM_TO_DEV,
.dst_maxburst = uap->fifosize >> 1,
};
struct dma_chan *chan;
@@ -301,7 +301,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
struct dma_slave_config rx_conf = {
.src_addr = uap->port.mapbase + UART01x_DR,
.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
- .direction = DMA_FROM_DEVICE,
+ .direction = DMA_DEV_TO_MEM,
.src_maxburst = uap->fifosize >> 1,
};
@@ -480,7 +480,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
return -EBUSY;
}
- desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_TO_DEVICE,
+ desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
@@ -676,7 +676,7 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
dma_dev = rxchan->device;
desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1,
- DMA_FROM_DEVICE,
+ DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
/*
* If the DMA engine is busy and cannot prepare a
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index de0f613ed6f..17ae65762d1 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -764,7 +764,7 @@ static int dma_handle_rx(struct eg20t_port *priv)
sg_dma_address(sg) = priv->rx_buf_dma;
desc = priv->chan_rx->device->device_prep_slave_sg(priv->chan_rx,
- sg, 1, DMA_FROM_DEVICE,
+ sg, 1, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
@@ -923,7 +923,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
}
desc = priv->chan_tx->device->device_prep_slave_sg(priv->chan_tx,
- priv->sg_tx_p, nent, DMA_TO_DEVICE,
+ priv->sg_tx_p, nent, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
dev_err(priv->port.dev, "%s:device_prep_slave_sg Failed\n",
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 9e62349b3d9..75085795528 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1339,7 +1339,7 @@ static void sci_submit_rx(struct sci_port *s)
struct dma_async_tx_descriptor *desc;
desc = chan->device->device_prep_slave_sg(chan,
- sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);
+ sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
if (desc) {
s->desc_rx[i] = desc;
@@ -1454,7 +1454,7 @@ static void work_fn_tx(struct work_struct *work)
BUG_ON(!sg_dma_len(sg));
desc = chan->device->device_prep_slave_sg(chan,
- sg, s->sg_len_tx, DMA_TO_DEVICE,
+ sg, s->sg_len_tx, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
/* switch to PIO */
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 32793ce3d9e..9c2cc463389 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -183,7 +183,7 @@ static int __devinit ehci_hcd_xilinx_of_probe(struct platform_device *op)
}
irq = irq_of_parse_and_map(dn, 0);
- if (irq == NO_IRQ) {
+ if (!irq) {
printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
rv = -EBUSY;
goto err_irq;
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index a163632877a..97cb45916c4 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -84,7 +84,7 @@ static bool ux500_configure_channel(struct dma_channel *channel,
struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
struct dma_chan *dma_chan = ux500_channel->dma_chan;
struct dma_async_tx_descriptor *dma_desc;
- enum dma_data_direction direction;
+ enum dma_transfer_direction direction;
struct scatterlist sg;
struct dma_slave_config slave_conf;
enum dma_slave_buswidth addr_width;
@@ -104,7 +104,7 @@ static bool ux500_configure_channel(struct dma_channel *channel,
sg_dma_address(&sg) = dma_addr;
sg_dma_len(&sg) = len;
- direction = ux500_channel->is_tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ direction = ux500_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
addr_width = (len & 0x3) ? DMA_SLAVE_BUSWIDTH_1_BYTE :
DMA_SLAVE_BUSWIDTH_4_BYTES;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index b51fcd80d24..72339bd6fca 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -772,10 +772,10 @@ static void usbhsf_dma_prepare_tasklet(unsigned long data)
struct dma_async_tx_descriptor *desc;
struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
struct device *dev = usbhs_priv_to_dev(priv);
- enum dma_data_direction dir;
+ enum dma_transfer_direction dir;
dma_cookie_t cookie;
- dir = usbhs_pipe_is_dir_in(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
sg_init_table(&sg, 1);
sg_set_page(&sg, virt_to_page(pkt->dma),
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 882a51fe7b3..9dab1f51dd4 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -856,9 +856,9 @@ static const struct file_operations vhost_net_fops = {
};
static struct miscdevice vhost_net_misc = {
- MISC_DYNAMIC_MINOR,
- "vhost-net",
- &vhost_net_fops,
+ .minor = VHOST_NET_MINOR,
+ .name = "vhost-net",
+ .fops = &vhost_net_fops,
};
static int vhost_net_init(void)
@@ -879,3 +879,5 @@ MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
+MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
+MODULE_ALIAS("devname:vhost-net");
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index e3406ab3130..727a5149d81 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -245,6 +245,7 @@ struct mx3fb_data {
uint32_t h_start_width;
uint32_t v_start_width;
+ enum disp_data_mapping disp_data_fmt;
};
struct dma_chan_request {
@@ -287,11 +288,14 @@ static void mx3fb_write_reg(struct mx3fb_data *mx3fb, u32 value, unsigned long r
__raw_writel(value, mx3fb->reg_base + reg);
}
-static const uint32_t di_mappings[] = {
- 0x1600AAAA, 0x00E05555, 0x00070000, 3, /* RGB888 */
- 0x0005000F, 0x000B000F, 0x0011000F, 1, /* RGB666 */
- 0x0011000F, 0x000B000F, 0x0005000F, 1, /* BGR666 */
- 0x0004003F, 0x000A000F, 0x000F003F, 1 /* RGB565 */
+struct di_mapping {
+ uint32_t b0, b1, b2;
+};
+
+static const struct di_mapping di_mappings[] = {
+ [IPU_DISP_DATA_MAPPING_RGB666] = { 0x0005000f, 0x000b000f, 0x0011000f },
+ [IPU_DISP_DATA_MAPPING_RGB565] = { 0x0004003f, 0x000a000f, 0x000f003f },
+ [IPU_DISP_DATA_MAPPING_RGB888] = { 0x00070000, 0x000f0000, 0x00170000 },
};
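
The replacement table above uses designated array initializers keyed by enum values, so each row stays attached to its format constant regardless of declaration order, and unlisted indices are zero-filled. A small sketch of the idiom, reusing struct di_mapping with illustrative enum names:

enum example_fmt {
	EXAMPLE_FMT_RGB666,
	EXAMPLE_FMT_RGB565,
};

static const struct di_mapping example_mappings[] = {
	[EXAMPLE_FMT_RGB565] = { 0x0004003f, 0x000a000f, 0x000f003f },
	[EXAMPLE_FMT_RGB666] = { 0x0005000f, 0x000b000f, 0x0011000f },
};
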
static void sdc_fb_init(struct mx3fb_info *fbi)
@@ -334,7 +338,7 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
/* This enables the channel */
if (mx3_fbi->cookie < 0) {
mx3_fbi->txd = dma_chan->device->device_prep_slave_sg(dma_chan,
- &mx3_fbi->sg[0], 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
+ &mx3_fbi->sg[0], 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!mx3_fbi->txd) {
dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n",
dma_chan->chan_id);
@@ -425,7 +429,6 @@ static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel
* @pixel_clk: desired pixel clock frequency in Hz.
* @width: width of panel in pixels.
* @height: height of panel in pixels.
- * @pixel_fmt: pixel format of buffer as FOURCC ASCII code.
* @h_start_width: number of pixel clocks between the HSYNC signal pulse
* and the start of valid data.
* @h_sync_width: width of the HSYNC signal in units of pixel clocks.
@@ -442,7 +445,6 @@ static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel
static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
uint32_t pixel_clk,
uint16_t width, uint16_t height,
- enum pixel_fmt pixel_fmt,
uint16_t h_start_width, uint16_t h_sync_width,
uint16_t h_end_width, uint16_t v_start_width,
uint16_t v_sync_width, uint16_t v_end_width,
@@ -453,6 +455,7 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
uint32_t old_conf;
uint32_t div;
struct clk *ipu_clk;
+ const struct di_mapping *map;
dev_dbg(mx3fb->dev, "panel size = %d x %d", width, height);
@@ -540,36 +543,10 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
sig.Vsync_pol << DI_D3_VSYNC_POL_SHIFT;
mx3fb_write_reg(mx3fb, old_conf, DI_DISP_SIG_POL);
- switch (pixel_fmt) {
- case IPU_PIX_FMT_RGB24:
- mx3fb_write_reg(mx3fb, di_mappings[0], DI_DISP3_B0_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[1], DI_DISP3_B1_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[2], DI_DISP3_B2_MAP);
- mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
- ((di_mappings[3] - 1) << 12), DI_DISP_ACC_CC);
- break;
- case IPU_PIX_FMT_RGB666:
- mx3fb_write_reg(mx3fb, di_mappings[4], DI_DISP3_B0_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[5], DI_DISP3_B1_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[6], DI_DISP3_B2_MAP);
- mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
- ((di_mappings[7] - 1) << 12), DI_DISP_ACC_CC);
- break;
- case IPU_PIX_FMT_BGR666:
- mx3fb_write_reg(mx3fb, di_mappings[8], DI_DISP3_B0_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[9], DI_DISP3_B1_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[10], DI_DISP3_B2_MAP);
- mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
- ((di_mappings[11] - 1) << 12), DI_DISP_ACC_CC);
- break;
- default:
- mx3fb_write_reg(mx3fb, di_mappings[12], DI_DISP3_B0_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[13], DI_DISP3_B1_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[14], DI_DISP3_B2_MAP);
- mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
- ((di_mappings[15] - 1) << 12), DI_DISP_ACC_CC);
- break;
- }
+ map = &di_mappings[mx3fb->disp_data_fmt];
+ mx3fb_write_reg(mx3fb, map->b0, DI_DISP3_B0_MAP);
+ mx3fb_write_reg(mx3fb, map->b1, DI_DISP3_B1_MAP);
+ mx3fb_write_reg(mx3fb, map->b2, DI_DISP3_B2_MAP);
spin_unlock_irqrestore(&mx3fb->lock, lock_flags);
@@ -780,8 +757,6 @@ static int __set_par(struct fb_info *fbi, bool lock)
if (sdc_init_panel(mx3fb, mode,
(PICOS2KHZ(fbi->var.pixclock)) * 1000UL,
fbi->var.xres, fbi->var.yres,
- (fbi->var.sync & FB_SYNC_SWAP_RGB) ?
- IPU_PIX_FMT_BGR666 : IPU_PIX_FMT_RGB666,
fbi->var.left_margin,
fbi->var.hsync_len,
fbi->var.right_margin +
@@ -1117,7 +1092,7 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var,
async_tx_ack(mx3_fbi->txd);
txd = dma_chan->device->device_prep_slave_sg(dma_chan, sg +
- mx3_fbi->cur_ipu_buf, 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
+ mx3_fbi->cur_ipu_buf, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!txd) {
dev_err(fbi->device,
"Error preparing a DMA transaction descriptor.\n");
@@ -1349,6 +1324,12 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
const struct fb_videomode *mode;
int ret, num_modes;
+ if (mx3fb_pdata->disp_data_fmt >= ARRAY_SIZE(di_mappings)) {
+ dev_err(dev, "Illegal display data format %d\n",
+ mx3fb_pdata->disp_data_fmt);
+ return -EINVAL;
+ }
+
ichan->client = mx3fb;
irq = ichan->eof_irq;
@@ -1402,6 +1383,8 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
mx3fbi->mx3fb = mx3fb;
mx3fbi->blank = FB_BLANK_NORMAL;
+ mx3fb->disp_data_fmt = mx3fb_pdata->disp_data_fmt;
+
init_completion(&mx3fbi->flip_cmpl);
disable_irq(ichan->eof_irq);
dev_dbg(mx3fb->dev, "disabling irq %d\n", ichan->eof_irq);
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index ba6eda4b514..18c1bb6ffce 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -11,3 +11,4 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
((mfn1 == mfn2) || ((mfn1+1) == mfn2));
}
+EXPORT_SYMBOL(xen_biovec_phys_mergeable);
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 3832e303c33..596e6a7b17d 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -221,7 +221,7 @@ static int register_balloon(struct device *dev)
{
int i, error;
- error = bus_register(&balloon_subsys);
+ error = subsys_system_register(&balloon_subsys, NULL);
if (error)
return error;
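
subsys_system_register() is the 3.3-era driver-core replacement for bus_register() on "system" buses: it registers the bus and creates its root device under /sys/devices/system/ in one call. A sketch assuming that API; the subsystem name is illustrative.

#include <linux/device.h>
#include <linux/init.h>

static struct bus_type example_subsys = {
	.name = "example",
	.dev_name = "example",
};

static int __init example_subsys_init(void)
{
	/* The second argument is an optional NULL-terminated array of
	 * attribute groups for the subsystem's root device. */
	return subsys_system_register(&example_subsys, NULL);
}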