author    Dmitry Torokhov <dmitry.torokhov@gmail.com>    2015-02-10 11:35:36 -0800
committer Dmitry Torokhov <dmitry.torokhov@gmail.com>    2015-02-10 11:35:36 -0800
commit    4ba24fef3eb3b142197135223b90ced2f319cd53 (patch)
tree      a20c125b27740ec7b4c761b11d801108e1b316b2 /drivers/spi/spi-dw-mid.c
parent    47c1ffb2b6b630894e9a16442611c056ab21c057 (diff)
parent    98a4a59ee31a12105a2b84f5b8b515ac2cb208ef (diff)
Merge branch 'next' into for-linus
Prepare first round of input updates for 3.20.
Diffstat (limited to 'drivers/spi/spi-dw-mid.c')
-rw-r--r--   drivers/spi/spi-dw-mid.c   165
1 file changed, 110 insertions(+), 55 deletions(-)
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 6d207afec8c..7281316a5ec 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -1,7 +1,7 @@
/*
* Special handling for DW core on Intel MID platform
*
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009, 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -11,10 +11,6 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/dma-mapping.h>
@@ -30,6 +26,9 @@
#include <linux/intel_mid_dma.h>
#include <linux/pci.h>
+#define RX_BUSY 0
+#define TX_BUSY 1
+
struct mid_dma {
struct intel_mid_dma_slave dmas_tx;
struct intel_mid_dma_slave dmas_rx;
@@ -39,22 +38,25 @@ static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
struct dw_spi *dws = param;
- return dws->dmac && (&dws->dmac->dev == chan->device->dev);
+ return dws->dma_dev == chan->device->dev;
}
static int mid_spi_dma_init(struct dw_spi *dws)
{
struct mid_dma *dw_dma = dws->dma_priv;
+ struct pci_dev *dma_dev;
struct intel_mid_dma_slave *rxs, *txs;
dma_cap_mask_t mask;
/*
* Get pci device for DMA controller, currently it could only
- * be the DMA controller of either Moorestown or Medfield
+ * be the DMA controller of Medfield
*/
- dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0813, NULL);
- if (!dws->dmac)
- dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
+ dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
+ if (!dma_dev)
+ return -ENODEV;
+
+ dws->dma_dev = &dma_dev->dev;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
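The hunk above reworks the channel filter to match on the stored struct device pointer, but the part of mid_spi_dma_init() that actually requests the two channels falls outside the visible hunks. As a hedged sketch only (not the file's elided lines), this is how such a filter is typically paired with dma_request_channel(); the field names and error labels follow the ones visible in the next hunk:

	/* Sketch: request one RX and one TX channel, matched by
	 * mid_spi_dma_chan_filter() against dws->dma_dev. */
	dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws);
	if (!dws->rxchan)
		goto err_exit;

	dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws);
	if (!dws->txchan)
		goto free_rxchan;

	dws->dma_inited = 1;
	return 0;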
@@ -83,103 +85,156 @@ static int mid_spi_dma_init(struct dw_spi *dws)
free_rxchan:
dma_release_channel(dws->rxchan);
err_exit:
- return -1;
-
+ return -EBUSY;
}
static void mid_spi_dma_exit(struct dw_spi *dws)
{
+ if (!dws->dma_inited)
+ return;
+
+ dmaengine_terminate_all(dws->txchan);
dma_release_channel(dws->txchan);
+
+ dmaengine_terminate_all(dws->rxchan);
dma_release_channel(dws->rxchan);
}
/*
- * dws->dma_chan_done is cleared before the dma transfer starts,
- * callback for rx/tx channel will each increment it by 1.
- * Reaching 2 means the whole spi transaction is done.
+ * dws->dma_chan_busy is set before the dma transfer starts, callback for tx
+ * channel will clear a corresponding bit.
*/
-static void dw_spi_dma_done(void *arg)
+static void dw_spi_dma_tx_done(void *arg)
{
struct dw_spi *dws = arg;
- if (++dws->dma_chan_done != 2)
+ if (test_and_clear_bit(TX_BUSY, &dws->dma_chan_busy) & BIT(RX_BUSY))
return;
dw_spi_xfer_done(dws);
}
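The comment above describes the completion scheme introduced here: each direction owns one bit of dws->dma_chan_busy, both bits are set before the transfer starts, and whichever callback clears the last remaining bit reports the whole transfer as finished. A minimal standalone illustration of that idea, using C11 atomics in place of the kernel's bit operations (all names below are illustrative, not taken from the driver):

#include <stdio.h>
#include <stdatomic.h>

#define RX_BUSY 0
#define TX_BUSY 1

static atomic_uint busy;

/* Called from each (simulated) DMA completion callback. */
static void channel_done(int bit)
{
	/* Atomically clear our bit and look at the previous value. */
	unsigned int old = atomic_fetch_and(&busy, ~(1u << bit));

	/* If the other direction is still marked busy, the SPI transfer
	 * as a whole is not finished yet. */
	if (old & ~(1u << bit))
		return;

	printf("whole SPI transfer done\n");	/* stands in for dw_spi_xfer_done() */
}

int main(void)
{
	atomic_store(&busy, (1u << RX_BUSY) | (1u << TX_BUSY));
	channel_done(TX_BUSY);	/* RX still busy: nothing reported */
	channel_done(RX_BUSY);	/* last bit cleared: transfer reported done */
	return 0;
}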
-static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
+static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws)
{
- struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
- struct dma_chan *txchan, *rxchan;
- struct dma_slave_config txconf, rxconf;
- u16 dma_ctrl = 0;
-
- /* 1. setup DMA related registers */
- if (cs_change) {
- spi_enable_chip(dws, 0);
- dw_writew(dws, DW_SPI_DMARDLR, 0xf);
- dw_writew(dws, DW_SPI_DMATDLR, 0x10);
- if (dws->tx_dma)
- dma_ctrl |= 0x2;
- if (dws->rx_dma)
- dma_ctrl |= 0x1;
- dw_writew(dws, DW_SPI_DMACR, dma_ctrl);
- spi_enable_chip(dws, 1);
- }
+ struct dma_slave_config txconf;
+ struct dma_async_tx_descriptor *txdesc;
- dws->dma_chan_done = 0;
- txchan = dws->txchan;
- rxchan = dws->rxchan;
+ if (!dws->tx_dma)
+ return NULL;
- /* 2. Prepare the TX dma transfer */
txconf.direction = DMA_MEM_TO_DEV;
txconf.dst_addr = dws->dma_addr;
txconf.dst_maxburst = LNW_DMA_MSIZE_16;
txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ txconf.dst_addr_width = dws->dma_width;
txconf.device_fc = false;
- txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
- (unsigned long) &txconf);
+ dmaengine_slave_config(dws->txchan, &txconf);
memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl));
dws->tx_sgl.dma_address = dws->tx_dma;
dws->tx_sgl.length = dws->len;
- txdesc = dmaengine_prep_slave_sg(txchan,
+ txdesc = dmaengine_prep_slave_sg(dws->txchan,
&dws->tx_sgl,
1,
DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT);
- txdesc->callback = dw_spi_dma_done;
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ txdesc->callback = dw_spi_dma_tx_done;
txdesc->callback_param = dws;
- /* 3. Prepare the RX dma transfer */
+ return txdesc;
+}
+
+/*
+ * dws->dma_chan_busy is set before the dma transfer starts, callback for rx
+ * channel will clear a corresponding bit.
+ */
+static void dw_spi_dma_rx_done(void *arg)
+{
+ struct dw_spi *dws = arg;
+
+ if (test_and_clear_bit(RX_BUSY, &dws->dma_chan_busy) & BIT(TX_BUSY))
+ return;
+ dw_spi_xfer_done(dws);
+}
+
+static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws)
+{
+ struct dma_slave_config rxconf;
+ struct dma_async_tx_descriptor *rxdesc;
+
+ if (!dws->rx_dma)
+ return NULL;
+
rxconf.direction = DMA_DEV_TO_MEM;
rxconf.src_addr = dws->dma_addr;
rxconf.src_maxburst = LNW_DMA_MSIZE_16;
rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ rxconf.src_addr_width = dws->dma_width;
rxconf.device_fc = false;
- rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
- (unsigned long) &rxconf);
+ dmaengine_slave_config(dws->rxchan, &rxconf);
memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl));
dws->rx_sgl.dma_address = dws->rx_dma;
dws->rx_sgl.length = dws->len;
- rxdesc = dmaengine_prep_slave_sg(rxchan,
+ rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
&dws->rx_sgl,
1,
DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT);
- rxdesc->callback = dw_spi_dma_done;
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ rxdesc->callback = dw_spi_dma_rx_done;
rxdesc->callback_param = dws;
+ return rxdesc;
+}
+
+static void dw_spi_dma_setup(struct dw_spi *dws)
+{
+ u16 dma_ctrl = 0;
+
+ spi_enable_chip(dws, 0);
+
+ dw_writew(dws, DW_SPI_DMARDLR, 0xf);
+ dw_writew(dws, DW_SPI_DMATDLR, 0x10);
+
+ if (dws->tx_dma)
+ dma_ctrl |= SPI_DMA_TDMAE;
+ if (dws->rx_dma)
+ dma_ctrl |= SPI_DMA_RDMAE;
+ dw_writew(dws, DW_SPI_DMACR, dma_ctrl);
+
+ spi_enable_chip(dws, 1);
+}
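For reference, the named DMACR bits written above map one-to-one onto the magic numbers the old code used (0x1 enabled receive DMA, 0x2 enabled transmit DMA), as can be read off the removed lines earlier in this hunk. The definitions below reproduce that mapping; their exact home in the driver's spi-dw.h header is an assumption:

#define SPI_DMA_RDMAE	(1 << 0)	/* receive DMA enable, replaces the literal 0x1 */
#define SPI_DMA_TDMAE	(1 << 1)	/* transmit DMA enable, replaces the literal 0x2 */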
+
+static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
+{
+ struct dma_async_tx_descriptor *txdesc, *rxdesc;
+
+ /* 1. setup DMA related registers */
+ if (cs_change)
+ dw_spi_dma_setup(dws);
+
+ /* 2. Prepare the TX dma transfer */
+ txdesc = dw_spi_dma_prepare_tx(dws);
+
+ /* 3. Prepare the RX dma transfer */
+ rxdesc = dw_spi_dma_prepare_rx(dws);
+
/* rx must be started before tx due to spi instinct */
- rxdesc->tx_submit(rxdesc);
- txdesc->tx_submit(txdesc);
+ if (rxdesc) {
+ set_bit(RX_BUSY, &dws->dma_chan_busy);
+ dmaengine_submit(rxdesc);
+ dma_async_issue_pending(dws->rxchan);
+ }
+
+ if (txdesc) {
+ set_bit(TX_BUSY, &dws->dma_chan_busy);
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(dws->txchan);
+ }
+
return 0;
}
@@ -190,7 +245,7 @@ static struct dw_spi_dma_ops mid_dma_ops = {
};
#endif
-/* Some specific info for SPI0 controller on Moorestown */
+/* Some specific info for SPI0 controller on Intel MID */
/* HW info for MRST CLk Control Unit, one 32b reg */
#define MRST_SPI_CLK_BASE 100000000 /* 100m */