Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig | 19
-rw-r--r--  drivers/dma/Makefile | 2
-rw-r--r--  drivers/dma/acpi-dma.c | 4
-rw-r--r--  drivers/dma/amba-pl08x.c | 524
-rw-r--r--  drivers/dma/coh901318.c | 26
-rw-r--r--  drivers/dma/cppi41.c | 1059
-rw-r--r--  drivers/dma/dmaengine.c | 109
-rw-r--r--  drivers/dma/dmatest.c | 182
-rw-r--r--  drivers/dma/dw/Kconfig | 1
-rw-r--r--  drivers/dma/dw/core.c | 39
-rw-r--r--  drivers/dma/dw/platform.c | 1
-rw-r--r--  drivers/dma/edma.c | 164
-rw-r--r--  drivers/dma/ep93xx_dma.c | 10
-rw-r--r--  drivers/dma/fsldma.c | 10
-rw-r--r--  drivers/dma/imx-dma.c | 6
-rw-r--r--  drivers/dma/imx-sdma.c | 179
-rw-r--r--  drivers/dma/ioat/dma_v3.c | 26
-rw-r--r--  drivers/dma/iop-adma.c | 6
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c | 6
-rw-r--r--  drivers/dma/k3dma.c | 837
-rw-r--r--  drivers/dma/mmp_pdma.c | 320
-rw-r--r--  drivers/dma/mmp_tdma.c | 6
-rw-r--r--  drivers/dma/mpc512x_dma.c | 10
-rw-r--r--  drivers/dma/mv_xor.c | 57
-rw-r--r--  drivers/dma/mv_xor.h | 28
-rw-r--r--  drivers/dma/mxs-dma.c | 27
-rw-r--r--  drivers/dma/of-dma.c | 3
-rw-r--r--  drivers/dma/pch_dma.c | 10
-rw-r--r--  drivers/dma/pl330.c | 179
-rw-r--r--  drivers/dma/sh/Kconfig | 10
-rw-r--r--  drivers/dma/sh/Makefile | 6
-rw-r--r--  drivers/dma/sh/rcar-hpbdma.c | 655
-rw-r--r--  drivers/dma/sh/shdma-arm.h | 51
-rw-r--r--  drivers/dma/sh/shdma-base.c | 26
-rw-r--r--  drivers/dma/sh/shdma-of.c | 5
-rw-r--r--  drivers/dma/sh/shdma-r8a73a4.c | 77
-rw-r--r--  drivers/dma/sh/shdma.h | 16
-rw-r--r--  drivers/dma/sh/shdmac.c (renamed from drivers/dma/sh/shdma.c) | 160
-rw-r--r--  drivers/dma/sh/sudmac.c | 22
-rw-r--r--  drivers/dma/sirf-dma.c | 134
-rw-r--r--  drivers/dma/ste_dma40.c | 23
-rw-r--r--  drivers/dma/tegra20-apb-dma.c | 8
-rw-r--r--  drivers/dma/timb_dma.c | 2
-rw-r--r--  drivers/dma/txx9dmac.c | 22
44 files changed, 4160 insertions(+), 907 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957c97f..526ec77c7ba 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -194,7 +194,7 @@ config SIRF_DMA
Enable support for the CSR SiRFprimaII DMA engine.
config TI_EDMA
- tristate "TI EDMA support"
+ bool "TI EDMA support"
depends on ARCH_DAVINCI || ARCH_OMAP
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
@@ -287,6 +287,14 @@ config DMA_OMAP
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
+config TI_CPPI41
+ tristate "AM33xx CPPI41 DMA support"
+ depends on ARCH_OMAP
+ select DMA_ENGINE
+ help
+ The Communications Port Programming Interface (CPPI) 4.1 DMA engine
+ is currently used by the USB driver on AM335x platforms.
+
config MMP_PDMA
bool "MMP PDMA support"
depends on (ARCH_MMP || ARCH_PXA)
@@ -300,6 +308,15 @@ config DMA_JZ4740
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
+config K3_DMA
+ tristate "Hisilicon K3 DMA support"
+ depends on ARCH_HI3xxx
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support the DMA engine for Hisilicon K3 platform
+ devices.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef8561..db89035b362 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,5 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_TI_CPPI41) += cppi41.o
+obj-$(CONFIG_K3_DMA) += k3dma.o
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 5a18f82f732..e69b03c0fa5 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -43,7 +43,6 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
struct list_head resource_list;
struct resource_list_entry *rentry;
resource_size_t mem = 0, irq = 0;
- u32 vendor_id;
int ret;
if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info))
@@ -73,9 +72,8 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
if (si->mmio_base_low != mem || si->gsi_interrupt != irq)
return 0;
- vendor_id = le32_to_cpu(grp->vendor_id);
dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
- (char *)&vendor_id, grp->device_id, grp->revision);
+ (char *)&grp->vendor_id, grp->device_id, grp->revision);
/* Check if the request line range is available */
if (si->base_request_line == 0 && si->num_handshake_signals == 0)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 06fe45c74de..fce46c5bf1c 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -24,6 +24,7 @@
*
* Documentation: ARM DDI 0196G == PL080
* Documentation: ARM DDI 0218E == PL081
+ * Documentation: S3C6410 User's Manual == PL080S
*
* PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
* channel.
@@ -36,6 +37,14 @@
*
* The PL080 has a dual bus master, PL081 has a single master.
*
+ * PL080S is a version modified by Samsung and used in S3C64xx SoCs.
+ * It differs in the following aspects:
+ * - CH_CONFIG register at different offset,
+ * - separate CH_CONTROL2 register for transfer size,
+ * - bigger maximum transfer size,
+ * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word,
+ * - no support for peripheral flow control.
+ *
* Memory to peripheral transfer may be visualized as
* Get data from memory to DMAC
* Until no data left
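The LLI layout difference called out above is why later hunks drop the fixed struct pl08x_lli in favour of raw u32 words. As a rough sketch (assuming the 8-word PL080S entry is the 4-word PL080 entry plus CCTL2 and alignment padding; the padding is an inference from the "8-word aligned" note, not spelled out by the patch):

#include <stdint.h>

/* Classic PL080/PL081 LLI: four 32-bit words. */
struct pl080_lli_sketch {
	uint32_t src;      /* source address */
	uint32_t dst;      /* destination address */
	uint32_t next;     /* next LLI; bit[0] selects the AHB master */
	uint32_t cctl;     /* control word, incl. transfer size */
};

/* Samsung PL080S LLI: CCTL2 carries the (larger) transfer size,
 * and each entry is padded out to an 8-word boundary. */
struct pl080s_lli_sketch {
	uint32_t src;
	uint32_t dst;
	uint32_t next;
	uint32_t cctl;     /* transfer size field unused on PL080S */
	uint32_t cctl2;    /* PL080S-only transfer size */
	uint32_t pad[3];   /* assumed padding to 8-word alignment */
};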
@@ -64,10 +73,7 @@
* - Peripheral flow control: the transfer size is ignored (and should be
* zero). The data is transferred from the current LLI entry, until
* after the final transfer signalled by LBREQ or LSREQ. The DMAC
- * will then move to the next LLI entry.
- *
- * Global TODO:
- * - Break out common code from arch/arm/mach-s3c64xx and share
+ * will then move to the next LLI entry. Unsupported by PL080S.
*/
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
@@ -100,24 +106,16 @@ struct pl08x_driver_data;
* @nomadik: whether the channels have Nomadik security extension bits
* that need to be checked for permission before use and some registers are
* missing
+ * @pl080s: whether this version is a PL080S, which has separate register and
+ * LLI word for transfer size.
*/
struct vendor_data {
+ u8 config_offset;
u8 channels;
bool dualmaster;
bool nomadik;
-};
-
-/*
- * PL08X private data structures
- * An LLI struct - see PL08x TRM. Note that next uses bit[0] as a bus bit,
- * start & end do not - their bus bit info is in cctl. Also note that these
- * are fixed 32-bit quantities.
- */
-struct pl08x_lli {
- u32 src;
- u32 dst;
- u32 lli;
- u32 cctl;
+ bool pl080s;
+ u32 max_transfer_size;
};
/**
@@ -133,6 +131,8 @@ struct pl08x_bus_data {
u8 buswidth;
};
+#define IS_BUS_ALIGNED(bus) IS_ALIGNED((bus)->addr, (bus)->buswidth)
+
/**
* struct pl08x_phy_chan - holder for the physical channels
* @id: physical index to this channel
@@ -145,6 +145,7 @@ struct pl08x_bus_data {
struct pl08x_phy_chan {
unsigned int id;
void __iomem *base;
+ void __iomem *reg_config;
spinlock_t lock;
struct pl08x_dma_chan *serving;
bool locked;
@@ -174,12 +175,13 @@ struct pl08x_sg {
* @ccfg: config reg values for current txd
* @done: this marks completed descriptors, which should not have their
* mux released.
+ * @cyclic: indicate cyclic transfers
*/
struct pl08x_txd {
struct virt_dma_desc vd;
struct list_head dsg_list;
dma_addr_t llis_bus;
- struct pl08x_lli *llis_va;
+ u32 *llis_va;
/* Default cctl value for LLIs */
u32 cctl;
/*
@@ -188,6 +190,7 @@ struct pl08x_txd {
*/
u32 ccfg;
bool done;
+ bool cyclic;
};
/**
@@ -263,17 +266,29 @@ struct pl08x_driver_data {
struct dma_pool *pool;
u8 lli_buses;
u8 mem_buses;
+ u8 lli_words;
};
/*
* PL08X specific defines
*/
-/* Size (bytes) of each LLI buffer allocated for one transfer */
-# define PL08X_LLI_TSFR_SIZE 0x2000
+/* The order of words in an LLI. */
+#define PL080_LLI_SRC 0
+#define PL080_LLI_DST 1
+#define PL080_LLI_LLI 2
+#define PL080_LLI_CCTL 3
+#define PL080S_LLI_CCTL2 4
+
+/* Total words in an LLI. */
+#define PL080_LLI_WORDS 4
+#define PL080S_LLI_WORDS 8
-/* Maximum times we call dma_pool_alloc on this pool without freeing */
-#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
+/*
+ * Number of LLIs in each LLI buffer allocated for one transfer
+ * (maximum times we call dma_pool_alloc on this pool without freeing)
+ */
+#define MAX_NUM_TSFR_LLIS 512
#define PL08X_ALIGN 8
static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
@@ -334,10 +349,39 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
unsigned int val;
- val = readl(ch->base + PL080_CH_CONFIG);
+ val = readl(ch->reg_config);
return val & PL080_CONFIG_ACTIVE;
}
+static void pl08x_write_lli(struct pl08x_driver_data *pl08x,
+ struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg)
+{
+ if (pl08x->vd->pl080s)
+ dev_vdbg(&pl08x->adev->dev,
+ "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
+ "clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n",
+ phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
+ lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL],
+ lli[PL080S_LLI_CCTL2], ccfg);
+ else
+ dev_vdbg(&pl08x->adev->dev,
+ "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
+ "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
+ phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
+ lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg);
+
+ writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR);
+ writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR);
+ writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI);
+ writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL);
+
+ if (pl08x->vd->pl080s)
+ writel_relaxed(lli[PL080S_LLI_CCTL2],
+ phychan->base + PL080S_CH_CONTROL2);
+
+ writel(ccfg, phychan->reg_config);
+}
+
/*
* Set the initial DMA register values i.e. those for the first LLI
* The next LLI pointer and the configuration interrupt bit have
@@ -350,7 +394,6 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
struct pl08x_phy_chan *phychan = plchan->phychan;
struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
- struct pl08x_lli *lli;
u32 val;
list_del(&txd->vd.node);
@@ -361,19 +404,7 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
while (pl08x_phy_channel_busy(phychan))
cpu_relax();
- lli = &txd->llis_va[0];
-
- dev_vdbg(&pl08x->adev->dev,
- "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
- "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
- phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
- txd->ccfg);
-
- writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
- writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
- writel(lli->lli, phychan->base + PL080_CH_LLI);
- writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
- writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);
+ pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg);
/* Enable the DMA channel */
/* Do not access config register until channel shows as disabled */
@@ -381,11 +412,11 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
cpu_relax();
/* Do not access config register until channel shows as inactive */
- val = readl(phychan->base + PL080_CH_CONFIG);
+ val = readl(phychan->reg_config);
while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
- val = readl(phychan->base + PL080_CH_CONFIG);
+ val = readl(phychan->reg_config);
- writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
+ writel(val | PL080_CONFIG_ENABLE, phychan->reg_config);
}
/*
@@ -404,9 +435,9 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
int timeout;
/* Set the HALT bit and wait for the FIFO to drain */
- val = readl(ch->base + PL080_CH_CONFIG);
+ val = readl(ch->reg_config);
val |= PL080_CONFIG_HALT;
- writel(val, ch->base + PL080_CH_CONFIG);
+ writel(val, ch->reg_config);
/* Wait for channel inactive */
for (timeout = 1000; timeout; timeout--) {
@@ -423,9 +454,9 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
u32 val;
/* Clear the HALT bit */
- val = readl(ch->base + PL080_CH_CONFIG);
+ val = readl(ch->reg_config);
val &= ~PL080_CONFIG_HALT;
- writel(val, ch->base + PL080_CH_CONFIG);
+ writel(val, ch->reg_config);
}
/*
@@ -437,12 +468,12 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
struct pl08x_phy_chan *ch)
{
- u32 val = readl(ch->base + PL080_CH_CONFIG);
+ u32 val = readl(ch->reg_config);
val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
PL080_CONFIG_TC_IRQ_MASK);
- writel(val, ch->base + PL080_CH_CONFIG);
+ writel(val, ch->reg_config);
writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
@@ -453,6 +484,28 @@ static inline u32 get_bytes_in_cctl(u32 cctl)
/* The source width defines the number of bytes */
u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;
+ cctl &= PL080_CONTROL_SWIDTH_MASK;
+
+ switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
+ case PL080_WIDTH_8BIT:
+ break;
+ case PL080_WIDTH_16BIT:
+ bytes *= 2;
+ break;
+ case PL080_WIDTH_32BIT:
+ bytes *= 4;
+ break;
+ }
+ return bytes;
+}
+
+static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1)
+{
+ /* The source width defines the number of bytes */
+ u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK;
+
+ cctl &= PL080_CONTROL_SWIDTH_MASK;
+
switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
case PL080_WIDTH_8BIT:
break;
@@ -469,47 +522,66 @@ static inline u32 get_bytes_in_cctl(u32 cctl)
/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
+ struct pl08x_driver_data *pl08x = plchan->host;
+ const u32 *llis_va, *llis_va_limit;
struct pl08x_phy_chan *ch;
+ dma_addr_t llis_bus;
struct pl08x_txd *txd;
- size_t bytes = 0;
+ u32 llis_max_words;
+ size_t bytes;
+ u32 clli;
ch = plchan->phychan;
txd = plchan->at;
+ if (!ch || !txd)
+ return 0;
+
/*
* Follow the LLIs to get the number of remaining
* bytes in the currently active transaction.
*/
- if (ch && txd) {
- u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
+ clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
- /* First get the remaining bytes in the active transfer */
+ /* First get the remaining bytes in the active transfer */
+ if (pl08x->vd->pl080s)
+ bytes = get_bytes_in_cctl_pl080s(
+ readl(ch->base + PL080_CH_CONTROL),
+ readl(ch->base + PL080S_CH_CONTROL2));
+ else
bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
- if (clli) {
- struct pl08x_lli *llis_va = txd->llis_va;
- dma_addr_t llis_bus = txd->llis_bus;
- int index;
+ if (!clli)
+ return bytes;
- BUG_ON(clli < llis_bus || clli >= llis_bus +
- sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);
+ llis_va = txd->llis_va;
+ llis_bus = txd->llis_bus;
- /*
- * Locate the next LLI - as this is an array,
- * it's simple maths to find.
- */
- index = (clli - llis_bus) / sizeof(struct pl08x_lli);
+ llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS;
+ BUG_ON(clli < llis_bus || clli >= llis_bus +
+ sizeof(u32) * llis_max_words);
- for (; index < MAX_NUM_TSFR_LLIS; index++) {
- bytes += get_bytes_in_cctl(llis_va[index].cctl);
+ /*
+ * Locate the next LLI - as this is an array,
+ * it's simple maths to find.
+ */
+ llis_va += (clli - llis_bus) / sizeof(u32);
- /*
- * A LLI pointer of 0 terminates the LLI list
- */
- if (!llis_va[index].lli)
- break;
- }
- }
+ llis_va_limit = llis_va + llis_max_words;
+
+ for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) {
+ if (pl08x->vd->pl080s)
+ bytes += get_bytes_in_cctl_pl080s(
+ llis_va[PL080_LLI_CCTL],
+ llis_va[PL080S_LLI_CCTL2]);
+ else
+ bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]);
+
+ /*
+ * An LLI pointer going backward terminates the LLI list
+ */
+ if (llis_va[PL080_LLI_LLI] <= clli)
+ break;
}
return bytes;
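The remaining-byte accounting above reduces to "transfer-size field times source bus width". A minimal stand-alone sketch of that arithmetic (the shift and mask values are taken from the PL080 register layout this driver uses; treat them as assumptions here):

#include <stdint.h>

#define SWIDTH_SHIFT 18        /* PL080_CONTROL_SWIDTH_SHIFT */
#define SWIDTH_MASK  (0x7u << SWIDTH_SHIFT)
#define TSIZE_MASK   0xfffu    /* PL080_CONTROL_TRANSFER_SIZE_MASK */

static uint32_t bytes_in_cctl_sketch(uint32_t cctl)
{
	uint32_t bytes = cctl & TSIZE_MASK;  /* counted in source-width units */

	switch ((cctl & SWIDTH_MASK) >> SWIDTH_SHIFT) {
	case 0:                  /* PL080_WIDTH_8BIT */
		break;
	case 1:                  /* PL080_WIDTH_16BIT */
		bytes *= 2;
		break;
	case 2:                  /* PL080_WIDTH_32BIT */
		bytes *= 4;
		break;
	}
	return bytes;            /* PL080S: same idea, size read from CCTL2 */
}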
@@ -720,6 +792,7 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
break;
}
+ tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK;
retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
return retbits;
}
@@ -764,20 +837,26 @@ static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
/*
 * Fills in one LLI for a certain transfer descriptor and advances the counter
*/
-static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
- int num_llis, int len, u32 cctl)
+static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
+ struct pl08x_lli_build_data *bd,
+ int num_llis, int len, u32 cctl, u32 cctl2)
{
- struct pl08x_lli *llis_va = bd->txd->llis_va;
+ u32 offset = num_llis * pl08x->lli_words;
+ u32 *llis_va = bd->txd->llis_va + offset;
dma_addr_t llis_bus = bd->txd->llis_bus;
BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
- llis_va[num_llis].cctl = cctl;
- llis_va[num_llis].src = bd->srcbus.addr;
- llis_va[num_llis].dst = bd->dstbus.addr;
- llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
- sizeof(struct pl08x_lli);
- llis_va[num_llis].lli |= bd->lli_bus;
+ /* Advance the offset to next LLI. */
+ offset += pl08x->lli_words;
+
+ llis_va[PL080_LLI_SRC] = bd->srcbus.addr;
+ llis_va[PL080_LLI_DST] = bd->dstbus.addr;
+ llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset);
+ llis_va[PL080_LLI_LLI] |= bd->lli_bus;
+ llis_va[PL080_LLI_CCTL] = cctl;
+ if (pl08x->vd->pl080s)
+ llis_va[PL080S_LLI_CCTL2] = cctl2;
if (cctl & PL080_CONTROL_SRC_INCR)
bd->srcbus.addr += len;
@@ -789,14 +868,53 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
bd->remainder -= len;
}
-static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
- u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
+static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x,
+ struct pl08x_lli_build_data *bd, u32 *cctl, u32 len,
+ int num_llis, size_t *total_bytes)
{
*cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
- pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
+ pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len);
(*total_bytes) += len;
}
+#ifdef VERBOSE_DEBUG
+static void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
+ const u32 *llis_va, int num_llis)
+{
+ int i;
+
+ if (pl08x->vd->pl080s) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%-3s %-9s %-10s %-10s %-10s %-10s %s\n",
+ "lli", "", "csrc", "cdst", "clli", "cctl", "cctl2");
+ for (i = 0; i < num_llis; i++) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, llis_va, llis_va[PL080_LLI_SRC],
+ llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
+ llis_va[PL080_LLI_CCTL],
+ llis_va[PL080S_LLI_CCTL2]);
+ llis_va += pl08x->lli_words;
+ }
+ } else {
+ dev_vdbg(&pl08x->adev->dev,
+ "%-3s %-9s %-10s %-10s %-10s %s\n",
+ "lli", "", "csrc", "cdst", "clli", "cctl");
+ for (i = 0; i < num_llis; i++) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, llis_va, llis_va[PL080_LLI_SRC],
+ llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
+ llis_va[PL080_LLI_CCTL]);
+ llis_va += pl08x->lli_words;
+ }
+ }
+}
+#else
+static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
+ const u32 *llis_va, int num_llis) {}
+#endif
+
/*
* This fills in the table of LLIs for the transfer descriptor
* Note that we assume we never have to change the burst sizes
@@ -810,7 +928,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
int num_llis = 0;
u32 cctl, early_bytes = 0;
size_t max_bytes_per_lli, total_bytes;
- struct pl08x_lli *llis_va;
+ u32 *llis_va, *last_lli;
struct pl08x_sg *dsg;
txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
@@ -845,10 +963,13 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
- dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
- bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
+ dev_vdbg(&pl08x->adev->dev,
+ "src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n",
+ (u64)bd.srcbus.addr,
+ cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
bd.srcbus.buswidth,
- bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
+ (u64)bd.dstbus.addr,
+ cctl & PL080_CONTROL_DST_INCR ? "+" : "",
bd.dstbus.buswidth,
bd.remainder);
dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
@@ -886,8 +1007,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
return 0;
}
- if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
- (bd.dstbus.addr % bd.dstbus.buswidth)) {
+ if (!IS_BUS_ALIGNED(&bd.srcbus) ||
+ !IS_BUS_ALIGNED(&bd.dstbus)) {
dev_err(&pl08x->adev->dev,
"%s src & dst address must be aligned to src"
" & dst width if peripheral is flow controller",
@@ -897,7 +1018,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
bd.dstbus.buswidth, 0);
- pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
+ pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
+ 0, cctl, 0);
break;
}
@@ -908,9 +1030,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
*/
if (bd.remainder < mbus->buswidth)
early_bytes = bd.remainder;
- else if ((mbus->addr) % (mbus->buswidth)) {
- early_bytes = mbus->buswidth - (mbus->addr) %
- (mbus->buswidth);
+ else if (!IS_BUS_ALIGNED(mbus)) {
+ early_bytes = mbus->buswidth -
+ (mbus->addr & (mbus->buswidth - 1));
if ((bd.remainder - early_bytes) < mbus->buswidth)
early_bytes = bd.remainder;
}
@@ -919,8 +1041,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
dev_vdbg(&pl08x->adev->dev,
"%s byte width LLIs (remain 0x%08x)\n",
__func__, bd.remainder);
- prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
- &total_bytes);
+ prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
+ num_llis++, &total_bytes);
}
if (bd.remainder) {
@@ -928,7 +1050,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
* Master now aligned
* - if slave is not then we must set its width down
*/
- if (sbus->addr % sbus->buswidth) {
+ if (!IS_BUS_ALIGNED(sbus)) {
dev_dbg(&pl08x->adev->dev,
"%s set down bus width to one byte\n",
__func__);
@@ -941,7 +1063,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
* MIN(buswidths)
*/
max_bytes_per_lli = bd.srcbus.buswidth *
- PL080_CONTROL_TRANSFER_SIZE_MASK;
+ pl08x->vd->max_transfer_size;
dev_vdbg(&pl08x->adev->dev,
"%s max bytes per lli = %zu\n",
__func__, max_bytes_per_lli);
@@ -976,8 +1098,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
bd.dstbus.buswidth, tsize);
- pl08x_fill_lli_for_desc(&bd, num_llis++,
- lli_len, cctl);
+ pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
+ lli_len, cctl, tsize);
total_bytes += lli_len;
}
@@ -988,8 +1110,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
dev_vdbg(&pl08x->adev->dev,
"%s align with boundary, send odd bytes (remain %zu)\n",
__func__, bd.remainder);
- prep_byte_width_lli(&bd, &cctl, bd.remainder,
- num_llis++, &total_bytes);
+ prep_byte_width_lli(pl08x, &bd, &cctl,
+ bd.remainder, num_llis++, &total_bytes);
}
}
@@ -1003,33 +1125,25 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
if (num_llis >= MAX_NUM_TSFR_LLIS) {
dev_err(&pl08x->adev->dev,
"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
- __func__, (u32) MAX_NUM_TSFR_LLIS);
+ __func__, MAX_NUM_TSFR_LLIS);
return 0;
}
}
llis_va = txd->llis_va;
- /* The final LLI terminates the LLI. */
- llis_va[num_llis - 1].lli = 0;
- /* The final LLI element shall also fire an interrupt. */
- llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
+ last_lli = llis_va + (num_llis - 1) * pl08x->lli_words;
-#ifdef VERBOSE_DEBUG
- {
- int i;
-
- dev_vdbg(&pl08x->adev->dev,
- "%-3s %-9s %-10s %-10s %-10s %s\n",
- "lli", "", "csrc", "cdst", "clli", "cctl");
- for (i = 0; i < num_llis; i++) {
- dev_vdbg(&pl08x->adev->dev,
- "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i, &llis_va[i], llis_va[i].src,
- llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
- );
- }
+ if (txd->cyclic) {
+ /* Link back to the first LLI. */
+ last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus;
+ } else {
+ /* The final LLI terminates the LLI list. */
+ last_lli[PL080_LLI_LLI] = 0;
+ /* The final LLI element shall also fire an interrupt. */
+ last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN;
}
-#endif
+
+ pl08x_dump_lli(pl08x, llis_va, num_llis);
return num_llis;
}
@@ -1305,6 +1419,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
if (!plchan->slave)
return -EINVAL;
@@ -1314,6 +1429,13 @@ static int dma_set_runtime_config(struct dma_chan *chan,
config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
return -EINVAL;
+ if (config->device_fc && pl08x->vd->pl080s) {
+ dev_err(&pl08x->adev->dev,
+ "%s: PL080S does not support peripheral flow control\n",
+ __func__);
+ return -EINVAL;
+ }
+
plchan->cfg = *config;
return 0;
@@ -1404,25 +1526,19 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
-static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+static struct pl08x_txd *pl08x_init_txd(
+ struct dma_chan *chan,
+ enum dma_transfer_direction direction,
+ dma_addr_t *slave_addr)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_txd *txd;
- struct pl08x_sg *dsg;
- struct scatterlist *sg;
enum dma_slave_buswidth addr_width;
- dma_addr_t slave_addr;
int ret, tmp;
u8 src_buses, dst_buses;
u32 maxburst, cctl;
- dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
- __func__, sg_dma_len(sgl), plchan->name);
-
txd = pl08x_get_txd(plchan);
if (!txd) {
dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
@@ -1436,14 +1552,14 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
*/
if (direction == DMA_MEM_TO_DEV) {
cctl = PL080_CONTROL_SRC_INCR;
- slave_addr = plchan->cfg.dst_addr;
+ *slave_addr = plchan->cfg.dst_addr;
addr_width = plchan->cfg.dst_addr_width;
maxburst = plchan->cfg.dst_maxburst;
src_buses = pl08x->mem_buses;
dst_buses = plchan->cd->periph_buses;
} else if (direction == DMA_DEV_TO_MEM) {
cctl = PL080_CONTROL_DST_INCR;
- slave_addr = plchan->cfg.src_addr;
+ *slave_addr = plchan->cfg.src_addr;
addr_width = plchan->cfg.src_addr_width;
maxburst = plchan->cfg.src_maxburst;
src_buses = plchan->cd->periph_buses;
@@ -1492,24 +1608,107 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
else
txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
+ return txd;
+}
+
+static int pl08x_tx_add_sg(struct pl08x_txd *txd,
+ enum dma_transfer_direction direction,
+ dma_addr_t slave_addr,
+ dma_addr_t buf_addr,
+ unsigned int len)
+{
+ struct pl08x_sg *dsg;
+
+ dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
+ if (!dsg)
+ return -ENOMEM;
+
+ list_add_tail(&dsg->node, &txd->dsg_list);
+
+ dsg->len = len;
+ if (direction == DMA_MEM_TO_DEV) {
+ dsg->src_addr = buf_addr;
+ dsg->dst_addr = slave_addr;
+ } else {
+ dsg->src_addr = slave_addr;
+ dsg->dst_addr = buf_addr;
+ }
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_txd *txd;
+ struct scatterlist *sg;
+ int ret, tmp;
+ dma_addr_t slave_addr;
+
+ dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
+ __func__, sg_dma_len(sgl), plchan->name);
+
+ txd = pl08x_init_txd(chan, direction, &slave_addr);
+ if (!txd)
+ return NULL;
+
for_each_sg(sgl, sg, sg_len, tmp) {
- dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
- if (!dsg) {
+ ret = pl08x_tx_add_sg(txd, direction, slave_addr,
+ sg_dma_address(sg),
+ sg_dma_len(sg));
+ if (ret) {
pl08x_release_mux(plchan);
pl08x_free_txd(pl08x, txd);
dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
__func__);
return NULL;
}
- list_add_tail(&dsg->node, &txd->dsg_list);
+ }
- dsg->len = sg_dma_len(sg);
- if (direction == DMA_MEM_TO_DEV) {
- dsg->src_addr = sg_dma_address(sg);
- dsg->dst_addr = slave_addr;
- } else {
- dsg->src_addr = slave_addr;
- dsg->dst_addr = sg_dma_address(sg);
+ ret = pl08x_fill_llis_for_desc(plchan->host, txd);
+ if (!ret) {
+ pl08x_release_mux(plchan);
+ pl08x_free_txd(pl08x, txd);
+ return NULL;
+ }
+
+ return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_txd *txd;
+ int ret, tmp;
+ dma_addr_t slave_addr;
+
+ dev_dbg(&pl08x->adev->dev,
+ "%s prepare cyclic transaction of %d/%d bytes %s %s\n",
+ __func__, period_len, buf_len,
+ direction == DMA_MEM_TO_DEV ? "to" : "from",
+ plchan->name);
+
+ txd = pl08x_init_txd(chan, direction, &slave_addr);
+ if (!txd)
+ return NULL;
+
+ txd->cyclic = true;
+ txd->cctl |= PL080_CONTROL_TC_IRQ_EN;
+ for (tmp = 0; tmp < buf_len; tmp += period_len) {
+ ret = pl08x_tx_add_sg(txd, direction, slave_addr,
+ buf_addr + tmp, period_len);
+ if (ret) {
+ pl08x_release_mux(plchan);
+ pl08x_free_txd(pl08x, txd);
+ return NULL;
}
}
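Clients reach this new cyclic path through the generic dmaengine API rather than by calling pl08x_prep_dma_cyclic() directly. A hedged sketch of typical usage (buffer and period sizes are illustrative; the callback fires once per period via vchan_cyclic_callback() in the IRQ handler below):

#include <linux/dmaengine.h>

/* Sketch: start a cyclic mem-to-device transfer over a ring buffer. */
static void start_cyclic_playback(struct dma_chan *chan, dma_addr_t buf,
				  void (*period_done)(void *arg), void *ctx)
{
	struct dma_async_tx_descriptor *desc;
	size_t buf_len = 4096, period_len = 1024;   /* illustrative sizes */

	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return;

	desc->callback = period_done;   /* runs once per completed period */
	desc->callback_param = ctx;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
}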
@@ -1652,7 +1851,9 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
spin_lock(&plchan->vc.lock);
tx = plchan->at;
- if (tx) {
+ if (tx && tx->cyclic) {
+ vchan_cyclic_callback(&tx->vd);
+ } else if (tx) {
plchan->at = NULL;
/*
* This descriptor is done, release its mux
@@ -1846,6 +2047,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
struct pl08x_driver_data *pl08x;
const struct vendor_data *vd = id->data;
+ u32 tsfr_size;
int ret = 0;
int i;
@@ -1873,6 +2075,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
/* Initialize slave engine */
dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
pl08x->slave.dev = &adev->dev;
pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
@@ -1880,6 +2083,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->slave.device_tx_status = pl08x_dma_tx_status;
pl08x->slave.device_issue_pending = pl08x_issue_pending;
pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
+ pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
pl08x->slave.device_control = pl08x_control;
/* Get the platform data */
@@ -1902,9 +2106,15 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->mem_buses = pl08x->pd->mem_buses;
}
+ if (vd->pl080s)
+ pl08x->lli_words = PL080S_LLI_WORDS;
+ else
+ pl08x->lli_words = PL080_LLI_WORDS;
+ tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32);
+
/* A DMA memory pool for LLIs, align on 1-byte boundary */
pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
- PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
+ tsfr_size, PL08X_ALIGN, 0);
if (!pl08x->pool) {
ret = -ENOMEM;
goto out_no_lli_pool;
@@ -1947,6 +2157,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
ch->id = i;
ch->base = pl08x->base + PL080_Cx_BASE(i);
+ ch->reg_config = ch->base + vd->config_offset;
spin_lock_init(&ch->lock);
/*
@@ -1957,7 +2168,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
if (vd->nomadik) {
u32 val;
- val = readl(ch->base + PL080_CH_CONFIG);
+ val = readl(ch->reg_config);
if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
ch->locked = true;
@@ -2008,8 +2219,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
amba_set_drvdata(adev, pl08x);
init_pl08x_debugfs(pl08x);
- dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
- amba_part(adev), amba_rev(adev),
+ dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n",
+ amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev),
(unsigned long long)adev->res.start, adev->irq[0]);
return 0;
@@ -2038,22 +2249,41 @@ out_no_pl08x:
/* The PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
+ .config_offset = PL080_CH_CONFIG,
.channels = 8,
.dualmaster = true,
+ .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};
static struct vendor_data vendor_nomadik = {
+ .config_offset = PL080_CH_CONFIG,
.channels = 8,
.dualmaster = true,
.nomadik = true,
+ .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
+};
+
+static struct vendor_data vendor_pl080s = {
+ .config_offset = PL080S_CH_CONFIG,
+ .channels = 8,
+ .pl080s = true,
+ .max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK,
};
static struct vendor_data vendor_pl081 = {
+ .config_offset = PL080_CH_CONFIG,
.channels = 2,
.dualmaster = false,
+ .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};
static struct amba_id pl08x_ids[] = {
+ /* Samsung PL080S variant */
+ {
+ .id = 0x0a141080,
+ .mask = 0xffffffff,
+ .data = &vendor_pl080s,
+ },
/* PL080 */
{
.id = 0x00041080,
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 9bfaddd57ef..31011d2a26f 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1339,15 +1339,14 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
{
u64 started_channels = debugfs_dma_base->pm.started_channels;
int pool_count = debugfs_dma_base->pool.debugfs_pool_counter;
- int i;
- int ret = 0;
char *dev_buf;
char *tmp;
- int dev_size;
+ int ret;
+ int i;
dev_buf = kmalloc(4*1024, GFP_KERNEL);
if (dev_buf == NULL)
- goto err_kmalloc;
+ return -ENOMEM;
tmp = dev_buf;
tmp += sprintf(tmp, "DMA -- enabled dma channels\n");
@@ -1357,26 +1356,11 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
tmp += sprintf(tmp, "channel %d\n", i);
tmp += sprintf(tmp, "Pool alloc nbr %d\n", pool_count);
- dev_size = tmp - dev_buf;
-
- /* No more to read if offset != 0 */
- if (*f_pos > dev_size)
- goto out;
- if (count > dev_size - *f_pos)
- count = dev_size - *f_pos;
-
- if (copy_to_user(buf, dev_buf + *f_pos, count))
- ret = -EINVAL;
- ret = count;
- *f_pos += count;
-
- out:
+ ret = simple_read_from_buffer(buf, count, f_pos, dev_buf,
+ tmp - dev_buf);
kfree(dev_buf);
return ret;
-
- err_kmalloc:
- return 0;
}
static const struct file_operations coh901318_debugfs_status_operations = {
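The simple_read_from_buffer() helper adopted above does the *f_pos clamping and the copy_to_user() in one call, which is what the deleted hand-rolled code attempted (note that the old code even overwrote its -EINVAL with count). A minimal sketch of the same pattern in a fresh debugfs read handler (names and contents illustrative):

#include <linux/fs.h>
#include <linux/kernel.h>

static ssize_t my_status_read(struct file *file, char __user *buf,
			      size_t count, loff_t *f_pos)
{
	char tmp[64];
	int len;

	len = scnprintf(tmp, sizeof(tmp), "started channels: %d\n", 3);
	/* clamps count to what is left, copies out, advances *f_pos */
	return simple_read_from_buffer(buf, count, f_pos, tmp, len);
}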
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
new file mode 100644
index 00000000000..7c82b92f9b1
--- /dev/null
+++ b/drivers/dma/cppi41.c
@@ -0,0 +1,1059 @@
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/pm_runtime.h>
+#include "dmaengine.h"
+
+#define DESC_TYPE 27
+#define DESC_TYPE_HOST 0x10
+#define DESC_TYPE_TEARD 0x13
+
+#define TD_DESC_IS_RX (1 << 16)
+#define TD_DESC_DMA_NUM 10
+
+#define DESC_LENGTH_BITS_NUM 21
+
+#define DESC_TYPE_USB (5 << 26)
+#define DESC_PD_COMPLETE (1 << 31)
+
+/* DMA engine */
+#define DMA_TDFDQ 4
+#define DMA_TXGCR(x) (0x800 + (x) * 0x20)
+#define DMA_RXGCR(x) (0x808 + (x) * 0x20)
+#define RXHPCRA0 4
+
+#define GCR_CHAN_ENABLE (1 << 31)
+#define GCR_TEARDOWN (1 << 30)
+#define GCR_STARV_RETRY (1 << 24)
+#define GCR_DESC_TYPE_HOST (1 << 14)
+
+/* DMA scheduler */
+#define DMA_SCHED_CTRL 0
+#define DMA_SCHED_CTRL_EN (1 << 31)
+#define DMA_SCHED_WORD(x) ((x) * 4 + 0x800)
+
+#define SCHED_ENTRY0_CHAN(x) ((x) << 0)
+#define SCHED_ENTRY0_IS_RX (1 << 7)
+
+#define SCHED_ENTRY1_CHAN(x) ((x) << 8)
+#define SCHED_ENTRY1_IS_RX (1 << 15)
+
+#define SCHED_ENTRY2_CHAN(x) ((x) << 16)
+#define SCHED_ENTRY2_IS_RX (1 << 23)
+
+#define SCHED_ENTRY3_CHAN(x) ((x) << 24)
+#define SCHED_ENTRY3_IS_RX (1 << 31)
+
+/* Queue manager */
+/* 4 KiB of memory for descriptors, 2 for each endpoint */
+#define ALLOC_DECS_NUM 128
+#define DESCS_AREAS 1
+#define TOTAL_DESCS_NUM (ALLOC_DECS_NUM * DESCS_AREAS)
+#define QMGR_SCRATCH_SIZE (TOTAL_DESCS_NUM * 4)
+
+#define QMGR_LRAM0_BASE 0x80
+#define QMGR_LRAM_SIZE 0x84
+#define QMGR_LRAM1_BASE 0x88
+#define QMGR_MEMBASE(x) (0x1000 + (x) * 0x10)
+#define QMGR_MEMCTRL(x) (0x1004 + (x) * 0x10)
+#define QMGR_MEMCTRL_IDX_SH 16
+#define QMGR_MEMCTRL_DESC_SH 8
+
+#define QMGR_NUM_PEND 5
+#define QMGR_PEND(x) (0x90 + (x) * 4)
+
+#define QMGR_PENDING_SLOT_Q(x) (x / 32)
+#define QMGR_PENDING_BIT_Q(x) (x % 32)
+
+#define QMGR_QUEUE_A(n) (0x2000 + (n) * 0x10)
+#define QMGR_QUEUE_B(n) (0x2004 + (n) * 0x10)
+#define QMGR_QUEUE_C(n) (0x2008 + (n) * 0x10)
+#define QMGR_QUEUE_D(n) (0x200c + (n) * 0x10)
+
+/* Glue layer specific */
+/* USBSS / USB AM335x */
+#define USBSS_IRQ_STATUS 0x28
+#define USBSS_IRQ_ENABLER 0x2c
+#define USBSS_IRQ_CLEARR 0x30
+
+#define USBSS_IRQ_PD_COMP (1 << 2)
+
+struct cppi41_channel {
+ struct dma_chan chan;
+ struct dma_async_tx_descriptor txd;
+ struct cppi41_dd *cdd;
+ struct cppi41_desc *desc;
+ dma_addr_t desc_phys;
+ void __iomem *gcr_reg;
+ int is_tx;
+ u32 residue;
+
+ unsigned int q_num;
+ unsigned int q_comp_num;
+ unsigned int port_num;
+
+ unsigned td_retry;
+ unsigned td_queued:1;
+ unsigned td_seen:1;
+ unsigned td_desc_seen:1;
+};
+
+struct cppi41_desc {
+ u32 pd0;
+ u32 pd1;
+ u32 pd2;
+ u32 pd3;
+ u32 pd4;
+ u32 pd5;
+ u32 pd6;
+ u32 pd7;
+} __aligned(32);
+
+struct chan_queues {
+ u16 submit;
+ u16 complete;
+};
+
+struct cppi41_dd {
+ struct dma_device ddev;
+
+ void *qmgr_scratch;
+ dma_addr_t scratch_phys;
+
+ struct cppi41_desc *cd;
+ dma_addr_t descs_phys;
+ u32 first_td_desc;
+ struct cppi41_channel *chan_busy[ALLOC_DECS_NUM];
+
+ void __iomem *usbss_mem;
+ void __iomem *ctrl_mem;
+ void __iomem *sched_mem;
+ void __iomem *qmgr_mem;
+ unsigned int irq;
+ const struct chan_queues *queues_rx;
+ const struct chan_queues *queues_tx;
+ struct chan_queues td_queue;
+};
+
+#define FIST_COMPLETION_QUEUE 93
+static struct chan_queues usb_queues_tx[] = {
+ /* USB0 ENDP 1 */
+ [ 0] = { .submit = 32, .complete = 93},
+ [ 1] = { .submit = 34, .complete = 94},
+ [ 2] = { .submit = 36, .complete = 95},
+ [ 3] = { .submit = 38, .complete = 96},
+ [ 4] = { .submit = 40, .complete = 97},
+ [ 5] = { .submit = 42, .complete = 98},
+ [ 6] = { .submit = 44, .complete = 99},
+ [ 7] = { .submit = 46, .complete = 100},
+ [ 8] = { .submit = 48, .complete = 101},
+ [ 9] = { .submit = 50, .complete = 102},
+ [10] = { .submit = 52, .complete = 103},
+ [11] = { .submit = 54, .complete = 104},
+ [12] = { .submit = 56, .complete = 105},
+ [13] = { .submit = 58, .complete = 106},
+ [14] = { .submit = 60, .complete = 107},
+
+ /* USB1 ENDP1 */
+ [15] = { .submit = 62, .complete = 125},
+ [16] = { .submit = 64, .complete = 126},
+ [17] = { .submit = 66, .complete = 127},
+ [18] = { .submit = 68, .complete = 128},
+ [19] = { .submit = 70, .complete = 129},
+ [20] = { .submit = 72, .complete = 130},
+ [21] = { .submit = 74, .complete = 131},
+ [22] = { .submit = 76, .complete = 132},
+ [23] = { .submit = 78, .complete = 133},
+ [24] = { .submit = 80, .complete = 134},
+ [25] = { .submit = 82, .complete = 135},
+ [26] = { .submit = 84, .complete = 136},
+ [27] = { .submit = 86, .complete = 137},
+ [28] = { .submit = 88, .complete = 138},
+ [29] = { .submit = 90, .complete = 139},
+};
+
+static const struct chan_queues usb_queues_rx[] = {
+ /* USB0 ENDP 1 */
+ [ 0] = { .submit = 1, .complete = 109},
+ [ 1] = { .submit = 2, .complete = 110},
+ [ 2] = { .submit = 3, .complete = 111},
+ [ 3] = { .submit = 4, .complete = 112},
+ [ 4] = { .submit = 5, .complete = 113},
+ [ 5] = { .submit = 6, .complete = 114},
+ [ 6] = { .submit = 7, .complete = 115},
+ [ 7] = { .submit = 8, .complete = 116},
+ [ 8] = { .submit = 9, .complete = 117},
+ [ 9] = { .submit = 10, .complete = 118},
+ [10] = { .submit = 11, .complete = 119},
+ [11] = { .submit = 12, .complete = 120},
+ [12] = { .submit = 13, .complete = 121},
+ [13] = { .submit = 14, .complete = 122},
+ [14] = { .submit = 15, .complete = 123},
+
+ /* USB1 ENDP 1 */
+ [15] = { .submit = 16, .complete = 141},
+ [16] = { .submit = 17, .complete = 142},
+ [17] = { .submit = 18, .complete = 143},
+ [18] = { .submit = 19, .complete = 144},
+ [19] = { .submit = 20, .complete = 145},
+ [20] = { .submit = 21, .complete = 146},
+ [21] = { .submit = 22, .complete = 147},
+ [22] = { .submit = 23, .complete = 148},
+ [23] = { .submit = 24, .complete = 149},
+ [24] = { .submit = 25, .complete = 150},
+ [25] = { .submit = 26, .complete = 151},
+ [26] = { .submit = 27, .complete = 152},
+ [27] = { .submit = 28, .complete = 153},
+ [28] = { .submit = 29, .complete = 154},
+ [29] = { .submit = 30, .complete = 155},
+};
+
+struct cppi_glue_infos {
+ irqreturn_t (*isr)(int irq, void *data);
+ const struct chan_queues *queues_rx;
+ const struct chan_queues *queues_tx;
+ struct chan_queues td_queue;
+};
+
+static struct cppi41_channel *to_cpp41_chan(struct dma_chan *c)
+{
+ return container_of(c, struct cppi41_channel, chan);
+}
+
+static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
+{
+ struct cppi41_channel *c;
+ u32 descs_size;
+ u32 desc_num;
+
+ descs_size = sizeof(struct cppi41_desc) * ALLOC_DECS_NUM;
+
+ if (!((desc >= cdd->descs_phys) &&
+ (desc < (cdd->descs_phys + descs_size)))) {
+ return NULL;
+ }
+
+ desc_num = (desc - cdd->descs_phys) / sizeof(struct cppi41_desc);
+ BUG_ON(desc_num >= ALLOC_DECS_NUM);
+ c = cdd->chan_busy[desc_num];
+ cdd->chan_busy[desc_num] = NULL;
+ return c;
+}
+
+static void cppi_writel(u32 val, void *__iomem *mem)
+{
+ __raw_writel(val, mem);
+}
+
+static u32 cppi_readl(void *__iomem *mem)
+{
+ return __raw_readl(mem);
+}
+
+static u32 pd_trans_len(u32 val)
+{
+ return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1);
+}
+
+static irqreturn_t cppi41_irq(int irq, void *data)
+{
+ struct cppi41_dd *cdd = data;
+ struct cppi41_channel *c;
+ u32 status;
+ int i;
+
+ status = cppi_readl(cdd->usbss_mem + USBSS_IRQ_STATUS);
+ if (!(status & USBSS_IRQ_PD_COMP))
+ return IRQ_NONE;
+ cppi_writel(status, cdd->usbss_mem + USBSS_IRQ_STATUS);
+
+ for (i = QMGR_PENDING_SLOT_Q(FIST_COMPLETION_QUEUE); i < QMGR_NUM_PEND;
+ i++) {
+ u32 val;
+ u32 q_num;
+
+ val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i));
+ if (i == QMGR_PENDING_SLOT_Q(FIST_COMPLETION_QUEUE) && val) {
+ u32 mask;
+ /* set the bit corresponding to completion Q 93 */
+ mask = 1 << QMGR_PENDING_BIT_Q(FIST_COMPLETION_QUEUE);
+ /* now set all bits for queues below Q 93 */
+ mask--;
+ /* now invert and keep only Q 93+ set */
+ val &= ~mask;
+ }
+
+ if (val)
+ __iormb();
+
+ while (val) {
+ u32 desc;
+
+ q_num = __fls(val);
+ val &= ~(1 << q_num);
+ q_num += 32 * i;
+ desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(q_num));
+ desc &= ~0x1f;
+ c = desc_to_chan(cdd, desc);
+ if (WARN_ON(!c)) {
+ pr_err("%s() q %d desc %08x\n", __func__,
+ q_num, desc);
+ continue;
+ }
+ c->residue = pd_trans_len(c->desc->pd6) -
+ pd_trans_len(c->desc->pd0);
+
+ dma_cookie_complete(&c->txd);
+ c->txd.callback(c->txd.callback_param);
+ }
+ }
+ return IRQ_HANDLED;
+}
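The pending-queue scan above relies on QMGR_PENDING_SLOT_Q()/QMGR_PENDING_BIT_Q() to map a queue number onto one bit in one of the five 32-bit pending registers. A small worked example of just that arithmetic (plain C, outside the kernel):

#include <stdio.h>

#define QMGR_PENDING_SLOT_Q(x) ((x) / 32)
#define QMGR_PENDING_BIT_Q(x)  ((x) % 32)

int main(void)
{
	int q = 93;   /* FIST_COMPLETION_QUEUE, the first USB completion queue */

	/* prints: queue 93 -> slot 2, bit 29 */
	printf("queue %d -> slot %d, bit %d\n",
	       q, QMGR_PENDING_SLOT_Q(q), QMGR_PENDING_BIT_Q(q));
	return 0;
}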
+
+static dma_cookie_t cppi41_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ dma_cookie_t cookie;
+
+ cookie = dma_cookie_assign(tx);
+
+ return cookie;
+}
+
+static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct cppi41_channel *c = to_cpp41_chan(chan);
+
+ dma_cookie_init(chan);
+ dma_async_tx_descriptor_init(&c->txd, chan);
+ c->txd.tx_submit = cppi41_tx_submit;
+
+ if (!c->is_tx)
+ cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);
+
+ return 0;
+}
+
+static void cppi41_dma_free_chan_resources(struct dma_chan *chan)
+{
+}
+
+static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct cppi41_channel *c = to_cpp41_chan(chan);
+ enum dma_status ret;
+
+ /* lock */
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (txstate && ret == DMA_SUCCESS)
+ txstate->residue = c->residue;
+ /* unlock */
+
+ return ret;
+}
+
+static void push_desc_queue(struct cppi41_channel *c)
+{
+ struct cppi41_dd *cdd = c->cdd;
+ u32 desc_num;
+ u32 desc_phys;
+ u32 reg;
+
+ desc_phys = lower_32_bits(c->desc_phys);
+ desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
+ WARN_ON(cdd->chan_busy[desc_num]);
+ cdd->chan_busy[desc_num] = c;
+
+ reg = (sizeof(struct cppi41_desc) - 24) / 4;
+ reg |= desc_phys;
+ cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
+}
+
+static void cppi41_dma_issue_pending(struct dma_chan *chan)
+{
+ struct cppi41_channel *c = to_cpp41_chan(chan);
+ u32 reg;
+
+ c->residue = 0;
+
+ reg = GCR_CHAN_ENABLE;
+ if (!c->is_tx) {
+ reg |= GCR_STARV_RETRY;
+ reg |= GCR_DESC_TYPE_HOST;
+ reg |= c->q_comp_num;
+ }
+
+ cppi_writel(reg, c->gcr_reg);
+
+ /*
+ * We don't use writel() but __raw_writel(), so we have to make sure
+ * that the DMA descriptor in coherent memory has made it to main memory
+ * before starting the DMA engine.
+ */
+ __iowmb();
+ push_desc_queue(c);
+}
+
+static u32 get_host_pd0(u32 length)
+{
+ u32 reg;
+
+ reg = DESC_TYPE_HOST << DESC_TYPE;
+ reg |= length;
+
+ return reg;
+}
+
+static u32 get_host_pd1(struct cppi41_channel *c)
+{
+ u32 reg;
+
+ reg = 0;
+
+ return reg;
+}
+
+static u32 get_host_pd2(struct cppi41_channel *c)
+{
+ u32 reg;
+
+ reg = DESC_TYPE_USB;
+ reg |= c->q_comp_num;
+
+ return reg;
+}
+
+static u32 get_host_pd3(u32 length)
+{
+ u32 reg;
+
+ /* PD3 = packet size */
+ reg = length;
+
+ return reg;
+}
+
+static u32 get_host_pd6(u32 length)
+{
+ u32 reg;
+
+ /* PD6 buffer size */
+ reg = DESC_PD_COMPLETE;
+ reg |= length;
+
+ return reg;
+}
+
+static u32 get_host_pd4_or_7(u32 addr)
+{
+ u32 reg;
+
+ reg = addr;
+
+ return reg;
+}
+
+static u32 get_host_pd5(void)
+{
+ u32 reg;
+
+ reg = 0;
+
+ return reg;
+}
+
+static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned sg_len,
+ enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
+{
+ struct cppi41_channel *c = to_cpp41_chan(chan);
+ struct cppi41_desc *d;
+ struct scatterlist *sg;
+ unsigned int i;
+ unsigned int num;
+
+ num = 0;
+ d = c->desc;
+ for_each_sg(sgl, sg, sg_len, i) {
+ u32 addr;
+ u32 len;
+
+ /* We need to use more than one desc once musb supports sg */
+ BUG_ON(num > 0);
+ addr = lower_32_bits(sg_dma_address(sg));
+ len = sg_dma_len(sg);
+
+ d->pd0 = get_host_pd0(len);
+ d->pd1 = get_host_pd1(c);
+ d->pd2 = get_host_pd2(c);
+ d->pd3 = get_host_pd3(len);
+ d->pd4 = get_host_pd4_or_7(addr);
+ d->pd5 = get_host_pd5();
+ d->pd6 = get_host_pd6(len);
+ d->pd7 = get_host_pd4_or_7(addr);
+
+ d++;
+ }
+
+ return &c->txd;
+}
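Put together, the get_host_pd*() helpers fill the eight-word host descriptor as summarized below. The field meanings are read off the helpers themselves; anything beyond that is an assumption about the CPPI 4.1 descriptor format.

/*
 * Sketch of the host packet descriptor built by cppi41_dma_prep_slave_sg():
 *
 *   pd0  DESC_TYPE_HOST in bits [31:27], transfer length in the low bits
 *   pd1  0
 *   pd2  DESC_TYPE_USB | completion queue number
 *   pd3  packet size (equals the length here)
 *   pd4  buffer address (low 32 bits)
 *   pd5  0
 *   pd6  DESC_PD_COMPLETE | buffer size
 *   pd7  original buffer address (same as pd4)
 */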
+
+static int cpp41_cfg_chan(struct cppi41_channel *c,
+ struct dma_slave_config *cfg)
+{
+ return 0;
+}
+
+static void cppi41_compute_td_desc(struct cppi41_desc *d)
+{
+ d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
+}
+
+static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
+{
+ u32 desc;
+
+ desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num));
+ desc &= ~0x1f;
+ return desc;
+}
+
+static int cppi41_tear_down_chan(struct cppi41_channel *c)
+{
+ struct cppi41_dd *cdd = c->cdd;
+ struct cppi41_desc *td;
+ u32 reg;
+ u32 desc_phys;
+ u32 td_desc_phys;
+
+ td = cdd->cd;
+ td += cdd->first_td_desc;
+
+ td_desc_phys = cdd->descs_phys;
+ td_desc_phys += cdd->first_td_desc * sizeof(struct cppi41_desc);
+
+ if (!c->td_queued) {
+ cppi41_compute_td_desc(td);
+ __iowmb();
+
+ reg = (sizeof(struct cppi41_desc) - 24) / 4;
+ reg |= td_desc_phys;
+ cppi_writel(reg, cdd->qmgr_mem +
+ QMGR_QUEUE_D(cdd->td_queue.submit));
+
+ reg = GCR_CHAN_ENABLE;
+ if (!c->is_tx) {
+ reg |= GCR_STARV_RETRY;
+ reg |= GCR_DESC_TYPE_HOST;
+ reg |= c->q_comp_num;
+ }
+ reg |= GCR_TEARDOWN;
+ cppi_writel(reg, c->gcr_reg);
+ c->td_queued = 1;
+ c->td_retry = 100;
+ }
+
+ if (!c->td_seen) {
+ unsigned td_comp_queue;
+
+ if (c->is_tx)
+ td_comp_queue = cdd->td_queue.complete;
+ else
+ td_comp_queue = c->q_comp_num;
+
+ desc_phys = cppi41_pop_desc(cdd, td_comp_queue);
+ if (desc_phys) {
+ __iormb();
+
+ if (desc_phys == td_desc_phys) {
+ u32 pd0;
+ pd0 = td->pd0;
+ WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD);
+ WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX));
+ WARN_ON((pd0 & 0x1f) != c->port_num);
+ } else {
+ WARN_ON_ONCE(1);
+ }
+ c->td_seen = 1;
+ }
+ }
+ if (!c->td_desc_seen) {
+ desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
+ if (desc_phys) {
+ __iormb();
+ WARN_ON(c->desc_phys != desc_phys);
+ c->td_desc_seen = 1;
+ }
+ }
+ c->td_retry--;
+ /*
+ * If the TX descriptor / channel is in use, the caller needs to poke
+ * its TD bit multiple times. After that, the hardware releases the
+ * transfer descriptor followed by the TD descriptor. Waiting seems not
+ * to make any difference.
+ * RX seems to be thrown out right away. However, once the TearDown
+ * descriptor gets through we are done. If we have seen the transfer
+ * descriptor before the TD, we fetch it from the enqueue queue; it has
+ * to be there waiting for us.
+ */
+ if (!c->td_seen && c->td_retry)
+ return -EAGAIN;
+
+ WARN_ON(!c->td_retry);
+ if (!c->td_desc_seen) {
+ desc_phys = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
+ WARN_ON(!desc_phys);
+ }
+
+ c->td_queued = 0;
+ c->td_seen = 0;
+ c->td_desc_seen = 0;
+ cppi_writel(0, c->gcr_reg);
+ return 0;
+}
+
+static int cppi41_stop_chan(struct dma_chan *chan)
+{
+ struct cppi41_channel *c = to_cpp41_chan(chan);
+ struct cppi41_dd *cdd = c->cdd;
+ u32 desc_num;
+ u32 desc_phys;
+ int ret;
+
+ ret = cppi41_tear_down_chan(c);
+ if (ret)
+ return ret;
+
+ desc_phys = lower_32_bits(c->desc_phys);
+ desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
+ WARN_ON(!cdd->chan_busy[desc_num]);
+ cdd->chan_busy[desc_num] = NULL;
+
+ return 0;
+}
+
+static int cppi41_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct cppi41_channel *c = to_cpp41_chan(chan);
+ int ret;
+
+ switch (cmd) {
+ case DMA_SLAVE_CONFIG:
+ ret = cpp41_cfg_chan(c, (struct dma_slave_config *) arg);
+ break;
+
+ case DMA_TERMINATE_ALL:
+ ret = cppi41_stop_chan(chan);
+ break;
+
+ default:
+ ret = -ENXIO;
+ break;
+ }
+ return ret;
+}
+
+static void cleanup_chans(struct cppi41_dd *cdd)
+{
+ while (!list_empty(&cdd->ddev.channels)) {
+ struct cppi41_channel *cchan;
+
+ cchan = list_first_entry(&cdd->ddev.channels,
+ struct cppi41_channel, chan.device_node);
+ list_del(&cchan->chan.device_node);
+ kfree(cchan);
+ }
+}
+
+static int cppi41_add_chans(struct platform_device *pdev, struct cppi41_dd *cdd)
+{
+ struct cppi41_channel *cchan;
+ int i;
+ int ret;
+ u32 n_chans;
+
+ ret = of_property_read_u32(pdev->dev.of_node, "#dma-channels",
+ &n_chans);
+ if (ret)
+ return ret;
+ /*
+ * Each channel can only be used as TX or as RX, so we allocate twice
+ * as many DMA channels: each USB endpoint needs one RX and one TX
+ * channel.
+ */
+ n_chans *= 2;
+
+ for (i = 0; i < n_chans; i++) {
+ cchan = kzalloc(sizeof(*cchan), GFP_KERNEL);
+ if (!cchan)
+ goto err;
+
+ cchan->cdd = cdd;
+ if (i & 1) {
+ cchan->gcr_reg = cdd->ctrl_mem + DMA_TXGCR(i >> 1);
+ cchan->is_tx = 1;
+ } else {
+ cchan->gcr_reg = cdd->ctrl_mem + DMA_RXGCR(i >> 1);
+ cchan->is_tx = 0;
+ }
+ cchan->port_num = i >> 1;
+ cchan->desc = &cdd->cd[i];
+ cchan->desc_phys = cdd->descs_phys;
+ cchan->desc_phys += i * sizeof(struct cppi41_desc);
+ cchan->chan.device = &cdd->ddev;
+ list_add_tail(&cchan->chan.device_node, &cdd->ddev.channels);
+ }
+ cdd->first_td_desc = n_chans;
+
+ return 0;
+err:
+ cleanup_chans(cdd);
+ return -ENOMEM;
+}
+
+static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
+{
+ unsigned int mem_decs;
+ int i;
+
+ mem_decs = ALLOC_DECS_NUM * sizeof(struct cppi41_desc);
+
+ for (i = 0; i < DESCS_AREAS; i++) {
+
+ cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
+ cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));
+
+ dma_free_coherent(&pdev->dev, mem_decs, cdd->cd,
+ cdd->descs_phys);
+ }
+}
+
+static void disable_sched(struct cppi41_dd *cdd)
+{
+ cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
+}
+
+static void deinit_cpii41(struct platform_device *pdev, struct cppi41_dd *cdd)
+{
+ disable_sched(cdd);
+
+ purge_descs(pdev, cdd);
+
+ cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
+ cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
+ dma_free_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
+ cdd->scratch_phys);
+}
+
+static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
+{
+ unsigned int desc_size;
+ unsigned int mem_decs;
+ int i;
+ u32 reg;
+ u32 idx;
+
+ BUILD_BUG_ON(sizeof(struct cppi41_desc) &
+ (sizeof(struct cppi41_desc) - 1));
+ BUILD_BUG_ON(sizeof(struct cppi41_desc) < 32);
+ BUILD_BUG_ON(ALLOC_DECS_NUM < 32);
+
+ desc_size = sizeof(struct cppi41_desc);
+ mem_decs = ALLOC_DECS_NUM * desc_size;
+
+ idx = 0;
+ for (i = 0; i < DESCS_AREAS; i++) {
+
+ reg = idx << QMGR_MEMCTRL_IDX_SH;
+ reg |= (ilog2(desc_size) - 5) << QMGR_MEMCTRL_DESC_SH;
+ reg |= ilog2(ALLOC_DECS_NUM) - 5;
+
+ BUILD_BUG_ON(DESCS_AREAS != 1);
+ cdd->cd = dma_alloc_coherent(&pdev->dev, mem_decs,
+ &cdd->descs_phys, GFP_KERNEL);
+ if (!cdd->cd)
+ return -ENOMEM;
+
+ cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
+ cppi_writel(reg, cdd->qmgr_mem + QMGR_MEMCTRL(i));
+
+ idx += ALLOC_DECS_NUM;
+ }
+ return 0;
+}
+
+static void init_sched(struct cppi41_dd *cdd)
+{
+ unsigned ch;
+ unsigned word;
+ u32 reg;
+
+ word = 0;
+ cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
+ for (ch = 0; ch < 15 * 2; ch += 2) {
+
+ reg = SCHED_ENTRY0_CHAN(ch);
+ reg |= SCHED_ENTRY1_CHAN(ch) | SCHED_ENTRY1_IS_RX;
+
+ reg |= SCHED_ENTRY2_CHAN(ch + 1);
+ reg |= SCHED_ENTRY3_CHAN(ch + 1) | SCHED_ENTRY3_IS_RX;
+ cppi_writel(reg, cdd->sched_mem + DMA_SCHED_WORD(word));
+ word++;
+ }
+ reg = 15 * 2 * 2 - 1;
+ reg |= DMA_SCHED_CTRL_EN;
+ cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
+}
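init_sched() packs four scheduler entries, TX and RX for a pair of channels, into each 32-bit scheduler word. Decoded for the first loop iteration (ch == 0), a word looks like this (derived from the SCHED_ENTRY* macros above):

/*
 * Scheduler word 0 as written by init_sched() for ch == 0:
 *
 *   bits  7..0   SCHED_ENTRY0_CHAN(0)           -> TX channel 0
 *   bits 15..8   SCHED_ENTRY1_CHAN(0) | IS_RX   -> RX channel 0
 *   bits 23..16  SCHED_ENTRY2_CHAN(1)           -> TX channel 1
 *   bits 31..24  SCHED_ENTRY3_CHAN(1) | IS_RX   -> RX channel 1
 *
 * The final write programs DMA_SCHED_CTRL with the last entry index
 * (15 * 2 * 2 - 1 = 59) and DMA_SCHED_CTRL_EN.
 */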
+
+static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
+{
+ int ret;
+
+ BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
+ cdd->qmgr_scratch = dma_alloc_coherent(&pdev->dev, QMGR_SCRATCH_SIZE,
+ &cdd->scratch_phys, GFP_KERNEL);
+ if (!cdd->qmgr_scratch)
+ return -ENOMEM;
+
+ cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
+ cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
+ cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
+
+ ret = init_descs(pdev, cdd);
+ if (ret)
+ goto err_td;
+
+ cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ);
+ init_sched(cdd);
+ return 0;
+err_td:
+ deinit_cpii41(pdev, cdd);
+ return ret;
+}
+
+static struct platform_driver cpp41_dma_driver;
+/*
+ * The param format is:
+ * X Y
+ * X: Port
+ * Y: 0 = RX else TX
+ */
+#define INFO_PORT 0
+#define INFO_IS_TX 1
+
+static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct cppi41_channel *cchan;
+ struct cppi41_dd *cdd;
+ const struct chan_queues *queues;
+ u32 *num = param;
+
+ if (chan->device->dev->driver != &cpp41_dma_driver.driver)
+ return false;
+
+ cchan = to_cpp41_chan(chan);
+
+ if (cchan->port_num != num[INFO_PORT])
+ return false;
+
+ if (cchan->is_tx && !num[INFO_IS_TX])
+ return false;
+ cdd = cchan->cdd;
+ if (cchan->is_tx)
+ queues = cdd->queues_tx;
+ else
+ queues = cdd->queues_rx;
+
+ BUILD_BUG_ON(ARRAY_SIZE(usb_queues_rx) != ARRAY_SIZE(usb_queues_tx));
+ if (WARN_ON(cchan->port_num > ARRAY_SIZE(usb_queues_rx)))
+ return false;
+
+ cchan->q_num = queues[cchan->port_num].submit;
+ cchan->q_comp_num = queues[cchan->port_num].complete;
+ return true;
+}
+
+static struct of_dma_filter_info cpp41_dma_info = {
+ .filter_fn = cpp41_dma_filter_fn,
+};
+
+static struct dma_chan *cppi41_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ int count = dma_spec->args_count;
+ struct of_dma_filter_info *info = ofdma->of_dma_data;
+
+ if (!info || !info->filter_fn)
+ return NULL;
+
+ if (count != 2)
+ return NULL;
+
+ return dma_request_channel(info->dma_cap, info->filter_fn,
+ &dma_spec->args[0]);
+}
+
+static const struct cppi_glue_infos usb_infos = {
+ .isr = cppi41_irq,
+ .queues_rx = usb_queues_rx,
+ .queues_tx = usb_queues_tx,
+ .td_queue = { .submit = 31, .complete = 0 },
+};
+
+static const struct of_device_id cppi41_dma_ids[] = {
+ { .compatible = "ti,am3359-cppi41", .data = &usb_infos},
+ {},
+};
+MODULE_DEVICE_TABLE(of, cppi41_dma_ids);
+
+static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(cppi41_dma_ids, pdev->dev.of_node);
+ if (!of_id)
+ return NULL;
+ return of_id->data;
+}
+
+static int cppi41_dma_probe(struct platform_device *pdev)
+{
+ struct cppi41_dd *cdd;
+ const struct cppi_glue_infos *glue_info;
+ int irq;
+ int ret;
+
+ glue_info = get_glue_info(pdev);
+ if (!glue_info)
+ return -EINVAL;
+
+ cdd = kzalloc(sizeof(*cdd), GFP_KERNEL);
+ if (!cdd)
+ return -ENOMEM;
+
+ dma_cap_set(DMA_SLAVE, cdd->ddev.cap_mask);
+ cdd->ddev.device_alloc_chan_resources = cppi41_dma_alloc_chan_resources;
+ cdd->ddev.device_free_chan_resources = cppi41_dma_free_chan_resources;
+ cdd->ddev.device_tx_status = cppi41_dma_tx_status;
+ cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
+ cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
+ cdd->ddev.device_control = cppi41_dma_control;
+ cdd->ddev.dev = &pdev->dev;
+ INIT_LIST_HEAD(&cdd->ddev.channels);
+ cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
+
+ cdd->usbss_mem = of_iomap(pdev->dev.of_node, 0);
+ cdd->ctrl_mem = of_iomap(pdev->dev.of_node, 1);
+ cdd->sched_mem = of_iomap(pdev->dev.of_node, 2);
+ cdd->qmgr_mem = of_iomap(pdev->dev.of_node, 3);
+
+ if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
+ !cdd->qmgr_mem) {
+ ret = -ENXIO;
+ goto err_remap;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0)
+ goto err_get_sync;
+
+ cdd->queues_rx = glue_info->queues_rx;
+ cdd->queues_tx = glue_info->queues_tx;
+ cdd->td_queue = glue_info->td_queue;
+
+ ret = init_cppi41(pdev, cdd);
+ if (ret)
+ goto err_init_cppi;
+
+ ret = cppi41_add_chans(pdev, cdd);
+ if (ret)
+ goto err_chans;
+
+ irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (!irq) {
+		ret = -EINVAL;
+		goto err_irq;
+	}
+
+ cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
+
+ ret = request_irq(irq, glue_info->isr, IRQF_SHARED,
+ dev_name(&pdev->dev), cdd);
+ if (ret)
+ goto err_irq;
+ cdd->irq = irq;
+
+ ret = dma_async_device_register(&cdd->ddev);
+ if (ret)
+ goto err_dma_reg;
+
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ cppi41_dma_xlate, &cpp41_dma_info);
+ if (ret)
+ goto err_of;
+
+ platform_set_drvdata(pdev, cdd);
+ return 0;
+err_of:
+ dma_async_device_unregister(&cdd->ddev);
+err_dma_reg:
+ free_irq(irq, cdd);
+err_irq:
+ cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
+ cleanup_chans(cdd);
+err_chans:
+ deinit_cpii41(pdev, cdd);
+err_init_cppi:
+ pm_runtime_put(&pdev->dev);
+err_get_sync:
+ pm_runtime_disable(&pdev->dev);
+ iounmap(cdd->usbss_mem);
+ iounmap(cdd->ctrl_mem);
+ iounmap(cdd->sched_mem);
+ iounmap(cdd->qmgr_mem);
+err_remap:
+ kfree(cdd);
+ return ret;
+}
+
+static int cppi41_dma_remove(struct platform_device *pdev)
+{
+ struct cppi41_dd *cdd = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&cdd->ddev);
+
+ cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
+ free_irq(cdd->irq, cdd);
+ cleanup_chans(cdd);
+ deinit_cpii41(pdev, cdd);
+ iounmap(cdd->usbss_mem);
+ iounmap(cdd->ctrl_mem);
+ iounmap(cdd->sched_mem);
+ iounmap(cdd->qmgr_mem);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ kfree(cdd);
+ return 0;
+}
+
+static struct platform_driver cpp41_dma_driver = {
+ .probe = cppi41_dma_probe,
+ .remove = cppi41_dma_remove,
+ .driver = {
+ .name = "cppi41-dma-engine",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(cppi41_dma_ids),
+ },
+};
+
+module_platform_driver(cpp41_dma_driver);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 9e56745f87b..9162ac80c18 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -87,7 +87,8 @@ static struct dma_chan *dev_to_dma_chan(struct device *dev)
return chan_dev->chan;
}
-static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t memcpy_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct dma_chan *chan;
unsigned long count = 0;
@@ -106,9 +107,10 @@ static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *at
return err;
}
+static DEVICE_ATTR_RO(memcpy_count);
-static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t bytes_transferred_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct dma_chan *chan;
unsigned long count = 0;
@@ -127,8 +129,10 @@ static ssize_t show_bytes_transferred(struct device *dev, struct device_attribut
return err;
}
+static DEVICE_ATTR_RO(bytes_transferred);
-static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct dma_chan *chan;
int err;
@@ -143,13 +147,15 @@ static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, ch
return err;
}
+static DEVICE_ATTR_RO(in_use);
-static struct device_attribute dma_attrs[] = {
- __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
- __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
- __ATTR(in_use, S_IRUGO, show_in_use, NULL),
- __ATTR_NULL
+static struct attribute *dma_dev_attrs[] = {
+ &dev_attr_memcpy_count.attr,
+ &dev_attr_bytes_transferred.attr,
+ &dev_attr_in_use.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(dma_dev);
static void chan_dev_release(struct device *dev)
{
@@ -167,7 +173,7 @@ static void chan_dev_release(struct device *dev)
static struct class dma_devclass = {
.name = "dma",
- .dev_attrs = dma_attrs,
+ .dev_groups = dma_dev_groups,
.dev_release = chan_dev_release,
};
@@ -376,20 +382,30 @@ void dma_issue_pending_all(void)
EXPORT_SYMBOL(dma_issue_pending_all);
/**
- * nth_chan - returns the nth channel of the given capability
+ * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
+ */
+static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
+{
+ int node = dev_to_node(chan->device->dev);
+ return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
+}
+
+/**
+ * min_chan - returns the channel with min count and in the same numa-node as the cpu
* @cap: capability to match
- * @n: nth channel desired
+ * @cpu: cpu index which the channel should be close to
*
- * Defaults to returning the channel with the desired capability and the
- * lowest reference count when 'n' cannot be satisfied. Must be called
- * under dma_list_mutex.
+ * If some channels are close to the given cpu, the one with the lowest
+ * reference count is returned. Otherwise, cpu is ignored and only the
+ * reference count is taken into account.
+ * Must be called under dma_list_mutex.
*/
-static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
+static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
struct dma_device *device;
struct dma_chan *chan;
- struct dma_chan *ret = NULL;
struct dma_chan *min = NULL;
+ struct dma_chan *localmin = NULL;
list_for_each_entry(device, &dma_device_list, global_node) {
if (!dma_has_cap(cap, device->cap_mask) ||
@@ -398,27 +414,22 @@ static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
list_for_each_entry(chan, &device->channels, device_node) {
if (!chan->client_count)
continue;
- if (!min)
- min = chan;
- else if (chan->table_count < min->table_count)
+ if (!min || chan->table_count < min->table_count)
min = chan;
- if (n-- == 0) {
- ret = chan;
- break; /* done */
- }
+ if (dma_chan_is_local(chan, cpu))
+ if (!localmin ||
+ chan->table_count < localmin->table_count)
+ localmin = chan;
}
- if (ret)
- break; /* done */
}
- if (!ret)
- ret = min;
+ chan = localmin ? localmin : min;
- if (ret)
- ret->table_count++;
+ if (chan)
+ chan->table_count++;
- return ret;
+ return chan;
}
/**
@@ -435,7 +446,6 @@ static void dma_channel_rebalance(void)
struct dma_device *device;
int cpu;
int cap;
- int n;
/* undo the last distribution */
for_each_dma_cap_mask(cap, dma_cap_mask_all)
@@ -454,14 +464,9 @@ static void dma_channel_rebalance(void)
return;
/* redistribute available channels */
- n = 0;
for_each_dma_cap_mask(cap, dma_cap_mask_all)
for_each_online_cpu(cpu) {
- if (num_possible_cpus() > 1)
- chan = nth_chan(cap, n++);
- else
- chan = nth_chan(cap, -1);
-
+ chan = min_chan(cap, cpu);
per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
}
}
@@ -504,7 +509,33 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
}
/**
- * dma_request_channel - try to allocate an exclusive channel
+ * dma_get_slave_channel - try to get a specific channel exclusively
+ * @chan: target channel
+ */
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
+{
+ int err = -EBUSY;
+
+ /* lock against __dma_request_channel */
+ mutex_lock(&dma_list_mutex);
+
+ if (chan->client_count == 0) {
+ err = dma_chan_get(chan);
+ if (err)
+ pr_debug("%s: failed to get %s: (%d)\n",
+ __func__, dma_chan_name(chan), err);
+ } else
+ chan = NULL;
+
+ mutex_unlock(&dma_list_mutex);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(dma_get_slave_channel);
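A minimal usage sketch for the new export, assuming the caller already holds a candidate struct dma_chan pointer (looked up from its own controller, say); the helper name below is hypothetical.

	/* Illustrative only: try to claim one specific channel exclusively. */
	static int claim_specific_channel(struct dma_chan *candidate)
	{
		struct dma_chan *chan = dma_get_slave_channel(candidate);

		if (!chan)
			return -EBUSY;	/* someone else already owns it */

		/* ... use chan, then dma_release_channel(chan) when done ... */
		return 0;
	}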
+
+/**
+ * __dma_request_channel - try to allocate an exclusive channel
* @mask: capabilities that the channel must satisfy
* @fn: optional callback to disposition available channels
* @fn_param: opaque parameter to pass to dma_filter_fn
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index e88ded2c8d2..92f796cdc6a 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -25,44 +25,46 @@
#include <linux/seq_file.h>
static unsigned int test_buf_size = 16384;
-module_param(test_buf_size, uint, S_IRUGO);
+module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
static char test_channel[20];
-module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
+module_param_string(channel, test_channel, sizeof(test_channel),
+ S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
static char test_device[20];
-module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
+module_param_string(device, test_device, sizeof(test_device),
+ S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
static unsigned int threads_per_chan = 1;
-module_param(threads_per_chan, uint, S_IRUGO);
+module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(threads_per_chan,
"Number of threads to start per channel (default: 1)");
static unsigned int max_channels;
-module_param(max_channels, uint, S_IRUGO);
+module_param(max_channels, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channels,
"Maximum number of channels to use (default: all)");
static unsigned int iterations;
-module_param(iterations, uint, S_IRUGO);
+module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
"Iterations before stopping test (default: infinite)");
static unsigned int xor_sources = 3;
-module_param(xor_sources, uint, S_IRUGO);
+module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
"Number of xor source buffers (default: 3)");
static unsigned int pq_sources = 3;
-module_param(pq_sources, uint, S_IRUGO);
+module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pq_sources,
"Number of p+q source buffers (default: 3)");
static int timeout = 3000;
-module_param(timeout, uint, S_IRUGO);
+module_param(timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
"Pass -1 for infinite timeout");
@@ -193,7 +195,6 @@ struct dmatest_info {
/* debugfs related stuff */
struct dentry *root;
- struct dmatest_params dbgfs_params;
/* Test results */
struct list_head results;
@@ -406,7 +407,11 @@ static int thread_result_add(struct dmatest_info *info,
list_add_tail(&tr->node, &r->results);
mutex_unlock(&info->results_lock);
- pr_warn("%s\n", thread_result_get(r->name, tr));
+ if (tr->type == DMATEST_ET_OK)
+ pr_debug("%s\n", thread_result_get(r->name, tr));
+ else
+ pr_warn("%s\n", thread_result_get(r->name, tr));
+
return 0;
}
@@ -1007,7 +1012,15 @@ static int __restart_threaded_test(struct dmatest_info *info, bool run)
result_free(info, NULL);
/* Copy test parameters */
- memcpy(params, &info->dbgfs_params, sizeof(*params));
+ params->buf_size = test_buf_size;
+ strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
+ strlcpy(params->device, strim(test_device), sizeof(params->device));
+ params->threads_per_chan = threads_per_chan;
+ params->max_channels = max_channels;
+ params->iterations = iterations;
+ params->xor_sources = xor_sources;
+ params->pq_sources = pq_sources;
+ params->timeout = timeout;
/* Run test with new parameters */
return __run_threaded_test(info);
@@ -1029,71 +1042,6 @@ static bool __is_threaded_test_run(struct dmatest_info *info)
return false;
}
-static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
- const void __user *from, size_t count)
-{
- char tmp[20];
- ssize_t len;
-
- len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count);
- if (len >= 0) {
- tmp[len] = '\0';
- strlcpy(to, strim(tmp), available);
- }
-
- return len;
-}
-
-static ssize_t dtf_read_channel(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct dmatest_info *info = file->private_data;
- return simple_read_from_buffer(buf, count, ppos,
- info->dbgfs_params.channel,
- strlen(info->dbgfs_params.channel));
-}
-
-static ssize_t dtf_write_channel(struct file *file, const char __user *buf,
- size_t size, loff_t *ppos)
-{
- struct dmatest_info *info = file->private_data;
- return dtf_write_string(info->dbgfs_params.channel,
- sizeof(info->dbgfs_params.channel),
- ppos, buf, size);
-}
-
-static const struct file_operations dtf_channel_fops = {
- .read = dtf_read_channel,
- .write = dtf_write_channel,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
-static ssize_t dtf_read_device(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct dmatest_info *info = file->private_data;
- return simple_read_from_buffer(buf, count, ppos,
- info->dbgfs_params.device,
- strlen(info->dbgfs_params.device));
-}
-
-static ssize_t dtf_write_device(struct file *file, const char __user *buf,
- size_t size, loff_t *ppos)
-{
- struct dmatest_info *info = file->private_data;
- return dtf_write_string(info->dbgfs_params.device,
- sizeof(info->dbgfs_params.device),
- ppos, buf, size);
-}
-
-static const struct file_operations dtf_device_fops = {
- .read = dtf_read_device,
- .write = dtf_write_device,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -1187,8 +1135,6 @@ static const struct file_operations dtf_results_fops = {
static int dmatest_register_dbgfs(struct dmatest_info *info)
{
struct dentry *d;
- struct dmatest_params *params = &info->dbgfs_params;
- int ret = -ENOMEM;
d = debugfs_create_dir("dmatest", NULL);
if (IS_ERR(d))
@@ -1198,81 +1144,24 @@ static int dmatest_register_dbgfs(struct dmatest_info *info)
info->root = d;
- /* Copy initial values */
- memcpy(params, &info->params, sizeof(*params));
-
- /* Test parameters */
-
- d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->buf_size);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root,
- info, &dtf_channel_fops);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root,
- info, &dtf_device_fops);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->threads_per_chan);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->max_channels);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->iterations);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->xor_sources);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->pq_sources);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->timeout);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
/* Run or stop threaded test */
- d = debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root,
- info, &dtf_run_fops);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
+ debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info,
+ &dtf_run_fops);
/* Results of test in progress */
- d = debugfs_create_file("results", S_IRUGO, info->root, info,
- &dtf_results_fops);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
+ debugfs_create_file("results", S_IRUGO, info->root, info,
+ &dtf_results_fops);
return 0;
-err_node:
- debugfs_remove_recursive(info->root);
err_root:
pr_err("dmatest: Failed to initialize debugfs\n");
- return ret;
+ return -ENOMEM;
}
static int __init dmatest_init(void)
{
struct dmatest_info *info = &test_info;
- struct dmatest_params *params = &info->params;
int ret;
memset(info, 0, sizeof(*info));
@@ -1283,17 +1172,6 @@ static int __init dmatest_init(void)
mutex_init(&info->results_lock);
INIT_LIST_HEAD(&info->results);
- /* Set default parameters */
- params->buf_size = test_buf_size;
- strlcpy(params->channel, test_channel, sizeof(params->channel));
- strlcpy(params->device, test_device, sizeof(params->device));
- params->threads_per_chan = threads_per_chan;
- params->max_channels = max_channels;
- params->iterations = iterations;
- params->xor_sources = xor_sources;
- params->pq_sources = pq_sources;
- params->timeout = timeout;
-
ret = dmatest_register_dbgfs(info);
if (ret)
return ret;
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index dde13248b68..dcfe964cc8d 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -4,7 +4,6 @@
config DW_DMAC_CORE
tristate "Synopsys DesignWare AHB DMA support"
- depends on GENERIC_HARDIRQS
select DMA_ENGINE
config DW_DMAC
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index eea479c1217..89eb89f2228 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -37,16 +37,22 @@
* which does not support descriptor writeback.
*/
+static inline bool is_request_line_unset(struct dw_dma_chan *dwc)
+{
+ return dwc->request_line == (typeof(dwc->request_line))~0;
+}
+
static inline void dwc_set_masters(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
struct dw_dma_slave *dws = dwc->chan.private;
unsigned char mmax = dw->nr_masters - 1;
- if (dwc->request_line == ~0) {
- dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
- dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
- }
+ if (!is_request_line_unset(dwc))
+ return;
+
+ dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
+ dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
}
#define DWC_DEFAULT_CTLLO(_chan) ({ \
@@ -644,10 +650,13 @@ static void dw_dma_tasklet(unsigned long data)
static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
struct dw_dma *dw = dev_id;
- u32 status;
+ u32 status = dma_readl(dw, STATUS_INT);
+
+ dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
- dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
- dma_readl(dw, STATUS_INT));
+ /* Check if we have any interrupt from the DMAC */
+ if (!status)
+ return IRQ_NONE;
/*
* Just disable the interrupts. We'll turn them back on in the
@@ -984,7 +993,7 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
dwc->direction = sconfig->direction;
/* Take the request line from slave_id member */
- if (dwc->request_line == ~0)
+ if (is_request_line_unset(dwc))
dwc->request_line = sconfig->slave_id;
convert_burst(&dwc->dma_sconfig.src_maxburst);
@@ -1089,16 +1098,16 @@ dwc_tx_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_SUCCESS) {
- dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+ if (ret == DMA_SUCCESS)
+ return ret;
- ret = dma_cookie_status(chan, cookie, txstate);
- }
+ dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+ ret = dma_cookie_status(chan, cookie, txstate);
if (ret != DMA_SUCCESS)
dma_set_residue(txstate, dwc_get_residue(dwc));
- if (dwc->paused)
+ if (dwc->paused && ret == DMA_IN_PROGRESS)
return DMA_PAUSED;
return ret;
@@ -1560,8 +1569,8 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
/* Disable BLOCK interrupts as well */
channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
- err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt, 0,
- "dw_dmac", dw);
+ err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt,
+ IRQF_SHARED, "dw_dmac", dw);
if (err)
return err;
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 6c9449cffae..e35d9759031 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -253,6 +253,7 @@ static const struct acpi_device_id dw_dma_acpi_id_table[] = {
{ "INTL9C60", 0 },
{ }
};
+MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
#endif
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 5f3e532436e..ff50ff4c6a5 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -56,6 +56,7 @@ struct edma_desc {
struct list_head node;
int absync;
int pset_nr;
+ int processed;
struct edmacc_param pset[0];
};
@@ -69,6 +70,7 @@ struct edma_chan {
int ch_num;
bool alloced;
int slot[EDMA_MAX_SLOTS];
+ int missed;
struct dma_slave_config cfg;
};
@@ -104,22 +106,34 @@ static void edma_desc_free(struct virt_dma_desc *vdesc)
/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
- struct virt_dma_desc *vdesc = vchan_next_desc(&echan->vchan);
+ struct virt_dma_desc *vdesc;
struct edma_desc *edesc;
- int i;
-
- if (!vdesc) {
- echan->edesc = NULL;
- return;
+ struct device *dev = echan->vchan.chan.device->dev;
+ int i, j, left, nslots;
+
+ /* If either we processed all psets or we're still not started */
+ if (!echan->edesc ||
+ echan->edesc->pset_nr == echan->edesc->processed) {
+ /* Get next vdesc */
+ vdesc = vchan_next_desc(&echan->vchan);
+ if (!vdesc) {
+ echan->edesc = NULL;
+ return;
+ }
+ list_del(&vdesc->node);
+ echan->edesc = to_edma_desc(&vdesc->tx);
}
- list_del(&vdesc->node);
+ edesc = echan->edesc;
- echan->edesc = edesc = to_edma_desc(&vdesc->tx);
+ /* Find out how many left */
+ left = edesc->pset_nr - edesc->processed;
+ nslots = min(MAX_NR_SG, left);
/* Write descriptor PaRAM set(s) */
- for (i = 0; i < edesc->pset_nr; i++) {
- edma_write_slot(echan->slot[i], &edesc->pset[i]);
+ for (i = 0; i < nslots; i++) {
+ j = i + edesc->processed;
+ edma_write_slot(echan->slot[i], &edesc->pset[j]);
dev_dbg(echan->vchan.chan.device->dev,
"\n pset[%d]:\n"
" chnum\t%d\n"
@@ -132,24 +146,50 @@ static void edma_execute(struct edma_chan *echan)
" bidx\t%08x\n"
" cidx\t%08x\n"
" lkrld\t%08x\n",
- i, echan->ch_num, echan->slot[i],
- edesc->pset[i].opt,
- edesc->pset[i].src,
- edesc->pset[i].dst,
- edesc->pset[i].a_b_cnt,
- edesc->pset[i].ccnt,
- edesc->pset[i].src_dst_bidx,
- edesc->pset[i].src_dst_cidx,
- edesc->pset[i].link_bcntrld);
+ j, echan->ch_num, echan->slot[i],
+ edesc->pset[j].opt,
+ edesc->pset[j].src,
+ edesc->pset[j].dst,
+ edesc->pset[j].a_b_cnt,
+ edesc->pset[j].ccnt,
+ edesc->pset[j].src_dst_bidx,
+ edesc->pset[j].src_dst_cidx,
+ edesc->pset[j].link_bcntrld);
/* Link to the previous slot if not the last set */
- if (i != (edesc->pset_nr - 1))
+ if (i != (nslots - 1))
edma_link(echan->slot[i], echan->slot[i+1]);
- /* Final pset links to the dummy pset */
- else
- edma_link(echan->slot[i], echan->ecc->dummy_slot);
}
- edma_start(echan->ch_num);
+ edesc->processed += nslots;
+
+	/*
+	 * If this is the last set in a group of SG-list transactions,
+	 * set up a link to the dummy slot. This results in all future
+	 * events being absorbed, which is OK because we're done.
+	 */
+ if (edesc->processed == edesc->pset_nr)
+ edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot);
+
+ edma_resume(echan->ch_num);
+
+ if (edesc->processed <= MAX_NR_SG) {
+ dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
+ edma_start(echan->ch_num);
+ }
+
+	/*
+	 * Missed events can happen due to setup times between
+	 * intermediate transfers when long SG lists are broken up
+	 * into batches of MAX_NR_SG.
+	 */
+ if (echan->missed) {
+ dev_dbg(dev, "missed event in execute detected\n");
+ edma_clean_channel(echan->ch_num);
+ edma_stop(echan->ch_num);
+ edma_start(echan->ch_num);
+ edma_trigger_channel(echan->ch_num);
+ echan->missed = 0;
+ }
}
static int edma_terminate_all(struct edma_chan *echan)
@@ -222,9 +262,9 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
enum dma_slave_buswidth dev_width;
u32 burst;
struct scatterlist *sg;
- int i;
int acnt, bcnt, ccnt, src, dst, cidx;
int src_bidx, dst_bidx, src_cidx, dst_cidx;
+ int i, nslots;
if (unlikely(!echan || !sgl || !sg_len))
return NULL;
@@ -247,12 +287,6 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
return NULL;
}
- if (sg_len > MAX_NR_SG) {
- dev_err(dev, "Exceeded max SG segments %d > %d\n",
- sg_len, MAX_NR_SG);
- return NULL;
- }
-
edesc = kzalloc(sizeof(*edesc) + sg_len *
sizeof(edesc->pset[0]), GFP_ATOMIC);
if (!edesc) {
@@ -262,8 +296,10 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
edesc->pset_nr = sg_len;
- for_each_sg(sgl, sg, sg_len, i) {
- /* Allocate a PaRAM slot, if needed */
+ /* Allocate a PaRAM slot, if needed */
+ nslots = min_t(unsigned, MAX_NR_SG, sg_len);
+
+ for (i = 0; i < nslots; i++) {
if (echan->slot[i] < 0) {
echan->slot[i] =
edma_alloc_slot(EDMA_CTLR(echan->ch_num),
@@ -273,6 +309,10 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
return NULL;
}
}
+ }
+
+ /* Configure PaRAM sets for each SG */
+ for_each_sg(sgl, sg, sg_len, i) {
acnt = dev_width;
@@ -330,6 +370,12 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
/* Configure A or AB synchronized transfers */
if (edesc->absync)
edesc->pset[i].opt |= SYNCDIM;
+
+		/*
+		 * If this is the last pset in the current batch of SG
+		 * transactions, enable the completion interrupt so that
+		 * the next batch gets processed.
+		 */
+ if (!((i+1) % MAX_NR_SG))
+ edesc->pset[i].opt |= TCINTEN;
+
/* If this is the last set, enable completion interrupt flag */
if (i == sg_len - 1)
edesc->pset[i].opt |= TCINTEN;
@@ -355,27 +401,65 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
struct device *dev = echan->vchan.chan.device->dev;
struct edma_desc *edesc;
unsigned long flags;
+ struct edmacc_param p;
- /* Stop the channel */
- edma_stop(echan->ch_num);
+ /* Pause the channel */
+ edma_pause(echan->ch_num);
switch (ch_status) {
case DMA_COMPLETE:
- dev_dbg(dev, "transfer complete on channel %d\n", ch_num);
-
spin_lock_irqsave(&echan->vchan.lock, flags);
edesc = echan->edesc;
if (edesc) {
+ if (edesc->processed == edesc->pset_nr) {
+ dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
+ edma_stop(echan->ch_num);
+ vchan_cookie_complete(&edesc->vdesc);
+ } else {
+ dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
+ }
+
edma_execute(echan);
- vchan_cookie_complete(&edesc->vdesc);
}
spin_unlock_irqrestore(&echan->vchan.lock, flags);
break;
case DMA_CC_ERROR:
- dev_dbg(dev, "transfer error on channel %d\n", ch_num);
+ spin_lock_irqsave(&echan->vchan.lock, flags);
+
+ edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);
+
+		/*
+		 * Defer the re-issue via the missed flag; it is certain
+		 * to be acted upon, because either:
+		 * (1) we finished transmitting an intermediate slot and
+		 *     edma_execute() is coming up, or
+		 * (2) we finished the current transfer and the next
+		 *     issue_pending will call edma_execute().
+		 *
+		 * Important: issuing directly here can be dangerous and
+		 * lead to nasty recursion when we are in a null slot,
+		 * so we avoid doing that and only set the missed flag.
+		 */
+ if (p.a_b_cnt == 0 && p.ccnt == 0) {
+ dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n");
+ echan->missed = 1;
+ } else {
+ /*
+ * The slot is already programmed but the event got
+			 * missed, so it's safe to issue it here.
+ */
+ dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n");
+ edma_clean_channel(echan->ch_num);
+ edma_stop(echan->ch_num);
+ edma_start(echan->ch_num);
+ edma_trigger_channel(echan->ch_num);
+ }
+
+ spin_unlock_irqrestore(&echan->vchan.lock, flags);
+
break;
default:
break;
@@ -502,8 +586,6 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
} else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
struct edma_desc *edesc = echan->edesc;
txstate->residue = edma_desc_size(edesc);
- } else {
- txstate->residue = 0;
}
spin_unlock_irqrestore(&echan->vchan.lock, flags);
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index f2bf8c0c467..591cd8c63ab 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -1313,15 +1313,7 @@ static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
struct dma_tx_state *state)
{
- struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
- enum dma_status ret;
- unsigned long flags;
-
- spin_lock_irqsave(&edmac->lock, flags);
- ret = dma_cookie_status(chan, cookie, state);
- spin_unlock_irqrestore(&edmac->lock, flags);
-
- return ret;
+ return dma_cookie_status(chan, cookie, state);
}
/**
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 49e8fbdb898..b3f3e90054f 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -979,15 +979,7 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- struct fsldma_chan *chan = to_fsl_chan(dchan);
- enum dma_status ret;
- unsigned long flags;
-
- spin_lock_irqsave(&chan->desc_lock, flags);
- ret = dma_cookie_status(dchan, cookie, txstate);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
-
- return ret;
+ return dma_cookie_status(dchan, cookie, txstate);
}
/*----------------------------------------------------------------------------*/
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index ff2aab973b4..78f8ca5fcce 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -805,10 +805,8 @@ static void imxdma_free_chan_resources(struct dma_chan *chan)
}
INIT_LIST_HEAD(&imxdmac->ld_free);
- if (imxdmac->sg_list) {
- kfree(imxdmac->sg_list);
- imxdmac->sg_list = NULL;
- }
+ kfree(imxdmac->sg_list);
+ imxdmac->sg_list = NULL;
}
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 1e44b8cf95d..fc43603cf0b 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -243,7 +243,6 @@ struct sdma_engine;
* @event_id1 for channels that use 2 events
* @word_size peripheral access size
* @buf_tail ID of the buffer that was processed
- * @done channel completion
* @num_bd max NUM_BD. number of descriptors currently handling
*/
struct sdma_channel {
@@ -255,7 +254,6 @@ struct sdma_channel {
unsigned int event_id1;
enum dma_slave_buswidth word_size;
unsigned int buf_tail;
- struct completion done;
unsigned int num_bd;
struct sdma_buffer_descriptor *bd;
dma_addr_t bd_phys;
@@ -307,9 +305,10 @@ struct sdma_firmware_header {
u32 ram_code_size;
};
-enum sdma_devtype {
- IMX31_SDMA, /* runs on i.mx31 */
- IMX35_SDMA, /* runs on i.mx35 and later */
+struct sdma_driver_data {
+ int chnenbl0;
+ int num_events;
+ struct sdma_script_start_addrs *script_addrs;
};
struct sdma_engine {
@@ -318,8 +317,6 @@ struct sdma_engine {
struct sdma_channel channel[MAX_DMA_CHANNELS];
struct sdma_channel_control *channel_control;
void __iomem *regs;
- enum sdma_devtype devtype;
- unsigned int num_events;
struct sdma_context_data *context;
dma_addr_t context_phys;
struct dma_device dma_device;
@@ -327,15 +324,118 @@ struct sdma_engine {
struct clk *clk_ahb;
spinlock_t channel_0_lock;
struct sdma_script_start_addrs *script_addrs;
+ const struct sdma_driver_data *drvdata;
+};
+
+static struct sdma_driver_data sdma_imx31 = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX31,
+ .num_events = 32,
+};
+
+static struct sdma_script_start_addrs sdma_script_imx25 = {
+ .ap_2_ap_addr = 729,
+ .uart_2_mcu_addr = 904,
+ .per_2_app_addr = 1255,
+ .mcu_2_app_addr = 834,
+ .uartsh_2_mcu_addr = 1120,
+ .per_2_shp_addr = 1329,
+ .mcu_2_shp_addr = 1048,
+ .ata_2_mcu_addr = 1560,
+ .mcu_2_ata_addr = 1479,
+ .app_2_per_addr = 1189,
+ .app_2_mcu_addr = 770,
+ .shp_2_per_addr = 1407,
+ .shp_2_mcu_addr = 979,
+};
+
+static struct sdma_driver_data sdma_imx25 = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx25,
+};
+
+static struct sdma_driver_data sdma_imx35 = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+};
+
+static struct sdma_script_start_addrs sdma_script_imx51 = {
+ .ap_2_ap_addr = 642,
+ .uart_2_mcu_addr = 817,
+ .mcu_2_app_addr = 747,
+ .mcu_2_shp_addr = 961,
+ .ata_2_mcu_addr = 1473,
+ .mcu_2_ata_addr = 1392,
+ .app_2_per_addr = 1033,
+ .app_2_mcu_addr = 683,
+ .shp_2_per_addr = 1251,
+ .shp_2_mcu_addr = 892,
+};
+
+static struct sdma_driver_data sdma_imx51 = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx51,
+};
+
+static struct sdma_script_start_addrs sdma_script_imx53 = {
+ .ap_2_ap_addr = 642,
+ .app_2_mcu_addr = 683,
+ .mcu_2_app_addr = 747,
+ .uart_2_mcu_addr = 817,
+ .shp_2_mcu_addr = 891,
+ .mcu_2_shp_addr = 960,
+ .uartsh_2_mcu_addr = 1032,
+ .spdif_2_mcu_addr = 1100,
+ .mcu_2_spdif_addr = 1134,
+ .firi_2_mcu_addr = 1193,
+ .mcu_2_firi_addr = 1290,
+};
+
+static struct sdma_driver_data sdma_imx53 = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx53,
+};
+
+static struct sdma_script_start_addrs sdma_script_imx6q = {
+ .ap_2_ap_addr = 642,
+ .uart_2_mcu_addr = 817,
+ .mcu_2_app_addr = 747,
+ .per_2_per_addr = 6331,
+ .uartsh_2_mcu_addr = 1032,
+ .mcu_2_shp_addr = 960,
+ .app_2_mcu_addr = 683,
+ .shp_2_mcu_addr = 891,
+ .spdif_2_mcu_addr = 1100,
+ .mcu_2_spdif_addr = 1134,
+};
+
+static struct sdma_driver_data sdma_imx6q = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx6q,
};
static struct platform_device_id sdma_devtypes[] = {
{
+ .name = "imx25-sdma",
+ .driver_data = (unsigned long)&sdma_imx25,
+ }, {
.name = "imx31-sdma",
- .driver_data = IMX31_SDMA,
+ .driver_data = (unsigned long)&sdma_imx31,
}, {
.name = "imx35-sdma",
- .driver_data = IMX35_SDMA,
+ .driver_data = (unsigned long)&sdma_imx35,
+ }, {
+ .name = "imx51-sdma",
+ .driver_data = (unsigned long)&sdma_imx51,
+ }, {
+ .name = "imx53-sdma",
+ .driver_data = (unsigned long)&sdma_imx53,
+ }, {
+ .name = "imx6q-sdma",
+ .driver_data = (unsigned long)&sdma_imx6q,
}, {
/* sentinel */
}
@@ -343,8 +443,11 @@ static struct platform_device_id sdma_devtypes[] = {
MODULE_DEVICE_TABLE(platform, sdma_devtypes);
static const struct of_device_id sdma_dt_ids[] = {
- { .compatible = "fsl,imx31-sdma", .data = &sdma_devtypes[IMX31_SDMA], },
- { .compatible = "fsl,imx35-sdma", .data = &sdma_devtypes[IMX35_SDMA], },
+ { .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
+ { .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
+ { .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
+ { .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
+ { .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);
@@ -356,8 +459,7 @@ MODULE_DEVICE_TABLE(of, sdma_dt_ids);
static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
{
- u32 chnenbl0 = (sdma->devtype == IMX31_SDMA ? SDMA_CHNENBL0_IMX31 :
- SDMA_CHNENBL0_IMX35);
+ u32 chnenbl0 = sdma->drvdata->chnenbl0;
return chnenbl0 + event * 4;
}
@@ -547,8 +649,6 @@ static void sdma_tasklet(unsigned long data)
{
struct sdma_channel *sdmac = (struct sdma_channel *) data;
- complete(&sdmac->done);
-
if (sdmac->flags & IMX_DMA_SG_LOOP)
sdma_handle_channel_loop(sdmac);
else
@@ -733,7 +833,7 @@ static int sdma_config_channel(struct sdma_channel *sdmac)
sdmac->per_addr = 0;
if (sdmac->event_id0) {
- if (sdmac->event_id0 >= sdmac->sdma->num_events)
+ if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
return -EINVAL;
sdma_event_enable(sdmac, sdmac->event_id0);
}
@@ -812,9 +912,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac)
sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
-
- init_completion(&sdmac->done);
-
return 0;
out:
@@ -1120,15 +1217,12 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
}
static enum dma_status sdma_tx_status(struct dma_chan *chan,
- dma_cookie_t cookie,
- struct dma_tx_state *txstate)
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
- dma_cookie_t last_used;
-
- last_used = chan->cookie;
- dma_set_tx_state(txstate, chan->completed_cookie, last_used,
+ dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
sdmac->chn_count - sdmac->chn_real_count);
return sdmac->status;
@@ -1218,19 +1312,6 @@ static int __init sdma_init(struct sdma_engine *sdma)
int i, ret;
dma_addr_t ccb_phys;
- switch (sdma->devtype) {
- case IMX31_SDMA:
- sdma->num_events = 32;
- break;
- case IMX35_SDMA:
- sdma->num_events = 48;
- break;
- default:
- dev_err(sdma->dev, "Unknown sdma type %d. aborting\n",
- sdma->devtype);
- return -ENODEV;
- }
-
clk_enable(sdma->clk_ipg);
clk_enable(sdma->clk_ahb);
@@ -1257,7 +1338,7 @@ static int __init sdma_init(struct sdma_engine *sdma)
MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
/* disable all channels */
- for (i = 0; i < sdma->num_events; i++)
+ for (i = 0; i < sdma->drvdata->num_events; i++)
writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
/* All channels have priority 0 */
@@ -1335,10 +1416,21 @@ static int __init sdma_probe(struct platform_device *pdev)
int ret;
int irq;
struct resource *iores;
- struct sdma_platform_data *pdata = pdev->dev.platform_data;
+ struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
int i;
struct sdma_engine *sdma;
s32 *saddr_arr;
+ const struct sdma_driver_data *drvdata = NULL;
+
+ if (of_id)
+ drvdata = of_id->data;
+ else if (pdev->id_entry)
+ drvdata = (void *)pdev->id_entry->driver_data;
+
+ if (!drvdata) {
+ dev_err(&pdev->dev, "unable to find driver data\n");
+ return -EINVAL;
+ }
sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
if (!sdma)
@@ -1347,6 +1439,7 @@ static int __init sdma_probe(struct platform_device *pdev)
spin_lock_init(&sdma->channel_0_lock);
sdma->dev = &pdev->dev;
+ sdma->drvdata = drvdata;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
@@ -1396,10 +1489,6 @@ static int __init sdma_probe(struct platform_device *pdev)
for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
saddr_arr[i] = -EINVAL;
- if (of_id)
- pdev->id_entry = of_id->data;
- sdma->devtype = pdev->id_entry->driver_data;
-
dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
@@ -1431,6 +1520,8 @@ static int __init sdma_probe(struct platform_device *pdev)
if (ret)
goto err_init;
+ if (sdma->drvdata->script_addrs)
+ sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
if (pdata && pdata->script_addrs)
sdma_add_scripts(sdma, pdata->script_addrs);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index b642e035579..d8ececaf1b5 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -251,7 +251,7 @@ static bool is_bwd_noraid(struct pci_dev *pdev)
}
static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
- dma_addr_t addr, u32 offset, u8 coef, int idx)
+ dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
{
struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
struct ioat_pq16a_descriptor *pq16 =
@@ -1775,15 +1775,12 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
- if (is_xeon_cb32(pdev))
- dma->copy_align = 6;
-
dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
- if (is_bwd_noraid(pdev))
+ if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
/* dca is incompatible with raid operations */
@@ -1793,7 +1790,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (device->cap & IOAT_CAP_XOR) {
is_raid_device = true;
dma->max_xor = 8;
- dma->xor_align = 6;
dma_cap_set(DMA_XOR, dma->cap_mask);
dma->device_prep_dma_xor = ioat3_prep_xor;
@@ -1812,13 +1808,8 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (device->cap & IOAT_CAP_RAID16SS) {
dma_set_maxpq(dma, 16, 0);
- dma->pq_align = 0;
} else {
dma_set_maxpq(dma, 8, 0);
- if (is_xeon_cb32(pdev))
- dma->pq_align = 6;
- else
- dma->pq_align = 0;
}
if (!(device->cap & IOAT_CAP_XOR)) {
@@ -1829,13 +1820,8 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (device->cap & IOAT_CAP_RAID16SS) {
dma->max_xor = 16;
- dma->xor_align = 0;
} else {
dma->max_xor = 8;
- if (is_xeon_cb32(pdev))
- dma->xor_align = 6;
- else
- dma->xor_align = 0;
}
}
}
@@ -1844,14 +1830,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
device->cleanup_fn = ioat3_cleanup_event;
device->timer_fn = ioat3_timer_event;
- if (is_xeon_cb32(pdev)) {
- dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
- dma->device_prep_dma_xor_val = NULL;
-
- dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
- dma->device_prep_dma_pq_val = NULL;
- }
-
/* starting with CB3.3 super extended descriptors are supported */
if (device->cap & IOAT_CAP_RAID16SS) {
char pool_name[14];
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index cc727ec78c4..dd8b44a56e5 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -518,7 +518,7 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
struct iop_adma_desc_slot *slot = NULL;
int init = iop_chan->slots_allocated ? 0 : 1;
struct iop_adma_platform_data *plat_data =
- iop_chan->device->pdev->dev.platform_data;
+ dev_get_platdata(&iop_chan->device->pdev->dev);
int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;
/* Allocate descriptor slots */
@@ -1351,7 +1351,7 @@ static int iop_adma_remove(struct platform_device *dev)
struct iop_adma_device *device = platform_get_drvdata(dev);
struct dma_chan *chan, *_chan;
struct iop_adma_chan *iop_chan;
- struct iop_adma_platform_data *plat_data = dev->dev.platform_data;
+ struct iop_adma_platform_data *plat_data = dev_get_platdata(&dev->dev);
dma_async_device_unregister(&device->common);
@@ -1376,7 +1376,7 @@ static int iop_adma_probe(struct platform_device *pdev)
struct iop_adma_device *adev;
struct iop_adma_chan *iop_chan;
struct dma_device *dma_dev;
- struct iop_adma_platform_data *plat_data = pdev->dev.platform_data;
+ struct iop_adma_platform_data *plat_data = dev_get_platdata(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index d39c2cd0795..cb9c0bc317e 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1593,10 +1593,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
static enum dma_status idmac_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
- dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
- if (cookie != chan->cookie)
- return DMA_ERROR;
- return DMA_SUCCESS;
+ return dma_cookie_status(chan, cookie, txstate);
}
static int __init ipu_idmac_init(struct ipu *ipu)
@@ -1767,7 +1764,6 @@ static int ipu_remove(struct platform_device *pdev)
iounmap(ipu->reg_ic);
iounmap(ipu->reg_ipu);
tasklet_kill(&ipu->tasklet);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
new file mode 100644
index 00000000000..a2c330f5f95
--- /dev/null
+++ b/drivers/dma/k3dma.c
@@ -0,0 +1,837 @@
+/*
+ * Copyright (c) 2013 Linaro Ltd.
+ * Copyright (c) 2013 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+#define DRIVER_NAME "k3-dma"
+#define DMA_ALIGN 3
+#define DMA_MAX_SIZE 0x1ffc
+
+#define INT_STAT 0x00
+#define INT_TC1 0x04
+#define INT_ERR1 0x0c
+#define INT_ERR2 0x10
+#define INT_TC1_MASK 0x18
+#define INT_ERR1_MASK 0x20
+#define INT_ERR2_MASK 0x24
+#define INT_TC1_RAW 0x600
+#define INT_ERR1_RAW 0x608
+#define INT_ERR2_RAW 0x610
+#define CH_PRI 0x688
+#define CH_STAT 0x690
+#define CX_CUR_CNT 0x704
+#define CX_LLI 0x800
+#define CX_CNT 0x810
+#define CX_SRC 0x814
+#define CX_DST 0x818
+#define CX_CFG 0x81c
+#define AXI_CFG 0x820
+#define AXI_CFG_DEFAULT 0x201201
+
+#define CX_LLI_CHAIN_EN 0x2
+#define CX_CFG_EN 0x1
+#define CX_CFG_MEM2PER (0x1 << 2)
+#define CX_CFG_PER2MEM (0x2 << 2)
+#define CX_CFG_SRCINCR (0x1 << 31)
+#define CX_CFG_DSTINCR (0x1 << 30)
+
+struct k3_desc_hw {
+ u32 lli;
+ u32 reserved[3];
+ u32 count;
+ u32 saddr;
+ u32 daddr;
+ u32 config;
+} __aligned(32);
+
+struct k3_dma_desc_sw {
+ struct virt_dma_desc vd;
+ dma_addr_t desc_hw_lli;
+ size_t desc_num;
+ size_t size;
+ struct k3_desc_hw desc_hw[0];
+};
+
+struct k3_dma_phy;
+
+struct k3_dma_chan {
+ u32 ccfg;
+ struct virt_dma_chan vc;
+ struct k3_dma_phy *phy;
+ struct list_head node;
+ enum dma_transfer_direction dir;
+ dma_addr_t dev_addr;
+ enum dma_status status;
+};
+
+struct k3_dma_phy {
+ u32 idx;
+ void __iomem *base;
+ struct k3_dma_chan *vchan;
+ struct k3_dma_desc_sw *ds_run;
+ struct k3_dma_desc_sw *ds_done;
+};
+
+struct k3_dma_dev {
+ struct dma_device slave;
+ void __iomem *base;
+ struct tasklet_struct task;
+ spinlock_t lock;
+ struct list_head chan_pending;
+ struct k3_dma_phy *phy;
+ struct k3_dma_chan *chans;
+ struct clk *clk;
+ u32 dma_channels;
+ u32 dma_requests;
+};
+
+#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
+
+static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct k3_dma_chan, vc.chan);
+}
+
+static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
+{
+ u32 val = 0;
+
+ if (on) {
+ val = readl_relaxed(phy->base + CX_CFG);
+ val |= CX_CFG_EN;
+ writel_relaxed(val, phy->base + CX_CFG);
+ } else {
+ val = readl_relaxed(phy->base + CX_CFG);
+ val &= ~CX_CFG_EN;
+ writel_relaxed(val, phy->base + CX_CFG);
+ }
+}
+
+static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
+{
+ u32 val = 0;
+
+ k3_dma_pause_dma(phy, false);
+
+ val = 0x1 << phy->idx;
+ writel_relaxed(val, d->base + INT_TC1_RAW);
+ writel_relaxed(val, d->base + INT_ERR1_RAW);
+ writel_relaxed(val, d->base + INT_ERR2_RAW);
+}
+
+static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
+{
+ writel_relaxed(hw->lli, phy->base + CX_LLI);
+ writel_relaxed(hw->count, phy->base + CX_CNT);
+ writel_relaxed(hw->saddr, phy->base + CX_SRC);
+ writel_relaxed(hw->daddr, phy->base + CX_DST);
+ writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG);
+ writel_relaxed(hw->config, phy->base + CX_CFG);
+}
+
+static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
+{
+ u32 cnt = 0;
+
+ cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
+ cnt &= 0xffff;
+ return cnt;
+}
+
+static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
+{
+ return readl_relaxed(phy->base + CX_LLI);
+}
+
+static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
+{
+ return readl_relaxed(d->base + CH_STAT);
+}
+
+static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
+{
+ if (on) {
+ /* set same priority */
+ writel_relaxed(0x0, d->base + CH_PRI);
+
+ /* unmask irq */
+ writel_relaxed(0xffff, d->base + INT_TC1_MASK);
+ writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
+ writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
+ } else {
+ /* mask irq */
+ writel_relaxed(0x0, d->base + INT_TC1_MASK);
+ writel_relaxed(0x0, d->base + INT_ERR1_MASK);
+ writel_relaxed(0x0, d->base + INT_ERR2_MASK);
+ }
+}
+
+static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
+{
+ struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
+ struct k3_dma_phy *p;
+ struct k3_dma_chan *c;
+ u32 stat = readl_relaxed(d->base + INT_STAT);
+ u32 tc1 = readl_relaxed(d->base + INT_TC1);
+ u32 err1 = readl_relaxed(d->base + INT_ERR1);
+ u32 err2 = readl_relaxed(d->base + INT_ERR2);
+ u32 i, irq_chan = 0;
+
+ while (stat) {
+ i = __ffs(stat);
+ stat &= (stat - 1);
+ if (likely(tc1 & BIT(i))) {
+ p = &d->phy[i];
+ c = p->vchan;
+ if (c) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ vchan_cookie_complete(&p->ds_run->vd);
+ p->ds_done = p->ds_run;
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ }
+ irq_chan |= BIT(i);
+ }
+ if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
+ dev_warn(d->slave.dev, "DMA ERR\n");
+ }
+
+ writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
+ writel_relaxed(err1, d->base + INT_ERR1_RAW);
+ writel_relaxed(err2, d->base + INT_ERR2_RAW);
+
+ if (irq_chan) {
+ tasklet_schedule(&d->task);
+ return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static int k3_dma_start_txd(struct k3_dma_chan *c)
+{
+ struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
+ struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+
+ if (!c->phy)
+ return -EAGAIN;
+
+ if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
+ return -EAGAIN;
+
+ if (vd) {
+ struct k3_dma_desc_sw *ds =
+ container_of(vd, struct k3_dma_desc_sw, vd);
+ /*
+ * fetch and remove request from vc->desc_issued
+ * so vc->desc_issued only contains desc pending
+ */
+ list_del(&ds->vd.node);
+ c->phy->ds_run = ds;
+ c->phy->ds_done = NULL;
+ /* start dma */
+ k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
+ return 0;
+ }
+ c->phy->ds_done = NULL;
+ c->phy->ds_run = NULL;
+ return -EAGAIN;
+}
+
+static void k3_dma_tasklet(unsigned long arg)
+{
+ struct k3_dma_dev *d = (struct k3_dma_dev *)arg;
+ struct k3_dma_phy *p;
+ struct k3_dma_chan *c, *cn;
+ unsigned pch, pch_alloc = 0;
+
+ /* check new dma request of running channel in vc->desc_issued */
+ list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
+ spin_lock_irq(&c->vc.lock);
+ p = c->phy;
+ if (p && p->ds_done) {
+ if (k3_dma_start_txd(c)) {
+ /* No current txd associated with this channel */
+ dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
+ /* Mark this channel free */
+ c->phy = NULL;
+ p->vchan = NULL;
+ }
+ }
+ spin_unlock_irq(&c->vc.lock);
+ }
+
+ /* check new channel request in d->chan_pending */
+ spin_lock_irq(&d->lock);
+ for (pch = 0; pch < d->dma_channels; pch++) {
+ p = &d->phy[pch];
+
+ if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
+ c = list_first_entry(&d->chan_pending,
+ struct k3_dma_chan, node);
+ /* remove from d->chan_pending */
+ list_del_init(&c->node);
+ pch_alloc |= 1 << pch;
+ /* Mark this channel allocated */
+ p->vchan = c;
+ c->phy = p;
+ dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
+ }
+ }
+ spin_unlock_irq(&d->lock);
+
+ for (pch = 0; pch < d->dma_channels; pch++) {
+ if (pch_alloc & (1 << pch)) {
+ p = &d->phy[pch];
+ c = p->vchan;
+ if (c) {
+ spin_lock_irq(&c->vc.lock);
+ k3_dma_start_txd(c);
+ spin_unlock_irq(&c->vc.lock);
+ }
+ }
+ }
+}
+
+static int k3_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ return 0;
+}
+
+static void k3_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_dev *d = to_k3_dma(chan->device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d->lock, flags);
+ list_del_init(&c->node);
+ spin_unlock_irqrestore(&d->lock, flags);
+
+ vchan_free_chan_resources(&c->vc);
+ c->ccfg = 0;
+}
+
+static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_dev *d = to_k3_dma(chan->device);
+ struct k3_dma_phy *p;
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ enum dma_status ret;
+ size_t bytes = 0;
+
+ ret = dma_cookie_status(&c->vc.chan, cookie, state);
+ if (ret == DMA_SUCCESS)
+ return ret;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ p = c->phy;
+ ret = c->status;
+
+ /*
+ * If the cookie is on our issue queue, then the residue is
+ * its total size.
+ */
+ vd = vchan_find_desc(&c->vc, cookie);
+ if (vd) {
+ bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
+	} else if (!p || !p->ds_run) {
+ bytes = 0;
+ } else {
+ struct k3_dma_desc_sw *ds = p->ds_run;
+ u32 clli = 0, index = 0;
+
+ bytes = k3_dma_get_curr_cnt(d, p);
+ clli = k3_dma_get_curr_lli(p);
+ index = (clli - ds->desc_hw_lli) / sizeof(struct k3_desc_hw);
+ for (; index < ds->desc_num; index++) {
+ bytes += ds->desc_hw[index].count;
+ /* end of lli */
+ if (!ds->desc_hw[index].lli)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ dma_set_residue(state, bytes);
+ return ret;
+}
+
+static void k3_dma_issue_pending(struct dma_chan *chan)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_dev *d = to_k3_dma(chan->device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ /* add request to vc->desc_issued */
+ if (vchan_issue_pending(&c->vc)) {
+ spin_lock(&d->lock);
+ if (!c->phy) {
+ if (list_empty(&c->node)) {
+ /* if new channel, add chan_pending */
+ list_add_tail(&c->node, &d->chan_pending);
+ /* check in tasklet */
+ tasklet_schedule(&d->task);
+ dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
+ }
+ }
+ spin_unlock(&d->lock);
+ } else
+ dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
+ dma_addr_t src, size_t len, u32 num, u32 ccfg)
+{
+ if ((num + 1) < ds->desc_num)
+ ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
+ sizeof(struct k3_desc_hw);
+ ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
+ ds->desc_hw[num].count = len;
+ ds->desc_hw[num].saddr = src;
+ ds->desc_hw[num].daddr = dst;
+ ds->desc_hw[num].config = ccfg;
+}
+
+static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
+ struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_desc_sw *ds;
+ size_t copy = 0;
+ int num = 0;
+
+ if (!len)
+ return NULL;
+
+ num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
+ ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
+ if (!ds) {
+ dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
+ return NULL;
+ }
+ ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
+ ds->size = len;
+ ds->desc_num = num;
+ num = 0;
+
+ if (!c->ccfg) {
+ /* default is memtomem, without calling device_control */
+ c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
+ c->ccfg |= (0xf << 20) | (0xf << 24); /* burst = 16 */
+ c->ccfg |= (0x3 << 12) | (0x3 << 16); /* width = 64 bit */
+ }
+
+ do {
+ copy = min_t(size_t, len, DMA_MAX_SIZE);
+ k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
+
+ if (c->dir == DMA_MEM_TO_DEV) {
+ src += copy;
+ } else if (c->dir == DMA_DEV_TO_MEM) {
+ dst += copy;
+ } else {
+ src += copy;
+ dst += copy;
+ }
+ len -= copy;
+ } while (len);
+
+ ds->desc_hw[num-1].lli = 0; /* end of link */
+ return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
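A quick check of the splitting arithmetic above, with illustrative numbers only: a 20480-byte copy with DMA_MAX_SIZE = 0x1ffc (8188) needs DIV_ROUND_UP(20480, 8188) = 3 hardware descriptors.

	/* Illustrative split of one memcpy into LLI elements. */
	size_t len = 20480, copy;
	int num = DIV_ROUND_UP(len, DMA_MAX_SIZE);	/* 3 descriptors */

	do {
		copy = min_t(size_t, len, DMA_MAX_SIZE);	/* 8188, 8188, 4104 */
		len -= copy;
	} while (len);
	/* the last element's lli is then cleared to terminate the chain */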
+
+static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
+ enum dma_transfer_direction dir, unsigned long flags, void *context)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_desc_sw *ds;
+ size_t len, avail, total = 0;
+ struct scatterlist *sg;
+ dma_addr_t addr, src = 0, dst = 0;
+ int num = sglen, i;
+
+	if (!sgl)
+ return NULL;
+
+ for_each_sg(sgl, sg, sglen, i) {
+ avail = sg_dma_len(sg);
+ if (avail > DMA_MAX_SIZE)
+ num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
+ }
+
+ ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
+ if (!ds) {
+ dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
+ return NULL;
+ }
+ ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
+ ds->desc_num = num;
+ num = 0;
+
+ for_each_sg(sgl, sg, sglen, i) {
+ addr = sg_dma_address(sg);
+ avail = sg_dma_len(sg);
+ total += avail;
+
+ do {
+ len = min_t(size_t, avail, DMA_MAX_SIZE);
+
+ if (dir == DMA_MEM_TO_DEV) {
+ src = addr;
+ dst = c->dev_addr;
+ } else if (dir == DMA_DEV_TO_MEM) {
+ src = c->dev_addr;
+ dst = addr;
+ }
+
+ k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
+
+ addr += len;
+ avail -= len;
+ } while (avail);
+ }
+
+ ds->desc_hw[num-1].lli = 0; /* end of link */
+ ds->size = total;
+ return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_dev *d = to_k3_dma(chan->device);
+ struct dma_slave_config *cfg = (void *)arg;
+ struct k3_dma_phy *p = c->phy;
+ unsigned long flags;
+ u32 maxburst = 0, val = 0;
+ enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ LIST_HEAD(head);
+
+ switch (cmd) {
+ case DMA_SLAVE_CONFIG:
+ if (cfg == NULL)
+ return -EINVAL;
+ c->dir = cfg->direction;
+ if (c->dir == DMA_DEV_TO_MEM) {
+ c->ccfg = CX_CFG_DSTINCR;
+ c->dev_addr = cfg->src_addr;
+ maxburst = cfg->src_maxburst;
+ width = cfg->src_addr_width;
+ } else if (c->dir == DMA_MEM_TO_DEV) {
+ c->ccfg = CX_CFG_SRCINCR;
+ c->dev_addr = cfg->dst_addr;
+ maxburst = cfg->dst_maxburst;
+ width = cfg->dst_addr_width;
+ }
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ val = __ffs(width);
+ break;
+ default:
+ val = 3;
+ break;
+ }
+ c->ccfg |= (val << 12) | (val << 16);
+
+ if ((maxburst == 0) || (maxburst > 16))
+ val = 16;
+ else
+ val = maxburst - 1;
+ c->ccfg |= (val << 20) | (val << 24);
+ c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
+
+ /* specific request line */
+ c->ccfg |= c->vc.chan.chan_id << 4;
+ break;
+
+ case DMA_TERMINATE_ALL:
+ dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
+
+ /* Prevent this channel being scheduled */
+ spin_lock(&d->lock);
+ list_del_init(&c->node);
+ spin_unlock(&d->lock);
+
+ /* Clear the tx descriptor lists */
+ spin_lock_irqsave(&c->vc.lock, flags);
+ vchan_get_all_descriptors(&c->vc, &head);
+ if (p) {
+ /* vchan is assigned to a pchan - stop the channel */
+ k3_dma_terminate_chan(p, d);
+ c->phy = NULL;
+ p->vchan = NULL;
+ p->ds_run = p->ds_done = NULL;
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ vchan_dma_desc_free_list(&c->vc, &head);
+ break;
+
+ case DMA_PAUSE:
+ dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
+ if (c->status == DMA_IN_PROGRESS) {
+ c->status = DMA_PAUSED;
+ if (p) {
+ k3_dma_pause_dma(p, false);
+ } else {
+ spin_lock(&d->lock);
+ list_del_init(&c->node);
+ spin_unlock(&d->lock);
+ }
+ }
+ break;
+
+ case DMA_RESUME:
+ dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
+ spin_lock_irqsave(&c->vc.lock, flags);
+ if (c->status == DMA_PAUSED) {
+ c->status = DMA_IN_PROGRESS;
+ if (p) {
+ k3_dma_pause_dma(p, true);
+ } else if (!list_empty(&c->vc.desc_issued)) {
+ spin_lock(&d->lock);
+ list_add_tail(&c->node, &d->chan_pending);
+ spin_unlock(&d->lock);
+ }
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ break;
+ default:
+ return -ENXIO;
+ }
+ return 0;
+}
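+
+/*
+ * Illustration of the ccfg encoding above (values assumed, not part
+ * of this patch): for a 4-byte bus width (__ffs(4) == 2) and
+ * maxburst == 8, DMA_SLAVE_CONFIG would set
+ *
+ * c->ccfg |= (2 << 12) | (2 << 16); (src/dst width code)
+ * c->ccfg |= (7 << 20) | (7 << 24); (burst = maxburst - 1)
+ */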
+
+static void k3_dma_free_desc(struct virt_dma_desc *vd)
+{
+ struct k3_dma_desc_sw *ds =
+ container_of(vd, struct k3_dma_desc_sw, vd);
+
+ kfree(ds);
+}
+
+static struct of_device_id k3_pdma_dt_ids[] = {
+ { .compatible = "hisilicon,k3-dma-1.0", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);
+
+static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct k3_dma_dev *d = ofdma->of_dma_data;
+ unsigned int request = dma_spec->args[0];
+
+ if (request >= d->dma_requests)
+ return NULL;
+
+ return dma_get_slave_channel(&(d->chans[request].vc.chan));
+}
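+
+/*
+ * A client references a channel by its request line number, e.g.
+ * (device tree snippet, node names assumed):
+ *
+ * dmas = <&dma0 12>;
+ * dma-names = "rx";
+ *
+ * dma_spec->args[0] above then carries the request line (12).
+ */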
+
+static int k3_dma_probe(struct platform_device *op)
+{
+ struct k3_dma_dev *d;
+ const struct of_device_id *of_id;
+ struct resource *iores;
+ int i, ret, irq = 0;
+
+ iores = platform_get_resource(op, IORESOURCE_MEM, 0);
+ if (!iores)
+ return -EINVAL;
+
+ d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ d->base = devm_ioremap_resource(&op->dev, iores);
+ if (IS_ERR(d->base))
+ return PTR_ERR(d->base);
+
+ of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
+ if (of_id) {
+ of_property_read_u32((&op->dev)->of_node,
+ "dma-channels", &d->dma_channels);
+ of_property_read_u32((&op->dev)->of_node,
+ "dma-requests", &d->dma_requests);
+ }
+
+ d->clk = devm_clk_get(&op->dev, NULL);
+ if (IS_ERR(d->clk)) {
+ dev_err(&op->dev, "no dma clk\n");
+ return PTR_ERR(d->clk);
+ }
+
+ irq = platform_get_irq(op, 0);
+ ret = devm_request_irq(&op->dev, irq,
+ k3_dma_int_handler, IRQF_DISABLED, DRIVER_NAME, d);
+ if (ret)
+ return ret;
+
+ /* init phy channel */
+ d->phy = devm_kzalloc(&op->dev,
+ d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL);
+ if (d->phy == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < d->dma_channels; i++) {
+ struct k3_dma_phy *p = &d->phy[i];
+
+ p->idx = i;
+ p->base = d->base + i * 0x40;
+ }
+
+ INIT_LIST_HEAD(&d->slave.channels);
+ dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
+ dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
+ d->slave.dev = &op->dev;
+ d->slave.device_alloc_chan_resources = k3_dma_alloc_chan_resources;
+ d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
+ d->slave.device_tx_status = k3_dma_tx_status;
+ d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
+ d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
+ d->slave.device_issue_pending = k3_dma_issue_pending;
+ d->slave.device_control = k3_dma_control;
+ d->slave.copy_align = DMA_ALIGN;
+ d->slave.chancnt = d->dma_requests;
+
+ /* init virtual channel */
+ d->chans = devm_kzalloc(&op->dev,
+ d->dma_requests * sizeof(struct k3_dma_chan), GFP_KERNEL);
+ if (d->chans == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < d->dma_requests; i++) {
+ struct k3_dma_chan *c = &d->chans[i];
+
+ c->status = DMA_IN_PROGRESS;
+ INIT_LIST_HEAD(&c->node);
+ c->vc.desc_free = k3_dma_free_desc;
+ vchan_init(&c->vc, &d->slave);
+ }
+
+ /* Enable clock before accessing registers */
+ ret = clk_prepare_enable(d->clk);
+ if (ret < 0) {
+ dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ k3_dma_enable_dma(d, true);
+
+ ret = dma_async_device_register(&d->slave);
+ if (ret)
+ return ret;
+
+ ret = of_dma_controller_register((&op->dev)->of_node,
+ k3_of_dma_simple_xlate, d);
+ if (ret)
+ goto of_dma_register_fail;
+
+ spin_lock_init(&d->lock);
+ INIT_LIST_HEAD(&d->chan_pending);
+ tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
+ platform_set_drvdata(op, d);
+ dev_info(&op->dev, "initialized\n");
+
+ return 0;
+
+of_dma_register_fail:
+ dma_async_device_unregister(&d->slave);
+ return ret;
+}
+
+static int k3_dma_remove(struct platform_device *op)
+{
+ struct k3_dma_chan *c, *cn;
+ struct k3_dma_dev *d = platform_get_drvdata(op);
+
+ dma_async_device_unregister(&d->slave);
+ of_dma_controller_free((&op->dev)->of_node);
+
+ list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
+ list_del(&c->vc.chan.device_node);
+ tasklet_kill(&c->vc.task);
+ }
+ tasklet_kill(&d->task);
+ clk_disable_unprepare(d->clk);
+ return 0;
+}
+
+static int k3_dma_suspend(struct device *dev)
+{
+ struct k3_dma_dev *d = dev_get_drvdata(dev);
+ u32 stat = 0;
+
+ stat = k3_dma_get_chan_stat(d);
+ if (stat) {
+ dev_warn(d->slave.dev,
+ "chan %d is running fail to suspend\n", stat);
+ return -1;
+ }
+ k3_dma_enable_dma(d, false);
+ clk_disable_unprepare(d->clk);
+ return 0;
+}
+
+static int k3_dma_resume(struct device *dev)
+{
+ struct k3_dma_dev *d = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = clk_prepare_enable(d->clk);
+ if (ret < 0) {
+ dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
+ return ret;
+ }
+ k3_dma_enable_dma(d, true);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
+
+static struct platform_driver k3_pdma_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &k3_dma_pmops,
+ .of_match_table = k3_pdma_dt_ids,
+ },
+ .probe = k3_dma_probe,
+ .remove = k3_dma_remove,
+};
+
+module_platform_driver(k3_pdma_driver);
+
+MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
+MODULE_ALIAS("platform:k3dma");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index c26699f9c4d..ff8d7827f8c 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -18,7 +18,9 @@
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
+#include <linux/of_dma.h>
#include <linux/of.h>
+#include <linux/dma/mmp-pdma.h>
#include "dmaengine.h"
@@ -47,6 +49,8 @@
#define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */
#define DCSR_EORINTR (1 << 9) /* The end of Receive */
+#define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + \
+ (((n) & 0x3f) << 2))
#define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */
#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */
@@ -69,7 +73,7 @@
#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
#define PDMA_ALIGNMENT 3
-#define PDMA_MAX_DESC_BYTES 0x1000
+#define PDMA_MAX_DESC_BYTES DCMD_LENGTH
struct mmp_pdma_desc_hw {
u32 ddadr; /* Points to the next descriptor + flags */
@@ -94,6 +98,9 @@ struct mmp_pdma_chan {
struct mmp_pdma_phy *phy;
enum dma_transfer_direction dir;
+ struct mmp_pdma_desc_sw *cyclic_first; /* first desc_sw if channel
+ * is in cyclic mode */
+
/* channel's basic info */
struct tasklet_struct tasklet;
u32 dcmd;
@@ -105,6 +112,7 @@ struct mmp_pdma_chan {
struct list_head chain_pending; /* Link descriptors queue for pending */
struct list_head chain_running; /* Link descriptors queue for running */
bool idle; /* channel state machine */
+ bool byte_align;
struct dma_pool *desc_pool; /* Descriptors pool */
};
@@ -121,6 +129,7 @@ struct mmp_pdma_device {
struct device *dev;
struct dma_device device;
struct mmp_pdma_phy *phy;
+ spinlock_t phy_lock; /* protect alloc/free phy channels */
};
#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
@@ -137,15 +146,21 @@ static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
static void enable_chan(struct mmp_pdma_phy *phy)
{
- u32 reg;
+ u32 reg, dalgn;
if (!phy->vchan)
return;
- reg = phy->vchan->drcmr;
- reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2);
+ reg = DRCMR(phy->vchan->drcmr);
writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+ dalgn = readl(phy->base + DALGN);
+ if (phy->vchan->byte_align)
+ dalgn |= 1 << phy->idx;
+ else
+ dalgn &= ~(1 << phy->idx);
+ writel(dalgn, phy->base + DALGN);
+
reg = (phy->idx << 2) + DCSR;
writel(readl(phy->base + reg) | DCSR_RUN,
phy->base + reg);
@@ -218,7 +233,8 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
int prio, i;
struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
- struct mmp_pdma_phy *phy;
+ struct mmp_pdma_phy *phy, *found = NULL;
+ unsigned long flags;
/*
* dma channel priorities
@@ -227,6 +243,8 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
* ch 8 - 11, 24 - 27 <--> (2)
* ch 12 - 15, 28 - 31 <--> (3)
*/
+
+ spin_lock_irqsave(&pdev->phy_lock, flags);
for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
for (i = 0; i < pdev->dma_channels; i++) {
if (prio != ((i & 0xf) >> 2))
@@ -234,31 +252,34 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
phy = &pdev->phy[i];
if (!phy->vchan) {
phy->vchan = pchan;
- return phy;
+ found = phy;
+ goto out_unlock;
}
}
}
- return NULL;
+out_unlock:
+ spin_unlock_irqrestore(&pdev->phy_lock, flags);
+ return found;
}
-/* desc->tx_list ==> pending list */
-static void append_pending_queue(struct mmp_pdma_chan *chan,
- struct mmp_pdma_desc_sw *desc)
+static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
- struct mmp_pdma_desc_sw *tail =
- to_mmp_pdma_desc(chan->chain_pending.prev);
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
+ unsigned long flags;
+ u32 reg;
- if (list_empty(&chan->chain_pending))
- goto out_splice;
+ if (!pchan->phy)
+ return;
- /* one irq per queue, even appended */
- tail->desc.ddadr = desc->async_tx.phys;
- tail->desc.dcmd &= ~DCMD_ENDIRQEN;
+ /* clear the channel mapping in DRCMR */
+ reg = DRCMR(pchan->phy->vchan->drcmr);
+ writel(0, pchan->phy->base + reg);
- /* softly link to pending list */
-out_splice:
- list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
+ spin_lock_irqsave(&pdev->phy_lock, flags);
+ pchan->phy->vchan = NULL;
+ pchan->phy = NULL;
+ spin_unlock_irqrestore(&pdev->phy_lock, flags);
}
/**
@@ -277,10 +298,7 @@ static void start_pending_queue(struct mmp_pdma_chan *chan)
if (list_empty(&chan->chain_pending)) {
/* chance to re-fetch phy channel with higher prio */
- if (chan->phy) {
- chan->phy->vchan = NULL;
- chan->phy = NULL;
- }
+ mmp_pdma_free_phy(chan);
dev_dbg(chan->dev, "no pending list\n");
return;
}
@@ -326,14 +344,16 @@ static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
cookie = dma_cookie_assign(&child->async_tx);
}
- append_pending_queue(chan, desc);
+ /* softly link to pending list - desc->tx_list ==> pending list */
+ list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
spin_unlock_irqrestore(&chan->desc_lock, flags);
return cookie;
}
-struct mmp_pdma_desc_sw *mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
+static struct mmp_pdma_desc_sw *
+mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
struct mmp_pdma_desc_sw *desc;
dma_addr_t pdesc;
@@ -377,10 +397,7 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
dev_err(chan->dev, "unable to allocate descriptor pool\n");
return -ENOMEM;
}
- if (chan->phy) {
- chan->phy->vchan = NULL;
- chan->phy = NULL;
- }
+ mmp_pdma_free_phy(chan);
chan->idle = true;
chan->dev_addr = 0;
return 1;
@@ -411,10 +428,7 @@ static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
chan->desc_pool = NULL;
chan->idle = true;
chan->dev_addr = 0;
- if (chan->phy) {
- chan->phy->vchan = NULL;
- chan->phy = NULL;
- }
+ mmp_pdma_free_phy(chan);
return;
}
@@ -434,6 +448,7 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
return NULL;
chan = to_mmp_pdma_chan(dchan);
+ chan->byte_align = false;
if (!chan->dir) {
chan->dir = DMA_MEM_TO_MEM;
@@ -450,6 +465,8 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
}
copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
+ if (dma_src & 0x7 || dma_dst & 0x7)
+ chan->byte_align = true;
new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
new->desc.dsadr = dma_src;
@@ -486,6 +503,8 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
new->desc.ddadr = DDADR_STOP;
new->desc.dcmd |= DCMD_ENDIRQEN;
+ chan->cyclic_first = NULL;
+
return &first->async_tx;
fail:
@@ -509,12 +528,16 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
if ((sgl == NULL) || (sg_len == 0))
return NULL;
+ chan->byte_align = false;
+
for_each_sg(sgl, sg, sg_len, i) {
addr = sg_dma_address(sg);
avail = sg_dma_len(sg);
do {
len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
+ if (addr & 0x7)
+ chan->byte_align = true;
/* allocate and populate the descriptor */
new = mmp_pdma_alloc_descriptor(chan);
@@ -557,6 +580,94 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
new->desc.ddadr = DDADR_STOP;
new->desc.dcmd |= DCMD_ENDIRQEN;
+ chan->dir = dir;
+ chan->cyclic_first = NULL;
+
+ return &first->async_tx;
+
+fail:
+ if (first)
+ mmp_pdma_free_desc_list(chan, &first->tx_list);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic(
+ struct dma_chan *dchan, dma_addr_t buf_addr, size_t len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct mmp_pdma_chan *chan;
+ struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
+ dma_addr_t dma_src, dma_dst;
+
+ if (!dchan || !len || !period_len)
+ return NULL;
+
+ /* the buffer length must be a multiple of period_len */
+ if (len % period_len != 0)
+ return NULL;
+
+ if (period_len > PDMA_MAX_DESC_BYTES)
+ return NULL;
+
+ chan = to_mmp_pdma_chan(dchan);
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ dma_src = buf_addr;
+ dma_dst = chan->dev_addr;
+ break;
+ case DMA_DEV_TO_MEM:
+ dma_dst = buf_addr;
+ dma_src = chan->dev_addr;
+ break;
+ default:
+ dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
+ return NULL;
+ }
+
+ chan->dir = direction;
+
+ do {
+ /* Allocate the link descriptor from DMA pool */
+ new = mmp_pdma_alloc_descriptor(chan);
+ if (!new) {
+ dev_err(chan->dev, "no memory for desc\n");
+ goto fail;
+ }
+
+ new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN |
+ (DCMD_LENGTH & period_len);
+ new->desc.dsadr = dma_src;
+ new->desc.dtadr = dma_dst;
+
+ if (!first)
+ first = new;
+ else
+ prev->desc.ddadr = new->async_tx.phys;
+
+ new->async_tx.cookie = 0;
+ async_tx_ack(&new->async_tx);
+
+ prev = new;
+ len -= period_len;
+
+ if (chan->dir == DMA_MEM_TO_DEV)
+ dma_src += period_len;
+ else
+ dma_dst += period_len;
+
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
+ } while (len);
+
+ first->async_tx.flags = flags; /* client is in control of this ack */
+ first->async_tx.cookie = -EBUSY;
+
+ /* make the cyclic link */
+ new->desc.ddadr = first->async_tx.phys;
+ chan->cyclic_first = first;
+
return &first->async_tx;
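+
+ /*
+ * The ring is now circular: ddadr of the last descriptor points
+ * back to the first, every period carries DCMD_ENDIRQEN so one
+ * interrupt fires per completed period, and dma_do_tasklet()
+ * only invokes the client callback for cyclic transfers instead
+ * of retiring descriptors.
+ */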
fail:
@@ -581,10 +692,7 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
switch (cmd) {
case DMA_TERMINATE_ALL:
disable_chan(chan->phy);
- if (chan->phy) {
- chan->phy->vchan = NULL;
- chan->phy = NULL;
- }
+ mmp_pdma_free_phy(chan);
spin_lock_irqsave(&chan->desc_lock, flags);
mmp_pdma_free_desc_list(chan, &chan->chain_pending);
mmp_pdma_free_desc_list(chan, &chan->chain_running);
@@ -619,8 +727,13 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
chan->dcmd |= DCMD_BURST32;
chan->dir = cfg->direction;
- chan->drcmr = cfg->slave_id;
chan->dev_addr = addr;
+ /* FIXME: drivers should be ported over to use the filter
+ * function. Once that's done, the following two lines can
+ * be removed.
+ */
+ if (cfg->slave_id)
+ chan->drcmr = cfg->slave_id;
break;
default:
return -ENOSYS;
@@ -632,15 +745,7 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
- struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
- enum dma_status ret;
- unsigned long flags;
-
- spin_lock_irqsave(&chan->desc_lock, flags);
- ret = dma_cookie_status(dchan, cookie, txstate);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
-
- return ret;
+ return dma_cookie_status(dchan, cookie, txstate);
}
/**
@@ -669,29 +774,51 @@ static void dma_do_tasklet(unsigned long data)
LIST_HEAD(chain_cleanup);
unsigned long flags;
- /* submit pending list; callback for each desc; free desc */
+ if (chan->cyclic_first) {
+ dma_async_tx_callback cb = NULL;
+ void *cb_data = NULL;
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ desc = chan->cyclic_first;
+ cb = desc->async_tx.callback;
+ cb_data = desc->async_tx.callback_param;
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ if (cb)
+ cb(cb_data);
- /* update the cookie if we have some descriptors to cleanup */
- if (!list_empty(&chan->chain_running)) {
- dma_cookie_t cookie;
+ return;
+ }
- desc = to_mmp_pdma_desc(chan->chain_running.prev);
- cookie = desc->async_tx.cookie;
- dma_cookie_complete(&desc->async_tx);
+ /* submit pending list; callback for each desc; free desc */
+ spin_lock_irqsave(&chan->desc_lock, flags);
- dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
+ list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
+ /*
+ * move the descriptors to a temporary list so we can drop
+ * the lock during the entire cleanup operation
+ */
+ list_del(&desc->node);
+ list_add(&desc->node, &chain_cleanup);
+
+ /*
+ * Look for the first list entry which has the ENDIRQEN flag
+ * set. That is the descriptor we got an interrupt for, so
+ * complete that transaction and its cookie.
+ */
+ if (desc->desc.dcmd & DCMD_ENDIRQEN) {
+ dma_cookie_t cookie = desc->async_tx.cookie;
+ dma_cookie_complete(&desc->async_tx);
+ dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
+ break;
+ }
}
/*
- * move the descriptors to a temporary list so we can drop the lock
- * during the entire cleanup operation
+ * The hardware is idle and ready for more when the
+ * chain_running list is empty.
*/
- list_splice_tail_init(&chan->chain_running, &chain_cleanup);
-
- /* the hardware is now idle and ready for more */
- chan->idle = true;
+ chan->idle = list_empty(&chan->chain_running);
/* Start any pending transactions automatically */
start_pending_queue(chan);
@@ -763,6 +890,39 @@ static struct of_device_id mmp_pdma_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
+static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct mmp_pdma_device *d = ofdma->of_dma_data;
+ struct dma_chan *chan, *candidate;
+
+retry:
+ candidate = NULL;
+
+ /* walk the list of channels registered with the current instance and
+ * find one that is currently unused */
+ list_for_each_entry(chan, &d->device.channels, device_node)
+ if (chan->client_count == 0) {
+ candidate = chan;
+ break;
+ }
+
+ if (!candidate)
+ return NULL;
+
+ /* dma_get_slave_channel will return NULL if we lost a race between
+ * the lookup and the reservation */
+ chan = dma_get_slave_channel(candidate);
+
+ if (chan) {
+ struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
+ c->drcmr = dma_spec->args[0];
+ return chan;
+ }
+
+ goto retry;
+}
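+
+/*
+ * Note: the retry above handles the race where another client claims
+ * the candidate between the unlocked list walk and
+ * dma_get_slave_channel(); the walk restarts and NULL is returned
+ * only once no unused channel remains.
+ */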
+
static int mmp_pdma_probe(struct platform_device *op)
{
struct mmp_pdma_device *pdev;
@@ -777,10 +937,9 @@ static int mmp_pdma_probe(struct platform_device *op)
return -ENOMEM;
pdev->dev = &op->dev;
- iores = platform_get_resource(op, IORESOURCE_MEM, 0);
- if (!iores)
- return -EINVAL;
+ spin_lock_init(&pdev->phy_lock);
+ iores = platform_get_resource(op, IORESOURCE_MEM, 0);
pdev->base = devm_ioremap_resource(pdev->dev, iores);
if (IS_ERR(pdev->base))
return PTR_ERR(pdev->base);
@@ -825,13 +984,15 @@ static int mmp_pdma_probe(struct platform_device *op)
dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
- dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
+ dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
+ dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
pdev->device.dev = &op->dev;
pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
pdev->device.device_tx_status = mmp_pdma_tx_status;
pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
+ pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
pdev->device.device_issue_pending = mmp_pdma_issue_pending;
pdev->device.device_control = mmp_pdma_control;
pdev->device.copy_align = PDMA_ALIGNMENT;
@@ -847,7 +1008,17 @@ static int mmp_pdma_probe(struct platform_device *op)
return ret;
}
- dev_info(pdev->device.dev, "initialized\n");
+ if (op->dev.of_node) {
+ /* Device-tree DMA controller registration */
+ ret = of_dma_controller_register(op->dev.of_node,
+ mmp_pdma_dma_xlate, pdev);
+ if (ret < 0) {
+ dev_err(&op->dev, "of_dma_controller_register failed\n");
+ return ret;
+ }
+ }
+
+ dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
return 0;
}
@@ -867,6 +1038,19 @@ static struct platform_driver mmp_pdma_driver = {
.remove = mmp_pdma_remove,
};
+bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
+
+ if (chan->device->dev->driver != &mmp_pdma_driver.driver)
+ return false;
+
+ c->drcmr = *(unsigned int *) param;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
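+
+/*
+ * Minimal client-side sketch (values assumed): requesting a channel
+ * bound to DRCMR line 47 through the generic filter interface:
+ *
+ * dma_cap_mask_t mask;
+ * unsigned int drcmr = 47;
+ *
+ * dma_cap_zero(mask);
+ * dma_cap_set(DMA_SLAVE, mask);
+ * chan = dma_request_channel(mask, mmp_pdma_filter_fn, &drcmr);
+ */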
+
module_platform_driver(mmp_pdma_driver);
MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver");
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 9b9366537d7..38cb517fb2e 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -460,7 +460,8 @@ static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
{
struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
- dma_set_residue(txstate, tdmac->buf_len - tdmac->pos);
+ dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
+ tdmac->buf_len - tdmac->pos);
return tdmac->status;
}
@@ -549,9 +550,6 @@ static int mmp_tdma_probe(struct platform_device *pdev)
}
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iores)
- return -EINVAL;
-
tdev->base = devm_ioremap_resource(&pdev->dev, iores);
if (IS_ERR(tdev->base))
return PTR_ERR(tdev->base);
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 2d956732aa3..2fe43537733 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -556,15 +556,7 @@ static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
- enum dma_status ret;
- unsigned long flags;
-
- spin_lock_irqsave(&mchan->lock, flags);
- ret = dma_cookie_status(chan, cookie, txstate);
- spin_unlock_irqrestore(&mchan->lock, flags);
-
- return ret;
+ return dma_cookie_status(chan, cookie, txstate);
}
/* Prepare descriptor for memory to memory copy */
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 200f1a3c9a4..536dcb8ba5f 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -64,7 +64,7 @@ static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
int src_idx)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
- return hw_desc->phy_src_addr[src_idx];
+ return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)];
}
@@ -107,32 +107,32 @@ static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
int index, dma_addr_t addr)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
- hw_desc->phy_src_addr[index] = addr;
+ hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
if (desc->type == DMA_XOR)
hw_desc->desc_command |= (1 << index);
}
static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
- return __raw_readl(XOR_CURR_DESC(chan));
+ return readl_relaxed(XOR_CURR_DESC(chan));
}
static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
u32 next_desc_addr)
{
- __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
+ writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}
static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
- u32 val = __raw_readl(XOR_INTR_MASK(chan));
+ u32 val = readl_relaxed(XOR_INTR_MASK(chan));
val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
- __raw_writel(val, XOR_INTR_MASK(chan));
+ writel_relaxed(val, XOR_INTR_MASK(chan));
}
static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
- u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
+ u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
return intr_cause;
}
@@ -149,13 +149,13 @@ static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
u32 val = ~(1 << (chan->idx * 16));
dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
- __raw_writel(val, XOR_INTR_CAUSE(chan));
+ writel_relaxed(val, XOR_INTR_CAUSE(chan));
}
static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
u32 val = 0xFFFF0000 >> (chan->idx * 16);
- __raw_writel(val, XOR_INTR_CAUSE(chan));
+ writel_relaxed(val, XOR_INTR_CAUSE(chan));
}
static int mv_can_chain(struct mv_xor_desc_slot *desc)
@@ -173,7 +173,7 @@ static void mv_set_mode(struct mv_xor_chan *chan,
enum dma_transaction_type type)
{
u32 op_mode;
- u32 config = __raw_readl(XOR_CONFIG(chan));
+ u32 config = readl_relaxed(XOR_CONFIG(chan));
switch (type) {
case DMA_XOR:
@@ -192,7 +192,14 @@ static void mv_set_mode(struct mv_xor_chan *chan,
config &= ~0x7;
config |= op_mode;
- __raw_writel(config, XOR_CONFIG(chan));
+
+#if defined(__BIG_ENDIAN)
+ config |= XOR_DESCRIPTOR_SWAP;
+#else
+ config &= ~XOR_DESCRIPTOR_SWAP;
+#endif
+
+ writel_relaxed(config, XOR_CONFIG(chan));
chan->current_type = type;
}
@@ -201,14 +208,14 @@ static void mv_chan_activate(struct mv_xor_chan *chan)
u32 activation;
dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
- activation = __raw_readl(XOR_ACTIVATION(chan));
+ activation = readl_relaxed(XOR_ACTIVATION(chan));
activation |= 0x1;
- __raw_writel(activation, XOR_ACTIVATION(chan));
+ writel_relaxed(activation, XOR_ACTIVATION(chan));
}
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
- u32 state = __raw_readl(XOR_ACTIVATION(chan));
+ u32 state = readl_relaxed(XOR_ACTIVATION(chan));
state = (state >> 4) & 0x3;
@@ -647,7 +654,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
dev_dbg(mv_chan_to_devp(mv_chan),
"%s sw_desc %p async_tx %p\n",
- __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);
+ __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
return sw_desc ? &sw_desc->async_tx : NULL;
}
@@ -755,22 +762,22 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
u32 val;
- val = __raw_readl(XOR_CONFIG(chan));
+ val = readl_relaxed(XOR_CONFIG(chan));
dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);
- val = __raw_readl(XOR_ACTIVATION(chan));
+ val = readl_relaxed(XOR_ACTIVATION(chan));
dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);
- val = __raw_readl(XOR_INTR_CAUSE(chan));
+ val = readl_relaxed(XOR_INTR_CAUSE(chan));
dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);
- val = __raw_readl(XOR_INTR_MASK(chan));
+ val = readl_relaxed(XOR_INTR_MASK(chan));
dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);
- val = __raw_readl(XOR_ERROR_CAUSE(chan));
+ val = readl_relaxed(XOR_ERROR_CAUSE(chan));
dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);
- val = __raw_readl(XOR_ERROR_ADDR(chan));
+ val = readl_relaxed(XOR_ERROR_ADDR(chan));
dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}
@@ -1029,10 +1036,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
struct dma_device *dma_dev;
mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
- if (!mv_chan) {
- ret = -ENOMEM;
- goto err_free_dma;
- }
+ if (!mv_chan)
+ return ERR_PTR(-ENOMEM);
mv_chan->idx = idx;
mv_chan->irq = irq;
@@ -1166,7 +1171,7 @@ static int mv_xor_probe(struct platform_device *pdev)
{
const struct mbus_dram_target_info *dram;
struct mv_xor_device *xordev;
- struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
+ struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct resource *res;
int i, ret;
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index c619359cb7f..06b067f24c9 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -29,8 +29,10 @@
#define MV_XOR_THRESHOLD 1
#define MV_XOR_MAX_CHANNELS 2
+/* Values for the XOR_CONFIG register */
#define XOR_OPERATION_MODE_XOR 0
#define XOR_OPERATION_MODE_MEMCPY 2
+#define XOR_DESCRIPTOR_SWAP BIT(14)
#define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4))
@@ -143,7 +145,16 @@ struct mv_xor_desc_slot {
#endif
};
-/* This structure describes XOR descriptor size 64bytes */
+/*
+ * This structure describes the 64-byte XOR descriptor. The
+ * mv_phy_src_idx() macro must be used when indexing into the
+ * phy_src_addr[] array. This is because the 'descriptor swap'
+ * feature, used on big-endian systems, swaps descriptor data
+ * within blocks of 8 bytes, so each pair of consecutive
+ * phy_src_addr[] entries is swapped in big-endian, which explains
+ * the two mv_phy_src_idx() implementations.
+ */
+#if defined(__LITTLE_ENDIAN)
struct mv_xor_desc {
u32 status; /* descriptor execution status */
u32 crc32_result; /* result of CRC-32 calculation */
@@ -155,6 +166,21 @@ struct mv_xor_desc {
u32 reserved0;
u32 reserved1;
};
+#define mv_phy_src_idx(src_idx) (src_idx)
+#else
+struct mv_xor_desc {
+ u32 crc32_result; /* result of CRC-32 calculation */
+ u32 status; /* descriptor execution status */
+ u32 phy_next_desc; /* next descriptor address pointer */
+ u32 desc_command; /* type of operation to be carried out */
+ u32 phy_dest_addr; /* destination block address */
+ u32 byte_count; /* size of src/dst blocks in bytes */
+ u32 phy_src_addr[8]; /* source block addresses */
+ u32 reserved1;
+ u32 reserved0;
+};
+#define mv_phy_src_idx(src_idx) (src_idx ^ 1)
+#endif
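+
+/*
+ * For illustration (not part of this patch): on big-endian kernels
+ * mv_phy_src_idx(0) == 1 and mv_phy_src_idx(1) == 0, so
+ *
+ * hw_desc->phy_src_addr[mv_phy_src_idx(0)] = addr;
+ *
+ * stores the first source address in phy_src_addr[1], matching the
+ * 8-byte descriptor swap performed by the engine.
+ */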
#define to_mv_sw_desc(addr_hw_desc) \
container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 719593002ab..ccd13df841d 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -23,7 +23,6 @@
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/module.h>
-#include <linux/fsl/mxs-dma.h>
#include <linux/stmp_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -197,24 +196,6 @@ static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
return container_of(chan, struct mxs_dma_chan, chan);
}
-int mxs_dma_is_apbh(struct dma_chan *chan)
-{
- struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
- struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
-
- return dma_is_apbh(mxs_dma);
-}
-EXPORT_SYMBOL_GPL(mxs_dma_is_apbh);
-
-int mxs_dma_is_apbx(struct dma_chan *chan)
-{
- struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
- struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
-
- return !dma_is_apbh(mxs_dma);
-}
-EXPORT_SYMBOL_GPL(mxs_dma_is_apbx);
-
static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
{
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -349,13 +330,9 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
- struct mxs_dma_data *data = chan->private;
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int ret;
- if (data)
- mxs_chan->chan_irq = data->chan_irq;
-
mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
CCW_BLOCK_SIZE, &mxs_chan->ccw_phys,
GFP_KERNEL);
@@ -622,10 +599,8 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
- dma_cookie_t last_used;
- last_used = chan->cookie;
- dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0);
+ dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
return mxs_chan->status;
}
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 75334bdd2c5..0b88dd3d05f 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -160,7 +160,8 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
count = of_property_count_strings(np, "dma-names");
if (count < 0) {
- pr_err("%s: dma-names property missing or empty\n", __func__);
+ pr_err("%s: dma-names property of node '%s' missing or empty\n",
+ __func__, np->full_name);
return NULL;
}
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 0bbdea5059f..61fdc54a3c8 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -564,14 +564,7 @@ static void pd_free_chan_resources(struct dma_chan *chan)
static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- struct pch_dma_chan *pd_chan = to_pd_chan(chan);
- enum dma_status ret;
-
- spin_lock_irq(&pd_chan->lock);
- ret = dma_cookie_status(chan, cookie, txstate);
- spin_unlock_irq(&pd_chan->lock);
-
- return ret;
+ return dma_cookie_status(chan, cookie, txstate);
}
static void pd_issue_pending(struct dma_chan *chan)
@@ -1036,3 +1029,4 @@ MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
"DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, pch_dma_id_table);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index fa645d82500..a562d24d20b 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -545,6 +545,8 @@ struct dma_pl330_chan {
/* List of to be xfered descriptors */
struct list_head work_list;
+ /* List of completed descriptors */
+ struct list_head completed_list;
/* Pointer to the DMAC that manages this channel,
* NULL if the channel is available to be acquired.
@@ -2198,66 +2200,6 @@ to_desc(struct dma_async_tx_descriptor *tx)
return container_of(tx, struct dma_pl330_desc, txd);
}
-static inline void free_desc_list(struct list_head *list)
-{
- struct dma_pl330_dmac *pdmac;
- struct dma_pl330_desc *desc;
- struct dma_pl330_chan *pch = NULL;
- unsigned long flags;
-
- /* Finish off the work list */
- list_for_each_entry(desc, list, node) {
- dma_async_tx_callback callback;
- void *param;
-
- /* All desc in a list belong to same channel */
- pch = desc->pchan;
- callback = desc->txd.callback;
- param = desc->txd.callback_param;
-
- if (callback)
- callback(param);
-
- desc->pchan = NULL;
- }
-
- /* pch will be unset if list was empty */
- if (!pch)
- return;
-
- pdmac = pch->dmac;
-
- spin_lock_irqsave(&pdmac->pool_lock, flags);
- list_splice_tail_init(list, &pdmac->desc_pool);
- spin_unlock_irqrestore(&pdmac->pool_lock, flags);
-}
-
-static inline void handle_cyclic_desc_list(struct list_head *list)
-{
- struct dma_pl330_desc *desc;
- struct dma_pl330_chan *pch = NULL;
- unsigned long flags;
-
- list_for_each_entry(desc, list, node) {
- dma_async_tx_callback callback;
-
- /* Change status to reload it */
- desc->status = PREP;
- pch = desc->pchan;
- callback = desc->txd.callback;
- if (callback)
- callback(desc->txd.callback_param);
- }
-
- /* pch will be unset if list was empty */
- if (!pch)
- return;
-
- spin_lock_irqsave(&pch->lock, flags);
- list_splice_tail_init(list, &pch->work_list);
- spin_unlock_irqrestore(&pch->lock, flags);
-}
-
static inline void fill_queue(struct dma_pl330_chan *pch)
{
struct dma_pl330_desc *desc;
@@ -2291,7 +2233,6 @@ static void pl330_tasklet(unsigned long data)
struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
struct dma_pl330_desc *desc, *_dt;
unsigned long flags;
- LIST_HEAD(list);
spin_lock_irqsave(&pch->lock, flags);
@@ -2300,7 +2241,7 @@ static void pl330_tasklet(unsigned long data)
if (desc->status == DONE) {
if (!pch->cyclic)
dma_cookie_complete(&desc->txd);
- list_move_tail(&desc->node, &list);
+ list_move_tail(&desc->node, &pch->completed_list);
}
/* Try to submit a req imm. next to the last completed cookie */
@@ -2309,12 +2250,31 @@ static void pl330_tasklet(unsigned long data)
/* Make sure the PL330 Channel thread is active */
pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
- spin_unlock_irqrestore(&pch->lock, flags);
+ while (!list_empty(&pch->completed_list)) {
+ dma_async_tx_callback callback;
+ void *callback_param;
- if (pch->cyclic)
- handle_cyclic_desc_list(&list);
- else
- free_desc_list(&list);
+ desc = list_first_entry(&pch->completed_list,
+ struct dma_pl330_desc, node);
+
+ callback = desc->txd.callback;
+ callback_param = desc->txd.callback_param;
+
+ if (pch->cyclic) {
+ desc->status = PREP;
+ list_move_tail(&desc->node, &pch->work_list);
+ } else {
+ desc->status = FREE;
+ list_move_tail(&desc->node, &pch->dmac->desc_pool);
+ }
+
+ if (callback) {
+ spin_unlock_irqrestore(&pch->lock, flags);
+ callback(callback_param);
+ spin_lock_irqsave(&pch->lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&pch->lock, flags);
}
static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
@@ -2409,7 +2369,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
struct dma_pl330_chan *pch = to_pchan(chan);
- struct dma_pl330_desc *desc, *_dt;
+ struct dma_pl330_desc *desc;
unsigned long flags;
struct dma_pl330_dmac *pdmac = pch->dmac;
struct dma_slave_config *slave_config;
@@ -2423,12 +2383,18 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
/* Mark all desc done */
- list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
- desc->status = DONE;
- list_move_tail(&desc->node, &list);
+ list_for_each_entry(desc, &pch->work_list , node) {
+ desc->status = FREE;
+ dma_cookie_complete(&desc->txd);
}
- list_splice_tail_init(&list, &pdmac->desc_pool);
+ list_for_each_entry(desc, &pch->completed_list , node) {
+ desc->status = FREE;
+ dma_cookie_complete(&desc->txd);
+ }
+
+ list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
+ list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
spin_unlock_irqrestore(&pch->lock, flags);
break;
case DMA_SLAVE_CONFIG:
@@ -2814,6 +2780,28 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
return &desc->txd;
}
+static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac,
+ struct dma_pl330_desc *first)
+{
+ unsigned long flags;
+ struct dma_pl330_desc *desc;
+
+ if (!first)
+ return;
+
+ spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+ while (!list_empty(&first->node)) {
+ desc = list_entry(first->node.next,
+ struct dma_pl330_desc, node);
+ list_move_tail(&desc->node, &pdmac->desc_pool);
+ }
+
+ list_move_tail(&first->node, &pdmac->desc_pool);
+
+ spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+}
+
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
@@ -2822,7 +2810,6 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct dma_pl330_desc *first, *desc = NULL;
struct dma_pl330_chan *pch = to_pchan(chan);
struct scatterlist *sg;
- unsigned long flags;
int i;
dma_addr_t addr;
@@ -2842,20 +2829,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
dev_err(pch->dmac->pif.dev,
"%s:%d Unable to fetch desc\n",
__func__, __LINE__);
- if (!first)
- return NULL;
-
- spin_lock_irqsave(&pdmac->pool_lock, flags);
-
- while (!list_empty(&first->node)) {
- desc = list_entry(first->node.next,
- struct dma_pl330_desc, node);
- list_move_tail(&desc->node, &pdmac->desc_pool);
- }
-
- list_move_tail(&first->node, &pdmac->desc_pool);
-
- spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+ __pl330_giveback_desc(pdmac, first);
return NULL;
}
@@ -2896,6 +2870,25 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
return IRQ_NONE;
}
+#define PL330_DMA_BUSWIDTHS \
+ BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+ BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
+
+static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ caps->src_addr_widths = PL330_DMA_BUSWIDTHS;
+ caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS;
+ caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ caps->cmd_pause = false;
+ caps->cmd_terminate = true;
+
+ return 0;
+}
+
static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
@@ -2908,7 +2901,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
int i, ret, irq;
int num_chan;
- pdat = adev->dev.platform_data;
+ pdat = dev_get_platdata(&adev->dev);
/* Allocate a new DMAC and its Channels */
pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
@@ -2971,6 +2964,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pch->chan.private = adev->dev.of_node;
INIT_LIST_HEAD(&pch->work_list);
+ INIT_LIST_HEAD(&pch->completed_list);
spin_lock_init(&pch->lock);
pch->pl330_chid = NULL;
pch->chan.device = pd;
@@ -3000,6 +2994,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pd->device_prep_slave_sg = pl330_prep_slave_sg;
pd->device_control = pl330_control;
pd->device_issue_pending = pl330_issue_pending;
+ pd->device_slave_caps = pl330_dma_device_slave_caps;
ret = dma_async_device_register(pd);
if (ret) {
@@ -3015,6 +3010,14 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
"unable to register DMA to the generic DT DMA helpers\n");
}
}
+ /*
+ * This is the limit for transfers with a buswidth of 1; larger
+ * buswidths allow correspondingly larger limits.
+ */
+ ret = dma_set_max_seg_size(&adev->dev, 1900800);
+ if (ret)
+ dev_err(&adev->dev, "unable to set the seg size\n");
+
dev_info(&adev->dev,
"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 5c1dee20c13..dadd9e010c0 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -22,3 +22,13 @@ config SUDMAC
depends on SH_DMAE_BASE
help
Enable support for the Renesas SUDMAC controllers.
+
+config RCAR_HPB_DMAE
+ tristate "Renesas R-Car HPB DMAC support"
+ depends on SH_DMAE_BASE
+ help
+ Enable support for the Renesas R-Car series DMA controllers.
+
+config SHDMA_R8A73A4
+ def_bool y
+ depends on ARCH_R8A73A4 && SH_DMAE != n
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index c962138dde9..e856af23b78 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -1,3 +1,9 @@
obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o
obj-$(CONFIG_SH_DMAE) += shdma.o
+shdma-y := shdmac.o
+ifeq ($(CONFIG_OF),y)
+shdma-$(CONFIG_SHDMA_R8A73A4) += shdma-r8a73a4.o
+endif
+shdma-objs := $(shdma-y)
obj-$(CONFIG_SUDMAC) += sudmac.o
+obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
new file mode 100644
index 00000000000..45a520281ce
--- /dev/null
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -0,0 +1,655 @@
+/*
+ * Copyright (C) 2011-2013 Renesas Electronics Corporation
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ *
+ * This file is based on drivers/dma/sh/shdma.c
+ *
+ * Renesas SuperH DMA Engine support
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * - The SuperH DMA engine has no hardware DMA chain mode.
+ * - The maximum DMA size is 16 MiB.
+ *
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_data/dma-rcar-hpbdma.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/shdma-base.h>
+#include <linux/slab.h>
+
+/* DMA channel registers */
+#define HPB_DMAE_DSAR0 0x00
+#define HPB_DMAE_DDAR0 0x04
+#define HPB_DMAE_DTCR0 0x08
+#define HPB_DMAE_DSAR1 0x0C
+#define HPB_DMAE_DDAR1 0x10
+#define HPB_DMAE_DTCR1 0x14
+#define HPB_DMAE_DSASR 0x18
+#define HPB_DMAE_DDASR 0x1C
+#define HPB_DMAE_DTCSR 0x20
+#define HPB_DMAE_DPTR 0x24
+#define HPB_DMAE_DCR 0x28
+#define HPB_DMAE_DCMDR 0x2C
+#define HPB_DMAE_DSTPR 0x30
+#define HPB_DMAE_DSTSR 0x34
+#define HPB_DMAE_DDBGR 0x38
+#define HPB_DMAE_DDBGR2 0x3C
+#define HPB_DMAE_CHAN(n) (0x40 * (n))
+
+/* DMA command register (DCMDR) bits */
+#define HPB_DMAE_DCMDR_BDOUT BIT(7)
+#define HPB_DMAE_DCMDR_DQSPD BIT(6)
+#define HPB_DMAE_DCMDR_DQSPC BIT(5)
+#define HPB_DMAE_DCMDR_DMSPD BIT(4)
+#define HPB_DMAE_DCMDR_DMSPC BIT(3)
+#define HPB_DMAE_DCMDR_DQEND BIT(2)
+#define HPB_DMAE_DCMDR_DNXT BIT(1)
+#define HPB_DMAE_DCMDR_DMEN BIT(0)
+
+/* DMA forced stop register (DSTPR) bits */
+#define HPB_DMAE_DSTPR_DMSTP BIT(0)
+
+/* DMA status register (DSTSR) bits */
+#define HPB_DMAE_DSTSR_DMSTS BIT(0)
+
+/* DMA common registers */
+#define HPB_DMAE_DTIMR 0x00
+#define HPB_DMAE_DINTSR0 0x0C
+#define HPB_DMAE_DINTSR1 0x10
+#define HPB_DMAE_DINTCR0 0x14
+#define HPB_DMAE_DINTCR1 0x18
+#define HPB_DMAE_DINTMR0 0x1C
+#define HPB_DMAE_DINTMR1 0x20
+#define HPB_DMAE_DACTSR0 0x24
+#define HPB_DMAE_DACTSR1 0x28
+#define HPB_DMAE_HSRSTR(n) (0x40 + (n) * 4)
+#define HPB_DMAE_HPB_DMASPR(n) (0x140 + (n) * 4)
+#define HPB_DMAE_HPB_DMLVLR0 0x160
+#define HPB_DMAE_HPB_DMLVLR1 0x164
+#define HPB_DMAE_HPB_DMSHPT0 0x168
+#define HPB_DMAE_HPB_DMSHPT1 0x16C
+
+#define HPB_DMA_SLAVE_NUMBER 256
+#define HPB_DMA_TCR_MAX 0x01000000 /* 16 MiB */
+
+struct hpb_dmae_chan {
+ struct shdma_chan shdma_chan;
+ int xfer_mode; /* DMA transfer mode */
+#define XFER_SINGLE 1
+#define XFER_DOUBLE 2
+ unsigned plane_idx; /* current DMA information set */
+ bool first_desc; /* first/next transfer */
+ int xmit_shift; /* log_2(bytes_per_xfer) */
+ void __iomem *base;
+ const struct hpb_dmae_slave_config *cfg;
+ char dev_id[16]; /* unique channel name per DMAC */
+};
+
+struct hpb_dmae_device {
+ struct shdma_dev shdma_dev;
+ spinlock_t reg_lock; /* comm_reg operation lock */
+ struct hpb_dmae_pdata *pdata;
+ void __iomem *chan_reg;
+ void __iomem *comm_reg;
+ void __iomem *reset_reg;
+ void __iomem *mode_reg;
+};
+
+struct hpb_dmae_regs {
+ u32 sar; /* SAR / source address */
+ u32 dar; /* DAR / destination address */
+ u32 tcr; /* TCR / transfer count */
+};
+
+struct hpb_desc {
+ struct shdma_desc shdma_desc;
+ struct hpb_dmae_regs hw;
+ unsigned plane_idx;
+};
+
+#define to_chan(schan) container_of(schan, struct hpb_dmae_chan, shdma_chan)
+#define to_desc(sdesc) container_of(sdesc, struct hpb_desc, shdma_desc)
+#define to_dev(sc) container_of(sc->shdma_chan.dma_chan.device, \
+ struct hpb_dmae_device, shdma_dev.dma_dev)
+
+static void ch_reg_write(struct hpb_dmae_chan *hpb_dc, u32 data, u32 reg)
+{
+ iowrite32(data, hpb_dc->base + reg);
+}
+
+static u32 ch_reg_read(struct hpb_dmae_chan *hpb_dc, u32 reg)
+{
+ return ioread32(hpb_dc->base + reg);
+}
+
+static void dcmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
+{
+ iowrite32(data, hpbdev->chan_reg + HPB_DMAE_DCMDR);
+}
+
+static void hsrstr_write(struct hpb_dmae_device *hpbdev, u32 ch)
+{
+ iowrite32(0x1, hpbdev->comm_reg + HPB_DMAE_HSRSTR(ch));
+}
+
+static u32 dintsr_read(struct hpb_dmae_device *hpbdev, u32 ch)
+{
+ u32 v;
+
+ if (ch < 32)
+ v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR0) >> ch;
+ else
+ v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR1) >> (ch - 32);
+ return v & 0x1;
+}
+
+static void dintcr_write(struct hpb_dmae_device *hpbdev, u32 ch)
+{
+ if (ch < 32)
+ iowrite32((0x1 << ch), hpbdev->comm_reg + HPB_DMAE_DINTCR0);
+ else
+ iowrite32((0x1 << (ch - 32)),
+ hpbdev->comm_reg + HPB_DMAE_DINTCR1);
+}
+
+static void asyncmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
+{
+ iowrite32(data, hpbdev->mode_reg);
+}
+
+static u32 asyncmdr_read(struct hpb_dmae_device *hpbdev)
+{
+ return ioread32(hpbdev->mode_reg);
+}
+
+static void hpb_dmae_enable_int(struct hpb_dmae_device *hpbdev, u32 ch)
+{
+ u32 intreg;
+
+ spin_lock_irq(&hpbdev->reg_lock);
+ if (ch < 32) {
+ intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR0);
+ iowrite32(BIT(ch) | intreg,
+ hpbdev->comm_reg + HPB_DMAE_DINTMR0);
+ } else {
+ intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR1);
+ iowrite32(BIT(ch - 32) | intreg,
+ hpbdev->comm_reg + HPB_DMAE_DINTMR1);
+ }
+ spin_unlock_irq(&hpbdev->reg_lock);
+}
+
+static void hpb_dmae_async_reset(struct hpb_dmae_device *hpbdev, u32 data)
+{
+ u32 rstr;
+ int timeout = 10000; /* 100 ms */
+
+ spin_lock(&hpbdev->reg_lock);
+ rstr = ioread32(hpbdev->reset_reg);
+ rstr |= data;
+ iowrite32(rstr, hpbdev->reset_reg);
+ do {
+ rstr = ioread32(hpbdev->reset_reg);
+ if ((rstr & data) == data)
+ break;
+ udelay(10);
+ } while (timeout--);
+
+ if (timeout < 0)
+ dev_err(hpbdev->shdma_dev.dma_dev.dev,
+ "%s timeout\n", __func__);
+
+ rstr &= ~data;
+ iowrite32(rstr, hpbdev->reset_reg);
+ spin_unlock(&hpbdev->reg_lock);
+}
+
+static void hpb_dmae_set_async_mode(struct hpb_dmae_device *hpbdev,
+ u32 mask, u32 data)
+{
+ u32 mode;
+
+ spin_lock_irq(&hpbdev->reg_lock);
+ mode = asyncmdr_read(hpbdev);
+ mode &= ~mask;
+ mode |= data;
+ asyncmdr_write(hpbdev, mode);
+ spin_unlock_irq(&hpbdev->reg_lock);
+}
+
+static void hpb_dmae_ctl_stop(struct hpb_dmae_device *hpbdev)
+{
+ dcmdr_write(hpbdev, HPB_DMAE_DCMDR_DQSPD);
+}
+
+static void hpb_dmae_reset(struct hpb_dmae_device *hpbdev)
+{
+ u32 ch;
+
+ for (ch = 0; ch < hpbdev->pdata->num_hw_channels; ch++)
+ hsrstr_write(hpbdev, ch);
+}
+
+static unsigned int calc_xmit_shift(struct hpb_dmae_chan *hpb_chan)
+{
+ struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
+ struct hpb_dmae_pdata *pdata = hpbdev->pdata;
+ int width = ch_reg_read(hpb_chan, HPB_DMAE_DCR);
+ int i;
+
+ switch (width & (HPB_DMAE_DCR_SPDS_MASK | HPB_DMAE_DCR_DPDS_MASK)) {
+ case HPB_DMAE_DCR_SPDS_8BIT | HPB_DMAE_DCR_DPDS_8BIT:
+ default:
+ i = XMIT_SZ_8BIT;
+ break;
+ case HPB_DMAE_DCR_SPDS_16BIT | HPB_DMAE_DCR_DPDS_16BIT:
+ i = XMIT_SZ_16BIT;
+ break;
+ case HPB_DMAE_DCR_SPDS_32BIT | HPB_DMAE_DCR_DPDS_32BIT:
+ i = XMIT_SZ_32BIT;
+ break;
+ }
+ return pdata->ts_shift[i];
+}
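+
+/*
+ * Example (assumed values): with 32-bit source and destination port
+ * sizes the function above returns pdata->ts_shift[XMIT_SZ_32BIT],
+ * typically 2, so hpb_dmae_set_reg() below programs a 4 KiB transfer
+ * as a transfer count of 4096 >> 2 = 1024.
+ */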
+
+static void hpb_dmae_set_reg(struct hpb_dmae_chan *hpb_chan,
+ struct hpb_dmae_regs *hw, unsigned plane)
+{
+ ch_reg_write(hpb_chan, hw->sar,
+ plane ? HPB_DMAE_DSAR1 : HPB_DMAE_DSAR0);
+ ch_reg_write(hpb_chan, hw->dar,
+ plane ? HPB_DMAE_DDAR1 : HPB_DMAE_DDAR0);
+ ch_reg_write(hpb_chan, hw->tcr >> hpb_chan->xmit_shift,
+ plane ? HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
+}
+
+static void hpb_dmae_start(struct hpb_dmae_chan *hpb_chan, bool next)
+{
+ ch_reg_write(hpb_chan, (next ? HPB_DMAE_DCMDR_DNXT : 0) |
+ HPB_DMAE_DCMDR_DMEN, HPB_DMAE_DCMDR);
+}
+
+static void hpb_dmae_halt(struct shdma_chan *schan)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+
+ ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
+ ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);
+}
+
+static const struct hpb_dmae_slave_config *
+hpb_dmae_find_slave(struct hpb_dmae_chan *hpb_chan, int slave_id)
+{
+ struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
+ struct hpb_dmae_pdata *pdata = hpbdev->pdata;
+ int i;
+
+ if (slave_id >= HPB_DMA_SLAVE_NUMBER)
+ return NULL;
+
+ for (i = 0; i < pdata->num_slaves; i++)
+ if (pdata->slaves[i].id == slave_id)
+ return pdata->slaves + i;
+
+ return NULL;
+}
+
+static void hpb_dmae_start_xfer(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+ struct hpb_dmae_device *hpbdev = to_dev(chan);
+ struct hpb_desc *desc = to_desc(sdesc);
+
+ if (chan->cfg->flags & HPB_DMAE_SET_ASYNC_RESET)
+ hpb_dmae_async_reset(hpbdev, chan->cfg->rstr);
+
+ desc->plane_idx = chan->plane_idx;
+ hpb_dmae_set_reg(chan, &desc->hw, chan->plane_idx);
+ hpb_dmae_start(chan, !chan->first_desc);
+
+ if (chan->xfer_mode == XFER_DOUBLE) {
+ chan->plane_idx ^= 1;
+ chan->first_desc = false;
+ }
+}
+
+static bool hpb_dmae_desc_completed(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ /*
+ * This is correct since we always have at most a single
+ * outstanding DMA transfer per channel, and by the time
+ * we get the completion interrupt the transfer has completed.
+ * This will change if we ever use alternating DMA
+ * information sets and submit two descriptors at once.
+ */
+ return true;
+}
+
+static bool hpb_dmae_chan_irq(struct shdma_chan *schan, int irq)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+ struct hpb_dmae_device *hpbdev = to_dev(chan);
+ int ch = chan->cfg->dma_ch;
+
+ /* Check Complete DMA Transfer */
+ if (dintsr_read(hpbdev, ch)) {
+ /* Clear Interrupt status */
+ dintcr_write(hpbdev, ch);
+ return true;
+ }
+ return false;
+}
+
+static int hpb_dmae_desc_setup(struct shdma_chan *schan,
+ struct shdma_desc *sdesc,
+ dma_addr_t src, dma_addr_t dst, size_t *len)
+{
+ struct hpb_desc *desc = to_desc(sdesc);
+
+ if (*len > (size_t)HPB_DMA_TCR_MAX)
+ *len = (size_t)HPB_DMA_TCR_MAX;
+
+ desc->hw.sar = src;
+ desc->hw.dar = dst;
+ desc->hw.tcr = *len;
+
+ return 0;
+}
+
+static size_t hpb_dmae_get_partial(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct hpb_desc *desc = to_desc(sdesc);
+ struct hpb_dmae_chan *chan = to_chan(schan);
+ u32 tcr = ch_reg_read(chan, desc->plane_idx ?
+ HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
+
+ return (desc->hw.tcr - tcr) << chan->xmit_shift;
+}
+
+static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+ u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);
+
+ return (dstsr & HPB_DMAE_DSTSR_DMSTS) == HPB_DMAE_DSTSR_DMSTS;
+}
+
+static int
+hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
+ const struct hpb_dmae_slave_config *cfg)
+{
+ struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
+ struct hpb_dmae_pdata *pdata = hpbdev->pdata;
+ const struct hpb_dmae_channel *channel = pdata->channels;
+ int slave_id = cfg->id;
+ int i, err;
+
+ for (i = 0; i < pdata->num_channels; i++, channel++) {
+ if (channel->s_id == slave_id) {
+ struct device *dev = hpb_chan->shdma_chan.dev;
+
+ hpb_chan->base = hpbdev->chan_reg +
+ HPB_DMAE_CHAN(cfg->dma_ch);
+
+ dev_dbg(dev, "Detected Slave device\n");
+ dev_dbg(dev, " -- slave_id : 0x%x\n", slave_id);
+ dev_dbg(dev, " -- cfg->dma_ch : %d\n", cfg->dma_ch);
+ dev_dbg(dev, " -- channel->ch_irq: %d\n",
+ channel->ch_irq);
+ break;
+ }
+ }
+
+ if (i == pdata->num_channels)
+ return -ENODEV;
+
+ err = shdma_request_irq(&hpb_chan->shdma_chan, channel->ch_irq,
+ IRQF_SHARED, hpb_chan->dev_id);
+ if (err) {
+ dev_err(hpb_chan->shdma_chan.dev,
+ "DMA channel request_irq %d failed with error %d\n",
+ channel->ch_irq, err);
+ return err;
+ }
+
+ hpb_chan->plane_idx = 0;
+ hpb_chan->first_desc = true;
+
+ if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) == 0) {
+ hpb_chan->xfer_mode = XFER_SINGLE;
+ } else if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) ==
+ (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) {
+ hpb_chan->xfer_mode = XFER_DOUBLE;
+ } else {
+ dev_err(hpb_chan->shdma_chan.dev, "DCR setting error");
+ shdma_free_irq(&hpb_chan->shdma_chan);
+ return -EINVAL;
+ }
+
+ if (cfg->flags & HPB_DMAE_SET_ASYNC_MODE)
+ hpb_dmae_set_async_mode(hpbdev, cfg->mdm, cfg->mdr);
+ ch_reg_write(hpb_chan, cfg->dcr, HPB_DMAE_DCR);
+ ch_reg_write(hpb_chan, cfg->port, HPB_DMAE_DPTR);
+ hpb_chan->xmit_shift = calc_xmit_shift(hpb_chan);
+ hpb_dmae_enable_int(hpbdev, cfg->dma_ch);
+
+ return 0;
+}
+
+static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+ const struct hpb_dmae_slave_config *sc =
+ hpb_dmae_find_slave(chan, slave_id);
+
+ if (!sc)
+ return -ENODEV;
+ if (try)
+ return 0;
+ chan->cfg = sc;
+ return hpb_dmae_alloc_chan_resources(chan, sc);
+}
+
+static void hpb_dmae_setup_xfer(struct shdma_chan *schan, int slave_id)
+{
+}
+
+static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+
+ return chan->cfg->addr;
+}
+
+static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
+{
+ return &((struct hpb_desc *)buf)[i].shdma_desc;
+}
+
+static const struct shdma_ops hpb_dmae_ops = {
+ .desc_completed = hpb_dmae_desc_completed,
+ .halt_channel = hpb_dmae_halt,
+ .channel_busy = hpb_dmae_channel_busy,
+ .slave_addr = hpb_dmae_slave_addr,
+ .desc_setup = hpb_dmae_desc_setup,
+ .set_slave = hpb_dmae_set_slave,
+ .setup_xfer = hpb_dmae_setup_xfer,
+ .start_xfer = hpb_dmae_start_xfer,
+ .embedded_desc = hpb_dmae_embedded_desc,
+ .chan_irq = hpb_dmae_chan_irq,
+ .get_partial = hpb_dmae_get_partial,
+};
+
+static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
+{
+ struct shdma_dev *sdev = &hpbdev->shdma_dev;
+ struct platform_device *pdev =
+ to_platform_device(hpbdev->shdma_dev.dma_dev.dev);
+ struct hpb_dmae_chan *new_hpb_chan;
+ struct shdma_chan *schan;
+
+ /* Alloc channel */
+ new_hpb_chan = devm_kzalloc(&pdev->dev,
+ sizeof(struct hpb_dmae_chan), GFP_KERNEL);
+ if (!new_hpb_chan) {
+ dev_err(hpbdev->shdma_dev.dma_dev.dev,
+ "No free memory for allocating DMA channels!\n");
+ return -ENOMEM;
+ }
+
+ schan = &new_hpb_chan->shdma_chan;
+ shdma_chan_probe(sdev, schan, id);
+
+ if (pdev->id >= 0)
+ snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
+ "hpb-dmae%d.%d", pdev->id, id);
+ else
+ snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
+ "hpb-dma.%d", id);
+
+ return 0;
+}
+
+static int hpb_dmae_probe(struct platform_device *pdev)
+{
+ struct hpb_dmae_pdata *pdata = pdev->dev.platform_data;
+ struct hpb_dmae_device *hpbdev;
+ struct dma_device *dma_dev;
+ struct resource *chan, *comm, *rest, *mode, *irq_res;
+ int err, i;
+
+ /* Get platform data */
+ if (!pdata || !pdata->num_channels)
+ return -ENODEV;
+
+ chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ comm = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ rest = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ mode = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq_res)
+ return -ENODEV;
+
+ hpbdev = devm_kzalloc(&pdev->dev, sizeof(struct hpb_dmae_device),
+ GFP_KERNEL);
+ if (!hpbdev) {
+ dev_err(&pdev->dev, "Not enough memory\n");
+ return -ENOMEM;
+ }
+
+ hpbdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
+ if (IS_ERR(hpbdev->chan_reg))
+ return PTR_ERR(hpbdev->chan_reg);
+
+ hpbdev->comm_reg = devm_ioremap_resource(&pdev->dev, comm);
+ if (IS_ERR(hpbdev->comm_reg))
+ return PTR_ERR(hpbdev->comm_reg);
+
+ hpbdev->reset_reg = devm_ioremap_resource(&pdev->dev, rest);
+ if (IS_ERR(hpbdev->reset_reg))
+ return PTR_ERR(hpbdev->reset_reg);
+
+ hpbdev->mode_reg = devm_ioremap_resource(&pdev->dev, mode);
+ if (IS_ERR(hpbdev->mode_reg))
+ return PTR_ERR(hpbdev->mode_reg);
+
+ dma_dev = &hpbdev->shdma_dev.dma_dev;
+
+ spin_lock_init(&hpbdev->reg_lock);
+
+ /* Platform data */
+ hpbdev->pdata = pdata;
+
+ pm_runtime_enable(&pdev->dev);
+ err = pm_runtime_get_sync(&pdev->dev);
+ if (err < 0)
+ dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
+
+ /* Reset DMA controller */
+ hpb_dmae_reset(hpbdev);
+
+ pm_runtime_put(&pdev->dev);
+
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+ hpbdev->shdma_dev.ops = &hpb_dmae_ops;
+ hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc);
+ err = shdma_init(&pdev->dev, &hpbdev->shdma_dev, pdata->num_channels);
+ if (err < 0)
+ goto error;
+
+ /* Create DMA channels */
+ for (i = 0; i < pdata->num_channels; i++)
+ hpb_dmae_chan_probe(hpbdev, i);
+
+ platform_set_drvdata(pdev, hpbdev);
+ err = dma_async_device_register(dma_dev);
+ if (!err)
+ return 0;
+
+ shdma_cleanup(&hpbdev->shdma_dev);
+error:
+ pm_runtime_disable(&pdev->dev);
+ return err;
+}
+
+static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
+{
+ struct dma_device *dma_dev = &hpbdev->shdma_dev.dma_dev;
+ struct shdma_chan *schan;
+ int i;
+
+ shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
+ BUG_ON(!schan);
+
+ shdma_free_irq(schan);
+ shdma_chan_remove(schan);
+ }
+ dma_dev->chancnt = 0;
+}
+
+static int hpb_dmae_remove(struct platform_device *pdev)
+{
+ struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&hpbdev->shdma_dev.dma_dev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ hpb_dmae_chan_remove(hpbdev);
+
+ return 0;
+}
+
+static void hpb_dmae_shutdown(struct platform_device *pdev)
+{
+ struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
+ hpb_dmae_ctl_stop(hpbdev);
+}
+
+static struct platform_driver hpb_dmae_driver = {
+ .probe = hpb_dmae_probe,
+ .remove = hpb_dmae_remove,
+ .shutdown = hpb_dmae_shutdown,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "hpb-dma-engine",
+ },
+};
+module_platform_driver(hpb_dmae_driver);
+
+MODULE_AUTHOR("Max Filippov <max.filippov@cogentembedded.com>");
+MODULE_DESCRIPTION("Renesas HPB DMA Engine driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/sh/shdma-arm.h b/drivers/dma/sh/shdma-arm.h
new file mode 100644
index 00000000000..a2b8258426c
--- /dev/null
+++ b/drivers/dma/sh/shdma-arm.h
@@ -0,0 +1,51 @@
+/*
+ * Renesas SuperH DMA Engine support
+ *
+ * Copyright (C) 2013 Renesas Electronics, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify it under the
+ * terms of version 2 of the GNU General Public License as published by the
+ * Free Software Foundation.
+ */
+
+#ifndef SHDMA_ARM_H
+#define SHDMA_ARM_H
+
+#include "shdma.h"
+
+/* Transmit sizes and respective CHCR register values */
+enum {
+ XMIT_SZ_8BIT = 0,
+ XMIT_SZ_16BIT = 1,
+ XMIT_SZ_32BIT = 2,
+ XMIT_SZ_64BIT = 7,
+ XMIT_SZ_128BIT = 3,
+ XMIT_SZ_256BIT = 4,
+ XMIT_SZ_512BIT = 5,
+};
+
+/* log2(size / 8) - used to calculate number of transfers */
+#define SH_DMAE_TS_SHIFT { \
+ [XMIT_SZ_8BIT] = 0, \
+ [XMIT_SZ_16BIT] = 1, \
+ [XMIT_SZ_32BIT] = 2, \
+ [XMIT_SZ_64BIT] = 3, \
+ [XMIT_SZ_128BIT] = 4, \
+ [XMIT_SZ_256BIT] = 5, \
+ [XMIT_SZ_512BIT] = 6, \
+}
+
+#define TS_LOW_BIT 0x3 /* --xx */
+#define TS_HI_BIT 0xc /* xx-- */
+
+#define TS_LOW_SHIFT (3)
+#define TS_HI_SHIFT (20 - 2) /* 2 bits for shifted low TS */
+
+#define TS_INDEX2VAL(i) \
+ ((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) |\
+ (((i) & TS_HI_BIT) << TS_HI_SHIFT))
+
+#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL((xmit_sz)))
+#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL((xmit_sz)))
+
+#endif
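The seemingly out-of-order XMIT_SZ_64BIT == 7 makes sense once TS_INDEX2VAL() is run by hand: the index is split across the two CHCR bit fields. A standalone check, with the macros copied from the header above:

#include <stdio.h>

#define TS_LOW_BIT   0x3
#define TS_HI_BIT    0xc
#define TS_LOW_SHIFT (3)
#define TS_HI_SHIFT  (20 - 2)

#define TS_INDEX2VAL(i) \
	((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) | \
	 (((i) & TS_HI_BIT) << TS_HI_SHIFT))

int main(void)
{
	/*
	 * Index 7: the low two bits (0x3 << 3) land in CHCR[4:3], the
	 * remaining bit (0x4 << 18) lands in CHCR[20] - 0x100018 total.
	 */
	printf("TS field for 64-bit transfers: 0x%x\n", TS_INDEX2VAL(7));
	return 0;
}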
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 28ca3612163..d94ab592cc1 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -171,7 +171,8 @@ static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
return NULL;
}
-static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
+static int shdma_setup_slave(struct shdma_chan *schan, int slave_id,
+ dma_addr_t slave_addr)
{
struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
const struct shdma_ops *ops = sdev->ops;
@@ -179,7 +180,7 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
if (schan->dev->of_node) {
match = schan->hw_req;
- ret = ops->set_slave(schan, match, true);
+ ret = ops->set_slave(schan, match, slave_addr, true);
if (ret < 0)
return ret;
@@ -194,7 +195,7 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
if (test_and_set_bit(slave_id, shdma_slave_used))
return -EBUSY;
- ret = ops->set_slave(schan, match, false);
+ ret = ops->set_slave(schan, match, slave_addr, false);
if (ret < 0) {
clear_bit(slave_id, shdma_slave_used);
return ret;
@@ -236,7 +237,7 @@ bool shdma_chan_filter(struct dma_chan *chan, void *arg)
if (!schan->dev->of_node && match >= slave_num)
return false;
- ret = ops->set_slave(schan, match, true);
+ ret = ops->set_slave(schan, match, 0, true);
if (ret < 0)
return false;
@@ -259,7 +260,7 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
*/
if (slave) {
/* Legacy mode: .private is set in filter */
- ret = shdma_setup_slave(schan, slave->slave_id);
+ ret = shdma_setup_slave(schan, slave->slave_id, 0);
if (ret < 0)
goto esetslave;
} else {
@@ -680,7 +681,9 @@ static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
* channel, while using it...
*/
config = (struct dma_slave_config *)arg;
- ret = shdma_setup_slave(schan, config->slave_id);
+ ret = shdma_setup_slave(schan, config->slave_id,
+ config->direction == DMA_DEV_TO_MEM ?
+ config->src_addr : config->dst_addr);
if (ret < 0)
return ret;
break;
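With this hunk, the address handed to shdma_setup_slave() comes straight from the client's dma_slave_config. A hedged sketch of the client side (the function name and request line are illustrative only):

#include <linux/dmaengine.h>

static int example_setup_rx(struct dma_chan *chan, dma_addr_t fifo_phys)
{
	struct dma_slave_config cfg = {
		.direction      = DMA_DEV_TO_MEM,
		.src_addr       = fifo_phys, /* used because DEV_TO_MEM */
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.slave_id       = 0,         /* driver-specific request line */
	};

	return dmaengine_slave_config(chan, &cfg);
}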
@@ -831,8 +834,8 @@ static irqreturn_t chan_irqt(int irq, void *dev)
int shdma_request_irq(struct shdma_chan *schan, int irq,
unsigned long flags, const char *name)
{
- int ret = request_threaded_irq(irq, chan_irq, chan_irqt,
- flags, name, schan);
+ int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
+ chan_irqt, flags, name, schan);
schan->irq = ret < 0 ? ret : irq;
@@ -840,13 +843,6 @@ int shdma_request_irq(struct shdma_chan *schan, int irq,
}
EXPORT_SYMBOL(shdma_request_irq);
-void shdma_free_irq(struct shdma_chan *schan)
-{
- if (schan->irq >= 0)
- free_irq(schan->irq, schan);
-}
-EXPORT_SYMBOL(shdma_free_irq);
-
void shdma_chan_probe(struct shdma_dev *sdev,
struct shdma_chan *schan, int id)
{
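The shdma_free_irq() removal above is possible because devm_request_threaded_irq() ties the IRQ's lifetime to the struct device. The idiom in isolation, as a minimal sketch:

#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t example_thread_fn(int irq, void *ctx)
{
	/* hypothetical threaded handler */
	return IRQ_HANDLED;
}

static int example_attach_irq(struct device *dev, int irq, void *ctx)
{
	/* released automatically on driver unbind - no free_irq() needed */
	return devm_request_threaded_irq(dev, irq, NULL, example_thread_fn,
					 IRQF_ONESHOT, dev_name(dev), ctx);
}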
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
index 11bcb05cd79..06473a05fe4 100644
--- a/drivers/dma/sh/shdma-of.c
+++ b/drivers/dma/sh/shdma-of.c
@@ -42,12 +42,9 @@ static struct dma_chan *shdma_of_xlate(struct of_phandle_args *dma_spec,
static int shdma_of_probe(struct platform_device *pdev)
{
- const struct of_dev_auxdata *lookup = pdev->dev.platform_data;
+ const struct of_dev_auxdata *lookup = dev_get_platdata(&pdev->dev);
int ret;
- if (!lookup)
- return -EINVAL;
-
ret = of_dma_controller_register(pdev->dev.of_node,
shdma_of_xlate, pdev);
if (ret < 0)
diff --git a/drivers/dma/sh/shdma-r8a73a4.c b/drivers/dma/sh/shdma-r8a73a4.c
new file mode 100644
index 00000000000..4fb99970a3e
--- /dev/null
+++ b/drivers/dma/sh/shdma-r8a73a4.c
@@ -0,0 +1,77 @@
+/*
+ * Renesas SuperH DMA Engine support for r8a73a4 (APE6) SoCs
+ *
+ * Copyright (C) 2013 Renesas Electronics, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify it under the
+ * terms of version 2 of the GNU General Public License as published by the
+ * Free Software Foundation.
+ */
+#include <linux/sh_dma.h>
+
+#include "shdma-arm.h"
+
+static const unsigned int dma_ts_shift[] = SH_DMAE_TS_SHIFT;
+
+static const struct sh_dmae_slave_config dma_slaves[] = {
+ {
+ .chcr = CHCR_TX(XMIT_SZ_32BIT),
+ .mid_rid = 0xd1, /* MMC0 Tx */
+ }, {
+ .chcr = CHCR_RX(XMIT_SZ_32BIT),
+ .mid_rid = 0xd2, /* MMC0 Rx */
+ }, {
+ .chcr = CHCR_TX(XMIT_SZ_32BIT),
+ .mid_rid = 0xe1, /* MMC1 Tx */
+ }, {
+ .chcr = CHCR_RX(XMIT_SZ_32BIT),
+ .mid_rid = 0xe2, /* MMC1 Rx */
+ },
+};
+
+#define DMAE_CHANNEL(a, b) \
+ { \
+ .offset = (a) - 0x20, \
+ .dmars = (a) - 0x20 + 0x40, \
+ .chclr_bit = (b), \
+ .chclr_offset = 0x80 - 0x20, \
+ }
+
+static const struct sh_dmae_channel dma_channels[] = {
+ DMAE_CHANNEL(0x8000, 0),
+ DMAE_CHANNEL(0x8080, 1),
+ DMAE_CHANNEL(0x8100, 2),
+ DMAE_CHANNEL(0x8180, 3),
+ DMAE_CHANNEL(0x8200, 4),
+ DMAE_CHANNEL(0x8280, 5),
+ DMAE_CHANNEL(0x8300, 6),
+ DMAE_CHANNEL(0x8380, 7),
+ DMAE_CHANNEL(0x8400, 8),
+ DMAE_CHANNEL(0x8480, 9),
+ DMAE_CHANNEL(0x8500, 10),
+ DMAE_CHANNEL(0x8580, 11),
+ DMAE_CHANNEL(0x8600, 12),
+ DMAE_CHANNEL(0x8680, 13),
+ DMAE_CHANNEL(0x8700, 14),
+ DMAE_CHANNEL(0x8780, 15),
+ DMAE_CHANNEL(0x8800, 16),
+ DMAE_CHANNEL(0x8880, 17),
+ DMAE_CHANNEL(0x8900, 18),
+ DMAE_CHANNEL(0x8980, 19),
+};
+
+const struct sh_dmae_pdata r8a73a4_dma_pdata = {
+ .slave = dma_slaves,
+ .slave_num = ARRAY_SIZE(dma_slaves),
+ .channel = dma_channels,
+ .channel_num = ARRAY_SIZE(dma_channels),
+ .ts_low_shift = TS_LOW_SHIFT,
+ .ts_low_mask = TS_LOW_BIT << TS_LOW_SHIFT,
+ .ts_high_shift = TS_HI_SHIFT,
+ .ts_high_mask = TS_HI_BIT << TS_HI_SHIFT,
+ .ts_shift = dma_ts_shift,
+ .ts_shift_num = ARRAY_SIZE(dma_ts_shift),
+ .dmaor_init = DMAOR_DME,
+ .chclr_present = 1,
+ .chclr_bitwise = 1,
+};
diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h
index 9314e93225d..758a57b5187 100644
--- a/drivers/dma/sh/shdma.h
+++ b/drivers/dma/sh/shdma.h
@@ -28,18 +28,19 @@ struct sh_dmae_chan {
struct shdma_chan shdma_chan;
const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
int xmit_shift; /* log_2(bytes_per_xfer) */
- u32 __iomem *base;
+ void __iomem *base;
char dev_id[16]; /* unique name per DMAC of channel */
int pm_error;
+ dma_addr_t slave_addr;
};
struct sh_dmae_device {
struct shdma_dev shdma_dev;
struct sh_dmae_chan *chan[SH_DMAE_MAX_CHANNELS];
- struct sh_dmae_pdata *pdata;
+ const struct sh_dmae_pdata *pdata;
struct list_head node;
- u32 __iomem *chan_reg;
- u16 __iomem *dmars;
+ void __iomem *chan_reg;
+ void __iomem *dmars;
unsigned int chcr_offset;
u32 chcr_ie_bit;
};
@@ -61,4 +62,11 @@ struct sh_dmae_desc {
#define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\
struct sh_dmae_device, shdma_dev.dma_dev)
+#ifdef CONFIG_SHDMA_R8A73A4
+extern const struct sh_dmae_pdata r8a73a4_dma_pdata;
+#define r8a73a4_shdma_devid (&r8a73a4_dma_pdata)
+#else
+#define r8a73a4_shdma_devid NULL
+#endif
+
#endif /* __DMA_SHDMA_H */
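The r8a73a4_shdma_devid macro lets the OF match table in shdmac.c reference the pdata unconditionally: with CONFIG_SHDMA_R8A73A4 disabled, .data simply becomes NULL instead of an unresolved symbol. The same pattern in isolation (all names hypothetical):

#include <linux/mod_devicetable.h>

#ifdef CONFIG_EXAMPLE_VARIANT
extern const struct example_pdata variant_pdata;
#define variant_devid (&variant_pdata)
#else
#define variant_devid NULL
#endif

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-variant", .data = variant_devid, },
	{ /* sentinel */ }
};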
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdmac.c
index 5039fbc8825..1069e8869f2 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdmac.c
@@ -20,6 +20,8 @@
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
@@ -35,6 +37,15 @@
#include "../dmaengine.h"
#include "shdma.h"
+/* DMA register */
+#define SAR 0x00
+#define DAR 0x04
+#define TCR 0x08
+#define CHCR 0x0C
+#define DMAOR 0x40
+
+#define TEND 0x18 /* USB-DMAC */
+
#define SH_DMAE_DRV_NAME "sh-dma-engine"
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
@@ -49,27 +60,37 @@
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);
-static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
+/*
+ * Different DMAC implementations provide different ways to clear DMA channels:
+ * (1) none - no CHCLR registers are available
+ * (2) one CHCLR register per channel - 0 has to be written to it to clear
+ * channel buffers
+ * (3) one CHCLR per several channels - 1 has to be written to the bit
+ * corresponding to the specific channel to reset it
+ */
+static void channel_clear(struct sh_dmae_chan *sh_dc)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+ const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
+ sh_dc->shdma_chan.id;
+ u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;
- __raw_writel(data, shdev->chan_reg +
- shdev->pdata->channel[sh_dc->shdma_chan.id].chclr_offset);
+ __raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
}
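Schemes (2) and (3) differ only in the value written: plain 0 into a per-channel register, or the channel's own bit into a shared one. That selection, as a standalone sketch:

#include <stdio.h>

static unsigned int chclr_val(int chclr_bitwise, unsigned int chclr_bit)
{
	/* same expression channel_clear() uses to pick the value */
	return chclr_bitwise ? 1u << chclr_bit : 0;
}

int main(void)
{
	printf("shared CHCLR, channel bit 5: 0x%x\n", chclr_val(1, 5));
	printf("per-channel CHCLR: 0x%x\n", chclr_val(0, 0));
	return 0;
}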
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
- __raw_writel(data, sh_dc->base + reg / sizeof(u32));
+ __raw_writel(data, sh_dc->base + reg);
}
static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
- return __raw_readl(sh_dc->base + reg / sizeof(u32));
+ return __raw_readl(sh_dc->base + reg);
}
static u16 dmaor_read(struct sh_dmae_device *shdev)
{
- u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+ void __iomem *addr = shdev->chan_reg + DMAOR;
if (shdev->pdata->dmaor_is_32bit)
return __raw_readl(addr);
@@ -79,7 +100,7 @@ static u16 dmaor_read(struct sh_dmae_device *shdev)
static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
- u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+ void __iomem *addr = shdev->chan_reg + DMAOR;
if (shdev->pdata->dmaor_is_32bit)
__raw_writel(data, addr);
@@ -91,14 +112,14 @@ static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
- __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
+ __raw_writel(data, sh_dc->base + shdev->chcr_offset);
}
static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
- return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
+ return __raw_readl(sh_dc->base + shdev->chcr_offset);
}
/*
@@ -133,7 +154,7 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
for (i = 0; i < shdev->pdata->channel_num; i++) {
struct sh_dmae_chan *sh_chan = shdev->chan[i];
if (sh_chan)
- chclr_write(sh_chan, 0);
+ channel_clear(sh_chan);
}
}
@@ -167,7 +188,7 @@ static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
- struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_pdata *pdata = shdev->pdata;
int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
@@ -180,7 +201,7 @@ static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
- struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_pdata *pdata = shdev->pdata;
int i;
for (i = 0; i < pdata->ts_shift_num; i++)
@@ -240,9 +261,9 @@ static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
- struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_pdata *pdata = shdev->pdata;
const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
- u16 __iomem *addr = shdev->dmars;
+ void __iomem *addr = shdev->dmars;
unsigned int shift = chan_pdata->dmars_bit;
if (dmae_is_busy(sh_chan))
@@ -253,8 +274,8 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
/* in the case of a missing DMARS resource use first memory window */
if (!addr)
- addr = (u16 __iomem *)shdev->chan_reg;
- addr += chan_pdata->dmars / sizeof(u16);
+ addr = shdev->chan_reg;
+ addr += chan_pdata->dmars;
__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
addr);
@@ -309,7 +330,7 @@ static const struct sh_dmae_slave_config *dmae_find_slave(
struct sh_dmae_chan *sh_chan, int match)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
- struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_pdata *pdata = shdev->pdata;
const struct sh_dmae_slave_config *cfg;
int i;
@@ -323,7 +344,7 @@ static const struct sh_dmae_slave_config *dmae_find_slave(
} else {
for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
if (cfg->mid_rid == match) {
- sh_chan->shdma_chan.slave_id = cfg->slave_id;
+ sh_chan->shdma_chan.slave_id = i;
return cfg;
}
}
@@ -332,7 +353,7 @@ static const struct sh_dmae_slave_config *dmae_find_slave(
}
static int sh_dmae_set_slave(struct shdma_chan *schan,
- int slave_id, bool try)
+ int slave_id, dma_addr_t slave_addr, bool try)
{
struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
shdma_chan);
@@ -340,8 +361,10 @@ static int sh_dmae_set_slave(struct shdma_chan *schan,
if (!cfg)
return -ENXIO;
- if (!try)
+ if (!try) {
sh_chan->config = cfg;
+ sh_chan->slave_addr = slave_addr ? : cfg->addr;
+ }
return 0;
}
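The "slave_addr ? : cfg->addr" expression is GCC's two-operand conditional: an address supplied at runtime through dma_slave_config overrides the static per-slave default. As a tiny sketch:

#include <linux/types.h>

static dma_addr_t example_pick_addr(dma_addr_t configured,
				    dma_addr_t platform_default)
{
	return configured ? : platform_default;
}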
@@ -505,7 +528,8 @@ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
struct shdma_chan *schan;
int err;
- sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
+ sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
+ GFP_KERNEL);
if (!sh_chan) {
dev_err(sdev->dma_dev.dev,
"No free memory for allocating dma channels!\n");
@@ -517,7 +541,7 @@ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
shdma_chan_probe(sdev, schan, id);
- sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);
+ sh_chan->base = shdev->chan_reg + chan_pdata->offset;
/* set up channel irq */
if (pdev->id >= 0)
@@ -541,7 +565,6 @@ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
err_no_irq:
/* remove from dmaengine device node */
shdma_chan_remove(schan);
- kfree(sh_chan);
return err;
}
@@ -552,14 +575,9 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
int i;
shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
- struct sh_dmae_chan *sh_chan = container_of(schan,
- struct sh_dmae_chan, shdma_chan);
BUG_ON(!schan);
- shdma_free_irq(&sh_chan->shdma_chan);
-
shdma_chan_remove(schan);
- kfree(sh_chan);
}
dma_dev->chancnt = 0;
}
@@ -636,7 +654,7 @@ static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
* This is an exclusive slave DMA operation, may only be called after a
* successful slave configuration.
*/
- return sh_chan->config->addr;
+ return sh_chan->slave_addr;
}
static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
@@ -658,9 +676,15 @@ static const struct shdma_ops sh_dmae_shdma_ops = {
.get_partial = sh_dmae_get_partial,
};
+static const struct of_device_id sh_dmae_of_match[] = {
+ {.compatible = "renesas,shdma-r8a73a4", .data = r8a73a4_shdma_devid,},
+ {}
+};
+MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
+
static int sh_dmae_probe(struct platform_device *pdev)
{
- struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
+ const struct sh_dmae_pdata *pdata;
unsigned long irqflags = IRQF_DISABLED,
chan_flag[SH_DMAE_MAX_CHANNELS] = {};
int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
@@ -669,6 +693,11 @@ static int sh_dmae_probe(struct platform_device *pdev)
struct dma_device *dma_dev;
struct resource *chan, *dmars, *errirq_res, *chanirq_res;
+ if (pdev->dev.of_node)
+ pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data;
+ else
+ pdata = dev_get_platdata(&pdev->dev);
+
/* get platform data */
if (!pdata || !pdata->channel_num)
return -ENODEV;
@@ -696,33 +725,22 @@ static int sh_dmae_probe(struct platform_device *pdev)
if (!chan || !errirq_res)
return -ENODEV;
- if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
- dev_err(&pdev->dev, "DMAC register region already claimed\n");
- return -EBUSY;
- }
-
- if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
- dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
- err = -EBUSY;
- goto ermrdmars;
- }
-
- err = -ENOMEM;
- shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
+ shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
+ GFP_KERNEL);
if (!shdev) {
dev_err(&pdev->dev, "Not enough memory\n");
- goto ealloc;
+ return -ENOMEM;
}
dma_dev = &shdev->shdma_dev.dma_dev;
- shdev->chan_reg = ioremap(chan->start, resource_size(chan));
- if (!shdev->chan_reg)
- goto emapchan;
+ shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
+ if (IS_ERR(shdev->chan_reg))
+ return PTR_ERR(shdev->chan_reg);
if (dmars) {
- shdev->dmars = ioremap(dmars->start, resource_size(dmars));
- if (!shdev->dmars)
- goto emapdmars;
+ shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
+ if (IS_ERR(shdev->dmars))
+ return PTR_ERR(shdev->dmars);
}
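The probe conversion leans on devm_ioremap_resource() doing the request_mem_region() + ioremap() dance, including the NULL-resource check and error message, which is why the hand-rolled unwinding labels disappear below. A minimal sketch:

#include <linux/platform_device.h>

static void __iomem *example_map(struct platform_device *pdev,
				 unsigned int index)
{
	struct resource *res =
		platform_get_resource(pdev, IORESOURCE_MEM, index);

	/* returns an ERR_PTR on any failure, never NULL */
	return devm_ioremap_resource(&pdev->dev, res);
}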
if (!pdata->slave_only)
@@ -783,8 +801,8 @@ static int sh_dmae_probe(struct platform_device *pdev)
errirq = errirq_res->start;
- err = request_irq(errirq, sh_dmae_err, irqflags,
- "DMAC Address Error", shdev);
+ err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
+ "DMAC Address Error", shdev);
if (err) {
dev_err(&pdev->dev,
"DMA failed requesting irq #%d, error %d\n",
@@ -862,7 +880,6 @@ chan_probe_err:
sh_dmae_chan_remove(shdev);
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
- free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
@@ -873,21 +890,9 @@ rst_err:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- platform_set_drvdata(pdev, NULL);
shdma_cleanup(&shdev->shdma_dev);
eshdma:
- if (dmars)
- iounmap(shdev->dmars);
-emapdmars:
- iounmap(shdev->chan_reg);
synchronize_rcu();
-emapchan:
- kfree(shdev);
-ealloc:
- if (dmars)
- release_mem_region(dmars->start, resource_size(dmars));
-ermrdmars:
- release_mem_region(chan->start, resource_size(chan));
return err;
}
@@ -896,14 +901,9 @@ static int sh_dmae_remove(struct platform_device *pdev)
{
struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
- struct resource *res;
- int errirq = platform_get_irq(pdev, 0);
dma_async_device_unregister(dma_dev);
- if (errirq > 0)
- free_irq(errirq, shdev);
-
spin_lock_irq(&sh_dmae_lock);
list_del_rcu(&shdev->node);
spin_unlock_irq(&sh_dmae_lock);
@@ -913,31 +913,11 @@ static int sh_dmae_remove(struct platform_device *pdev)
sh_dmae_chan_remove(shdev);
shdma_cleanup(&shdev->shdma_dev);
- if (shdev->dmars)
- iounmap(shdev->dmars);
- iounmap(shdev->chan_reg);
-
- platform_set_drvdata(pdev, NULL);
-
synchronize_rcu();
- kfree(shdev);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res)
- release_mem_region(res->start, resource_size(res));
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res)
- release_mem_region(res->start, resource_size(res));
return 0;
}
-static const struct of_device_id sh_dmae_of_match[] = {
- { .compatible = "renesas,shdma", },
- { }
-};
-MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
-
static struct platform_driver sh_dmae_driver = {
.driver = {
.owner = THIS_MODULE,
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
index e7c94bbddb5..c7e9cdff070 100644
--- a/drivers/dma/sh/sudmac.c
+++ b/drivers/dma/sh/sudmac.c
@@ -150,7 +150,8 @@ static const struct sudmac_slave_config *sudmac_find_slave(
return NULL;
}
-static int sudmac_set_slave(struct shdma_chan *schan, int slave_id, bool try)
+static int sudmac_set_slave(struct shdma_chan *schan, int slave_id,
+ dma_addr_t slave_addr, bool try)
{
struct sudmac_chan *sc = to_chan(schan);
const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id);
@@ -298,11 +299,8 @@ static void sudmac_chan_remove(struct sudmac_device *su_dev)
int i;
shdma_for_each_chan(schan, &su_dev->shdma_dev, i) {
- struct sudmac_chan *sc = to_chan(schan);
-
BUG_ON(!schan);
- shdma_free_irq(&sc->shdma_chan);
shdma_chan_remove(schan);
}
dma_dev->chancnt = 0;
@@ -335,7 +333,7 @@ static const struct shdma_ops sudmac_shdma_ops = {
static int sudmac_probe(struct platform_device *pdev)
{
- struct sudmac_pdata *pdata = pdev->dev.platform_data;
+ struct sudmac_pdata *pdata = dev_get_platdata(&pdev->dev);
int err, i;
struct sudmac_device *su_dev;
struct dma_device *dma_dev;
@@ -345,9 +343,8 @@ static int sudmac_probe(struct platform_device *pdev)
if (!pdata)
return -ENODEV;
- chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!chan || !irq_res)
+ if (!irq_res)
return -ENODEV;
err = -ENOMEM;
@@ -360,9 +357,10 @@ static int sudmac_probe(struct platform_device *pdev)
dma_dev = &su_dev->shdma_dev.dma_dev;
- su_dev->chan_reg = devm_request_and_ioremap(&pdev->dev, chan);
- if (!su_dev->chan_reg)
- return err;
+ chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ su_dev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
+ if (IS_ERR(su_dev->chan_reg))
+ return PTR_ERR(su_dev->chan_reg);
dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
@@ -373,7 +371,7 @@ static int sudmac_probe(struct platform_device *pdev)
return err;
/* platform data */
- su_dev->pdata = pdev->dev.platform_data;
+ su_dev->pdata = dev_get_platdata(&pdev->dev);
platform_set_drvdata(pdev, su_dev);
@@ -393,7 +391,6 @@ static int sudmac_probe(struct platform_device *pdev)
chan_probe_err:
sudmac_chan_remove(su_dev);
- platform_set_drvdata(pdev, NULL);
shdma_cleanup(&su_dev->shdma_dev);
return err;
@@ -407,7 +404,6 @@ static int sudmac_remove(struct platform_device *pdev)
dma_async_device_unregister(dma_dev);
sudmac_chan_remove(su_dev);
shdma_cleanup(&su_dev->shdma_dev);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 716b23e4f32..6aec3ad814d 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -73,6 +74,11 @@ struct sirfsoc_dma_chan {
int mode;
};
+struct sirfsoc_dma_regs {
+ u32 ctrl[SIRFSOC_DMA_CHANNELS];
+ u32 interrupt_en;
+};
+
struct sirfsoc_dma {
struct dma_device dma;
struct tasklet_struct tasklet;
@@ -81,10 +87,13 @@ struct sirfsoc_dma {
int irq;
struct clk *clk;
bool is_marco;
+ struct sirfsoc_dma_regs regs_save;
};
#define DRV_NAME "sirfsoc_dma"
+static int sirfsoc_dma_runtime_suspend(struct device *dev);
+
/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
@@ -393,6 +402,8 @@ static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
LIST_HEAD(descs);
int i;
+ pm_runtime_get_sync(sdma->dma.dev);
+
/* Alloc descriptors for this channel */
for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
@@ -425,6 +436,7 @@ static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
struct sirfsoc_dma_desc *sdesc, *tmp;
unsigned long flags;
LIST_HEAD(descs);
@@ -445,6 +457,8 @@ static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
/* Free descriptors */
list_for_each_entry_safe(sdesc, tmp, &descs, node)
kfree(sdesc);
+
+ pm_runtime_put(sdma->dma.dev);
}
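alloc_chan_resources()/free_chan_resources() now bracket channel use with runtime-PM references, so the clock (gated by the runtime callbacks added further down) only runs while at least one channel is allocated. The pairing in isolation, as a sketch:

#include <linux/pm_runtime.h>

static int example_channel_open(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev); /* drop the failed reference */
		return ret;
	}
	return 0;
}

static void example_channel_close(struct device *dev)
{
	pm_runtime_put(dev);
}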
/* Send pending descriptor to hardware */
@@ -595,7 +609,7 @@ sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
spin_unlock_irqrestore(&schan->lock, iflags);
if (!sdesc)
- return 0;
+ return NULL;
/* Place descriptor in prepared list */
spin_lock_irqsave(&schan->lock, iflags);
@@ -723,14 +737,14 @@ static int sirfsoc_dma_probe(struct platform_device *op)
tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
- clk_prepare_enable(sdma->clk);
-
/* Register DMA engine */
dev_set_drvdata(dev, sdma);
+
ret = dma_async_device_register(dma);
if (ret)
goto free_irq;
+ pm_runtime_enable(&op->dev);
dev_info(dev, "initialized SIRFSOC DMAC driver\n");
return 0;
@@ -747,13 +761,124 @@ static int sirfsoc_dma_remove(struct platform_device *op)
struct device *dev = &op->dev;
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
- clk_disable_unprepare(sdma->clk);
dma_async_device_unregister(&sdma->dma);
free_irq(sdma->irq, sdma);
irq_dispose_mapping(sdma->irq);
+ pm_runtime_disable(&op->dev);
+ if (!pm_runtime_status_suspended(&op->dev))
+ sirfsoc_dma_runtime_suspend(&op->dev);
+
+ return 0;
+}
+
+static int sirfsoc_dma_runtime_suspend(struct device *dev)
+{
+ struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(sdma->clk);
+ return 0;
+}
+
+static int sirfsoc_dma_runtime_resume(struct device *dev)
+{
+ struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(sdma->clk);
+ if (ret < 0) {
+ dev_err(dev, "clk_enable failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int sirfsoc_dma_pm_suspend(struct device *dev)
+{
+ struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+ struct sirfsoc_dma_regs *save = &sdma->regs_save;
+ struct sirfsoc_dma_chan *schan;
+ int ch;
+ int ret;
+
+ /*
+ * If we were runtime-suspended before, resume to enable the clock
+ * before accessing registers.
+ */
+ if (pm_runtime_status_suspended(dev)) {
+ ret = sirfsoc_dma_runtime_resume(dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * The DMA controller loses all register contents while suspended,
+ * so save the registers of every active channel.
+ */
+ for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
+ schan = &sdma->channels[ch];
+ if (list_empty(&schan->active))
+ continue;
+ save->ctrl[ch] = readl_relaxed(sdma->base +
+ ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
+ }
+ save->interrupt_en = readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN);
+
+ /* Disable clock */
+ sirfsoc_dma_runtime_suspend(dev);
+
+ return 0;
+}
+
+static int sirfsoc_dma_pm_resume(struct device *dev)
+{
+ struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+ struct sirfsoc_dma_regs *save = &sdma->regs_save;
+ struct sirfsoc_dma_desc *sdesc;
+ struct sirfsoc_dma_chan *schan;
+ int ch;
+ int ret;
+
+ /* Enable the clock before accessing registers */
+ ret = sirfsoc_dma_runtime_resume(dev);
+ if (ret < 0)
+ return ret;
+
+ writel_relaxed(save->interrupt_en, sdma->base + SIRFSOC_DMA_INT_EN);
+ for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
+ schan = &sdma->channels[ch];
+ if (list_empty(&schan->active))
+ continue;
+ sdesc = list_first_entry(&schan->active,
+ struct sirfsoc_dma_desc,
+ node);
+ writel_relaxed(sdesc->width,
+ sdma->base + SIRFSOC_DMA_WIDTH_0 + ch * 4);
+ writel_relaxed(sdesc->xlen,
+ sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN);
+ writel_relaxed(sdesc->ylen,
+ sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN);
+ writel_relaxed(save->ctrl[ch],
+ sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
+ writel_relaxed(sdesc->addr >> 2,
+ sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR);
+ }
+
+ /* if we were runtime-suspended before, suspend again */
+ if (pm_runtime_status_suspended(dev))
+ sirfsoc_dma_runtime_suspend(dev);
+
return 0;
}
+static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
+ SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
+};
+
static struct of_device_id sirfsoc_dma_match[] = {
{ .compatible = "sirf,prima2-dmac", },
{ .compatible = "sirf,marco-dmac", },
@@ -766,6 +891,7 @@ static struct platform_driver sirfsoc_dma_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .pm = &sirfsoc_dma_pm_ops,
.of_match_table = sirfsoc_dma_match,
},
};
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 5ab5880d5c9..82d2b97ad94 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2591,6 +2591,9 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
int i;
sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
+ if (!sg)
+ return NULL;
+
for (i = 0; i < periods; i++) {
sg_dma_address(&sg[i]) = dma_addr;
sg_dma_len(&sg[i]) = period_len;
@@ -3139,7 +3142,7 @@ static int __init d40_phy_res_init(struct d40_base *base)
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
- struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
+ struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
struct clk *clk = NULL;
void __iomem *virtbase = NULL;
struct resource *res = NULL;
@@ -3226,8 +3229,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
dev_info(&pdev->dev,
- "hardware rev: %d @ 0x%x with %d physical and %d logical channels\n",
- rev, res->start, num_phy_chans, num_log_chans);
+ "hardware rev: %d @ %pa with %d physical and %d logical channels\n",
+ rev, &res->start, num_phy_chans, num_log_chans);
base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
(num_phy_chans + num_log_chans + num_memcpy_chans) *
@@ -3485,7 +3488,7 @@ static int __init d40_of_probe(struct platform_device *pdev,
{
struct stedma40_platform_data *pdata;
int num_phy = 0, num_memcpy = 0, num_disabled = 0;
- const const __be32 *list;
+ const __be32 *list;
pdata = devm_kzalloc(&pdev->dev,
sizeof(struct stedma40_platform_data),
@@ -3516,7 +3519,7 @@ static int __init d40_of_probe(struct platform_device *pdev,
list = of_get_property(np, "disabled-channels", &num_disabled);
num_disabled /= sizeof(*list);
- if (num_disabled > STEDMA40_MAX_PHYS || num_disabled < 0) {
+ if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
d40_err(&pdev->dev,
"Invalid number of disabled channels specified (%d)\n",
num_disabled);
@@ -3535,7 +3538,7 @@ static int __init d40_of_probe(struct platform_device *pdev,
static int __init d40_probe(struct platform_device *pdev)
{
- struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
+ struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
struct device_node *np = pdev->dev.of_node;
int ret = -ENOENT;
struct d40_base *base = NULL;
@@ -3579,9 +3582,7 @@ static int __init d40_probe(struct platform_device *pdev)
if (request_mem_region(res->start, resource_size(res),
D40_NAME " I/O lcpa") == NULL) {
ret = -EBUSY;
- d40_err(&pdev->dev,
- "Failed to request LCPA region 0x%x-0x%x\n",
- res->start, res->end);
+ d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
goto failure;
}
@@ -3589,8 +3590,8 @@ static int __init d40_probe(struct platform_device *pdev)
val = readl(base->virtbase + D40_DREG_LCPA);
if (res->start != val && val != 0) {
dev_warn(&pdev->dev,
- "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
- __func__, val, res->start);
+ "[%s] Mismatch LCPA dma 0x%x, def %pa\n",
+ __func__, val, &res->start);
} else
writel(res->start, base->virtbase + D40_DREG_LCPA);
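Both printk fixups in this file switch to the %pa specifier, which takes a pointer to a phys_addr_t/resource_size_t and prints it at the right width on 32-bit, 64-bit and LPAE kernels alike. Correct usage, sketched:

#include <linux/device.h>
#include <linux/ioport.h>

static void example_report(struct device *dev, struct resource *res)
{
	/* %pa dereferences its argument: pass &res->start, not the value */
	dev_info(dev, "region starts at %pa\n", &res->start);
}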
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index f137914d7b1..5d4986e5f5f 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -767,13 +767,11 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
unsigned long flags;
unsigned int residual;
- spin_lock_irqsave(&tdc->lock, flags);
-
ret = dma_cookie_status(dc, cookie, txstate);
- if (ret == DMA_SUCCESS) {
- spin_unlock_irqrestore(&tdc->lock, flags);
+ if (ret == DMA_SUCCESS)
return ret;
- }
+
+ spin_lock_irqsave(&tdc->lock, flags);
/* Check on wait_ack desc status */
list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
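The reordering answers completed cookies without taking tdc->lock; only the residue bookkeeping still needs it. The txx9dmac hunk further down makes the same transformation. The common shape, sketched (the scan helper is hypothetical; dma_cookie_status() comes from the driver-local dmaengine.h):

#include <linux/dmaengine.h>
#include "dmaengine.h"

static void example_scan_descriptors(struct dma_chan *chan);

static enum dma_status example_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *txstate)
{
	enum dma_status ret = dma_cookie_status(chan, cookie, txstate);

	if (ret == DMA_SUCCESS)
		return ret; /* complete: nothing left to compute */

	example_scan_descriptors(chan); /* slow path, under the channel lock */
	return dma_cookie_status(chan, cookie, txstate);
}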
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 0ef43c136aa..28af214fce0 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -669,7 +669,7 @@ static irqreturn_t td_irq(int irq, void *devid)
static int td_probe(struct platform_device *pdev)
{
- struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
+ struct timb_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct timb_dma *td;
struct resource *iomem;
int irq;
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index a59fb4841d4..71e8e775189 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -962,15 +962,14 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_SUCCESS) {
- spin_lock_bh(&dc->lock);
- txx9dmac_scan_descriptors(dc);
- spin_unlock_bh(&dc->lock);
+ if (ret == DMA_SUCCESS)
+ return DMA_SUCCESS;
- ret = dma_cookie_status(chan, cookie, txstate);
- }
+ spin_lock_bh(&dc->lock);
+ txx9dmac_scan_descriptors(dc);
+ spin_unlock_bh(&dc->lock);
- return ret;
+ return dma_cookie_status(chan, cookie, txstate);
}
static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
@@ -1118,9 +1117,10 @@ static void txx9dmac_off(struct txx9dmac_dev *ddev)
static int __init txx9dmac_chan_probe(struct platform_device *pdev)
{
- struct txx9dmac_chan_platform_data *cpdata = pdev->dev.platform_data;
+ struct txx9dmac_chan_platform_data *cpdata =
+ dev_get_platdata(&pdev->dev);
struct platform_device *dmac_dev = cpdata->dmac_dev;
- struct txx9dmac_platform_data *pdata = dmac_dev->dev.platform_data;
+ struct txx9dmac_platform_data *pdata = dev_get_platdata(&dmac_dev->dev);
struct txx9dmac_chan *dc;
int err;
int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS;
@@ -1203,7 +1203,7 @@ static int txx9dmac_chan_remove(struct platform_device *pdev)
static int __init txx9dmac_probe(struct platform_device *pdev)
{
- struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
+ struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct resource *io;
struct txx9dmac_dev *ddev;
u32 mcr;
@@ -1282,7 +1282,7 @@ static int txx9dmac_resume_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
- struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
+ struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
u32 mcr;
mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;