author     Linus Torvalds <torvalds@linux-foundation.org>    2014-04-07 16:38:06 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-04-07 16:38:06 -0700
commit     26c12d93348f0bda0756aff83f4867d9ae58a5a6 (patch)
tree       65221f6837c66a9260c5c973e5fb908b10e0d504 /drivers/rapidio/devices/tsi721_dma.c
parent     dc5ed40686a4da95881c35d913b60f867755cbe2 (diff)
parent     fdc5813fbbd484a54c88477f91a78934cda8bb32 (diff)
Merge branch 'akpm' (incoming from Andrew)
Merge second patch-bomb from Andrew Morton:
- the rest of MM
- zram updates
- zswap updates
- exit
- procfs
- exec
- wait
- crash dump
- lib/idr
- rapidio
- adfs, affs, bfs, ufs
- cris
- Kconfig things
- initramfs
- small amount of IPC material
- percpu enhancements
- early ioremap support
- various other misc things
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (156 commits)
MAINTAINERS: update Intel C600 SAS driver maintainers
fs/ufs: remove unused ufs_super_block_third pointer
fs/ufs: remove unused ufs_super_block_second pointer
fs/ufs: remove unused ufs_super_block_first pointer
fs/ufs/super.c: add __init to init_inodecache()
doc/kernel-parameters.txt: add early_ioremap_debug
arm64: add early_ioremap support
arm64: initialize pgprot info earlier in boot
x86: use generic early_ioremap
mm: create generic early_ioremap() support
x86/mm: sparse warning fix for early_memremap
lglock: map to spinlock when !CONFIG_SMP
percpu: add preemption checks to __this_cpu ops
vmstat: use raw_cpu_ops to avoid false positives on preemption checks
slub: use raw_cpu_inc for incrementing statistics
net: replace __this_cpu_inc in route.c with raw_cpu_inc
modules: use raw_cpu_write for initialization of per cpu refcount.
mm: use raw_cpu ops for determining current NUMA node
percpu: add raw_cpu_ops
slub: fix leak of 'name' in sysfs_slab_add
...
Diffstat (limited to 'drivers/rapidio/devices/tsi721_dma.c')
-rw-r--r-- | drivers/rapidio/devices/tsi721_dma.c | 111 |
1 files changed, 78 insertions, 33 deletions
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 91245f5dbe8..9b60b1f3261 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -304,35 +304,17 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
 }
 
 static int
-tsi721_fill_desc(struct tsi721_bdma_chan *bdma_chan,
-	struct tsi721_tx_desc *desc, struct scatterlist *sg,
+tsi721_desc_fill_init(struct tsi721_tx_desc *desc, struct scatterlist *sg,
 	enum dma_rtype rtype, u32 sys_size)
 {
 	struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
 	u64 rio_addr;
 
-	if (sg_dma_len(sg) > TSI721_DMAD_BCOUNT1 + 1) {
-		dev_err(bdma_chan->dchan.device->dev,
-			"SG element is too large\n");
-		return -EINVAL;
-	}
-
-	dev_dbg(bdma_chan->dchan.device->dev,
-		"desc: 0x%llx, addr: 0x%llx len: 0x%x\n",
-		(u64)desc->txd.phys, (unsigned long long)sg_dma_address(sg),
-		sg_dma_len(sg));
-
-	dev_dbg(bdma_chan->dchan.device->dev,
-		"bd_ptr = %p did=%d raddr=0x%llx\n",
-		bd_ptr, desc->destid, desc->rio_addr);
-
 	/* Initialize DMA descriptor */
 	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
 					(rtype << 19) | desc->destid);
-	if (desc->interrupt)
-		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
 	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
-					(sys_size << 26) | sg_dma_len(sg));
+					(sys_size << 26));
 	rio_addr = (desc->rio_addr >> 2) |
 				((u64)(desc->rio_addr_u & 0x3) << 62);
 	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
@@ -346,6 +328,20 @@ tsi721_fill_desc(struct tsi721_bdma_chan *bdma_chan,
 	return 0;
 }
 
+static int
+tsi721_desc_fill_end(struct tsi721_tx_desc *desc)
+{
+	struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
+
+	/* Update DMA descriptor */
+	if (desc->interrupt)
+		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
+	bd_ptr->bcount |= cpu_to_le32(desc->bcount & TSI721_DMAD_BCOUNT1);
+
+	return 0;
+}
+
+
 static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan,
 				      struct tsi721_tx_desc *desc)
 {
@@ -674,6 +670,7 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
 	unsigned int i;
 	u32 sys_size = dma_to_mport(dchan->device)->sys_size;
 	enum dma_rtype rtype;
+	dma_addr_t next_addr = -1;
 
 	if (!sgl || !sg_len) {
 		dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
@@ -704,36 +701,84 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
 	for_each_sg(sgl, sg, sg_len, i) {
 		int err;
 
-		dev_dbg(dchan->device->dev, "%s: sg #%d\n", __func__, i);
+		if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
+			dev_err(dchan->device->dev,
+				"%s: SG entry %d is too large\n", __func__, i);
+			goto err_desc_put;
+		}
+
+		/*
+		 * If this sg entry forms contiguous block with previous one,
+		 * try to merge it into existing DMA descriptor
+		 */
+		if (desc) {
+			if (next_addr == sg_dma_address(sg) &&
+			    desc->bcount + sg_dma_len(sg) <=
+					TSI721_BDMA_MAX_BCOUNT) {
+				/* Adjust byte count of the descriptor */
+				desc->bcount += sg_dma_len(sg);
+				goto entry_done;
+			}
+
+			/*
+			 * Finalize this descriptor using total
+			 * byte count value.
+			 */
+			tsi721_desc_fill_end(desc);
+			dev_dbg(dchan->device->dev, "%s: desc final len: %d\n",
+				__func__, desc->bcount);
+		}
+
+		/*
+		 * Obtain and initialize a new descriptor
+		 */
 		desc = tsi721_desc_get(bdma_chan);
 		if (!desc) {
 			dev_err(dchan->device->dev,
-				"Not enough descriptors available\n");
-			goto err_desc_get;
+				"%s: Failed to get new descriptor for SG %d\n",
+				__func__, i);
+			goto err_desc_put;
 		}
 
-		if (sg_is_last(sg))
-			desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
-		else
-			desc->interrupt = false;
-
 		desc->destid = rext->destid;
 		desc->rio_addr = rio_addr;
 		desc->rio_addr_u = 0;
+		desc->bcount = sg_dma_len(sg);
+
+		dev_dbg(dchan->device->dev,
+			"sg%d desc: 0x%llx, addr: 0x%llx len: %d\n",
+			i, (u64)desc->txd.phys,
+			(unsigned long long)sg_dma_address(sg),
+			sg_dma_len(sg));
+
+		dev_dbg(dchan->device->dev,
+			"bd_ptr = %p did=%d raddr=0x%llx\n",
+			desc->hw_desc, desc->destid, desc->rio_addr);
 
-		err = tsi721_fill_desc(bdma_chan, desc, sg, rtype, sys_size);
+		err = tsi721_desc_fill_init(desc, sg, rtype, sys_size);
 		if (err) {
 			dev_err(dchan->device->dev,
 				"Failed to build desc: %d\n", err);
-			goto err_desc_get;
+			goto err_desc_put;
 		}
 
-		rio_addr += sg_dma_len(sg);
+		next_addr = sg_dma_address(sg);
 
 		if (!first)
 			first = desc;
 		else
 			list_add_tail(&desc->desc_node, &first->tx_list);
+
+entry_done:
+		if (sg_is_last(sg)) {
+			desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
+			tsi721_desc_fill_end(desc);
+			dev_dbg(dchan->device->dev, "%s: desc final len: %d\n",
				__func__, desc->bcount);
+		} else {
+			rio_addr += sg_dma_len(sg);
+			next_addr += sg_dma_len(sg);
+		}
 	}
 
 	first->txd.cookie = -EBUSY;
@@ -741,7 +786,7 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
 
 	return &first->txd;
 
-err_desc_get:
+err_desc_put:
 	tsi721_desc_put(bdma_chan, first);
 	return NULL;
 }
@@ -792,7 +837,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
 		if (i == TSI721_DMACH_MAINT)
 			continue;
 
-		bdma_chan->bd_num = 64;
+		bdma_chan->bd_num = TSI721_BDMA_BD_RING_SZ;
 		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);
 		bdma_chan->dchan.device = &mport->dma;
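
For readers unfamiliar with the scatter-gather coalescing pattern this patch introduces into tsi721_prep_rio_sg(), below is a minimal, standalone C sketch of the same idea: walk a list of DMA-mapped segments and merge entries that are physically contiguous with the previous one into a single descriptor, as long as the accumulated byte count stays within a per-descriptor limit. The types (struct sg_entry, struct hw_desc), the MAX_BCOUNT value, and the main() test data are illustrative stand-ins, not the driver's real structures or limits.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver's types (not the real tsi721 structures). */
struct sg_entry {
	uint64_t dma_addr;	/* bus address of the mapped segment */
	uint32_t len;		/* segment length in bytes */
};

struct hw_desc {
	uint64_t addr;		/* start address covered by this descriptor */
	uint32_t bcount;	/* total byte count, grown by merging */
};

#define MAX_BCOUNT	(64 * 1024)	/* assumed per-descriptor limit */

/*
 * Build descriptors from an SG list, merging entries that start exactly
 * where the previous one ended while the merged byte count stays within
 * MAX_BCOUNT.  Returns the number of descriptors written, or -1 if the
 * output array is too small.
 */
static int build_descs(const struct sg_entry *sgl, int sg_len,
		       struct hw_desc *descs, int max_descs)
{
	struct hw_desc *d = NULL;
	uint64_t next_addr = 0;
	int n = 0;

	for (int i = 0; i < sg_len; i++) {
		/* Contiguous with the open descriptor and still within the limit? */
		if (d && sgl[i].dma_addr == next_addr &&
		    d->bcount + sgl[i].len <= MAX_BCOUNT) {
			d->bcount += sgl[i].len;
		} else {
			if (n == max_descs)
				return -1;
			d = &descs[n++];
			d->addr = sgl[i].dma_addr;
			d->bcount = sgl[i].len;
		}
		next_addr = sgl[i].dma_addr + sgl[i].len;
	}
	return n;
}

int main(void)
{
	/* Two contiguous segments followed by a gap: expect two descriptors. */
	struct sg_entry sgl[] = {
		{ 0x1000, 0x800 },
		{ 0x1800, 0x800 },	/* contiguous with the first */
		{ 0x9000, 0x400 },	/* gap: starts a new descriptor */
	};
	struct hw_desc descs[3];
	int n = build_descs(sgl, 3, descs, 3);

	for (int i = 0; i < n; i++)
		printf("desc %d: addr=0x%llx len=%u\n", i,
		       (unsigned long long)descs[i].addr, descs[i].bcount);
	return 0;
}

The patch follows the same shape inside the driver: tsi721_desc_fill_init() opens a descriptor, contiguous sg entries only bump desc->bcount, and tsi721_desc_fill_end() writes the final byte count (and the interrupt flag) once the run of contiguous entries ends or the list is exhausted.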