Diffstat (limited to 'drivers/dma')
-rw-r--r--   drivers/dma/Kconfig     |  10
-rw-r--r--   drivers/dma/dmaengine.c |   4
-rw-r--r--   drivers/dma/dmatest.c   |  29
-rw-r--r--   drivers/dma/fsldma.c    | 270
-rw-r--r--   drivers/dma/fsldma.h    |   1
-rw-r--r--   drivers/dma/ioat_dma.c  |  19
-rw-r--r--   drivers/dma/iop-adma.c  |  11
-rw-r--r--   drivers/dma/iovlock.c   |  17
8 files changed, 140 insertions, 221 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index cd303901eb5..904e57558bb 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -48,13 +48,13 @@ config DW_DMAC
 	  can be integrated in chips such as the Atmel AT32ap7000.
 
 config FSL_DMA
-	bool "Freescale MPC85xx/MPC83xx DMA support"
-	depends on PPC
+	tristate "Freescale Elo and Elo Plus DMA support"
+	depends on FSL_SOC
 	select DMA_ENGINE
 	---help---
-	  Enable support for the Freescale DMA engine. Now, it support
-	  MPC8560/40, MPC8555, MPC8548 and MPC8641 processors.
-	  The MPC8349, MPC8360 is also supported.
+	  Enable support for the Freescale Elo and Elo Plus DMA controllers.
+	  The Elo is the DMA controller on some 82xx and 83xx parts, and the
+	  Elo Plus is the DMA controller on 85xx and 86xx parts.
 
 config MV_XOR
 	bool "Marvell XOR engine support"
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index dc003a3a787..5317e08221e 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -399,8 +399,8 @@ int dma_async_device_register(struct dma_device *device)
 		chan->chan_id = chancnt++;
 		chan->dev.class = &dma_devclass;
 		chan->dev.parent = device->dev;
-		snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d",
-			 device->dev_id, chan->chan_id);
+		dev_set_name(&chan->dev, "dma%dchan%d",
+			     device->dev_id, chan->chan_id);
 
 		rc = device_register(&chan->dev);
 		if (rc) {
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a08d1970474..ed9636bfb54 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -20,11 +20,11 @@ static unsigned int test_buf_size = 16384;
 module_param(test_buf_size, uint, S_IRUGO);
 MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
 
-static char test_channel[BUS_ID_SIZE];
+static char test_channel[20];
 module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
 MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
 
-static char test_device[BUS_ID_SIZE];
+static char test_device[20];
 module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
 MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
 
@@ -80,14 +80,14 @@ static bool dmatest_match_channel(struct dma_chan *chan)
 {
 	if (test_channel[0] == '\0')
 		return true;
-	return strcmp(chan->dev.bus_id, test_channel) == 0;
+	return strcmp(dev_name(&chan->dev), test_channel) == 0;
 }
 
 static bool dmatest_match_device(struct dma_device *device)
 {
 	if (test_device[0] == '\0')
 		return true;
-	return strcmp(device->dev->bus_id, test_device) == 0;
+	return strcmp(dev_name(device->dev), test_device) == 0;
 }
 
 static unsigned long dmatest_random(void)
@@ -325,9 +325,14 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
 	struct dmatest_thread *thread;
 	unsigned int i;
 
-	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_ATOMIC);
+	/* Have we already been told about this channel? */
+	list_for_each_entry(dtc, &dmatest_channels, node)
+		if (dtc->chan == chan)
+			return DMA_DUP;
+
+	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
 	if (!dtc) {
-		pr_warning("dmatest: No memory for %s\n", chan->dev.bus_id);
+		pr_warning("dmatest: No memory for %s\n", dev_name(&chan->dev));
 		return DMA_NAK;
 	}
 
@@ -338,16 +343,16 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
 		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
 		if (!thread) {
 			pr_warning("dmatest: No memory for %s-test%u\n",
-					chan->dev.bus_id, i);
+					dev_name(&chan->dev), i);
 			break;
 		}
 		thread->chan = dtc->chan;
 		smp_wmb();
 		thread->task = kthread_run(dmatest_func, thread, "%s-test%u",
-				chan->dev.bus_id, i);
+				dev_name(&chan->dev), i);
 		if (IS_ERR(thread->task)) {
 			pr_warning("dmatest: Failed to run thread %s-test%u\n",
-					chan->dev.bus_id, i);
+					dev_name(&chan->dev), i);
 			kfree(thread);
 			break;
 		}
@@ -357,7 +362,7 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
 		list_add_tail(&thread->node, &dtc->threads);
 	}
 
-	pr_info("dmatest: Started %u threads using %s\n", i, chan->dev.bus_id);
+	pr_info("dmatest: Started %u threads using %s\n", i, dev_name(&chan->dev));
 
 	list_add_tail(&dtc->node, &dmatest_channels);
 	nr_channels++;
@@ -374,7 +379,7 @@ static enum dma_state_client dmatest_remove_channel(struct dma_chan *chan)
 			list_del(&dtc->node);
 			dmatest_cleanup_channel(dtc);
 			pr_debug("dmatest: lost channel %s\n",
-					chan->dev.bus_id);
+					dev_name(&chan->dev));
 			return DMA_ACK;
 		}
 	}
@@ -413,7 +418,7 @@ dmatest_event(struct dma_client *client, struct dma_chan *chan,
 
 	default:
 		pr_info("dmatest: Unhandled event %u (%s)\n",
-			state, chan->dev.bus_id);
+			state, dev_name(&chan->dev));
 		break;
 	}
 
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index c0059ca5834..0b95dcce447 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -370,7 +370,10 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *chan,
 					struct dma_client *client)
 {
 	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
-	LIST_HEAD(tmp_list);
+
+	/* Has this channel already been allocated? */
+	if (fsl_chan->desc_pool)
+		return 1;
 
 	/* We need the descriptor to be aligned to 32bytes
 	 * for meeting FSL DMA specification requirement.
@@ -410,6 +413,8 @@ static void fsl_dma_free_chan_resources(struct dma_chan *chan)
 	}
 	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
 	dma_pool_destroy(fsl_chan->desc_pool);
+
+	fsl_chan->desc_pool = NULL;
 }
 
 static struct dma_async_tx_descriptor *
@@ -786,159 +791,29 @@ static void dma_do_tasklet(unsigned long data)
 	fsl_chan_ld_cleanup(fsl_chan);
 }
 
-static void fsl_dma_callback_test(void *param)
-{
-	struct fsl_dma_chan *fsl_chan = param;
-	if (fsl_chan)
-		dev_dbg(fsl_chan->dev, "selftest: callback is ok!\n");
-}
-
-static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
-{
-	struct dma_chan *chan;
-	int err = 0;
-	dma_addr_t dma_dest, dma_src;
-	dma_cookie_t cookie;
-	u8 *src, *dest;
-	int i;
-	size_t test_size;
-	struct dma_async_tx_descriptor *tx1, *tx2, *tx3;
-
-	test_size = 4096;
-
-	src = kmalloc(test_size * 2, GFP_KERNEL);
-	if (!src) {
-		dev_err(fsl_chan->dev,
-				"selftest: Cannot alloc memory for test!\n");
-		return -ENOMEM;
-	}
-
-	dest = src + test_size;
-
-	for (i = 0; i < test_size; i++)
-		src[i] = (u8) i;
-
-	chan = &fsl_chan->common;
-
-	if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) {
-		dev_err(fsl_chan->dev,
-				"selftest: Cannot alloc resources for DMA\n");
-		err = -ENODEV;
-		goto out;
-	}
-
-	/* TX 1 */
-	dma_src = dma_map_single(fsl_chan->dev, src, test_size / 2,
-				 DMA_TO_DEVICE);
-	dma_dest = dma_map_single(fsl_chan->dev, dest, test_size / 2,
-				  DMA_FROM_DEVICE);
-	tx1 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 2, 0);
-	async_tx_ack(tx1);
-
-	cookie = fsl_dma_tx_submit(tx1);
-	fsl_dma_memcpy_issue_pending(chan);
-	msleep(2);
-
-	if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
-		dev_err(fsl_chan->dev, "selftest: Time out!\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	/* Test free and re-alloc channel resources */
-	fsl_dma_free_chan_resources(chan);
-
-	if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) {
-		dev_err(fsl_chan->dev,
-				"selftest: Cannot alloc resources for DMA\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	/* Continue to test
-	 * TX 2
-	 */
-	dma_src = dma_map_single(fsl_chan->dev, src + test_size / 2,
-					test_size / 4, DMA_TO_DEVICE);
-	dma_dest = dma_map_single(fsl_chan->dev, dest + test_size / 2,
-					test_size / 4, DMA_FROM_DEVICE);
-	tx2 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
-	async_tx_ack(tx2);
-
-	/* TX 3 */
-	dma_src = dma_map_single(fsl_chan->dev, src + test_size * 3 / 4,
-					test_size / 4, DMA_TO_DEVICE);
-	dma_dest = dma_map_single(fsl_chan->dev, dest + test_size * 3 / 4,
-					test_size / 4, DMA_FROM_DEVICE);
-	tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
-	async_tx_ack(tx3);
-
-	/* Interrupt tx test */
-	tx1 = fsl_dma_prep_interrupt(chan, 0);
-	async_tx_ack(tx1);
-	cookie = fsl_dma_tx_submit(tx1);
-
-	/* Test exchanging the prepared tx sort */
-	cookie = fsl_dma_tx_submit(tx3);
-	cookie = fsl_dma_tx_submit(tx2);
-
-	if (dma_has_cap(DMA_INTERRUPT, ((struct fsl_dma_device *)
-	    dev_get_drvdata(fsl_chan->dev->parent))->common.cap_mask)) {
-		tx3->callback = fsl_dma_callback_test;
-		tx3->callback_param = fsl_chan;
-	}
-	fsl_dma_memcpy_issue_pending(chan);
-	msleep(2);
-
-	if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
-		dev_err(fsl_chan->dev, "selftest: Time out!\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	err = memcmp(src, dest, test_size);
-	if (err) {
-		for (i = 0; (*(src + i) == *(dest + i)) && (i < test_size);
-				i++);
-		dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%ld is "
-				"error! src 0x%x, dest 0x%x\n",
-				i, (long)test_size, *(src + i), *(dest + i));
-	}
-
-free_resources:
-	fsl_dma_free_chan_resources(chan);
-out:
-	kfree(src);
-	return err;
-}
-
-static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
-			const struct of_device_id *match)
+static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
+	struct device_node *node, u32 feature, const char *compatible)
 {
-	struct fsl_dma_device *fdev;
 	struct fsl_dma_chan *new_fsl_chan;
 	int err;
 
-	fdev = dev_get_drvdata(dev->dev.parent);
-	BUG_ON(!fdev);
-
 	/* alloc channel */
 	new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
 	if (!new_fsl_chan) {
-		dev_err(&dev->dev, "No free memory for allocating "
+		dev_err(fdev->dev, "No free memory for allocating "
 				"dma channels!\n");
 		return -ENOMEM;
 	}
 
 	/* get dma channel register base */
-	err = of_address_to_resource(dev->node, 0, &new_fsl_chan->reg);
+	err = of_address_to_resource(node, 0, &new_fsl_chan->reg);
 	if (err) {
-		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
-				dev->node->full_name);
+		dev_err(fdev->dev, "Can't get %s property 'reg'\n",
+				node->full_name);
 		goto err_no_reg;
 	}
 
-	new_fsl_chan->feature = *(u32 *)match->data;
+	new_fsl_chan->feature = feature;
 
 	if (!fdev->feature)
 		fdev->feature = new_fsl_chan->feature;
@@ -948,13 +823,13 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
 	 */
 	WARN_ON(fdev->feature != new_fsl_chan->feature);
 
-	new_fsl_chan->dev = &dev->dev;
+	new_fsl_chan->dev = &new_fsl_chan->common.dev;
 	new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
 			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
 
 	new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
 	if (new_fsl_chan->id > FSL_DMA_MAX_CHANS_PER_DEVICE) {
-		dev_err(&dev->dev, "There is no %d channel!\n",
+		dev_err(fdev->dev, "There is no %d channel!\n",
 				new_fsl_chan->id);
 		err = -EINVAL;
 		goto err_no_chan;
@@ -988,29 +863,23 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
 			&fdev->common.channels);
 	fdev->common.chancnt++;
 
-	new_fsl_chan->irq = irq_of_parse_and_map(dev->node, 0);
+	new_fsl_chan->irq = irq_of_parse_and_map(node, 0);
 	if (new_fsl_chan->irq != NO_IRQ) {
 		err = request_irq(new_fsl_chan->irq,
 					&fsl_dma_chan_do_interrupt, IRQF_SHARED,
 					"fsldma-channel", new_fsl_chan);
 		if (err) {
-			dev_err(&dev->dev, "DMA channel %s request_irq error "
-				"with return %d\n", dev->node->full_name, err);
+			dev_err(fdev->dev, "DMA channel %s request_irq error "
+				"with return %d\n", node->full_name, err);
 			goto err_no_irq;
 		}
 	}
 
-	err = fsl_dma_self_test(new_fsl_chan);
-	if (err)
-		goto err_self_test;
-
-	dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
-				match->compatible, new_fsl_chan->irq);
+	dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
+		 compatible, new_fsl_chan->irq);
 
 	return 0;
 
-err_self_test:
-	free_irq(new_fsl_chan->irq, new_fsl_chan);
 err_no_irq:
 	list_del(&new_fsl_chan->common.device_node);
 err_no_chan:
@@ -1020,38 +889,20 @@ err_no_reg:
 	return err;
 }
 
-const u32 mpc8540_dma_ip_feature = FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN;
-const u32 mpc8349_dma_ip_feature = FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN;
-
-static struct of_device_id of_fsl_dma_chan_ids[] = {
-	{
-		.compatible = "fsl,eloplus-dma-channel",
-		.data = (void *)&mpc8540_dma_ip_feature,
-	},
-	{
-		.compatible = "fsl,elo-dma-channel",
-		.data = (void *)&mpc8349_dma_ip_feature,
-	},
-	{}
-};
-
-static struct of_platform_driver of_fsl_dma_chan_driver = {
-	.name = "of-fsl-dma-channel",
-	.match_table = of_fsl_dma_chan_ids,
-	.probe = of_fsl_dma_chan_probe,
-};
-
-static __init int of_fsl_dma_chan_init(void)
+static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
 {
-	return of_register_platform_driver(&of_fsl_dma_chan_driver);
+	free_irq(fchan->irq, fchan);
+	list_del(&fchan->common.device_node);
+	iounmap(fchan->reg_base);
+	kfree(fchan);
 }
 
 static int __devinit of_fsl_dma_probe(struct of_device *dev,
 			const struct of_device_id *match)
 {
 	int err;
-	unsigned int irq;
 	struct fsl_dma_device *fdev;
+	struct device_node *child;
 
 	fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
 	if (!fdev) {
@@ -1085,9 +936,9 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
 	fdev->common.dev = &dev->dev;
 
-	irq = irq_of_parse_and_map(dev->node, 0);
-	if (irq != NO_IRQ) {
-		err = request_irq(irq, &fsl_dma_do_interrupt, IRQF_SHARED,
+	fdev->irq = irq_of_parse_and_map(dev->node, 0);
+	if (fdev->irq != NO_IRQ) {
+		err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED,
 					"fsldma-device", fdev);
 		if (err) {
 			dev_err(&dev->dev, "DMA device request_irq error "
@@ -1097,7 +948,21 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
 	}
 
 	dev_set_drvdata(&(dev->dev), fdev);
-	of_platform_bus_probe(dev->node, of_fsl_dma_chan_ids, &dev->dev);
+
+	/* We cannot use of_platform_bus_probe() because there is no
+	 * of_platform_bus_remove. Instead, we manually instantiate every DMA
+	 * channel object.
+	 */
+	for_each_child_of_node(dev->node, child) {
+		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel"))
+			fsl_dma_chan_probe(fdev, child,
+				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
+				"fsl,eloplus-dma-channel");
+		if (of_device_is_compatible(child, "fsl,elo-dma-channel"))
+			fsl_dma_chan_probe(fdev, child,
+				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
+				"fsl,elo-dma-channel");
+	}
 
 	dma_async_device_register(&fdev->common);
 	return 0;
@@ -1109,6 +974,30 @@ err_no_reg:
 	return err;
 }
 
+static int of_fsl_dma_remove(struct of_device *of_dev)
+{
+	struct fsl_dma_device *fdev;
+	unsigned int i;
+
+	fdev = dev_get_drvdata(&of_dev->dev);
+
+	dma_async_device_unregister(&fdev->common);
+
+	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++)
+		if (fdev->chan[i])
+			fsl_dma_chan_remove(fdev->chan[i]);
+
+	if (fdev->irq != NO_IRQ)
+		free_irq(fdev->irq, fdev);
+
+	iounmap(fdev->reg_base);
+
+	kfree(fdev);
+	dev_set_drvdata(&of_dev->dev, NULL);
+
+	return 0;
+}
+
 static struct of_device_id of_fsl_dma_ids[] = {
 	{ .compatible = "fsl,eloplus-dma", },
 	{ .compatible = "fsl,elo-dma", },
@@ -1116,15 +1005,32 @@ static struct of_device_id of_fsl_dma_ids[] = {
 };
 
 static struct of_platform_driver of_fsl_dma_driver = {
-	.name = "of-fsl-dma",
+	.name = "fsl-elo-dma",
 	.match_table = of_fsl_dma_ids,
 	.probe = of_fsl_dma_probe,
+	.remove = of_fsl_dma_remove,
 };
 
 static __init int of_fsl_dma_init(void)
 {
-	return of_register_platform_driver(&of_fsl_dma_driver);
+	int ret;
+
+	pr_info("Freescale Elo / Elo Plus DMA driver\n");
+
+	ret = of_register_platform_driver(&of_fsl_dma_driver);
+	if (ret)
+		pr_err("fsldma: failed to register platform driver\n");
+
+	return ret;
+}
+
+static void __exit of_fsl_dma_exit(void)
+{
+	of_unregister_platform_driver(&of_fsl_dma_driver);
 }
 
-subsys_initcall(of_fsl_dma_chan_init);
 subsys_initcall(of_fsl_dma_init);
+module_exit(of_fsl_dma_exit);
+
+MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 6faf07ba0d0..4f21a512d84 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -114,6 +114,7 @@ struct fsl_dma_device {
 	struct dma_device common;
 	struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
 	u32 feature;		/* The same as DMA channels */
+	int irq;		/* Channel IRQ */
 };
 
 /* Define macros for fsl_dma_chan->feature property */
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index bc8c6e3470c..ecd743f7cc6 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -33,6 +33,7 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/workqueue.h>
+#include <linux/i7300_idle.h>
 #include "ioatdma.h"
 #include "ioatdma_registers.h"
 #include "ioatdma_hw.h"
@@ -171,6 +172,11 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
 	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
 
+#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
+	if (i7300_idle_platform_probe(NULL, NULL) == 0) {
+		device->common.chancnt--;
+	}
+#endif
 	for (i = 0; i < device->common.chancnt; i++) {
 		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
 		if (!ioat_chan) {
@@ -519,7 +525,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 	}
 
 	hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
-	if (new->async_tx.callback) {
+	if (first->async_tx.callback) {
 		hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
 		if (first != new) {
 			/* move callback into to last desc */
@@ -611,7 +617,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
 	}
 
 	hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
-	if (new->async_tx.callback) {
+	if (first->async_tx.callback) {
 		hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
 		if (first != new) {
 			/* move callback into to last desc */
@@ -801,6 +807,12 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 	struct ioat_desc_sw *desc, *_desc;
 	int in_use_descs = 0;
 
+	/* Before freeing channel resources first check
+	 * if they have been previously allocated for this channel.
+	 */
+	if (ioat_chan->desccount == 0)
+		return;
+
 	tasklet_disable(&ioat_chan->cleanup_task);
 	ioat_dma_memcpy_cleanup(ioat_chan);
 
@@ -863,6 +875,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
 	ioat_chan->pending = 0;
 	ioat_chan->dmacount = 0;
+	ioat_chan->desccount = 0;
 	ioat_chan->watchdog_completion = 0;
 	ioat_chan->last_compl_desc_addr_hw = 0;
 	ioat_chan->watchdog_tcp_cookie =
@@ -971,11 +984,9 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
 	switch (ioat_chan->device->version) {
 	case IOAT_VER_1_2:
 		return ioat1_dma_get_next_descriptor(ioat_chan);
-		break;
 	case IOAT_VER_2_0:
 	case IOAT_VER_3_0:
 		return ioat2_dma_get_next_descriptor(ioat_chan);
-		break;
 	}
 	return NULL;
 }
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 71fba82462c..c7a9306d951 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -411,6 +411,7 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
 	int slot_cnt;
 	int slots_per_op;
 	dma_cookie_t cookie;
+	dma_addr_t next_dma;
 
 	grp_start = sw_desc->group_head;
 	slot_cnt = grp_start->slot_cnt;
@@ -425,12 +426,12 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
 		&old_chain_tail->chain_node);
 
 	/* fix up the hardware chain */
-	iop_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
+	next_dma = grp_start->async_tx.phys;
+	iop_desc_set_next_desc(old_chain_tail, next_dma);
+	BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */
 
-	/* 1/ don't add pre-chained descriptors
-	 * 2/ dummy read to flush next_desc write
-	 */
-	BUG_ON(iop_desc_get_next_desc(sw_desc));
+	/* check for pre-chained descriptors */
+	iop_paranoia(iop_desc_get_next_desc(sw_desc));
 
 	/* increment the pending count by the number of slots
 	 * memcpy operations have a 1:1 (slot:operation) relation
diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c
index e763d723e4c..9f6fe46a9b8 100644
--- a/drivers/dma/iovlock.c
+++ b/drivers/dma/iovlock.c
@@ -55,7 +55,6 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
 	int nr_iovecs = 0;
 	int iovec_len_used = 0;
 	int iovec_pages_used = 0;
-	long err;
 
 	/* don't pin down non-user-based iovecs */
 	if (segment_eq(get_fs(), KERNEL_DS))
@@ -72,23 +71,21 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
 	local_list = kmalloc(sizeof(*local_list)
 		+ (nr_iovecs * sizeof (struct dma_page_list))
 		+ (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL);
-	if (!local_list) {
-		err = -ENOMEM;
+	if (!local_list)
 		goto out;
-	}
 
 	/* list of pages starts right after the page list array */
 	pages = (struct page **) &local_list->page_list[nr_iovecs];
 
+	local_list->nr_iovecs = 0;
+
 	for (i = 0; i < nr_iovecs; i++) {
 		struct dma_page_list *page_list = &local_list->page_list[i];
 
 		len -= iov[i].iov_len;
 
-		if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len)) {
-			err = -EFAULT;
+		if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len))
 			goto unpin;
-		}
 
 		page_list->nr_pages = num_pages_spanned(&iov[i]);
 		page_list->base_address = iov[i].iov_base;
@@ -109,10 +106,8 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
 			NULL);
 		up_read(&current->mm->mmap_sem);
 
-		if (ret != page_list->nr_pages) {
-			err = -ENOMEM;
+		if (ret != page_list->nr_pages)
 			goto unpin;
-		}
 
 		local_list->nr_iovecs = i + 1;
 	}
@@ -122,7 +117,7 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
 unpin:
 	dma_unpin_iovec_pages(local_list);
 out:
-	return ERR_PTR(err);
+	return NULL;
 }
 
 void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list)