Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/Kconfig | 2
-rw-r--r-- | drivers/dma/acpi-dma.c | 172
-rw-r--r-- | drivers/dma/coh901318.c | 43
-rw-r--r-- | drivers/dma/dmaengine.c | 7
-rw-r--r-- | drivers/dma/dmatest.c | 45
-rw-r--r-- | drivers/dma/edma.c | 2
-rw-r--r-- | drivers/dma/intel_mid_dma.c | 2
-rw-r--r-- | drivers/dma/ioat/dma.c | 3
-rw-r--r-- | drivers/dma/ioat/dma_v2.h | 1
-rw-r--r-- | drivers/dma/ioat/dma_v3.c | 114
-rw-r--r-- | drivers/dma/ioat/hw.h | 27
-rw-r--r-- | drivers/dma/iop-adma.c | 66
-rw-r--r-- | drivers/dma/mv_xor.c | 85
-rw-r--r-- | drivers/dma/mv_xor.h | 1
-rw-r--r-- | drivers/dma/pl330.c | 4
-rw-r--r-- | drivers/dma/ppc4xx/adma.c | 47
-rw-r--r-- | drivers/dma/ste_dma40.c | 541
-rw-r--r-- | drivers/dma/ste_dma40_ll.c | 189
-rw-r--r-- | drivers/dma/ste_dma40_ll.h | 3
-rw-r--r-- | drivers/dma/tegra20-apb-dma.c | 5
20 files changed, 662 insertions(+), 697 deletions(-)
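The largest functional addition below is in acpi-dma.c, which starts matching DMA controllers against the ACPI CSRT table and rebasing request lines before calling the controller's translation function. A minimal sketch of that matching rule, in plain C with a hypothetical dma_spec_example type standing in for struct acpi_dma_spec (the real logic is acpi_dma_update_dma_spec() in the hunk below):

/*
 * Sketch only: a controller advertises a [base, end] request-line range
 * parsed from the CSRT, and a slave_id is only handed to that
 * controller's translation function if it falls inside the range, after
 * being rebased to be controller-relative.
 */
#include <stdbool.h>

struct dma_spec_example {	/* hypothetical stand-in for struct acpi_dma_spec */
	int slave_id;
};

bool match_and_rebase(struct dma_spec_example *spec,
		      int base_request_line, int end_request_line)
{
	/*
	 * An all-zero range means the CSRT gave no information; the real
	 * code then still tries the controller's translation function.
	 */
	if (base_request_line == 0 && end_request_line == 0)
		return true;

	/* Outside the range: wrong controller, try the next one. */
	if (spec->slave_id < base_request_line ||
	    spec->slave_id > end_request_line)
		return false;

	/* Request lines are controller-relative after matching. */
	spec->slave_id -= base_request_line;
	return true;
}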
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index e9924898043..3215a3cb3de 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -213,7 +213,7 @@ config SIRF_DMA config TI_EDMA tristate "TI EDMA support" - depends on ARCH_DAVINCI + depends on ARCH_DAVINCI || ARCH_OMAP select DMA_ENGINE select DMA_VIRTUAL_CHANNELS default n diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c index ba6fc62e965..5a18f82f732 100644 --- a/drivers/dma/acpi-dma.c +++ b/drivers/dma/acpi-dma.c @@ -4,7 +4,8 @@ * Based on of-dma.c * * Copyright (C) 2013, Intel Corporation - * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> + * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com> + * Mika Westerberg <mika.westerberg@linux.intel.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -16,6 +17,7 @@ #include <linux/list.h> #include <linux/mutex.h> #include <linux/slab.h> +#include <linux/ioport.h> #include <linux/acpi.h> #include <linux/acpi_dma.h> @@ -23,6 +25,117 @@ static LIST_HEAD(acpi_dma_list); static DEFINE_MUTEX(acpi_dma_lock); /** + * acpi_dma_parse_resource_group - match device and parse resource group + * @grp: CSRT resource group + * @adev: ACPI device to match with + * @adma: struct acpi_dma of the given DMA controller + * + * Returns 1 on success, 0 when no information is available, or appropriate + * errno value on error. + * + * In order to match a device from DSDT table to the corresponding CSRT device + * we use MMIO address and IRQ. + */ +static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp, + struct acpi_device *adev, struct acpi_dma *adma) +{ + const struct acpi_csrt_shared_info *si; + struct list_head resource_list; + struct resource_list_entry *rentry; + resource_size_t mem = 0, irq = 0; + u32 vendor_id; + int ret; + + if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info)) + return -ENODEV; + + INIT_LIST_HEAD(&resource_list); + ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL); + if (ret <= 0) + return 0; + + list_for_each_entry(rentry, &resource_list, node) { + if (resource_type(&rentry->res) == IORESOURCE_MEM) + mem = rentry->res.start; + else if (resource_type(&rentry->res) == IORESOURCE_IRQ) + irq = rentry->res.start; + } + + acpi_dev_free_resource_list(&resource_list); + + /* Consider initial zero values as resource not found */ + if (mem == 0 && irq == 0) + return 0; + + si = (const struct acpi_csrt_shared_info *)&grp[1]; + + /* Match device by MMIO and IRQ */ + if (si->mmio_base_low != mem || si->gsi_interrupt != irq) + return 0; + + vendor_id = le32_to_cpu(grp->vendor_id); + dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n", + (char *)&vendor_id, grp->device_id, grp->revision); + + /* Check if the request line range is available */ + if (si->base_request_line == 0 && si->num_handshake_signals == 0) + return 0; + + adma->base_request_line = si->base_request_line; + adma->end_request_line = si->base_request_line + + si->num_handshake_signals - 1; + + dev_dbg(&adev->dev, "request line base: 0x%04x end: 0x%04x\n", + adma->base_request_line, adma->end_request_line); + + return 1; +} + +/** + * acpi_dma_parse_csrt - parse CSRT to exctract additional DMA resources + * @adev: ACPI device to match with + * @adma: struct acpi_dma of the given DMA controller + * + * CSRT or Core System Resources Table is a proprietary ACPI table + * introduced by Microsoft. 
This table can contain devices that are not in + * the system DSDT table. In particular DMA controllers might be described + * here. + * + * We are using this table to get the request line range of the specific DMA + * controller to be used later. + * + */ +static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma) +{ + struct acpi_csrt_group *grp, *end; + struct acpi_table_csrt *csrt; + acpi_status status; + int ret; + + status = acpi_get_table(ACPI_SIG_CSRT, 0, + (struct acpi_table_header **)&csrt); + if (ACPI_FAILURE(status)) { + if (status != AE_NOT_FOUND) + dev_warn(&adev->dev, "failed to get the CSRT table\n"); + return; + } + + grp = (struct acpi_csrt_group *)(csrt + 1); + end = (struct acpi_csrt_group *)((void *)csrt + csrt->header.length); + + while (grp < end) { + ret = acpi_dma_parse_resource_group(grp, adev, adma); + if (ret < 0) { + dev_warn(&adev->dev, + "error in parsing resource group\n"); + return; + } + + grp = (struct acpi_csrt_group *)((void *)grp + grp->length); + } +} + +/** * acpi_dma_controller_register - Register a DMA controller to ACPI DMA helpers * @dev: struct device of DMA controller * @acpi_dma_xlate: translation function which converts a dma specifier @@ -61,6 +174,8 @@ int acpi_dma_controller_register(struct device *dev, adma->acpi_dma_xlate = acpi_dma_xlate; adma->data = data; + acpi_dma_parse_csrt(adev, adma); + /* Now queue acpi_dma controller structure in list */ mutex_lock(&acpi_dma_lock); list_add_tail(&adma->dma_controllers, &acpi_dma_list); @@ -149,6 +264,45 @@ void devm_acpi_dma_controller_free(struct device *dev) } EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free); +/** + * acpi_dma_update_dma_spec - prepare dma specifier to pass to translation function + * @adma: struct acpi_dma of DMA controller + * @dma_spec: dma specifier to update + * + * Returns 0, if no information is avaiable, -1 on mismatch, and 1 otherwise. + * + * Accordingly to ACPI 5.0 Specification Table 6-170 "Fixed DMA Resource + * Descriptor": + * DMA Request Line bits is a platform-relative number uniquely + * identifying the request line assigned. Request line-to-Controller + * mapping is done in a controller-specific OS driver. + * That's why we can safely adjust slave_id when the appropriate controller is + * found. + */ +static int acpi_dma_update_dma_spec(struct acpi_dma *adma, + struct acpi_dma_spec *dma_spec) +{ + /* Set link to the DMA controller device */ + dma_spec->dev = adma->dev; + + /* Check if the request line range is available */ + if (adma->base_request_line == 0 && adma->end_request_line == 0) + return 0; + + /* Check if slave_id falls to the range */ + if (dma_spec->slave_id < adma->base_request_line || + dma_spec->slave_id > adma->end_request_line) + return -1; + + /* + * Here we adjust slave_id. It should be a relative number to the base + * request line. 
+ */ + dma_spec->slave_id -= adma->base_request_line; + + return 1; +} + struct acpi_dma_parser_data { struct acpi_dma_spec dma_spec; size_t index; @@ -193,6 +347,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, struct acpi_device *adev; struct acpi_dma *adma; struct dma_chan *chan = NULL; + int found; /* Check if the device was enumerated by ACPI */ if (!dev || !ACPI_HANDLE(dev)) @@ -219,9 +374,20 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, mutex_lock(&acpi_dma_lock); list_for_each_entry(adma, &acpi_dma_list, dma_controllers) { - dma_spec->dev = adma->dev; + /* + * We are not going to call translation function if slave_id + * doesn't fall to the request range. + */ + found = acpi_dma_update_dma_spec(adma, dma_spec); + if (found < 0) + continue; chan = adma->acpi_dma_xlate(dma_spec, adma); - if (chan) + /* + * Try to get a channel only from the DMA controller that + * matches the slave_id. See acpi_dma_update_dma_spec() + * description for the details. + */ + if (found > 0 || chan) break; } diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 3b23061cdb4..9bfaddd57ef 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -22,6 +22,7 @@ #include <linux/uaccess.h> #include <linux/debugfs.h> #include <linux/platform_data/dma-coh901318.h> +#include <linux/of_dma.h> #include "coh901318.h" #include "dmaengine.h" @@ -1788,6 +1789,35 @@ bool coh901318_filter_id(struct dma_chan *chan, void *chan_id) } EXPORT_SYMBOL(coh901318_filter_id); +struct coh901318_filter_args { + struct coh901318_base *base; + unsigned int ch_nr; +}; + +static bool coh901318_filter_base_and_id(struct dma_chan *chan, void *data) +{ + struct coh901318_filter_args *args = data; + + if (&args->base->dma_slave == chan->device && + args->ch_nr == to_coh901318_chan(chan)->id) + return true; + + return false; +} + +static struct dma_chan *coh901318_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct coh901318_filter_args args = { + .base = ofdma->of_dma_data, + .ch_nr = dma_spec->args[0], + }; + dma_cap_mask_t cap; + dma_cap_zero(cap); + dma_cap_set(DMA_SLAVE, cap); + + return dma_request_channel(cap, coh901318_filter_base_and_id, &args); +} /* * DMA channel allocation */ @@ -2735,12 +2765,19 @@ static int __init coh901318_probe(struct platform_device *pdev) if (err) goto err_register_memcpy; + err = of_dma_controller_register(pdev->dev.of_node, coh901318_xlate, + base); + if (err) + goto err_register_of_dma; + platform_set_drvdata(pdev, base); dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n", (u32) base->virtbase); return err; + err_register_of_dma: + dma_async_device_unregister(&base->dma_memcpy); err_register_memcpy: dma_async_device_unregister(&base->dma_slave); err_register_slave: @@ -2752,17 +2789,23 @@ static int coh901318_remove(struct platform_device *pdev) { struct coh901318_base *base = platform_get_drvdata(pdev); + of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&base->dma_memcpy); dma_async_device_unregister(&base->dma_slave); coh901318_pool_destroy(&base->pool); return 0; } +static const struct of_device_id coh901318_dt_match[] = { + { .compatible = "stericsson,coh901318" }, + {}, +}; static struct platform_driver coh901318_driver = { .remove = coh901318_remove, .driver = { .name = "coh901318", + .of_match_table = coh901318_dt_match, }, }; diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 93f7992bee5..9e56745f87b 100644 --- 
a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -663,11 +663,6 @@ static bool device_has_all_tx_types(struct dma_device *device) return false; #endif - #if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE) - if (!dma_has_cap(DMA_MEMSET, device->cap_mask)) - return false; - #endif - #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE) if (!dma_has_cap(DMA_XOR, device->cap_mask)) return false; @@ -729,8 +724,6 @@ int dma_async_device_register(struct dma_device *device) !device->device_prep_dma_pq); BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val); - BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) && - !device->device_prep_dma_memset); BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt); BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) && diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index d8ce4ecfef1..e88ded2c8d2 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -716,8 +716,7 @@ static int dmatest_func(void *data) } dma_async_issue_pending(chan); - wait_event_freezable_timeout(done_wait, - done.done || kthread_should_stop(), + wait_event_freezable_timeout(done_wait, done.done, msecs_to_jiffies(params->timeout)); status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); @@ -997,7 +996,6 @@ static void stop_threaded_test(struct dmatest_info *info) static int __restart_threaded_test(struct dmatest_info *info, bool run) { struct dmatest_params *params = &info->params; - int ret; /* Stop any running test first */ __stop_threaded_test(info); @@ -1012,13 +1010,23 @@ static int __restart_threaded_test(struct dmatest_info *info, bool run) memcpy(params, &info->dbgfs_params, sizeof(*params)); /* Run test with new parameters */ - ret = __run_threaded_test(info); - if (ret) { - __stop_threaded_test(info); - pr_err("dmatest: Can't run test\n"); + return __run_threaded_test(info); +} + +static bool __is_threaded_test_run(struct dmatest_info *info) +{ + struct dmatest_chan *dtc; + + list_for_each_entry(dtc, &info->channels, node) { + struct dmatest_thread *thread; + + list_for_each_entry(thread, &dtc->threads, node) { + if (!thread->done) + return true; + } } - return ret; + return false; } static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos, @@ -1091,22 +1099,10 @@ static ssize_t dtf_read_run(struct file *file, char __user *user_buf, { struct dmatest_info *info = file->private_data; char buf[3]; - struct dmatest_chan *dtc; - bool alive = false; mutex_lock(&info->lock); - list_for_each_entry(dtc, &info->channels, node) { - struct dmatest_thread *thread; - - list_for_each_entry(thread, &dtc->threads, node) { - if (!thread->done) { - alive = true; - break; - } - } - } - if (alive) { + if (__is_threaded_test_run(info)) { buf[0] = 'Y'; } else { __stop_threaded_test(info); @@ -1132,7 +1128,12 @@ static ssize_t dtf_write_run(struct file *file, const char __user *user_buf, if (strtobool(buf, &bv) == 0) { mutex_lock(&info->lock); - ret = __restart_threaded_test(info, bv); + + if (__is_threaded_test_run(info)) + ret = -EBUSY; + else + ret = __restart_threaded_test(info, bv); + mutex_unlock(&info->lock); } diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index cd7e3280fad..5f3e532436e 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -24,7 +24,7 @@ #include <linux/slab.h> #include <linux/spinlock.h> -#include <mach/edma.h> +#include <linux/platform_data/edma.h> #include "dmaengine.h" #include "virt-dma.h" diff --git 
a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index a0de82e21a7..a975ebebea8 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c @@ -1405,7 +1405,7 @@ static int dma_runtime_idle(struct device *dev) return -EAGAIN; } - return pm_schedule_suspend(dev, 0); + return 0; } /****************************************************************************** diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 17a2393b3e2..5ff6fc1819d 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -1105,12 +1105,11 @@ static ssize_t cap_show(struct dma_chan *c, char *page) { struct dma_device *dma = c->device; - return sprintf(page, "copy%s%s%s%s%s%s\n", + return sprintf(page, "copy%s%s%s%s%s\n", dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "", dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "", dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "", dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "", - dma_has_cap(DMA_MEMSET, dma->cap_mask) ? " fill" : "", dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : ""); } diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index 29bf9448035..212d584fe42 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -123,7 +123,6 @@ static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len struct ioat_ring_ent { union { struct ioat_dma_descriptor *hw; - struct ioat_fill_descriptor *fill; struct ioat_xor_descriptor *xor; struct ioat_xor_ext_descriptor *xor_ex; struct ioat_pq_descriptor *pq; diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index ca6ea9b3551..b642e035579 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -311,14 +311,6 @@ static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */ ioat_dma_unmap(chan, flags, len, desc->hw); break; - case IOAT_OP_FILL: { - struct ioat_fill_descriptor *hw = desc->fill; - - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) - ioat_unmap(pdev, hw->dst_addr - offset, len, - PCI_DMA_FROMDEVICE, flags, 1); - break; - } case IOAT_OP_XOR_VAL: case IOAT_OP_XOR: { struct ioat_xor_descriptor *xor = desc->xor; @@ -824,51 +816,6 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie, } static struct dma_async_tx_descriptor * -ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value, - size_t len, unsigned long flags) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - struct ioat_ring_ent *desc; - size_t total_len = len; - struct ioat_fill_descriptor *fill; - u64 src_data = (0x0101010101010101ULL) * (value & 0xff); - int num_descs, idx, i; - - num_descs = ioat2_xferlen_to_descs(ioat, len); - if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0) - idx = ioat->head; - else - return NULL; - i = 0; - do { - size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); - - desc = ioat2_get_ring_ent(ioat, idx + i); - fill = desc->fill; - - fill->size = xfer_size; - fill->src_data = src_data; - fill->dst_addr = dest; - fill->ctl = 0; - fill->ctl_f.op = IOAT_OP_FILL; - - len -= xfer_size; - dest += xfer_size; - dump_desc_dbg(ioat, desc); - } while (++i < num_descs); - - desc->txd.flags = flags; - desc->len = total_len; - fill->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); - fill->ctl_f.fence = !!(flags & DMA_PREP_FENCE); - fill->ctl_f.compl_write = 1; - dump_desc_dbg(ioat, desc); - - /* we leave the channel locked to ensure in order submission */ - return &desc->txd; -} - -static struct 
dma_async_tx_descriptor * __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result, dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags) @@ -1431,7 +1378,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) struct page *xor_srcs[IOAT_NUM_SRC_TEST]; struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1]; dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1]; - dma_addr_t dma_addr, dest_dma; + dma_addr_t dest_dma; struct dma_async_tx_descriptor *tx; struct dma_chan *dma_chan; dma_cookie_t cookie; @@ -1598,56 +1545,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) goto free_resources; } - /* skip memset if the capability is not present */ - if (!dma_has_cap(DMA_MEMSET, dma_chan->device->cap_mask)) - goto free_resources; - - /* test memset */ - op = IOAT_OP_FILL; - - dma_addr = dma_map_page(dev, dest, 0, - PAGE_SIZE, DMA_FROM_DEVICE); - tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, - DMA_PREP_INTERRUPT | - DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP); - if (!tx) { - dev_err(dev, "Self-test memset prep failed\n"); - err = -ENODEV; - goto dma_unmap; - } - - async_tx_ack(tx); - init_completion(&cmp); - tx->callback = ioat3_dma_test_callback; - tx->callback_param = &cmp; - cookie = tx->tx_submit(tx); - if (cookie < 0) { - dev_err(dev, "Self-test memset setup failed\n"); - err = -ENODEV; - goto dma_unmap; - } - dma->device_issue_pending(dma_chan); - - tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - - if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { - dev_err(dev, "Self-test memset timed out\n"); - err = -ENODEV; - goto dma_unmap; - } - - dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE); - - for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) { - u32 *ptr = page_address(dest); - if (ptr[i]) { - dev_err(dev, "Self-test memset failed compare\n"); - err = -ENODEV; - goto free_resources; - } - } - /* test for non-zero parity sum */ op = IOAT_OP_XOR_VAL; @@ -1706,8 +1603,7 @@ dma_unmap: for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); - } else if (op == IOAT_OP_FILL) - dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE); + } free_resources: dma->device_free_chan_resources(dma_chan); out: @@ -1944,12 +1840,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) } } - if (is_raid_device && (device->cap & IOAT_CAP_FILL_BLOCK)) { - dma_cap_set(DMA_MEMSET, dma->cap_mask); - dma->device_prep_dma_memset = ioat3_prep_memset_lock; - } - - dma->device_tx_status = ioat3_tx_status; device->cleanup_fn = ioat3_cleanup_event; device->timer_fn = ioat3_timer_event; diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h index 5ee57d402a6..62f83e983d8 100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h @@ -100,33 +100,6 @@ struct ioat_dma_descriptor { uint64_t user2; }; -struct ioat_fill_descriptor { - uint32_t size; - union { - uint32_t ctl; - struct { - unsigned int int_en:1; - unsigned int rsvd:1; - unsigned int dest_snoop_dis:1; - unsigned int compl_write:1; - unsigned int fence:1; - unsigned int rsvd2:2; - unsigned int dest_brk:1; - unsigned int bundle:1; - unsigned int rsvd4:15; - #define IOAT_OP_FILL 0x01 - unsigned int op:8; - } ctl_f; - }; - uint64_t src_data; - uint64_t dst_addr; - uint64_t next; - uint64_t rsv1; - uint64_t next_dst_addr; - uint64_t user1; - uint64_t user2; -}; - struct ioat_xor_descriptor { uint32_t size; union { diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c 
index 7dafb9f3785..c9cc08c2dbb 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -633,39 +633,6 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, } static struct dma_async_tx_descriptor * -iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest, - int value, size_t len, unsigned long flags) -{ - struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); - struct iop_adma_desc_slot *sw_desc, *grp_start; - int slot_cnt, slots_per_op; - - if (unlikely(!len)) - return NULL; - BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT); - - dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", - __func__, len); - - spin_lock_bh(&iop_chan->lock); - slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op); - sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); - if (sw_desc) { - grp_start = sw_desc->group_head; - iop_desc_init_memset(grp_start, flags); - iop_desc_set_byte_count(grp_start, iop_chan, len); - iop_desc_set_block_fill_val(grp_start, value); - iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); - sw_desc->unmap_src_cnt = 1; - sw_desc->unmap_len = len; - sw_desc->async_tx.flags = flags; - } - spin_unlock_bh(&iop_chan->lock); - - return sw_desc ? &sw_desc->async_tx : NULL; -} - -static struct dma_async_tx_descriptor * iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t *dma_src, unsigned int src_cnt, size_t len, unsigned long flags) @@ -1176,33 +1143,6 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) goto free_resources; } - /* test memset */ - dma_addr = dma_map_page(dma_chan->device->dev, dest, 0, - PAGE_SIZE, DMA_FROM_DEVICE); - tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - - cookie = iop_adma_tx_submit(tx); - iop_adma_issue_pending(dma_chan); - msleep(8); - - if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { - dev_err(dma_chan->device->dev, - "Self-test memset timed out, disabling\n"); - err = -ENODEV; - goto free_resources; - } - - for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) { - u32 *ptr = page_address(dest); - if (ptr[i]) { - dev_err(dma_chan->device->dev, - "Self-test memset failed compare, disabling\n"); - err = -ENODEV; - goto free_resources; - } - } - /* test for non-zero parity sum */ zero_sum_result = 0; for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) @@ -1487,8 +1427,6 @@ static int iop_adma_probe(struct platform_device *pdev) /* set prep routines based on capability */ if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy; - if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) - dma_dev->device_prep_dma_memset = iop_adma_prep_dma_memset; if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { dma_dev->max_xor = iop_adma_get_max_xor(); dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor; @@ -1556,8 +1494,7 @@ static int iop_adma_probe(struct platform_device *pdev) goto err_free_iop_chan; } - if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) || - dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) { + if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { ret = iop_adma_xor_val_self_test(adev); dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); if (ret) @@ -1584,7 +1521,6 @@ static int iop_adma_probe(struct platform_device *pdev) dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "", dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "", - dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? 
"fill " : "", dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index d64ae14f270..200f1a3c9a4 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -89,11 +89,6 @@ static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc) hw_desc->phy_next_desc = 0; } -static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val) -{ - desc->value = val; -} - static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc, dma_addr_t addr) { @@ -128,22 +123,6 @@ static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan, __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan)); } -static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr) -{ - __raw_writel(desc_addr, XOR_DEST_POINTER(chan)); -} - -static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size) -{ - __raw_writel(block_size, XOR_BLOCK_SIZE(chan)); -} - -static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value) -{ - __raw_writel(value, XOR_INIT_VALUE_LOW(chan)); - __raw_writel(value, XOR_INIT_VALUE_HIGH(chan)); -} - static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan) { u32 val = __raw_readl(XOR_INTR_MASK(chan)); @@ -186,8 +165,6 @@ static int mv_can_chain(struct mv_xor_desc_slot *desc) if (chain_old_tail->type != desc->type) return 0; - if (desc->type == DMA_MEMSET) - return 0; return 1; } @@ -205,9 +182,6 @@ static void mv_set_mode(struct mv_xor_chan *chan, case DMA_MEMCPY: op_mode = XOR_OPERATION_MODE_MEMCPY; break; - case DMA_MEMSET: - op_mode = XOR_OPERATION_MODE_MEMSET; - break; default: dev_err(mv_chan_to_devp(chan), "error: unsupported operation %d\n", @@ -274,18 +248,9 @@ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan, if (sw_desc->type != mv_chan->current_type) mv_set_mode(mv_chan, sw_desc->type); - if (sw_desc->type == DMA_MEMSET) { - /* for memset requests we need to program the engine, no - * descriptors used. 
- */ - struct mv_xor_desc *hw_desc = sw_desc->hw_desc; - mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr); - mv_chan_set_block_size(mv_chan, sw_desc->unmap_len); - mv_chan_set_value(mv_chan, sw_desc->value); - } else { - /* set the hardware chain */ - mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); - } + /* set the hardware chain */ + mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); + mv_chan->pending += sw_desc->slot_cnt; mv_xor_issue_pending(&mv_chan->dmachan); } @@ -688,43 +653,6 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, } static struct dma_async_tx_descriptor * -mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, - size_t len, unsigned long flags) -{ - struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); - struct mv_xor_desc_slot *sw_desc, *grp_start; - int slot_cnt; - - dev_dbg(mv_chan_to_devp(mv_chan), - "%s dest: %x len: %u flags: %ld\n", - __func__, dest, len, flags); - if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) - return NULL; - - BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); - - spin_lock_bh(&mv_chan->lock); - slot_cnt = mv_chan_memset_slot_count(len); - sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); - if (sw_desc) { - sw_desc->type = DMA_MEMSET; - sw_desc->async_tx.flags = flags; - grp_start = sw_desc->group_head; - mv_desc_init(grp_start, flags); - mv_desc_set_byte_count(grp_start, len); - mv_desc_set_dest_addr(sw_desc->group_head, dest); - mv_desc_set_block_fill_val(grp_start, value); - sw_desc->unmap_src_cnt = 1; - sw_desc->unmap_len = len; - } - spin_unlock_bh(&mv_chan->lock); - dev_dbg(mv_chan_to_devp(mv_chan), - "%s sw_desc %p async_tx %p \n", - __func__, sw_desc, &sw_desc->async_tx); - return sw_desc ? &sw_desc->async_tx : NULL; -} - -static struct dma_async_tx_descriptor * mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags) { @@ -1137,8 +1065,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev, /* set prep routines based on capability */ if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; - if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) - dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset; if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { dma_dev->max_xor = 8; dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; @@ -1187,9 +1113,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev, goto err_free_irq; } - dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s%s)\n", + dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n", dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", - dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? 
"intr " : ""); @@ -1298,8 +1223,6 @@ static int mv_xor_probe(struct platform_device *pdev) dma_cap_set(DMA_MEMCPY, cap_mask); if (of_property_read_bool(np, "dmacap,xor")) dma_cap_set(DMA_XOR, cap_mask); - if (of_property_read_bool(np, "dmacap,memset")) - dma_cap_set(DMA_MEMSET, cap_mask); if (of_property_read_bool(np, "dmacap,interrupt")) dma_cap_set(DMA_INTERRUPT, cap_mask); diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index c632a4761fc..c619359cb7f 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h @@ -31,7 +31,6 @@ #define XOR_OPERATION_MODE_XOR 0 #define XOR_OPERATION_MODE_MEMCPY 2 -#define XOR_OPERATION_MODE_MEMSET 4 #define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4)) #define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4)) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index a17553f7c02..7ec82f0667e 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2485,10 +2485,10 @@ static void pl330_free_chan_resources(struct dma_chan *chan) struct dma_pl330_chan *pch = to_pchan(chan); unsigned long flags; - spin_lock_irqsave(&pch->lock, flags); - tasklet_kill(&pch->task); + spin_lock_irqsave(&pch->lock, flags); + pl330_release_channel(pch->pl330_chid); pch->pl330_chid = NULL; diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 5d3d95569a1..1e220f8dfd8 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -2323,47 +2323,6 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy( } /** - * ppc440spe_adma_prep_dma_memset - prepare CDB for a MEMSET operation - */ -static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset( - struct dma_chan *chan, dma_addr_t dma_dest, int value, - size_t len, unsigned long flags) -{ - struct ppc440spe_adma_chan *ppc440spe_chan; - struct ppc440spe_adma_desc_slot *sw_desc, *group_start; - int slot_cnt, slots_per_op; - - ppc440spe_chan = to_ppc440spe_adma_chan(chan); - - if (unlikely(!len)) - return NULL; - - BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT); - - spin_lock_bh(&ppc440spe_chan->lock); - - dev_dbg(ppc440spe_chan->device->common.dev, - "ppc440spe adma%d: %s cal: %u len: %u int_en %d\n", - ppc440spe_chan->device->id, __func__, value, len, - flags & DMA_PREP_INTERRUPT ? 1 : 0); - - slot_cnt = slots_per_op = 1; - sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, - slots_per_op); - if (sw_desc) { - group_start = sw_desc->group_head; - ppc440spe_desc_init_memset(group_start, value, flags); - ppc440spe_adma_set_dest(group_start, dma_dest, 0); - ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len); - sw_desc->unmap_len = len; - sw_desc->async_tx.flags = flags; - } - spin_unlock_bh(&ppc440spe_chan->lock); - - return sw_desc ? 
&sw_desc->async_tx : NULL; -} - -/** * ppc440spe_adma_prep_dma_xor - prepare CDB for a XOR operation */ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor( @@ -4125,7 +4084,6 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev) case PPC440SPE_DMA1_ID: dma_cap_set(DMA_MEMCPY, adev->common.cap_mask); dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask); - dma_cap_set(DMA_MEMSET, adev->common.cap_mask); dma_cap_set(DMA_PQ, adev->common.cap_mask); dma_cap_set(DMA_PQ_VAL, adev->common.cap_mask); dma_cap_set(DMA_XOR_VAL, adev->common.cap_mask); @@ -4151,10 +4109,6 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev) adev->common.device_prep_dma_memcpy = ppc440spe_adma_prep_dma_memcpy; } - if (dma_has_cap(DMA_MEMSET, adev->common.cap_mask)) { - adev->common.device_prep_dma_memset = - ppc440spe_adma_prep_dma_memset; - } if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) { adev->common.max_xor = XOR_MAX_OPS; adev->common.device_prep_dma_xor = @@ -4217,7 +4171,6 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev) dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "", dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " : "", dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "", - dma_has_cap(DMA_MEMSET, adev->common.cap_mask) ? "memset " : "", dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "intr " : ""); } diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 1734feec47b..5ab5880d5c9 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -17,6 +17,8 @@ #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/err.h> +#include <linux/of.h> +#include <linux/of_dma.h> #include <linux/amba/bus.h> #include <linux/regulator/consumer.h> #include <linux/platform_data/dma-ste-dma40.h> @@ -45,15 +47,63 @@ #define D40_LCLA_LINK_PER_EVENT_GRP 128 #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP +/* Max number of logical channels per physical channel */ +#define D40_MAX_LOG_CHAN_PER_PHY 32 + /* Attempts before giving up to trying to get pages that are aligned */ #define MAX_LCLA_ALLOC_ATTEMPTS 256 /* Bit markings for allocation map */ -#define D40_ALLOC_FREE (1 << 31) -#define D40_ALLOC_PHY (1 << 30) +#define D40_ALLOC_FREE BIT(31) +#define D40_ALLOC_PHY BIT(30) #define D40_ALLOC_LOG_FREE 0 -#define MAX(a, b) (((a) < (b)) ? (b) : (a)) +#define D40_MEMCPY_MAX_CHANS 8 + +/* Reserved event lines for memcpy only. 
*/ +#define DB8500_DMA_MEMCPY_EV_0 51 +#define DB8500_DMA_MEMCPY_EV_1 56 +#define DB8500_DMA_MEMCPY_EV_2 57 +#define DB8500_DMA_MEMCPY_EV_3 58 +#define DB8500_DMA_MEMCPY_EV_4 59 +#define DB8500_DMA_MEMCPY_EV_5 60 + +static int dma40_memcpy_channels[] = { + DB8500_DMA_MEMCPY_EV_0, + DB8500_DMA_MEMCPY_EV_1, + DB8500_DMA_MEMCPY_EV_2, + DB8500_DMA_MEMCPY_EV_3, + DB8500_DMA_MEMCPY_EV_4, + DB8500_DMA_MEMCPY_EV_5, +}; + +/* Default configuration for physcial memcpy */ +static struct stedma40_chan_cfg dma40_memcpy_conf_phy = { + .mode = STEDMA40_MODE_PHYSICAL, + .dir = DMA_MEM_TO_MEM, + + .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE, + .src_info.psize = STEDMA40_PSIZE_PHY_1, + .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, + + .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE, + .dst_info.psize = STEDMA40_PSIZE_PHY_1, + .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, +}; + +/* Default configuration for logical memcpy */ +static struct stedma40_chan_cfg dma40_memcpy_conf_log = { + .mode = STEDMA40_MODE_LOGICAL, + .dir = DMA_MEM_TO_MEM, + + .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE, + .src_info.psize = STEDMA40_PSIZE_LOG_1, + .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, + + .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE, + .dst_info.psize = STEDMA40_PSIZE_LOG_1, + .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, +}; /** * enum 40_command - The different commands and/or statuses. @@ -171,6 +221,9 @@ static u32 d40_backup_regs_chan[] = { D40_CHAN_REG_SDLNK, }; +#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \ + BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B) + /** * struct d40_interrupt_lookup - lookup table for interrupt handler * @@ -471,6 +524,8 @@ struct d40_gen_dmac { * @phy_start: Physical memory start of the DMA registers. * @phy_size: Size of the DMA register map. * @irq: The IRQ number. + * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem + * transfers). * @num_phy_chans: The number of physical channels. Read from HW. This * is the number of available channels for this driver, not counting "Secure * mode" allocated physical channels. @@ -514,6 +569,7 @@ struct d40_base { phys_addr_t phy_start; resource_size_t phy_size; int irq; + int num_memcpy_chans; int num_phy_chans; int num_log_chans; struct device_dma_parameters dma_parms; @@ -534,7 +590,7 @@ struct d40_base { resource_size_t lcpa_size; struct kmem_cache *desc_slab; u32 reg_val_backup[BACKUP_REGS_SZ]; - u32 reg_val_backup_v4[MAX(BACKUP_REGS_SZ_V4A, BACKUP_REGS_SZ_V4B)]; + u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX]; u32 *reg_val_backup_chan; u16 gcc_pwr_off_mask; bool initialized; @@ -792,7 +848,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) * that uses linked lists. */ if (!(chan->phy_chan->use_soft_lli && - chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)) + chan->dma_cfg.dir == DMA_DEV_TO_MEM)) curr_lcla = d40_lcla_alloc_one(chan, desc); first_lcla = curr_lcla; @@ -954,20 +1010,21 @@ static int d40_psize_2_burst_size(bool is_log, int psize) /* * The dma only supports transmitting packages up to - * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of - * dma elements required to send the entire sg list + * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is stored in Bytes. + * + * Calculate the total number of dma elements required to send the entire sg list. 
*/ static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2) { int dmalen; u32 max_w = max(data_width1, data_width2); u32 min_w = min(data_width1, data_width2); - u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w); + u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w); if (seg_max > STEDMA40_MAX_SEG_SIZE) - seg_max -= (1 << max_w); + seg_max -= max_w; - if (!IS_ALIGNED(size, 1 << max_w)) + if (!IS_ALIGNED(size, max_w)) return -EINVAL; if (size <= seg_max) @@ -1257,21 +1314,17 @@ static void __d40_config_set_event(struct d40_chan *d40c, static void d40_config_set_event(struct d40_chan *d40c, enum d40_events event_type) { - /* Enable event line connected to device (or memcpy) */ - if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || - (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { - u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); + u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); + /* Enable event line connected to device (or memcpy) */ + if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) || + (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) __d40_config_set_event(d40c, event_type, event, D40_CHAN_REG_SSLNK); - } - - if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { - u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); + if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM) __d40_config_set_event(d40c, event_type, event, D40_CHAN_REG_SDLNK); - } } static u32 d40_chan_has_events(struct d40_chan *d40c) @@ -1417,7 +1470,7 @@ static u32 d40_residue(struct d40_chan *d40c) >> D40_SREG_ELEM_PHY_ECNT_POS; } - return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); + return num_elt * d40c->dma_cfg.dst_info.data_width; } static bool d40_tx_is_linked(struct d40_chan *d40c) @@ -1566,10 +1619,12 @@ static void dma_tc_handle(struct d40_chan *d40c) return; } - if (d40_queue_start(d40c) == NULL) + if (d40_queue_start(d40c) == NULL) { d40c->busy = false; - pm_runtime_mark_last_busy(d40c->base->dev); - pm_runtime_put_autosuspend(d40c->base->dev); + + pm_runtime_mark_last_busy(d40c->base->dev); + pm_runtime_put_autosuspend(d40c->base->dev); + } d40_desc_remove(d40d); d40_desc_done(d40c, d40d); @@ -1691,7 +1746,7 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data) } /* ACK interrupt */ - writel(1 << idx, base->virtbase + il[row].clr); + writel(BIT(idx), base->virtbase + il[row].clr); spin_lock(&d40c->lock); @@ -1713,8 +1768,6 @@ static int d40_validate_conf(struct d40_chan *d40c, struct stedma40_chan_cfg *conf) { int res = 0; - u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type); - u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type); bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; if (!conf->dir) { @@ -1722,48 +1775,14 @@ static int d40_validate_conf(struct d40_chan *d40c, res = -EINVAL; } - if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY && - d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 && - d40c->runtime_addr == 0) { - - chan_err(d40c, "Invalid TX channel address (%d)\n", - conf->dst_dev_type); - res = -EINVAL; - } - - if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY && - d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 && - d40c->runtime_addr == 0) { - chan_err(d40c, "Invalid RX channel address (%d)\n", - conf->src_dev_type); + if ((is_log && conf->dev_type > d40c->base->num_log_chans) || + (!is_log && conf->dev_type > d40c->base->num_phy_chans) || + (conf->dev_type < 0)) { + chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type); res = -EINVAL; } - if (conf->dir == STEDMA40_MEM_TO_PERIPH && - dst_event_group == 
STEDMA40_DEV_DST_MEMORY) { - chan_err(d40c, "Invalid dst\n"); - res = -EINVAL; - } - - if (conf->dir == STEDMA40_PERIPH_TO_MEM && - src_event_group == STEDMA40_DEV_SRC_MEMORY) { - chan_err(d40c, "Invalid src\n"); - res = -EINVAL; - } - - if (src_event_group == STEDMA40_DEV_SRC_MEMORY && - dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { - chan_err(d40c, "No event line\n"); - res = -EINVAL; - } - - if (conf->dir == STEDMA40_PERIPH_TO_PERIPH && - (src_event_group != dst_event_group)) { - chan_err(d40c, "Invalid event group\n"); - res = -EINVAL; - } - - if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) { + if (conf->dir == DMA_DEV_TO_DEV) { /* * DMAC HW supports it. Will be added to this driver, * in case any dma client requires it. @@ -1773,9 +1792,9 @@ static int d40_validate_conf(struct d40_chan *d40c, } if (d40_psize_2_burst_size(is_log, conf->src_info.psize) * - (1 << conf->src_info.data_width) != + conf->src_info.data_width != d40_psize_2_burst_size(is_log, conf->dst_info.psize) * - (1 << conf->dst_info.data_width)) { + conf->dst_info.data_width) { /* * The DMAC hardware only supports * src (burst x width) == dst (burst x width) @@ -1817,8 +1836,8 @@ static bool d40_alloc_mask_set(struct d40_phy_res *phy, if (phy->allocated_src == D40_ALLOC_FREE) phy->allocated_src = D40_ALLOC_LOG_FREE; - if (!(phy->allocated_src & (1 << log_event_line))) { - phy->allocated_src |= 1 << log_event_line; + if (!(phy->allocated_src & BIT(log_event_line))) { + phy->allocated_src |= BIT(log_event_line); goto found; } else goto not_found; @@ -1829,8 +1848,8 @@ static bool d40_alloc_mask_set(struct d40_phy_res *phy, if (phy->allocated_dst == D40_ALLOC_FREE) phy->allocated_dst = D40_ALLOC_LOG_FREE; - if (!(phy->allocated_dst & (1 << log_event_line))) { - phy->allocated_dst |= 1 << log_event_line; + if (!(phy->allocated_dst & BIT(log_event_line))) { + phy->allocated_dst |= BIT(log_event_line); goto found; } else goto not_found; @@ -1860,11 +1879,11 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src, /* Logical channel */ if (is_src) { - phy->allocated_src &= ~(1 << log_event_line); + phy->allocated_src &= ~BIT(log_event_line); if (phy->allocated_src == D40_ALLOC_LOG_FREE) phy->allocated_src = D40_ALLOC_FREE; } else { - phy->allocated_dst &= ~(1 << log_event_line); + phy->allocated_dst &= ~BIT(log_event_line); if (phy->allocated_dst == D40_ALLOC_LOG_FREE) phy->allocated_dst = D40_ALLOC_FREE; } @@ -1880,7 +1899,7 @@ out: static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) { - int dev_type; + int dev_type = d40c->dma_cfg.dev_type; int event_group; int event_line; struct d40_phy_res *phys; @@ -1894,14 +1913,12 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) phys = d40c->base->phy_res; num_phy_chans = d40c->base->num_phy_chans; - if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { - dev_type = d40c->dma_cfg.src_dev_type; + if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) { log_num = 2 * dev_type; is_src = true; - } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || - d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { + } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || + d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { /* dst event lines are used for logical memcpy */ - dev_type = d40c->dma_cfg.dst_dev_type; log_num = 2 * dev_type + 1; is_src = false; } else @@ -1911,7 +1928,7 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) event_line = D40_TYPE_TO_EVENT(dev_type); if (!is_log) { - if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { + 
if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { /* Find physical half channel */ if (d40c->dma_cfg.use_fixed_channel) { i = d40c->dma_cfg.phy_channel; @@ -2012,14 +2029,23 @@ static int d40_config_memcpy(struct d40_chan *d40c) dma_cap_mask_t cap = d40c->chan.device->cap_mask; if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) { - d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log; - d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY; - d40c->dma_cfg.dst_dev_type = d40c->base->plat_data-> - memcpy[d40c->chan.chan_id]; + d40c->dma_cfg = dma40_memcpy_conf_log; + d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id]; + + d40_log_cfg(&d40c->dma_cfg, + &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); } else if (dma_has_cap(DMA_MEMCPY, cap) && dma_has_cap(DMA_SLAVE, cap)) { - d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; + d40c->dma_cfg = dma40_memcpy_conf_phy; + + /* Generate interrrupt at end of transfer or relink. */ + d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS); + + /* Generate interrupt on error. */ + d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS); + d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS); + } else { chan_err(d40c, "No memcpy\n"); return -EINVAL; @@ -2032,7 +2058,7 @@ static int d40_free_dma(struct d40_chan *d40c) { int res = 0; - u32 event; + u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); struct d40_phy_res *phy = d40c->phy_chan; bool is_src; @@ -2050,14 +2076,12 @@ static int d40_free_dma(struct d40_chan *d40c) return -EINVAL; } - if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || - d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { - event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); + if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || + d40c->dma_cfg.dir == DMA_MEM_TO_MEM) is_src = false; - } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { - event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); + else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) is_src = true; - } else { + else { chan_err(d40c, "Unknown direction\n"); return -EINVAL; } @@ -2098,7 +2122,7 @@ static bool d40_is_paused(struct d40_chan *d40c) unsigned long flags; void __iomem *active_reg; u32 status; - u32 event; + u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); spin_lock_irqsave(&d40c->lock, flags); @@ -2117,12 +2141,10 @@ static bool d40_is_paused(struct d40_chan *d40c) goto _exit; } - if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || - d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { - event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); + if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || + d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { status = readl(chanbase + D40_CHAN_REG_SDLNK); - } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { - event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); + } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) { status = readl(chanbase + D40_CHAN_REG_SSLNK); } else { chan_err(d40c, "Unknown direction\n"); @@ -2253,24 +2275,6 @@ err: return NULL; } -static dma_addr_t -d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction) -{ - struct stedma40_platform_data *plat = chan->base->plat_data; - struct stedma40_chan_cfg *cfg = &chan->dma_cfg; - dma_addr_t addr = 0; - - if (chan->runtime_addr) - return chan->runtime_addr; - - if (direction == DMA_DEV_TO_MEM) - addr = plat->dev_rx[cfg->src_dev_type]; - else if (direction == DMA_MEM_TO_DEV) - addr = plat->dev_tx[cfg->dst_dev_type]; - - return addr; -} - static struct dma_async_tx_descriptor * d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, struct scatterlist *sg_dst, 
unsigned int sg_len, @@ -2297,14 +2301,10 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, if (sg_next(&sg_src[sg_len - 1]) == sg_src) desc->cyclic = true; - if (direction != DMA_TRANS_NONE) { - dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); - - if (direction == DMA_DEV_TO_MEM) - src_dev_addr = dev_addr; - else if (direction == DMA_MEM_TO_DEV) - dst_dev_addr = dev_addr; - } + if (direction == DMA_DEV_TO_MEM) + src_dev_addr = chan->runtime_addr; + else if (direction == DMA_MEM_TO_DEV) + dst_dev_addr = chan->runtime_addr; if (chan_is_logical(chan)) ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst, @@ -2364,7 +2364,7 @@ static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src) u32 rtreg; u32 event = D40_TYPE_TO_EVENT(dev_type); u32 group = D40_TYPE_TO_GROUP(dev_type); - u32 bit = 1 << event; + u32 bit = BIT(event); u32 prioreg; struct d40_gen_dmac *dmac = &d40c->base->gen_dmac; @@ -2395,13 +2395,57 @@ static void d40_set_prio_realtime(struct d40_chan *d40c) if (d40c->base->rev < 3) return; - if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || - (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) - __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true); + if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) || + (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) + __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true); - if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) || - (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) - __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false); + if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) || + (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) + __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false); +} + +#define D40_DT_FLAGS_MODE(flags) ((flags >> 0) & 0x1) +#define D40_DT_FLAGS_DIR(flags) ((flags >> 1) & 0x1) +#define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1) +#define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1) + +static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct stedma40_chan_cfg cfg; + dma_cap_mask_t cap; + u32 flags; + + memset(&cfg, 0, sizeof(struct stedma40_chan_cfg)); + + dma_cap_zero(cap); + dma_cap_set(DMA_SLAVE, cap); + + cfg.dev_type = dma_spec->args[0]; + flags = dma_spec->args[2]; + + switch (D40_DT_FLAGS_MODE(flags)) { + case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break; + case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break; + } + + switch (D40_DT_FLAGS_DIR(flags)) { + case 0: + cfg.dir = DMA_MEM_TO_DEV; + cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags); + break; + case 1: + cfg.dir = DMA_DEV_TO_MEM; + cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags); + break; + } + + if (D40_DT_FLAGS_FIXED_CHAN(flags)) { + cfg.phy_channel = dma_spec->args[1]; + cfg.use_fixed_channel = true; + } + + return dma_request_channel(cap, stedma40_filter, &cfg); } /* DMA ENGINE functions */ @@ -2433,23 +2477,21 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) } pm_runtime_get_sync(d40c->base->dev); - /* Fill in basic CFG register values */ - d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, - &d40c->dst_def_cfg, chan_is_logical(d40c)); d40_set_prio_realtime(d40c); if (chan_is_logical(d40c)) { - d40_log_cfg(&d40c->dma_cfg, - &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); - - if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) + if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) d40c->lcpa = d40c->base->lcpa_base + - d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE; + d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE; else d40c->lcpa = d40c->base->lcpa_base + - 
d40c->dma_cfg.dst_dev_type * + d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; + + /* Unmask the Global Interrupt Mask. */ + d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS); + d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS); } dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n", @@ -2639,33 +2681,10 @@ static void d40_terminate_all(struct dma_chan *chan) static int dma40_config_to_halfchannel(struct d40_chan *d40c, struct stedma40_half_channel_info *info, - enum dma_slave_buswidth width, u32 maxburst) { - enum stedma40_periph_data_width addr_width; int psize; - switch (width) { - case DMA_SLAVE_BUSWIDTH_1_BYTE: - addr_width = STEDMA40_BYTE_WIDTH; - break; - case DMA_SLAVE_BUSWIDTH_2_BYTES: - addr_width = STEDMA40_HALFWORD_WIDTH; - break; - case DMA_SLAVE_BUSWIDTH_4_BYTES: - addr_width = STEDMA40_WORD_WIDTH; - break; - case DMA_SLAVE_BUSWIDTH_8_BYTES: - addr_width = STEDMA40_DOUBLEWORD_WIDTH; - break; - default: - dev_err(d40c->base->dev, - "illegal peripheral address width " - "requested (%d)\n", - width); - return -EINVAL; - } - if (chan_is_logical(d40c)) { if (maxburst >= 16) psize = STEDMA40_PSIZE_LOG_16; @@ -2686,7 +2705,6 @@ dma40_config_to_halfchannel(struct d40_chan *d40c, psize = STEDMA40_PSIZE_PHY_1; } - info->data_width = addr_width; info->psize = psize; info->flow_ctrl = STEDMA40_NO_FLOW_CTRL; @@ -2710,21 +2728,14 @@ static int d40_set_runtime_config(struct dma_chan *chan, dst_maxburst = config->dst_maxburst; if (config->direction == DMA_DEV_TO_MEM) { - dma_addr_t dev_addr_rx = - d40c->base->plat_data->dev_rx[cfg->src_dev_type]; - config_addr = config->src_addr; - if (dev_addr_rx) - dev_dbg(d40c->base->dev, - "channel has a pre-wired RX address %08x " - "overriding with %08x\n", - dev_addr_rx, config_addr); - if (cfg->dir != STEDMA40_PERIPH_TO_MEM) + + if (cfg->dir != DMA_DEV_TO_MEM) dev_dbg(d40c->base->dev, "channel was not configured for peripheral " "to memory transfer (%d) overriding\n", cfg->dir); - cfg->dir = STEDMA40_PERIPH_TO_MEM; + cfg->dir = DMA_DEV_TO_MEM; /* Configure the memory side */ if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) @@ -2733,21 +2744,14 @@ static int d40_set_runtime_config(struct dma_chan *chan, dst_maxburst = src_maxburst; } else if (config->direction == DMA_MEM_TO_DEV) { - dma_addr_t dev_addr_tx = - d40c->base->plat_data->dev_tx[cfg->dst_dev_type]; - config_addr = config->dst_addr; - if (dev_addr_tx) - dev_dbg(d40c->base->dev, - "channel has a pre-wired TX address %08x " - "overriding with %08x\n", - dev_addr_tx, config_addr); - if (cfg->dir != STEDMA40_MEM_TO_PERIPH) + + if (cfg->dir != DMA_MEM_TO_DEV) dev_dbg(d40c->base->dev, "channel was not configured for memory " "to peripheral transfer (%d) overriding\n", cfg->dir); - cfg->dir = STEDMA40_MEM_TO_PERIPH; + cfg->dir = DMA_MEM_TO_DEV; /* Configure the memory side */ if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) @@ -2761,6 +2765,11 @@ static int d40_set_runtime_config(struct dma_chan *chan, return -EINVAL; } + if (config_addr <= 0) { + dev_err(d40c->base->dev, "no address supplied\n"); + return -EINVAL; + } + if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) { dev_err(d40c->base->dev, "src/dst width/maxburst mismatch: %d*%d != %d*%d\n", @@ -2779,14 +2788,24 @@ static int d40_set_runtime_config(struct dma_chan *chan, src_maxburst = dst_maxburst * dst_addr_width / src_addr_width; } + /* Only valid widths are; 1, 2, 4 and 8. 
*/ + if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || + src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || + dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || + dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || + ((src_addr_width > 1) && (src_addr_width & 1)) || + ((dst_addr_width > 1) && (dst_addr_width & 1))) + return -EINVAL; + + cfg->src_info.data_width = src_addr_width; + cfg->dst_info.data_width = dst_addr_width; + ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, - src_addr_width, src_maxburst); if (ret) return ret; ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info, - dst_addr_width, dst_maxburst); if (ret) return ret; @@ -2795,8 +2814,7 @@ static int d40_set_runtime_config(struct dma_chan *chan, if (chan_is_logical(d40c)) d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); else - d40_phy_cfg(cfg, &d40c->src_def_cfg, - &d40c->dst_def_cfg, false); + d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg); /* These settings will take precedence later */ d40c->runtime_addr = config_addr; @@ -2927,7 +2945,7 @@ static int __init d40_dmaengine_init(struct d40_base *base, } d40_chan_init(base, &base->dma_memcpy, base->log_chans, - base->num_log_chans, base->plat_data->memcpy_len); + base->num_log_chans, base->num_memcpy_chans); dma_cap_zero(base->dma_memcpy.cap_mask); dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); @@ -3121,13 +3139,14 @@ static int __init d40_phy_res_init(struct d40_base *base) static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) { - struct stedma40_platform_data *plat_data; + struct stedma40_platform_data *plat_data = pdev->dev.platform_data; struct clk *clk = NULL; void __iomem *virtbase = NULL; struct resource *res = NULL; struct d40_base *base = NULL; int num_log_chans = 0; int num_phy_chans; + int num_memcpy_chans; int clk_ret = -EINVAL; int i; u32 pid; @@ -3187,8 +3206,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) * DB8540v1 has revision 4 */ rev = AMBA_REV_BITS(pid); - - plat_data = pdev->dev.platform_data; + if (rev < 2) { + d40_err(&pdev->dev, "hardware revision: %d is not supported", rev); + goto failure; + } /* The number of physical channels on this HW */ if (plat_data->num_of_phy_chans) @@ -3196,26 +3217,20 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) else num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; - dev_info(&pdev->dev, "hardware revision: %d @ 0x%x with %d physical channels\n", - rev, res->start, num_phy_chans); - - if (rev < 2) { - d40_err(&pdev->dev, "hardware revision: %d is not supported", - rev); - goto failure; - } + /* The number of channels used for memcpy */ + if (plat_data->num_of_memcpy_chans) + num_memcpy_chans = plat_data->num_of_memcpy_chans; + else + num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels); - /* Count the number of logical channels in use */ - for (i = 0; i < plat_data->dev_len; i++) - if (plat_data->dev_rx[i] != 0) - num_log_chans++; + num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY; - for (i = 0; i < plat_data->dev_len; i++) - if (plat_data->dev_tx[i] != 0) - num_log_chans++; + dev_info(&pdev->dev, + "hardware rev: %d @ 0x%x with %d physical and %d logical channels\n", + rev, res->start, num_phy_chans, num_log_chans); base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + - (num_phy_chans + num_log_chans + plat_data->memcpy_len) * + (num_phy_chans + num_log_chans + num_memcpy_chans) * sizeof(struct d40_chan), GFP_KERNEL); if (base == NULL) { @@ -3225,6 +3240,7 @@ static 
 	base->rev = rev;
 	base->clk = clk;
+	base->num_memcpy_chans = num_memcpy_chans;
 	base->num_phy_chans = num_phy_chans;
 	base->num_log_chans = num_log_chans;
 	base->phy_start = res->start;
@@ -3276,17 +3292,11 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 	if (!base->lookup_phy_chans)
 		goto failure;
 
-	if (num_log_chans + plat_data->memcpy_len) {
-		/*
-		 * The max number of logical channels are event lines for all
-		 * src devices and dst devices
-		 */
-		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
-						 sizeof(struct d40_chan *),
-						 GFP_KERNEL);
-		if (!base->lookup_log_chans)
-			goto failure;
-	}
+	base->lookup_log_chans = kzalloc(num_log_chans *
+					 sizeof(struct d40_chan *),
+					 GFP_KERNEL);
+	if (!base->lookup_log_chans)
+		goto failure;
 
 	base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
 					    sizeof(d40_backup_regs_chan),
@@ -3470,17 +3480,82 @@ failure:
 	return ret;
 }
 
+static int __init d40_of_probe(struct platform_device *pdev,
+			       struct device_node *np)
+{
+	struct stedma40_platform_data *pdata;
+	int num_phy = 0, num_memcpy = 0, num_disabled = 0;
+	const __be32 *list;
+
+	pdata = devm_kzalloc(&pdev->dev,
+			     sizeof(struct stedma40_platform_data),
+			     GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	/* If absent this value will be obtained from h/w. */
+	of_property_read_u32(np, "dma-channels", &num_phy);
+	if (num_phy > 0)
+		pdata->num_of_phy_chans = num_phy;
+
+	list = of_get_property(np, "memcpy-channels", &num_memcpy);
+	num_memcpy /= sizeof(*list);
+
+	if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
+		d40_err(&pdev->dev,
+			"Invalid number of memcpy channels specified (%d)\n",
+			num_memcpy);
+		return -EINVAL;
+	}
+	pdata->num_of_memcpy_chans = num_memcpy;
+
+	of_property_read_u32_array(np, "memcpy-channels",
+				   dma40_memcpy_channels,
+				   num_memcpy);
+
+	list = of_get_property(np, "disabled-channels", &num_disabled);
+	num_disabled /= sizeof(*list);
+
+	if (num_disabled > STEDMA40_MAX_PHYS || num_disabled < 0) {
+		d40_err(&pdev->dev,
+			"Invalid number of disabled channels specified (%d)\n",
+			num_disabled);
+		return -EINVAL;
+	}
+
+	of_property_read_u32_array(np, "disabled-channels",
+				   pdata->disabled_channels,
+				   num_disabled);
+	pdata->disabled_channels[num_disabled] = -1;
+
+	pdev->dev.platform_data = pdata;
+
+	return 0;
+}
+
 static int __init d40_probe(struct platform_device *pdev)
 {
-	int err;
+	struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
+	struct device_node *np = pdev->dev.of_node;
 	int ret = -ENOENT;
-	struct d40_base *base;
+	struct d40_base *base = NULL;
 	struct resource *res = NULL;
 	int num_reserved_chans;
 	u32 val;
 
-	base = d40_hw_detect_init(pdev);
+	if (!plat_data) {
+		if (np) {
+			ret = d40_of_probe(pdev, np);
+			if (ret)
+				goto failure;
+		} else {
+			d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
+			goto failure;
+		}
+	}
 
+	base = d40_hw_detect_init(pdev);
 	if (!base)
 		goto failure;
@@ -3573,6 +3648,7 @@ static int __init d40_probe(struct platform_device *pdev)
 	base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
 	if (IS_ERR(base->lcpa_regulator)) {
 		d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
+		ret = PTR_ERR(base->lcpa_regulator);
 		base->lcpa_regulator = NULL;
 		goto failure;
 	}
@@ -3588,19 +3664,26 @@ static int __init d40_probe(struct platform_device *pdev)
 	}
 
 	base->initialized = true;
-	err = d40_dmaengine_init(base, num_reserved_chans);
-	if (err)
+	ret = d40_dmaengine_init(base,
num_reserved_chans); + if (ret) goto failure; base->dev->dma_parms = &base->dma_parms; - err = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE); - if (err) { + ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE); + if (ret) { d40_err(&pdev->dev, "Failed to set dma max seg size\n"); goto failure; } d40_hw_init(base); + if (np) { + ret = of_dma_controller_register(np, d40_xlate, NULL); + if (ret) + dev_err(&pdev->dev, + "could not register of_dma_controller\n"); + } + dev_info(base->dev, "initialized\n"); return 0; @@ -3654,11 +3737,17 @@ failure: return ret; } +static const struct of_device_id d40_match[] = { + { .compatible = "stericsson,dma40", }, + {} +}; + static struct platform_driver d40_driver = { .driver = { .owner = THIS_MODULE, .name = D40_NAME, .pm = DMA40_PM_OPS, + .of_match_table = d40_match, }, }; diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 7180e0d4172..27b818dee7c 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -10,6 +10,18 @@ #include "ste_dma40_ll.h" +u8 d40_width_to_bits(enum dma_slave_buswidth width) +{ + if (width == DMA_SLAVE_BUSWIDTH_1_BYTE) + return STEDMA40_ESIZE_8_BIT; + else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES) + return STEDMA40_ESIZE_16_BIT; + else if (width == DMA_SLAVE_BUSWIDTH_8_BYTES) + return STEDMA40_ESIZE_64_BIT; + else + return STEDMA40_ESIZE_32_BIT; +} + /* Sets up proper LCSP1 and LCSP3 register for a logical channel */ void d40_log_cfg(struct stedma40_chan_cfg *cfg, u32 *lcsp1, u32 *lcsp3) @@ -18,106 +30,100 @@ void d40_log_cfg(struct stedma40_chan_cfg *cfg, u32 l1 = 0; /* src */ /* src is mem? -> increase address pos */ - if (cfg->dir == STEDMA40_MEM_TO_PERIPH || - cfg->dir == STEDMA40_MEM_TO_MEM) - l1 |= 1 << D40_MEM_LCSP1_SCFG_INCR_POS; + if (cfg->dir == DMA_MEM_TO_DEV || + cfg->dir == DMA_MEM_TO_MEM) + l1 |= BIT(D40_MEM_LCSP1_SCFG_INCR_POS); /* dst is mem? -> increase address pos */ - if (cfg->dir == STEDMA40_PERIPH_TO_MEM || - cfg->dir == STEDMA40_MEM_TO_MEM) - l3 |= 1 << D40_MEM_LCSP3_DCFG_INCR_POS; + if (cfg->dir == DMA_DEV_TO_MEM || + cfg->dir == DMA_MEM_TO_MEM) + l3 |= BIT(D40_MEM_LCSP3_DCFG_INCR_POS); /* src is hw? -> master port 1 */ - if (cfg->dir == STEDMA40_PERIPH_TO_MEM || - cfg->dir == STEDMA40_PERIPH_TO_PERIPH) - l1 |= 1 << D40_MEM_LCSP1_SCFG_MST_POS; + if (cfg->dir == DMA_DEV_TO_MEM || + cfg->dir == DMA_DEV_TO_DEV) + l1 |= BIT(D40_MEM_LCSP1_SCFG_MST_POS); /* dst is hw? 
-> master port 1 */ - if (cfg->dir == STEDMA40_MEM_TO_PERIPH || - cfg->dir == STEDMA40_PERIPH_TO_PERIPH) - l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS; + if (cfg->dir == DMA_MEM_TO_DEV || + cfg->dir == DMA_DEV_TO_DEV) + l3 |= BIT(D40_MEM_LCSP3_DCFG_MST_POS); - l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS; + l3 |= BIT(D40_MEM_LCSP3_DCFG_EIM_POS); l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS; - l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS; + l3 |= d40_width_to_bits(cfg->dst_info.data_width) + << D40_MEM_LCSP3_DCFG_ESIZE_POS; - l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS; + l1 |= BIT(D40_MEM_LCSP1_SCFG_EIM_POS); l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; - l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS; + l1 |= d40_width_to_bits(cfg->src_info.data_width) + << D40_MEM_LCSP1_SCFG_ESIZE_POS; *lcsp1 = l1; *lcsp3 = l3; } -/* Sets up SRC and DST CFG register for both logical and physical channels */ -void d40_phy_cfg(struct stedma40_chan_cfg *cfg, - u32 *src_cfg, u32 *dst_cfg, bool is_log) +void d40_phy_cfg(struct stedma40_chan_cfg *cfg, u32 *src_cfg, u32 *dst_cfg) { u32 src = 0; u32 dst = 0; - if (!is_log) { - /* Physical channel */ - if ((cfg->dir == STEDMA40_PERIPH_TO_MEM) || - (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) { - /* Set master port to 1 */ - src |= 1 << D40_SREG_CFG_MST_POS; - src |= D40_TYPE_TO_EVENT(cfg->src_dev_type); - - if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL) - src |= 1 << D40_SREG_CFG_PHY_TM_POS; - else - src |= 3 << D40_SREG_CFG_PHY_TM_POS; - } - if ((cfg->dir == STEDMA40_MEM_TO_PERIPH) || - (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) { - /* Set master port to 1 */ - dst |= 1 << D40_SREG_CFG_MST_POS; - dst |= D40_TYPE_TO_EVENT(cfg->dst_dev_type); - - if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL) - dst |= 1 << D40_SREG_CFG_PHY_TM_POS; - else - dst |= 3 << D40_SREG_CFG_PHY_TM_POS; - } - /* Interrupt on end of transfer for destination */ - dst |= 1 << D40_SREG_CFG_TIM_POS; - - /* Generate interrupt on error */ - src |= 1 << D40_SREG_CFG_EIM_POS; - dst |= 1 << D40_SREG_CFG_EIM_POS; - - /* PSIZE */ - if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) { - src |= 1 << D40_SREG_CFG_PHY_PEN_POS; - src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS; - } - if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) { - dst |= 1 << D40_SREG_CFG_PHY_PEN_POS; - dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS; - } - - /* Element size */ - src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS; - dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS; - - /* Set the priority bit to high for the physical channel */ - if (cfg->high_priority) { - src |= 1 << D40_SREG_CFG_PRI_POS; - dst |= 1 << D40_SREG_CFG_PRI_POS; - } - - } else { - /* Logical channel */ - dst |= 1 << D40_SREG_CFG_LOG_GIM_POS; - src |= 1 << D40_SREG_CFG_LOG_GIM_POS; + if ((cfg->dir == DMA_DEV_TO_MEM) || + (cfg->dir == DMA_DEV_TO_DEV)) { + /* Set master port to 1 */ + src |= BIT(D40_SREG_CFG_MST_POS); + src |= D40_TYPE_TO_EVENT(cfg->dev_type); + + if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL) + src |= BIT(D40_SREG_CFG_PHY_TM_POS); + else + src |= 3 << D40_SREG_CFG_PHY_TM_POS; + } + if ((cfg->dir == DMA_MEM_TO_DEV) || + (cfg->dir == DMA_DEV_TO_DEV)) { + /* Set master port to 1 */ + dst |= BIT(D40_SREG_CFG_MST_POS); + dst |= D40_TYPE_TO_EVENT(cfg->dev_type); + + if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL) + dst |= BIT(D40_SREG_CFG_PHY_TM_POS); + else + dst |= 3 << D40_SREG_CFG_PHY_TM_POS; + } + /* Interrupt on end of transfer for destination */ + dst |= 
BIT(D40_SREG_CFG_TIM_POS); + + /* Generate interrupt on error */ + src |= BIT(D40_SREG_CFG_EIM_POS); + dst |= BIT(D40_SREG_CFG_EIM_POS); + + /* PSIZE */ + if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) { + src |= BIT(D40_SREG_CFG_PHY_PEN_POS); + src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS; + } + if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) { + dst |= BIT(D40_SREG_CFG_PHY_PEN_POS); + dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS; + } + + /* Element size */ + src |= d40_width_to_bits(cfg->src_info.data_width) + << D40_SREG_CFG_ESIZE_POS; + dst |= d40_width_to_bits(cfg->dst_info.data_width) + << D40_SREG_CFG_ESIZE_POS; + + /* Set the priority bit to high for the physical channel */ + if (cfg->high_priority) { + src |= BIT(D40_SREG_CFG_PRI_POS); + dst |= BIT(D40_SREG_CFG_PRI_POS); } if (cfg->src_info.big_endian) - src |= 1 << D40_SREG_CFG_LBE_POS; + src |= BIT(D40_SREG_CFG_LBE_POS); if (cfg->dst_info.big_endian) - dst |= 1 << D40_SREG_CFG_LBE_POS; + dst |= BIT(D40_SREG_CFG_LBE_POS); *src_cfg = src; *dst_cfg = dst; @@ -143,23 +149,22 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli, num_elems = 2 << psize; /* Must be aligned */ - if (!IS_ALIGNED(data, 0x1 << data_width)) + if (!IS_ALIGNED(data, data_width)) return -EINVAL; /* Transfer size can't be smaller than (num_elms * elem_size) */ - if (data_size < num_elems * (0x1 << data_width)) + if (data_size < num_elems * data_width) return -EINVAL; /* The number of elements. IE now many chunks */ - lli->reg_elt = (data_size >> data_width) << D40_SREG_ELEM_PHY_ECNT_POS; + lli->reg_elt = (data_size / data_width) << D40_SREG_ELEM_PHY_ECNT_POS; /* * Distance to next element sized entry. * Usually the size of the element unless you want gaps. */ if (addr_inc) - lli->reg_elt |= (0x1 << data_width) << - D40_SREG_ELEM_PHY_EIDX_POS; + lli->reg_elt |= data_width << D40_SREG_ELEM_PHY_EIDX_POS; /* Where the data is */ lli->reg_ptr = data; @@ -167,18 +172,20 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli, /* If this scatter list entry is the last one, no next link */ if (next_lli == 0) - lli->reg_lnk = 0x1 << D40_SREG_LNK_PHY_TCP_POS; + lli->reg_lnk = BIT(D40_SREG_LNK_PHY_TCP_POS); else lli->reg_lnk = next_lli; /* Set/clear interrupt generation on this link item.*/ if (term_int) - lli->reg_cfg |= 0x1 << D40_SREG_CFG_TIM_POS; + lli->reg_cfg |= BIT(D40_SREG_CFG_TIM_POS); else - lli->reg_cfg &= ~(0x1 << D40_SREG_CFG_TIM_POS); + lli->reg_cfg &= ~BIT(D40_SREG_CFG_TIM_POS); - /* Post link */ - lli->reg_lnk |= 0 << D40_SREG_LNK_PHY_PRE_POS; + /* + * Post link - D40_SREG_LNK_PHY_PRE_POS = 0 + * Relink happens after transfer completion. 
+ */ return 0; } @@ -187,16 +194,16 @@ static int d40_seg_size(int size, int data_width1, int data_width2) { u32 max_w = max(data_width1, data_width2); u32 min_w = min(data_width1, data_width2); - u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w); + u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w); if (seg_max > STEDMA40_MAX_SEG_SIZE) - seg_max -= (1 << max_w); + seg_max -= max_w; if (size <= seg_max) return size; if (size <= 2 * seg_max) - return ALIGN(size / 2, 1 << max_w); + return ALIGN(size / 2, max_w); return seg_max; } @@ -362,10 +369,10 @@ static void d40_log_fill_lli(struct d40_log_lli *lli, lli->lcsp13 = reg_cfg; /* The number of elements to transfer */ - lli->lcsp02 = ((data_size >> data_width) << + lli->lcsp02 = ((data_size / data_width) << D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK; - BUG_ON((data_size >> data_width) > STEDMA40_MAX_SEG_SIZE); + BUG_ON((data_size / data_width) > STEDMA40_MAX_SEG_SIZE); /* 16 LSBs address of the current element */ lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK; diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index fdde8ef7754..1b47312bc57 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -432,8 +432,7 @@ enum d40_lli_flags { void d40_phy_cfg(struct stedma40_chan_cfg *cfg, u32 *src_cfg, - u32 *dst_cfg, - bool is_log); + u32 *dst_cfg); void d40_log_cfg(struct stedma40_chan_cfg *cfg, u32 *lcsp1, diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index ce193409ebd..33f59ecd256 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -1273,11 +1273,6 @@ static int tegra_dma_probe(struct platform_device *pdev) platform_set_drvdata(pdev, tdma); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "No mem resource for DMA\n"); - return -EINVAL; - } - tdma->base_addr = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(tdma->base_addr)) return PTR_ERR(tdma->base_addr); |
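A note on the width handling in the ste_dma40 hunks above: after this change, stedma40_half_channel_info.data_width carries a plain byte count (enum dma_slave_buswidth) instead of a pre-encoded register value, and d40_width_to_bits() derives the two-bit element-size field only when the registers are built. The standalone sketch below models that conversion together with the matching shift-to-division change in the element-count math; the ESIZE encodings (0..3) are an assumption based on the STEDMA40_ESIZE_* names, and every identifier here is illustrative rather than the driver's own:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Assumed 2-bit encodings, after the STEDMA40_ESIZE_* names. */
	enum { ESIZE_8_BIT = 0, ESIZE_16_BIT = 1, ESIZE_32_BIT = 2, ESIZE_64_BIT = 3 };

	/* Byte width -> element-size field, mirroring d40_width_to_bits(). */
	static uint8_t width_to_bits(unsigned int width)
	{
		if (width == 1)
			return ESIZE_8_BIT;
		else if (width == 2)
			return ESIZE_16_BIT;
		else if (width == 8)
			return ESIZE_64_BIT;
		else
			return ESIZE_32_BIT;
	}

	int main(void)
	{
		unsigned int data_size = 64;	/* bytes in this scatter entry */
		unsigned int width = 4;		/* bus width in bytes */

		/*
		 * With pre-encoded widths the element count was
		 * "data_size >> data_width"; with byte widths a plain
		 * division expresses the same thing, hence the / in
		 * d40_phy_fill_lli() and d40_log_fill_lli() above.
		 */
		unsigned int ecnt = data_size / width;

		assert(ecnt == 16);
		assert(width_to_bits(width) == ESIZE_32_BIT);
		printf("ECNT=%u ESIZE=%u\n", ecnt, width_to_bits(width));
		return 0;
	}

Keeping the configuration in bytes is what lets IS_ALIGNED(data, data_width) and data_size / data_width operate directly on the values handed in through the dmaengine API.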
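The runtime-config path in d40_set_runtime_config() insists that both halves move the same number of bytes per burst (src_maxburst * src_addr_width == dst_maxburst * dst_addr_width) and that each width is one of 1, 2, 4 or 8. Below is a minimal standalone model of that invariant; the rebalancing helper stands in for the driver's clamping logic, part of which is elided in the hunk above, so treat it as an illustration of the check rather than the driver's exact policy:

	#include <stdbool.h>
	#include <stdio.h>

	/* True for 1, 2, 4 and 8; same effect as the is_power_of_2() test. */
	static bool width_ok(unsigned int w)
	{
		return w >= 1 && w <= 8 && (w & (w - 1)) == 0;
	}

	/*
	 * Rebalance bursts so src and dst agree on bytes per burst, the
	 * invariant d40_set_runtime_config() enforces before programming
	 * the channel.
	 */
	static int balance(unsigned int *src_burst, unsigned int src_w,
			   unsigned int *dst_burst, unsigned int dst_w)
	{
		if (!width_ok(src_w) || !width_ok(dst_w))
			return -1;
		if (*src_burst * src_w > *dst_burst * dst_w)
			*dst_burst = *src_burst * src_w / dst_w;
		else
			*src_burst = *dst_burst * dst_w / src_w;
		return 0;
	}

	int main(void)
	{
		unsigned int src_burst = 16, dst_burst = 1;

		/* 16 one-byte elements on the source -> 4 four-byte on dst. */
		if (balance(&src_burst, 1, &dst_burst, 4))
			return 1;
		printf("src %u x 1B, dst %u x 4B\n", src_burst, dst_burst);
		return 0;
	}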
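Finally, d40_of_probe() sizes its array properties by asking of_get_property() for the raw property length in bytes and dividing by sizeof(__be32) to get a cell count, which is then range-checked. A self-contained model of that counting follows; the property blob is made up, and the value given for D40_MEMCPY_MAX_CHANS is a placeholder since the real constant is defined elsewhere in the driver:

	#include <stdint.h>
	#include <stdio.h>

	#define D40_MEMCPY_MAX_CHANS	8	/* placeholder value */

	int main(void)
	{
		/* A made-up "memcpy-channels" property: five 32-bit cells. */
		uint32_t prop[] = { 56, 57, 58, 59, 60 };
		int len = sizeof(prop);		/* what of_get_property() reports */
		int num_memcpy = len / (int)sizeof(uint32_t);

		if (num_memcpy <= 0 || num_memcpy > D40_MEMCPY_MAX_CHANS) {
			fprintf(stderr,
				"Invalid number of memcpy channels (%d)\n",
				num_memcpy);
			return 1;
		}
		printf("%d memcpy channels, first cell %u\n",
		       num_memcpy, prop[0]);
		return 0;
	}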