Diffstat (limited to 'drivers/dma')

 drivers/dma/Kconfig      |   12
 drivers/dma/Makefile     |    1
 drivers/dma/dmaengine.c  |  419
 drivers/dma/ioatdma.c    |  369
 drivers/dma/ioatdma.h    |   16
 drivers/dma/ioatdma_io.h |  118
 drivers/dma/iop-adma.c   | 1467

 7 files changed, 1974 insertions, 428 deletions
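This patch replaces the old count-based channel allocation (dma_async_client_chan_request(client, n) plus DMA_RESOURCE_ADDED events) with a capability-driven model: a client now embeds a struct dma_client carrying a cap_mask and an event_callback, and answers DMA_ACK or DMA_NAK as the core offers it channels. A minimal sketch of a client under the new model follows; the example_* names are hypothetical, and the dma_event_callback signature, enum dma_state, and the DMA_DUP "not interested" response are assumed from the dmaengine.h side of this change, which is not shown in this diff.

	#include <linux/dmaengine.h>

	static struct dma_chan *example_chan;	/* the single channel we hold */

	/* called by the core, under dma_list_mutex, as channels come and go */
	static enum dma_state_client
	example_event(struct dma_client *client, struct dma_chan *chan,
		      enum dma_state state)
	{
		switch (state) {
		case DMA_RESOURCE_AVAILABLE:
			if (example_chan)
				return DMA_DUP;	/* no interest, keep offering */
			example_chan = chan;
			return DMA_ACK;	/* core dma_chan_get()s it for us */
		case DMA_RESOURCE_REMOVED:
			if (chan != example_chan)
				return DMA_DUP;
			example_chan = NULL;
			return DMA_ACK;	/* core drops our reference */
		default:
			return DMA_DUP;
		}
	}

	static struct dma_client example_client = {
		.event_callback = example_event,
	};

	static int __init example_init(void)
	{
		dma_cap_set(DMA_MEMCPY, example_client.cap_mask);
		dma_async_client_register(&example_client);
		dma_async_client_chan_request(&example_client);
		return 0;
	}

Note that dma_async_client_register() no longer kzalloc()s the client: the caller embeds the structure and owns its lifetime, so dma_async_client_unregister() no longer frees it either.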
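The memcpy entry points also move into the core as dma_async_memcpy_buf_to_buf/_buf_to_pg/_pg_to_pg, which map the buffers, program a descriptor through the new dma_async_tx_descriptor operations, and return a cookie; the new dma_sync_wait() helper issues pending descriptors and polls that cookie with a five second timeout. A usage sketch for a synchronous caller, reusing the hypothetical example_chan acquired above (dest, src, and len stand in for the caller's buffers, and DMA_SUCCESS is the completion status assumed from dmaengine.h):

	dma_cookie_t cookie;

	cookie = dma_async_memcpy_buf_to_buf(example_chan, dest, src, len);
	if (cookie < 0)
		return -ENOMEM;	/* no descriptor was available */

	/* issues pending descriptors, then polls until done or timeout */
	if (dma_sync_wait(example_chan, cookie) != DMA_SUCCESS)
		return -EIO;

As the kernel-doc in the patch notes, both buffers must be mappable for streaming DMA and must stay memory resident until the transfer completes.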
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 72be6c63edf..b31756d5997 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -8,8 +8,8 @@ menu "DMA Engine support" config DMA_ENGINE bool "Support for DMA engines" ---help--- - DMA engines offload copy operations from the CPU to dedicated - hardware, allowing the copies to happen asynchronously. + DMA engines offload bulk memory operations from the CPU to dedicated + hardware, allowing the operations to happen asynchronously. comment "DMA Clients" @@ -32,4 +32,12 @@ config INTEL_IOATDMA ---help--- Enable support for the Intel(R) I/OAT DMA engine. +config INTEL_IOP_ADMA + tristate "Intel IOP ADMA support" + depends on DMA_ENGINE && (ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX) + select ASYNC_CORE + default m + ---help--- + Enable support for the Intel(R) IOP Series RAID engines. + endmenu diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index bdcfdbdb1ae..b3839b687ae 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -1,3 +1,4 @@ obj-$(CONFIG_DMA_ENGINE) += dmaengine.o obj-$(CONFIG_NET_DMA) += iovlock.o obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o +obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 322ee2984e3..82489923af0 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -37,11 +37,11 @@ * Each device has a channels list, which runs unlocked but is never modified * once the device is registered, it's just setup by the driver. * - * Each client has a channels list, it's only modified under the client->lock - * and in an RCU callback, so it's safe to read under rcu_read_lock(). + * Each client is responsible for keeping track of the channels it uses. See + * the definition of dma_event_callback in dmaengine.h. * * Each device has a kref, which is initialized to 1 when the device is - * registered. A kref_put is done for each class_device registered. When the + * registered. A kref_get is done for each class_device registered. When the * class_device is released, the corresponding kref_put is done in the release * method. Every time one of the device's channels is allocated to a client, * a kref_get occurs. When the channel is freed, the corresponding kref_put @@ -51,14 +51,17 @@ * references to finish. * * Each channel has an open-coded implementation of Rusty Russell's "bigref," - * with a kref and a per_cpu local_t. A single reference is set when on an - * ADDED event, and removed with a REMOVE event. Net DMA client takes an - * extra reference per outstanding transaction. The relase function does a - * kref_put on the device. -ChrisL + * with a kref and a per_cpu local_t. A dma_chan_get is called when a client + * signals that it wants to use a channel, and dma_chan_put is called when + * a channel is removed or a client using it is unregistered. A client can + * take extra references per outstanding transaction, as is the case with + * the NET DMA client. The release function does a kref_put on the device.
+ * -ChrisL, DanW */ #include <linux/init.h> #include <linux/module.h> +#include <linux/mm.h> #include <linux/device.h> #include <linux/dmaengine.h> #include <linux/hardirq.h> @@ -66,6 +69,7 @@ #include <linux/percpu.h> #include <linux/rcupdate.h> #include <linux/mutex.h> +#include <linux/jiffies.h> static DEFINE_MUTEX(dma_list_mutex); static LIST_HEAD(dma_device_list); @@ -100,8 +104,19 @@ static ssize_t show_bytes_transferred(struct class_device *cd, char *buf) static ssize_t show_in_use(struct class_device *cd, char *buf) { struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev); + int in_use = 0; + + if (unlikely(chan->slow_ref) && + atomic_read(&chan->refcount.refcount) > 1) + in_use = 1; + else { + if (local_read(&(per_cpu_ptr(chan->local, + get_cpu())->refcount)) > 0) + in_use = 1; + put_cpu(); + } - return sprintf(buf, "%d\n", (chan->client ? 1 : 0)); + return sprintf(buf, "%d\n", in_use); } static struct class_device_attribute dma_class_attrs[] = { @@ -127,43 +142,72 @@ static struct class dma_devclass = { /* --- client and device registration --- */ +#define dma_chan_satisfies_mask(chan, mask) \ + __dma_chan_satisfies_mask((chan), &(mask)) +static int +__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want) +{ + dma_cap_mask_t has; + + bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits, + DMA_TX_TYPE_END); + return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END); +} + /** - * dma_client_chan_alloc - try to allocate a channel to a client + * dma_client_chan_alloc - try to allocate channels to a client * @client: &dma_client * * Called with dma_list_mutex held. */ -static struct dma_chan *dma_client_chan_alloc(struct dma_client *client) +static void dma_client_chan_alloc(struct dma_client *client) { struct dma_device *device; struct dma_chan *chan; - unsigned long flags; int desc; /* allocated descriptor count */ + enum dma_state_client ack; - /* Find a channel, any DMA engine will do */ - list_for_each_entry(device, &dma_device_list, global_node) { + /* Find a channel */ + list_for_each_entry(device, &dma_device_list, global_node) list_for_each_entry(chan, &device->channels, device_node) { - if (chan->client) + if (!dma_chan_satisfies_mask(chan, client->cap_mask)) continue; desc = chan->device->device_alloc_chan_resources(chan); if (desc >= 0) { - kref_get(&device->refcount); - kref_init(&chan->refcount); - chan->slow_ref = 0; - INIT_RCU_HEAD(&chan->rcu); - chan->client = client; - spin_lock_irqsave(&client->lock, flags); - list_add_tail_rcu(&chan->client_node, - &client->channels); - spin_unlock_irqrestore(&client->lock, flags); - return chan; + ack = client->event_callback(client, + chan, + DMA_RESOURCE_AVAILABLE); + + /* we are done once this client rejects + * an available resource + */ + if (ack == DMA_ACK) { + dma_chan_get(chan); + kref_get(&device->refcount); + } else if (ack == DMA_NAK) + return; } } - } +} + +enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) +{ + enum dma_status status; + unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); + + dma_async_issue_pending(chan); + do { + status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); + if (time_after_eq(jiffies, dma_sync_wait_timeout)) { + printk(KERN_ERR "dma_sync_wait_timeout!\n"); + return DMA_ERROR; + } + } while (status == DMA_IN_PROGRESS); - return NULL; + return status; } +EXPORT_SYMBOL(dma_sync_wait); /** * dma_chan_cleanup - release a DMA channel's resources @@ -173,7 +217,6 @@ void dma_chan_cleanup(struct kref *kref) { 
struct dma_chan *chan = container_of(kref, struct dma_chan, refcount); chan->device->device_free_chan_resources(chan); - chan->client = NULL; kref_put(&chan->device->refcount, dma_async_device_cleanup); } EXPORT_SYMBOL(dma_chan_cleanup); @@ -189,7 +232,7 @@ static void dma_chan_free_rcu(struct rcu_head *rcu) kref_put(&chan->refcount, dma_chan_cleanup); } -static void dma_client_chan_free(struct dma_chan *chan) +static void dma_chan_release(struct dma_chan *chan) { atomic_add(0x7FFFFFFF, &chan->refcount.refcount); chan->slow_ref = 1; @@ -197,70 +240,57 @@ static void dma_client_chan_free(struct dma_chan *chan) } /** - * dma_chans_rebalance - reallocate channels to clients - * - * When the number of DMA channel in the system changes, - * channels need to be rebalanced among clients. + * dma_clients_notify_available - broadcast available channels to the clients */ -static void dma_chans_rebalance(void) +static void dma_clients_notify_available(void) { struct dma_client *client; - struct dma_chan *chan; - unsigned long flags; mutex_lock(&dma_list_mutex); - list_for_each_entry(client, &dma_client_list, global_node) { - while (client->chans_desired > client->chan_count) { - chan = dma_client_chan_alloc(client); - if (!chan) - break; - client->chan_count++; - client->event_callback(client, - chan, - DMA_RESOURCE_ADDED); - } - while (client->chans_desired < client->chan_count) { - spin_lock_irqsave(&client->lock, flags); - chan = list_entry(client->channels.next, - struct dma_chan, - client_node); - list_del_rcu(&chan->client_node); - spin_unlock_irqrestore(&client->lock, flags); - client->chan_count--; - client->event_callback(client, - chan, - DMA_RESOURCE_REMOVED); - dma_client_chan_free(chan); - } - } + list_for_each_entry(client, &dma_client_list, global_node) + dma_client_chan_alloc(client); mutex_unlock(&dma_list_mutex); } /** - * dma_async_client_register - allocate and register a &dma_client - * @event_callback: callback for notification of channel addition/removal + * dma_clients_notify_removed - tell the clients that a channel is going away + * @chan: channel on its way out */ -struct dma_client *dma_async_client_register(dma_event_callback event_callback) +static void dma_clients_notify_removed(struct dma_chan *chan) { struct dma_client *client; + enum dma_state_client ack; - client = kzalloc(sizeof(*client), GFP_KERNEL); - if (!client) - return NULL; + mutex_lock(&dma_list_mutex); - INIT_LIST_HEAD(&client->channels); - spin_lock_init(&client->lock); - client->chans_desired = 0; - client->chan_count = 0; - client->event_callback = event_callback; + list_for_each_entry(client, &dma_client_list, global_node) { + ack = client->event_callback(client, chan, + DMA_RESOURCE_REMOVED); + + /* client was holding resources for this channel so + * free it + */ + if (ack == DMA_ACK) { + dma_chan_put(chan); + kref_put(&chan->device->refcount, + dma_async_device_cleanup); + } + } + mutex_unlock(&dma_list_mutex); +} + +/** + * dma_async_client_register - register a &dma_client + * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask' + */ +void dma_async_client_register(struct dma_client *client) +{ mutex_lock(&dma_list_mutex); list_add_tail(&client->global_node, &dma_client_list); mutex_unlock(&dma_list_mutex); - - return client; } EXPORT_SYMBOL(dma_async_client_register); @@ -272,40 +302,42 @@ EXPORT_SYMBOL(dma_async_client_register); */ void dma_async_client_unregister(struct dma_client *client) { + struct dma_device *device; struct dma_chan *chan; + enum dma_state_client ack;
if (!client) return; - rcu_read_lock(); - list_for_each_entry_rcu(chan, &client->channels, client_node) - dma_client_chan_free(chan); - rcu_read_unlock(); - mutex_lock(&dma_list_mutex); + /* free all channels the client is holding */ + list_for_each_entry(device, &dma_device_list, global_node) + list_for_each_entry(chan, &device->channels, device_node) { + ack = client->event_callback(client, chan, + DMA_RESOURCE_REMOVED); + + if (ack == DMA_ACK) { + dma_chan_put(chan); + kref_put(&chan->device->refcount, + dma_async_device_cleanup); + } + } + list_del(&client->global_node); mutex_unlock(&dma_list_mutex); - - kfree(client); - dma_chans_rebalance(); } EXPORT_SYMBOL(dma_async_client_unregister); /** - * dma_async_client_chan_request - request DMA channels - * @client: &dma_client - * @number: count of DMA channels requested - * - * Clients call dma_async_client_chan_request() to specify how many - * DMA channels they need, 0 to free all currently allocated. - * The resulting allocations/frees are indicated to the client via the - * event callback. + * dma_async_client_chan_request - send all available channels to the + * client that satisfy the capability mask + * @client - requester */ -void dma_async_client_chan_request(struct dma_client *client, - unsigned int number) +void dma_async_client_chan_request(struct dma_client *client) { - client->chans_desired = number; - dma_chans_rebalance(); + mutex_lock(&dma_list_mutex); + dma_client_chan_alloc(client); + mutex_unlock(&dma_list_mutex); } EXPORT_SYMBOL(dma_async_client_chan_request); @@ -316,12 +348,31 @@ EXPORT_SYMBOL(dma_async_client_chan_request); int dma_async_device_register(struct dma_device *device) { static int id; - int chancnt = 0; + int chancnt = 0, rc; struct dma_chan* chan; if (!device) return -ENODEV; + /* validate device routines */ + BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) && + !device->device_prep_dma_memcpy); + BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) && + !device->device_prep_dma_xor); + BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) && + !device->device_prep_dma_zero_sum); + BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) && + !device->device_prep_dma_memset); + BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && + !device->device_prep_dma_interrupt); + + BUG_ON(!device->device_alloc_chan_resources); + BUG_ON(!device->device_free_chan_resources); + BUG_ON(!device->device_dependency_added); + BUG_ON(!device->device_is_tx_complete); + BUG_ON(!device->device_issue_pending); + BUG_ON(!device->dev); + init_completion(&device->done); kref_init(&device->refcount); device->dev_id = id++; @@ -338,17 +389,38 @@ int dma_async_device_register(struct dma_device *device) snprintf(chan->class_dev.class_id, BUS_ID_SIZE, "dma%dchan%d", device->dev_id, chan->chan_id); + rc = class_device_register(&chan->class_dev); + if (rc) { + chancnt--; + free_percpu(chan->local); + chan->local = NULL; + goto err_out; + } + kref_get(&device->refcount); - class_device_register(&chan->class_dev); + kref_init(&chan->refcount); + chan->slow_ref = 0; + INIT_RCU_HEAD(&chan->rcu); } mutex_lock(&dma_list_mutex); list_add_tail(&device->global_node, &dma_device_list); mutex_unlock(&dma_list_mutex); - dma_chans_rebalance(); + dma_clients_notify_available(); return 0; + +err_out: + list_for_each_entry(chan, &device->channels, device_node) { + if (chan->local == NULL) + continue; + kref_put(&device->refcount, dma_async_device_cleanup); + class_device_unregister(&chan->class_dev); + chancnt--; + free_percpu(chan->local); + } + return rc; }
EXPORT_SYMBOL(dma_async_device_register); @@ -371,32 +443,165 @@ static void dma_async_device_cleanup(struct kref *kref) void dma_async_device_unregister(struct dma_device *device) { struct dma_chan *chan; - unsigned long flags; mutex_lock(&dma_list_mutex); list_del(&device->global_node); mutex_unlock(&dma_list_mutex); list_for_each_entry(chan, &device->channels, device_node) { - if (chan->client) { - spin_lock_irqsave(&chan->client->lock, flags); - list_del(&chan->client_node); - chan->client->chan_count--; - spin_unlock_irqrestore(&chan->client->lock, flags); - chan->client->event_callback(chan->client, - chan, - DMA_RESOURCE_REMOVED); - dma_client_chan_free(chan); - } + dma_clients_notify_removed(chan); class_device_unregister(&chan->class_dev); + dma_chan_release(chan); } - dma_chans_rebalance(); kref_put(&device->refcount, dma_async_device_cleanup); wait_for_completion(&device->done); } EXPORT_SYMBOL(dma_async_device_unregister); +/** + * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses + * @chan: DMA channel to offload copy to + * @dest: destination address (virtual) + * @src: source address (virtual) + * @len: length + * + * Both @dest and @src must be mappable to a bus address according to the + * DMA mapping API rules for streaming mappings. + * Both @dest and @src must stay memory resident (kernel memory or locked + * user space pages). + */ +dma_cookie_t +dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, + void *src, size_t len) +{ + struct dma_device *dev = chan->device; + struct dma_async_tx_descriptor *tx; + dma_addr_t addr; + dma_cookie_t cookie; + int cpu; + + tx = dev->device_prep_dma_memcpy(chan, len, 0); + if (!tx) + return -ENOMEM; + + tx->ack = 1; + tx->callback = NULL; + addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE); + tx->tx_set_src(addr, tx, 0); + addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE); + tx->tx_set_dest(addr, tx, 0); + cookie = tx->tx_submit(tx); + + cpu = get_cpu(); + per_cpu_ptr(chan->local, cpu)->bytes_transferred += len; + per_cpu_ptr(chan->local, cpu)->memcpy_count++; + put_cpu(); + + return cookie; +} +EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf); + +/** + * dma_async_memcpy_buf_to_pg - offloaded copy from address to page + * @chan: DMA channel to offload copy to + * @page: destination page + * @offset: offset in page to copy to + * @kdata: source address (virtual) + * @len: length + * + * Both @page/@offset and @kdata must be mappable to a bus address according + * to the DMA mapping API rules for streaming mappings. 
+ * Both @page/@offset and @kdata must stay memory resident (kernel memory or + * locked user space pages) + */ +dma_cookie_t +dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, + unsigned int offset, void *kdata, size_t len) +{ + struct dma_device *dev = chan->device; + struct dma_async_tx_descriptor *tx; + dma_addr_t addr; + dma_cookie_t cookie; + int cpu; + + tx = dev->device_prep_dma_memcpy(chan, len, 0); + if (!tx) + return -ENOMEM; + + tx->ack = 1; + tx->callback = NULL; + addr = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE); + tx->tx_set_src(addr, tx, 0); + addr = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE); + tx->tx_set_dest(addr, tx, 0); + cookie = tx->tx_submit(tx); + + cpu = get_cpu(); + per_cpu_ptr(chan->local, cpu)->bytes_transferred += len; + per_cpu_ptr(chan->local, cpu)->memcpy_count++; + put_cpu(); + + return cookie; +} +EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg); + +/** + * dma_async_memcpy_pg_to_pg - offloaded copy from page to page + * @chan: DMA channel to offload copy to + * @dest_pg: destination page + * @dest_off: offset in page to copy to + * @src_pg: source page + * @src_off: offset in page to copy from + * @len: length + * + * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus + * address according to the DMA mapping API rules for streaming mappings. + * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident + * (kernel memory or locked user space pages). + */ +dma_cookie_t +dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, + unsigned int dest_off, struct page *src_pg, unsigned int src_off, + size_t len) +{ + struct dma_device *dev = chan->device; + struct dma_async_tx_descriptor *tx; + dma_addr_t addr; + dma_cookie_t cookie; + int cpu; + + tx = dev->device_prep_dma_memcpy(chan, len, 0); + if (!tx) + return -ENOMEM; + + tx->ack = 1; + tx->callback = NULL; + addr = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE); + tx->tx_set_src(addr, tx, 0); + addr = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE); + tx->tx_set_dest(addr, tx, 0); + cookie = tx->tx_submit(tx); + + cpu = get_cpu(); + per_cpu_ptr(chan->local, cpu)->bytes_transferred += len; + per_cpu_ptr(chan->local, cpu)->memcpy_count++; + put_cpu(); + + return cookie; +} +EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg); + +void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, + struct dma_chan *chan) +{ + tx->chan = chan; + spin_lock_init(&tx->lock); + INIT_LIST_HEAD(&tx->depend_node); + INIT_LIST_HEAD(&tx->depend_list); +} +EXPORT_SYMBOL(dma_async_tx_descriptor_init); + static int __init dma_bus_init(void) { mutex_init(&dma_list_mutex); diff --git a/drivers/dma/ioatdma.c b/drivers/dma/ioatdma.c index 85001413955..5fbe56b5cea 100644 --- a/drivers/dma/ioatdma.c +++ b/drivers/dma/ioatdma.c @@ -32,16 +32,17 @@ #include <linux/delay.h> #include <linux/dma-mapping.h> #include "ioatdma.h" -#include "ioatdma_io.h" #include "ioatdma_registers.h" #include "ioatdma_hw.h" #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common) #define to_ioat_device(dev) container_of(dev, struct ioat_device, common) #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) +#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx) /* internal functions */ static int __devinit ioat_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void ioat_shutdown(struct pci_dev *pdev); static void __devexit ioat_remove(struct pci_dev *pdev); static 
int enumerate_dma_channels(struct ioat_device *device) @@ -51,8 +52,8 @@ static int enumerate_dma_channels(struct ioat_device *device) int i; struct ioat_dma_chan *ioat_chan; - device->common.chancnt = ioatdma_read8(device, IOAT_CHANCNT_OFFSET); - xfercap_scale = ioatdma_read8(device, IOAT_XFERCAP_OFFSET); + device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); + xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale)); for (i = 0; i < device->common.chancnt; i++) { @@ -71,13 +72,79 @@ static int enumerate_dma_channels(struct ioat_device *device) INIT_LIST_HEAD(&ioat_chan->used_desc); /* This should be made common somewhere in dmaengine.c */ ioat_chan->common.device = &device->common; - ioat_chan->common.client = NULL; list_add_tail(&ioat_chan->common.device_node, &device->common.channels); } return device->common.chancnt; } +static void +ioat_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx, int index) +{ + struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx); + struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); + + pci_unmap_addr_set(desc, src, addr); + + list_for_each_entry(iter, &desc->async_tx.tx_list, node) { + iter->hw->src_addr = addr; + addr += ioat_chan->xfercap; + } + +} + +static void +ioat_set_dest(dma_addr_t addr, struct dma_async_tx_descriptor *tx, int index) +{ + struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx); + struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); + + pci_unmap_addr_set(desc, dst, addr); + + list_for_each_entry(iter, &desc->async_tx.tx_list, node) { + iter->hw->dst_addr = addr; + addr += ioat_chan->xfercap; + } +} + +static dma_cookie_t +ioat_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); + struct ioat_desc_sw *desc = tx_to_ioat_desc(tx); + int append = 0; + dma_cookie_t cookie; + struct ioat_desc_sw *group_start; + + group_start = list_entry(desc->async_tx.tx_list.next, + struct ioat_desc_sw, node); + spin_lock_bh(&ioat_chan->desc_lock); + /* cookie incr and addition to used_list must be atomic */ + cookie = ioat_chan->common.cookie; + cookie++; + if (cookie < 0) + cookie = 1; + ioat_chan->common.cookie = desc->async_tx.cookie = cookie; + + /* write address into NextDescriptor field of last desc in chain */ + to_ioat_desc(ioat_chan->used_desc.prev)->hw->next = + group_start->async_tx.phys; + list_splice_init(&desc->async_tx.tx_list, ioat_chan->used_desc.prev); + + ioat_chan->pending += desc->tx_cnt; + if (ioat_chan->pending >= 4) { + append = 1; + ioat_chan->pending = 0; + } + spin_unlock_bh(&ioat_chan->desc_lock); + + if (append) + writeb(IOAT_CHANCMD_APPEND, + ioat_chan->reg_base + IOAT_CHANCMD_OFFSET); + + return cookie; +} + static struct ioat_desc_sw *ioat_dma_alloc_descriptor( struct ioat_dma_chan *ioat_chan, gfp_t flags) @@ -99,8 +166,13 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor( } memset(desc, 0, sizeof(*desc)); + dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common); + desc_sw->async_tx.tx_set_src = ioat_set_src; + desc_sw->async_tx.tx_set_dest = ioat_set_dest; + desc_sw->async_tx.tx_submit = ioat_tx_submit; + INIT_LIST_HEAD(&desc_sw->async_tx.tx_list); desc_sw->hw = desc; - desc_sw->phys = phys; + desc_sw->async_tx.phys = phys; return desc_sw; } @@ -123,7 +195,7 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) * In-use bit automatically set by reading chanctrl * If 0, we got it, if 1, someone else did */ - chanctrl = 
ioatdma_chan_read16(ioat_chan, IOAT_CHANCTRL_OFFSET); + chanctrl = readw(ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); if (chanctrl & IOAT_CHANCTRL_CHANNEL_IN_USE) return -EBUSY; @@ -132,12 +204,12 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) IOAT_CHANCTRL_ERR_INT_EN | IOAT_CHANCTRL_ANY_ERR_ABORT_EN | IOAT_CHANCTRL_ERR_COMPLETION_EN; - ioatdma_chan_write16(ioat_chan, IOAT_CHANCTRL_OFFSET, chanctrl); + writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); - chanerr = ioatdma_chan_read32(ioat_chan, IOAT_CHANERR_OFFSET); + chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); if (chanerr) { printk("IOAT: CHANERR = %x, clearing\n", chanerr); - ioatdma_chan_write32(ioat_chan, IOAT_CHANERR_OFFSET, chanerr); + writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); } /* Allocate descriptors */ @@ -161,10 +233,10 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) &ioat_chan->completion_addr); memset(ioat_chan->completion_virt, 0, sizeof(*ioat_chan->completion_virt)); - ioatdma_chan_write32(ioat_chan, IOAT_CHANCMP_OFFSET_LOW, - ((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF); - ioatdma_chan_write32(ioat_chan, IOAT_CHANCMP_OFFSET_HIGH, - ((u64) ioat_chan->completion_addr) >> 32); + writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF, + ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); + writel(((u64) ioat_chan->completion_addr) >> 32, + ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); ioat_start_null_desc(ioat_chan); return i; @@ -182,18 +254,20 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan) ioat_dma_memcpy_cleanup(ioat_chan); - ioatdma_chan_write8(ioat_chan, IOAT_CHANCMD_OFFSET, IOAT_CHANCMD_RESET); + writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET); spin_lock_bh(&ioat_chan->desc_lock); list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) { in_use_descs++; list_del(&desc->node); - pci_pool_free(ioat_device->dma_pool, desc->hw, desc->phys); + pci_pool_free(ioat_device->dma_pool, desc->hw, + desc->async_tx.phys); kfree(desc); } list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) { list_del(&desc->node); - pci_pool_free(ioat_device->dma_pool, desc->hw, desc->phys); + pci_pool_free(ioat_device->dma_pool, desc->hw, + desc->async_tx.phys); kfree(desc); } spin_unlock_bh(&ioat_chan->desc_lock); @@ -210,50 +284,30 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan) ioat_chan->last_completion = ioat_chan->completion_addr = 0; /* Tell hw the chan is free */ - chanctrl = ioatdma_chan_read16(ioat_chan, IOAT_CHANCTRL_OFFSET); + chanctrl = readw(ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); chanctrl &= ~IOAT_CHANCTRL_CHANNEL_IN_USE; - ioatdma_chan_write16(ioat_chan, IOAT_CHANCTRL_OFFSET, chanctrl); + writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); } -/** - * do_ioat_dma_memcpy - actual function that initiates a IOAT DMA transaction - * @ioat_chan: IOAT DMA channel handle - * @dest: DMA destination address - * @src: DMA source address - * @len: transaction length in bytes - */ - -static dma_cookie_t do_ioat_dma_memcpy(struct ioat_dma_chan *ioat_chan, - dma_addr_t dest, - dma_addr_t src, - size_t len) +static struct dma_async_tx_descriptor * +ioat_dma_prep_memcpy(struct dma_chan *chan, size_t len, int int_en) { - struct ioat_desc_sw *first; - struct ioat_desc_sw *prev; - struct ioat_desc_sw *new; - dma_cookie_t cookie; + struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + struct ioat_desc_sw *first, *prev, *new; LIST_HEAD(new_chain); u32 copy; 
size_t orig_len; - dma_addr_t orig_src, orig_dst; - unsigned int desc_count = 0; - unsigned int append = 0; - - if (!ioat_chan || !dest || !src) - return -EFAULT; + int desc_count = 0; if (!len) - return ioat_chan->common.cookie; + return NULL; orig_len = len; - orig_src = src; - orig_dst = dest; first = NULL; prev = NULL; spin_lock_bh(&ioat_chan->desc_lock); - while (len) { if (!list_empty(&ioat_chan->free_desc)) { new = to_ioat_desc(ioat_chan->free_desc.next); @@ -270,141 +324,36 @@ static dma_cookie_t do_ioat_dma_memcpy(struct ioat_dma_chan *ioat_chan, new->hw->size = copy; new->hw->ctl = 0; - new->hw->src_addr = src; - new->hw->dst_addr = dest; - new->cookie = 0; + new->async_tx.cookie = 0; + new->async_tx.ack = 1; /* chain together the physical address list for the HW */ if (!first) first = new; else - prev->hw->next = (u64) new->phys; + prev->hw->next = (u64) new->async_tx.phys; prev = new; - len -= copy; - dest += copy; - src += copy; - list_add_tail(&new->node, &new_chain); desc_count++; } - new->hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; - new->hw->next = 0; - /* cookie incr and addition to used_list must be atomic */ + list_splice(&new_chain, &new->async_tx.tx_list); - cookie = ioat_chan->common.cookie; - cookie++; - if (cookie < 0) - cookie = 1; - ioat_chan->common.cookie = new->cookie = cookie; + new->hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; + new->hw->next = 0; + new->tx_cnt = desc_count; + new->async_tx.ack = 0; /* client is in control of this ack */ + new->async_tx.cookie = -EBUSY; - pci_unmap_addr_set(new, src, orig_src); - pci_unmap_addr_set(new, dst, orig_dst); pci_unmap_len_set(new, src_len, orig_len); pci_unmap_len_set(new, dst_len, orig_len); - - /* write address into NextDescriptor field of last desc in chain */ - to_ioat_desc(ioat_chan->used_desc.prev)->hw->next = first->phys; - list_splice_init(&new_chain, ioat_chan->used_desc.prev); - - ioat_chan->pending += desc_count; - if (ioat_chan->pending >= 20) { - append = 1; - ioat_chan->pending = 0; - } - spin_unlock_bh(&ioat_chan->desc_lock); - if (append) - ioatdma_chan_write8(ioat_chan, - IOAT_CHANCMD_OFFSET, - IOAT_CHANCMD_APPEND); - return cookie; -} - -/** - * ioat_dma_memcpy_buf_to_buf - wrapper that takes src & dest bufs - * @chan: IOAT DMA channel handle - * @dest: DMA destination address - * @src: DMA source address - * @len: transaction length in bytes - */ - -static dma_cookie_t ioat_dma_memcpy_buf_to_buf(struct dma_chan *chan, - void *dest, - void *src, - size_t len) -{ - dma_addr_t dest_addr; - dma_addr_t src_addr; - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); - - dest_addr = pci_map_single(ioat_chan->device->pdev, - dest, len, PCI_DMA_FROMDEVICE); - src_addr = pci_map_single(ioat_chan->device->pdev, - src, len, PCI_DMA_TODEVICE); - - return do_ioat_dma_memcpy(ioat_chan, dest_addr, src_addr, len); + return new ? 
&new->async_tx : NULL; } -/** - * ioat_dma_memcpy_buf_to_pg - wrapper, copying from a buf to a page - * @chan: IOAT DMA channel handle - * @page: pointer to the page to copy to - * @offset: offset into that page - * @src: DMA source address - * @len: transaction length in bytes - */ - -static dma_cookie_t ioat_dma_memcpy_buf_to_pg(struct dma_chan *chan, - struct page *page, - unsigned int offset, - void *src, - size_t len) -{ - dma_addr_t dest_addr; - dma_addr_t src_addr; - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); - - dest_addr = pci_map_page(ioat_chan->device->pdev, - page, offset, len, PCI_DMA_FROMDEVICE); - src_addr = pci_map_single(ioat_chan->device->pdev, - src, len, PCI_DMA_TODEVICE); - - return do_ioat_dma_memcpy(ioat_chan, dest_addr, src_addr, len); -} - -/** - * ioat_dma_memcpy_pg_to_pg - wrapper, copying between two pages - * @chan: IOAT DMA channel handle - * @dest_pg: pointer to the page to copy to - * @dest_off: offset into that page - * @src_pg: pointer to the page to copy from - * @src_off: offset into that page - * @len: transaction length in bytes. This is guaranteed not to make a copy - * across a page boundary. - */ - -static dma_cookie_t ioat_dma_memcpy_pg_to_pg(struct dma_chan *chan, - struct page *dest_pg, - unsigned int dest_off, - struct page *src_pg, - unsigned int src_off, - size_t len) -{ - dma_addr_t dest_addr; - dma_addr_t src_addr; - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); - - dest_addr = pci_map_page(ioat_chan->device->pdev, - dest_pg, dest_off, len, PCI_DMA_FROMDEVICE); - src_addr = pci_map_page(ioat_chan->device->pdev, - src_pg, src_off, len, PCI_DMA_TODEVICE); - - return do_ioat_dma_memcpy(ioat_chan, dest_addr, src_addr, len); -} /** * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended descriptors to hw @@ -417,9 +366,8 @@ static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan) if (ioat_chan->pending != 0) { ioat_chan->pending = 0; - ioatdma_chan_write8(ioat_chan, - IOAT_CHANCMD_OFFSET, - IOAT_CHANCMD_APPEND); + writeb(IOAT_CHANCMD_APPEND, + ioat_chan->reg_base + IOAT_CHANCMD_OFFSET); } } @@ -449,7 +397,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan) if ((chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) == IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) { printk("IOAT: Channel halted, chanerr = %x\n", - ioatdma_chan_read32(chan, IOAT_CHANERR_OFFSET)); + readl(chan->reg_base + IOAT_CHANERR_OFFSET)); /* TODO do something to salvage the situation */ } @@ -467,8 +415,8 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan) * exceeding xfercap, perhaps. If so, only the last one will * have a cookie, and require unmapping. */ - if (desc->cookie) { - cookie = desc->cookie; + if (desc->async_tx.cookie) { + cookie = desc->async_tx.cookie; /* yes we are unmapping both _page and _single alloc'd regions with unmap_page. Is this *really* that bad? @@ -483,14 +431,19 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan) PCI_DMA_TODEVICE); } - if (desc->phys != phys_complete) { - /* a completed entry, but not the last, so cleanup */ - list_del(&desc->node); - list_add_tail(&desc->node, &chan->free_desc); + if (desc->async_tx.phys != phys_complete) { + /* a completed entry, but not the last, so cleanup + * if the client is done with the descriptor + */ + if (desc->async_tx.ack) { + list_del(&desc->node); + list_add_tail(&desc->node, &chan->free_desc); + } else + desc->async_tx.cookie = 0; } else { /* last used desc. 
Do not remove, so we can append from it, but don't look at it next time, either */ - desc->cookie = 0; + desc->async_tx.cookie = 0; /* TODO check status bits? */ break; @@ -506,6 +459,17 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan) spin_unlock(&chan->cleanup_lock); } +static void ioat_dma_dependency_added(struct dma_chan *chan) +{ + struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + spin_lock_bh(&ioat_chan->desc_lock); + if (ioat_chan->pending == 0) { + spin_unlock_bh(&ioat_chan->desc_lock); + ioat_dma_memcpy_cleanup(ioat_chan); + } else + spin_unlock_bh(&ioat_chan->desc_lock); +} + /** * ioat_dma_is_complete - poll the status of a IOAT DMA transaction * @chan: IOAT DMA channel handle @@ -553,6 +517,8 @@ static enum dma_status ioat_dma_is_complete(struct dma_chan *chan, static struct pci_device_id ioat_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) }, + { PCI_DEVICE(PCI_VENDOR_ID_UNISYS, + PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) }, { 0, } }; @@ -560,6 +526,7 @@ static struct pci_driver ioat_pci_driver = { .name = "ioatdma", .id_table = ioat_pci_tbl, .probe = ioat_probe, + .shutdown = ioat_shutdown, .remove = __devexit_p(ioat_remove), }; @@ -569,21 +536,21 @@ static irqreturn_t ioat_do_interrupt(int irq, void *data) unsigned long attnstatus; u8 intrctrl; - intrctrl = ioatdma_read8(instance, IOAT_INTRCTRL_OFFSET); + intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET); if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN)) return IRQ_NONE; if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) { - ioatdma_write8(instance, IOAT_INTRCTRL_OFFSET, intrctrl); + writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); return IRQ_NONE; } - attnstatus = ioatdma_read32(instance, IOAT_ATTNSTATUS_OFFSET); + attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET); printk(KERN_ERR "ioatdma error: interrupt! 
status %lx\n", attnstatus); - ioatdma_write8(instance, IOAT_INTRCTRL_OFFSET, intrctrl); + writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); return IRQ_HANDLED; } @@ -607,19 +574,17 @@ static void ioat_start_null_desc(struct ioat_dma_chan *ioat_chan) desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL; desc->hw->next = 0; + desc->async_tx.ack = 1; list_add_tail(&desc->node, &ioat_chan->used_desc); spin_unlock_bh(&ioat_chan->desc_lock); -#if (BITS_PER_LONG == 64) - ioatdma_chan_write64(ioat_chan, IOAT_CHAINADDR_OFFSET, desc->phys); -#else - ioatdma_chan_write32(ioat_chan, - IOAT_CHAINADDR_OFFSET_LOW, - (u32) desc->phys); - ioatdma_chan_write32(ioat_chan, IOAT_CHAINADDR_OFFSET_HIGH, 0); -#endif - ioatdma_chan_write8(ioat_chan, IOAT_CHANCMD_OFFSET, IOAT_CHANCMD_START); + writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, + ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW); + writel(((u64) desc->async_tx.phys) >> 32, + ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH); + + writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET); } /* @@ -633,6 +598,8 @@ static int ioat_self_test(struct ioat_device *device) u8 *src; u8 *dest; struct dma_chan *dma_chan; + struct dma_async_tx_descriptor *tx; + dma_addr_t addr; dma_cookie_t cookie; int err = 0; @@ -658,7 +625,15 @@ static int ioat_self_test(struct ioat_device *device) goto out; } - cookie = ioat_dma_memcpy_buf_to_buf(dma_chan, dest, src, IOAT_TEST_SIZE); + tx = ioat_dma_prep_memcpy(dma_chan, IOAT_TEST_SIZE, 0); + async_tx_ack(tx); + addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE, + DMA_TO_DEVICE); + ioat_set_src(addr, tx, 0); + addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE, + DMA_FROM_DEVICE); + ioat_set_dest(addr, tx, 0); + cookie = ioat_tx_submit(tx); ioat_dma_memcpy_issue_pending(dma_chan); msleep(1); @@ -748,19 +723,20 @@ static int __devinit ioat_probe(struct pci_dev *pdev, device->reg_base = reg_base; - ioatdma_write8(device, IOAT_INTRCTRL_OFFSET, IOAT_INTRCTRL_MASTER_INT_EN); + writeb(IOAT_INTRCTRL_MASTER_INT_EN, device->reg_base + IOAT_INTRCTRL_OFFSET); pci_set_master(pdev); INIT_LIST_HEAD(&device->common.channels); enumerate_dma_channels(device); + dma_cap_set(DMA_MEMCPY, device->common.cap_mask); device->common.device_alloc_chan_resources = ioat_dma_alloc_chan_resources; device->common.device_free_chan_resources = ioat_dma_free_chan_resources; - device->common.device_memcpy_buf_to_buf = ioat_dma_memcpy_buf_to_buf; - device->common.device_memcpy_buf_to_pg = ioat_dma_memcpy_buf_to_pg; - device->common.device_memcpy_pg_to_pg = ioat_dma_memcpy_pg_to_pg; - device->common.device_memcpy_complete = ioat_dma_is_complete; - device->common.device_memcpy_issue_pending = ioat_dma_memcpy_issue_pending; + device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy; + device->common.device_is_tx_complete = ioat_dma_is_complete; + device->common.device_issue_pending = ioat_dma_memcpy_issue_pending; + device->common.device_dependency_added = ioat_dma_dependency_added; + device->common.dev = &pdev->dev; printk(KERN_INFO "Intel(R) I/OAT DMA Engine found, %d channels\n", device->common.chancnt); @@ -787,9 +763,20 @@ err_request_regions: err_set_dma_mask: pci_disable_device(pdev); err_enable_device: + + printk(KERN_ERR "Intel(R) I/OAT DMA Engine initialization failed\n"); + return err; } +static void ioat_shutdown(struct pci_dev *pdev) +{ + struct ioat_device *device; + device = pci_get_drvdata(pdev); + + dma_async_device_unregister(&device->common); +} + static void __devexit ioat_remove(struct pci_dev 
*pdev) { struct ioat_device *device; @@ -818,7 +805,7 @@ static void __devexit ioat_remove(struct pci_dev *pdev) } /* MODULE API */ -MODULE_VERSION("1.7"); +MODULE_VERSION("1.9"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Intel Corporation"); diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioatdma.h index 62b26a9be4c..d3726478031 100644 --- a/drivers/dma/ioatdma.h +++ b/drivers/dma/ioatdma.h @@ -30,9 +30,6 @@ #define IOAT_LOW_COMPLETION_MASK 0xffffffc0 -extern struct list_head dma_device_list; -extern struct list_head dma_client_list; - /** * struct ioat_device - internal representation of a IOAT device * @pdev: PCI-Express device @@ -105,21 +102,20 @@ struct ioat_dma_chan { /** * struct ioat_desc_sw - wrapper around hardware descriptor * @hw: hardware DMA descriptor - * @node: - * @cookie: - * @phys: + * @node: this descriptor will either be on the free list, + * or attached to a transaction list (async_tx.tx_list) + * @tx_cnt: number of descriptors required to complete the transaction + * @async_tx: the generic software descriptor for all engines */ - struct ioat_desc_sw { struct ioat_dma_descriptor *hw; struct list_head node; - dma_cookie_t cookie; - dma_addr_t phys; + int tx_cnt; DECLARE_PCI_UNMAP_ADDR(src) DECLARE_PCI_UNMAP_LEN(src_len) DECLARE_PCI_UNMAP_ADDR(dst) DECLARE_PCI_UNMAP_LEN(dst_len) + struct dma_async_tx_descriptor async_tx; }; #endif /* IOATDMA_H */ - diff --git a/drivers/dma/ioatdma_io.h b/drivers/dma/ioatdma_io.h deleted file mode 100644 index c0b4bf66c92..00000000000 --- a/drivers/dma/ioatdma_io.h +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 - * Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution in the - * file called COPYING. 
- */ -#ifndef IOATDMA_IO_H -#define IOATDMA_IO_H - -#include <asm/io.h> - -/* - * device and per-channel MMIO register read and write functions - * this is a lot of anoying inline functions, but it's typesafe - */ - -static inline u8 ioatdma_read8(struct ioat_device *device, - unsigned int offset) -{ - return readb(device->reg_base + offset); -} - -static inline u16 ioatdma_read16(struct ioat_device *device, - unsigned int offset) -{ - return readw(device->reg_base + offset); -} - -static inline u32 ioatdma_read32(struct ioat_device *device, - unsigned int offset) -{ - return readl(device->reg_base + offset); -} - -static inline void ioatdma_write8(struct ioat_device *device, - unsigned int offset, u8 value) -{ - writeb(value, device->reg_base + offset); -} - -static inline void ioatdma_write16(struct ioat_device *device, - unsigned int offset, u16 value) -{ - writew(value, device->reg_base + offset); -} - -static inline void ioatdma_write32(struct ioat_device *device, - unsigned int offset, u32 value) -{ - writel(value, device->reg_base + offset); -} - -static inline u8 ioatdma_chan_read8(struct ioat_dma_chan *chan, - unsigned int offset) -{ - return readb(chan->reg_base + offset); -} - -static inline u16 ioatdma_chan_read16(struct ioat_dma_chan *chan, - unsigned int offset) -{ - return readw(chan->reg_base + offset); -} - -static inline u32 ioatdma_chan_read32(struct ioat_dma_chan *chan, - unsigned int offset) -{ - return readl(chan->reg_base + offset); -} - -static inline void ioatdma_chan_write8(struct ioat_dma_chan *chan, - unsigned int offset, u8 value) -{ - writeb(value, chan->reg_base + offset); -} - -static inline void ioatdma_chan_write16(struct ioat_dma_chan *chan, - unsigned int offset, u16 value) -{ - writew(value, chan->reg_base + offset); -} - -static inline void ioatdma_chan_write32(struct ioat_dma_chan *chan, - unsigned int offset, u32 value) -{ - writel(value, chan->reg_base + offset); -} - -#if (BITS_PER_LONG == 64) -static inline u64 ioatdma_chan_read64(struct ioat_dma_chan *chan, - unsigned int offset) -{ - return readq(chan->reg_base + offset); -} - -static inline void ioatdma_chan_write64(struct ioat_dma_chan *chan, - unsigned int offset, u64 value) -{ - writeq(value, chan->reg_base + offset); -} -#endif - -#endif /* IOATDMA_IO_H */ - diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c new file mode 100644 index 00000000000..5a1d426744d --- /dev/null +++ b/drivers/dma/iop-adma.c @@ -0,0 +1,1467 @@ +/* + * offload engine driver for the Intel Xscale series of i/o processors + * Copyright © 2006, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + */ + +/* + * This driver supports the asynchronous DMA copy and RAID engines available + * on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x) + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/async_tx.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/memory.h> +#include <linux/ioport.h> + +#include <asm/arch/adma.h> + +#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common) +#define to_iop_adma_device(dev) \ + container_of(dev, struct iop_adma_device, common) +#define tx_to_iop_adma_slot(tx) \ + container_of(tx, struct iop_adma_desc_slot, async_tx) + +/** + * iop_adma_free_slots - flags descriptor slots for reuse + * @slot: Slot to free + * Caller must hold &iop_chan->lock while calling this function + */ +static void iop_adma_free_slots(struct iop_adma_desc_slot *slot) +{ + int stride = slot->slots_per_op; + + while (stride--) { + slot->slots_per_op = 0; + slot = list_entry(slot->slot_node.next, + struct iop_adma_desc_slot, + slot_node); + } +} + +static dma_cookie_t +iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, + struct iop_adma_chan *iop_chan, dma_cookie_t cookie) +{ + BUG_ON(desc->async_tx.cookie < 0); + spin_lock_bh(&desc->async_tx.lock); + if (desc->async_tx.cookie > 0) { + cookie = desc->async_tx.cookie; + desc->async_tx.cookie = 0; + + /* call the callback (must not sleep or submit new + * operations to this channel) + */ + if (desc->async_tx.callback) + desc->async_tx.callback( + desc->async_tx.callback_param); + + /* unmap dma addresses + * (unmap_single vs unmap_page?) + */ + if (desc->group_head && desc->unmap_len) { + struct iop_adma_desc_slot *unmap = desc->group_head; + struct device *dev = + &iop_chan->device->pdev->dev; + u32 len = unmap->unmap_len; + u32 src_cnt = unmap->unmap_src_cnt; + dma_addr_t addr = iop_desc_get_dest_addr(unmap, + iop_chan); + + dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE); + while (src_cnt--) { + addr = iop_desc_get_src_addr(unmap, + iop_chan, + src_cnt); + dma_unmap_page(dev, addr, len, + DMA_TO_DEVICE); + } + desc->group_head = NULL; + } + } + + /* run dependent operations */ + async_tx_run_dependencies(&desc->async_tx); + spin_unlock_bh(&desc->async_tx.lock); + + return cookie; +} + +static int +iop_adma_clean_slot(struct iop_adma_desc_slot *desc, + struct iop_adma_chan *iop_chan) +{ + /* the client is allowed to attach dependent operations + * until 'ack' is set + */ + if (!desc->async_tx.ack) + return 0; + + /* leave the last descriptor in the chain + * so we can append to it + */ + if (desc->chain_node.next == &iop_chan->chain) + return 1; + + dev_dbg(iop_chan->device->common.dev, + "\tfree slot: %d slots_per_op: %d\n", + desc->idx, desc->slots_per_op); + + list_del(&desc->chain_node); + iop_adma_free_slots(desc); + + return 0; +} + +static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) +{ + struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL; + dma_cookie_t cookie = 0; + u32 current_desc = iop_chan_get_current_descriptor(iop_chan); + int busy = iop_chan_is_busy(iop_chan); + int seen_current = 0, slot_cnt = 0, slots_per_op = 0; + + dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__); + /* free completed slots from the chain starting with + * the oldest descriptor + */ + list_for_each_entry_safe(iter, _iter, &iop_chan->chain, + chain_node) { + pr_debug("\tcookie: %d slot: %d busy: %d "
"this_desc: %#x next_desc: %#x ack: %d\n", + iter->async_tx.cookie, iter->idx, busy, + iter->async_tx.phys, iop_desc_get_next_desc(iter), + iter->async_tx.ack); + prefetch(_iter); + prefetch(&_iter->async_tx); + + /* do not advance past the current descriptor loaded into the + * hardware channel, subsequent descriptors are either in + * process or have not been submitted + */ + if (seen_current) + break; + + /* stop the search if we reach the current descriptor and the + * channel is busy, or if it appears that the current descriptor + * needs to be re-read (i.e. has been appended to) + */ + if (iter->async_tx.phys == current_desc) { + BUG_ON(seen_current++); + if (busy || iop_desc_get_next_desc(iter)) + break; + } + + /* detect the start of a group transaction */ + if (!slot_cnt && !slots_per_op) { + slot_cnt = iter->slot_cnt; + slots_per_op = iter->slots_per_op; + if (slot_cnt <= slots_per_op) { + slot_cnt = 0; + slots_per_op = 0; + } + } + + if (slot_cnt) { + pr_debug("\tgroup++\n"); + if (!grp_start) + grp_start = iter; + slot_cnt -= slots_per_op; + } + + /* all the members of a group are complete */ + if (slots_per_op != 0 && slot_cnt == 0) { + struct iop_adma_desc_slot *grp_iter, *_grp_iter; + int end_of_chain = 0; + pr_debug("\tgroup end\n"); + + /* collect the total results */ + if (grp_start->xor_check_result) { + u32 zero_sum_result = 0; + slot_cnt = grp_start->slot_cnt; + grp_iter = grp_start; + + list_for_each_entry_from(grp_iter, + &iop_chan->chain, chain_node) { + zero_sum_result |= + iop_desc_get_zero_result(grp_iter); + pr_debug("\titer%d result: %d\n", + grp_iter->idx, zero_sum_result); + slot_cnt -= slots_per_op; + if (slot_cnt == 0) + break; + } + pr_debug("\tgrp_start->xor_check_result: %p\n", + grp_start->xor_check_result); + *grp_start->xor_check_result = zero_sum_result; + } + + /* clean up the group */ + slot_cnt = grp_start->slot_cnt; + grp_iter = grp_start; + list_for_each_entry_safe_from(grp_iter, _grp_iter, + &iop_chan->chain, chain_node) { + cookie = iop_adma_run_tx_complete_actions( + grp_iter, iop_chan, cookie); + + slot_cnt -= slots_per_op; + end_of_chain = iop_adma_clean_slot(grp_iter, + iop_chan); + + if (slot_cnt == 0 || end_of_chain) + break; + } + + /* the group should be complete at this point */ + BUG_ON(slot_cnt); + + slots_per_op = 0; + grp_start = NULL; + if (end_of_chain) + break; + else + continue; + } else if (slots_per_op) /* wait for group completion */ + continue; + + /* write back zero sum results (single descriptor case) */ + if (iter->xor_check_result && iter->async_tx.cookie) + *iter->xor_check_result = + iop_desc_get_zero_result(iter); + + cookie = iop_adma_run_tx_complete_actions( + iter, iop_chan, cookie); + + if (iop_adma_clean_slot(iter, iop_chan)) + break; + } + + BUG_ON(!seen_current); + + iop_chan_idle(busy, iop_chan); + + if (cookie > 0) { + iop_chan->completed_cookie = cookie; + pr_debug("\tcompleted cookie %d\n", cookie); + } +} + +static void +iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) +{ + spin_lock_bh(&iop_chan->lock); + __iop_adma_slot_cleanup(iop_chan); + spin_unlock_bh(&iop_chan->lock); +} + +static void iop_adma_tasklet(unsigned long data) +{ + struct iop_adma_chan *chan = (struct iop_adma_chan *) data; + __iop_adma_slot_cleanup(chan); +} + +static struct iop_adma_desc_slot * +iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots, + int slots_per_op) +{ + struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL; + struct list_head chain = LIST_HEAD_INIT(chain); + int slots_found, retry = 0; + 
+ /* start search from the last allocated descriptor + * if a contiguous allocation can not be found start searching + * from the beginning of the list + */ +retry: + slots_found = 0; + if (retry == 0) + iter = iop_chan->last_used; + else + iter = list_entry(&iop_chan->all_slots, + struct iop_adma_desc_slot, + slot_node); + + list_for_each_entry_safe_continue( + iter, _iter, &iop_chan->all_slots, slot_node) { + prefetch(_iter); + prefetch(&_iter->async_tx); + if (iter->slots_per_op) { + /* give up after finding the first busy slot + * on the second pass through the list + */ + if (retry) + break; + + slots_found = 0; + continue; + } + + /* start the allocation if the slot is correctly aligned */ + if (!slots_found++) { + if (iop_desc_is_aligned(iter, slots_per_op)) + alloc_start = iter; + else { + slots_found = 0; + continue; + } + } + + if (slots_found == num_slots) { + struct iop_adma_desc_slot *alloc_tail = NULL; + struct iop_adma_desc_slot *last_used = NULL; + iter = alloc_start; + while (num_slots) { + int i; + dev_dbg(iop_chan->device->common.dev, + "allocated slot: %d " + "(desc %p phys: %#x) slots_per_op %d\n", + iter->idx, iter->hw_desc, + iter->async_tx.phys, slots_per_op); + + /* pre-ack all but the last descriptor */ + if (num_slots != slots_per_op) + iter->async_tx.ack = 1; + else + iter->async_tx.ack = 0; + + list_add_tail(&iter->chain_node, &chain); + alloc_tail = iter; + iter->async_tx.cookie = 0; + iter->slot_cnt = num_slots; + iter->xor_check_result = NULL; + for (i = 0; i < slots_per_op; i++) { + iter->slots_per_op = slots_per_op - i; + last_used = iter; + iter = list_entry(iter->slot_node.next, + struct iop_adma_desc_slot, + slot_node); + } + num_slots -= slots_per_op; + } + alloc_tail->group_head = alloc_start; + alloc_tail->async_tx.cookie = -EBUSY; + list_splice(&chain, &alloc_tail->async_tx.tx_list); + iop_chan->last_used = last_used; + iop_desc_clear_next_desc(alloc_start); + iop_desc_clear_next_desc(alloc_tail); + return alloc_tail; + } + } + if (!retry++) + goto retry; + + /* try to free some slots if the allocation fails */ + tasklet_schedule(&iop_chan->irq_tasklet); + + return NULL; +} + +static dma_cookie_t +iop_desc_assign_cookie(struct iop_adma_chan *iop_chan, + struct iop_adma_desc_slot *desc) +{ + dma_cookie_t cookie = iop_chan->common.cookie; + cookie++; + if (cookie < 0) + cookie = 1; + iop_chan->common.cookie = desc->async_tx.cookie = cookie; + return cookie; +} + +static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan) +{ + dev_dbg(iop_chan->device->common.dev, "pending: %d\n", + iop_chan->pending); + + if (iop_chan->pending >= IOP_ADMA_THRESHOLD) { + iop_chan->pending = 0; + iop_chan_append(iop_chan); + } +} + +static dma_cookie_t +iop_adma_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx); + struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan); + struct iop_adma_desc_slot *grp_start, *old_chain_tail; + int slot_cnt; + int slots_per_op; + dma_cookie_t cookie; + + grp_start = sw_desc->group_head; + slot_cnt = grp_start->slot_cnt; + slots_per_op = grp_start->slots_per_op; + + spin_lock_bh(&iop_chan->lock); + cookie = iop_desc_assign_cookie(iop_chan, sw_desc); + + old_chain_tail = list_entry(iop_chan->chain.prev, + struct iop_adma_desc_slot, chain_node); + list_splice_init(&sw_desc->async_tx.tx_list, + &old_chain_tail->chain_node); + + /* fix up the hardware chain */ + iop_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys); + + /* 1/ don't add pre-chained
descriptors + * 2/ dummy read to flush next_desc write + */ + BUG_ON(iop_desc_get_next_desc(sw_desc)); + + /* increment the pending count by the number of slots + * memcpy operations have a 1:1 (slot:operation) relation + * other operations are heavier and will pop the threshold + * more often. + */ + iop_chan->pending += slot_cnt; + iop_adma_check_threshold(iop_chan); + spin_unlock_bh(&iop_chan->lock); + + dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n", + __FUNCTION__, sw_desc->async_tx.cookie, sw_desc->idx); + + return cookie; +} + +static void +iop_adma_set_dest(dma_addr_t addr, struct dma_async_tx_descriptor *tx, + int index) +{ + struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx); + struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan); + + /* to do: support transfers lengths > IOP_ADMA_MAX_BYTE_COUNT */ + iop_desc_set_dest_addr(sw_desc->group_head, iop_chan, addr); +} + +static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan); +static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan); + +/* returns the number of allocated descriptors */ +static int iop_adma_alloc_chan_resources(struct dma_chan *chan) +{ + char *hw_desc; + int idx; + struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); + struct iop_adma_desc_slot *slot = NULL; + int init = iop_chan->slots_allocated ? 0 : 1; + struct iop_adma_platform_data *plat_data = + iop_chan->device->pdev->dev.platform_data; + int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE; + + /* Allocate descriptor slots */ + do { + idx = iop_chan->slots_allocated; + if (idx == num_descs_in_pool) + break; + + slot = kzalloc(sizeof(*slot), GFP_KERNEL); + if (!slot) { + printk(KERN_INFO "IOP ADMA Channel only initialized" + " %d descriptor slots", idx); + break; + } + hw_desc = (char *) iop_chan->device->dma_desc_pool_virt; + slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE]; + + dma_async_tx_descriptor_init(&slot->async_tx, chan); + slot->async_tx.tx_submit = iop_adma_tx_submit; + slot->async_tx.tx_set_dest = iop_adma_set_dest; + INIT_LIST_HEAD(&slot->chain_node); + INIT_LIST_HEAD(&slot->slot_node); + INIT_LIST_HEAD(&slot->async_tx.tx_list); + hw_desc = (char *) iop_chan->device->dma_desc_pool; + slot->async_tx.phys = + (dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE]; + slot->idx = idx; + + spin_lock_bh(&iop_chan->lock); + iop_chan->slots_allocated++; + list_add_tail(&slot->slot_node, &iop_chan->all_slots); + spin_unlock_bh(&iop_chan->lock); + } while (iop_chan->slots_allocated < num_descs_in_pool); + + if (idx && !iop_chan->last_used) + iop_chan->last_used = list_entry(iop_chan->all_slots.next, + struct iop_adma_desc_slot, + slot_node); + + dev_dbg(iop_chan->device->common.dev, + "allocated %d descriptor slots last_used: %p\n", + iop_chan->slots_allocated, iop_chan->last_used); + + /* initialize the channel and the chain with a null operation */ + if (init) { + if (dma_has_cap(DMA_MEMCPY, + iop_chan->device->common.cap_mask)) + iop_chan_start_null_memcpy(iop_chan); + else if (dma_has_cap(DMA_XOR, + iop_chan->device->common.cap_mask)) + iop_chan_start_null_xor(iop_chan); + else + BUG(); + } + + return (idx > 0) ? 
idx : -ENOMEM; +} + +static struct dma_async_tx_descriptor * +iop_adma_prep_dma_interrupt(struct dma_chan *chan) +{ + struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); + struct iop_adma_desc_slot *sw_desc, *grp_start; + int slot_cnt, slots_per_op; + + dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__); + + spin_lock_bh(&iop_chan->lock); + slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan); + sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); + if (sw_desc) { + grp_start = sw_desc->group_head; + iop_desc_init_interrupt(grp_start, iop_chan); + grp_start->unmap_len = 0; + } + spin_unlock_bh(&iop_chan->lock); + + return sw_desc ? &sw_desc->async_tx : NULL; +} + +static void +iop_adma_memcpy_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx, + int index) +{ + struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx); + struct iop_adma_desc_slot *grp_start = sw_desc->group_head; + + iop_desc_set_memcpy_src_addr(grp_start, addr); +} + +static struct dma_async_tx_descriptor * +iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en) +{ + struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); + struct iop_adma_desc_slot *sw_desc, *grp_start; + int slot_cnt, slots_per_op; + + if (unlikely(!len)) + return NULL; + BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); + + dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", + __FUNCTION__, len); + + spin_lock_bh(&iop_chan->lock); + slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op); + sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); + if (sw_desc) { + grp_start = sw_desc->group_head; + iop_desc_init_memcpy(grp_start, int_en); + iop_desc_set_byte_count(grp_start, iop_chan, len); + sw_desc->unmap_src_cnt = 1; + sw_desc->unmap_len = len; + sw_desc->async_tx.tx_set_src = iop_adma_memcpy_set_src; + } + spin_unlock_bh(&iop_chan->lock); + + return sw_desc ? &sw_desc->async_tx : NULL; +} + +static struct dma_async_tx_descriptor * +iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len, + int int_en) +{ + struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); + struct iop_adma_desc_slot *sw_desc, *grp_start; + int slot_cnt, slots_per_op; + + if (unlikely(!len)) + return NULL; + BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); + + dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", + __FUNCTION__, len); + + spin_lock_bh(&iop_chan->lock); + slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op); + sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); + if (sw_desc) { + grp_start = sw_desc->group_head; + iop_desc_init_memset(grp_start, int_en); + iop_desc_set_byte_count(grp_start, iop_chan, len); + iop_desc_set_block_fill_val(grp_start, value); + sw_desc->unmap_src_cnt = 1; + sw_desc->unmap_len = len; + } + spin_unlock_bh(&iop_chan->lock); + + return sw_desc ? 
&sw_desc->async_tx : NULL; +} + +static void +iop_adma_xor_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx, + int index) +{ + struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx); + struct iop_adma_desc_slot *grp_start = sw_desc->group_head; + + iop_desc_set_xor_src_addr(grp_start, index, addr); +} + +static struct dma_async_tx_descriptor * +iop_adma_prep_dma_xor(struct dma_chan *chan, unsigned int src_cnt, size_t len, + int int_en) +{ + struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); + struct iop_adma_desc_slot *sw_desc, *grp_start; + int slot_cnt, slots_per_op; + + if (unlikely(!len)) + return NULL; + BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT)); + + dev_dbg(iop_chan->device->common.dev, + "%s src_cnt: %d len: %u int_en: %d\n", + __FUNCTION__, src_cnt, len, int_en); + + spin_lock_bh(&iop_chan->lock); + slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op); + sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); + if (sw_desc) { + grp_start = sw_desc->group_head; + iop_desc_init_xor(grp_start, src_cnt, int_en); + iop_desc_set_byte_count(grp_start, iop_chan, len); + sw_desc->unmap_src_cnt = src_cnt; + sw_desc->unmap_len = len; + sw_desc->async_tx.tx_set_src = iop_adma_xor_set_src; + } + spin_unlock_bh(&iop_chan->lock); + + return sw_desc ? &sw_desc->async_tx : NULL; +} + +static void +iop_adma_xor_zero_sum_set_src(dma_addr_t addr, + struct dma_async_tx_descriptor *tx, + int index) +{ + struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx); + struct iop_adma_desc_slot *grp_start = sw_desc->group_head; + + iop_desc_set_zero_sum_src_addr(grp_start, index, addr); +} + +static struct dma_async_tx_descriptor * +iop_adma_prep_dma_zero_sum(struct dma_chan *chan, unsigned int src_cnt, + size_t len, u32 *result, int int_en) +{ + struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); + struct iop_adma_desc_slot *sw_desc, *grp_start; + int slot_cnt, slots_per_op; + + if (unlikely(!len)) + return NULL; + + dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n", + __FUNCTION__, src_cnt, len); + + spin_lock_bh(&iop_chan->lock); + slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op); + sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); + if (sw_desc) { + grp_start = sw_desc->group_head; + iop_desc_init_zero_sum(grp_start, src_cnt, int_en); + iop_desc_set_zero_sum_byte_count(grp_start, len); + grp_start->xor_check_result = result; + pr_debug("\t%s: grp_start->xor_check_result: %p\n", + __FUNCTION__, grp_start->xor_check_result); + sw_desc->unmap_src_cnt = src_cnt; + sw_desc->unmap_len = len; + sw_desc->async_tx.tx_set_src = iop_adma_xor_zero_sum_set_src; + } + spin_unlock_bh(&iop_chan->lock); + + return sw_desc ? 
&sw_desc->async_tx : NULL;
+}
+
+static void iop_adma_dependency_added(struct dma_chan *chan)
+{
+ struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
+ tasklet_schedule(&iop_chan->irq_tasklet);
+}
+
+static void iop_adma_free_chan_resources(struct dma_chan *chan)
+{
+ struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
+ struct iop_adma_desc_slot *iter, *_iter;
+ int in_use_descs = 0;
+
+ iop_adma_slot_cleanup(iop_chan);
+
+ spin_lock_bh(&iop_chan->lock);
+ list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
+ chain_node) {
+ in_use_descs++;
+ list_del(&iter->chain_node);
+ }
+ list_for_each_entry_safe_reverse(
+ iter, _iter, &iop_chan->all_slots, slot_node) {
+ list_del(&iter->slot_node);
+ kfree(iter);
+ iop_chan->slots_allocated--;
+ }
+ iop_chan->last_used = NULL;
+
+ dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
+ __FUNCTION__, iop_chan->slots_allocated);
+ spin_unlock_bh(&iop_chan->lock);
+
+ /* one in-use descriptor is expected: the null descriptor that was
+ * deliberately left on the chain
+ */
+ if (in_use_descs > 1)
+ printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
+ in_use_descs - 1);
+}
+
+/**
+ * iop_adma_is_complete - poll the status of an ADMA transaction
+ * @chan: ADMA channel handle
+ * @cookie: ADMA transaction identifier
+ * @done: if not %NULL, updated with the last completed cookie
+ * @used: if not %NULL, updated with the last used cookie
+ */
+static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ dma_cookie_t *done,
+ dma_cookie_t *used)
+{
+ struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+ enum dma_status ret;
+
+ last_used = chan->cookie;
+ last_complete = iop_chan->completed_cookie;
+
+ if (done)
+ *done = last_complete;
+ if (used)
+ *used = last_used;
+
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+ if (ret == DMA_SUCCESS)
+ return ret;
+
+ iop_adma_slot_cleanup(iop_chan);
+
+ last_used = chan->cookie;
+ last_complete = iop_chan->completed_cookie;
+
+ if (done)
+ *done = last_complete;
+ if (used)
+ *used = last_used;
+
+ return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+static irqreturn_t iop_adma_eot_handler(int irq, void *data)
+{
+ struct iop_adma_chan *chan = data;
+
+ dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);
+
+ tasklet_schedule(&chan->irq_tasklet);
+
+ iop_adma_device_clear_eot_status(chan);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
+{
+ struct iop_adma_chan *chan = data;
+
+ dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);
+
+ tasklet_schedule(&chan->irq_tasklet);
+
+ iop_adma_device_clear_eoc_status(chan);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t iop_adma_err_handler(int irq, void *data)
+{
+ struct iop_adma_chan *chan = data;
+ unsigned long status = iop_chan_get_status(chan);
+
+ dev_printk(KERN_ERR, chan->device->common.dev,
+ "error ( %s%s%s%s%s%s%s)\n",
+ iop_is_err_int_parity(status, chan) ? "int_parity " : "",
+ iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
+ iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
+ iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
+ iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
+ iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
+ iop_is_err_split_tx(status, chan) ? "split_tx " : "");
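+
+ /* any of these faults indicates corrupted descriptor state or a
+ * hardware error; log and clear the cause, then halt via BUG()
+ * rather than let transactions complete with bad data
+ */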
"split_tx " : ""); + + iop_adma_device_clear_err_status(chan); + + BUG(); + + return IRQ_HANDLED; +} + +static void iop_adma_issue_pending(struct dma_chan *chan) +{ + struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); + + if (iop_chan->pending) { + iop_chan->pending = 0; + iop_chan_append(iop_chan); + } +} + +/* + * Perform a transaction to verify the HW works. + */ +#define IOP_ADMA_TEST_SIZE 2000 + +static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device) +{ + int i; + void *src, *dest; + dma_addr_t src_dma, dest_dma; + struct dma_chan *dma_chan; + dma_cookie_t cookie; + struct dma_async_tx_descriptor *tx; + int err = 0; + struct iop_adma_chan *iop_chan; + + dev_dbg(device->common.dev, "%s\n", __FUNCTION__); + + src = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL); + if (!src) + return -ENOMEM; + dest = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL); + if (!dest) { + kfree(src); + return -ENOMEM; + } + + /* Fill in src buffer */ + for (i = 0; i < IOP_ADMA_TEST_SIZE; i++) + ((u8 *) src)[i] = (u8)i; + + memset(dest, 0, IOP_ADMA_TEST_SIZE); + + /* Start copy, using first DMA channel */ + dma_chan = container_of(device->common.channels.next, + struct dma_chan, + device_node); + if (iop_adma_alloc_chan_resources(dma_chan) < 1) { + err = -ENODEV; + goto out; + } + + tx = iop_adma_prep_dma_memcpy(dma_chan, IOP_ADMA_TEST_SIZE, 1); + dest_dma = dma_map_single(dma_chan->device->dev, dest, + IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE); + iop_adma_set_dest(dest_dma, tx, 0); + src_dma = dma_map_single(dma_chan->device->dev, src, + IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE); + iop_adma_memcpy_set_src(src_dma, tx, 0); + + cookie = iop_adma_tx_submit(tx); + iop_adma_issue_pending(dma_chan); + async_tx_ack(tx); + msleep(1); + + if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != + DMA_SUCCESS) { + dev_printk(KERN_ERR, dma_chan->device->dev, + "Self-test copy timed out, disabling\n"); + err = -ENODEV; + goto free_resources; + } + + iop_chan = to_iop_adma_chan(dma_chan); + dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma, + IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE); + if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) { + dev_printk(KERN_ERR, dma_chan->device->dev, + "Self-test copy failed compare, disabling\n"); + err = -ENODEV; + goto free_resources; + } + +free_resources: + iop_adma_free_chan_resources(dma_chan); +out: + kfree(src); + kfree(dest); + return err; +} + +#define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */ +static int __devinit +iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device) +{ + int i, src_idx; + struct page *dest; + struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST]; + struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1]; + dma_addr_t dma_addr, dest_dma; + struct dma_async_tx_descriptor *tx; + struct dma_chan *dma_chan; + dma_cookie_t cookie; + u8 cmp_byte = 0; + u32 cmp_word; + u32 zero_sum_result; + int err = 0; + struct iop_adma_chan *iop_chan; + + dev_dbg(device->common.dev, "%s\n", __FUNCTION__); + + for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) { + xor_srcs[src_idx] = alloc_page(GFP_KERNEL); + if (!xor_srcs[src_idx]) + while (src_idx--) { + __free_page(xor_srcs[src_idx]); + return -ENOMEM; + } + } + + dest = alloc_page(GFP_KERNEL); + if (!dest) + while (src_idx--) { + __free_page(xor_srcs[src_idx]); + return -ENOMEM; + } + + /* Fill in src buffers */ + for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) { + u8 *ptr = page_address(xor_srcs[src_idx]); + for (i = 0; i < PAGE_SIZE; i++) + ptr[i] = (1 << src_idx); + } + 
+ for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
+ cmp_byte ^= (u8) (1 << src_idx);
+
+ cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
+ (cmp_byte << 8) | cmp_byte;
+
+ memset(page_address(dest), 0, PAGE_SIZE);
+
+ dma_chan = container_of(device->common.channels.next,
+ struct dma_chan,
+ device_node);
+ if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ /* test xor */
+ tx = iop_adma_prep_dma_xor(dma_chan, IOP_ADMA_NUM_SRC_TEST,
+ PAGE_SIZE, 1);
+ dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ iop_adma_set_dest(dest_dma, tx, 0);
+
+ for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
+ dma_addr = dma_map_page(dma_chan->device->dev, xor_srcs[i], 0,
+ PAGE_SIZE, DMA_TO_DEVICE);
+ iop_adma_xor_set_src(dma_addr, tx, i);
+ }
+
+ cookie = iop_adma_tx_submit(tx);
+ iop_adma_issue_pending(dma_chan);
+ async_tx_ack(tx);
+ msleep(8);
+
+ if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ DMA_SUCCESS) {
+ dev_printk(KERN_ERR, dma_chan->device->dev,
+ "Self-test xor timed out, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ iop_chan = to_iop_adma_chan(dma_chan);
+ dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
+ u32 *ptr = page_address(dest);
+ if (ptr[i] != cmp_word) {
+ dev_printk(KERN_ERR, dma_chan->device->dev,
+ "Self-test xor failed compare, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+ }
+ dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
+ PAGE_SIZE, DMA_TO_DEVICE);
+
+ /* skip zero sum if the capability is not present */
+ if (!dma_has_cap(DMA_ZERO_SUM, dma_chan->device->cap_mask))
+ goto free_resources;
+
+ /* zero sum the sources with the destination page */
+ for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
+ zero_sum_srcs[i] = xor_srcs[i];
+ zero_sum_srcs[i] = dest;
+
+ zero_sum_result = 1;
+
+ tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
+ PAGE_SIZE, &zero_sum_result, 1);
+ for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
+ dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
+ 0, PAGE_SIZE, DMA_TO_DEVICE);
+ iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
+ }
+
+ cookie = iop_adma_tx_submit(tx);
+ iop_adma_issue_pending(dma_chan);
+ async_tx_ack(tx);
+ msleep(8);
+
+ if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ dev_printk(KERN_ERR, dma_chan->device->dev,
+ "Self-test zero sum timed out, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ if (zero_sum_result != 0) {
+ dev_printk(KERN_ERR, dma_chan->device->dev,
+ "Self-test zero sum failed compare, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ /* test memset */
+ tx = iop_adma_prep_dma_memset(dma_chan, 0, PAGE_SIZE, 1);
+ dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ iop_adma_set_dest(dma_addr, tx, 0);
+
+ cookie = iop_adma_tx_submit(tx);
+ iop_adma_issue_pending(dma_chan);
+ async_tx_ack(tx);
+ msleep(8);
+
+ if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ dev_printk(KERN_ERR, dma_chan->device->dev,
+ "Self-test memset timed out, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
+ u32 *ptr = page_address(dest);
+ if (ptr[i]) {
+ dev_printk(KERN_ERR, dma_chan->device->dev,
+ "Self-test memset failed compare, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+ }
+
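+ /* the memset test zeroed the destination page while the sources
+ * are unchanged, so the set no longer XORs to zero and zero_sum
+ * is expected to report a mismatch (result == 1)
+ */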
+ /* test for non-zero parity sum */
+ zero_sum_result = 0;
+ tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
+ PAGE_SIZE, &zero_sum_result, 1);
+ for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
+ dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
+ 0, PAGE_SIZE, DMA_TO_DEVICE);
+ iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
+ }
+
+ cookie = iop_adma_tx_submit(tx);
+ iop_adma_issue_pending(dma_chan);
+ async_tx_ack(tx);
+ msleep(8);
+
+ if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ dev_printk(KERN_ERR, dma_chan->device->dev,
+ "Self-test non-zero sum timed out, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ if (zero_sum_result != 1) {
+ dev_printk(KERN_ERR, dma_chan->device->dev,
+ "Self-test non-zero sum failed compare, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+free_resources:
+ iop_adma_free_chan_resources(dma_chan);
+out:
+ src_idx = IOP_ADMA_NUM_SRC_TEST;
+ while (src_idx--)
+ __free_page(xor_srcs[src_idx]);
+ __free_page(dest);
+ return err;
+}
+
+static int __devexit iop_adma_remove(struct platform_device *dev)
+{
+ struct iop_adma_device *device = platform_get_drvdata(dev);
+ struct dma_chan *chan, *_chan;
+ struct iop_adma_chan *iop_chan;
+ int i;
+ struct iop_adma_platform_data *plat_data = dev->dev.platform_data;
+
+ dma_async_device_unregister(&device->common);
+
+ for (i = 0; i < 3; i++) {
+ unsigned int irq;
+ irq = platform_get_irq(dev, i);
+ free_irq(irq, device);
+ }
+
+ dma_free_coherent(&dev->dev, plat_data->pool_size,
+ device->dma_desc_pool_virt, device->dma_desc_pool);
+
+ do {
+ struct resource *res;
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, res->end - res->start);
+ } while (0);
+
+ list_for_each_entry_safe(chan, _chan, &device->common.channels,
+ device_node) {
+ iop_chan = to_iop_adma_chan(chan);
+ list_del(&chan->device_node);
+ kfree(iop_chan);
+ }
+ kfree(device);
+
+ return 0;
+}
+
+static int __devinit iop_adma_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret = 0, i;
+ struct iop_adma_device *adev;
+ struct iop_adma_chan *iop_chan;
+ struct dma_device *dma_dev;
+ struct iop_adma_platform_data *plat_data = pdev->dev.platform_data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ res->end - res->start, pdev->name))
+ return -EBUSY;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+ dma_dev = &adev->common;
+
+ /* allocate coherent memory for hardware descriptors
+ * note: writecombine gives slightly better performance, but
+ * requires that we explicitly flush the writes
+ */
+ if ((adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
+ plat_data->pool_size,
+ &adev->dma_desc_pool,
+ GFP_KERNEL)) == NULL) {
+ ret = -ENOMEM;
+ goto err_free_adev;
+ }
+
+ dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n",
+ __FUNCTION__, adev->dma_desc_pool_virt,
+ (void *) adev->dma_desc_pool);
+
+ adev->id = plat_data->hw_id;
+
+ /* discover transaction capabilities from the platform data */
+ dma_dev->cap_mask = plat_data->cap_mask;
+
+ adev->pdev = pdev;
+ platform_set_drvdata(pdev, adev);
+
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ /* set base routines */
+ dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
+ dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
+ dma_dev->device_is_tx_complete = iop_adma_is_complete;
+
dma_dev->device_issue_pending = iop_adma_issue_pending; + dma_dev->device_dependency_added = iop_adma_dependency_added; + dma_dev->dev = &pdev->dev; + + /* set prep routines based on capability */ + if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) + dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy; + if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) + dma_dev->device_prep_dma_memset = iop_adma_prep_dma_memset; + if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { + dma_dev->max_xor = iop_adma_get_max_xor(); + dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor; + } + if (dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask)) + dma_dev->device_prep_dma_zero_sum = + iop_adma_prep_dma_zero_sum; + if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask)) + dma_dev->device_prep_dma_interrupt = + iop_adma_prep_dma_interrupt; + + iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL); + if (!iop_chan) { + ret = -ENOMEM; + goto err_free_dma; + } + iop_chan->device = adev; + + iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start, + res->end - res->start); + if (!iop_chan->mmr_base) { + ret = -ENOMEM; + goto err_free_iop_chan; + } + tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long) + iop_chan); + + /* clear errors before enabling interrupts */ + iop_adma_device_clear_err_status(iop_chan); + + for (i = 0; i < 3; i++) { + irq_handler_t handler[] = { iop_adma_eot_handler, + iop_adma_eoc_handler, + iop_adma_err_handler }; + int irq = platform_get_irq(pdev, i); + if (irq < 0) { + ret = -ENXIO; + goto err_free_iop_chan; + } else { + ret = devm_request_irq(&pdev->dev, irq, + handler[i], 0, pdev->name, iop_chan); + if (ret) + goto err_free_iop_chan; + } + } + + spin_lock_init(&iop_chan->lock); + init_timer(&iop_chan->cleanup_watchdog); + iop_chan->cleanup_watchdog.data = (unsigned long) iop_chan; + iop_chan->cleanup_watchdog.function = iop_adma_tasklet; + INIT_LIST_HEAD(&iop_chan->chain); + INIT_LIST_HEAD(&iop_chan->all_slots); + INIT_RCU_HEAD(&iop_chan->common.rcu); + iop_chan->common.device = dma_dev; + list_add_tail(&iop_chan->common.device_node, &dma_dev->channels); + + if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { + ret = iop_adma_memcpy_self_test(adev); + dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret); + if (ret) + goto err_free_iop_chan; + } + + if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) || + dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) { + ret = iop_adma_xor_zero_sum_self_test(adev); + dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); + if (ret) + goto err_free_iop_chan; + } + + dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: " + "( %s%s%s%s%s%s%s%s%s%s)\n", + dma_has_cap(DMA_PQ_XOR, dma_dev->cap_mask) ? "pq_xor " : "", + dma_has_cap(DMA_PQ_UPDATE, dma_dev->cap_mask) ? "pq_update " : "", + dma_has_cap(DMA_PQ_ZERO_SUM, dma_dev->cap_mask) ? "pq_zero_sum " : "", + dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", + dma_has_cap(DMA_DUAL_XOR, dma_dev->cap_mask) ? "dual_xor " : "", + dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask) ? "xor_zero_sum " : "", + dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", + dma_has_cap(DMA_MEMCPY_CRC32C, dma_dev->cap_mask) ? "cpy+crc " : "", + dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", + dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? 
"intr " : ""); + + dma_async_device_register(dma_dev); + goto out; + + err_free_iop_chan: + kfree(iop_chan); + err_free_dma: + dma_free_coherent(&adev->pdev->dev, plat_data->pool_size, + adev->dma_desc_pool_virt, adev->dma_desc_pool); + err_free_adev: + kfree(adev); + out: + return ret; +} + +static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan) +{ + struct iop_adma_desc_slot *sw_desc, *grp_start; + dma_cookie_t cookie; + int slot_cnt, slots_per_op; + + dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__); + + spin_lock_bh(&iop_chan->lock); + slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op); + sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); + if (sw_desc) { + grp_start = sw_desc->group_head; + + list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain); + sw_desc->async_tx.ack = 1; + iop_desc_init_memcpy(grp_start, 0); + iop_desc_set_byte_count(grp_start, iop_chan, 0); + iop_desc_set_dest_addr(grp_start, iop_chan, 0); + iop_desc_set_memcpy_src_addr(grp_start, 0); + + cookie = iop_chan->common.cookie; + cookie++; + if (cookie <= 1) + cookie = 2; + + /* initialize the completed cookie to be less than + * the most recently used cookie + */ + iop_chan->completed_cookie = cookie - 1; + iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie; + + /* channel should not be busy */ + BUG_ON(iop_chan_is_busy(iop_chan)); + + /* clear any prior error-status bits */ + iop_adma_device_clear_err_status(iop_chan); + + /* disable operation */ + iop_chan_disable(iop_chan); + + /* set the descriptor address */ + iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys); + + /* 1/ don't add pre-chained descriptors + * 2/ dummy read to flush next_desc write + */ + BUG_ON(iop_desc_get_next_desc(sw_desc)); + + /* run the descriptor */ + iop_chan_enable(iop_chan); + } else + dev_printk(KERN_ERR, iop_chan->device->common.dev, + "failed to allocate null descriptor\n"); + spin_unlock_bh(&iop_chan->lock); +} + +static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan) +{ + struct iop_adma_desc_slot *sw_desc, *grp_start; + dma_cookie_t cookie; + int slot_cnt, slots_per_op; + + dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__); + + spin_lock_bh(&iop_chan->lock); + slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op); + sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); + if (sw_desc) { + grp_start = sw_desc->group_head; + list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain); + sw_desc->async_tx.ack = 1; + iop_desc_init_null_xor(grp_start, 2, 0); + iop_desc_set_byte_count(grp_start, iop_chan, 0); + iop_desc_set_dest_addr(grp_start, iop_chan, 0); + iop_desc_set_xor_src_addr(grp_start, 0, 0); + iop_desc_set_xor_src_addr(grp_start, 1, 0); + + cookie = iop_chan->common.cookie; + cookie++; + if (cookie <= 1) + cookie = 2; + + /* initialize the completed cookie to be less than + * the most recently used cookie + */ + iop_chan->completed_cookie = cookie - 1; + iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie; + + /* channel should not be busy */ + BUG_ON(iop_chan_is_busy(iop_chan)); + + /* clear any prior error-status bits */ + iop_adma_device_clear_err_status(iop_chan); + + /* disable operation */ + iop_chan_disable(iop_chan); + + /* set the descriptor address */ + iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys); + + /* 1/ don't add pre-chained descriptors + * 2/ dummy read to flush next_desc write + */ + BUG_ON(iop_desc_get_next_desc(sw_desc)); + + /* run the descriptor */ + 
iop_chan_enable(iop_chan);
+ } else
+ dev_printk(KERN_ERR, iop_chan->device->common.dev,
+ "failed to allocate null descriptor\n");
+ spin_unlock_bh(&iop_chan->lock);
+}
+
+static struct platform_driver iop_adma_driver = {
+ .probe = iop_adma_probe,
+ .remove = iop_adma_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "iop-adma",
+ },
+};
+
+static int __init iop_adma_init(void)
+{
+ /* it's currently unsafe to unload this module */
+ /* if forced, worst case is that rmmod hangs */
+ __unsafe(THIS_MODULE);
+
+ return platform_driver_register(&iop_adma_driver);
+}
+
+static void __exit iop_adma_exit(void)
+{
+ platform_driver_unregister(&iop_adma_driver);
+}
+
+module_init(iop_adma_init);
+module_exit(iop_adma_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("IOP ADMA Engine Driver");
+MODULE_LICENSE("GPL");
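[Editor's note] For reference, a dmaengine client would drive this driver through the generic hooks it registers in iop_adma_probe(), in the same order the memcpy self-test above uses them directly. The sketch below is illustrative only and is not part of the patch: example_async_copy is a hypothetical helper, error handling and unmapping are omitted, and it assumes the channel was already granted to the client and its resources allocated.

    #include <linux/dmaengine.h>

    /* minimal sketch: submit one DMA_MEMCPY and return its cookie */
    static dma_cookie_t example_async_copy(struct dma_chan *chan,
        void *dest, void *src, size_t len)
    {
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dest_dma, src_dma;
        dma_cookie_t cookie;

        /* int_en = 1 requests an end-of-transfer interrupt */
        tx = dev->device_prep_dma_memcpy(chan, len, 1);
        if (!tx)
            return -ENOMEM;

        /* map the buffers and bind them to the descriptor */
        dest_dma = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
        tx->tx_set_dest(dest_dma, tx, 0);
        src_dma = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
        tx->tx_set_src(src_dma, tx, 0);

        /* queue the descriptor and kick the channel */
        cookie = tx->tx_submit(tx);
        dma_async_issue_pending(chan);

        /* caller polls completion via device_is_tx_complete() and
         * acks the descriptor (async_tx_ack) when done with it
         */
        return cookie;
    }

The hooks exercised here (device_prep_dma_memcpy, tx_set_src/tx_set_dest, tx_submit, device_issue_pending) are exactly the operations the self-tests call through their iop_adma_* implementations.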