author    Jarod Wilson <jwilson@redhat.com>    2008-03-12 17:43:26 -0400
committer Stefan Richter <stefanr@s5r6.in-berlin.de>    2008-03-14 00:57:00 +0100
commit    bde1709aaa98f5004ab1580842c422be18eb4bc3 (patch)
tree      9c7a3241d88574680911a48e81ce8d6ad252e559 /drivers/firewire
parent    6e45ef4c7aeefbf97df748866cd1b24f73b86160 (diff)
firewire: fw-ohci: use dma_alloc_coherent for ar_buffer
Currently, we do nothing to guarantee we have a consistent DMA buffer for asynchronous receive packets. Rather than doing several syncs following a dma_map_single() to get consistent buffers, just switch to using dma_alloc_coherent().

This resolves constant buffer failures on my own x86_64 laptop with 4GB of RAM, and is likely to fix a number of other failures witnessed on x86_64 systems with 4GB of RAM or more.

Signed-off-by: Jarod Wilson <jwilson@redhat.com>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
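For illustration, the coherent-allocation pattern the patch switches to looks roughly like the sketch below. This is a simplified, stand-alone example, not the driver's actual code: the helpers alloc_ar_page()/free_ar_page() are invented names, and the real ar_buffer bookkeeping is reduced to a bare page.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/*
 * Allocate one page of coherent (consistent) DMA memory for an AR buffer.
 * Unlike __get_free_page() + dma_map_single(), no explicit dma_sync_*()
 * calls are needed before the CPU or the controller touches the buffer.
 */
static void *alloc_ar_page(struct device *dev, dma_addr_t *ab_bus)
{
	/* GFP_ATOMIC: this can be called from tasklet (atomic) context. */
	return dma_alloc_coherent(dev, PAGE_SIZE, ab_bus, GFP_ATOMIC);
}

/* Release a page obtained from alloc_ar_page(). */
static void free_ar_page(struct device *dev, void *ab, dma_addr_t ab_bus)
{
	dma_free_coherent(dev, PAGE_SIZE, ab, ab_bus);
}

In the driver itself, the corresponding calls live in ar_context_add_page() and ar_context_tasklet(), as the diff below shows.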
Diffstat (limited to 'drivers/firewire')
-rw-r--r-- | drivers/firewire/fw-ohci.c | 18
1 file changed, 5 insertions(+), 13 deletions(-)
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index eaa213e2159..fcf59fcae1b 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -284,16 +284,10 @@ static int ar_context_add_page(struct ar_context *ctx)
 	dma_addr_t ab_bus;
 	size_t offset;
 
-	ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC);
+	ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
 	if (ab == NULL)
 		return -ENOMEM;
 
-	ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(ab_bus)) {
-		free_page((unsigned long) ab);
-		return -ENOMEM;
-	}
-
 	memset(&ab->descriptor, 0, sizeof(ab->descriptor));
 	ab->descriptor.control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
 						    DESCRIPTOR_STATUS |
@@ -304,8 +298,6 @@ static int ar_context_add_page(struct ar_context *ctx)
 	ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
 	ab->descriptor.branch_address = 0;
 
-	dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
-
 	ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
 	ctx->last_buffer->next = ab;
 	ctx->last_buffer = ab;
@@ -409,6 +401,7 @@ static void ar_context_tasklet(unsigned long data)
 
 	if (d->res_count == 0) {
 		size_t size, rest, offset;
+		dma_addr_t buffer_bus;
 
 		/*
 		 * This descriptor is finished and we may have a
@@ -417,9 +410,7 @@ static void ar_context_tasklet(unsigned long data)
 		 */
 
 		offset = offsetof(struct ar_buffer, data);
-		dma_unmap_single(ohci->card.device,
-			le32_to_cpu(ab->descriptor.data_address) - offset,
-			PAGE_SIZE, DMA_BIDIRECTIONAL);
+		buffer_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
 
 		buffer = ab;
 		ab = ab->next;
@@ -435,7 +426,8 @@ static void ar_context_tasklet(unsigned long data)
 		while (buffer < end)
 			buffer = handle_ar_packet(ctx, buffer);
 
-		free_page((unsigned long)buffer);
+		dma_free_coherent(ohci->card.device, PAGE_SIZE,
+				  buffer, buffer_bus);
 		ar_context_add_page(ctx);
 	} else {
 		buffer = ctx->pointer;
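A note on the free path in the last two hunks: the descriptor's data_address field holds the bus address of ab->data, so stepping back by offsetof(struct ar_buffer, data) recovers the bus address of the whole page, which is what dma_free_coherent() must be given. Restated as a small hypothetical helper (assuming fw-ohci.c's struct ar_buffer and descriptor definitions are in scope; this helper does not exist in the driver):

/* Recover the bus address of the page backing an AR buffer. */
static dma_addr_t ar_buffer_bus(struct ar_buffer *ab)
{
	/* data_address points at ab->data; step back to the page start. */
	return le32_to_cpu(ab->descriptor.data_address)
	       - offsetof(struct ar_buffer, data);
}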