author	Divy Le Ray <divy@chelsio.com>	2009-05-28 11:23:02 +0000
committer	David S. Miller <davem@davemloft.net>	2009-05-29 01:54:37 -0700
commit	10b6d95612672f89deb39b5a60fb677c78ba4844 (patch)
tree	5b853a9db086d4c0894d93f54e5bf666f0b8fa70
parent	4d3383d0adb6d1047fb9ee3edd9dc05e4d2184f0 (diff)
cxgb3: fix dma mapping regression
Commit 5e68b772e6efd189d6aca76f6872fb75d51ace60 ("cxgb3: map entire Rx page, feed map+offset to Rx ring") introduced a regression on platforms that define DECLARE_PCI_UNMAP_ADDR() and the related macros as no-ops. Rx descriptors are fed with the page buffer bus address plus the page chunk offset. The page buffer bus address was set and retrieved through pci_unmap_addr_set()/pci_unmap_addr(), which are no-ops on x86 (when CONFIG_DMA_API_DEBUG is not set), so the HW ended up with a bogus bus address. This patch saves the page buffer bus address on all platforms.

Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
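For context, a minimal user-space sketch of the failure mode: when DECLARE_PCI_UNMAP_ADDR() and its accessors compile to nothing, the bus address handed to pci_unmap_addr_set() is discarded and pci_unmap_addr() yields 0, so the value fed to the ring is just the chunk offset. The macro bodies below paraphrase the no-op variants used when an arch keeps no unmap state, and the struct/field names only mirror the cxgb3 ones; treat the exact spellings as illustrative assumptions, not driver code.

/* Illustrative sketch only -- not kernel code.  The stray semicolon left
 * behind by the empty macro inside the struct is tolerated by gcc, which
 * is how the kernel uses these declarations. */
#include <stdio.h>

typedef unsigned long long dma_addr_t;

/* No-op variants, paraphrased */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME)          (0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)

struct chunk_old {                         /* pre-patch layout (hypothetical mirror) */
	unsigned int offset;
	DECLARE_PCI_UNMAP_ADDR(mapping);   /* expands to nothing here */
};

struct chunk_new {                         /* post-patch layout */
	unsigned int offset;
	dma_addr_t mapping;                /* bus address always stored */
};

int main(void)
{
	dma_addr_t bus = 0x12340000ULL;    /* pretend pci_map_page() result */

	struct chunk_old o = { .offset = 0x800 };
	pci_unmap_addr_set(&o, mapping, bus);              /* silently dropped */
	dma_addr_t fed_old = pci_unmap_addr(&o, mapping) + o.offset;

	struct chunk_new n = { .offset = 0x800, .mapping = bus };
	dma_addr_t fed_new = n.mapping + n.offset;

	printf("old scheme feeds HW 0x%llx (bogus), new scheme 0x%llx\n",
	       fed_old, fed_new);
	return 0;
}

Built with plain gcc, the old scheme prints only the 0x800 offset, which is exactly the kind of bogus address the adapter saw; keeping an explicit dma_addr_t field preserves the full bus address on every platform.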
-rw-r--r--	drivers/net/cxgb3/adapter.h	4
-rw-r--r--	drivers/net/cxgb3/sge.c	11
2 files changed, 7 insertions, 8 deletions
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 714df2b675e..c888e97c967 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -85,8 +85,8 @@ struct fl_pg_chunk {
 	struct page *page;
 	void *va;
 	unsigned int offset;
-	u64 *p_cnt;
-	DECLARE_PCI_UNMAP_ADDR(mapping);
+	unsigned long *p_cnt;
+	dma_addr_t mapping;
 };
 
 struct rx_desc;
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 26d3587f339..b3ee2bc1a00 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -355,7 +355,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
 		(*d->pg_chunk.p_cnt)--;
 		if (!*d->pg_chunk.p_cnt)
 			pci_unmap_page(pdev,
-				       pci_unmap_addr(&d->pg_chunk, mapping),
+				       d->pg_chunk.mapping,
 				       q->alloc_size, PCI_DMA_FROMDEVICE);
 
 		put_page(d->pg_chunk.page);
@@ -454,7 +454,7 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
 		q->pg_chunk.offset = 0;
 		mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
 				       0, q->alloc_size, PCI_DMA_FROMDEVICE);
-		pci_unmap_addr_set(&q->pg_chunk, mapping, mapping);
+		q->pg_chunk.mapping = mapping;
 	}
 
 	sd->pg_chunk = q->pg_chunk;
@@ -511,8 +511,7 @@ static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
 nomem:				q->alloc_failed++;
 				break;
 			}
-			mapping = pci_unmap_addr(&sd->pg_chunk, mapping) +
-				  sd->pg_chunk.offset;
+			mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
 			pci_unmap_addr_set(sd, dma_addr, mapping);
 
 			add_one_rx_chunk(mapping, d, q->gen);
@@ -881,7 +880,7 @@ recycle:
 	(*sd->pg_chunk.p_cnt)--;
 	if (!*sd->pg_chunk.p_cnt)
 		pci_unmap_page(adap->pdev,
-			       pci_unmap_addr(&sd->pg_chunk, mapping),
+			       sd->pg_chunk.mapping,
 			       fl->alloc_size,
 			       PCI_DMA_FROMDEVICE);
 	if (!skb) {
@@ -2096,7 +2095,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 	(*sd->pg_chunk.p_cnt)--;
 	if (!*sd->pg_chunk.p_cnt)
 		pci_unmap_page(adap->pdev,
-			       pci_unmap_addr(&sd->pg_chunk, mapping),
+			       sd->pg_chunk.mapping,
 			       fl->alloc_size,
 			       PCI_DMA_FROMDEVICE);