Diffstat (limited to 'arch/sparc64/kernel/pci_iommu.c')
-rw-r--r--  arch/sparc64/kernel/pci_iommu.c | 104
1 file changed, 43 insertions(+), 61 deletions(-)
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 7aca0f33f88..66712772f49 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -1,7 +1,6 @@
-/* $Id: pci_iommu.c,v 1.17 2001/12/17 07:05:09 davem Exp $
- * pci_iommu.c: UltraSparc PCI controller IOM/STC support.
+/* pci_iommu.c: UltraSparc PCI controller IOM/STC support.
*
- * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
* Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
*/
@@ -36,7 +35,7 @@
"i" (ASI_PHYS_BYPASS_EC_E))
/* Must be invoked under the IOMMU lock. */
-static void __iommu_flushall(struct pci_iommu *iommu)
+static void __iommu_flushall(struct iommu *iommu)
{
unsigned long tag;
int entry;
@@ -64,7 +63,7 @@ static void __iommu_flushall(struct pci_iommu *iommu)
#define IOPTE_IS_DUMMY(iommu, iopte) \
((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
-static inline void iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
+static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
unsigned long val = iopte_val(*iopte);
@@ -75,9 +74,9 @@ static inline void iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
}
/* Based largely upon the ppc64 iommu allocator. */
-static long pci_arena_alloc(struct pci_iommu *iommu, unsigned long npages)
+static long pci_arena_alloc(struct iommu *iommu, unsigned long npages)
{
- struct pci_iommu_arena *arena = &iommu->arena;
+ struct iommu_arena *arena = &iommu->arena;
unsigned long n, i, start, end, limit;
int pass;
@@ -116,7 +115,7 @@ again:
return n;
}
-static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
+static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
{
unsigned long i;
@@ -124,7 +123,7 @@ static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, un
__clear_bit(i, arena->map);
}
-void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
+void pci_iommu_table_init(struct iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
{
unsigned long i, tsbbase, order, sz, num_tsb_entries;
@@ -170,7 +169,7 @@ void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset,
iopte_make_dummy(iommu, &iommu->page_table[i]);
}
-static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npages)
+static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
{
long entry;
@@ -181,12 +180,12 @@ static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npage
return iommu->page_table + entry;
}
-static inline void free_npages(struct pci_iommu *iommu, dma_addr_t base, unsigned long npages)
+static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
{
pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
}
-static int iommu_alloc_ctx(struct pci_iommu *iommu)
+static int iommu_alloc_ctx(struct iommu *iommu)
{
int lowest = iommu->ctx_lowest_free;
int sz = IOMMU_NUM_CTXS - lowest;
@@ -205,7 +204,7 @@ static int iommu_alloc_ctx(struct pci_iommu *iommu)
return n;
}
-static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
+static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
if (likely(ctx)) {
__clear_bit(ctx, iommu->ctx_bitmap);
@@ -220,8 +219,7 @@ static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
*/
static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
+ struct iommu *iommu;
iopte_t *iopte;
unsigned long flags, order, first_page;
void *ret;
@@ -237,8 +235,7 @@ static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
return NULL;
memset((char *)first_page, 0, PAGE_SIZE << order);
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
+ iommu = pdev->dev.archdata.iommu;
spin_lock_irqsave(&iommu->lock, flags);
iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
@@ -268,14 +265,12 @@ static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
/* Free and unmap a consistent DMA translation. */
static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
+ struct iommu *iommu;
iopte_t *iopte;
unsigned long flags, order, npages;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
+ iommu = pdev->dev.archdata.iommu;
iopte = iommu->page_table +
((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
@@ -295,18 +290,16 @@ static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu,
*/
static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
- struct pci_strbuf *strbuf;
+ struct iommu *iommu;
+ struct strbuf *strbuf;
iopte_t *base;
unsigned long flags, npages, oaddr;
unsigned long i, base_paddr, ctx;
u32 bus_addr, ret;
unsigned long iopte_protection;
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
- strbuf = &pcp->pbm->stc;
+ iommu = pdev->dev.archdata.iommu;
+ strbuf = pdev->dev.archdata.stc;
if (unlikely(direction == PCI_DMA_NONE))
goto bad_no_ctx;
@@ -349,7 +342,7 @@ bad_no_ctx:
return PCI_DMA_ERROR_CODE;
}
-static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
+static void pci_strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
{
int limit;
@@ -416,9 +409,8 @@ do_flush_sync:
/* Unmap a single streaming mode DMA translation. */
static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
- struct pci_strbuf *strbuf;
+ struct iommu *iommu;
+ struct strbuf *strbuf;
iopte_t *base;
unsigned long flags, npages, ctx, i;
@@ -428,9 +420,8 @@ static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_
return;
}
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
- strbuf = &pcp->pbm->stc;
+ iommu = pdev->dev.archdata.iommu;
+ strbuf = pdev->dev.archdata.stc;
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
@@ -549,9 +540,8 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
*/
static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
- struct pci_strbuf *strbuf;
+ struct iommu *iommu;
+ struct strbuf *strbuf;
unsigned long flags, ctx, npages, iopte_protection;
iopte_t *base;
u32 dma_base;
@@ -570,9 +560,8 @@ static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
return 1;
}
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
- strbuf = &pcp->pbm->stc;
+ iommu = pdev->dev.archdata.iommu;
+ strbuf = pdev->dev.archdata.stc;
if (unlikely(direction == PCI_DMA_NONE))
goto bad_no_ctx;
@@ -636,9 +625,8 @@ bad_no_ctx:
/* Unmap a set of streaming mode DMA translations. */
static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
- struct pci_strbuf *strbuf;
+ struct iommu *iommu;
+ struct strbuf *strbuf;
iopte_t *base;
unsigned long flags, ctx, i, npages;
u32 bus_addr;
@@ -648,9 +636,8 @@ static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
WARN_ON(1);
}
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
- strbuf = &pcp->pbm->stc;
+ iommu = pdev->dev.archdata.iommu;
+ strbuf = pdev->dev.archdata.stc;
bus_addr = sglist->dma_address & IO_PAGE_MASK;
@@ -696,14 +683,12 @@ static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
*/
static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
- struct pci_strbuf *strbuf;
+ struct iommu *iommu;
+ struct strbuf *strbuf;
unsigned long flags, ctx, npages;
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
- strbuf = &pcp->pbm->stc;
+ iommu = pdev->dev.archdata.iommu;
+ strbuf = pdev->dev.archdata.stc;
if (!strbuf->strbuf_enabled)
return;
@@ -736,15 +721,13 @@ static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_
*/
static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
- struct pci_strbuf *strbuf;
+ struct iommu *iommu;
+ struct strbuf *strbuf;
unsigned long flags, ctx, npages, i;
u32 bus_addr;
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
- strbuf = &pcp->pbm->stc;
+ iommu = pdev->dev.archdata.iommu;
+ strbuf = pdev->dev.archdata.stc;
if (!strbuf->strbuf_enabled)
return;
@@ -775,7 +758,7 @@ static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist
spin_unlock_irqrestore(&iommu->lock, flags);
}
-struct pci_iommu_ops pci_sun4u_iommu_ops = {
+const struct pci_iommu_ops pci_sun4u_iommu_ops = {
.alloc_consistent = pci_4u_alloc_consistent,
.free_consistent = pci_4u_free_consistent,
.map_single = pci_4u_map_single,
@@ -809,13 +792,12 @@ static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
{
- struct pcidev_cookie *pcp = pdev->sysdata;
u64 dma_addr_mask;
if (pdev == NULL) {
dma_addr_mask = 0xffffffff;
} else {
- struct pci_iommu *iommu = pcp->pbm->iommu;
+ struct iommu *iommu = pdev->dev.archdata.iommu;
dma_addr_mask = iommu->dma_addr_mask;