Diffstat (limited to 'arch/sparc64/kernel/iommu.c')
 arch/sparc64/kernel/iommu.c | 26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/arch/sparc64/kernel/iommu.c b/arch/sparc64/kernel/iommu.c
index d3276ebcfb4..756fa24eeef 100644
--- a/arch/sparc64/kernel/iommu.c
+++ b/arch/sparc64/kernel/iommu.c
@@ -134,7 +134,8 @@ unsigned long iommu_range_alloc(struct device *dev,
else
boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);
- n = iommu_area_alloc(arena->map, limit, start, npages, 0,
+ n = iommu_area_alloc(arena->map, limit, start, npages,
+ iommu->page_table_map_base >> IO_PAGE_SHIFT,
boundary_size >> IO_PAGE_SHIFT, 0);
if (n == -1) {
if (likely(pass < 1)) {
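This first hunk fixes the offset handed to iommu_area_alloc(): the allocator's boundary check previously ran against table-relative indices (a shift of 0), but segment boundaries constrain bus addresses, which begin at iommu->page_table_map_base. A minimal sketch of the crossing test involved, in IO-page units (illustrative only; the real helper lives in lib/iommu-helper.c and its exact signature may differ):

	/* Does an allocation of 'npages' starting at table index 'index'
	 * cross a 'boundary_size' boundary once the table's bus-address
	 * base ('shift') is added back in?  boundary_size is a power of
	 * two, so the mask below isolates the offset within one window.
	 */
	static inline int crosses_boundary(unsigned long index,
					   unsigned long shift,
					   unsigned long boundary_size,
					   unsigned long npages)
	{
		unsigned long offset = (index + shift) & (boundary_size - 1);

		return offset + npages > boundary_size;
	}

With shift = iommu->page_table_map_base >> IO_PAGE_SHIFT, a range the allocator accepts is also boundary-safe in bus-address space.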
@@ -200,12 +201,11 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
/* Allocate and initialize the dummy page which we
* set inactive IO PTEs to point to.
*/
- iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
+ iommu->dummy_page = get_zeroed_page(GFP_KERNEL);
if (!iommu->dummy_page) {
printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
goto out_free_map;
}
- memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
/* Now allocate and setup the IOMMU page table itself. */
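The iommu_table_init() hunk is a straightforward simplification: get_zeroed_page() allocates one page and returns it already cleared, so the explicit memset() goes away. The two forms are equivalent (comparison sketch):

	unsigned long page;

	/* Old form: allocate a zero-order page, then clear it by hand. */
	page = __get_free_pages(GFP_KERNEL, 0);
	if (page)
		memset((void *)page, 0, PAGE_SIZE);

	/* New form: the allocator zeroes the page itself. */
	page = get_zeroed_page(GFP_KERNEL);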
@@ -516,9 +516,11 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
unsigned long flags, handle, prot, ctx;
dma_addr_t dma_next = 0, dma_addr;
unsigned int max_seg_size;
+ unsigned long seg_boundary_size;
int outcount, incount, i;
struct strbuf *strbuf;
struct iommu *iommu;
+ unsigned long base_shift;
BUG_ON(direction == DMA_NONE);
@@ -549,8 +551,11 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
outs->dma_length = 0;
max_seg_size = dma_get_max_seg_size(dev);
+ seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+ IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
+ base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
for_each_sg(sglist, s, nelems, i) {
- unsigned long paddr, npages, entry, slen;
+ unsigned long paddr, npages, entry, out_entry = 0, slen;
iopte_t *base;
slen = s->length;
@@ -593,7 +598,9 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
* - allocated dma_addr isn't contiguous to previous allocation
*/
if ((dma_addr != dma_next) ||
- (outs->dma_length + s->length > max_seg_size)) {
+ (outs->dma_length + s->length > max_seg_size) ||
+ (is_span_boundary(out_entry, base_shift,
+ seg_boundary_size, outs, s))) {
/* Can't merge: create a new segment */
segstart = s;
outcount++;
@@ -607,6 +614,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
/* This is a new segment, fill entries */
outs->dma_address = dma_addr;
outs->dma_length = slen;
+ out_entry = entry;
}
/* Calculate next page pointer for contiguous check */
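The dma_4u_map_sg() hunks make scatterlist merging respect the device's segment boundary: two entries are merged only if their DMA addresses are contiguous, the combined length fits dma_get_max_seg_size(), and the merged segment would not straddle a dma_get_seg_boundary() window. The new out_entry variable records the IOPTE index at which the current output segment began, so the span check starts from the segment's true beginning rather than from the entry being appended. A hedged sketch of what the is_span_boundary() call must compute (the actual helper is defined elsewhere in the sparc64 tree and takes the scatterlist entries directly; the names below are illustrative):

	/* Would a merged segment of 'merged_len' bytes, whose first
	 * IOPTE index is 'entry', cross a window of 'boundary_size'
	 * IO pages?  'shift' is the table's bus-address base in IO
	 * pages, and 'merged_len' stands in for
	 * outs->dma_length + s->length.
	 */
	static int would_span_boundary(unsigned long entry,
				       unsigned long shift,
				       unsigned long boundary_size,
				       unsigned long merged_len)
	{
		unsigned long npages =
			(merged_len + IO_PAGE_SIZE - 1) >> IO_PAGE_SHIFT;
		unsigned long offset = (entry + shift) & (boundary_size - 1);

		return offset + npages > boundary_size;
	}

seg_boundary_size is rounded up to a whole number of IO pages precisely so this arithmetic can work in page units.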
@@ -626,7 +634,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
iommu_map_failed:
for_each_sg(sglist, s, nelems, i) {
if (s->dma_length != 0) {
- unsigned long vaddr, npages, entry, i;
+ unsigned long vaddr, npages, entry, j;
iopte_t *base;
vaddr = s->dma_address & IO_PAGE_MASK;
@@ -637,8 +645,8 @@ iommu_map_failed:
>> IO_PAGE_SHIFT;
base = iommu->page_table + entry;
- for (i = 0; i < npages; i++)
- iopte_make_dummy(iommu, base + i);
+ for (j = 0; j < npages; j++)
+ iopte_make_dummy(iommu, base + j);
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
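The iommu_map_failed cleanup hunk renames the inner loop counter from i to j. The old inner declaration shadowed the i that for_each_sg() uses to walk the outer list; that is legal C (the shadow is confined to the inner block) but easy to misread and flagged by -Wshadow. In miniature:

	int i;

	for (i = 0; i < nelems; i++) {		/* outer walk */
		unsigned long i;		/* shadows the outer 'i' */

		for (i = 0; i < npages; i++)	/* uses the inner 'i' only */
			iopte_make_dummy(iommu, base + i);
	}

Renaming the inner counter removes the shadow without changing behavior.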
@@ -803,7 +811,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
spin_unlock_irqrestore(&iommu->lock, flags);
}
-const struct dma_ops sun4u_dma_ops = {
+static const struct dma_ops sun4u_dma_ops = {
.alloc_coherent = dma_4u_alloc_coherent,
.free_coherent = dma_4u_free_coherent,
.map_single = dma_4u_map_single,