author     FUJITA Tomonori <tomof@acm.org>                        2008-02-04 22:28:09 -0800
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2008-02-05 09:44:11 -0800
commit     383af9525bb27f927511874f6306247ec13f1c28 (patch)
tree       c2a88846ba944954c87aaeb9087fc5ff0f0f9d57 /arch/powerpc
parent     fb3475e9b6bfa666107512fbd6006c26014f04b8 (diff)
iommu sg: powerpc: remove DMA 4GB boundary protection
Previously, during initialization of the IOMMU tables, the last entry at each 4GB boundary was marked as used, since many adapters cannot handle DMAing across any 4GB boundary. The IOMMU no longer allocates a memory area that spans an LLD's segment boundary, and the segment boundary of devices is set to 4GB by default, so the 4GB boundary protection can be removed now.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
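The reasoning above leans on the allocator now rejecting any candidate range that would straddle the device's segment boundary. As a rough illustration (a standalone sketch, not the kernel's own code: spans_boundary, IOMMU_PAGE_SHIFT_ASSUMED and the main() driver are made-up names here), the boundary check looks conceptually like this:

#include <stdio.h>

#define IOMMU_PAGE_SHIFT_ASSUMED 12	/* assumption: 4K IOMMU pages */

/*
 * Return non-zero if a run of nr IOMMU pages starting at index would
 * cross the boundary; boundary_size is expressed in IOMMU pages and
 * shift is the table's starting offset in IOMMU pages.
 */
static int spans_boundary(unsigned long index, unsigned long nr,
			  unsigned long shift, unsigned long boundary_size)
{
	shift = (shift + index) & (boundary_size - 1);
	return shift + nr > boundary_size;
}

int main(void)
{
	/* The default 4GB segment boundary, in IOMMU pages. */
	unsigned long boundary =
		(unsigned long)((1ULL << 32) >> IOMMU_PAGE_SHIFT_ASSUMED);

	/* Ends exactly at the boundary: allowed (prints 0). */
	printf("%d\n", spans_boundary(boundary - 8, 8, 0, boundary));
	/* Crosses the boundary: the allocator must skip past it (prints 1). */
	printf("%d\n", spans_boundary(boundary - 4, 8, 0, boundary));
	return 0;
}

With every allocation screened this way, reserving the last bitmap entry before each 4GB line becomes unnecessary, which is exactly what the hunks below delete.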
Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/kernel/iommu.c   21
1 file changed, 1 insertion(+), 20 deletions(-)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index c42219c0afd..8f1f4e539c4 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -453,9 +453,6 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 {
 	unsigned long sz;
-	unsigned long start_index, end_index;
-	unsigned long entries_per_4g;
-	unsigned long index;
 	static int welcomed = 0;
 	struct page *page;
 
@@ -477,6 +474,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 
 #ifdef CONFIG_CRASH_DUMP
 	if (ppc_md.tce_get) {
+		unsigned long index;
 		unsigned long tceval;
 		unsigned long tcecount = 0;
 
@@ -507,23 +505,6 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
 #endif
 
-	/*
-	 * DMA cannot cross 4 GB boundary.  Mark last entry of each 4
-	 * GB chunk as reserved.
-	 */
-	if (protect4gb) {
-		entries_per_4g = 0x100000000l >> IOMMU_PAGE_SHIFT;
-
-		/* Mark the last bit before a 4GB boundary as used */
-		start_index = tbl->it_offset | (entries_per_4g - 1);
-		start_index -= tbl->it_offset;
-
-		end_index = tbl->it_size;
-
-		for (index = start_index; index < end_index - 1; index += entries_per_4g)
-			__set_bit(index, tbl->it_map);
-	}
-
 	if (!welcomed) {
 		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
 		       novmerge ? "disabled" : "enabled");
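On the commit message's claim that the segment boundary of devices is 4GB by default: when a driver supplies no explicit DMA parameters, the boundary mask falls back to 0xffffffff, i.e. no mapped segment may cross a 4GB line. The sketch below is a simplified stand-in for that fallback (the struct and function names are illustrative, not the kernel's definitions):

#include <stdio.h>

/* Illustrative stand-ins for the per-device DMA parameters. */
struct demo_dma_parms {
	unsigned long long segment_boundary_mask;
};

struct demo_device {
	struct demo_dma_parms *dma_parms;	/* NULL unless a driver set one */
};

/* Fallback lookup: an explicit mask if present, otherwise 4GB - 1. */
static unsigned long long demo_seg_boundary(const struct demo_device *dev)
{
	return dev->dma_parms ? dev->dma_parms->segment_boundary_mask
			      : 0xffffffffULL;
}

int main(void)
{
	struct demo_device plain = { .dma_parms = NULL };
	struct demo_dma_parms parms = { .segment_boundary_mask = 0xffffULL };
	struct demo_device tuned = { .dma_parms = &parms };

	/* Default: 0xffffffff, so mappings never cross a 4GB boundary. */
	printf("default mask:    0x%llx\n", demo_seg_boundary(&plain));
	/* A device that cannot cross 64K lines would carry a smaller mask. */
	printf("driver-set mask: 0x%llx\n", demo_seg_boundary(&tuned));
	return 0;
}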