author     David S. Miller <davem@davemloft.net>    2008-03-19 04:52:48 -0700
committer  David S. Miller <davem@davemloft.net>    2008-04-23 23:32:16 -0700
commit     c1b1a5f1f1b2612b69b67381b223bce9f8ec4da5 (patch)
tree       e95abb7a3dbd0ca6c341c44f2b69dd174fd03bec /arch/sparc64/kernel/iommu.c
parent     0c49a573ea93f777fd27f26b7853e7bf5a20e1a3 (diff)
[SPARC64]: NUMA device infrastructure.
Record and propagate NUMA information for devices.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/kernel/iommu.c')
-rw-r--r--  arch/sparc64/kernel/iommu.c  33
1 file changed, 21 insertions(+), 12 deletions(-)
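
The recurring pattern in the hunks below is to thread the device's NUMA node into the IOMMU setup path and swap node-unaware allocators (kzalloc(), get_zeroed_page(), __get_free_pages()) for their node-aware counterparts (kmalloc_node(), alloc_pages_node()). A minimal sketch of that pattern follows; it is illustrative only, not part of the patch, and the helper name iommu_alloc_node_pages() is hypothetical:

/* Sketch: allocate and zero 'size' bytes, preferring pages from
 * 'numa_node', the way the patch does for the dummy page and the TSB.
 * Hypothetical helper for illustration, not a kernel or sparc64 function.
 */
static void *iommu_alloc_node_pages(int numa_node, size_t size)
{
        struct page *page;
        unsigned long order = get_order(size);

        /* alloc_pages_node() prefers the given node but may fall back
         * to other nodes under memory pressure.
         */
        page = alloc_pages_node(numa_node, GFP_KERNEL, order);
        if (!page)
                return NULL;

        /* Unlike kzalloc()/get_zeroed_page(), nothing zeroes the memory
         * for us, hence the explicit memset() calls the patch adds.
         */
        memset(page_address(page), 0, PAGE_SIZE << order);
        return page_address(page);
}

The node itself is recorded per device (the dev->archdata.numa_node field consulted in dma_4u_alloc_coherent() below), so the IOMMU arena map, dummy page, page table, and coherent DMA buffers all end up on memory local to the device's bus.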
diff --git a/arch/sparc64/kernel/iommu.c b/arch/sparc64/kernel/iommu.c
index 756fa24eeef..2a37a6ca2a1 100644
--- a/arch/sparc64/kernel/iommu.c
+++ b/arch/sparc64/kernel/iommu.c
@@ -173,9 +173,11 @@ void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long np
}
int iommu_table_init(struct iommu *iommu, int tsbsize,
- u32 dma_offset, u32 dma_addr_mask)
+ u32 dma_offset, u32 dma_addr_mask,
+ int numa_node)
{
- unsigned long i, tsbbase, order, sz, num_tsb_entries;
+ unsigned long i, order, sz, num_tsb_entries;
+ struct page *page;
num_tsb_entries = tsbsize / sizeof(iopte_t);
@@ -188,11 +190,12 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
/* Allocate and initialize the free area map. */
sz = num_tsb_entries / 8;
sz = (sz + 7UL) & ~7UL;
- iommu->arena.map = kzalloc(sz, GFP_KERNEL);
+ iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
if (!iommu->arena.map) {
printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
return -ENOMEM;
}
+ memset(iommu->arena.map, 0, sz);
iommu->arena.limit = num_tsb_entries;
if (tlb_type != hypervisor)
@@ -201,21 +204,23 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
/* Allocate and initialize the dummy page which we
* set inactive IO PTEs to point to.
*/
- iommu->dummy_page = get_zeroed_page(GFP_KERNEL);
- if (!iommu->dummy_page) {
+ page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
+ if (!page) {
printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
goto out_free_map;
}
+ iommu->dummy_page = (unsigned long) page_address(page);
+ memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
/* Now allocate and setup the IOMMU page table itself. */
order = get_order(tsbsize);
- tsbbase = __get_free_pages(GFP_KERNEL, order);
- if (!tsbbase) {
+ page = alloc_pages_node(numa_node, GFP_KERNEL, order);
+ if (!page) {
printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
goto out_free_dummy_page;
}
- iommu->page_table = (iopte_t *)tsbbase;
+ iommu->page_table = (iopte_t *)page_address(page);
for (i = 0; i < num_tsb_entries; i++)
iopte_make_dummy(iommu, &iommu->page_table[i]);
@@ -276,20 +281,24 @@ static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp)
{
+ unsigned long flags, order, first_page;
struct iommu *iommu;
+ struct page *page;
+ int npages, nid;
iopte_t *iopte;
- unsigned long flags, order, first_page;
void *ret;
- int npages;
size = IO_PAGE_ALIGN(size);
order = get_order(size);
if (order >= 10)
return NULL;
- first_page = __get_free_pages(gfp, order);
- if (first_page == 0UL)
+ nid = dev->archdata.numa_node;
+ page = alloc_pages_node(nid, gfp, order);
+ if (unlikely(!page))
return NULL;
+
+ first_page = (unsigned long) page_address(page);
memset((char *)first_page, 0, PAGE_SIZE << order);
iommu = dev->archdata.iommu;