-rw-r--r--  Documentation/cachetlb.txt   |  2 +-
-rw-r--r--  arch/powerpc/mm/imalloc.c    |  3 ++-
-rw-r--r--  arch/powerpc/mm/pgtable_64.c |  1 -
-rw-r--r--  include/linux/vmalloc.h      |  3 ++-
-rw-r--r--  mm/vmalloc.c                 | 13 +++++++++----
5 files changed, 14 insertions(+), 8 deletions(-)
diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt
index debf6813934..866b7613942 100644
--- a/Documentation/cachetlb.txt
+++ b/Documentation/cachetlb.txt
@@ -253,7 +253,7 @@ Here are the routines, one by one:
The first of these two routines is invoked after map_vm_area()
has installed the page table entries. The second is invoked
- before unmap_vm_area() deletes the page table entries.
+ before unmap_kernel_range() deletes the page table entries.
There exists another whole class of cpu cache issues which currently
require a whole different set of interfaces to handle properly.
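
As a hedged illustration of the ordering described in the hunk above (a sketch
only, not code from this patch; where the flush calls actually live in the
kernel source is not shown here), the documented constraint reads:

#include <linux/vmalloc.h>
#include <asm/cacheflush.h>

/*
 * Ordering sketch only: flush_cache_vmap() runs after the page table
 * entries are installed, flush_cache_vunmap() runs before
 * unmap_kernel_range() deletes them.
 */
static void vmap_flush_order(struct vm_struct *area, pgprot_t prot,
			     struct page ***pages)
{
	unsigned long start = (unsigned long)area->addr;
	unsigned long end = start + area->size;

	map_vm_area(area, prot, pages);		/* install page table entries */
	flush_cache_vmap(start, end);		/* first routine */

	/* ... the mapping is used here ... */

	flush_cache_vunmap(start, end);		/* second routine */
	unmap_kernel_range(start, area->size);	/* delete page table entries */
}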
diff --git a/arch/powerpc/mm/imalloc.c b/arch/powerpc/mm/imalloc.c
index c831815c31f..9eddf37303d 100644
--- a/arch/powerpc/mm/imalloc.c
+++ b/arch/powerpc/mm/imalloc.c
@@ -301,7 +301,8 @@ void im_free(void * addr)
for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
if (tmp->addr == addr) {
*p = tmp->next;
- unmap_vm_area(tmp);
+ unmap_kernel_range((unsigned long)tmp->addr,
+ tmp->size);
kfree(tmp);
mutex_unlock(&imlist_mutex);
return;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index ad6e135bf21..fa5c828d387 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -240,7 +240,6 @@ int __ioremap_explicit(phys_addr_t pa, unsigned long ea,
/*
* Unmap an IO region and remove it from imalloc'd list.
* Access to IO memory should be serialized by driver.
- * This code is modeled after vmalloc code - unmap_vm_area()
*
* XXX what about calls before mem_init_done (ie python_countermeasures())
*/
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 4b7ee83787c..132b260aef1 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -65,9 +65,10 @@ extern struct vm_struct *get_vm_area_node(unsigned long size,
unsigned long flags, int node,
gfp_t gfp_mask);
extern struct vm_struct *remove_vm_area(void *addr);
+
extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
struct page ***pages);
-extern void unmap_vm_area(struct vm_struct *area);
+extern void unmap_kernel_range(unsigned long addr, unsigned long size);
/*
* Internals. Dont't use..
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d3a9c536825..ddf87145cc4 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -68,12 +68,12 @@ static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
} while (pud++, addr = next, addr != end);
}
-void unmap_vm_area(struct vm_struct *area)
+void unmap_kernel_range(unsigned long addr, unsigned long size)
{
pgd_t *pgd;
unsigned long next;
- unsigned long addr = (unsigned long) area->addr;
- unsigned long end = addr + area->size;
+ unsigned long start = addr;
+ unsigned long end = addr + size;
BUG_ON(addr >= end);
pgd = pgd_offset_k(addr);
@@ -84,7 +84,12 @@ void unmap_vm_area(struct vm_struct *area)
continue;
vunmap_pud_range(pgd, addr, next);
} while (pgd++, addr = next, addr != end);
- flush_tlb_kernel_range((unsigned long) area->addr, end);
+ flush_tlb_kernel_range(start, end);
+}
+
+static void unmap_vm_area(struct vm_struct *area)
+{
+ unmap_kernel_range((unsigned long)area->addr, area->size);
}
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
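
For callers outside mm/vmalloc.c the conversion mirrors the imalloc.c hunk
above: where a struct vm_struct used to be handed to unmap_vm_area(), its
start address and size are now passed explicitly. A minimal sketch follows;
example_region and example_teardown() are hypothetical names, only the
unmap_kernel_range() prototype comes from this patch.

#include <linux/vmalloc.h>

/* Hypothetical mapping descriptor, for illustration only. */
static struct vm_struct *example_region;

static void example_teardown(void)
{
	/* Before this patch:  unmap_vm_area(example_region); */
	unmap_kernel_range((unsigned long)example_region->addr,
			   example_region->size);
}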