author    Paul Mundt <lethal@linux-sh.org>    2012-08-01 13:49:13 +0900
committer Paul Mundt <lethal@linux-sh.org>    2012-08-01 13:49:13 +0900
commit    91ba548cfd5cc8ee93b9435527efb8fa4caf5c5e
tree      c96ed92413044a28d17783e84a8824bfd2437af1 /mm/vmalloc.c
parent    b9ccfda293ee6fca9a89a1584f0900e0627b975e
parent    4dc4c51675c137c30838425ecc8d471ff5eb138b
Merge branch 'sh/dmaengine' into sh-latest
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--    mm/vmalloc.c    52
1 file changed, 32 insertions(+), 20 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 2aad49981b5..2bb90b1d241 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -413,11 +413,11 @@ nocache:
if (addr + size - 1 < addr)
goto overflow;
- n = rb_next(&first->rb_node);
- if (n)
- first = rb_entry(n, struct vmap_area, rb_node);
- else
+ if (list_is_last(&first->list, &vmap_area_list))
goto found;
+
+ first = list_entry(first->list.next,
+ struct vmap_area, list);
}
found:
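
The hunk above swaps an rb_next() walk for a walk along vmap_area_list, which is kept sorted by address, so the search loop no longer re-descends the rbtree on every step. A minimal user-space sketch of the list_is_last()/list_entry() idiom it adopts (toy struct and hand-rolled list macros modeled on the kernel's, for illustration only):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* same pointer arithmetic as the kernel's container_of()-based macro */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static int list_is_last(const struct list_head *node,
			const struct list_head *head)
{
	return node->next == head;
}

/* toy stand-in for struct vmap_area */
struct area {
	unsigned long va_start, va_end;
	struct list_head list;
};

int main(void)
{
	struct list_head head;
	struct area a = { 0x1000, 0x2000 }, b = { 0x3000, 0x4000 };
	struct area *first = &a;

	/* hand-link head <-> a <-> b <-> head */
	head.next = &a.list;   a.list.prev = &head;
	a.list.next = &b.list; b.list.prev = &a.list;
	b.list.next = &head;   head.prev = &b.list;

	/* the advance-or-stop step from the patched search loop */
	while (!list_is_last(&first->list, &head))
		first = list_entry(first->list.next, struct area, list);

	printf("last area: [0x%lx, 0x%lx)\n", first->va_start, first->va_end);
	return 0;
}
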
@@ -904,6 +904,14 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
BUG_ON(size & ~PAGE_MASK);
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
+ if (WARN_ON(size == 0)) {
+ /*
+ * Allocating 0 bytes isn't what caller wants since
+ * get_order(0) returns funny result. Just warn and terminate
+ * early.
+ */
+ return NULL;
+ }
order = get_order(size);
again:
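
The new WARN_ON(size == 0) guard exists because get_order(0) underflows: the order is derived from (size - 1) >> PAGE_SHIFT, and for size == 0 the subtraction wraps to ~0UL. A user-space sketch of that failure mode (simplified loop-based model of get_order(), assuming 4 KiB pages; not the kernel implementation itself):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

/* simplified model of the kernel's get_order() */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;	/* wraps to ~0UL when size == 0 */
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	printf("get_order(1)    = %d\n", get_order(1));		/* 0 */
	printf("get_order(4096) = %d\n", get_order(4096));	/* 0 */
	printf("get_order(8192) = %d\n", get_order(8192));	/* 1 */
	printf("get_order(0)    = %d\n", get_order(0));		/* 52 on LP64: nonsense */
	return 0;
}

Warning and returning NULL up front is cheaper than letting that bogus order propagate into the per-CPU block allocator.
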
@@ -1280,7 +1288,7 @@ DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;
static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
- unsigned long flags, void *caller)
+ unsigned long flags, const void *caller)
{
vm->flags = flags;
vm->addr = (void *)va->va_start;
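
This hunk and the remaining prototype changes below constify the caller cookie. The cookie is almost always __builtin_return_address(0), is only stored and later printed for /proc/vmallocinfo, and is never written through, so const void * is the honest type. A sketch of the kind of call site this enables (hypothetical helper, not part of this patch):

#include <linux/vmalloc.h>

/* hypothetical: reserve a mapping tagged with our caller so it is
 * attributed correctly in /proc/vmallocinfo */
static struct vm_struct *my_reserve_area(unsigned long size)
{
	return get_vm_area_caller(size, VM_ALLOC,
				  __builtin_return_address(0));
}
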
@@ -1306,7 +1314,7 @@ static void insert_vmalloc_vmlist(struct vm_struct *vm)
}
static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
- unsigned long flags, void *caller)
+ unsigned long flags, const void *caller)
{
setup_vmalloc_vm(vm, va, flags, caller);
insert_vmalloc_vmlist(vm);
@@ -1314,7 +1322,7 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
static struct vm_struct *__get_vm_area_node(unsigned long size,
unsigned long align, unsigned long flags, unsigned long start,
- unsigned long end, int node, gfp_t gfp_mask, void *caller)
+ unsigned long end, int node, gfp_t gfp_mask, const void *caller)
{
struct vmap_area *va;
struct vm_struct *area;
@@ -1375,7 +1383,7 @@ EXPORT_SYMBOL_GPL(__get_vm_area);
struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end,
- void *caller)
+ const void *caller)
{
return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
caller);
@@ -1397,13 +1405,21 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
}
struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
- void *caller)
+ const void *caller)
{
return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
-1, GFP_KERNEL, caller);
}
-static struct vm_struct *find_vm_area(const void *addr)
+/**
+ * find_vm_area - find a continuous kernel virtual area
+ * @addr: base address
+ *
+ * Search for the kernel VM area starting at @addr, and return it.
+ * It is up to the caller to do all required locking to keep the returned
+ * pointer valid.
+ */
+struct vm_struct *find_vm_area(const void *addr)
{
struct vmap_area *va;
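
Dropping the static from find_vm_area() (and documenting it) lets other kernel code map a vmalloc'd pointer back to its struct vm_struct, subject to the locking caveat in the comment above. A hedged sketch of a caller (hypothetical helper; assumes the vm_struct's size field still includes the one trailing guard page, as it does in this era):

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* hypothetical: report the usable size of a vmalloc allocation */
static unsigned long vmalloc_size_of(const void *addr)
{
	struct vm_struct *area = find_vm_area(addr);

	if (!area)
		return 0;	/* not a vmalloc/vmap area */

	return area->size - PAGE_SIZE;	/* strip the guard page */
}
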
@@ -1568,9 +1584,9 @@ EXPORT_SYMBOL(vmap);
static void *__vmalloc_node(unsigned long size, unsigned long align,
gfp_t gfp_mask, pgprot_t prot,
- int node, void *caller);
+ int node, const void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
- pgprot_t prot, int node, void *caller)
+ pgprot_t prot, int node, const void *caller)
{
const int order = 0;
struct page **pages;
@@ -1643,7 +1659,7 @@ fail:
*/
void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
- pgprot_t prot, int node, void *caller)
+ pgprot_t prot, int node, const void *caller)
{
struct vm_struct *area;
void *addr;
@@ -1699,7 +1715,7 @@ fail:
*/
static void *__vmalloc_node(unsigned long size, unsigned long align,
gfp_t gfp_mask, pgprot_t prot,
- int node, void *caller)
+ int node, const void *caller)
{
return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
gfp_mask, prot, node, caller);
@@ -1975,9 +1991,7 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
* IOREMAP area is treated as memory hole and no copy is done.
*
* If [addr...addr+count) doesn't includes any intersects with alive
- * vm_struct area, returns 0.
- * @buf should be kernel's buffer. Because this function uses KM_USER0,
- * the caller should guarantee KM_USER0 is not used.
+ * vm_struct area, returns 0. @buf should be kernel's buffer.
*
* Note: In usual ops, vread() is never necessary because the caller
* should know vmalloc() area is valid and can use memcpy().
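
Given that contract — copy only what intersects live vm_struct areas, zero-fill the holes, return 0 when nothing intersects — a debugging caller might look like this (hypothetical snippet; vread()'s signature here is long vread(char *buf, char *addr, unsigned long count)):

#include <linux/vmalloc.h>

/* hypothetical: snapshot part of a vmalloc region into a kernel buffer */
static long snapshot_vmalloc(char *dst, char *src, unsigned long count)
{
	/* dst must be a kernel buffer; holes in [src, src+count) read as 0 */
	return vread(dst, src, count);
}
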
@@ -2051,9 +2065,7 @@ finished:
* IOREMAP area is treated as memory hole and no copy is done.
*
* If [addr...addr+count) doesn't includes any intersects with alive
- * vm_struct area, returns 0.
- * @buf should be kernel's buffer. Because this function uses KM_USER0,
- * the caller should guarantee KM_USER0 is not used.
+ * vm_struct area, returns 0. @buf should be kernel's buffer.
*
* Note: In usual ops, vwrite() is never necessary because the caller
* should know vmalloc() area is valid and can use memcpy().