author    Joonsoo Kim <js1304@gmail.com>    2013-04-29 15:07:32 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-04-29 15:54:34 -0700
commit    e81ce85f960c2e26efb5d0802d56c34533edb1bd (patch)
tree      57e1d4ac1704a6311383403f8d11d51bbbd78961 /mm
parent    c69480adeea15883d9459a8adc3da3f6e8cb7a8c (diff)
mm, vmalloc: iterate vmap_area_list, instead of vmlist in vread/vwrite()
Now, as long as vmap_area_lock is held, va->vm can't be discarded, so we can safely access va->vm while iterating vmap_area_list under that lock. With this property, change the vmlist iteration in vread()/vwrite() to iterate vmap_area_list instead.

There is a small difference with respect to locking: vmlist_lock is an rwlock, but vmap_area_lock is a spinlock, so spinning overhead may be introduced while vread()/vwrite() executes. These are debug-oriented functions, however, so the overhead is not a real problem for the common case.

Signed-off-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Atsushi Kumagai <kumagai-atsushi@mxc.nes.nec.co.jp>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Dave Anderson <anderson@redhat.com>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
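For reference, the traversal pattern that both functions adopt looks roughly like the sketch below; it assumes the 3.9-era definitions of struct vmap_area, vmap_area_list, vmap_area_lock and the VM_VM_AREA flag, and is an illustration of the pattern rather than the exact patched code:

        /* Walk every fully set-up vmalloc area under vmap_area_lock.
         * Holding the spinlock guarantees va->vm cannot be freed while
         * we dereference it. */
        struct vmap_area *va;
        struct vm_struct *vm;

        spin_lock(&vmap_area_lock);
        list_for_each_entry(va, &vmap_area_list, list) {
                if (!(va->flags & VM_VM_AREA))
                        continue;       /* no vm_struct behind this area yet */
                vm = va->vm;            /* stable while vmap_area_lock is held */
                /* ... inspect vm->addr, vm->size, vm->flags ... */
        }
        spin_unlock(&vmap_area_lock);

Note that the old for loop could carry the extra termination condition "count && tmp" in its header, whereas list_for_each_entry() cannot, so the patched functions check "if (!count) break;" at the top of the loop body instead.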
Diffstat (limited to 'mm')
-rw-r--r--    mm/vmalloc.c    48
1 file changed, 32 insertions(+), 16 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1bf94ad452b..59aa328007b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2012,7 +2012,8 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
 
 long vread(char *buf, char *addr, unsigned long count)
 {
-        struct vm_struct *tmp;
+        struct vmap_area *va;
+        struct vm_struct *vm;
         char *vaddr, *buf_start = buf;
         unsigned long buflen = count;
         unsigned long n;
@@ -2021,10 +2022,17 @@ long vread(char *buf, char *addr, unsigned long count)
         if ((unsigned long) addr + count < count)
                 count = -(unsigned long) addr;
 
-        read_lock(&vmlist_lock);
-        for (tmp = vmlist; count && tmp; tmp = tmp->next) {
-                vaddr = (char *) tmp->addr;
-                if (addr >= vaddr + tmp->size - PAGE_SIZE)
+        spin_lock(&vmap_area_lock);
+        list_for_each_entry(va, &vmap_area_list, list) {
+                if (!count)
+                        break;
+
+                if (!(va->flags & VM_VM_AREA))
+                        continue;
+
+                vm = va->vm;
+                vaddr = (char *) vm->addr;
+                if (addr >= vaddr + vm->size - PAGE_SIZE)
                         continue;
                 while (addr < vaddr) {
                         if (count == 0)
@@ -2034,10 +2042,10 @@ long vread(char *buf, char *addr, unsigned long count)
                         addr++;
                         count--;
                 }
-                n = vaddr + tmp->size - PAGE_SIZE - addr;
+                n = vaddr + vm->size - PAGE_SIZE - addr;
                 if (n > count)
                         n = count;
-                if (!(tmp->flags & VM_IOREMAP))
+                if (!(vm->flags & VM_IOREMAP))
                         aligned_vread(buf, addr, n);
                 else /* IOREMAP area is treated as memory hole */
                         memset(buf, 0, n);
@@ -2046,7 +2054,7 @@ long vread(char *buf, char *addr, unsigned long count)
                 count -= n;
         }
 finished:
-        read_unlock(&vmlist_lock);
+        spin_unlock(&vmap_area_lock);
 
         if (buf == buf_start)
                 return 0;
@@ -2085,7 +2093,8 @@ finished:
 
 long vwrite(char *buf, char *addr, unsigned long count)
 {
-        struct vm_struct *tmp;
+        struct vmap_area *va;
+        struct vm_struct *vm;
         char *vaddr;
         unsigned long n, buflen;
         int copied = 0;
@@ -2095,10 +2104,17 @@ long vwrite(char *buf, char *addr, unsigned long count)
                 count = -(unsigned long) addr;
         buflen = count;
 
-        read_lock(&vmlist_lock);
-        for (tmp = vmlist; count && tmp; tmp = tmp->next) {
-                vaddr = (char *) tmp->addr;
-                if (addr >= vaddr + tmp->size - PAGE_SIZE)
+        spin_lock(&vmap_area_lock);
+        list_for_each_entry(va, &vmap_area_list, list) {
+                if (!count)
+                        break;
+
+                if (!(va->flags & VM_VM_AREA))
+                        continue;
+
+                vm = va->vm;
+                vaddr = (char *) vm->addr;
+                if (addr >= vaddr + vm->size - PAGE_SIZE)
                         continue;
                 while (addr < vaddr) {
                         if (count == 0)
@@ -2107,10 +2123,10 @@ long vwrite(char *buf, char *addr, unsigned long count)
                         addr++;
                         count--;
                 }
-                n = vaddr + tmp->size - PAGE_SIZE - addr;
+                n = vaddr + vm->size - PAGE_SIZE - addr;
                 if (n > count)
                         n = count;
-                if (!(tmp->flags & VM_IOREMAP)) {
+                if (!(vm->flags & VM_IOREMAP)) {
                         aligned_vwrite(buf, addr, n);
                         copied++;
                 }
@@ -2119,7 +2135,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
                 count -= n;
         }
 finished:
-        read_unlock(&vmlist_lock);
+        spin_unlock(&vmap_area_lock);
         if (!copied)
                 return 0;
         return buflen;