author     Markus Metzger <markus.t.metzger@intel.com>   2009-04-24 09:51:43 +0200
committer  Ingo Molnar <mingo@elte.hu>                   2009-04-24 10:18:52 +0200
commit     1cb81b143fa8f0e4629f10690862e2e52ca792ff (patch)
tree       667b9677f8ad1211ca3d094bedabe47a3d4f5ba9 /mm
parent     7e0bfad24d85de7cf2202a7b0ce51de11a077b21 (diff)
x86, bts, mm: clean up buffer allocation
The current mm interface is asymmetric: one function allocates a locked buffer, another function only refunds the memory.

Change this to have two functions for accounting and refunding locked memory, respectively, and do the actual buffer allocation in ptrace.

[ Impact: refactor BTS buffer allocation code ]

Signed-off-by: Markus Metzger <markus.t.metzger@intel.com>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20090424095143.A30265@sedona.ch.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
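For context, a minimal sketch (not part of this patch) of how a caller such as the ptrace BTS code is expected to use the split interface: account the pages first, allocate the buffer itself, and refund the accounting if the allocation fails. The helper name ptrace_bts_alloc_buffer() and its error handling are assumptions for illustration only; only account_locked_memory() and refund_locked_memory() come from the patch below.

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>

/*
 * Illustrative caller sketch, assuming the interface introduced below:
 *   int  account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
 *                              size_t size);
 *   void refund_locked_memory(struct mm_struct *mm, size_t size);
 * The function name is hypothetical; the real caller lives in ptrace.
 */
static void *ptrace_bts_alloc_buffer(struct task_struct *child, size_t size)
{
	void *buffer;
	int error;

	/* Charge the pages against the tracee's address-space and
	 * mlock rlimits before allocating anything. */
	error = account_locked_memory(child->mm, child->signal->rlim, size);
	if (error < 0)
		return NULL;

	/* The allocation itself is now the caller's job, not mm/mlock.c's. */
	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		/* Allocation failed: give the accounted pages back. */
		refund_locked_memory(child->mm, size);

	return buffer;
}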
Diffstat (limited to 'mm')
-rw-r--r--   mm/mlock.c   36
1 file changed, 17 insertions(+), 19 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index 28be15ead9c..ac130433c7d 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -629,38 +629,36 @@ void user_shm_unlock(size_t size, struct user_struct *user)
 	free_uid(user);
 }
-void *alloc_locked_buffer(size_t size)
+int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
+			  size_t size)
 {
-	unsigned long rlim, vm, pgsz;
-	void *buffer = NULL;
+	unsigned long lim, vm, pgsz;
+	int error = -ENOMEM;
 	pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	down_write(&current->mm->mmap_sem);
-
-	rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
-	vm = current->mm->total_vm + pgsz;
-	if (rlim < vm)
-		goto out;
+	down_write(&mm->mmap_sem);
-	rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
-	vm = current->mm->locked_vm + pgsz;
-	if (rlim < vm)
+	lim = rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
+	vm = mm->total_vm + pgsz;
+	if (lim < vm)
 		goto out;
-	buffer = kzalloc(size, GFP_KERNEL);
-	if (!buffer)
+	lim = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
+	vm = mm->locked_vm + pgsz;
+	if (lim < vm)
 		goto out;
-	current->mm->total_vm += pgsz;
-	current->mm->locked_vm += pgsz;
+	mm->total_vm += pgsz;
+	mm->locked_vm += pgsz;
+	error = 0;
 out:
-	up_write(&current->mm->mmap_sem);
-	return buffer;
+	up_write(&mm->mmap_sem);
+	return error;
 }
-void refund_locked_buffer_memory(struct mm_struct *mm, size_t size)
+void refund_locked_memory(struct mm_struct *mm, size_t size)
 {
 	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;