author		Hugh Dickins <hughd@google.com>	2011-07-25 17:12:35 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-07-25 20:57:11 -0700
commit		e83c32e8f92724a06a22a3b42f3afc07db93e131 (patch)
tree		eeefe97f26e7b4faf672eba777a12749bfdad024
parent		9276aad6c898dbcc31d095f2934dedd5cbb2e93e (diff)
tmpfs: simplify prealloc_page
The prealloc_page handling in shmem_getpage_gfp() is unnecessarily
complicated: first simplify that before going on to filepage/swappage.

That's right, don't report ENOMEM when the preallocation fails: we may or
may not need the page.  But simply report ENOMEM once we find we do need
it, instead of dropping lock, repeating allocation, unwinding on failure
etc.  And leave the out label on the fast path, don't goto.

Fix something that looks like a bug but turns out not to be: set
PageSwapBacked on prealloc_page before its mem_cgroup_cache_charge(), as
the removed case was doing.  That's important before adding to LRU
(determines which LRU the page goes on), and does affect which path it
takes through memcontrol.c, but in the end MEM_CGROUP_CHANGE_TYPE_SHMEM
is handled no differently from CACHE.

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Shaohua Li <shaohua.li@intel.com>
Cc: "Zhang, Yanmin" <yanmin.zhang@intel.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
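[Editor's note] Before reading the diff, it may help to see the control flow the patch converges on, in isolation. The following is a minimal userspace sketch, not kernel code: optimistic_prealloc() and use_page() are hypothetical stand-ins for shmem_alloc_page() and the later point where shmem_getpage_gfp() discovers it actually needs the page; the info->lock handling, memcg charge/uncharge and PageSwapBacked details are omitted.

	#include <errno.h>
	#include <stdlib.h>

	#define PAGE_SIZE 4096

	/* Stand-in for shmem_alloc_page(): allocation is optimistic, so a
	 * NULL return is deliberately NOT reported as an error here -- the
	 * caller may turn out not to need the page at all. */
	static void *optimistic_prealloc(void)
	{
		return malloc(PAGE_SIZE);
	}

	/* Stand-in for the point where shmem_getpage_gfp() discovers it
	 * really does need a fresh page.  Only here does a missing
	 * preallocation become -ENOMEM, instead of dropping the lock,
	 * repeating the allocation and unwinding on failure as the
	 * removed code did. */
	static int use_page(void **pagep, void **prealloc)
	{
		if (!*prealloc)
			return -ENOMEM;	/* now we know we needed it */
		*pagep = *prealloc;
		*prealloc = NULL;	/* consumed: mirrors prealloc_page = NULL */
		return 0;
	}

	int main(void)
	{
		void *prealloc = optimistic_prealloc();	/* failure tolerated */
		void *page = NULL;
		int error = use_page(&page, &prealloc);

		/* Single exit path, like the patch's "out:" label kept on
		 * the fast path: an unused preallocation is released
		 * exactly once, whether or not it was needed. */
		free(prealloc);
		free(page);
		return error ? 1 : 0;
	}

The design point the sketch illustrates is that funneling both outcomes through one exit path makes it impossible to leak or double-release the preallocation, which is exactly why the patch moves the out: label onto the fast path below.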
-rw-r--r--	mm/shmem.c	60
1 file changed, 16 insertions(+), 44 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index ff6713a2579..8f8534f3547 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1269,9 +1269,9 @@ repeat:
 			goto failed;
 		radix_tree_preload_end();
 		if (sgp != SGP_READ && !prealloc_page) {
-			/* We don't care if this fails */
 			prealloc_page = shmem_alloc_page(gfp, info, idx);
 			if (prealloc_page) {
+				SetPageSwapBacked(prealloc_page);
 				if (mem_cgroup_cache_charge(prealloc_page,
 						current->mm, GFP_KERNEL)) {
 					page_cache_release(prealloc_page);
@@ -1403,7 +1403,8 @@ repeat:
 				goto repeat;
 			}
 			spin_unlock(&info->lock);
-	} else {
+
+	} else if (prealloc_page) {
 		shmem_swp_unmap(entry);
 		sbinfo = SHMEM_SB(inode->i_sb);
 		if (sbinfo->max_blocks) {
@@ -1419,41 +1420,8 @@ repeat:
 		if (!filepage) {
 			int ret;
 
-			if (!prealloc_page) {
-				spin_unlock(&info->lock);
-				filepage = shmem_alloc_page(gfp, info, idx);
-				if (!filepage) {
-					spin_lock(&info->lock);
-					shmem_unacct_blocks(info->flags, 1);
-					shmem_free_blocks(inode, 1);
-					spin_unlock(&info->lock);
-					error = -ENOMEM;
-					goto failed;
-				}
-				SetPageSwapBacked(filepage);
-
-				/*
-				 * Precharge page while we can wait, compensate
-				 * after
-				 */
-				error = mem_cgroup_cache_charge(filepage,
-						current->mm, GFP_KERNEL);
-				if (error) {
-					page_cache_release(filepage);
-					spin_lock(&info->lock);
-					shmem_unacct_blocks(info->flags, 1);
-					shmem_free_blocks(inode, 1);
-					spin_unlock(&info->lock);
-					filepage = NULL;
-					goto failed;
-				}
-
-				spin_lock(&info->lock);
-			} else {
-				filepage = prealloc_page;
-				prealloc_page = NULL;
-				SetPageSwapBacked(filepage);
-			}
+			filepage = prealloc_page;
+			prealloc_page = NULL;
 
 			entry = shmem_swp_alloc(info, idx, sgp, gfp);
 			if (IS_ERR(entry))
@@ -1492,11 +1460,20 @@ repeat:
 		SetPageUptodate(filepage);
 		if (sgp == SGP_DIRTY)
 			set_page_dirty(filepage);
+	} else {
+		spin_unlock(&info->lock);
+		error = -ENOMEM;
+		goto out;
 	}
 done:
 	*pagep = filepage;
 	error = 0;
-	goto out;
+out:
+	if (prealloc_page) {
+		mem_cgroup_uncharge_cache_page(prealloc_page);
+		page_cache_release(prealloc_page);
+	}
+	return error;
 
 nospace:
 	/*
@@ -1520,12 +1497,7 @@ failed:
 		unlock_page(filepage);
 		page_cache_release(filepage);
 	}
-out:
-	if (prealloc_page) {
-		mem_cgroup_uncharge_cache_page(prealloc_page);
-		page_cache_release(prealloc_page);
-	}
-	return error;
+	goto out;
 }
 
 static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)