author:    Andreas Gruenbacher <agruen@suse.de>    2010-07-21 19:44:45 +0200
committer: Al Viro <viro@zeniv.linux.org.uk>       2010-08-09 16:48:47 -0400
commit:    e566d48c9bd56f57e25e855a21e06ca2c2525795
tree:      a1520539bab91b693a00eff3ca11340b8ae9038a /fs/mbcache.c
parent:    2aec7c523291621ebb68ba8e0bd9b52a26bb76ee
mbcache: fix shrinker function return value
The shrinker function is supposed to return the number of cache
entries after shrinking, not before shrinking. Fix that.
Based on a patch from Wang Sheng-Hui <crosslonelyover@gmail.com>.
Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
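
As an aside for readers outside the kernel tree, the contract being fixed is "shrink first, then report what is left." A minimal userspace C sketch of that ordering, assuming a made-up toy_cache structure and a CACHE_PRESSURE constant standing in for sysctl_vfs_cache_pressure (nothing here is kernel API):

#include <stdio.h>

#define CACHE_PRESSURE 100      /* stand-in for sysctl_vfs_cache_pressure */

struct toy_cache {
        int entry_count;        /* stand-in for the c_entry_count atomic */
};

/* Free up to nr_to_scan entries, then report what remains. */
static int toy_shrink(struct toy_cache *cache, int nr_to_scan)
{
        while (nr_to_scan-- && cache->entry_count > 0)
                cache->entry_count--;   /* "forget" one LRU entry */

        /* Count after shrinking; the bug was counting before. */
        return (cache->entry_count / 100) * CACHE_PRESSURE;
}

int main(void)
{
        struct toy_cache cache = { .entry_count = 500 };

        /* Frees 200 of 500 entries, so this prints 300, not 500. */
        printf("shrinker returns %d\n", toy_shrink(&cache, 200));
        return 0;
}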
Diffstat (limited to 'fs/mbcache.c')
-rw-r--r--   fs/mbcache.c   27
1 file changed, 10 insertions(+), 17 deletions(-)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 8a2cbd82307..cf4e6cdfd15 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -176,22 +176,12 @@ static int
 mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
 	LIST_HEAD(free_list);
-	struct list_head *l, *ltmp;
+	struct mb_cache *cache;
+	struct mb_cache_entry *entry, *tmp;
 	int count = 0;
 
-	spin_lock(&mb_cache_spinlock);
-	list_for_each(l, &mb_cache_list) {
-		struct mb_cache *cache =
-			list_entry(l, struct mb_cache, c_cache_list);
-		mb_debug("cache %s (%d)", cache->c_name,
-			 atomic_read(&cache->c_entry_count));
-		count += atomic_read(&cache->c_entry_count);
-	}
 	mb_debug("trying to free %d entries", nr_to_scan);
-	if (nr_to_scan == 0) {
-		spin_unlock(&mb_cache_spinlock);
-		goto out;
-	}
+	spin_lock(&mb_cache_spinlock);
 	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
 		struct mb_cache_entry *ce =
 			list_entry(mb_cache_lru_list.next,
@@ -199,12 +189,15 @@ mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 		list_move_tail(&ce->e_lru_list, &free_list);
 		__mb_cache_entry_unhash(ce);
 	}
+	list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
+		mb_debug("cache %s (%d)", cache->c_name,
+			 atomic_read(&cache->c_entry_count));
+		count += atomic_read(&cache->c_entry_count);
+	}
 	spin_unlock(&mb_cache_spinlock);
-	list_for_each_safe(l, ltmp, &free_list) {
-		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
-						   e_lru_list), gfp_mask);
+	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
+		__mb_cache_entry_forget(entry, gfp_mask);
 	}
-out:
 	return (count / 100) * sysctl_vfs_cache_pressure;
 }
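
Two design points are visible in the hunks above: the counting loop moves below the LRU-freeing loop so that c_entry_count is sampled after shrinking, and the nr_to_scan == 0 early exit disappears because the while condition already declines to iterate in that case. The return expression then scales the post-shrink count by sysctl_vfs_cache_pressure (default 100). A standalone sketch of just that scaling, with illustrative names:

#include <stdio.h>

/* Mirrors the function's return statement; names are illustrative. */
static int shrinker_report(int entries_left, int vfs_cache_pressure)
{
        return (entries_left / 100) * vfs_cache_pressure;
}

int main(void)
{
        printf("%d\n", shrinker_report(1000, 100)); /* 1000 at the default */
        printf("%d\n", shrinker_report(1000, 50));  /* 500: reclaim less eagerly */
        printf("%d\n", shrinker_report(1000, 200)); /* 2000: reclaim more eagerly */
        return 0;
}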