author	Dave Chinner <dchinner@redhat.com>	2013-08-28 10:18:00 +1000
committer	Al Viro <viro@zeniv.linux.org.uk>	2013-09-10 18:56:30 -0400
commit	f604156751db77e08afe47ce29fe8f3d51ad9b04 (patch)
tree	e0a109be920e4db54ac6384bebb2460aa1e309a9 /fs/dcache.c
parent	d38fa6986e9124f827aa6ea4a9dde01e67a37be7 (diff)
dcache: convert to use new lru list infrastructure
[glommer@openvz.org: don't reintroduce double decrement of nr_unused_dentries, adapted for new LRU return codes]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
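
For reviewers unfamiliar with the list_lru infrastructure being adopted here: objects go on and off a per-sb struct list_lru via list_lru_add()/list_lru_del(), and reclaim walks the list with list_lru_walk(), whose isolate callback decides each item's fate through the lru_status return codes. What follows is a minimal sketch of that pattern, assuming the <linux/list_lru.h> API exactly as used in the diff below; the my_object type, my_lru, nr_my_unused and the my_* helpers are hypothetical names for illustration, not part of this patch.

/*
 * Sketch of the list_lru add/walk pattern this patch adopts.
 * my_object, my_lru and nr_my_unused are illustrative only.
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_lru.h>
#include <linux/spinlock.h>

struct my_object {
	spinlock_t		lock;
	int			count;	/* active references */
	struct list_head	lru;	/* linkage into my_lru */
};

static struct list_lru my_lru;
static long nr_my_unused;		/* cf. nr_dentry_unused */

/* Put an unused object on the LRU, as dentry_lru_add() does below. */
static void my_lru_add(struct my_object *obj)
{
	if (list_lru_add(&my_lru, &obj->lru))
		nr_my_unused++;		/* count only a real insertion */
}

/* Walk callback, called with the lru lock held for each candidate. */
static enum lru_status my_isolate(struct list_head *item,
				  spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct my_object *obj = container_of(item, struct my_object, lru);

	/* Lock order is inverted against the lru lock, so trylock. */
	if (!spin_trylock(&obj->lock))
		return LRU_SKIP;

	if (obj->count) {
		/* Still referenced: just take it off the LRU. */
		list_del_init(&obj->lru);
		spin_unlock(&obj->lock);
		return LRU_REMOVED;
	}

	/* Unused: move to a private list, to be freed after the walk. */
	list_move_tail(&obj->lru, freeable);
	spin_unlock(&obj->lock);
	return LRU_REMOVED;
}

/* Scan up to nr_to_scan items, mirroring prune_dcache_sb() below. */
static unsigned long my_prune(unsigned long nr_to_scan)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	freed = list_lru_walk(&my_lru, my_isolate, &dispose, nr_to_scan);
	/* free everything left on &dispose here, outside the lru lock */
	return freed;
}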
Diffstat (limited to 'fs/dcache.c')
-rw-r--r--  fs/dcache.c  170
1 file changed, 78 insertions(+), 92 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index 77d466b13fe..38a4a03499a 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -37,6 +37,7 @@
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
+#include <linux/list_lru.h>
#include "internal.h"
#include "mount.h"
@@ -356,28 +357,17 @@ static void dentry_unlink_inode(struct dentry * dentry)
}
/*
- * dentry_lru_(add|del|move_list) must be called with d_lock held.
+ * dentry_lru_(add|del)_list must be called with d_lock held.
*/
static void dentry_lru_add(struct dentry *dentry)
{
if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) {
- spin_lock(&dentry->d_sb->s_dentry_lru_lock);
+ if (list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru))
+ this_cpu_inc(nr_dentry_unused);
dentry->d_flags |= DCACHE_LRU_LIST;
- list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
- dentry->d_sb->s_nr_dentry_unused++;
- this_cpu_inc(nr_dentry_unused);
- spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
}
}
-static void __dentry_lru_del(struct dentry *dentry)
-{
- list_del_init(&dentry->d_lru);
- dentry->d_flags &= ~DCACHE_LRU_LIST;
- dentry->d_sb->s_nr_dentry_unused--;
- this_cpu_dec(nr_dentry_unused);
-}
-
/*
* Remove a dentry with references from the LRU.
*
@@ -393,27 +383,9 @@ static void dentry_lru_del(struct dentry *dentry)
return;
}
- if (!list_empty(&dentry->d_lru)) {
- spin_lock(&dentry->d_sb->s_dentry_lru_lock);
- __dentry_lru_del(dentry);
- spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
- }
-}
-
-static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
-{
- BUG_ON(dentry->d_flags & DCACHE_SHRINK_LIST);
-
- spin_lock(&dentry->d_sb->s_dentry_lru_lock);
- if (list_empty(&dentry->d_lru)) {
- dentry->d_flags |= DCACHE_LRU_LIST;
- list_add_tail(&dentry->d_lru, list);
- } else {
- list_move_tail(&dentry->d_lru, list);
- dentry->d_sb->s_nr_dentry_unused--;
+ if (list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru))
this_cpu_dec(nr_dentry_unused);
- }
- spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
+ dentry->d_flags &= ~DCACHE_LRU_LIST;
}
/**
@@ -901,12 +873,72 @@ static void shrink_dentry_list(struct list_head *list)
rcu_read_unlock();
}
+static enum lru_status
+dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
+{
+ struct list_head *freeable = arg;
+ struct dentry *dentry = container_of(item, struct dentry, d_lru);
+
+
+ /*
+ * We are inverting the lru lock/dentry->d_lock here,
+ * so use a trylock. If we fail to get the lock, just skip
+ * it.
+ */
+ if (!spin_trylock(&dentry->d_lock))
+ return LRU_SKIP;
+
+ /*
+ * Referenced dentries are still in use. If they have active
+ * counts, just remove them from the LRU. Otherwise give them
+ * another pass through the LRU.
+ */
+ if (dentry->d_lockref.count) {
+ list_del_init(&dentry->d_lru);
+ spin_unlock(&dentry->d_lock);
+ return LRU_REMOVED;
+ }
+
+ if (dentry->d_flags & DCACHE_REFERENCED) {
+ dentry->d_flags &= ~DCACHE_REFERENCED;
+ spin_unlock(&dentry->d_lock);
+
+ /*
+ * The list move itself will be made by the common LRU code. At
+ * this point, we've dropped the dentry->d_lock but keep the
+ * lru lock. This is safe to do, since every list movement is
+ * protected by the lru lock even if both locks are held.
+ *
+ * This is guaranteed by the fact that all LRU management
+ * functions are intermediated by the LRU API calls like
+ * list_lru_add and list_lru_del. List movement in this file
+ * only ever occurs through these functions or through
+ * callbacks like this one, which are called from the LRU API.
+ *
+ * The only exceptions to this are functions like
+ * shrink_dentry_list, and code that first checks for the
+ * DCACHE_SHRINK_LIST flag. Those are guaranteed to be
+ * operating only with stack-provided lists after they are
+ * properly isolated from the main list. It is thus always a
+ * local access.
+ */
+ return LRU_ROTATE;
+ }
+
+ dentry->d_flags |= DCACHE_SHRINK_LIST;
+ list_move_tail(&dentry->d_lru, freeable);
+ this_cpu_dec(nr_dentry_unused);
+ spin_unlock(&dentry->d_lock);
+
+ return LRU_REMOVED;
+}
+
/**
* prune_dcache_sb - shrink the dcache
* @sb: superblock
- * @count: number of entries to try to free
+ * @nr_to_scan: number of entries to try to free
*
- * Attempt to shrink the superblock dcache LRU by @count entries. This is
+ * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
* done when we need more memory and is called from the superblock shrinker
* function.
*
@@ -915,45 +947,12 @@ static void shrink_dentry_list(struct list_head *list)
*/
long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan)
{
- struct dentry *dentry;
- LIST_HEAD(referenced);
- LIST_HEAD(tmp);
- long freed = 0;
+ LIST_HEAD(dispose);
+ long freed;
-relock:
- spin_lock(&sb->s_dentry_lru_lock);
- while (!list_empty(&sb->s_dentry_lru)) {
- dentry = list_entry(sb->s_dentry_lru.prev,
- struct dentry, d_lru);
- BUG_ON(dentry->d_sb != sb);
-
- if (!spin_trylock(&dentry->d_lock)) {
- spin_unlock(&sb->s_dentry_lru_lock);
- cpu_relax();
- goto relock;
- }
-
- if (dentry->d_flags & DCACHE_REFERENCED) {
- dentry->d_flags &= ~DCACHE_REFERENCED;
- list_move(&dentry->d_lru, &referenced);
- spin_unlock(&dentry->d_lock);
- } else {
- list_move(&dentry->d_lru, &tmp);
- dentry->d_flags |= DCACHE_SHRINK_LIST;
- this_cpu_dec(nr_dentry_unused);
- sb->s_nr_dentry_unused--;
- spin_unlock(&dentry->d_lock);
- freed++;
- if (!--nr_to_scan)
- break;
- }
- cond_resched_lock(&sb->s_dentry_lru_lock);
- }
- if (!list_empty(&referenced))
- list_splice(&referenced, &sb->s_dentry_lru);
- spin_unlock(&sb->s_dentry_lru_lock);
-
- shrink_dentry_list(&tmp);
+ freed = list_lru_walk(&sb->s_dentry_lru, dentry_lru_isolate,
+ &dispose, nr_to_scan);
+ shrink_dentry_list(&dispose);
return freed;
}
@@ -987,24 +986,10 @@ shrink_dcache_list(
void shrink_dcache_sb(struct super_block *sb)
{
- LIST_HEAD(tmp);
-
- spin_lock(&sb->s_dentry_lru_lock);
- while (!list_empty(&sb->s_dentry_lru)) {
- /*
- * account for removal here so we don't need to handle it later
- * even though the dentry is no longer on the lru list.
- */
- list_splice_init(&sb->s_dentry_lru, &tmp);
- this_cpu_sub(nr_dentry_unused, sb->s_nr_dentry_unused);
- sb->s_nr_dentry_unused = 0;
- spin_unlock(&sb->s_dentry_lru_lock);
+ long disposed;
- shrink_dcache_list(&tmp);
-
- spin_lock(&sb->s_dentry_lru_lock);
- }
- spin_unlock(&sb->s_dentry_lru_lock);
+ disposed = list_lru_dispose_all(&sb->s_dentry_lru, shrink_dcache_list);
+ this_cpu_sub(nr_dentry_unused, disposed);
}
EXPORT_SYMBOL(shrink_dcache_sb);
@@ -1366,7 +1351,8 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
if (dentry->d_lockref.count) {
dentry_lru_del(dentry);
} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
- dentry_lru_move_list(dentry, &data->dispose);
+ dentry_lru_del(dentry);
+ list_add_tail(&dentry->d_lru, &data->dispose);
dentry->d_flags |= DCACHE_SHRINK_LIST;
data->found++;
ret = D_WALK_NORETRY;
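
As a closing note on the shrink_dcache_sb() hunk above: list_lru_dispose_all() is assumed, from its call site, to isolate everything on the LRU, feed the isolated items to the dispose callback, and return the number of items handed over, which the caller then subtracts from its unused counter. A minimal sketch under that assumption, reusing the hypothetical my_object/my_lru/nr_my_unused names from the sketch above (free_object is likewise illustrative):

/*
 * Sketch of the dispose-all pattern from shrink_dcache_sb() above.
 * list_lru_dispose_all() is assumed to behave as its call site
 * implies; free_object() is a hypothetical destructor.
 */
static void my_dispose(struct list_head *list)
{
	struct my_object *obj, *next;

	/* The list is private to us now, so no lru lock is needed. */
	list_for_each_entry_safe(obj, next, list, lru) {
		list_del_init(&obj->lru);
		free_object(obj);
	}
}

static void my_shrink_all(void)
{
	long disposed;

	disposed = list_lru_dispose_all(&my_lru, my_dispose);
	nr_my_unused -= disposed;	/* cf. this_cpu_sub() above */
}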