Diffstat (limited to 'fs/dcache.c')
-rw-r--r--    fs/dcache.c    145
1 file changed, 66 insertions(+), 79 deletions(-)
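
This patch replaces the open-coded d_count/d_lock reference counting with the lockref API (lockref_get(), lockref_put_or_lock()), so the common dput() path can drop a reference without the old lock/decrement/unlock sequence. Below is a minimal userspace sketch of the semantics the diff relies on; struct my_lockref and the my_lockref_* helpers are simplified stand-ins invented for illustration (the real kernel lockref packs the spinlock and count into one word and can update the count with cmpxchg without taking the lock), not the kernel implementation.

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's struct lockref: a lock plus a count. */
struct my_lockref {
	pthread_mutex_t lock;
	unsigned int count;
};

/* Like lockref_get(): take one extra reference. */
static void my_lockref_get(struct my_lockref *ref)
{
	pthread_mutex_lock(&ref->lock);
	ref->count++;
	pthread_mutex_unlock(&ref->lock);
}

/* Like lockref_put_or_lock(): if more than one reference is held, drop one
 * and return 1.  If this is the last reference, return 0 with the lock still
 * held so the caller can tear the object down (as dput() does). */
static int my_lockref_put_or_lock(struct my_lockref *ref)
{
	pthread_mutex_lock(&ref->lock);
	if (ref->count > 1) {
		ref->count--;
		pthread_mutex_unlock(&ref->lock);
		return 1;
	}
	return 0;	/* count == 1: caller now holds the lock */
}

int main(void)
{
	struct my_lockref d = { PTHREAD_MUTEX_INITIALIZER, 1 };

	my_lockref_get(&d);			/* count: 1 -> 2 */
	if (my_lockref_put_or_lock(&d))		/* fast path, count: 2 -> 1 */
		printf("fast put, count=%u\n", d.count);

	if (!my_lockref_put_or_lock(&d)) {	/* last reference: lock held */
		printf("last ref, tearing down (count=%u)\n", d.count);
		pthread_mutex_unlock(&d.lock);	/* real code would free here */
	}
	return 0;
}

In the diff that follows, dentry->d_count becomes dentry->d_lockref.count, and the hand-rolled "lock, check count, decrement, unlock" blocks in dput() and try_prune_one_dentry() collapse into single lockref_put_or_lock() calls with exactly this contract.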
diff --git a/fs/dcache.c b/fs/dcache.c
index f09b9085f7d..b949af850cd 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -229,7 +229,7 @@ static void __d_free(struct rcu_head *head)
*/
static void d_free(struct dentry *dentry)
{
- BUG_ON(dentry->d_count);
+ BUG_ON(dentry->d_lockref.count);
this_cpu_dec(nr_dentry);
if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
@@ -467,7 +467,7 @@ relock:
}
if (ref)
- dentry->d_count--;
+ dentry->d_lockref.count--;
/*
* inform the fs via d_prune that this dentry is about to be
* unhashed and destroyed.
@@ -513,15 +513,10 @@ void dput(struct dentry *dentry)
return;
repeat:
- if (dentry->d_count == 1)
+ if (dentry->d_lockref.count == 1)
might_sleep();
- spin_lock(&dentry->d_lock);
- BUG_ON(!dentry->d_count);
- if (dentry->d_count > 1) {
- dentry->d_count--;
- spin_unlock(&dentry->d_lock);
+ if (lockref_put_or_lock(&dentry->d_lockref))
return;
- }
if (dentry->d_flags & DCACHE_OP_DELETE) {
if (dentry->d_op->d_delete(dentry))
@@ -535,7 +530,7 @@ repeat:
dentry->d_flags |= DCACHE_REFERENCED;
dentry_lru_add(dentry);
- dentry->d_count--;
+ dentry->d_lockref.count--;
spin_unlock(&dentry->d_lock);
return;
@@ -590,7 +585,7 @@ int d_invalidate(struct dentry * dentry)
* We also need to leave mountpoints alone,
* directory or not.
*/
- if (dentry->d_count > 1 && dentry->d_inode) {
+ if (dentry->d_lockref.count > 1 && dentry->d_inode) {
if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
spin_unlock(&dentry->d_lock);
return -EBUSY;
@@ -606,14 +601,12 @@ EXPORT_SYMBOL(d_invalidate);
/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
- dentry->d_count++;
+ dentry->d_lockref.count++;
}
static inline void __dget(struct dentry *dentry)
{
- spin_lock(&dentry->d_lock);
- __dget_dlock(dentry);
- spin_unlock(&dentry->d_lock);
+ lockref_get(&dentry->d_lockref);
}
struct dentry *dget_parent(struct dentry *dentry)
@@ -634,8 +627,8 @@ repeat:
goto repeat;
}
rcu_read_unlock();
- BUG_ON(!ret->d_count);
- ret->d_count++;
+ BUG_ON(!ret->d_lockref.count);
+ ret->d_lockref.count++;
spin_unlock(&ret->d_lock);
return ret;
}
@@ -718,7 +711,7 @@ restart:
spin_lock(&inode->i_lock);
hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
spin_lock(&dentry->d_lock);
- if (!dentry->d_count) {
+ if (!dentry->d_lockref.count) {
__dget_dlock(dentry);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
@@ -763,12 +756,8 @@ static void try_prune_one_dentry(struct dentry *dentry)
/* Prune ancestors. */
dentry = parent;
while (dentry) {
- spin_lock(&dentry->d_lock);
- if (dentry->d_count > 1) {
- dentry->d_count--;
- spin_unlock(&dentry->d_lock);
+ if (lockref_put_or_lock(&dentry->d_lockref))
return;
- }
dentry = dentry_kill(dentry, 1);
}
}
@@ -793,7 +782,7 @@ static void shrink_dentry_list(struct list_head *list)
* the LRU because of laziness during lookup. Do not free
* it - just keep it off the LRU list.
*/
- if (dentry->d_count) {
+ if (dentry->d_lockref.count) {
dentry_lru_del(dentry);
spin_unlock(&dentry->d_lock);
continue;
@@ -913,7 +902,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
dentry_lru_del(dentry);
__d_shrink(dentry);
- if (dentry->d_count != 0) {
+ if (dentry->d_lockref.count != 0) {
printk(KERN_ERR
"BUG: Dentry %p{i=%lx,n=%s}"
" still in use (%d)"
@@ -922,7 +911,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
dentry->d_inode ?
dentry->d_inode->i_ino : 0UL,
dentry->d_name.name,
- dentry->d_count,
+ dentry->d_lockref.count,
dentry->d_sb->s_type->name,
dentry->d_sb->s_id);
BUG();
@@ -933,7 +922,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
list_del(&dentry->d_u.d_child);
} else {
parent = dentry->d_parent;
- parent->d_count--;
+ parent->d_lockref.count--;
list_del(&dentry->d_u.d_child);
}
@@ -981,7 +970,7 @@ void shrink_dcache_for_umount(struct super_block *sb)
dentry = sb->s_root;
sb->s_root = NULL;
- dentry->d_count--;
+ dentry->d_lockref.count--;
shrink_dcache_for_umount_subtree(dentry);
while (!hlist_bl_empty(&sb->s_anon)) {
@@ -1147,7 +1136,7 @@ resume:
* loop in shrink_dcache_parent() might not make any progress
* and loop forever.
*/
- if (dentry->d_count) {
+ if (dentry->d_lockref.count) {
dentry_lru_del(dentry);
} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
dentry_lru_move_list(dentry, dispose);
@@ -1269,7 +1258,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
smp_wmb();
dentry->d_name.name = dname;
- dentry->d_count = 1;
+ dentry->d_lockref.count = 1;
dentry->d_flags = 0;
spin_lock_init(&dentry->d_lock);
seqcount_init(&dentry->d_seq);
@@ -1612,6 +1601,10 @@ EXPORT_SYMBOL(d_obtain_alias);
* If a dentry was found and moved, then it is returned. Otherwise NULL
* is returned. This matches the expected return value of ->lookup.
*
+ * Cluster filesystems may call this function with a negative, hashed dentry.
+ * In that case, we know that the inode will be a regular file, and also this
+ * will only occur during atomic_open. So we need to check for the dentry
+ * being already hashed only in the final case.
*/
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
@@ -1636,8 +1629,11 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
security_d_instantiate(dentry, inode);
d_rehash(dentry);
}
- } else
- d_add(dentry, inode);
+ } else {
+ d_instantiate(dentry, inode);
+ if (d_unhashed(dentry))
+ d_rehash(dentry);
+ }
return new;
}
EXPORT_SYMBOL(d_splice_alias);
@@ -1723,7 +1719,7 @@ EXPORT_SYMBOL(d_add_ci);
* Do the slow-case of the dentry name compare.
*
* Unlike the dentry_cmp() function, we need to atomically
- * load the name, length and inode information, so that the
+ * load the name and length information, so that the
* filesystem can rely on them, and can use the 'name' and
* 'len' information without worrying about walking off the
* end of memory etc.
@@ -1741,22 +1737,18 @@ enum slow_d_compare {
static noinline enum slow_d_compare slow_dentry_cmp(
const struct dentry *parent,
- struct inode *inode,
struct dentry *dentry,
unsigned int seq,
const struct qstr *name)
{
int tlen = dentry->d_name.len;
const char *tname = dentry->d_name.name;
- struct inode *i = dentry->d_inode;
if (read_seqcount_retry(&dentry->d_seq, seq)) {
cpu_relax();
return D_COMP_SEQRETRY;
}
- if (parent->d_op->d_compare(parent, inode,
- dentry, i,
- tlen, tname, name))
+ if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
return D_COMP_NOMATCH;
return D_COMP_OK;
}
@@ -1766,7 +1758,6 @@ static noinline enum slow_d_compare slow_dentry_cmp(
* @parent: parent dentry
* @name: qstr of name we wish to find
* @seqp: returns d_seq value at the point where the dentry was found
- * @inode: returns dentry->d_inode when the inode was found valid.
* Returns: dentry, or NULL
*
* __d_lookup_rcu is the dcache lookup function for rcu-walk name
@@ -1793,7 +1784,7 @@ static noinline enum slow_d_compare slow_dentry_cmp(
*/
struct dentry *__d_lookup_rcu(const struct dentry *parent,
const struct qstr *name,
- unsigned *seqp, struct inode *inode)
+ unsigned *seqp)
{
u64 hashlen = name->hash_len;
const unsigned char *str = name->name;
@@ -1827,11 +1818,10 @@ struct dentry *__d_lookup_rcu(const struct dentry *parent,
seqretry:
/*
* The dentry sequence count protects us from concurrent
- * renames, and thus protects inode, parent and name fields.
+ * renames, and thus protects parent and name fields.
*
* The caller must perform a seqcount check in order
- * to do anything useful with the returned dentry,
- * including using the 'd_inode' pointer.
+ * to do anything useful with the returned dentry.
*
* NOTE! We do a "raw" seqcount_begin here. That means that
* we don't wait for the sequence count to stabilize if it
@@ -1845,12 +1835,12 @@ seqretry:
continue;
if (d_unhashed(dentry))
continue;
- *seqp = seq;
if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
if (dentry->d_name.hash != hashlen_hash(hashlen))
continue;
- switch (slow_dentry_cmp(parent, inode, dentry, seq, name)) {
+ *seqp = seq;
+ switch (slow_dentry_cmp(parent, dentry, seq, name)) {
case D_COMP_OK:
return dentry;
case D_COMP_NOMATCH:
@@ -1862,6 +1852,7 @@ seqretry:
if (dentry->d_name.hash_len != hashlen)
continue;
+ *seqp = seq;
if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
return dentry;
}
@@ -1959,9 +1950,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
if (parent->d_flags & DCACHE_OP_COMPARE) {
int tlen = dentry->d_name.len;
const char *tname = dentry->d_name.name;
- if (parent->d_op->d_compare(parent, parent->d_inode,
- dentry, dentry->d_inode,
- tlen, tname, name))
+ if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
goto next;
} else {
if (dentry->d_name.len != len)
@@ -1970,7 +1959,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
goto next;
}
- dentry->d_count++;
+ dentry->d_lockref.count++;
found = dentry;
spin_unlock(&dentry->d_lock);
break;
@@ -1998,7 +1987,7 @@ struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
*/
name->hash = full_name_hash(name->name, name->len);
if (dir->d_flags & DCACHE_OP_HASH) {
- int err = dir->d_op->d_hash(dir, dir->d_inode, name);
+ int err = dir->d_op->d_hash(dir, name);
if (unlikely(err < 0))
return ERR_PTR(err);
}
@@ -2069,7 +2058,7 @@ again:
spin_lock(&dentry->d_lock);
inode = dentry->d_inode;
isdir = S_ISDIR(inode->i_mode);
- if (dentry->d_count == 1) {
+ if (dentry->d_lockref.count == 1) {
if (!spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
cpu_relax();
@@ -2724,6 +2713,17 @@ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
return memcpy(buffer, temp, sz);
}
+char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
+{
+ char *end = buffer + buflen;
+ /* these dentries are never renamed, so d_lock is not needed */
+ if (prepend(&end, &buflen, " (deleted)", 11) ||
+ prepend_name(&end, &buflen, &dentry->d_name) ||
+ prepend(&end, &buflen, "/", 1))
+ end = ERR_PTR(-ENAMETOOLONG);
+ return end;
+}
+
/*
* Write full pathname from the root of the filesystem into the buffer.
*/
@@ -2937,7 +2937,7 @@ resume:
}
if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
dentry->d_flags |= DCACHE_GENOCIDE;
- dentry->d_count--;
+ dentry->d_lockref.count--;
}
spin_unlock(&dentry->d_lock);
}
@@ -2945,7 +2945,7 @@ resume:
struct dentry *child = this_parent;
if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
this_parent->d_flags |= DCACHE_GENOCIDE;
- this_parent->d_count--;
+ this_parent->d_lockref.count--;
}
this_parent = try_to_ascend(this_parent, locked, seq);
if (!this_parent)
@@ -2968,34 +2968,21 @@ rename_retry:
goto again;
}
-/**
- * find_inode_number - check for dentry with name
- * @dir: directory to check
- * @name: Name to find.
- *
- * Check whether a dentry already exists for the given name,
- * and return the inode number if it has an inode. Otherwise
- * 0 is returned.
- *
- * This routine is used to post-process directory listings for
- * filesystems using synthetic inode numbers, and is necessary
- * to keep getcwd() working.
- */
-
-ino_t find_inode_number(struct dentry *dir, struct qstr *name)
+void d_tmpfile(struct dentry *dentry, struct inode *inode)
{
- struct dentry * dentry;
- ino_t ino = 0;
-
- dentry = d_hash_and_lookup(dir, name);
- if (!IS_ERR_OR_NULL(dentry)) {
- if (dentry->d_inode)
- ino = dentry->d_inode->i_ino;
- dput(dentry);
- }
- return ino;
+ inode_dec_link_count(inode);
+ BUG_ON(dentry->d_name.name != dentry->d_iname ||
+ !hlist_unhashed(&dentry->d_alias) ||
+ !d_unlinked(dentry));
+ spin_lock(&dentry->d_parent->d_lock);
+ spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+ dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
+ (unsigned long long)inode->i_ino);
+ spin_unlock(&dentry->d_lock);
+ spin_unlock(&dentry->d_parent->d_lock);
+ d_instantiate(dentry, inode);
}
-EXPORT_SYMBOL(find_inode_number);
+EXPORT_SYMBOL(d_tmpfile);
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)