Diffstat (limited to 'fs')
-rw-r--r--  fs/Kconfig | 4
-rw-r--r--  fs/afs/internal.h | 2
-rw-r--r--  fs/afs/rxrpc.c | 3
-rw-r--r--  fs/aio.c | 24
-rw-r--r--  fs/binfmt_aout.c | 14
-rw-r--r--  fs/binfmt_elf.c | 2
-rw-r--r--  fs/block_dev.c | 16
-rw-r--r--  fs/btrfs/backref.c | 8
-rw-r--r--  fs/btrfs/reada.c | 2
-rw-r--r--  fs/cifs/dir.c | 20
-rw-r--r--  fs/cifs/file.c | 69
-rw-r--r--  fs/cifs/inode.c | 28
-rw-r--r--  fs/cifs/xattr.c | 6
-rw-r--r--  fs/dcache.c | 62
-rw-r--r--  fs/ecryptfs/miscdev.c | 2
-rw-r--r--  fs/eventpoll.c | 4
-rw-r--r--  fs/exec.c | 18
-rw-r--r--  fs/gfs2/glock.c | 14
-rw-r--r--  fs/gfs2/inode.c | 5
-rw-r--r--  fs/gfs2/ops_fstype.c | 5
-rw-r--r--  fs/gfs2/rgrp.c | 13
-rw-r--r--  fs/inode.c | 4
-rw-r--r--  fs/namei.c | 192
-rw-r--r--  fs/nilfs2/the_nilfs.c | 7
-rw-r--r--  fs/udf/file.c | 2
25 files changed, 406 insertions, 120 deletions
diff --git a/fs/Kconfig b/fs/Kconfig
index d621f02a3f9..aa195265362 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -4,6 +4,10 @@
menu "File systems"
+# Use unaligned word dcache accesses
+config DCACHE_WORD_ACCESS
+ bool
+
if BLOCK
source "fs/ext2/Kconfig"
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index d2b0888126d..a306bb6d88d 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -109,7 +109,7 @@ struct afs_call {
unsigned reply_size; /* current size of reply */
unsigned first_offset; /* offset into mapping[first] */
unsigned last_to; /* amount of mapping[last] */
- unsigned short offset; /* offset into received data store */
+ unsigned offset; /* offset into received data store */
unsigned char unmarshall; /* unmarshalling phase */
bool incoming; /* T if incoming call */
bool send_pages; /* T if data from mapping should be sent */
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index e45a323aebb..8ad8c2a0703 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -314,6 +314,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
struct msghdr msg;
struct kvec iov[1];
int ret;
+ struct sk_buff *skb;
_enter("%x,{%d},", addr->s_addr, ntohs(call->port));
@@ -380,6 +381,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
error_do_abort:
rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
+ while ((skb = skb_dequeue(&call->rx_queue)))
+ afs_free_skb(skb);
rxrpc_kernel_end_call(rxcall);
call->rxcall = NULL;
error_kill_call:
diff --git a/fs/aio.c b/fs/aio.c
index 969beb0e223..b9d64d89a04 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -228,12 +228,6 @@ static void __put_ioctx(struct kioctx *ctx)
call_rcu(&ctx->rcu_head, ctx_rcu_free);
}
-static inline void get_ioctx(struct kioctx *kioctx)
-{
- BUG_ON(atomic_read(&kioctx->users) <= 0);
- atomic_inc(&kioctx->users);
-}
-
static inline int try_get_ioctx(struct kioctx *kioctx)
{
return atomic_inc_not_zero(&kioctx->users);
@@ -273,7 +267,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
mm = ctx->mm = current->mm;
atomic_inc(&mm->mm_count);
- atomic_set(&ctx->users, 1);
+ atomic_set(&ctx->users, 2);
spin_lock_init(&ctx->ctx_lock);
spin_lock_init(&ctx->ring_info.ring_lock);
init_waitqueue_head(&ctx->wait);
@@ -490,6 +484,8 @@ static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
kmem_cache_free(kiocb_cachep, req);
ctx->reqs_active--;
}
+ if (unlikely(!ctx->reqs_active && ctx->dead))
+ wake_up_all(&ctx->wait);
spin_unlock_irq(&ctx->ctx_lock);
}
@@ -607,11 +603,16 @@ static void aio_fput_routine(struct work_struct *data)
fput(req->ki_filp);
/* Link the iocb into the context's free list */
+ rcu_read_lock();
spin_lock_irq(&ctx->ctx_lock);
really_put_req(ctx, req);
+ /*
+ * at that point ctx might've been killed, but actual
+ * freeing is RCU'd
+ */
spin_unlock_irq(&ctx->ctx_lock);
+ rcu_read_unlock();
- put_ioctx(ctx);
spin_lock_irq(&fput_lock);
}
spin_unlock_irq(&fput_lock);
@@ -642,7 +643,6 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
* this function will be executed w/out any aio kthread wakeup.
*/
if (unlikely(!fput_atomic(req->ki_filp))) {
- get_ioctx(ctx);
spin_lock(&fput_lock);
list_add(&req->ki_list, &fput_head);
spin_unlock(&fput_lock);
@@ -1336,10 +1336,10 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
ret = PTR_ERR(ioctx);
if (!IS_ERR(ioctx)) {
ret = put_user(ioctx->user_id, ctxp);
- if (!ret)
+ if (!ret) {
+ put_ioctx(ioctx);
return 0;
-
- get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
+ }
io_destroy(ioctx);
}
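
The fs/aio.c hunks above drop get_ioctx() entirely: lookups go through try_get_ioctx(), which is built on atomic_inc_not_zero(), and ioctx_alloc() now starts the count at 2 (roughly one reference for the mm's context table and one returned to the io_setup() caller), while the new comment notes that the actual freeing is deferred by RCU. Below is a minimal userspace sketch of that take-a-reference-only-while-alive pattern, using C11 atomics in place of the kernel's atomic_t; the names are illustrative, not kernel APIs.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int users;
};

/* Take a reference only if the count is still non-zero, which is what
 * atomic_inc_not_zero() gives try_get_ioctx() in the patch above. */
static bool try_get(struct obj *o)
{
	int v = atomic_load(&o->users);

	while (v > 0) {
		if (atomic_compare_exchange_weak(&o->users, &v, v + 1))
			return true;
	}
	return false;			/* object is already on its way out */
}

static void put(struct obj *o)
{
	if (atomic_fetch_sub(&o->users, 1) == 1)
		printf("last reference dropped: free the object\n");
}

int main(void)
{
	struct obj ctx;

	atomic_init(&ctx.users, 2);	/* as ioctx_alloc() now does */

	if (try_get(&ctx))		/* a transient user, e.g. a lookup */
		put(&ctx);
	put(&ctx);			/* reference held by the context table */
	put(&ctx);			/* reference returned to the caller */
	return 0;
}
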
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index a6395bdb26a..1ff94054d35 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -259,6 +259,13 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
current->mm->free_area_cache = current->mm->mmap_base;
current->mm->cached_hole_size = 0;
+ retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
+ if (retval < 0) {
+ /* Someone check-me: is this error path enough? */
+ send_sig(SIGKILL, current, 0);
+ return retval;
+ }
+
install_exec_creds(bprm);
current->flags &= ~PF_FORKNOEXEC;
@@ -352,13 +359,6 @@ beyond_if:
return retval;
}
- retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
- if (retval < 0) {
- /* Someone check-me: is this error path enough? */
- send_sig(SIGKILL, current, 0);
- return retval;
- }
-
current->mm->start_stack =
(unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
#ifdef __alpha__
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index bcb884e2d61..07d096c4992 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 0e575d1304b..5e9f198f771 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1183,8 +1183,12 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
* The latter is necessary to prevent ghost
* partitions on a removed medium.
*/
- if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
- rescan_partitions(disk, bdev);
+ if (bdev->bd_invalidated) {
+ if (!ret)
+ rescan_partitions(disk, bdev);
+ else if (ret == -ENOMEDIUM)
+ invalidate_partitions(disk, bdev);
+ }
if (ret)
goto out_clear;
} else {
@@ -1214,8 +1218,12 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
if (bdev->bd_disk->fops->open)
ret = bdev->bd_disk->fops->open(bdev, mode);
/* the same as first opener case, read comment there */
- if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
- rescan_partitions(bdev->bd_disk, bdev);
+ if (bdev->bd_invalidated) {
+ if (!ret)
+ rescan_partitions(bdev->bd_disk, bdev);
+ else if (ret == -ENOMEDIUM)
+ invalidate_partitions(bdev->bd_disk, bdev);
+ }
if (ret)
goto out_unlock_bdev;
}
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 98f6bf10bbd..0436c12da8c 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -583,7 +583,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
struct btrfs_path *path;
struct btrfs_key info_key = { 0 };
struct btrfs_delayed_ref_root *delayed_refs = NULL;
- struct btrfs_delayed_ref_head *head = NULL;
+ struct btrfs_delayed_ref_head *head;
int info_level = 0;
int ret;
struct list_head prefs_delayed;
@@ -607,6 +607,8 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
* at a specified point in time
*/
again:
+ head = NULL;
+
ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
if (ret < 0)
goto out;
@@ -635,8 +637,10 @@ again:
goto again;
}
ret = __add_delayed_refs(head, seq, &info_key, &prefs_delayed);
- if (ret)
+ if (ret) {
+ spin_unlock(&delayed_refs->lock);
goto out;
+ }
}
spin_unlock(&delayed_refs->lock);
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 2373b39a132..22db04550f6 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -305,7 +305,7 @@ again:
spin_lock(&fs_info->reada_lock);
ret = radix_tree_insert(&dev->reada_zones,
- (unsigned long)zone->end >> PAGE_CACHE_SHIFT,
+ (unsigned long)(zone->end >> PAGE_CACHE_SHIFT),
zone);
spin_unlock(&fs_info->reada_lock);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 63a196b97d5..bc7e24420ac 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -584,10 +584,26 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
* If either that or op not supported returned, follow
* the normal lookup.
*/
- if ((rc == 0) || (rc == -ENOENT))
+ switch (rc) {
+ case 0:
+ /*
+ * The server may allow us to open things like
+ * FIFOs, but the client isn't set up to deal
+ * with that. If it's not a regular file, just
+ * close it and proceed as if it were a normal
+ * lookup.
+ */
+ if (newInode && !S_ISREG(newInode->i_mode)) {
+ CIFSSMBClose(xid, pTcon, fileHandle);
+ break;
+ }
+ case -ENOENT:
posix_open = true;
- else if ((rc == -EINVAL) || (rc != -EOPNOTSUPP))
+ case -EOPNOTSUPP:
+ break;
+ default:
pTcon->broken_posix_open = true;
+ }
}
if (!posix_open)
rc = cifs_get_inode_info_unix(&newInode, full_path,
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 4dd9283885e..5e64748a291 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -920,16 +920,26 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
for (lockp = &inode->i_flock; *lockp != NULL; \
lockp = &(*lockp)->fl_next)
+struct lock_to_push {
+ struct list_head llist;
+ __u64 offset;
+ __u64 length;
+ __u32 pid;
+ __u16 netfid;
+ __u8 type;
+};
+
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct file_lock *flock, **before;
- struct cifsLockInfo *lck, *tmp;
+ unsigned int count = 0, i = 0;
int rc = 0, xid, type;
+ struct list_head locks_to_send, *el;
+ struct lock_to_push *lck, *tmp;
__u64 length;
- struct list_head locks_to_send;
xid = GetXid();
@@ -940,29 +950,55 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
return rc;
}
+ lock_flocks();
+ cifs_for_each_lock(cfile->dentry->d_inode, before) {
+ if ((*before)->fl_flags & FL_POSIX)
+ count++;
+ }
+ unlock_flocks();
+
INIT_LIST_HEAD(&locks_to_send);
+ /*
+ * Allocating count locks is enough because no locks can be added to
+ * the list while we are holding cinode->lock_mutex that protects
+ * locking operations of this inode.
+ */
+ for (; i < count; i++) {
+ lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
+ if (!lck) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ list_add_tail(&lck->llist, &locks_to_send);
+ }
+
+ i = 0;
+ el = locks_to_send.next;
lock_flocks();
cifs_for_each_lock(cfile->dentry->d_inode, before) {
+ if (el == &locks_to_send) {
+ /* something is really wrong */
+ cERROR(1, "Can't push all brlocks!");
+ break;
+ }
flock = *before;
+ if ((flock->fl_flags & FL_POSIX) == 0)
+ continue;
length = 1 + flock->fl_end - flock->fl_start;
if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
type = CIFS_RDLCK;
else
type = CIFS_WRLCK;
-
- lck = cifs_lock_init(flock->fl_start, length, type,
- cfile->netfid);
- if (!lck) {
- rc = -ENOMEM;
- goto send_locks;
- }
+ lck = list_entry(el, struct lock_to_push, llist);
lck->pid = flock->fl_pid;
-
- list_add_tail(&lck->llist, &locks_to_send);
+ lck->netfid = cfile->netfid;
+ lck->length = length;
+ lck->type = type;
+ lck->offset = flock->fl_start;
+ i++;
+ el = el->next;
}
-
-send_locks:
unlock_flocks();
list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
@@ -979,11 +1015,18 @@ send_locks:
kfree(lck);
}
+out:
cinode->can_cache_brlcks = false;
mutex_unlock(&cinode->lock_mutex);
FreeXid(xid);
return rc;
+err_out:
+ list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
+ list_del(&lck->llist);
+ kfree(lck);
+ }
+ goto out;
}
static int
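
The rewritten cifs_push_posix_locks() above avoids allocating while lock_flocks() is held: it first counts the POSIX locks under the lock, then allocates that many lock_to_push entries with GFP_KERNEL outside it (safe, as the comment notes, because lock_mutex keeps the list from growing), and finally walks the locks again to fill in the preallocated entries. A rough userspace analogue of that count / preallocate / fill shape, with a pthread spinlock standing in for lock_flocks(); all names here are made up for illustration.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int value; struct node *next; };

static pthread_spinlock_t list_lock;	/* stand-in for lock_flocks() */
static struct node *live_list;		/* list we must not allocate under */

/* Copy live_list without calling malloc() while list_lock is held. */
static struct node *snapshot(void)
{
	struct node *copies = NULL, *c, *n;
	unsigned int count = 0, i;

	pthread_spin_lock(&list_lock);		/* pass 1: just count */
	for (n = live_list; n; n = n->next)
		count++;
	pthread_spin_unlock(&list_lock);

	for (i = 0; i < count; i++) {		/* allocate outside the lock */
		c = malloc(sizeof(*c));
		if (!c) {
			while (copies) { c = copies->next; free(copies); copies = c; }
			return NULL;
		}
		c->next = copies;
		copies = c;
	}

	pthread_spin_lock(&list_lock);		/* pass 2: fill the preallocated nodes */
	for (n = live_list, c = copies; n && c; n = n->next, c = c->next)
		c->value = n->value;
	pthread_spin_unlock(&list_lock);
	return copies;
}

int main(void)
{
	struct node a = { 1, NULL }, b = { 2, &a }, c = { 3, &b };
	struct node *s;

	pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);
	live_list = &c;
	for (s = snapshot(); s; s = s->next)	/* snapshot leaked for brevity */
		printf("%d\n", s->value);
	return 0;
}
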
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index a5f54b7d982..745da3d0653 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -534,6 +534,11 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode;
fattr->cf_dtype = DT_DIR;
+ /*
+ * Server can return wrong NumberOfLinks value for directories
+ * when Unix extensions are disabled - fake it.
+ */
+ fattr->cf_nlink = 2;
} else {
fattr->cf_mode = S_IFREG | cifs_sb->mnt_file_mode;
fattr->cf_dtype = DT_REG;
@@ -541,9 +546,9 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
/* clear write bits if ATTR_READONLY is set */
if (fattr->cf_cifsattrs & ATTR_READONLY)
fattr->cf_mode &= ~(S_IWUGO);
- }
- fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
+ fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
+ }
fattr->cf_uid = cifs_sb->mnt_uid;
fattr->cf_gid = cifs_sb->mnt_gid;
@@ -1322,7 +1327,6 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
}
/*BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID ) to see if need
to set uid/gid */
- inc_nlink(inode);
cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb);
cifs_fill_uniqueid(inode->i_sb, &fattr);
@@ -1355,7 +1359,6 @@ mkdir_retry_old:
d_drop(direntry);
} else {
mkdir_get_info:
- inc_nlink(inode);
if (pTcon->unix_ext)
rc = cifs_get_inode_info_unix(&newinode, full_path,
inode->i_sb, xid);
@@ -1436,6 +1439,11 @@ mkdir_get_info:
}
}
mkdir_out:
+ /*
+ * Force revalidate to get parent dir info when needed since cached
+ * attributes are invalid now.
+ */
+ CIFS_I(inode)->time = 0;
kfree(full_path);
FreeXid(xid);
cifs_put_tlink(tlink);
@@ -1475,7 +1483,6 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
cifs_put_tlink(tlink);
if (!rc) {
- drop_nlink(inode);
spin_lock(&direntry->d_inode->i_lock);
i_size_write(direntry->d_inode, 0);
clear_nlink(direntry->d_inode);
@@ -1483,12 +1490,15 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
}
cifsInode = CIFS_I(direntry->d_inode);
- cifsInode->time = 0; /* force revalidate to go get info when
- needed */
+ /* force revalidate to go get info when needed */
+ cifsInode->time = 0;
cifsInode = CIFS_I(inode);
- cifsInode->time = 0; /* force revalidate to get parent dir info
- since cached search results now invalid */
+ /*
+ * Force revalidate to get parent dir info when needed since cached
+ * attributes are invalid now.
+ */
+ cifsInode->time = 0;
direntry->d_inode->i_ctime = inode->i_ctime = inode->i_mtime =
current_fs_time(inode->i_sb);
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 45f07c46f3e..10d92cf57ab 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -105,7 +105,6 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
struct cifs_tcon *pTcon;
struct super_block *sb;
char *full_path;
- struct cifs_ntsd *pacl;
if (direntry == NULL)
return -EIO;
@@ -164,23 +163,24 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
} else if (strncmp(ea_name, CIFS_XATTR_CIFS_ACL,
strlen(CIFS_XATTR_CIFS_ACL)) == 0) {
+#ifdef CONFIG_CIFS_ACL
+ struct cifs_ntsd *pacl;
pacl = kmalloc(value_size, GFP_KERNEL);
if (!pacl) {
cFYI(1, "%s: Can't allocate memory for ACL",
__func__);
rc = -ENOMEM;
} else {
-#ifdef CONFIG_CIFS_ACL
memcpy(pacl, ea_value, value_size);
rc = set_cifs_acl(pacl, value_size,
direntry->d_inode, full_path, CIFS_ACL_DACL);
if (rc == 0) /* force revalidate of the inode */
CIFS_I(direntry->d_inode)->time = 0;
kfree(pacl);
+ }
#else
cFYI(1, "Set CIFS ACL not supported yet");
#endif /* CONFIG_CIFS_ACL */
- }
} else {
int temp;
temp = strncmp(ea_name, POSIX_ACL_XATTR_ACCESS,
diff --git a/fs/dcache.c b/fs/dcache.c
index fe19ac13f75..11828de68dc 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -104,11 +104,11 @@ static unsigned int d_hash_shift __read_mostly;
static struct hlist_bl_head *dentry_hashtable __read_mostly;
-static inline struct hlist_bl_head *d_hash(struct dentry *parent,
- unsigned long hash)
+static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
+ unsigned int hash)
{
- hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
- hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
+ hash += (unsigned long) parent / L1_CACHE_BYTES;
+ hash = hash + (hash >> D_HASHBITS);
return dentry_hashtable + (hash & D_HASHMASK);
}
@@ -137,6 +137,49 @@ int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
}
#endif
+/*
+ * Compare 2 name strings, return 0 if they match, otherwise non-zero.
+ * The strings are both count bytes long, and count is non-zero.
+ */
+static inline int dentry_cmp(const unsigned char *cs, size_t scount,
+ const unsigned char *ct, size_t tcount)
+{
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+ unsigned long a,b,mask;
+
+ if (unlikely(scount != tcount))
+ return 1;
+
+ for (;;) {
+ a = *(unsigned long *)cs;
+ b = *(unsigned long *)ct;
+ if (tcount < sizeof(unsigned long))
+ break;
+ if (unlikely(a != b))
+ return 1;
+ cs += sizeof(unsigned long);
+ ct += sizeof(unsigned long);
+ tcount -= sizeof(unsigned long);
+ if (!tcount)
+ return 0;
+ }
+ mask = ~(~0ul << tcount*8);
+ return unlikely(!!((a ^ b) & mask));
+#else
+ if (scount != tcount)
+ return 1;
+
+ do {
+ if (*cs != *ct)
+ return 1;
+ cs++;
+ ct++;
+ tcount--;
+ } while (tcount);
+ return 0;
+#endif
+}
+
static void __d_free(struct rcu_head *head)
{
struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
@@ -1717,8 +1760,9 @@ EXPORT_SYMBOL(d_add_ci);
* child is looked up. Thus, an interlocking stepping of sequence lock checks
* is formed, giving integrity down the path walk.
*/
-struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
- unsigned *seq, struct inode **inode)
+struct dentry *__d_lookup_rcu(const struct dentry *parent,
+ const struct qstr *name,
+ unsigned *seqp, struct inode **inode)
{
unsigned int len = name->len;
unsigned int hash = name->hash;
@@ -1748,6 +1792,7 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
* See Documentation/filesystems/path-lookup.txt for more details.
*/
hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
+ unsigned seq;
struct inode *i;
const char *tname;
int tlen;
@@ -1756,7 +1801,7 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
continue;
seqretry:
- *seq = read_seqcount_begin(&dentry->d_seq);
+ seq = read_seqcount_begin(&dentry->d_seq);
if (dentry->d_parent != parent)
continue;
if (d_unhashed(dentry))
@@ -1771,7 +1816,7 @@ seqretry:
* edge of memory when walking. If we could load this
* atomically some other way, we could drop this check.
*/
- if (read_seqcount_retry(&dentry->d_seq, *seq))
+ if (read_seqcount_retry(&dentry->d_seq, seq))
goto seqretry;
if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
if (parent->d_op->d_compare(parent, *inode,
@@ -1788,6 +1833,7 @@ seqretry:
* order to do anything useful with the returned dentry
* anyway.
*/
+ *seqp = seq;
*inode = i;
return dentry;
}
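
The dentry_cmp() added above compares names one unsigned long at a time and masks off the bytes beyond the end of the name in the final word with ~(~0ul << tcount*8), which is why DCACHE_WORD_ACCESS is only meant for little-endian machines with cheap unaligned loads. A standalone little-endian userspace check of that tail-mask comparison follows; it reproduces only the arithmetic, not the kernel function, and memcpy() stands in for the deliberate unaligned loads.

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Word-at-a-time compare of two names of the same length 'tcount',
 * little-endian only, mirroring the dentry_cmp() arithmetic above. */
static int name_cmp(const unsigned char *cs, const unsigned char *ct,
		    size_t tcount)
{
	unsigned long a, b, mask;

	for (;;) {
		memcpy(&a, cs, sizeof(a));
		memcpy(&b, ct, sizeof(b));
		if (tcount < sizeof(unsigned long))
			break;
		if (a != b)
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	/* keep only the low 'tcount' bytes of the final word */
	mask = ~(~0ul << tcount * 8);
	return !!((a ^ b) & mask);
}

int main(void)
{
	/* oversized, zero-padded buffers so the word loads stay in bounds */
	unsigned char a1[16] = "dcache.c", a2[16] = "dcache.c", a3[16] = "dcache.X";
	unsigned char b1[16] = "inodeAAA", b2[16] = "inodeBBB";

	assert(name_cmp(a1, a2, 8) == 0);
	assert(name_cmp(a1, a3, 8) != 0);
	assert(name_cmp(b1, b2, 5) == 0);	/* bytes past length 5 are masked off */
	assert(name_cmp(b1, b2, 6) != 0);
	puts("ok");
	return 0;
}
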
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
index 349209dc6a9..3a06f4043df 100644
--- a/fs/ecryptfs/miscdev.c
+++ b/fs/ecryptfs/miscdev.c
@@ -429,7 +429,7 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
goto memdup;
} else if (count < MIN_MSG_PKT_SIZE || count > MAX_MSG_PKT_SIZE) {
printk(KERN_WARNING "%s: Acceptable packet size range is "
- "[%d-%lu], but amount of data written is [%zu].",
+ "[%d-%zu], but amount of data written is [%zu].",
__func__, MIN_MSG_PKT_SIZE, MAX_MSG_PKT_SIZE, count);
return -EINVAL;
}
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index ea54cdef04d..4d9d3a45e35 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -988,6 +988,10 @@ static int path_count[PATH_ARR_SIZE];
static int path_count_inc(int nests)
{
+ /* Allow an arbitrary number of depth 1 paths */
+ if (nests == 0)
+ return 0;
+
if (++path_count[nests] > path_limits[nests])
return -1;
return 0;
diff --git a/fs/exec.c b/fs/exec.c
index 92ce83a11e9..153dee14fe5 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1915,7 +1915,6 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
- struct completion *vfork_done;
int core_waiters = -EBUSY;
init_completion(&core_state->startup);
@@ -1927,22 +1926,9 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
core_waiters = zap_threads(tsk, mm, core_state, exit_code);
up_write(&mm->mmap_sem);
- if (unlikely(core_waiters < 0))
- goto fail;
-
- /*
- * Make sure nobody is waiting for us to release the VM,
- * otherwise we can deadlock when we wait on each other
- */
- vfork_done = tsk->vfork_done;
- if (vfork_done) {
- tsk->vfork_done = NULL;
- complete(vfork_done);
- }
-
- if (core_waiters)
+ if (core_waiters > 0)
wait_for_completion(&core_state->startup);
-fail:
+
return core_waiters;
}
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 376816fcd04..351a3e79778 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -167,14 +167,19 @@ void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
spin_unlock(&lru_lock);
}
-static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
+static void __gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
- spin_lock(&lru_lock);
if (!list_empty(&gl->gl_lru)) {
list_del_init(&gl->gl_lru);
atomic_dec(&lru_count);
clear_bit(GLF_LRU, &gl->gl_flags);
}
+}
+
+static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
+{
+ spin_lock(&lru_lock);
+ __gfs2_glock_remove_from_lru(gl);
spin_unlock(&lru_lock);
}
@@ -217,11 +222,12 @@ void gfs2_glock_put(struct gfs2_glock *gl)
struct gfs2_sbd *sdp = gl->gl_sbd;
struct address_space *mapping = gfs2_glock2aspace(gl);
- if (atomic_dec_and_test(&gl->gl_ref)) {
+ if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
+ __gfs2_glock_remove_from_lru(gl);
+ spin_unlock(&lru_lock);
spin_lock_bucket(gl->gl_hash);
hlist_bl_del_rcu(&gl->gl_list);
spin_unlock_bucket(gl->gl_hash);
- gfs2_glock_remove_from_lru(gl);
GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
trace_gfs2_glock_put(gl);
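
gfs2_glock_put() above moves from atomic_dec_and_test() to atomic_dec_and_lock(), so that when the reference count reaches zero the lru_lock is already held and the glock can be pulled off the LRU before anything else can find it there. Below is a userspace sketch of that dec-and-lock idea with C11 atomics and a pthread spinlock; it is illustrative only, though like the kernel primitive it only touches the lock when the count may actually hit zero.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_spinlock_t lru_lock;

/* Drop a reference; return true, with 'lock' held, iff it was the last one. */
static bool dec_and_lock(atomic_int *count, pthread_spinlock_t *lock)
{
	int v = atomic_load(count);

	/* fast path: not the last reference, never touch the lock */
	while (v > 1) {
		if (atomic_compare_exchange_weak(count, &v, v - 1))
			return false;
	}
	pthread_spin_lock(lock);
	if (atomic_fetch_sub(count, 1) == 1)
		return true;		/* caller unlocks after teardown */
	pthread_spin_unlock(lock);
	return false;
}

int main(void)
{
	atomic_int refs;

	pthread_spin_init(&lru_lock, PTHREAD_PROCESS_PRIVATE);
	atomic_init(&refs, 2);

	if (!dec_and_lock(&refs, &lru_lock))
		printf("not the last reference\n");
	if (dec_and_lock(&refs, &lru_lock)) {
		printf("last reference: remove from LRU, then unlock\n");
		pthread_spin_unlock(&lru_lock);
	}
	return 0;
}
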
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index a7d611b93f0..56987460cda 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -391,10 +391,6 @@ static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
int error;
int dblocks = 1;
- error = gfs2_rindex_update(sdp);
- if (error)
- fs_warn(sdp, "rindex update returns %d\n", error);
-
error = gfs2_inplace_reserve(dip, RES_DINODE);
if (error)
goto out;
@@ -1043,6 +1039,7 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
if (!rgd)
goto out_inodes;
+
gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 6aacf3f230a..24f609c9ef9 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -800,6 +800,11 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo)
fs_err(sdp, "can't get quota file inode: %d\n", error);
goto fail_rindex;
}
+
+ error = gfs2_rindex_update(sdp);
+ if (error)
+ goto fail_qinode;
+
return 0;
fail_qinode:
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 981bfa32121..49ada95209d 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -683,16 +683,21 @@ int gfs2_rindex_update(struct gfs2_sbd *sdp)
struct gfs2_glock *gl = ip->i_gl;
struct gfs2_holder ri_gh;
int error = 0;
+ int unlock_required = 0;
/* Read new copy from disk if we don't have the latest */
if (!sdp->sd_rindex_uptodate) {
mutex_lock(&sdp->sd_rindex_mutex);
- error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
- if (error)
- return error;
+ if (!gfs2_glock_is_locked_by_me(gl)) {
+ error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
+ if (error)
+ return error;
+ unlock_required = 1;
+ }
if (!sdp->sd_rindex_uptodate)
error = gfs2_ri_update(ip);
- gfs2_glock_dq_uninit(&ri_gh);
+ if (unlock_required)
+ gfs2_glock_dq_uninit(&ri_gh);
mutex_unlock(&sdp->sd_rindex_mutex);
}
diff --git a/fs/inode.c b/fs/inode.c
index d3ebdbe723d..83ab215baab 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -938,8 +938,7 @@ void lockdep_annotate_inode_mutex_key(struct inode *inode)
struct file_system_type *type = inode->i_sb->s_type;
/* Set new key only if filesystem hasn't already changed it */
- if (!lockdep_match_class(&inode->i_mutex,
- &type->i_mutex_key)) {
+ if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) {
/*
* ensure nobody is actually holding i_mutex
*/
@@ -966,6 +965,7 @@ void unlock_new_inode(struct inode *inode)
spin_lock(&inode->i_lock);
WARN_ON(!(inode->i_state & I_NEW));
inode->i_state &= ~I_NEW;
+ smp_mb();
wake_up_bit(&inode->i_state, __I_NEW);
spin_unlock(&inode->i_lock);
}
diff --git a/fs/namei.c b/fs/namei.c
index a780ea515c4..fa96a26d329 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1375,6 +1375,156 @@ static inline int can_lookup(struct inode *inode)
}
/*
+ * We can do the critical dentry name comparison and hashing
+ * operations one word at a time, but we are limited to:
+ *
+ * - Architectures with fast unaligned word accesses. We could
+ * do a "get_unaligned()" if this helps and is sufficiently
+ * fast.
+ *
+ * - Little-endian machines (so that we can generate the mask
+ * of low bytes efficiently). Again, we *could* do a byte
+ * swapping load on big-endian architectures if that is not
+ * expensive enough to make the optimization worthless.
+ *
+ * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
+ * do not trap on the (extremely unlikely) case of a page
+ * crossing operation.
+ *
+ * - Furthermore, we need an efficient 64-bit compile for the
+ * 64-bit case in order to generate the "number of bytes in
+ * the final mask". Again, that could be replaced with a
+ * efficient population count instruction or similar.
+ */
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
+ */
+static inline long count_masked_bytes(unsigned long mask)
+{
+ return mask*0x0001020304050608 >> 56;
+}
+
+static inline unsigned int fold_hash(unsigned long hash)
+{
+ hash += hash >> (8*sizeof(int));
+ return hash;
+}
+
+#else /* 32-bit case */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+ /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+ long a = (0x0ff0001+mask) >> 23;
+ /* Fix the 1 for 00 case */
+ return a & mask;
+}
+
+#define fold_hash(x) (x)
+
+#endif
+
+unsigned int full_name_hash(const unsigned char *name, unsigned int len)
+{
+ unsigned long a, mask;
+ unsigned long hash = 0;
+
+ for (;;) {
+ a = *(unsigned long *)name;
+ hash *= 9;
+ if (len < sizeof(unsigned long))
+ break;
+ hash += a;
+ name += sizeof(unsigned long);
+ len -= sizeof(unsigned long);
+ if (!len)
+ goto done;
+ }
+ mask = ~(~0ul << len*8);
+ hash += mask & a;
+done:
+ return fold_hash(hash);
+}
+EXPORT_SYMBOL(full_name_hash);
+
+#define ONEBYTES 0x0101010101010101ul
+#define SLASHBYTES 0x2f2f2f2f2f2f2f2ful
+#define HIGHBITS 0x8080808080808080ul
+
+/* Return the high bit set in the first byte that is a zero */
+static inline unsigned long has_zero(unsigned long a)
+{
+ return ((a - ONEBYTES) & ~a) & HIGHBITS;
+}
+
+/*
+ * Calculate the length and hash of the path component, and
+ * return the length of the component;
+ */
+static inline unsigned long hash_name(const char *name, unsigned int *hashp)
+{
+ unsigned long a, mask, hash, len;
+
+ hash = a = 0;
+ len = -sizeof(unsigned long);
+ do {
+ hash = (hash + a) * 9;
+ len += sizeof(unsigned long);
+ a = *(unsigned long *)(name+len);
+ /* Do we have any NUL or '/' bytes in this word? */
+ mask = has_zero(a) | has_zero(a ^ SLASHBYTES);
+ } while (!mask);
+
+ /* The mask *below* the first high bit set */
+ mask = (mask - 1) & ~mask;
+ mask >>= 7;
+ hash += a & mask;
+ *hashp = fold_hash(hash);
+
+ return len + count_masked_bytes(mask);
+}
+
+#else
+
+unsigned int full_name_hash(const unsigned char *name, unsigned int len)
+{
+ unsigned long hash = init_name_hash();
+ while (len--)
+ hash = partial_name_hash(*name++, hash);
+ return end_name_hash(hash);
+}
+EXPORT_SYMBOL(full_name_hash);
+
+/*
+ * We know there's a real path component here of at least
+ * one character.
+ */
+static inline unsigned long hash_name(const char *name, unsigned int *hashp)
+{
+ unsigned long hash = init_name_hash();
+ unsigned long len = 0, c;
+
+ c = (unsigned char)*name;
+ do {
+ len++;
+ hash = partial_name_hash(c, hash);
+ c = (unsigned char)name[len];
+ } while (c && c != '/');
+ *hashp = end_name_hash(hash);
+ return len;
+}
+
+#endif
+
+/*
* Name resolution.
* This is the basic name resolution function, turning a pathname into
* the final dentry. We expect 'base' to be positive and a directory.
@@ -1394,31 +1544,22 @@ static int link_path_walk(const char *name, struct nameidata *nd)
/* At this point we know we have a real path component. */
for(;;) {
- unsigned long hash;
struct qstr this;
- unsigned int c;
+ long len;
int type;
err = may_lookup(nd);
if (err)
break;
+ len = hash_name(name, &this.hash);
this.name = name;
- c = *(const unsigned char *)name;
-
- hash = init_name_hash();
- do {
- name++;
- hash = partial_name_hash(c, hash);
- c = *(const unsigned char *)name;
- } while (c && (c != '/'));
- this.len = name - (const char *) this.name;
- this.hash = end_name_hash(hash);
+ this.len = len;
type = LAST_NORM;
- if (this.name[0] == '.') switch (this.len) {
+ if (name[0] == '.') switch (len) {
case 2:
- if (this.name[1] == '.') {
+ if (name[1] == '.') {
type = LAST_DOTDOT;
nd->flags |= LOOKUP_JUMPED;
}
@@ -1437,12 +1578,18 @@ static int link_path_walk(const char *name, struct nameidata *nd)
}
}
- /* remove trailing slashes? */
- if (!c)
+ if (!name[len])
goto last_component;
- while (*++name == '/');
- if (!*name)
+ /*
+ * If it wasn't NUL, we know it was '/'. Skip that
+ * slash, and continue until no more slashes.
+ */
+ do {
+ len++;
+ } while (unlikely(name[len] == '/'));
+ if (!name[len])
goto last_component;
+ name += len;
err = walk_component(nd, &next, &this, type, LOOKUP_FOLLOW);
if (err < 0)
@@ -1775,24 +1922,21 @@ static struct dentry *lookup_hash(struct nameidata *nd)
struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
{
struct qstr this;
- unsigned long hash;
unsigned int c;
WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex));
this.name = name;
this.len = len;
+ this.hash = full_name_hash(name, len);
if (!len)
return ERR_PTR(-EACCES);
- hash = init_name_hash();
while (len--) {
c = *(const unsigned char *)name++;
if (c == '/' || c == '\0')
return ERR_PTR(-EACCES);
- hash = partial_name_hash(c, hash);
}
- this.hash = end_name_hash(hash);
/*
* See if the low-level filesystem might want
* to use its own hash..
@@ -2140,7 +2284,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
/* sayonara */
error = complete_walk(nd);
if (error)
- return ERR_PTR(-ECHILD);
+ return ERR_PTR(error);
error = -ENOTDIR;
if (nd->flags & LOOKUP_DIRECTORY) {
@@ -2239,7 +2383,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
/* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
error = complete_walk(nd);
if (error)
- goto exit;
+ return ERR_PTR(error);
error = -EISDIR;
if (S_ISDIR(nd->inode->i_mode))
goto exit;
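
The long comment introducing the fs/namei.c word-at-a-time code above lists its preconditions (cheap unaligned loads, little-endian byte order, no DEBUG_PAGEALLOC); the machinery itself is has_zero(), which flags each zero byte of a word, and count_masked_bytes(), which turns the resulting byte mask into a length. The following standalone test (64-bit little-endian only, userspace, with memcpy() in place of the kernel's raw unaligned loads) walks a path component the same way hash_name() does, minus the hashing.

#include <stdio.h>
#include <string.h>

#define ONEBYTES   0x0101010101010101ul
#define SLASHBYTES 0x2f2f2f2f2f2f2f2ful
#define HIGHBITS   0x8080808080808080ul

/* High bit set in every byte of 'a' that is zero, as in the patch above. */
static unsigned long has_zero(unsigned long a)
{
	return ((a - ONEBYTES) & ~a) & HIGHBITS;
}

/* 64-bit variant from the patch: bytes below the first marked byte. */
static long count_masked_bytes(unsigned long mask)
{
	return mask * 0x0001020304050608 >> 56;
}

/* Length of the leading path component of 'name' (up to '/' or NUL). */
static unsigned long component_len(const char *name)
{
	unsigned long a, mask, len = 0;

	for (;;) {
		memcpy(&a, name + len, sizeof(a));	/* kernel uses an unaligned load */
		/* any NUL or '/' bytes in this word? */
		mask = has_zero(a) | has_zero(a ^ SLASHBYTES);
		if (mask)
			break;
		len += sizeof(unsigned long);
	}
	mask = (mask - 1) & ~mask;	/* bits strictly below the first marked bit */
	mask >>= 7;
	return len + count_masked_bytes(mask);
}

int main(void)
{
	/* oversized buffers so the 8-byte reads stay in bounds */
	char p1[32] = "namei.c/rest";
	char p2[32] = "fs";

	printf("%lu %lu\n", component_len(p1), component_len(p2));	/* prints: 7 2 */
	return 0;
}
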
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index d3271409437..501b7f8b739 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -409,6 +409,12 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block);
nilfs->ns_r_segments_percentage =
le32_to_cpu(sbp->s_r_segments_percentage);
+ if (nilfs->ns_r_segments_percentage < 1 ||
+ nilfs->ns_r_segments_percentage > 99) {
+ printk(KERN_ERR "NILFS: invalid reserved segments percentage.\n");
+ return -EINVAL;
+ }
+
nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments));
nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
return 0;
@@ -515,6 +521,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
brelse(sbh[1]);
sbh[1] = NULL;
sbp[1] = NULL;
+ valid[1] = 0;
swp = 0;
}
if (!valid[swp]) {
diff --git a/fs/udf/file.c b/fs/udf/file.c
index dca0c3881e8..d567b8448df 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -201,12 +201,10 @@ out:
static int udf_release_file(struct inode *inode, struct file *filp)
{
if (filp->f_mode & FMODE_WRITE) {
- mutex_lock(&inode->i_mutex);
down_write(&UDF_I(inode)->i_data_sem);
udf_discard_prealloc(inode);
udf_truncate_tail_extent(inode);
up_write(&UDF_I(inode)->i_data_sem);
- mutex_unlock(&inode->i_mutex);
}
return 0;
}