Diffstat (limited to 'fs')
-rw-r--r--  fs/Kconfig             |   4
-rw-r--r--  fs/afs/internal.h      |   2
-rw-r--r--  fs/afs/rxrpc.c         |   3
-rw-r--r--  fs/block_dev.c         |  16
-rw-r--r--  fs/cifs/file.c         |  69
-rw-r--r--  fs/cifs/xattr.c        |   6
-rw-r--r--  fs/dcache.c            |  29
-rw-r--r--  fs/eventpoll.c         |   4
-rw-r--r--  fs/inode.c             |   4
-rw-r--r--  fs/namei.c             | 126
-rw-r--r--  fs/nilfs2/the_nilfs.c  |   7
-rw-r--r--  fs/udf/file.c          |   2
12 files changed, 242 insertions(+), 30 deletions(-)
diff --git a/fs/Kconfig b/fs/Kconfig
index d621f02a3f9..aa195265362 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -4,6 +4,10 @@
menu "File systems"
+# Use unaligned word dcache accesses
+config DCACHE_WORD_ACCESS
+ bool
+
if BLOCK
source "fs/ext2/Kconfig"
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index d2b0888126d..a306bb6d88d 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -109,7 +109,7 @@ struct afs_call {
unsigned reply_size; /* current size of reply */
unsigned first_offset; /* offset into mapping[first] */
unsigned last_to; /* amount of mapping[last] */
- unsigned short offset; /* offset into received data store */
+ unsigned offset; /* offset into received data store */
unsigned char unmarshall; /* unmarshalling phase */
bool incoming; /* T if incoming call */
bool send_pages; /* T if data from mapping should be sent */
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index e45a323aebb..8ad8c2a0703 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -314,6 +314,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
struct msghdr msg;
struct kvec iov[1];
int ret;
+ struct sk_buff *skb;
_enter("%x,{%d},", addr->s_addr, ntohs(call->port));
@@ -380,6 +381,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
error_do_abort:
rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
+ while ((skb = skb_dequeue(&call->rx_queue)))
+ afs_free_skb(skb);
rxrpc_kernel_end_call(rxcall);
call->rxcall = NULL;
error_kill_call:
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 0e575d1304b..5e9f198f771 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1183,8 +1183,12 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
* The latter is necessary to prevent ghost
* partitions on a removed medium.
*/
- if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
- rescan_partitions(disk, bdev);
+ if (bdev->bd_invalidated) {
+ if (!ret)
+ rescan_partitions(disk, bdev);
+ else if (ret == -ENOMEDIUM)
+ invalidate_partitions(disk, bdev);
+ }
if (ret)
goto out_clear;
} else {
@@ -1214,8 +1218,12 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
if (bdev->bd_disk->fops->open)
ret = bdev->bd_disk->fops->open(bdev, mode);
/* the same as first opener case, read comment there */
- if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
- rescan_partitions(bdev->bd_disk, bdev);
+ if (bdev->bd_invalidated) {
+ if (!ret)
+ rescan_partitions(bdev->bd_disk, bdev);
+ else if (ret == -ENOMEDIUM)
+ invalidate_partitions(bdev->bd_disk, bdev);
+ }
if (ret)
goto out_unlock_bdev;
}
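Both hunks apply the same policy at the two open paths: reread the partition table only when a medium is actually present, and drop the stale table when the driver reports no medium, so ghost partitions cannot survive a media removal. The policy reads more clearly as a single helper; this is an illustrative sketch only, not a function that exists in block_dev.c:

/* Illustrative sketch of the policy applied at both call sites. */
static void handle_invalidated(struct gendisk *disk,
			       struct block_device *bdev, int open_ret)
{
	if (!bdev->bd_invalidated)
		return;
	if (open_ret == 0)
		rescan_partitions(disk, bdev);		/* medium present: reread table */
	else if (open_ret == -ENOMEDIUM)
		invalidate_partitions(disk, bdev);	/* medium gone: drop ghosts */
	/* any other error: leave the partition state alone */
}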
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 4dd9283885e..5e64748a291 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -920,16 +920,26 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
for (lockp = &inode->i_flock; *lockp != NULL; \
lockp = &(*lockp)->fl_next)
+struct lock_to_push {
+ struct list_head llist;
+ __u64 offset;
+ __u64 length;
+ __u32 pid;
+ __u16 netfid;
+ __u8 type;
+};
+
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct file_lock *flock, **before;
- struct cifsLockInfo *lck, *tmp;
+ unsigned int count = 0, i = 0;
int rc = 0, xid, type;
+ struct list_head locks_to_send, *el;
+ struct lock_to_push *lck, *tmp;
__u64 length;
- struct list_head locks_to_send;
xid = GetXid();
@@ -940,29 +950,55 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
return rc;
}
+ lock_flocks();
+ cifs_for_each_lock(cfile->dentry->d_inode, before) {
+ if ((*before)->fl_flags & FL_POSIX)
+ count++;
+ }
+ unlock_flocks();
+
INIT_LIST_HEAD(&locks_to_send);
+ /*
+ * Allocating count locks is enough because no FL_POSIX locks can be
+ * added to the list while we hold cinode->lock_mutex, which protects
+ * the locking operations of this inode.
+ */
+ for (; i < count; i++) {
+ lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
+ if (!lck) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ list_add_tail(&lck->llist, &locks_to_send);
+ }
+
+ i = 0;
+ el = locks_to_send.next;
lock_flocks();
cifs_for_each_lock(cfile->dentry->d_inode, before) {
flock = *before;
+ if ((flock->fl_flags & FL_POSIX) == 0)
+ continue;
+ if (el == &locks_to_send) {
+ /*
+ * The list ended. We don't have enough allocated
+ * structures - something is really wrong.
+ */
+ cERROR(1, "Can't push all brlocks!");
+ break;
+ }
length = 1 + flock->fl_end - flock->fl_start;
if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
type = CIFS_RDLCK;
else
type = CIFS_WRLCK;
-
- lck = cifs_lock_init(flock->fl_start, length, type,
- cfile->netfid);
- if (!lck) {
- rc = -ENOMEM;
- goto send_locks;
- }
+ lck = list_entry(el, struct lock_to_push, llist);
lck->pid = flock->fl_pid;
-
- list_add_tail(&lck->llist, &locks_to_send);
+ lck->netfid = cfile->netfid;
+ lck->length = length;
+ lck->type = type;
+ lck->offset = flock->fl_start;
+ i++;
+ el = el->next;
}
-
-send_locks:
unlock_flocks();
list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
@@ -979,11 +1015,18 @@ send_locks:
kfree(lck);
}
+out:
cinode->can_cache_brlcks = false;
mutex_unlock(&cinode->lock_mutex);
FreeXid(xid);
return rc;
+err_out:
+ list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
+ list_del(&lck->llist);
+ kfree(lck);
+ }
+ goto out;
}
static int
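The rule behind this fix generalizes beyond CIFS: lock_flocks() takes a spinlock, and kmalloc(GFP_KERNEL) may sleep, so the allocation has to move out from under it. A condensed sketch of the resulting count/preallocate/fill pattern; item, state_lock, count_entries() and fill_entries() are hypothetical stand-ins, and the pattern is only safe when the count cannot grow between the two locked sections (here guaranteed by cinode->lock_mutex):

struct item {
	struct list_head llist;
	int payload;
};

static int snapshot_entries(struct list_head *out)
{
	struct item *it, *tmp;
	unsigned int count, i;

	spin_lock(&state_lock);		/* pass 1: count only, no sleeping here */
	count = count_entries();
	spin_unlock(&state_lock);

	for (i = 0; i < count; i++) {	/* preallocate where sleeping is fine */
		it = kmalloc(sizeof(*it), GFP_KERNEL);
		if (!it)
			goto err_free;
		list_add_tail(&it->llist, out);
	}

	spin_lock(&state_lock);		/* pass 2: fill the preallocated nodes */
	fill_entries(out);
	spin_unlock(&state_lock);
	return 0;

err_free:
	list_for_each_entry_safe(it, tmp, out, llist) {
		list_del(&it->llist);
		kfree(it);
	}
	return -ENOMEM;
}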
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 45f07c46f3e..10d92cf57ab 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -105,7 +105,6 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
struct cifs_tcon *pTcon;
struct super_block *sb;
char *full_path;
- struct cifs_ntsd *pacl;
if (direntry == NULL)
return -EIO;
@@ -164,23 +163,24 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
} else if (strncmp(ea_name, CIFS_XATTR_CIFS_ACL,
strlen(CIFS_XATTR_CIFS_ACL)) == 0) {
+#ifdef CONFIG_CIFS_ACL
+ struct cifs_ntsd *pacl;
pacl = kmalloc(value_size, GFP_KERNEL);
if (!pacl) {
cFYI(1, "%s: Can't allocate memory for ACL",
__func__);
rc = -ENOMEM;
} else {
-#ifdef CONFIG_CIFS_ACL
memcpy(pacl, ea_value, value_size);
rc = set_cifs_acl(pacl, value_size,
direntry->d_inode, full_path, CIFS_ACL_DACL);
if (rc == 0) /* force revalidate of the inode */
CIFS_I(direntry->d_inode)->time = 0;
kfree(pacl);
+ }
#else
cFYI(1, "Set CIFS ACL not supported yet");
#endif /* CONFIG_CIFS_ACL */
- }
} else {
int temp;
temp = strncmp(ea_name, POSIX_ACL_XATTR_ACCESS,
diff --git a/fs/dcache.c b/fs/dcache.c
index bcbdb33fcc2..11828de68dc 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -105,10 +105,10 @@ static unsigned int d_hash_shift __read_mostly;
static struct hlist_bl_head *dentry_hashtable __read_mostly;
static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
- unsigned long hash)
+ unsigned int hash)
{
- hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
- hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
+ hash += (unsigned long) parent / L1_CACHE_BYTES;
+ hash = hash + (hash >> D_HASHBITS);
return dentry_hashtable + (hash & D_HASHMASK);
}
@@ -144,6 +144,28 @@ int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
static inline int dentry_cmp(const unsigned char *cs, size_t scount,
const unsigned char *ct, size_t tcount)
{
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+ unsigned long a, b, mask;
+
+ if (unlikely(scount != tcount))
+ return 1;
+
+ for (;;) {
+ a = *(unsigned long *)cs;
+ b = *(unsigned long *)ct;
+ if (tcount < sizeof(unsigned long))
+ break;
+ if (unlikely(a != b))
+ return 1;
+ cs += sizeof(unsigned long);
+ ct += sizeof(unsigned long);
+ tcount -= sizeof(unsigned long);
+ if (!tcount)
+ return 0;
+ }
+ mask = ~(~0ul << tcount*8);
+ return unlikely(!!((a ^ b) & mask));
+#else
if (scount != tcount)
return 1;
@@ -155,6 +177,7 @@ static inline int dentry_cmp(const unsigned char *cs, size_t scount,
tcount--;
} while (tcount);
return 0;
+#endif
}
static void __d_free(struct rcu_head *head)
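The tail handling is the subtle part: the final word is loaded whole, and ~(~0ul << tcount*8) masks it down to the bytes still inside the name. That mask keeps the low-order bytes, which on little-endian machines are the first bytes of the string; hence the restriction to little-endian architectures with cheap unaligned loads. A standalone userspace rendition for experimentation, with memcpy() standing in for the kernel's raw word load and 8-byte buffers so the full-word reads stay in bounds:

#include <stdio.h>
#include <string.h>

static int wordcmp(const unsigned char *cs, size_t scount,
		   const unsigned char *ct, size_t tcount)
{
	unsigned long a, b, mask;

	if (scount != tcount)
		return 1;

	for (;;) {
		memcpy(&a, cs, sizeof(a));	/* stand-in for the raw unaligned load */
		memcpy(&b, ct, sizeof(b));
		if (tcount < sizeof(unsigned long))
			break;
		if (a != b)
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	mask = ~(~0ul << tcount*8);	/* little-endian: low bytes = first bytes */
	return !!((a ^ b) & mask);
}

int main(void)
{
	unsigned char s1[8] = "foobar1";
	unsigned char s2[8] = "foobar2";

	printf("%d\n", wordcmp(s1, 6, s2, 6));	/* 0: strings differ only past length 6 */
	printf("%d\n", wordcmp(s1, 7, s2, 7));	/* 1: strings differ at byte 6 */
	return 0;
}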
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index ea54cdef04d..4d9d3a45e35 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -988,6 +988,10 @@ static int path_count[PATH_ARR_SIZE];
static int path_count_inc(int nests)
{
+ /* Allow an arbitrary number of depth 1 paths */
+ if (nests == 0)
+ return 0;
+
if (++path_count[nests] > path_limits[nests])
return -1;
return 0;
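The early return makes depth-1 wakeup paths effectively unlimited while deeper nesting stays capped. For context, this is how the check plausibly composes with the per-depth limit table; the table values are assumed from the surrounding eventpoll.c, not shown in this hunk:

#define PATH_ARR_SIZE 5
/* assumed caps per nesting depth (depths 1..5) */
static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
static int path_count[PATH_ARR_SIZE];

static int path_count_inc(int nests)
{
	if (nests == 0)			/* depth-1 paths: uncounted */
		return 0;
	if (++path_count[nests] > path_limits[nests])
		return -1;		/* too many paths at this depth */
	return 0;
}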
diff --git a/fs/inode.c b/fs/inode.c
index d3ebdbe723d..83ab215baab 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -938,8 +938,7 @@ void lockdep_annotate_inode_mutex_key(struct inode *inode)
struct file_system_type *type = inode->i_sb->s_type;
/* Set new key only if filesystem hasn't already changed it */
- if (!lockdep_match_class(&inode->i_mutex,
- &type->i_mutex_key)) {
+ if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) {
/*
* ensure nobody is actually holding i_mutex
*/
@@ -966,6 +965,7 @@ void unlock_new_inode(struct inode *inode)
spin_lock(&inode->i_lock);
WARN_ON(!(inode->i_state & I_NEW));
inode->i_state &= ~I_NEW;
+ smp_mb();
wake_up_bit(&inode->i_state, __I_NEW);
spin_unlock(&inode->i_lock);
}
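The barrier matters because wake_up_bit() checks the bit waitqueue for sleepers and skips the wakeup when it looks empty; the store clearing I_NEW must be visible before that check, or a task concurrently committing itself to sleep can be missed. A sketch of the pairing, with the waiter side assumed from the usual wait_on_inode() shape elsewhere in the VFS rather than quoted from this patch:

/* waker (this hunk) */
inode->i_state &= ~I_NEW;
smp_mb();		/* order the clear before the waitqueue check */
wake_up_bit(&inode->i_state, __I_NEW);

/* waiter (assumed shape) */
wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);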
diff --git a/fs/namei.c b/fs/namei.c
index e2ba62820a0..fa96a26d329 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1374,6 +1374,126 @@ static inline int can_lookup(struct inode *inode)
return 1;
}
+/*
+ * We can do the critical dentry name comparison and hashing
+ * operations one word at a time, but we are limited to:
+ *
+ * - Architectures with fast unaligned word accesses. We could
+ * do a "get_unaligned()" if this helps and is sufficiently
+ * fast.
+ *
+ * - Little-endian machines (so that we can generate the mask
+ * of low bytes efficiently). Again, we *could* do a byte
+ * swapping load on big-endian architectures if that is not
+ * expensive enough to make the optimization worthless.
+ *
+ * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
+ * do not trap on the (extremely unlikely) case of a page
+ * crossing operation).
+ *
+ * - Furthermore, we need an efficient 64-bit compile for the
+ * 64-bit case in order to generate the "number of bytes in
+ * the final mask". Again, that could be replaced with an
+ * efficient population count instruction or similar.
+ */
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
+ */
+static inline long count_masked_bytes(unsigned long mask)
+{
+ return mask*0x0001020304050608 >> 56;
+}
+
+static inline unsigned int fold_hash(unsigned long hash)
+{
+ hash += hash >> (8*sizeof(int));
+ return hash;
+}
+
+#else /* 32-bit case */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+ /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+ long a = (0x0ff0001+mask) >> 23;
+ /* Fix the 1 for 00 case */
+ return a & mask;
+}
+
+#define fold_hash(x) (x)
+
+#endif
+
+unsigned int full_name_hash(const unsigned char *name, unsigned int len)
+{
+ unsigned long a, mask;
+ unsigned long hash = 0;
+
+ for (;;) {
+ a = *(unsigned long *)name;
+ hash *= 9;
+ if (len < sizeof(unsigned long))
+ break;
+ hash += a;
+ name += sizeof(unsigned long);
+ len -= sizeof(unsigned long);
+ if (!len)
+ goto done;
+ }
+ mask = ~(~0ul << len*8);
+ hash += mask & a;
+done:
+ return fold_hash(hash);
+}
+EXPORT_SYMBOL(full_name_hash);
+
+#define ONEBYTES 0x0101010101010101ul
+#define SLASHBYTES 0x2f2f2f2f2f2f2f2ful
+#define HIGHBITS 0x8080808080808080ul
+
+/* Return the high bit set in the first byte that is a zero */
+static inline unsigned long has_zero(unsigned long a)
+{
+ return ((a - ONEBYTES) & ~a) & HIGHBITS;
+}
+
+/*
+ * Calculate the length and hash of the path component, and
+ * return the length of the component.
+ */
+static inline unsigned long hash_name(const char *name, unsigned int *hashp)
+{
+ unsigned long a, mask, hash, len;
+
+ hash = a = 0;
+ len = -sizeof(unsigned long);
+ do {
+ hash = (hash + a) * 9;
+ len += sizeof(unsigned long);
+ a = *(unsigned long *)(name+len);
+ /* Do we have any NUL or '/' bytes in this word? */
+ mask = has_zero(a) | has_zero(a ^ SLASHBYTES);
+ } while (!mask);
+
+ /* The mask *below* the first high bit set */
+ mask = (mask - 1) & ~mask;
+ mask >>= 7;
+ hash += a & mask;
+ *hashp = fold_hash(hash);
+
+ return len + count_masked_bytes(mask);
+}
+
+#else
+
unsigned int full_name_hash(const unsigned char *name, unsigned int len)
{
unsigned long hash = init_name_hash();
@@ -1402,6 +1522,8 @@ static inline unsigned long hash_name(const char *name, unsigned int *hashp)
return len;
}
+#endif
+
/*
* Name resolution.
* This is the basic name resolution function, turning a pathname into
@@ -2162,7 +2284,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
/* sayonara */
error = complete_walk(nd);
if (error)
- return ERR_PTR(-ECHILD);
+ return ERR_PTR(error);
error = -ENOTDIR;
if (nd->flags & LOOKUP_DIRECTORY) {
@@ -2261,7 +2383,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
/* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
error = complete_walk(nd);
if (error)
- goto exit;
+ return ERR_PTR(error);
error = -EISDIR;
if (S_ISDIR(nd->inode->i_mode))
goto exit;
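Everything above the #else in the namei.c hunk can be exercised from userspace on a 64-bit little-endian machine. Below is a self-contained rendition of has_zero()/count_masked_bytes()/hash_name() as added above; memcpy() stands in for the kernel's raw word load, and the buffer is padded so reading a whole trailing word stays in bounds (the in-kernel code instead relies on !CONFIG_DEBUG_PAGEALLOC, per the comment in the hunk):

#include <stdio.h>
#include <string.h>

#define ONEBYTES   0x0101010101010101ul
#define SLASHBYTES 0x2f2f2f2f2f2f2f2ful
#define HIGHBITS   0x8080808080808080ul

/* High bit set in each byte of 'a' that is zero. */
static unsigned long has_zero(unsigned long a)
{
	return ((a - ONEBYTES) & ~a) & HIGHBITS;
}

/* Number of full low bytes covered by a byte mask. */
static long count_masked_bytes(unsigned long mask)
{
	return mask * 0x0001020304050608ul >> 56;
}

static unsigned int fold_hash(unsigned long hash)
{
	hash += hash >> (8 * sizeof(int));
	return hash;
}

/* Length and hash of one '/'- or NUL-terminated path component. */
static unsigned long hash_name(const char *name, unsigned int *hashp)
{
	unsigned long a, mask, hash, len;

	hash = a = 0;
	len = -sizeof(unsigned long);
	do {
		hash = (hash + a) * 9;
		len += sizeof(unsigned long);
		memcpy(&a, name + len, sizeof(a));	/* kernel does a raw load */
		mask = has_zero(a) | has_zero(a ^ SLASHBYTES);
	} while (!mask);

	mask = (mask - 1) & ~mask;	/* bits below the first terminator byte */
	mask >>= 7;			/* high bits -> full byte masks */
	hash += a & mask;
	*hashp = fold_hash(hash);
	return len + count_masked_bytes(mask);
}

int main(void)
{
	char path[32] = "usr/bin/gcc";	/* padded: trailing word reads stay in bounds */
	unsigned int hash;
	unsigned long len = hash_name(path, &hash);

	printf("component len=%lu hash=%08x\n", len, hash);	/* len=3 ("usr") */
	return 0;
}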
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index d3271409437..501b7f8b739 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -409,6 +409,12 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block);
nilfs->ns_r_segments_percentage =
le32_to_cpu(sbp->s_r_segments_percentage);
+ if (nilfs->ns_r_segments_percentage < 1 ||
+ nilfs->ns_r_segments_percentage > 99) {
+ printk(KERN_ERR "NILFS: invalid reserved segments percentage.\n");
+ return -EINVAL;
+ }
+
nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments));
nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
return 0;
@@ -515,6 +521,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
brelse(sbh[1]);
sbh[1] = NULL;
sbp[1] = NULL;
+ valid[1] = 0;
swp = 0;
}
if (!valid[swp]) {
diff --git a/fs/udf/file.c b/fs/udf/file.c
index dca0c3881e8..d567b8448df 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -201,12 +201,10 @@ out:
static int udf_release_file(struct inode *inode, struct file *filp)
{
if (filp->f_mode & FMODE_WRITE) {
- mutex_lock(&inode->i_mutex);
down_write(&UDF_I(inode)->i_data_sem);
udf_discard_prealloc(inode);
udf_truncate_tail_extent(inode);
up_write(&UDF_I(inode)->i_data_sem);
- mutex_unlock(&inode->i_mutex);
}
return 0;
}