Diffstat (limited to 'fs')
-rw-r--r--  fs/anon_inodes.c                 |   4
-rw-r--r--  fs/binfmt_elf_fdpic.c            |   4
-rw-r--r--  fs/compat_ioctl.c                |   2
-rw-r--r--  fs/eventfd.c                     |   2
-rw-r--r--  fs/eventpoll.c                   |   2
-rw-r--r--  fs/ext3/inode.c                  |   8
-rw-r--r--  fs/ext3/namei.c                  |  28
-rw-r--r--  fs/ext3/resize.c                 |  35
-rw-r--r--  fs/ext3/super.c                  |  19
-rw-r--r--  fs/ext4/ext4.h                   |   6
-rw-r--r--  fs/ext4/inode.c                  |  36
-rw-r--r--  fs/ext4/mballoc.c                |   6
-rw-r--r--  fs/ext4/super.c                  |   5
-rw-r--r--  fs/file_table.c                  |   4
-rw-r--r--  fs/internal.h                    |   7
-rw-r--r--  fs/jbd/journal.c                 |   2
-rw-r--r--  fs/jbd2/journal.c                |   3
-rw-r--r--  fs/jfs/super.c                   |   2
-rw-r--r--  fs/namei.c                       |   8
-rw-r--r--  fs/nfsd/nfsfh.c                  |   2
-rw-r--r--  fs/ocfs2/Kconfig                 |  10
-rw-r--r--  fs/ocfs2/Makefile                |   7
-rw-r--r--  fs/ocfs2/acl.c                   |   4
-rw-r--r--  fs/ocfs2/acl.h                   |  22
-rw-r--r--  fs/ocfs2/alloc.c                 |  10
-rw-r--r--  fs/ocfs2/alloc.h                 |   5
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c     |   6
-rw-r--r--  fs/ocfs2/cluster/nodemanager.c   |  51
-rw-r--r--  fs/ocfs2/cluster/nodemanager.h   |   7
-rw-r--r--  fs/ocfs2/cluster/quorum.c        |  16
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c       |  18
-rw-r--r--  fs/ocfs2/extent_map.c            |  25
-rw-r--r--  fs/ocfs2/namei.c                 |   6
-rw-r--r--  fs/ocfs2/ocfs2.h                 |   8
-rw-r--r--  fs/ocfs2/ocfs2_fs.h              |   2
-rw-r--r--  fs/ocfs2/refcounttree.c          | 150
-rw-r--r--  fs/ocfs2/stack_user.c            |   2
-rw-r--r--  fs/ocfs2/super.c                 |  95
-rw-r--r--  fs/ocfs2/symlink.c               |   2
-rw-r--r--  fs/ocfs2/xattr.c                 |   6
-rw-r--r--  fs/ocfs2/xattr.h                 |   2
-rw-r--r--  fs/open.c                        |  13
-rw-r--r--  fs/quota/dquot.c                 | 288
-rw-r--r--  fs/quota/quota_v2.c              |   9
-rw-r--r--  fs/signalfd.c                    |   2
-rw-r--r--  fs/stat.c                        |  10
-rw-r--r--  fs/super.c                       |   3
-rw-r--r--  fs/sysfs/bin.c                   |   6
-rw-r--r--  fs/timerfd.c                     |   2
49 files changed, 575 insertions(+), 397 deletions(-)
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 2c994591f4d..9f0bf13291e 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -121,13 +121,13 @@ struct file *anon_inode_getfile(const char *name,
d_instantiate(path.dentry, anon_inode_inode);
error = -ENFILE;
- file = alloc_file(&path, FMODE_READ | FMODE_WRITE, fops);
+ file = alloc_file(&path, OPEN_FMODE(flags), fops);
if (!file)
goto err_dput;
file->f_mapping = anon_inode_inode->i_mapping;
file->f_pos = 0;
- file->f_flags = O_RDWR | (flags & O_NONBLOCK);
+ file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
file->f_version = 0;
file->private_data = priv;
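
The anon_inodes change above leans on the usual open-flag encoding: O_RDONLY, O_WRONLY and O_RDWR occupy the O_ACCMODE bits as 0, 1 and 2, so adding one yields exactly the FMODE_READ (0x1) and FMODE_WRITE (0x2) bits that OPEN_FMODE() hands to alloc_file(). Below is a minimal user-space sketch of that mapping; the FMODE_* values are assumptions mirroring include/linux/fs.h, and open_fmode() is a stand-in, not the kernel macro itself.

#include <fcntl.h>
#include <stdio.h>

#define FMODE_READ  0x1	/* assumed to match the kernel's fmode_t bits */
#define FMODE_WRITE 0x2

/* Same trick OPEN_FMODE() relies on: (flags + 1) & O_ACCMODE */
static unsigned int open_fmode(int flags)
{
	return (flags + 1) & O_ACCMODE;
}

int main(void)
{
	printf("O_RDONLY -> %#x\n", open_fmode(O_RDONLY)); /* 0x1 = FMODE_READ   */
	printf("O_WRONLY -> %#x\n", open_fmode(O_WRONLY)); /* 0x2 = FMODE_WRITE  */
	printf("O_RDWR   -> %#x\n", open_fmode(O_RDWR));   /* 0x3 = read | write */
	return 0;
}
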
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index c25256a5c5b..7dc85997e96 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1798,11 +1798,11 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
ELF_CORE_WRITE_EXTRA_DATA;
#endif
- if (file->f_pos != offset) {
+ if (cprm->file->f_pos != offset) {
/* Sanity check */
printk(KERN_WARNING
"elf_core_dump: file->f_pos (%lld) != offset (%lld)\n",
- file->f_pos, offset);
+ cprm->file->f_pos, offset);
}
end_coredump:
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 14cbc831422..332dd00f089 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -1600,8 +1600,6 @@ static long do_ioctl_trans(int fd, unsigned int cmd,
case KDSKBMETA:
case KDSKBLED:
case KDSETLED:
- /* SG stuff */
- case SG_SET_TRANSFORM:
/* AUTOFS */
case AUTOFS_IOC_READY:
case AUTOFS_IOC_FAIL:
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 8b47e4200e6..d26402ff06e 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -339,7 +339,7 @@ struct file *eventfd_file_create(unsigned int count, int flags)
ctx->flags = flags;
file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx,
- flags & EFD_SHARED_FCNTL_FLAGS);
+ O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
if (IS_ERR(file))
eventfd_free_ctx(ctx);
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 366c503f965..bd056a5b4ef 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1206,7 +1206,7 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
* a file structure and a free file descriptor.
*/
error = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep,
- flags & O_CLOEXEC);
+ O_RDWR | (flags & O_CLOEXEC));
if (error < 0)
ep_free(ep);
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index ad14227f509..455e6e6e5cb 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -970,7 +970,7 @@ static int ext3_get_block(struct inode *inode, sector_t iblock,
if (max_blocks > DIO_MAX_BLOCKS)
max_blocks = DIO_MAX_BLOCKS;
handle = ext3_journal_start(inode, DIO_CREDITS +
- 2 * EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb));
+ EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out;
@@ -3146,8 +3146,8 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr)
/* (user+group)*(old+new) structure, inode write (sb,
* inode block, ? - but truncate inode update has it) */
- handle = ext3_journal_start(inode, 2*(EXT3_QUOTA_INIT_BLOCKS(inode->i_sb)+
- EXT3_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
+ handle = ext3_journal_start(inode, EXT3_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
+ EXT3_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)+3);
if (IS_ERR(handle)) {
error = PTR_ERR(handle);
goto err_out;
@@ -3239,7 +3239,7 @@ static int ext3_writepage_trans_blocks(struct inode *inode)
#ifdef CONFIG_QUOTA
/* We know that structure was already allocated during vfs_dq_init so
* we will be updating only the data blocks + inodes */
- ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb);
+ ret += EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
#endif
return ret;
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index aad6400c9b7..7b0e44f7d66 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -1699,7 +1699,7 @@ static int ext3_create (struct inode * dir, struct dentry * dentry, int mode,
retry:
handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
- 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb));
+ EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -1733,7 +1733,7 @@ static int ext3_mknod (struct inode * dir, struct dentry *dentry,
retry:
handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
- 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb));
+ EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -1769,7 +1769,7 @@ static int ext3_mkdir(struct inode * dir, struct dentry * dentry, int mode)
retry:
handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
- 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb));
+ EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -1920,7 +1920,7 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode)
struct ext3_iloc iloc;
int err = 0, rc;
- lock_super(sb);
+ mutex_lock(&EXT3_SB(sb)->s_orphan_lock);
if (!list_empty(&EXT3_I(inode)->i_orphan))
goto out_unlock;
@@ -1929,9 +1929,13 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode)
/* @@@ FIXME: Observation from aviro:
* I think I can trigger J_ASSERT in ext3_orphan_add(). We block
- * here (on lock_super()), so race with ext3_link() which might bump
+ * here (on s_orphan_lock), so race with ext3_link() which might bump
* ->i_nlink. For, say it, character device. Not a regular file,
* not a directory, not a symlink and ->i_nlink > 0.
+ *
+ * tytso, 4/25/2009: I'm not sure how that could happen;
+ * shouldn't the fs core protect us from these sort of
+ * unlink()/link() races?
*/
J_ASSERT ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);
@@ -1968,7 +1972,7 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode)
jbd_debug(4, "orphan inode %lu will point to %d\n",
inode->i_ino, NEXT_ORPHAN(inode));
out_unlock:
- unlock_super(sb);
+ mutex_unlock(&EXT3_SB(sb)->s_orphan_lock);
ext3_std_error(inode->i_sb, err);
return err;
}
@@ -1986,11 +1990,9 @@ int ext3_orphan_del(handle_t *handle, struct inode *inode)
struct ext3_iloc iloc;
int err = 0;
- lock_super(inode->i_sb);
- if (list_empty(&ei->i_orphan)) {
- unlock_super(inode->i_sb);
- return 0;
- }
+ mutex_lock(&EXT3_SB(inode->i_sb)->s_orphan_lock);
+ if (list_empty(&ei->i_orphan))
+ goto out;
ino_next = NEXT_ORPHAN(inode);
prev = ei->i_orphan.prev;
@@ -2040,7 +2042,7 @@ int ext3_orphan_del(handle_t *handle, struct inode *inode)
out_err:
ext3_std_error(inode->i_sb, err);
out:
- unlock_super(inode->i_sb);
+ mutex_unlock(&EXT3_SB(inode->i_sb)->s_orphan_lock);
return err;
out_brelse:
@@ -2175,7 +2177,7 @@ static int ext3_symlink (struct inode * dir,
retry:
handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT3_INDEX_EXTRA_TRANS_BLOCKS + 5 +
- 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb));
+ EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 5f83b617917..54351ac7cef 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -209,7 +209,7 @@ static int setup_new_group_blocks(struct super_block *sb,
if (IS_ERR(handle))
return PTR_ERR(handle);
- lock_super(sb);
+ mutex_lock(&sbi->s_resize_lock);
if (input->group != sbi->s_groups_count) {
err = -EBUSY;
goto exit_journal;
@@ -324,7 +324,7 @@ exit_bh:
brelse(bh);
exit_journal:
- unlock_super(sb);
+ mutex_unlock(&sbi->s_resize_lock);
if ((err2 = ext3_journal_stop(handle)) && !err)
err = err2;
@@ -662,11 +662,12 @@ exit_free:
* important part is that the new block and inode counts are in the backup
* superblocks, and the location of the new group metadata in the GDT backups.
*
- * We do not need lock_super() for this, because these blocks are not
- * otherwise touched by the filesystem code when it is mounted. We don't
- * need to worry about last changing from sbi->s_groups_count, because the
- * worst that can happen is that we do not copy the full number of backups
- * at this time. The resize which changed s_groups_count will backup again.
+ * We do not need take the s_resize_lock for this, because these
+ * blocks are not otherwise touched by the filesystem code when it is
+ * mounted. We don't need to worry about last changing from
+ * sbi->s_groups_count, because the worst that can happen is that we
+ * do not copy the full number of backups at this time. The resize
+ * which changed s_groups_count will backup again.
*/
static void update_backups(struct super_block *sb,
int blk_off, char *data, int size)
@@ -825,7 +826,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
goto exit_put;
}
- lock_super(sb);
+ mutex_lock(&sbi->s_resize_lock);
if (input->group != sbi->s_groups_count) {
ext3_warning(sb, __func__,
"multiple resizers run on filesystem!");
@@ -856,7 +857,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
/*
* OK, now we've set up the new group. Time to make it active.
*
- * Current kernels don't lock all allocations via lock_super(),
+ * We do not lock all allocations via s_resize_lock
* so we have to be safe wrt. concurrent accesses the group
* data. So we need to be careful to set all of the relevant
* group descriptor data etc. *before* we enable the group.
@@ -900,12 +901,12 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
*
* The precise rules we use are:
*
- * * Writers of s_groups_count *must* hold lock_super
+ * * Writers of s_groups_count *must* hold s_resize_lock
* AND
* * Writers must perform a smp_wmb() after updating all dependent
* data and before modifying the groups count
*
- * * Readers must hold lock_super() over the access
+ * * Readers must hold s_resize_lock over the access
* OR
* * Readers must perform an smp_rmb() after reading the groups count
* and before reading any dependent data.
@@ -936,7 +937,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
ext3_journal_dirty_metadata(handle, sbi->s_sbh);
exit_journal:
- unlock_super(sb);
+ mutex_unlock(&sbi->s_resize_lock);
if ((err2 = ext3_journal_stop(handle)) && !err)
err = err2;
if (!err) {
@@ -973,7 +974,7 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
/* We don't need to worry about locking wrt other resizers just
* yet: we're going to revalidate es->s_blocks_count after
- * taking lock_super() below. */
+ * taking the s_resize_lock below. */
o_blocks_count = le32_to_cpu(es->s_blocks_count);
o_groups_count = EXT3_SB(sb)->s_groups_count;
@@ -1045,11 +1046,11 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
goto exit_put;
}
- lock_super(sb);
+ mutex_lock(&EXT3_SB(sb)->s_resize_lock);
if (o_blocks_count != le32_to_cpu(es->s_blocks_count)) {
ext3_warning(sb, __func__,
"multiple resizers run on filesystem!");
- unlock_super(sb);
+ mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
ext3_journal_stop(handle);
err = -EBUSY;
goto exit_put;
@@ -1059,13 +1060,13 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
EXT3_SB(sb)->s_sbh))) {
ext3_warning(sb, __func__,
"error %d on journal write access", err);
- unlock_super(sb);
+ mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
ext3_journal_stop(handle);
goto exit_put;
}
es->s_blocks_count = cpu_to_le32(o_blocks_count + add);
ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
- unlock_super(sb);
+ mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
ext3_debug("freeing blocks %lu through "E3FSBLK"\n", o_blocks_count,
o_blocks_count + add);
ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks);
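
The comment block rewritten in the hunk above spells out the lock-free ordering contract for s_groups_count: a writer fills in all dependent group data, issues smp_wmb(), and only then bumps the count, while a reader that skips s_resize_lock must read the count first and issue smp_rmb() before touching the data behind it. Here is a small standalone sketch of that publish pattern, using C11 release/acquire atomics in place of the kernel barriers and hypothetical names rather than the ext3 structures.

#include <stdatomic.h>

struct group_desc {
	unsigned long free_blocks;	/* dependent data, analogous to a group descriptor */
};

static struct group_desc groups[1024];
static atomic_uint groups_count;	/* plays the role of sbi->s_groups_count */

/* Writer (resize path): publish the new group only after its data is written. */
void publish_new_group(unsigned int idx, unsigned long free_blocks)
{
	groups[idx].free_blocks = free_blocks;		/* set up dependent data first */
	atomic_store_explicit(&groups_count, idx + 1,	/* release: data before count  */
			      memory_order_release);
}

/* Lock-free reader (allocator path): read the count, then the data behind it. */
unsigned long last_group_free_blocks(void)
{
	unsigned int n = atomic_load_explicit(&groups_count,	/* acquire: count before data */
					      memory_order_acquire);
	return n ? groups[n - 1].free_blocks : 0;
}
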
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 7ad1e8c30bd..afa2b569da1 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -1928,6 +1928,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
sb->dq_op = &ext3_quota_operations;
#endif
INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
+ mutex_init(&sbi->s_orphan_lock);
+ mutex_init(&sbi->s_resize_lock);
sb->s_root = NULL;
@@ -2014,14 +2016,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
}
ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY);
- /*
- * akpm: core read_super() calls in here with the superblock locked.
- * That deadlocks, because orphan cleanup needs to lock the superblock
- * in numerous places. Here we just pop the lock - it's relatively
- * harmless, because we are now ready to accept write_super() requests,
- * and aviro says that's the only reason for hanging onto the
- * superblock lock.
- */
+
EXT3_SB(sb)->s_mount_state |= EXT3_ORPHAN_FS;
ext3_orphan_cleanup(sb, es);
EXT3_SB(sb)->s_mount_state &= ~EXT3_ORPHAN_FS;
@@ -2403,13 +2398,11 @@ static void ext3_mark_recovery_complete(struct super_block * sb,
if (journal_flush(journal) < 0)
goto out;
- lock_super(sb);
if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) &&
sb->s_flags & MS_RDONLY) {
EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
ext3_commit_super(sb, es, 1);
}
- unlock_super(sb);
out:
journal_unlock_updates(journal);
@@ -2601,13 +2594,7 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
(sbi->s_mount_state & EXT3_VALID_FS))
es->s_state = cpu_to_le16(sbi->s_mount_state);
- /*
- * We have to unlock super so that we can wait for
- * transactions.
- */
- unlock_super(sb);
ext3_mark_recovery_complete(sb, es);
- lock_super(sb);
} else {
__le32 ret;
if ((ret = EXT3_HAS_RO_COMPAT_FEATURE(sb,
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index ab31e65d46d..56f9271ee8c 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -704,6 +704,10 @@ struct ext4_inode_info {
__u16 i_extra_isize;
spinlock_t i_block_reservation_lock;
+#ifdef CONFIG_QUOTA
+ /* quota space reservation, managed internally by quota code */
+ qsize_t i_reserved_quota;
+#endif
/* completed async DIOs that might need unwritten extents handling */
struct list_head i_aio_dio_complete_list;
@@ -1435,7 +1439,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
extern int ext4_block_truncate_page(handle_t *handle,
struct address_space *mapping, loff_t from);
extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
-extern qsize_t ext4_get_reserved_space(struct inode *inode);
+extern qsize_t *ext4_get_reserved_space(struct inode *inode);
extern int flush_aio_dio_completed_IO(struct inode *inode);
/* ioctl.c */
extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 5352db1a308..ab807963a61 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1003,17 +1003,12 @@ out:
return err;
}
-qsize_t ext4_get_reserved_space(struct inode *inode)
+#ifdef CONFIG_QUOTA
+qsize_t *ext4_get_reserved_space(struct inode *inode)
{
- unsigned long long total;
-
- spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
- total = EXT4_I(inode)->i_reserved_data_blocks +
- EXT4_I(inode)->i_reserved_meta_blocks;
- spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
-
- return (total << inode->i_blkbits);
+ return &EXT4_I(inode)->i_reserved_quota;
}
+#endif
/*
* Calculate the number of metadata blocks need to reserve
* to allocate @blocks for non extent file based file
@@ -1051,7 +1046,7 @@ static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
static void ext4_da_update_reserve_space(struct inode *inode, int used)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- int total, mdb, mdb_free;
+ int total, mdb, mdb_free, mdb_claim = 0;
spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
/* recalculate the number of metablocks still need to be reserved */
@@ -1064,7 +1059,9 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
if (mdb_free) {
/* Account for allocated meta_blocks */
- mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;
+ mdb_claim = EXT4_I(inode)->i_allocated_meta_blocks;
+ BUG_ON(mdb_free < mdb_claim);
+ mdb_free -= mdb_claim;
/* update fs dirty blocks counter */
percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
@@ -1075,8 +1072,11 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
/* update per-inode reservations */
BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
EXT4_I(inode)->i_reserved_data_blocks -= used;
+ percpu_counter_sub(&sbi->s_dirtyblocks_counter, used + mdb_claim);
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+ vfs_dq_claim_block(inode, used + mdb_claim);
+
/*
* free those over-booking quota for metadata blocks
*/
@@ -1816,19 +1816,17 @@ repeat:
md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
total = md_needed + nrblocks;
+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
/*
* Make quota reservation here to prevent quota overflow
* later. Real quota accounting is done at pages writeout
* time.
*/
- if (vfs_dq_reserve_block(inode, total)) {
- spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+ if (vfs_dq_reserve_block(inode, total))
return -EDQUOT;
- }
if (ext4_claim_free_blocks(sbi, total)) {
- spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
vfs_dq_release_reservation_block(inode, total);
if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
yield();
@@ -1836,10 +1834,11 @@ repeat:
}
return -ENOSPC;
}
+ spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
- EXT4_I(inode)->i_reserved_meta_blocks = mdblocks;
-
+ EXT4_I(inode)->i_reserved_meta_blocks += md_needed;
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+
return 0; /* success */
}
@@ -4794,6 +4793,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
inode->i_size = ext4_isize(raw_inode);
ei->i_disksize = inode->i_size;
+#ifdef CONFIG_QUOTA
+ ei->i_reserved_quota = 0;
+#endif
inode->i_generation = le32_to_cpu(raw_inode->i_generation);
ei->i_block_group = iloc.block_group;
ei->i_last_alloc_group = ~0;
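
With the rewritten ext4_get_reserved_space() above, the filesystem no longer reports a computed byte count; it exposes the address of a per-inode counter (i_reserved_quota) that the quota core reads and updates under inode->i_lock, as the dquot.c helpers later in this diff show. A minimal sketch of that split in ownership follows, with hypothetical names standing in for the kernel structures.

#include <pthread.h>

typedef long long qsize_t;

/* Filesystem side: only provides storage for the reservation counter. */
struct my_inode {
	pthread_mutex_t i_lock;		/* stands in for inode->i_lock */
	qsize_t i_reserved_quota;	/* managed by the quota core, not the fs */
};

static qsize_t *my_get_reserved_space(struct my_inode *inode)
{
	return &inode->i_reserved_quota;	/* hand back a pointer, not a value */
}

/* Quota side: the only code that modifies the counter, always under i_lock. */
static void my_inode_add_rsv_space(struct my_inode *inode, qsize_t number)
{
	pthread_mutex_lock(&inode->i_lock);
	*my_get_reserved_space(inode) += number;
	pthread_mutex_unlock(&inode->i_lock);
}

int main(void)
{
	struct my_inode inode = { .i_lock = PTHREAD_MUTEX_INITIALIZER };

	my_inode_add_rsv_space(&inode, 4096);	/* reserve one 4K block */
	return *my_get_reserved_space(&inode) == 4096 ? 0 : 1;
}
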
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index b1fd3daadc9..d34afad3e13 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2755,12 +2755,6 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
/* release all the reserved blocks if non delalloc */
percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
- else {
- percpu_counter_sub(&sbi->s_dirtyblocks_counter,
- ac->ac_b_ex.fe_len);
- /* convert reserved quota blocks to real quota blocks */
- vfs_dq_claim_block(ac->ac_inode, ac->ac_b_ex.fe_len);
- }
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi,
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 827bde1f259..6ed9aa91f27 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -704,6 +704,9 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
ei->i_allocated_meta_blocks = 0;
ei->i_delalloc_reserved_flag = 0;
spin_lock_init(&(ei->i_block_reservation_lock));
+#ifdef CONFIG_QUOTA
+ ei->i_reserved_quota = 0;
+#endif
INIT_LIST_HEAD(&ei->i_aio_dio_complete_list);
ei->cur_aio_dio = NULL;
ei->i_sync_tid = 0;
@@ -1014,7 +1017,9 @@ static const struct dquot_operations ext4_quota_operations = {
.reserve_space = dquot_reserve_space,
.claim_space = dquot_claim_space,
.release_rsv = dquot_release_reserved_space,
+#ifdef CONFIG_QUOTA
.get_reserved_space = ext4_get_reserved_space,
+#endif
.alloc_inode = dquot_alloc_inode,
.free_space = dquot_free_space,
.free_inode = dquot_free_inode,
diff --git a/fs/file_table.c b/fs/file_table.c
index 0afacf65439..69652c5bd5f 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -186,10 +186,8 @@ struct file *alloc_file(struct path *path, fmode_t mode,
* that we can do debugging checks at __fput()
*/
if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
- int error = 0;
file_take_write(file);
- error = mnt_clone_write(path->mnt);
- WARN_ON(error);
+ WARN_ON(mnt_clone_write(path->mnt));
}
ima_counts_get(file);
return file;
diff --git a/fs/internal.h b/fs/internal.h
index f67cd141d9a..e96a1667d74 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -85,3 +85,10 @@ extern struct file *get_empty_filp(void);
* super.c
*/
extern int do_remount_sb(struct super_block *, int, void *, int);
+
+/*
+ * open.c
+ */
+struct nameidata;
+extern struct file *nameidata_to_filp(struct nameidata *);
+extern void release_open_intent(struct nameidata *);
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 4160afad6d0..bd224eec9b0 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -1913,7 +1913,7 @@ static void __init jbd_create_debugfs_entry(void)
{
jbd_debugfs_dir = debugfs_create_dir("jbd", NULL);
if (jbd_debugfs_dir)
- jbd_debug = debugfs_create_u8("jbd-debug", S_IRUGO,
+ jbd_debug = debugfs_create_u8("jbd-debug", S_IRUGO | S_IWUSR,
jbd_debugfs_dir,
&journal_enable_debug);
}
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index b7ca3a92a4d..17af879e6e9 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -2115,7 +2115,8 @@ static void __init jbd2_create_debugfs_entry(void)
{
jbd2_debugfs_dir = debugfs_create_dir("jbd2", NULL);
if (jbd2_debugfs_dir)
- jbd2_debug = debugfs_create_u8(JBD2_DEBUG_NAME, S_IRUGO,
+ jbd2_debug = debugfs_create_u8(JBD2_DEBUG_NAME,
+ S_IRUGO | S_IWUSR,
jbd2_debugfs_dir,
&jbd2_journal_enable_debug);
}
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 2234c73fc57..d929a822a74 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -524,7 +524,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
* Page cache is indexed by long.
* I would use MAX_LFS_FILESIZE, but it's only half as big
*/
- sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, sb->s_maxbytes);
+ sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, (u64)sb->s_maxbytes);
#endif
sb->s_time_gran = 1;
return 0;
diff --git a/fs/namei.c b/fs/namei.c
index dad4b80257d..68921d9b530 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -37,8 +37,6 @@
#include "internal.h"
-#define ACC_MODE(x) ("\000\004\002\006"[(x)&O_ACCMODE])
-
/* [Feb-1997 T. Schoebel-Theuer]
* Fundamental changes in the pathname lookup mechanisms (namei)
* were necessary because of omirr. The reason is that omirr needs
@@ -1640,6 +1638,7 @@ struct file *do_filp_open(int dfd, const char *pathname,
if (filp == NULL)
return ERR_PTR(-ENFILE);
nd.intent.open.file = filp;
+ filp->f_flags = open_flag;
nd.intent.open.flags = flag;
nd.intent.open.create_mode = 0;
error = do_path_lookup(dfd, pathname,
@@ -1685,6 +1684,7 @@ struct file *do_filp_open(int dfd, const char *pathname,
if (filp == NULL)
goto exit_parent;
nd.intent.open.file = filp;
+ filp->f_flags = open_flag;
nd.intent.open.flags = flag;
nd.intent.open.create_mode = mode;
dir = nd.path.dentry;
@@ -1725,7 +1725,7 @@ do_last:
mnt_drop_write(nd.path.mnt);
goto exit;
}
- filp = nameidata_to_filp(&nd, open_flag);
+ filp = nameidata_to_filp(&nd);
mnt_drop_write(nd.path.mnt);
if (nd.root.mnt)
path_put(&nd.root);
@@ -1789,7 +1789,7 @@ ok:
mnt_drop_write(nd.path.mnt);
goto exit;
}
- filp = nameidata_to_filp(&nd, open_flag);
+ filp = nameidata_to_filp(&nd);
if (!IS_ERR(filp)) {
error = ima_path_check(&filp->f_path, filp->f_mode &
(MAY_READ | MAY_WRITE | MAY_EXEC));
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 1c12177b908..55c8e63af0b 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -89,7 +89,7 @@ static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp,
int flags = nfsexp_flags(rqstp, exp);
/* Check if the request originated from a secure port. */
- if (!rqstp->rq_secure && (flags & NFSEXP_INSECURE_PORT)) {
+ if (!rqstp->rq_secure && !(flags & NFSEXP_INSECURE_PORT)) {
RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
dprintk(KERN_WARNING
"nfsd: request from insecure port %s!\n",
diff --git a/fs/ocfs2/Kconfig b/fs/ocfs2/Kconfig
index 701b7a3a872..0d840669698 100644
--- a/fs/ocfs2/Kconfig
+++ b/fs/ocfs2/Kconfig
@@ -6,6 +6,7 @@ config OCFS2_FS
select CRC32
select QUOTA
select QUOTA_TREE
+ select FS_POSIX_ACL
help
OCFS2 is a general purpose extent based shared disk cluster file
system with many similarities to ext3. It supports 64 bit inode
@@ -74,12 +75,3 @@ config OCFS2_DEBUG_FS
This option will enable expensive consistency checks. Enable
this option for debugging only as it is likely to decrease
performance of the filesystem.
-
-config OCFS2_FS_POSIX_ACL
- bool "OCFS2 POSIX Access Control Lists"
- depends on OCFS2_FS
- select FS_POSIX_ACL
- default n
- help
- Posix Access Control Lists (ACLs) support permissions for users and
- groups beyond the owner/group/world scheme.
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile
index 31f25ce32c9..600d2d2ade1 100644
--- a/fs/ocfs2/Makefile
+++ b/fs/ocfs2/Makefile
@@ -39,11 +39,8 @@ ocfs2-objs := \
ver.o \
quota_local.o \
quota_global.o \
- xattr.o
-
-ifeq ($(CONFIG_OCFS2_FS_POSIX_ACL),y)
-ocfs2-objs += acl.o
-endif
+ xattr.o \
+ acl.o
ocfs2_stackglue-objs := stackglue.o
ocfs2_stack_o2cb-objs := stack_o2cb.o
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index e3e47415d85..0501974bedd 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -98,15 +98,11 @@ static struct posix_acl *ocfs2_get_acl_nolock(struct inode *inode,
int type,
struct buffer_head *di_bh)
{
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
int name_index;
char *value = NULL;
struct posix_acl *acl;
int retval;
- if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
- return NULL;
-
switch (type) {
case ACL_TYPE_ACCESS:
name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS;
diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h
index 8f6389ed4da..5c5d31f0585 100644
--- a/fs/ocfs2/acl.h
+++ b/fs/ocfs2/acl.h
@@ -26,8 +26,6 @@ struct ocfs2_acl_entry {
__le32 e_id;
};
-#ifdef CONFIG_OCFS2_FS_POSIX_ACL
-
extern int ocfs2_check_acl(struct inode *, int);
extern int ocfs2_acl_chmod(struct inode *);
extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *,
@@ -35,24 +33,4 @@ extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *,
struct ocfs2_alloc_context *,
struct ocfs2_alloc_context *);
-#else /* CONFIG_OCFS2_FS_POSIX_ACL*/
-
-#define ocfs2_check_acl NULL
-static inline int ocfs2_acl_chmod(struct inode *inode)
-{
- return 0;
-}
-static inline int ocfs2_init_acl(handle_t *handle,
- struct inode *inode,
- struct inode *dir,
- struct buffer_head *di_bh,
- struct buffer_head *dir_bh,
- struct ocfs2_alloc_context *meta_ac,
- struct ocfs2_alloc_context *data_ac)
-{
- return 0;
-}
-
-#endif /* CONFIG_OCFS2_FS_POSIX_ACL*/
-
#endif /* OCFS2_ACL_H */
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index fb4e672579b..d17bdc718f7 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1765,9 +1765,9 @@ set_and_inc:
*
* The array index of the subtree root is passed back.
*/
-static int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et,
- struct ocfs2_path *left,
- struct ocfs2_path *right)
+int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et,
+ struct ocfs2_path *left,
+ struct ocfs2_path *right)
{
int i = 0;
@@ -2872,8 +2872,8 @@ out:
* This looks similar, but is subtly different to
* ocfs2_find_cpos_for_left_leaf().
*/
-static int ocfs2_find_cpos_for_right_leaf(struct super_block *sb,
- struct ocfs2_path *path, u32 *cpos)
+int ocfs2_find_cpos_for_right_leaf(struct super_block *sb,
+ struct ocfs2_path *path, u32 *cpos)
{
int i, j, ret = 0;
u64 blkno;
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h
index 9c122d57446..1db4359ccb9 100644
--- a/fs/ocfs2/alloc.h
+++ b/fs/ocfs2/alloc.h
@@ -317,4 +317,9 @@ int ocfs2_path_bh_journal_access(handle_t *handle,
int ocfs2_journal_access_path(struct ocfs2_caching_info *ci,
handle_t *handle,
struct ocfs2_path *path);
+int ocfs2_find_cpos_for_right_leaf(struct super_block *sb,
+ struct ocfs2_path *path, u32 *cpos);
+int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et,
+ struct ocfs2_path *left,
+ struct ocfs2_path *right);
#endif /* OCFS2_ALLOC_H */
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index c452d116b89..eda5b8bcddd 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -176,7 +176,8 @@ static void o2hb_write_timeout(struct work_struct *work)
static void o2hb_arm_write_timeout(struct o2hb_region *reg)
{
- mlog(0, "Queue write timeout for %u ms\n", O2HB_MAX_WRITE_TIMEOUT_MS);
+ mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n",
+ O2HB_MAX_WRITE_TIMEOUT_MS);
cancel_delayed_work(&reg->hr_write_timeout_work);
reg->hr_last_timeout_start = jiffies;
@@ -874,7 +875,8 @@ static int o2hb_thread(void *data)
do_gettimeofday(&after_hb);
elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb);
- mlog(0, "start = %lu.%lu, end = %lu.%lu, msec = %u\n",
+ mlog(ML_HEARTBEAT,
+ "start = %lu.%lu, end = %lu.%lu, msec = %u\n",
before_hb.tv_sec, (unsigned long) before_hb.tv_usec,
after_hb.tv_sec, (unsigned long) after_hb.tv_usec,
elapsed_msec);
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
index 7ee6188bc79..c81142e3ef8 100644
--- a/fs/ocfs2/cluster/nodemanager.c
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -35,6 +35,10 @@
* cluster references throughout where nodes are looked up */
struct o2nm_cluster *o2nm_single_cluster = NULL;
+char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = {
+ "reset", /* O2NM_FENCE_RESET */
+ "panic", /* O2NM_FENCE_PANIC */
+};
struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
{
@@ -579,6 +583,43 @@ static ssize_t o2nm_cluster_attr_reconnect_delay_ms_write(
return o2nm_cluster_attr_write(page, count,
&cluster->cl_reconnect_delay_ms);
}
+
+static ssize_t o2nm_cluster_attr_fence_method_read(
+ struct o2nm_cluster *cluster, char *page)
+{
+ ssize_t ret = 0;
+
+ if (cluster)
+ ret = sprintf(page, "%s\n",
+ o2nm_fence_method_desc[cluster->cl_fence_method]);
+ return ret;
+}
+
+static ssize_t o2nm_cluster_attr_fence_method_write(
+ struct o2nm_cluster *cluster, const char *page, size_t count)
+{
+ unsigned int i;
+
+ if (page[count - 1] != '\n')
+ goto bail;
+
+ for (i = 0; i < O2NM_FENCE_METHODS; ++i) {
+ if (count != strlen(o2nm_fence_method_desc[i]) + 1)
+ continue;
+ if (strncasecmp(page, o2nm_fence_method_desc[i], count - 1))
+ continue;
+ if (cluster->cl_fence_method != i) {
+ printk(KERN_INFO "ocfs2: Changing fence method to %s\n",
+ o2nm_fence_method_desc[i]);
+ cluster->cl_fence_method = i;
+ }
+ return count;
+ }
+
+bail:
+ return -EINVAL;
+}
+
static struct o2nm_cluster_attribute o2nm_cluster_attr_idle_timeout_ms = {
.attr = { .ca_owner = THIS_MODULE,
.ca_name = "idle_timeout_ms",
@@ -603,10 +644,19 @@ static struct o2nm_cluster_attribute o2nm_cluster_attr_reconnect_delay_ms = {
.store = o2nm_cluster_attr_reconnect_delay_ms_write,
};
+static struct o2nm_cluster_attribute o2nm_cluster_attr_fence_method = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "fence_method",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = o2nm_cluster_attr_fence_method_read,
+ .store = o2nm_cluster_attr_fence_method_write,
+};
+
static struct configfs_attribute *o2nm_cluster_attrs[] = {
&o2nm_cluster_attr_idle_timeout_ms.attr,
&o2nm_cluster_attr_keepalive_delay_ms.attr,
&o2nm_cluster_attr_reconnect_delay_ms.attr,
+ &o2nm_cluster_attr_fence_method.attr,
NULL,
};
static ssize_t o2nm_cluster_show(struct config_item *item,
@@ -778,6 +828,7 @@ static struct config_group *o2nm_cluster_group_make_group(struct config_group *g
cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT;
cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT;
cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT;
+ cluster->cl_fence_method = O2NM_FENCE_RESET;
ret = &cluster->cl_group;
o2nm_single_cluster = cluster;
diff --git a/fs/ocfs2/cluster/nodemanager.h b/fs/ocfs2/cluster/nodemanager.h
index c992ea0da4a..09ea2d388bb 100644
--- a/fs/ocfs2/cluster/nodemanager.h
+++ b/fs/ocfs2/cluster/nodemanager.h
@@ -33,6 +33,12 @@
#include <linux/configfs.h>
#include <linux/rbtree.h>
+enum o2nm_fence_method {
+ O2NM_FENCE_RESET = 0,
+ O2NM_FENCE_PANIC,
+ O2NM_FENCE_METHODS, /* Number of fence methods */
+};
+
struct o2nm_node {
spinlock_t nd_lock;
struct config_item nd_item;
@@ -58,6 +64,7 @@ struct o2nm_cluster {
unsigned int cl_idle_timeout_ms;
unsigned int cl_keepalive_delay_ms;
unsigned int cl_reconnect_delay_ms;
+ enum o2nm_fence_method cl_fence_method;
/* this bitmap is part of a hack for disk bitmap.. will go eventually. - zab */
unsigned long cl_nodes_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
index bbacf7da48a..639024033fc 100644
--- a/fs/ocfs2/cluster/quorum.c
+++ b/fs/ocfs2/cluster/quorum.c
@@ -74,8 +74,20 @@ static void o2quo_fence_self(void)
* threads can still schedule, etc, etc */
o2hb_stop_all_regions();
- printk("ocfs2 is very sorry to be fencing this system by restarting\n");
- emergency_restart();
+ switch (o2nm_single_cluster->cl_fence_method) {
+ case O2NM_FENCE_PANIC:
+ panic("*** ocfs2 is very sorry to be fencing this system by "
+ "panicing ***\n");
+ break;
+ default:
+ WARN_ON(o2nm_single_cluster->cl_fence_method >=
+ O2NM_FENCE_METHODS);
+ case O2NM_FENCE_RESET:
+ printk(KERN_ERR "*** ocfs2 is very sorry to be fencing this "
+ "system by restarting ***\n");
+ emergency_restart();
+ break;
+ };
}
/* Indicate that a timeout occured on a hearbeat region write. The
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index d9fa3d22e17..2f9e4e19a4f 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -2589,6 +2589,14 @@ retry:
"begin reco msg (%d)\n", dlm->name, nodenum, ret);
ret = 0;
}
+ if (ret == -EAGAIN) {
+ mlog(0, "%s: trying to start recovery of node "
+ "%u, but node %u is waiting for last recovery "
+ "to complete, backoff for a bit\n", dlm->name,
+ dead_node, nodenum);
+ msleep(100);
+ goto retry;
+ }
if (ret < 0) {
struct dlm_lock_resource *res;
/* this is now a serious problem, possibly ENOMEM
@@ -2608,14 +2616,6 @@ retry:
* another ENOMEM */
msleep(100);
goto retry;
- } else if (ret == EAGAIN) {
- mlog(0, "%s: trying to start recovery of node "
- "%u, but node %u is waiting for last recovery "
- "to complete, backoff for a bit\n", dlm->name,
- dead_node, nodenum);
- /* TODO Look into replacing msleep with cond_resched() */
- msleep(100);
- goto retry;
}
}
@@ -2639,7 +2639,7 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
dlm->name, br->node_idx, br->dead_node,
dlm->reco.dead_node, dlm->reco.new_master);
spin_unlock(&dlm->spinlock);
- return EAGAIN;
+ return -EAGAIN;
}
spin_unlock(&dlm->spinlock);
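
The dlmrecovery.c hunks above are largely a sign fix: dlm_begin_reco_handler() now returns -EAGAIN, so the caller can test for it ahead of the generic ret < 0 error path, following the usual negative-errno convention. A tiny illustration of why a positive EAGAIN is easy to mishandle (hypothetical handlers, not the dlm code):

#include <errno.h>
#include <stdio.h>

static int handler_old(void) { return EAGAIN; }		/* positive: looks like success */
static int handler_new(void) { return -EAGAIN; }	/* negative errno convention    */

static void dispatch(int ret, const char *name)
{
	if (ret == -EAGAIN)
		printf("%s: busy, back off and retry\n", name);
	else if (ret < 0)
		printf("%s: hard error %d\n", name, ret);
	else
		printf("%s: treated as success (ret=%d)\n", name, ret);
}

int main(void)
{
	dispatch(handler_old(), "old");	/* falls into the success branch by mistake */
	dispatch(handler_new(), "new");	/* correctly hits the retry branch          */
	return 0;
}
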
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 843db64e9d4..d35a27f4523 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -37,6 +37,7 @@
#include "extent_map.h"
#include "inode.h"
#include "super.h"
+#include "symlink.h"
#include "buffer_head_io.h"
@@ -703,6 +704,12 @@ out:
return ret;
}
+/*
+ * The ocfs2_fiemap_inline() may be a little bit misleading, since
+ * it not only handles the fiemap for inlined files, but also deals
+ * with the fast symlink, cause they have no difference for extent
+ * mapping per se.
+ */
static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
struct fiemap_extent_info *fieinfo,
u64 map_start)
@@ -715,11 +722,18 @@ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
struct ocfs2_inode_info *oi = OCFS2_I(inode);
di = (struct ocfs2_dinode *)di_bh->b_data;
- id_count = le16_to_cpu(di->id2.i_data.id_count);
+ if (ocfs2_inode_is_fast_symlink(inode))
+ id_count = ocfs2_fast_symlink_chars(inode->i_sb);
+ else
+ id_count = le16_to_cpu(di->id2.i_data.id_count);
if (map_start < id_count) {
phys = oi->ip_blkno << inode->i_sb->s_blocksize_bits;
- phys += offsetof(struct ocfs2_dinode, id2.i_data.id_data);
+ if (ocfs2_inode_is_fast_symlink(inode))
+ phys += offsetof(struct ocfs2_dinode, id2.i_symlink);
+ else
+ phys += offsetof(struct ocfs2_dinode,
+ id2.i_data.id_data);
ret = fiemap_fill_next_extent(fieinfo, 0, phys, id_count,
flags);
@@ -756,9 +770,10 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
down_read(&OCFS2_I(inode)->ip_alloc_sem);
/*
- * Handle inline-data separately.
+ * Handle inline-data and fast symlink separately.
*/
- if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ if ((OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) ||
+ ocfs2_inode_is_fast_symlink(inode)) {
ret = ocfs2_fiemap_inline(inode, di_bh, fieinfo, map_start);
goto out_unlock;
}
@@ -786,6 +801,8 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
fe_flags = 0;
if (rec.e_flags & OCFS2_EXT_UNWRITTEN)
fe_flags |= FIEMAP_EXTENT_UNWRITTEN;
+ if (rec.e_flags & OCFS2_EXT_REFCOUNTED)
+ fe_flags |= FIEMAP_EXTENT_SHARED;
if (is_last)
fe_flags |= FIEMAP_EXTENT_LAST;
len_bytes = (u64)le16_to_cpu(rec.e_leaf_clusters) << osb->s_clustersize_bits;
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index f010b22b1c4..50fb26a6a5f 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -2108,6 +2108,7 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
}
did_quota_inode = 1;
+ inode->i_nlink = 0;
/* do the real work now. */
status = ocfs2_mknod_locked(osb, dir, inode,
0, &new_di_bh, parent_di_bh, handle,
@@ -2136,6 +2137,7 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
if (status < 0)
mlog_errno(status);
+ insert_inode_hash(inode);
leave:
if (status < 0 && did_quota_inode)
vfs_dq_free_inode(inode);
@@ -2267,6 +2269,8 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir,
di = (struct ocfs2_dinode *)di_bh->b_data;
le32_add_cpu(&di->i_flags, -OCFS2_ORPHANED_FL);
di->i_orphaned_slot = 0;
+ inode->i_nlink = 1;
+ ocfs2_set_links_count(di, inode->i_nlink);
ocfs2_journal_dirty(handle, di_bh);
status = ocfs2_add_entry(handle, dentry, inode,
@@ -2284,7 +2288,6 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir,
goto out_commit;
}
- insert_inode_hash(inode);
dentry->d_op = &ocfs2_dentry_ops;
d_instantiate(dentry, inode);
status = 0;
@@ -2326,4 +2329,5 @@ const struct inode_operations ocfs2_dir_iops = {
.getxattr = generic_getxattr,
.listxattr = ocfs2_listxattr,
.removexattr = generic_removexattr,
+ .fiemap = ocfs2_fiemap,
};
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index d963d863870..9362eea7424 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -245,9 +245,11 @@ enum ocfs2_mount_options
OCFS2_MOUNT_LOCALFLOCKS = 1 << 5, /* No cluster aware user file locks */
OCFS2_MOUNT_NOUSERXATTR = 1 << 6, /* No user xattr */
OCFS2_MOUNT_INODE64 = 1 << 7, /* Allow inode numbers > 2^32 */
- OCFS2_MOUNT_POSIX_ACL = 1 << 8, /* POSIX access control lists */
- OCFS2_MOUNT_USRQUOTA = 1 << 9, /* We support user quotas */
- OCFS2_MOUNT_GRPQUOTA = 1 << 10, /* We support group quotas */
+ OCFS2_MOUNT_POSIX_ACL = 1 << 8, /* Force POSIX access control lists */
+ OCFS2_MOUNT_NO_POSIX_ACL = 1 << 9, /* Disable POSIX access
+ control lists */
+ OCFS2_MOUNT_USRQUOTA = 1 << 10, /* We support user quotas */
+ OCFS2_MOUNT_GRPQUOTA = 1 << 11, /* We support group quotas */
};
#define OCFS2_OSB_SOFT_RO 0x0001
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index e9431e4a5e7..1a1a679e51b 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -1202,7 +1202,7 @@ struct ocfs2_local_disk_dqinfo {
/* Header of one chunk of a quota file */
struct ocfs2_local_disk_chunk {
__le32 dqc_free; /* Number of free entries in the bitmap */
- u8 dqc_bitmap[0]; /* Bitmap of entries in the corresponding
+ __u8 dqc_bitmap[0]; /* Bitmap of entries in the corresponding
* chunk of quota file */
};
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 30967e3f5e4..74db2be75dd 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -276,7 +276,7 @@ static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb,
spin_unlock(&osb->osb_lock);
}
-void ocfs2_kref_remove_refcount_tree(struct kref *kref)
+static void ocfs2_kref_remove_refcount_tree(struct kref *kref)
{
struct ocfs2_refcount_tree *tree =
container_of(kref, struct ocfs2_refcount_tree, rf_getcnt);
@@ -524,23 +524,6 @@ out:
return ret;
}
-int ocfs2_lock_refcount_tree_by_inode(struct inode *inode, int rw,
- struct ocfs2_refcount_tree **ret_tree,
- struct buffer_head **ref_bh)
-{
- int ret;
- u64 ref_blkno;
-
- ret = ocfs2_get_refcount_block(inode, &ref_blkno);
- if (ret) {
- mlog_errno(ret);
- return ret;
- }
-
- return ocfs2_lock_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno,
- rw, ret_tree, ref_bh);
-}
-
void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb,
struct ocfs2_refcount_tree *tree, int rw)
{
@@ -969,6 +952,103 @@ out:
}
/*
+ * Find the end range for a leaf refcount block indicated by
+ * el->l_recs[index].e_blkno.
+ */
+static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci,
+ struct buffer_head *ref_root_bh,
+ struct ocfs2_extent_block *eb,
+ struct ocfs2_extent_list *el,
+ int index, u32 *cpos_end)
+{
+ int ret, i, subtree_root;
+ u32 cpos;
+ u64 blkno;
+ struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
+ struct ocfs2_path *left_path = NULL, *right_path = NULL;
+ struct ocfs2_extent_tree et;
+ struct ocfs2_extent_list *tmp_el;
+
+ if (index < le16_to_cpu(el->l_next_free_rec) - 1) {
+ /*
+ * We have a extent rec after index, so just use the e_cpos
+ * of the next extent rec.
+ */
+ *cpos_end = le32_to_cpu(el->l_recs[index+1].e_cpos);
+ return 0;
+ }
+
+ if (!eb || (eb && !eb->h_next_leaf_blk)) {
+ /*
+ * We are the last extent rec, so any high cpos should
+ * be stored in this leaf refcount block.
+ */
+ *cpos_end = UINT_MAX;
+ return 0;
+ }
+
+ /*
+ * If the extent block isn't the last one, we have to find
+ * the subtree root between this extent block and the next
+ * leaf extent block and get the corresponding e_cpos from
+ * the subroot. Otherwise we may corrupt the b-tree.
+ */
+ ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
+
+ left_path = ocfs2_new_path_from_et(&et);
+ if (!left_path) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos);
+ ret = ocfs2_find_path(ci, left_path, cpos);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ right_path = ocfs2_new_path_from_path(left_path);
+ if (!right_path) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_find_cpos_for_right_leaf(sb, left_path, &cpos);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_find_path(ci, right_path, cpos);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ subtree_root = ocfs2_find_subtree_root(&et, left_path,
+ right_path);
+
+ tmp_el = left_path->p_node[subtree_root].el;
+ blkno = left_path->p_node[subtree_root+1].bh->b_blocknr;
+ for (i = 0; i < le32_to_cpu(tmp_el->l_next_free_rec); i++) {
+ if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) {
+ *cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos);
+ break;
+ }
+ }
+
+ BUG_ON(i == le32_to_cpu(tmp_el->l_next_free_rec));
+
+out:
+ ocfs2_free_path(left_path);
+ ocfs2_free_path(right_path);
+ return ret;
+}
+
+/*
* Given a cpos and len, try to find the refcount record which contains cpos.
* 1. If cpos can be found in one refcount record, return the record.
* 2. If cpos can't be found, return a fake record which start from cpos
@@ -983,10 +1063,10 @@ static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
struct buffer_head **ret_bh)
{
int ret = 0, i, found;
- u32 low_cpos;
+ u32 low_cpos, uninitialized_var(cpos_end);
struct ocfs2_extent_list *el;
- struct ocfs2_extent_rec *tmp, *rec = NULL;
- struct ocfs2_extent_block *eb;
+ struct ocfs2_extent_rec *rec = NULL;
+ struct ocfs2_extent_block *eb = NULL;
struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL;
struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
struct ocfs2_refcount_block *rb =
@@ -1034,12 +1114,16 @@ static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
}
}
- /* adjust len when we have ocfs2_extent_rec after it. */
- if (found && i < le16_to_cpu(el->l_next_free_rec) - 1) {
- tmp = &el->l_recs[i+1];
+ if (found) {
+ ret = ocfs2_get_refcount_cpos_end(ci, ref_root_bh,
+ eb, el, i, &cpos_end);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
- if (le32_to_cpu(tmp->e_cpos) < cpos + len)
- len = le32_to_cpu(tmp->e_cpos) - cpos;
+ if (cpos_end < low_cpos + len)
+ len = cpos_end - low_cpos;
}
ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno),
@@ -1418,7 +1502,7 @@ static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
/* change old and new rl_used accordingly. */
le16_add_cpu(&rl->rl_used, -num_moved);
- new_rl->rl_used = cpu_to_le32(num_moved);
+ new_rl->rl_used = cpu_to_le16(num_moved);
sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
sizeof(struct ocfs2_refcount_rec),
@@ -1797,7 +1881,8 @@ static int ocfs2_split_refcount_rec(handle_t *handle,
recs_need++;
/* If the leaf block don't have enough record, expand it. */
- if (le16_to_cpu(rf_list->rl_used) + recs_need > rf_list->rl_count) {
+ if (le16_to_cpu(rf_list->rl_used) + recs_need >
+ le16_to_cpu(rf_list->rl_count)) {
struct ocfs2_refcount_rec tmp_rec;
u64 cpos = le64_to_cpu(orig_rec->r_cpos);
len = le32_to_cpu(orig_rec->r_clusters);
@@ -1859,7 +1944,7 @@ static int ocfs2_split_refcount_rec(handle_t *handle,
memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec));
le64_add_cpu(&tail_rec->r_cpos,
le32_to_cpu(tail_rec->r_clusters) - len);
- tail_rec->r_clusters = le32_to_cpu(len);
+ tail_rec->r_clusters = cpu_to_le32(len);
}
/*
@@ -3840,8 +3925,7 @@ static int ocfs2_add_refcounted_extent(struct inode *inode,
}
ret = ocfs2_insert_extent(handle, et, cpos,
- cpu_to_le64(ocfs2_clusters_to_blocks(inode->i_sb,
- p_cluster)),
+ ocfs2_clusters_to_blocks(inode->i_sb, p_cluster),
num_clusters, ext_flags, meta_ac);
if (ret) {
mlog_errno(ret);
@@ -4253,8 +4337,8 @@ static int ocfs2_user_path_parent(const char __user *path,
* @new_dentry: target dentry
* @preserve: if true, preserve all file attributes
*/
-int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir,
- struct dentry *new_dentry, bool preserve)
+static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *new_dentry, bool preserve)
{
struct inode *inode = old_dentry->d_inode;
int error;
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c
index ff4c798a563..da78a2a334f 100644
--- a/fs/ocfs2/stack_user.c
+++ b/fs/ocfs2/stack_user.c
@@ -814,7 +814,7 @@ static int fs_protocol_compare(struct ocfs2_protocol_version *existing,
static int user_cluster_connect(struct ocfs2_cluster_connection *conn)
{
dlm_lockspace_t *fsdlm;
- struct ocfs2_live_connection *control;
+ struct ocfs2_live_connection *uninitialized_var(control);
int rc = 0;
BUG_ON(conn == NULL);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 14f47d2bfe0..26069917a9f 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -100,6 +100,8 @@ struct mount_options
static int ocfs2_parse_options(struct super_block *sb, char *options,
struct mount_options *mopt,
int is_remount);
+static int ocfs2_check_set_options(struct super_block *sb,
+ struct mount_options *options);
static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt);
static void ocfs2_put_super(struct super_block *sb);
static int ocfs2_mount_volume(struct super_block *sb);
@@ -600,7 +602,8 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
lock_kernel();
- if (!ocfs2_parse_options(sb, data, &parsed_options, 1)) {
+ if (!ocfs2_parse_options(sb, data, &parsed_options, 1) ||
+ !ocfs2_check_set_options(sb, &parsed_options)) {
ret = -EINVAL;
goto out;
}
@@ -691,8 +694,6 @@ unlock_osb:
if (!ret) {
/* Only save off the new mount options in case of a successful
* remount. */
- if (!(osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_XATTR))
- parsed_options.mount_opt &= ~OCFS2_MOUNT_POSIX_ACL;
osb->s_mount_opt = parsed_options.mount_opt;
osb->s_atime_quantum = parsed_options.atime_quantum;
osb->preferred_slot = parsed_options.slot;
@@ -701,6 +702,10 @@ unlock_osb:
if (!ocfs2_is_hard_readonly(osb))
ocfs2_set_journal_params(osb);
+
+ sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ?
+ MS_POSIXACL : 0);
}
out:
unlock_kernel();
@@ -1011,31 +1016,16 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
brelse(bh);
bh = NULL;
- if (!(osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_XATTR))
- parsed_options.mount_opt &= ~OCFS2_MOUNT_POSIX_ACL;
-
+ if (!ocfs2_check_set_options(sb, &parsed_options)) {
+ status = -EINVAL;
+ goto read_super_error;
+ }
osb->s_mount_opt = parsed_options.mount_opt;
osb->s_atime_quantum = parsed_options.atime_quantum;
osb->preferred_slot = parsed_options.slot;
osb->osb_commit_interval = parsed_options.commit_interval;
osb->local_alloc_default_bits = ocfs2_megabytes_to_clusters(sb, parsed_options.localalloc_opt);
osb->local_alloc_bits = osb->local_alloc_default_bits;
- if (osb->s_mount_opt & OCFS2_MOUNT_USRQUOTA &&
- !OCFS2_HAS_RO_COMPAT_FEATURE(sb,
- OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
- status = -EINVAL;
- mlog(ML_ERROR, "User quotas were requested, but this "
- "filesystem does not have the feature enabled.\n");
- goto read_super_error;
- }
- if (osb->s_mount_opt & OCFS2_MOUNT_GRPQUOTA &&
- !OCFS2_HAS_RO_COMPAT_FEATURE(sb,
- OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
- status = -EINVAL;
- mlog(ML_ERROR, "Group quotas were requested, but this "
- "filesystem does not have the feature enabled.\n");
- goto read_super_error;
- }
status = ocfs2_verify_userspace_stack(osb, &parsed_options);
if (status)
@@ -1245,6 +1235,40 @@ static struct file_system_type ocfs2_fs_type = {
.next = NULL
};
+static int ocfs2_check_set_options(struct super_block *sb,
+ struct mount_options *options)
+{
+ if (options->mount_opt & OCFS2_MOUNT_USRQUOTA &&
+ !OCFS2_HAS_RO_COMPAT_FEATURE(sb,
+ OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
+ mlog(ML_ERROR, "User quotas were requested, but this "
+ "filesystem does not have the feature enabled.\n");
+ return 0;
+ }
+ if (options->mount_opt & OCFS2_MOUNT_GRPQUOTA &&
+ !OCFS2_HAS_RO_COMPAT_FEATURE(sb,
+ OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
+ mlog(ML_ERROR, "Group quotas were requested, but this "
+ "filesystem does not have the feature enabled.\n");
+ return 0;
+ }
+ if (options->mount_opt & OCFS2_MOUNT_POSIX_ACL &&
+ !OCFS2_HAS_INCOMPAT_FEATURE(sb, OCFS2_FEATURE_INCOMPAT_XATTR)) {
+ mlog(ML_ERROR, "ACL support requested but extended attributes "
+ "feature is not enabled\n");
+ return 0;
+ }
+ /* No ACL setting specified? Use XATTR feature... */
+ if (!(options->mount_opt & (OCFS2_MOUNT_POSIX_ACL |
+ OCFS2_MOUNT_NO_POSIX_ACL))) {
+ if (OCFS2_HAS_INCOMPAT_FEATURE(sb, OCFS2_FEATURE_INCOMPAT_XATTR))
+ options->mount_opt |= OCFS2_MOUNT_POSIX_ACL;
+ else
+ options->mount_opt |= OCFS2_MOUNT_NO_POSIX_ACL;
+ }
+ return 1;
+}
+
static int ocfs2_parse_options(struct super_block *sb,
char *options,
struct mount_options *mopt,
@@ -1392,40 +1416,19 @@ static int ocfs2_parse_options(struct super_block *sb,
mopt->mount_opt |= OCFS2_MOUNT_INODE64;
break;
case Opt_usrquota:
- /* We check only on remount, otherwise features
- * aren't yet initialized. */
- if (is_remount && !OCFS2_HAS_RO_COMPAT_FEATURE(sb,
- OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
- mlog(ML_ERROR, "User quota requested but "
- "filesystem feature is not set\n");
- status = 0;
- goto bail;
- }
mopt->mount_opt |= OCFS2_MOUNT_USRQUOTA;
break;
case Opt_grpquota:
- if (is_remount && !OCFS2_HAS_RO_COMPAT_FEATURE(sb,
- OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
- mlog(ML_ERROR, "Group quota requested but "
- "filesystem feature is not set\n");
- status = 0;
- goto bail;
- }
mopt->mount_opt |= OCFS2_MOUNT_GRPQUOTA;
break;
-#ifdef CONFIG_OCFS2_FS_POSIX_ACL
case Opt_acl:
mopt->mount_opt |= OCFS2_MOUNT_POSIX_ACL;
+ mopt->mount_opt &= ~OCFS2_MOUNT_NO_POSIX_ACL;
break;
case Opt_noacl:
+ mopt->mount_opt |= OCFS2_MOUNT_NO_POSIX_ACL;
mopt->mount_opt &= ~OCFS2_MOUNT_POSIX_ACL;
break;
-#else
- case Opt_acl:
- case Opt_noacl:
- printk(KERN_INFO "ocfs2 (no)acl options not supported\n");
- break;
-#endif
default:
mlog(ML_ERROR,
"Unrecognized mount option \"%s\" "
@@ -1502,12 +1505,10 @@ static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
if (opts & OCFS2_MOUNT_INODE64)
seq_printf(s, ",inode64");
-#ifdef CONFIG_OCFS2_FS_POSIX_ACL
if (opts & OCFS2_MOUNT_POSIX_ACL)
seq_printf(s, ",acl");
else
seq_printf(s, ",noacl");
-#endif
return 0;
}
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index e3421030a69..49b133ccbf1 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -163,6 +163,7 @@ const struct inode_operations ocfs2_symlink_inode_operations = {
.getxattr = generic_getxattr,
.listxattr = ocfs2_listxattr,
.removexattr = generic_removexattr,
+ .fiemap = ocfs2_fiemap,
};
const struct inode_operations ocfs2_fast_symlink_inode_operations = {
.readlink = ocfs2_readlink,
@@ -174,4 +175,5 @@ const struct inode_operations ocfs2_fast_symlink_inode_operations = {
.getxattr = generic_getxattr,
.listxattr = ocfs2_listxattr,
.removexattr = generic_removexattr,
+ .fiemap = ocfs2_fiemap,
};
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 43c114831c0..8fc6fb071c6 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -98,10 +98,8 @@ static struct ocfs2_xattr_def_value_root def_xv = {
struct xattr_handler *ocfs2_xattr_handlers[] = {
&ocfs2_xattr_user_handler,
-#ifdef CONFIG_OCFS2_FS_POSIX_ACL
&ocfs2_xattr_acl_access_handler,
&ocfs2_xattr_acl_default_handler,
-#endif
&ocfs2_xattr_trusted_handler,
&ocfs2_xattr_security_handler,
NULL
@@ -109,12 +107,10 @@ struct xattr_handler *ocfs2_xattr_handlers[] = {
static struct xattr_handler *ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = {
[OCFS2_XATTR_INDEX_USER] = &ocfs2_xattr_user_handler,
-#ifdef CONFIG_OCFS2_FS_POSIX_ACL
[OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS]
= &ocfs2_xattr_acl_access_handler,
[OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT]
= &ocfs2_xattr_acl_default_handler,
-#endif
[OCFS2_XATTR_INDEX_TRUSTED] = &ocfs2_xattr_trusted_handler,
[OCFS2_XATTR_INDEX_SECURITY] = &ocfs2_xattr_security_handler,
};
@@ -6064,7 +6060,7 @@ static int ocfs2_value_metas_in_xattr_header(struct super_block *sb,
* to the extent block, so just calculate a maximum record num.
*/
if (!xv->xr_list.l_tree_depth)
- *num_recs += xv->xr_list.l_next_free_rec;
+ *num_recs += le16_to_cpu(xv->xr_list.l_next_free_rec);
else
*num_recs += ocfs2_clusters_for_bytes(sb,
XATTR_SIZE_MAX);
diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h
index 08e36389f56..abd72a47f52 100644
--- a/fs/ocfs2/xattr.h
+++ b/fs/ocfs2/xattr.h
@@ -40,10 +40,8 @@ struct ocfs2_security_xattr_info {
extern struct xattr_handler ocfs2_xattr_user_handler;
extern struct xattr_handler ocfs2_xattr_trusted_handler;
extern struct xattr_handler ocfs2_xattr_security_handler;
-#ifdef CONFIG_OCFS2_FS_POSIX_ACL
extern struct xattr_handler ocfs2_xattr_acl_access_handler;
extern struct xattr_handler ocfs2_xattr_acl_default_handler;
-#endif
extern struct xattr_handler *ocfs2_xattr_handlers[];
ssize_t ocfs2_listxattr(struct dentry *, char *, size_t);
diff --git a/fs/open.c b/fs/open.c
index ca69241796b..040cef72bc0 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -821,15 +821,14 @@ static inline int __get_file_write_access(struct inode *inode,
}
static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
- int flags, struct file *f,
+ struct file *f,
int (*open)(struct inode *, struct file *),
const struct cred *cred)
{
struct inode *inode;
int error;
- f->f_flags = flags;
- f->f_mode = (__force fmode_t)((flags+1) & O_ACCMODE) | FMODE_LSEEK |
+ f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK |
FMODE_PREAD | FMODE_PWRITE;
inode = dentry->d_inode;
if (f->f_mode & FMODE_WRITE) {
@@ -930,7 +929,6 @@ struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry
if (IS_ERR(dentry))
goto out_err;
nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->path.mnt),
- nd->intent.open.flags - 1,
nd->intent.open.file,
open, cred);
out:
@@ -949,7 +947,7 @@ EXPORT_SYMBOL_GPL(lookup_instantiate_filp);
*
* Note that this function destroys the original nameidata
*/
-struct file *nameidata_to_filp(struct nameidata *nd, int flags)
+struct file *nameidata_to_filp(struct nameidata *nd)
{
const struct cred *cred = current_cred();
struct file *filp;
@@ -958,7 +956,7 @@ struct file *nameidata_to_filp(struct nameidata *nd, int flags)
filp = nd->intent.open.file;
/* Has the filesystem initialised the file for us? */
if (filp->f_path.dentry == NULL)
- filp = __dentry_open(nd->path.dentry, nd->path.mnt, flags, filp,
+ filp = __dentry_open(nd->path.dentry, nd->path.mnt, filp,
NULL, cred);
else
path_put(&nd->path);
@@ -997,7 +995,8 @@ struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags,
return ERR_PTR(error);
}
- return __dentry_open(dentry, mnt, flags, f, NULL, cred);
+ f->f_flags = flags;
+ return __dentry_open(dentry, mnt, f, NULL, cred);
}
EXPORT_SYMBOL(dentry_open);
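
__dentry_open() no longer takes a flags argument: callers set f->f_flags first (as dentry_open() now does) and the access mode is derived from it with OPEN_FMODE(). Roughly, and treating the actual macro in the fs headers as authoritative, the mapping works out to:

#include <linux/fs.h>

/* illustrative sketch only; the real OPEN_FMODE() lives in linux/fs.h
 *
 *   O_RDONLY (0) -> (0 + 1) & O_ACCMODE = 1 = FMODE_READ
 *   O_WRONLY (1) -> (1 + 1) & O_ACCMODE = 2 = FMODE_WRITE
 *   O_RDWR   (2) -> (2 + 1) & O_ACCMODE = 3 = FMODE_READ | FMODE_WRITE
 */
static inline fmode_t open_fmode_sketch(int flags)
{
	return (__force fmode_t)((flags + 1) & O_ACCMODE);
}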
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index cd6bb9a33c1..dea86abdf2e 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -323,6 +323,30 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
}
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
+/* Dirtify all the dquots - this can block when journalling */
+static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
+{
+ int ret, err, cnt;
+
+ ret = err = 0;
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (dquot[cnt])
+ /* Even in case of error we have to continue */
+ ret = mark_dquot_dirty(dquot[cnt]);
+ if (!err)
+ err = ret;
+ }
+ return err;
+}
+
+static inline void dqput_all(struct dquot **dquot)
+{
+ unsigned int cnt;
+
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ dqput(dquot[cnt]);
+}
+
/* This function needs dq_list_lock */
static inline int clear_dquot_dirty(struct dquot *dquot)
{
@@ -1268,8 +1292,7 @@ int dquot_initialize(struct inode *inode, int type)
out_err:
up_write(&sb_dqopt(sb)->dqptr_sem);
/* Drop unused references */
- for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- dqput(got[cnt]);
+ dqput_all(got);
return ret;
}
EXPORT_SYMBOL(dquot_initialize);
@@ -1288,9 +1311,7 @@ int dquot_drop(struct inode *inode)
inode->i_dquot[cnt] = NULL;
}
up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
-
- for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- dqput(put[cnt]);
+ dqput_all(put);
return 0;
}
EXPORT_SYMBOL(dquot_drop);
@@ -1319,6 +1340,67 @@ void vfs_dq_drop(struct inode *inode)
EXPORT_SYMBOL(vfs_dq_drop);
/*
+ * inode_reserved_space is managed internally by quota, and protected by
+ * i_lock similar to i_blocks+i_bytes.
+ */
+static qsize_t *inode_reserved_space(struct inode * inode)
+{
+ /* Filesystem must explicitly define its own method in order to use
+ * quota reservation interface */
+ BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
+ return inode->i_sb->dq_op->get_reserved_space(inode);
+}
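
inode_reserved_space() dereferences a counter that the filesystem itself supplies through dq_op->get_reserved_space(), which with this series returns a pointer into the fs-private inode. A minimal sketch of that filesystem side, with hypothetical myfs_* names:

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/quota.h>

struct myfs_inode_info {
	qsize_t i_reserved_quota;	/* bytes reserved, protected by i_lock */
	struct inode vfs_inode;
};

static inline struct myfs_inode_info *MYFS_I(struct inode *inode)
{
	return container_of(inode, struct myfs_inode_info, vfs_inode);
}

static qsize_t *myfs_get_reserved_space(struct inode *inode)
{
	return &MYFS_I(inode)->i_reserved_quota;
}

static const struct dquot_operations myfs_quota_operations = {
	/* ...the usual dquot allocation/free callbacks... */
	.get_reserved_space	= myfs_get_reserved_space,
};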
+
+static void inode_add_rsv_space(struct inode *inode, qsize_t number)
+{
+ spin_lock(&inode->i_lock);
+ *inode_reserved_space(inode) += number;
+ spin_unlock(&inode->i_lock);
+}
+
+static void inode_claim_rsv_space(struct inode *inode, qsize_t number)
+{
+ spin_lock(&inode->i_lock);
+ *inode_reserved_space(inode) -= number;
+ __inode_add_bytes(inode, number);
+ spin_unlock(&inode->i_lock);
+}
+
+static void inode_sub_rsv_space(struct inode *inode, qsize_t number)
+{
+ spin_lock(&inode->i_lock);
+ *inode_reserved_space(inode) -= number;
+ spin_unlock(&inode->i_lock);
+}
+
+static qsize_t inode_get_rsv_space(struct inode *inode)
+{
+ qsize_t ret;
+ spin_lock(&inode->i_lock);
+ ret = *inode_reserved_space(inode);
+ spin_unlock(&inode->i_lock);
+ return ret;
+}
+
+static void inode_incr_space(struct inode *inode, qsize_t number,
+ int reserve)
+{
+ if (reserve)
+ inode_add_rsv_space(inode, number);
+ else
+ inode_add_bytes(inode, number);
+}
+
+static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
+{
+ if (reserve)
+ inode_sub_rsv_space(inode, number);
+ else
+ inode_sub_bytes(inode, number);
+}
+
+/*
* Following four functions update i_blocks+i_bytes fields and
* quota information (together with appropriate checks)
* NOTE: We absolutely rely on the fact that caller dirties
@@ -1336,6 +1418,21 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
int cnt, ret = QUOTA_OK;
char warntype[MAXQUOTAS];
+ /*
+ * First test before acquiring mutex - solves deadlocks when we
+ * re-enter the quota code and are already holding the mutex
+ */
+ if (IS_NOQUOTA(inode)) {
+ inode_incr_space(inode, number, reserve);
+ goto out;
+ }
+
+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ if (IS_NOQUOTA(inode)) {
+ inode_incr_space(inode, number, reserve);
+ goto out_unlock;
+ }
+
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype[cnt] = QUOTA_NL_NOWARN;
@@ -1346,7 +1443,8 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
== NO_QUOTA) {
ret = NO_QUOTA;
- goto out_unlock;
+ spin_unlock(&dq_data_lock);
+ goto out_flush_warn;
}
}
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1357,64 +1455,29 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
else
dquot_incr_space(inode->i_dquot[cnt], number);
}
- if (!reserve)
- inode_add_bytes(inode, number);
-out_unlock:
+ inode_incr_space(inode, number, reserve);
spin_unlock(&dq_data_lock);
+
+ if (reserve)
+ goto out_flush_warn;
+ mark_all_dquot_dirty(inode->i_dquot);
+out_flush_warn:
flush_warnings(inode->i_dquot, warntype);
+out_unlock:
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
return ret;
}
int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
{
- int cnt, ret = QUOTA_OK;
-
- /*
- * First test before acquiring mutex - solves deadlocks when we
- * re-enter the quota code and are already holding the mutex
- */
- if (IS_NOQUOTA(inode)) {
- inode_add_bytes(inode, number);
- goto out;
- }
-
- down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
- if (IS_NOQUOTA(inode)) {
- inode_add_bytes(inode, number);
- goto out_unlock;
- }
-
- ret = __dquot_alloc_space(inode, number, warn, 0);
- if (ret == NO_QUOTA)
- goto out_unlock;
-
- /* Dirtify all the dquots - this can block when journalling */
- for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- if (inode->i_dquot[cnt])
- mark_dquot_dirty(inode->i_dquot[cnt]);
-out_unlock:
- up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-out:
- return ret;
+ return __dquot_alloc_space(inode, number, warn, 0);
}
EXPORT_SYMBOL(dquot_alloc_space);
int dquot_reserve_space(struct inode *inode, qsize_t number, int warn)
{
- int ret = QUOTA_OK;
-
- if (IS_NOQUOTA(inode))
- goto out;
-
- down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
- if (IS_NOQUOTA(inode))
- goto out_unlock;
-
- ret = __dquot_alloc_space(inode, number, warn, 1);
-out_unlock:
- up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-out:
- return ret;
+ return __dquot_alloc_space(inode, number, warn, 1);
}
EXPORT_SYMBOL(dquot_reserve_space);
@@ -1455,10 +1518,7 @@ int dquot_alloc_inode(const struct inode *inode, qsize_t number)
warn_put_all:
spin_unlock(&dq_data_lock);
if (ret == QUOTA_OK)
- /* Dirtify all the dquots - this can block when journalling */
- for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- if (inode->i_dquot[cnt])
- mark_dquot_dirty(inode->i_dquot[cnt]);
+ mark_all_dquot_dirty(inode->i_dquot);
flush_warnings(inode->i_dquot, warntype);
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
return ret;
@@ -1471,14 +1531,14 @@ int dquot_claim_space(struct inode *inode, qsize_t number)
int ret = QUOTA_OK;
if (IS_NOQUOTA(inode)) {
- inode_add_bytes(inode, number);
+ inode_claim_rsv_space(inode, number);
goto out;
}
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
if (IS_NOQUOTA(inode)) {
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
- inode_add_bytes(inode, number);
+ inode_claim_rsv_space(inode, number);
goto out;
}
@@ -1490,12 +1550,9 @@ int dquot_claim_space(struct inode *inode, qsize_t number)
number);
}
/* Update inode bytes */
- inode_add_bytes(inode, number);
+ inode_claim_rsv_space(inode, number);
spin_unlock(&dq_data_lock);
- /* Dirtify all the dquots - this can block when journalling */
- for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- if (inode->i_dquot[cnt])
- mark_dquot_dirty(inode->i_dquot[cnt]);
+ mark_all_dquot_dirty(inode->i_dquot);
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
out:
return ret;
@@ -1503,38 +1560,9 @@ out:
EXPORT_SYMBOL(dquot_claim_space);
/*
- * Release reserved quota space
- */
-void dquot_release_reserved_space(struct inode *inode, qsize_t number)
-{
- int cnt;
-
- if (IS_NOQUOTA(inode))
- goto out;
-
- down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
- if (IS_NOQUOTA(inode))
- goto out_unlock;
-
- spin_lock(&dq_data_lock);
- /* Release reserved dquots */
- for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt])
- dquot_free_reserved_space(inode->i_dquot[cnt], number);
- }
- spin_unlock(&dq_data_lock);
-
-out_unlock:
- up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-out:
- return;
-}
-EXPORT_SYMBOL(dquot_release_reserved_space);
-
-/*
* This operation can block, but only after everything is updated
*/
-int dquot_free_space(struct inode *inode, qsize_t number)
+int __dquot_free_space(struct inode *inode, qsize_t number, int reserve)
{
unsigned int cnt;
char warntype[MAXQUOTAS];
@@ -1543,7 +1571,7 @@ int dquot_free_space(struct inode *inode, qsize_t number)
* re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode)) {
out_sub:
- inode_sub_bytes(inode, number);
+ inode_decr_space(inode, number, reserve);
return QUOTA_OK;
}
@@ -1558,21 +1586,40 @@ out_sub:
if (!inode->i_dquot[cnt])
continue;
warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
- dquot_decr_space(inode->i_dquot[cnt], number);
+ if (reserve)
+ dquot_free_reserved_space(inode->i_dquot[cnt], number);
+ else
+ dquot_decr_space(inode->i_dquot[cnt], number);
}
- inode_sub_bytes(inode, number);
+ inode_decr_space(inode, number, reserve);
spin_unlock(&dq_data_lock);
- /* Dirtify all the dquots - this can block when journalling */
- for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- if (inode->i_dquot[cnt])
- mark_dquot_dirty(inode->i_dquot[cnt]);
+
+ if (reserve)
+ goto out_unlock;
+ mark_all_dquot_dirty(inode->i_dquot);
+out_unlock:
flush_warnings(inode->i_dquot, warntype);
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
return QUOTA_OK;
}
+
+int dquot_free_space(struct inode *inode, qsize_t number)
+{
+ return __dquot_free_space(inode, number, 0);
+}
EXPORT_SYMBOL(dquot_free_space);
/*
+ * Release reserved quota space
+ */
+void dquot_release_reserved_space(struct inode *inode, qsize_t number)
+{
+ __dquot_free_space(inode, number, 1);
+}
+EXPORT_SYMBOL(dquot_release_reserved_space);
+
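
Taken together, the reserve/claim/release entry points support delayed allocation: reserve at write time, claim when blocks are actually allocated, release if they never are. A hedged sketch of the pairing from a hypothetical caller, using the signatures as they appear in this file:

#include <linux/quotaops.h>

static int myfs_delalloc_reserve(struct inode *inode, qsize_t bytes)
{
	/* count the bytes against the limits, but not as used space yet */
	if (dquot_reserve_space(inode, bytes, 1) == NO_QUOTA)
		return -EDQUOT;
	return 0;
}

static void myfs_blocks_allocated(struct inode *inode, qsize_t bytes)
{
	/* turn the reservation into real usage: i_blocks/i_bytes grow,
	 * the reserved counter shrinks, and the dquots are dirtied */
	dquot_claim_space(inode, bytes);
}

static void myfs_write_failed(struct inode *inode, qsize_t bytes)
{
	/* nothing was written; hand the reservation back */
	dquot_release_reserved_space(inode, bytes);
}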
+/*
* This operation can block, but only after everything is updated
*/
int dquot_free_inode(const struct inode *inode, qsize_t number)
@@ -1599,10 +1646,7 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
dquot_decr_inodes(inode->i_dquot[cnt], number);
}
spin_unlock(&dq_data_lock);
- /* Dirtify all the dquots - this can block when journalling */
- for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- if (inode->i_dquot[cnt])
- mark_dquot_dirty(inode->i_dquot[cnt]);
+ mark_all_dquot_dirty(inode->i_dquot);
flush_warnings(inode->i_dquot, warntype);
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
return QUOTA_OK;
@@ -1610,19 +1654,6 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
EXPORT_SYMBOL(dquot_free_inode);
/*
- * call back function, get reserved quota space from underlying fs
- */
-qsize_t dquot_get_reserved_space(struct inode *inode)
-{
- qsize_t reserved_space = 0;
-
- if (sb_any_quota_active(inode->i_sb) &&
- inode->i_sb->dq_op->get_reserved_space)
- reserved_space = inode->i_sb->dq_op->get_reserved_space(inode);
- return reserved_space;
-}
-
-/*
* Transfer the number of inode and blocks from one diskquota to an other.
*
* This operation can block, but only after everything is updated
@@ -1665,7 +1696,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
}
spin_lock(&dq_data_lock);
cur_space = inode_get_bytes(inode);
- rsv_space = dquot_get_reserved_space(inode);
+ rsv_space = inode_get_rsv_space(inode);
space = cur_space + rsv_space;
/* Build the transfer_from list and check the limits */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1709,25 +1740,18 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
spin_unlock(&dq_data_lock);
up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
- /* Dirtify all the dquots - this can block when journalling */
- for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (transfer_from[cnt])
- mark_dquot_dirty(transfer_from[cnt]);
- if (transfer_to[cnt]) {
- mark_dquot_dirty(transfer_to[cnt]);
- /* The reference we got is transferred to the inode */
- transfer_to[cnt] = NULL;
- }
- }
+ mark_all_dquot_dirty(transfer_from);
+ mark_all_dquot_dirty(transfer_to);
+ /* The reference we got is transferred to the inode */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ transfer_to[cnt] = NULL;
warn_put_all:
flush_warnings(transfer_to, warntype_to);
flush_warnings(transfer_from, warntype_from_inodes);
flush_warnings(transfer_from, warntype_from_space);
put_all:
- for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- dqput(transfer_from[cnt]);
- dqput(transfer_to[cnt]);
- }
+ dqput_all(transfer_from);
+ dqput_all(transfer_to);
return ret;
over_quota:
spin_unlock(&dq_data_lock);
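
The transfer path can call dqput_all() unconditionally because dqput() treats a NULL dquot as a no-op; clearing transfer_to[] after a successful switch is what records that those references now belong to the inode. The same idea in miniature, as a hypothetical helper:

static void myfs_attach_dquots(struct inode *inode,
			       struct dquot *transfer_to[MAXQUOTAS])
{
	int cnt;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!transfer_to[cnt])
			continue;
		inode->i_dquot[cnt] = transfer_to[cnt];	/* inode owns it now */
		transfer_to[cnt] = NULL;	/* later dqput() sees NULL */
	}
}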
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index 3dfc23e0213..e3da02f4986 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -97,8 +97,11 @@ static int v2_read_file_info(struct super_block *sb, int type)
unsigned int version;
if (!v2_read_header(sb, type, &dqhead))
- return 0;
+ return -1;
version = le32_to_cpu(dqhead.dqh_version);
+ if ((info->dqi_fmt_id == QFMT_VFS_V0 && version != 0) ||
+ (info->dqi_fmt_id == QFMT_VFS_V1 && version != 1))
+ return -1;
size = sb->s_op->quota_read(sb, type, (char *)&dinfo,
sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
@@ -120,8 +123,8 @@ static int v2_read_file_info(struct super_block *sb, int type)
info->dqi_maxilimit = 0xffffffff;
} else {
/* used space is stored as unsigned 64-bit value */
- info->dqi_maxblimit = 0xffffffffffffffff; /* 2^64-1 */
- info->dqi_maxilimit = 0xffffffffffffffff;
+ info->dqi_maxblimit = 0xffffffffffffffffULL; /* 2^64-1 */
+ info->dqi_maxilimit = 0xffffffffffffffffULL;
}
info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
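
Two small hardenings here: v2_read_file_info() now fails (instead of "succeeding" with garbage) when the header cannot be read or carries a version the format id does not expect, and the ULL suffix keeps 32-bit builds from warning about a constant wider than unsigned long. The header check follows a common pattern; a generic sketch with a hypothetical on-disk header:

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/byteorder.h>

struct myfmt_header {
	__le32 magic;
	__le32 version;
};

static int myfmt_check_header(const struct myfmt_header *h,
			      u32 expected_magic, u32 expected_version)
{
	if (le32_to_cpu(h->magic) != expected_magic)
		return -EINVAL;		/* not our file at all */
	if (le32_to_cpu(h->version) != expected_version)
		return -EINVAL;		/* layout we do not understand */
	return 0;
}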
diff --git a/fs/signalfd.c b/fs/signalfd.c
index b07565c9438..1dabe4ee02f 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -236,7 +236,7 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
* anon_inode_getfd() will install the fd.
*/
ufd = anon_inode_getfd("[signalfd]", &signalfd_fops, ctx,
- flags & (O_CLOEXEC | O_NONBLOCK));
+ O_RDWR | (flags & (O_CLOEXEC | O_NONBLOCK)));
if (ufd < 0)
kfree(ctx);
} else {
diff --git a/fs/stat.c b/fs/stat.c
index 075694e31d8..c4ecd52c573 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -401,9 +401,9 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, char __user *, filename,
}
#endif /* __ARCH_WANT_STAT64 */
-void inode_add_bytes(struct inode *inode, loff_t bytes)
+/* Caller is responsible for sufficient locking (i.e. inode->i_lock) */
+void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
- spin_lock(&inode->i_lock);
inode->i_blocks += bytes >> 9;
bytes &= 511;
inode->i_bytes += bytes;
@@ -411,6 +411,12 @@ void inode_add_bytes(struct inode *inode, loff_t bytes)
inode->i_blocks++;
inode->i_bytes -= 512;
}
+}
+
+void inode_add_bytes(struct inode *inode, loff_t bytes)
+{
+ spin_lock(&inode->i_lock);
+ __inode_add_bytes(inode, bytes);
spin_unlock(&inode->i_lock);
}
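
Splitting out __inode_add_bytes() follows the usual locked/unlocked naming convention: the double-underscore variant assumes the caller already holds i_lock, which is what lets quota's inode_claim_rsv_space() move bytes from the reservation into i_blocks/i_bytes under a single lock acquisition. The pattern in miniature, with hypothetical names:

#include <linux/spinlock.h>

struct counter {
	spinlock_t lock;
	long value;
};

/* caller must hold c->lock */
static void __counter_add(struct counter *c, long delta)
{
	c->value += delta;
}

static void counter_add(struct counter *c, long delta)
{
	spin_lock(&c->lock);
	__counter_add(c, delta);
	spin_unlock(&c->lock);
}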
diff --git a/fs/super.c b/fs/super.c
index 19eb70b374b..aff046b0fe7 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -901,8 +901,9 @@ int get_sb_single(struct file_system_type *fs_type,
return error;
}
s->s_flags |= MS_ACTIVE;
+ } else {
+ do_remount_sb(s, flags, data, 0);
}
- do_remount_sb(s, flags, data, 0);
simple_set_mnt(mnt, s);
return 0;
}
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index 60c702bc10a..a0a500af24a 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -483,7 +483,8 @@ void unmap_bin_file(struct sysfs_dirent *attr_sd)
* @attr: attribute descriptor.
*/
-int sysfs_create_bin_file(struct kobject * kobj, struct bin_attribute * attr)
+int sysfs_create_bin_file(struct kobject *kobj,
+ const struct bin_attribute *attr)
{
BUG_ON(!kobj || !kobj->sd || !attr);
@@ -497,7 +498,8 @@ int sysfs_create_bin_file(struct kobject * kobj, struct bin_attribute * attr)
* @attr: attribute descriptor.
*/
-void sysfs_remove_bin_file(struct kobject * kobj, struct bin_attribute * attr)
+void sysfs_remove_bin_file(struct kobject *kobj,
+ const struct bin_attribute *attr)
{
sysfs_hash_and_remove(kobj->sd, attr->attr.name);
}
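
With sysfs_create_bin_file()/sysfs_remove_bin_file() taking a const pointer, read-only binary attributes can be declared const and kept in rodata. A hedged sketch of a driver-side user, with hypothetical names and the callback signature of this era:

#include <linux/sysfs.h>
#include <linux/string.h>
#include <linux/stat.h>

static char blob_data[16];	/* whatever the driver wants to expose */

static ssize_t blob_read(struct kobject *kobj, struct bin_attribute *attr,
			 char *buf, loff_t off, size_t count)
{
	/* copy at most @count bytes of the blob starting at @off */
	return memory_read_from_buffer(buf, count, &off, attr->private,
				       attr->size);
}

static const struct bin_attribute blob_attr = {
	.attr		= { .name = "blob", .mode = S_IRUGO },
	.size		= sizeof(blob_data),
	.private	= blob_data,
	.read		= blob_read,
};

static int blob_register(struct kobject *kobj)
{
	return sysfs_create_bin_file(kobj, &blob_attr);
}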
diff --git a/fs/timerfd.c b/fs/timerfd.c
index b042bd7034b..1bfc95ad5f7 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -200,7 +200,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
hrtimer_init(&ctx->tmr, clockid, HRTIMER_MODE_ABS);
ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx,
- flags & TFD_SHARED_FCNTL_FLAGS);
+ O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS));
if (ufd < 0)
kfree(ctx);
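
As with the signalfd hunk above, the descriptor is meant to be both readable and writable, and the access mode is now taken from the flags rather than assumed, so the callers pass O_RDWR explicitly alongside the user-visible fcntl bits. The resulting calling convention, sketched for a hypothetical driver:

#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/anon_inodes.h>

struct mydev_ctx;				/* driver-private state */
static const struct file_operations mydev_fops;	/* .read/.write/.poll ... */

static int mydev_create_fd(struct mydev_ctx *ctx, int user_flags)
{
	/* O_RDWR is required: FMODE_READ/FMODE_WRITE are now derived from
	 * these flags, so an unset access mode would yield a read-only fd */
	return anon_inode_getfd("[mydev]", &mydev_fops, ctx,
				O_RDWR | (user_flags & (O_CLOEXEC | O_NONBLOCK)));
}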