Diffstat (limited to 'fs')
-rw-r--r--  fs/adfs/super.c      2
-rw-r--r--  fs/fuse/file.c      10
-rw-r--r--  fs/jfs/inode.c      16
-rw-r--r--  fs/jfs/jfs_inode.h   1
-rw-r--r--  fs/jfs/super.c     118
-rw-r--r--  fs/locks.c           6
-rw-r--r--  fs/udf/super.c       7
-rw-r--r--  fs/xfs/xfs_alloc.c 103
8 files changed, 194 insertions(+), 69 deletions(-)
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index ba1c88af49f..82011019494 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -308,7 +308,7 @@ static struct adfs_discmap *adfs_read_map(struct super_block *sb, struct adfs_di
if (adfs_checkmap(sb, dm))
return dm;
- adfs_error(sb, NULL, "map corrupted");
+ adfs_error(sb, "map corrupted");
error_free:
while (--zone >= 0)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 63614ed1633..5c4fcd1dbf5 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -395,14 +395,16 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
struct fuse_readpages_data data;
int err;
+ err = -EIO;
if (is_bad_inode(inode))
- return -EIO;
+ goto clean_pages_up;
data.file = file;
data.inode = inode;
data.req = fuse_get_req(fc);
+ err = PTR_ERR(data.req);
if (IS_ERR(data.req))
- return PTR_ERR(data.req);
+ goto clean_pages_up;
err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
if (!err) {
@@ -412,6 +414,10 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
fuse_put_request(fc, data.req);
}
return err;
+
+clean_pages_up:
+ put_pages_list(pages);
+ return err;
}
static size_t fuse_send_write(struct fuse_req *req, struct file *file,
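(Editorial aside, not part of the patch: a minimal user-space sketch of the error-path idiom the fuse_readpages change above adopts — pre-set the error code, then jump to a single cleanup label so the caller-supplied pages are released exactly once on early failure. All names below are illustrative stand-ins, not the real FUSE API.)

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the page list and the request object. */
struct page_list { char *buf; };
struct request   { int id; };

static int read_pages(struct page_list *pages, int bad_inode, int alloc_fails)
{
	int err;
	struct request *req;

	err = -EIO;
	if (bad_inode)
		goto clean_pages_up;	/* mirrors the is_bad_inode() early exit */

	req = alloc_fails ? NULL : malloc(sizeof(*req));  /* stands in for fuse_get_req() */
	err = -ENOMEM;
	if (!req)
		goto clean_pages_up;

	/* ... issue the read ... */
	free(req);
	return 0;

clean_pages_up:
	free(pages->buf);		/* mirrors put_pages_list(pages) */
	pages->buf = NULL;
	return err;
}

int main(void)
{
	struct page_list p1 = { malloc(64) }, p2 = { malloc(64) }, p3 = { malloc(64) };

	printf("%d %d %d\n", read_pages(&p1, 1, 0), read_pages(&p2, 0, 1),
	       read_pages(&p3, 0, 0));
	free(p3.buf);			/* success path leaves the pages to the caller */
	return 0;
}
```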
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 43e3f566aad..a223cf4faa9 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -168,16 +168,15 @@ void jfs_dirty_inode(struct inode *inode)
set_cflag(COMMIT_Dirty, inode);
}
-static int
-jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks,
- struct buffer_head *bh_result, int create)
+int jfs_get_block(struct inode *ip, sector_t lblock,
+ struct buffer_head *bh_result, int create)
{
s64 lblock64 = lblock;
int rc = 0;
xad_t xad;
s64 xaddr;
int xflag;
- s32 xlen = max_blocks;
+ s32 xlen = bh_result->b_size >> ip->i_blkbits;
/*
* Take appropriate lock on inode
@@ -188,7 +187,7 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks,
IREAD_LOCK(ip);
if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) &&
- (!xtLookup(ip, lblock64, max_blocks, &xflag, &xaddr, &xlen, 0)) &&
+ (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) &&
xaddr) {
if (xflag & XAD_NOTRECORDED) {
if (!create)
@@ -255,13 +254,6 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks,
return rc;
}
-static int jfs_get_block(struct inode *ip, sector_t lblock,
- struct buffer_head *bh_result, int create)
-{
- return jfs_get_blocks(ip, lblock, bh_result->b_size >> ip->i_blkbits,
- bh_result, create);
-}
-
static int jfs_writepage(struct page *page, struct writeback_control *wbc)
{
return nobh_writepage(page, jfs_get_block, wbc);
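(Editorial aside, not part of the patch: the jfs/inode.c change above folds jfs_get_blocks() into jfs_get_block() by deriving the requested extent length from bh_result->b_size instead of a separate max_blocks argument. A tiny self-contained sketch of that conversion, assuming a 4 KiB filesystem block size, i.e. i_blkbits == 12; the variable names only mirror the kernel fields.)

```c
#include <stdio.h>

int main(void)
{
	unsigned int i_blkbits = 12;        /* stand-in for ip->i_blkbits (4 KiB blocks) */
	unsigned long b_size   = 16 << 12;  /* stand-in for bh_result->b_size: a 64 KiB mapping request */

	/* xlen = b_size >> i_blkbits, as in the patched jfs_get_block() */
	long xlen = b_size >> i_blkbits;
	printf("b_size = %lu bytes -> xlen = %ld blocks\n", b_size, xlen);
	return 0;
}
```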
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index b5c7da6190d..1fc48df670c 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -32,6 +32,7 @@ extern void jfs_truncate_nolock(struct inode *, loff_t);
extern void jfs_free_zero_link(struct inode *);
extern struct dentry *jfs_get_parent(struct dentry *dentry);
extern void jfs_set_inode_flags(struct inode *);
+extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
extern const struct address_space_operations jfs_aops;
extern struct inode_operations jfs_dir_inode_operations;
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 4f6cfebc82d..143bcd1d5ea 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -26,6 +26,7 @@
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/posix_acl.h>
+#include <linux/buffer_head.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
@@ -298,7 +299,7 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
break;
}
-#if defined(CONFIG_QUOTA)
+#ifdef CONFIG_QUOTA
case Opt_quota:
case Opt_usrquota:
*flag |= JFS_USRQUOTA;
@@ -597,7 +598,7 @@ static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
if (sbi->flag & JFS_NOINTEGRITY)
seq_puts(seq, ",nointegrity");
-#if defined(CONFIG_QUOTA)
+#ifdef CONFIG_QUOTA
if (sbi->flag & JFS_USRQUOTA)
seq_puts(seq, ",usrquota");
@@ -608,6 +609,113 @@ static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
return 0;
}
+#ifdef CONFIG_QUOTA
+
+/* Read data from quotafile - avoid pagecache and such because we cannot afford
+ * acquiring the locks... As quota files are never truncated and quota code
+ * itself serializes the operations (and noone else should touch the files)
+ * we don't have to be afraid of races */
+static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
+ size_t len, loff_t off)
+{
+ struct inode *inode = sb_dqopt(sb)->files[type];
+ sector_t blk = off >> sb->s_blocksize_bits;
+ int err = 0;
+ int offset = off & (sb->s_blocksize - 1);
+ int tocopy;
+ size_t toread;
+ struct buffer_head tmp_bh;
+ struct buffer_head *bh;
+ loff_t i_size = i_size_read(inode);
+
+ if (off > i_size)
+ return 0;
+ if (off+len > i_size)
+ len = i_size-off;
+ toread = len;
+ while (toread > 0) {
+ tocopy = sb->s_blocksize - offset < toread ?
+ sb->s_blocksize - offset : toread;
+
+ tmp_bh.b_state = 0;
+ tmp_bh.b_size = 1 << inode->i_blkbits;
+ err = jfs_get_block(inode, blk, &tmp_bh, 0);
+ if (err)
+ return err;
+ if (!buffer_mapped(&tmp_bh)) /* A hole? */
+ memset(data, 0, tocopy);
+ else {
+ bh = sb_bread(sb, tmp_bh.b_blocknr);
+ if (!bh)
+ return -EIO;
+ memcpy(data, bh->b_data+offset, tocopy);
+ brelse(bh);
+ }
+ offset = 0;
+ toread -= tocopy;
+ data += tocopy;
+ blk++;
+ }
+ return len;
+}
+
+/* Write to quotafile */
+static ssize_t jfs_quota_write(struct super_block *sb, int type,
+ const char *data, size_t len, loff_t off)
+{
+ struct inode *inode = sb_dqopt(sb)->files[type];
+ sector_t blk = off >> sb->s_blocksize_bits;
+ int err = 0;
+ int offset = off & (sb->s_blocksize - 1);
+ int tocopy;
+ size_t towrite = len;
+ struct buffer_head tmp_bh;
+ struct buffer_head *bh;
+
+ mutex_lock(&inode->i_mutex);
+ while (towrite > 0) {
+ tocopy = sb->s_blocksize - offset < towrite ?
+ sb->s_blocksize - offset : towrite;
+
+ tmp_bh.b_state = 0;
+ tmp_bh.b_size = 1 << inode->i_blkbits;
+ err = jfs_get_block(inode, blk, &tmp_bh, 1);
+ if (err)
+ goto out;
+ if (offset || tocopy != sb->s_blocksize)
+ bh = sb_bread(sb, tmp_bh.b_blocknr);
+ else
+ bh = sb_getblk(sb, tmp_bh.b_blocknr);
+ if (!bh) {
+ err = -EIO;
+ goto out;
+ }
+ lock_buffer(bh);
+ memcpy(bh->b_data+offset, data, tocopy);
+ flush_dcache_page(bh->b_page);
+ set_buffer_uptodate(bh);
+ mark_buffer_dirty(bh);
+ unlock_buffer(bh);
+ brelse(bh);
+ offset = 0;
+ towrite -= tocopy;
+ data += tocopy;
+ blk++;
+ }
+out:
+ if (len == towrite)
+ return err;
+ if (inode->i_size < off+len-towrite)
+ i_size_write(inode, off+len-towrite);
+ inode->i_version++;
+ inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ mark_inode_dirty(inode);
+ mutex_unlock(&inode->i_mutex);
+ return len - towrite;
+}
+
+#endif
+
static struct super_operations jfs_super_operations = {
.alloc_inode = jfs_alloc_inode,
.destroy_inode = jfs_destroy_inode,
@@ -621,7 +729,11 @@ static struct super_operations jfs_super_operations = {
.unlockfs = jfs_unlockfs,
.statfs = jfs_statfs,
.remount_fs = jfs_remount,
- .show_options = jfs_show_options
+ .show_options = jfs_show_options,
+#ifdef CONFIG_QUOTA
+ .quota_read = jfs_quota_read,
+ .quota_write = jfs_quota_write,
+#endif
};
static struct export_operations jfs_export_operations = {
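(Editorial aside, not part of the patch: as a self-contained illustration of the block-splitting arithmetic that the new jfs_quota_read()/jfs_quota_write() perform, the sketch below — hypothetical constants, assuming a 4096-byte block — walks a byte range the same way: block number from off >> blocksize_bits, in-block offset from off & (blocksize - 1), and min(blocksize - offset, remaining) bytes per step.)

```c
#include <stdio.h>
#include <stddef.h>

#define BLOCKSIZE      4096UL   /* stand-in for sb->s_blocksize */
#define BLOCKSIZE_BITS 12       /* stand-in for sb->s_blocksize_bits */

/* Walk the range (off, len) block by block, as the quota helpers do. */
static void walk_range(unsigned long long off, size_t len)
{
	unsigned long long blk = off >> BLOCKSIZE_BITS;
	size_t offset = off & (BLOCKSIZE - 1);
	size_t remaining = len;

	while (remaining > 0) {
		size_t tocopy = BLOCKSIZE - offset < remaining ?
				BLOCKSIZE - offset : remaining;
		printf("block %llu, offset %zu, copy %zu bytes\n",
		       blk, offset, tocopy);
		offset = 0;
		remaining -= tocopy;
		blk++;
	}
}

int main(void)
{
	walk_range(4090, 20);	/* a record straddling the first block boundary */
	return 0;
}
```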
diff --git a/fs/locks.c b/fs/locks.c
index b0b41a64e10..d7c53392cac 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1421,8 +1421,9 @@ static int __setlease(struct file *filp, long arg, struct file_lock **flp)
if (!leases_enable)
goto out;
- error = lease_alloc(filp, arg, &fl);
- if (error)
+ error = -ENOMEM;
+ fl = locks_alloc_lock();
+ if (fl == NULL)
goto out;
locks_copy_lock(fl, lease);
@@ -1430,6 +1431,7 @@ static int __setlease(struct file *filp, long arg, struct file_lock **flp)
locks_insert_lock(before, fl);
*flp = fl;
+ error = 0;
out:
return error;
}
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 4df822c881b..7de172efa08 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -115,6 +115,13 @@ static struct inode *udf_alloc_inode(struct super_block *sb)
ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, SLAB_KERNEL);
if (!ei)
return NULL;
+
+ ei->i_unique = 0;
+ ei->i_lenExtents = 0;
+ ei->i_next_alloc_block = 0;
+ ei->i_next_alloc_goal = 0;
+ ei->i_strat4096 = 0;
+
return &ei->vfs_inode;
}
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index eef6763f3a6..d2bbcd882a6 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -1835,40 +1835,47 @@ xfs_alloc_fix_freelist(
&agbp)))
return error;
if (!pag->pagf_init) {
+ ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
+ ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
args->agbp = NULL;
return 0;
}
} else
agbp = NULL;
- /* If this is a metadata preferred pag and we are user data
+ /*
+ * If this is a metadata preferred pag and we are user data
* then try somewhere else if we are not being asked to
* try harder at this point
*/
- if (pag->pagf_metadata && args->userdata && flags) {
+ if (pag->pagf_metadata && args->userdata &&
+ (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
+ ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
args->agbp = NULL;
return 0;
}
- need = XFS_MIN_FREELIST_PAG(pag, mp);
- delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0;
- /*
- * If it looks like there isn't a long enough extent, or enough
- * total blocks, reject it.
- */
- longest = (pag->pagf_longest > delta) ?
- (pag->pagf_longest - delta) :
- (pag->pagf_flcount > 0 || pag->pagf_longest > 0);
- if (args->minlen + args->alignment + args->minalignslop - 1 > longest ||
- (!(flags & XFS_ALLOC_FLAG_FREEING) &&
- (int)(pag->pagf_freeblks + pag->pagf_flcount -
- need - args->total) <
- (int)args->minleft)) {
- if (agbp)
- xfs_trans_brelse(tp, agbp);
- args->agbp = NULL;
- return 0;
+ if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
+ need = XFS_MIN_FREELIST_PAG(pag, mp);
+ delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0;
+ /*
+ * If it looks like there isn't a long enough extent, or enough
+ * total blocks, reject it.
+ */
+ longest = (pag->pagf_longest > delta) ?
+ (pag->pagf_longest - delta) :
+ (pag->pagf_flcount > 0 || pag->pagf_longest > 0);
+ if ((args->minlen + args->alignment + args->minalignslop - 1) >
+ longest ||
+ ((int)(pag->pagf_freeblks + pag->pagf_flcount -
+ need - args->total) < (int)args->minleft)) {
+ if (agbp)
+ xfs_trans_brelse(tp, agbp);
+ args->agbp = NULL;
+ return 0;
+ }
}
+
/*
* Get the a.g. freespace buffer.
* Can fail if we're not blocking on locks, and it's held.
@@ -1878,6 +1885,8 @@ xfs_alloc_fix_freelist(
&agbp)))
return error;
if (agbp == NULL) {
+ ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
+ ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
args->agbp = NULL;
return 0;
}
@@ -1887,22 +1896,24 @@ xfs_alloc_fix_freelist(
*/
agf = XFS_BUF_TO_AGF(agbp);
need = XFS_MIN_FREELIST(agf, mp);
- delta = need > be32_to_cpu(agf->agf_flcount) ?
- (need - be32_to_cpu(agf->agf_flcount)) : 0;
/*
* If there isn't enough total or single-extent, reject it.
*/
- longest = be32_to_cpu(agf->agf_longest);
- longest = (longest > delta) ? (longest - delta) :
- (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0);
- if (args->minlen + args->alignment + args->minalignslop - 1 > longest ||
- (!(flags & XFS_ALLOC_FLAG_FREEING) &&
- (int)(be32_to_cpu(agf->agf_freeblks) +
- be32_to_cpu(agf->agf_flcount) - need - args->total) <
- (int)args->minleft)) {
- xfs_trans_brelse(tp, agbp);
- args->agbp = NULL;
- return 0;
+ if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
+ delta = need > be32_to_cpu(agf->agf_flcount) ?
+ (need - be32_to_cpu(agf->agf_flcount)) : 0;
+ longest = be32_to_cpu(agf->agf_longest);
+ longest = (longest > delta) ? (longest - delta) :
+ (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0);
+ if ((args->minlen + args->alignment + args->minalignslop - 1) >
+ longest ||
+ ((int)(be32_to_cpu(agf->agf_freeblks) +
+ be32_to_cpu(agf->agf_flcount) - need - args->total) <
+ (int)args->minleft)) {
+ xfs_trans_brelse(tp, agbp);
+ args->agbp = NULL;
+ return 0;
+ }
}
/*
* Make the freelist shorter if it's too long.
@@ -1950,12 +1961,11 @@ xfs_alloc_fix_freelist(
* on a completely full ag.
*/
if (targs.agbno == NULLAGBLOCK) {
- if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
- xfs_trans_brelse(tp, agflbp);
- args->agbp = NULL;
- return 0;
- }
- break;
+ if (flags & XFS_ALLOC_FLAG_FREEING)
+ break;
+ xfs_trans_brelse(tp, agflbp);
+ args->agbp = NULL;
+ return 0;
}
/*
* Put each allocated block on the list.
@@ -2442,31 +2452,26 @@ xfs_free_extent(
xfs_fsblock_t bno, /* starting block number of extent */
xfs_extlen_t len) /* length of extent */
{
-#ifdef DEBUG
- xfs_agf_t *agf; /* a.g. freespace header */
-#endif
- xfs_alloc_arg_t args; /* allocation argument structure */
+ xfs_alloc_arg_t args;
int error;
ASSERT(len != 0);
+ memset(&args, 0, sizeof(xfs_alloc_arg_t));
args.tp = tp;
args.mp = tp->t_mountp;
args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
ASSERT(args.agno < args.mp->m_sb.sb_agcount);
args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
- args.alignment = 1;
- args.minlen = args.minleft = args.minalignslop = 0;
down_read(&args.mp->m_peraglock);
args.pag = &args.mp->m_perag[args.agno];
if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING)))
goto error0;
#ifdef DEBUG
ASSERT(args.agbp != NULL);
- agf = XFS_BUF_TO_AGF(args.agbp);
- ASSERT(args.agbno + len <= be32_to_cpu(agf->agf_length));
+ ASSERT((args.agbno + len) <=
+ be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length));
#endif
- error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno,
- len, 0);
+ error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
error0:
up_read(&args.mp->m_peraglock);
return error;