| author | John W. Linville <linville@tuxdriver.com> | 2006-08-14 15:33:54 -0400 |
|---|---|---|
| committer | John W. Linville <linville@tuxdriver.com> | 2006-08-14 15:33:54 -0400 |
| commit | e9ffb3d7ec94083a44a8721681391beca2ffd68c (patch) | |
| tree | 6768ab487b3f44c2a4995ee61307e47760ca9b88 /fs | |
| parent | 8b9411014e6f18a883c18b38f41338dbd53fddea (diff) | |
| parent | e9fa4f7bd291c29a785666e2fa5a9cf3241ee6c3 (diff) | |
Merge branch 'from-linus' into upstream
Diffstat (limited to 'fs')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | fs/befs/linuxvfs.c | 11 |
| -rw-r--r-- | fs/jfs/inode.c | 16 |
| -rw-r--r-- | fs/jfs/jfs_inode.h | 1 |
| -rw-r--r-- | fs/jfs/super.c | 118 |
| -rw-r--r-- | fs/lockd/svclock.c | 12 |
| -rw-r--r-- | fs/nfs/namespace.c | 4 |
| -rw-r--r-- | fs/nfs/read.c | 2 |
| -rw-r--r-- | fs/nfs/write.c | 2 |
| -rw-r--r-- | fs/reiserfs/file.c | 2 |
| -rw-r--r-- | fs/reiserfs/inode.c | 26 |
| -rw-r--r-- | fs/reiserfs/ioctl.c | 2 |
| -rw-r--r-- | fs/udf/ialloc.c | 11 |
| -rw-r--r-- | fs/ufs/balloc.c | 2 |
| -rw-r--r-- | fs/ufs/util.c | 17 |
| -rw-r--r-- | fs/xfs/xfs_alloc.c | 103 |

15 files changed, 223 insertions, 106 deletions
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index fcaeead9696..50cfca5c7ef 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -512,7 +512,11 @@ befs_utf2nls(struct super_block *sb, const char *in,
     wchar_t uni;
     int unilen, utflen;
     char *result;
-    int maxlen = in_len; /* The utf8->nls conversion can't make more chars */
+    /* The utf8->nls conversion won't make the final nls string bigger
+     * than the utf one, but if the string is pure ascii they'll have the
+     * same width and an extra char is needed to save the additional \0
+     */
+    int maxlen = in_len + 1;
 
     befs_debug(sb, "---> utf2nls()");
 
@@ -588,7 +592,10 @@ befs_nls2utf(struct super_block *sb, const char *in,
     wchar_t uni;
     int unilen, utflen;
     char *result;
-    int maxlen = 3 * in_len;
+    /* There're nls characters that will translate to 3-chars-wide UTF-8
+     * characters, a additional byte is needed to save the final \0
+     * in special cases */
+    int maxlen = (3 * in_len) + 1;
 
     befs_debug(sb, "---> nls2utf()\n");
 
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 43e3f566aad..a223cf4faa9 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -168,16 +168,15 @@ void jfs_dirty_inode(struct inode *inode)
     set_cflag(COMMIT_Dirty, inode);
 }
 
-static int
-jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks,
-               struct buffer_head *bh_result, int create)
+int jfs_get_block(struct inode *ip, sector_t lblock,
+                  struct buffer_head *bh_result, int create)
 {
     s64 lblock64 = lblock;
     int rc = 0;
     xad_t xad;
     s64 xaddr;
     int xflag;
-    s32 xlen = max_blocks;
+    s32 xlen = bh_result->b_size >> ip->i_blkbits;
 
     /*
      * Take appropriate lock on inode
@@ -188,7 +187,7 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks,
     IREAD_LOCK(ip);
 
     if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) &&
-        (!xtLookup(ip, lblock64, max_blocks, &xflag, &xaddr, &xlen, 0)) &&
+        (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) &&
         xaddr) {
         if (xflag & XAD_NOTRECORDED) {
             if (!create)
@@ -255,13 +254,6 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks,
     return rc;
 }
 
-static int jfs_get_block(struct inode *ip, sector_t lblock,
-                         struct buffer_head *bh_result, int create)
-{
-    return jfs_get_blocks(ip, lblock, bh_result->b_size >> ip->i_blkbits,
-                          bh_result, create);
-}
-
 static int jfs_writepage(struct page *page, struct writeback_control *wbc)
 {
     return nobh_writepage(page, jfs_get_block, wbc);
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index b5c7da6190d..1fc48df670c 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -32,6 +32,7 @@ extern void jfs_truncate_nolock(struct inode *, loff_t);
 extern void jfs_free_zero_link(struct inode *);
 extern struct dentry *jfs_get_parent(struct dentry *dentry);
 extern void jfs_set_inode_flags(struct inode *);
+extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
 
 extern const struct address_space_operations jfs_aops;
 extern struct inode_operations jfs_dir_inode_operations;
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 4f6cfebc82d..143bcd1d5ea 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -26,6 +26,7 @@
 #include <linux/moduleparam.h>
 #include <linux/kthread.h>
 #include <linux/posix_acl.h>
+#include <linux/buffer_head.h>
 #include <asm/uaccess.h>
 #include <linux/seq_file.h>
 
@@ -298,7 +299,7 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
             break;
         }
 
-#if defined(CONFIG_QUOTA)
+#ifdef CONFIG_QUOTA
         case Opt_quota:
         case Opt_usrquota:
             *flag |= JFS_USRQUOTA;
@@ -597,7 +598,7 @@ static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
     if (sbi->flag & JFS_NOINTEGRITY)
         seq_puts(seq, ",nointegrity");
 
-#if defined(CONFIG_QUOTA)
+#ifdef CONFIG_QUOTA
     if (sbi->flag & JFS_USRQUOTA)
         seq_puts(seq, ",usrquota");
 
@@ -608,6 +609,113 @@ static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
     return 0;
 }
 
+#ifdef CONFIG_QUOTA
+
+/* Read data from quotafile - avoid pagecache and such because we cannot afford
+ * acquiring the locks... As quota files are never truncated and quota code
+ * itself serializes the operations (and noone else should touch the files)
+ * we don't have to be afraid of races */
+static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
+                              size_t len, loff_t off)
+{
+    struct inode *inode = sb_dqopt(sb)->files[type];
+    sector_t blk = off >> sb->s_blocksize_bits;
+    int err = 0;
+    int offset = off & (sb->s_blocksize - 1);
+    int tocopy;
+    size_t toread;
+    struct buffer_head tmp_bh;
+    struct buffer_head *bh;
+    loff_t i_size = i_size_read(inode);
+
+    if (off > i_size)
+        return 0;
+    if (off+len > i_size)
+        len = i_size-off;
+    toread = len;
+    while (toread > 0) {
+        tocopy = sb->s_blocksize - offset < toread ?
+                 sb->s_blocksize - offset : toread;
+
+        tmp_bh.b_state = 0;
+        tmp_bh.b_size = 1 << inode->i_blkbits;
+        err = jfs_get_block(inode, blk, &tmp_bh, 0);
+        if (err)
+            return err;
+        if (!buffer_mapped(&tmp_bh))    /* A hole? */
+            memset(data, 0, tocopy);
+        else {
+            bh = sb_bread(sb, tmp_bh.b_blocknr);
+            if (!bh)
+                return -EIO;
+            memcpy(data, bh->b_data+offset, tocopy);
+            brelse(bh);
+        }
+        offset = 0;
+        toread -= tocopy;
+        data += tocopy;
+        blk++;
+    }
+    return len;
+}
+
+/* Write to quotafile */
+static ssize_t jfs_quota_write(struct super_block *sb, int type,
+                               const char *data, size_t len, loff_t off)
+{
+    struct inode *inode = sb_dqopt(sb)->files[type];
+    sector_t blk = off >> sb->s_blocksize_bits;
+    int err = 0;
+    int offset = off & (sb->s_blocksize - 1);
+    int tocopy;
+    size_t towrite = len;
+    struct buffer_head tmp_bh;
+    struct buffer_head *bh;
+
+    mutex_lock(&inode->i_mutex);
+    while (towrite > 0) {
+        tocopy = sb->s_blocksize - offset < towrite ?
+                 sb->s_blocksize - offset : towrite;
+
+        tmp_bh.b_state = 0;
+        tmp_bh.b_size = 1 << inode->i_blkbits;
+        err = jfs_get_block(inode, blk, &tmp_bh, 1);
+        if (err)
+            goto out;
+        if (offset || tocopy != sb->s_blocksize)
+            bh = sb_bread(sb, tmp_bh.b_blocknr);
+        else
+            bh = sb_getblk(sb, tmp_bh.b_blocknr);
+        if (!bh) {
+            err = -EIO;
+            goto out;
+        }
+        lock_buffer(bh);
+        memcpy(bh->b_data+offset, data, tocopy);
+        flush_dcache_page(bh->b_page);
+        set_buffer_uptodate(bh);
+        mark_buffer_dirty(bh);
+        unlock_buffer(bh);
+        brelse(bh);
+        offset = 0;
+        towrite -= tocopy;
+        data += tocopy;
+        blk++;
+    }
+out:
+    if (len == towrite)
+        return err;
+    if (inode->i_size < off+len-towrite)
+        i_size_write(inode, off+len-towrite);
+    inode->i_version++;
+    inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+    mark_inode_dirty(inode);
+    mutex_unlock(&inode->i_mutex);
+    return len - towrite;
+}
+
+#endif
+
 static struct super_operations jfs_super_operations = {
     .alloc_inode = jfs_alloc_inode,
     .destroy_inode = jfs_destroy_inode,
@@ -621,7 +729,11 @@ static struct super_operations jfs_super_operations = {
     .unlockfs = jfs_unlockfs,
     .statfs = jfs_statfs,
     .remount_fs = jfs_remount,
-    .show_options = jfs_show_options
+    .show_options = jfs_show_options,
+#ifdef CONFIG_QUOTA
+    .quota_read = jfs_quota_read,
+    .quota_write = jfs_quota_write,
+#endif
 };
 
 static struct export_operations jfs_export_operations = {
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index baf5ae51348..c9d419703cf 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -638,9 +638,6 @@ static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
     if (task->tk_status < 0) {
         /* RPC error: Re-insert for retransmission */
         timeout = 10 * HZ;
-    } else if (block->b_done) {
-        /* Block already removed, kill it for real */
-        timeout = 0;
     } else {
         /* Call was successful, now wait for client callback */
         timeout = 60 * HZ;
@@ -709,13 +706,10 @@ nlmsvc_retry_blocked(void)
             break;
         if (time_after(block->b_when,jiffies))
             break;
-        dprintk("nlmsvc_retry_blocked(%p, when=%ld, done=%d)\n",
-            block, block->b_when, block->b_done);
+        dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
+            block, block->b_when);
         kref_get(&block->b_count);
-        if (block->b_done)
-            nlmsvc_unlink_block(block);
-        else
-            nlmsvc_grant_blocked(block);
+        nlmsvc_grant_blocked(block);
         nlmsvc_release_block(block);
     }
 
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 19b98ca468e..86b3169c8ca 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -51,7 +51,7 @@ char *nfs_path(const char *base, const struct dentry *dentry,
         namelen = dentry->d_name.len;
         buflen -= namelen + 1;
         if (buflen < 0)
-            goto Elong;
+            goto Elong_unlock;
         end -= namelen;
         memcpy(end, dentry->d_name.name, namelen);
         *--end = '/';
@@ -68,6 +68,8 @@ char *nfs_path(const char *base, const struct dentry *dentry,
     end -= namelen;
     memcpy(end, base, namelen);
     return end;
+Elong_unlock:
+    spin_unlock(&dcache_lock);
 Elong:
     return ERR_PTR(-ENAMETOOLONG);
 }
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 52bf634260a..65c0c5b3235 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -63,7 +63,7 @@ struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
     return p;
 }
 
-void nfs_readdata_free(struct nfs_read_data *p)
+static void nfs_readdata_free(struct nfs_read_data *p)
 {
     if (p && (p->pagevec != &p->page_array[0]))
         kfree(p->pagevec);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 86bac6a5008..50774991f8d 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -137,7 +137,7 @@ struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
     return p;
 }
 
-void nfs_writedata_free(struct nfs_write_data *p)
+static void nfs_writedata_free(struct nfs_write_data *p)
 {
     if (p && (p->pagevec != &p->page_array[0]))
         kfree(p->pagevec);
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index f318b58510f..1627edd5081 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -48,8 +48,8 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
         return 0;
     }
 
-    reiserfs_write_lock(inode->i_sb);
     mutex_lock(&inode->i_mutex);
+    reiserfs_write_lock(inode->i_sb);
     /* freeing preallocation only involves relogging blocks that
      * are already in the current transaction.  preallocation gets
      * freed at the end of each transaction, so it is impossible for
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 12dfdcfbee3..52f1e213654 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -39,14 +39,10 @@ void reiserfs_delete_inode(struct inode *inode)
 
     /* The = 0 happens when we abort creating a new inode for some reason like lack of space.. */
     if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) {  /* also handles bad_inode case */
-        mutex_lock(&inode->i_mutex);
-
         reiserfs_delete_xattrs(inode);
 
-        if (journal_begin(&th, inode->i_sb, jbegin_count)) {
-            mutex_unlock(&inode->i_mutex);
+        if (journal_begin(&th, inode->i_sb, jbegin_count))
             goto out;
-        }
         reiserfs_update_inode_transaction(inode);
 
         err = reiserfs_delete_object(&th, inode);
@@ -57,12 +53,8 @@ void reiserfs_delete_inode(struct inode *inode)
         if (!err)
             DQUOT_FREE_INODE(inode);
 
-        if (journal_end(&th, inode->i_sb, jbegin_count)) {
-            mutex_unlock(&inode->i_mutex);
+        if (journal_end(&th, inode->i_sb, jbegin_count))
             goto out;
-        }
-
-        mutex_unlock(&inode->i_mutex);
 
         /* check return value from reiserfs_delete_object after
          * ending the transaction
@@ -2348,6 +2340,7 @@ static int reiserfs_write_full_page(struct page *page,
     unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT;
     int error = 0;
     unsigned long block;
+    sector_t last_block;
     struct buffer_head *head, *bh;
     int partial = 0;
     int nr = 0;
@@ -2395,10 +2388,19 @@ static int reiserfs_write_full_page(struct page *page,
     }
     bh = head;
     block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits);
+    last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
     /* first map all the buffers, logging any direct items we find */
     do {
-        if ((checked || buffer_dirty(bh)) && (!buffer_mapped(bh) ||
-            (buffer_mapped(bh)
+        if (block > last_block) {
+            /*
+             * This can happen when the block size is less than
+             * the page size.  The corresponding bytes in the page
+             * were zero filled above
+             */
+            clear_buffer_dirty(bh);
+            set_buffer_uptodate(bh);
+        } else if ((checked || buffer_dirty(bh)) &&
+                   (!buffer_mapped(bh) || (buffer_mapped(bh)
                                            && bh->b_blocknr ==
                                            0))) {
             /* not mapped yet, or it points to a direct item, search
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 745c8810089..a986b5e1e28 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -116,12 +116,12 @@ static int reiserfs_unpack(struct inode *inode, struct file *filp)
     if (REISERFS_I(inode)->i_flags & i_nopack_mask) {
         return 0;
     }
-    reiserfs_write_lock(inode->i_sb);
 
     /* we need to make sure nobody is changing the file size beneath
      ** us
      */
     mutex_lock(&inode->i_mutex);
+    reiserfs_write_lock(inode->i_sb);
 
     write_from = inode->i_size & (blocksize - 1);
     /* if we are on a block boundary, we are already unpacked.  */
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 3873c672cb4..33323473e3c 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -75,6 +75,12 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err)
     }
     *err = -ENOSPC;
 
+    UDF_I_UNIQUE(inode) = 0;
+    UDF_I_LENEXTENTS(inode) = 0;
+    UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
+    UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
+    UDF_I_STRAT4096(inode) = 0;
+
     block = udf_new_block(dir->i_sb, NULL, UDF_I_LOCATION(dir).partitionReferenceNum,
         start, err);
     if (*err)
@@ -84,11 +90,6 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err)
     }
 
     mutex_lock(&sbi->s_alloc_mutex);
-    UDF_I_UNIQUE(inode) = 0;
-    UDF_I_LENEXTENTS(inode) = 0;
-    UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
-    UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
-    UDF_I_STRAT4096(inode) = 0;
     if (UDF_SB_LVIDBH(sb))
     {
         struct logicalVolHeaderDesc *lvhd;
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index b01804baa12..b8238147577 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -248,7 +248,7 @@ static void ufs_change_blocknr(struct inode *inode, unsigned int baseblk,
 
         if (likely(cur_index != index)) {
             page = ufs_get_locked_page(mapping, index);
-            if (IS_ERR(page))
+            if (!page || IS_ERR(page)) /* it was truncated or EIO */
                 continue;
         } else
             page = locked_page;
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
index 337cf2c46d1..22f820a9b15 100644
--- a/fs/ufs/util.c
+++ b/fs/ufs/util.c
@@ -251,12 +251,12 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
 {
     struct page *page;
 
-try_again:
     page = find_lock_page(mapping, index);
     if (!page) {
         page = read_cache_page(mapping, index,
                                (filler_t*)mapping->a_ops->readpage,
                                NULL);
+
         if (IS_ERR(page)) {
             printk(KERN_ERR "ufs_change_blocknr: "
                    "read_cache_page error: ino %lu, index: %lu\n",
@@ -266,6 +266,14 @@ try_again:
 
         lock_page(page);
 
+        if (unlikely(page->mapping == NULL)) {
+            /* Truncate got there first */
+            unlock_page(page);
+            page_cache_release(page);
+            page = NULL;
+            goto out;
+        }
+
         if (!PageUptodate(page) || PageError(page)) {
             unlock_page(page);
             page_cache_release(page);
@@ -275,15 +283,8 @@ try_again:
                    mapping->host->i_ino, index);
             page = ERR_PTR(-EIO);
-            goto out;
         }
     }
-
-    if (unlikely(!page->mapping || !page_has_buffers(page))) {
-        unlock_page(page);
-        page_cache_release(page);
-        goto try_again; /*we really need these buffers*/
-    }
 out:
     return page;
 }
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index eef6763f3a6..d2bbcd882a6 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -1835,40 +1835,47 @@ xfs_alloc_fix_freelist(
                 &agbp)))
             return error;
         if (!pag->pagf_init) {
+            ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
+            ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
             args->agbp = NULL;
             return 0;
         }
     } else
         agbp = NULL;
 
-    /* If this is a metadata preferred pag and we are user data
+    /*
+     * If this is a metadata preferred pag and we are user data
      * then try somewhere else if we are not being asked to
      * try harder at this point
      */
-    if (pag->pagf_metadata && args->userdata && flags) {
+    if (pag->pagf_metadata && args->userdata &&
+        (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
+        ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
         args->agbp = NULL;
         return 0;
     }
 
-    need = XFS_MIN_FREELIST_PAG(pag, mp);
-    delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0;
-    /*
-     * If it looks like there isn't a long enough extent, or enough
-     * total blocks, reject it.
-     */
-    longest = (pag->pagf_longest > delta) ?
-        (pag->pagf_longest - delta) :
-        (pag->pagf_flcount > 0 || pag->pagf_longest > 0);
-    if (args->minlen + args->alignment + args->minalignslop - 1 > longest ||
-        (!(flags & XFS_ALLOC_FLAG_FREEING) &&
-         (int)(pag->pagf_freeblks + pag->pagf_flcount -
-               need - args->total) <
-         (int)args->minleft)) {
-        if (agbp)
-            xfs_trans_brelse(tp, agbp);
-        args->agbp = NULL;
-        return 0;
+    if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
+        need = XFS_MIN_FREELIST_PAG(pag, mp);
+        delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0;
+        /*
+         * If it looks like there isn't a long enough extent, or enough
+         * total blocks, reject it.
+         */
+        longest = (pag->pagf_longest > delta) ?
+            (pag->pagf_longest - delta) :
+            (pag->pagf_flcount > 0 || pag->pagf_longest > 0);
+        if ((args->minlen + args->alignment + args->minalignslop - 1) >
+                longest ||
+            ((int)(pag->pagf_freeblks + pag->pagf_flcount -
+                   need - args->total) < (int)args->minleft)) {
+            if (agbp)
+                xfs_trans_brelse(tp, agbp);
+            args->agbp = NULL;
+            return 0;
+        }
     }
+
     /*
      * Get the a.g. freespace buffer.
      * Can fail if we're not blocking on locks, and it's held.
@@ -1878,6 +1885,8 @@ xfs_alloc_fix_freelist(
             &agbp)))
         return error;
     if (agbp == NULL) {
+        ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
+        ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
         args->agbp = NULL;
         return 0;
     }
@@ -1887,22 +1896,24 @@ xfs_alloc_fix_freelist(
      */
     agf = XFS_BUF_TO_AGF(agbp);
     need = XFS_MIN_FREELIST(agf, mp);
-    delta = need > be32_to_cpu(agf->agf_flcount) ?
-        (need - be32_to_cpu(agf->agf_flcount)) : 0;
     /*
      * If there isn't enough total or single-extent, reject it.
     */
-    longest = be32_to_cpu(agf->agf_longest);
-    longest = (longest > delta) ? (longest - delta) :
-        (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0);
-    if (args->minlen + args->alignment + args->minalignslop - 1 > longest ||
-        (!(flags & XFS_ALLOC_FLAG_FREEING) &&
-         (int)(be32_to_cpu(agf->agf_freeblks) +
-               be32_to_cpu(agf->agf_flcount) - need - args->total) <
-         (int)args->minleft)) {
-        xfs_trans_brelse(tp, agbp);
-        args->agbp = NULL;
-        return 0;
+    if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
+        delta = need > be32_to_cpu(agf->agf_flcount) ?
+            (need - be32_to_cpu(agf->agf_flcount)) : 0;
+        longest = be32_to_cpu(agf->agf_longest);
+        longest = (longest > delta) ? (longest - delta) :
+            (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0);
+        if ((args->minlen + args->alignment + args->minalignslop - 1) >
+                longest ||
+            ((int)(be32_to_cpu(agf->agf_freeblks) +
+                   be32_to_cpu(agf->agf_flcount) - need - args->total) <
+             (int)args->minleft)) {
+            xfs_trans_brelse(tp, agbp);
+            args->agbp = NULL;
+            return 0;
+        }
     }
     /*
      * Make the freelist shorter if it's too long.
@@ -1950,12 +1961,11 @@ xfs_alloc_fix_freelist(
          * on a completely full ag.
          */
         if (targs.agbno == NULLAGBLOCK) {
-            if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
-                xfs_trans_brelse(tp, agflbp);
-                args->agbp = NULL;
-                return 0;
-            }
-            break;
+            if (flags & XFS_ALLOC_FLAG_FREEING)
+                break;
+            xfs_trans_brelse(tp, agflbp);
+            args->agbp = NULL;
+            return 0;
         }
         /*
          * Put each allocated block on the list.
@@ -2442,31 +2452,26 @@ xfs_free_extent(
     xfs_fsblock_t bno,  /* starting block number of extent */
     xfs_extlen_t len)   /* length of extent */
 {
-#ifdef DEBUG
-    xfs_agf_t *agf;     /* a.g. freespace header */
-#endif
-    xfs_alloc_arg_t args;   /* allocation argument structure */
+    xfs_alloc_arg_t args;
     int error;
 
     ASSERT(len != 0);
+    memset(&args, 0, sizeof(xfs_alloc_arg_t));
     args.tp = tp;
     args.mp = tp->t_mountp;
     args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
     ASSERT(args.agno < args.mp->m_sb.sb_agcount);
     args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
-    args.alignment = 1;
-    args.minlen = args.minleft = args.minalignslop = 0;
     down_read(&args.mp->m_peraglock);
    args.pag = &args.mp->m_perag[args.agno];
     if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING)))
         goto error0;
 #ifdef DEBUG
     ASSERT(args.agbp != NULL);
-    agf = XFS_BUF_TO_AGF(args.agbp);
-    ASSERT(args.agbno + len <= be32_to_cpu(agf->agf_length));
+    ASSERT((args.agbno + len) <=
+        be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length));
 #endif
-    error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno,
-        len, 0);
+    error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
 error0:
     up_read(&args.mp->m_peraglock);
     return error;
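The jfs_quota_read()/jfs_quota_write() helpers added to fs/jfs/super.c above copy an arbitrary (offset, length) range by walking the quota file one filesystem block at a time, clamping each copy at the next block boundary. As a rough illustration of that splitting arithmetic only, here is a hedged user-space sketch in plain C: the BLOCKSIZE constant, the blockwise_read() name, and the use of pread() on an ordinary file descriptor are assumptions for the example, not part of the kernel patch, and buffer_head mapping, hole handling, and locking are deliberately left out.

```c
/*
 * Hedged user-space analogue of the block-splitting loop in the new
 * jfs_quota_read(): copy `len` bytes starting at byte offset `off`,
 * never letting a single step cross a block boundary.
 * BLOCKSIZE, blockwise_read() and pread() are assumptions for this sketch;
 * the kernel code maps each block with jfs_get_block() and copies through
 * a buffer_head instead.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

#define BLOCKSIZE 4096

static ssize_t blockwise_read(int fd, char *data, size_t len, off_t off)
{
    size_t toread = len;

    while (toread > 0) {
        size_t offset = (size_t)(off & (BLOCKSIZE - 1)); /* offset inside the block */
        size_t tocopy = BLOCKSIZE - offset < toread ?
                        BLOCKSIZE - offset : toread;     /* clamp at the block edge */
        ssize_t n = pread(fd, data, tocopy, off);

        if (n < 0)
            return -1;      /* I/O error */
        if (n == 0)
            break;          /* EOF: return what was copied so far */
        toread -= (size_t)n;
        data += n;
        off += n;
    }
    return (ssize_t)(len - toread);
}

int main(int argc, char **argv)
{
    char buf[8192];
    ssize_t got;
    int fd;

    if (argc < 2) {
        fprintf(stderr, "usage: %s <file>\n", argv[0]);
        return 1;
    }
    fd = open(argv[1], O_RDONLY);
    if (fd < 0) {
        perror("open");
        return 1;
    }
    /* 6000 bytes starting at offset 1000 cross one 4096-byte block
     * boundary, so the loop splits the copy into 3096 + 2904 bytes. */
    got = blockwise_read(fd, buf, 6000, 1000);
    printf("copied %zd bytes\n", got);
    close(fd);
    return 0;
}
```

The kernel version applies the same clamping with sb->s_blocksize, then maps each block with jfs_get_block() and reads or writes it via sb_bread()/sb_getblk(), which is why the new super_operations entries can bypass the page cache entirely.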