author    Linus Torvalds <torvalds@linux-foundation.org>    2012-09-14 18:05:14 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2012-09-14 18:05:14 -0700
commit    3f0c3c8fe30c725c1264fb6db8cc4b69db3a658a (patch)
tree      43801cca275bf5364e20ceba609bcefc01782440
parent    9cb0ee8576eabb52ad5aad347bf0aaf997975bba (diff)
parent    62e252eeefda62eb8cae9f4286270317ab8d5a42 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-fixes
Pull GFS2 fixes from Steven Whitehouse:
 "Here are three GFS2 fixes for the current kernel tree. These are all
  related to the block reservation code which was added at the merge
  window. That code will be getting an update at the forthcoming merge
  window too. In the mean time though there are a few smaller issues
  which should be fixed.

  The first patch resolves an issue with write sizes of greater than 32
  bits with the size hinting code. The second ensures that the
  allocation data structure is initialised when using xattrs and the
  third takes into account allocations which may have been made by
  other nodes which affect a reservation on the local node."

* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-fixes:
  GFS2: Take account of blockages when using reserved blocks
  GFS2: Fix missing allocation data for set/remove xattr
  GFS2: Make write size hinting code common
-rw-r--r--  fs/gfs2/file.c    31
-rw-r--r--  fs/gfs2/inode.c    8
-rw-r--r--  fs/gfs2/rgrp.c    66
3 files changed, 61 insertions(+), 44 deletions(-)
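Note on the first fix: the new gfs2_size_hint() helper in fs/gfs2/file.c (see the diff below) rounds the write size up to a block count using size_t arithmetic and clamps the result to INT_MAX before storing it in rs_sizehint, so write sizes larger than 32 bits no longer lose precision in the hint. The following is a minimal standalone userspace sketch of that rounding-and-clamping step, not kernel code; the 4096-byte block size and shift of 12 are illustrative stand-ins for the superblock fields sd_sb.sb_bsize and sd_sb.sb_bsize_shift.

/*
 * Illustrative sketch only: mirrors the calculation in gfs2_size_hint()
 * from the diff below, with assumed block-size values.
 */
#include <limits.h>
#include <stdio.h>

static int size_hint_blocks(size_t size, size_t bsize, unsigned int bsize_shift)
{
	/* Round the write size up to whole filesystem blocks in size_t,
	   so a request larger than 32 bits does not overflow. */
	size_t blks = (size + bsize - 1) >> bsize_shift;

	/* Clamp to INT_MAX, since the hint is stored in an int-sized
	   atomic counter (rs_sizehint). */
	return blks > INT_MAX ? INT_MAX : (int)blks;
}

int main(void)
{
	/* A ~5GiB write request, i.e. larger than 32 bits on a 64-bit build. */
	size_t big_write = (size_t)5 * 1024 * 1024 * 1024;

	printf("hint = %d blocks\n", size_hint_blocks(big_write, 4096, 12));
	return 0;
}

On a 64-bit build this prints "hint = 1310720 blocks" (5GiB divided into 4KiB blocks).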
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index d1d791ef38d..382000ffac1 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -323,6 +323,29 @@ static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
/**
+ * gfs2_size_hint - Give a hint to the size of a write request
+ * @filep: The struct file
+ * @offset: The file offset of the write
+ * @size: The length of the write
+ *
+ * When we are about to do a write, this function records the total
+ * write size in order to provide a suitable hint to the lower layers
+ * about how many blocks will be required.
+ *
+ */
+
+static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
+{
+ struct inode *inode = filep->f_dentry->d_inode;
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct gfs2_inode *ip = GFS2_I(inode);
+ size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
+ int hint = min_t(size_t, INT_MAX, blks);
+
+ atomic_set(&ip->i_res->rs_sizehint, hint);
+}
+
+/**
* gfs2_allocate_page_backing - Use bmap to allocate blocks
* @page: The (locked) page to allocate backing for
*
@@ -382,8 +405,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret)
return ret;
- atomic_set(&ip->i_res->rs_sizehint,
- PAGE_CACHE_SIZE >> sdp->sd_sb.sb_bsize_shift);
+ gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE);
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
ret = gfs2_glock_nq(&gh);
@@ -663,7 +685,8 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
if (ret)
return ret;
- atomic_set(&ip->i_res->rs_sizehint, writesize >> sdp->sd_sb.sb_bsize_shift);
+ gfs2_size_hint(file, pos, writesize);
+
if (file->f_flags & O_APPEND) {
struct gfs2_holder gh;
@@ -789,7 +812,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
if (unlikely(error))
goto out_uninit;
- atomic_set(&ip->i_res->rs_sizehint, len >> sdp->sd_sb.sb_bsize_shift);
+ gfs2_size_hint(file, offset, len);
while (len > 0) {
if (len < bytes)
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 4ce22e54730..753af3d86bb 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -1722,7 +1722,9 @@ static int gfs2_setxattr(struct dentry *dentry, const char *name,
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
ret = gfs2_glock_nq(&gh);
if (ret == 0) {
- ret = generic_setxattr(dentry, name, data, size, flags);
+ ret = gfs2_rs_alloc(ip);
+ if (ret == 0)
+ ret = generic_setxattr(dentry, name, data, size, flags);
gfs2_glock_dq(&gh);
}
gfs2_holder_uninit(&gh);
@@ -1757,7 +1759,9 @@ static int gfs2_removexattr(struct dentry *dentry, const char *name)
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
ret = gfs2_glock_nq(&gh);
if (ret == 0) {
- ret = generic_removexattr(dentry, name);
+ ret = gfs2_rs_alloc(ip);
+ if (ret == 0)
+ ret = generic_removexattr(dentry, name);
gfs2_glock_dq(&gh);
}
gfs2_holder_uninit(&gh);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 4d34887a601..c9ed814eeb6 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1961,7 +1961,7 @@ static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
* @dinode: 1 if this block is a dinode block, otherwise data block
* @nblocks: desired extent length
*
- * Lay claim to previously allocated block reservation blocks.
+ * Lay claim to previously reserved blocks.
* Returns: Starting block number of the blocks claimed.
* Sets *nblocks to the actual extent length allocated.
*/
@@ -1970,19 +1970,17 @@ static u64 claim_reserved_blks(struct gfs2_inode *ip, bool dinode,
{
struct gfs2_blkreserv *rs = ip->i_res;
struct gfs2_rgrpd *rgd = rs->rs_rgd;
- struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_bitmap *bi;
u64 start_block = gfs2_rs_startblk(rs);
const unsigned int elen = *nblocks;
- /*BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));*/
- gfs2_assert_withdraw(sdp, rgd);
- /*BUG_ON(!gfs2_glock_is_locked_by_me(rgd->rd_gl));*/
bi = rs->rs_bi;
gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
for (*nblocks = 0; *nblocks < elen && rs->rs_free; (*nblocks)++) {
- /* Make sure the bitmap hasn't changed */
+ if (gfs2_testbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
+ bi->bi_len, rs->rs_biblk) != GFS2_BLKST_FREE)
+ break;
gfs2_setbit(rgd, bi->bi_clone, bi, rs->rs_biblk,
dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
rs->rs_biblk++;
@@ -1991,20 +1989,12 @@ static u64 claim_reserved_blks(struct gfs2_inode *ip, bool dinode,
BUG_ON(!rgd->rd_reserved);
rgd->rd_reserved--;
dinode = false;
- trace_gfs2_rs(ip, rs, TRACE_RS_CLAIM);
}
- if (!rs->rs_free) {
- struct gfs2_rgrpd *rgd = ip->i_res->rs_rgd;
-
+ trace_gfs2_rs(ip, rs, TRACE_RS_CLAIM);
+ if (!rs->rs_free || *nblocks != elen)
gfs2_rs_deltree(rs);
- /* -nblocks because we haven't returned to do the math yet.
- I'm doing the math backwards to prevent negative numbers,
- but think of it as:
- if (unclaimed_blocks(rgd) - *nblocks >= RGRP_RSRV_MINBLKS */
- if (unclaimed_blocks(rgd) >= RGRP_RSRV_MINBLKS + *nblocks)
- rg_mblk_search(rgd, ip);
- }
+
return start_block;
}
@@ -2037,34 +2027,34 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
if (ip->i_res->rs_requested == 0)
return -ECANCELED;
- /* Check if we have a multi-block reservation, and if so, claim the
- next free block from it. */
+ /* If we have a reservation, claim blocks from it. */
if (gfs2_rs_active(ip->i_res)) {
BUG_ON(!ip->i_res->rs_free);
rgd = ip->i_res->rs_rgd;
block = claim_reserved_blks(ip, dinode, nblocks);
- } else {
- rgd = ip->i_rgd;
+ if (*nblocks)
+ goto found_blocks;
+ }
- if (!dinode && rgrp_contains_block(rgd, ip->i_goal))
- goal = ip->i_goal - rgd->rd_data0;
- else
- goal = rgd->rd_last_alloc;
-
- blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, &bi);
-
- /* Since all blocks are reserved in advance, this shouldn't
- happen */
- if (blk == BFITNOENT) {
- printk(KERN_WARNING "BFITNOENT, nblocks=%u\n",
- *nblocks);
- printk(KERN_WARNING "FULL=%d\n",
- test_bit(GBF_FULL, &rgd->rd_bits->bi_flags));
- goto rgrp_error;
- }
+ rgd = ip->i_rgd;
- block = gfs2_alloc_extent(rgd, bi, blk, dinode, nblocks);
+ if (!dinode && rgrp_contains_block(rgd, ip->i_goal))
+ goal = ip->i_goal - rgd->rd_data0;
+ else
+ goal = rgd->rd_last_alloc;
+
+ blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, &bi);
+
+ /* Since all blocks are reserved in advance, this shouldn't happen */
+ if (blk == BFITNOENT) {
+ printk(KERN_WARNING "BFITNOENT, nblocks=%u\n", *nblocks);
+ printk(KERN_WARNING "FULL=%d\n",
+ test_bit(GBF_FULL, &rgd->rd_bits->bi_flags));
+ goto rgrp_error;
}
+
+ block = gfs2_alloc_extent(rgd, bi, blk, dinode, nblocks);
+found_blocks:
ndata = *nblocks;
if (dinode)
ndata--;