From 7c9ca621137cde26be05448133fc1a554345f4f8 Mon Sep 17 00:00:00 2001
From: Bob Peterson
Date: Wed, 31 Aug 2011 09:53:19 +0100
Subject: GFS2: Use rbtree for resource groups and clean up bitmap buffer ref count scheme

Here is an update of Bob's original rbtree patch which also resolves
the rather strange ref counting that was being done relating to the
bitmap blocks.

Originally we had a dual system for journaling resource groups. The
metadata blocks were journaled and the rgrp itself was also added to a
list. The reason for adding the rgrp to the list in the journal was so
that the "repolish clones" code could be run to update the free space,
and potentially send any discard requests, when the log was flushed.
This was done by comparing the "cloned" bitmap with what had been
written back on disk during the transaction commit.

Due to this, there was a requirement to hang on to the rgrps' bitmap
buffers until the journal had been flushed. For that reason, there was
a rather complicated set-up in the ->go_lock and ->go_unlock functions
for rgrps, involving both a mutex and a spinlock (the ->sd_rindex_spin)
to maintain a reference count on the buffers.

However, the journal maintains a reference count on the buffers anyway,
since they are being journaled as metadata buffers. So by moving the
code which deals with the post-journal accounting for bitmap blocks
into the metadata journaling code, we can entirely dispense with the
rather strange buffer ref counting scheme and also the requirement to
journal the rgrps.

The net result of all this is that the ->sd_rindex_spin is left to do
exactly one job, and that is to look after the rbtree of rgrps.

This patch is designed to be a stepping stone towards using RCU for the
rbtree of resource groups; however, the reduction in the number of uses
of the ->sd_rindex_spin is likely to benefit multi-threaded workloads
anyway.

The patch retains ->go_lock and ->go_unlock for rgrps, however these
may also be removed in future in favour of calling the functions
directly where required in the code. That will allow locking of
resource groups without needing to actually read them in - something
that could be useful in speeding up statfs.

In the mean time, though, it is valid to dereference ->bi_bh only when
the rgrp is locked. This is basically the same rule as before, modulo
the references not being valid until the following journal flush.

Signed-off-by: Steven Whitehouse
Signed-off-by: Bob Peterson
Cc: Benjamin Marzinski
---
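For orientation, the containment lookup that the rbtree provides can be
sketched in plain userspace C. This is a simplified model rather than
the kernel code: an ordinary, unbalanced binary search tree stands in
for struct rb_root/rb_node, the struct below keeps only two fields of
struct gfs2_rgrpd, and the helper name blk2rgrpd is invented for the
sketch (the kernel equivalent is gfs2_blk2rgrpd).

#include <stdio.h>
#include <stdint.h>

/* Cut-down stand-in for struct gfs2_rgrpd: each resource group covers
 * the block range [rd_data0, rd_data0 + rd_data). */
struct rgrpd {
        uint64_t rd_data0;              /* first block in this rgrp */
        uint32_t rd_data;               /* number of blocks in this rgrp */
        struct rgrpd *left, *right;     /* models rb_left/rb_right */
};

/* Descend the tree comparing the target block with each rgrp's extent
 * until the containing rgrp is found. With a balanced tree this is
 * O(log n), which is what replaces the old O(n) list walk. */
static struct rgrpd *blk2rgrpd(struct rgrpd *node, uint64_t blk)
{
        while (node) {
                if (blk < node->rd_data0)
                        node = node->left;
                else if (blk >= node->rd_data0 + node->rd_data)
                        node = node->right;
                else
                        return node;    /* blk lies inside this rgrp */
        }
        return NULL;
}

int main(void)
{
        /* Three rgrps covering blocks [100,200), [200,300), [300,400) */
        struct rgrpd a = { 100, 100, NULL, NULL };
        struct rgrpd c = { 300, 100, NULL, NULL };
        struct rgrpd b = { 200, 100, &a, &c };  /* tree root */
        struct rgrpd *r = blk2rgrpd(&b, 250);

        printf("block 250 -> rgrp at %llu\n",
               r ? (unsigned long long)r->rd_data0 : 0ULL);
        return 0;
}

The point of the conversion is the descent itself: each step discards
half of the remaining rgrps, where the old list walk had to visit them
one by one.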
 fs/gfs2/trans.h | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

(limited to 'fs/gfs2/trans.h')

diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
index fb56b783e02..980c5c05398 100644
--- a/fs/gfs2/trans.h
+++ b/fs/gfs2/trans.h
@@ -34,14 +34,12 @@ static inline unsigned int gfs2_rg_blocks(const struct gfs2_alloc *al)
 	       al->al_requested + 1 : al->al_rgd->rd_length;
 }
 
-int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
-		     unsigned int revokes);
+extern int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
+			    unsigned int revokes);
 
-void gfs2_trans_end(struct gfs2_sbd *sdp);
-
-void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta);
-void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
-void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len);
-void gfs2_trans_add_rg(struct gfs2_rgrpd *rgd);
+extern void gfs2_trans_end(struct gfs2_sbd *sdp);
+extern void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta);
+extern void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
+extern void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len);
 
 #endif /* __TRANS_DOT_H__ */
--
cgit v1.2.3-70-g09d2


From 54335b1fca27b84baa75b1f45985d98262003837 Mon Sep 17 00:00:00 2001
From: Steven Whitehouse
Date: Thu, 1 Sep 2011 13:31:59 +0100
Subject: GFS2: Cache the most recently used resource group in the inode

This means that after the initial allocation for any inode, the last
used resource group is cached in the inode for future use. This
drastically reduces the number of lookups of resource groups in the
common case, and thus the contention on that data structure. The
allocation algorithm is the same as before, except that we always check
whether the goal block is within the cached rgrp before going to the
rbtree to look one up.

Signed-off-by: Steven Whitehouse
---
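A small self-contained C model of that check follows. Everything in it
is simplified for the sketch: struct inode_model, blk2rgrpd_slow,
start_rgrp and the three-entry table are invented names, the linear
scan merely stands in for the rbtree walk, the try_rgrp_fit step is
omitted, and only rgrp_contains_block mirrors a real GFS2 helper.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

/* Cut-down stand-ins for the kernel structures */
struct rgrpd {
        uint64_t rd_data0;      /* first block covered by this rgrp */
        uint32_t rd_data;       /* number of blocks covered */
};

struct inode_model {
        uint64_t i_goal;        /* goal block for the next allocation */
        struct rgrpd *i_rgd;    /* most recently used rgrp, or NULL */
};

/* Same containment test as the kernel's rgrp_contains_block() */
static bool rgrp_contains_block(const struct rgrpd *rgd, uint64_t blk)
{
        return blk >= rgd->rd_data0 && blk < rgd->rd_data0 + rgd->rd_data;
}

static struct rgrpd table[] = { { 100, 100 }, { 200, 100 }, { 300, 100 } };

/* Linear scan standing in for the rbtree lookup (gfs2_blk2rgrpd) */
static struct rgrpd *blk2rgrpd_slow(uint64_t blk)
{
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (rgrp_contains_block(&table[i], blk))
                        return &table[i];
        return NULL;
}

/* The pattern the patch adds to get_local_rgrp(): try the cached rgrp
 * first, fall back to the full lookup only on a miss, and cache
 * whatever we end up using for next time. */
static struct rgrpd *start_rgrp(struct inode_model *ip)
{
        if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal))
                return ip->i_rgd;                       /* cache hit */
        ip->i_rgd = blk2rgrpd_slow(ip->i_goal);         /* cache miss */
        return ip->i_rgd;
}

int main(void)
{
        struct inode_model ip = { .i_goal = 250, .i_rgd = NULL };

        start_rgrp(&ip);        /* first call misses and fills the cache */
        ip.i_goal = 260;        /* next goal lands in the same rgrp */
        printf("second lookup hits the cache: %s\n",
               start_rgrp(&ip) == &table[1] ? "yes" : "no");
        return 0;
}

Since i_goal rarely leaves the current resource group between
consecutive allocations, the common case never touches the rbtree (or
the ->sd_rindex_spin protecting it) at all.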
 fs/gfs2/aops.c   |  2 +-
 fs/gfs2/file.c   |  6 +++---
 fs/gfs2/incore.h |  3 +--
 fs/gfs2/inode.c  | 12 +++++-------
 fs/gfs2/quota.c  |  4 ++--
 fs/gfs2/rgrp.c   | 51 +++++++++++++++++++++++++--------------------------
 fs/gfs2/super.c  |  1 +
 fs/gfs2/trans.h  |  8 +++++---
 fs/gfs2/xattr.c  |  2 +-
 9 files changed, 44 insertions(+), 45 deletions(-)

(limited to 'fs/gfs2/trans.h')

diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 212fe74927b..4858e1fed8b 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -663,7 +663,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
 	if (&ip->i_inode == sdp->sd_rindex)
 		rblocks += 2 * RES_STATFS;
 	if (alloc_required)
-		rblocks += gfs2_rg_blocks(al);
+		rblocks += gfs2_rg_blocks(ip);
 
 	error = gfs2_trans_begin(sdp, rblocks,
 				 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index d717b72500a..3467f366214 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -397,7 +397,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 		rblocks += data_blocks ? data_blocks : 1;
 	if (ind_blocks || data_blocks) {
 		rblocks += RES_STATFS + RES_QUOTA;
-		rblocks += gfs2_rg_blocks(al);
+		rblocks += gfs2_rg_blocks(ip);
 	}
 	ret = gfs2_trans_begin(sdp, rblocks, 0);
 	if (ret)
@@ -823,7 +823,7 @@ static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
 			    unsigned int *data_blocks, unsigned int *ind_blocks)
 {
 	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-	unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone;
+	unsigned int max_blocks = ip->i_rgd->rd_free_clone;
 	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
 
 	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
@@ -912,7 +912,7 @@ retry:
 	al->al_requested = data_blocks + ind_blocks;
 
 	rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
-		  RES_RG_HDR + gfs2_rg_blocks(al);
+		  RES_RG_HDR + gfs2_rg_blocks(ip);
 	if (gfs2_is_jdata(ip))
 		rblocks += data_blocks ? data_blocks : 1;
 
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 56847f5903a..55e335b5283 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -256,8 +256,6 @@ struct gfs2_alloc {
 	unsigned int al_line;
 	char *al_file;
 	struct gfs2_holder al_rgd_gh;
-	struct gfs2_rgrpd *al_rgd;
-
 };
 
 enum {
@@ -279,6 +277,7 @@ struct gfs2_inode {
 	struct gfs2_holder i_iopen_gh;
 	struct gfs2_holder i_gh; /* for prepare/commit_write only */
 	struct gfs2_alloc *i_alloc;
+	struct gfs2_rgrpd *i_rgd;
 	u64 i_goal;	/* goal block for allocations */
 	struct rw_semaphore i_rw_mutex;
 	struct list_head i_trunc_list;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 29703dd97dc..55b3bbaf2f2 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -583,7 +583,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
 		goto fail_quota_locks;
 
 	error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
-				 al->al_rgd->rd_length +
+				 dip->i_rgd->rd_length +
 				 2 * RES_DINODE +
 				 RES_STATFS + RES_QUOTA, 0);
 	if (error)
@@ -613,8 +613,7 @@ fail_end_trans:
 	gfs2_trans_end(sdp);
 
 fail_ipreserv:
-	if (dip->i_alloc->al_rgd)
-		gfs2_inplace_release(dip);
+	gfs2_inplace_release(dip);
 
 fail_quota_locks:
 	gfs2_quota_unlock(dip);
@@ -731,8 +730,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
 	brelse(bh);
 
 	gfs2_trans_end(sdp);
-	if (dip->i_alloc->al_rgd)
-		gfs2_inplace_release(dip);
+	gfs2_inplace_release(dip);
 	gfs2_quota_unlock(dip);
 	gfs2_alloc_put(dip);
 	mark_inode_dirty(inode);
@@ -896,7 +894,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
 		goto out_gunlock_q;
 
 	error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
-				 gfs2_rg_blocks(al) +
+				 gfs2_rg_blocks(dip) +
 				 2 * RES_DINODE + RES_STATFS +
 				 RES_QUOTA, 0);
 	if (error)
@@ -1371,7 +1369,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
 		goto out_gunlock_q;
 
 	error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
-				 gfs2_rg_blocks(al) +
+				 gfs2_rg_blocks(ndip) +
 				 4 * RES_DINODE + 4 * RES_LEAF +
 				 RES_STATFS + RES_QUOTA + 4, 0);
 	if (error)
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 3a9a9749f49..10a59cd21f0 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -813,7 +813,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 		goto out_alloc;
 
 	if (nalloc)
-		blocks += gfs2_rg_blocks(al) + nalloc * ind_blocks + RES_STATFS;
+		blocks += gfs2_rg_blocks(ip) + nalloc * ind_blocks + RES_STATFS;
 
 	error = gfs2_trans_begin(sdp, blocks, 0);
 	if (error)
@@ -1598,7 +1598,7 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
 		error = gfs2_inplace_reserve(ip);
 		if (error)
 			goto out_alloc;
-		blocks += gfs2_rg_blocks(al);
+		blocks += gfs2_rg_blocks(ip);
 	}
 
 	/* Some quotas span block boundaries and can update two blocks,
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 88d5b75067a..5bfb97002c2 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -882,24 +882,21 @@ struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip)
 /**
  * try_rgrp_fit - See if a given reservation will fit in a given RG
  * @rgd: the RG data
- * @al: the struct gfs2_alloc structure describing the reservation
+ * @ip: the inode
  *
  * If there's room for the requested blocks to be allocated from the RG:
- * Sets the $al_rgd field in @al.
 *
 * Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
 */
 
-static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
+static int try_rgrp_fit(const struct gfs2_rgrpd *rgd, const struct gfs2_inode *ip)
 {
+	const struct gfs2_alloc *al = ip->i_alloc;
+
 	if (rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR))
 		return 0;
-
-	if (rgd->rd_free_clone >= al->al_requested) {
-		al->al_rgd = rgd;
+	if (rgd->rd_free_clone >= al->al_requested)
 		return 1;
-	}
-
 	return 0;
 }
@@ -985,7 +982,10 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
 	int error, rg_locked;
 	int loops = 0;
 
-	rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal);
+	if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal))
+		rgd = begin = ip->i_rgd;
+	else
+		rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal);
 
 	if (rgd == NULL)
 		return -EBADSLT;
@@ -1002,8 +1002,10 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
 		}
 		switch (error) {
 		case 0:
-			if (try_rgrp_fit(rgd, al))
+			if (try_rgrp_fit(rgd, ip)) {
+				ip->i_rgd = rgd;
 				return 0;
+			}
 			if (rgd->rd_flags & GFS2_RDF_CHECK)
 				try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr);
 			if (!rg_locked)
@@ -1014,7 +1016,6 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
 			if (rgd == begin)
 				loops++;
 			break;
-
 		default:
 			return error;
 		}
@@ -1042,21 +1043,20 @@ int gfs2_inplace_reserve_i(struct gfs2_inode *ip,
 	if (gfs2_assert_warn(sdp, al->al_requested))
 		return -EINVAL;
 
-try_again:
 	do {
 		error = get_local_rgrp(ip, &last_unlinked);
-		/* If there is no space, flushing the log may release some */
-		if (error) {
-			if (ip == GFS2_I(sdp->sd_rindex) &&
-			    !sdp->sd_rindex_uptodate) {
-				error = gfs2_ri_update(ip);
-				if (error)
-					return error;
-				goto try_again;
-			}
-			gfs2_log_flush(sdp, NULL);
+		if (error != -ENOSPC)
+			break;
+		/* Check that fs hasn't grown if writing to rindex */
+		if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
+			error = gfs2_ri_update(ip);
+			if (error)
+				break;
+			continue;
 		}
-	} while (error && tries++ < 3);
+		/* Flushing the log may release space */
+		gfs2_log_flush(sdp, NULL);
+	} while (tries++ < 3);
 
 	if (error)
 		return error;
@@ -1086,7 +1086,6 @@ void gfs2_inplace_release(struct gfs2_inode *ip)
 			     al->al_alloced, al->al_requested, al->al_file,
 			     al->al_line);
 
-	al->al_rgd = NULL;
 	if (al->al_rgd_gh.gh_gl)
 		gfs2_glock_dq_uninit(&al->al_rgd_gh);
 }
@@ -1339,7 +1338,7 @@ int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n)
 	if (al == NULL)
 		return -ECANCELED;
 
-	rgd = al->al_rgd;
+	rgd = ip->i_rgd;
 
 	if (rgrp_contains_block(rgd, ip->i_goal))
 		goal = ip->i_goal - rgd->rd_data0;
@@ -1398,7 +1397,7 @@ int gfs2_alloc_di(struct gfs2_inode *dip, u64 *bn, u64 *generation)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
 	struct gfs2_alloc *al = dip->i_alloc;
-	struct gfs2_rgrpd *rgd = al->al_rgd;
+	struct gfs2_rgrpd *rgd = dip->i_rgd;
 	u32 blk;
 	u64 block;
 	unsigned int n = 1;
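The reworked gfs2_inplace_reserve_i() loop above reduces to a small
control-flow model: only -ENOSPC triggers another pass, the log is
flushed between passes since that may release space, and the number of
passes is bounded. In this sketch the *_stub/_model names and their
return values are invented, and the rindex re-read branch is left out.

#include <stdio.h>
#include <errno.h>

static int attempts;

/* Stub for get_local_rgrp(): pretend the first two passes find no
 * space and the third succeeds, as if the log flushes in between had
 * released some blocks. */
static int get_local_rgrp_stub(void)
{
        return (++attempts < 3) ? -ENOSPC : 0;
}

static void log_flush_stub(void)
{
        printf("flushing log (may release space)\n");
}

/* Control-flow model of the reworked reservation loop */
static int inplace_reserve_model(void)
{
        int error, tries = 0;

        do {
                error = get_local_rgrp_stub();
                if (error != -ENOSPC)
                        break;          /* success, or a hard error */
                log_flush_stub();       /* flushing may release space */
        } while (tries++ < 3);

        return error;
}

int main(void)
{
        int ret = inplace_reserve_model();

        printf("reserve returned %d after %d attempt(s)\n", ret, attempts);
        return 0;
}

Unlike the old version, a hard error (anything other than -ENOSPC)
escapes the loop immediately rather than triggering pointless log
flushes and retries.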
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index f716c4f8b25..87e9141a4de 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1574,6 +1574,7 @@ static struct inode *gfs2_alloc_inode(struct super_block *sb)
 	if (ip) {
 		ip->i_flags = 0;
 		ip->i_gl = NULL;
+		ip->i_rgd = NULL;
 	}
 	return &ip->i_inode;
 }
diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
index 980c5c05398..f8f101ef600 100644
--- a/fs/gfs2/trans.h
+++ b/fs/gfs2/trans.h
@@ -28,10 +28,12 @@ struct gfs2_glock;
 
 /* reserve either the number of blocks to be allocated plus the rg header
  * block, or all of the blocks in the rg, whichever is smaller */
-static inline unsigned int gfs2_rg_blocks(const struct gfs2_alloc *al)
+static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip)
 {
-	return (al->al_requested < al->al_rgd->rd_length)?
-	       al->al_requested + 1 : al->al_rgd->rd_length;
+	const struct gfs2_alloc *al = ip->i_alloc;
+	if (al->al_requested < ip->i_rgd->rd_length)
+		return al->al_requested + 1;
+	return ip->i_rgd->rd_length;
 }
 
 extern int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 167f4af53b1..e7bf0ea1c3c 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -727,7 +727,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
 		goto out_gunlock_q;
 
 	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
-				 blks + gfs2_rg_blocks(al) +
+				 blks + gfs2_rg_blocks(ip) +
 				 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
 	if (error)
 		goto out_ipres;
--
cgit v1.2.3-70-g09d2