-rw-r--r--  fs/gfs2/glock.c      | 33
-rw-r--r--  fs/gfs2/glock.h      |  1
-rw-r--r--  fs/gfs2/glops.c      | 11
-rw-r--r--  fs/gfs2/incore.h     |  3
-rw-r--r--  fs/gfs2/main.c       |  1
-rw-r--r--  fs/gfs2/ops_fstype.c |  2
-rw-r--r--  fs/gfs2/quota.c      | 31
7 files changed, 75 insertions(+), 7 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 27cb9cca9c0..4ddf3bd55dd 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -40,6 +40,7 @@
 #include "quota.h"
 #include "super.h"
 #include "util.h"
+#include "bmap.h"
 
 struct gfs2_gl_hash_bucket {
 	struct hlist_head hb_list;
@@ -289,7 +290,8 @@ static void gfs2_holder_wake(struct gfs2_holder *gh)
  * do_promote - promote as many requests as possible on the current queue
  * @gl: The glock
  * 
- * Returns: true if there is a blocked holder at the head of the list
+ * Returns: 1 if there is a blocked holder at the head of the list, or 2
+ *          if a type specific operation is underway.
  */
 
 static int do_promote(struct gfs2_glock *gl)
@@ -312,6 +314,8 @@ restart:
 			ret = glops->go_lock(gh);
 			spin_lock(&gl->gl_spin);
 			if (ret) {
+				if (ret == 1)
+					return 2;
 				gh->gh_error = ret;
 				list_del_init(&gh->gh_list);
 				gfs2_holder_wake(gh);
@@ -416,6 +420,7 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 	struct gfs2_holder *gh;
 	unsigned state = ret & LM_OUT_ST_MASK;
+	int rv;
 
 	spin_lock(&gl->gl_spin);
 	state_change(gl, state);
@@ -470,7 +475,6 @@ retry:
 		gfs2_demote_wake(gl);
 	if (state != LM_ST_UNLOCKED) {
 		if (glops->go_xmote_bh) {
-			int rv;
 			spin_unlock(&gl->gl_spin);
 			rv = glops->go_xmote_bh(gl, gh);
 			if (rv == -EAGAIN)
@@ -481,10 +485,13 @@ retry:
 				goto out;
 			}
 		}
-		do_promote(gl);
+		rv = do_promote(gl);
+		if (rv == 2)
+			goto out_locked;
 	}
 out:
 	clear_bit(GLF_LOCK, &gl->gl_flags);
+out_locked:
 	spin_unlock(&gl->gl_spin);
 	gfs2_glock_put(gl);
 }
@@ -584,6 +591,7 @@ __releases(&gl->gl_spin)
 __acquires(&gl->gl_spin)
 {
 	struct gfs2_holder *gh = NULL;
+	int ret;
 
 	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
 		return;
@@ -602,8 +610,11 @@ __acquires(&gl->gl_spin)
 	} else {
 		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
 			gfs2_demote_wake(gl);
-		if (do_promote(gl) == 0)
+		ret = do_promote(gl);
+		if (ret == 0)
 			goto out;
+		if (ret == 2)
+			return;
 		gh = find_first_waiter(gl);
 		gl->gl_target = gh->gh_state;
 		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
@@ -1556,6 +1567,20 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
 	}
 }
 
+void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
+{
+	struct gfs2_glock *gl = ip->i_gl;
+	int ret;
+
+	ret = gfs2_truncatei_resume(ip);
+	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);
+
+	spin_lock(&gl->gl_spin);
+	clear_bit(GLF_LOCK, &gl->gl_flags);
+	run_queue(gl, 1);
+	spin_unlock(&gl->gl_spin);
+}
+
 static const char *state2str(unsigned state)
 {
 	switch(state) {
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 695c6b19361..13a64ee6523 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -132,6 +132,7 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data);
 void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl);
 void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
+void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
 
 int __init gfs2_glock_init(void);
 void gfs2_glock_exit(void);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 68ee66552d1..8ebff8ebae2 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -227,6 +227,7 @@ static int inode_go_demote_ok(struct gfs2_glock *gl)
 static int inode_go_lock(struct gfs2_holder *gh)
 {
 	struct gfs2_glock *gl = gh->gh_gl;
+	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct gfs2_inode *ip = gl->gl_object;
 	int error = 0;
 
@@ -241,8 +242,14 @@ static int inode_go_lock(struct gfs2_holder *gh)
 
 	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
 	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
-	    (gh->gh_state == LM_ST_EXCLUSIVE))
-		error = gfs2_truncatei_resume(ip);
+	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
+		spin_lock(&sdp->sd_trunc_lock);
+		if (list_empty(&ip->i_trunc_list))
+			list_add(&sdp->sd_trunc_list, &ip->i_trunc_list);
+		spin_unlock(&sdp->sd_trunc_lock);
+		wake_up(&sdp->sd_quota_wait);
+		return 1;
+	}
 
 	return error;
 }
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index cfebc179357..dd7d0f8f357 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -244,6 +244,7 @@ struct gfs2_inode {
 	struct gfs2_alloc *i_alloc;
 	u64 i_goal;	/* goal block for allocations */
 	struct rw_semaphore i_rw_mutex;
+	struct list_head i_trunc_list;
 	u32 i_entries;
 	u32 i_diskflags;
 	u8 i_height;
@@ -550,6 +551,8 @@ struct gfs2_sbd {
 	spinlock_t sd_quota_spin;
 	struct mutex sd_quota_mutex;
 	wait_queue_head_t sd_quota_wait;
+	struct list_head sd_trunc_list;
+	spinlock_t sd_trunc_lock;
 
 	unsigned int sd_quota_slots;
 	unsigned int sd_quota_chunks;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index e3f6f1844a2..cf39295ccb9 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -30,6 +30,7 @@ static void gfs2_init_inode_once(void *foo)
 
 	inode_init_once(&ip->i_inode);
 	init_rwsem(&ip->i_rw_mutex);
+	INIT_LIST_HEAD(&ip->i_trunc_list);
 	ip->i_alloc = NULL;
 }
 
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 5d137063b67..a9a83804eea 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -107,6 +107,8 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 	spin_lock_init(&sdp->sd_quota_spin);
 	mutex_init(&sdp->sd_quota_mutex);
 	init_waitqueue_head(&sdp->sd_quota_wait);
+	INIT_LIST_HEAD(&sdp->sd_trunc_list);
+	spin_lock_init(&sdp->sd_trunc_lock);
 
 	spin_lock_init(&sdp->sd_log_lock);
 
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 0cfe44f0b6a..b08d09696b3 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -1296,6 +1296,25 @@ static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
 	}
 }
 
+static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
+{
+	struct gfs2_inode *ip;
+
+	while(1) {
+		ip = NULL;
+		spin_lock(&sdp->sd_trunc_lock);
+		if (!list_empty(&sdp->sd_trunc_list)) {
+			ip = list_entry(sdp->sd_trunc_list.next,
+					struct gfs2_inode, i_trunc_list);
+			list_del_init(&ip->i_trunc_list);
+		}
+		spin_unlock(&sdp->sd_trunc_lock);
+		if (ip == NULL)
+			return;
+		gfs2_glock_finish_truncate(ip);
+	}
+}
+
 /**
  * gfs2_quotad - Write cached quota changes into the quota file
  * @sdp: Pointer to GFS2 superblock
@@ -1310,6 +1329,7 @@ int gfs2_quotad(void *data)
 	unsigned long quotad_timeo = 0;
 	unsigned long t = 0;
 	DEFINE_WAIT(wait);
+	int empty;
 
 	while (!kthread_should_stop()) {
 
@@ -1324,12 +1344,21 @@ int gfs2_quotad(void *data)
 		/* FIXME: This should be turned into a shrinker */
 		gfs2_quota_scan(sdp);
 
+		/* Check for & recover partially truncated inodes */
+		quotad_check_trunc_list(sdp);
+
 		if (freezing(current))
 			refrigerator();
 		t = min(quotad_timeo, statfs_timeo);
 
 		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_UNINTERRUPTIBLE);
-		t -= schedule_timeout(t);
+		spin_lock(&sdp->sd_trunc_lock);
+		empty = list_empty(&sdp->sd_trunc_list);
+		spin_unlock(&sdp->sd_trunc_lock);
+		if (empty)
+			t -= schedule_timeout(t);
+		else
+			t = 0;
 		finish_wait(&sdp->sd_quota_wait, &wait);
 	}