Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r-- | fs/gfs2/glock.c | 188
1 file changed, 110 insertions, 78 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 7175a4d0643..d636b3e80f5 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
- * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
+ * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
  *
  * This copyrighted material is made available to anyone wishing to use,
  * modify, copy, or redistribute it subject to the terms and conditions
@@ -35,7 +35,6 @@
 #include "glock.h"
 #include "glops.h"
 #include "inode.h"
-#include "lm.h"
 #include "lops.h"
 #include "meta_io.h"
 #include "quota.h"
@@ -183,7 +182,8 @@ static void glock_free(struct gfs2_glock *gl)
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct inode *aspace = gl->gl_aspace;
 
-	gfs2_lm_put_lock(sdp, gl->gl_lock);
+	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl->gl_lock);
 
 	if (aspace)
 		gfs2_aspace_put(aspace);
@@ -197,7 +197,7 @@ static void glock_free(struct gfs2_glock *gl)
  *
  */
 
-void gfs2_glock_hold(struct gfs2_glock *gl)
+static void gfs2_glock_hold(struct gfs2_glock *gl)
 {
 	atomic_inc(&gl->gl_ref);
 }
@@ -293,6 +293,16 @@ static void glock_work_func(struct work_struct *work)
 		gfs2_glock_put(gl);
 }
 
+static int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
+			    void **lockp)
+{
+	int error = -EIO;
+	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+		error = sdp->sd_lockstruct.ls_ops->lm_get_lock(
+				sdp->sd_lockstruct.ls_lockspace, name, lockp);
+	return error;
+}
+
 /**
  * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
  * @sdp: The GFS2 superblock
@@ -338,8 +348,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	gl->gl_ip = 0;
 	gl->gl_ops = glops;
 	gl->gl_req_gh = NULL;
-	gl->gl_req_bh = NULL;
-	gl->gl_vn = 0;
 	gl->gl_stamp = jiffies;
 	gl->gl_tchange = jiffies;
 	gl->gl_object = NULL;
@@ -595,11 +603,12 @@ static void run_queue(struct gfs2_glock *gl)
 			blocked = rq_mutex(gh);
 		} else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
 			blocked = rq_demote(gl);
-			if (gl->gl_waiters2 && !blocked) {
+			if (test_bit(GLF_WAITERS2, &gl->gl_flags) &&
+				     !blocked) {
 				set_bit(GLF_DEMOTE, &gl->gl_flags);
 				gl->gl_demote_state = LM_ST_UNLOCKED;
 			}
-			gl->gl_waiters2 = 0;
+			clear_bit(GLF_WAITERS2, &gl->gl_flags);
 		} else if (!list_empty(&gl->gl_waiters3)) {
 			gh = list_entry(gl->gl_waiters3.next,
 					struct gfs2_holder, gh_list);
@@ -710,7 +719,7 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
 	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
 			gl->gl_demote_state != state) {
 		if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
-			gl->gl_waiters2 = 1;
+			set_bit(GLF_WAITERS2, &gl->gl_flags);
 		else
 			gl->gl_demote_state = LM_ST_UNLOCKED;
 	}
@@ -743,6 +752,43 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
 }
 
 /**
+ * drop_bh - Called after a lock module unlock completes
+ * @gl: the glock
+ * @ret: the return status
+ *
+ * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
+ * Doesn't drop the reference on the glock the top half took out
+ *
+ */
+
+static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
+{
+	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_holder *gh = gl->gl_req_gh;
+
+	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
+	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
+	gfs2_assert_warn(sdp, !ret);
+
+	state_change(gl, LM_ST_UNLOCKED);
+
+	if (test_and_clear_bit(GLF_CONV_DEADLK, &gl->gl_flags)) {
+		spin_lock(&gl->gl_spin);
+		gh->gh_error = 0;
+		spin_unlock(&gl->gl_spin);
+		gfs2_glock_xmote_th(gl, gl->gl_req_gh);
+		gfs2_glock_put(gl);
+		return;
+	}
+
+	spin_lock(&gl->gl_spin);
+	gfs2_demote_wake(gl);
+	clear_bit(GLF_LOCK, &gl->gl_flags);
+	spin_unlock(&gl->gl_spin);
+	gfs2_glock_put(gl);
+}
+
+/**
  * xmote_bh - Called after the lock module is done acquiring a lock
  * @gl: The glock in question
  * @ret: the int returned from the lock module
@@ -754,25 +800,19 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 	struct gfs2_holder *gh = gl->gl_req_gh;
-	int prev_state = gl->gl_state;
 	int op_done = 1;
 
+	if (!gh && (ret & LM_OUT_ST_MASK) == LM_ST_UNLOCKED) {
+		drop_bh(gl, ret);
+		return;
+	}
+
 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
 	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
 	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
 
 	state_change(gl, ret & LM_OUT_ST_MASK);
 
-	if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
-		if (glops->go_inval)
-			glops->go_inval(gl, DIO_METADATA);
-	} else if (gl->gl_state == LM_ST_DEFERRED) {
-		/* We might not want to do this here.
-		   Look at moving to the inode glops. */
-		if (glops->go_inval)
-			glops->go_inval(gl, 0);
-	}
-
 	/*  Deal with each possible exit condition  */
 
 	if (!gh) {
@@ -782,7 +822,6 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
 		} else {
 			spin_lock(&gl->gl_spin);
 			if (gl->gl_state != gl->gl_demote_state) {
-				gl->gl_req_bh = NULL;
 				spin_unlock(&gl->gl_spin);
 				gfs2_glock_drop_th(gl);
 				gfs2_glock_put(gl);
@@ -793,6 +832,14 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
 		}
 	} else {
 		spin_lock(&gl->gl_spin);
+		if (ret & LM_OUT_CONV_DEADLK) {
+			gh->gh_error = 0;
+			set_bit(GLF_CONV_DEADLK, &gl->gl_flags);
+			spin_unlock(&gl->gl_spin);
+			gfs2_glock_drop_th(gl);
+			gfs2_glock_put(gl);
+			return;
+		}
 		list_del_init(&gh->gh_list);
 		gh->gh_error = -EIO;
 		if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
@@ -824,7 +871,6 @@ out:
 	if (op_done) {
 		spin_lock(&gl->gl_spin);
 		gl->gl_req_gh = NULL;
-		gl->gl_req_bh = NULL;
 		clear_bit(GLF_LOCK, &gl->gl_flags);
 		spin_unlock(&gl->gl_spin);
 	}
@@ -835,6 +881,17 @@ out:
 		gfs2_holder_wake(gh);
 }
 
+static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
+				 unsigned int cur_state, unsigned int req_state,
+				 unsigned int flags)
+{
+	int ret = 0;
+	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+		ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
+							 req_state, flags);
+	return ret;
+}
+
 /**
  * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
  * @gl: The glock in question
@@ -856,6 +913,8 @@ static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
 
 	if (glops->go_xmote_th)
 		glops->go_xmote_th(gl);
+	if (state == LM_ST_DEFERRED && glops->go_inval)
+		glops->go_inval(gl, DIO_METADATA);
 
 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
 	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
@@ -863,7 +922,6 @@ static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
 	gfs2_assert_warn(sdp, state != gl->gl_state);
 
 	gfs2_glock_hold(gl);
-	gl->gl_req_bh = xmote_bh;
 
 	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);
 
@@ -876,49 +934,13 @@ static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
 		xmote_bh(gl, lck_ret);
 }
 
-/**
- * drop_bh - Called after a lock module unlock completes
- * @gl: the glock
- * @ret: the return status
- *
- * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
- * Doesn't drop the reference on the glock the top half took out
- *
- */
-
-static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
+static unsigned int gfs2_lm_unlock(struct gfs2_sbd *sdp, void *lock,
+				   unsigned int cur_state)
 {
-	struct gfs2_sbd *sdp = gl->gl_sbd;
-	const struct gfs2_glock_operations *glops = gl->gl_ops;
-	struct gfs2_holder *gh = gl->gl_req_gh;
-
-	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
-	gfs2_assert_warn(sdp, !ret);
-
-	state_change(gl, LM_ST_UNLOCKED);
-
-	if (glops->go_inval)
-		glops->go_inval(gl, DIO_METADATA);
-
-	if (gh) {
-		spin_lock(&gl->gl_spin);
-		list_del_init(&gh->gh_list);
-		gh->gh_error = 0;
-		spin_unlock(&gl->gl_spin);
-	}
-
-	spin_lock(&gl->gl_spin);
-	gfs2_demote_wake(gl);
-	gl->gl_req_gh = NULL;
-	gl->gl_req_bh = NULL;
-	clear_bit(GLF_LOCK, &gl->gl_flags);
-	spin_unlock(&gl->gl_spin);
-
-	gfs2_glock_put(gl);
-
-	if (gh)
-		gfs2_holder_wake(gh);
+	int ret = 0;
+	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+		ret = sdp->sd_lockstruct.ls_ops->lm_unlock(lock, cur_state);
+	return ret;
 }
 
 /**
@@ -935,13 +957,14 @@ static void gfs2_glock_drop_th(struct gfs2_glock *gl)
 
 	if (glops->go_xmote_th)
 		glops->go_xmote_th(gl);
+	if (glops->go_inval)
+		glops->go_inval(gl, DIO_METADATA);
 
 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
 	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
 	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
 
 	gfs2_glock_hold(gl);
-	gl->gl_req_bh = drop_bh;
 
 	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);
 
@@ -964,16 +987,17 @@ static void gfs2_glock_drop_th(struct gfs2_glock *gl)
 static void do_cancels(struct gfs2_holder *gh)
 {
 	struct gfs2_glock *gl = gh->gh_gl;
+	struct gfs2_sbd *sdp = gl->gl_sbd;
 
 	spin_lock(&gl->gl_spin);
 
 	while (gl->gl_req_gh != gh &&
 	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
 	       !list_empty(&gh->gh_list)) {
-		if (gl->gl_req_bh && !(gl->gl_req_gh &&
-				       (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
+		if (!(gl->gl_req_gh && (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
			spin_unlock(&gl->gl_spin);
-			gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
+			if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+				sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock);
 			msleep(100);
 			spin_lock(&gl->gl_spin);
 		} else {
@@ -1041,7 +1065,6 @@ static int glock_wait_internal(struct gfs2_holder *gh)
 
 		spin_lock(&gl->gl_spin);
 		gl->gl_req_gh = NULL;
-		gl->gl_req_bh = NULL;
 		clear_bit(GLF_LOCK, &gl->gl_flags);
 		run_queue(gl);
 		spin_unlock(&gl->gl_spin);
@@ -1428,6 +1451,14 @@ void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
 		gfs2_glock_dq_uninit(&ghs[x]);
 }
 
+static int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp)
+{
+	int error = -EIO;
+	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+		error = sdp->sd_lockstruct.ls_ops->lm_hold_lvb(lock, lvbp);
+	return error;
+}
+
 /**
  * gfs2_lvb_hold - attach a LVB from a glock
  * @gl: The glock in question
@@ -1463,12 +1494,15 @@ int gfs2_lvb_hold(struct gfs2_glock *gl)
 
 void gfs2_lvb_unhold(struct gfs2_glock *gl)
 {
+	struct gfs2_sbd *sdp = gl->gl_sbd;
+
 	gfs2_glock_hold(gl);
 	gfs2_glmutex_lock(gl);
 
 	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
 	if (atomic_dec_and_test(&gl->gl_lvb_count)) {
-		gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
+		if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+			sdp->sd_lockstruct.ls_ops->lm_unhold_lvb(gl->gl_lock, gl->gl_lvb);
 		gl->gl_lvb = NULL;
 		gfs2_glock_put(gl);
 	}
@@ -1534,8 +1568,7 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
 		gl = gfs2_glock_find(sdp, &async->lc_name);
 		if (gfs2_assert_warn(sdp, gl))
 			return;
-		if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
-			gl->gl_req_bh(gl, async->lc_ret);
+		xmote_bh(gl, async->lc_ret);
 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 			gfs2_glock_put(gl);
 		up_read(&gfs2_umount_flush_sem);
@@ -1594,10 +1627,10 @@ void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
 		gfs2_glock_hold(gl);
 		list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
 		atomic_inc(&sdp->sd_reclaim_count);
-	}
-	spin_unlock(&sdp->sd_reclaim_lock);
-
-	wake_up(&sdp->sd_reclaim_wq);
+		spin_unlock(&sdp->sd_reclaim_lock);
+		wake_up(&sdp->sd_reclaim_wq);
+	} else
+		spin_unlock(&sdp->sd_reclaim_lock);
 }
 
 /**
@@ -1897,7 +1930,6 @@ static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
 	print_dbg(gi, "  gl_owner = -1\n");
 	print_dbg(gi, "  gl_ip = %lu\n", gl->gl_ip);
 	print_dbg(gi, "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
-	print_dbg(gi, "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
 	print_dbg(gi, "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
 	print_dbg(gi, "  object = %s\n", (gl->gl_object) ? "yes" : "no");
 	print_dbg(gi, "  reclaim = %s\n",
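
Note: the recurring change in this patch is that the old gfs2_lm_* helpers from the deleted lm.h are replaced by small static wrappers inside glock.c which test SDF_SHUTDOWN before calling through sd_lockstruct.ls_ops, so no call reaches the lock module once the filesystem has been withdrawn. The sketch below is a minimal userspace model of that guard pattern only; struct sbd, struct lm_ops, do_lm_lock and dummy_lock are simplified stand-ins invented for illustration, not the real GFS2 definitions.

/* Minimal sketch of the SDF_SHUTDOWN guard pattern (illustrative only). */
#include <stdbool.h>
#include <stdio.h>

#define SDF_SHUTDOWN 0			/* bit index, mirroring the sd_flags usage */

struct lm_ops {				/* stand-in for the lock module ops table */
	int (*lm_lock)(void *lock, unsigned int cur_state,
		       unsigned int req_state, unsigned int flags);
};

struct sbd {				/* stand-in for struct gfs2_sbd */
	unsigned long sd_flags;
	const struct lm_ops *ls_ops;
};

static bool test_bit(int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1UL;
}

/* Wrapper: only call into the lock module while the fs is not shut down. */
static int do_lm_lock(struct sbd *sdp, void *lock, unsigned int cur_state,
		      unsigned int req_state, unsigned int flags)
{
	int ret = 0;
	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		ret = sdp->ls_ops->lm_lock(lock, cur_state, req_state, flags);
	return ret;
}

static int dummy_lock(void *lock, unsigned int cur, unsigned int req,
		      unsigned int flags)
{
	(void)lock;
	printf("lm_lock called: %u -> %u (flags 0x%x)\n", cur, req, flags);
	return (int)req;
}

int main(void)
{
	const struct lm_ops ops = { .lm_lock = dummy_lock };
	struct sbd sdp = { .sd_flags = 0, .ls_ops = &ops };
	int lock;

	do_lm_lock(&sdp, &lock, 0, 3, 0);	/* reaches the lock module */
	sdp.sd_flags |= 1UL << SDF_SHUTDOWN;	/* simulate a withdraw/shutdown */
	do_lm_lock(&sdp, &lock, 0, 3, 0);	/* short-circuits and returns 0 */
	return 0;
}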