author    Benjamin Marzinski <bmarzins@redhat.com>    2008-03-14 13:52:52 -0500
committer Steven Whitehouse <swhiteho@redhat.com>     2008-03-31 10:41:44 +0100
commit    58e9fee13e579df44922172dbe3c9e3ba3edf7a3 (patch)
tree      7b134f28032a3cd498b2d9a18c02ea085c0725e2 /fs/gfs2
parent    f5a8cd020173c455705fc0095b7d299da6f8f87b (diff)
[GFS2] Invalidate cache at correct point
GFS2 wasn't invalidating its cache before it called into the lock manager with a request that could potentially drop a lock. This left a window where the lock could actually be held by another node, but the file's page cache would still appear valid, causing coherency problems. This patch moves the cache invalidation to before the lock manager call when dropping a lock.

It also adds an option to the lock_dlm lock manager to not use conversion-mode deadlock avoidance, which, on a conversion from shared to exclusive, could internally drop the lock and then reacquire it. GFS2 now asks lock_dlm not to do this; instead, GFS2 manually drops the lock and reacquires it.

Signed-off-by: Benjamin Marzinski <bmarzins@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
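To make the ordering fix concrete, here is a minimal, standalone C sketch (hypothetical names such as fake_glock, cache_invalidate() and lm_unlock(); this is not the patched kernel code): cached data is invalidated before the lock manager call that may hand the lock to another node, so no stale pages survive the transfer.

#include <stdio.h>

/* Hypothetical stand-in for a cached cluster lock; not the real glock. */
struct fake_glock {
	int held;         /* 1 while this node holds the lock */
	int cache_valid;  /* non-zero while the page cache may be trusted */
};

static void cache_invalidate(struct fake_glock *gl)
{
	gl->cache_valid = 0;   /* drop cached pages/metadata */
}

static void lm_unlock(struct fake_glock *gl)
{
	gl->held = 0;          /* another node may be granted the lock now */
}

/* Patched ordering: invalidate first, then release the lock. */
static void drop_lock(struct fake_glock *gl)
{
	cache_invalidate(gl);
	lm_unlock(gl);
}

int main(void)
{
	struct fake_glock gl = { .held = 1, .cache_valid = 1 };
	drop_lock(&gl);
	printf("held=%d cache_valid=%d\n", gl.held, gl.cache_valid);
	return 0;
}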
Diffstat (limited to 'fs/gfs2')
-rw-r--r--  fs/gfs2/glock.c               35
-rw-r--r--  fs/gfs2/incore.h               1
-rw-r--r--  fs/gfs2/locking/dlm/lock.c     3
-rw-r--r--  fs/gfs2/locking/dlm/thread.c  10
-rw-r--r--  fs/gfs2/ops_fstype.c           2
5 files changed, 34 insertions(+), 17 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 63981e2fb83..d636b3e80f5 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -764,7 +764,7 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
- const struct gfs2_glock_operations *glops = gl->gl_ops;
+ struct gfs2_holder *gh = gl->gl_req_gh;
gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
@@ -772,8 +772,14 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
state_change(gl, LM_ST_UNLOCKED);
- if (glops->go_inval)
- glops->go_inval(gl, DIO_METADATA);
+ if (test_and_clear_bit(GLF_CONV_DEADLK, &gl->gl_flags)) {
+ spin_lock(&gl->gl_spin);
+ gh->gh_error = 0;
+ spin_unlock(&gl->gl_spin);
+ gfs2_glock_xmote_th(gl, gl->gl_req_gh);
+ gfs2_glock_put(gl);
+ return;
+ }
spin_lock(&gl->gl_spin);
gfs2_demote_wake(gl);
@@ -794,7 +800,6 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
struct gfs2_sbd *sdp = gl->gl_sbd;
const struct gfs2_glock_operations *glops = gl->gl_ops;
struct gfs2_holder *gh = gl->gl_req_gh;
- int prev_state = gl->gl_state;
int op_done = 1;
if (!gh && (ret & LM_OUT_ST_MASK) == LM_ST_UNLOCKED) {
@@ -808,16 +813,6 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
state_change(gl, ret & LM_OUT_ST_MASK);
- if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
- if (glops->go_inval)
- glops->go_inval(gl, DIO_METADATA);
- } else if (gl->gl_state == LM_ST_DEFERRED) {
- /* We might not want to do this here.
- Look at moving to the inode glops. */
- if (glops->go_inval)
- glops->go_inval(gl, 0);
- }
-
/* Deal with each possible exit condition */
if (!gh) {
@@ -837,6 +832,14 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
}
} else {
spin_lock(&gl->gl_spin);
+ if (ret & LM_OUT_CONV_DEADLK) {
+ gh->gh_error = 0;
+ set_bit(GLF_CONV_DEADLK, &gl->gl_flags);
+ spin_unlock(&gl->gl_spin);
+ gfs2_glock_drop_th(gl);
+ gfs2_glock_put(gl);
+ return;
+ }
list_del_init(&gh->gh_list);
gh->gh_error = -EIO;
if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
@@ -910,6 +913,8 @@ static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
if (glops->go_xmote_th)
glops->go_xmote_th(gl);
+ if (state == LM_ST_DEFERRED && glops->go_inval)
+ glops->go_inval(gl, DIO_METADATA);
gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
@@ -952,6 +957,8 @@ static void gfs2_glock_drop_th(struct gfs2_glock *gl)
if (glops->go_xmote_th)
glops->go_xmote_th(gl);
+ if (glops->go_inval)
+ glops->go_inval(gl, DIO_METADATA);
gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
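The glock.c hunks above cooperate: when the lock module reports a conversion deadlock, xmote_bh() records it in GLF_CONV_DEADLK and drops the lock; once the drop completes, drop_bh() sees the flag and re-requests the lock from scratch. A compact standalone sketch of that retry (illustrative userspace C with invented names, not the kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model; conv_deadlk stands in for GLF_CONV_DEADLK. */
struct fake_glock {
	bool conv_deadlk;
	int  requested;   /* state the holder asked for */
	int  state;       /* 0 models LM_ST_UNLOCKED */
};

static void request_lock(struct fake_glock *gl, int state);

/* Models drop_bh(): runs once the unlock has completed. */
static void drop_complete(struct fake_glock *gl)
{
	gl->state = 0;
	if (gl->conv_deadlk) {
		gl->conv_deadlk = false;
		request_lock(gl, gl->requested);   /* retry from unlocked */
	}
}

/* Models xmote_bh(): runs when the lock module answers a request. */
static void grant_callback(struct fake_glock *gl, bool conv_deadlock)
{
	if (conv_deadlock) {          /* models LM_OUT_CONV_DEADLK */
		gl->conv_deadlk = true;
		drop_complete(gl);    /* drop the lock, then retry above */
		return;
	}
	gl->state = gl->requested;    /* normal grant */
}

static void request_lock(struct fake_glock *gl, int state)
{
	gl->requested = state;
	grant_callback(gl, false);    /* assume the retried request succeeds */
}

int main(void)
{
	struct fake_glock gl = { 0 };
	request_lock(&gl, 3);         /* e.g. a shared -> exclusive conversion */
	grant_callback(&gl, true);    /* simulate a conversion deadlock */
	printf("state=%d\n", gl.state);
	return 0;
}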
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 4ba2ea63119..9c2c0b90b22 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -167,6 +167,7 @@ enum {
GLF_DEMOTE_IN_PROGRESS = 6,
GLF_LFLUSH = 7,
GLF_WAITERS2 = 8,
+ GLF_CONV_DEADLK = 9,
};
struct gfs2_glock {
diff --git a/fs/gfs2/locking/dlm/lock.c b/fs/gfs2/locking/dlm/lock.c
index 542a797ac89..53a6ab3c091 100644
--- a/fs/gfs2/locking/dlm/lock.c
+++ b/fs/gfs2/locking/dlm/lock.c
@@ -137,7 +137,8 @@ static inline unsigned int make_flags(struct gdlm_lock *lp,
/* Conversion deadlock avoidance by DLM */
- if (!test_bit(LFL_FORCE_PROMOTE, &lp->flags) &&
+ if (!(lp->ls->fsflags & LM_MFLAG_CONV_NODROP) &&
+ !test_bit(LFL_FORCE_PROMOTE, &lp->flags) &&
!(lkf & DLM_LKF_NOQUEUE) &&
cur > DLM_LOCK_NL && req > DLM_LOCK_NL && cur != req)
lkf |= DLM_LKF_CONVDEADLK;
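Read together with the new LM_MFLAG_CONV_NODROP mount flag, the condition above means DLM's own conversion-deadlock avoidance (DLM_LKF_CONVDEADLK) is requested only when the filesystem has not opted out of it. A simplified, hypothetical version of that flag computation (stand-in macro names, not the lock_dlm code):

#define MY_CONVDEADLK  0x01   /* stands in for DLM_LKF_CONVDEADLK */
#define MY_NOQUEUE     0x02   /* stands in for DLM_LKF_NOQUEUE */

static unsigned int my_make_flags(int fs_conv_nodrop, int force_promote,
                                  unsigned int lkf, int cur, int req)
{
	/* Only let DLM resolve conversion deadlocks itself when the
	 * filesystem has not asked to handle them (CONV_NODROP). */
	if (!fs_conv_nodrop && !force_promote && !(lkf & MY_NOQUEUE) &&
	    cur > 0 && req > 0 && cur != req)
		lkf |= MY_CONVDEADLK;
	return lkf;
}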
diff --git a/fs/gfs2/locking/dlm/thread.c b/fs/gfs2/locking/dlm/thread.c
index 521694fc19d..e53db6fd28a 100644
--- a/fs/gfs2/locking/dlm/thread.c
+++ b/fs/gfs2/locking/dlm/thread.c
@@ -135,7 +135,15 @@ static void process_complete(struct gdlm_lock *lp)
lp->lksb.sb_status, lp->lockname.ln_type,
(unsigned long long)lp->lockname.ln_number,
lp->flags);
- return;
+ if (lp->lksb.sb_status == -EDEADLOCK &&
+ lp->ls->fsflags & LM_MFLAG_CONV_NODROP) {
+ lp->req = lp->cur;
+ acb.lc_ret |= LM_OUT_CONV_DEADLK;
+ if (lp->cur == DLM_LOCK_IV)
+ lp->lksb.sb_lkid = 0;
+ goto out;
+ } else
+ return;
}
/*
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 5b518f73497..ef9c6c4f80f 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -723,7 +723,7 @@ static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
{
char *proto = sdp->sd_proto_name;
char *table = sdp->sd_table_name;
- int flags = 0;
+ int flags = LM_MFLAG_CONV_NODROP;
int error;
if (sdp->sd_args.ar_spectator)