author     Mauro Carvalho Chehab <mchehab@redhat.com>    2011-12-30 13:59:37 -0200
committer  Mauro Carvalho Chehab <mchehab@redhat.com>    2011-12-30 13:59:37 -0200
commit     b4d48c942c17ce3d3a330ad91e109e522bc97378 (patch)
tree       3365292f3a5a502edb51492d011fd326c930ca40 /fs/ocfs2/dlm/dlmlock.c
parent     1a5cd29631a6b75e49e6ad8a770ab9d69cda0fa2 (diff)
parent     5f0a6e2d503896062f641639dacfe5055c2f593b (diff)
Merge tag 'v3.2-rc7' into staging/for_v3.3
Linux 3.2-rc7
* tag 'v3.2-rc7': (1304 commits)
Linux 3.2-rc7
netfilter: xt_connbytes: handle negation correctly
Btrfs: call d_instantiate after all ops are setup
Btrfs: fix worker lock misuse in find_worker
net: relax rcvbuf limits
rps: fix insufficient bounds checking in store_rps_dev_flow_table_cnt()
net: introduce DST_NOPEER dst flag
mqprio: Avoid panic if no options are provided
bridge: provide a mtu() method for fake_dst_ops
md/bitmap: It is OK to clear bits during recovery.
md: don't give up looking for spares on first failure-to-add
md/raid5: ensure correct assessment of drives during degraded reshape.
md/linear: fix hot-add of devices to linear arrays.
sparc64: Fix MSIQ HV call ordering in pci_sun4v_msiq_build_irq().
pata_of_platform: Add missing CONFIG_OF_IRQ dependency.
ipv4: using prefetch requires including prefetch.h
VFS: Fix race between CPU hotplug and lglocks
vfs: __read_cache_page should use gfp argument rather than GFP_KERNEL
USB: Fix usb/isp1760 build on sparc
net: Add a flow_cache_flush_deferred function
...
Conflicts:
drivers/media/common/tuners/tda18218.c
drivers/media/video/omap3isp/ispccdc.c
drivers/staging/media/as102/as102_drv.h
Diffstat (limited to 'fs/ocfs2/dlm/dlmlock.c')
-rw-r--r--   fs/ocfs2/dlm/dlmlock.c   54
1 file changed, 26 insertions(+), 28 deletions(-)
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 8d39e0fd66f..975810b9849 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -183,10 +183,6 @@ static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
 			kick_thread = 1;
 		}
 	}
-	/* reduce the inflight count, this may result in the lockres
-	 * being purged below during calc_usage */
-	if (lock->ml.node == dlm->node_num)
-		dlm_lockres_drop_inflight_ref(dlm, res);
 
 	spin_unlock(&res->spinlock);
 	wake_up(&res->wq);
@@ -231,10 +227,16 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
 	     lock->ml.type, res->lockname.len,
 	     res->lockname.name, flags);
 
+	/*
+	 * Wait if resource is getting recovered, remastered, etc.
+	 * If the resource was remastered and new owner is self, then exit.
+	 */
 	spin_lock(&res->spinlock);
-
-	/* will exit this call with spinlock held */
 	__dlm_wait_on_lockres(res);
+	if (res->owner == dlm->node_num) {
+		spin_unlock(&res->spinlock);
+		return DLM_RECOVERING;
+	}
 	res->state |= DLM_LOCK_RES_IN_PROGRESS;
 
 	/* add lock to local (secondary) queue */
@@ -319,27 +321,23 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
 	tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
 				    sizeof(create), res->owner, &status);
 	if (tmpret >= 0) {
-		// successfully sent and received
-		ret = status;  // this is already a dlm_status
+		ret = status;
 		if (ret == DLM_REJECTED) {
-			mlog(ML_ERROR, "%s:%.*s: BUG. this is a stale lockres "
-			     "no longer owned by %u. that node is coming back "
-			     "up currently.\n", dlm->name, create.namelen,
+			mlog(ML_ERROR, "%s: res %.*s, Stale lockres no longer "
+			     "owned by node %u. That node is coming back up "
+			     "currently.\n", dlm->name, create.namelen,
 			     create.name, res->owner);
 			dlm_print_one_lock_resource(res);
 			BUG();
 		}
 	} else {
-		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
-		     "node %u\n", tmpret, DLM_CREATE_LOCK_MSG, dlm->key,
-		     res->owner);
-		if (dlm_is_host_down(tmpret)) {
+		mlog(ML_ERROR, "%s: res %.*s, Error %d send CREATE LOCK to "
+		     "node %u\n", dlm->name, create.namelen, create.name,
+		     tmpret, res->owner);
+		if (dlm_is_host_down(tmpret))
 			ret = DLM_RECOVERING;
-			mlog(0, "node %u died so returning DLM_RECOVERING "
-			     "from lock message!\n", res->owner);
-		} else {
+		else
 			ret = dlm_err_to_dlm_status(tmpret);
-		}
 	}
 
 	return ret;
@@ -440,7 +438,7 @@ struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
 		/* zero memory only if kernel-allocated */
 		lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
 		if (!lksb) {
-			kfree(lock);
+			kmem_cache_free(dlm_lock_cache, lock);
 			return NULL;
 		}
 		kernel_allocated = 1;
@@ -718,18 +716,10 @@ retry_lock:
 
 		if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
 		    status == DLM_FORWARD) {
-			mlog(0, "retrying lock with migration/"
-			     "recovery/in progress\n");
 			msleep(100);
-			/* no waiting for dlm_reco_thread */
 			if (recovery) {
 				if (status != DLM_RECOVERING)
 					goto retry_lock;
-
-				mlog(0, "%s: got RECOVERING "
-				     "for $RECOVERY lock, master "
-				     "was %u\n", dlm->name,
-				     res->owner);
 				/* wait to see the node go down, then
 				 * drop down and allow the lockres to
 				 * get cleaned up. need to remaster. */
@@ -741,6 +731,14 @@ retry_lock:
 			}
 		}
 
+		/* Inflight taken in dlm_get_lock_resource() is dropped here */
+		spin_lock(&res->spinlock);
+		dlm_lockres_drop_inflight_ref(dlm, res);
+		spin_unlock(&res->spinlock);
+
+		dlm_lockres_calc_usage(dlm, res);
+		dlm_kick_thread(dlm, res);
+
 		if (status != DLM_NORMAL) {
 			lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
 			if (status != DLM_NOTQUEUED)
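
For context, the net effect of the dlmlock.c hunks above is to move the inflight-reference drop out of dlmlock_master() and into the common tail of dlmlock(), so it now runs for both locally and remotely mastered resources before the lockres usage is recalculated. The lines below are a condensed sketch of that tail after the merge, not the full function; the dispatch onto dlmlock_master()/dlmlock_remote() and the elided retry handling are paraphrased from the surrounding function rather than shown in the hunks above.

		/* sketch: common tail of dlmlock() after this merge */
		if (res->owner == dlm->node_num)
			status = dlmlock_master(dlm, res, lock, flags);
		else
			status = dlmlock_remote(dlm, res, lock, flags);

		/* ... retry loop for DLM_RECOVERING/MIGRATING/FORWARD ... */

		/* Inflight ref taken in dlm_get_lock_resource() is dropped
		 * here for both paths, instead of inside dlmlock_master() */
		spin_lock(&res->spinlock);
		dlm_lockres_drop_inflight_ref(dlm, res);
		spin_unlock(&res->spinlock);

		dlm_lockres_calc_usage(dlm, res);
		dlm_kick_thread(dlm, res);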