Diffstat (limited to 'fs/ocfs2')

-rw-r--r--  fs/ocfs2/dlm/dlmcommon.h   |  8
-rw-r--r--  fs/ocfs2/dlm/dlmdebug.c    | 12
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c   | 39
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c   |  4
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 23

5 files changed, 41 insertions, 45 deletions
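The change below swaps the fixed 1 << 7 = 128 list_head buckets for a page-sized array of single-pointer hlist_head buckets, so lookups hash into PAGE_SIZE / sizeof(struct hlist_head) chains (512 on a 4 KiB page with 8-byte pointers). What follows is a minimal userspace sketch of that bucket-array pattern, not the kernel code: the names (struct bucket_head, struct lockres, hash_insert, hash_lookup) and the hash function are hypothetical stand-ins for struct hlist_head, struct dlm_lock_resource, __dlm_insert_lockres, __dlm_lookup_lockres and full_name_hash, and it omits the pprev back-pointer the kernel's hlist_node keeps for O(1) unlinking.

/*
 * Minimal sketch of the hashing scheme the patch moves to: an array of
 * single-pointer bucket heads (like struct hlist_head) sized so the whole
 * bucket array fits in one page.  All names here are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/* single-pointer bucket head, analogous to struct hlist_head */
struct bucket_head {
	struct lockres *first;
};

/* a bucket head is one pointer, so a page holds twice as many buckets
 * as it would with a two-pointer list_head */
#define HASH_BUCKETS (PAGE_SIZE / sizeof(struct bucket_head))

struct lockres {
	struct lockres *next;	/* chain within one bucket */
	char name[32];
};

/* placeholder string hash standing in for full_name_hash() */
static unsigned long name_hash(const char *name, size_t len)
{
	unsigned long h = 5381;

	while (len--)
		h = h * 33 + (unsigned char)*name++;
	return h;
}

static void hash_insert(struct bucket_head *table, struct lockres *res)
{
	/* head insertion, like hlist_add_head() in the patch */
	unsigned long h = name_hash(res->name, strlen(res->name));
	struct bucket_head *b = &table[h % HASH_BUCKETS];

	res->next = b->first;
	b->first = res;
}

static struct lockres *hash_lookup(struct bucket_head *table, const char *name)
{
	/* walk a single bucket only, like hlist_for_each_entry() */
	unsigned long h = name_hash(name, strlen(name));
	struct lockres *res;

	for (res = table[h % HASH_BUCKETS].first; res; res = res->next)
		if (strcmp(res->name, name) == 0)
			return res;
	return NULL;
}

int main(void)
{
	struct bucket_head *table = calloc(HASH_BUCKETS, sizeof(*table));
	struct lockres a = { .name = "M00000000000000000example" };

	if (!table)
		return 1;
	hash_insert(table, &a);
	printf("buckets per page: %lu\n", (unsigned long)HASH_BUCKETS);
	printf("lookup: %s\n", hash_lookup(table, a.name) ? "found" : "missing");
	free(table);
	return 0;
}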
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 23ceaa7127b..9c772583744 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -37,9 +37,7 @@
 #define DLM_THREAD_SHUFFLE_INTERVAL   5     // flush everything every 5 passes
 #define DLM_THREAD_MS                 200   // flush at least every 200 ms
 
-#define DLM_HASH_BITS     7
-#define DLM_HASH_SIZE     (1 << DLM_HASH_BITS)
-#define DLM_HASH_MASK     (DLM_HASH_SIZE - 1)
+#define DLM_HASH_BUCKETS     (PAGE_SIZE / sizeof(struct hlist_head))
 
 enum dlm_ast_type {
 	DLM_AST = 0,
@@ -87,7 +85,7 @@ enum dlm_ctxt_state {
 struct dlm_ctxt
 {
 	struct list_head list;
-	struct list_head *resources;
+	struct hlist_head *lockres_hash;
 	struct list_head dirty_list;
 	struct list_head purge_list;
 	struct list_head pending_asts;
@@ -217,7 +215,7 @@ struct dlm_lock_resource
 {
 	/* WARNING: Please see the comment in dlm_init_lockres before
 	 * adding fields here. */
-	struct list_head list;
+	struct hlist_node hash_node;
 	struct kref refs;
 
 	/* please keep these next 3 in this order
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index f339fe27975..54f61b76ab5 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -117,8 +117,8 @@ EXPORT_SYMBOL_GPL(dlm_print_one_lock);
 void dlm_dump_lock_resources(struct dlm_ctxt *dlm)
 {
 	struct dlm_lock_resource *res;
-	struct list_head *iter;
-	struct list_head *bucket;
+	struct hlist_node *iter;
+	struct hlist_head *bucket;
 	int i;
 
 	mlog(ML_NOTICE, "struct dlm_ctxt: %s, node=%u, key=%u\n",
@@ -129,12 +129,10 @@ void dlm_dump_lock_resources(struct dlm_ctxt *dlm)
 	}
 
 	spin_lock(&dlm->spinlock);
-	for (i=0; i<DLM_HASH_SIZE; i++) {
-		bucket = &(dlm->resources[i]);
-		list_for_each(iter, bucket) {
-			res = list_entry(iter, struct dlm_lock_resource, list);
+	for (i=0; i<DLM_HASH_BUCKETS; i++) {
+		bucket = &(dlm->lockres_hash[i]);
+		hlist_for_each_entry(res, iter, bucket, hash_node)
 			dlm_print_one_lock_resource(res);
-		}
 	}
 	spin_unlock(&dlm->spinlock);
 }
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 6ee30837389..8f3a9e3106f 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -77,26 +77,26 @@ static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);
 
 void __dlm_unhash_lockres(struct dlm_lock_resource *lockres)
 {
-	list_del_init(&lockres->list);
+	hlist_del_init(&lockres->hash_node);
 	dlm_lockres_put(lockres);
 }
 
 void __dlm_insert_lockres(struct dlm_ctxt *dlm,
 			  struct dlm_lock_resource *res)
 {
-	struct list_head *bucket;
+	struct hlist_head *bucket;
 	struct qstr *q;
 
 	assert_spin_locked(&dlm->spinlock);
 
 	q = &res->lockname;
 	q->hash = full_name_hash(q->name, q->len);
-	bucket = &(dlm->resources[q->hash & DLM_HASH_MASK]);
+	bucket = &(dlm->lockres_hash[q->hash % DLM_HASH_BUCKETS]);
 
 	/* get a reference for our hashtable */
 	dlm_lockres_get(res);
 
-	list_add_tail(&res->list, bucket);
+	hlist_add_head(&res->hash_node, bucket);
 }
 
 struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
@@ -104,9 +104,9 @@ struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
 						unsigned int len)
 {
 	unsigned int hash;
-	struct list_head *iter;
+	struct hlist_node *iter;
 	struct dlm_lock_resource *tmpres=NULL;
-	struct list_head *bucket;
+	struct hlist_head *bucket;
 
 	mlog_entry("%.*s\n", len, name);
 
@@ -114,11 +114,11 @@ struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
 
 	hash = full_name_hash(name, len);
 
-	bucket = &(dlm->resources[hash & DLM_HASH_MASK]);
+	bucket = &(dlm->lockres_hash[hash % DLM_HASH_BUCKETS]);
 
 	/* check for pre-existing lock */
-	list_for_each(iter, bucket) {
-		tmpres = list_entry(iter, struct dlm_lock_resource, list);
+	hlist_for_each(iter, bucket) {
+		tmpres = hlist_entry(iter, struct dlm_lock_resource, hash_node);
 		if (tmpres->lockname.len == len &&
 		    memcmp(tmpres->lockname.name, name, len) == 0) {
 			dlm_lockres_get(tmpres);
@@ -193,8 +193,8 @@ static int dlm_wait_on_domain_helper(const char *domain)
 
 static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm)
 {
-	if (dlm->resources)
-		free_page((unsigned long) dlm->resources);
+	if (dlm->lockres_hash)
+		free_page((unsigned long) dlm->lockres_hash);
 
 	if (dlm->name)
 		kfree(dlm->name);
@@ -303,10 +303,10 @@ static void dlm_migrate_all_locks(struct dlm_ctxt *dlm)
 	mlog(0, "Migrating locks from domain %s\n", dlm->name);
 restart:
 	spin_lock(&dlm->spinlock);
-	for (i=0; i<DLM_HASH_SIZE; i++) {
-		while (!list_empty(&dlm->resources[i])) {
-			res = list_entry(dlm->resources[i].next,
-					 struct dlm_lock_resource, list);
+	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+		while (!hlist_empty(&dlm->lockres_hash[i])) {
+			res = hlist_entry(dlm->lockres_hash[i].first,
+					  struct dlm_lock_resource, hash_node);
 			/* need reference when manually grabbing lockres */
 			dlm_lockres_get(res);
 			/* this should unhash the lockres
@@ -1191,18 +1191,17 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
 		goto leave;
 	}
 
-	dlm->resources = (struct list_head *) __get_free_page(GFP_KERNEL);
-	if (!dlm->resources) {
+	dlm->lockres_hash = (struct hlist_head *) __get_free_page(GFP_KERNEL);
+	if (!dlm->lockres_hash) {
 		mlog_errno(-ENOMEM);
 		kfree(dlm->name);
 		kfree(dlm);
 		dlm = NULL;
 		goto leave;
 	}
-	memset(dlm->resources, 0, PAGE_SIZE);
 
-	for (i=0; i<DLM_HASH_SIZE; i++)
-		INIT_LIST_HEAD(&dlm->resources[i]);
+	for (i=0; i<DLM_HASH_BUCKETS; i++)
+		INIT_HLIST_HEAD(&dlm->lockres_hash[i]);
 
 	strcpy(dlm->name, domain);
 	dlm->key = key;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 2e2e95e6949..847dd3cc4cf 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -564,7 +564,7 @@ static void dlm_lockres_release(struct kref *kref)
 
 	/* By the time we're ready to blow this guy away, we shouldn't
 	 * be on any lists. */
-	BUG_ON(!list_empty(&res->list));
+	BUG_ON(!hlist_unhashed(&res->hash_node));
 	BUG_ON(!list_empty(&res->granted));
 	BUG_ON(!list_empty(&res->converting));
 	BUG_ON(!list_empty(&res->blocked));
@@ -605,7 +605,7 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
 
 	init_waitqueue_head(&res->wq);
 	spin_lock_init(&res->spinlock);
-	INIT_LIST_HEAD(&res->list);
+	INIT_HLIST_NODE(&res->hash_node);
 	INIT_LIST_HEAD(&res->granted);
 	INIT_LIST_HEAD(&res->converting);
 	INIT_LIST_HEAD(&res->blocked);
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index ed76bda1a53..1e232000f3f 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1693,7 +1693,10 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
 					      u8 dead_node, u8 new_master)
 {
 	int i;
-	struct list_head *iter, *iter2, *bucket;
+	struct list_head *iter, *iter2;
+	struct hlist_node *hash_iter;
+	struct hlist_head *bucket;
+	struct dlm_lock_resource *res;
 
 	mlog_entry_void();
 
@@ -1717,10 +1720,9 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
 	 * for now we need to run the whole hash, clear
 	 * the RECOVERING state and set the owner
 	 * if necessary */
-	for (i=0; i<DLM_HASH_SIZE; i++) {
-		bucket = &(dlm->resources[i]);
-		list_for_each(iter, bucket) {
-			res = list_entry (iter, struct dlm_lock_resource, list);
+	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+		bucket = &(dlm->lockres_hash[i]);
+		hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
 			if (res->state & DLM_LOCK_RES_RECOVERING) {
 				if (res->owner == dead_node) {
 					mlog(0, "(this=%u) res %.*s owner=%u "
@@ -1852,10 +1854,10 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
 
 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
 {
-	struct list_head *iter;
+	struct hlist_node *iter;
 	struct dlm_lock_resource *res;
 	int i;
-	struct list_head *bucket;
+	struct hlist_head *bucket;
 	struct dlm_lock *lock;
 
 
@@ -1876,10 +1878,9 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
 	 * can be kicked again to see if any ASTs or BASTs
 	 * need to be fired as a result.
 	 */
-	for (i=0; i<DLM_HASH_SIZE; i++) {
-		bucket = &(dlm->resources[i]);
-		list_for_each(iter, bucket) {
-			res = list_entry (iter, struct dlm_lock_resource, list);
+	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+		bucket = &(dlm->lockres_hash[i]);
+		hlist_for_each_entry(res, iter, bucket, hash_node) {
 			/* always prune any $RECOVERY entries for dead nodes,
 			 * otherwise hangs can occur during later recovery */
 			if (dlm_is_recovery_lock(res->lockname.name,