| author | Tejun Heo <tj@kernel.org> | 2010-10-15 17:49:27 +0200 |
|---|---|---|
| committer | J. Bruce Fields <bfields@redhat.com> | 2010-10-21 10:11:45 -0400 |
| commit | a25e758c5fa1137e1bbc440194e55f7c59177145 (patch) | |
| tree | 4563a1d423ef9c9d54ca0c1222d34274613b31cf /net/sunrpc/xprtrdma/svc_rdma.c | |
| parent | 8f3a6de313391b6910aa7db185eb9f3e930a51cf (diff) | |
sunrpc/xprtrdma: clean up workqueue usage
* Create and use svc_rdma_wq instead of the system workqueue and
  flush_scheduled_work(). The dedicated workqueue is needed as a
  flushing domain for rdma->sc_work, which destroys its own containing
  transport and thus can't be flushed explicitly (see the sketch below).
* Replace cancel_delayed_work() + flush_scheduled_work() with
cancel_delayed_work_sync().
* Implement synchronous connect in xprt_rdma_connect() using
flush_delayed_work() on the rdma_connect work instead of using
flush_scheduled_work().
This is to prepare for the deprecation and removal of
flush_scheduled_work().
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
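For readers unfamiliar with these conversions, here is a minimal, self-contained sketch of the three patterns the commit message describes. It is not the svc_rdma code itself; all `my_*` names are invented for illustration.

```c
/*
 * Illustrative sketch only: a dedicated workqueue as the flushing domain
 * for a work item that frees its own container, cancel_delayed_work_sync()
 * replacing cancel_delayed_work() + flush_scheduled_work(), and
 * flush_delayed_work() for a synchronous connect.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;          /* plays the role of svc_rdma_wq */

struct my_conn {
        struct work_struct free_work;           /* plays the role of rdma->sc_work */
        struct delayed_work connect_work;
};

static void my_conn_connect_fn(struct work_struct *work)
{
        /* ... establish the connection ... */
}

/*
 * Frees its own containing object, so the object can't be used to flush it
 * afterwards; queueing it on my_wq lets module exit flush the whole queue.
 */
static void my_conn_free_fn(struct work_struct *work)
{
        struct my_conn *conn = container_of(work, struct my_conn, free_work);

        kfree(conn);
}

static struct my_conn *my_conn_alloc(void)
{
        struct my_conn *conn = kzalloc(sizeof(*conn), GFP_KERNEL);

        if (conn)
                INIT_DELAYED_WORK(&conn->connect_work, my_conn_connect_fn);
        return conn;
}

/*
 * Synchronous connect: flush just this delayed work item instead of
 * calling flush_scheduled_work() on the whole system workqueue.
 */
static void my_conn_connect_sync(struct my_conn *conn)
{
        schedule_delayed_work(&conn->connect_work, 0);
        flush_delayed_work(&conn->connect_work);
}

/*
 * One call that both cancels and waits, replacing
 * cancel_delayed_work() + flush_scheduled_work().
 */
static void my_conn_close(struct my_conn *conn)
{
        cancel_delayed_work_sync(&conn->connect_work);
}

/* Deferred destruction goes to the dedicated queue, not schedule_work();
 * callers are expected to have closed the connection first. */
static void my_conn_put(struct my_conn *conn)
{
        INIT_WORK(&conn->free_work, my_conn_free_fn);
        queue_work(my_wq, &conn->free_work);
}

static int __init my_init(void)
{
        my_wq = alloc_workqueue("my_wq", 0, 0);
        return my_wq ? 0 : -ENOMEM;
}

static void __exit my_exit(void)
{
        /* Drains any pending free_work before the module text goes away. */
        destroy_workqueue(my_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
```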
Diffstat (limited to 'net/sunrpc/xprtrdma/svc_rdma.c')
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma.c | 11
1 file changed, 10 insertions, 1 deletion
```diff
diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
index d718b8fa952..09af4fab1a4 100644
--- a/net/sunrpc/xprtrdma/svc_rdma.c
+++ b/net/sunrpc/xprtrdma/svc_rdma.c
@@ -43,6 +43,7 @@
 #include <linux/slab.h>
 #include <linux/fs.h>
 #include <linux/sysctl.h>
+#include <linux/workqueue.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/sched.h>
 #include <linux/sunrpc/svc_rdma.h>
@@ -74,6 +75,8 @@ atomic_t rdma_stat_sq_prod;
 struct kmem_cache *svc_rdma_map_cachep;
 struct kmem_cache *svc_rdma_ctxt_cachep;
 
+struct workqueue_struct *svc_rdma_wq;
+
 /*
  * This function implements reading and resetting an atomic_t stat
  * variable through read/write to a proc file. Any write to the file
@@ -231,7 +234,7 @@ static ctl_table svcrdma_root_table[] = {
 void svc_rdma_cleanup(void)
 {
         dprintk("SVCRDMA Module Removed, deregister RPC RDMA transport\n");
-        flush_scheduled_work();
+        destroy_workqueue(svc_rdma_wq);
         if (svcrdma_table_header) {
                 unregister_sysctl_table(svcrdma_table_header);
                 svcrdma_table_header = NULL;
@@ -249,6 +252,11 @@ int svc_rdma_init(void)
         dprintk("\tsq_depth : %d\n",
                 svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT);
         dprintk("\tmax_inline : %d\n", svcrdma_max_req_size);
+
+        svc_rdma_wq = alloc_workqueue("svc_rdma", 0, 0);
+        if (!svc_rdma_wq)
+                return -ENOMEM;
+
         if (!svcrdma_table_header)
                 svcrdma_table_header =
                         register_sysctl_table(svcrdma_root_table);
@@ -283,6 +291,7 @@ int svc_rdma_init(void)
         kmem_cache_destroy(svc_rdma_map_cachep);
  err0:
         unregister_sysctl_table(svcrdma_table_header);
+        destroy_workqueue(svc_rdma_wq);
         return -ENOMEM;
 }
 MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
```
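Note that this file only allocates svc_rdma_wq in svc_rdma_init() and destroys it in svc_rdma_cleanup(); per the commit message, the work item that actually runs on it, rdma->sc_work, is queued elsewhere in the svcrdma transport code, whose changes are excluded by the 'limited to' filter above. That is why destroy_workqueue() can stand in for the old flush_scheduled_work() here: destroying the queue first drains any still-pending self-destroying work.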