author     Trond Myklebust <Trond.Myklebust@netapp.com>  2006-12-07 15:48:15 -0500
committer  Trond Myklebust <Trond.Myklebust@netapp.com>  2006-12-07 15:48:15 -0500
commit     34161db6b14d984fb9b06c735b7b42f8803f6851 (patch)
tree       99656278b6697f1cde5b05894b7c0ee22c63a00e /fs
parent     5847e1f4d058677c5e46dc6c3e3c70e8855ea3ba (diff)
parent     620034c84d1d939717bdfbe02c51a3fee43541c3 (diff)
Merge branch 'master' of /home/trondmy/kernel/linux-2.6/ into merge_linus
Conflicts:
        include/linux/sunrpc/xprt.h
        net/sunrpc/xprtsock.c

Fix up conflicts with the workqueue changes.
Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/mux.c  16
-rw-r--r--  fs/aio.c  16
-rw-r--r--  fs/bio.c  6
-rw-r--r--  fs/file.c  6
-rw-r--r--  fs/gfs2/glock.c  8
-rw-r--r--  fs/ncpfs/inode.c  8
-rw-r--r--  fs/ncpfs/sock.c  20
-rw-r--r--  fs/nfs/client.c  2
-rw-r--r--  fs/nfs/namespace.c  8
-rw-r--r--  fs/nfs/nfs4_fs.h  2
-rw-r--r--  fs/nfs/nfs4renewd.c  5
-rw-r--r--  fs/nfsd/nfs4state.c  7
-rw-r--r--  fs/ocfs2/alloc.c  9
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c  10
-rw-r--r--  fs/ocfs2/cluster/quorum.c  4
-rw-r--r--  fs/ocfs2/cluster/tcp.c  78
-rw-r--r--  fs/ocfs2/cluster/tcp_internal.h  8
-rw-r--r--  fs/ocfs2/dlm/dlmcommon.h  2
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c  2
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c  5
-rw-r--r--  fs/ocfs2/dlm/userdlm.c  10
-rw-r--r--  fs/ocfs2/journal.c  7
-rw-r--r--  fs/ocfs2/journal.h  2
-rw-r--r--  fs/ocfs2/ocfs2.h  2
-rw-r--r--  fs/ocfs2/super.c  2
-rw-r--r--  fs/reiserfs/journal.c  12
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c  21
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c  9
28 files changed, 158 insertions, 129 deletions
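All 28 files above are fallout from the workqueue API change this merge pulls in: work handlers switch from void (*)(void *) to void (*)(struct work_struct *), INIT_WORK() loses its data argument, and each handler recovers its owning object with container_of(). What follows is a minimal sketch of that base pattern, assuming the post-change <linux/workqueue.h> API; struct foo_dev and the foo_* names are illustrative only and do not appear in this patch. The fs/9p/mux.c hunks directly below are the first instance of exactly this conversion.

#include <linux/workqueue.h>

struct foo_dev {
        int pending;
        struct work_struct rx_work;     /* work item embedded in its owner */
};

/* New-style handler: no void * data argument; recover the owner instead. */
static void foo_rx_work(struct work_struct *work)
{
        struct foo_dev *dev = container_of(work, struct foo_dev, rx_work);

        dev->pending = 0;               /* ... process dev ... */
}

static void foo_dev_setup(struct foo_dev *dev)
{
        /* old API: INIT_WORK(&dev->rx_work, foo_rx_work, dev); */
        INIT_WORK(&dev->rx_work, foo_rx_work);
        schedule_work(&dev->rx_work);
}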
diff --git a/fs/9p/mux.c b/fs/9p/mux.c
index 90a79c78454..944273c3dbf 100644
--- a/fs/9p/mux.c
+++ b/fs/9p/mux.c
@@ -110,8 +110,8 @@ struct v9fs_mux_rpc {
};
static int v9fs_poll_proc(void *);
-static void v9fs_read_work(void *);
-static void v9fs_write_work(void *);
+static void v9fs_read_work(struct work_struct *work);
+static void v9fs_write_work(struct work_struct *work);
static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
poll_table * p);
static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
@@ -297,8 +297,8 @@ struct v9fs_mux_data *v9fs_mux_init(struct v9fs_transport *trans, int msize,
m->rbuf = NULL;
m->wpos = m->wsize = 0;
m->wbuf = NULL;
- INIT_WORK(&m->rq, v9fs_read_work, m);
- INIT_WORK(&m->wq, v9fs_write_work, m);
+ INIT_WORK(&m->rq, v9fs_read_work);
+ INIT_WORK(&m->wq, v9fs_write_work);
m->wsched = 0;
memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
m->poll_task = NULL;
@@ -458,13 +458,13 @@ static int v9fs_poll_proc(void *a)
/**
* v9fs_write_work - called when a transport can send some data
*/
-static void v9fs_write_work(void *a)
+static void v9fs_write_work(struct work_struct *work)
{
int n, err;
struct v9fs_mux_data *m;
struct v9fs_req *req;
- m = a;
+ m = container_of(work, struct v9fs_mux_data, wq);
if (m->err < 0) {
clear_bit(Wworksched, &m->wsched);
@@ -564,7 +564,7 @@ static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
/**
* v9fs_read_work - called when there is some data to be read from a transport
*/
-static void v9fs_read_work(void *a)
+static void v9fs_read_work(struct work_struct *work)
{
int n, err;
struct v9fs_mux_data *m;
@@ -572,7 +572,7 @@ static void v9fs_read_work(void *a)
struct v9fs_fcall *rcall;
char *rbuf;
- m = a;
+ m = container_of(work, struct v9fs_mux_data, rq);
if (m->err < 0)
return;
diff --git a/fs/aio.c b/fs/aio.c
index 277a5f2d18a..287a1bc7a18 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -53,13 +53,13 @@ static kmem_cache_t *kioctx_cachep;
static struct workqueue_struct *aio_wq;
/* Used for rare fput completion. */
-static void aio_fput_routine(void *);
-static DECLARE_WORK(fput_work, aio_fput_routine, NULL);
+static void aio_fput_routine(struct work_struct *);
+static DECLARE_WORK(fput_work, aio_fput_routine);
static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);
-static void aio_kick_handler(void *);
+static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);
/* aio_setup
@@ -227,7 +227,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
INIT_LIST_HEAD(&ctx->active_reqs);
INIT_LIST_HEAD(&ctx->run_list);
- INIT_WORK(&ctx->wq, aio_kick_handler, ctx);
+ INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);
if (aio_setup_ring(ctx) < 0)
goto out_freectx;
@@ -469,7 +469,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
wake_up(&ctx->wait);
}
-static void aio_fput_routine(void *data)
+static void aio_fput_routine(struct work_struct *data)
{
spin_lock_irq(&fput_lock);
while (likely(!list_empty(&fput_head))) {
@@ -857,9 +857,9 @@ static inline void aio_run_all_iocbs(struct kioctx *ctx)
* space.
* Run on aiod's context.
*/
-static void aio_kick_handler(void *data)
+static void aio_kick_handler(struct work_struct *work)
{
- struct kioctx *ctx = data;
+ struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
mm_segment_t oldfs = get_fs();
int requeue;
@@ -874,7 +874,7 @@ static void aio_kick_handler(void *data)
* we're in a worker thread already, don't use queue_delayed_work,
*/
if (requeue)
- queue_work(aio_wq, &ctx->wq);
+ queue_delayed_work(aio_wq, &ctx->wq, 0);
}
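The aio hunks above show the second recurring shape: a work item that is also scheduled with a delay becomes a struct delayed_work, initialized with INIT_DELAYED_WORK(); container_of() then goes through the embedded .work member, and a zero-delay queue_delayed_work() replaces the old queue_work() requeue. A hedged sketch of that shape, with hypothetical foo_* names standing in for the kioctx code:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct foo_ctx {
        struct delayed_work wq;         /* was: struct work_struct wq; */
};

static struct workqueue_struct *foo_wq; /* created elsewhere */

static void foo_kick_handler(struct work_struct *work)
{
        /* delayed_work embeds a work_struct, hence the extra .work step */
        struct foo_ctx *ctx = container_of(work, struct foo_ctx, wq.work);
        int requeue = 0;

        /* ... run ctx's queued requests, set requeue if more remain ... */
        if (requeue)
                queue_delayed_work(foo_wq, &ctx->wq, 0);
}

static void foo_ctx_init(struct foo_ctx *ctx)
{
        INIT_DELAYED_WORK(&ctx->wq, foo_kick_handler);
        queue_delayed_work(foo_wq, &ctx->wq, HZ);
}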
diff --git a/fs/bio.c b/fs/bio.c
index aa4d09bd4e7..50c40ce2cea 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -940,16 +940,16 @@ static void bio_release_pages(struct bio *bio)
* run one bio_put() against the BIO.
*/
-static void bio_dirty_fn(void *data);
+static void bio_dirty_fn(struct work_struct *work);
-static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
+static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;
/*
* This runs in process context
*/
-static void bio_dirty_fn(void *data)
+static void bio_dirty_fn(struct work_struct *work)
{
unsigned long flags;
struct bio *bio;
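Statically declared work items change the same way: DECLARE_WORK() (and DECLARE_DELAYED_WORK(), used later in this diff for nfs_automount_task and laundromat_work) drop their third data argument, so a handler that used to receive a pointer now reaches for the file-scope state that pointer named; bio_dirty_list here, nfs_automount_list in fs/nfs/namespace.c. A hedged sketch with hypothetical names:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static LIST_HEAD(foo_dirty_list);
static DEFINE_SPINLOCK(foo_dirty_lock);

static void foo_dirty_fn(struct work_struct *work);
/* old API: DECLARE_WORK(foo_dirty_work, foo_dirty_fn, NULL); */
static DECLARE_WORK(foo_dirty_work, foo_dirty_fn);

static void foo_dirty_fn(struct work_struct *work)
{
        unsigned long flags;
        LIST_HEAD(local);

        /* No data pointer any more: use the file-scope list directly. */
        spin_lock_irqsave(&foo_dirty_lock, flags);
        list_splice_init(&foo_dirty_list, &local);
        spin_unlock_irqrestore(&foo_dirty_lock, flags);

        /* ... process the entries now on 'local' ... */
}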
diff --git a/fs/file.c b/fs/file.c
index 8e81775c5dc..3787e82f54c 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -91,8 +91,10 @@ out:
spin_unlock(&fddef->lock);
}
-static void free_fdtable_work(struct fdtable_defer *f)
+static void free_fdtable_work(struct work_struct *work)
{
+ struct fdtable_defer *f =
+ container_of(work, struct fdtable_defer, wq);
struct fdtable *fdt;
spin_lock_bh(&f->lock);
@@ -351,7 +353,7 @@ static void __devinit fdtable_defer_list_init(int cpu)
{
struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
spin_lock_init(&fddef->lock);
- INIT_WORK(&fddef->wq, (void (*)(void *))free_fdtable_work, fddef);
+ INIT_WORK(&fddef->wq, free_fdtable_work);
init_timer(&fddef->timer);
fddef->timer.data = (unsigned long)fddef;
fddef->timer.function = fdtable_timer;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 78fe0fae23f..55f5333dae9 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -35,7 +35,7 @@
struct greedy {
struct gfs2_holder gr_gh;
- struct work_struct gr_work;
+ struct delayed_work gr_work;
};
struct gfs2_gl_hash_bucket {
@@ -1368,9 +1368,9 @@ static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
glops->go_xmote_th(gl, state, flags);
}
-static void greedy_work(void *data)
+static void greedy_work(struct work_struct *work)
{
- struct greedy *gr = data;
+ struct greedy *gr = container_of(work, struct greedy, gr_work.work);
struct gfs2_holder *gh = &gr->gr_gh;
struct gfs2_glock *gl = gh->gh_gl;
const struct gfs2_glock_operations *glops = gl->gl_ops;
@@ -1422,7 +1422,7 @@ int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
gfs2_holder_init(gl, 0, 0, gh);
set_bit(HIF_GREEDY, &gh->gh_iflags);
- INIT_WORK(&gr->gr_work, greedy_work, gr);
+ INIT_DELAYED_WORK(&gr->gr_work, greedy_work);
set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
schedule_delayed_work(&gr->gr_work, time);
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 42e3bef270c..72dad552aa0 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -577,12 +577,12 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
server->rcv.ptr = (unsigned char*)&server->rcv.buf;
server->rcv.len = 10;
server->rcv.state = 0;
- INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc, server);
- INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc, server);
+ INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc);
+ INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc);
sock->sk->sk_write_space = ncp_tcp_write_space;
} else {
- INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc, server);
- INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc, server);
+ INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc);
+ INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc);
server->timeout_tm.data = (unsigned long)server;
server->timeout_tm.function = ncpdgram_timeout_call;
}
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
index 11c2b252ebe..e496d8b65e9 100644
--- a/fs/ncpfs/sock.c
+++ b/fs/ncpfs/sock.c
@@ -350,9 +350,10 @@ static void info_server(struct ncp_server *server, unsigned int id, const void *
}
}
-void ncpdgram_rcv_proc(void *s)
+void ncpdgram_rcv_proc(struct work_struct *work)
{
- struct ncp_server *server = s;
+ struct ncp_server *server =
+ container_of(work, struct ncp_server, rcv.tq);
struct socket* sock;
sock = server->ncp_sock;
@@ -468,9 +469,10 @@ static void __ncpdgram_timeout_proc(struct ncp_server *server)
}
}
-void ncpdgram_timeout_proc(void *s)
+void ncpdgram_timeout_proc(struct work_struct *work)
{
- struct ncp_server *server = s;
+ struct ncp_server *server =
+ container_of(work, struct ncp_server, timeout_tq);
mutex_lock(&server->rcv.creq_mutex);
__ncpdgram_timeout_proc(server);
mutex_unlock(&server->rcv.creq_mutex);
@@ -652,18 +654,20 @@ skipdata:;
}
}
-void ncp_tcp_rcv_proc(void *s)
+void ncp_tcp_rcv_proc(struct work_struct *work)
{
- struct ncp_server *server = s;
+ struct ncp_server *server =
+ container_of(work, struct ncp_server, rcv.tq);
mutex_lock(&server->rcv.creq_mutex);
__ncptcp_rcv_proc(server);
mutex_unlock(&server->rcv.creq_mutex);
}
-void ncp_tcp_tx_proc(void *s)
+void ncp_tcp_tx_proc(struct work_struct *work)
{
- struct ncp_server *server = s;
+ struct ncp_server *server =
+ container_of(work, struct ncp_server, tx.tq);
mutex_lock(&server->rcv.creq_mutex);
__ncptcp_try_send(server);
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 5fea638743e..23ab145daa2 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -143,7 +143,7 @@ static struct nfs_client *nfs_alloc_client(const char *hostname,
INIT_LIST_HEAD(&clp->cl_state_owners);
INIT_LIST_HEAD(&clp->cl_unused);
spin_lock_init(&clp->cl_lock);
- INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
+ INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
clp->cl_boot_time = CURRENT_TIME;
clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index ec1114b33d8..371b804e7cc 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -18,10 +18,10 @@
#define NFSDBG_FACILITY NFSDBG_VFS
-static void nfs_expire_automounts(void *list);
+static void nfs_expire_automounts(struct work_struct *work);
LIST_HEAD(nfs_automount_list);
-static DECLARE_WORK(nfs_automount_task, nfs_expire_automounts, &nfs_automount_list);
+static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts);
int nfs_mountpoint_expiry_timeout = 500 * HZ;
static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,
@@ -164,9 +164,9 @@ struct inode_operations nfs_referral_inode_operations = {
.follow_link = nfs_follow_mountpoint,
};
-static void nfs_expire_automounts(void *data)
+static void nfs_expire_automounts(struct work_struct *work)
{
- struct list_head *list = (struct list_head *)data;
+ struct list_head *list = &nfs_automount_list;
mark_mounts_for_expiry(list);
if (!list_empty(list))
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 6f346677332..c26cd978c7c 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -185,7 +185,7 @@ extern const u32 nfs4_fs_locations_bitmap[2];
extern void nfs4_schedule_state_renewal(struct nfs_client *);
extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
extern void nfs4_kill_renewd(struct nfs_client *);
-extern void nfs4_renew_state(void *);
+extern void nfs4_renew_state(struct work_struct *);
/* nfs4state.c */
struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp);
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 7b6df1852e7..823298561c0 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -59,9 +59,10 @@
#define NFSDBG_FACILITY NFSDBG_PROC
void
-nfs4_renew_state(void *data)
+nfs4_renew_state(struct work_struct *work)
{
- struct nfs_client *clp = (struct nfs_client *)data;
+ struct nfs_client *clp =
+ container_of(work, struct nfs_client, cl_renewd.work);
struct rpc_cred *cred;
long lease, timeout;
unsigned long last, now;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 293b6495829..e431e93ab50 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1829,9 +1829,8 @@ out:
}
static struct workqueue_struct *laundry_wq;
-static struct work_struct laundromat_work;
-static void laundromat_main(void *);
-static DECLARE_WORK(laundromat_work, laundromat_main, NULL);
+static void laundromat_main(struct work_struct *);
+static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
__be32
nfsd4_renew(clientid_t *clid)
@@ -1940,7 +1939,7 @@ nfs4_laundromat(void)
}
void
-laundromat_main(void *not_used)
+laundromat_main(struct work_struct *not_used)
{
time_t t;
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 85a048b7a67..edc91ca3792 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1197,10 +1197,12 @@ int ocfs2_flush_truncate_log(struct ocfs2_super *osb)
return status;
}
-static void ocfs2_truncate_log_worker(void *data)
+static void ocfs2_truncate_log_worker(struct work_struct *work)
{
int status;
- struct ocfs2_super *osb = data;
+ struct ocfs2_super *osb =
+ container_of(work, struct ocfs2_super,
+ osb_truncate_log_wq.work);
mlog_entry_void();
@@ -1432,7 +1434,8 @@ int ocfs2_truncate_log_init(struct ocfs2_super *osb)
/* ocfs2_truncate_log_shutdown keys on the existence of
* osb->osb_tl_inode so we don't set any of the osb variables
* until we're sure all is well. */
- INIT_WORK(&osb->osb_truncate_log_wq, ocfs2_truncate_log_worker, osb);
+ INIT_DELAYED_WORK(&osb->osb_truncate_log_wq,
+ ocfs2_truncate_log_worker);
osb->osb_tl_bh = tl_bh;
osb->osb_tl_inode = tl_inode;
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 305cba3681f..4cd9a958045 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -141,7 +141,7 @@ struct o2hb_region {
* recognizes a node going up and down in one iteration */
u64 hr_generation;
- struct work_struct hr_write_timeout_work;
+ struct delayed_work hr_write_timeout_work;
unsigned long hr_last_timeout_start;
/* Used during o2hb_check_slot to hold a copy of the block
@@ -156,9 +156,11 @@ struct o2hb_bio_wait_ctxt {
int wc_error;
};
-static void o2hb_write_timeout(void *arg)
+static void o2hb_write_timeout(struct work_struct *work)
{
- struct o2hb_region *reg = arg;
+ struct o2hb_region *reg =
+ container_of(work, struct o2hb_region,
+ hr_write_timeout_work.work);
mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
"milliseconds\n", reg->hr_dev_name,
@@ -1404,7 +1406,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
goto out;
}
- INIT_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout, reg);
+ INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);
/*
* A node is considered live after it has beat LIVE_THRESHOLD
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
index 7bba98fbfc1..4705d659fe5 100644
--- a/fs/ocfs2/cluster/quorum.c
+++ b/fs/ocfs2/cluster/quorum.c
@@ -88,7 +88,7 @@ void o2quo_disk_timeout(void)
o2quo_fence_self();
}
-static void o2quo_make_decision(void *arg)
+static void o2quo_make_decision(struct work_struct *work)
{
int quorum;
int lowest_hb, lowest_reachable = 0, fence = 0;
@@ -306,7 +306,7 @@ void o2quo_init(void)
struct o2quo_state *qs = &o2quo_state;
spin_lock_init(&qs->qs_lock);
- INIT_WORK(&qs->qs_work, o2quo_make_decision, NULL);
+ INIT_WORK(&qs->qs_work, o2quo_make_decision);
}
void o2quo_exit(void)
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index b650efa8c8b..9b3209dc0b1 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -140,11 +140,11 @@ static int o2net_sys_err_translations[O2NET_ERR_MAX] =
[O2NET_ERR_DIED] = -EHOSTDOWN,};
/* can't quite avoid *all* internal declarations :/ */
-static void o2net_sc_connect_completed(void *arg);
-static void o2net_rx_until_empty(void *arg);
-static void o2net_shutdown_sc(void *arg);
+static void o2net_sc_connect_completed(struct work_struct *work);
+static void o2net_rx_until_empty(struct work_struct *work);
+static void o2net_shutdown_sc(struct work_struct *work);
static void o2net_listen_data_ready(struct sock *sk, int bytes);
-static void o2net_sc_send_keep_req(void *arg);
+static void o2net_sc_send_keep_req(struct work_struct *work);
static void o2net_idle_timer(unsigned long data);
static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
@@ -308,10 +308,10 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node)
o2nm_node_get(node);
sc->sc_node = node;
- INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed, sc);
- INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty, sc);
- INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc, sc);
- INIT_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req, sc);
+ INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed);
+ INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty);
+ INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
+ INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req);
init_timer(&sc->sc_idle_timeout);
sc->sc_idle_timeout.function = o2net_idle_timer;
@@ -342,7 +342,7 @@ static void o2net_sc_queue_work(struct o2net_sock_container *sc,
sc_put(sc);
}
static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc,
- struct work_struct *work,
+ struct delayed_work *work,
int delay)
{
sc_get(sc);
@@ -350,7 +350,7 @@ static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc,
sc_put(sc);
}
static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc,
- struct work_struct *work)
+ struct delayed_work *work)
{
if (cancel_delayed_work(work))
sc_put(sc);
@@ -564,9 +564,11 @@ static void o2net_ensure_shutdown(struct o2net_node *nn,
* ourselves as state_change couldn't get the nn_lock and call set_nn_state
* itself.
*/
-static void o2net_shutdown_sc(void *arg)
+static void o2net_shutdown_sc(struct work_struct *work)
{
- struct o2net_sock_container *sc = arg;
+ struct o2net_sock_container *sc =
+ container_of(work, struct o2net_sock_container,
+ sc_shutdown_work);
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
sclog(sc, "shutting down\n");
@@ -1201,9 +1203,10 @@ out:
/* this work func is triggerd by data ready. it reads until it can read no
* more. it interprets 0, eof, as fatal. if data_ready hits while we're doing
* our work the work struct will be marked and we'll be called again. */
-static void o2net_rx_until_empty(void *arg)
+static void o2net_rx_until_empty(struct work_struct *work)
{
- struct o2net_sock_container *sc = arg;
+ struct o2net_sock_container *sc =
+ container_of(work, struct o2net_sock_container, sc_rx_work);
int ret;
do {
@@ -1249,9 +1252,11 @@ static int o2net_set_nodelay(struct socket *sock)
/* called when a connect completes and after a sock is accepted. the
* rx path will see the response and mark the sc valid */
-static void o2net_sc_connect_completed(void *arg)
+static void o2net_sc_connect_completed(struct work_struct *work)
{
- struct o2net_sock_container *sc = arg;
+ struct o2net_sock_container *sc =
+ container_of(work, struct o2net_sock_container,
+ sc_connect_work);
mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n",
(unsigned long long)O2NET_PROTOCOL_VERSION,
@@ -1262,9 +1267,11 @@ static void o2net_sc_connect_completed(void *arg)
}
/* this is called as a work_struct func. */
-static void o2net_sc_send_keep_req(void *arg)
+static void o2net_sc_send_keep_req(struct work_struct *work)
{
- struct o2net_sock_container *sc = arg;
+ struct o2net_sock_container *sc =
+ container_of(work, struct o2net_sock_container,
+ sc_keepalive_work.work);
o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req));
sc_put(sc);
@@ -1314,14 +1321,15 @@ static void o2net_sc_postpone_idle(struct o2net_sock_container *sc)
* having a connect attempt fail, etc. This centralizes the logic which decides
* if a connect attempt should be made or if we should give up and all future
* transmit attempts should fail */
-static void o2net_start_connect(void *arg)
+static void o2net_start_connect(struct work_struct *work)
{
- struct o2net_node *nn = arg;
+ struct o2net_node *nn =
+ container_of(work, struct o2net_node, nn_connect_work.work);
struct o2net_sock_container *sc = NULL;
struct o2nm_node *node = NULL, *mynode = NULL;
struct socket *sock = NULL;
struct sockaddr_in myaddr = {0, }, remoteaddr = {0, };
- int ret = 0;
+ int ret = 0, stop;
/* if we're greater we initiate tx, otherwise we accept */
if (o2nm_this_node() <= o2net_num_from_nn(nn))
@@ -1342,10 +1350,9 @@ static void o2net_start_connect(void *arg)
spin_lock(&nn->nn_lock);
/* see if we already have one pending or have given up */
- if (nn->nn_sc || nn->nn_persistent_error)
- arg = NULL;
+ stop = (nn->nn_sc || nn->nn_persistent_error);
spin_unlock(&nn->nn_lock);
- if (arg == NULL) /* *shrug*, needed some indicator */
+ if (stop)
goto out;
nn->nn_last_connect_attempt = jiffies;
@@ -1421,9 +1428,10 @@ out:
return;
}
-static void o2net_connect_expired(void *arg)
+static void o2net_connect_expired(struct work_struct *work)
{
- struct o2net_node *nn = arg;
+ struct o2net_node *nn =
+ container_of(work, struct o2net_node, nn_connect_expired.work);
spin_lock(&nn->nn_lock);
if (!nn->nn_sc_valid) {
@@ -1436,9 +1444,10 @@ static void o2net_connect_expired(void *arg)
spin_unlock(&nn->nn_lock);
}
-static void o2net_still_up(void *arg)
+static void o2net_still_up(struct work_struct *work)
{
- struct o2net_node *nn = arg;
+ struct o2net_node *nn =
+ container_of(work, struct o2net_node, nn_still_up.work);
o2quo_hb_still_up(o2net_num_from_nn(nn));
}
@@ -1644,9 +1653,9 @@ out:
return ret;
}
-static void o2net_accept_many(void *arg)
+static void o2net_accept_many(struct work_struct *work)
{
- struct socket *sock = arg;
+ struct socket *sock = o2net_listen_sock;
while (o2net_accept_one(sock) == 0)
cond_resched();
}
@@ -1700,7 +1709,7 @@ static int o2net_open_listening_sock(__be16 port)
write_unlock_bh(&sock->sk->sk_callback_lock);
o2net_listen_sock = sock;
- INIT_WORK(&o2net_listen_work, o2net_accept_many, sock);
+ INIT_WORK(&o2net_listen_work, o2net_accept_many);
sock->sk->sk_reuse = 1;
ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
@@ -1819,9 +1828,10 @@ int o2net_init(void)
struct o2net_node *nn = o2net_nn_from_num(i);
spin_lock_init(&nn->nn_lock);
- INIT_WORK(&nn->nn_connect_work, o2net_start_connect, nn);
- INIT_WORK(&nn->nn_connect_expired, o2net_connect_expired, nn);
- INIT_WORK(&nn->nn_still_up, o2net_still_up, nn);
+ INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect);
+ INIT_DELAYED_WORK(&nn->nn_connect_expired,
+ o2net_connect_expired);
+ INIT_DELAYED_WORK(&nn->nn_still_up, o2net_still_up);
/* until we see hb from a node we'll return einval */
nn->nn_persistent_error = -ENOTCONN;
init_waitqueue_head(&nn->nn_sc_wq);
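The o2net hunks above also carry two secondary consequences of the conversion: helpers that wrap queue_delayed_work()/cancel_delayed_work() must now take a struct delayed_work * rather than a work_struct *, and a handler can no longer abuse its old arg pointer (set to NULL under the lock) as a bail-out flag, so a local decision variable replaces it. A hedged sketch of that bail-out pattern; struct foo_node and its fields are illustrative:

#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct foo_node {
        spinlock_t nn_lock;
        void *nn_sc;                    /* existing connection, if any */
        int nn_persistent_error;
        struct delayed_work nn_connect_work;
};

static void foo_start_connect(struct work_struct *work)
{
        struct foo_node *nn =
                container_of(work, struct foo_node, nn_connect_work.work);
        int stop;

        spin_lock(&nn->nn_lock);
        /* the old code cleared 'arg' here; decide with a local flag instead */
        stop = (nn->nn_sc || nn->nn_persistent_error);
        spin_unlock(&nn->nn_lock);
        if (stop)
                return;

        /* ... attempt the connection for nn ... */
}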
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 4b46aac7d24..daebbd3a2c8 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -86,18 +86,18 @@ struct o2net_node {
* connect attempt fails and so can be self-arming. shutdown is
* careful to first mark the nn such that no connects will be attempted
* before canceling delayed connect work and flushing the queue. */
- struct work_struct nn_connect_work;
+ struct delayed_work nn_connect_work;
unsigned long nn_last_connect_attempt;
/* this is queued as nodes come up and is canceled when a connection is
* established. this expiring gives up on the node and errors out
* transmits */
- struct work_struct nn_connect_expired;
+ struct delayed_work nn_connect_expired;
/* after we give up on a socket we wait a while before deciding
* that it is still heartbeating and that we should do some
* quorum work */
- struct work_struct nn_still_up;
+ struct delayed_work nn_still_up;
};
struct o2net_sock_container {
@@ -129,7 +129,7 @@ struct o2net_sock_container {
struct work_struct sc_shutdown_work;
struct timer_list sc_idle_timeout;
- struct work_struct sc_keepalive_work;
+ struct delayed_work sc_keepalive_work;
unsigned sc_handshake_ok:1;
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index fa968180b07..6b6ff76538c 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -153,7 +153,7 @@ static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned
* called functions that cannot be directly called from the
* net message handlers for some reason, usually because
* they need to send net messages of their own. */
-void dlm_dispatch_work(void *data);
+void dlm_dispatch_work(struct work_struct *work);
struct dlm_lock_resource;
struct dlm_work_item;
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index f6cdab3a2c6..420a375a394 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -1297,7 +1297,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
spin_lock_init(&dlm->work_lock);
INIT_LIST_HEAD(&dlm->work_list);
- INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work, dlm);
+ INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work);
kref_init(&dlm->dlm_refs);
dlm->dlm_state = DLM_CTXT_NEW;
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 9d950d7cea3..fb3e2b0817f 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -153,9 +153,10 @@ static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
}
/* Worker function used during recovery. */
-void dlm_dispatch_work(void *data)
+void dlm_dispatch_work(struct work_struct *work)
{
- struct dlm_ctxt *dlm = (struct dlm_ctxt *)data;
+ struct dlm_ctxt *dlm =
+ container_of(work, struct dlm_ctxt, dispatched_work);
LIST_HEAD(tmp_list);
struct list_head *iter, *iter2;
struct dlm_work_item *item;
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c
index eead48bbfac..7d2f578b267 100644
--- a/fs/ocfs2/dlm/userdlm.c
+++ b/fs/ocfs2/dlm/userdlm.c
@@ -171,15 +171,14 @@ static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres)
BUG();
}
-static void user_dlm_unblock_lock(void *opaque);
+static void user_dlm_unblock_lock(struct work_struct *work);
static void __user_dlm_queue_lockres(struct user_lock_res *lockres)
{
if (!(lockres->l_flags & USER_LOCK_QUEUED)) {
user_dlm_grab_inode_ref(lockres);
- INIT_WORK(&lockres->l_work, user_dlm_unblock_lock,
- lockres);
+ INIT_WORK(&lockres->l_work, user_dlm_unblock_lock);
queue_work(user_dlm_worker, &lockres->l_work);
lockres->l_flags |= USER_LOCK_QUEUED;
@@ -279,10 +278,11 @@ static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres)
iput(inode);
}
-static void user_dlm_unblock_lock(void *opaque)
+static void user_dlm_unblock_lock(struct work_struct *work)
{
int new_level, status;
- struct user_lock_res *lockres = (struct user_lock_res *) opaque;
+ struct user_lock_res *lockres =
+ container_of(work, struct user_lock_res, l_work);
struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
mlog(0, "processing lockres %.*s\n", lockres->l_namelen,
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index c0ad7cb5952..1d7f4ab1e5e 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -703,11 +703,12 @@ struct ocfs2_la_recovery_item {
* NOTE: This function can and will sleep on recovery of other nodes
* during cluster locking, just like any other ocfs2 process.
*/
-void ocfs2_complete_recovery(void *data)
+void ocfs2_complete_recovery(struct work_struct *work)
{
int ret;
- struct ocfs2_super *osb = data;
- struct ocfs2_journal *journal = osb->journal;
+ struct ocfs2_journal *journal =
+ container_of(work, struct ocfs2_journal, j_recovery_work);
+ struct ocfs2_super *osb = journal->j_osb;
struct ocfs2_dinode *la_dinode, *tl_dinode;
struct ocfs2_la_recovery_item *item;
struct list_head *p, *n;
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index d86cb960b7e..899112ad813 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -133,7 +133,7 @@ static inline void ocfs2_inode_set_new(struct ocfs2_super *osb,
}
/* Exported only for the journal struct init code in super.c. Do not call. */
-void ocfs2_complete_recovery(void *data);
+void ocfs2_complete_recovery(struct work_struct *work);
/*
* Journal Control:
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 078883772bd..b767fd7da6e 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -285,7 +285,7 @@ struct ocfs2_super
/* Truncate log info */
struct inode *osb_tl_inode;
struct buffer_head *osb_tl_bh;
- struct work_struct osb_truncate_log_wq;
+ struct delayed_work osb_truncate_log_wq;
struct ocfs2_node_map osb_recovering_orphan_dirs;
unsigned int *osb_orphan_wipes;
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index b0992573dee..d9b4214a12d 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1365,7 +1365,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
spin_lock_init(&journal->j_lock);
journal->j_trans_id = (unsigned long) 1;
INIT_LIST_HEAD(&journal->j_la_cleanups);
- INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery, osb);
+ INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery);
journal->j_state = OCFS2_JOURNAL_FREE;
/* get some pseudo constants for clustersize bits */
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index ac93174c963..7280a23ef34 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -104,7 +104,7 @@ static int release_journal_dev(struct super_block *super,
struct reiserfs_journal *journal);
static int dirty_one_transaction(struct super_block *s,
struct reiserfs_journal_list *jl);
-static void flush_async_commits(void *p);
+static void flush_async_commits(struct work_struct *work);
static void queue_log_writer(struct super_block *s);
/* values for join in do_journal_begin_r */
@@ -2836,7 +2836,8 @@ int journal_init(struct super_block *p_s_sb, const char *j_dev_name,
if (reiserfs_mounted_fs_count <= 1)
commit_wq = create_workqueue("reiserfs");
- INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb);
+ INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
+ journal->j_work_sb = p_s_sb;
return 0;
free_and_return:
free_journal_ram(p_s_sb);
@@ -3447,10 +3448,11 @@ int journal_end_sync(struct reiserfs_transaction_handle *th,
/*
** writeback the pending async commits to disk
*/
-static void flush_async_commits(void *p)
+static void flush_async_commits(struct work_struct *work)
{
- struct super_block *p_s_sb = p;
- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
+ struct reiserfs_journal *journal =
+ container_of(work, struct reiserfs_journal, j_work.work);
+ struct super_block *p_s_sb = journal->j_work_sb;
struct reiserfs_journal_list *jl;
struct list_head *entry;
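container_of() only recovers the structure the work item is embedded in. When the handler really needs some other object, as the reiserfs hunk above needs the super_block, the conversion adds an explicit back-pointer (j_work_sb) stored next to the work item at init time. A hedged sketch of that back-pointer pattern; the foo_* names are illustrative:

#include <linux/fs.h>
#include <linux/workqueue.h>

struct foo_journal {
        struct delayed_work j_work;
        struct super_block *j_work_sb;  /* carries what the old data arg did */
};

static void foo_flush_async_commits(struct work_struct *work)
{
        struct foo_journal *journal =
                container_of(work, struct foo_journal, j_work.work);
        struct super_block *sb = journal->j_work_sb;

        (void)sb;                       /* ... flush async commits for sb ... */
}

static void foo_journal_init(struct foo_journal *journal, struct super_block *sb)
{
        INIT_DELAYED_WORK(&journal->j_work, foo_flush_async_commits);
        journal->j_work_sb = sb;        /* stash what container_of cannot reach */
}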
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 09360cf1e1f..8e6b56fc1ca 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -149,9 +149,10 @@ xfs_destroy_ioend(
*/
STATIC void
xfs_end_bio_delalloc(
- void *data)
+ struct work_struct *work)
{
- xfs_ioend_t *ioend = data;
+ xfs_ioend_t *ioend =
+ container_of(work, xfs_ioend_t, io_work);
xfs_destroy_ioend(ioend);
}
@@ -161,9 +162,10 @@ xfs_end_bio_delalloc(
*/
STATIC void
xfs_end_bio_written(
- void *data)
+ struct work_struct *work)
{
- xfs_ioend_t *ioend = data;
+ xfs_ioend_t *ioend =
+ container_of(work, xfs_ioend_t, io_work);
xfs_destroy_ioend(ioend);
}
@@ -176,9 +178,10 @@ xfs_end_bio_written(
*/
STATIC void
xfs_end_bio_unwritten(
- void *data)
+ struct work_struct *work)
{
- xfs_ioend_t *ioend = data;
+ xfs_ioend_t *ioend =
+ container_of(work, xfs_ioend_t, io_work);
bhv_vnode_t *vp = ioend->io_vnode;
xfs_off_t offset = ioend->io_offset;
size_t size = ioend->io_size;
@@ -220,11 +223,11 @@ xfs_alloc_ioend(
ioend->io_size = 0;
if (type == IOMAP_UNWRITTEN)
- INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
+ INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
else if (type == IOMAP_DELAY)
- INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
+ INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
else
- INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);
+ INIT_WORK(&ioend->io_work, xfs_end_bio_written);
return ioend;
}
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index d3382843698..eef4a0ba11e 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -994,9 +994,10 @@ xfs_buf_wait_unpin(
STATIC void
xfs_buf_iodone_work(
- void *v)
+ struct work_struct *work)
{
- xfs_buf_t *bp = (xfs_buf_t *)v;
+ xfs_buf_t *bp =
+ container_of(work, xfs_buf_t, b_iodone_work);
if (bp->b_iodone)
(*(bp->b_iodone))(bp);
@@ -1017,10 +1018,10 @@ xfs_buf_ioend(
if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
if (schedule) {
- INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp);
+ INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
queue_work(xfslogd_workqueue, &bp->b_iodone_work);
} else {
- xfs_buf_iodone_work(bp);
+ xfs_buf_iodone_work(&bp->b_iodone_work);
}
} else {
up(&bp->b_iodonesema);
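Finally, the xfs_buf hunk above shows the case where the handler is sometimes called synchronously instead of being queued: the direct call now has to pass the address of the embedded work item so that container_of() inside the handler still resolves to the right buffer. A hedged sketch; struct foo_buf and foo_wq are illustrative:

#include <linux/workqueue.h>

struct foo_buf {
        int flags;
        struct work_struct b_iodone_work;
};

static struct workqueue_struct *foo_wq; /* created elsewhere */

static void foo_iodone_work(struct work_struct *work)
{
        struct foo_buf *bp = container_of(work, struct foo_buf, b_iodone_work);

        bp->flags = 0;                  /* ... complete I/O on bp ... */
}

static void foo_ioend(struct foo_buf *bp, int schedule)
{
        if (schedule) {
                INIT_WORK(&bp->b_iodone_work, foo_iodone_work);
                queue_work(foo_wq, &bp->b_iodone_work);
        } else {
                /* old code called foo_iodone_work(bp); pass the work item now */
                foo_iodone_work(&bp->b_iodone_work);
        }
}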