| field     | value                                                                   | date                      |
|-----------|-------------------------------------------------------------------------|---------------------------|
| author    | Andreas Gruenbacher <agruen@linbit.com>                                 | 2011-07-28 15:27:51 +0200 |
| committer | Philipp Reisner <philipp.reisner@linbit.com>                            | 2014-02-17 16:50:39 +0100 |
| commit    | 84b8c06b6591e73250e6ab4834a02a86c8994b91 (patch)                        |                           |
| tree      | ef2be94ac7b57c1042560a9a39d8c84550803b85 /drivers/block/drbd/drbd_req.c |                           |
| parent    | 8682eae9b4b26d54b9eeac8e17c534197e6d8744 (diff)                         |                           |
drbd: Create a dedicated struct drbd_device_work
drbd_device_work is a work item that has a reference to a device,
while drbd_work is a more generic work item that does not carry
a reference to a device.
All callbacks receive a pointer to a drbd_work instance; those callbacks
that expect a drbd_device_work use the container_of macro to recover it.
Signed-off-by: Andreas Gruenbacher <agruen@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
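To make the split concrete, here is a minimal sketch of the pattern the commit message describes: a generic work item, a device-bound work item that embeds it, and a callback recovering the outer struct with container_of(). The callback w_example is hypothetical, and the struct layouts are a sketch of what this patch series introduces, not a verbatim copy of drbd_int.h.

```c
#include <linux/kernel.h>	/* container_of() */
#include <linux/list.h>

struct drbd_device;		/* opaque here */

/* Generic work item: carries no device reference. */
struct drbd_work {
	struct list_head list;
	int (*cb)(struct drbd_work *, int cancel);
};

/* Device-bound work item: embeds a drbd_work and adds the reference. */
struct drbd_device_work {
	struct drbd_work w;
	struct drbd_device *device;
};

/* Hypothetical callback: it is handed the inner drbd_work pointer and,
 * because it knows it was queued as device work, it can recover the
 * containing drbd_device_work. */
static int w_example(struct drbd_work *w, int cancel)
{
	struct drbd_device_work *dw =
		container_of(w, struct drbd_device_work, w);
	struct drbd_device *device = dw->device;

	/* ... act on device; a nonzero cancel means the work is being
	 * flushed rather than run normally ... */
	(void)device;
	return 0;
}
```

Since container_of() is plain pointer arithmetic, the queueing and dispatch code can keep handling bare drbd_work pointers while device-bound callbacks still get their device back.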
Diffstat (limited to 'drivers/block/drbd/drbd_req.c')
-rw-r--r-- | drivers/block/drbd/drbd_req.c | 41 |
1 file changed, 23 insertions(+), 18 deletions(-)
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index f74c0a244e9..3779c8d2875 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -72,7 +72,7 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device,
 	drbd_req_make_private_bio(req, bio_src);
 	req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
-	req->w.device = device;
+	req->device = device;
 	req->master_bio = bio_src;
 	req->epoch = 0;
@@ -95,7 +95,7 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device,
 void drbd_req_destroy(struct kref *kref)
 {
 	struct drbd_request *req = container_of(kref, struct drbd_request, kref);
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	const unsigned s = req->rq_state;
 
 	if ((req->master_bio && !(s & RQ_POSTPONED)) ||
@@ -191,7 +191,7 @@ void complete_master_bio(struct drbd_device *device,
 static void drbd_remove_request_interval(struct rb_root *root,
 					 struct drbd_request *req)
 {
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	struct drbd_interval *i = &req->i;
 
 	drbd_remove_interval(root, i);
@@ -211,7 +211,7 @@ static
 void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
 {
 	const unsigned s = req->rq_state;
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	int rw;
 	int error, ok;
@@ -306,7 +306,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
 static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m,
 	int put)
 {
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));
 
 	if (!atomic_sub_and_test(put, &req->completion_ref))
@@ -329,7 +329,7 @@ static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_
 static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
 		int clear, int set)
 {
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	unsigned s = req->rq_state;
 	int c_put = 0;
 	int k_put = 0;
@@ -454,7 +454,7 @@ static void drbd_report_io_error(struct drbd_device *device, struct drbd_request
 int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		struct bio_and_error *m)
 {
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	struct net_conf *nc;
 	int p, rv = 0;
@@ -542,7 +542,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
 		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
 		req->w.cb = w_send_read_req;
-		drbd_queue_work(&first_peer_device(device)->connection->sender_work, &req->w);
+		drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+				&req->w);
 		break;
 
 	case QUEUE_FOR_NET_WRITE:
@@ -577,7 +578,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
 		mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
 		req->w.cb = w_send_dblock;
-		drbd_queue_work(&first_peer_device(device)->connection->sender_work, &req->w);
+		drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+				&req->w);
 
 		/* close the epoch, in case it outgrew the limit */
 		rcu_read_lock();
@@ -592,7 +594,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 	case QUEUE_FOR_SEND_OOS:
 		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
 		req->w.cb =  w_send_out_of_sync;
-		drbd_queue_work(&first_peer_device(device)->connection->sender_work, &req->w);
+		drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+				&req->w);
 		break;
 
 	case READ_RETRY_REMOTE_CANCELED:
@@ -704,7 +707,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
 		get_ldev(device); /* always succeeds in this call path */
 		req->w.cb = w_restart_disk_io;
-		drbd_queue_work(&first_peer_device(device)->connection->sender_work, &req->w);
+		drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+				&req->w);
 		break;
 
 	case RESEND:
@@ -720,12 +724,13 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		   Throwing them out of the TL here by pretending we got a BARRIER_ACK.
 		   During connection handshake, we ensure that the peer was not rebooted. */
 		if (!(req->rq_state & RQ_NET_OK)) {
-			/* FIXME could this possibly be a req->w.cb == w_send_out_of_sync?
+			/* FIXME could this possibly be a req->dw.cb == w_send_out_of_sync?
 			 * in that case we must not set RQ_NET_PENDING. */
 
 			mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
 			if (req->w.cb) {
-				drbd_queue_work(&first_peer_device(device)->connection->sender_work, &req->w);
+				drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+						&req->w);
 				rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
 			} /* else: FIXME can this happen? */
 			break;
@@ -835,7 +840,7 @@ static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t se
 static void complete_conflicting_writes(struct drbd_request *req)
 {
 	DEFINE_WAIT(wait);
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	struct drbd_interval *i;
 	sector_t sector = req->i.sector;
 	int size = req->i.size;
@@ -915,7 +920,7 @@ static void maybe_pull_ahead(struct drbd_device *device)
  */
 static bool do_remote_read(struct drbd_request *req)
 {
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	enum drbd_read_balancing rbm;
 
 	if (req->private_bio) {
@@ -960,7 +965,7 @@ static bool do_remote_read(struct drbd_request *req)
  * which does NOT include those that we are L_AHEAD for. */
 static int drbd_process_write_request(struct drbd_request *req)
 {
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	int remote, send_oos;
 
 	remote = drbd_should_do_remote(device->state);
@@ -997,7 +1002,7 @@ static int drbd_process_write_request(struct drbd_request *req)
 
 static void drbd_submit_req_private_bio(struct drbd_request *req)
 {
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	struct bio *bio = req->private_bio;
 	const int rw = bio_rw(bio);
@@ -1390,7 +1395,7 @@ void request_timer_fn(unsigned long data)
 			drbd_warn(device, "Remote failed to finish a request within ko-count * timeout\n");
 			_drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
 		}
-	if (dt && req->rq_state & RQ_LOCAL_PENDING && req->w.device == device &&
+	if (dt && req->rq_state & RQ_LOCAL_PENDING && req->device == device &&
 	    time_after(now, req->start_time + dt) &&
 	    !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
 		drbd_warn(device, "Local backing device failed to meet the disk-timeout\n");
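The mechanical change repeated in the hunks above is that a request now reaches its device through its own back-pointer, req->device, instead of going through the embedded work item as req->w.device. A minimal sketch of the before/after access, with struct drbd_request cut down to the two relevant members (field order illustrative) and the helper req_device() invented for illustration:

```c
struct drbd_device;			/* opaque here */

struct drbd_work {
	int (*cb)(struct drbd_work *, int cancel);
	/* pre-patch, a struct drbd_device *device back-pointer lived here */
};

struct drbd_request {
	struct drbd_device *device;	/* post-patch: back-pointer on the request */
	struct drbd_work w;		/* embedded generic work item */
	/* ... master_bio, rq_state, and the rest elided ... */
};

static struct drbd_device *req_device(const struct drbd_request *req)
{
	return req->device;		/* was: req->w.device */
}
```

Moving the back-pointer onto the request itself is what allows the generic drbd_work to stay free of any device reference, as the commit message notes.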