author    | Philipp Reisner <philipp.reisner@linbit.com> | 2012-11-09 14:18:43 +0100
committer | Philipp Reisner <philipp.reisner@linbit.com> | 2012-11-09 14:20:23 +0100
commit    | 986836503e49ccf7e84b813715d344964ec93566
tree      | b3bea7428efde5b77096cef80e5b6bfee494cc12 /drivers/block/drbd/drbd_req.c
parent    | ccae7868b0c5697508a541c531cf96b361d62c1c
parent    | 328e0f125bf41f4f33f684db22015f92cb44fe56
Merge branch 'drbd-8.4_ed6' into for-3.8-drivers-drbd-8.4_ed6
Diffstat (limited to 'drivers/block/drbd/drbd_req.c')
-rw-r--r-- | drivers/block/drbd/drbd_req.c | 1569
1 file changed, 755 insertions, 814 deletions
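Most of the diff below converts DRBD's request bookkeeping from ad-hoc rq_state checks (the old _req_is_done()/_req_may_be_done() paths) to an explicit two-counter scheme: an atomic completion_ref counting the reasons the master bio cannot be completed yet, plus a kref that keeps the request object alive until its last user is gone (see drbd_req_new(), mod_rq_state(), drbd_req_put_completion_ref() and drbd_req_destroy() in the hunks that follow). The sketch below illustrates that pattern in isolation; it is a simplified, hypothetical example (struct my_request and its helpers are not DRBD code), shown only to make the reference-counting flow in the diff easier to follow.

```c
/*
 * Minimal sketch of the two-counter lifetime pattern used in the diff
 * below (completion_ref + kref).  Names here are hypothetical, not DRBD's.
 */
#include <linux/kref.h>
#include <linux/atomic.h>
#include <linux/slab.h>

struct my_request {
	struct kref kref;		/* object lifetime */
	atomic_t completion_ref;	/* reasons the upper-layer bio may not complete yet */
};

static void my_request_destroy(struct kref *kref)
{
	struct my_request *req = container_of(kref, struct my_request, kref);
	kfree(req);
}

static struct my_request *my_request_new(void)
{
	struct my_request *req = kzalloc(sizeof(*req), GFP_NOIO);
	if (!req)
		return NULL;
	/* one completion reference held by the submit path ... */
	atomic_set(&req->completion_ref, 1);
	/* ... and one kref held as long as completion_ref > 0 */
	kref_init(&req->kref);
	return req;
}

/* Drop 'put' completion references; complete and release on the last one. */
static void my_request_put(struct my_request *req, int put)
{
	if (!atomic_sub_and_test(put, &req->completion_ref))
		return;
	/* ... complete the master bio here ... */
	kref_put(&req->kref, my_request_destroy);
}
```

In the patch itself, mod_rq_state() is intended to be the single place that takes and drops these references as rq_state bits are set and cleared, with drbd_req_put_completion_ref() deciding between completion and a postponed retry.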
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 135ea76ed50..f58a4a4b4df 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -31,6 +31,8 @@ #include "drbd_req.h" +static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size); + /* Update disk stats at start of I/O request */ static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req, struct bio *bio) { @@ -40,6 +42,8 @@ static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req part_round_stats(cpu, &mdev->vdisk->part0); part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]); part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio)); + (void) cpu; /* The macro invocations above want the cpu argument, I do not like + the compiler warning about cpu only assigned but never used... */ part_inc_in_flight(&mdev->vdisk->part0, rw); part_stat_unlock(); } @@ -57,9 +61,51 @@ static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req) part_stat_unlock(); } -static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const int rw) +static struct drbd_request *drbd_req_new(struct drbd_conf *mdev, + struct bio *bio_src) +{ + struct drbd_request *req; + + req = mempool_alloc(drbd_request_mempool, GFP_NOIO); + if (!req) + return NULL; + + drbd_req_make_private_bio(req, bio_src); + req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0; + req->w.mdev = mdev; + req->master_bio = bio_src; + req->epoch = 0; + + drbd_clear_interval(&req->i); + req->i.sector = bio_src->bi_sector; + req->i.size = bio_src->bi_size; + req->i.local = true; + req->i.waiting = false; + + INIT_LIST_HEAD(&req->tl_requests); + INIT_LIST_HEAD(&req->w.list); + + /* one reference to be put by __drbd_make_request */ + atomic_set(&req->completion_ref, 1); + /* one kref as long as completion_ref > 0 */ + kref_init(&req->kref); + return req; +} + +void drbd_req_destroy(struct kref *kref) { - const unsigned long s = req->rq_state; + struct drbd_request *req = container_of(kref, struct drbd_request, kref); + struct drbd_conf *mdev = req->w.mdev; + const unsigned s = req->rq_state; + + if ((req->master_bio && !(s & RQ_POSTPONED)) || + atomic_read(&req->completion_ref) || + (s & RQ_LOCAL_PENDING) || + ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) { + dev_err(DEV, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n", + s, atomic_read(&req->completion_ref)); + return; + } /* remove it from the transfer log. * well, only if it had been there in the first @@ -67,24 +113,33 @@ static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const * and never sent), it should still be "empty" as * initialized in drbd_req_new(), so we can list_del() it * here unconditionally */ - list_del(&req->tl_requests); + list_del_init(&req->tl_requests); /* if it was a write, we may have to set the corresponding * bit(s) out-of-sync first. If it had a local part, we need to * release the reference to the activity log. */ - if (rw == WRITE) { + if (s & RQ_WRITE) { /* Set out-of-sync unless both OK flags are set * (local only or remote failed). 
* Other places where we set out-of-sync: * READ with local io-error */ - if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK)) - drbd_set_out_of_sync(mdev, req->sector, req->size); - if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS)) - drbd_set_in_sync(mdev, req->sector, req->size); + /* There is a special case: + * we may notice late that IO was suspended, + * and postpone, or schedule for retry, a write, + * before it even was submitted or sent. + * In that case we do not want to touch the bitmap at all. + */ + if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) { + if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK)) + drbd_set_out_of_sync(mdev, req->i.sector, req->i.size); + + if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS)) + drbd_set_in_sync(mdev, req->i.sector, req->i.size); + } /* one might be tempted to move the drbd_al_complete_io - * to the local io completion callback drbd_endio_pri. + * to the local io completion callback drbd_request_endio. * but, if this was a mirror write, we may only * drbd_al_complete_io after this is RQ_NET_DONE, * otherwise the extent could be dropped from the al @@ -93,109 +148,35 @@ static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const * but after the extent has been dropped from the al, * we would forget to resync the corresponding extent. */ - if (s & RQ_LOCAL_MASK) { + if (s & RQ_IN_ACT_LOG) { if (get_ldev_if_state(mdev, D_FAILED)) { - if (s & RQ_IN_ACT_LOG) - drbd_al_complete_io(mdev, req->sector); + drbd_al_complete_io(mdev, &req->i); put_ldev(mdev); } else if (__ratelimit(&drbd_ratelimit_state)) { - dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu), " - "but my Disk seems to have failed :(\n", - (unsigned long long) req->sector); + dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu, %u), " + "but my Disk seems to have failed :(\n", + (unsigned long long) req->i.sector, req->i.size); } } } - drbd_req_free(req); + mempool_free(req, drbd_request_mempool); } -static void queue_barrier(struct drbd_conf *mdev) -{ - struct drbd_tl_epoch *b; - - /* We are within the req_lock. Once we queued the barrier for sending, - * we set the CREATE_BARRIER bit. It is cleared as soon as a new - * barrier/epoch object is added. This is the only place this bit is - * set. It indicates that the barrier for this epoch is already queued, - * and no new epoch has been created yet. */ - if (drbd_test_flag(mdev, CREATE_BARRIER)) - return; - - b = mdev->newest_tle; - b->w.cb = w_send_barrier; - /* inc_ap_pending done here, so we won't - * get imbalanced on connection loss. - * dec_ap_pending will be done in got_BarrierAck - * or (on connection loss) in tl_clear. */ - inc_ap_pending(mdev); - drbd_queue_work(&mdev->data.work, &b->w); - drbd_set_flag(mdev, CREATE_BARRIER); +static void wake_all_senders(struct drbd_tconn *tconn) { + wake_up(&tconn->sender_work.q_wait); } -static void _about_to_complete_local_write(struct drbd_conf *mdev, - struct drbd_request *req) +/* must hold resource->req_lock */ +static void start_new_tl_epoch(struct drbd_tconn *tconn) { - const unsigned long s = req->rq_state; - struct drbd_request *i; - struct drbd_epoch_entry *e; - struct hlist_node *n; - struct hlist_head *slot; - - /* Before we can signal completion to the upper layers, - * we may need to close the current epoch. 
- * We can skip this, if this request has not even been sent, because we - * did not have a fully established connection yet/anymore, during - * bitmap exchange, or while we are C_AHEAD due to congestion policy. - */ - if (mdev->state.conn >= C_CONNECTED && - (s & RQ_NET_SENT) != 0 && - req->epoch == mdev->newest_tle->br_number) - queue_barrier(mdev); - - /* we need to do the conflict detection stuff, - * if we have the ee_hash (two_primaries) and - * this has been on the network */ - if ((s & RQ_NET_DONE) && mdev->ee_hash != NULL) { - const sector_t sector = req->sector; - const int size = req->size; - - /* ASSERT: - * there must be no conflicting requests, since - * they must have been failed on the spot */ -#define OVERLAPS overlaps(sector, size, i->sector, i->size) - slot = tl_hash_slot(mdev, sector); - hlist_for_each_entry(i, n, slot, collision) { - if (OVERLAPS) { - dev_alert(DEV, "LOGIC BUG: completed: %p %llus +%u; " - "other: %p %llus +%u\n", - req, (unsigned long long)sector, size, - i, (unsigned long long)i->sector, i->size); - } - } + /* no point closing an epoch, if it is empty, anyways. */ + if (tconn->current_tle_writes == 0) + return; - /* maybe "wake" those conflicting epoch entries - * that wait for this request to finish. - * - * currently, there can be only _one_ such ee - * (well, or some more, which would be pending - * P_DISCARD_ACK not yet sent by the asender...), - * since we block the receiver thread upon the - * first conflict detection, which will wait on - * misc_wait. maybe we want to assert that? - * - * anyways, if we found one, - * we just have to do a wake_up. */ -#undef OVERLAPS -#define OVERLAPS overlaps(sector, size, e->sector, e->size) - slot = ee_hash_slot(mdev, req->sector); - hlist_for_each_entry(e, n, slot, collision) { - if (OVERLAPS) { - wake_up(&mdev->misc_wait); - break; - } - } - } -#undef OVERLAPS + tconn->current_tle_writes = 0; + atomic_inc(&tconn->current_tle_nr); + wake_all_senders(tconn); } void complete_master_bio(struct drbd_conf *mdev, @@ -205,17 +186,33 @@ void complete_master_bio(struct drbd_conf *mdev, dec_ap_bio(mdev); } + +static void drbd_remove_request_interval(struct rb_root *root, + struct drbd_request *req) +{ + struct drbd_conf *mdev = req->w.mdev; + struct drbd_interval *i = &req->i; + + drbd_remove_interval(root, i); + + /* Wake up any processes waiting for this request to complete. */ + if (i->waiting) + wake_up(&mdev->misc_wait); +} + /* Helper for __req_mod(). * Set m->bio to the master bio, if it is fit to be completed, * or leave it alone (it is initialized to NULL in __req_mod), * if it has already been completed, or cannot be completed yet. * If m->bio is set, the error status to be returned is placed in m->error. */ -void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m) +static +void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m) { - const unsigned long s = req->rq_state; - struct drbd_conf *mdev = req->mdev; - int rw = req->rq_state & RQ_WRITE ? WRITE : READ; + const unsigned s = req->rq_state; + struct drbd_conf *mdev = req->w.mdev; + int rw; + int error, ok; /* we must not complete the master bio, while it is * still being processed by _drbd_send_zc_bio (drbd_send_dblock) @@ -226,178 +223,219 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m) * the receiver, * the bio_endio completion callbacks. 
*/ - if (s & RQ_NET_QUEUED) - return; - if (s & RQ_NET_PENDING) + if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) || + (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) || + (s & RQ_COMPLETION_SUSP)) { + dev_err(DEV, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s); return; - if (s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) + } + + if (!req->master_bio) { + dev_err(DEV, "drbd_req_complete: Logic BUG, master_bio == NULL!\n"); return; + } - if (req->master_bio) { - /* this is data_received (remote read) - * or protocol C P_WRITE_ACK - * or protocol B P_RECV_ACK - * or protocol A "handed_over_to_network" (SendAck) - * or canceled or failed, - * or killed from the transfer log due to connection loss. - */ + rw = bio_rw(req->master_bio); - /* - * figure out whether to report success or failure. - * - * report success when at least one of the operations succeeded. - * or, to put the other way, - * only report failure, when both operations failed. - * - * what to do about the failures is handled elsewhere. - * what we need to do here is just: complete the master_bio. - * - * local completion error, if any, has been stored as ERR_PTR - * in private_bio within drbd_endio_pri. - */ - int ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK); - int error = PTR_ERR(req->private_bio); + /* + * figure out whether to report success or failure. + * + * report success when at least one of the operations succeeded. + * or, to put the other way, + * only report failure, when both operations failed. + * + * what to do about the failures is handled elsewhere. + * what we need to do here is just: complete the master_bio. + * + * local completion error, if any, has been stored as ERR_PTR + * in private_bio within drbd_request_endio. + */ + ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK); + error = PTR_ERR(req->private_bio); - /* remove the request from the conflict detection - * respective block_id verification hash */ - if (!hlist_unhashed(&req->collision)) - hlist_del(&req->collision); - else - D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0); + /* remove the request from the conflict detection + * respective block_id verification hash */ + if (!drbd_interval_empty(&req->i)) { + struct rb_root *root; - /* for writes we need to do some extra housekeeping */ if (rw == WRITE) - _about_to_complete_local_write(mdev, req); + root = &mdev->write_requests; + else + root = &mdev->read_requests; + drbd_remove_request_interval(root, req); + } else if (!(s & RQ_POSTPONED)) + D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0); - /* Update disk stats */ - _drbd_end_io_acct(mdev, req); + /* Before we can signal completion to the upper layers, + * we may need to close the current transfer log epoch. + * We are within the request lock, so we can simply compare + * the request epoch number with the current transfer log + * epoch number. If they match, increase the current_tle_nr, + * and reset the transfer log epoch write_cnt. + */ + if (rw == WRITE && + req->epoch == atomic_read(&mdev->tconn->current_tle_nr)) + start_new_tl_epoch(mdev->tconn); + + /* Update disk stats */ + _drbd_end_io_acct(mdev, req); + + /* If READ failed, + * have it be pushed back to the retry work queue, + * so it will re-enter __drbd_make_request(), + * and be re-assigned to a suitable local or remote path, + * or failed if we do not have access to good data anymore. + * + * Unless it was failed early by __drbd_make_request(), + * because no path was available, in which case + * it was not even added to the transfer_log. 
+ * + * READA may fail, and will not be retried. + * + * WRITE should have used all available paths already. + */ + if (!ok && rw == READ && !list_empty(&req->tl_requests)) + req->rq_state |= RQ_POSTPONED; + if (!(req->rq_state & RQ_POSTPONED)) { m->error = ok ? 0 : (error ?: -EIO); m->bio = req->master_bio; req->master_bio = NULL; } +} - if (s & RQ_LOCAL_PENDING) - return; +static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put) +{ + struct drbd_conf *mdev = req->w.mdev; + D_ASSERT(m || (req->rq_state & RQ_POSTPONED)); + + if (!atomic_sub_and_test(put, &req->completion_ref)) + return 0; - if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) { - /* this is disconnected (local only) operation, - * or protocol C P_WRITE_ACK, - * or protocol A or B P_BARRIER_ACK, - * or killed from the transfer log due to connection loss. */ - _req_is_done(mdev, req, rw); + drbd_req_complete(req, m); + + if (req->rq_state & RQ_POSTPONED) { + /* don't destroy the req object just yet, + * but queue it for retry */ + drbd_restart_request(req); + return 0; } - /* else: network part and not DONE yet. that is - * protocol A or B, barrier ack still pending... */ + + return 1; } -static void _req_may_be_done_not_susp(struct drbd_request *req, struct bio_and_error *m) +/* I'd like this to be the only place that manipulates + * req->completion_ref and req->kref. */ +static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m, + int clear, int set) { - struct drbd_conf *mdev = req->mdev; + struct drbd_conf *mdev = req->w.mdev; + unsigned s = req->rq_state; + int c_put = 0; + int k_put = 0; - if (!is_susp(mdev->state)) - _req_may_be_done(req, m); -} + if (drbd_suspended(mdev) && !((s | clear) & RQ_COMPLETION_SUSP)) + set |= RQ_COMPLETION_SUSP; -/* - * checks whether there was an overlapping request - * or ee already registered. - * - * if so, return 1, in which case this request is completed on the spot, - * without ever being submitted or send. - * - * return 0 if it is ok to submit this request. - * - * NOTE: - * paranoia: assume something above us is broken, and issues different write - * requests for the same block simultaneously... - * - * To ensure these won't be reordered differently on both nodes, resulting in - * diverging data sets, we discard the later one(s). Not that this is supposed - * to happen, but this is the rationale why we also have to check for - * conflicting requests with local origin, and why we have to do so regardless - * of whether we allowed multiple primaries. - * - * BTW, in case we only have one primary, the ee_hash is empty anyways, and the - * second hlist_for_each_entry becomes a noop. This is even simpler than to - * grab a reference on the net_conf, and check for the two_primaries flag... - */ -static int _req_conflicts(struct drbd_request *req) -{ - struct drbd_conf *mdev = req->mdev; - const sector_t sector = req->sector; - const int size = req->size; - struct drbd_request *i; - struct drbd_epoch_entry *e; - struct hlist_node *n; - struct hlist_head *slot; + /* apply */ - D_ASSERT(hlist_unhashed(&req->collision)); + req->rq_state &= ~clear; + req->rq_state |= set; - if (!get_net_conf(mdev)) - return 0; + /* no change? 
*/ + if (req->rq_state == s) + return; - /* BUG_ON */ - ERR_IF (mdev->tl_hash_s == 0) - goto out_no_conflict; - BUG_ON(mdev->tl_hash == NULL); - -#define OVERLAPS overlaps(i->sector, i->size, sector, size) - slot = tl_hash_slot(mdev, sector); - hlist_for_each_entry(i, n, slot, collision) { - if (OVERLAPS) { - dev_alert(DEV, "%s[%u] Concurrent local write detected! " - "[DISCARD L] new: %llus +%u; " - "pending: %llus +%u\n", - current->comm, current->pid, - (unsigned long long)sector, size, - (unsigned long long)i->sector, i->size); - goto out_conflict; - } + /* intent: get references */ + + if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING)) + atomic_inc(&req->completion_ref); + + if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) { + inc_ap_pending(mdev); + atomic_inc(&req->completion_ref); } - if (mdev->ee_hash_s) { - /* now, check for overlapping requests with remote origin */ - BUG_ON(mdev->ee_hash == NULL); -#undef OVERLAPS -#define OVERLAPS overlaps(e->sector, e->size, sector, size) - slot = ee_hash_slot(mdev, sector); - hlist_for_each_entry(e, n, slot, collision) { - if (OVERLAPS) { - dev_alert(DEV, "%s[%u] Concurrent remote write detected!" - " [DISCARD L] new: %llus +%u; " - "pending: %llus +%u\n", - current->comm, current->pid, - (unsigned long long)sector, size, - (unsigned long long)e->sector, e->size); - goto out_conflict; - } - } + if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED)) + atomic_inc(&req->completion_ref); + + if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK)) + kref_get(&req->kref); /* wait for the DONE */ + + if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT)) + atomic_add(req->i.size >> 9, &mdev->ap_in_flight); + + if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP)) + atomic_inc(&req->completion_ref); + + /* progress: put references */ + + if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP)) + ++c_put; + + if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) { + D_ASSERT(req->rq_state & RQ_LOCAL_PENDING); + /* local completion may still come in later, + * we need to keep the req object around. */ + kref_get(&req->kref); + ++c_put; } -#undef OVERLAPS -out_no_conflict: - /* this is like it should be, and what we expected. - * our users do behave after all... */ - put_net_conf(mdev); - return 0; + if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) { + if (req->rq_state & RQ_LOCAL_ABORTED) + ++k_put; + else + ++c_put; + } -out_conflict: - put_net_conf(mdev); - return 1; + if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) { + dec_ap_pending(mdev); + ++c_put; + } + + if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED)) + ++c_put; + + if ((s & RQ_EXP_BARR_ACK) && !(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) { + if (req->rq_state & RQ_NET_SENT) + atomic_sub(req->i.size >> 9, &mdev->ap_in_flight); + ++k_put; + } + + /* potentially complete and destroy */ + + if (k_put || c_put) { + /* Completion does it's own kref_put. If we are going to + * kref_sub below, we need req to be still around then. */ + int at_least = k_put + !!c_put; + int refcount = atomic_read(&req->kref.refcount); + if (refcount < at_least) + dev_err(DEV, + "mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n", + s, req->rq_state, refcount, at_least); + } + + /* If we made progress, retry conflicting peer requests, if any. 
*/ + if (req->i.waiting) + wake_up(&mdev->misc_wait); + + if (c_put) + k_put += drbd_req_put_completion_ref(req, m, c_put); + if (k_put) + kref_sub(&req->kref, k_put, drbd_req_destroy); } static void drbd_report_io_error(struct drbd_conf *mdev, struct drbd_request *req) { char b[BDEVNAME_SIZE]; - if (__ratelimit(&drbd_ratelimit_state)) + if (!__ratelimit(&drbd_ratelimit_state)) return; dev_warn(DEV, "local %s IO error sector %llu+%u on %s\n", (req->rq_state & RQ_WRITE) ? "WRITE" : "READ", - (unsigned long long)req->sector, - req->size >> 9, + (unsigned long long)req->i.sector, + req->i.size >> 9, bdevname(mdev->ldev->backing_bdev, b)); } @@ -416,9 +454,12 @@ static void drbd_report_io_error(struct drbd_conf *mdev, struct drbd_request *re int __req_mod(struct drbd_request *req, enum drbd_req_event what, struct bio_and_error *m) { - struct drbd_conf *mdev = req->mdev; - int rv = 0; - m->bio = NULL; + struct drbd_conf *mdev = req->w.mdev; + struct net_conf *nc; + int p, rv = 0; + + if (m) + m->bio = NULL; switch (what) { default: @@ -427,118 +468,91 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, /* does not happen... * initialization done in drbd_req_new - case created: + case CREATED: break; */ - case to_be_send: /* via network */ - /* reached via drbd_make_request_common + case TO_BE_SENT: /* via network */ + /* reached via __drbd_make_request * and from w_read_retry_remote */ D_ASSERT(!(req->rq_state & RQ_NET_MASK)); - req->rq_state |= RQ_NET_PENDING; - inc_ap_pending(mdev); + rcu_read_lock(); + nc = rcu_dereference(mdev->tconn->net_conf); + p = nc->wire_protocol; + rcu_read_unlock(); + req->rq_state |= + p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK : + p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0; + mod_rq_state(req, m, 0, RQ_NET_PENDING); break; - case to_be_submitted: /* locally */ - /* reached via drbd_make_request_common */ + case TO_BE_SUBMITTED: /* locally */ + /* reached via __drbd_make_request */ D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK)); - req->rq_state |= RQ_LOCAL_PENDING; + mod_rq_state(req, m, 0, RQ_LOCAL_PENDING); break; - case completed_ok: + case COMPLETED_OK: if (req->rq_state & RQ_WRITE) - mdev->writ_cnt += req->size>>9; + mdev->writ_cnt += req->i.size >> 9; else - mdev->read_cnt += req->size>>9; + mdev->read_cnt += req->i.size >> 9; - req->rq_state |= (RQ_LOCAL_COMPLETED|RQ_LOCAL_OK); - req->rq_state &= ~RQ_LOCAL_PENDING; - - _req_may_be_done_not_susp(req, m); + mod_rq_state(req, m, RQ_LOCAL_PENDING, + RQ_LOCAL_COMPLETED|RQ_LOCAL_OK); break; - case abort_disk_io: - req->rq_state |= RQ_LOCAL_ABORTED; - if (req->rq_state & RQ_WRITE) - _req_may_be_done_not_susp(req, m); - else - goto goto_queue_for_net_read; + case ABORT_DISK_IO: + mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED); break; - case write_completed_with_error: - req->rq_state |= RQ_LOCAL_COMPLETED; - req->rq_state &= ~RQ_LOCAL_PENDING; - + case WRITE_COMPLETED_WITH_ERROR: drbd_report_io_error(mdev, req); __drbd_chk_io_error(mdev, DRBD_WRITE_ERROR); - _req_may_be_done_not_susp(req, m); + mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); break; - case read_ahead_completed_with_error: - /* it is legal to fail READA */ - req->rq_state |= RQ_LOCAL_COMPLETED; - req->rq_state &= ~RQ_LOCAL_PENDING; - _req_may_be_done_not_susp(req, m); - break; - - case read_completed_with_error: - drbd_set_out_of_sync(mdev, req->sector, req->size); - - req->rq_state |= RQ_LOCAL_COMPLETED; - req->rq_state &= ~RQ_LOCAL_PENDING; - - if (req->rq_state & RQ_LOCAL_ABORTED) { - _req_may_be_done(req, m); - break; - } - + case 
READ_COMPLETED_WITH_ERROR: + drbd_set_out_of_sync(mdev, req->i.sector, req->i.size); drbd_report_io_error(mdev, req); __drbd_chk_io_error(mdev, DRBD_READ_ERROR); + /* fall through. */ + case READ_AHEAD_COMPLETED_WITH_ERROR: + /* it is legal to fail READA, no __drbd_chk_io_error in that case. */ + mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); + break; - goto_queue_for_net_read: - - D_ASSERT(!(req->rq_state & RQ_NET_MASK)); - - /* no point in retrying if there is no good remote data, - * or we have no connection. */ - if (mdev->state.pdsk != D_UP_TO_DATE) { - _req_may_be_done_not_susp(req, m); - break; - } - - /* _req_mod(req,to_be_send); oops, recursion... */ - req->rq_state |= RQ_NET_PENDING; - inc_ap_pending(mdev); - /* fall through: _req_mod(req,queue_for_net_read); */ - - case queue_for_net_read: + case QUEUE_FOR_NET_READ: /* READ or READA, and * no local disk, * or target area marked as invalid, * or just got an io-error. */ - /* from drbd_make_request_common + /* from __drbd_make_request * or from bio_endio during read io-error recovery */ - /* so we can verify the handle in the answer packet - * corresponding hlist_del is in _req_may_be_done() */ - hlist_add_head(&req->collision, ar_hash_slot(mdev, req->sector)); + /* So we can verify the handle in the answer packet. + * Corresponding drbd_remove_request_interval is in + * drbd_req_complete() */ + D_ASSERT(drbd_interval_empty(&req->i)); + drbd_insert_interval(&mdev->read_requests, &req->i); - drbd_set_flag(mdev, UNPLUG_REMOTE); + set_bit(UNPLUG_REMOTE, &mdev->flags); D_ASSERT(req->rq_state & RQ_NET_PENDING); - req->rq_state |= RQ_NET_QUEUED; - req->w.cb = (req->rq_state & RQ_LOCAL_MASK) - ? w_read_retry_remote - : w_send_read_req; - drbd_queue_work(&mdev->data.work, &req->w); + D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0); + mod_rq_state(req, m, 0, RQ_NET_QUEUED); + req->w.cb = w_send_read_req; + drbd_queue_work(&mdev->tconn->sender_work, &req->w); break; - case queue_for_net_write: + case QUEUE_FOR_NET_WRITE: /* assert something? */ - /* from drbd_make_request_common only */ + /* from __drbd_make_request only */ - hlist_add_head(&req->collision, tl_hash_slot(mdev, req->sector)); - /* corresponding hlist_del is in _req_may_be_done() */ + /* Corresponding drbd_remove_request_interval is in + * drbd_req_complete() */ + D_ASSERT(drbd_interval_empty(&req->i)); + drbd_insert_interval(&mdev->write_requests, &req->i); /* NOTE * In case the req ended up on the transfer log before being @@ -549,7 +563,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, * * _req_add_to_epoch(req); this has to be after the * _maybe_start_new_epoch(req); which happened in - * drbd_make_request_common, because we now may set the bit + * __drbd_make_request, because we now may set the bit * again ourselves to close the current epoch. * * Add req to the (now) current epoch (barrier). */ @@ -557,204 +571,189 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, /* otherwise we may lose an unplug, which may cause some remote * io-scheduler timeout to expire, increasing maximum latency, * hurting performance. 
*/ - drbd_set_flag(mdev, UNPLUG_REMOTE); - - /* see drbd_make_request_common, - * just after it grabs the req_lock */ - D_ASSERT(drbd_test_flag(mdev, CREATE_BARRIER) == 0); - - req->epoch = mdev->newest_tle->br_number; - - /* increment size of current epoch */ - mdev->newest_tle->n_writes++; + set_bit(UNPLUG_REMOTE, &mdev->flags); /* queue work item to send data */ D_ASSERT(req->rq_state & RQ_NET_PENDING); - req->rq_state |= RQ_NET_QUEUED; + mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK); req->w.cb = w_send_dblock; - drbd_queue_work(&mdev->data.work, &req->w); + drbd_queue_work(&mdev->tconn->sender_work, &req->w); /* close the epoch, in case it outgrew the limit */ - if (mdev->newest_tle->n_writes >= mdev->net_conf->max_epoch_size) - queue_barrier(mdev); + rcu_read_lock(); + nc = rcu_dereference(mdev->tconn->net_conf); + p = nc->max_epoch_size; + rcu_read_unlock(); + if (mdev->tconn->current_tle_writes >= p) + start_new_tl_epoch(mdev->tconn); break; - case queue_for_send_oos: - req->rq_state |= RQ_NET_QUEUED; - req->w.cb = w_send_oos; - drbd_queue_work(&mdev->data.work, &req->w); + case QUEUE_FOR_SEND_OOS: + mod_rq_state(req, m, 0, RQ_NET_QUEUED); + req->w.cb = w_send_out_of_sync; + drbd_queue_work(&mdev->tconn->sender_work, &req->w); break; - case read_retry_remote_canceled: - case send_canceled: - case send_failed: + case READ_RETRY_REMOTE_CANCELED: + case SEND_CANCELED: + case SEND_FAILED: /* real cleanup will be done from tl_clear. just update flags * so it is no longer marked as on the worker queue */ - req->rq_state &= ~RQ_NET_QUEUED; - /* if we did it right, tl_clear should be scheduled only after - * this, so this should not be necessary! */ - _req_may_be_done_not_susp(req, m); + mod_rq_state(req, m, RQ_NET_QUEUED, 0); break; - case handed_over_to_network: + case HANDED_OVER_TO_NETWORK: /* assert something? */ - if (bio_data_dir(req->master_bio) == WRITE) - atomic_add(req->size>>9, &mdev->ap_in_flight); - if (bio_data_dir(req->master_bio) == WRITE && - mdev->net_conf->wire_protocol == DRBD_PROT_A) { + !(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK))) { /* this is what is dangerous about protocol A: * pretend it was successfully written on the peer. */ - if (req->rq_state & RQ_NET_PENDING) { - dec_ap_pending(mdev); - req->rq_state &= ~RQ_NET_PENDING; - req->rq_state |= RQ_NET_OK; - } /* else: neg-ack was faster... */ + if (req->rq_state & RQ_NET_PENDING) + mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK); + /* else: neg-ack was faster... */ /* it is still not yet RQ_NET_DONE until the * corresponding epoch barrier got acked as well, * so we know what to dirty on connection loss */ } - req->rq_state &= ~RQ_NET_QUEUED; - req->rq_state |= RQ_NET_SENT; - _req_may_be_done_not_susp(req, m); + mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT); break; - case oos_handed_to_network: + case OOS_HANDED_TO_NETWORK: /* Was not set PENDING, no longer QUEUED, so is now DONE * as far as this connection is concerned. */ - req->rq_state &= ~RQ_NET_QUEUED; - req->rq_state |= RQ_NET_DONE; - _req_may_be_done_not_susp(req, m); + mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE); break; - case connection_lost_while_pending: + case CONNECTION_LOST_WHILE_PENDING: /* transfer log cleanup after connection loss */ - /* assert something? 
*/ - if (req->rq_state & RQ_NET_PENDING) - dec_ap_pending(mdev); - req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING); - req->rq_state |= RQ_NET_DONE; - if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE) - atomic_sub(req->size>>9, &mdev->ap_in_flight); - - /* if it is still queued, we may not complete it here. - * it will be canceled soon. */ - if (!(req->rq_state & RQ_NET_QUEUED)) - _req_may_be_done(req, m); /* Allowed while state.susp */ + mod_rq_state(req, m, + RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP, + RQ_NET_DONE); break; - case conflict_discarded_by_peer: - /* for discarded conflicting writes of multiple primaries, + case CONFLICT_RESOLVED: + /* for superseded conflicting writes of multiple primaries, * there is no need to keep anything in the tl, potential - * node crashes are covered by the activity log. */ - if (what == conflict_discarded_by_peer) - dev_alert(DEV, "Got DiscardAck packet %llus +%u!" - " DRBD is not a random data generator!\n", - (unsigned long long)req->sector, req->size); - req->rq_state |= RQ_NET_DONE; - /* fall through */ - case write_acked_by_peer_and_sis: - case write_acked_by_peer: - if (what == write_acked_by_peer_and_sis) - req->rq_state |= RQ_NET_SIS; + * node crashes are covered by the activity log. + * + * If this request had been marked as RQ_POSTPONED before, + * it will actually not be completed, but "restarted", + * resubmitted from the retry worker context. */ + D_ASSERT(req->rq_state & RQ_NET_PENDING); + D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK); + mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK); + break; + + case WRITE_ACKED_BY_PEER_AND_SIS: + req->rq_state |= RQ_NET_SIS; + case WRITE_ACKED_BY_PEER: + D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK); /* protocol C; successfully written on peer. * Nothing more to do here. * We want to keep the tl in place for all protocols, to cater * for volatile write-back caches on lower level devices. */ - case recv_acked_by_peer: + goto ack_common; + case RECV_ACKED_BY_PEER: + D_ASSERT(req->rq_state & RQ_EXP_RECEIVE_ACK); /* protocol B; pretends to be successfully written on peer. - * see also notes above in handed_over_to_network about + * see also notes above in HANDED_OVER_TO_NETWORK about * protocol != C */ - req->rq_state |= RQ_NET_OK; + ack_common: D_ASSERT(req->rq_state & RQ_NET_PENDING); - dec_ap_pending(mdev); - atomic_sub(req->size>>9, &mdev->ap_in_flight); - req->rq_state &= ~RQ_NET_PENDING; - _req_may_be_done_not_susp(req, m); + mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK); break; - case neg_acked: - /* assert something? */ - if (req->rq_state & RQ_NET_PENDING) { - dec_ap_pending(mdev); - atomic_sub(req->size>>9, &mdev->ap_in_flight); - } - req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING); + case POSTPONE_WRITE: + D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK); + /* If this node has already detected the write conflict, the + * worker will be waiting on misc_wait. Wake it up once this + * request has completed locally. + */ + D_ASSERT(req->rq_state & RQ_NET_PENDING); + req->rq_state |= RQ_POSTPONED; + if (req->i.waiting) + wake_up(&mdev->misc_wait); + /* Do not clear RQ_NET_PENDING. This request will make further + * progress via restart_conflicting_writes() or + * fail_postponed_requests(). Hopefully. 
*/ + break; - req->rq_state |= RQ_NET_DONE; - _req_may_be_done_not_susp(req, m); - /* else: done by handed_over_to_network */ + case NEG_ACKED: + mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0); break; - case fail_frozen_disk_io: + case FAIL_FROZEN_DISK_IO: if (!(req->rq_state & RQ_LOCAL_COMPLETED)) break; - - _req_may_be_done(req, m); /* Allowed while state.susp */ + mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0); break; - case restart_frozen_disk_io: + case RESTART_FROZEN_DISK_IO: if (!(req->rq_state & RQ_LOCAL_COMPLETED)) break; - req->rq_state &= ~RQ_LOCAL_COMPLETED; + mod_rq_state(req, m, + RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED, + RQ_LOCAL_PENDING); rv = MR_READ; if (bio_data_dir(req->master_bio) == WRITE) rv = MR_WRITE; - get_ldev(mdev); + get_ldev(mdev); /* always succeeds in this call path */ req->w.cb = w_restart_disk_io; - drbd_queue_work(&mdev->data.work, &req->w); + drbd_queue_work(&mdev->tconn->sender_work, &req->w); break; - case resend: + case RESEND: /* Simply complete (local only) READs. */ if (!(req->rq_state & RQ_WRITE) && !req->w.cb) { - _req_may_be_done(req, m); + mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0); break; } /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK - before the connection loss (B&C only); only P_BARRIER_ACK was missing. - Trowing them out of the TL here by pretending we got a BARRIER_ACK - We ensure that the peer was not rebooted */ + before the connection loss (B&C only); only P_BARRIER_ACK + (or the local completion?) was missing when we suspended. + Throwing them out of the TL here by pretending we got a BARRIER_ACK. + During connection handshake, we ensure that the peer was not rebooted. */ if (!(req->rq_state & RQ_NET_OK)) { + /* FIXME could this possibly be a req->w.cb == w_send_out_of_sync? + * in that case we must not set RQ_NET_PENDING. */ + + mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING); if (req->w.cb) { - drbd_queue_work(&mdev->data.work, &req->w); + drbd_queue_work(&mdev->tconn->sender_work, &req->w); rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ; - } + } /* else: FIXME can this happen? */ break; } - /* else, fall through to barrier_acked */ + /* else, fall through to BARRIER_ACKED */ - case barrier_acked: + case BARRIER_ACKED: + /* barrier ack for READ requests does not make sense */ if (!(req->rq_state & RQ_WRITE)) break; if (req->rq_state & RQ_NET_PENDING) { - /* barrier came in before all requests have been acked. + /* barrier came in before all requests were acked. * this is bad, because if the connection is lost now, * we won't be able to clean them up... */ - dev_err(DEV, "FIXME (barrier_acked but pending)\n"); - list_move(&req->tl_requests, &mdev->out_of_sequence_requests); + dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n"); } - if ((req->rq_state & RQ_NET_MASK) != 0) { - req->rq_state |= RQ_NET_DONE; - if (mdev->net_conf->wire_protocol == DRBD_PROT_A) - atomic_sub(req->size>>9, &mdev->ap_in_flight); - } - _req_may_be_done(req, m); /* Allowed while state.susp */ + /* Allowed to complete requests, even while suspended. + * As this is called for all requests within a matching epoch, + * we need to filter, and only set RQ_NET_DONE for those that + * have actually been on the wire. */ + mod_rq_state(req, m, RQ_COMPLETION_SUSP, + (req->rq_state & RQ_NET_MASK) ? 
RQ_NET_DONE : 0); break; - case data_received: + case DATA_RECEIVED: D_ASSERT(req->rq_state & RQ_NET_PENDING); - dec_ap_pending(mdev); - req->rq_state &= ~RQ_NET_PENDING; - req->rq_state |= (RQ_NET_OK|RQ_NET_DONE); - _req_may_be_done_not_susp(req, m); + mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE); break; }; @@ -768,75 +767,265 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, * since size may be bigger than BM_BLOCK_SIZE, * we may need to check several bits. */ -static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size) +static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size) { unsigned long sbnr, ebnr; sector_t esector, nr_sectors; if (mdev->state.disk == D_UP_TO_DATE) - return 1; - if (mdev->state.disk >= D_OUTDATED) - return 0; - if (mdev->state.disk < D_INCONSISTENT) - return 0; - /* state.disk == D_INCONSISTENT We will have a look at the BitMap */ - nr_sectors = drbd_get_capacity(mdev->this_bdev); + return true; + if (mdev->state.disk != D_INCONSISTENT) + return false; esector = sector + (size >> 9) - 1; - + nr_sectors = drbd_get_capacity(mdev->this_bdev); D_ASSERT(sector < nr_sectors); D_ASSERT(esector < nr_sectors); sbnr = BM_SECT_TO_BIT(sector); ebnr = BM_SECT_TO_BIT(esector); - return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr); + return drbd_bm_count_bits(mdev, sbnr, ebnr) == 0; +} + +static bool remote_due_to_read_balancing(struct drbd_conf *mdev, sector_t sector, + enum drbd_read_balancing rbm) +{ + struct backing_dev_info *bdi; + int stripe_shift; + + switch (rbm) { + case RB_CONGESTED_REMOTE: + bdi = &mdev->ldev->backing_bdev->bd_disk->queue->backing_dev_info; + return bdi_read_congested(bdi); + case RB_LEAST_PENDING: + return atomic_read(&mdev->local_cnt) > + atomic_read(&mdev->ap_pending_cnt) + atomic_read(&mdev->rs_pending_cnt); + case RB_32K_STRIPING: /* stripe_shift = 15 */ + case RB_64K_STRIPING: + case RB_128K_STRIPING: + case RB_256K_STRIPING: + case RB_512K_STRIPING: + case RB_1M_STRIPING: /* stripe_shift = 20 */ + stripe_shift = (rbm - RB_32K_STRIPING + 15); + return (sector >> (stripe_shift - 9)) & 1; + case RB_ROUND_ROBIN: + return test_and_change_bit(READ_BALANCE_RR, &mdev->flags); + case RB_PREFER_REMOTE: + return true; + case RB_PREFER_LOCAL: + default: + return false; + } +} + +/* + * complete_conflicting_writes - wait for any conflicting write requests + * + * The write_requests tree contains all active write requests which we + * currently know about. Wait for any requests to complete which conflict with + * the new one. + * + * Only way out: remove the conflicting intervals from the tree. + */ +static void complete_conflicting_writes(struct drbd_request *req) +{ + DEFINE_WAIT(wait); + struct drbd_conf *mdev = req->w.mdev; + struct drbd_interval *i; + sector_t sector = req->i.sector; + int size = req->i.size; + + i = drbd_find_overlap(&mdev->write_requests, sector, size); + if (!i) + return; + + for (;;) { + prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE); + i = drbd_find_overlap(&mdev->write_requests, sector, size); + if (!i) + break; + /* Indicate to wake up device->misc_wait on progress. 
*/ + i->waiting = true; + spin_unlock_irq(&mdev->tconn->req_lock); + schedule(); + spin_lock_irq(&mdev->tconn->req_lock); + } + finish_wait(&mdev->misc_wait, &wait); } +/* called within req_lock and rcu_read_lock() */ static void maybe_pull_ahead(struct drbd_conf *mdev) { - int congested = 0; + struct drbd_tconn *tconn = mdev->tconn; + struct net_conf *nc; + bool congested = false; + enum drbd_on_congestion on_congestion; + + nc = rcu_dereference(tconn->net_conf); + on_congestion = nc ? nc->on_congestion : OC_BLOCK; + if (on_congestion == OC_BLOCK || + tconn->agreed_pro_version < 96) + return; /* If I don't even have good local storage, we can not reasonably try * to pull ahead of the peer. We also need the local reference to make * sure mdev->act_log is there. - * Note: caller has to make sure that net_conf is there. */ if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) return; - if (mdev->net_conf->cong_fill && - atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) { + if (nc->cong_fill && + atomic_read(&mdev->ap_in_flight) >= nc->cong_fill) { dev_info(DEV, "Congestion-fill threshold reached\n"); - congested = 1; + congested = true; } - if (mdev->act_log->used >= mdev->net_conf->cong_extents) { + if (mdev->act_log->used >= nc->cong_extents) { dev_info(DEV, "Congestion-extents threshold reached\n"); - congested = 1; + congested = true; } if (congested) { - queue_barrier(mdev); /* last barrier, after mirrored writes */ + /* start a new epoch for non-mirrored writes */ + start_new_tl_epoch(mdev->tconn); - if (mdev->net_conf->on_congestion == OC_PULL_AHEAD) + if (on_congestion == OC_PULL_AHEAD) _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL); - else /*mdev->net_conf->on_congestion == OC_DISCONNECT */ + else /*nc->on_congestion == OC_DISCONNECT */ _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL); } put_ldev(mdev); } -static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time) +/* If this returns false, and req->private_bio is still set, + * this should be submitted locally. + * + * If it returns false, but req->private_bio is not set, + * we do not have access to good data :( + * + * Otherwise, this destroys req->private_bio, if any, + * and returns true. + */ +static bool do_remote_read(struct drbd_request *req) +{ + struct drbd_conf *mdev = req->w.mdev; + enum drbd_read_balancing rbm; + + if (req->private_bio) { + if (!drbd_may_do_local_read(mdev, + req->i.sector, req->i.size)) { + bio_put(req->private_bio); + req->private_bio = NULL; + put_ldev(mdev); + } + } + + if (mdev->state.pdsk != D_UP_TO_DATE) + return false; + + if (req->private_bio == NULL) + return true; + + /* TODO: improve read balancing decisions, take into account drbd + * protocol, pending requests etc. */ + + rcu_read_lock(); + rbm = rcu_dereference(mdev->ldev->disk_conf)->read_balancing; + rcu_read_unlock(); + + if (rbm == RB_PREFER_LOCAL && req->private_bio) + return false; /* submit locally */ + + if (remote_due_to_read_balancing(mdev, req->i.sector, rbm)) { + if (req->private_bio) { + bio_put(req->private_bio); + req->private_bio = NULL; + put_ldev(mdev); + } + return true; + } + + return false; +} + +/* returns number of connections (== 1, for drbd 8.4) + * expected to actually write this data, + * which does NOT include those that we are L_AHEAD for. 
*/ +static int drbd_process_write_request(struct drbd_request *req) +{ + struct drbd_conf *mdev = req->w.mdev; + int remote, send_oos; + + rcu_read_lock(); + remote = drbd_should_do_remote(mdev->state); + if (remote) { + maybe_pull_ahead(mdev); + remote = drbd_should_do_remote(mdev->state); + } + send_oos = drbd_should_send_out_of_sync(mdev->state); + rcu_read_unlock(); + + /* Need to replicate writes. Unless it is an empty flush, + * which is better mapped to a DRBD P_BARRIER packet, + * also for drbd wire protocol compatibility reasons. + * If this was a flush, just start a new epoch. + * Unless the current epoch was empty anyways, or we are not currently + * replicating, in which case there is no point. */ + if (unlikely(req->i.size == 0)) { + /* The only size==0 bios we expect are empty flushes. */ + D_ASSERT(req->master_bio->bi_rw & REQ_FLUSH); + if (remote) + start_new_tl_epoch(mdev->tconn); + return 0; + } + + if (!remote && !send_oos) + return 0; + + D_ASSERT(!(remote && send_oos)); + + if (remote) { + _req_mod(req, TO_BE_SENT); + _req_mod(req, QUEUE_FOR_NET_WRITE); + } else if (drbd_set_out_of_sync(mdev, req->i.sector, req->i.size)) + _req_mod(req, QUEUE_FOR_SEND_OOS); + + return remote; +} + +static void +drbd_submit_req_private_bio(struct drbd_request *req) +{ + struct drbd_conf *mdev = req->w.mdev; + struct bio *bio = req->private_bio; + const int rw = bio_rw(bio); + + bio->bi_bdev = mdev->ldev->backing_bdev; + + /* State may have changed since we grabbed our reference on the + * ->ldev member. Double check, and short-circuit to endio. + * In case the last activity log transaction failed to get on + * stable storage, and this is a WRITE, we may not even submit + * this bio. */ + if (get_ldev(mdev)) { + if (drbd_insert_fault(mdev, + rw == WRITE ? DRBD_FAULT_DT_WR + : rw == READ ? DRBD_FAULT_DT_RD + : DRBD_FAULT_DT_RA)) + bio_endio(bio, -EIO); + else + generic_make_request(bio); + put_ldev(mdev); + } else + bio_endio(bio, -EIO); +} + +void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time) { const int rw = bio_rw(bio); - const int size = bio->bi_size; - const sector_t sector = bio->bi_sector; - struct drbd_tl_epoch *b = NULL; + struct bio_and_error m = { NULL, }; struct drbd_request *req; - int local, remote, send_oos = 0; - int err = -EIO; - int ret = 0; - union drbd_state s; + bool no_remote = false; /* allocate outside of all locks; */ req = drbd_req_new(mdev, bio); @@ -846,55 +1035,14 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns * if user cannot handle io errors, that's not our business. */ dev_err(DEV, "could not kmalloc() req\n"); bio_endio(bio, -ENOMEM); - return 0; + return; } req->start_time = start_time; - local = get_ldev(mdev); - if (!local) { - bio_put(req->private_bio); /* or we get a bio leak */ + if (!get_ldev(mdev)) { + bio_put(req->private_bio); req->private_bio = NULL; } - if (rw == WRITE) { - /* Need to replicate writes. Unless it is an empty flush, - * which is better mapped to a DRBD P_BARRIER packet, - * also for drbd wire protocol compatibility reasons. */ - if (unlikely(size == 0)) { - /* The only size==0 bios we expect are empty flushes. */ - D_ASSERT(bio->bi_rw & REQ_FLUSH); - remote = 0; - } else - remote = 1; - } else { - /* READ || READA */ - if (local) { - if (!drbd_may_do_local_read(mdev, sector, size)) { - /* we could kick the syncer to - * sync this extent asap, wait for - * it, then continue locally. - * Or just issue the request remotely. 
- */ - local = 0; - bio_put(req->private_bio); - req->private_bio = NULL; - put_ldev(mdev); - } - } - remote = !local && mdev->state.pdsk >= D_UP_TO_DATE; - } - - /* If we have a disk, but a READA request is mapped to remote, - * we are R_PRIMARY, D_INCONSISTENT, SyncTarget. - * Just fail that READA request right here. - * - * THINK: maybe fail all READA when not local? - * or make this configurable... - * if network is slow, READA won't do any good. - */ - if (rw == READA && mdev->state.disk >= D_INCONSISTENT && !local) { - err = -EWOULDBLOCK; - goto fail_and_free_req; - } /* For WRITES going to the local disk, grab a reference on the target * extent. This waits for any resync activity in the corresponding @@ -903,349 +1051,131 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns * of transactional on-disk meta data updates. * Empty flushes don't need to go into the activity log, they can only * flush data for pending writes which are already in there. */ - if (rw == WRITE && local && size - && !drbd_test_flag(mdev, AL_SUSPENDED)) { + if (rw == WRITE && req->private_bio && req->i.size + && !test_bit(AL_SUSPENDED, &mdev->flags)) { req->rq_state |= RQ_IN_ACT_LOG; - drbd_al_begin_io(mdev, sector); + drbd_al_begin_io(mdev, &req->i); } - s = mdev->state; - remote = remote && drbd_should_do_remote(s); - send_oos = rw == WRITE && drbd_should_send_oos(s); - D_ASSERT(!(remote && send_oos)); - - if (!(local || remote) && !is_susp(mdev->state)) { - if (__ratelimit(&drbd_ratelimit_state)) - dev_err(DEV, "IO ERROR: neither local nor remote data, sector %llu+%u\n", - (unsigned long long)req->sector, req->size >> 9); - goto fail_free_complete; - } - - /* For WRITE request, we have to make sure that we have an - * unused_spare_tle, in case we need to start a new epoch. - * I try to be smart and avoid to pre-allocate always "just in case", - * but there is a race between testing the bit and pointer outside the - * spinlock, and grabbing the spinlock. - * if we lost that race, we retry. */ - if (rw == WRITE && (remote || send_oos) && - mdev->unused_spare_tle == NULL && - drbd_test_flag(mdev, CREATE_BARRIER)) { -allocate_barrier: - b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO); - if (!b) { - dev_err(DEV, "Failed to alloc barrier.\n"); - err = -ENOMEM; - goto fail_free_complete; - } + spin_lock_irq(&mdev->tconn->req_lock); + if (rw == WRITE) { + /* This may temporarily give up the req_lock, + * but will re-aquire it before it returns here. + * Needs to be before the check on drbd_suspended() */ + complete_conflicting_writes(req); } - /* GOOD, everything prepared, grab the spin_lock */ - spin_lock_irq(&mdev->req_lock); - - if (is_susp(mdev->state)) { - /* If we got suspended, use the retry mechanism of - drbd_make_request() to restart processing of this - bio. In the next call to drbd_make_request - we sleep in inc_ap_bio() */ - ret = 1; - spin_unlock_irq(&mdev->req_lock); - goto fail_free_complete; - } + /* no more giving up req_lock from now on! 
*/ - if (remote || send_oos) { - remote = drbd_should_do_remote(mdev->state); - send_oos = rw == WRITE && drbd_should_send_oos(mdev->state); - D_ASSERT(!(remote && send_oos)); - - if (!(remote || send_oos)) - dev_warn(DEV, "lost connection while grabbing the req_lock!\n"); - if (!(local || remote)) { - dev_err(DEV, "IO ERROR: neither local nor remote disk\n"); - spin_unlock_irq(&mdev->req_lock); - goto fail_free_complete; + if (drbd_suspended(mdev)) { + /* push back and retry: */ + req->rq_state |= RQ_POSTPONED; + if (req->private_bio) { + bio_put(req->private_bio); + req->private_bio = NULL; + put_ldev(mdev); } + goto out; } - if (b && mdev->unused_spare_tle == NULL) { - mdev->unused_spare_tle = b; - b = NULL; - } - if (rw == WRITE && (remote || send_oos) && - mdev->unused_spare_tle == NULL && - drbd_test_flag(mdev, CREATE_BARRIER)) { - /* someone closed the current epoch - * while we were grabbing the spinlock */ - spin_unlock_irq(&mdev->req_lock); - goto allocate_barrier; - } - - /* Update disk stats */ _drbd_start_io_acct(mdev, req, bio); - /* _maybe_start_new_epoch(mdev); - * If we need to generate a write barrier packet, we have to add the - * new epoch (barrier) object, and queue the barrier packet for sending, - * and queue the req's data after it _within the same lock_, otherwise - * we have race conditions were the reorder domains could be mixed up. - * - * Even read requests may start a new epoch and queue the corresponding - * barrier packet. To get the write ordering right, we only have to - * make sure that, if this is a write request and it triggered a - * barrier packet, this request is queued within the same spinlock. */ - if ((remote || send_oos) && mdev->unused_spare_tle && - drbd_test_and_clear_flag(mdev, CREATE_BARRIER)) { - _tl_add_barrier(mdev, mdev->unused_spare_tle); - mdev->unused_spare_tle = NULL; - } else { - D_ASSERT(!(remote && rw == WRITE && - drbd_test_flag(mdev, CREATE_BARRIER))); + /* We fail READ/READA early, if we can not serve it. + * We must do this before req is registered on any lists. + * Otherwise, drbd_req_complete() will queue failed READ for retry. */ + if (rw != WRITE) { + if (!do_remote_read(req) && !req->private_bio) + goto nodata; } - /* NOTE - * Actually, 'local' may be wrong here already, since we may have failed - * to write to the meta data, and may become wrong anytime because of - * local io-error for some other request, which would lead to us - * "detaching" the local disk. - * - * 'remote' may become wrong any time because the network could fail. - * - * This is a harmless race condition, though, since it is handled - * correctly at the appropriate places; so it just defers the failure - * of the respective operation. - */ - - /* mark them early for readability. - * this just sets some state flags. */ - if (remote) - _req_mod(req, to_be_send); - if (local) - _req_mod(req, to_be_submitted); - - /* check this request on the collision detection hash tables. - * if we have a conflict, just complete it here. - * THINK do we want to check reads, too? (I don't think so...) */ - if (rw == WRITE && _req_conflicts(req)) - goto fail_conflicting; + /* which transfer log epoch does this belong to? */ + req->epoch = atomic_read(&mdev->tconn->current_tle_nr); /* no point in adding empty flushes to the transfer log, * they are mapped to drbd barriers already. 
*/ - if (likely(size!=0)) - list_add_tail(&req->tl_requests, &mdev->newest_tle->requests); + if (likely(req->i.size!=0)) { + if (rw == WRITE) + mdev->tconn->current_tle_writes++; - /* NOTE remote first: to get the concurrent write detection right, - * we must register the request before start of local IO. */ - if (remote) { - /* either WRITE and C_CONNECTED, - * or READ, and no local disk, - * or READ, but not in sync. - */ - _req_mod(req, (rw == WRITE) - ? queue_for_net_write - : queue_for_net_read); + list_add_tail(&req->tl_requests, &mdev->tconn->transfer_log); } - if (send_oos && drbd_set_out_of_sync(mdev, sector, size)) - _req_mod(req, queue_for_send_oos); - - if (remote && - mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) - maybe_pull_ahead(mdev); - /* If this was a flush, queue a drbd barrier/start a new epoch. - * Unless the current epoch was empty anyways, or we are not currently - * replicating, in which case there is no point. */ - if (unlikely(bio->bi_rw & REQ_FLUSH) - && mdev->newest_tle->n_writes - && drbd_should_do_remote(mdev->state)) - queue_barrier(mdev); - - spin_unlock_irq(&mdev->req_lock); - kfree(b); /* if someone else has beaten us to it... */ - - if (local) { - req->private_bio->bi_bdev = mdev->ldev->backing_bdev; - - /* State may have changed since we grabbed our reference on the - * mdev->ldev member. Double check, and short-circuit to endio. - * In case the last activity log transaction failed to get on - * stable storage, and this is a WRITE, we may not even submit - * this bio. */ - if (get_ldev(mdev)) { - if (drbd_insert_fault(mdev, rw == WRITE ? DRBD_FAULT_DT_WR - : rw == READ ? DRBD_FAULT_DT_RD - : DRBD_FAULT_DT_RA)) - bio_endio(req->private_bio, -EIO); - else - generic_make_request(req->private_bio); - put_ldev(mdev); + if (rw == WRITE) { + if (!drbd_process_write_request(req)) + no_remote = true; + } else { + /* We either have a private_bio, or we can read from remote. + * Otherwise we had done the goto nodata above. */ + if (req->private_bio == NULL) { + _req_mod(req, TO_BE_SENT); + _req_mod(req, QUEUE_FOR_NET_READ); } else - bio_endio(req->private_bio, -EIO); + no_remote = true; } - return 0; - -fail_conflicting: - /* this is a conflicting request. - * even though it may have been only _partially_ - * overlapping with one of the currently pending requests, - * without even submitting or sending it, we will - * pretend that it was successfully served right now. - */ - _drbd_end_io_acct(mdev, req); - spin_unlock_irq(&mdev->req_lock); - if (remote) - dec_ap_pending(mdev); - /* THINK: do we want to fail it (-EIO), or pretend success? - * this pretends success. */ - err = 0; - -fail_free_complete: - if (req->rq_state & RQ_IN_ACT_LOG) - drbd_al_complete_io(mdev, sector); -fail_and_free_req: - if (local) { - bio_put(req->private_bio); - req->private_bio = NULL; - put_ldev(mdev); + if (req->private_bio) { + /* needs to be marked within the same spinlock */ + _req_mod(req, TO_BE_SUBMITTED); + /* but we need to give up the spinlock to submit */ + spin_unlock_irq(&mdev->tconn->req_lock); + drbd_submit_req_private_bio(req); + spin_lock_irq(&mdev->tconn->req_lock); + } else if (no_remote) { +nodata: + if (__ratelimit(&drbd_ratelimit_state)) + dev_err(DEV, "IO ERROR: neither local nor remote data, sector %llu+%u\n", + (unsigned long long)req->i.sector, req->i.size >> 9); + /* A write may have been queued for send_oos, however. 
+		 * So we can not simply free it, we must go through drbd_req_put_completion_ref() */
 	}

-	if (!ret)
-		bio_endio(bio, err);
-
-	drbd_req_free(req);
-	dec_ap_bio(mdev);
-	kfree(b);
-
-	return ret;
-}
-/* helper function for drbd_make_request
- * if we can determine just by the mdev (state) that this request will fail,
- * return 1
- * otherwise return 0
- */
-static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write)
-{
-	if (mdev->state.role != R_PRIMARY &&
-		(!allow_oos || is_write)) {
-		if (__ratelimit(&drbd_ratelimit_state)) {
-			dev_err(DEV, "Process %s[%u] tried to %s; "
-			    "since we are not in Primary state, "
-			    "we cannot allow this\n",
-			    current->comm, current->pid,
-			    is_write ? "WRITE" : "READ");
-		}
-		return 1;
-	}
+out:
+	if (drbd_req_put_completion_ref(req, &m, 1))
+		kref_put(&req->kref, drbd_req_destroy);
+	spin_unlock_irq(&mdev->tconn->req_lock);

-	return 0;
+	if (m.bio)
+		complete_master_bio(mdev, &m);
+	return;
 }

 void drbd_make_request(struct request_queue *q, struct bio *bio)
 {
-	unsigned int s_enr, e_enr;
 	struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
 	unsigned long start_time;

-	if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) {
-		bio_endio(bio, -EPERM);
-		return;
-	}
-
 	start_time = jiffies;

 	/*
 	 * what we "blindly" assume:
 	 */
-	D_ASSERT((bio->bi_size & 0x1ff) == 0);
-
-	/* to make some things easier, force alignment of requests within the
-	 * granularity of our hash tables */
-	s_enr = bio->bi_sector >> HT_SHIFT;
-	e_enr = bio->bi_size ? (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT : s_enr;
-
-	if (likely(s_enr == e_enr)) {
-		do {
-			inc_ap_bio(mdev, 1);
-		} while (drbd_make_request_common(mdev, bio, start_time));
-		return;
-	}
-
-	/* can this bio be split generically?
-	 * Maybe add our own split-arbitrary-bios function. */
-	if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size > DRBD_MAX_BIO_SIZE) {
-		/* rather error out here than BUG in bio_split */
-		dev_err(DEV, "bio would need to, but cannot, be split: "
-		    "(vcnt=%u,idx=%u,size=%u,sector=%llu)\n",
-		    bio->bi_vcnt, bio->bi_idx, bio->bi_size,
-		    (unsigned long long)bio->bi_sector);
-		bio_endio(bio, -EINVAL);
-	} else {
-		/* This bio crosses some boundary, so we have to split it. */
-		struct bio_pair *bp;
-		/* works for the "do not cross hash slot boundaries" case
-		 * e.g. sector 262269, size 4096
-		 * s_enr = 262269 >> 6 = 4097
-		 * e_enr = (262269+8-1) >> 6 = 4098
-		 * HT_SHIFT = 6
-		 * sps = 64, mask = 63
-		 * first_sectors = 64 - (262269 & 63) = 3
-		 */
-		const sector_t sect = bio->bi_sector;
-		const int sps = 1 << HT_SHIFT; /* sectors per slot */
-		const int mask = sps - 1;
-		const sector_t first_sectors = sps - (sect & mask);
-		bp = bio_split(bio, first_sectors);
+	D_ASSERT(IS_ALIGNED(bio->bi_size, 512));

-		/* we need to get a "reference count" (ap_bio_cnt)
-		 * to avoid races with the disconnect/reconnect/suspend code.
-		 * In case we need to split the bio here, we need to get three references
-		 * atomically, otherwise we might deadlock when trying to submit the
-		 * second one! */
-		inc_ap_bio(mdev, 3);
-
-		D_ASSERT(e_enr == s_enr + 1);
-
-		while (drbd_make_request_common(mdev, &bp->bio1, start_time))
-			inc_ap_bio(mdev, 1);
-
-		while (drbd_make_request_common(mdev, &bp->bio2, start_time))
-			inc_ap_bio(mdev, 1);
-
-		dec_ap_bio(mdev);
-
-		bio_pair_release(bp);
-	}
+	inc_ap_bio(mdev);
+	__drbd_make_request(mdev, bio, start_time);
 }

-/* This is called by bio_add_page().  With this function we reduce
- * the number of BIOs that span over multiple DRBD_MAX_BIO_SIZEs
- * units (was AL_EXTENTs).
+/* This is called by bio_add_page().
+ *
+ * q->max_hw_sectors and other global limits are already enforced there.
  *
- * we do the calculation within the lower 32bit of the byte offsets,
- * since we don't care for actual offset, but only check whether it
- * would cross "activity log extent" boundaries.
+ * We need to call down to our lower level device,
+ * in case it has special restrictions.
+ *
+ * We also may need to enforce configured max-bio-bvecs limits.
  *
  * As long as the BIO is empty we have to allow at least one bvec,
- * regardless of size and offset. so the resulting bio may still
- * cross extent boundaries. those are dealt with (bio_split) in
- * drbd_make_request.
+ * regardless of size and offset, so no need to ask lower levels.
  */
 int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
 {
 	struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
-	unsigned int bio_offset =
-		(unsigned int)bvm->bi_sector << 9; /* 32 bit */
 	unsigned int bio_size = bvm->bi_size;
-	int limit, backing_limit;
-
-	limit = DRBD_MAX_BIO_SIZE
-	      - ((bio_offset & (DRBD_MAX_BIO_SIZE-1)) + bio_size);
-	if (limit < 0)
-		limit = 0;
-	if (bio_size == 0) {
-		if (limit <= bvec->bv_len)
-			limit = bvec->bv_len;
-	} else if (limit && get_ldev(mdev)) {
+	int limit = DRBD_MAX_BIO_SIZE;
+	int backing_limit;
+
+	if (bio_size && get_ldev(mdev)) {
 		struct request_queue * const b =
 			mdev->ldev->backing_bdev->bd_disk->queue;
 		if (b->merge_bvec_fn) {
@@ -1257,24 +1187,38 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
 	return limit;
 }

+struct drbd_request *find_oldest_request(struct drbd_tconn *tconn)
+{
+	/* Walk the transfer log,
+	 * and find the oldest not yet completed request */
+	struct drbd_request *r;
+	list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
+		if (atomic_read(&r->completion_ref))
+			return r;
+	}
+	return NULL;
+}
+
 void request_timer_fn(unsigned long data)
 {
 	struct drbd_conf *mdev = (struct drbd_conf *) data;
+	struct drbd_tconn *tconn = mdev->tconn;
 	struct drbd_request *req; /* oldest request */
-	struct list_head *le;
+	struct net_conf *nc;
 	unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
 	unsigned long now;

-	if (get_net_conf(mdev)) {
-		if (mdev->state.conn >= C_WF_REPORT_PARAMS)
-			ent = mdev->net_conf->timeout*HZ/10
-				* mdev->net_conf->ko_count;
-		put_net_conf(mdev);
-	}
+	rcu_read_lock();
+	nc = rcu_dereference(tconn->net_conf);
+	if (nc && mdev->state.conn >= C_WF_REPORT_PARAMS)
+		ent = nc->timeout * HZ/10 * nc->ko_count;
+
 	if (get_ldev(mdev)) { /* implicit state.disk >= D_INCONSISTENT */
-		dt = mdev->ldev->dc.disk_timeout * HZ / 10;
+		dt = rcu_dereference(mdev->ldev->disk_conf)->disk_timeout * HZ / 10;
 		put_ldev(mdev);
 	}
+	rcu_read_unlock();
+
 	et = min_not_zero(dt, ent);

 	if (!et)
@@ -1282,17 +1226,14 @@ void request_timer_fn(unsigned long data)
 	now = jiffies;

-	spin_lock_irq(&mdev->req_lock);
-	le = &mdev->oldest_tle->requests;
-	if (list_empty(le)) {
-		spin_unlock_irq(&mdev->req_lock);
+	spin_lock_irq(&tconn->req_lock);
+	req = find_oldest_request(tconn);
+	if (!req) {
+		spin_unlock_irq(&tconn->req_lock);
 		mod_timer(&mdev->request_timer, now + et);
 		return;
 	}

-	le = le->prev;
-	req = list_entry(le, struct drbd_request, tl_requests);
-
 	/* The request is considered timed out, if
 	 * - we have some effective timeout from the configuration,
 	 *   with above state restrictions applied,
@@ -1311,17 +1252,17 @@ void request_timer_fn(unsigned long data)
 	 */
 	if (ent && req->rq_state & RQ_NET_PENDING &&
 	    time_after(now, req->start_time + ent) &&
-	    !time_in_range(now, mdev->last_reconnect_jif, mdev->last_reconnect_jif + ent)) {
+	    !time_in_range(now, tconn->last_reconnect_jif, tconn->last_reconnect_jif + ent)) {
 		dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
 		_drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
 	}
-	if (dt && req->rq_state & RQ_LOCAL_PENDING &&
+	if (dt && req->rq_state & RQ_LOCAL_PENDING && req->w.mdev == mdev &&
 	    time_after(now, req->start_time + dt) &&
 	    !time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) {
 		dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
 		__drbd_chk_io_error(mdev, DRBD_FORCE_DETACH);
 	}
 	nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&tconn->req_lock);
 	mod_timer(&mdev->request_timer, nt);
 }
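
The first hunk of the new request path above postpones a request while I/O is suspended (RQ_POSTPONED) and fails READ/READA up front when neither the local disk (req->private_bio) nor a peer (do_remote_read()) can serve it, before the request is registered on any list. The following is a minimal userspace sketch of that early-read decision, not driver code; toy_request, can_read_locally and can_read_remotely are invented stand-ins for the real request fields.

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for struct drbd_request: the two flags model
 * "req->private_bio != NULL" and "do_remote_read() succeeded". */
struct toy_request {
	bool can_read_locally;
	bool can_read_remotely;
};

/* Fail a read before it is registered anywhere; once a request sits on
 * the transfer log, completing it with an error needs the full state
 * machine (the "goto nodata" path in the hunk above). */
static int submit_toy_read(const struct toy_request *req)
{
	if (!req->can_read_remotely && !req->can_read_locally) {
		fprintf(stderr, "IO ERROR: neither local nor remote data\n");
		return -1;	/* caller completes the bio with -EIO */
	}
	/* ... otherwise queue for local submission or for a network read ... */
	return 0;
}

int main(void)
{
	struct toy_request r = { .can_read_locally = false, .can_read_remotely = false };
	return submit_toy_read(&r) ? 1 : 0;
}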
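
Further down in the same function, the private bio is marked TO_BE_SUBMITTED while req_lock is held, but the lock is released around drbd_submit_req_private_bio() and re-acquired afterwards, since submitting to the backing device may block. Below is a simplified userspace sketch of that drop-the-lock-to-submit pattern using a pthread mutex; queue_and_submit() and submit_backing_io() are invented names, not DRBD functions.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for drbd_submit_req_private_bio(): may block,
 * so it must not run while req_lock is held. */
static void submit_backing_io(int req_id)
{
	printf("submitting request %d to the backing device\n", req_id);
}

/* Mirrors the pattern in the diff: mark the request under the lock,
 * give the lock up for the actual submission, then retake it to
 * continue updating shared state. */
static void queue_and_submit(int req_id)
{
	pthread_mutex_lock(&req_lock);
	/* ... mark the request TO_BE_SUBMITTED while still locked ... */
	pthread_mutex_unlock(&req_lock);

	submit_backing_io(req_id);

	pthread_mutex_lock(&req_lock);
	/* ... continue with state that needs the lock ... */
	pthread_mutex_unlock(&req_lock);
}

int main(void)
{
	queue_and_submit(1);
	return 0;
}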
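
Request lifetime in this diff is governed by two counters: completion_ref counts outstanding reasons why the master bio cannot be completed yet, while a kref keeps the struct itself alive; when the last completion reference is put, one kref is dropped and drbd_req_destroy() eventually frees the request (see the out: label and the completion_ref check in find_oldest_request()). Here is a simplified userspace model of that two-counter scheme using C11 atomics; the toy_* names are invented and the error handling is reduced to the bare minimum.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Two counters, as in the new request lifetime scheme:
 * completion_ref gates completion of the master bio,
 * kref gates freeing of the structure itself. */
struct toy_req {
	atomic_int completion_ref;
	atomic_int kref;
};

static struct toy_req *toy_req_new(void)
{
	struct toy_req *req = malloc(sizeof(*req));
	if (!req)
		return NULL;
	atomic_init(&req->completion_ref, 1); /* put by the submit path */
	atomic_init(&req->kref, 1);           /* held as long as completion_ref > 0 */
	return req;
}

static void toy_req_destroy(struct toy_req *req)
{
	printf("request destroyed\n");
	free(req);
}

static void toy_kref_put(struct toy_req *req)
{
	if (atomic_fetch_sub(&req->kref, 1) == 1)
		toy_req_destroy(req);
}

/* Returns nonzero when this was the last completion reference,
 * i.e. the request may now be completed and one kref dropped. */
static int toy_put_completion_ref(struct toy_req *req)
{
	return atomic_fetch_sub(&req->completion_ref, 1) == 1;
}

int main(void)
{
	struct toy_req *req = toy_req_new();
	if (!req)
		return 1;
	if (toy_put_completion_ref(req))
		toy_kref_put(req);	/* mirrors kref_put(&req->kref, drbd_req_destroy) */
	return 0;
}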
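
Finally, request_timer_fn() above computes the effective timeout as the smaller non-zero value of the disk timeout and ko-count times the network timeout, and re-arms the timer one effective timeout past either "now" or the oldest request's start time, whichever is later. A small standalone sketch of that arithmetic follows; TICKS_AFTER re-implements a time_after()-style wrap-safe comparison for the example, and all names here are invented, not kernel APIs.

#include <stdio.h>

/* Wrap-safe "a is after b" on unsigned tick counters,
 * in the spirit of the kernel's time_after(). */
#define TICKS_AFTER(a, b)	((long)((b) - (a)) < 0)

/* Pick the smaller of two timeouts, treating 0 as "not configured". */
static unsigned long min_not_zero_ul(unsigned long a, unsigned long b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a < b ? a : b;
}

/* Next expiry as computed at the end of request_timer_fn(): if the
 * oldest request is already past its deadline, check again one full
 * effective timeout from now; otherwise one effective timeout after
 * the request started. */
static unsigned long next_timer_expiry(unsigned long now,
				       unsigned long start_time,
				       unsigned long et)
{
	return (TICKS_AFTER(now, start_time + et) ? now : start_time) + et;
}

int main(void)
{
	unsigned long dt = 0;          /* disk-timeout not configured */
	unsigned long ent = 6 * 100;   /* e.g. ko-count * timeout, in ticks */
	unsigned long et = min_not_zero_ul(dt, ent);

	printf("effective timeout: %lu ticks\n", et);
	printf("re-arm at tick: %lu\n", next_timer_expiry(1000, 900, et));
	return 0;
}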