author     Lars Ellenberg <lars.ellenberg@linbit.com>      2012-07-30 09:00:54 +0200
committer  Philipp Reisner <philipp.reisner@linbit.com>    2012-11-09 14:05:44 +0100
commit     81a3537a9730116dfaafeed728f2d460c872c0d5
tree       44d6e0672f13830aa4a468818a0c2ce425a1cfac /drivers
parent     58ffa580a748dd16b1e5ab260bea39cdbd1e94ef
drbd: announce FLUSH/FUA capability to upper layers
In 8.4, we may have bios spanning two activity log extents.
Fixup drbd_al_begin_io() and drbd_al_complete_io() to deal with zero sized bios.
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
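
The commit message is terse; the point of the drbd_al_begin_io()/drbd_al_complete_io() change is that for a zero-sized bio the old `(sector + (size >> 9) - 1)` arithmetic points one sector before the request, so a request starting on an extent boundary yields `last < first`. Below is a minimal userspace sketch of that arithmetic only; the helper names and the AL_EXTENT_SHIFT value are assumptions for illustration, not code from the driver.

```c
#include <assert.h>

#define AL_EXTENT_SHIFT 22      /* assumed: 4 MiB activity log extents */
#define SECTOR_SHIFT 9          /* 512-byte sectors */

/* hypothetical helpers mirroring the first/last computation in the patch */
static unsigned al_extent_first(unsigned long long sector)
{
        return sector >> (AL_EXTENT_SHIFT - SECTOR_SHIFT);
}

static unsigned al_extent_last(unsigned long long sector, unsigned size)
{
        if (size == 0)          /* empty flush: stay within "first" */
                return al_extent_first(sector);
        return (sector + (size >> SECTOR_SHIFT) - 1) >> (AL_EXTENT_SHIFT - SECTOR_SHIFT);
}

int main(void)
{
        /* a request starting exactly on an activity log extent boundary */
        unsigned long long sector = 1ULL << (AL_EXTENT_SHIFT - SECTOR_SHIFT);

        /* without the size == 0 guard, the old formula would compute
         * (sector - 1) and land one extent *before* first */
        assert(al_extent_last(sector, 0) == al_extent_first(sector));

        /* a 4 KiB write that crosses no boundary stays in one extent */
        assert(al_extent_last(sector, 4096) == al_extent_first(sector));
        return 0;
}
```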
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/drbd/drbd_actlog.c   | 12
-rw-r--r--  drivers/block/drbd/drbd_main.c     |  1
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 22
-rw-r--r--  drivers/block/drbd/drbd_req.c      |  1
4 files changed, 26 insertions(+), 10 deletions(-)
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 209b2e063b9..e81085795ec 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -248,11 +248,12 @@ void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i)
 	/* for bios crossing activity log extent boundaries,
 	 * we may need to activate two extents in one go */
 	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
-	unsigned last = (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
+	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
 	unsigned enr;
 	bool locked = false;
 
+	D_ASSERT(first <= last);
 	D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
 
 	for (enr = first; enr <= last; enr++)
@@ -305,11 +306,12 @@ void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i)
 	/* for bios crossing activity log extent boundaries,
 	 * we may need to activate two extents in one go */
 	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
-	unsigned last = (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
+	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
 	unsigned enr;
 	struct lc_element *extent;
 	unsigned long flags;
 
+	D_ASSERT(first <= last);
 	spin_lock_irqsave(&mdev->al_lock, flags);
 
 	for (enr = first; enr <= last; enr++) {
@@ -756,7 +758,11 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
 	unsigned int enr, count = 0;
 	struct lc_element *e;
 
-	if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
+	/* this should be an empty REQ_FLUSH */
+	if (size == 0)
+		return 0;
+
+	if (size < 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
 		dev_err(DEV, "sector: %llus, size: %d\n",
 			(unsigned long long)sector, size);
 		return 0;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index f2af74d0686..85d95ec405e 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2640,6 +2640,7 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
 	q->backing_dev_info.congested_data = mdev;
 	blk_queue_make_request(q, drbd_make_request);
+	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 	/* Setting the max_hw_sectors to an odd value of 8kibyte here
 	   This triggers a max_bio_size message upon first attach or connect */
 	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 8fddec96dfb..a6f0b293836 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -295,6 +295,9 @@ static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
 	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
 	int i;
 
+	if (page == NULL)
+		return;
+
 	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
 		i = page_chain_free(page);
 	else {
@@ -331,7 +334,7 @@ drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
 		    unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
 {
 	struct drbd_peer_request *peer_req;
-	struct page *page;
+	struct page *page = NULL;
 	unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
 
 	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
@@ -344,9 +347,11 @@ drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
 		return NULL;
 	}
 
-	page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
-	if (!page)
-		goto fail;
+	if (data_size) {
+		page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
+		if (!page)
+			goto fail;
+	}
 
 	drbd_clear_interval(&peer_req->i);
 	peer_req->i.size = data_size;
@@ -1513,8 +1518,6 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
 		data_size -= dgs;
 	}
 
-	if (!expect(data_size != 0))
-		return NULL;
 	if (!expect(IS_ALIGNED(data_size, 512)))
 		return NULL;
 	if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
@@ -1537,6 +1540,9 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
 	if (!peer_req)
 		return NULL;
 
+	if (!data_size)
+		return peer_req;
+
 	ds = data_size;
 	page = peer_req->pages;
 	page_chain_for_each(page) {
@@ -2199,6 +2205,10 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
 	dp_flags = be32_to_cpu(p->dp_flags);
 	rw |= wire_flags_to_bio(mdev, dp_flags);
+	if (peer_req->pages == NULL) {
+		D_ASSERT(peer_req->i.size == 0);
+		D_ASSERT(dp_flags & DP_FLUSH);
+	}
 
 	if (dp_flags & DP_MAY_SET_IN_SYNC)
 		peer_req->flags |= EE_MAY_SET_IN_SYNC;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index e215dce4c69..8323449fbba 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1097,7 +1097,6 @@ int drbd_make_request(struct request_queue *q, struct bio *bio)
 	/*
 	 * what we "blindly" assume:
 	 */
-	D_ASSERT(bio->bi_size > 0);
 	D_ASSERT(IS_ALIGNED(bio->bi_size, 512));
 
 	inc_ap_bio(mdev);
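
Taken together, the receiver changes mean an empty REQ_FLUSH arrives as a zero-sized data packet: drbd_alloc_peer_req() skips page allocation, read_in_block() returns early with no pages, and receive_Data() asserts that a page-less peer request really is a zero-sized DP_FLUSH. The userspace sketch below models only that invariant; the struct, the helper, and the DP_FLUSH bit value are stand-ins, not the driver's definitions.

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define DP_FLUSH (1u << 5)      /* placeholder bit; the real value lives in the DRBD headers */

/* trimmed-down stand-in for struct drbd_peer_request */
struct peer_request {
        void *pages;            /* NULL when no payload pages were allocated */
        unsigned int size;      /* payload size in bytes */
};

/* the condition the two new D_ASSERTs in receive_Data() encode */
static bool empty_flush_is_consistent(const struct peer_request *req, unsigned dp_flags)
{
        if (req->pages != NULL)
                return true;                    /* ordinary data request */
        return req->size == 0 && (dp_flags & DP_FLUSH);
}

int main(void)
{
        int payload[1024];
        struct peer_request flush = { .pages = NULL, .size = 0 };
        struct peer_request data  = { .pages = payload, .size = sizeof(payload) };

        assert(empty_flush_is_consistent(&flush, DP_FLUSH));   /* empty flush from the peer */
        assert(empty_flush_is_consistent(&data, 0));            /* regular write with pages */
        return 0;
}
```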