| | | |
|---|---|---|
| author | Christoph Hellwig <hch@lst.de> | 2009-09-17 19:57:42 +0200 |
| committer | Rusty Russell <rusty@rustcorp.com.au> | 2009-09-23 22:26:36 +0930 |
| commit | f1b0ef062602713c2c7cfa12362d5d90ed01c5f6 (patch) | |
| tree | 30a130141b7c4b69ff20d134734acd40f1ee0960 /drivers/block | |
| parent | 3ca4f5ca73057a617f9444a91022d7127041970a (diff) | |
virtio_blk: add support for cache flush
Recent qemu has added a VIRTIO_BLK_F_FLUSH flag to advertise that the
virtual disk has a volatile write cache that needs to be flushed. When we
see this feature, tell the Linux block layer about it and use the new
VIRTIO_BLK_T_FLUSH command to flush the cache when required. This allows
for a correct and simple implementation of write barriers.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
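
For context, the flush request introduced here reuses the existing virtio-blk out-header; only the type field distinguishes a cache flush from a read or write. The following is a minimal sketch of that header as the guest fills it in, not code from the patch: the struct layout and constants follow the virtio-blk definitions of this era, and the helper name fill_flush_hdr() is hypothetical.

```c
#include <stdint.h>

/* Request type for a cache flush, as used by the patch below
 * (value assumed from the virtio-blk spec, not quoted from the patch). */
#define VIRTIO_BLK_T_FLUSH	4

/* Out-header the guest places at the start of every virtio-blk request. */
struct virtio_blk_outhdr {
	uint32_t type;		/* VIRTIO_BLK_T_* request type */
	uint32_t ioprio;	/* I/O priority hint */
	uint64_t sector;	/* start sector; unused for a flush */
};

/* Hypothetical helper: build the header for a cache-flush request,
 * mirroring what do_req() does for REQ_LB_OP_FLUSH in the patch. */
static void fill_flush_hdr(struct virtio_blk_outhdr *hdr, uint32_t ioprio)
{
	hdr->type = VIRTIO_BLK_T_FLUSH;	/* flush the volatile write cache */
	hdr->sector = 0;		/* no sector is associated with a flush */
	hdr->ioprio = ioprio;		/* carried over from the block request */
}
```

No data buffer accompanies such a request; the device just returns the usual one-byte status once the flush has completed.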
Diffstat (limited to 'drivers/block')
-rw-r--r-- | drivers/block/virtio_blk.c | 30 |
1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 73de7532fcf..3d5fe97569c 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -92,15 +92,26 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 		return false;
 
 	vbr->req = req;
-	if (blk_fs_request(vbr->req)) {
+	switch (req->cmd_type) {
+	case REQ_TYPE_FS:
 		vbr->out_hdr.type = 0;
 		vbr->out_hdr.sector = blk_rq_pos(vbr->req);
 		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
-	} else if (blk_pc_request(vbr->req)) {
+		break;
+	case REQ_TYPE_BLOCK_PC:
 		vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
 		vbr->out_hdr.sector = 0;
 		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
-	} else {
+		break;
+	case REQ_TYPE_LINUX_BLOCK:
+		if (req->cmd[0] == REQ_LB_OP_FLUSH) {
+			vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
+			vbr->out_hdr.sector = 0;
+			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+			break;
+		}
+		/*FALLTHRU*/
+	default:
 		/* We don't put anything else in the queue. */
 		BUG();
 	}
@@ -200,6 +211,12 @@ out:
 	return err;
 }
 
+static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
+{
+	req->cmd_type = REQ_TYPE_LINUX_BLOCK;
+	req->cmd[0] = REQ_LB_OP_FLUSH;
+}
+
 static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
 			 unsigned cmd, unsigned long data)
 {
@@ -338,7 +355,10 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 	index++;
 
 	/* If barriers are supported, tell block layer that queue is ordered */
-	if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
+	if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
+		blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_DRAIN_FLUSH,
+				  virtblk_prepare_flush);
+	else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
 		blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL);
 
 	/* If disk is read-only in the host, the guest should obey */
@@ -425,7 +445,7 @@ static struct virtio_device_id id_table[] = {
 static unsigned int features[] = {
 	VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
 	VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
-	VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY
+	VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY, VIRTIO_BLK_F_FLUSH
 };
 
 /*
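
On the host side, a device that advertises VIRTIO_BLK_F_FLUSH is expected to persist previously completed writes when it receives a VIRTIO_BLK_T_FLUSH request. The sketch below illustrates that contract only; it is not qemu's implementation, and handle_flush() and backing_fd are made-up names for a backend that keeps the virtual disk in a plain image file.

```c
#include <unistd.h>
#include <stdint.h>

#define VIRTIO_BLK_T_FLUSH	4	/* assumed value, per the virtio-blk spec */
#define VIRTIO_BLK_S_OK		0
#define VIRTIO_BLK_S_IOERR	1

/* Hypothetical host-side handler: flush the volatile write cache backing
 * the virtual disk (here, the host page cache of an image file). */
static uint8_t handle_flush(int backing_fd)
{
	if (fdatasync(backing_fd) < 0)
		return VIRTIO_BLK_S_IOERR;	/* the guest sees the flush fail */
	return VIRTIO_BLK_S_OK;
}
```

With something like this on the device side, the guest's registration of QUEUE_ORDERED_DRAIN_FLUSH above lets the block layer roughly drain outstanding requests and issue cache flushes around the barrier write, which is the ordering guarantee a write barrier needs.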