Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/DAC960.c | 8
-rw-r--r--  drivers/block/Makefile | 1
-rw-r--r--  drivers/block/amiflop.c | 3
-rw-r--r--  drivers/block/aoe/aoeblk.c | 6
-rw-r--r--  drivers/block/aoe/aoechr.c | 3
-rw-r--r--  drivers/block/aoe/aoecmd.c | 6
-rw-r--r--  drivers/block/ataflop.c | 5
-rw-r--r--  drivers/block/brd.c | 3
-rw-r--r--  drivers/block/cciss.c | 29
-rw-r--r--  drivers/block/cciss_scsi.c | 96
-rw-r--r--  drivers/block/cpqarray.c | 8
-rw-r--r--  drivers/block/drbd/drbd_actlog.c | 246
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c | 13
-rw-r--r--  drivers/block/drbd/drbd_int.h | 179
-rw-r--r--  drivers/block/drbd/drbd_main.c | 256
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 200
-rw-r--r--  drivers/block/drbd/drbd_proc.c | 10
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 21
-rw-r--r--  drivers/block/drbd/drbd_req.c | 192
-rw-r--r--  drivers/block/drbd/drbd_req.h | 8
-rw-r--r--  drivers/block/drbd/drbd_state.c | 28
-rw-r--r--  drivers/block/drbd/drbd_strings.c | 1
-rw-r--r--  drivers/block/drbd/drbd_worker.c | 24
-rw-r--r--  drivers/block/floppy.c | 5
-rw-r--r--  drivers/block/loop.c | 8
-rw-r--r--  drivers/block/mg_disk.c | 2
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 79
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.h | 11
-rw-r--r--  drivers/block/nbd.c | 2
-rw-r--r--  drivers/block/nvme-core.c (renamed from drivers/block/nvme.c) | 594
-rw-r--r--  drivers/block/nvme-scsi.c | 3053
-rw-r--r--  drivers/block/paride/pcd.c | 3
-rw-r--r--  drivers/block/paride/pd.c | 4
-rw-r--r--  drivers/block/paride/pf.c | 9
-rw-r--r--  drivers/block/pktcdvd.c | 108
-rw-r--r--  drivers/block/ps3vram.c | 2
-rw-r--r--  drivers/block/rbd.c | 2864
-rw-r--r--  drivers/block/swim.c | 4
-rw-r--r--  drivers/block/swim3.c | 5
-rw-r--r--  drivers/block/virtio_blk.c | 148
-rw-r--r--  drivers/block/xen-blkfront.c | 3
-rw-r--r--  drivers/block/xsysace.c | 3
-rw-r--r--  drivers/block/z2ram.c | 6
43 files changed, 6437 insertions(+), 1822 deletions(-)
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 5b5ee79ff23..eb3950113e4 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -6473,7 +6473,7 @@ static int dac960_initial_status_proc_show(struct seq_file *m, void *v)
static int dac960_initial_status_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, dac960_initial_status_proc_show, PDE(inode)->data);
+ return single_open(file, dac960_initial_status_proc_show, PDE_DATA(inode));
}
static const struct file_operations dac960_initial_status_proc_fops = {
@@ -6519,7 +6519,7 @@ static int dac960_current_status_proc_show(struct seq_file *m, void *v)
static int dac960_current_status_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, dac960_current_status_proc_show, PDE(inode)->data);
+ return single_open(file, dac960_current_status_proc_show, PDE_DATA(inode));
}
static const struct file_operations dac960_current_status_proc_fops = {
@@ -6540,14 +6540,14 @@ static int dac960_user_command_proc_show(struct seq_file *m, void *v)
static int dac960_user_command_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, dac960_user_command_proc_show, PDE(inode)->data);
+ return single_open(file, dac960_user_command_proc_show, PDE_DATA(inode));
}
static ssize_t dac960_user_command_proc_write(struct file *file,
const char __user *Buffer,
size_t Count, loff_t *pos)
{
- DAC960_Controller_T *Controller = (DAC960_Controller_T *) PDE(file_inode(file))->data;
+ DAC960_Controller_T *Controller = PDE_DATA(file_inode(file));
unsigned char CommandBuffer[80];
int Length;
if (Count > sizeof(CommandBuffer)-1) return -EINVAL;
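
All three DAC960 conversions above follow the same pattern: /proc handlers
stop reaching into struct proc_dir_entry and use the PDE_DATA() accessor
instead. A minimal sketch of the new idiom, for a hypothetical "mydrv"
driver (not part of this patch):

	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	struct mydrv_ctrl { int state; };	/* hypothetical driver state */

	static int mydrv_proc_show(struct seq_file *m, void *v)
	{
		struct mydrv_ctrl *ctrl = m->private;	/* handed in by single_open() */

		seq_printf(m, "state: %d\n", ctrl->state);
		return 0;
	}

	static int mydrv_proc_open(struct inode *inode, struct file *file)
	{
		/* was: single_open(file, mydrv_proc_show, PDE(inode)->data); */
		return single_open(file, mydrv_proc_show, PDE_DATA(inode));
	}
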
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index a3b40232c6a..ca07399a8d9 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -42,4 +42,5 @@ obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
+nvme-y := nvme-core.o nvme-scsi.o
swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 386146d792d..4ff85b8785e 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1634,7 +1634,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
return 0;
}
-static int floppy_release(struct gendisk *disk, fmode_t mode)
+static void floppy_release(struct gendisk *disk, fmode_t mode)
{
struct amiga_floppy_struct *p = disk->private_data;
int drive = p - unit;
@@ -1654,7 +1654,6 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
floppy_off (drive | 0x40000000);
#endif
mutex_unlock(&amiflop_mutex);
- return 0;
}
/*
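
This is the first of many identical conversions in this series: the
->release() method of struct block_device_operations now returns void,
because the block layer never used the return value. A sketch of the
resulting operations table, for a hypothetical driver:

	#include <linux/module.h>
	#include <linux/blkdev.h>

	static int mydrv_open(struct block_device *bdev, fmode_t mode)
	{
		return 0;	/* open can still fail, so it keeps its int return */
	}

	static void mydrv_release(struct gendisk *disk, fmode_t mode)
	{
		/* no return value: release tears down state, it cannot fail */
	}

	static const struct block_device_operations mydrv_fops = {
		.owner   = THIS_MODULE,
		.open    = mydrv_open,
		.release = mydrv_release,
	};
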
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index a129f8c8073..916d9ed5c8a 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -169,7 +169,7 @@ aoeblk_open(struct block_device *bdev, fmode_t mode)
return -ENODEV;
}
-static int
+static void
aoeblk_release(struct gendisk *disk, fmode_t mode)
{
struct aoedev *d = disk->private_data;
@@ -180,11 +180,9 @@ aoeblk_release(struct gendisk *disk, fmode_t mode)
if (--d->nopen == 0) {
spin_unlock_irqrestore(&d->lock, flags);
aoecmd_cfg(d->aoemajor, d->aoeminor);
- return 0;
+ return;
}
spin_unlock_irqrestore(&d->lock, flags);
-
- return 0;
}
static void
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index 42e67ad6bd2..ab41be625a5 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -139,13 +139,12 @@ bail: spin_unlock_irqrestore(&emsgs_lock, flags);
return;
}
- mp = kmalloc(n, GFP_ATOMIC);
+ mp = kmemdup(msg, n, GFP_ATOMIC);
if (mp == NULL) {
printk(KERN_ERR "aoe: allocation failure, len=%ld\n", n);
goto bail;
}
- memcpy(mp, msg, n);
em->msg = mp;
em->flags |= EMFL_VALID;
em->len = n;
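
kmemdup() folds the kmalloc()+memcpy() pair above into a single call with
one failure point, taking the same gfp flags kmalloc() would. As a sketch
(hypothetical helper, not from this patch):

	#include <linux/slab.h>
	#include <linux/string.h>

	/* duplicate an n-byte message while atomic; NULL on allocation failure */
	static char *dup_msg(const char *msg, size_t n)
	{
		return kmemdup(msg, n, GFP_ATOMIC);	/* == kmalloc() + memcpy() */
	}
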
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 92b6d7c51e3..fc803ecbbce 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -920,16 +920,14 @@ bio_pagedec(struct bio *bio)
static void
bufinit(struct buf *buf, struct request *rq, struct bio *bio)
{
- struct bio_vec *bv;
-
memset(buf, 0, sizeof(*buf));
buf->rq = rq;
buf->bio = bio;
buf->resid = bio->bi_size;
buf->sector = bio->bi_sector;
bio_pageinc(bio);
- buf->bv = bv = &bio->bi_io_vec[bio->bi_idx];
- buf->bv_resid = bv->bv_len;
+ buf->bv = bio_iovec(bio);
+ buf->bv_resid = buf->bv->bv_len;
WARN_ON(buf->bv_resid == 0);
}
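
In this kernel series bio_iovec() is a macro over the same fields the old
code indexed by hand; roughly, from <linux/bio.h> of this era:

	#define bio_iovec_idx(bio, idx)	(&((bio)->bi_io_vec[(idx)]))
	#define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_idx)

so buf->bv = bio_iovec(bio) is equivalent to the removed
&bio->bi_io_vec[bio->bi_idx].
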
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index ede16c64ff0..0e30c6e5492 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -367,7 +367,7 @@ static void fd_probe( int drive );
static int fd_test_drive_present( int drive );
static void config_types( void );
static int floppy_open(struct block_device *bdev, fmode_t mode);
-static int floppy_release(struct gendisk *disk, fmode_t mode);
+static void floppy_release(struct gendisk *disk, fmode_t mode);
/************************* End of Prototypes **************************/
@@ -1886,7 +1886,7 @@ static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
return ret;
}
-static int floppy_release(struct gendisk *disk, fmode_t mode)
+static void floppy_release(struct gendisk *disk, fmode_t mode)
{
struct atari_floppy_struct *p = disk->private_data;
mutex_lock(&ataflop_mutex);
@@ -1897,7 +1897,6 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
p->ref = 0;
}
mutex_unlock(&ataflop_mutex);
- return 0;
}
static const struct block_device_operations floppy_fops = {
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 531ceb31d0f..f1a29f8e9d3 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -334,8 +334,7 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
int err = -EIO;
sector = bio->bi_sector;
- if (sector + (bio->bi_size >> SECTOR_SHIFT) >
- get_capacity(bdev->bd_disk))
+ if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
goto out;
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
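
bio_end_sector() is the new helper for exactly this bounds check; roughly,
from <linux/bio.h> of this series:

	#define bio_sectors(bio)	((bio)->bi_size >> 9)
	#define bio_end_sector(bio)	((bio)->bi_sector + bio_sectors(bio))

which matches the removed open-coded sector + (bio->bi_size >> SECTOR_SHIFT).
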
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 1c1b8e544aa..6374dc10352 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -75,6 +75,12 @@ module_param(cciss_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(cciss_simple_mode,
"Use 'simple mode' rather than 'performant mode'");
+static int cciss_allow_hpsa;
+module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(cciss_allow_hpsa,
+ "Prevent cciss driver from accessing hardware known to be "
+ " supported by the hpsa driver");
+
static DEFINE_MUTEX(cciss_mutex);
static struct proc_dir_entry *proc_cciss;
@@ -161,7 +167,7 @@ static irqreturn_t do_cciss_intx(int irq, void *dev_id);
static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
static int cciss_open(struct block_device *bdev, fmode_t mode);
static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
-static int cciss_release(struct gendisk *disk, fmode_t mode);
+static void cciss_release(struct gendisk *disk, fmode_t mode);
static int do_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
@@ -493,7 +499,7 @@ static int cciss_seq_open(struct inode *inode, struct file *file)
struct seq_file *seq = file->private_data;
if (!ret)
- seq->private = PDE(inode)->data;
+ seq->private = PDE_DATA(inode);
return ret;
}
@@ -1123,7 +1129,7 @@ static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode)
/*
* Close. Sync first.
*/
-static int cciss_release(struct gendisk *disk, fmode_t mode)
+static void cciss_release(struct gendisk *disk, fmode_t mode)
{
ctlr_info_t *h;
drive_info_struct *drv;
@@ -1135,7 +1141,6 @@ static int cciss_release(struct gendisk *disk, fmode_t mode)
drv->usage_count--;
h->usage_count--;
mutex_unlock(&cciss_mutex);
- return 0;
}
static int do_ioctl(struct block_device *bdev, fmode_t mode,
@@ -4116,9 +4121,13 @@ static int cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
subsystem_vendor_id;
- for (i = 0; i < ARRAY_SIZE(products); i++)
+ for (i = 0; i < ARRAY_SIZE(products); i++) {
+ /* Stand aside for hpsa driver on request */
+ if (cciss_allow_hpsa)
+ return -ENODEV;
if (*board_id == products[i].board_id)
return i;
+ }
dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
*board_id);
return -ENODEV;
@@ -4960,6 +4969,16 @@ static int cciss_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ctlr_info_t *h;
unsigned long flags;
+ /*
+ * By default the cciss driver is used for all older HP Smart Array
+ * controllers. There are module parameters that allow a user to
+ * override this behavior and instead use the hpsa SCSI driver. If
+ * this is the case cciss may be loaded first from the kdump initrd
+ * image and cause a kernel panic. So if reset_devices is true and
+ * cciss_allow_hpsa is set just bail.
+ */
+ if ((reset_devices) && (cciss_allow_hpsa == 1))
+ return -ENODEV;
rc = cciss_init_reset_devices(pdev);
if (rc) {
if (rc != -ENOTSUPP)
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index da3311129a0..ecd845cd28d 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -54,13 +54,11 @@ static CommandList_struct *cmd_special_alloc(ctlr_info_t *h);
static void cmd_free(ctlr_info_t *h, CommandList_struct *c);
static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c);
-static int cciss_scsi_proc_info(
- struct Scsi_Host *sh,
+static int cciss_scsi_write_info(struct Scsi_Host *sh,
char *buffer, /* data buffer */
- char **start, /* where data in buffer starts */
- off_t offset, /* offset from start of imaginary file */
- int length, /* length of data in buffer */
- int func); /* 0 == read, 1 == write */
+ int length); /* length of data in buffer */
+static int cciss_scsi_show_info(struct seq_file *m,
+ struct Scsi_Host *sh);
static int cciss_scsi_queue_command (struct Scsi_Host *h,
struct scsi_cmnd *cmd);
@@ -82,7 +80,8 @@ static struct scsi_host_template cciss_driver_template = {
.module = THIS_MODULE,
.name = "cciss",
.proc_name = "cciss",
- .proc_info = cciss_scsi_proc_info,
+ .write_info = cciss_scsi_write_info,
+ .show_info = cciss_scsi_show_info,
.queuecommand = cciss_scsi_queue_command,
.this_id = 7,
.cmd_per_lun = 1,
@@ -1302,59 +1301,54 @@ cciss_scsi_user_command(ctlr_info_t *h, int hostno, char *buffer, int length)
return length;
}
-
static int
-cciss_scsi_proc_info(struct Scsi_Host *sh,
+cciss_scsi_write_info(struct Scsi_Host *sh,
char *buffer, /* data buffer */
- char **start, /* where data in buffer starts */
- off_t offset, /* offset from start of imaginary file */
- int length, /* length of data in buffer */
- int func) /* 0 == read, 1 == write */
+ int length) /* length of data in buffer */
{
+ ctlr_info_t *h = (ctlr_info_t *) sh->hostdata[0];
+ if (h == NULL) /* This really shouldn't ever happen. */
+ return -EINVAL;
- int buflen, datalen;
- ctlr_info_t *h;
+ return cciss_scsi_user_command(h, sh->host_no,
+ buffer, length);
+}
+
+static int
+cciss_scsi_show_info(struct seq_file *m, struct Scsi_Host *sh)
+{
+
+ ctlr_info_t *h = (ctlr_info_t *) sh->hostdata[0];
int i;
- h = (ctlr_info_t *) sh->hostdata[0];
if (h == NULL) /* This really shouldn't ever happen. */
return -EINVAL;
- if (func == 0) { /* User is reading from /proc/scsi/ciss*?/?* */
- buflen = sprintf(buffer, "cciss%d: SCSI host: %d\n",
- h->ctlr, sh->host_no);
-
- /* this information is needed by apps to know which cciss
- device corresponds to which scsi host number without
- having to open a scsi target device node. The device
- information is not a duplicate of /proc/scsi/scsi because
- the two may be out of sync due to scsi hotplug, rather
- this info is for an app to be able to use to know how to
- get them back in sync. */
-
- for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
- struct cciss_scsi_dev_t *sd =
- &ccissscsi[h->ctlr].dev[i];
- buflen += sprintf(&buffer[buflen], "c%db%dt%dl%d %02d "
- "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- sh->host_no, sd->bus, sd->target, sd->lun,
- sd->devtype,
- sd->scsi3addr[0], sd->scsi3addr[1],
- sd->scsi3addr[2], sd->scsi3addr[3],
- sd->scsi3addr[4], sd->scsi3addr[5],
- sd->scsi3addr[6], sd->scsi3addr[7]);
- }
- datalen = buflen - offset;
- if (datalen < 0) { /* they're reading past EOF. */
- datalen = 0;
- *start = buffer+buflen;
- } else
- *start = buffer + offset;
- return(datalen);
- } else /* User is writing to /proc/scsi/cciss*?/?* ... */
- return cciss_scsi_user_command(h, sh->host_no,
- buffer, length);
-}
+ seq_printf(m, "cciss%d: SCSI host: %d\n",
+ h->ctlr, sh->host_no);
+
+ /* this information is needed by apps to know which cciss
+ device corresponds to which scsi host number without
+ having to open a scsi target device node. The device
+ information is not a duplicate of /proc/scsi/scsi because
+ the two may be out of sync due to scsi hotplug, rather
+ this info is for an app to be able to use to know how to
+ get them back in sync. */
+
+ for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
+ struct cciss_scsi_dev_t *sd =
+ &ccissscsi[h->ctlr].dev[i];
+ seq_printf(m, "c%db%dt%dl%d %02d "
+ "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ sh->host_no, sd->bus, sd->target, sd->lun,
+ sd->devtype,
+ sd->scsi3addr[0], sd->scsi3addr[1],
+ sd->scsi3addr[2], sd->scsi3addr[3],
+ sd->scsi3addr[4], sd->scsi3addr[5],
+ sd->scsi3addr[6], sd->scsi3addr[7]);
+ }
+ return 0;
+}
/* cciss_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
dma mapping and fills in the scatter gather entries of the
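
The removed three-way proc_info() callback (read and write multiplexed
through a func argument, with hand-rolled buffer offset handling) is split
into the two dedicated scsi_host_template hooks used above; the read side
gets seq_file buffering for free. A minimal sketch for a hypothetical host
template:

	#include <scsi/scsi_host.h>
	#include <linux/seq_file.h>

	static int mydrv_show_info(struct seq_file *m, struct Scsi_Host *sh)
	{
		seq_printf(m, "host %d\n", sh->host_no);
		return 0;
	}

	static int mydrv_write_info(struct Scsi_Host *sh, char *buffer, int length)
	{
		return length;	/* consume the user command, or return -errno */
	}

	static struct scsi_host_template mydrv_template = {
		.name       = "mydrv",
		.show_info  = mydrv_show_info,
		.write_info = mydrv_write_info,
	};
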
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 3f087133a25..639d26b90b9 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -160,7 +160,7 @@ static int sendcmd(
unsigned int log_unit );
static int ida_unlocked_open(struct block_device *bdev, fmode_t mode);
-static int ida_release(struct gendisk *disk, fmode_t mode);
+static void ida_release(struct gendisk *disk, fmode_t mode);
static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg);
static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
@@ -296,7 +296,7 @@ static int ida_proc_show(struct seq_file *m, void *v)
static int ida_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, ida_proc_show, PDE(inode)->data);
+ return single_open(file, ida_proc_show, PDE_DATA(inode));
}
static const struct file_operations ida_proc_fops = {
@@ -856,7 +856,7 @@ static int ida_unlocked_open(struct block_device *bdev, fmode_t mode)
/*
* Close. Sync first.
*/
-static int ida_release(struct gendisk *disk, fmode_t mode)
+static void ida_release(struct gendisk *disk, fmode_t mode)
{
ctlr_info_t *host;
@@ -864,8 +864,6 @@ static int ida_release(struct gendisk *disk, fmode_t mode)
host = get_host(disk);
host->usage_count--;
mutex_unlock(&cpqarray_mutex);
-
- return 0;
}
/*
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 92510f8ad01..6608076dc39 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -104,7 +104,6 @@ struct update_al_work {
int err;
};
-static int al_write_transaction(struct drbd_conf *mdev);
void *drbd_md_get_buffer(struct drbd_conf *mdev)
{
@@ -168,7 +167,11 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
bio->bi_end_io = drbd_md_io_complete;
bio->bi_rw = rw;
- if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* Corresponding put_ldev in drbd_md_io_complete() */
+ if (!(rw & WRITE) && mdev->state.disk == D_DISKLESS && mdev->ldev == NULL)
+ /* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
+ ;
+ else if (!get_ldev_if_state(mdev, D_ATTACHING)) {
+ /* Corresponding put_ldev in drbd_md_io_complete() */
dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
err = -ENODEV;
goto out;
@@ -199,9 +202,10 @@ int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
BUG_ON(!bdev->md_bdev);
- dev_dbg(DEV, "meta_data io: %s [%d]:%s(,%llus,%s)\n",
+ dev_dbg(DEV, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
current->comm, current->pid, __func__,
- (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
+ (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ",
+ (void*)_RET_IP_ );
if (sector < drbd_md_first_sector(bdev) ||
sector + 7 > drbd_md_last_sector(bdev))
@@ -209,7 +213,8 @@ int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
current->comm, current->pid, __func__,
(unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
- err = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, MD_BLOCK_SIZE);
+ /* we do all our meta data IO in aligned 4k blocks. */
+ err = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, 4096);
if (err) {
dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
(unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err);
@@ -217,44 +222,99 @@ int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
return err;
}
-static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
+static struct bm_extent *find_active_resync_extent(struct drbd_conf *mdev, unsigned int enr)
{
- struct lc_element *al_ext;
struct lc_element *tmp;
- int wake;
-
- spin_lock_irq(&mdev->al_lock);
tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
if (unlikely(tmp != NULL)) {
struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
- if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
- wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
- spin_unlock_irq(&mdev->al_lock);
- if (wake)
- wake_up(&mdev->al_wait);
- return NULL;
- }
+ if (test_bit(BME_NO_WRITES, &bm_ext->flags))
+ return bm_ext;
+ }
+ return NULL;
+}
+
+static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr, bool nonblock)
+{
+ struct lc_element *al_ext;
+ struct bm_extent *bm_ext;
+ int wake;
+
+ spin_lock_irq(&mdev->al_lock);
+ bm_ext = find_active_resync_extent(mdev, enr);
+ if (bm_ext) {
+ wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
+ spin_unlock_irq(&mdev->al_lock);
+ if (wake)
+ wake_up(&mdev->al_wait);
+ return NULL;
}
- al_ext = lc_get(mdev->act_log, enr);
+ if (nonblock)
+ al_ext = lc_try_get(mdev->act_log, enr);
+ else
+ al_ext = lc_get(mdev->act_log, enr);
spin_unlock_irq(&mdev->al_lock);
return al_ext;
}
-void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i)
+bool drbd_al_begin_io_fastpath(struct drbd_conf *mdev, struct drbd_interval *i)
{
/* for bios crossing activity log extent boundaries,
* we may need to activate two extents in one go */
unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
- unsigned enr;
- bool locked = false;
+ D_ASSERT((unsigned)(last - first) <= 1);
+ D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
+
+ /* FIXME figure out a fast path for bios crossing AL extent boundaries */
+ if (first != last)
+ return false;
+
+ return _al_get(mdev, first, true);
+}
+
+bool drbd_al_begin_io_prepare(struct drbd_conf *mdev, struct drbd_interval *i)
+{
+ /* for bios crossing activity log extent boundaries,
+ * we may need to activate two extents in one go */
+ unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
+ unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
+ unsigned enr;
+ bool need_transaction = false;
D_ASSERT(first <= last);
D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
- for (enr = first; enr <= last; enr++)
- wait_event(mdev->al_wait, _al_get(mdev, enr) != NULL);
+ for (enr = first; enr <= last; enr++) {
+ struct lc_element *al_ext;
+ wait_event(mdev->al_wait,
+ (al_ext = _al_get(mdev, enr, false)) != NULL);
+ if (al_ext->lc_number != enr)
+ need_transaction = true;
+ }
+ return need_transaction;
+}
+
+static int al_write_transaction(struct drbd_conf *mdev, bool delegate);
+
+/* When called through generic_make_request(), we must delegate
+ * activity log I/O to the worker thread: a further request
+ * submitted via generic_make_request() within the same task
+ * would be queued on current->bio_list, and would only start
+ * after this function returns (see generic_make_request()).
+ *
+ * However, if we *are* the worker, we must not delegate to ourselves.
+ */
+
+/*
+ * @delegate: delegate activity log I/O to the worker thread
+ */
+void drbd_al_begin_io_commit(struct drbd_conf *mdev, bool delegate)
+{
+ bool locked = false;
+
+ BUG_ON(delegate && current == mdev->tconn->worker.task);
/* Serialize multiple transactions.
* This uses test_and_set_bit, memory barrier is implicit.
@@ -264,13 +324,6 @@ void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i)
(locked = lc_try_lock_for_transaction(mdev->act_log)));
if (locked) {
- /* drbd_al_write_transaction(mdev,al_ext,enr);
- * recurses into generic_make_request(), which
- * disallows recursion, bios being serialized on the
- * current->bio_tail list now.
- * we have to delegate updates to the activity log
- * to the worker thread. */
-
/* Double check: it may have been committed by someone else,
* while we have been waiting for the lock. */
if (mdev->act_log->pending_changes) {
@@ -280,11 +333,8 @@ void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i)
write_al_updates = rcu_dereference(mdev->ldev->disk_conf)->al_updates;
rcu_read_unlock();
- if (write_al_updates) {
- al_write_transaction(mdev);
- mdev->al_writ_cnt++;
- }
-
+ if (write_al_updates)
+ al_write_transaction(mdev, delegate);
spin_lock_irq(&mdev->al_lock);
/* FIXME
if (err)
@@ -298,6 +348,66 @@ void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i)
}
}
+/*
+ * @delegate: delegate activity log I/O to the worker thread
+ */
+void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i, bool delegate)
+{
+ BUG_ON(delegate && current == mdev->tconn->worker.task);
+
+ if (drbd_al_begin_io_prepare(mdev, i))
+ drbd_al_begin_io_commit(mdev, delegate);
+}
+
+int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i)
+{
+ struct lru_cache *al = mdev->act_log;
+ /* for bios crossing activity log extent boundaries,
+ * we may need to activate two extents in one go */
+ unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
+ unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
+ unsigned nr_al_extents;
+ unsigned available_update_slots;
+ unsigned enr;
+
+ D_ASSERT(first <= last);
+
+ nr_al_extents = 1 + last - first; /* worst case: all touched extents are cold. */
+ available_update_slots = min(al->nr_elements - al->used,
+ al->max_pending_changes - al->pending_changes);
+
+ /* We want all necessary updates for a given request within the same transaction
+ * We could first check how many updates are *actually* needed,
+ * and use that instead of the worst-case nr_al_extents */
+ if (available_update_slots < nr_al_extents)
+ return -EWOULDBLOCK;
+
+ /* Is resync active in this area? */
+ for (enr = first; enr <= last; enr++) {
+ struct lc_element *tmp;
+ tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
+ if (unlikely(tmp != NULL)) {
+ struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
+ if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
+ if (!test_and_set_bit(BME_PRIORITY, &bm_ext->flags))
+ return -EBUSY;
+ return -EWOULDBLOCK;
+ }
+ }
+ }
+
+ /* Checkout the refcounts.
+ * Given that we checked for available elements and update slots above,
+ * this has to be successful. */
+ for (enr = first; enr <= last; enr++) {
+ struct lc_element *al_ext;
+ al_ext = lc_get_cumulative(mdev->act_log, enr);
+ if (!al_ext)
+ dev_info(DEV, "LOGIC BUG for enr=%u\n", enr);
+ }
+ return 0;
+}
+
void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i)
{
/* for bios crossing activity log extent boundaries,
@@ -350,6 +460,24 @@ static unsigned int rs_extent_to_bm_page(unsigned int rs_enr)
(BM_EXT_SHIFT - BM_BLOCK_SHIFT));
}
+static sector_t al_tr_number_to_on_disk_sector(struct drbd_conf *mdev)
+{
+ const unsigned int stripes = mdev->ldev->md.al_stripes;
+ const unsigned int stripe_size_4kB = mdev->ldev->md.al_stripe_size_4k;
+
+ /* transaction number, modulo on-disk ring buffer wrap around */
+ unsigned int t = mdev->al_tr_number % (mdev->ldev->md.al_size_4k);
+
+ /* ... to aligned 4k on disk block */
+ t = ((t % stripes) * stripe_size_4kB) + t/stripes;
+
+ /* ... to 512 byte sector in activity log */
+ t *= 8;
+
+ /* ... plus offset to the on disk position */
+ return mdev->ldev->md.md_offset + mdev->ldev->md.al_offset + t;
+}
+
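
Worked example of the striped mapping above, with hypothetical values
al_stripes = 4 and al_stripe_size_4k = 2 (an 8-transaction ring):
transaction number 5 lands in 4k block (5 % 4) * 2 + 5 / 4 = 3, i.e. at
sector offset 3 * 8 = 24 within the activity log area. A user-space sketch
of the same arithmetic:

	#include <assert.h>

	static unsigned map_4k_block(unsigned tr_number,
				     unsigned stripes, unsigned stripe_size_4k)
	{
		unsigned t = tr_number % (stripes * stripe_size_4k);

		return (t % stripes) * stripe_size_4k + t / stripes;
	}

	int main(void)
	{
		assert(map_4k_block(5, 4, 2) == 3);	/* * 8 ==> sector offset 24 */
		return 0;
	}
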
static int
_al_write_transaction(struct drbd_conf *mdev)
{
@@ -432,23 +560,27 @@ _al_write_transaction(struct drbd_conf *mdev)
if (mdev->al_tr_cycle >= mdev->act_log->nr_elements)
mdev->al_tr_cycle = 0;
- sector = mdev->ldev->md.md_offset
- + mdev->ldev->md.al_offset
- + mdev->al_tr_pos * (MD_BLOCK_SIZE>>9);
+ sector = al_tr_number_to_on_disk_sector(mdev);
crc = crc32c(0, buffer, 4096);
buffer->crc32c = cpu_to_be32(crc);
if (drbd_bm_write_hinted(mdev))
err = -EIO;
- /* drbd_chk_io_error done already */
- else if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
- err = -EIO;
- drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
- } else {
- /* advance ringbuffer position and transaction counter */
- mdev->al_tr_pos = (mdev->al_tr_pos + 1) % (MD_AL_SECTORS*512/MD_BLOCK_SIZE);
- mdev->al_tr_number++;
+ else {
+ bool write_al_updates;
+ rcu_read_lock();
+ write_al_updates = rcu_dereference(mdev->ldev->disk_conf)->al_updates;
+ rcu_read_unlock();
+ if (write_al_updates) {
+ if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
+ err = -EIO;
+ drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
+ } else {
+ mdev->al_tr_number++;
+ mdev->al_writ_cnt++;
+ }
+ }
}
drbd_md_put_buffer(mdev);
@@ -474,20 +606,18 @@ static int w_al_write_transaction(struct drbd_work *w, int unused)
/* Calls from worker context (see w_restart_disk_io()) need to write the
transaction directly. Others came through generic_make_request(),
those need to delegate it to the worker. */
-static int al_write_transaction(struct drbd_conf *mdev)
+static int al_write_transaction(struct drbd_conf *mdev, bool delegate)
{
- struct update_al_work al_work;
-
- if (current == mdev->tconn->worker.task)
+ if (delegate) {
+ struct update_al_work al_work;
+ init_completion(&al_work.event);
+ al_work.w.cb = w_al_write_transaction;
+ al_work.w.mdev = mdev;
+ drbd_queue_work_front(&mdev->tconn->sender_work, &al_work.w);
+ wait_for_completion(&al_work.event);
+ return al_work.err;
+ } else
return _al_write_transaction(mdev);
-
- init_completion(&al_work.event);
- al_work.w.cb = w_al_write_transaction;
- al_work.w.mdev = mdev;
- drbd_queue_work_front(&mdev->tconn->sender_work, &al_work.w);
- wait_for_completion(&al_work.event);
-
- return al_work.err;
}
static int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 8dc29502dc0..64fbb8385cd 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -612,6 +612,17 @@ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
}
}
+/* For the layout, see comment above drbd_md_set_sector_offsets(). */
+static u64 drbd_md_on_disk_bits(struct drbd_backing_dev *ldev)
+{
+ u64 bitmap_sectors;
+ if (ldev->md.al_offset == 8)
+ bitmap_sectors = ldev->md.md_size_sect - ldev->md.bm_offset;
+ else
+ bitmap_sectors = ldev->md.al_offset - ldev->md.bm_offset;
+ return bitmap_sectors << (9 + 3);
+}
+
/*
* make sure the bitmap has enough room for the attached storage,
* if necessary, resize.
@@ -668,7 +679,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
words = ALIGN(bits, 64) >> LN2_BPL;
if (get_ldev(mdev)) {
- u64 bits_on_disk = ((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12;
+ u64 bits_on_disk = drbd_md_on_disk_bits(mdev->ldev);
put_ldev(mdev);
if (bits > bits_on_disk) {
dev_info(DEV, "bits = %lu\n", bits);
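
The << (9 + 3) in drbd_md_on_disk_bits() converts sectors to bits: << 9
for 512 bytes per sector, << 3 for 8 bits per byte, so each bitmap sector
covers 4096 bits. As a sketch:

	/* sectors -> bits: 1 sector = 512 bytes = 512 * 8 bits */
	static inline unsigned long long sectors_to_bits(unsigned long long sectors)
	{
		return sectors << (9 + 3);
	}
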
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 6b51afa1aae..f943aacfdad 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -753,13 +753,16 @@ struct drbd_md {
u32 flags;
u32 md_size_sect;
- s32 al_offset; /* signed relative sector offset to al area */
+ s32 al_offset; /* signed relative sector offset to activity log */
s32 bm_offset; /* signed relative sector offset to bitmap */
- /* u32 al_nr_extents; important for restoring the AL
- * is stored into ldev->dc.al_extents, which in turn
- * gets applied to act_log->nr_elements
- */
+ /* cached value of bdev->disk_conf->meta_dev_idx (see below) */
+ s32 meta_dev_idx;
+
+ /* see al_tr_number_to_on_disk_sector() */
+ u32 al_stripes;
+ u32 al_stripe_size_4k;
+ u32 al_size_4k; /* cached product of the above */
};
struct drbd_backing_dev {
@@ -891,6 +894,14 @@ struct drbd_tconn { /* is a resource from the config file */
} send;
};
+struct submit_worker {
+ struct workqueue_struct *wq;
+ struct work_struct worker;
+
+ spinlock_t lock;
+ struct list_head writes;
+};
+
struct drbd_conf {
struct drbd_tconn *tconn;
int vnr; /* volume number within the connection */
@@ -1009,7 +1020,6 @@ struct drbd_conf {
struct lru_cache *act_log; /* activity log */
unsigned int al_tr_number;
int al_tr_cycle;
- int al_tr_pos; /* position of the next transaction in the journal */
wait_queue_head_t seq_wait;
atomic_t packet_seq;
unsigned int peer_seq;
@@ -1032,6 +1042,10 @@ struct drbd_conf {
atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
unsigned int peer_max_bio_size;
unsigned int local_max_bio_size;
+
+ /* any requests that would block in drbd_make_request()
+ * are deferred to this single-threaded work queue */
+ struct submit_worker submit;
};
static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@@ -1148,25 +1162,44 @@ extern int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
char *why, enum bm_flag flags);
extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
-extern void drbd_go_diskless(struct drbd_conf *mdev);
extern void drbd_ldev_destroy(struct drbd_conf *mdev);
/* Meta data layout
- We reserve a 128MB Block (4k aligned)
- * either at the end of the backing device
- * or on a separate meta data device. */
+ *
+ * We currently have two possible layouts.
+ * Offsets in (512 byte) sectors.
+ * external:
+ * |----------- md_size_sect ------------------|
+ * [ 4k superblock ][ activity log ][ Bitmap ]
+ * | al_offset == 8 |
+ * | bm_offset = al_offset + X |
+ * ==> bitmap sectors = md_size_sect - bm_offset
+ *
+ * Variants:
+ * old, indexed fixed size meta data:
+ *
+ * internal:
+ * |----------- md_size_sect ------------------|
+ * [data.....][ Bitmap ][ activity log ][ 4k superblock ][padding*]
+ * | al_offset < 0 |
+ * | bm_offset = al_offset - Y |
+ * ==> bitmap sectors = Y = al_offset - bm_offset
+ *
+ * [padding*] are zero or up to 7 unused 512 Byte sectors to the
+ * end of the device, so that the [4k superblock] will be 4k aligned.
+ *
+ * The activity log consists of 4k transaction blocks,
+ * which are written in a ring-buffer, or striped ring-buffer like fashion,
+ * whose size used to be fixed to 32kB,
+ * but is about to become configurable.
+ */
-/* The following numbers are sectors */
-/* Allows up to about 3.8TB, so if you want more,
+/* Our old fixed size meta data layout
+ * allows up to about 3.8TB, so if you want more,
* you need to use the "flexible" meta data format. */
-#define MD_RESERVED_SECT (128LU << 11) /* 128 MB, unit sectors */
-#define MD_AL_OFFSET 8 /* 8 Sectors after start of meta area */
-#define MD_AL_SECTORS 64 /* = 32 kB on disk activity log ring buffer */
-#define MD_BM_OFFSET (MD_AL_OFFSET + MD_AL_SECTORS)
-
-/* we do all meta data IO in 4k blocks */
-#define MD_BLOCK_SHIFT 12
-#define MD_BLOCK_SIZE (1<<MD_BLOCK_SHIFT)
+#define MD_128MB_SECT (128LLU << 11) /* 128 MB, unit sectors */
+#define MD_4kB_SECT 8
+#define MD_32kB_SECT 64
/* One activity log extent represents 4M of storage */
#define AL_EXTENT_SHIFT 22
@@ -1256,7 +1289,6 @@ struct bm_extent {
/* in one sector of the bitmap, we have this many activity_log extents. */
#define AL_EXT_PER_BM_SECT (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
-#define BM_WORDS_PER_AL_EXT (1 << (AL_EXTENT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
#define BM_BLOCKS_PER_BM_EXT_B (BM_EXT_SHIFT - BM_BLOCK_SHIFT)
#define BM_BLOCKS_PER_BM_EXT_MASK ((1<<BM_BLOCKS_PER_BM_EXT_B) - 1)
@@ -1276,16 +1308,18 @@ struct bm_extent {
*/
#define DRBD_MAX_SECTORS_32 (0xffffffffLU)
-#define DRBD_MAX_SECTORS_BM \
- ((MD_RESERVED_SECT - MD_BM_OFFSET) * (1LL<<(BM_EXT_SHIFT-9)))
-#if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32
-#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_BM
-#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM
-#elif !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
+/* we have a certain meta data variant that has a fixed on-disk size of 128
+ * MiB, of which 4k are our "superblock", and 32k are the fixed size activity
+ * log, leaving this many sectors for the bitmap.
+ */
+
+#define DRBD_MAX_SECTORS_FIXED_BM \
+ ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
+#if !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_32
#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
#else
-#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_BM
+#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_FIXED_BM
/* 16 TB in units of sectors */
#if BITS_PER_LONG == 32
/* adjust by one page worth of bitmap,
@@ -1418,6 +1452,7 @@ extern void conn_free_crypto(struct drbd_tconn *tconn);
extern int proc_details;
/* drbd_req */
+extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_conf *, struct bio *, unsigned long);
extern void drbd_make_request(struct request_queue *q, struct bio *bio);
extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
@@ -1576,7 +1611,10 @@ extern const char *drbd_conn_str(enum drbd_conns s);
extern const char *drbd_role_str(enum drbd_role s);
/* drbd_actlog.c */
-extern void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i);
+extern int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i);
+extern void drbd_al_begin_io_commit(struct drbd_conf *mdev, bool delegate);
+extern bool drbd_al_begin_io_fastpath(struct drbd_conf *mdev, struct drbd_interval *i);
+extern void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i, bool delegate);
extern void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i);
extern void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector);
extern int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
@@ -1755,9 +1793,9 @@ static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
* BTW, for internal meta data, this happens to be the maximum capacity
* we could agree upon with our peer node.
*/
-static inline sector_t _drbd_md_first_sector(int meta_dev_idx, struct drbd_backing_dev *bdev)
+static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
{
- switch (meta_dev_idx) {
+ switch (bdev->md.meta_dev_idx) {
case DRBD_MD_INDEX_INTERNAL:
case DRBD_MD_INDEX_FLEX_INT:
return bdev->md.md_offset + bdev->md.bm_offset;
@@ -1767,36 +1805,19 @@ static inline sector_t _drbd_md_first_sector(int meta_dev_idx, struct drbd_backi
}
}
-static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
-{
- int meta_dev_idx;
-
- rcu_read_lock();
- meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
- rcu_read_unlock();
-
- return _drbd_md_first_sector(meta_dev_idx, bdev);
-}
-
/**
* drbd_md_last_sector() - Return the last sector number of the meta data area
* @bdev: Meta data block device.
*/
static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
{
- int meta_dev_idx;
-
- rcu_read_lock();
- meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
- rcu_read_unlock();
-
- switch (meta_dev_idx) {
+ switch (bdev->md.meta_dev_idx) {
case DRBD_MD_INDEX_INTERNAL:
case DRBD_MD_INDEX_FLEX_INT:
- return bdev->md.md_offset + MD_AL_OFFSET - 1;
+ return bdev->md.md_offset + MD_4kB_SECT -1;
case DRBD_MD_INDEX_FLEX_EXT:
default:
- return bdev->md.md_offset + bdev->md.md_size_sect;
+ return bdev->md.md_offset + bdev->md.md_size_sect -1;
}
}
@@ -1818,18 +1839,13 @@ static inline sector_t drbd_get_capacity(struct block_device *bdev)
static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
{
sector_t s;
- int meta_dev_idx;
- rcu_read_lock();
- meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
- rcu_read_unlock();
-
- switch (meta_dev_idx) {
+ switch (bdev->md.meta_dev_idx) {
case DRBD_MD_INDEX_INTERNAL:
case DRBD_MD_INDEX_FLEX_INT:
s = drbd_get_capacity(bdev->backing_bdev)
? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
- _drbd_md_first_sector(meta_dev_idx, bdev))
+ drbd_md_first_sector(bdev))
: 0;
break;
case DRBD_MD_INDEX_FLEX_EXT:
@@ -1848,39 +1864,24 @@ static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
}
/**
- * drbd_md_ss__() - Return the sector number of our meta data super block
- * @mdev: DRBD device.
+ * drbd_md_ss() - Return the sector number of our meta data super block
* @bdev: Meta data block device.
*/
-static inline sector_t drbd_md_ss__(struct drbd_conf *mdev,
- struct drbd_backing_dev *bdev)
+static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
{
- int meta_dev_idx;
+ const int meta_dev_idx = bdev->md.meta_dev_idx;
- rcu_read_lock();
- meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
- rcu_read_unlock();
-
- switch (meta_dev_idx) {
- default: /* external, some index */
- return MD_RESERVED_SECT * meta_dev_idx;
- case DRBD_MD_INDEX_INTERNAL:
- /* with drbd08, internal meta data is always "flexible" */
- case DRBD_MD_INDEX_FLEX_INT:
- /* sizeof(struct md_on_disk_07) == 4k
- * position: last 4k aligned block of 4k size */
- if (!bdev->backing_bdev) {
- if (__ratelimit(&drbd_ratelimit_state)) {
- dev_err(DEV, "bdev->backing_bdev==NULL\n");
- dump_stack();
- }
- return 0;
- }
- return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL)
- - MD_AL_OFFSET;
- case DRBD_MD_INDEX_FLEX_EXT:
+ if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
return 0;
- }
+
+ /* Since drbd08, internal meta data is always "flexible".
+ * position: last 4k aligned block of 4k size */
+ if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
+ meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
+ return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;
+
+ /* external, some index; this is the old fixed size layout */
+ return MD_128MB_SECT * bdev->md.meta_dev_idx;
}
static inline void
@@ -2053,9 +2054,11 @@ static inline void put_ldev(struct drbd_conf *mdev)
if (mdev->state.disk == D_DISKLESS)
/* even internal references gone, safe to destroy */
drbd_ldev_destroy(mdev);
- if (mdev->state.disk == D_FAILED)
+ if (mdev->state.disk == D_FAILED) {
/* all application IO references gone. */
- drbd_go_diskless(mdev);
+ if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
+ }
wake_up(&mdev->misc_wait);
}
}
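
For the internal variants, drbd_md_ss() above places the superblock in the
last 4k-aligned 4k block of the backing device: masking with ~7ULL rounds
the capacity down to a multiple of 8 sectors (4k), then one more 4k block
(8 sectors) is subtracted. A user-space sketch with a hypothetical
capacity:

	#include <assert.h>

	static unsigned long long md_superblock_sector(unsigned long long capacity)
	{
		return (capacity & ~7ULL) - 8;	/* 8 sectors == one 4k block */
	}

	int main(void)
	{
		assert(md_superblock_sector(1000007ULL) == 999992ULL);
		return 0;
	}
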
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index e98da675f0c..a5dca6affcb 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -45,7 +45,7 @@
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
-
+#include <linux/workqueue.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
@@ -63,7 +63,7 @@ int drbd_asender(struct drbd_thread *);
int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
-static int drbd_release(struct gendisk *gd, fmode_t mode);
+static void drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_work *w, int unused);
@@ -1849,13 +1849,12 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
return rv;
}
-static int drbd_release(struct gendisk *gd, fmode_t mode)
+static void drbd_release(struct gendisk *gd, fmode_t mode)
{
struct drbd_conf *mdev = gd->private_data;
mutex_lock(&drbd_main_mutex);
mdev->open_cnt--;
mutex_unlock(&drbd_main_mutex);
- return 0;
}
static void drbd_set_defaults(struct drbd_conf *mdev)
@@ -2300,6 +2299,7 @@ static void drbd_cleanup(void)
idr_for_each_entry(&minors, mdev, i) {
idr_remove(&minors, mdev_to_minor(mdev));
idr_remove(&mdev->tconn->volumes, mdev->vnr);
+ destroy_workqueue(mdev->submit.wq);
del_gendisk(mdev->vdisk);
/* synchronize_rcu(); No other threads running at this point */
kref_put(&mdev->kref, &drbd_minor_destroy);
@@ -2589,6 +2589,21 @@ void conn_destroy(struct kref *kref)
kfree(tconn);
}
+int init_submitter(struct drbd_conf *mdev)
+{
+ /* opencoded create_singlethread_workqueue(),
+ * to be able to say "drbd%d", ..., minor */
+ mdev->submit.wq = alloc_workqueue("drbd%u_submit",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 1, mdev->minor);
+ if (!mdev->submit.wq)
+ return -ENOMEM;
+
+ INIT_WORK(&mdev->submit.worker, do_submit);
+ spin_lock_init(&mdev->submit.lock);
+ INIT_LIST_HEAD(&mdev->submit.writes);
+ return 0;
+}
+
enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
{
struct drbd_conf *mdev;
@@ -2678,6 +2693,12 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
goto out_idr_remove_minor;
}
+ if (init_submitter(mdev)) {
+ err = ERR_NOMEM;
+ drbd_msg_put_info("unable to create submit workqueue");
+ goto out_idr_remove_vol;
+ }
+
add_disk(disk);
kref_init(&mdev->kref); /* one ref for both idrs and the add_disk */
@@ -2688,6 +2709,8 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
return NO_ERROR;
+out_idr_remove_vol:
+ idr_remove(&tconn->volumes, vnr_got);
out_idr_remove_minor:
idr_remove(&minors, minor_got);
synchronize_rcu();
@@ -2795,6 +2818,7 @@ void drbd_free_bc(struct drbd_backing_dev *ldev)
blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+ kfree(ldev->disk_conf);
kfree(ldev);
}
@@ -2834,8 +2858,9 @@ void conn_md_sync(struct drbd_tconn *tconn)
rcu_read_unlock();
}
+/* aligned 4kByte */
struct meta_data_on_disk {
- u64 la_size; /* last agreed size. */
+ u64 la_size_sect; /* last agreed size. */
u64 uuid[UI_SIZE]; /* UUIDs. */
u64 device_uuid;
u64 reserved_u64_1;
@@ -2843,13 +2868,17 @@ struct meta_data_on_disk {
u32 magic;
u32 md_size_sect;
u32 al_offset; /* offset to this block */
- u32 al_nr_extents; /* important for restoring the AL */
+ u32 al_nr_extents; /* important for restoring the AL (userspace) */
/* `-- act_log->nr_elements <-- ldev->dc.al_extents */
u32 bm_offset; /* offset to the bitmap, from here */
u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
u32 la_peer_max_bio_size; /* last peer max_bio_size */
- u32 reserved_u32[3];
+ /* see al_tr_number_to_on_disk_sector() */
+ u32 al_stripes;
+ u32 al_stripe_size_4k;
+
+ u8 reserved_u8[4096 - (7*8 + 10*4)];
} __packed;
/**
@@ -2862,6 +2891,10 @@ void drbd_md_sync(struct drbd_conf *mdev)
sector_t sector;
int i;
+ /* Don't accidentally change the DRBD meta data layout. */
+ BUILD_BUG_ON(UI_SIZE != 4);
+ BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
+
del_timer(&mdev->md_sync_timer);
/* timer may be rearmed by drbd_md_mark_dirty() now. */
if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
@@ -2876,9 +2909,9 @@ void drbd_md_sync(struct drbd_conf *mdev)
if (!buffer)
goto out;
- memset(buffer, 0, 512);
+ memset(buffer, 0, sizeof(*buffer));
- buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
+ buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
for (i = UI_CURRENT; i < UI_SIZE; i++)
buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
@@ -2893,7 +2926,10 @@ void drbd_md_sync(struct drbd_conf *mdev)
buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);
- D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
+ buffer->al_stripes = cpu_to_be32(mdev->ldev->md.al_stripes);
+ buffer->al_stripe_size_4k = cpu_to_be32(mdev->ldev->md.al_stripe_size_4k);
+
+ D_ASSERT(drbd_md_ss(mdev->ldev) == mdev->ldev->md.md_offset);
sector = mdev->ldev->md.md_offset;
if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
@@ -2911,13 +2947,141 @@ out:
put_ldev(mdev);
}
+static int check_activity_log_stripe_size(struct drbd_conf *mdev,
+ struct meta_data_on_disk *on_disk,
+ struct drbd_md *in_core)
+{
+ u32 al_stripes = be32_to_cpu(on_disk->al_stripes);
+ u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k);
+ u64 al_size_4k;
+
+ /* both not set: default to old fixed size activity log */
+ if (al_stripes == 0 && al_stripe_size_4k == 0) {
+ al_stripes = 1;
+ al_stripe_size_4k = MD_32kB_SECT/8;
+ }
+
+ /* some paranoia plausibility checks */
+
+ /* we need both values to be set */
+ if (al_stripes == 0 || al_stripe_size_4k == 0)
+ goto err;
+
+ al_size_4k = (u64)al_stripes * al_stripe_size_4k;
+
+ /* Upper limit of activity log area, to avoid potential overflow
+ * problems in al_tr_number_to_on_disk_sector(). As right now, more
+ * than 72 * 4k blocks total only increases the amount of history,
+ * limiting this arbitrarily to 16 GB is not a real limitation ;-) */
+ if (al_size_4k > (16 * 1024 * 1024/4))
+ goto err;
+
+ /* Lower limit: we need at least 8 transaction slots (32kB)
+ * to not break existing setups */
+ if (al_size_4k < MD_32kB_SECT/8)
+ goto err;
+
+ in_core->al_stripe_size_4k = al_stripe_size_4k;
+ in_core->al_stripes = al_stripes;
+ in_core->al_size_4k = al_size_4k;
+
+ return 0;
+err:
+ dev_err(DEV, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
+ al_stripes, al_stripe_size_4k);
+ return -EINVAL;
+}
+
+static int check_offsets_and_sizes(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+{
+ sector_t capacity = drbd_get_capacity(bdev->md_bdev);
+ struct drbd_md *in_core = &bdev->md;
+ s32 on_disk_al_sect;
+ s32 on_disk_bm_sect;
+
+ /* The on-disk size of the activity log, calculated from offsets, and
+ * the size of the activity log calculated from the stripe settings,
+ * should match.
+ * Though we could relax this a bit: it is ok, if the striped activity log
+ * fits in the available on-disk activity log size.
+ * Right now, that would break how resize is implemented.
+ * TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware
+ * of possible unused padding space in the on disk layout. */
+ if (in_core->al_offset < 0) {
+ if (in_core->bm_offset > in_core->al_offset)
+ goto err;
+ on_disk_al_sect = -in_core->al_offset;
+ on_disk_bm_sect = in_core->al_offset - in_core->bm_offset;
+ } else {
+ if (in_core->al_offset != MD_4kB_SECT)
+ goto err;
+ if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT)
+ goto err;
+
+ on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT;
+ on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset;
+ }
+
+ /* old fixed size meta data is exactly that: fixed. */
+ if (in_core->meta_dev_idx >= 0) {
+ if (in_core->md_size_sect != MD_128MB_SECT
+ || in_core->al_offset != MD_4kB_SECT
+ || in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT
+ || in_core->al_stripes != 1
+ || in_core->al_stripe_size_4k != MD_32kB_SECT/8)
+ goto err;
+ }
+
+ if (capacity < in_core->md_size_sect)
+ goto err;
+ if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev))
+ goto err;
+
+ /* should be aligned, and at least 32k */
+ if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT))
+ goto err;
+
+ /* should fit (for now: exactly) into the available on-disk space;
+ * overflow prevention is in check_activity_log_stripe_size() above. */
+ if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT)
+ goto err;
+
+ /* again, should be aligned */
+ if (in_core->bm_offset & 7)
+ goto err;
+
+ /* FIXME check for device grow with flex external meta data? */
+
+ /* can the available bitmap space cover the last agreed device size? */
+ if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512)
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(DEV, "meta data offsets don't make sense: idx=%d "
+ "al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
+ "md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
+ in_core->meta_dev_idx,
+ in_core->al_stripes, in_core->al_stripe_size_4k,
+ in_core->al_offset, in_core->bm_offset, in_core->md_size_sect,
+ (unsigned long long)in_core->la_size_sect,
+ (unsigned long long)capacity);
+
+ return -EINVAL;
+}
+
+
/**
* drbd_md_read() - Reads in the meta data super block
* @mdev: DRBD device.
* @bdev: Device from which the meta data should be read in.
*
- * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
+ * Return NO_ERROR on success, and an enum drbd_ret_code in case
* something goes wrong.
+ *
+ * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
+ * even before @bdev is assigned to @mdev->ldev.
*/
int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
@@ -2925,12 +3089,17 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
u32 magic, flags;
int i, rv = NO_ERROR;
- if (!get_ldev_if_state(mdev, D_ATTACHING))
- return ERR_IO_MD_DISK;
+ if (mdev->state.disk != D_DISKLESS)
+ return ERR_DISK_CONFIGURED;
buffer = drbd_md_get_buffer(mdev);
if (!buffer)
- goto out;
+ return ERR_NOMEM;
+
+ /* First, figure out where our meta data superblock is located,
+ * and read it. */
+ bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
+ bdev->md.md_offset = drbd_md_ss(bdev);
if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
/* NOTE: can't do normal error processing here as this is
@@ -2949,45 +3118,51 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
rv = ERR_MD_UNCLEAN;
goto err;
}
+
+ rv = ERR_MD_INVALID;
if (magic != DRBD_MD_MAGIC_08) {
if (magic == DRBD_MD_MAGIC_07)
dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
else
dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
- rv = ERR_MD_INVALID;
goto err;
}
- if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
- dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
- be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
- rv = ERR_MD_INVALID;
+
+ if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
+ dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
+ be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
goto err;
}
+
+
+ /* convert to in_core endian */
+ bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
+ for (i = UI_CURRENT; i < UI_SIZE; i++)
+ bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
+ bdev->md.flags = be32_to_cpu(buffer->flags);
+ bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
+
+ bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
+ bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
+ bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);
+
+ if (check_activity_log_stripe_size(mdev, buffer, &bdev->md))
+ goto err;
+ if (check_offsets_and_sizes(mdev, bdev))
+ goto err;
+
if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
- rv = ERR_MD_INVALID;
goto err;
}
if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
- rv = ERR_MD_INVALID;
goto err;
}
- if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
- dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
- be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
- rv = ERR_MD_INVALID;
- goto err;
- }
-
- bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
- for (i = UI_CURRENT; i < UI_SIZE; i++)
- bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
- bdev->md.flags = be32_to_cpu(buffer->flags);
- bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
+ rv = NO_ERROR;
spin_lock_irq(&mdev->tconn->req_lock);
if (mdev->state.conn < C_CONNECTED) {
@@ -3000,8 +3175,6 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
err:
drbd_md_put_buffer(mdev);
- out:
- put_ldev(mdev);
return rv;
}
@@ -3239,8 +3412,12 @@ static int w_go_diskless(struct drbd_work *w, int unused)
* end up here after a failed attach, before ldev was even assigned.
*/
if (mdev->bitmap && mdev->ldev) {
+ /* An interrupted resync or similar is allowed to recount bits
+ * while we detach.
+ * Any modifications would not be expected anymore, though.
+ */
if (drbd_bitmap_io_from_worker(mdev, drbd_bm_write,
- "detach", BM_LOCKED_MASK)) {
+ "detach", BM_LOCKED_TEST_ALLOWED)) {
if (test_bit(WAS_READ_ERROR, &mdev->flags)) {
drbd_md_set_flag(mdev, MDF_FULL_SYNC);
drbd_md_sync(mdev);
@@ -3252,13 +3429,6 @@ static int w_go_diskless(struct drbd_work *w, int unused)
return 0;
}
-void drbd_go_diskless(struct drbd_conf *mdev)
-{
- D_ASSERT(mdev->state.disk == D_FAILED);
- if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
- drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
-}
-
/**
* drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
* @mdev: DRBD device.
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 2af26fc9528..9e3f441e7e8 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -696,37 +696,52 @@ out:
return 0;
}
-/* initializes the md.*_offset members, so we are able to find
- * the on disk meta data */
+/* Initializes the md.*_offset members, so we are able to find
+ * the on disk meta data.
+ *
+ * We currently have two possible layouts:
+ * external:
+ * |----------- md_size_sect ------------------|
+ * [ 4k superblock ][ activity log ][ Bitmap ]
+ * | al_offset == 8 |
+ * | bm_offset = al_offset + X |
+ * ==> bitmap sectors = md_size_sect - bm_offset
+ *
+ * internal:
+ * |----------- md_size_sect ------------------|
+ * [data.....][ Bitmap ][ activity log ][ 4k superblock ]
+ * | al_offset < 0 |
+ * | bm_offset = al_offset - Y |
+ * ==> bitmap sectors = Y = al_offset - bm_offset
+ *
+ * Activity log size used to be fixed 32kB,
+ * but is about to become configurable.
+ */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
struct drbd_backing_dev *bdev)
{
sector_t md_size_sect = 0;
- int meta_dev_idx;
+ unsigned int al_size_sect = bdev->md.al_size_4k * 8;
- rcu_read_lock();
- meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
+ bdev->md.md_offset = drbd_md_ss(bdev);
- switch (meta_dev_idx) {
+ switch (bdev->md.meta_dev_idx) {
default:
/* v07 style fixed size indexed meta data */
- bdev->md.md_size_sect = MD_RESERVED_SECT;
- bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
- bdev->md.al_offset = MD_AL_OFFSET;
- bdev->md.bm_offset = MD_BM_OFFSET;
+ bdev->md.md_size_sect = MD_128MB_SECT;
+ bdev->md.al_offset = MD_4kB_SECT;
+ bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
break;
case DRBD_MD_INDEX_FLEX_EXT:
/* just occupy the full device; unit: sectors */
bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
- bdev->md.md_offset = 0;
- bdev->md.al_offset = MD_AL_OFFSET;
- bdev->md.bm_offset = MD_BM_OFFSET;
+ bdev->md.al_offset = MD_4kB_SECT;
+ bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
break;
case DRBD_MD_INDEX_INTERNAL:
case DRBD_MD_INDEX_FLEX_INT:
- bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
/* al size is still fixed */
- bdev->md.al_offset = -MD_AL_SECTORS;
+ bdev->md.al_offset = -al_size_sect;
/* we need (slightly less than) ~ this much bitmap sectors: */
md_size_sect = drbd_get_capacity(bdev->backing_bdev);
md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
@@ -735,14 +750,13 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
/* plus the "drbd meta data super block",
* and the activity log; */
- md_size_sect += MD_BM_OFFSET;
+ md_size_sect += MD_4kB_SECT + al_size_sect;
bdev->md.md_size_sect = md_size_sect;
/* bitmap offset is adjusted by 'super' block size */
- bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
+ bdev->md.bm_offset = -md_size_sect + MD_4kB_SECT;
break;
}
- rcu_read_unlock();
}
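
For concreteness, a minimal sketch (not part of the patch) of the internal-layout arithmetic described in the comment above, assuming 512-byte sectors and that MD_4kB_SECT covers the 4k superblock; the helper name and the bitmap_sectors parameter are assumptions for illustration:

	/* Sketch only: relate the internal meta data offsets.
	 * bitmap_sectors stands in for the aligned bitmap size
	 * derived from the backing device capacity. */
	static void example_internal_md_offsets(struct drbd_md *md,
						sector_t bitmap_sectors)
	{
		/* 4k AL blocks -> 512-byte sectors */
		unsigned int al_size_sect = md->al_size_4k * 8;

		md->al_offset = -(int)al_size_sect;	/* AL right before the superblock */
		md->bm_offset = md->al_offset - (int)bitmap_sectors; /* bitmap before the AL */
		/* total reserved: bitmap + activity log + 4k superblock */
		md->md_size_sect = bitmap_sectors + al_size_sect + MD_4kB_SECT;
	}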
/* input size is expected to be in KB */
@@ -805,7 +819,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
sector_t prev_first_sect, prev_size; /* previous meta location */
- sector_t la_size, u_size;
+ sector_t la_size_sect, u_size;
sector_t size;
char ppb[10];
@@ -828,7 +842,7 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
prev_first_sect = drbd_md_first_sector(mdev->ldev);
prev_size = mdev->ldev->md.md_size_sect;
- la_size = mdev->ldev->md.la_size_sect;
+ la_size_sect = mdev->ldev->md.la_size_sect;
/* TODO: should only be some assert here, not (re)init... */
drbd_md_set_sector_offsets(mdev, mdev->ldev);
@@ -864,7 +878,7 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
if (rv == dev_size_error)
goto out;
- la_size_changed = (la_size != mdev->ldev->md.la_size_sect);
+ la_size_changed = (la_size_sect != mdev->ldev->md.la_size_sect);
md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
|| prev_size != mdev->ldev->md.md_size_sect;
@@ -886,9 +900,9 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
drbd_md_mark_dirty(mdev);
}
- if (size > la_size)
+ if (size > la_size_sect)
rv = grew;
- if (size < la_size)
+ if (size < la_size_sect)
rv = shrunk;
out:
lc_unlock(mdev->act_log);
@@ -903,7 +917,7 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
sector_t u_size, int assume_peer_has_space)
{
sector_t p_size = mdev->p_size; /* partner's disk size. */
- sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
+ sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
sector_t m_size; /* my size */
sector_t size = 0;
@@ -917,8 +931,8 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
if (p_size && m_size) {
size = min_t(sector_t, p_size, m_size);
} else {
- if (la_size) {
- size = la_size;
+ if (la_size_sect) {
+ size = la_size_sect;
if (m_size && m_size < size)
size = m_size;
if (p_size && p_size < size)
@@ -1127,15 +1141,32 @@ static bool should_set_defaults(struct genl_info *info)
return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}
-static void enforce_disk_conf_limits(struct disk_conf *dc)
+static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
{
- if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
- dc->al_extents = DRBD_AL_EXTENTS_MIN;
- if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
- dc->al_extents = DRBD_AL_EXTENTS_MAX;
+ /* This is limited by 16 bit "slot" numbers,
+ * and by available on-disk context storage.
+ *
+ * Also (u16)~0 is special (denotes a "free" extent).
+ *
+ * One transaction occupies one 4kB on-disk block,
+ * we have n such blocks in the on-disk ring buffer,
+ * the "current" transaction may fail, leaving only (n-1) usable,
+ * and each transaction holds context information for 919 slot numbers.
+ *
+ * 72 transaction blocks amounts to more than 2**16 context slots,
+ * so cap there first.
+ */
+ const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
+ const unsigned int sufficient_on_disk =
+ (max_al_nr + AL_CONTEXT_PER_TRANSACTION - 1)
+ / AL_CONTEXT_PER_TRANSACTION;
+
+ unsigned int al_size_4k = bdev->md.al_size_4k;
+
+ if (al_size_4k > sufficient_on_disk)
+ return max_al_nr;
- if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
- dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
+ return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
}
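
A quick numeric check of the cap reasoning in the comment above (illustrative only; 919 is the per-transaction slot count the comment cites):

	/* Sketch only: why 72 transaction blocks is already enough.
	 * A ring of 72 blocks indexes 72 * 919 = 66168 context slots,
	 * which exceeds the 2^16 = 65536 limit of 16 bit slot numbers. */
	static inline void example_al_ring_cap_check(void)
	{
		BUILD_BUG_ON(72 * 919 <= 1 << 16);	/* 66168 > 65536 */
	}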
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
@@ -1182,7 +1213,13 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
if (!expect(new_disk_conf->resync_rate >= 1))
new_disk_conf->resync_rate = 1;
- enforce_disk_conf_limits(new_disk_conf);
+ if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
+ new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
+ if (new_disk_conf->al_extents > drbd_al_extents_max(mdev->ldev))
+ new_disk_conf->al_extents = drbd_al_extents_max(mdev->ldev);
+
+ if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
+ new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
if (fifo_size != mdev->rs_plan_s->size) {
@@ -1330,7 +1367,8 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
goto fail;
}
- enforce_disk_conf_limits(new_disk_conf);
+ if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
+ new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
if (!new_plan) {
@@ -1343,6 +1381,12 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
goto fail;
}
+ write_lock_irq(&global_state_lock);
+ retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
+ write_unlock_irq(&global_state_lock);
+ if (retcode != NO_ERROR)
+ goto fail;
+
rcu_read_lock();
nc = rcu_dereference(mdev->tconn->net_conf);
if (nc) {
@@ -1399,8 +1443,16 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
goto fail;
}
- /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
- drbd_md_set_sector_offsets(mdev, nbc);
+ /* Read our meta data super block early.
+ * This also sets other on-disk offsets. */
+ retcode = drbd_md_read(mdev, nbc);
+ if (retcode != NO_ERROR)
+ goto fail;
+
+ if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
+ new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
+ if (new_disk_conf->al_extents > drbd_al_extents_max(nbc))
+ new_disk_conf->al_extents = drbd_al_extents_max(nbc);
if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
@@ -1416,7 +1468,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
min_md_device_sectors = (2<<10);
} else {
max_possible_sectors = DRBD_MAX_SECTORS;
- min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);
+ min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
}
if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
@@ -1467,8 +1519,6 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
if (!get_ldev_if_state(mdev, D_ATTACHING))
goto force_diskless;
- drbd_md_set_sector_offsets(mdev, nbc);
-
if (!mdev->bitmap) {
if (drbd_bm_init(mdev)) {
retcode = ERR_NOMEM;
@@ -1476,10 +1526,6 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
}
}
- retcode = drbd_md_read(mdev, nbc);
- if (retcode != NO_ERROR)
- goto force_diskless_dec;
-
if (mdev->state.conn < C_CONNECTED &&
mdev->state.role == R_PRIMARY &&
(mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
@@ -2158,8 +2204,11 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for
return SS_SUCCESS;
case SS_PRIMARY_NOP:
/* Our state checking code wants to see the peer outdated. */
- rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
- pdsk, D_OUTDATED), CS_VERBOSE);
+ rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
+
+ if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
+ rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_VERBOSE);
+
break;
case SS_CW_FAILED_BY_PEER:
/* The peer probably wants to see us outdated. */
@@ -2406,22 +2455,19 @@ int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
drbd_flush_workqueue(mdev);
- retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
-
- if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
- retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
-
- while (retcode == SS_NEED_CONNECTION) {
- spin_lock_irq(&mdev->tconn->req_lock);
- if (mdev->state.conn < C_CONNECTED)
- retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->tconn->req_lock);
-
- if (retcode != SS_NEED_CONNECTION)
- break;
-
+ /* If we happen to be C_STANDALONE R_SECONDARY, just change to
+ * D_INCONSISTENT, and set all bits in the bitmap. Otherwise,
+ * try to start a resync handshake as sync target for full sync.
+ */
+ if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_SECONDARY) {
+ retcode = drbd_request_state(mdev, NS(disk, D_INCONSISTENT));
+ if (retcode >= SS_SUCCESS) {
+ if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
+ "set_n_write from invalidate", BM_LOCKED_MASK))
+ retcode = ERR_IO_MD_DISK;
+ }
+ } else
retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
- }
drbd_resume_io(mdev);
out:
@@ -2475,21 +2521,22 @@ int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
drbd_flush_workqueue(mdev);
- retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
- if (retcode < SS_SUCCESS) {
- if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
- /* The peer will get a resync upon connect anyways.
- * Just make that into a full resync. */
- retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
- if (retcode >= SS_SUCCESS) {
- if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
- "set_n_write from invalidate_peer",
- BM_LOCKED_SET_ALLOWED))
- retcode = ERR_IO_MD_DISK;
- }
- } else
- retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
- }
+ /* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
+ * in the bitmap. Otherwise, try to start a resync handshake
+ * as sync source for full sync.
+ */
+ if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_PRIMARY) {
+ /* The peer will get a resync upon connect anyways. Just make that
+ into a full resync. */
+ retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
+ if (retcode >= SS_SUCCESS) {
+ if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
+ "set_n_write from invalidate_peer",
+ BM_LOCKED_SET_ALLOWED))
+ retcode = ERR_IO_MD_DISK;
+ }
+ } else
+ retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
drbd_resume_io(mdev);
out:
@@ -3162,6 +3209,7 @@ static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
CS_VERBOSE + CS_WAIT_COMPLETE);
idr_remove(&mdev->tconn->volumes, mdev->vnr);
idr_remove(&minors, mdev_to_minor(mdev));
+ destroy_workqueue(mdev->submit.wq);
del_gendisk(mdev->vdisk);
synchronize_rcu();
kref_put(&mdev->kref, &drbd_minor_destroy);
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 56672a61eb9..bf31d41dbaa 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -313,8 +313,14 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
static int drbd_proc_open(struct inode *inode, struct file *file)
{
- if (try_module_get(THIS_MODULE))
- return single_open(file, drbd_seq_show, PDE(inode)->data);
+ int err;
+
+ if (try_module_get(THIS_MODULE)) {
+ err = single_open(file, drbd_seq_show, PDE_DATA(inode));
+ if (err)
+ module_put(THIS_MODULE);
+ return err;
+ }
return -ENODEV;
}
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index a9eccfc6079..4222affff48 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -757,7 +757,8 @@ static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct acc
rcu_read_unlock();
timeo = connect_int * HZ;
- timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
+ /* 28.5% random jitter */
+ timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7;
err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
if (err <= 0)
@@ -849,6 +850,7 @@ int drbd_connected(struct drbd_conf *mdev)
err = drbd_send_current_state(mdev);
clear_bit(USE_DEGR_WFC_T, &mdev->flags);
clear_bit(RESIZE_PENDING, &mdev->flags);
+ atomic_set(&mdev->ap_in_flight, 0);
mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
return err;
}
@@ -953,7 +955,7 @@ retry:
conn_warn(tconn, "Error receiving initial packet\n");
sock_release(s);
randomize:
- if (random32() & 1)
+ if (prandom_u32() & 1)
goto retry;
}
}
@@ -2265,7 +2267,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
- drbd_al_begin_io(mdev, &peer_req->i);
+ drbd_al_begin_io(mdev, &peer_req->i, true);
}
err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
@@ -2661,7 +2663,6 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
if (hg == -1 && mdev->state.role == R_PRIMARY) {
enum drbd_state_rv rv2;
- drbd_set_role(mdev, R_SECONDARY, 0);
/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
* we might be here in C_WF_REPORT_PARAMS which is transient.
* we do not need to wait for the after state change work either. */
@@ -3992,7 +3993,7 @@ static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
clear_bit(DISCARD_MY_DATA, &mdev->flags);
- drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
+ drbd_md_sync(mdev); /* update connected indicator, la_size_sect, ... */
return 0;
}
@@ -4659,8 +4660,8 @@ static int drbd_do_features(struct drbd_tconn *tconn)
#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
static int drbd_do_auth(struct drbd_tconn *tconn)
{
- dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
- dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
+ conn_err(tconn, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
+ conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
return -1;
}
#else
@@ -5257,9 +5258,11 @@ int drbd_asender(struct drbd_thread *thi)
bool ping_timeout_active = false;
struct net_conf *nc;
int ping_timeo, tcp_cork, ping_int;
+ struct sched_param param = { .sched_priority = 2 };
- current->policy = SCHED_RR; /* Make this a realtime task! */
- current->rt_priority = 2; /* more important than all other tasks */
+ rv = sched_setscheduler(current, SCHED_RR, &param);
+ if (rv < 0)
+ conn_err(tconn, "drbd_asender: ERROR set priority, ret=%d\n", rv);
while (get_t_state(thi) == RUNNING) {
drbd_thread_current_set_cpu(thi);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 2b8303ad63c..c24379ffd4e 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -34,14 +34,14 @@
static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size);
/* Update disk stats at start of I/O request */
-static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req, struct bio *bio)
+static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
{
- const int rw = bio_data_dir(bio);
+ const int rw = bio_data_dir(req->master_bio);
int cpu;
cpu = part_stat_lock();
part_round_stats(cpu, &mdev->vdisk->part0);
part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
- part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
+ part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], req->i.size >> 9);
(void) cpu; /* The macro invocations above want the cpu argument, I do not like
the compiler warning about cpu only assigned but never used... */
part_inc_in_flight(&mdev->vdisk->part0, rw);
@@ -263,8 +263,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
else
root = &mdev->read_requests;
drbd_remove_request_interval(root, req);
- } else if (!(s & RQ_POSTPONED))
- D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
+ }
/* Before we can signal completion to the upper layers,
* we may need to close the current transfer log epoch.
@@ -755,6 +754,11 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
D_ASSERT(req->rq_state & RQ_NET_PENDING);
mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
break;
+
+ case QUEUE_AS_DRBD_BARRIER:
+ start_new_tl_epoch(mdev->tconn);
+ mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
+ break;
};
return rv;
@@ -861,8 +865,10 @@ static void maybe_pull_ahead(struct drbd_conf *mdev)
bool congested = false;
enum drbd_on_congestion on_congestion;
+ rcu_read_lock();
nc = rcu_dereference(tconn->net_conf);
on_congestion = nc ? nc->on_congestion : OC_BLOCK;
+ rcu_read_unlock();
if (on_congestion == OC_BLOCK ||
tconn->agreed_pro_version < 96)
return;
@@ -956,14 +962,8 @@ static int drbd_process_write_request(struct drbd_request *req)
struct drbd_conf *mdev = req->w.mdev;
int remote, send_oos;
- rcu_read_lock();
remote = drbd_should_do_remote(mdev->state);
- if (remote) {
- maybe_pull_ahead(mdev);
- remote = drbd_should_do_remote(mdev->state);
- }
send_oos = drbd_should_send_out_of_sync(mdev->state);
- rcu_read_unlock();
/* Need to replicate writes. Unless it is an empty flush,
* which is better mapped to a DRBD P_BARRIER packet,
@@ -975,8 +975,8 @@ static int drbd_process_write_request(struct drbd_request *req)
/* The only size==0 bios we expect are empty flushes. */
D_ASSERT(req->master_bio->bi_rw & REQ_FLUSH);
if (remote)
- start_new_tl_epoch(mdev->tconn);
- return 0;
+ _req_mod(req, QUEUE_AS_DRBD_BARRIER);
+ return remote;
}
if (!remote && !send_oos)
@@ -1020,12 +1020,24 @@ drbd_submit_req_private_bio(struct drbd_request *req)
bio_endio(bio, -EIO);
}
-void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
+static void drbd_queue_write(struct drbd_conf *mdev, struct drbd_request *req)
{
- const int rw = bio_rw(bio);
- struct bio_and_error m = { NULL, };
+ spin_lock(&mdev->submit.lock);
+ list_add_tail(&req->tl_requests, &mdev->submit.writes);
+ spin_unlock(&mdev->submit.lock);
+ queue_work(mdev->submit.wq, &mdev->submit.worker);
+}
+
+/* Returns the new drbd_request pointer if the caller is expected to
+ * drbd_send_and_submit() it (to save latency), or NULL if we queued the
+ * request on the submitter thread.
+ * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
+ */
+struct drbd_request *
+drbd_request_prepare(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
+{
+ const int rw = bio_data_dir(bio);
struct drbd_request *req;
- bool no_remote = false;
/* allocate outside of all locks; */
req = drbd_req_new(mdev, bio);
@@ -1035,7 +1047,7 @@ void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long
* if user cannot handle io errors, that's not our business. */
dev_err(DEV, "could not kmalloc() req\n");
bio_endio(bio, -ENOMEM);
- return;
+ return ERR_PTR(-ENOMEM);
}
req->start_time = start_time;
@@ -1044,28 +1056,40 @@ void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long
req->private_bio = NULL;
}
- /* For WRITES going to the local disk, grab a reference on the target
- * extent. This waits for any resync activity in the corresponding
- * resync extent to finish, and, if necessary, pulls in the target
- * extent into the activity log, which involves further disk io because
- * of transactional on-disk meta data updates.
- * Empty flushes don't need to go into the activity log, they can only
- * flush data for pending writes which are already in there. */
+ /* Update disk stats */
+ _drbd_start_io_acct(mdev, req);
+
if (rw == WRITE && req->private_bio && req->i.size
&& !test_bit(AL_SUSPENDED, &mdev->flags)) {
+ if (!drbd_al_begin_io_fastpath(mdev, &req->i)) {
+ drbd_queue_write(mdev, req);
+ return NULL;
+ }
req->rq_state |= RQ_IN_ACT_LOG;
- drbd_al_begin_io(mdev, &req->i);
}
+ return req;
+}
+
+static void drbd_send_and_submit(struct drbd_conf *mdev, struct drbd_request *req)
+{
+ const int rw = bio_rw(req->master_bio);
+ struct bio_and_error m = { NULL, };
+ bool no_remote = false;
+
spin_lock_irq(&mdev->tconn->req_lock);
if (rw == WRITE) {
/* This may temporarily give up the req_lock,
* but will re-acquire it before it returns here.
* Needs to be before the check on drbd_suspended() */
complete_conflicting_writes(req);
+ /* no more giving up req_lock from now on! */
+
+ /* check for congestion, and potentially stop sending
+ * full data updates, but start sending "dirty bits" only. */
+ maybe_pull_ahead(mdev);
}
- /* no more giving up req_lock from now on! */
if (drbd_suspended(mdev)) {
/* push back and retry: */
@@ -1078,9 +1102,6 @@ void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long
goto out;
}
- /* Update disk stats */
- _drbd_start_io_acct(mdev, req, bio);
-
/* We fail READ/READA early, if we can not serve it.
* We must do this before req is registered on any lists.
* Otherwise, drbd_req_complete() will queue failed READ for retry. */
@@ -1137,7 +1158,116 @@ out:
if (m.bio)
complete_master_bio(mdev, &m);
- return;
+}
+
+void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
+{
+ struct drbd_request *req = drbd_request_prepare(mdev, bio, start_time);
+ if (IS_ERR_OR_NULL(req))
+ return;
+ drbd_send_and_submit(mdev, req);
+}
+
+static void submit_fast_path(struct drbd_conf *mdev, struct list_head *incoming)
+{
+ struct drbd_request *req, *tmp;
+ list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
+ const int rw = bio_data_dir(req->master_bio);
+
+ if (rw == WRITE /* rw != WRITE should not even end up here! */
+ && req->private_bio && req->i.size
+ && !test_bit(AL_SUSPENDED, &mdev->flags)) {
+ if (!drbd_al_begin_io_fastpath(mdev, &req->i))
+ continue;
+
+ req->rq_state |= RQ_IN_ACT_LOG;
+ }
+
+ list_del_init(&req->tl_requests);
+ drbd_send_and_submit(mdev, req);
+ }
+}
+
+static bool prepare_al_transaction_nonblock(struct drbd_conf *mdev,
+ struct list_head *incoming,
+ struct list_head *pending)
+{
+ struct drbd_request *req, *tmp;
+ int wake = 0;
+ int err;
+
+ spin_lock_irq(&mdev->al_lock);
+ list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
+ err = drbd_al_begin_io_nonblock(mdev, &req->i);
+ if (err == -EBUSY)
+ wake = 1;
+ if (err)
+ continue;
+ req->rq_state |= RQ_IN_ACT_LOG;
+ list_move_tail(&req->tl_requests, pending);
+ }
+ spin_unlock_irq(&mdev->al_lock);
+ if (wake)
+ wake_up(&mdev->al_wait);
+
+ return !list_empty(pending);
+}
+
+void do_submit(struct work_struct *ws)
+{
+ struct drbd_conf *mdev = container_of(ws, struct drbd_conf, submit.worker);
+ LIST_HEAD(incoming);
+ LIST_HEAD(pending);
+ struct drbd_request *req, *tmp;
+
+ for (;;) {
+ spin_lock(&mdev->submit.lock);
+ list_splice_tail_init(&mdev->submit.writes, &incoming);
+ spin_unlock(&mdev->submit.lock);
+
+ submit_fast_path(mdev, &incoming);
+ if (list_empty(&incoming))
+ break;
+
+ wait_event(mdev->al_wait, prepare_al_transaction_nonblock(mdev, &incoming, &pending));
+ /* Maybe more was queued, while we prepared the transaction?
+ * Try to stuff them into this transaction as well.
+ * Be strictly non-blocking here, no wait_event, we already
+ * have something to commit.
+ * Stop if we don't make any more progress.
+ */
+ for (;;) {
+ LIST_HEAD(more_pending);
+ LIST_HEAD(more_incoming);
+ bool made_progress;
+
+ /* It is ok to look outside the lock,
+ * it's only an optimization anyways */
+ if (list_empty(&mdev->submit.writes))
+ break;
+
+ spin_lock(&mdev->submit.lock);
+ list_splice_tail_init(&mdev->submit.writes, &more_incoming);
+ spin_unlock(&mdev->submit.lock);
+
+ if (list_empty(&more_incoming))
+ break;
+
+ made_progress = prepare_al_transaction_nonblock(mdev, &more_incoming, &more_pending);
+
+ list_splice_tail_init(&more_pending, &pending);
+ list_splice_tail_init(&more_incoming, &incoming);
+
+ if (!made_progress)
+ break;
+ }
+ drbd_al_begin_io_commit(mdev, false);
+
+ list_for_each_entry_safe(req, tmp, &pending, tl_requests) {
+ list_del_init(&req->tl_requests);
+ drbd_send_and_submit(mdev, req);
+ }
+ }
}
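
The queueing side (drbd_queue_write above) and this worker follow the familiar splice-and-drain pattern: producers append to a spinlock-protected list and kick a single worker, which steals the whole list in O(1) and commits it as one batch. A minimal generic sketch under assumed names (batcher, batcher_queue, batcher_work are illustrative, not from the patch):

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct batcher {
		spinlock_t lock;
		struct list_head incoming;
		struct workqueue_struct *wq;
		struct work_struct work;
	};

	/* Producer: append under the lock, then kick the worker.
	 * queue_work() is a no-op if the work item is already pending. */
	static void batcher_queue(struct batcher *b, struct list_head *item)
	{
		spin_lock(&b->lock);
		list_add_tail(item, &b->incoming);
		spin_unlock(&b->lock);
		queue_work(b->wq, &b->work);
	}

	/* Consumer: steal the whole list in O(1), process as one batch. */
	static void batcher_work(struct work_struct *ws)
	{
		struct batcher *b = container_of(ws, struct batcher, work);
		LIST_HEAD(batch);

		spin_lock(&b->lock);
		list_splice_tail_init(&b->incoming, &batch);
		spin_unlock(&b->lock);
		/* ... commit 'batch' as a single AL transaction ... */
	}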
void drbd_make_request(struct request_queue *q, struct bio *bio)
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index c08d22964d0..978cb1addc9 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -88,6 +88,14 @@ enum drbd_req_event {
QUEUE_FOR_NET_READ,
QUEUE_FOR_SEND_OOS,
+ /* An empty flush is queued as P_BARRIER,
+ * which will cause it to complete "successfully",
+ * even if the local disk flush failed.
+ *
+ * Just like "real" requests, empty flushes (blkdev_issue_flush()) will
+ * only see an error if neither local nor remote data is reachable. */
+ QUEUE_AS_DRBD_BARRIER,
+
SEND_CANCELED,
SEND_FAILED,
HANDED_OVER_TO_NETWORK,
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 0fe220cfb9e..90c5be2b1d3 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -570,6 +570,13 @@ is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
mdev->tconn->agreed_pro_version < 88)
rv = SS_NOT_SUPPORTED;
+ else if (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
+ rv = SS_NO_UP_TO_DATE_DISK;
+
+ else if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
+ ns.pdsk == D_UNKNOWN)
+ rv = SS_NEED_CONNECTION;
+
else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
rv = SS_CONNECTED_OUTDATES;
@@ -635,6 +642,10 @@ is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_t
&& os.conn < C_WF_REPORT_PARAMS)
rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
+ if (ns.conn == C_DISCONNECTING && ns.pdsk == D_OUTDATED &&
+ os.conn < C_CONNECTED && os.pdsk > D_OUTDATED)
+ rv = SS_OUTDATE_WO_CONN;
+
return rv;
}
@@ -1377,13 +1388,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
&drbd_bmio_set_n_write, &abw_start_sync,
"set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
- /* We are invalidating our self... */
- if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
- os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
- /* other bitmap operation expected during this phase */
- drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
- "set_n_write from invalidate", BM_LOCKED_MASK);
-
/* first half of local IO error, failure to attach,
* or administrative detach */
if (os.disk != D_FAILED && ns.disk == D_FAILED) {
@@ -1748,13 +1752,9 @@ _conn_rq_cond(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state
if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags))
return SS_CW_FAILED_BY_PEER;
- rv = tconn->cstate != C_WF_REPORT_PARAMS ? SS_CW_NO_NEED : SS_UNKNOWN_ERROR;
-
- if (rv == SS_UNKNOWN_ERROR)
- rv = conn_is_valid_transition(tconn, mask, val, 0);
-
- if (rv == SS_SUCCESS)
- rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
+ rv = conn_is_valid_transition(tconn, mask, val, 0);
+ if (rv == SS_SUCCESS && tconn->cstate == C_WF_REPORT_PARAMS)
+ rv = SS_UNKNOWN_ERROR; /* continue waiting */
return rv;
}
diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c
index 9a664bd2740..58e08ff2b2c 100644
--- a/drivers/block/drbd/drbd_strings.c
+++ b/drivers/block/drbd/drbd_strings.c
@@ -89,6 +89,7 @@ static const char *drbd_state_sw_errors[] = {
[-SS_LOWER_THAN_OUTDATED] = "Disk state is lower than outdated",
[-SS_IN_TRANSIENT_STATE] = "In transient state, retry after next state change",
[-SS_CONCURRENT_ST_CHG] = "Concurrent state changes detected and aborted",
+ [-SS_OUTDATE_WO_CONN] = "Need a connection for a graceful disconnect/outdate peer",
[-SS_O_VOL_PEER_PRI] = "Other vol primary on peer not allowed by config",
};
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 424dc7bdf9b..891c0ecaa29 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -89,7 +89,8 @@ void drbd_md_io_complete(struct bio *bio, int error)
md_io->done = 1;
wake_up(&mdev->misc_wait);
bio_put(bio);
- put_ldev(mdev);
+ if (mdev->ldev) /* special case: drbd_md_read() during drbd_adm_attach() */
+ put_ldev(mdev);
}
/* reads on behalf of the partner,
@@ -1410,7 +1411,7 @@ int w_restart_disk_io(struct drbd_work *w, int cancel)
struct drbd_conf *mdev = w->mdev;
if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
- drbd_al_begin_io(mdev, &req->i);
+ drbd_al_begin_io(mdev, &req->i, false);
drbd_req_make_private_bio(req, req->master_bio);
req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
@@ -1425,7 +1426,7 @@ static int _drbd_may_sync_now(struct drbd_conf *mdev)
int resync_after;
while (1) {
- if (!odev->ldev)
+ if (!odev->ldev || odev->state.disk == D_DISKLESS)
return 1;
rcu_read_lock();
resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after;
@@ -1433,7 +1434,7 @@ static int _drbd_may_sync_now(struct drbd_conf *mdev)
if (resync_after == -1)
return 1;
odev = minor_to_mdev(resync_after);
- if (!expect(odev))
+ if (!odev)
return 1;
if ((odev->state.conn >= C_SYNC_SOURCE &&
odev->state.conn <= C_PAUSED_SYNC_T) ||
@@ -1515,7 +1516,7 @@ enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor)
if (o_minor == -1)
return NO_ERROR;
- if (o_minor < -1 || minor_to_mdev(o_minor) == NULL)
+ if (o_minor < -1 || o_minor > MINORMASK)
return ERR_RESYNC_AFTER;
/* check for loops */
@@ -1524,6 +1525,15 @@ enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor)
if (odev == mdev)
return ERR_RESYNC_AFTER_CYCLE;
+ /* You are free to depend on diskless, non-existing,
+ * or not yet/no longer existing minors.
+ * We only reject dependency loops.
+ * We cannot follow the dependency chain beyond a detached or
+ * missing minor.
+ */
+ if (!odev || !odev->ldev || odev->state.disk == D_DISKLESS)
+ return NO_ERROR;
+
rcu_read_lock();
resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after;
rcu_read_unlock();
@@ -1652,7 +1662,9 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
clear_bit(B_RS_H_DONE, &mdev->flags);
write_lock_irq(&global_state_lock);
- if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
+ /* Did some connection breakage or IO error race with us? */
+ if (mdev->state.conn < C_CONNECTED
+ || !get_ldev_if_state(mdev, D_NEGOTIATING)) {
write_unlock_irq(&global_state_lock);
mutex_unlock(mdev->state_mutex);
return;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 2ddd64a9ffd..04ceb7e2fad 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3601,7 +3601,7 @@ static void __init config_types(void)
pr_cont("\n");
}
-static int floppy_release(struct gendisk *disk, fmode_t mode)
+static void floppy_release(struct gendisk *disk, fmode_t mode)
{
int drive = (long)disk->private_data;
@@ -3615,8 +3615,6 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
opened_bdev[drive] = NULL;
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
-
- return 0;
}
/*
@@ -3777,7 +3775,6 @@ static int __floppy_read_block_0(struct block_device *bdev)
bio_vec.bv_len = size;
bio_vec.bv_offset = 0;
bio.bi_vcnt = 1;
- bio.bi_idx = 0;
bio.bi_size = size;
bio.bi_bdev = bdev;
bio.bi_sector = 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index dfe758382ea..d92d50fd84b 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -230,9 +230,11 @@ static int __do_lo_send_write(struct file *file,
ssize_t bw;
mm_segment_t old_fs = get_fs();
+ file_start_write(file);
set_fs(get_ds());
bw = file->f_op->write(file, buf, len, &pos);
set_fs(old_fs);
+ file_end_write(file);
if (likely(bw == len))
return 0;
printk(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
@@ -1516,7 +1518,7 @@ out:
return err;
}
-static int lo_release(struct gendisk *disk, fmode_t mode)
+static void lo_release(struct gendisk *disk, fmode_t mode)
{
struct loop_device *lo = disk->private_data;
int err;
@@ -1533,7 +1535,7 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
*/
err = loop_clr_fd(lo);
if (!err)
- goto out_unlocked;
+ return;
} else {
/*
* Otherwise keep thread (if running) and config,
@@ -1544,8 +1546,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
out:
mutex_unlock(&lo->lo_ctl_mutex);
-out_unlocked:
- return 0;
}
static const struct block_device_operations lo_fops = {
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 076ae7f1b78..a56cfcd5d64 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -780,6 +780,7 @@ static const struct block_device_operations mg_disk_ops = {
.getgeo = mg_getgeo
};
+#ifdef CONFIG_PM_SLEEP
static int mg_suspend(struct device *dev)
{
struct mg_drv_data *prv_data = dev->platform_data;
@@ -824,6 +825,7 @@ static int mg_resume(struct device *dev)
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(mg_pm, mg_suspend, mg_resume);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 32c678028e5..847107ef0cc 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -728,7 +728,10 @@ static void mtip_async_complete(struct mtip_port *port,
atomic_set(&port->commands[tag].active, 0);
release_slot(port, tag);
- up(&port->cmd_slot);
+ if (unlikely(command->unaligned))
+ up(&port->cmd_slot_unal);
+ else
+ up(&port->cmd_slot);
}
/*
@@ -1560,10 +1563,12 @@ static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
}
#endif
+#ifdef MTIP_TRIM /* Disabling TRIM support temporarily */
/* Demux ID.DRAT & ID.RZAT to determine trim support */
if (port->identify[69] & (1 << 14) && port->identify[69] & (1 << 5))
port->dd->trim_supp = true;
else
+#endif
port->dd->trim_supp = false;
/* Set the identify buffer as valid. */
@@ -2557,7 +2562,7 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
*/
static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
int nsect, int nents, int tag, void *callback,
- void *data, int dir)
+ void *data, int dir, int unaligned)
{
struct host_to_dev_fis *fis;
struct mtip_port *port = dd->port;
@@ -2570,6 +2575,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
command->scatter_ents = nents;
+ command->unaligned = unaligned;
/*
* The number of retries for this command before it is
* reported as a failure to the upper layers.
@@ -2598,6 +2604,9 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
fis->res3 = 0;
fill_command_sg(dd, command, nents);
+ if (unaligned)
+ fis->device |= 1 << 7;
+
/* Populate the command header */
command->command_header->opts =
__force_bit2int cpu_to_le32(
@@ -2644,9 +2653,13 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
* return value
* None
*/
-static void mtip_hw_release_scatterlist(struct driver_data *dd, int tag)
+static void mtip_hw_release_scatterlist(struct driver_data *dd, int tag,
+ int unaligned)
{
+ struct semaphore *sem = unaligned ? &dd->port->cmd_slot_unal :
+ &dd->port->cmd_slot;
release_slot(dd->port, tag);
+ up(sem);
}
/*
@@ -2661,22 +2674,25 @@ static void mtip_hw_release_scatterlist(struct driver_data *dd, int tag)
* or NULL if no command slots are available.
*/
static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
- int *tag)
+ int *tag, int unaligned)
{
+ struct semaphore *sem = unaligned ? &dd->port->cmd_slot_unal :
+ &dd->port->cmd_slot;
+
/*
* It is possible that, even with this semaphore, a thread
* may think that no command slots are available. Therefore, we
* need to make an attempt to get_slot().
*/
- down(&dd->port->cmd_slot);
+ down(sem);
*tag = get_slot(dd->port);
if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
- up(&dd->port->cmd_slot);
+ up(sem);
return NULL;
}
if (unlikely(*tag < 0)) {
- up(&dd->port->cmd_slot);
+ up(sem);
return NULL;
}
@@ -3010,6 +3026,11 @@ static inline void hba_setup(struct driver_data *dd)
dd->mmio + HOST_HSORG);
}
+static int mtip_device_unaligned_constrained(struct driver_data *dd)
+{
+ return (dd->pdev->device == P420M_DEVICE_ID ? 1 : 0);
+}
+
/*
* Detect the details of the product, and store anything needed
* into the driver data structure. This includes product type and
@@ -3232,8 +3253,15 @@ static int mtip_hw_init(struct driver_data *dd)
for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
dd->work[i].port = dd->port;
+ /* Enable unaligned IO constraints for some devices */
+ if (mtip_device_unaligned_constrained(dd))
+ dd->unal_qdepth = MTIP_MAX_UNALIGNED_SLOTS;
+ else
+ dd->unal_qdepth = 0;
+
/* Counting semaphore to track command slot usage */
- sema_init(&dd->port->cmd_slot, num_command_slots - 1);
+ sema_init(&dd->port->cmd_slot, num_command_slots - 1 - dd->unal_qdepth);
+ sema_init(&dd->port->cmd_slot_unal, dd->unal_qdepth);
/* Spinlock to prevent concurrent issue */
for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
@@ -3836,7 +3864,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
struct scatterlist *sg;
struct bio_vec *bvec;
int nents = 0;
- int tag = 0;
+ int tag = 0, unaligned = 0;
if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
@@ -3872,7 +3900,15 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
return;
}
- sg = mtip_hw_get_scatterlist(dd, &tag);
+ if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 &&
+ dd->unal_qdepth) {
+ if (bio->bi_sector % 8 != 0) /* Unaligned on 4k boundaries */
+ unaligned = 1;
+ else if (bio_sectors(bio) % 8 != 0) /* Start aligned, length not a 4k multiple */
+ unaligned = 1;
+ }
+
+ sg = mtip_hw_get_scatterlist(dd, &tag, unaligned);
if (likely(sg != NULL)) {
blk_queue_bounce(queue, &bio);
@@ -3880,7 +3916,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
dev_warn(&dd->pdev->dev,
"Maximum number of SGL entries exceeded\n");
bio_io_error(bio);
- mtip_hw_release_scatterlist(dd, tag);
+ mtip_hw_release_scatterlist(dd, tag, unaligned);
return;
}
@@ -3900,7 +3936,8 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
tag,
bio_endio,
bio,
- bio_data_dir(bio));
+ bio_data_dir(bio),
+ unaligned);
} else
bio_io_error(bio);
}
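
The alignment test above reduces to 4 KiB granularity expressed in 512-byte sectors; a hedged restatement (helper name illustrative, not from the patch):

	/* Sketch only: a write needs the unaligned slot pool when either its
	 * starting LBA or its length is not a multiple of 8 sectors (4 KiB). */
	static inline int example_needs_unal_slot(sector_t start, unsigned int nr_sects)
	{
		return (start & 7) || (nr_sects & 7);
	}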
@@ -4156,26 +4193,24 @@ static int mtip_block_remove(struct driver_data *dd)
*/
static int mtip_block_shutdown(struct driver_data *dd)
{
- dev_info(&dd->pdev->dev,
- "Shutting down %s ...\n", dd->disk->disk_name);
-
/* Delete our gendisk structure, and cleanup the blk queue. */
if (dd->disk) {
- if (dd->disk->queue)
+ dev_info(&dd->pdev->dev,
+ "Shutting down %s ...\n", dd->disk->disk_name);
+
+ if (dd->disk->queue) {
del_gendisk(dd->disk);
- else
+ blk_cleanup_queue(dd->queue);
+ } else
put_disk(dd->disk);
+ dd->disk = NULL;
+ dd->queue = NULL;
}
-
spin_lock(&rssd_index_lock);
ida_remove(&rssd_index_ida, dd->index);
spin_unlock(&rssd_index_lock);
- blk_cleanup_queue(dd->queue);
- dd->disk = NULL;
- dd->queue = NULL;
-
mtip_hw_shutdown(dd);
return 0;
}
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 8e8334c9dd0..3bb8a295fbe 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -52,6 +52,9 @@
#define MTIP_FTL_REBUILD_MAGIC 0xED51
#define MTIP_FTL_REBUILD_TIMEOUT_MS 2400000
+/* unaligned IO handling */
+#define MTIP_MAX_UNALIGNED_SLOTS 8
+
/* Macro to extract the tag bit number from a tag value. */
#define MTIP_TAG_BIT(tag) (tag & 0x1F)
@@ -333,6 +336,8 @@ struct mtip_cmd {
int scatter_ents; /* Number of scatter list entries used */
+ int unaligned; /* command is unaligned on 4k boundary */
+
struct scatterlist sg[MTIP_MAX_SG]; /* Scatter list entries */
int retries; /* The number of retries left for this command. */
@@ -452,6 +457,10 @@ struct mtip_port {
* command slots available.
*/
struct semaphore cmd_slot;
+
+ /* Semaphore to control queue depth of unaligned IOs */
+ struct semaphore cmd_slot_unal;
+
/* Spinlock for working around command-issue bug. */
spinlock_t cmd_issue_lock[MTIP_MAX_SLOT_GROUPS];
};
@@ -502,6 +511,8 @@ struct driver_data {
int isr_binding;
+ int unal_qdepth; /* qdepth of unaligned IO queue */
+
struct list_head online_list; /* linkage for online list */
struct list_head remove_list; /* linkage for removing list */
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 7fecc784be0..037288e7874 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -856,6 +856,8 @@ static int __init nbd_init(void)
disk->queue->limits.discard_granularity = 512;
disk->queue->limits.max_discard_sectors = UINT_MAX;
disk->queue->limits.discard_zeroes_data = 0;
+ blk_queue_max_hw_sectors(disk->queue, 65536);
+ disk->queue->limits.max_sectors = 256;
}
if (register_blkdev(NBD_MAJOR, "nbd")) {
diff --git a/drivers/block/nvme.c b/drivers/block/nvme-core.c
index 9dcefe40380..8efdfaa44a5 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme-core.c
@@ -39,14 +39,13 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
-
+#include <scsi/sg.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>
#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
#define NVME_MINORS 64
-#define NVME_IO_TIMEOUT (5 * HZ)
#define ADMIN_TIMEOUT (60 * HZ)
static int nvme_major;
@@ -60,43 +59,6 @@ static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;
/*
- * Represents an NVM Express device. Each nvme_dev is a PCI function.
- */
-struct nvme_dev {
- struct list_head node;
- struct nvme_queue **queues;
- u32 __iomem *dbs;
- struct pci_dev *pci_dev;
- struct dma_pool *prp_page_pool;
- struct dma_pool *prp_small_pool;
- int instance;
- int queue_count;
- int db_stride;
- u32 ctrl_config;
- struct msix_entry *entry;
- struct nvme_bar __iomem *bar;
- struct list_head namespaces;
- char serial[20];
- char model[40];
- char firmware_rev[8];
- u32 max_hw_sectors;
-};
-
-/*
- * An NVM Express namespace is equivalent to a SCSI LUN
- */
-struct nvme_ns {
- struct list_head list;
-
- struct nvme_dev *dev;
- struct request_queue *queue;
- struct gendisk *disk;
-
- int ns_id;
- int lba_shift;
-};
-
-/*
* An NVM Express queue. Each device has at least two (one for admin
* commands and one for I/O commands).
*/
@@ -131,6 +93,7 @@ static inline void _nvme_check_size(void)
BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
@@ -261,12 +224,12 @@ static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
return ctx;
}
-static struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
+struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
{
return dev->queues[get_cpu() + 1];
}
-static void put_nvmeq(struct nvme_queue *nvmeq)
+void put_nvmeq(struct nvme_queue *nvmeq)
{
put_cpu();
}
@@ -294,22 +257,6 @@ static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
return 0;
}
-/*
- * The nvme_iod describes the data in an I/O, including the list of PRP
- * entries. You can't see it in this data structure because C doesn't let
- * me express that. Use nvme_alloc_iod to ensure there's enough space
- * allocated to store the PRP list.
- */
-struct nvme_iod {
- void *private; /* For the use of the submitter of the I/O */
- int npages; /* In the PRP list. 0 means small pool in use */
- int offset; /* Of PRP list */
- int nents; /* Used in scatterlist */
- int length; /* Of data, in bytes */
- dma_addr_t first_dma;
- struct scatterlist sg[0];
-};
-
static __le64 **iod_list(struct nvme_iod *iod)
{
return ((void *)iod) + iod->offset;
@@ -343,7 +290,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
return iod;
}
-static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
+void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
{
const int last_prp = PAGE_SIZE / 8 - 1;
int i;
@@ -361,16 +308,6 @@ static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
kfree(iod);
}
-static void requeue_bio(struct nvme_dev *dev, struct bio *bio)
-{
- struct nvme_queue *nvmeq = get_nvmeq(dev);
- if (bio_list_empty(&nvmeq->sq_cong))
- add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
- bio_list_add(&nvmeq->sq_cong, bio);
- put_nvmeq(nvmeq);
- wake_up_process(nvme_thread);
-}
-
static void bio_completion(struct nvme_dev *dev, void *ctx,
struct nvme_completion *cqe)
{
@@ -382,19 +319,15 @@ static void bio_completion(struct nvme_dev *dev, void *ctx,
dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
nvme_free_iod(dev, iod);
- if (status) {
+ if (status)
bio_endio(bio, -EIO);
- } else if (bio->bi_vcnt > bio->bi_idx) {
- requeue_bio(dev, bio);
- } else {
+ else
bio_endio(bio, 0);
- }
}
/* length is in bytes. gfp flags indicate whether we may sleep. */
-static int nvme_setup_prps(struct nvme_dev *dev,
- struct nvme_common_command *cmd, struct nvme_iod *iod,
- int total_len, gfp_t gfp)
+int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
+ struct nvme_iod *iod, int total_len, gfp_t gfp)
{
struct dma_pool *pool;
int length = total_len;
@@ -473,43 +406,193 @@ static int nvme_setup_prps(struct nvme_dev *dev,
return total_len;
}
+struct nvme_bio_pair {
+ struct bio b1, b2, *parent;
+ struct bio_vec *bv1, *bv2;
+ int err;
+ atomic_t cnt;
+};
+
+static void nvme_bio_pair_endio(struct bio *bio, int err)
+{
+ struct nvme_bio_pair *bp = bio->bi_private;
+
+ if (err)
+ bp->err = err;
+
+ if (atomic_dec_and_test(&bp->cnt)) {
+ bio_endio(bp->parent, bp->err);
+ if (bp->bv1)
+ kfree(bp->bv1);
+ if (bp->bv2)
+ kfree(bp->bv2);
+ kfree(bp);
+ }
+}
+
+static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
+ int len, int offset)
+{
+ struct nvme_bio_pair *bp;
+
+ BUG_ON(len > bio->bi_size);
+ BUG_ON(idx > bio->bi_vcnt);
+
+ bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
+ if (!bp)
+ return NULL;
+ bp->err = 0;
+
+ bp->b1 = *bio;
+ bp->b2 = *bio;
+
+ bp->b1.bi_size = len;
+ bp->b2.bi_size -= len;
+ bp->b1.bi_vcnt = idx;
+ bp->b2.bi_idx = idx;
+ bp->b2.bi_sector += len >> 9;
+
+ if (offset) {
+ bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
+ GFP_ATOMIC);
+ if (!bp->bv1)
+ goto split_fail_1;
+
+ bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
+ GFP_ATOMIC);
+ if (!bp->bv2)
+ goto split_fail_2;
+
+ memcpy(bp->bv1, bio->bi_io_vec,
+ bio->bi_max_vecs * sizeof(struct bio_vec));
+ memcpy(bp->bv2, bio->bi_io_vec,
+ bio->bi_max_vecs * sizeof(struct bio_vec));
+
+ bp->b1.bi_io_vec = bp->bv1;
+ bp->b2.bi_io_vec = bp->bv2;
+ bp->b2.bi_io_vec[idx].bv_offset += offset;
+ bp->b2.bi_io_vec[idx].bv_len -= offset;
+ bp->b1.bi_io_vec[idx].bv_len = offset;
+ bp->b1.bi_vcnt++;
+ } else
+ bp->bv1 = bp->bv2 = NULL;
+
+ bp->b1.bi_private = bp;
+ bp->b2.bi_private = bp;
+
+ bp->b1.bi_end_io = nvme_bio_pair_endio;
+ bp->b2.bi_end_io = nvme_bio_pair_endio;
+
+ bp->parent = bio;
+ atomic_set(&bp->cnt, 2);
+
+ return bp;
+
+ split_fail_2:
+ kfree(bp->bv1);
+ split_fail_1:
+ kfree(bp);
+ return NULL;
+}
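
A worked example of the (idx, len, offset) parameters above, with hypothetical values and the simplifying assumption of equal-size segments: a 12 KiB bio of three 4 KiB segments, cut at len = 5 KiB, puts the boundary 1 KiB into segment 1, so b1 keeps segment 0 plus the first 1 KiB of segment 1, and b2 resumes at that vec with bv_offset advanced by 1024 and bv_len reduced to 3072.

	/* Illustration only: 3 x 4 KiB segments, cut at 5 KiB. */
	static void example_split_parameters(void)
	{
		unsigned int seg = 4096, len = 5 * 1024;
		unsigned int idx = len / seg;		/* 1: vec holding the cut */
		unsigned int offset = len % seg;	/* 1024: bytes into that vec */

		/* b1 = seg 0 (4096) + first 1024 bytes of seg 1 == len */
		BUG_ON(idx * seg + offset != len);
	}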
+
+static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
+ int idx, int len, int offset)
+{
+ struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset);
+ if (!bp)
+ return -ENOMEM;
+
+ if (bio_list_empty(&nvmeq->sq_cong))
+ add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+ bio_list_add(&nvmeq->sq_cong, &bp->b1);
+ bio_list_add(&nvmeq->sq_cong, &bp->b2);
+
+ return 0;
+}
+
/* NVMe scatterlists require no holes in the virtual address */
#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2) ((vec2)->bv_offset || \
(((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))
-static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
+static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
struct bio_vec *bvec, *bvprv = NULL;
struct scatterlist *sg = NULL;
- int i, old_idx, length = 0, nsegs = 0;
+ int i, length = 0, nsegs = 0, split_len = bio->bi_size;
+
+ if (nvmeq->dev->stripe_size)
+ split_len = nvmeq->dev->stripe_size -
+ ((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1));
sg_init_table(iod->sg, psegs);
- old_idx = bio->bi_idx;
bio_for_each_segment(bvec, bio, i) {
if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
sg->length += bvec->bv_len;
} else {
if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
- break;
+ return nvme_split_and_submit(bio, nvmeq, i,
+ length, 0);
+
sg = sg ? sg + 1 : iod->sg;
sg_set_page(sg, bvec->bv_page, bvec->bv_len,
bvec->bv_offset);
nsegs++;
}
+
+ if (split_len - length < bvec->bv_len)
+ return nvme_split_and_submit(bio, nvmeq, i, split_len,
+ split_len - length);
length += bvec->bv_len;
bvprv = bvec;
}
- bio->bi_idx = i;
iod->nents = nsegs;
sg_mark_end(sg);
- if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) {
- bio->bi_idx = old_idx;
+ if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
return -ENOMEM;
- }
+
+ BUG_ON(length != bio->bi_size);
return length;
}
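
The stripe handling above splits a bio at the next stripe boundary; the distance computation relies on stripe_size being a power of two so the mask works. A hedged sketch of the same arithmetic (helper name illustrative, not from the patch):

	/* Sketch only: bytes left until the next stripe boundary for a bio
	 * starting at 'sector', given a power-of-two stripe_size in bytes. */
	static inline u32 example_bytes_to_stripe_boundary(sector_t sector, u32 stripe_size)
	{
		return stripe_size - (u32)(((u64)sector << 9) & (stripe_size - 1));
	}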
+/*
+ * We reuse the small pool to allocate the 16-byte range here as it is not
+ * worth having a special pool for these or additional cases to handle freeing
+ * the iod.
+ */
+static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+ struct bio *bio, struct nvme_iod *iod, int cmdid)
+{
+ struct nvme_dsm_range *range;
+ struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+ range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
+ &iod->first_dma);
+ if (!range)
+ return -ENOMEM;
+
+ iod_list(iod)[0] = (__le64 *)range;
+ iod->npages = 0;
+
+ range->cattr = cpu_to_le32(0);
+ range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
+ range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+
+ memset(cmnd, 0, sizeof(*cmnd));
+ cmnd->dsm.opcode = nvme_cmd_dsm;
+ cmnd->dsm.command_id = cmdid;
+ cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
+ cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
+ cmnd->dsm.nr = 0;
+ cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+
+ if (++nvmeq->sq_tail == nvmeq->q_depth)
+ nvmeq->sq_tail = 0;
+ writel(nvmeq->sq_tail, nvmeq->q_db);
+
+ return 0;
+}
+
static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
int cmdid)
{
@@ -527,7 +610,7 @@ static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
return 0;
}
-static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
+int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
{
int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
special_completion, NVME_IO_TIMEOUT);
@@ -567,6 +650,12 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
if (unlikely(cmdid < 0))
goto free_iod;
+ if (bio->bi_rw & REQ_DISCARD) {
+ result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
+ if (result)
+ goto free_cmdid;
+ return result;
+ }
if ((bio->bi_rw & REQ_FLUSH) && !psegs)
return nvme_submit_flush(nvmeq, ns, cmdid);
@@ -591,8 +680,8 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
dma_dir = DMA_FROM_DEVICE;
}
- result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
- if (result < 0)
+ result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs);
+ if (result <= 0)
goto free_cmdid;
length = result;
@@ -600,13 +689,11 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
GFP_ATOMIC);
- cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
+ cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
cmnd->rw.control = cpu_to_le16(control);
cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
- bio->bi_sector += length >> 9;
-
if (++nvmeq->sq_tail == nvmeq->q_depth)
nvmeq->sq_tail = 0;
writel(nvmeq->sq_tail, nvmeq->q_db);
@@ -724,8 +811,8 @@ static void sync_completion(struct nvme_dev *dev, void *ctx,
* Returns 0 on success. If the result is negative, it's a Linux error code;
* if the result is positive, it's an NVM Express status code
*/
-static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
- struct nvme_command *cmd, u32 *result, unsigned timeout)
+int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
+ u32 *result, unsigned timeout)
{
int cmdid;
struct sync_cmd_info cmdinfo;
@@ -741,7 +828,7 @@ static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
set_current_state(TASK_KILLABLE);
nvme_submit_cmd(nvmeq, cmd);
- schedule();
+ schedule_timeout(timeout);
if (cmdinfo.status == -EINTR) {
nvme_abort_command(nvmeq, cmdid);
@@ -754,7 +841,7 @@ static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
return cmdinfo.status;
}
-static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
+int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
u32 *result)
{
return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
@@ -827,7 +914,7 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}
-static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
+int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
dma_addr_t dma_addr)
{
struct nvme_command c;
@@ -841,7 +928,7 @@ static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
return nvme_submit_admin_cmd(dev, &c, NULL);
}
-static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
+int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
dma_addr_t dma_addr, u32 *result)
{
struct nvme_command c;
@@ -855,8 +942,8 @@ static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
return nvme_submit_admin_cmd(dev, &c, result);
}
-static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
- unsigned dword11, dma_addr_t dma_addr, u32 *result)
+int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
+ dma_addr_t dma_addr, u32 *result)
{
struct nvme_command c;
@@ -885,7 +972,7 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
void *ctx;
nvme_completion_fn fn;
static struct nvme_completion cqe = {
- .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1,
+ .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
};
if (timeout && !time_after(now, info[cmdid].timeout))
@@ -966,7 +1053,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
return nvmeq;
free_cqdma:
- dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
+ dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
nvmeq->cq_dma_addr);
free_nvmeq:
kfree(nvmeq);
@@ -1021,15 +1108,60 @@ static struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, int qid,
return ERR_PTR(result);
}
+static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
+{
+ unsigned long timeout;
+ u32 bit = enabled ? NVME_CSTS_RDY : 0;
+
+ timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
+
+ while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit) {
+ msleep(100);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ if (time_after(jiffies, timeout)) {
+ dev_err(&dev->pci_dev->dev,
+ "Device not ready; aborting initialisation\n");
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * If the device has been passed off to us in an enabled state, just clear
+ * the enabled bit. The spec says we should set the 'shutdown notification
+ * bits', but doing so may cause the device to complete commands to the
+ * admin queue ... and we don't know what memory that might be pointing at!
+ */
+static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap)
+{
+ u32 cc = readl(&dev->bar->cc);
+
+ if (cc & NVME_CC_ENABLE)
+ writel(cc & ~NVME_CC_ENABLE, &dev->bar->cc);
+ return nvme_wait_ready(dev, cap, false);
+}
+
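+/*
+ * Enabling is the mirror image: the caller programs AQA/ASQ/ACQ and CC
+ * first, then waits for CSTS.RDY to latch on.
+ */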
+static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap)
+{
+ return nvme_wait_ready(dev, cap, true);
+}
+
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
- int result = 0;
+ int result;
u32 aqa;
- u64 cap;
- unsigned long timeout;
+ u64 cap = readq(&dev->bar->cap);
struct nvme_queue *nvmeq;
dev->dbs = ((void __iomem *)dev->bar) + 4096;
+ dev->db_stride = NVME_CAP_STRIDE(cap);
+
+ result = nvme_disable_ctrl(dev, cap);
+ if (result < 0)
+ return result;
nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
if (!nvmeq)
@@ -1043,38 +1175,28 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
- writel(0, &dev->bar->cc);
writel(aqa, &dev->bar->aqa);
writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
writel(dev->ctrl_config, &dev->bar->cc);
- cap = readq(&dev->bar->cap);
- timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
- dev->db_stride = NVME_CAP_STRIDE(cap);
-
- while (!result && !(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
- msleep(100);
- if (fatal_signal_pending(current))
- result = -EINTR;
- if (time_after(jiffies, timeout)) {
- dev_err(&dev->pci_dev->dev,
- "Device not ready; aborting initialisation\n");
- result = -ENODEV;
- }
- }
-
- if (result) {
- nvme_free_queue_mem(nvmeq);
- return result;
- }
+ result = nvme_enable_ctrl(dev, cap);
+ if (result)
+ goto free_q;
result = queue_request_irq(dev, nvmeq, "nvme admin");
+ if (result)
+ goto free_q;
+
dev->queues[0] = nvmeq;
return result;
+
+ free_q:
+ nvme_free_queue_mem(nvmeq);
+ return result;
}
-static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
+struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
unsigned long addr, unsigned length)
{
int i, err, count, nents, offset;
@@ -1130,7 +1252,7 @@ static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
return ERR_PTR(err);
}
-static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
+void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
struct nvme_iod *iod)
{
int i;
@@ -1148,13 +1270,19 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
struct nvme_queue *nvmeq;
struct nvme_user_io io;
struct nvme_command c;
- unsigned length;
- int status;
- struct nvme_iod *iod;
+ unsigned length, meta_len;
+ int status, i;
+ struct nvme_iod *iod, *meta_iod = NULL;
+ dma_addr_t meta_dma_addr;
+ void *meta, *uninitialized_var(meta_mem);
if (copy_from_user(&io, uio, sizeof(io)))
return -EFAULT;
length = (io.nblocks + 1) << ns->lba_shift;
+ meta_len = (io.nblocks + 1) * ns->ms;
+
+ if (meta_len && ((io.metadata & 3) || !io.metadata))
+ return -EINVAL;
switch (io.opcode) {
case nvme_cmd_write:
@@ -1176,11 +1304,42 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
c.rw.slba = cpu_to_le64(io.slba);
c.rw.length = cpu_to_le16(io.nblocks);
c.rw.control = cpu_to_le16(io.control);
- c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
- c.rw.reftag = io.reftag;
- c.rw.apptag = io.apptag;
- c.rw.appmask = io.appmask;
- /* XXX: metadata */
+ c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
+ c.rw.reftag = cpu_to_le32(io.reftag);
+ c.rw.apptag = cpu_to_le16(io.apptag);
+ c.rw.appmask = cpu_to_le16(io.appmask);
+
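+	/*
+	 * Metadata travels through a single coherent buffer; for writes
+	 * (opcode bit 0 set) the user's scattered metadata pages are
+	 * gathered into it before the command is submitted.
+	 */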
+ if (meta_len) {
+ meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata, meta_len);
+ if (IS_ERR(meta_iod)) {
+ status = PTR_ERR(meta_iod);
+ meta_iod = NULL;
+ goto unmap;
+ }
+
+ meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
+ &meta_dma_addr, GFP_KERNEL);
+ if (!meta_mem) {
+ status = -ENOMEM;
+ goto unmap;
+ }
+
+ if (io.opcode & 1) {
+ int meta_offset = 0;
+
+ for (i = 0; i < meta_iod->nents; i++) {
+ meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
+ meta_iod->sg[i].offset;
+ memcpy(meta_mem + meta_offset, meta,
+ meta_iod->sg[i].length);
+ kunmap_atomic(meta);
+ meta_offset += meta_iod->sg[i].length;
+ }
+ }
+
+ c.rw.metadata = cpu_to_le64(meta_dma_addr);
+ }
+
length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
nvmeq = get_nvmeq(dev);
@@ -1196,8 +1355,33 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
else
status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+ if (meta_len) {
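+		/* For reads, scatter the returned metadata back out to the user's pages. */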
+ if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
+ int meta_offset = 0;
+
+ for (i = 0; i < meta_iod->nents; i++) {
+ meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
+ meta_iod->sg[i].offset;
+ memcpy(meta, meta_mem + meta_offset,
+ meta_iod->sg[i].length);
+ kunmap_atomic(meta);
+ meta_offset += meta_iod->sg[i].length;
+ }
+ }
+
+ dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem,
+ meta_dma_addr);
+ }
+
+ unmap:
nvme_unmap_user_pages(dev, io.opcode & 1, iod);
nvme_free_iod(dev, iod);
+
+ if (meta_iod) {
+ nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod);
+ nvme_free_iod(dev, meta_iod);
+ }
+
return status;
}
@@ -1208,6 +1392,7 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
struct nvme_command c;
int status, length;
struct nvme_iod *uninitialized_var(iod);
+ unsigned timeout;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -1237,10 +1422,13 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
GFP_KERNEL);
}
+ timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
+ ADMIN_TIMEOUT;
if (length != cmd.data_len)
status = -ENOMEM;
else
- status = nvme_submit_admin_cmd(dev, &c, &cmd.result);
+ status = nvme_submit_sync_cmd(dev->queues[0], &c, &cmd.result,
+ timeout);
if (cmd.data_len) {
nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
@@ -1266,6 +1454,10 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
return nvme_user_admin_cmd(ns->dev, (void __user *)arg);
case NVME_IOCTL_SUBMIT_IO:
return nvme_submit_io(ns, (void __user *)arg);
+ case SG_GET_VERSION_NUM:
+ return nvme_sg_get_version_num((void __user *)arg);
+ case SG_IO:
+ return nvme_sg_io(ns, (void __user *)arg);
default:
return -ENOTTY;
}
@@ -1282,13 +1474,17 @@ static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
while (bio_list_peek(&nvmeq->sq_cong)) {
struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
+
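+		/*
+		 * Take ourselves off the sq_full wait queue before submitting
+		 * the last congested bio; if submission fails, re-register
+		 * and requeue.
+		 */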
+ if (bio_list_empty(&nvmeq->sq_cong))
+ remove_wait_queue(&nvmeq->sq_full,
+ &nvmeq->sq_cong_wait);
if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
+ if (bio_list_empty(&nvmeq->sq_cong))
+ add_wait_queue(&nvmeq->sq_full,
+ &nvmeq->sq_cong_wait);
bio_list_add_head(&nvmeq->sq_cong, bio);
break;
}
- if (bio_list_empty(&nvmeq->sq_cong))
- remove_wait_queue(&nvmeq->sq_full,
- &nvmeq->sq_cong_wait);
}
}
@@ -1297,7 +1493,7 @@ static int nvme_kthread(void *data)
struct nvme_dev *dev;
while (!kthread_should_stop()) {
- __set_current_state(TASK_RUNNING);
+ set_current_state(TASK_INTERRUPTIBLE);
spin_lock(&dev_list_lock);
list_for_each_entry(dev, &dev_list, node) {
int i;
@@ -1314,8 +1510,7 @@ static int nvme_kthread(void *data)
}
}
spin_unlock(&dev_list_lock);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
+ schedule_timeout(round_jiffies_relative(HZ));
}
return 0;
}
@@ -1347,6 +1542,16 @@ static void nvme_put_ns_idx(int index)
spin_unlock(&dev_list_lock);
}
+static void nvme_config_discard(struct nvme_ns *ns)
+{
+ u32 logical_block_size = queue_logical_block_size(ns->queue);
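+	/* Deallocate makes no guarantee about the contents of freed blocks,
+	 * so discard_zeroes_data stays 0. */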
+ ns->queue->limits.discard_zeroes_data = 0;
+ ns->queue->limits.discard_alignment = logical_block_size;
+ ns->queue->limits.discard_granularity = logical_block_size;
+ ns->queue->limits.max_discard_sectors = 0xffffffff;
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
+}
+
static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
@@ -1366,7 +1571,6 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
-/* queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */
blk_queue_make_request(ns->queue, nvme_make_request);
ns->dev = dev;
ns->queue->queuedata = ns;
@@ -1378,6 +1582,7 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
ns->disk = disk;
lbaf = id->flbas & 0xf;
ns->lba_shift = id->lbaf[lbaf].ds;
+ ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
if (dev->max_hw_sectors)
blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
@@ -1392,6 +1597,9 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
+ if (dev->oncs & NVME_CTRL_ONCS_DSM)
+ nvme_config_discard(ns);
+
return ns;
out_free_queue:
@@ -1496,14 +1704,21 @@ static void nvme_free_queues(struct nvme_dev *dev)
nvme_free_queue(dev, i);
}
+/*
+ * Return: error value if an error occurred setting up the queues or calling
+ * Identify Device. 0 if these succeeded, even if adding some of the
+ * namespaces failed. At the moment, these failures are silent. TBD which
+ * failures should be reported.
+ */
static int nvme_dev_add(struct nvme_dev *dev)
{
int res, nn, i;
- struct nvme_ns *ns, *next;
+ struct nvme_ns *ns;
struct nvme_id_ctrl *ctrl;
struct nvme_id_ns *id_ns;
void *mem;
dma_addr_t dma_addr;
+ int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
res = nvme_setup_io_queues(dev);
if (res)
@@ -1511,22 +1726,26 @@ static int nvme_dev_add(struct nvme_dev *dev)
mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
res = nvme_identify(dev, 0, 1, dma_addr);
if (res) {
res = -EIO;
- goto out_free;
+ goto out;
}
ctrl = mem;
nn = le32_to_cpup(&ctrl->nn);
+ dev->oncs = le16_to_cpup(&ctrl->oncs);
memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
- if (ctrl->mdts) {
- int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
+ if (ctrl->mdts)
dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
- }
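+	/* Vendor-specific: this Intel controller reports its stripe size in
+	 * identify byte vs[3]. */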
+ if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) &&
+ (dev->pci_dev->device == 0x0953) && ctrl->vs[3])
+ dev->stripe_size = 1 << (ctrl->vs[3] + shift);
id_ns = mem;
for (i = 1; i <= nn; i++) {
@@ -1548,14 +1767,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
}
list_for_each_entry(ns, &dev->namespaces, list)
add_disk(ns->disk);
-
- goto out;
-
- out_free:
- list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
- list_del(&ns->list);
- nvme_ns_free(ns);
- }
+ res = 0;
out:
dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
@@ -1634,6 +1846,56 @@ static void nvme_release_instance(struct nvme_dev *dev)
spin_unlock(&dev_list_lock);
}
+static void nvme_free_dev(struct kref *kref)
+{
+ struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
+ nvme_dev_remove(dev);
+ pci_disable_msix(dev->pci_dev);
+ iounmap(dev->bar);
+ nvme_release_instance(dev);
+ nvme_release_prp_pools(dev);
+ pci_disable_device(dev->pci_dev);
+ pci_release_regions(dev->pci_dev);
+ kfree(dev->queues);
+ kfree(dev->entry);
+ kfree(dev);
+}
+
+static int nvme_dev_open(struct inode *inode, struct file *f)
+{
+ struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev,
+ miscdev);
+ kref_get(&dev->kref);
+ f->private_data = dev;
+ return 0;
+}
+
+static int nvme_dev_release(struct inode *inode, struct file *f)
+{
+ struct nvme_dev *dev = f->private_data;
+ kref_put(&dev->kref, nvme_free_dev);
+ return 0;
+}
+
+static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ struct nvme_dev *dev = f->private_data;
+ switch (cmd) {
+ case NVME_IOCTL_ADMIN_CMD:
+ return nvme_user_admin_cmd(dev, (void __user *)arg);
+ default:
+ return -ENOTTY;
+ }
+}
+
+static const struct file_operations nvme_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = nvme_dev_open,
+ .release = nvme_dev_release,
+ .unlocked_ioctl = nvme_dev_ioctl,
+ .compat_ioctl = nvme_dev_ioctl,
+};
+
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int bars, result = -ENOMEM;
@@ -1692,8 +1954,20 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto delete;
+ scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
+ dev->miscdev.minor = MISC_DYNAMIC_MINOR;
+ dev->miscdev.parent = &pdev->dev;
+ dev->miscdev.name = dev->name;
+ dev->miscdev.fops = &nvme_dev_fops;
+ result = misc_register(&dev->miscdev);
+ if (result)
+ goto remove;
+
+ kref_init(&dev->kref);
return 0;
+ remove:
+ nvme_dev_remove(dev);
delete:
spin_lock(&dev_list_lock);
list_del(&dev->node);
@@ -1719,16 +1993,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
static void nvme_remove(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- nvme_dev_remove(dev);
- pci_disable_msix(pdev);
- iounmap(dev->bar);
- nvme_release_instance(dev);
- nvme_release_prp_pools(dev);
- pci_disable_device(pdev);
- pci_release_regions(pdev);
- kfree(dev->queues);
- kfree(dev->entry);
- kfree(dev);
+ misc_deregister(&dev->miscdev);
+ kref_put(&dev->kref, nvme_free_dev);
}
/* These functions are yet to be implemented */
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
new file mode 100644
index 00000000000..fed54b03989
--- /dev/null
+++ b/drivers/block/nvme-scsi.c
@@ -0,0 +1,3053 @@
+/*
+ * NVM Express device driver
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/*
+ * Refer to the SCSI-NVMe Translation spec for details on how
+ * each command is translated.
+ */
+
+#include <linux/nvme.h>
+#include <linux/bio.h>
+#include <linux/bitops.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kdev_t.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/poison.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <scsi/sg.h>
+#include <scsi/scsi.h>
+
+
+static int sg_version_num = 30534; /* 2 digits for each component */
+
+#define SNTI_TRANSLATION_SUCCESS 0
+#define SNTI_INTERNAL_ERROR 1
+
+/* VPD Page Codes */
+#define VPD_SUPPORTED_PAGES 0x00
+#define VPD_SERIAL_NUMBER 0x80
+#define VPD_DEVICE_IDENTIFIERS 0x83
+#define VPD_EXTENDED_INQUIRY 0x86
+#define VPD_BLOCK_DEV_CHARACTERISTICS 0xB1
+
+/* CDB offsets */
+#define REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET 6
+#define REPORT_LUNS_SR_OFFSET 2
+#define READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET 10
+#define REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET 4
+#define REQUEST_SENSE_DESC_OFFSET 1
+#define REQUEST_SENSE_DESC_MASK 0x01
+#define DESCRIPTOR_FORMAT_SENSE_DATA_TYPE 1
+#define INQUIRY_EVPD_BYTE_OFFSET 1
+#define INQUIRY_PAGE_CODE_BYTE_OFFSET 2
+#define INQUIRY_EVPD_BIT_MASK 1
+#define INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET 3
+#define START_STOP_UNIT_CDB_IMMED_OFFSET 1
+#define START_STOP_UNIT_CDB_IMMED_MASK 0x1
+#define START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET 3
+#define START_STOP_UNIT_CDB_POWER_COND_MOD_MASK 0xF
+#define START_STOP_UNIT_CDB_POWER_COND_OFFSET 4
+#define START_STOP_UNIT_CDB_POWER_COND_MASK 0xF0
+#define START_STOP_UNIT_CDB_NO_FLUSH_OFFSET 4
+#define START_STOP_UNIT_CDB_NO_FLUSH_MASK 0x4
+#define START_STOP_UNIT_CDB_START_OFFSET 4
+#define START_STOP_UNIT_CDB_START_MASK 0x1
+#define WRITE_BUFFER_CDB_MODE_OFFSET 1
+#define WRITE_BUFFER_CDB_MODE_MASK 0x1F
+#define WRITE_BUFFER_CDB_BUFFER_ID_OFFSET 2
+#define WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET 3
+#define WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET 6
+#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET 1
+#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK 0xC0
+#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT 6
+#define FORMAT_UNIT_CDB_LONG_LIST_OFFSET 1
+#define FORMAT_UNIT_CDB_LONG_LIST_MASK 0x20
+#define FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET 1
+#define FORMAT_UNIT_CDB_FORMAT_DATA_MASK 0x10
+#define FORMAT_UNIT_SHORT_PARM_LIST_LEN 4
+#define FORMAT_UNIT_LONG_PARM_LIST_LEN 8
+#define FORMAT_UNIT_PROT_INT_OFFSET 3
+#define FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET 0
+#define FORMAT_UNIT_PROT_FIELD_USAGE_MASK 0x07
+#define UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET 7
+
+/* Misc. defines */
+#define NIBBLE_SHIFT 4
+#define FIXED_SENSE_DATA 0x70
+#define DESC_FORMAT_SENSE_DATA 0x72
+#define FIXED_SENSE_DATA_ADD_LENGTH 10
+#define LUN_ENTRY_SIZE 8
+#define LUN_DATA_HEADER_SIZE 8
+#define ALL_LUNS_RETURNED 0x02
+#define ALL_WELL_KNOWN_LUNS_RETURNED 0x01
+#define RESTRICTED_LUNS_RETURNED 0x00
+#define NVME_POWER_STATE_START_VALID 0x00
+#define NVME_POWER_STATE_ACTIVE 0x01
+#define NVME_POWER_STATE_IDLE 0x02
+#define NVME_POWER_STATE_STANDBY 0x03
+#define NVME_POWER_STATE_LU_CONTROL 0x07
+#define POWER_STATE_0 0
+#define POWER_STATE_1 1
+#define POWER_STATE_2 2
+#define POWER_STATE_3 3
+#define DOWNLOAD_SAVE_ACTIVATE 0x05
+#define DOWNLOAD_SAVE_DEFER_ACTIVATE 0x0E
+#define ACTIVATE_DEFERRED_MICROCODE 0x0F
+#define FORMAT_UNIT_IMMED_MASK 0x2
+#define FORMAT_UNIT_IMMED_OFFSET 1
+#define KELVIN_TEMP_FACTOR 273
+#define FIXED_FMT_SENSE_DATA_SIZE 18
+#define DESC_FMT_SENSE_DATA_SIZE 8
+
+/* SCSI/NVMe defines and bit masks */
+#define INQ_STANDARD_INQUIRY_PAGE 0x00
+#define INQ_SUPPORTED_VPD_PAGES_PAGE 0x00
+#define INQ_UNIT_SERIAL_NUMBER_PAGE 0x80
+#define INQ_DEVICE_IDENTIFICATION_PAGE 0x83
+#define INQ_EXTENDED_INQUIRY_DATA_PAGE 0x86
+#define INQ_BDEV_CHARACTERISTICS_PAGE 0xB1
+#define INQ_SERIAL_NUMBER_LENGTH 0x14
+#define INQ_NUM_SUPPORTED_VPD_PAGES 5
+#define VERSION_SPC_4 0x06
+#define ACA_UNSUPPORTED 0
+#define STANDARD_INQUIRY_LENGTH 36
+#define ADDITIONAL_STD_INQ_LENGTH 31
+#define EXTENDED_INQUIRY_DATA_PAGE_LENGTH 0x3C
+#define RESERVED_FIELD 0
+
+/* SCSI READ/WRITE Defines */
+#define IO_CDB_WP_MASK 0xE0
+#define IO_CDB_WP_SHIFT 5
+#define IO_CDB_FUA_MASK 0x8
+#define IO_6_CDB_LBA_OFFSET 0
+#define IO_6_CDB_LBA_MASK 0x001FFFFF
+#define IO_6_CDB_TX_LEN_OFFSET 4
+#define IO_6_DEFAULT_TX_LEN 256
+#define IO_10_CDB_LBA_OFFSET 2
+#define IO_10_CDB_TX_LEN_OFFSET 7
+#define IO_10_CDB_WP_OFFSET 1
+#define IO_10_CDB_FUA_OFFSET 1
+#define IO_12_CDB_LBA_OFFSET 2
+#define IO_12_CDB_TX_LEN_OFFSET 6
+#define IO_12_CDB_WP_OFFSET 1
+#define IO_12_CDB_FUA_OFFSET 1
+#define IO_16_CDB_FUA_OFFSET 1
+#define IO_16_CDB_WP_OFFSET 1
+#define IO_16_CDB_LBA_OFFSET 2
+#define IO_16_CDB_TX_LEN_OFFSET 10
+
+/* Mode Sense/Select defines */
+#define MODE_PAGE_INFO_EXCEP 0x1C
+#define MODE_PAGE_CACHING 0x08
+#define MODE_PAGE_CONTROL 0x0A
+#define MODE_PAGE_POWER_CONDITION 0x1A
+#define MODE_PAGE_RETURN_ALL 0x3F
+#define MODE_PAGE_BLK_DES_LEN 0x08
+#define MODE_PAGE_LLBAA_BLK_DES_LEN 0x10
+#define MODE_PAGE_CACHING_LEN 0x14
+#define MODE_PAGE_CONTROL_LEN 0x0C
+#define MODE_PAGE_POW_CND_LEN 0x28
+#define MODE_PAGE_INF_EXC_LEN 0x0C
+#define MODE_PAGE_ALL_LEN 0x54
+#define MODE_SENSE6_MPH_SIZE 4
+#define MODE_SENSE6_ALLOC_LEN_OFFSET 4
+#define MODE_SENSE_PAGE_CONTROL_OFFSET 2
+#define MODE_SENSE_PAGE_CONTROL_MASK 0xC0
+#define MODE_SENSE_PAGE_CODE_OFFSET 2
+#define MODE_SENSE_PAGE_CODE_MASK 0x3F
+#define MODE_SENSE_LLBAA_OFFSET 1
+#define MODE_SENSE_LLBAA_MASK 0x10
+#define MODE_SENSE_LLBAA_SHIFT 4
+#define MODE_SENSE_DBD_OFFSET 1
+#define MODE_SENSE_DBD_MASK 8
+#define MODE_SENSE_DBD_SHIFT 3
+#define MODE_SENSE10_MPH_SIZE 8
+#define MODE_SENSE10_ALLOC_LEN_OFFSET 7
+#define MODE_SELECT_CDB_PAGE_FORMAT_OFFSET 1
+#define MODE_SELECT_CDB_SAVE_PAGES_OFFSET 1
+#define MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET 4
+#define MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET 7
+#define MODE_SELECT_CDB_PAGE_FORMAT_MASK 0x10
+#define MODE_SELECT_CDB_SAVE_PAGES_MASK 0x1
+#define MODE_SELECT_6_BD_OFFSET 3
+#define MODE_SELECT_10_BD_OFFSET 6
+#define MODE_SELECT_10_LLBAA_OFFSET 4
+#define MODE_SELECT_10_LLBAA_MASK 1
+#define MODE_SELECT_6_MPH_SIZE 4
+#define MODE_SELECT_10_MPH_SIZE 8
+#define CACHING_MODE_PAGE_WCE_MASK 0x04
+#define MODE_SENSE_BLK_DESC_ENABLED 0
+#define MODE_SENSE_BLK_DESC_COUNT 1
+#define MODE_SELECT_PAGE_CODE_MASK 0x3F
+#define SHORT_DESC_BLOCK 8
+#define LONG_DESC_BLOCK 16
+#define MODE_PAGE_POW_CND_LEN_FIELD 0x26
+#define MODE_PAGE_INF_EXC_LEN_FIELD 0x0A
+#define MODE_PAGE_CACHING_LEN_FIELD 0x12
+#define MODE_PAGE_CONTROL_LEN_FIELD 0x0A
+#define MODE_SENSE_PC_CURRENT_VALUES 0
+
+/* Log Sense defines */
+#define LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE 0x00
+#define LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH 0x07
+#define LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE 0x2F
+#define LOG_PAGE_TEMPERATURE_PAGE 0x0D
+#define LOG_SENSE_CDB_SP_OFFSET 1
+#define LOG_SENSE_CDB_SP_NOT_ENABLED 0
+#define LOG_SENSE_CDB_PC_OFFSET 2
+#define LOG_SENSE_CDB_PC_MASK 0xC0
+#define LOG_SENSE_CDB_PC_SHIFT 6
+#define LOG_SENSE_CDB_PC_CUMULATIVE_VALUES 1
+#define LOG_SENSE_CDB_PAGE_CODE_MASK 0x3F
+#define LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET 7
+#define REMAINING_INFO_EXCP_PAGE_LENGTH 0x8
+#define LOG_INFO_EXCP_PAGE_LENGTH 0xC
+#define REMAINING_TEMP_PAGE_LENGTH 0xC
+#define LOG_TEMP_PAGE_LENGTH 0x10
+#define LOG_TEMP_UNKNOWN 0xFF
+#define SUPPORTED_LOG_PAGES_PAGE_LENGTH 0x3
+
+/* Read Capacity defines */
+#define READ_CAP_10_RESP_SIZE 8
+#define READ_CAP_16_RESP_SIZE 32
+
+/* NVMe Namespace and Command Defines */
+#define NVME_GET_SMART_LOG_PAGE 0x02
+#define NVME_GET_FEAT_TEMP_THRESH 0x04
+#define BYTES_TO_DWORDS 4
+#define NVME_MAX_FIRMWARE_SLOT 7
+
+/* Report LUNs defines */
+#define REPORT_LUNS_FIRST_LUN_OFFSET 8
+
+/* SCSI ADDITIONAL SENSE Codes */
+
+#define SCSI_ASC_NO_SENSE 0x00
+#define SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT 0x03
+#define SCSI_ASC_LUN_NOT_READY 0x04
+#define SCSI_ASC_WARNING 0x0B
+#define SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED 0x10
+#define SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED 0x10
+#define SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED 0x10
+#define SCSI_ASC_UNRECOVERED_READ_ERROR 0x11
+#define SCSI_ASC_MISCOMPARE_DURING_VERIFY 0x1D
+#define SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID 0x20
+#define SCSI_ASC_ILLEGAL_COMMAND 0x20
+#define SCSI_ASC_ILLEGAL_BLOCK 0x21
+#define SCSI_ASC_INVALID_CDB 0x24
+#define SCSI_ASC_INVALID_LUN 0x25
+#define SCSI_ASC_INVALID_PARAMETER 0x26
+#define SCSI_ASC_FORMAT_COMMAND_FAILED 0x31
+#define SCSI_ASC_INTERNAL_TARGET_FAILURE 0x44
+
+/* SCSI ADDITIONAL SENSE Code Qualifiers */
+
+#define SCSI_ASCQ_CAUSE_NOT_REPORTABLE 0x00
+#define SCSI_ASCQ_FORMAT_COMMAND_FAILED 0x01
+#define SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED 0x01
+#define SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED 0x02
+#define SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED 0x03
+#define SCSI_ASCQ_FORMAT_IN_PROGRESS 0x04
+#define SCSI_ASCQ_POWER_LOSS_EXPECTED 0x08
+#define SCSI_ASCQ_INVALID_LUN_ID 0x09
+
+/*
+ * DEVICE_SPECIFIC_PARAMETER byte in the mode parameter header (see
+ * sbc2r16); setting bit 4 (0x10) would advertise DPOFUA support, so it
+ * is left at zero here.
+ */
+#define DEVICE_SPECIFIC_PARAMETER 0
+#define VPD_ID_DESCRIPTOR_LENGTH sizeof(VPD_IDENTIFICATION_DESCRIPTOR)
+
+/* MACROs to extract information from CDBs */
+
+#define GET_OPCODE(cdb) cdb[0]
+
+#define GET_U8_FROM_CDB(cdb, index) (cdb[index] << 0)
+
+#define GET_U16_FROM_CDB(cdb, index) ((cdb[index] << 8) | (cdb[index + 1] << 0))
+
+#define GET_U24_FROM_CDB(cdb, index) ((cdb[index] << 16) | \
+(cdb[index + 1] << 8) | \
+(cdb[index + 2] << 0))
+
+#define GET_U32_FROM_CDB(cdb, index) ((cdb[index] << 24) | \
+(cdb[index + 1] << 16) | \
+(cdb[index + 2] << 8) | \
+(cdb[index + 3] << 0))
+
+#define GET_U64_FROM_CDB(cdb, index) ((((u64)cdb[index]) << 56) | \
+(((u64)cdb[index + 1]) << 48) | \
+(((u64)cdb[index + 2]) << 40) | \
+(((u64)cdb[index + 3]) << 32) | \
+(((u64)cdb[index + 4]) << 24) | \
+(((u64)cdb[index + 5]) << 16) | \
+(((u64)cdb[index + 6]) << 8) | \
+(((u64)cdb[index + 7]) << 0))
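+
+/*
+ * Example (illustrative): for a READ (16) CDB,
+ * GET_U64_FROM_CDB(cdb, IO_16_CDB_LBA_OFFSET) assembles the big-endian
+ * LBA from bytes 2..9, and GET_U32_FROM_CDB(cdb, IO_16_CDB_TX_LEN_OFFSET)
+ * the transfer length from bytes 10..13.
+ */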
+
+/* Inquiry Helper Macros */
+#define GET_INQ_EVPD_BIT(cdb) \
+((GET_U8_FROM_CDB(cdb, INQUIRY_EVPD_BYTE_OFFSET) & \
+INQUIRY_EVPD_BIT_MASK) ? 1 : 0)
+
+#define GET_INQ_PAGE_CODE(cdb) \
+(GET_U8_FROM_CDB(cdb, INQUIRY_PAGE_CODE_BYTE_OFFSET))
+
+#define GET_INQ_ALLOC_LENGTH(cdb) \
+(GET_U16_FROM_CDB(cdb, INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET))
+
+/* Report LUNs Helper Macros */
+#define GET_REPORT_LUNS_ALLOC_LENGTH(cdb) \
+(GET_U32_FROM_CDB(cdb, REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET))
+
+/* Read Capacity Helper Macros */
+#define GET_READ_CAP_16_ALLOC_LENGTH(cdb) \
+(GET_U32_FROM_CDB(cdb, READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET))
+
+#define IS_READ_CAP_16(cdb) \
+((cdb[0] == SERVICE_ACTION_IN && cdb[1] == SAI_READ_CAPACITY_16) ? 1 : 0)
+
+/* Request Sense Helper Macros */
+#define GET_REQUEST_SENSE_ALLOC_LENGTH(cdb) \
+(GET_U8_FROM_CDB(cdb, REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET))
+
+/* Mode Sense Helper Macros */
+#define GET_MODE_SENSE_DBD(cdb) \
+((GET_U8_FROM_CDB(cdb, MODE_SENSE_DBD_OFFSET) & MODE_SENSE_DBD_MASK) >> \
+MODE_SENSE_DBD_SHIFT)
+
+#define GET_MODE_SENSE_LLBAA(cdb) \
+((GET_U8_FROM_CDB(cdb, MODE_SENSE_LLBAA_OFFSET) & \
+MODE_SENSE_LLBAA_MASK) >> MODE_SENSE_LLBAA_SHIFT)
+
+#define GET_MODE_SENSE_MPH_SIZE(cdb10) \
+(cdb10 ? MODE_SENSE10_MPH_SIZE : MODE_SENSE6_MPH_SIZE)
+
+
+/*
+ * Struct to gather data that needs to be extracted from a SCSI CDB.
+ * Not conforming to any particular CDB variant, but compatible with all.
+ */
+
+struct nvme_trans_io_cdb {
+ u8 fua;
+ u8 prot_info;
+ u64 lba;
+ u32 xfer_len;
+};
+
+
+/* Internal Helper Functions */
+
+
+/* Copy data to userspace memory */
+
+static int nvme_trans_copy_to_user(struct sg_io_hdr *hdr, void *from,
+ unsigned long n)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ unsigned long not_copied;
+ int i;
+ void *index = from;
+ size_t remaining = n;
+ size_t xfer_len;
+
+ if (hdr->iovec_count > 0) {
+ struct sg_iovec sgl;
+
+ for (i = 0; i < hdr->iovec_count; i++) {
+ not_copied = copy_from_user(&sgl, hdr->dxferp +
+ i * sizeof(struct sg_iovec),
+ sizeof(struct sg_iovec));
+ if (not_copied)
+ return -EFAULT;
+ xfer_len = min(remaining, sgl.iov_len);
+ not_copied = copy_to_user(sgl.iov_base, index,
+ xfer_len);
+ if (not_copied) {
+ res = -EFAULT;
+ break;
+ }
+ index += xfer_len;
+ remaining -= xfer_len;
+ if (remaining == 0)
+ break;
+ }
+ return res;
+ }
+ not_copied = copy_to_user(hdr->dxferp, from, n);
+ if (not_copied)
+ res = -EFAULT;
+ return res;
+}
+
+/* Copy data from userspace memory */
+
+static int nvme_trans_copy_from_user(struct sg_io_hdr *hdr, void *to,
+ unsigned long n)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ unsigned long not_copied;
+ int i;
+ void *index = to;
+ size_t remaining = n;
+ size_t xfer_len;
+
+ if (hdr->iovec_count > 0) {
+ struct sg_iovec sgl;
+
+ for (i = 0; i < hdr->iovec_count; i++) {
+ not_copied = copy_from_user(&sgl, hdr->dxferp +
+ i * sizeof(struct sg_iovec),
+ sizeof(struct sg_iovec));
+ if (not_copied)
+ return -EFAULT;
+ xfer_len = min(remaining, sgl.iov_len);
+ not_copied = copy_from_user(index, sgl.iov_base,
+ xfer_len);
+ if (not_copied) {
+ res = -EFAULT;
+ break;
+ }
+ index += xfer_len;
+ remaining -= xfer_len;
+ if (remaining == 0)
+ break;
+ }
+ return res;
+ }
+
+ not_copied = copy_from_user(to, hdr->dxferp, n);
+ if (not_copied)
+ res = -EFAULT;
+ return res;
+}
+
+/* Status/Sense Buffer Writeback */
+
+static int nvme_trans_completion(struct sg_io_hdr *hdr, u8 status, u8 sense_key,
+ u8 asc, u8 ascq)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u8 xfer_len;
+ u8 resp[DESC_FMT_SENSE_DATA_SIZE];
+
+ if (scsi_status_is_good(status)) {
+ hdr->status = SAM_STAT_GOOD;
+ hdr->masked_status = GOOD;
+ hdr->host_status = DID_OK;
+ hdr->driver_status = DRIVER_OK;
+ hdr->sb_len_wr = 0;
+ } else {
+ hdr->status = status;
+ hdr->masked_status = status >> 1;
+ hdr->host_status = DID_OK;
+ hdr->driver_status = DRIVER_OK;
+
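+		/* Minimal descriptor-format sense data: [0] = response code
+		 * 0x72, [1] = sense key, [2] = ASC, [3] = ASCQ. */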
+ memset(resp, 0, DESC_FMT_SENSE_DATA_SIZE);
+ resp[0] = DESC_FORMAT_SENSE_DATA;
+ resp[1] = sense_key;
+ resp[2] = asc;
+ resp[3] = ascq;
+
+ xfer_len = min_t(u8, hdr->mx_sb_len, DESC_FMT_SENSE_DATA_SIZE);
+ hdr->sb_len_wr = xfer_len;
+ if (copy_to_user(hdr->sbp, resp, xfer_len) > 0)
+ res = -EFAULT;
+ }
+
+ return res;
+}
+
+static int nvme_trans_status_code(struct sg_io_hdr *hdr, int nvme_sc)
+{
+ u8 status, sense_key, asc, ascq;
+ int res = SNTI_TRANSLATION_SUCCESS;
+
+ /* For non-nvme (Linux) errors, simply return the error code */
+ if (nvme_sc < 0)
+ return nvme_sc;
+
+ /* Mask DNR, More, and reserved fields */
+ nvme_sc &= 0x7FF;
+
+ switch (nvme_sc) {
+ /* Generic Command Status */
+ case NVME_SC_SUCCESS:
+ status = SAM_STAT_GOOD;
+ sense_key = NO_SENSE;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_INVALID_OPCODE:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_ILLEGAL_COMMAND;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_INVALID_FIELD:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_INVALID_CDB;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_DATA_XFER_ERROR:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MEDIUM_ERROR;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_POWER_LOSS:
+ status = SAM_STAT_TASK_ABORTED;
+ sense_key = ABORTED_COMMAND;
+ asc = SCSI_ASC_WARNING;
+ ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
+ break;
+ case NVME_SC_INTERNAL:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = HARDWARE_ERROR;
+ asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_ABORT_REQ:
+ status = SAM_STAT_TASK_ABORTED;
+ sense_key = ABORTED_COMMAND;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_ABORT_QUEUE:
+ status = SAM_STAT_TASK_ABORTED;
+ sense_key = ABORTED_COMMAND;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_FUSED_FAIL:
+ status = SAM_STAT_TASK_ABORTED;
+ sense_key = ABORTED_COMMAND;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_FUSED_MISSING:
+ status = SAM_STAT_TASK_ABORTED;
+ sense_key = ABORTED_COMMAND;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_INVALID_NS:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
+ ascq = SCSI_ASCQ_INVALID_LUN_ID;
+ break;
+ case NVME_SC_LBA_RANGE:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_ILLEGAL_BLOCK;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_CAP_EXCEEDED:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MEDIUM_ERROR;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_NS_NOT_READY:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = NOT_READY;
+ asc = SCSI_ASC_LUN_NOT_READY;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+
+ /* Command Specific Status */
+ case NVME_SC_INVALID_FORMAT:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
+ ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
+ break;
+ case NVME_SC_BAD_ATTRIBUTES:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_INVALID_CDB;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+
+ /* Media Errors */
+ case NVME_SC_WRITE_FAULT:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MEDIUM_ERROR;
+ asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_READ_ERROR:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MEDIUM_ERROR;
+ asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_GUARD_CHECK:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MEDIUM_ERROR;
+ asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
+ ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
+ break;
+ case NVME_SC_APPTAG_CHECK:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MEDIUM_ERROR;
+ asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
+ ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
+ break;
+ case NVME_SC_REFTAG_CHECK:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MEDIUM_ERROR;
+ asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
+ ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
+ break;
+ case NVME_SC_COMPARE_FAILED:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MISCOMPARE;
+ asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_ACCESS_DENIED:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
+ ascq = SCSI_ASCQ_INVALID_LUN_ID;
+ break;
+
+ /* Unspecified/Default */
+ case NVME_SC_CMDID_CONFLICT:
+ case NVME_SC_CMD_SEQ_ERROR:
+ case NVME_SC_CQ_INVALID:
+ case NVME_SC_QID_INVALID:
+ case NVME_SC_QUEUE_SIZE:
+ case NVME_SC_ABORT_LIMIT:
+ case NVME_SC_ABORT_MISSING:
+ case NVME_SC_ASYNC_LIMIT:
+ case NVME_SC_FIRMWARE_SLOT:
+ case NVME_SC_FIRMWARE_IMAGE:
+ case NVME_SC_INVALID_VECTOR:
+ case NVME_SC_INVALID_LOG_PAGE:
+ default:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ }
+
+ res = nvme_trans_completion(hdr, status, sense_key, asc, ascq);
+
+ return res;
+}
+
+/* INQUIRY Helper Functions */
+
+static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *inq_response,
+ int alloc_len)
+{
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ns *id_ns;
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ int xfer_len;
+ u8 resp_data_format = 0x02;
+ u8 protect;
+ u8 cmdque = 0x01 << 1;
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out_dma;
+ }
+
+ /* nvme ns identify - use DPS value for PROTECT field */
+ nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ /*
+ * If nvme_sc was -ve, res will be -ve here.
+	 * If nvme_sc was +ve, the status will have been translated, and res
+	 *  can only be 0 or -ve.
+	 * - If 0 && nvme_sc > 0, fall into the next if, where res gets nvme_sc.
+	 * - If -ve, return, because it's a Linux error.
+ */
+ if (res)
+ goto out_free;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_free;
+ }
+ id_ns = mem;
+	protect = id_ns->dps ? 0x01 : 0;
+
+ memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+ inq_response[2] = VERSION_SPC_4;
+ inq_response[3] = resp_data_format; /*normaca=0 | hisup=0 */
+ inq_response[4] = ADDITIONAL_STD_INQ_LENGTH;
+ inq_response[5] = protect; /* sccs=0 | acc=0 | tpgs=0 | pc3=0 */
+ inq_response[7] = cmdque; /* wbus16=0 | sync=0 | vs=0 */
+	strncpy(&inq_response[8], "NVMe    ", 8);
+ strncpy(&inq_response[16], dev->model, 16);
+ strncpy(&inq_response[32], dev->firmware_rev, 4);
+
+ xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+ out_free:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+ dma_addr);
+ out_dma:
+ return res;
+}
+
+static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *inq_response,
+ int alloc_len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int xfer_len;
+
+ memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+ inq_response[1] = INQ_SUPPORTED_VPD_PAGES_PAGE; /* Page Code */
+ inq_response[3] = INQ_NUM_SUPPORTED_VPD_PAGES; /* Page Length */
+ inq_response[4] = INQ_SUPPORTED_VPD_PAGES_PAGE;
+ inq_response[5] = INQ_UNIT_SERIAL_NUMBER_PAGE;
+ inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE;
+ inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE;
+ inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE;
+
+ xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+ return res;
+}
+
+static int nvme_trans_unit_serial_page(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *inq_response,
+ int alloc_len)
+{
+ struct nvme_dev *dev = ns->dev;
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int xfer_len;
+
+ memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+ inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */
+ inq_response[3] = INQ_SERIAL_NUMBER_LENGTH; /* Page Length */
+ strncpy(&inq_response[4], dev->serial, INQ_SERIAL_NUMBER_LENGTH);
+
+ xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+ return res;
+}
+
+static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *inq_response, int alloc_len)
+{
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ctrl *id_ctrl;
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ u8 ieee[4];
+ int xfer_len;
+ __be32 tmp_id = cpu_to_be32(ns->ns_id);
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out_dma;
+ }
+
+ /* nvme controller identify */
+ nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_free;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_free;
+ }
+ id_ctrl = mem;
+
+ /* Since SCSI tried to save 4 bits... [SPC-4(r34) Table 591] */
+ ieee[0] = id_ctrl->ieee[0] << 4;
+ ieee[1] = id_ctrl->ieee[0] >> 4 | id_ctrl->ieee[1] << 4;
+ ieee[2] = id_ctrl->ieee[1] >> 4 | id_ctrl->ieee[2] << 4;
+ ieee[3] = id_ctrl->ieee[2] >> 4;
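+	/*
+	 * e.g. (illustrative): an id_ctrl->ieee of {0xAB, 0xCD, 0xEF} becomes
+	 * ieee[] = {0xB0, 0xDA, 0xFC, 0x0E}, leaving the top nibble of
+	 * ieee[3] free for the NAA=6h field below.
+	 */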
+
+ memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+ inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE; /* Page Code */
+ inq_response[3] = 20; /* Page Length */
+ /* Designation Descriptor start */
+ inq_response[4] = 0x01; /* Proto ID=0h | Code set=1h */
+ inq_response[5] = 0x03; /* PIV=0b | Asso=00b | Designator Type=3h */
+ inq_response[6] = 0x00; /* Rsvd */
+ inq_response[7] = 16; /* Designator Length */
+ /* Designator start */
+ inq_response[8] = 0x60 | ieee[3]; /* NAA=6h | IEEE ID MSB, High nibble*/
+ inq_response[9] = ieee[2]; /* IEEE ID */
+ inq_response[10] = ieee[1]; /* IEEE ID */
+ inq_response[11] = ieee[0]; /* IEEE ID| Vendor Specific ID... */
+ inq_response[12] = (dev->pci_dev->vendor & 0xFF00) >> 8;
+ inq_response[13] = (dev->pci_dev->vendor & 0x00FF);
+ inq_response[14] = dev->serial[0];
+ inq_response[15] = dev->serial[1];
+ inq_response[16] = dev->model[0];
+ inq_response[17] = dev->model[1];
+ memcpy(&inq_response[18], &tmp_id, sizeof(u32));
+ /* Last 2 bytes are zero */
+
+ xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+ out_free:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+ dma_addr);
+ out_dma:
+ return res;
+}
+
+static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ int alloc_len)
+{
+ u8 *inq_response;
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ctrl *id_ctrl;
+ struct nvme_id_ns *id_ns;
+ int xfer_len;
+ u8 microcode = 0x80;
+ u8 spt;
+ u8 spt_lut[8] = {0, 0, 2, 1, 4, 6, 5, 7};
+ u8 grd_chk, app_chk, ref_chk, protect;
+ u8 uask_sup = 0x20;
+ u8 v_sup;
+ u8 luiclr = 0x01;
+
+ inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
+ if (inq_response == NULL) {
+ res = -ENOMEM;
+ goto out_mem;
+ }
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out_dma;
+ }
+
+ /* nvme ns identify */
+ nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_free;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_free;
+ }
+ id_ns = mem;
+ spt = spt_lut[(id_ns->dpc) & 0x07] << 3;
+	protect = id_ns->dps ? 0x01 : 0;
+ grd_chk = protect << 2;
+ app_chk = protect << 1;
+ ref_chk = protect;
+
+ /* nvme controller identify */
+ nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_free;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_free;
+ }
+ id_ctrl = mem;
+ v_sup = id_ctrl->vwc;
+
+ memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
+ inq_response[1] = INQ_EXTENDED_INQUIRY_DATA_PAGE; /* Page Code */
+ inq_response[2] = 0x00; /* Page Length MSB */
+ inq_response[3] = 0x3C; /* Page Length LSB */
+ inq_response[4] = microcode | spt | grd_chk | app_chk | ref_chk;
+ inq_response[5] = uask_sup;
+ inq_response[6] = v_sup;
+ inq_response[7] = luiclr;
+ inq_response[8] = 0;
+ inq_response[9] = 0;
+
+ xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+ out_free:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+ dma_addr);
+ out_dma:
+ kfree(inq_response);
+ out_mem:
+ return res;
+}
+
+static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ int alloc_len)
+{
+ u8 *inq_response;
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int xfer_len;
+
+ inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
+ if (inq_response == NULL) {
+ res = -ENOMEM;
+ goto out_mem;
+ }
+
+ memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
+ inq_response[1] = INQ_BDEV_CHARACTERISTICS_PAGE; /* Page Code */
+ inq_response[2] = 0x00; /* Page Length MSB */
+ inq_response[3] = 0x3C; /* Page Length LSB */
+ inq_response[4] = 0x00; /* Medium Rotation Rate MSB */
+ inq_response[5] = 0x01; /* Medium Rotation Rate LSB */
+ inq_response[6] = 0x00; /* Form Factor */
+
+ xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+ kfree(inq_response);
+ out_mem:
+ return res;
+}
+
+/* LOG SENSE Helper Functions */
+
+static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ int alloc_len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int xfer_len;
+ u8 *log_response;
+
+ log_response = kmalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL);
+ if (log_response == NULL) {
+ res = -ENOMEM;
+ goto out_mem;
+ }
+ memset(log_response, 0, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH);
+
+ log_response[0] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
+ /* Subpage=0x00, Page Length MSB=0 */
+ log_response[3] = SUPPORTED_LOG_PAGES_PAGE_LENGTH;
+ log_response[4] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
+ log_response[5] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
+ log_response[6] = LOG_PAGE_TEMPERATURE_PAGE;
+
+ xfer_len = min(alloc_len, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
+
+ kfree(log_response);
+ out_mem:
+ return res;
+}
+
+static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, int alloc_len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int xfer_len;
+ u8 *log_response;
+ struct nvme_command c;
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_smart_log *smart_log;
+ dma_addr_t dma_addr;
+ void *mem;
+ u8 temp_c;
+ u16 temp_k;
+
+ log_response = kmalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL);
+ if (log_response == NULL) {
+ res = -ENOMEM;
+ goto out_mem;
+ }
+ memset(log_response, 0, LOG_INFO_EXCP_PAGE_LENGTH);
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev,
+ sizeof(struct nvme_smart_log),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out_dma;
+ }
+
+ /* Get SMART Log Page */
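+	/* cdw10 packs the dword count into bits 27:16 and the log page ID
+	 * (0x02, SMART) into bits 7:0. */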
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = nvme_admin_get_log_page;
+ c.common.nsid = cpu_to_le32(0xFFFFFFFF);
+ c.common.prp1 = cpu_to_le64(dma_addr);
+ c.common.cdw10[0] = cpu_to_le32(((sizeof(struct nvme_smart_log) /
+ BYTES_TO_DWORDS) << 16) | NVME_GET_SMART_LOG_PAGE);
+ res = nvme_submit_admin_cmd(dev, &c, NULL);
+ if (res != NVME_SC_SUCCESS) {
+ temp_c = LOG_TEMP_UNKNOWN;
+ } else {
+ smart_log = mem;
+ temp_k = (smart_log->temperature[1] << 8) +
+ (smart_log->temperature[0]);
+ temp_c = temp_k - KELVIN_TEMP_FACTOR;
+ }
+
+ log_response[0] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
+ /* Subpage=0x00, Page Length MSB=0 */
+ log_response[3] = REMAINING_INFO_EXCP_PAGE_LENGTH;
+ /* Informational Exceptions Log Parameter 1 Start */
+ /* Parameter Code=0x0000 bytes 4,5 */
+ log_response[6] = 0x23; /* DU=0, TSD=1, ETC=0, TMC=0, FMT_AND_LNK=11b */
+ log_response[7] = 0x04; /* PARAMETER LENGTH */
+	/* Additional Sense Code and Qualifier = 0x00 each */
+ /* Use Temperature from NVMe Get Log Page, convert to C from K */
+ log_response[10] = temp_c;
+
+ xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
+
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
+ mem, dma_addr);
+ out_dma:
+ kfree(log_response);
+ out_mem:
+ return res;
+}
+
+static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ int alloc_len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int xfer_len;
+ u8 *log_response;
+ struct nvme_command c;
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_smart_log *smart_log;
+ dma_addr_t dma_addr;
+ void *mem;
+ u32 feature_resp;
+ u8 temp_c_cur, temp_c_thresh;
+ u16 temp_k;
+
+ log_response = kmalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL);
+ if (log_response == NULL) {
+ res = -ENOMEM;
+ goto out_mem;
+ }
+ memset(log_response, 0, LOG_TEMP_PAGE_LENGTH);
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev,
+ sizeof(struct nvme_smart_log),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out_dma;
+ }
+
+ /* Get SMART Log Page */
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = nvme_admin_get_log_page;
+ c.common.nsid = cpu_to_le32(0xFFFFFFFF);
+ c.common.prp1 = cpu_to_le64(dma_addr);
+ c.common.cdw10[0] = cpu_to_le32(((sizeof(struct nvme_smart_log) /
+ BYTES_TO_DWORDS) << 16) | NVME_GET_SMART_LOG_PAGE);
+ res = nvme_submit_admin_cmd(dev, &c, NULL);
+ if (res != NVME_SC_SUCCESS) {
+ temp_c_cur = LOG_TEMP_UNKNOWN;
+ } else {
+ smart_log = mem;
+ temp_k = (smart_log->temperature[1] << 8) +
+ (smart_log->temperature[0]);
+ temp_c_cur = temp_k - KELVIN_TEMP_FACTOR;
+ }
+
+ /* Get Features for Temp Threshold */
+ res = nvme_get_features(dev, NVME_FEAT_TEMP_THRESH, 0, 0,
+ &feature_resp);
+ if (res != NVME_SC_SUCCESS)
+ temp_c_thresh = LOG_TEMP_UNKNOWN;
+ else
+ temp_c_thresh = (feature_resp & 0xFFFF) - KELVIN_TEMP_FACTOR;
+
+ log_response[0] = LOG_PAGE_TEMPERATURE_PAGE;
+ /* Subpage=0x00, Page Length MSB=0 */
+ log_response[3] = REMAINING_TEMP_PAGE_LENGTH;
+ /* Temperature Log Parameter 1 (Temperature) Start */
+ /* Parameter Code = 0x0000 */
+ log_response[6] = 0x01; /* Format and Linking = 01b */
+ log_response[7] = 0x02; /* Parameter Length */
+ /* Use Temperature from NVMe Get Log Page, convert to C from K */
+ log_response[9] = temp_c_cur;
+ /* Temperature Log Parameter 2 (Reference Temperature) Start */
+ log_response[11] = 0x01; /* Parameter Code = 0x0001 */
+ log_response[12] = 0x01; /* Format and Linking = 01b */
+ log_response[13] = 0x02; /* Parameter Length */
+ /* Use Temperature Thresh from NVMe Get Log Page, convert to C from K */
+ log_response[15] = temp_c_thresh;
+
+ xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
+
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
+ mem, dma_addr);
+ out_dma:
+ kfree(log_response);
+ out_mem:
+ return res;
+}
+
+/* MODE SENSE Helper Functions */
+
+static int nvme_trans_fill_mode_parm_hdr(u8 *resp, int len, u8 cdb10, u8 llbaa,
+ u16 mode_data_length, u16 blk_desc_len)
+{
+ /* Quick check to make sure I don't stomp on my own memory... */
+ if ((cdb10 && len < 8) || (!cdb10 && len < 4))
+ return SNTI_INTERNAL_ERROR;
+
+ if (cdb10) {
+ resp[0] = (mode_data_length & 0xFF00) >> 8;
+ resp[1] = (mode_data_length & 0x00FF);
+ /* resp[2] and [3] are zero */
+ resp[4] = llbaa;
+ resp[5] = RESERVED_FIELD;
+ resp[6] = (blk_desc_len & 0xFF00) >> 8;
+ resp[7] = (blk_desc_len & 0x00FF);
+ } else {
+ resp[0] = (mode_data_length & 0x00FF);
+ /* resp[1] and [2] are zero */
+ resp[3] = (blk_desc_len & 0x00FF);
+ }
+
+ return SNTI_TRANSLATION_SUCCESS;
+}
+
+static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *resp, int len, u8 llbaa)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ns *id_ns;
+ u8 flbas;
+ u32 lba_length;
+
+ if (llbaa == 0 && len < MODE_PAGE_BLK_DES_LEN)
+ return SNTI_INTERNAL_ERROR;
+ else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN)
+ return SNTI_INTERNAL_ERROR;
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ /* nvme ns identify */
+ nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_dma;
+ }
+ id_ns = mem;
+ flbas = (id_ns->flbas) & 0x0F;
+ lba_length = (1 << (id_ns->lbaf[flbas].ds));
+
+ if (llbaa == 0) {
+ __be32 tmp_cap = cpu_to_be32(le64_to_cpu(id_ns->ncap));
+ /* Byte 4 is reserved */
+ __be32 tmp_len = cpu_to_be32(lba_length & 0x00FFFFFF);
+
+ memcpy(resp, &tmp_cap, sizeof(u32));
+ memcpy(&resp[4], &tmp_len, sizeof(u32));
+ } else {
+ __be64 tmp_cap = cpu_to_be64(le64_to_cpu(id_ns->ncap));
+ __be32 tmp_len = cpu_to_be32(lba_length);
+
+ memcpy(resp, &tmp_cap, sizeof(u64));
+ /* Bytes 8, 9, 10, 11 are reserved */
+ memcpy(&resp[12], &tmp_len, sizeof(u32));
+ }
+
+ out_dma:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+ dma_addr);
+ out:
+ return res;
+}
+
+static int nvme_trans_fill_control_page(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *resp,
+ int len)
+{
+ if (len < MODE_PAGE_CONTROL_LEN)
+ return SNTI_INTERNAL_ERROR;
+
+ resp[0] = MODE_PAGE_CONTROL;
+ resp[1] = MODE_PAGE_CONTROL_LEN_FIELD;
+ resp[2] = 0x0E; /* TST=000b, TMF_ONLY=0, DPICZ=1,
+ * D_SENSE=1, GLTSD=1, RLEC=0 */
+ resp[3] = 0x12; /* Q_ALGO_MODIFIER=1h, NUAR=0, QERR=01b */
+ /* Byte 4: VS=0, RAC=0, UA_INT=0, SWP=0 */
+ resp[5] = 0x40; /* ATO=0, TAS=1, ATMPE=0, RWWP=0, AUTOLOAD=0 */
+ /* resp[6] and [7] are obsolete, thus zero */
+ resp[8] = 0xFF; /* Busy timeout period = 0xffff */
+ resp[9] = 0xFF;
+ /* Bytes 10,11: Extended selftest completion time = 0x0000 */
+
+ return SNTI_TRANSLATION_SUCCESS;
+}
+
+static int nvme_trans_fill_caching_page(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr,
+ u8 *resp, int len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ u32 feature_resp;
+ u8 vwc;
+
+ if (len < MODE_PAGE_CACHING_LEN)
+ return SNTI_INTERNAL_ERROR;
+
+ nvme_sc = nvme_get_features(dev, NVME_FEAT_VOLATILE_WC, 0, 0,
+ &feature_resp);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out;
+ }
+ vwc = feature_resp & 0x00000001;
+
+ resp[0] = MODE_PAGE_CACHING;
+ resp[1] = MODE_PAGE_CACHING_LEN_FIELD;
+ resp[2] = vwc << 2;
+
+ out:
+ return res;
+}
+
+static int nvme_trans_fill_pow_cnd_page(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *resp,
+ int len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+
+ if (len < MODE_PAGE_POW_CND_LEN)
+ return SNTI_INTERNAL_ERROR;
+
+ resp[0] = MODE_PAGE_POWER_CONDITION;
+ resp[1] = MODE_PAGE_POW_CND_LEN_FIELD;
+ /* All other bytes are zero */
+
+ return res;
+}
+
+static int nvme_trans_fill_inf_exc_page(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *resp,
+ int len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+
+ if (len < MODE_PAGE_INF_EXC_LEN)
+ return SNTI_INTERNAL_ERROR;
+
+ resp[0] = MODE_PAGE_INFO_EXCEP;
+ resp[1] = MODE_PAGE_INF_EXC_LEN_FIELD;
+ resp[2] = 0x88;
+ /* All other bytes are zero */
+
+ return res;
+}
+
+static int nvme_trans_fill_all_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *resp, int len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u16 mode_pages_offset_1 = 0;
+ u16 mode_pages_offset_2, mode_pages_offset_3, mode_pages_offset_4;
+
+ mode_pages_offset_2 = mode_pages_offset_1 + MODE_PAGE_CACHING_LEN;
+ mode_pages_offset_3 = mode_pages_offset_2 + MODE_PAGE_CONTROL_LEN;
+ mode_pages_offset_4 = mode_pages_offset_3 + MODE_PAGE_POW_CND_LEN;
+
+ res = nvme_trans_fill_caching_page(ns, hdr, &resp[mode_pages_offset_1],
+ MODE_PAGE_CACHING_LEN);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+ res = nvme_trans_fill_control_page(ns, hdr, &resp[mode_pages_offset_2],
+ MODE_PAGE_CONTROL_LEN);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+ res = nvme_trans_fill_pow_cnd_page(ns, hdr, &resp[mode_pages_offset_3],
+ MODE_PAGE_POW_CND_LEN);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+ res = nvme_trans_fill_inf_exc_page(ns, hdr, &resp[mode_pages_offset_4],
+ MODE_PAGE_INF_EXC_LEN);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+
+ out:
+ return res;
+}
+
+static inline int nvme_trans_get_blk_desc_len(u8 dbd, u8 llbaa)
+{
+ if (dbd == MODE_SENSE_BLK_DESC_ENABLED) {
+ /* SPC-4: len = 8 x Num_of_descriptors if llbaa = 0, 16x if 1 */
+ return 8 * (llbaa + 1) * MODE_SENSE_BLK_DESC_COUNT;
+ } else {
+ return 0;
+ }
+}
+
+static int nvme_trans_mode_page_create(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *cmd,
+ u16 alloc_len, u8 cdb10,
+ int (*mode_page_fill_func)
+ (struct nvme_ns *,
+ struct sg_io_hdr *hdr, u8 *, int),
+ u16 mode_pages_tot_len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int xfer_len;
+ u8 *response;
+ u8 dbd, llbaa;
+ u16 resp_size;
+ int mph_size;
+ u16 mode_pages_offset_1;
+ u16 blk_desc_len, blk_desc_offset, mode_data_length;
+
+ dbd = GET_MODE_SENSE_DBD(cmd);
+ llbaa = GET_MODE_SENSE_LLBAA(cmd);
+ mph_size = GET_MODE_SENSE_MPH_SIZE(cdb10);
+ blk_desc_len = nvme_trans_get_blk_desc_len(dbd, llbaa);
+
+ resp_size = mph_size + blk_desc_len + mode_pages_tot_len;
+	/* Refer to spc4r34 Table 440 for the calculation of the Mode Data Length field */
+ mode_data_length = 3 + (3 * cdb10) + blk_desc_len + mode_pages_tot_len;
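+	/*
+	 * Worked example (illustrative): MODE SENSE (6) for the Caching page
+	 * with DBD=0 and LLBAA=0 gives mph_size = 4, blk_desc_len = 8 and
+	 * mode_pages_tot_len = 20, so resp_size = 32 and mode_data_length =
+	 * 3 + 0 + 8 + 20 = 31, i.e. the response length minus the one-byte
+	 * MODE DATA LENGTH field itself.
+	 */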
+
+ blk_desc_offset = mph_size;
+ mode_pages_offset_1 = blk_desc_offset + blk_desc_len;
+
+ response = kmalloc(resp_size, GFP_KERNEL);
+ if (response == NULL) {
+ res = -ENOMEM;
+ goto out_mem;
+ }
+ memset(response, 0, resp_size);
+
+ res = nvme_trans_fill_mode_parm_hdr(&response[0], mph_size, cdb10,
+ llbaa, mode_data_length, blk_desc_len);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out_free;
+ if (blk_desc_len > 0) {
+ res = nvme_trans_fill_blk_desc(ns, hdr,
+ &response[blk_desc_offset],
+ blk_desc_len, llbaa);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out_free;
+ }
+ res = mode_page_fill_func(ns, hdr, &response[mode_pages_offset_1],
+ mode_pages_tot_len);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out_free;
+
+ xfer_len = min(alloc_len, resp_size);
+ res = nvme_trans_copy_to_user(hdr, response, xfer_len);
+
+ out_free:
+ kfree(response);
+ out_mem:
+ return res;
+}
+
+/* Read Capacity Helper Functions */
+
+static void nvme_trans_fill_read_cap(u8 *response, struct nvme_id_ns *id_ns,
+ u8 cdb16)
+{
+ u8 flbas;
+ u32 lba_length;
+ u64 rlba;
+ u8 prot_en;
+ u8 p_type_lut[4] = {0, 0, 1, 2};
+ __be64 tmp_rlba;
+ __be32 tmp_rlba_32;
+ __be32 tmp_len;
+
+ flbas = (id_ns->flbas) & 0x0F;
+ lba_length = (1 << (id_ns->lbaf[flbas].ds));
+ rlba = le64_to_cpup(&id_ns->nsze) - 1;
+	prot_en = id_ns->dps ? 0x01 : 0;
+
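+	/* READ CAPACITY (10) can only carry a 32-bit LBA; clamping to
+	 * 0xFFFFFFFF tells the initiator to retry with READ CAPACITY (16). */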
+ if (!cdb16) {
+ if (rlba > 0xFFFFFFFF)
+ rlba = 0xFFFFFFFF;
+ tmp_rlba_32 = cpu_to_be32(rlba);
+ tmp_len = cpu_to_be32(lba_length);
+ memcpy(response, &tmp_rlba_32, sizeof(u32));
+ memcpy(&response[4], &tmp_len, sizeof(u32));
+ } else {
+ tmp_rlba = cpu_to_be64(rlba);
+ tmp_len = cpu_to_be32(lba_length);
+ memcpy(response, &tmp_rlba, sizeof(u64));
+ memcpy(&response[8], &tmp_len, sizeof(u32));
+ response[12] = (p_type_lut[id_ns->dps & 0x3] << 1) | prot_en;
+ /* P_I_Exponent = 0x0 | LBPPBE = 0x0 */
+ /* LBPME = 0 | LBPRZ = 0 | LALBA = 0x00 */
+ /* Bytes 16-31 - Reserved */
+ }
+}
+
+/* Start Stop Unit Helper Functions */
+
+static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 pc, u8 pcmod, u8 start)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ctrl *id_ctrl;
+ int lowest_pow_st; /* max npss = lowest power consumption */
+ unsigned ps_desired = 0;
+
+ /* NVMe Controller Identify */
+ mem = dma_alloc_coherent(&dev->pci_dev->dev,
+ sizeof(struct nvme_id_ctrl),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+ nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_dma;
+ }
+ id_ctrl = mem;
+ lowest_pow_st = id_ctrl->npss - 1;
+
+ switch (pc) {
+ case NVME_POWER_STATE_START_VALID:
+ /* Action unspecified if POWER CONDITION MODIFIER != 0 */
+ if (pcmod == 0 && start == 0x1)
+ ps_desired = POWER_STATE_0;
+ if (pcmod == 0 && start == 0x0)
+ ps_desired = lowest_pow_st;
+ break;
+ case NVME_POWER_STATE_ACTIVE:
+ /* Action unspecified if POWER CONDITION MODIFIER != 0 */
+ if (pcmod == 0)
+ ps_desired = POWER_STATE_0;
+ break;
+ case NVME_POWER_STATE_IDLE:
+ /* Action unspecified if POWER CONDITION MODIFIER != [0,1,2] */
+ /* min of desired state and (lps-1) because lps is STOP */
+ if (pcmod == 0x0)
+ ps_desired = min(POWER_STATE_1, (lowest_pow_st - 1));
+ else if (pcmod == 0x1)
+ ps_desired = min(POWER_STATE_2, (lowest_pow_st - 1));
+ else if (pcmod == 0x2)
+ ps_desired = min(POWER_STATE_3, (lowest_pow_st - 1));
+ break;
+ case NVME_POWER_STATE_STANDBY:
+ /* Action unspecified if POWER CONDITION MODIFIER != [0,1] */
+ if (pcmod == 0x0)
+ ps_desired = max(0, (lowest_pow_st - 2));
+ else if (pcmod == 0x1)
+ ps_desired = max(0, (lowest_pow_st - 1));
+ break;
+ case NVME_POWER_STATE_LU_CONTROL:
+ default:
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out_dma;
+ }
+ nvme_sc = nvme_set_features(dev, NVME_FEAT_POWER_MGMT, ps_desired, 0,
+ NULL);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc)
+ res = nvme_sc;
+ out_dma:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
+ dma_addr);
+ out:
+ return res;
+}
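+
+ /*
+ * The resulting SCSI-to-NVMe power condition mapping, sketched for a
+ * controller reporting npss = 4 (so lowest_pow_st = 3 with the
+ * arithmetic above; POWER_STATE_n is assumed to name NVMe power
+ * state n):
+ *
+ *   START_VALID, start = 1    -> PS 0
+ *   START_VALID, start = 0    -> PS 3
+ *   ACTIVE                    -> PS 0
+ *   IDLE,    pcmod = 0/1/2    -> min(1/2/3, 2) = PS 1/2/2
+ *   STANDBY, pcmod = 0/1      -> max(0, 1/2)   = PS 1/2
+ */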
+
+/* Write Buffer Helper Functions */
+/* Also used by Format Unit, which passes tot_len, offset and buffer_id as 0 */
+
+static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 opcode, u32 tot_len, u32 offset,
+ u8 buffer_id)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_command c;
+ struct nvme_iod *iod = NULL;
+ unsigned length;
+
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = opcode;
+ if (opcode == nvme_admin_download_fw) {
+ if (hdr->iovec_count > 0) {
+ /* Assuming SGL is not allowed for this command */
+ res = nvme_trans_completion(hdr,
+ SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST,
+ SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+ iod = nvme_map_user_pages(dev, DMA_TO_DEVICE,
+ (unsigned long)hdr->dxferp, tot_len);
+ if (IS_ERR(iod)) {
+ res = PTR_ERR(iod);
+ goto out;
+ }
+ length = nvme_setup_prps(dev, &c.common, iod, tot_len,
+ GFP_KERNEL);
+ if (length != tot_len) {
+ res = -ENOMEM;
+ goto out_unmap;
+ }
+
+ c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
+ c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
+ } else if (opcode == nvme_admin_activate_fw) {
+ u32 cdw10 = buffer_id | NVME_FWACT_REPL_ACTV;
+ c.common.cdw10[0] = cpu_to_le32(cdw10);
+ }
+
+ nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_unmap;
+ if (nvme_sc)
+ res = nvme_sc;
+
+ out_unmap:
+ if (opcode == nvme_admin_download_fw) {
+ nvme_unmap_user_pages(dev, DMA_TO_DEVICE, iod);
+ nvme_free_iod(dev, iod);
+ }
+ out:
+ return res;
+}
+
+/* Mode Select Helper Functions */
+
+static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
+ u16 *bd_len, u8 *llbaa)
+{
+ if (cdb10) {
+ /* 10 Byte CDB */
+ *bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) +
+ parm_list[MODE_SELECT_10_BD_OFFSET + 1];
+ *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &
+ MODE_SELECT_10_LLBAA_MASK;
+ } else {
+ /* 6 Byte CDB */
+ *bd_len = parm_list[MODE_SELECT_6_BD_OFFSET];
+ }
+}
+
+static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
+ u16 idx, u16 bd_len, u8 llbaa)
+{
+ u16 bd_num;
+
+ bd_num = bd_len / ((llbaa == 0) ?
+ SHORT_DESC_BLOCK : LONG_DESC_BLOCK);
+ /* Store block descriptor info if a FORMAT UNIT comes later */
+ /* TODO Saving 1st BD info; what to do if multiple BD received? */
+ if (llbaa == 0) {
+ /* Standard Block Descriptor - spc4r34 7.5.5.1 */
+ ns->mode_select_num_blocks =
+ (parm_list[idx + 1] << 16) +
+ (parm_list[idx + 2] << 8) +
+ (parm_list[idx + 3]);
+
+ ns->mode_select_block_len =
+ (parm_list[idx + 5] << 16) +
+ (parm_list[idx + 6] << 8) +
+ (parm_list[idx + 7]);
+ } else {
+ /* Long LBA Block Descriptor - sbc3r27 6.4.2.3 */
+ ns->mode_select_num_blocks =
+ (((u64)parm_list[idx + 0]) << 56) +
+ (((u64)parm_list[idx + 1]) << 48) +
+ (((u64)parm_list[idx + 2]) << 40) +
+ (((u64)parm_list[idx + 3]) << 32) +
+ (((u64)parm_list[idx + 4]) << 24) +
+ (((u64)parm_list[idx + 5]) << 16) +
+ (((u64)parm_list[idx + 6]) << 8) +
+ ((u64)parm_list[idx + 7]);
+
+ ns->mode_select_block_len =
+ (parm_list[idx + 12] << 24) +
+ (parm_list[idx + 13] << 16) +
+ (parm_list[idx + 14] << 8) +
+ (parm_list[idx + 15]);
+ }
+}
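+
+ /*
+ * The parameter data parsed above, sketched for the common llbaa = 0
+ * case (assumes an 8-byte mode parameter header(10) followed by one
+ * 8-byte short block descriptor, per spc4r34 7.5.5.1):
+ *
+ *   bytes  0..7   mode parameter header(10); bytes 6..7 = BD length (8)
+ *   byte   8      density code
+ *   bytes  9..11  NUMBER OF BLOCKS  -> ns->mode_select_num_blocks
+ *   byte  12      reserved
+ *   bytes 13..15  BLOCK LENGTH      -> ns->mode_select_block_len
+ */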
+
+static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *mode_page, u8 page_code)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ unsigned dword11;
+
+ switch (page_code) {
+ case MODE_PAGE_CACHING:
+ dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0);
+ nvme_sc = nvme_set_features(dev, NVME_FEAT_VOLATILE_WC, dword11,
+ 0, NULL);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ break;
+ if (nvme_sc) {
+ res = nvme_sc;
+ break;
+ }
+ break;
+ case MODE_PAGE_CONTROL:
+ break;
+ case MODE_PAGE_POWER_CONDITION:
+ /* Verify the OS is not trying to set timers */
+ if ((mode_page[2] & 0x01) != 0 || (mode_page[3] & 0x0F) != 0) {
+ res = nvme_trans_completion(hdr,
+ SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST,
+ SCSI_ASC_INVALID_PARAMETER,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ if (!res)
+ res = SNTI_INTERNAL_ERROR;
+ break;
+ }
+ break;
+ default:
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ if (!res)
+ res = SNTI_INTERNAL_ERROR;
+ break;
+ }
+
+ return res;
+}
+
+static int nvme_trans_modesel_data(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd, u16 parm_list_len, u8 pf,
+ u8 sp, u8 cdb10)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u8 *parm_list;
+ u16 bd_len;
+ u8 llbaa = 0;
+ u16 index, saved_index;
+ u8 page_code;
+ u16 mp_size;
+
+ /* Get parm list from data-in/out buffer */
+ parm_list = kmalloc(parm_list_len, GFP_KERNEL);
+ if (parm_list == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ res = nvme_trans_copy_from_user(hdr, parm_list, parm_list_len);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out_mem;
+
+ nvme_trans_modesel_get_bd_len(parm_list, cdb10, &bd_len, &llbaa);
+ index = (cdb10) ? (MODE_SELECT_10_MPH_SIZE) : (MODE_SELECT_6_MPH_SIZE);
+
+ if (bd_len != 0) {
+ /* Block Descriptors present, parse */
+ nvme_trans_modesel_save_bd(ns, parm_list, index, bd_len, llbaa);
+ index += bd_len;
+ }
+ saved_index = index;
+
+ /*
+ * Multiple mode pages may be present; iterate through all of them.
+ * On this first pass, only validate the page codes -- don't issue
+ * any NVMe commands yet.
+ */
+ do {
+ page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
+ mp_size = parm_list[index + 1] + 2;
+ if ((page_code != MODE_PAGE_CACHING) &&
+ (page_code != MODE_PAGE_CONTROL) &&
+ (page_code != MODE_PAGE_POWER_CONDITION)) {
+ res = nvme_trans_completion(hdr,
+ SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST,
+ SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out_mem;
+ }
+ index += mp_size;
+ } while (index < parm_list_len);
+
+ /* On the second pass, issue the NVMe commands */
+ index = saved_index;
+ do {
+ page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
+ mp_size = parm_list[index + 1] + 2;
+ res = nvme_trans_modesel_get_mp(ns, hdr, &parm_list[index],
+ page_code);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ break;
+ index += mp_size;
+ } while (index < parm_list_len);
+
+ out_mem:
+ kfree(parm_list);
+ out:
+ return res;
+}
+
+/* Format Unit Helper Functions */
+
+static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ns *id_ns;
+ u8 flbas;
+
+ /*
+ * SCSI Expects a MODE SELECT would have been issued prior to
+ * a FORMAT UNIT, and the block size and number would be used
+ * from the block descriptor in it. If a MODE SELECT had not
+ * been issued, FORMAT shall use the current values for both.
+ */
+
+ if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) {
+ mem = dma_alloc_coherent(&dev->pci_dev->dev,
+ sizeof(struct nvme_id_ns), &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+ /* nvme ns identify */
+ nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_dma;
+ }
+ id_ns = mem;
+
+ if (ns->mode_select_num_blocks == 0)
+ ns->mode_select_num_blocks = le64_to_cpu(id_ns->ncap);
+ if (ns->mode_select_block_len == 0) {
+ flbas = (id_ns->flbas) & 0x0F;
+ ns->mode_select_block_len =
+ (1 << (id_ns->lbaf[flbas].ds));
+ }
+ out_dma:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ mem, dma_addr);
+ }
+ out:
+ return res;
+}
+
+static int nvme_trans_fmt_get_parm_header(struct sg_io_hdr *hdr, u8 len,
+ u8 format_prot_info, u8 *nvme_pf_code)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u8 *parm_list;
+ u8 pf_usage, pf_code;
+
+ parm_list = kmalloc(len, GFP_KERNEL);
+ if (parm_list == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+ res = nvme_trans_copy_from_user(hdr, parm_list, len);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out_mem;
+
+ if ((parm_list[FORMAT_UNIT_IMMED_OFFSET] &
+ FORMAT_UNIT_IMMED_MASK) != 0) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out_mem;
+ }
+
+ if (len == FORMAT_UNIT_LONG_PARM_LIST_LEN &&
+ (parm_list[FORMAT_UNIT_PROT_INT_OFFSET] & 0x0F) != 0) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out_mem;
+ }
+ pf_usage = parm_list[FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET] &
+ FORMAT_UNIT_PROT_FIELD_USAGE_MASK;
+ pf_code = (pf_usage << 2) | format_prot_info;
+ switch (pf_code) {
+ case 0:
+ *nvme_pf_code = 0;
+ break;
+ case 2:
+ *nvme_pf_code = 1;
+ break;
+ case 3:
+ *nvme_pf_code = 2;
+ break;
+ case 7:
+ *nvme_pf_code = 3;
+ break;
+ default:
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ break;
+ }
+
+ out_mem:
+ kfree(parm_list);
+ out:
+ return res;
+}
+
+static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 prot_info)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ns *id_ns;
+ u8 i;
+ u8 flbas, nlbaf;
+ u8 selected_lbaf = 0xFF;
+ u32 cdw10 = 0;
+ struct nvme_command c;
+
+ /*
+ * Loop through the LBA formats in id_ns to find the one matching the
+ * requested block length; its index is placed in cdw10.
+ */
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+ /* nvme ns identify */
+ nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_dma;
+ }
+ id_ns = mem;
+ flbas = (id_ns->flbas) & 0x0F;
+ nlbaf = id_ns->nlbaf;
+
+ for (i = 0; i < nlbaf; i++) {
+ if (ns->mode_select_block_len == (1 << (id_ns->lbaf[i].ds))) {
+ selected_lbaf = i;
+ break;
+ }
+ }
+ if (selected_lbaf > 0x0F) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out_dma;
+ }
+ if (ns->mode_select_num_blocks != le64_to_cpu(id_ns->ncap)) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out_dma;
+ }
+
+ cdw10 |= prot_info << 5;
+ cdw10 |= selected_lbaf & 0x0F;
+ memset(&c, 0, sizeof(c));
+ c.format.opcode = nvme_admin_format_nvm;
+ c.format.nsid = cpu_to_le32(ns->ns_id);
+ c.format.cdw10 = cpu_to_le32(cdw10);
+
+ nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc)
+ res = nvme_sc;
+
+ out_dma:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+ dma_addr);
+ out:
+ return res;
+}
+
+/* Read/Write Helper Functions */
+
+static inline void nvme_trans_get_io_cdb6(u8 *cmd,
+ struct nvme_trans_io_cdb *cdb_info)
+{
+ cdb_info->fua = 0;
+ cdb_info->prot_info = 0;
+ cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_6_CDB_LBA_OFFSET) &
+ IO_6_CDB_LBA_MASK;
+ cdb_info->xfer_len = GET_U8_FROM_CDB(cmd, IO_6_CDB_TX_LEN_OFFSET);
+
+ /* sbc3r27 sec 5.32 - TRANSFER LEN of 0 implies a 256 Block transfer */
+ if (cdb_info->xfer_len == 0)
+ cdb_info->xfer_len = IO_6_DEFAULT_TX_LEN;
+}
+
+static inline void nvme_trans_get_io_cdb10(u8 *cmd,
+ struct nvme_trans_io_cdb *cdb_info)
+{
+ cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_10_CDB_FUA_OFFSET) &
+ IO_CDB_FUA_MASK;
+ cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_10_CDB_WP_OFFSET) &
+ IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
+ cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_10_CDB_LBA_OFFSET);
+ cdb_info->xfer_len = GET_U16_FROM_CDB(cmd, IO_10_CDB_TX_LEN_OFFSET);
+}
+
+static inline void nvme_trans_get_io_cdb12(u8 *cmd,
+ struct nvme_trans_io_cdb *cdb_info)
+{
+ cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_12_CDB_FUA_OFFSET) &
+ IO_CDB_FUA_MASK;
+ cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_12_CDB_WP_OFFSET) &
+ IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
+ cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_12_CDB_LBA_OFFSET);
+ cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_12_CDB_TX_LEN_OFFSET);
+}
+
+static inline void nvme_trans_get_io_cdb16(u8 *cmd,
+ struct nvme_trans_io_cdb *cdb_info)
+{
+ cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_16_CDB_FUA_OFFSET) &
+ IO_CDB_FUA_MASK;
+ cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_16_CDB_WP_OFFSET) &
+ IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
+ cdb_info->lba = GET_U64_FROM_CDB(cmd, IO_16_CDB_LBA_OFFSET);
+ cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_16_CDB_TX_LEN_OFFSET);
+}
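+
+ /*
+ * A worked extraction for a READ(10) CDB (a sketch; GET_U*_FROM_CDB
+ * and the offset/mask constants are defined earlier in this file):
+ *
+ *   28 00 00 00 10 00 00 00 08 00
+ *     byte 0      opcode                     = 0x28 (READ_10)
+ *     byte 1      FUA bit (0x08) clear       -> cdb_info->fua = 0
+ *     bytes 2..5  big-endian LBA             -> cdb_info->lba = 0x1000
+ *     bytes 7..8  big-endian transfer length -> cdb_info->xfer_len = 8
+ *
+ * READ(6)/WRITE(6) carry only a 21-bit LBA, and a transfer length of
+ * 0 there means 256 blocks (sbc3r27 5.32), as handled above.
+ */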
+
+static inline u32 nvme_trans_io_get_num_cmds(struct sg_io_hdr *hdr,
+ struct nvme_trans_io_cdb *cdb_info,
+ u32 max_blocks)
+{
+ /* If using iovecs, send one nvme command per vector */
+ if (hdr->iovec_count > 0)
+ return hdr->iovec_count;
+ else if (cdb_info->xfer_len > max_blocks)
+ return ((cdb_info->xfer_len - 1) / max_blocks) + 1;
+ else
+ return 1;
+}
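+
+ /*
+ * A splitting example for the non-iovec path: with max_blocks = 1024
+ * (e.g. a 512 KiB device limit at a 512-byte block size), a
+ * 4000-block request yields
+ *
+ *   num_cmds = ((4000 - 1) / 1024) + 1 = 4
+ *
+ * three full 1024-block commands plus a final 928-block command,
+ * each advanced by nvme_offset in nvme_trans_do_nvme_io() below.
+ */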
+
+static u16 nvme_trans_io_get_control(struct nvme_ns *ns,
+ struct nvme_trans_io_cdb *cdb_info)
+{
+ u16 control = 0;
+
+ /* When Protection information support is added, implement here */
+
+ if (cdb_info->fua > 0)
+ control |= NVME_RW_FUA;
+
+ return control;
+}
+
+static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ struct nvme_trans_io_cdb *cdb_info, u8 is_write)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_queue *nvmeq;
+ u32 num_cmds;
+ struct nvme_iod *iod;
+ u64 unit_len;
+ u64 unit_num_blocks; /* Number of blocks to xfer in each nvme cmd */
+ u32 retcode;
+ u32 i = 0;
+ u64 nvme_offset = 0;
+ void __user *next_mapping_addr;
+ struct nvme_command c;
+ u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
+ u16 control;
+ u32 max_blocks = nvme_block_nr(ns, dev->max_hw_sectors);
+
+ num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);
+
+ /*
+ * This loop handles two cases.
+ * First, when an SGL is used in the form of an iovec list:
+ * - Use iov_base as the next mapping address for the nvme command_id
+ * - Use iov_len as the data transfer length for the command.
+ * Second, when we have a single buffer
+ * - If larger than max_blocks, split into chunks, offset
+ * each nvme command accordingly.
+ */
+ for (i = 0; i < num_cmds; i++) {
+ memset(&c, 0, sizeof(c));
+ if (hdr->iovec_count > 0) {
+ struct sg_iovec sgl;
+
+ retcode = copy_from_user(&sgl, hdr->dxferp +
+ i * sizeof(struct sg_iovec),
+ sizeof(struct sg_iovec));
+ if (retcode)
+ return -EFAULT;
+ unit_len = sgl.iov_len;
+ unit_num_blocks = unit_len >> ns->lba_shift;
+ next_mapping_addr = sgl.iov_base;
+ } else {
+ unit_num_blocks = min((u64)max_blocks,
+ (cdb_info->xfer_len - nvme_offset));
+ unit_len = unit_num_blocks << ns->lba_shift;
+ next_mapping_addr = hdr->dxferp +
+ ((1 << ns->lba_shift) * nvme_offset);
+ }
+
+ c.rw.opcode = opcode;
+ c.rw.nsid = cpu_to_le32(ns->ns_id);
+ c.rw.slba = cpu_to_le64(cdb_info->lba + nvme_offset);
+ c.rw.length = cpu_to_le16(unit_num_blocks - 1);
+ control = nvme_trans_io_get_control(ns, cdb_info);
+ c.rw.control = cpu_to_le16(control);
+
+ iod = nvme_map_user_pages(dev,
+ (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+ (unsigned long)next_mapping_addr, unit_len);
+ if (IS_ERR(iod)) {
+ res = PTR_ERR(iod);
+ goto out;
+ }
+ retcode = nvme_setup_prps(dev, &c.common, iod, unit_len,
+ GFP_KERNEL);
+ if (retcode != unit_len) {
+ nvme_unmap_user_pages(dev,
+ (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+ iod);
+ nvme_free_iod(dev, iod);
+ res = -ENOMEM;
+ goto out;
+ }
+
+ nvme_offset += unit_num_blocks;
+
+ nvmeq = get_nvmeq(dev);
+ /*
+ * Since nvme_submit_sync_cmd sleeps, we can't keep
+ * preemption disabled. We may be preempted at any
+ * point, and be rescheduled to a different CPU. That
+ * will cause cacheline bouncing, but no additional
+ * races since q_lock already protects against other
+ * CPUs.
+ */
+ put_nvmeq(nvmeq);
+ nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL,
+ NVME_IO_TIMEOUT);
+ if (nvme_sc != NVME_SC_SUCCESS) {
+ nvme_unmap_user_pages(dev,
+ (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+ iod);
+ nvme_free_iod(dev, iod);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ goto out;
+ }
+ nvme_unmap_user_pages(dev,
+ (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+ iod);
+ nvme_free_iod(dev, iod);
+ }
+ res = nvme_trans_status_code(hdr, NVME_SC_SUCCESS);
+
+ out:
+ return res;
+}
+
+
+/* SCSI Command Translation Functions */
+
+static int nvme_trans_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 is_write,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ struct nvme_trans_io_cdb cdb_info;
+ u8 opcode = cmd[0];
+ u64 xfer_bytes;
+ u64 sum_iov_len = 0;
+ struct sg_iovec sgl;
+ int i;
+ size_t not_copied;
+
+ /* Extract Fields from CDB */
+ switch (opcode) {
+ case WRITE_6:
+ case READ_6:
+ nvme_trans_get_io_cdb6(cmd, &cdb_info);
+ break;
+ case WRITE_10:
+ case READ_10:
+ nvme_trans_get_io_cdb10(cmd, &cdb_info);
+ break;
+ case WRITE_12:
+ case READ_12:
+ nvme_trans_get_io_cdb12(cmd, &cdb_info);
+ break;
+ case WRITE_16:
+ case READ_16:
+ nvme_trans_get_io_cdb16(cmd, &cdb_info);
+ break;
+ default:
+ /* Will never really reach here */
+ res = SNTI_INTERNAL_ERROR;
+ goto out;
+ }
+
+ /* Calculate total length of transfer (in bytes) */
+ if (hdr->iovec_count > 0) {
+ for (i = 0; i < hdr->iovec_count; i++) {
+ not_copied = copy_from_user(&sgl, hdr->dxferp +
+ i * sizeof(struct sg_iovec),
+ sizeof(struct sg_iovec));
+ if (not_copied)
+ return -EFAULT;
+ sum_iov_len += sgl.iov_len;
+ /* IO vector sizes should be multiples of block size */
+ if (sgl.iov_len % (1 << ns->lba_shift) != 0) {
+ res = nvme_trans_completion(hdr,
+ SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST,
+ SCSI_ASC_INVALID_PARAMETER,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+ }
+ } else {
+ sum_iov_len = hdr->dxfer_len;
+ }
+
+ /* As per the sg ioctl HOWTO, if the lengths differ, use the lower one */
+ xfer_bytes = min(((u64)hdr->dxfer_len), sum_iov_len);
+
+ /* If the block count and actual data buffer size don't match, error out */
+ if (xfer_bytes != ((u64)cdb_info.xfer_len << ns->lba_shift)) {
+ res = -EINVAL;
+ goto out;
+ }
+
+ /* Check for 0 length transfer - it is not illegal */
+ if (cdb_info.xfer_len == 0)
+ goto out;
+
+ /* Send NVMe IO Command(s) */
+ res = nvme_trans_do_nvme_io(ns, hdr, &cdb_info, is_write);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+
+ out:
+ return res;
+}
+
+static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u8 evpd;
+ u8 page_code;
+ int alloc_len;
+ u8 *inq_response;
+
+ evpd = GET_INQ_EVPD_BIT(cmd);
+ page_code = GET_INQ_PAGE_CODE(cmd);
+ alloc_len = GET_INQ_ALLOC_LENGTH(cmd);
+
+ inq_response = kmalloc(STANDARD_INQUIRY_LENGTH, GFP_KERNEL);
+ if (inq_response == NULL) {
+ res = -ENOMEM;
+ goto out_mem;
+ }
+
+ if (evpd == 0) {
+ if (page_code == INQ_STANDARD_INQUIRY_PAGE) {
+ res = nvme_trans_standard_inquiry_page(ns, hdr,
+ inq_response, alloc_len);
+ } else {
+ res = nvme_trans_completion(hdr,
+ SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST,
+ SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ }
+ } else {
+ switch (page_code) {
+ case VPD_SUPPORTED_PAGES:
+ res = nvme_trans_supported_vpd_pages(ns, hdr,
+ inq_response, alloc_len);
+ break;
+ case VPD_SERIAL_NUMBER:
+ res = nvme_trans_unit_serial_page(ns, hdr, inq_response,
+ alloc_len);
+ break;
+ case VPD_DEVICE_IDENTIFIERS:
+ res = nvme_trans_device_id_page(ns, hdr, inq_response,
+ alloc_len);
+ break;
+ case VPD_EXTENDED_INQUIRY:
+ res = nvme_trans_ext_inq_page(ns, hdr, alloc_len);
+ break;
+ case VPD_BLOCK_DEV_CHARACTERISTICS:
+ res = nvme_trans_bdev_char_page(ns, hdr, alloc_len);
+ break;
+ default:
+ res = nvme_trans_completion(hdr,
+ SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST,
+ SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ break;
+ }
+ }
+ kfree(inq_response);
+ out_mem:
+ return res;
+}
+
+static int nvme_trans_log_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u16 alloc_len;
+ u8 sp;
+ u8 pc;
+ u8 page_code;
+
+ sp = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_SP_OFFSET);
+ if (sp != LOG_SENSE_CDB_SP_NOT_ENABLED) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+ pc = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_PC_OFFSET);
+ page_code = pc & LOG_SENSE_CDB_PAGE_CODE_MASK;
+ pc = (pc & LOG_SENSE_CDB_PC_MASK) >> LOG_SENSE_CDB_PC_SHIFT;
+ if (pc != LOG_SENSE_CDB_PC_CUMULATIVE_VALUES) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+ alloc_len = GET_U16_FROM_CDB(cmd, LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET);
+ switch (page_code) {
+ case LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE:
+ res = nvme_trans_log_supp_pages(ns, hdr, alloc_len);
+ break;
+ case LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE:
+ res = nvme_trans_log_info_exceptions(ns, hdr, alloc_len);
+ break;
+ case LOG_PAGE_TEMPERATURE_PAGE:
+ res = nvme_trans_log_temperature(ns, hdr, alloc_len);
+ break;
+ default:
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ break;
+ }
+
+ out:
+ return res;
+}
+
+static int nvme_trans_mode_select(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u8 cdb10 = 0;
+ u16 parm_list_len;
+ u8 page_format;
+ u8 save_pages;
+
+ page_format = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_PAGE_FORMAT_OFFSET);
+ page_format &= MODE_SELECT_CDB_PAGE_FORMAT_MASK;
+
+ save_pages = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_SAVE_PAGES_OFFSET);
+ save_pages &= MODE_SELECT_CDB_SAVE_PAGES_MASK;
+
+ if (GET_OPCODE(cmd) == MODE_SELECT) {
+ parm_list_len = GET_U8_FROM_CDB(cmd,
+ MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET);
+ } else {
+ parm_list_len = GET_U16_FROM_CDB(cmd,
+ MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET);
+ cdb10 = 1;
+ }
+
+ if (parm_list_len != 0) {
+ /*
+ * According to SPC-4 r24, a parameter list length field of 0
+ * shall not be considered an error
+ */
+ res = nvme_trans_modesel_data(ns, hdr, cmd, parm_list_len,
+ page_format, save_pages, cdb10);
+ }
+
+ return res;
+}
+
+static int nvme_trans_mode_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u16 alloc_len;
+ u8 cdb10 = 0;
+ u8 page_code;
+ u8 pc;
+
+ if (GET_OPCODE(cmd) == MODE_SENSE) {
+ alloc_len = GET_U8_FROM_CDB(cmd, MODE_SENSE6_ALLOC_LEN_OFFSET);
+ } else {
+ alloc_len = GET_U16_FROM_CDB(cmd,
+ MODE_SENSE10_ALLOC_LEN_OFFSET);
+ cdb10 = 1;
+ }
+
+ pc = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CONTROL_OFFSET) &
+ MODE_SENSE_PAGE_CONTROL_MASK;
+ if (pc != MODE_SENSE_PC_CURRENT_VALUES) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+
+ page_code = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CODE_OFFSET) &
+ MODE_SENSE_PAGE_CODE_MASK;
+ switch (page_code) {
+ case MODE_PAGE_CACHING:
+ res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
+ cdb10,
+ &nvme_trans_fill_caching_page,
+ MODE_PAGE_CACHING_LEN);
+ break;
+ case MODE_PAGE_CONTROL:
+ res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
+ cdb10,
+ &nvme_trans_fill_control_page,
+ MODE_PAGE_CONTROL_LEN);
+ break;
+ case MODE_PAGE_POWER_CONDITION:
+ res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
+ cdb10,
+ &nvme_trans_fill_pow_cnd_page,
+ MODE_PAGE_POW_CND_LEN);
+ break;
+ case MODE_PAGE_INFO_EXCEP:
+ res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
+ cdb10,
+ &nvme_trans_fill_inf_exc_page,
+ MODE_PAGE_INF_EXC_LEN);
+ break;
+ case MODE_PAGE_RETURN_ALL:
+ res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
+ cdb10,
+ &nvme_trans_fill_all_pages,
+ MODE_PAGE_ALL_LEN);
+ break;
+ default:
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ break;
+ }
+
+ out:
+ return res;
+}
+
+static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ u32 alloc_len = READ_CAP_10_RESP_SIZE;
+ u32 resp_size = READ_CAP_10_RESP_SIZE;
+ u32 xfer_len;
+ u8 cdb16;
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ns *id_ns;
+ u8 *response;
+
+ cdb16 = IS_READ_CAP_16(cmd);
+ if (cdb16) {
+ alloc_len = GET_READ_CAP_16_ALLOC_LENGTH(cmd);
+ resp_size = READ_CAP_16_RESP_SIZE;
+ }
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+ /* nvme ns identify */
+ nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_dma;
+ }
+ id_ns = mem;
+
+ response = kzalloc(resp_size, GFP_KERNEL);
+ if (response == NULL) {
+ res = -ENOMEM;
+ goto out_dma;
+ }
+ nvme_trans_fill_read_cap(response, id_ns, cdb16);
+
+ xfer_len = min(alloc_len, resp_size);
+ res = nvme_trans_copy_to_user(hdr, response, xfer_len);
+
+ kfree(response);
+ out_dma:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+ dma_addr);
+ out:
+ return res;
+}
+
+static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ u32 alloc_len, xfer_len, resp_size;
+ u8 select_report;
+ u8 *response;
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ctrl *id_ctrl;
+ u32 ll_length, lun_id;
+ u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET;
+ __be32 tmp_len;
+
+ alloc_len = GET_REPORT_LUNS_ALLOC_LENGTH(cmd);
+ select_report = GET_U8_FROM_CDB(cmd, REPORT_LUNS_SR_OFFSET);
+
+ if ((select_report != ALL_LUNS_RETURNED) &&
+ (select_report != ALL_WELL_KNOWN_LUNS_RETURNED) &&
+ (select_report != RESTRICTED_LUNS_RETURNED)) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ } else {
+ /* NVMe Controller Identify */
+ mem = dma_alloc_coherent(&dev->pci_dev->dev,
+ sizeof(struct nvme_id_ctrl),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+ nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_dma;
+ }
+ id_ctrl = mem;
+ ll_length = le32_to_cpu(id_ctrl->nn) * LUN_ENTRY_SIZE;
+ resp_size = ll_length + LUN_DATA_HEADER_SIZE;
+
+ if (alloc_len < resp_size) {
+ res = nvme_trans_completion(hdr,
+ SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out_dma;
+ }
+
+ response = kzalloc(resp_size, GFP_KERNEL);
+ if (response == NULL) {
+ res = -ENOMEM;
+ goto out_dma;
+ }
+
+ /* The first LUN ID will always be 0 per the SAM spec */
+ for (lun_id = 0; lun_id < le32_to_cpu(id_ctrl->nn); lun_id++) {
+ /*
+ * Set the LUN Id and then increment to the next LUN
+ * location in the parameter data.
+ */
+ __be64 tmp_id = cpu_to_be64(lun_id);
+ memcpy(&response[lun_id_offset], &tmp_id, sizeof(u64));
+ lun_id_offset += LUN_ENTRY_SIZE;
+ }
+ tmp_len = cpu_to_be32(ll_length);
+ memcpy(response, &tmp_len, sizeof(u32));
+ }
+
+ xfer_len = min(alloc_len, resp_size);
+ res = nvme_trans_copy_to_user(hdr, response, xfer_len);
+
+ kfree(response);
+ out_dma:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
+ dma_addr);
+ out:
+ return res;
+}
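+
+ /*
+ * A minimal userspace sketch of walking the REPORT LUNS data built
+ * above (assumes the response was fetched via SG_IO; an 8-byte
+ * header is followed by 8-byte LUN entries):
+ *
+ *   #include <stdint.h>
+ *   #include <stdio.h>
+ *   #include <string.h>
+ *   #include <arpa/inet.h>
+ *
+ *   static void print_lun_count(const uint8_t *resp)
+ *   {
+ *           uint32_t list_len;      // bytes of LUN entries, not LUNs
+ *
+ *           memcpy(&list_len, resp, 4);
+ *           list_len = ntohl(list_len);
+ *           printf("%u LUNs, first entry at byte 8\n", list_len / 8);
+ *   }
+ */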
+
+static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u8 alloc_len, xfer_len, resp_size;
+ u8 desc_format;
+ u8 *response;
+
+ alloc_len = GET_REQUEST_SENSE_ALLOC_LENGTH(cmd);
+ desc_format = GET_U8_FROM_CDB(cmd, REQUEST_SENSE_DESC_OFFSET);
+ desc_format &= REQUEST_SENSE_DESC_MASK;
+
+ resp_size = ((desc_format) ? (DESC_FMT_SENSE_DATA_SIZE) :
+ (FIXED_FMT_SENSE_DATA_SIZE));
+ response = kzalloc(resp_size, GFP_KERNEL);
+ if (response == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ if (desc_format == DESCRIPTOR_FORMAT_SENSE_DATA_TYPE) {
+ /* Descriptor Format Sense Data */
+ response[0] = DESC_FORMAT_SENSE_DATA;
+ response[1] = NO_SENSE;
+ /* TODO How is LOW POWER CONDITION ON handled? (byte 2) */
+ response[2] = SCSI_ASC_NO_SENSE;
+ response[3] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ /* SDAT_OVFL = 0 | Additional Sense Length = 0 */
+ } else {
+ /* Fixed Format Sense Data */
+ response[0] = FIXED_SENSE_DATA;
+ /* Byte 1 = Obsolete */
+ response[2] = NO_SENSE; /* FM, EOM, ILI, SDAT_OVFL = 0 */
+ /* Bytes 3-6 - Information - set to zero */
+ response[7] = FIXED_SENSE_DATA_ADD_LENGTH;
+ /* Bytes 8-11 - Cmd Specific Information - set to zero */
+ response[12] = SCSI_ASC_NO_SENSE;
+ response[13] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ /* Byte 14 = Field Replaceable Unit Code = 0 */
+ /* Bytes 15-17 - SKSV=0; Sense Key Specific = 0 */
+ }
+
+ xfer_len = min(alloc_len, resp_size);
+ res = nvme_trans_copy_to_user(hdr, response, xfer_len);
+
+ kfree(response);
+ out:
+ return res;
+}
+
+static int nvme_trans_security_protocol(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+}
+
+static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_queue *nvmeq;
+ struct nvme_command c;
+ u8 immed, pcmod, pc, no_flush, start;
+
+ immed = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_IMMED_OFFSET);
+ pcmod = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET);
+ pc = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_OFFSET);
+ no_flush = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_NO_FLUSH_OFFSET);
+ start = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_START_OFFSET);
+
+ immed &= START_STOP_UNIT_CDB_IMMED_MASK;
+ pcmod &= START_STOP_UNIT_CDB_POWER_COND_MOD_MASK;
+ pc = (pc & START_STOP_UNIT_CDB_POWER_COND_MASK) >> NIBBLE_SHIFT;
+ no_flush &= START_STOP_UNIT_CDB_NO_FLUSH_MASK;
+ start &= START_STOP_UNIT_CDB_START_MASK;
+
+ if (immed != 0) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ } else {
+ if (no_flush == 0) {
+ /* Issue NVME FLUSH command prior to START STOP UNIT */
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = nvme_cmd_flush;
+ c.common.nsid = cpu_to_le32(ns->ns_id);
+
+ nvmeq = get_nvmeq(ns->dev);
+ put_nvmeq(nvmeq);
+ nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out;
+ }
+ }
+ /* Setup the expected power state transition */
+ res = nvme_trans_power_state(ns, hdr, pc, pcmod, start);
+ }
+
+ out:
+ return res;
+}
+
+static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_command c;
+ struct nvme_queue *nvmeq;
+
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = nvme_cmd_flush;
+ c.common.nsid = cpu_to_le32(ns->ns_id);
+
+ nvmeq = get_nvmeq(ns->dev);
+ put_nvmeq(nvmeq);
+ nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out;
+ if (nvme_sc)
+ res = nvme_sc;
+
+ out:
+ return res;
+}
+
+static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u8 parm_hdr_len = 0;
+ u8 nvme_pf_code = 0;
+ u8 format_prot_info, long_list, format_data;
+
+ format_prot_info = GET_U8_FROM_CDB(cmd,
+ FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET);
+ long_list = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_LONG_LIST_OFFSET);
+ format_data = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET);
+
+ format_prot_info = (format_prot_info &
+ FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK) >>
+ FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT;
+ long_list &= FORMAT_UNIT_CDB_LONG_LIST_MASK;
+ format_data &= FORMAT_UNIT_CDB_FORMAT_DATA_MASK;
+
+ if (format_data != 0) {
+ if (format_prot_info != 0) {
+ if (long_list == 0)
+ parm_hdr_len = FORMAT_UNIT_SHORT_PARM_LIST_LEN;
+ else
+ parm_hdr_len = FORMAT_UNIT_LONG_PARM_LIST_LEN;
+ }
+ } else if (format_data == 0 && format_prot_info != 0) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+
+ /* Get parm header from data-in/out buffer */
+ /*
+ * According to the translation spec, the only fields in the parameter
+ * list we are concerned with are in the header. So allocate only that.
+ */
+ if (parm_hdr_len > 0) {
+ res = nvme_trans_fmt_get_parm_header(hdr, parm_hdr_len,
+ format_prot_info, &nvme_pf_code);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+ }
+
+ /* Attempt to activate any previously downloaded firmware image */
+ res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw, 0, 0, 0);
+
+ /* Determine Block size and count and send format command */
+ res = nvme_trans_fmt_set_blk_size_count(ns, hdr);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+
+ res = nvme_trans_fmt_send_cmd(ns, hdr, nvme_pf_code);
+
+ out:
+ return res;
+}
+
+static int nvme_trans_test_unit_ready(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ struct nvme_dev *dev = ns->dev;
+
+ if (!(readl(&dev->bar->csts) & NVME_CSTS_RDY))
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ NOT_READY, SCSI_ASC_LUN_NOT_READY,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ else
+ res = nvme_trans_completion(hdr, SAM_STAT_GOOD, NO_SENSE, 0, 0);
+
+ return res;
+}
+
+static int nvme_trans_write_buffer(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u32 buffer_offset, parm_list_length;
+ u8 buffer_id, mode;
+
+ parm_list_length =
+ GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET);
+ if (parm_list_length % BYTES_TO_DWORDS != 0) {
+ /* NVMe expects the firmware image to be a whole number of dwords */
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+ buffer_id = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_ID_OFFSET);
+ if (buffer_id > NVME_MAX_FIRMWARE_SLOT) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+ mode = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_MODE_OFFSET) &
+ WRITE_BUFFER_CDB_MODE_MASK;
+ buffer_offset =
+ GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET);
+
+ switch (mode) {
+ case DOWNLOAD_SAVE_ACTIVATE:
+ res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw,
+ parm_list_length, buffer_offset,
+ buffer_id);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+ res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
+ parm_list_length, buffer_offset,
+ buffer_id);
+ break;
+ case DOWNLOAD_SAVE_DEFER_ACTIVATE:
+ res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw,
+ parm_list_length, buffer_offset,
+ buffer_id);
+ break;
+ case ACTIVATE_DEFERRED_MICROCODE:
+ res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
+ parm_list_length, buffer_offset,
+ buffer_id);
+ break;
+ default:
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ break;
+ }
+
+ out:
+ return res;
+}
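+
+ /*
+ * The 10-byte WRITE BUFFER CDB this handler consumes, sketched for
+ * the download-and-activate case (the mode values are assumed to
+ * follow spc4r34, and buffer ID 0 is assumed to let the controller
+ * pick a firmware slot):
+ *
+ *   byte 0      opcode    = 0x3B (WRITE_BUFFER)
+ *   byte 1      mode      = 0x05 (DOWNLOAD_SAVE_ACTIVATE)
+ *   byte 2      buffer ID = firmware slot
+ *   bytes 3..5  buffer offset (dword aligned)
+ *   bytes 6..8  parameter list length = image bytes (a whole number
+ *               of dwords, enforced above)
+ *   byte 9      control
+ */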
+
+struct scsi_unmap_blk_desc {
+ __be64 slba;
+ __be32 nlb;
+ u32 resv;
+};
+
+struct scsi_unmap_parm_list {
+ __be16 unmap_data_len;
+ __be16 unmap_blk_desc_data_len;
+ u32 resv;
+ struct scsi_unmap_blk_desc desc[];
+};
+
+static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ struct nvme_dev *dev = ns->dev;
+ struct scsi_unmap_parm_list *plist;
+ struct nvme_dsm_range *range;
+ struct nvme_queue *nvmeq;
+ struct nvme_command c;
+ int i, nvme_sc, res = -ENOMEM;
+ u16 ndesc, list_len;
+ dma_addr_t dma_addr;
+
+ list_len = GET_U16_FROM_CDB(cmd, UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET);
+ if (!list_len)
+ return -EINVAL;
+
+ plist = kmalloc(list_len, GFP_KERNEL);
+ if (!plist)
+ return -ENOMEM;
+
+ res = nvme_trans_copy_from_user(hdr, plist, list_len);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+
+ ndesc = be16_to_cpu(plist->unmap_blk_desc_data_len) >> 4;
+ if (!ndesc || ndesc > 256) {
+ res = -EINVAL;
+ goto out;
+ }
+
+ range = dma_alloc_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
+ &dma_addr, GFP_KERNEL);
+ if (!range) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < ndesc; i++) {
+ range[i].nlb = cpu_to_le32(be32_to_cpu(plist->desc[i].nlb));
+ range[i].slba = cpu_to_le64(be64_to_cpu(plist->desc[i].slba));
+ range[i].cattr = 0;
+ }
+
+ memset(&c, 0, sizeof(c));
+ c.dsm.opcode = nvme_cmd_dsm;
+ c.dsm.nsid = cpu_to_le32(ns->ns_id);
+ c.dsm.prp1 = cpu_to_le64(dma_addr);
+ c.dsm.nr = cpu_to_le32(ndesc - 1);
+ c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+
+ nvmeq = get_nvmeq(dev);
+ put_nvmeq(nvmeq);
+
+ nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+
+ dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
+ range, dma_addr);
+ out:
+ kfree(plist);
+ return res;
+}
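+
+ /*
+ * A minimal userspace sketch of the parameter list nvme_trans_unmap()
+ * parses (one descriptor, all fields big-endian, matching
+ * struct scsi_unmap_parm_list above):
+ *
+ *   #include <stdint.h>
+ *   #include <string.h>
+ *   #include <arpa/inet.h>
+ *
+ *   static size_t build_unmap_parm_list(uint8_t *buf,
+ *                                       uint64_t slba, uint32_t nlb)
+ *   {
+ *           uint16_t v16;
+ *           uint32_t v32;
+ *
+ *           memset(buf, 0, 24);
+ *           v16 = htons(22);        // UNMAP DATA LENGTH (total - 2)
+ *           memcpy(buf, &v16, 2);
+ *           v16 = htons(16);        // BLOCK DESCRIPTOR DATA LENGTH
+ *           memcpy(buf + 2, &v16, 2);
+ *           v32 = htonl(slba >> 32);        // descriptor: starting LBA
+ *           memcpy(buf + 8, &v32, 4);
+ *           v32 = htonl(slba & 0xffffffff);
+ *           memcpy(buf + 12, &v32, 4);
+ *           v32 = htonl(nlb);       // number of logical blocks
+ *           memcpy(buf + 16, &v32, 4);
+ *           return 24;
+ *   }
+ */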
+
+static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
+{
+ u8 cmd[BLK_MAX_CDB];
+ int retcode;
+ unsigned int opcode;
+
+ if (hdr->cmdp == NULL)
+ return -EMSGSIZE;
+ if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
+ return -EFAULT;
+
+ opcode = cmd[0];
+
+ switch (opcode) {
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ retcode = nvme_trans_io(ns, hdr, 0, cmd);
+ break;
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ retcode = nvme_trans_io(ns, hdr, 1, cmd);
+ break;
+ case INQUIRY:
+ retcode = nvme_trans_inquiry(ns, hdr, cmd);
+ break;
+ case LOG_SENSE:
+ retcode = nvme_trans_log_sense(ns, hdr, cmd);
+ break;
+ case MODE_SELECT:
+ case MODE_SELECT_10:
+ retcode = nvme_trans_mode_select(ns, hdr, cmd);
+ break;
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+ retcode = nvme_trans_mode_sense(ns, hdr, cmd);
+ break;
+ case READ_CAPACITY:
+ retcode = nvme_trans_read_capacity(ns, hdr, cmd);
+ break;
+ case SERVICE_ACTION_IN:
+ if (IS_READ_CAP_16(cmd))
+ retcode = nvme_trans_read_capacity(ns, hdr, cmd);
+ else
+ goto out;
+ break;
+ case REPORT_LUNS:
+ retcode = nvme_trans_report_luns(ns, hdr, cmd);
+ break;
+ case REQUEST_SENSE:
+ retcode = nvme_trans_request_sense(ns, hdr, cmd);
+ break;
+ case SECURITY_PROTOCOL_IN:
+ case SECURITY_PROTOCOL_OUT:
+ retcode = nvme_trans_security_protocol(ns, hdr, cmd);
+ break;
+ case START_STOP:
+ retcode = nvme_trans_start_stop(ns, hdr, cmd);
+ break;
+ case SYNCHRONIZE_CACHE:
+ retcode = nvme_trans_synchronize_cache(ns, hdr, cmd);
+ break;
+ case FORMAT_UNIT:
+ retcode = nvme_trans_format_unit(ns, hdr, cmd);
+ break;
+ case TEST_UNIT_READY:
+ retcode = nvme_trans_test_unit_ready(ns, hdr, cmd);
+ break;
+ case WRITE_BUFFER:
+ retcode = nvme_trans_write_buffer(ns, hdr, cmd);
+ break;
+ case UNMAP:
+ retcode = nvme_trans_unmap(ns, hdr, cmd);
+ break;
+ default:
+ out:
+ retcode = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ break;
+ }
+ return retcode;
+}
+
+int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
+{
+ struct sg_io_hdr hdr;
+ int retcode;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (copy_from_user(&hdr, u_hdr, sizeof(hdr)))
+ return -EFAULT;
+ if (hdr.interface_id != 'S')
+ return -EINVAL;
+ if (hdr.cmd_len > BLK_MAX_CDB)
+ return -EINVAL;
+
+ retcode = nvme_scsi_translate(ns, &hdr);
+ if (retcode < 0)
+ return retcode;
+ if (retcode > 0)
+ retcode = SNTI_TRANSLATION_SUCCESS;
+ if (copy_to_user(u_hdr, &hdr, sizeof(hdr)))
+ return -EFAULT;
+
+ return retcode;
+}
+
+int nvme_sg_get_version_num(int __user *ip)
+{
+ return put_user(sg_version_num, ip);
+}
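+
+ /*
+ * A minimal userspace sketch of driving this translation layer via
+ * the SG_IO ioctl (the node name /dev/nvme0n1 is an assumption, and
+ * CAP_SYS_ADMIN is required by nvme_sg_io() above):
+ *
+ *   #include <fcntl.h>
+ *   #include <string.h>
+ *   #include <unistd.h>
+ *   #include <sys/ioctl.h>
+ *   #include <scsi/sg.h>
+ *
+ *   static int scsi_test_unit_ready(const char *dev)
+ *   {
+ *           unsigned char cdb[6] = { 0 };   // TEST UNIT READY
+ *           unsigned char sense[32];
+ *           struct sg_io_hdr hdr;
+ *           int ret, fd = open(dev, O_RDWR);
+ *
+ *           if (fd < 0)
+ *                   return -1;
+ *           memset(&hdr, 0, sizeof(hdr));
+ *           hdr.interface_id = 'S';
+ *           hdr.cmd_len = sizeof(cdb);
+ *           hdr.cmdp = cdb;
+ *           hdr.dxfer_direction = SG_DXFER_NONE;
+ *           hdr.mx_sb_len = sizeof(sense);
+ *           hdr.sbp = sense;
+ *           ret = ioctl(fd, SG_IO, &hdr) < 0 ? -1 : hdr.status;
+ *           close(fd);
+ *           return ret;     // SAM status from the translation, or -1
+ *   }
+ *
+ * e.g. scsi_test_unit_ready("/dev/nvme0n1") returns SAM_STAT_GOOD (0)
+ * once the controller reports ready.
+ */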
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index ba2b6b5e591..e76bdc074db 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -236,13 +236,12 @@ static int pcd_block_open(struct block_device *bdev, fmode_t mode)
return ret;
}
-static int pcd_block_release(struct gendisk *disk, fmode_t mode)
+static void pcd_block_release(struct gendisk *disk, fmode_t mode)
{
struct pcd_unit *cd = disk->private_data;
mutex_lock(&pcd_mutex);
cdrom_release(&cd->info, mode);
mutex_unlock(&pcd_mutex);
- return 0;
}
static int pcd_block_ioctl(struct block_device *bdev, fmode_t mode,
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 831e3ac156e..19ad8f0c83e 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -783,7 +783,7 @@ static int pd_ioctl(struct block_device *bdev, fmode_t mode,
}
}
-static int pd_release(struct gendisk *p, fmode_t mode)
+static void pd_release(struct gendisk *p, fmode_t mode)
{
struct pd_unit *disk = p->private_data;
@@ -791,8 +791,6 @@ static int pd_release(struct gendisk *p, fmode_t mode)
if (!--disk->access && disk->removable)
pd_special_command(disk, pd_door_unlock);
mutex_unlock(&pd_mutex);
-
- return 0;
}
static unsigned int pd_check_events(struct gendisk *p, unsigned int clearing)
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index ec8f9ed6326..f5c86d523ba 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -211,7 +211,7 @@ static int pf_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
-static int pf_release(struct gendisk *disk, fmode_t mode);
+static void pf_release(struct gendisk *disk, fmode_t mode);
static int pf_detect(void);
static void do_pf_read(void);
@@ -360,14 +360,15 @@ static int pf_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, u
return 0;
}
-static int pf_release(struct gendisk *disk, fmode_t mode)
+static void pf_release(struct gendisk *disk, fmode_t mode)
{
struct pf_unit *pf = disk->private_data;
mutex_lock(&pf_mutex);
if (pf->access <= 0) {
mutex_unlock(&pf_mutex);
- return -EINVAL;
+ WARN_ON(1);
+ return;
}
pf->access--;
@@ -376,8 +377,6 @@ static int pf_release(struct gendisk *disk, fmode_t mode)
pf_lock(pf, 0);
mutex_unlock(&pf_mutex);
- return 0;
-
}
static unsigned int pf_check_events(struct gendisk *disk, unsigned int clearing)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 2e7de7a59bf..3c08983e600 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -901,7 +901,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
pd->iosched.successive_reads += bio->bi_size >> 10;
else {
pd->iosched.successive_reads = 0;
- pd->iosched.last_write = bio->bi_sector + bio_sectors(bio);
+ pd->iosched.last_write = bio_end_sector(bio);
}
if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
if (pd->read_speed == pd->write_speed) {
@@ -948,31 +948,6 @@ static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_que
}
/*
- * Copy CD_FRAMESIZE bytes from src_bio into a destination page
- */
-static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs, struct page *dst_page, int dst_offs)
-{
- unsigned int copy_size = CD_FRAMESIZE;
-
- while (copy_size > 0) {
- struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg);
- void *vfrom = kmap_atomic(src_bvl->bv_page) +
- src_bvl->bv_offset + offs;
- void *vto = page_address(dst_page) + dst_offs;
- int len = min_t(int, copy_size, src_bvl->bv_len - offs);
-
- BUG_ON(len < 0);
- memcpy(vto, vfrom, len);
- kunmap_atomic(vfrom);
-
- seg++;
- offs = 0;
- dst_offs += len;
- copy_size -= len;
- }
-}
-
-/*
* Copy all data for this packet to pkt->pages[], so that
* a) The number of required segments for the write bio is minimized, which
* is necessary for some scsi controllers.
@@ -1181,16 +1156,15 @@ static int pkt_start_recovery(struct packet_data *pkt)
new_sector = new_block * (CD_FRAMESIZE >> 9);
pkt->sector = new_sector;
+ bio_reset(pkt->bio);
+ pkt->bio->bi_bdev = pd->bdev;
+ pkt->bio->bi_rw = REQ_WRITE;
pkt->bio->bi_sector = new_sector;
- pkt->bio->bi_next = NULL;
- pkt->bio->bi_flags = 1 << BIO_UPTODATE;
- pkt->bio->bi_idx = 0;
+ pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
+ pkt->bio->bi_vcnt = pkt->frames;
- BUG_ON(pkt->bio->bi_rw != REQ_WRITE);
- BUG_ON(pkt->bio->bi_vcnt != pkt->frames);
- BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE);
- BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
- BUG_ON(pkt->bio->bi_private != pkt);
+ pkt->bio->bi_end_io = pkt_end_io_packet_write;
+ pkt->bio->bi_private = pkt;
drop_super(sb);
return 1;
@@ -1325,55 +1299,35 @@ try_next_bio:
*/
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
- struct bio *bio;
int f;
- int frames_write;
struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
+ bio_reset(pkt->w_bio);
+ pkt->w_bio->bi_sector = pkt->sector;
+ pkt->w_bio->bi_bdev = pd->bdev;
+ pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
+ pkt->w_bio->bi_private = pkt;
+
+ /* XXX: locking? */
for (f = 0; f < pkt->frames; f++) {
bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
+ if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
+ BUG();
}
+ VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt);
/*
* Fill-in bvec with data from orig_bios.
*/
- frames_write = 0;
spin_lock(&pkt->lock);
- bio_list_for_each(bio, &pkt->orig_bios) {
- int segment = bio->bi_idx;
- int src_offs = 0;
- int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
- int num_frames = bio->bi_size / CD_FRAMESIZE;
- BUG_ON(first_frame < 0);
- BUG_ON(first_frame + num_frames > pkt->frames);
- for (f = first_frame; f < first_frame + num_frames; f++) {
- struct bio_vec *src_bvl = bio_iovec_idx(bio, segment);
-
- while (src_offs >= src_bvl->bv_len) {
- src_offs -= src_bvl->bv_len;
- segment++;
- BUG_ON(segment >= bio->bi_vcnt);
- src_bvl = bio_iovec_idx(bio, segment);
- }
+ bio_copy_data(pkt->w_bio, pkt->orig_bios.head);
- if (src_bvl->bv_len - src_offs >= CD_FRAMESIZE) {
- bvec[f].bv_page = src_bvl->bv_page;
- bvec[f].bv_offset = src_bvl->bv_offset + src_offs;
- } else {
- pkt_copy_bio_data(bio, segment, src_offs,
- bvec[f].bv_page, bvec[f].bv_offset);
- }
- src_offs += CD_FRAMESIZE;
- frames_write++;
- }
- }
pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
spin_unlock(&pkt->lock);
VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n",
- frames_write, (unsigned long long)pkt->sector);
- BUG_ON(frames_write != pkt->write_size);
+ pkt->write_size, (unsigned long long)pkt->sector);
if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
pkt_make_local_copy(pkt, bvec);
@@ -1383,16 +1337,6 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
}
/* Start the write request */
- bio_reset(pkt->w_bio);
- pkt->w_bio->bi_sector = pkt->sector;
- pkt->w_bio->bi_bdev = pd->bdev;
- pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
- pkt->w_bio->bi_private = pkt;
- for (f = 0; f < pkt->frames; f++)
- if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
- BUG();
- VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt);
-
atomic_set(&pkt->io_wait, 1);
pkt->w_bio->bi_rw = WRITE;
pkt_queue_bio(pd, pkt->w_bio);
@@ -2376,10 +2320,9 @@ out:
return ret;
}
-static int pkt_close(struct gendisk *disk, fmode_t mode)
+static void pkt_close(struct gendisk *disk, fmode_t mode)
{
struct pktcdvd_device *pd = disk->private_data;
- int ret = 0;
mutex_lock(&pktcdvd_mutex);
mutex_lock(&ctl_mutex);
@@ -2391,7 +2334,6 @@ static int pkt_close(struct gendisk *disk, fmode_t mode)
}
mutex_unlock(&ctl_mutex);
mutex_unlock(&pktcdvd_mutex);
- return ret;
}
@@ -2433,7 +2375,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
cloned_bio->bi_bdev = pd->bdev;
cloned_bio->bi_private = psd;
cloned_bio->bi_end_io = pkt_end_io_read_cloned;
- pd->stats.secs_r += bio->bi_size >> 9;
+ pd->stats.secs_r += bio_sectors(bio);
pkt_queue_bio(pd, cloned_bio);
return;
}
@@ -2454,7 +2396,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
zone = ZONE(bio->bi_sector, pd);
VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n",
(unsigned long long)bio->bi_sector,
- (unsigned long long)(bio->bi_sector + bio_sectors(bio)));
+ (unsigned long long)bio_end_sector(bio));
/* Check if we have to split the bio */
{
@@ -2462,7 +2404,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
sector_t last_zone;
int first_sectors;
- last_zone = ZONE(bio->bi_sector + bio_sectors(bio) - 1, pd);
+ last_zone = ZONE(bio_end_sector(bio) - 1, pd);
if (last_zone != zone) {
BUG_ON(last_zone != zone + pd->settings.size);
first_sectors = last_zone - bio->bi_sector;
@@ -2648,7 +2590,7 @@ static int pkt_seq_show(struct seq_file *m, void *p)
static int pkt_seq_open(struct inode *inode, struct file *file)
{
- return single_open(file, pkt_seq_show, PDE(inode)->data);
+ return single_open(file, pkt_seq_show, PDE_DATA(inode));
}
static const struct file_operations pkt_proc_fops = {
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 75e112d6600..06a2e53e5f3 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -525,7 +525,7 @@ static int ps3vram_proc_show(struct seq_file *m, void *v)
static int ps3vram_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, ps3vram_proc_show, PDE(inode)->data);
+ return single_open(file, ps3vram_proc_show, PDE_DATA(inode));
}
static const struct file_operations ps3vram_proc_fops = {
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index b7b7a88d9f6..ca63104136e 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1,3 +1,4 @@
+
/*
rbd.c -- Export ceph rados objects as a Linux block device
@@ -32,12 +33,14 @@
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
+#include <linux/bsearch.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
+#include <linux/slab.h>
#include "rbd_types.h"
@@ -52,13 +55,6 @@
#define SECTOR_SHIFT 9
#define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
-/* It might be useful to have these defined elsewhere */
-
-#define U8_MAX ((u8) (~0U))
-#define U16_MAX ((u16) (~0U))
-#define U32_MAX ((u32) (~0U))
-#define U64_MAX ((u64) (~0ULL))
-
#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"
@@ -72,6 +68,8 @@
#define RBD_SNAP_HEAD_NAME "-"
+#define BAD_SNAP_INDEX U32_MAX /* invalid index into snap array */
+
/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX 64
@@ -80,11 +78,14 @@
/* Feature bits */
-#define RBD_FEATURE_LAYERING 1
+#define RBD_FEATURE_LAYERING (1<<0)
+#define RBD_FEATURE_STRIPINGV2 (1<<1)
+#define RBD_FEATURES_ALL \
+ (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)
/* Features supported by this (client software) implementation. */
-#define RBD_FEATURES_ALL (0)
+#define RBD_FEATURES_SUPPORTED (RBD_FEATURES_ALL)
/*
* An RBD device name will be "rbd#", where the "rbd" comes from
@@ -112,7 +113,8 @@ struct rbd_image_header {
char *snap_names;
u64 *snap_sizes;
- u64 obj_version;
+ u64 stripe_unit;
+ u64 stripe_count;
};
/*
@@ -142,13 +144,13 @@ struct rbd_image_header {
*/
struct rbd_spec {
u64 pool_id;
- char *pool_name;
+ const char *pool_name;
- char *image_id;
- char *image_name;
+ const char *image_id;
+ const char *image_name;
u64 snap_id;
- char *snap_name;
+ const char *snap_name;
struct kref kref;
};
@@ -174,13 +176,44 @@ enum obj_request_type {
OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};
+enum obj_req_flags {
+ OBJ_REQ_DONE, /* completion flag: not done = 0, done = 1 */
+ OBJ_REQ_IMG_DATA, /* object usage: standalone = 0, image = 1 */
+ OBJ_REQ_KNOWN, /* EXISTS flag valid: no = 0, yes = 1 */
+ OBJ_REQ_EXISTS, /* target exists: no = 0, yes = 1 */
+};
+
struct rbd_obj_request {
const char *object_name;
u64 offset; /* object start byte */
u64 length; /* bytes from offset */
+ unsigned long flags;
- struct rbd_img_request *img_request;
- struct list_head links; /* img_request->obj_requests */
+ /*
+ * An object request associated with an image will have its
+ * img_data flag set; a standalone object request will not.
+ *
+ * A standalone object request will have which == BAD_WHICH
+ * and a null obj_request pointer.
+ *
+ * An object request initiated in support of a layered image
+ * object (to check for its existence before a write) will
+ * have which == BAD_WHICH and a non-null obj_request pointer.
+ *
+ * Finally, an object request for rbd image data will have
+ * which != BAD_WHICH, and will have a non-null img_request
+ * pointer. The value of which will be in the range
+ * 0..(img_request->obj_request_count-1).
+ */
+ union {
+ struct rbd_obj_request *obj_request; /* STAT op */
+ struct {
+ struct rbd_img_request *img_request;
+ u64 img_offset;
+ /* links for img_request->obj_requests list */
+ struct list_head links;
+ };
+ };
u32 which; /* posn image request list */
enum obj_request_type type;
@@ -191,13 +224,12 @@ struct rbd_obj_request {
u32 page_count;
};
};
+ struct page **copyup_pages;
struct ceph_osd_request *osd_req;
u64 xferred; /* bytes transferred */
- u64 version;
int result;
- atomic_t done;
rbd_obj_callback_t callback;
struct completion completion;
@@ -205,19 +237,31 @@ struct rbd_obj_request {
struct kref kref;
};
+enum img_req_flags {
+ IMG_REQ_WRITE, /* I/O direction: read = 0, write = 1 */
+ IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */
+ IMG_REQ_LAYERED, /* ENOENT handling: normal = 0, layered = 1 */
+};
+
struct rbd_img_request {
- struct request *rq;
struct rbd_device *rbd_dev;
u64 offset; /* starting image byte offset */
u64 length; /* byte count from offset */
- bool write_request; /* false for read */
+ unsigned long flags;
union {
+ u64 snap_id; /* for reads */
struct ceph_snap_context *snapc; /* for writes */
- u64 snap_id; /* for reads */
};
+ union {
+ struct request *rq; /* block request */
+ struct rbd_obj_request *obj_request; /* obj req initiator */
+ };
+ struct page **copyup_pages;
spinlock_t completion_lock;/* protects next_completion */
u32 next_completion;
rbd_img_callback_t callback;
+	u64 xferred;	/* aggregate bytes transferred */
+ int result; /* first nonzero obj_request result */
u32 obj_request_count;
struct list_head obj_requests; /* rbd_obj_request structs */
@@ -232,15 +276,6 @@ struct rbd_img_request {
#define for_each_obj_request_safe(ireq, oreq, n) \
list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
-struct rbd_snap {
- struct device dev;
- const char *name;
- u64 size;
- struct list_head node;
- u64 id;
- u64 features;
-};
-
struct rbd_mapping {
u64 size;
u64 features;
@@ -276,6 +311,7 @@ struct rbd_device {
struct rbd_spec *parent_spec;
u64 parent_overlap;
+ struct rbd_device *parent;
/* protects updating the header */
struct rw_semaphore header_rwsem;
@@ -284,9 +320,6 @@ struct rbd_device {
struct list_head node;
- /* list of snapshots */
- struct list_head snaps;
-
/* sysfs related */
struct device dev;
unsigned long open_count; /* protected by lock */
@@ -312,16 +345,21 @@ static DEFINE_SPINLOCK(rbd_dev_list_lock);
static LIST_HEAD(rbd_client_list); /* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);
-static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);
-static int rbd_dev_snaps_register(struct rbd_device *rbd_dev);
+/* Slab caches for frequently-allocated structures */
+
+static struct kmem_cache *rbd_img_request_cache;
+static struct kmem_cache *rbd_obj_request_cache;
+static struct kmem_cache *rbd_segment_name_cache;
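
These caches are presumably created once at module load and torn down at exit; the usual kmem_cache lifecycle looks roughly like this (helper name and error handling are illustrative):

    static int rbd_slab_init(void)
    {
            rbd_img_request_cache = kmem_cache_create("rbd_img_request",
                                    sizeof (struct rbd_img_request),
                                    __alignof__(struct rbd_img_request),
                                    0, NULL);
            if (!rbd_img_request_cache)
                    return -ENOMEM;
            /* ...and likewise for the obj_request and segment_name caches */
            return 0;
    }
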
-static void rbd_dev_release(struct device *dev);
-static void rbd_remove_snap_dev(struct rbd_snap *snap);
+static int rbd_img_request_submit(struct rbd_img_request *img_request);
+
+static void rbd_dev_device_release(struct device *dev);
static ssize_t rbd_add(struct bus_type *bus, const char *buf,
size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
size_t count);
+static int rbd_dev_image_probe(struct rbd_device *rbd_dev);
static struct bus_attribute rbd_bus_attrs[] = {
__ATTR(add, S_IWUSR, NULL, rbd_add),
@@ -383,8 +421,19 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
# define rbd_assert(expr) ((void) 0)
#endif /* !RBD_DEBUG */
-static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver);
-static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver);
+static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
+static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
+static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
+
+static int rbd_dev_refresh(struct rbd_device *rbd_dev);
+static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev);
+static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
+ u64 snap_id);
+static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
+ u8 *order, u64 *snap_size);
+static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
+ u64 *snap_features);
+static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
@@ -411,7 +460,7 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
return 0;
}
-static int rbd_release(struct gendisk *disk, fmode_t mode)
+static void rbd_release(struct gendisk *disk, fmode_t mode)
{
struct rbd_device *rbd_dev = disk->private_data;
unsigned long open_count_before;
@@ -424,8 +473,6 @@ static int rbd_release(struct gendisk *disk, fmode_t mode)
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
put_device(&rbd_dev->dev);
mutex_unlock(&ctl_mutex);
-
- return 0;
}
static const struct block_device_operations rbd_bd_ops = {
@@ -484,6 +531,13 @@ out_opt:
return ERR_PTR(ret);
}
+static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
+{
+ kref_get(&rbdc->kref);
+
+ return rbdc;
+}
+
/*
* Find a ceph client with specific addr and configuration. If
* found, bump its reference count.
@@ -499,7 +553,8 @@ static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
spin_lock(&rbd_client_list_lock);
list_for_each_entry(client_node, &rbd_client_list, node) {
if (!ceph_compare_options(ceph_opts, client_node->client)) {
- kref_get(&client_node->kref);
+ __rbd_get_client(client_node);
+
found = true;
break;
}
@@ -722,7 +777,6 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
header->snap_sizes[i] =
le64_to_cpu(ondisk->snaps[i].image_size);
} else {
- WARN_ON(ondisk->snap_names_len);
header->snap_names = NULL;
header->snap_sizes = NULL;
}
@@ -735,18 +789,13 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
/* Allocate and fill in the snapshot context */
header->image_size = le64_to_cpu(ondisk->image_size);
- size = sizeof (struct ceph_snap_context);
- size += snap_count * sizeof (header->snapc->snaps[0]);
- header->snapc = kzalloc(size, GFP_KERNEL);
+
+ header->snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
if (!header->snapc)
goto out_err;
-
- atomic_set(&header->snapc->nref, 1);
header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
- header->snapc->num_snaps = snap_count;
for (i = 0; i < snap_count; i++)
- header->snapc->snaps[i] =
- le64_to_cpu(ondisk->snaps[i].id);
+ header->snapc->snaps[i] = le64_to_cpu(ondisk->snaps[i].id);
return 0;
@@ -761,70 +810,174 @@ out_err:
return -ENOMEM;
}
-static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
+static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
+{
+ const char *snap_name;
+
+ rbd_assert(which < rbd_dev->header.snapc->num_snaps);
+
+ /* Skip over names until we find the one we are looking for */
+
+ snap_name = rbd_dev->header.snap_names;
+ while (which--)
+ snap_name += strlen(snap_name) + 1;
+
+ return kstrdup(snap_name, GFP_KERNEL);
+}
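
Format 1 stores snapshot names as consecutive NUL-terminated strings, so lookup by index is the linear walk seen above. A standalone illustration of the same walk:

    #include <string.h>

    /* Return the which'th name in a packed NUL-separated name buffer. */
    static const char *nth_name(const char *names, unsigned int which)
    {
            while (which--)
                    names += strlen(names) + 1;
            return names;
    }
    /* nth_name("one\0two\0three", 2) yields "three" */
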
+
+/*
+ * Snapshot id comparison function for use with qsort()/bsearch().
+ * Note that result is for snapshots in *descending* order.
+ */
+static int snapid_compare_reverse(const void *s1, const void *s2)
+{
+ u64 snap_id1 = *(u64 *)s1;
+ u64 snap_id2 = *(u64 *)s2;
+
+ if (snap_id1 < snap_id2)
+ return 1;
+ return snap_id1 == snap_id2 ? 0 : -1;
+}
+
+/*
+ * Search a snapshot context to see if the given snapshot id is
+ * present.
+ *
+ * Returns the position of the snapshot id in the array if it's found,
+ * or BAD_SNAP_INDEX otherwise.
+ *
+ * Note: The snapshot array is kept sorted (by the osd) in
+ * reverse order, highest snapshot id first.
+ */
+static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
- struct rbd_snap *snap;
+ struct ceph_snap_context *snapc = rbd_dev->header.snapc;
+ u64 *found;
+ found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
+ sizeof (snap_id), snapid_compare_reverse);
+
+ return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
+}
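
A self-contained userspace demonstration of the descending-order bsearch() used here (values chosen arbitrarily):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int cmp_desc(const void *a, const void *b)
    {
            uint64_t x = *(const uint64_t *)a;
            uint64_t y = *(const uint64_t *)b;

            if (x < y)
                    return 1;
            return x == y ? 0 : -1;
    }

    int main(void)
    {
            uint64_t snaps[] = { 9, 7, 4, 1 };      /* highest id first */
            uint64_t key = 4;
            uint64_t *found = bsearch(&key, snaps, 4, sizeof (key), cmp_desc);

            printf("index %ld\n", found ? (long)(found - snaps) : -1L);
            return 0;       /* prints "index 2" */
    }
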
+
+static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
+ u64 snap_id)
+{
+ u32 which;
+
+ which = rbd_dev_snap_index(rbd_dev, snap_id);
+ if (which == BAD_SNAP_INDEX)
+ return NULL;
+
+ return _rbd_dev_v1_snap_name(rbd_dev, which);
+}
+
+static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
+{
if (snap_id == CEPH_NOSNAP)
return RBD_SNAP_HEAD_NAME;
- list_for_each_entry(snap, &rbd_dev->snaps, node)
- if (snap_id == snap->id)
- return snap->name;
+ rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+ if (rbd_dev->image_format == 1)
+ return rbd_dev_v1_snap_name(rbd_dev, snap_id);
- return NULL;
+ return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
-static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
+static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
+ u64 *snap_size)
{
+ rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+ if (snap_id == CEPH_NOSNAP) {
+ *snap_size = rbd_dev->header.image_size;
+ } else if (rbd_dev->image_format == 1) {
+ u32 which;
- struct rbd_snap *snap;
+ which = rbd_dev_snap_index(rbd_dev, snap_id);
+ if (which == BAD_SNAP_INDEX)
+ return -ENOENT;
- list_for_each_entry(snap, &rbd_dev->snaps, node) {
- if (!strcmp(snap_name, snap->name)) {
- rbd_dev->spec->snap_id = snap->id;
- rbd_dev->mapping.size = snap->size;
- rbd_dev->mapping.features = snap->features;
+ *snap_size = rbd_dev->header.snap_sizes[which];
+ } else {
+ u64 size = 0;
+ int ret;
- return 0;
- }
+ ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
+ if (ret)
+ return ret;
+
+ *snap_size = size;
}
+ return 0;
+}
+
+static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
+ u64 *snap_features)
+{
+ rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+ if (snap_id == CEPH_NOSNAP) {
+ *snap_features = rbd_dev->header.features;
+ } else if (rbd_dev->image_format == 1) {
+ *snap_features = 0; /* No features for format 1 */
+ } else {
+ u64 features = 0;
+ int ret;
- return -ENOENT;
+ ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
+ if (ret)
+ return ret;
+
+ *snap_features = features;
+ }
+ return 0;
}
-static int rbd_dev_set_mapping(struct rbd_device *rbd_dev)
+static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
+ const char *snap_name = rbd_dev->spec->snap_name;
+ u64 snap_id;
+ u64 size = 0;
+ u64 features = 0;
int ret;
- if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME,
- sizeof (RBD_SNAP_HEAD_NAME))) {
- rbd_dev->spec->snap_id = CEPH_NOSNAP;
- rbd_dev->mapping.size = rbd_dev->header.image_size;
- rbd_dev->mapping.features = rbd_dev->header.features;
- ret = 0;
+ if (strcmp(snap_name, RBD_SNAP_HEAD_NAME)) {
+ snap_id = rbd_snap_id_by_name(rbd_dev, snap_name);
+ if (snap_id == CEPH_NOSNAP)
+ return -ENOENT;
} else {
- ret = snap_by_name(rbd_dev, rbd_dev->spec->snap_name);
- if (ret < 0)
- goto done;
- rbd_dev->mapping.read_only = true;
+ snap_id = CEPH_NOSNAP;
}
- set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
-done:
- return ret;
+ ret = rbd_snap_size(rbd_dev, snap_id, &size);
+ if (ret)
+ return ret;
+ ret = rbd_snap_features(rbd_dev, snap_id, &features);
+ if (ret)
+ return ret;
+
+ rbd_dev->mapping.size = size;
+ rbd_dev->mapping.features = features;
+
+ /* If we are mapping a snapshot it must be marked read-only */
+
+ if (snap_id != CEPH_NOSNAP)
+ rbd_dev->mapping.read_only = true;
+
+ return 0;
}
-static void rbd_header_free(struct rbd_image_header *header)
+static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
- kfree(header->object_prefix);
- header->object_prefix = NULL;
- kfree(header->snap_sizes);
- header->snap_sizes = NULL;
- kfree(header->snap_names);
- header->snap_names = NULL;
- ceph_put_snap_context(header->snapc);
- header->snapc = NULL;
+ rbd_dev->mapping.size = 0;
+ rbd_dev->mapping.features = 0;
+ rbd_dev->mapping.read_only = true;
+}
+
+static void rbd_dev_clear_mapping(struct rbd_device *rbd_dev)
+{
+ rbd_dev->mapping.size = 0;
+ rbd_dev->mapping.features = 0;
+ rbd_dev->mapping.read_only = true;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
@@ -833,7 +986,7 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
u64 segment;
int ret;
- name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
+ name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
if (!name)
return NULL;
segment = offset >> rbd_dev->header.obj_order;
@@ -849,6 +1002,13 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
return name;
}
+static void rbd_segment_name_free(const char *name)
+{
+ /* The explicit cast here is needed to drop the const qualifier */
+
+ kmem_cache_free(rbd_segment_name_cache, (void *)name);
+}
+
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
@@ -921,6 +1081,37 @@ static void zero_bio_chain(struct bio *chain, int start_ofs)
}
/*
+ * Similar to zero_bio_chain(), zeros data defined by a page array,
+ * starting at the given byte offset from the start of the array and
+ * continuing up to the given end offset. The pages array is
+ * assumed to be big enough to hold all bytes up to the end.
+ */
+static void zero_pages(struct page **pages, u64 offset, u64 end)
+{
+ struct page **page = &pages[offset >> PAGE_SHIFT];
+
+ rbd_assert(end > offset);
+ rbd_assert(end - offset <= (u64)SIZE_MAX);
+ while (offset < end) {
+ size_t page_offset;
+ size_t length;
+ unsigned long flags;
+ void *kaddr;
+
+ page_offset = (size_t)(offset & ~PAGE_MASK);
+ length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
+ local_irq_save(flags);
+ kaddr = kmap_atomic(*page);
+ memset(kaddr + page_offset, 0, length);
+ kunmap_atomic(kaddr);
+ local_irq_restore(flags);
+
+ offset += length;
+ page++;
+ }
+}
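
The per-page span arithmetic in zero_pages() generalizes to any byte range over a page array; a userspace sketch of just that calculation, assuming 4096-byte pages:

    #include <stdint.h>
    #include <stdio.h>

    #define PG_SIZE 4096UL          /* stand-in for PAGE_SIZE */

    static void walk_spans(uint64_t offset, uint64_t end)
    {
            while (offset < end) {
                    size_t page_offset = offset % PG_SIZE;
                    size_t length = PG_SIZE - page_offset;

                    if (length > end - offset)
                            length = (size_t)(end - offset);
                    printf("page %llu: %zu bytes at offset %zu\n",
                           (unsigned long long)(offset / PG_SIZE),
                           length, page_offset);
                    offset += length;
            }
    }
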
+
+/*
* Clone a portion of a bio, starting at the given byte offset
* and continuing for the number of bytes indicated.
*/
@@ -952,7 +1143,7 @@ static struct bio *bio_clone_range(struct bio *bio_src,
/* Find first affected segment... */
resid = offset;
- __bio_for_each_segment(bv, bio_src, idx, 0) {
+ bio_for_each_segment(bv, bio_src, idx) {
if (resid < bv->bv_len)
break;
resid -= bv->bv_len;
@@ -1064,6 +1255,77 @@ out_err:
return NULL;
}
+/*
+ * The default/initial value for all object request flags is 0. For
+ * each flag, once its value is set to 1 it is never reset to 0
+ * again.
+ */
+static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
+{
+ if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
+ struct rbd_device *rbd_dev;
+
+ rbd_dev = obj_request->img_request->rbd_dev;
+ rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
+ obj_request);
+ }
+}
+
+static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
+{
+ smp_mb();
+ return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
+}
+
+static void obj_request_done_set(struct rbd_obj_request *obj_request)
+{
+ if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
+ struct rbd_device *rbd_dev = NULL;
+
+ if (obj_request_img_data_test(obj_request))
+ rbd_dev = obj_request->img_request->rbd_dev;
+ rbd_warn(rbd_dev, "obj_request %p already marked done\n",
+ obj_request);
+ }
+}
+
+static bool obj_request_done_test(struct rbd_obj_request *obj_request)
+{
+ smp_mb();
+ return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
+}
+
+/*
+ * This sets the KNOWN flag after (possibly) setting the EXISTS
+ * flag. The latter is set based on the "exists" value provided.
+ *
+ * Note that for our purposes once an object exists it never goes
+ * away again. It's possible that the responses from two existence
+ * checks are separated by the creation of the target object, so that
+ * the first ("doesn't exist") response arrives *after* the second
+ * ("does exist"). In that case we ignore the stale "doesn't exist"
+ * response.
+ */
+static void obj_request_existence_set(struct rbd_obj_request *obj_request,
+ bool exists)
+{
+ if (exists)
+ set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
+ set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
+ smp_mb();
+}
+
+static bool obj_request_known_test(struct rbd_obj_request *obj_request)
+{
+ smp_mb();
+ return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
+}
+
+static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
+{
+ smp_mb();
+ return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
+}
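
All of these helpers follow one pattern: a set-once bit plus a full memory barrier, so a reader that observes the bit also observes everything written before it was set. A rough userspace analog using C11 atomics (sequentially consistent operations stand in for the smp_mb() pairing):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define REQ_DONE (1UL << 0)

    static atomic_ulong req_flags;

    static void req_done_set(void)
    {
            atomic_fetch_or(&req_flags, REQ_DONE);  /* full ordering */
    }

    static bool req_done_test(void)
    {
            return (atomic_load(&req_flags) & REQ_DONE) != 0;
    }
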
+
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
dout("%s: obj %p (was %d)\n", __func__, obj_request,
@@ -1101,9 +1363,11 @@ static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
{
rbd_assert(obj_request->img_request == NULL);
- rbd_obj_request_get(obj_request);
+ /* Image request now owns object's original reference */
obj_request->img_request = img_request;
obj_request->which = img_request->obj_request_count;
+ rbd_assert(!obj_request_img_data_test(obj_request));
+ obj_request_img_data_set(obj_request);
rbd_assert(obj_request->which != BAD_WHICH);
img_request->obj_request_count++;
list_add_tail(&obj_request->links, &img_request->obj_requests);
@@ -1123,6 +1387,7 @@ static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
img_request->obj_request_count--;
rbd_assert(obj_request->which == img_request->obj_request_count);
obj_request->which = BAD_WHICH;
+ rbd_assert(obj_request_img_data_test(obj_request));
rbd_assert(obj_request->img_request == img_request);
obj_request->img_request = NULL;
obj_request->callback = NULL;
@@ -1141,76 +1406,6 @@ static bool obj_request_type_valid(enum obj_request_type type)
}
}
-static struct ceph_osd_req_op *rbd_osd_req_op_create(u16 opcode, ...)
-{
- struct ceph_osd_req_op *op;
- va_list args;
- size_t size;
-
- op = kzalloc(sizeof (*op), GFP_NOIO);
- if (!op)
- return NULL;
- op->op = opcode;
- va_start(args, opcode);
- switch (opcode) {
- case CEPH_OSD_OP_READ:
- case CEPH_OSD_OP_WRITE:
- /* rbd_osd_req_op_create(READ, offset, length) */
- /* rbd_osd_req_op_create(WRITE, offset, length) */
- op->extent.offset = va_arg(args, u64);
- op->extent.length = va_arg(args, u64);
- if (opcode == CEPH_OSD_OP_WRITE)
- op->payload_len = op->extent.length;
- break;
- case CEPH_OSD_OP_STAT:
- break;
- case CEPH_OSD_OP_CALL:
- /* rbd_osd_req_op_create(CALL, class, method, data, datalen) */
- op->cls.class_name = va_arg(args, char *);
- size = strlen(op->cls.class_name);
- rbd_assert(size <= (size_t) U8_MAX);
- op->cls.class_len = size;
- op->payload_len = size;
-
- op->cls.method_name = va_arg(args, char *);
- size = strlen(op->cls.method_name);
- rbd_assert(size <= (size_t) U8_MAX);
- op->cls.method_len = size;
- op->payload_len += size;
-
- op->cls.argc = 0;
- op->cls.indata = va_arg(args, void *);
- size = va_arg(args, size_t);
- rbd_assert(size <= (size_t) U32_MAX);
- op->cls.indata_len = (u32) size;
- op->payload_len += size;
- break;
- case CEPH_OSD_OP_NOTIFY_ACK:
- case CEPH_OSD_OP_WATCH:
- /* rbd_osd_req_op_create(NOTIFY_ACK, cookie, version) */
- /* rbd_osd_req_op_create(WATCH, cookie, version, flag) */
- op->watch.cookie = va_arg(args, u64);
- op->watch.ver = va_arg(args, u64);
- op->watch.ver = cpu_to_le64(op->watch.ver);
- if (opcode == CEPH_OSD_OP_WATCH && va_arg(args, int))
- op->watch.flag = (u8) 1;
- break;
- default:
- rbd_warn(NULL, "unsupported opcode %hu\n", opcode);
- kfree(op);
- op = NULL;
- break;
- }
- va_end(args);
-
- return op;
-}
-
-static void rbd_osd_req_op_destroy(struct ceph_osd_req_op *op)
-{
- kfree(op);
-}
-
static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
struct rbd_obj_request *obj_request)
{
@@ -1221,7 +1416,24 @@ static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
+
dout("%s: img %p\n", __func__, img_request);
+
+ /*
+ * If no error occurred, compute the aggregate transfer
+ * count for the image request. We could instead use
+ * atomic64_cmpxchg() to update it as each object request
+ * completes; it's not clear offhand which way is better.
+ */
+ if (!img_request->result) {
+ struct rbd_obj_request *obj_request;
+ u64 xferred = 0;
+
+ for_each_obj_request(img_request, obj_request)
+ xferred += obj_request->xferred;
+ img_request->xferred = xferred;
+ }
+
if (img_request->callback)
img_request->callback(img_request);
else
@@ -1237,39 +1449,56 @@ static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
return wait_for_completion_interruptible(&obj_request->completion);
}
-static void obj_request_done_init(struct rbd_obj_request *obj_request)
+/*
+ * The default/initial value for all image request flags is 0. Each
+ * is conditionally set to 1 at image request initialization time
+ * and currently never changes thereafter.
+ */
+static void img_request_write_set(struct rbd_img_request *img_request)
{
- atomic_set(&obj_request->done, 0);
- smp_wmb();
+ set_bit(IMG_REQ_WRITE, &img_request->flags);
+ smp_mb();
}
-static void obj_request_done_set(struct rbd_obj_request *obj_request)
+static bool img_request_write_test(struct rbd_img_request *img_request)
{
- int done;
+ smp_mb();
+ return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
+}
- done = atomic_inc_return(&obj_request->done);
- if (done > 1) {
- struct rbd_img_request *img_request = obj_request->img_request;
- struct rbd_device *rbd_dev;
+static void img_request_child_set(struct rbd_img_request *img_request)
+{
+ set_bit(IMG_REQ_CHILD, &img_request->flags);
+ smp_mb();
+}
- rbd_dev = img_request ? img_request->rbd_dev : NULL;
- rbd_warn(rbd_dev, "obj_request %p was already done\n",
- obj_request);
- }
+static bool img_request_child_test(struct rbd_img_request *img_request)
+{
+ smp_mb();
+ return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}
-static bool obj_request_done_test(struct rbd_obj_request *obj_request)
+static void img_request_layered_set(struct rbd_img_request *img_request)
+{
+ set_bit(IMG_REQ_LAYERED, &img_request->flags);
+ smp_mb();
+}
+
+static bool img_request_layered_test(struct rbd_img_request *img_request)
{
smp_mb();
- return atomic_read(&obj_request->done) != 0;
+ return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
+ u64 xferred = obj_request->xferred;
+ u64 length = obj_request->length;
+
dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
obj_request, obj_request->img_request, obj_request->result,
- obj_request->xferred, obj_request->length);
+ xferred, length);
/*
* ENOENT means a hole in the image. We zero-fill the
* entire length of the request. A short read also implies
@@ -1277,15 +1506,20 @@ rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
* update the xferred count to indicate the whole request
* was satisfied.
*/
- BUG_ON(obj_request->type != OBJ_REQUEST_BIO);
+ rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
if (obj_request->result == -ENOENT) {
- zero_bio_chain(obj_request->bio_list, 0);
+ if (obj_request->type == OBJ_REQUEST_BIO)
+ zero_bio_chain(obj_request->bio_list, 0);
+ else
+ zero_pages(obj_request->pages, 0, length);
obj_request->result = 0;
- obj_request->xferred = obj_request->length;
- } else if (obj_request->xferred < obj_request->length &&
- !obj_request->result) {
- zero_bio_chain(obj_request->bio_list, obj_request->xferred);
- obj_request->xferred = obj_request->length;
+ obj_request->xferred = length;
+ } else if (xferred < length && !obj_request->result) {
+ if (obj_request->type == OBJ_REQUEST_BIO)
+ zero_bio_chain(obj_request->bio_list, xferred);
+ else
+ zero_pages(obj_request->pages, xferred, length);
+ obj_request->xferred = length;
}
obj_request_done_set(obj_request);
}
@@ -1308,9 +1542,23 @@ static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
- dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request,
- obj_request->result, obj_request->xferred, obj_request->length);
- if (obj_request->img_request)
+ struct rbd_img_request *img_request = NULL;
+ struct rbd_device *rbd_dev = NULL;
+ bool layered = false;
+
+ if (obj_request_img_data_test(obj_request)) {
+ img_request = obj_request->img_request;
+ layered = img_request && img_request_layered_test(img_request);
+ rbd_dev = img_request->rbd_dev;
+ }
+
+ dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
+ obj_request, img_request, obj_request->result,
+ obj_request->xferred, obj_request->length);
+ if (layered && obj_request->result == -ENOENT &&
+ obj_request->img_offset < rbd_dev->parent_overlap)
+ rbd_img_parent_read(obj_request);
+ else if (img_request)
rbd_img_obj_request_read_callback(obj_request);
else
obj_request_done_set(obj_request);
@@ -1321,9 +1569,8 @@ static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
dout("%s: obj %p result %d %llu\n", __func__, obj_request,
obj_request->result, obj_request->length);
/*
- * There is no such thing as a successful short write.
- * Our xferred value is the number of bytes transferred
- * back. Set it to our originally-requested length.
+ * There is no such thing as a successful short write. Set
+ * the xferred count to our originally-requested length.
*/
obj_request->xferred = obj_request->length;
obj_request_done_set(obj_request);
@@ -1347,22 +1594,25 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
rbd_assert(osd_req == obj_request->osd_req);
- rbd_assert(!!obj_request->img_request ^
- (obj_request->which == BAD_WHICH));
+ if (obj_request_img_data_test(obj_request)) {
+ rbd_assert(obj_request->img_request);
+ rbd_assert(obj_request->which != BAD_WHICH);
+ } else {
+ rbd_assert(obj_request->which == BAD_WHICH);
+ }
if (osd_req->r_result < 0)
obj_request->result = osd_req->r_result;
- obj_request->version = le64_to_cpu(osd_req->r_reassert_version.version);
- WARN_ON(osd_req->r_num_ops != 1); /* For now */
+ BUG_ON(osd_req->r_num_ops > 2);
/*
* We support a 64-bit length, but ultimately it has to be
* passed to blk_end_request(), which takes an unsigned int.
*/
obj_request->xferred = osd_req->r_reply_op_len[0];
- rbd_assert(obj_request->xferred < (u64) UINT_MAX);
- opcode = osd_req->r_request_ops[0].op;
+ rbd_assert(obj_request->xferred < (u64)UINT_MAX);
+ opcode = osd_req->r_ops[0].op;
switch (opcode) {
case CEPH_OSD_OP_READ:
rbd_osd_read_callback(obj_request);
@@ -1388,28 +1638,49 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
rbd_obj_request_complete(obj_request);
}
+static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
+{
+ struct rbd_img_request *img_request = obj_request->img_request;
+ struct ceph_osd_request *osd_req = obj_request->osd_req;
+ u64 snap_id;
+
+ rbd_assert(osd_req != NULL);
+
+ snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
+ ceph_osdc_build_request(osd_req, obj_request->offset,
+ NULL, snap_id, NULL);
+}
+
+static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
+{
+ struct rbd_img_request *img_request = obj_request->img_request;
+ struct ceph_osd_request *osd_req = obj_request->osd_req;
+ struct ceph_snap_context *snapc;
+ struct timespec mtime = CURRENT_TIME;
+
+ rbd_assert(osd_req != NULL);
+
+ snapc = img_request ? img_request->snapc : NULL;
+ ceph_osdc_build_request(osd_req, obj_request->offset,
+ snapc, CEPH_NOSNAP, &mtime);
+}
+
static struct ceph_osd_request *rbd_osd_req_create(
struct rbd_device *rbd_dev,
bool write_request,
- struct rbd_obj_request *obj_request,
- struct ceph_osd_req_op *op)
+ struct rbd_obj_request *obj_request)
{
- struct rbd_img_request *img_request = obj_request->img_request;
struct ceph_snap_context *snapc = NULL;
struct ceph_osd_client *osdc;
struct ceph_osd_request *osd_req;
- struct timespec now;
- struct timespec *mtime;
- u64 snap_id = CEPH_NOSNAP;
- u64 offset = obj_request->offset;
- u64 length = obj_request->length;
- if (img_request) {
- rbd_assert(img_request->write_request == write_request);
- if (img_request->write_request)
+ if (obj_request_img_data_test(obj_request)) {
+ struct rbd_img_request *img_request = obj_request->img_request;
+
+ rbd_assert(write_request ==
+ img_request_write_test(img_request));
+ if (write_request)
snapc = img_request->snapc;
- else
- snap_id = img_request->snap_id;
}
/* Allocate and initialize the request, for the single op */
@@ -1419,31 +1690,10 @@ static struct ceph_osd_request *rbd_osd_req_create(
if (!osd_req)
return NULL; /* ENOMEM */
- rbd_assert(obj_request_type_valid(obj_request->type));
- switch (obj_request->type) {
- case OBJ_REQUEST_NODATA:
- break; /* Nothing to do */
- case OBJ_REQUEST_BIO:
- rbd_assert(obj_request->bio_list != NULL);
- osd_req->r_bio = obj_request->bio_list;
- break;
- case OBJ_REQUEST_PAGES:
- osd_req->r_pages = obj_request->pages;
- osd_req->r_num_pages = obj_request->page_count;
- osd_req->r_page_alignment = offset & ~PAGE_MASK;
- break;
- }
-
- if (write_request) {
+ if (write_request)
osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
- now = CURRENT_TIME;
- mtime = &now;
- } else {
+ else
osd_req->r_flags = CEPH_OSD_FLAG_READ;
- mtime = NULL; /* not needed for reads */
- offset = 0; /* These are not used... */
- length = 0; /* ...for osd read requests */
- }
osd_req->r_callback = rbd_osd_req_callback;
osd_req->r_priv = obj_request;
@@ -1454,14 +1704,51 @@ static struct ceph_osd_request *rbd_osd_req_create(
osd_req->r_file_layout = rbd_dev->layout; /* struct */
- /* osd_req will get its own reference to snapc (if non-null) */
+ return osd_req;
+}
+
+/*
+ * Create a copyup osd request based on the information in the
+ * object request supplied. A copyup request has two osd ops,
+ * a copyup method call, and a "normal" write request.
+ */
+static struct ceph_osd_request *
+rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
+{
+ struct rbd_img_request *img_request;
+ struct ceph_snap_context *snapc;
+ struct rbd_device *rbd_dev;
+ struct ceph_osd_client *osdc;
+ struct ceph_osd_request *osd_req;
+
+ rbd_assert(obj_request_img_data_test(obj_request));
+ img_request = obj_request->img_request;
+ rbd_assert(img_request);
+ rbd_assert(img_request_write_test(img_request));
- ceph_osdc_build_request(osd_req, offset, length, 1, op,
- snapc, snap_id, mtime);
+ /* Allocate and initialize the request, for the two ops */
+
+ snapc = img_request->snapc;
+ rbd_dev = img_request->rbd_dev;
+ osdc = &rbd_dev->rbd_client->client->osdc;
+ osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
+ if (!osd_req)
+ return NULL; /* ENOMEM */
+
+ osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
+ osd_req->r_callback = rbd_osd_req_callback;
+ osd_req->r_priv = obj_request;
+
+ osd_req->r_oid_len = strlen(obj_request->object_name);
+ rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
+ memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
+
+ osd_req->r_file_layout = rbd_dev->layout; /* struct */
return osd_req;
}
+
static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
ceph_osdc_put_request(osd_req);
@@ -1480,18 +1767,23 @@ static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
rbd_assert(obj_request_type_valid(type));
size = strlen(object_name) + 1;
- obj_request = kzalloc(sizeof (*obj_request) + size, GFP_KERNEL);
- if (!obj_request)
+ name = kmalloc(size, GFP_KERNEL);
+ if (!name)
+ return NULL;
+
+ obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
+ if (!obj_request) {
+ kfree(name);
return NULL;
+ }
- name = (char *)(obj_request + 1);
obj_request->object_name = memcpy(name, object_name, size);
obj_request->offset = offset;
obj_request->length = length;
+ obj_request->flags = 0;
obj_request->which = BAD_WHICH;
obj_request->type = type;
INIT_LIST_HEAD(&obj_request->links);
- obj_request_done_init(obj_request);
init_completion(&obj_request->completion);
kref_init(&obj_request->kref);
@@ -1530,7 +1822,9 @@ static void rbd_obj_request_destroy(struct kref *kref)
break;
}
- kfree(obj_request);
+ kfree(obj_request->object_name);
+ obj_request->object_name = NULL;
+ kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/*
@@ -1541,37 +1835,40 @@ static void rbd_obj_request_destroy(struct kref *kref)
static struct rbd_img_request *rbd_img_request_create(
struct rbd_device *rbd_dev,
u64 offset, u64 length,
- bool write_request)
+ bool write_request,
+ bool child_request)
{
struct rbd_img_request *img_request;
- struct ceph_snap_context *snapc = NULL;
- img_request = kmalloc(sizeof (*img_request), GFP_ATOMIC);
+ img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
if (!img_request)
return NULL;
if (write_request) {
down_read(&rbd_dev->header_rwsem);
- snapc = ceph_get_snap_context(rbd_dev->header.snapc);
+ ceph_get_snap_context(rbd_dev->header.snapc);
up_read(&rbd_dev->header_rwsem);
- if (WARN_ON(!snapc)) {
- kfree(img_request);
- return NULL; /* Shouldn't happen */
- }
}
img_request->rq = NULL;
img_request->rbd_dev = rbd_dev;
img_request->offset = offset;
img_request->length = length;
- img_request->write_request = write_request;
- if (write_request)
- img_request->snapc = snapc;
- else
+ img_request->flags = 0;
+ if (write_request) {
+ img_request_write_set(img_request);
+ img_request->snapc = rbd_dev->header.snapc;
+ } else {
img_request->snap_id = rbd_dev->spec->snap_id;
+ }
+ if (child_request)
+ img_request_child_set(img_request);
+ if (rbd_dev->parent_spec)
+ img_request_layered_set(img_request);
spin_lock_init(&img_request->completion_lock);
img_request->next_completion = 0;
img_request->callback = NULL;
+ img_request->result = 0;
img_request->obj_request_count = 0;
INIT_LIST_HEAD(&img_request->obj_requests);
kref_init(&img_request->kref);
@@ -1600,78 +1897,204 @@ static void rbd_img_request_destroy(struct kref *kref)
rbd_img_obj_request_del(img_request, obj_request);
rbd_assert(img_request->obj_request_count == 0);
- if (img_request->write_request)
+ if (img_request_write_test(img_request))
ceph_put_snap_context(img_request->snapc);
- kfree(img_request);
+ if (img_request_child_test(img_request))
+ rbd_obj_request_put(img_request->obj_request);
+
+ kmem_cache_free(rbd_img_request_cache, img_request);
+}
+
+static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
+{
+ struct rbd_img_request *img_request;
+ unsigned int xferred;
+ int result;
+ bool more;
+
+ rbd_assert(obj_request_img_data_test(obj_request));
+ img_request = obj_request->img_request;
+
+ rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
+ xferred = (unsigned int)obj_request->xferred;
+ result = obj_request->result;
+ if (result) {
+ struct rbd_device *rbd_dev = img_request->rbd_dev;
+
+ rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
+ img_request_write_test(img_request) ? "write" : "read",
+ obj_request->length, obj_request->img_offset,
+ obj_request->offset);
+ rbd_warn(rbd_dev, " result %d xferred %x\n",
+ result, xferred);
+ if (!img_request->result)
+ img_request->result = result;
+ }
+
+ /* Image object requests don't own their page array */
+
+ if (obj_request->type == OBJ_REQUEST_PAGES) {
+ obj_request->pages = NULL;
+ obj_request->page_count = 0;
+ }
+
+ if (img_request_child_test(img_request)) {
+ rbd_assert(img_request->obj_request != NULL);
+ more = obj_request->which < img_request->obj_request_count - 1;
+ } else {
+ rbd_assert(img_request->rq != NULL);
+ more = blk_end_request(img_request->rq, result, xferred);
+ }
+
+ return more;
}
-static int rbd_img_request_fill_bio(struct rbd_img_request *img_request,
- struct bio *bio_list)
+static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
+{
+ struct rbd_img_request *img_request;
+ u32 which = obj_request->which;
+ bool more = true;
+
+ rbd_assert(obj_request_img_data_test(obj_request));
+ img_request = obj_request->img_request;
+
+ dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
+ rbd_assert(img_request != NULL);
+ rbd_assert(img_request->obj_request_count > 0);
+ rbd_assert(which != BAD_WHICH);
+ rbd_assert(which < img_request->obj_request_count);
+ rbd_assert(which >= img_request->next_completion);
+
+ spin_lock_irq(&img_request->completion_lock);
+ if (which != img_request->next_completion)
+ goto out;
+
+ for_each_obj_request_from(img_request, obj_request) {
+ rbd_assert(more);
+ rbd_assert(which < img_request->obj_request_count);
+
+ if (!obj_request_done_test(obj_request))
+ break;
+ more = rbd_img_obj_end_request(obj_request);
+ which++;
+ }
+
+ rbd_assert(more ^ (which == img_request->obj_request_count));
+ img_request->next_completion = which;
+out:
+ spin_unlock_irq(&img_request->completion_lock);
+
+ if (!more)
+ rbd_img_request_complete(img_request);
+}
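
rbd_img_obj_callback() thus completes object requests strictly in list order, regardless of the order in which their OSD replies arrive. The rule can be modeled compactly (a sketch, not driver code):

    #include <stdbool.h>

    /*
     * A completion for slot "which" advances next only when every
     * earlier slot is already done; otherwise it just records itself.
     */
    static void complete_in_order(bool *done, unsigned int count,
                                  unsigned int *next, unsigned int which)
    {
            done[which] = true;
            if (which != *next)
                    return;         /* an earlier request is still pending */
            while (*next < count && done[*next])
                    (*next)++;      /* finish each now-contiguous request */
    }
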
+
+/*
+ * Split up an image request into one or more object requests, each
+ * to a different object. The "type" parameter indicates whether
+ * "data_desc" is the pointer to the head of a list of bio
+ * structures, or the base of a page array. In either case this
+ * function assumes data_desc describes memory sufficient to hold
+ * all data described by the image request.
+ */
+static int rbd_img_request_fill(struct rbd_img_request *img_request,
+ enum obj_request_type type,
+ void *data_desc)
{
struct rbd_device *rbd_dev = img_request->rbd_dev;
struct rbd_obj_request *obj_request = NULL;
struct rbd_obj_request *next_obj_request;
- unsigned int bio_offset;
- u64 image_offset;
+ bool write_request = img_request_write_test(img_request);
+ struct bio *bio_list;
+ unsigned int bio_offset = 0;
+ struct page **pages;
+ u64 img_offset;
u64 resid;
u16 opcode;
- dout("%s: img %p bio %p\n", __func__, img_request, bio_list);
+ dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
+ (int)type, data_desc);
- opcode = img_request->write_request ? CEPH_OSD_OP_WRITE
- : CEPH_OSD_OP_READ;
- bio_offset = 0;
- image_offset = img_request->offset;
- rbd_assert(image_offset == bio_list->bi_sector << SECTOR_SHIFT);
+ opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
+ img_offset = img_request->offset;
resid = img_request->length;
rbd_assert(resid > 0);
+
+ if (type == OBJ_REQUEST_BIO) {
+ bio_list = data_desc;
+ rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
+ } else {
+ rbd_assert(type == OBJ_REQUEST_PAGES);
+ pages = data_desc;
+ }
+
while (resid) {
+ struct ceph_osd_request *osd_req;
const char *object_name;
- unsigned int clone_size;
- struct ceph_osd_req_op *op;
u64 offset;
u64 length;
- object_name = rbd_segment_name(rbd_dev, image_offset);
+ object_name = rbd_segment_name(rbd_dev, img_offset);
if (!object_name)
goto out_unwind;
- offset = rbd_segment_offset(rbd_dev, image_offset);
- length = rbd_segment_length(rbd_dev, image_offset, resid);
+ offset = rbd_segment_offset(rbd_dev, img_offset);
+ length = rbd_segment_length(rbd_dev, img_offset, resid);
obj_request = rbd_obj_request_create(object_name,
- offset, length,
- OBJ_REQUEST_BIO);
- kfree(object_name); /* object request has its own copy */
+ offset, length, type);
+ /* object request has its own copy of the object name */
+ rbd_segment_name_free(object_name);
if (!obj_request)
goto out_unwind;
- rbd_assert(length <= (u64) UINT_MAX);
- clone_size = (unsigned int) length;
- obj_request->bio_list = bio_chain_clone_range(&bio_list,
- &bio_offset, clone_size,
- GFP_ATOMIC);
- if (!obj_request->bio_list)
- goto out_partial;
+ if (type == OBJ_REQUEST_BIO) {
+ unsigned int clone_size;
+
+ rbd_assert(length <= (u64)UINT_MAX);
+ clone_size = (unsigned int)length;
+ obj_request->bio_list =
+ bio_chain_clone_range(&bio_list,
+ &bio_offset,
+ clone_size,
+ GFP_ATOMIC);
+ if (!obj_request->bio_list)
+ goto out_partial;
+ } else {
+ unsigned int page_count;
+
+ obj_request->pages = pages;
+ page_count = (u32)calc_pages_for(offset, length);
+ obj_request->page_count = page_count;
+ if ((offset + length) & ~PAGE_MASK)
+ page_count--; /* more on last page */
+ pages += page_count;
+ }
- /*
- * Build up the op to use in building the osd
- * request. Note that the contents of the op are
- * copied by rbd_osd_req_create().
- */
- op = rbd_osd_req_op_create(opcode, offset, length);
- if (!op)
- goto out_partial;
- obj_request->osd_req = rbd_osd_req_create(rbd_dev,
- img_request->write_request,
- obj_request, op);
- rbd_osd_req_op_destroy(op);
- if (!obj_request->osd_req)
+ osd_req = rbd_osd_req_create(rbd_dev, write_request,
+ obj_request);
+ if (!osd_req)
goto out_partial;
- /* status and version are initially zero-filled */
+ obj_request->osd_req = osd_req;
+ obj_request->callback = rbd_img_obj_callback;
+
+ osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
+ 0, 0);
+ if (type == OBJ_REQUEST_BIO)
+ osd_req_op_extent_osd_data_bio(osd_req, 0,
+ obj_request->bio_list, length);
+ else
+ osd_req_op_extent_osd_data_pages(osd_req, 0,
+ obj_request->pages, length,
+ offset & ~PAGE_MASK, false, false);
+ if (write_request)
+ rbd_osd_req_format_write(obj_request);
+ else
+ rbd_osd_req_format_read(obj_request);
+
+ obj_request->img_offset = img_offset;
rbd_img_obj_request_add(img_request, obj_request);
- image_offset += length;
+ img_offset += length;
resid -= length;
}
@@ -1686,61 +2109,389 @@ out_unwind:
return -ENOMEM;
}
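
The object boundaries used by rbd_img_request_fill() come from rbd_segment_name()/rbd_segment_offset()/rbd_segment_length(); for 2^obj_order-byte objects the arithmetic reduces to the following sketch:

    #include <stdint.h>

    static void segment_of(uint64_t img_offset, uint64_t resid, int obj_order,
                           uint64_t *seg, uint64_t *off, uint64_t *len)
    {
            uint64_t seg_size = (uint64_t)1 << obj_order;

            *seg = img_offset >> obj_order;         /* object index */
            *off = img_offset & (seg_size - 1);     /* offset within object */
            *len = seg_size - *off;                 /* bytes to object end */
            if (*len > resid)
                    *len = resid;
    }
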
-static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
+static void
+rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request;
- u32 which = obj_request->which;
- bool more = true;
+ struct rbd_device *rbd_dev;
+ u64 length;
+ u32 page_count;
+ rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
+ rbd_assert(obj_request_img_data_test(obj_request));
img_request = obj_request->img_request;
+ rbd_assert(img_request);
- dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
+ rbd_dev = img_request->rbd_dev;
+ rbd_assert(rbd_dev);
+ length = (u64)1 << rbd_dev->header.obj_order;
+ page_count = (u32)calc_pages_for(0, length);
+
+ rbd_assert(obj_request->copyup_pages);
+ ceph_release_page_vector(obj_request->copyup_pages, page_count);
+ obj_request->copyup_pages = NULL;
+
+ /*
+ * We want the transfer count to reflect the size of the
+ * original write request. There is no such thing as a
+ * successful short write, so if the request was successful
+ * we can just set it to the originally-requested length.
+ */
+ if (!obj_request->result)
+ obj_request->xferred = obj_request->length;
+
+ /* Finish up with the normal image object callback */
+
+ rbd_img_obj_callback(obj_request);
+}
+
+static void
+rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
+{
+ struct rbd_obj_request *orig_request;
+ struct ceph_osd_request *osd_req;
+ struct ceph_osd_client *osdc;
+ struct rbd_device *rbd_dev;
+ struct page **pages;
+ int result;
+ u64 obj_size;
+ u64 xferred;
+
+ rbd_assert(img_request_child_test(img_request));
+
+ /* First get what we need from the image request */
+
+ pages = img_request->copyup_pages;
+ rbd_assert(pages != NULL);
+ img_request->copyup_pages = NULL;
+
+ orig_request = img_request->obj_request;
+ rbd_assert(orig_request != NULL);
+ rbd_assert(orig_request->type == OBJ_REQUEST_BIO);
+ result = img_request->result;
+ obj_size = img_request->length;
+ xferred = img_request->xferred;
+
+ rbd_dev = img_request->rbd_dev;
+ rbd_assert(rbd_dev);
+ rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order);
+
+ rbd_img_request_put(img_request);
+
+ if (result)
+ goto out_err;
+
+ /* Allocate the new copyup osd request for the original request */
+
+ result = -ENOMEM;
+ rbd_assert(!orig_request->osd_req);
+ osd_req = rbd_osd_req_create_copyup(orig_request);
+ if (!osd_req)
+ goto out_err;
+ orig_request->osd_req = osd_req;
+ orig_request->copyup_pages = pages;
+
+ /* Initialize the copyup op */
+
+ osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
+ osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0,
+ false, false);
+
+ /* Then the original write request op */
+
+ osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
+ orig_request->offset,
+ orig_request->length, 0, 0);
+ osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list,
+ orig_request->length);
+
+ rbd_osd_req_format_write(orig_request);
+
+ /* All set, send it off. */
+
+ orig_request->callback = rbd_img_obj_copyup_callback;
+ osdc = &rbd_dev->rbd_client->client->osdc;
+ result = rbd_obj_request_submit(osdc, orig_request);
+ if (!result)
+ return;
+out_err:
+ /* Record the error code and complete the request */
+
+ orig_request->result = result;
+ orig_request->xferred = 0;
+ obj_request_done_set(orig_request);
+ rbd_obj_request_complete(orig_request);
+}
+
+/*
+ * Read from the parent image the range of data that covers the
+ * entire target of the given object request. This is used for
+ * satisfying a layered image write request when the target of an
+ * object request from the image request does not exist.
+ *
+ * A page array big enough to hold the returned data is allocated
+ * and supplied to rbd_img_request_fill() as the "data descriptor."
+ * When the read completes, this page array will be transferred to
+ * the original object request for the copyup operation.
+ *
+ * If an error occurs, record it as the result of the original
+ * object request and mark it done so it gets completed.
+ */
+static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
+{
+ struct rbd_img_request *img_request = NULL;
+ struct rbd_img_request *parent_request = NULL;
+ struct rbd_device *rbd_dev;
+ u64 img_offset;
+ u64 length;
+ struct page **pages = NULL;
+ u32 page_count;
+ int result;
+
+ rbd_assert(obj_request_img_data_test(obj_request));
+ rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
+
+ img_request = obj_request->img_request;
rbd_assert(img_request != NULL);
- rbd_assert(img_request->rq != NULL);
- rbd_assert(img_request->obj_request_count > 0);
- rbd_assert(which != BAD_WHICH);
- rbd_assert(which < img_request->obj_request_count);
- rbd_assert(which >= img_request->next_completion);
+ rbd_dev = img_request->rbd_dev;
+ rbd_assert(rbd_dev->parent != NULL);
- spin_lock_irq(&img_request->completion_lock);
- if (which != img_request->next_completion)
- goto out;
+ /*
+ * First things first. The original osd request is of no
+	 * use to us any more; we'll need a new one that can hold
+ * the two ops in a copyup request. We'll get that later,
+ * but for now we can release the old one.
+ */
+ rbd_osd_req_destroy(obj_request->osd_req);
+ obj_request->osd_req = NULL;
- for_each_obj_request_from(img_request, obj_request) {
- unsigned int xferred;
- int result;
+ /*
+ * Determine the byte range covered by the object in the
+ * child image to which the original request was to be sent.
+ */
+ img_offset = obj_request->img_offset - obj_request->offset;
+ length = (u64)1 << rbd_dev->header.obj_order;
- rbd_assert(more);
- rbd_assert(which < img_request->obj_request_count);
+ /*
+ * There is no defined parent data beyond the parent
+ * overlap, so limit what we read at that boundary if
+ * necessary.
+ */
+ if (img_offset + length > rbd_dev->parent_overlap) {
+ rbd_assert(img_offset < rbd_dev->parent_overlap);
+ length = rbd_dev->parent_overlap - img_offset;
+ }
- if (!obj_request_done_test(obj_request))
- break;
+ /*
+ * Allocate a page array big enough to receive the data read
+ * from the parent.
+ */
+ page_count = (u32)calc_pages_for(0, length);
+ pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ if (IS_ERR(pages)) {
+ result = PTR_ERR(pages);
+ pages = NULL;
+ goto out_err;
+ }
- rbd_assert(obj_request->xferred <= (u64) UINT_MAX);
- xferred = (unsigned int) obj_request->xferred;
- result = (int) obj_request->result;
- if (result)
- rbd_warn(NULL, "obj_request %s result %d xferred %u\n",
- img_request->write_request ? "write" : "read",
- result, xferred);
+ result = -ENOMEM;
+ parent_request = rbd_img_request_create(rbd_dev->parent,
+ img_offset, length,
+ false, true);
+ if (!parent_request)
+ goto out_err;
+ rbd_obj_request_get(obj_request);
+ parent_request->obj_request = obj_request;
- more = blk_end_request(img_request->rq, result, xferred);
- which++;
+ result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
+ if (result)
+ goto out_err;
+ parent_request->copyup_pages = pages;
+
+ parent_request->callback = rbd_img_obj_parent_read_full_callback;
+ result = rbd_img_request_submit(parent_request);
+ if (!result)
+ return 0;
+
+ parent_request->copyup_pages = NULL;
+ parent_request->obj_request = NULL;
+ rbd_obj_request_put(obj_request);
+out_err:
+ if (pages)
+ ceph_release_page_vector(pages, page_count);
+ if (parent_request)
+ rbd_img_request_put(parent_request);
+ obj_request->result = result;
+ obj_request->xferred = 0;
+ obj_request_done_set(obj_request);
+
+ return result;
+}
+
+static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
+{
+ struct rbd_obj_request *orig_request;
+ int result;
+
+ rbd_assert(!obj_request_img_data_test(obj_request));
+
+ /*
+ * All we need from the object request is the original
+ * request and the result of the STAT op. Grab those, then
+ * we're done with the request.
+ */
+ orig_request = obj_request->obj_request;
+ obj_request->obj_request = NULL;
+ rbd_assert(orig_request);
+ rbd_assert(orig_request->img_request);
+
+ result = obj_request->result;
+ obj_request->result = 0;
+
+ dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
+ obj_request, orig_request, result,
+ obj_request->xferred, obj_request->length);
+ rbd_obj_request_put(obj_request);
+
+ rbd_assert(orig_request);
+ rbd_assert(orig_request->img_request);
+
+ /*
+ * Our only purpose here is to determine whether the object
+ * exists, and we don't want to treat the non-existence as
+ * an error. If something else comes back, transfer the
+ * error to the original request and complete it now.
+ */
+ if (!result) {
+ obj_request_existence_set(orig_request, true);
+ } else if (result == -ENOENT) {
+ obj_request_existence_set(orig_request, false);
+ } else if (result) {
+ orig_request->result = result;
+ goto out;
}
- rbd_assert(more ^ (which == img_request->obj_request_count));
- img_request->next_completion = which;
+ /*
+ * Resubmit the original request now that we have recorded
+ * whether the target object exists.
+ */
+ orig_request->result = rbd_img_obj_request_submit(orig_request);
out:
- spin_unlock_irq(&img_request->completion_lock);
+ if (orig_request->result)
+ rbd_obj_request_complete(orig_request);
+ rbd_obj_request_put(orig_request);
+}
- if (!more)
- rbd_img_request_complete(img_request);
+static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
+{
+ struct rbd_obj_request *stat_request;
+ struct rbd_device *rbd_dev;
+ struct ceph_osd_client *osdc;
+ struct page **pages = NULL;
+ u32 page_count;
+ size_t size;
+ int ret;
+
+ /*
+ * The response data for a STAT call consists of:
+ * le64 length;
+ * struct {
+ * le32 tv_sec;
+ * le32 tv_nsec;
+ * } mtime;
+ */
+ size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
+ page_count = (u32)calc_pages_for(0, size);
+ pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ ret = -ENOMEM;
+ stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
+ OBJ_REQUEST_PAGES);
+ if (!stat_request)
+ goto out;
+
+ rbd_obj_request_get(obj_request);
+ stat_request->obj_request = obj_request;
+ stat_request->pages = pages;
+ stat_request->page_count = page_count;
+
+ rbd_assert(obj_request->img_request);
+ rbd_dev = obj_request->img_request->rbd_dev;
+ stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
+ stat_request);
+ if (!stat_request->osd_req)
+ goto out;
+ stat_request->callback = rbd_img_obj_exists_callback;
+
+ osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
+ osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
+ false, false);
+ rbd_osd_req_format_read(stat_request);
+
+ osdc = &rbd_dev->rbd_client->client->osdc;
+ ret = rbd_obj_request_submit(osdc, stat_request);
+out:
+ if (ret)
+ rbd_obj_request_put(obj_request);
+
+ return ret;
+}
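
The STAT reply layout documented above can be decoded with explicit little-endian reads; a hypothetical userspace decoder:

    #include <stdint.h>

    struct stat_reply {             /* hypothetical decoded form */
            uint64_t length;
            uint32_t tv_sec;
            uint32_t tv_nsec;
    };

    static uint64_t get_le64(const uint8_t *p)
    {
            uint64_t v = 0;
            int i;

            for (i = 7; i >= 0; i--)
                    v = (v << 8) | p[i];
            return v;
    }

    static uint32_t get_le32(const uint8_t *p)
    {
            return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
                   ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    static void decode_stat_reply(const uint8_t *buf, struct stat_reply *r)
    {
            r->length = get_le64(buf);
            r->tv_sec = get_le32(buf + 8);
            r->tv_nsec = get_le32(buf + 12);
    }
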
+
+static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
+{
+ struct rbd_img_request *img_request;
+ struct rbd_device *rbd_dev;
+ bool known;
+
+ rbd_assert(obj_request_img_data_test(obj_request));
+
+ img_request = obj_request->img_request;
+ rbd_assert(img_request);
+ rbd_dev = img_request->rbd_dev;
+
+ /*
+ * Only writes to layered images need special handling.
+ * Reads and non-layered writes are simple object requests.
+ * Layered writes that start beyond the end of the overlap
+ * with the parent have no parent data, so they too are
+ * simple object requests. Finally, if the target object is
+ * known to already exist, its parent data has already been
+ * copied, so a write to the object can also be handled as a
+ * simple object request.
+ */
+ if (!img_request_write_test(img_request) ||
+ !img_request_layered_test(img_request) ||
+ rbd_dev->parent_overlap <= obj_request->img_offset ||
+ ((known = obj_request_known_test(obj_request)) &&
+ obj_request_exists_test(obj_request))) {
+
+ struct rbd_device *rbd_dev;
+ struct ceph_osd_client *osdc;
+
+ rbd_dev = obj_request->img_request->rbd_dev;
+ osdc = &rbd_dev->rbd_client->client->osdc;
+
+ return rbd_obj_request_submit(osdc, obj_request);
+ }
+
+ /*
+ * It's a layered write. The target object might exist but
+ * we may not know that yet. If we know it doesn't exist,
+ * start by reading the data for the full target object from
+ * the parent so we can use it for a copyup to the target.
+ */
+ if (known)
+ return rbd_img_obj_parent_read_full(obj_request);
+
+ /* We don't know whether the target exists. Go find out. */
+
+ return rbd_img_obj_exists_submit(obj_request);
}
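
Restated, the routing decision above is a three-way choice; a hedged sketch using the flag values defined earlier:

    #include <stdbool.h>

    enum route { SIMPLE_SUBMIT, COPYUP_FROM_PARENT, EXISTENCE_CHECK };

    /* in_overlap: obj_request->img_offset < rbd_dev->parent_overlap */
    static enum route choose_route(bool write, bool layered, bool in_overlap,
                                   bool known, bool exists)
    {
            if (!write || !layered || !in_overlap || (known && exists))
                    return SIMPLE_SUBMIT;
            return known ? COPYUP_FROM_PARENT : EXISTENCE_CHECK;
    }
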
static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
- struct rbd_device *rbd_dev = img_request->rbd_dev;
- struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
struct rbd_obj_request *next_obj_request;
@@ -1748,27 +2499,105 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request)
for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
int ret;
- obj_request->callback = rbd_img_obj_callback;
- ret = rbd_obj_request_submit(osdc, obj_request);
+ ret = rbd_img_obj_request_submit(obj_request);
if (ret)
return ret;
- /*
- * The image request has its own reference to each
- * of its object requests, so we can safely drop the
- * initial one here.
- */
- rbd_obj_request_put(obj_request);
}
return 0;
}
-static int rbd_obj_notify_ack(struct rbd_device *rbd_dev,
- u64 ver, u64 notify_id)
+static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
{
struct rbd_obj_request *obj_request;
- struct ceph_osd_req_op *op;
- struct ceph_osd_client *osdc;
+ struct rbd_device *rbd_dev;
+ u64 obj_end;
+
+ rbd_assert(img_request_child_test(img_request));
+
+ obj_request = img_request->obj_request;
+ rbd_assert(obj_request);
+ rbd_assert(obj_request->img_request);
+
+ obj_request->result = img_request->result;
+ if (obj_request->result)
+ goto out;
+
+ /*
+ * We need to zero anything beyond the parent overlap
+ * boundary. Since rbd_img_obj_request_read_callback()
+ * will zero anything beyond the end of a short read, an
+ * easy way to do this is to pretend the data from the
+ * parent came up short--ending at the overlap boundary.
+ */
+ rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
+ obj_end = obj_request->img_offset + obj_request->length;
+ rbd_dev = obj_request->img_request->rbd_dev;
+ if (obj_end > rbd_dev->parent_overlap) {
+ u64 xferred = 0;
+
+ if (obj_request->img_offset < rbd_dev->parent_overlap)
+ xferred = rbd_dev->parent_overlap -
+ obj_request->img_offset;
+
+ obj_request->xferred = min(img_request->xferred, xferred);
+ } else {
+ obj_request->xferred = img_request->xferred;
+ }
+out:
+ rbd_img_request_put(img_request);
+ rbd_img_obj_request_read_callback(obj_request);
+ rbd_obj_request_complete(obj_request);
+}
+
+static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
+{
+ struct rbd_device *rbd_dev;
+ struct rbd_img_request *img_request;
+ int result;
+
+ rbd_assert(obj_request_img_data_test(obj_request));
+ rbd_assert(obj_request->img_request != NULL);
+ rbd_assert(obj_request->result == (s32) -ENOENT);
+ rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
+
+ rbd_dev = obj_request->img_request->rbd_dev;
+ rbd_assert(rbd_dev->parent != NULL);
+ /* rbd_read_finish(obj_request, obj_request->length); */
+ img_request = rbd_img_request_create(rbd_dev->parent,
+ obj_request->img_offset,
+ obj_request->length,
+ false, true);
+ result = -ENOMEM;
+ if (!img_request)
+ goto out_err;
+
+ rbd_obj_request_get(obj_request);
+ img_request->obj_request = obj_request;
+
+ result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
+ obj_request->bio_list);
+ if (result)
+ goto out_err;
+
+ img_request->callback = rbd_img_parent_read_callback;
+ result = rbd_img_request_submit(img_request);
+ if (result)
+ goto out_err;
+
+ return;
+out_err:
+ if (img_request)
+ rbd_img_request_put(img_request);
+ obj_request->result = result;
+ obj_request->xferred = 0;
+ obj_request_done_set(obj_request);
+}
+
+static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
+{
+ struct rbd_obj_request *obj_request;
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
int ret;
obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
@@ -1777,17 +2606,15 @@ static int rbd_obj_notify_ack(struct rbd_device *rbd_dev,
return -ENOMEM;
ret = -ENOMEM;
- op = rbd_osd_req_op_create(CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver);
- if (!op)
- goto out;
- obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
- obj_request, op);
- rbd_osd_req_op_destroy(op);
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
if (!obj_request->osd_req)
goto out;
-
- osdc = &rbd_dev->rbd_client->client->osdc;
obj_request->callback = rbd_obj_request_put;
+
+ osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
+ notify_id, 0, 0);
+ rbd_osd_req_format_read(obj_request);
+
ret = rbd_obj_request_submit(osdc, obj_request);
out:
if (ret)
@@ -1799,21 +2626,16 @@ out:
static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
struct rbd_device *rbd_dev = (struct rbd_device *)data;
- u64 hver;
- int rc;
if (!rbd_dev)
return;
dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
- rbd_dev->header_name, (unsigned long long) notify_id,
- (unsigned int) opcode);
- rc = rbd_dev_refresh(rbd_dev, &hver);
- if (rc)
- rbd_warn(rbd_dev, "got notification but failed to "
- " update snaps: %d\n", rc);
+ rbd_dev->header_name, (unsigned long long)notify_id,
+ (unsigned int)opcode);
+ (void)rbd_dev_refresh(rbd_dev);
- rbd_obj_notify_ack(rbd_dev, hver, notify_id);
+ rbd_obj_notify_ack(rbd_dev, notify_id);
}
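
One design point in the callback above: the refresh result is discarded (the (void) cast) so the notification is always acknowledged; an unacknowledged notify would otherwise stay pending on the OSD. A minimal userspace model of that contract, with hypothetical names:

    #include <stdio.h>

    struct dev { unsigned long long size; };

    /* Stand-ins for rbd_dev_refresh() and rbd_obj_notify_ack(). */
    static int refresh(struct dev *d) { (void)d; return 0; }
    static void notify_ack(unsigned long long id) { printf("ack %llu\n", id); }

    static void watch_cb(struct dev *d, unsigned long long notify_id)
    {
        if (refresh(d))                 /* best effort: failure is logged... */
            fprintf(stderr, "refresh failed\n");
        notify_ack(notify_id);          /* ...but the ack is sent regardless */
    }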
/*
@@ -1824,7 +2646,6 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
- struct ceph_osd_req_op *op;
int ret;
rbd_assert(start ^ !!rbd_dev->watch_event);
@@ -1844,14 +2665,7 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
if (!obj_request)
goto out_cancel;
- op = rbd_osd_req_op_create(CEPH_OSD_OP_WATCH,
- rbd_dev->watch_event->cookie,
- rbd_dev->header.obj_version, start);
- if (!op)
- goto out_cancel;
- obj_request->osd_req = rbd_osd_req_create(rbd_dev, true,
- obj_request, op);
- rbd_osd_req_op_destroy(op);
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
if (!obj_request->osd_req)
goto out_cancel;
@@ -1860,6 +2674,11 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
else
ceph_osdc_unregister_linger_request(osdc,
rbd_dev->watch_request->osd_req);
+
+ osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
+ rbd_dev->watch_event->cookie, 0, start);
+ rbd_osd_req_format_write(obj_request);
+
ret = rbd_obj_request_submit(osdc, obj_request);
if (ret)
goto out_cancel;
@@ -1899,40 +2718,38 @@ out_cancel:
}
/*
- * Synchronous osd object method call
+ * Synchronous osd object method call. Returns the number of bytes
+ * returned in the inbound buffer, or a negative error code.
*/
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
const char *object_name,
const char *class_name,
const char *method_name,
- const char *outbound,
+ const void *outbound,
size_t outbound_size,
- char *inbound,
- size_t inbound_size,
- u64 *version)
+ void *inbound,
+ size_t inbound_size)
{
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
- struct ceph_osd_client *osdc;
- struct ceph_osd_req_op *op;
struct page **pages;
u32 page_count;
int ret;
/*
- * Method calls are ultimately read operations but they
- * don't involve object data (so no offset or length).
- * The result should placed into the inbound buffer
- * provided. They also supply outbound data--parameters for
- * the object method. Currently if this is present it will
- * be a snapshot id.
+ * Method calls are ultimately read operations. The result
+ * should be placed into the inbound buffer provided. They
+ * also supply outbound data--parameters for the object
+ * method. Currently if this is present it will be a
+ * snapshot id.
*/
- page_count = (u32) calc_pages_for(0, inbound_size);
+ page_count = (u32)calc_pages_for(0, inbound_size);
pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
if (IS_ERR(pages))
return PTR_ERR(pages);
ret = -ENOMEM;
- obj_request = rbd_obj_request_create(object_name, 0, 0,
+ obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
OBJ_REQUEST_PAGES);
if (!obj_request)
goto out;
@@ -1940,17 +2757,29 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
obj_request->pages = pages;
obj_request->page_count = page_count;
- op = rbd_osd_req_op_create(CEPH_OSD_OP_CALL, class_name,
- method_name, outbound, outbound_size);
- if (!op)
- goto out;
- obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
- obj_request, op);
- rbd_osd_req_op_destroy(op);
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
if (!obj_request->osd_req)
goto out;
- osdc = &rbd_dev->rbd_client->client->osdc;
+ osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
+ class_name, method_name);
+ if (outbound_size) {
+ struct ceph_pagelist *pagelist;
+
+ pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
+ if (!pagelist)
+ goto out;
+
+ ceph_pagelist_init(pagelist);
+ ceph_pagelist_append(pagelist, outbound, outbound_size);
+ osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
+ pagelist);
+ }
+ osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
+ obj_request->pages, inbound_size,
+ 0, false, false);
+ rbd_osd_req_format_read(obj_request);
+
ret = rbd_obj_request_submit(osdc, obj_request);
if (ret)
goto out;
@@ -1961,10 +2790,10 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
ret = obj_request->result;
if (ret < 0)
goto out;
- ret = 0;
+
+ rbd_assert(obj_request->xferred < (u64)INT_MAX);
+ ret = (int)obj_request->xferred;
ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
- if (version)
- *version = obj_request->version;
out:
if (obj_request)
rbd_obj_request_put(obj_request);
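
Callers of the reworked rbd_obj_method_sync() must now treat the return value as a byte count: negative means error, and anything shorter than the expected reply is rejected with -ERANGE. A standalone sketch of that calling pattern, with a stub in place of the real call (the packed reply layout mirrors the size_buf used by _rbd_dev_v2_snap_size() below):

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>

    struct size_reply { uint8_t order; uint64_t size; } __attribute__((packed));

    static int method_sync_stub(void *inbound, size_t inbound_size)
    {
        struct size_reply r = { 22, 1ULL << 30 };
        size_t n = inbound_size < sizeof(r) ? inbound_size : sizeof(r);

        memcpy(inbound, &r, n);
        return (int)n;                  /* bytes returned, like the new API */
    }

    static int get_size(uint64_t *size)
    {
        struct size_reply buf;
        int ret = method_sync_stub(&buf, sizeof(buf));

        if (ret < 0)
            return ret;
        if ((size_t)ret < sizeof(buf))  /* short reply: reject */
            return -ERANGE;
        *size = buf.size;               /* real code converts from little-endian */
        return 0;
    }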
@@ -2034,18 +2863,22 @@ static void rbd_request_fn(struct request_queue *q)
}
result = -EINVAL;
- if (WARN_ON(offset && length > U64_MAX - offset + 1))
+ if (offset && length > U64_MAX - offset + 1) {
+ rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
+ offset, length);
goto end_request; /* Shouldn't happen */
+ }
result = -ENOMEM;
img_request = rbd_img_request_create(rbd_dev, offset, length,
- write_request);
+ write_request, false);
if (!img_request)
goto end_request;
img_request->rq = rq;
- result = rbd_img_request_fill_bio(img_request, rq->bio);
+ result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
+ rq->bio);
if (!result)
result = rbd_img_request_submit(img_request);
if (result)
@@ -2053,8 +2886,10 @@ static void rbd_request_fn(struct request_queue *q)
end_request:
spin_lock_irq(q->queue_lock);
if (result < 0) {
- rbd_warn(rbd_dev, "obj_request %s result %d\n",
- write_request ? "write" : "read", result);
+ rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
+ write_request ? "write" : "read",
+ length, offset, result);
+
__blk_end_request_all(rq, result);
}
}
@@ -2113,22 +2948,22 @@ static void rbd_free_disk(struct rbd_device *rbd_dev)
if (!disk)
return;
- if (disk->flags & GENHD_FL_UP)
+ rbd_dev->disk = NULL;
+ if (disk->flags & GENHD_FL_UP) {
del_gendisk(disk);
- if (disk->queue)
- blk_cleanup_queue(disk->queue);
+ if (disk->queue)
+ blk_cleanup_queue(disk->queue);
+ }
put_disk(disk);
}
static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
const char *object_name,
- u64 offset, u64 length,
- char *buf, u64 *version)
+ u64 offset, u64 length, void *buf)
{
- struct ceph_osd_req_op *op;
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
- struct ceph_osd_client *osdc;
struct page **pages = NULL;
u32 page_count;
size_t size;
@@ -2148,16 +2983,19 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
obj_request->pages = pages;
obj_request->page_count = page_count;
- op = rbd_osd_req_op_create(CEPH_OSD_OP_READ, offset, length);
- if (!op)
- goto out;
- obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
- obj_request, op);
- rbd_osd_req_op_destroy(op);
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
if (!obj_request->osd_req)
goto out;
- osdc = &rbd_dev->rbd_client->client->osdc;
+ osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
+ offset, length, 0, 0);
+ osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
+ obj_request->pages,
+ obj_request->length,
+ obj_request->offset & ~PAGE_MASK,
+ false, false);
+ rbd_osd_req_format_read(obj_request);
+
ret = rbd_obj_request_submit(osdc, obj_request);
if (ret)
goto out;
@@ -2172,10 +3010,8 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
size = (size_t) obj_request->xferred;
ceph_copy_from_page_vector(pages, buf, 0, size);
- rbd_assert(size <= (size_t) INT_MAX);
- ret = (int) size;
- if (version)
- *version = obj_request->version;
+ rbd_assert(size <= (size_t)INT_MAX);
+ ret = (int)size;
out:
if (obj_request)
rbd_obj_request_put(obj_request);
@@ -2196,7 +3032,7 @@ out:
* Returns a pointer-coded errno if a failure occurs.
*/
static struct rbd_image_header_ondisk *
-rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
+rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
{
struct rbd_image_header_ondisk *ondisk = NULL;
u32 snap_count = 0;
@@ -2224,11 +3060,10 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
return ERR_PTR(-ENOMEM);
ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
- 0, size,
- (char *) ondisk, version);
+ 0, size, ondisk);
if (ret < 0)
goto out_err;
- if (WARN_ON((size_t) ret < size)) {
+ if ((size_t)ret < size) {
ret = -ENXIO;
rbd_warn(rbd_dev, "short header read (want %zd got %d)",
size, ret);
@@ -2260,46 +3095,36 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
struct rbd_image_header *header)
{
struct rbd_image_header_ondisk *ondisk;
- u64 ver = 0;
int ret;
- ondisk = rbd_dev_v1_header_read(rbd_dev, &ver);
+ ondisk = rbd_dev_v1_header_read(rbd_dev);
if (IS_ERR(ondisk))
return PTR_ERR(ondisk);
ret = rbd_header_from_disk(header, ondisk);
- if (ret >= 0)
- header->obj_version = ver;
kfree(ondisk);
return ret;
}
-static void rbd_remove_all_snaps(struct rbd_device *rbd_dev)
-{
- struct rbd_snap *snap;
- struct rbd_snap *next;
-
- list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node)
- rbd_remove_snap_dev(snap);
-}
-
static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
{
- sector_t size;
-
if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
return;
- size = (sector_t) rbd_dev->header.image_size / SECTOR_SIZE;
- dout("setting size to %llu sectors", (unsigned long long) size);
- rbd_dev->mapping.size = (u64) size;
- set_capacity(rbd_dev->disk, size);
+ if (rbd_dev->mapping.size != rbd_dev->header.image_size) {
+ sector_t size;
+
+ rbd_dev->mapping.size = rbd_dev->header.image_size;
+ size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
+ dout("setting size to %llu sectors", (unsigned long long)size);
+ set_capacity(rbd_dev->disk, size);
+ }
}
/*
* only read the first part of the ondisk header, without the snaps info
*/
-static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver)
+static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev)
{
int ret;
struct rbd_image_header h;
@@ -2320,37 +3145,61 @@ static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver)
/* osd requests may still refer to snapc */
ceph_put_snap_context(rbd_dev->header.snapc);
- if (hver)
- *hver = h.obj_version;
- rbd_dev->header.obj_version = h.obj_version;
rbd_dev->header.image_size = h.image_size;
rbd_dev->header.snapc = h.snapc;
rbd_dev->header.snap_names = h.snap_names;
rbd_dev->header.snap_sizes = h.snap_sizes;
/* Free the extra copy of the object prefix */
- WARN_ON(strcmp(rbd_dev->header.object_prefix, h.object_prefix));
+ if (strcmp(rbd_dev->header.object_prefix, h.object_prefix))
+ rbd_warn(rbd_dev, "object prefix changed (ignoring)");
kfree(h.object_prefix);
- ret = rbd_dev_snaps_update(rbd_dev);
- if (!ret)
- ret = rbd_dev_snaps_register(rbd_dev);
-
up_write(&rbd_dev->header_rwsem);
return ret;
}
-static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver)
+/*
+ * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
+ * has disappeared from the (just updated) snapshot context.
+ */
+static void rbd_exists_validate(struct rbd_device *rbd_dev)
+{
+ u64 snap_id;
+
+ if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
+ return;
+
+ snap_id = rbd_dev->spec->snap_id;
+ if (snap_id == CEPH_NOSNAP)
+ return;
+
+ if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
+ clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
+}
+
+static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
+ u64 image_size;
int ret;
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+ image_size = rbd_dev->header.image_size;
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
if (rbd_dev->image_format == 1)
- ret = rbd_dev_v1_refresh(rbd_dev, hver);
+ ret = rbd_dev_v1_refresh(rbd_dev);
else
- ret = rbd_dev_v2_refresh(rbd_dev, hver);
+ ret = rbd_dev_v2_refresh(rbd_dev);
+
+ /* If it's a mapped snapshot, validate its EXISTS flag */
+
+ rbd_exists_validate(rbd_dev);
mutex_unlock(&ctl_mutex);
+ if (ret)
+ rbd_warn(rbd_dev, "got notification but failed to "
+ " update snaps: %d\n", ret);
+ if (image_size != rbd_dev->header.image_size)
+ revalidate_disk(rbd_dev->disk);
return ret;
}
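
The refresh path samples the image size before updating the header so it can tell whether the mapping grew or shrank, and only then pokes the block layer. A rough userspace model of that before/after comparison (names invented):

    #include <stdio.h>

    struct bdev { unsigned long long header_size, disk_size; };

    static int do_refresh(struct bdev *d) { (void)d; return 0; }

    static int refresh_and_revalidate(struct bdev *d)
    {
        unsigned long long before = d->header_size;
        int ret = do_refresh(d);

        if (before != d->header_size) {     /* size changed under us */
            d->disk_size = d->header_size;  /* ~ revalidate_disk() */
            printf("resized to %llu bytes\n", d->disk_size);
        }
        return ret;
    }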
@@ -2394,8 +3243,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
rbd_dev->disk = disk;
- set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
-
return 0;
out_disk:
put_disk(disk);
@@ -2416,13 +3263,9 @@ static ssize_t rbd_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- sector_t size;
-
- down_read(&rbd_dev->header_rwsem);
- size = get_capacity(rbd_dev->disk);
- up_read(&rbd_dev->header_rwsem);
- return sprintf(buf, "%llu\n", (unsigned long long) size * SECTOR_SIZE);
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)rbd_dev->mapping.size);
}
/*
@@ -2435,7 +3278,7 @@ static ssize_t rbd_features_show(struct device *dev,
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "0x%016llx\n",
- (unsigned long long) rbd_dev->mapping.features);
+ (unsigned long long)rbd_dev->mapping.features);
}
static ssize_t rbd_major_show(struct device *dev,
@@ -2443,7 +3286,11 @@ static ssize_t rbd_major_show(struct device *dev,
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- return sprintf(buf, "%d\n", rbd_dev->major);
+ if (rbd_dev->major)
+ return sprintf(buf, "%d\n", rbd_dev->major);
+
+ return sprintf(buf, "(none)\n");
}
static ssize_t rbd_client_id_show(struct device *dev,
@@ -2469,7 +3316,7 @@ static ssize_t rbd_pool_id_show(struct device *dev,
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "%llu\n",
- (unsigned long long) rbd_dev->spec->pool_id);
+ (unsigned long long) rbd_dev->spec->pool_id);
}
static ssize_t rbd_name_show(struct device *dev,
@@ -2555,7 +3402,7 @@ static ssize_t rbd_image_refresh(struct device *dev,
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
int ret;
- ret = rbd_dev_refresh(rbd_dev, NULL);
+ ret = rbd_dev_refresh(rbd_dev);
return ret < 0 ? ret : size;
}
@@ -2606,71 +3453,6 @@ static struct device_type rbd_device_type = {
.release = rbd_sysfs_dev_release,
};
-
-/*
- sysfs - snapshots
-*/
-
-static ssize_t rbd_snap_size_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
-
- return sprintf(buf, "%llu\n", (unsigned long long)snap->size);
-}
-
-static ssize_t rbd_snap_id_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
-
- return sprintf(buf, "%llu\n", (unsigned long long)snap->id);
-}
-
-static ssize_t rbd_snap_features_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
-
- return sprintf(buf, "0x%016llx\n",
- (unsigned long long) snap->features);
-}
-
-static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
-static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
-static DEVICE_ATTR(snap_features, S_IRUGO, rbd_snap_features_show, NULL);
-
-static struct attribute *rbd_snap_attrs[] = {
- &dev_attr_snap_size.attr,
- &dev_attr_snap_id.attr,
- &dev_attr_snap_features.attr,
- NULL,
-};
-
-static struct attribute_group rbd_snap_attr_group = {
- .attrs = rbd_snap_attrs,
-};
-
-static void rbd_snap_dev_release(struct device *dev)
-{
- struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
- kfree(snap->name);
- kfree(snap);
-}
-
-static const struct attribute_group *rbd_snap_attr_groups[] = {
- &rbd_snap_attr_group,
- NULL
-};
-
-static struct device_type rbd_snap_device_type = {
- .groups = rbd_snap_attr_groups,
- .release = rbd_snap_dev_release,
-};
-
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
kref_get(&spec->kref);
@@ -2694,8 +3476,6 @@ static struct rbd_spec *rbd_spec_alloc(void)
return NULL;
kref_init(&spec->kref);
- rbd_spec_put(rbd_spec_get(spec)); /* TEMPORARY */
-
return spec;
}
@@ -2722,7 +3502,6 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
spin_lock_init(&rbd_dev->lock);
rbd_dev->flags = 0;
INIT_LIST_HEAD(&rbd_dev->node);
- INIT_LIST_HEAD(&rbd_dev->snaps);
init_rwsem(&rbd_dev->header_rwsem);
rbd_dev->spec = spec;
@@ -2740,96 +3519,11 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
- rbd_spec_put(rbd_dev->parent_spec);
- kfree(rbd_dev->header_name);
rbd_put_client(rbd_dev->rbd_client);
rbd_spec_put(rbd_dev->spec);
kfree(rbd_dev);
}
-static bool rbd_snap_registered(struct rbd_snap *snap)
-{
- bool ret = snap->dev.type == &rbd_snap_device_type;
- bool reg = device_is_registered(&snap->dev);
-
- rbd_assert(!ret ^ reg);
-
- return ret;
-}
-
-static void rbd_remove_snap_dev(struct rbd_snap *snap)
-{
- list_del(&snap->node);
- if (device_is_registered(&snap->dev))
- device_unregister(&snap->dev);
-}
-
-static int rbd_register_snap_dev(struct rbd_snap *snap,
- struct device *parent)
-{
- struct device *dev = &snap->dev;
- int ret;
-
- dev->type = &rbd_snap_device_type;
- dev->parent = parent;
- dev->release = rbd_snap_dev_release;
- dev_set_name(dev, "%s%s", RBD_SNAP_DEV_NAME_PREFIX, snap->name);
- dout("%s: registering device for snapshot %s\n", __func__, snap->name);
-
- ret = device_register(dev);
-
- return ret;
-}
-
-static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev,
- const char *snap_name,
- u64 snap_id, u64 snap_size,
- u64 snap_features)
-{
- struct rbd_snap *snap;
- int ret;
-
- snap = kzalloc(sizeof (*snap), GFP_KERNEL);
- if (!snap)
- return ERR_PTR(-ENOMEM);
-
- ret = -ENOMEM;
- snap->name = kstrdup(snap_name, GFP_KERNEL);
- if (!snap->name)
- goto err;
-
- snap->id = snap_id;
- snap->size = snap_size;
- snap->features = snap_features;
-
- return snap;
-
-err:
- kfree(snap->name);
- kfree(snap);
-
- return ERR_PTR(ret);
-}
-
-static char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which,
- u64 *snap_size, u64 *snap_features)
-{
- char *snap_name;
-
- rbd_assert(which < rbd_dev->header.snapc->num_snaps);
-
- *snap_size = rbd_dev->header.snap_sizes[which];
- *snap_features = 0; /* No features for v1 */
-
- /* Skip over names until we find the one we are looking for */
-
- snap_name = rbd_dev->header.snap_names;
- while (which--)
- snap_name += strlen(snap_name) + 1;
-
- return snap_name;
-}
-
/*
* Get the size and object order for an image snapshot, or if
* snap_id is CEPH_NOSNAP, gets this information for the base
@@ -2847,18 +3541,21 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_size",
- (char *) &snapid, sizeof (snapid),
- (char *) &size_buf, sizeof (size_buf), NULL);
+ &snapid, sizeof (snapid),
+ &size_buf, sizeof (size_buf));
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
return ret;
+ if (ret < sizeof (size_buf))
+ return -ERANGE;
- *order = size_buf.order;
+ if (order)
+ *order = size_buf.order;
*snap_size = le64_to_cpu(size_buf.size);
dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
- (unsigned long long) snap_id, (unsigned int) *order,
- (unsigned long long) *snap_size);
+ (unsigned long long)snap_id, (unsigned int)*order,
+ (unsigned long long)*snap_size);
return 0;
}
@@ -2881,17 +3578,16 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
return -ENOMEM;
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
- "rbd", "get_object_prefix",
- NULL, 0,
- reply_buf, RBD_OBJ_PREFIX_LEN_MAX, NULL);
+ "rbd", "get_object_prefix", NULL, 0,
+ reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
goto out;
p = reply_buf;
rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
- p + RBD_OBJ_PREFIX_LEN_MAX,
- NULL, GFP_NOIO);
+ p + ret, NULL, GFP_NOIO);
+ ret = 0;
if (IS_ERR(rbd_dev->header.object_prefix)) {
ret = PTR_ERR(rbd_dev->header.object_prefix);
@@ -2899,7 +3595,6 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
} else {
dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
}
-
out:
kfree(reply_buf);
@@ -2913,29 +3608,30 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
struct {
__le64 features;
__le64 incompat;
- } features_buf = { 0 };
+ } __attribute__ ((packed)) features_buf = { 0 };
u64 incompat;
int ret;
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_features",
- (char *) &snapid, sizeof (snapid),
- (char *) &features_buf, sizeof (features_buf),
- NULL);
+ &snapid, sizeof (snapid),
+ &features_buf, sizeof (features_buf));
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
return ret;
+ if (ret < sizeof (features_buf))
+ return -ERANGE;
incompat = le64_to_cpu(features_buf.incompat);
- if (incompat & ~RBD_FEATURES_ALL)
+ if (incompat & ~RBD_FEATURES_SUPPORTED)
return -ENXIO;
*snap_features = le64_to_cpu(features_buf.features);
dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
- (unsigned long long) snap_id,
- (unsigned long long) *snap_features,
- (unsigned long long) le64_to_cpu(features_buf.incompat));
+ (unsigned long long)snap_id,
+ (unsigned long long)*snap_features,
+ (unsigned long long)le64_to_cpu(features_buf.incompat));
return 0;
}
@@ -2975,15 +3671,15 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
snapid = cpu_to_le64(CEPH_NOSNAP);
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_parent",
- (char *) &snapid, sizeof (snapid),
- (char *) reply_buf, size, NULL);
+ &snapid, sizeof (snapid),
+ reply_buf, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
goto out_err;
- ret = -ERANGE;
p = reply_buf;
- end = (char *) reply_buf + size;
+ end = reply_buf + ret;
+ ret = -ERANGE;
ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
if (parent_spec->pool_id == CEPH_NOPOOL)
goto out; /* No parent? No problem. */
@@ -2991,8 +3687,11 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
/* The ceph file layout needs to fit pool id in 32 bits */
ret = -EIO;
- if (WARN_ON(parent_spec->pool_id > (u64) U32_MAX))
- goto out;
+ if (parent_spec->pool_id > (u64)U32_MAX) {
+ rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
+ (unsigned long long)parent_spec->pool_id, U32_MAX);
+ goto out_err;
+ }
image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
if (IS_ERR(image_id)) {
@@ -3015,6 +3714,56 @@ out_err:
return ret;
}
+static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
+{
+ struct {
+ __le64 stripe_unit;
+ __le64 stripe_count;
+ } __attribute__ ((packed)) striping_info_buf = { 0 };
+ size_t size = sizeof (striping_info_buf);
+ void *p;
+ u64 obj_size;
+ u64 stripe_unit;
+ u64 stripe_count;
+ int ret;
+
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ "rbd", "get_stripe_unit_count", NULL, 0,
+ (char *)&striping_info_buf, size);
+ dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
+ if (ret < 0)
+ return ret;
+ if (ret < size)
+ return -ERANGE;
+
+ /*
+ * We don't actually support the "fancy striping" feature
+ * (STRIPINGV2) yet, but if the striping sizes are the
+ * defaults the behavior is the same as before. So find
+ * out, and only fail if the image has non-default values.
+ */
+ ret = -EINVAL;
+ obj_size = (u64)1 << rbd_dev->header.obj_order;
+ p = &striping_info_buf;
+ stripe_unit = ceph_decode_64(&p);
+ if (stripe_unit != obj_size) {
+ rbd_warn(rbd_dev, "unsupported stripe unit "
+ "(got %llu want %llu)",
+ stripe_unit, obj_size);
+ return -EINVAL;
+ }
+ stripe_count = ceph_decode_64(&p);
+ if (stripe_count != 1) {
+ rbd_warn(rbd_dev, "unsupported stripe count "
+ "(got %llu want 1)", stripe_count);
+ return -EINVAL;
+ }
+ rbd_dev->header.stripe_unit = stripe_unit;
+ rbd_dev->header.stripe_count = stripe_count;
+
+ return 0;
+}
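
The reply here is two little-endian 64-bit values, and only the default layout (stripe unit equal to the object size, stripe count of one) is accepted. A self-contained sketch of that decode-and-check, operating on raw bytes rather than the kernel's ceph_decode helpers:

    #include <errno.h>
    #include <stdint.h>

    static uint64_t decode_le64(const unsigned char **p)
    {
        uint64_t v = 0;
        int i;

        for (i = 7; i >= 0; i--)            /* little-endian to host */
            v = (v << 8) | (*p)[i];
        *p += 8;
        return v;
    }

    static int check_striping(const unsigned char *buf, unsigned int obj_order)
    {
        const unsigned char *p = buf;
        uint64_t obj_size = 1ULL << obj_order;
        uint64_t stripe_unit = decode_le64(&p);
        uint64_t stripe_count = decode_le64(&p);

        if (stripe_unit != obj_size || stripe_count != 1)
            return -EINVAL;                 /* non-default striping: unsupported */
        return 0;
    }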
+
static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
size_t image_id_size;
@@ -3036,8 +3785,8 @@ static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
return NULL;
p = image_id;
- end = (char *) image_id + image_id_size;
- ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32) len);
+ end = image_id + image_id_size;
+ ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
reply_buf = kmalloc(size, GFP_KERNEL);
@@ -3047,11 +3796,12 @@ static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
"rbd", "dir_get_name",
image_id, image_id_size,
- (char *) reply_buf, size, NULL);
+ reply_buf, size);
if (ret < 0)
goto out;
p = reply_buf;
- end = (char *) reply_buf + size;
+ end = reply_buf + ret;
+
image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
if (IS_ERR(image_name))
image_name = NULL;
@@ -3064,69 +3814,134 @@ out:
return image_name;
}
+static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
+{
+ struct ceph_snap_context *snapc = rbd_dev->header.snapc;
+ const char *snap_name;
+ u32 which = 0;
+
+ /* Skip over names until we find the one we are looking for */
+
+ snap_name = rbd_dev->header.snap_names;
+ while (which < snapc->num_snaps) {
+ if (!strcmp(name, snap_name))
+ return snapc->snaps[which];
+ snap_name += strlen(snap_name) + 1;
+ which++;
+ }
+ return CEPH_NOSNAP;
+}
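
Format 1 stores snapshot names as consecutive NUL-terminated strings, parallel to the id array in the snapshot context. A standalone model of that walk (userspace, with a hypothetical NOSNAP sentinel):

    #include <stdint.h>
    #include <string.h>

    #define NOSNAP UINT64_MAX

    static uint64_t snap_id_by_name(const char *names, const uint64_t *ids,
                                    uint32_t num, const char *want)
    {
        uint32_t i;

        for (i = 0; i < num; i++) {
            if (!strcmp(want, names))
                return ids[i];
            names += strlen(names) + 1;     /* skip to next packed name */
        }
        return NOSNAP;
    }

For example, with names "a\0bb\0" and ids {10, 11}, looking up "bb" returns 11.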
+
+static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
+{
+ struct ceph_snap_context *snapc = rbd_dev->header.snapc;
+ u32 which;
+ bool found = false;
+ u64 snap_id;
+
+ for (which = 0; !found && which < snapc->num_snaps; which++) {
+ const char *snap_name;
+
+ snap_id = snapc->snaps[which];
+ snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
+ if (IS_ERR(snap_name))
+ break;
+ found = !strcmp(name, snap_name);
+ kfree(snap_name);
+ }
+ return found ? snap_id : CEPH_NOSNAP;
+}
+
/*
- * When a parent image gets probed, we only have the pool, image,
- * and snapshot ids but not the names of any of them. This call
- * is made later to fill in those names. It has to be done after
- * rbd_dev_snaps_update() has completed because some of the
- * information (in particular, snapshot name) is not available
- * until then.
+ * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
+ * no snapshot by that name is found, or if an error occurs.
*/
-static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev)
+static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
- struct ceph_osd_client *osdc;
- const char *name;
- void *reply_buf = NULL;
+ if (rbd_dev->image_format == 1)
+ return rbd_v1_snap_id_by_name(rbd_dev, name);
+
+ return rbd_v2_snap_id_by_name(rbd_dev, name);
+}
+
+/*
+ * When an rbd image has a parent image, it is identified by the
+ * pool, image, and snapshot ids (not names). This function fills
+ * in the names for those ids. (It's OK if we can't figure out the
+ * name for an image id, but the pool and snapshot ids should always
+ * exist and have names.) All names in an rbd spec are dynamically
+ * allocated.
+ *
+ * When an image being mapped (not a parent) is probed, we have the
+ * pool name and pool id, image name and image id, and the snapshot
+ * name. The only thing we're missing is the snapshot id.
+ */
+static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
+{
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ struct rbd_spec *spec = rbd_dev->spec;
+ const char *pool_name;
+ const char *image_name;
+ const char *snap_name;
int ret;
- if (rbd_dev->spec->pool_name)
- return 0; /* Already have the names */
+ /*
+ * An image being mapped will have the pool name (etc.), but
+ * we need to look up the snapshot id.
+ */
+ if (spec->pool_name) {
+ if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
+ u64 snap_id;
+
+ snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
+ if (snap_id == CEPH_NOSNAP)
+ return -ENOENT;
+ spec->snap_id = snap_id;
+ } else {
+ spec->snap_id = CEPH_NOSNAP;
+ }
- /* Look up the pool name */
+ return 0;
+ }
- osdc = &rbd_dev->rbd_client->client->osdc;
- name = ceph_pg_pool_name_by_id(osdc->osdmap, rbd_dev->spec->pool_id);
- if (!name) {
- rbd_warn(rbd_dev, "there is no pool with id %llu",
- rbd_dev->spec->pool_id); /* Really a BUG() */
+ /* Get the pool name; we have to make our own copy of this */
+
+ pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
+ if (!pool_name) {
+ rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
return -EIO;
}
-
- rbd_dev->spec->pool_name = kstrdup(name, GFP_KERNEL);
- if (!rbd_dev->spec->pool_name)
+ pool_name = kstrdup(pool_name, GFP_KERNEL);
+ if (!pool_name)
return -ENOMEM;
/* Fetch the image name; tolerate failure here */
- name = rbd_dev_image_name(rbd_dev);
- if (name)
- rbd_dev->spec->image_name = (char *) name;
- else
+ image_name = rbd_dev_image_name(rbd_dev);
+ if (!image_name)
rbd_warn(rbd_dev, "unable to get image name");
- /* Look up the snapshot name. */
+ /* Look up the snapshot name, and make a copy */
- name = rbd_snap_name(rbd_dev, rbd_dev->spec->snap_id);
- if (!name) {
- rbd_warn(rbd_dev, "no snapshot with id %llu",
- rbd_dev->spec->snap_id); /* Really a BUG() */
- ret = -EIO;
+ snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
+ if (!snap_name) {
+ ret = -ENOMEM;
goto out_err;
}
- rbd_dev->spec->snap_name = kstrdup(name, GFP_KERNEL);
- if(!rbd_dev->spec->snap_name)
- goto out_err;
+
+ spec->pool_name = pool_name;
+ spec->image_name = image_name;
+ spec->snap_name = snap_name;
return 0;
out_err:
- kfree(reply_buf);
- kfree(rbd_dev->spec->pool_name);
- rbd_dev->spec->pool_name = NULL;
+ kfree(image_name);
+ kfree(pool_name);
return ret;
}
-static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
+static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
size_t size;
int ret;
@@ -3151,16 +3966,15 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
return -ENOMEM;
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
- "rbd", "get_snapcontext",
- NULL, 0,
- reply_buf, size, ver);
+ "rbd", "get_snapcontext", NULL, 0,
+ reply_buf, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
goto out;
- ret = -ERANGE;
p = reply_buf;
- end = (char *) reply_buf + size;
+ end = reply_buf + ret;
+ ret = -ERANGE;
ceph_decode_64_safe(&p, end, seq, out);
ceph_decode_32_safe(&p, end, snap_count, out);
@@ -3177,37 +3991,33 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
}
if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
goto out;
+ ret = 0;
- size = sizeof (struct ceph_snap_context) +
- snap_count * sizeof (snapc->snaps[0]);
- snapc = kmalloc(size, GFP_KERNEL);
+ snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
if (!snapc) {
ret = -ENOMEM;
goto out;
}
-
- atomic_set(&snapc->nref, 1);
snapc->seq = seq;
- snapc->num_snaps = snap_count;
for (i = 0; i < snap_count; i++)
snapc->snaps[i] = ceph_decode_64(&p);
rbd_dev->header.snapc = snapc;
dout(" snap context seq = %llu, snap_count = %u\n",
- (unsigned long long) seq, (unsigned int) snap_count);
-
+ (unsigned long long)seq, (unsigned int)snap_count);
out:
kfree(reply_buf);
- return 0;
+ return ret;
}
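
Every field pulled out of the reply above is bounds-checked (the _safe decode macros), and a reply too short for its own snapshot count fails with -ERANGE. A userspace sketch of the same discipline, assuming a little-endian host in place of the kernel's endian converters:

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>

    static int get_u64(const unsigned char **p, const unsigned char *end,
                       uint64_t *v)
    {
        if ((size_t)(end - *p) < 8)
            return -ERANGE;
        memcpy(v, *p, 8);                   /* little-endian host assumed */
        *p += 8;
        return 0;
    }

    static int decode_snap_context(const unsigned char *buf, size_t len,
                                   uint64_t *seq, uint64_t *snaps, uint32_t max)
    {
        const unsigned char *p = buf, *end = buf + len;
        uint32_t count, i;

        if (get_u64(&p, end, seq))
            return -ERANGE;
        if ((size_t)(end - p) < 4)
            return -ERANGE;
        memcpy(&count, p, 4);
        p += 4;
        if (count > max || (size_t)(end - p) < (size_t)count * 8)
            return -ERANGE;                 /* reply can't hold its own ids */
        for (i = 0; i < count; i++)
            get_u64(&p, end, &snaps[i]);
        return (int)count;
    }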
-static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
+static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
+ u64 snap_id)
{
size_t size;
void *reply_buf;
- __le64 snap_id;
+ __le64 snapid;
int ret;
void *p;
void *end;
@@ -3218,236 +4028,52 @@ static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
if (!reply_buf)
return ERR_PTR(-ENOMEM);
- snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]);
+ snapid = cpu_to_le64(snap_id);
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_snapshot_name",
- (char *) &snap_id, sizeof (snap_id),
- reply_buf, size, NULL);
+ &snapid, sizeof (snapid),
+ reply_buf, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
- if (ret < 0)
+ if (ret < 0) {
+ snap_name = ERR_PTR(ret);
goto out;
+ }
p = reply_buf;
- end = (char *) reply_buf + size;
+ end = reply_buf + ret;
snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
- if (IS_ERR(snap_name)) {
- ret = PTR_ERR(snap_name);
+ if (IS_ERR(snap_name))
goto out;
- } else {
- dout(" snap_id 0x%016llx snap_name = %s\n",
- (unsigned long long) le64_to_cpu(snap_id), snap_name);
- }
- kfree(reply_buf);
- return snap_name;
+ dout(" snap_id 0x%016llx snap_name = %s\n",
+ (unsigned long long)snap_id, snap_name);
out:
kfree(reply_buf);
- return ERR_PTR(ret);
-}
-
-static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which,
- u64 *snap_size, u64 *snap_features)
-{
- u64 snap_id;
- u8 order;
- int ret;
-
- snap_id = rbd_dev->header.snapc->snaps[which];
- ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, &order, snap_size);
- if (ret)
- return ERR_PTR(ret);
- ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, snap_features);
- if (ret)
- return ERR_PTR(ret);
-
- return rbd_dev_v2_snap_name(rbd_dev, which);
-}
-
-static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which,
- u64 *snap_size, u64 *snap_features)
-{
- if (rbd_dev->image_format == 1)
- return rbd_dev_v1_snap_info(rbd_dev, which,
- snap_size, snap_features);
- if (rbd_dev->image_format == 2)
- return rbd_dev_v2_snap_info(rbd_dev, which,
- snap_size, snap_features);
- return ERR_PTR(-EINVAL);
+ return snap_name;
}
-static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver)
+static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev)
{
int ret;
- __u8 obj_order;
down_write(&rbd_dev->header_rwsem);
- /* Grab old order first, to see if it changes */
-
- obj_order = rbd_dev->header.obj_order,
ret = rbd_dev_v2_image_size(rbd_dev);
if (ret)
goto out;
- if (rbd_dev->header.obj_order != obj_order) {
- ret = -EIO;
- goto out;
- }
rbd_update_mapping_size(rbd_dev);
- ret = rbd_dev_v2_snap_context(rbd_dev, hver);
+ ret = rbd_dev_v2_snap_context(rbd_dev);
dout("rbd_dev_v2_snap_context returned %d\n", ret);
if (ret)
goto out;
- ret = rbd_dev_snaps_update(rbd_dev);
- dout("rbd_dev_snaps_update returned %d\n", ret);
- if (ret)
- goto out;
- ret = rbd_dev_snaps_register(rbd_dev);
- dout("rbd_dev_snaps_register returned %d\n", ret);
out:
up_write(&rbd_dev->header_rwsem);
return ret;
}
-/*
- * Scan the rbd device's current snapshot list and compare it to the
- * newly-received snapshot context. Remove any existing snapshots
- * not present in the new snapshot context. Add a new snapshot for
- * any snaphots in the snapshot context not in the current list.
- * And verify there are no changes to snapshots we already know
- * about.
- *
- * Assumes the snapshots in the snapshot context are sorted by
- * snapshot id, highest id first. (Snapshots in the rbd_dev's list
- * are also maintained in that order.)
- */
-static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
-{
- struct ceph_snap_context *snapc = rbd_dev->header.snapc;
- const u32 snap_count = snapc->num_snaps;
- struct list_head *head = &rbd_dev->snaps;
- struct list_head *links = head->next;
- u32 index = 0;
-
- dout("%s: snap count is %u\n", __func__, (unsigned int) snap_count);
- while (index < snap_count || links != head) {
- u64 snap_id;
- struct rbd_snap *snap;
- char *snap_name;
- u64 snap_size = 0;
- u64 snap_features = 0;
-
- snap_id = index < snap_count ? snapc->snaps[index]
- : CEPH_NOSNAP;
- snap = links != head ? list_entry(links, struct rbd_snap, node)
- : NULL;
- rbd_assert(!snap || snap->id != CEPH_NOSNAP);
-
- if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
- struct list_head *next = links->next;
-
- /*
- * A previously-existing snapshot is not in
- * the new snap context.
- *
- * If the now missing snapshot is the one the
- * image is mapped to, clear its exists flag
- * so we can avoid sending any more requests
- * to it.
- */
- if (rbd_dev->spec->snap_id == snap->id)
- clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
- rbd_remove_snap_dev(snap);
- dout("%ssnap id %llu has been removed\n",
- rbd_dev->spec->snap_id == snap->id ?
- "mapped " : "",
- (unsigned long long) snap->id);
-
- /* Done with this list entry; advance */
-
- links = next;
- continue;
- }
-
- snap_name = rbd_dev_snap_info(rbd_dev, index,
- &snap_size, &snap_features);
- if (IS_ERR(snap_name))
- return PTR_ERR(snap_name);
-
- dout("entry %u: snap_id = %llu\n", (unsigned int) snap_count,
- (unsigned long long) snap_id);
- if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) {
- struct rbd_snap *new_snap;
-
- /* We haven't seen this snapshot before */
-
- new_snap = __rbd_add_snap_dev(rbd_dev, snap_name,
- snap_id, snap_size, snap_features);
- if (IS_ERR(new_snap)) {
- int err = PTR_ERR(new_snap);
-
- dout(" failed to add dev, error %d\n", err);
-
- return err;
- }
-
- /* New goes before existing, or at end of list */
-
- dout(" added dev%s\n", snap ? "" : " at end\n");
- if (snap)
- list_add_tail(&new_snap->node, &snap->node);
- else
- list_add_tail(&new_snap->node, head);
- } else {
- /* Already have this one */
-
- dout(" already present\n");
-
- rbd_assert(snap->size == snap_size);
- rbd_assert(!strcmp(snap->name, snap_name));
- rbd_assert(snap->features == snap_features);
-
- /* Done with this list entry; advance */
-
- links = links->next;
- }
-
- /* Advance to the next entry in the snapshot context */
-
- index++;
- }
- dout("%s: done\n", __func__);
-
- return 0;
-}
-
-/*
- * Scan the list of snapshots and register the devices for any that
- * have not already been registered.
- */
-static int rbd_dev_snaps_register(struct rbd_device *rbd_dev)
-{
- struct rbd_snap *snap;
- int ret = 0;
-
- dout("%s:\n", __func__);
- if (WARN_ON(!device_is_registered(&rbd_dev->dev)))
- return -EIO;
-
- list_for_each_entry(snap, &rbd_dev->snaps, node) {
- if (!rbd_snap_registered(snap)) {
- ret = rbd_register_snap_dev(snap, &rbd_dev->dev);
- if (ret < 0)
- break;
- }
- }
- dout("%s: returning %d\n", __func__, ret);
-
- return ret;
-}
-
static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
struct device *dev;
@@ -3459,7 +4085,7 @@ static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
dev->bus = &rbd_bus_type;
dev->type = &rbd_device_type;
dev->parent = &rbd_root_dev;
- dev->release = rbd_dev_release;
+ dev->release = rbd_dev_device_release;
dev_set_name(dev, "%d", rbd_dev->dev_id);
ret = device_register(dev);
@@ -3673,6 +4299,7 @@ static int rbd_add_parse_args(const char *buf,
size_t len;
char *options;
const char *mon_addrs;
+ char *snap_name;
size_t mon_addrs_size;
struct rbd_spec *spec = NULL;
struct rbd_options *rbd_opts = NULL;
@@ -3731,10 +4358,11 @@ static int rbd_add_parse_args(const char *buf,
ret = -ENAMETOOLONG;
goto out_err;
}
- spec->snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
- if (!spec->snap_name)
+ snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
+ if (!snap_name)
goto out_mem;
- *(spec->snap_name + len) = '\0';
+ *(snap_name + len) = '\0';
+ spec->snap_name = snap_name;
/* Initialize all rbd options to the defaults */
@@ -3788,15 +4416,19 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
size_t size;
char *object_name;
void *response;
- void *p;
+ char *image_id;
/*
* When probing a parent image, the image id is already
* known (and the image name likely is not). There's no
- * need to fetch the image id again in this case.
+ * need to fetch the image id again in this case. We
+ * do still need to set the image format though.
*/
- if (rbd_dev->spec->image_id)
+ if (rbd_dev->spec->image_id) {
+ rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
+
return 0;
+ }
/*
* First, see if the format 2 image id file exists, and if
@@ -3818,23 +4450,32 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
goto out;
}
+ /* If it doesn't exist we'll assume it's a format 1 image */
+
ret = rbd_obj_method_sync(rbd_dev, object_name,
- "rbd", "get_id",
- NULL, 0,
- response, RBD_IMAGE_ID_LEN_MAX, NULL);
+ "rbd", "get_id", NULL, 0,
+ response, RBD_IMAGE_ID_LEN_MAX);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
- if (ret < 0)
- goto out;
-
- p = response;
- rbd_dev->spec->image_id = ceph_extract_encoded_string(&p,
- p + RBD_IMAGE_ID_LEN_MAX,
+ if (ret == -ENOENT) {
+ image_id = kstrdup("", GFP_KERNEL);
+ ret = image_id ? 0 : -ENOMEM;
+ if (!ret)
+ rbd_dev->image_format = 1;
+ } else if (ret > sizeof (__le32)) {
+ void *p = response;
+
+ image_id = ceph_extract_encoded_string(&p, p + ret,
NULL, GFP_NOIO);
- if (IS_ERR(rbd_dev->spec->image_id)) {
- ret = PTR_ERR(rbd_dev->spec->image_id);
- rbd_dev->spec->image_id = NULL;
+ ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
+ if (!ret)
+ rbd_dev->image_format = 2;
} else {
- dout("image_id is %s\n", rbd_dev->spec->image_id);
+ ret = -EINVAL;
+ }
+
+ if (!ret) {
+ rbd_dev->spec->image_id = image_id;
+ dout("image_id is %s\n", image_id);
}
out:
kfree(response);
@@ -3843,27 +4484,30 @@ out:
return ret;
}
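
The probe now encodes the format decision in the result of the get_id call: -ENOENT means there is no id object (a format 1 image), a reply longer than the 4-byte length prefix carries an encoded id (format 2), and anything else is invalid. Distilled into a sketch:

    #include <errno.h>

    static int image_format_from_ret(int ret, int min_len)
    {
        if (ret == -ENOENT)
            return 1;                       /* no id object: old-style image */
        if (ret > min_len)
            return 2;                       /* encoded image id present */
        return -EINVAL;
    }

In the caller above, min_len would be sizeof(__le32), i.e. 4.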
-static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
+/* Undo whatever state changes are made by v1 or v2 image probe */
+
+static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
- int ret;
- size_t size;
+ struct rbd_image_header *header;
- /* Version 1 images have no id; empty string is used */
+ rbd_dev_remove_parent(rbd_dev);
+ rbd_spec_put(rbd_dev->parent_spec);
+ rbd_dev->parent_spec = NULL;
+ rbd_dev->parent_overlap = 0;
- rbd_dev->spec->image_id = kstrdup("", GFP_KERNEL);
- if (!rbd_dev->spec->image_id)
- return -ENOMEM;
+ /* Free dynamic fields from the header, then zero it out */
- /* Record the header object name for this rbd image. */
+ header = &rbd_dev->header;
+ ceph_put_snap_context(header->snapc);
+ kfree(header->snap_sizes);
+ kfree(header->snap_names);
+ kfree(header->object_prefix);
+ memset(header, 0, sizeof (*header));
+}
- size = strlen(rbd_dev->spec->image_name) + sizeof (RBD_SUFFIX);
- rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
- if (!rbd_dev->header_name) {
- ret = -ENOMEM;
- goto out_err;
- }
- sprintf(rbd_dev->header_name, "%s%s",
- rbd_dev->spec->image_name, RBD_SUFFIX);
+static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
+{
+ int ret;
/* Populate rbd image metadata */
@@ -3876,8 +4520,6 @@ static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
rbd_dev->parent_spec = NULL;
rbd_dev->parent_overlap = 0;
- rbd_dev->image_format = 1;
-
dout("discovered version 1 image, header name is %s\n",
rbd_dev->header_name);
@@ -3894,43 +4536,45 @@ out_err:
static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
{
- size_t size;
int ret;
- u64 ver = 0;
-
- /*
- * Image id was filled in by the caller. Record the header
- * object name for this rbd image.
- */
- size = sizeof (RBD_HEADER_PREFIX) + strlen(rbd_dev->spec->image_id);
- rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
- if (!rbd_dev->header_name)
- return -ENOMEM;
- sprintf(rbd_dev->header_name, "%s%s",
- RBD_HEADER_PREFIX, rbd_dev->spec->image_id);
-
- /* Get the size and object order for the image */
ret = rbd_dev_v2_image_size(rbd_dev);
- if (ret < 0)
+ if (ret)
goto out_err;
/* Get the object prefix (a.k.a. block_name) for the image */
ret = rbd_dev_v2_object_prefix(rbd_dev);
- if (ret < 0)
+ if (ret)
goto out_err;
 /* Get and check the features for the image */
ret = rbd_dev_v2_features(rbd_dev);
- if (ret < 0)
+ if (ret)
goto out_err;
/* If the image supports layering, get the parent info */
if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
ret = rbd_dev_v2_parent_info(rbd_dev);
+ if (ret)
+ goto out_err;
+
+ /*
+ * Don't print a warning for parent images. We can
+ * tell at this point because we won't know its pool
+ * name yet (just its pool id).
+ */
+ if (rbd_dev->spec->pool_name)
+ rbd_warn(rbd_dev, "WARNING: kernel layering "
+ "is EXPERIMENTAL!");
+ }
+
+ /* If the image supports fancy striping, get its parameters */
+
+ if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
+ ret = rbd_dev_v2_striping_info(rbd_dev);
if (ret < 0)
goto out_err;
}
@@ -3942,12 +4586,9 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
 /* Get the snapshot context */
- ret = rbd_dev_v2_snap_context(rbd_dev, &ver);
+ ret = rbd_dev_v2_snap_context(rbd_dev);
if (ret)
goto out_err;
- rbd_dev->header.obj_version = ver;
-
- rbd_dev->image_format = 2;
dout("discovered version 2 image, header name is %s\n",
rbd_dev->header_name);
@@ -3965,22 +4606,54 @@ out_err:
return ret;
}
-static int rbd_dev_probe_finish(struct rbd_device *rbd_dev)
+static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
+ struct rbd_device *parent = NULL;
+ struct rbd_spec *parent_spec;
+ struct rbd_client *rbdc;
int ret;
- /* no need to lock here, as rbd_dev is not registered yet */
- ret = rbd_dev_snaps_update(rbd_dev);
- if (ret)
- return ret;
+ if (!rbd_dev->parent_spec)
+ return 0;
+ /*
+ * We need to pass a reference to the client and the parent
+ * spec when creating the parent rbd_dev. Images related by
+ * parent/child relationships always share both.
+ */
+ parent_spec = rbd_spec_get(rbd_dev->parent_spec);
+ rbdc = __rbd_get_client(rbd_dev->rbd_client);
- ret = rbd_dev_probe_update_spec(rbd_dev);
- if (ret)
- goto err_out_snaps;
+ ret = -ENOMEM;
+ parent = rbd_dev_create(rbdc, parent_spec);
+ if (!parent)
+ goto out_err;
+
+ ret = rbd_dev_image_probe(parent);
+ if (ret < 0)
+ goto out_err;
+ rbd_dev->parent = parent;
+
+ return 0;
+out_err:
+ if (parent) {
+ rbd_spec_put(rbd_dev->parent_spec);
+ kfree(rbd_dev->header_name);
+ rbd_dev_destroy(parent);
+ } else {
+ rbd_put_client(rbdc);
+ rbd_spec_put(parent_spec);
+ }
+
+ return ret;
+}
+
+static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
+{
+ int ret;
- ret = rbd_dev_set_mapping(rbd_dev);
+ ret = rbd_dev_mapping_set(rbd_dev);
if (ret)
- goto err_out_snaps;
+ return ret;
/* generate unique id: find highest unique id, add one */
rbd_dev_id_get(rbd_dev);
@@ -4007,54 +4680,81 @@ static int rbd_dev_probe_finish(struct rbd_device *rbd_dev)
if (ret)
goto err_out_disk;
- /*
- * At this point cleanup in the event of an error is the job
- * of the sysfs code (initiated by rbd_bus_del_dev()).
- */
- down_write(&rbd_dev->header_rwsem);
- ret = rbd_dev_snaps_register(rbd_dev);
- up_write(&rbd_dev->header_rwsem);
- if (ret)
- goto err_out_bus;
-
- ret = rbd_dev_header_watch_sync(rbd_dev, 1);
- if (ret)
- goto err_out_bus;
-
/* Everything's ready. Announce the disk to the world. */
+ set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
+ set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
add_disk(rbd_dev->disk);
pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
(unsigned long long) rbd_dev->mapping.size);
return ret;
-err_out_bus:
- /* this will also clean up rest of rbd_dev stuff */
- rbd_bus_del_dev(rbd_dev);
-
- return ret;
err_out_disk:
rbd_free_disk(rbd_dev);
err_out_blkdev:
unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
rbd_dev_id_put(rbd_dev);
-err_out_snaps:
- rbd_remove_all_snaps(rbd_dev);
+ rbd_dev_mapping_clear(rbd_dev);
return ret;
}
+static int rbd_dev_header_name(struct rbd_device *rbd_dev)
+{
+ struct rbd_spec *spec = rbd_dev->spec;
+ size_t size;
+
+ /* Record the header object name for this rbd image. */
+
+ rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+
+ if (rbd_dev->image_format == 1)
+ size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
+ else
+ size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
+
+ rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
+ if (!rbd_dev->header_name)
+ return -ENOMEM;
+
+ if (rbd_dev->image_format == 1)
+ sprintf(rbd_dev->header_name, "%s%s",
+ spec->image_name, RBD_SUFFIX);
+ else
+ sprintf(rbd_dev->header_name, "%s%s",
+ RBD_HEADER_PREFIX, spec->image_id);
+ return 0;
+}
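
Both branches reduce to a prefix/suffix concatenation; in the driver's rbd_types.h the literals expand to ".rbd" (RBD_SUFFIX) and "rbd_header." (RBD_HEADER_PREFIX). A userspace sketch of the same construction:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *header_object_name(int format, const char *image_name,
                                    const char *image_id)
    {
        /* format 1: "<image_name>.rbd"; format 2: "rbd_header.<image_id>" */
        const char *a = format == 1 ? image_name : "rbd_header.";
        const char *b = format == 1 ? ".rbd" : image_id;
        size_t size = strlen(a) + strlen(b) + 1;
        char *name = malloc(size);

        if (name)
            snprintf(name, size, "%s%s", a, b);
        return name;
    }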
+
+static void rbd_dev_image_release(struct rbd_device *rbd_dev)
+{
+ int ret;
+
+ rbd_dev_unprobe(rbd_dev);
+ ret = rbd_dev_header_watch_sync(rbd_dev, 0);
+ if (ret)
+ rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
+ kfree(rbd_dev->header_name);
+ rbd_dev->header_name = NULL;
+ rbd_dev->image_format = 0;
+ kfree(rbd_dev->spec->image_id);
+ rbd_dev->spec->image_id = NULL;
+
+ rbd_dev_destroy(rbd_dev);
+}
+
/*
* Probe for the existence of the header object for the given rbd
* device. For format 2 images this includes determining the image
* id.
*/
-static int rbd_dev_probe(struct rbd_device *rbd_dev)
+static int rbd_dev_image_probe(struct rbd_device *rbd_dev)
{
int ret;
+ int tmp;
/*
* Get the id from the image id object. If it's not a
@@ -4063,18 +4763,48 @@ static int rbd_dev_probe(struct rbd_device *rbd_dev)
*/
ret = rbd_dev_image_id(rbd_dev);
if (ret)
+ return ret;
+ rbd_assert(rbd_dev->spec->image_id);
+ rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+
+ ret = rbd_dev_header_name(rbd_dev);
+ if (ret)
+ goto err_out_format;
+
+ ret = rbd_dev_header_watch_sync(rbd_dev, 1);
+ if (ret)
+ goto out_header_name;
+
+ if (rbd_dev->image_format == 1)
ret = rbd_dev_v1_probe(rbd_dev);
else
ret = rbd_dev_v2_probe(rbd_dev);
- if (ret) {
- dout("probe failed, returning %d\n", ret);
-
- return ret;
- }
+ if (ret)
+ goto err_out_watch;
- ret = rbd_dev_probe_finish(rbd_dev);
+ ret = rbd_dev_spec_update(rbd_dev);
if (ret)
- rbd_header_free(&rbd_dev->header);
+ goto err_out_probe;
+
+ ret = rbd_dev_probe_parent(rbd_dev);
+ if (!ret)
+ return 0;
+
+err_out_probe:
+ rbd_dev_unprobe(rbd_dev);
+err_out_watch:
+ tmp = rbd_dev_header_watch_sync(rbd_dev, 0);
+ if (tmp)
+ rbd_warn(rbd_dev, "unable to tear down watch request\n");
+out_header_name:
+ kfree(rbd_dev->header_name);
+ rbd_dev->header_name = NULL;
+err_out_format:
+ rbd_dev->image_format = 0;
+ kfree(rbd_dev->spec->image_id);
+ rbd_dev->spec->image_id = NULL;
+
+ dout("probe failed, returning %d\n", ret);
return ret;
}
@@ -4111,11 +4841,13 @@ static ssize_t rbd_add(struct bus_type *bus,
rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
if (rc < 0)
goto err_out_client;
- spec->pool_id = (u64) rc;
+ spec->pool_id = (u64)rc;
/* The ceph file layout needs to fit pool id in 32 bits */
- if (WARN_ON(spec->pool_id > (u64) U32_MAX)) {
+ if (spec->pool_id > (u64)U32_MAX) {
+ rbd_warn(NULL, "pool id too large (%llu > %u)\n",
+ (unsigned long long)spec->pool_id, U32_MAX);
rc = -EIO;
goto err_out_client;
}
@@ -4130,11 +4862,15 @@ static ssize_t rbd_add(struct bus_type *bus,
kfree(rbd_opts);
rbd_opts = NULL; /* done with this */
- rc = rbd_dev_probe(rbd_dev);
+ rc = rbd_dev_image_probe(rbd_dev);
if (rc < 0)
goto err_out_rbd_dev;
- return count;
+ rc = rbd_dev_device_setup(rbd_dev);
+ if (!rc)
+ return count;
+
+ rbd_dev_image_release(rbd_dev);
err_out_rbd_dev:
rbd_dev_destroy(rbd_dev);
err_out_client:
@@ -4149,7 +4885,7 @@ err_out_module:
dout("Error adding device %s\n", buf);
- return (ssize_t) rc;
+ return (ssize_t)rc;
}
static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
@@ -4169,27 +4905,43 @@ static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
return NULL;
}
-static void rbd_dev_release(struct device *dev)
+static void rbd_dev_device_release(struct device *dev)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- if (rbd_dev->watch_event)
- rbd_dev_header_watch_sync(rbd_dev, 0);
-
- /* clean up and free blkdev */
rbd_free_disk(rbd_dev);
+ clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
+ rbd_dev_clear_mapping(rbd_dev);
unregister_blkdev(rbd_dev->major, rbd_dev->name);
-
- /* release allocated disk header fields */
- rbd_header_free(&rbd_dev->header);
-
- /* done with the id, and with the rbd_dev */
+ rbd_dev->major = 0;
rbd_dev_id_put(rbd_dev);
- rbd_assert(rbd_dev->rbd_client != NULL);
- rbd_dev_destroy(rbd_dev);
+ rbd_dev_mapping_clear(rbd_dev);
+}
- /* release module ref */
- module_put(THIS_MODULE);
+static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
+{
+ while (rbd_dev->parent) {
+ struct rbd_device *first = rbd_dev;
+ struct rbd_device *second = first->parent;
+ struct rbd_device *third;
+
+ /*
+ * Follow to the parent with no grandparent and
+ * remove it.
+ */
+ while (second && (third = second->parent)) {
+ first = second;
+ second = third;
+ }
+ rbd_assert(second);
+ rbd_dev_image_release(second);
+ first->parent = NULL;
+ first->parent_overlap = 0;
+
+ rbd_assert(first->parent_spec);
+ rbd_spec_put(first->parent_spec);
+ first->parent_spec = NULL;
+ }
}
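
The loop above tears the chain down deepest-ancestor-first without recursion: each pass walks to the last device that has no grandparent, releases it, and detaches it from its child. The same shape on a plain linked structure (userspace model):

    #include <stdlib.h>

    struct node { struct node *parent; };

    static void release(struct node *n) { free(n); }

    static void remove_parents(struct node *dev)
    {
        while (dev->parent) {
            struct node *first = dev;
            struct node *second = first->parent;
            struct node *third;

            /* walk to the parent with no grandparent */
            while (second && (third = second->parent)) {
                first = second;
                second = third;
            }
            release(second);            /* free the deepest ancestor */
            first->parent = NULL;       /* detach it from its child */
        }
    }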
static ssize_t rbd_remove(struct bus_type *bus,
@@ -4197,13 +4949,13 @@ static ssize_t rbd_remove(struct bus_type *bus,
size_t count)
{
struct rbd_device *rbd_dev = NULL;
- int target_id, rc;
+ int target_id;
unsigned long ul;
- int ret = count;
+ int ret;
- rc = strict_strtoul(buf, 10, &ul);
- if (rc)
- return rc;
+ ret = strict_strtoul(buf, 10, &ul);
+ if (ret)
+ return ret;
/* convert to int; abort if we lost anything in the conversion */
target_id = (int) ul;
@@ -4226,10 +4978,10 @@ static ssize_t rbd_remove(struct bus_type *bus,
spin_unlock_irq(&rbd_dev->lock);
if (ret < 0)
goto done;
-
- rbd_remove_all_snaps(rbd_dev);
+ ret = count;
rbd_bus_del_dev(rbd_dev);
-
+ rbd_dev_image_release(rbd_dev);
+ module_put(THIS_MODULE);
done:
mutex_unlock(&ctl_mutex);
@@ -4261,6 +5013,56 @@ static void rbd_sysfs_cleanup(void)
device_unregister(&rbd_root_dev);
}
+static int rbd_slab_init(void)
+{
+ rbd_assert(!rbd_img_request_cache);
+ rbd_img_request_cache = kmem_cache_create("rbd_img_request",
+ sizeof (struct rbd_img_request),
+ __alignof__(struct rbd_img_request),
+ 0, NULL);
+ if (!rbd_img_request_cache)
+ return -ENOMEM;
+
+ rbd_assert(!rbd_obj_request_cache);
+ rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
+ sizeof (struct rbd_obj_request),
+ __alignof__(struct rbd_obj_request),
+ 0, NULL);
+ if (!rbd_obj_request_cache)
+ goto out_err;
+
+ rbd_assert(!rbd_segment_name_cache);
+ rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
+ MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
+ if (rbd_segment_name_cache)
+ return 0;
+out_err:
+ if (rbd_obj_request_cache) {
+ kmem_cache_destroy(rbd_obj_request_cache);
+ rbd_obj_request_cache = NULL;
+ }
+
+ kmem_cache_destroy(rbd_img_request_cache);
+ rbd_img_request_cache = NULL;
+
+ return -ENOMEM;
+}
+
+static void rbd_slab_exit(void)
+{
+ rbd_assert(rbd_segment_name_cache);
+ kmem_cache_destroy(rbd_segment_name_cache);
+ rbd_segment_name_cache = NULL;
+
+ rbd_assert(rbd_obj_request_cache);
+ kmem_cache_destroy(rbd_obj_request_cache);
+ rbd_obj_request_cache = NULL;
+
+ rbd_assert(rbd_img_request_cache);
+ kmem_cache_destroy(rbd_img_request_cache);
+ rbd_img_request_cache = NULL;
+}
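
The pair above follows the usual unwind discipline: creation failures free only what already exists, in reverse order, and the exit path mirrors init. A generic sketch of that shape, with plain allocations standing in for kmem_cache_create():

    #include <stdlib.h>

    static void *img_cache, *obj_cache, *name_cache;

    static int caches_init(void)
    {
        img_cache = malloc(64);
        if (!img_cache)
            return -1;
        obj_cache = malloc(64);
        if (!obj_cache)
            goto out_img;
        name_cache = malloc(64);
        if (name_cache)
            return 0;

        free(obj_cache);                /* unwind in reverse order */
        obj_cache = NULL;
    out_img:
        free(img_cache);
        img_cache = NULL;
        return -1;
    }

    static void caches_exit(void)
    {
        free(name_cache);
        free(obj_cache);
        free(img_cache);
        name_cache = obj_cache = img_cache = NULL;
    }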
+
static int __init rbd_init(void)
{
int rc;
@@ -4270,16 +5072,22 @@ static int __init rbd_init(void)
return -EINVAL;
}
- rc = rbd_sysfs_init();
+ rc = rbd_slab_init();
if (rc)
return rc;
- pr_info("loaded " RBD_DRV_NAME_LONG "\n");
- return 0;
+ rc = rbd_sysfs_init();
+ if (rc)
+ rbd_slab_exit();
+ else
+ pr_info("loaded " RBD_DRV_NAME_LONG "\n");
+
+ return rc;
}
static void __exit rbd_exit(void)
{
rbd_sysfs_cleanup();
+ rbd_slab_exit();
}
module_init(rbd_init);
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 8766a225709..2f445b7a174 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -673,7 +673,7 @@ static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
return ret;
}
-static int floppy_release(struct gendisk *disk, fmode_t mode)
+static void floppy_release(struct gendisk *disk, fmode_t mode)
{
struct floppy_state *fs = disk->private_data;
struct swim __iomem *base = fs->swd->base;
@@ -687,8 +687,6 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
if (fs->ref_count == 0)
swim_motor(base, OFF);
mutex_unlock(&swim_mutex);
-
- return 0;
}
static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 758f2ac878c..20e061c3e02 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -251,7 +251,7 @@ static int fd_eject(struct floppy_state *fs);
static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long param);
static int floppy_open(struct block_device *bdev, fmode_t mode);
-static int floppy_release(struct gendisk *disk, fmode_t mode);
+static void floppy_release(struct gendisk *disk, fmode_t mode);
static unsigned int floppy_check_events(struct gendisk *disk,
unsigned int clearing);
static int floppy_revalidate(struct gendisk *disk);
@@ -1017,7 +1017,7 @@ static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
return ret;
}
-static int floppy_release(struct gendisk *disk, fmode_t mode)
+static void floppy_release(struct gendisk *disk, fmode_t mode)
{
struct floppy_state *fs = disk->private_data;
struct swim3 __iomem *sw = fs->swim3;
@@ -1029,7 +1029,6 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
swim3_select(fs, RELAX);
}
mutex_unlock(&swim3_mutex);
- return 0;
}
static unsigned int floppy_check_events(struct gendisk *disk,
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 8ad21a25bc0..64723953e1c 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -100,96 +100,103 @@ static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk,
return vbr;
}
-static void virtblk_add_buf_wait(struct virtio_blk *vblk,
- struct virtblk_req *vbr,
- unsigned long out,
- unsigned long in)
+static int __virtblk_add_req(struct virtqueue *vq,
+ struct virtblk_req *vbr,
+ struct scatterlist *data_sg,
+ bool have_data)
{
- DEFINE_WAIT(wait);
+ struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
+ unsigned int num_out = 0, num_in = 0;
+ int type = vbr->out_hdr.type & ~VIRTIO_BLK_T_OUT;
- for (;;) {
- prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
- TASK_UNINTERRUPTIBLE);
+ sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
+ sgs[num_out++] = &hdr;
- spin_lock_irq(vblk->disk->queue->queue_lock);
- if (virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
- GFP_ATOMIC) < 0) {
- spin_unlock_irq(vblk->disk->queue->queue_lock);
- io_schedule();
- } else {
- virtqueue_kick(vblk->vq);
- spin_unlock_irq(vblk->disk->queue->queue_lock);
- break;
- }
+ /*
+ * If this is a packet command we need a couple of additional headers.
+ * Behind the normal outhdr we put a segment with the scsi command
+ * block, and before the normal inhdr we put the sense data and the
+ * inhdr with additional status information.
+ */
+ if (type == VIRTIO_BLK_T_SCSI_CMD) {
+ sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len);
+ sgs[num_out++] = &cmd;
+ }
+ if (have_data) {
+ if (vbr->out_hdr.type & VIRTIO_BLK_T_OUT)
+ sgs[num_out++] = data_sg;
+ else
+ sgs[num_out + num_in++] = data_sg;
}
- finish_wait(&vblk->queue_wait, &wait);
+ if (type == VIRTIO_BLK_T_SCSI_CMD) {
+ sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
+ sgs[num_out + num_in++] = &sense;
+ sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
+ sgs[num_out + num_in++] = &inhdr;
+ }
+
+ sg_init_one(&status, &vbr->status, sizeof(vbr->status));
+ sgs[num_out + num_in++] = &status;
+
+ return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
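
To make the ordering constraint explicit: virtqueue_add_sgs() requires all device-readable segments (num_out) to precede all device-writable ones (num_in), which is why __virtblk_add_req() lays out the sgs[] table as follows (a sketch; which rows are present depends on the request type and data direction):

	/*
	 * device-readable (num_out)         device-writable (num_in)
	 * -------------------------         ------------------------
	 * out_hdr         (always)          data sg    (reads only)
	 * scsi cmd   (SCSI_CMD only)        sense      (SCSI_CMD only)
	 * data sg    (writes only)          in_hdr     (SCSI_CMD only)
	 *                                   status     (always)
	 */
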
-static inline void virtblk_add_req(struct virtblk_req *vbr,
- unsigned int out, unsigned int in)
+static void virtblk_add_req(struct virtblk_req *vbr, bool have_data)
{
struct virtio_blk *vblk = vbr->vblk;
+ DEFINE_WAIT(wait);
+ int ret;
spin_lock_irq(vblk->disk->queue->queue_lock);
- if (unlikely(virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
- GFP_ATOMIC) < 0)) {
+ while (unlikely((ret = __virtblk_add_req(vblk->vq, vbr, vbr->sg,
+ have_data)) < 0)) {
+ prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+
spin_unlock_irq(vblk->disk->queue->queue_lock);
- virtblk_add_buf_wait(vblk, vbr, out, in);
- return;
+ io_schedule();
+ spin_lock_irq(vblk->disk->queue->queue_lock);
+
+ finish_wait(&vblk->queue_wait, &wait);
}
+
virtqueue_kick(vblk->vq);
spin_unlock_irq(vblk->disk->queue->queue_lock);
}
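
The rewritten virtblk_add_req() keeps the old semantics — sleep when the ring is full — but folds the wait loop into a single place. The sleeper relies on the completion side waking vblk->queue_wait once descriptors are reclaimed; roughly, a sketch of that era's completion callback trimmed to the relevant line:

	static void virtblk_done(struct virtqueue *vq)
	{
		struct virtio_blk *vblk = vq->vdev->priv;

		/* ... reclaim completed requests with virtqueue_get_buf() ... */

		wake_up(&vblk->queue_wait);
	}
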
-static int virtblk_bio_send_flush(struct virtblk_req *vbr)
+static void virtblk_bio_send_flush(struct virtblk_req *vbr)
{
- unsigned int out = 0, in = 0;
-
vbr->flags |= VBLK_IS_FLUSH;
vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
vbr->out_hdr.sector = 0;
vbr->out_hdr.ioprio = 0;
- sg_set_buf(&vbr->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
- sg_set_buf(&vbr->sg[out + in++], &vbr->status, sizeof(vbr->status));
-
- virtblk_add_req(vbr, out, in);
- return 0;
+ virtblk_add_req(vbr, false);
}
-static int virtblk_bio_send_data(struct virtblk_req *vbr)
+static void virtblk_bio_send_data(struct virtblk_req *vbr)
{
struct virtio_blk *vblk = vbr->vblk;
- unsigned int num, out = 0, in = 0;
struct bio *bio = vbr->bio;
+ bool have_data;
vbr->flags &= ~VBLK_IS_FLUSH;
vbr->out_hdr.type = 0;
vbr->out_hdr.sector = bio->bi_sector;
vbr->out_hdr.ioprio = bio_prio(bio);
- sg_set_buf(&vbr->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
-
- num = blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg + out);
-
- sg_set_buf(&vbr->sg[num + out + in++], &vbr->status,
- sizeof(vbr->status));
-
- if (num) {
- if (bio->bi_rw & REQ_WRITE) {
+ if (blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg)) {
+ have_data = true;
+ if (bio->bi_rw & REQ_WRITE)
vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
- out += num;
- } else {
+ else
vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
- in += num;
- }
- }
+ } else
+ have_data = false;
- virtblk_add_req(vbr, out, in);
-
- return 0;
+ virtblk_add_req(vbr, have_data);
}
static void virtblk_bio_send_data_work(struct work_struct *work)
@@ -298,7 +305,7 @@ static void virtblk_done(struct virtqueue *vq)
static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
struct request *req)
{
- unsigned long num, out = 0, in = 0;
+ unsigned int num;
struct virtblk_req *vbr;
vbr = virtblk_alloc_req(vblk, GFP_ATOMIC);
@@ -335,40 +342,15 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
}
}
- sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
-
- /*
- * If this is a packet command we need a couple of additional headers.
- * Behind the normal outhdr we put a segment with the scsi command
- * block, and before the normal inhdr we put the sense data and the
- * inhdr with additional status information before the normal inhdr.
- */
- if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC)
- sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);
-
- num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
-
- if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) {
- sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
- sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
- sizeof(vbr->in_hdr));
- }
-
- sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
- sizeof(vbr->status));
-
+ num = blk_rq_map_sg(q, vbr->req, vblk->sg);
if (num) {
- if (rq_data_dir(vbr->req) == WRITE) {
+ if (rq_data_dir(vbr->req) == WRITE)
vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
- out += num;
- } else {
+ else
vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
- in += num;
- }
}
- if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr,
- GFP_ATOMIC) < 0) {
+ if (__virtblk_add_req(vblk->vq, vbr, vblk->sg, num) < 0) {
mempool_free(vbr, vblk->pool);
return false;
}
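
Note that do_req() now passes the segment count num straight through as the have_data argument: a non-zero count converts to true under C's implicit conversion to bool, so the call collapses the old out/in bookkeeping into a single flag.
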
@@ -539,6 +521,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
struct virtio_device *vdev = vblk->vdev;
struct request_queue *q = vblk->disk->queue;
char cap_str_2[10], cap_str_10[10];
+ char *envp[] = { "RESIZE=1", NULL };
u64 capacity, size;
mutex_lock(&vblk->config_lock);
@@ -568,6 +551,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
set_capacity(vblk->disk, capacity);
revalidate_disk(vblk->disk);
+ kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
done:
mutex_unlock(&vblk->config_lock);
}
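
The new KOBJ_CHANGE uevent carries RESIZE=1 in its environment, so userspace can react to a host-side capacity change without polling — for example, a udev rule matching ACTION=="change" with ENV{RESIZE}=="1" could trigger an online filesystem grow (rule syntax given only as an illustration).
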
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a894f88762d..d89ef86220f 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1617,7 +1617,7 @@ out:
return err;
}
-static int blkif_release(struct gendisk *disk, fmode_t mode)
+static void blkif_release(struct gendisk *disk, fmode_t mode)
{
struct blkfront_info *info = disk->private_data;
struct block_device *bdev;
@@ -1658,7 +1658,6 @@ static int blkif_release(struct gendisk *disk, fmode_t mode)
out:
bdput(bdev);
mutex_unlock(&blkfront_mutex);
- return 0;
}
static const struct block_device_operations xlvbd_block_fops =
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 1f38643173c..f8ef15f37c5 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -915,7 +915,7 @@ static int ace_open(struct block_device *bdev, fmode_t mode)
return 0;
}
-static int ace_release(struct gendisk *disk, fmode_t mode)
+static void ace_release(struct gendisk *disk, fmode_t mode)
{
struct ace_device *ace = disk->private_data;
unsigned long flags;
@@ -932,7 +932,6 @@ static int ace_release(struct gendisk *disk, fmode_t mode)
}
spin_unlock_irqrestore(&ace->lock, flags);
mutex_unlock(&xsysace_mutex);
- return 0;
}
static int ace_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index a22e3f89594..5a95baf4b10 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -309,20 +309,18 @@ err_out:
return rc;
}
-static int
+static void
z2_release(struct gendisk *disk, fmode_t mode)
{
mutex_lock(&z2ram_mutex);
if ( current_device == -1 ) {
mutex_unlock(&z2ram_mutex);
- return 0;
+ return;
}
mutex_unlock(&z2ram_mutex);
/*
* FIXME: unmap memory
*/
-
- return 0;
}
static const struct block_device_operations z2_fops =