Diffstat (limited to 'drivers/block')
-rw-r--r--   drivers/block/aoe/aoe.h             |  11
-rw-r--r--   drivers/block/aoe/aoecmd.c          | 156
-rw-r--r--   drivers/block/aoe/aoedev.c          |   3
-rw-r--r--   drivers/block/aoe/aoenet.c          |   7
-rw-r--r--   drivers/block/cciss.c               |  32
-rw-r--r--   drivers/block/cryptoloop.c          |   2
-rw-r--r--   drivers/block/loop.c                |   2
-rw-r--r--   drivers/block/loop.h                |  85
-rw-r--r--   drivers/block/mtip32xx/mtip32xx.c   |  11
-rw-r--r--   drivers/block/nbd.c                 |  11
-rw-r--r--   drivers/block/nvme-core.c           |  62
-rw-r--r--   drivers/block/nvme-scsi.c           |   3
-rw-r--r--   drivers/block/pktcdvd.c             |   3
-rw-r--r--   drivers/block/rbd.c                 | 221
-rw-r--r--   drivers/block/swim.c                |   2
-rw-r--r--   drivers/block/virtio_blk.c          |   2
-rw-r--r--   drivers/block/xen-blkback/xenbus.c  |   2
-rw-r--r--   drivers/block/xsysace.c             |   4
18 files changed, 433 insertions, 186 deletions
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 175649468c9..025c41d3cb3 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -1,5 +1,5 @@
-/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
-#define VERSION "81"
+/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
+#define VERSION "83"
 #define AOE_MAJOR 152
 #define DEVICE_NAME "aoe"
 
@@ -196,9 +196,11 @@ struct ktstate {
 	struct completion rendez;
 	struct task_struct *task;
 	wait_queue_head_t *waitq;
-	int (*fn) (void);
-	char *name;
+	int (*fn) (int);
+	char name[12];
 	spinlock_t *lock;
+	int id;
+	int active;
 };
 
 int aoeblk_init(void);
@@ -222,6 +224,7 @@ int aoecmd_init(void);
 struct sk_buff *aoecmd_ata_id(struct aoedev *);
 void aoe_freetframe(struct frame *);
 void aoe_flush_iocq(void);
+void aoe_flush_iocq_by_index(int);
 void aoe_end_request(struct aoedev *, struct request *, int);
 int aoe_ktstart(struct ktstate *k);
 void aoe_ktstop(struct ktstate *k);
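The ktstate changes above turn the single global ktio thread into an array of workers: fn() now receives the worker's id, the name is stored inline so each thread can carry a distinct per-index label, and the new id and active fields let threads be spawned lazily. A minimal userspace sketch of the same pattern, with pthreads standing in for kthreads (all names below are illustrative, not part of the driver):

#include <pthread.h>
#include <stdio.h>

#define NWORKERS 4

/* Illustrative stand-in for struct ktstate: each worker gets an id so
 * the shared callback can find its own queue, plus an active flag so
 * workers can be started on demand, as ktio() does in aoecmd.c. */
struct worker {
	pthread_t task;
	int (*fn)(int);		/* was int (*fn)(void) before the patch */
	char name[12];
	int id;
	int active;
};

static struct worker workers[NWORKERS];

static int do_work(int id)
{
	printf("%s: draining queue %d\n", workers[id].name, id);
	return 0;		/* 0 means "queue empty", like ktio() */
}

static void *worker_main(void *vp)
{
	struct worker *w = vp;

	while (w->fn(w->id) > 0)
		;		/* keep servicing until drained */
	return NULL;
}

int main(void)
{
	int i;

	for (i = 0; i < NWORKERS; i++) {
		struct worker *w = &workers[i];

		snprintf(w->name, sizeof(w->name), "ktio%d", i);
		w->fn = do_work;
		w->id = i;
		w->active = 1;
		pthread_create(&w->task, NULL, worker_main, w);
	}
	for (i = 0; i < NWORKERS; i++)
		pthread_join(workers[i].task, NULL);
	return 0;
}

The driver's kthread() below plays the role of worker_main() here, except that it parks on a wait queue instead of exiting when fn() reports an empty queue.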
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index fc803ecbbce..99cb944a002 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
+/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
 /*
  * aoecmd.c
  * Filesystem request handling methods
@@ -35,14 +35,27 @@ module_param(aoe_maxout, int, 0644);
 MODULE_PARM_DESC(aoe_maxout,
 	"Only aoe_maxout outstanding packets for every MAC on eX.Y.");
 
-static wait_queue_head_t ktiowq;
-static struct ktstate kts;
+/* The number of online cpus during module initialization gives us a
+ * convenient heuristic cap on the parallelism used for ktio threads
+ * doing I/O completion. It is not important that the cap equal the
+ * actual number of running CPUs at any given time, but because of CPU
+ * hotplug, we take care to use ncpus instead of using
+ * num_online_cpus() after module initialization.
+ */
+static int ncpus;
+
+/* mutex lock used for synchronization while thread spawning */
+static DEFINE_MUTEX(ktio_spawn_lock);
+
+static wait_queue_head_t *ktiowq;
+static struct ktstate *kts;
 
 /* io completion queue */
-static struct {
+struct iocq_ktio {
 	struct list_head head;
 	spinlock_t lock;
-} iocq;
+};
+static struct iocq_ktio *iocq;
 
 static struct page *empty_page;
 
@@ -1278,23 +1291,36 @@ out:
  * Returns true iff responses needing processing remain.
  */
 static int
-ktio(void)
+ktio(int id)
 {
 	struct frame *f;
 	struct list_head *pos;
 	int i;
+	int actual_id;
 
 	for (i = 0; ; ++i) {
 		if (i == MAXIOC)
 			return 1;
-		if (list_empty(&iocq.head))
+		if (list_empty(&iocq[id].head))
 			return 0;
-		pos = iocq.head.next;
+		pos = iocq[id].head.next;
 		list_del(pos);
-		spin_unlock_irq(&iocq.lock);
 		f = list_entry(pos, struct frame, head);
+		spin_unlock_irq(&iocq[id].lock);
 		ktiocomplete(f);
-		spin_lock_irq(&iocq.lock);
+
+		/* Figure out if extra threads are required. */
+		actual_id = f->t->d->aoeminor % ncpus;
+
+		if (!kts[actual_id].active) {
+			BUG_ON(id != 0);
+			mutex_lock(&ktio_spawn_lock);
+			if (!kts[actual_id].active
+				&& aoe_ktstart(&kts[actual_id]) == 0)
+				kts[actual_id].active = 1;
+			mutex_unlock(&ktio_spawn_lock);
+		}
+		spin_lock_irq(&iocq[id].lock);
 	}
 }
 
@@ -1311,7 +1337,7 @@ kthread(void *vp)
 	complete(&k->rendez);	/* tell spawner we're running */
 	do {
 		spin_lock_irq(k->lock);
-		more = k->fn();
+		more = k->fn(k->id);
 		if (!more) {
 			add_wait_queue(k->waitq, &wait);
 			__set_current_state(TASK_INTERRUPTIBLE);
@@ -1340,7 +1366,7 @@ aoe_ktstart(struct ktstate *k)
 	struct task_struct *task;
 
 	init_completion(&k->rendez);
-	task = kthread_run(kthread, k, k->name);
+	task = kthread_run(kthread, k, "%s", k->name);
 	if (task == NULL || IS_ERR(task))
 		return -ENOMEM;
 	k->task = task;
@@ -1353,13 +1379,24 @@ aoe_ktstart(struct ktstate *k)
 static void
 ktcomplete(struct frame *f, struct sk_buff *skb)
 {
+	int id;
 	ulong flags;
 
 	f->r_skb = skb;
-	spin_lock_irqsave(&iocq.lock, flags);
-	list_add_tail(&f->head, &iocq.head);
-	spin_unlock_irqrestore(&iocq.lock, flags);
-	wake_up(&ktiowq);
+	id = f->t->d->aoeminor % ncpus;
+	spin_lock_irqsave(&iocq[id].lock, flags);
+	if (!kts[id].active) {
+		spin_unlock_irqrestore(&iocq[id].lock, flags);
+		/* The thread with id has not been spawned yet,
+		 * so delegate the work to the main thread and
+		 * try spawning a new thread.
+		 */
+		id = 0;
+		spin_lock_irqsave(&iocq[id].lock, flags);
+	}
+	list_add_tail(&f->head, &iocq[id].head);
+	spin_unlock_irqrestore(&iocq[id].lock, flags);
+	wake_up(&ktiowq[id]);
 }
 
 struct sk_buff *
@@ -1706,6 +1743,17 @@ aoe_failbuf(struct aoedev *d, struct buf *buf)
 void
 aoe_flush_iocq(void)
 {
+	int i;
+
+	for (i = 0; i < ncpus; i++) {
+		if (kts[i].active)
+			aoe_flush_iocq_by_index(i);
+	}
+}
+
+void
+aoe_flush_iocq_by_index(int id)
+{
 	struct frame *f;
 	struct aoedev *d;
 	LIST_HEAD(flist);
@@ -1713,9 +1761,9 @@ aoe_flush_iocq(void)
 	struct sk_buff *skb;
 	ulong flags;
 
-	spin_lock_irqsave(&iocq.lock, flags);
-	list_splice_init(&iocq.head, &flist);
-	spin_unlock_irqrestore(&iocq.lock, flags);
+	spin_lock_irqsave(&iocq[id].lock, flags);
+	list_splice_init(&iocq[id].head, &flist);
+	spin_unlock_irqrestore(&iocq[id].lock, flags);
 	while (!list_empty(&flist)) {
 		pos = flist.next;
 		list_del(pos);
@@ -1738,6 +1786,8 @@ int __init
 aoecmd_init(void)
 {
 	void *p;
+	int i;
+	int ret;
 
 	/* get_zeroed_page returns page with ref count 1 */
 	p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
@@ -1745,22 +1795,72 @@ aoecmd_init(void)
 		return -ENOMEM;
 	empty_page = virt_to_page(p);
 
-	INIT_LIST_HEAD(&iocq.head);
-	spin_lock_init(&iocq.lock);
-	init_waitqueue_head(&ktiowq);
-	kts.name = "aoe_ktio";
-	kts.fn = ktio;
-	kts.waitq = &ktiowq;
-	kts.lock = &iocq.lock;
-	return aoe_ktstart(&kts);
+	ncpus = num_online_cpus();
+
+	iocq = kcalloc(ncpus, sizeof(struct iocq_ktio), GFP_KERNEL);
+	if (!iocq)
+		return -ENOMEM;
+
+	kts = kcalloc(ncpus, sizeof(struct ktstate), GFP_KERNEL);
+	if (!kts) {
+		ret = -ENOMEM;
+		goto kts_fail;
+	}
+
+	ktiowq = kcalloc(ncpus, sizeof(wait_queue_head_t), GFP_KERNEL);
+	if (!ktiowq) {
+		ret = -ENOMEM;
+		goto ktiowq_fail;
+	}
+
+	mutex_init(&ktio_spawn_lock);
+
+	for (i = 0; i < ncpus; i++) {
+		INIT_LIST_HEAD(&iocq[i].head);
+		spin_lock_init(&iocq[i].lock);
+		init_waitqueue_head(&ktiowq[i]);
+		snprintf(kts[i].name, sizeof(kts[i].name), "aoe_ktio%d", i);
+		kts[i].fn = ktio;
+		kts[i].waitq = &ktiowq[i];
+		kts[i].lock = &iocq[i].lock;
+		kts[i].id = i;
+		kts[i].active = 0;
+	}
+	kts[0].active = 1;
+	if (aoe_ktstart(&kts[0])) {
+		ret = -ENOMEM;
+		goto ktstart_fail;
+	}
+	return 0;
+
+ktstart_fail:
+	kfree(ktiowq);
+ktiowq_fail:
+	kfree(kts);
+kts_fail:
+	kfree(iocq);
+
+	return ret;
 }
 
 void
 aoecmd_exit(void)
 {
-	aoe_ktstop(&kts);
+	int i;
+
+	for (i = 0; i < ncpus; i++)
+		if (kts[i].active)
+			aoe_ktstop(&kts[i]);
+
 	aoe_flush_iocq();
+
+	/* Free up the iocq and thread speicific configuration
+	 * allocated during startup.
+	 */
+	kfree(iocq);
+	kfree(kts);
+	kfree(ktiowq);
+
 	free_page((unsigned long) page_address(empty_page));
 	empty_page = NULL;
 }
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 98f2965778b..784c92e038d 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
+/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
 /*
  * aoedev.c
  * AoE device utility functions; maintains device list.
@@ -518,7 +518,6 @@ void
 aoedev_exit(void)
 {
 	flush_scheduled_work();
-	aoe_flush_iocq();
 	flush(NULL, 0, EXITING);
 }
 
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index 71d3ea8d300..63773a90581 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
+/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
 /*
  * aoenet.c
  * Ethernet portion of AoE driver
@@ -52,7 +52,7 @@ static struct sk_buff_head skbtxq;
 
 /* enters with txlock held */
 static int
-tx(void) __must_hold(&txlock)
+tx(int id) __must_hold(&txlock)
 {
 	struct sk_buff *skb;
 	struct net_device *ifp;
@@ -205,7 +205,8 @@ aoenet_init(void)
 	kts.lock = &txlock;
 	kts.fn = tx;
 	kts.waitq = &txwq;
-	kts.name = "aoe_tx";
+	kts.id = 0;
+	snprintf(kts.name, sizeof(kts.name), "aoe_tx%d", kts.id);
 	if (aoe_ktstart(&kts))
 		return -EAGAIN;
 	dev_add_pack(&aoe_pt);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 6374dc10352..62b6c2cc80b 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -168,8 +168,6 @@ static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
 static int cciss_open(struct block_device *bdev, fmode_t mode);
 static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
 static void cciss_release(struct gendisk *disk, fmode_t mode);
-static int do_ioctl(struct block_device *bdev, fmode_t mode,
-		    unsigned int cmd, unsigned long arg);
 static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 		       unsigned int cmd, unsigned long arg);
 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@@ -235,7 +233,7 @@ static const struct block_device_operations cciss_fops = {
 	.owner = THIS_MODULE,
 	.open = cciss_unlocked_open,
 	.release = cciss_release,
-	.ioctl = do_ioctl,
+	.ioctl = cciss_ioctl,
 	.getgeo = cciss_getgeo,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = cciss_compat_ioctl,
@@ -1143,16 +1141,6 @@ static void cciss_release(struct gendisk *disk, fmode_t mode)
 	mutex_unlock(&cciss_mutex);
 }
 
-static int do_ioctl(struct block_device *bdev, fmode_t mode,
-		    unsigned cmd, unsigned long arg)
-{
-	int ret;
-	mutex_lock(&cciss_mutex);
-	ret = cciss_ioctl(bdev, mode, cmd, arg);
-	mutex_unlock(&cciss_mutex);
-	return ret;
-}
-
 #ifdef CONFIG_COMPAT
 
 static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
@@ -1179,7 +1167,7 @@ static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
 	case CCISS_REGNEWD:
 	case CCISS_RESCANDISK:
 	case CCISS_GETLUNINFO:
-		return do_ioctl(bdev, mode, cmd, arg);
+		return cciss_ioctl(bdev, mode, cmd, arg);
 
 	case CCISS_PASSTHRU32:
 		return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
@@ -1219,7 +1207,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
 	if (err)
 		return -EFAULT;
 
-	err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
+	err = cciss_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
 	if (err)
 		return err;
 	err |=
@@ -1261,7 +1249,7 @@ static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
 	if (err)
 		return -EFAULT;
 
-	err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
+	err = cciss_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
 	if (err)
 		return err;
 	err |=
@@ -1311,11 +1299,14 @@ static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp)
 static int cciss_getintinfo(ctlr_info_t *h, void __user *argp)
 {
 	cciss_coalint_struct intinfo;
+	unsigned long flags;
 
 	if (!argp)
 		return -EINVAL;
+	spin_lock_irqsave(&h->lock, flags);
 	intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay);
 	intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount);
+	spin_unlock_irqrestore(&h->lock, flags);
 	if (copy_to_user
 	    (argp, &intinfo, sizeof(cciss_coalint_struct)))
 		return -EFAULT;
@@ -1356,12 +1347,15 @@ static int cciss_setintinfo(ctlr_info_t *h, void __user *argp)
 static int cciss_getnodename(ctlr_info_t *h, void __user *argp)
 {
 	NodeName_type NodeName;
+	unsigned long flags;
 	int i;
 
 	if (!argp)
 		return -EINVAL;
+	spin_lock_irqsave(&h->lock, flags);
 	for (i = 0; i < 16; i++)
 		NodeName[i] = readb(&h->cfgtable->ServerName[i]);
+	spin_unlock_irqrestore(&h->lock, flags);
 	if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
 		return -EFAULT;
 	return 0;
@@ -1398,10 +1392,13 @@ static int cciss_setnodename(ctlr_info_t *h, void __user *argp)
 static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
 {
 	Heartbeat_type heartbeat;
+	unsigned long flags;
 
 	if (!argp)
 		return -EINVAL;
+	spin_lock_irqsave(&h->lock, flags);
 	heartbeat = readl(&h->cfgtable->HeartBeat);
+	spin_unlock_irqrestore(&h->lock, flags);
 	if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type)))
 		return -EFAULT;
 	return 0;
@@ -1410,10 +1407,13 @@ static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
 static int cciss_getbustypes(ctlr_info_t *h, void __user *argp)
 {
 	BusTypes_type BusTypes;
+	unsigned long flags;
 
 	if (!argp)
 		return -EINVAL;
+	spin_lock_irqsave(&h->lock, flags);
 	BusTypes = readl(&h->cfgtable->BusTypes);
+	spin_unlock_irqrestore(&h->lock, flags);
 	if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type)))
 		return -EFAULT;
 	return 0;
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
index 8b6bb764b0a..99e773cb70d 100644
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -25,9 +25,9 @@
 #include <linux/string.h>
 #include <linux/crypto.h>
 #include <linux/blkdev.h>
-#include <linux/loop.h>
 #include <linux/scatterlist.h>
 #include <asm/uaccess.h>
+#include "loop.h"
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("loop blockdevice transferfunction adaptor / CryptoAPI");
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index d92d50fd84b..40e715531aa 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -63,7 +63,6 @@
 #include <linux/init.h>
 #include <linux/swap.h>
 #include <linux/slab.h>
-#include <linux/loop.h>
 #include <linux/compat.h>
 #include <linux/suspend.h>
 #include <linux/freezer.h>
@@ -76,6 +75,7 @@
 #include <linux/sysfs.h>
 #include <linux/miscdevice.h>
 #include <linux/falloc.h>
+#include "loop.h"
 
 #include <asm/uaccess.h>
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
new file mode 100644
index 00000000000..90df5d6485b
--- /dev/null
+++ b/drivers/block/loop.h
@@ -0,0 +1,85 @@
+/*
+ * loop.h
+ *
+ * Written by Theodore Ts'o, 3/29/93.
+ *
+ * Copyright 1993 by Theodore Ts'o. Redistribution of this file is
+ * permitted under the GNU General Public License.
+ */
+#ifndef _LINUX_LOOP_H
+#define _LINUX_LOOP_H
+
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <uapi/linux/loop.h>
+
+/* Possible states of device */
+enum {
+	Lo_unbound,
+	Lo_bound,
+	Lo_rundown,
+};
+
+struct loop_func_table;
+
+struct loop_device {
+	int		lo_number;
+	int		lo_refcnt;
+	loff_t		lo_offset;
+	loff_t		lo_sizelimit;
+	int		lo_flags;
+	int		(*transfer)(struct loop_device *, int cmd,
+				    struct page *raw_page, unsigned raw_off,
+				    struct page *loop_page, unsigned loop_off,
+				    int size, sector_t real_block);
+	char		lo_file_name[LO_NAME_SIZE];
+	char		lo_crypt_name[LO_NAME_SIZE];
+	char		lo_encrypt_key[LO_KEY_SIZE];
+	int		lo_encrypt_key_size;
+	struct loop_func_table *lo_encryption;
+	__u32		lo_init[2];
+	kuid_t		lo_key_owner;	/* Who set the key */
+	int		(*ioctl)(struct loop_device *, int cmd,
+				 unsigned long arg);
+
+	struct file *	lo_backing_file;
+	struct block_device *lo_device;
+	unsigned	lo_blocksize;
+	void		*key_data;
+
+	gfp_t		old_gfp_mask;
+
+	spinlock_t		lo_lock;
+	struct bio_list		lo_bio_list;
+	unsigned int		lo_bio_count;
+	int			lo_state;
+	struct mutex		lo_ctl_mutex;
+	struct task_struct	*lo_thread;
+	wait_queue_head_t	lo_event;
+	/* wait queue for incoming requests */
+	wait_queue_head_t	lo_req_wait;
+
+	struct request_queue	*lo_queue;
+	struct gendisk		*lo_disk;
+};
+
+/* Support for loadable transfer modules */
+struct loop_func_table {
+	int number;	/* filter type */
+	int (*transfer)(struct loop_device *lo, int cmd,
+			struct page *raw_page, unsigned raw_off,
+			struct page *loop_page, unsigned loop_off,
+			int size, sector_t real_block);
+	int (*init)(struct loop_device *, const struct loop_info64 *);
+	/* release is called from loop_unregister_transfer or clr_fd */
+	int (*release)(struct loop_device *);
+	int (*ioctl)(struct loop_device *, int cmd, unsigned long arg);
+	struct module *owner;
+};
+
+int loop_register_transfer(struct loop_func_table *funcs);
+int loop_unregister_transfer(int number);
+
+#endif
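With the header now private to drivers/block, a transfer module includes "loop.h" the same way cryptoloop does above. A hypothetical skeleton against the loop_func_table interface just defined, modeled on loop.c's own transfer_none() (LO_CRYPT_DUMMY is a real filter number from <uapi/linux/loop.h>; the module itself is illustrative only, not part of this series):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include "loop.h"

/* Pass the data through unchanged, as loop.c's transfer_none() does. */
static int dummy_transfer(struct loop_device *lo, int cmd,
			  struct page *raw_page, unsigned raw_off,
			  struct page *loop_page, unsigned loop_off,
			  int size, sector_t real_block)
{
	char *raw_buf = kmap_atomic(raw_page) + raw_off;
	char *loop_buf = kmap_atomic(loop_page) + loop_off;

	if (cmd == READ)
		memcpy(loop_buf, raw_buf, size);
	else
		memcpy(raw_buf, loop_buf, size);

	kunmap_atomic(loop_buf);
	kunmap_atomic(raw_buf);
	return 0;
}

static struct loop_func_table dummy_funcs = {
	.number   = LO_CRYPT_DUMMY,	/* filter type */
	.transfer = dummy_transfer,
	.owner    = THIS_MODULE,
};

static int __init dummy_xfer_init(void)
{
	return loop_register_transfer(&dummy_funcs);
}

static void __exit dummy_xfer_exit(void)
{
	loop_unregister_transfer(LO_CRYPT_DUMMY);
}

module_init(dummy_xfer_init);
module_exit(dummy_xfer_exit);
MODULE_LICENSE("GPL");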
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 847107ef0cc..952dbfe2212 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3002,7 +3002,8 @@ static int mtip_hw_debugfs_init(struct driver_data *dd)
 
 static void mtip_hw_debugfs_exit(struct driver_data *dd)
 {
-	debugfs_remove_recursive(dd->dfs_node);
+	if (dd->dfs_node)
+		debugfs_remove_recursive(dd->dfs_node);
 }
 
@@ -3863,7 +3864,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 	struct driver_data *dd = queue->queuedata;
 	struct scatterlist *sg;
 	struct bio_vec *bvec;
-	int nents = 0;
+	int i, nents = 0;
 	int tag = 0, unaligned = 0;
 
 	if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
@@ -3921,11 +3922,12 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 		}
 
 		/* Create the scatter list for this bio. */
-		bio_for_each_segment(bvec, bio, nents) {
+		bio_for_each_segment(bvec, bio, i) {
 			sg_set_page(&sg[nents],
 					bvec->bv_page,
 					bvec->bv_len,
 					bvec->bv_offset);
+			nents++;
 		}
 
 		/* Issue the read/write. */
@@ -4085,7 +4087,8 @@ skip_create_disk:
 start_service_thread:
 	sprintf(thd_name, "mtip_svc_thd_%02d", index);
 	dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread,
-						dd, dd->numa_node, thd_name);
+						dd, dd->numa_node, "%s",
+						thd_name);
 
 	if (IS_ERR(dd->mtip_svc_handler)) {
 		dev_err(&dd->pdev->dev, "service thread failed to start\n");
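The mtip hunk above and the nbd and xen-blkback hunks below share one fix: kthread_create()/kthread_run() treat their trailing arguments as a printf-style format, so a device name must be passed as data behind a "%s" format rather than as the format itself. A small standalone illustration of the difference, in plain userspace C with invented values:

#include <stdio.h>

int main(void)
{
	char buf[32];
	const char *name = "nbd%d";	/* a name that happens to contain '%' */

	/* Unsafe pattern (what the old code did): the name is the
	 * format string, so its "%d" is interpreted, not printed. */
	snprintf(buf, sizeof(buf), name, 42);
	printf("as format: %s\n", buf);		/* prints "nbd42" */

	/* Safe pattern (what the patch switches to): "%s" is the
	 * format and the name is plain data. */
	snprintf(buf, sizeof(buf), "%s", name);
	printf("as data:   %s\n", buf);		/* prints "nbd%d" */
	return 0;
}

In the kernel the unsafe form can do worse than garble a thread name, since a stray conversion specifier makes the varargs code read arguments that were never passed.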
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 037288e7874..2dc3b5153f0 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -623,8 +623,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		if (!nbd->sock)
 			return -EINVAL;
 
+		nbd->disconnect = 1;
+
 		nbd_send_req(nbd, &sreq);
-		return 0;
+		return 0;
 	}
 
 	case NBD_CLEAR_SOCK: {
@@ -654,6 +656,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 			nbd->sock = SOCKET_I(inode);
 			if (max_part > 0)
 				bdev->bd_invalidated = 1;
+			nbd->disconnect = 0; /* we're connected now */
 			return 0;
 		} else {
 			fput(file);
@@ -714,7 +717,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		else
 			blk_queue_flush(nbd->disk->queue, 0);
 
-		thread = kthread_create(nbd_thread, nbd, nbd->disk->disk_name);
+		thread = kthread_create(nbd_thread, nbd, "%s",
+					nbd->disk->disk_name);
 		if (IS_ERR(thread)) {
 			mutex_lock(&nbd->tx_lock);
 			return PTR_ERR(thread);
@@ -742,6 +746,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		set_capacity(nbd->disk, 0);
 		if (max_part > 0)
 			ioctl_by_bdev(bdev, BLKRRPART, 0);
+		if (nbd->disconnect) /* user requested, ignore socket errors */
+			return 0;
 		return nbd->harderror;
 	}
 
@@ -750,7 +756,6 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		 * This is for compatibility only.  The queue is always cleared
 		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
 		 */
-		BUG_ON(!nbd->sock && !list_empty(&nbd->queue_head));
 		return 0;
 
 	case NBD_PRINT_DEBUG:
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 8efdfaa44a5..ce79a590b45 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -629,7 +629,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	struct nvme_command *cmnd;
 	struct nvme_iod *iod;
 	enum dma_data_direction dma_dir;
-	int cmdid, length, result = -ENOMEM;
+	int cmdid, length, result;
 	u16 control;
 	u32 dsmgmt;
 	int psegs = bio_phys_segments(ns->queue, bio);
@@ -640,6 +640,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 		return result;
 	}
 
+	result = -ENOMEM;
 	iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
 	if (!iod)
 		goto nomem;
@@ -977,6 +978,8 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
 		if (timeout && !time_after(now, info[cmdid].timeout))
 			continue;
+		if (info[cmdid].ctx == CMD_CTX_CANCELLED)
+			continue;
 		dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
 		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
 		fn(nvmeq->dev, ctx, &cqe);
@@ -1206,7 +1209,7 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
 
 	if (addr & 3)
 		return ERR_PTR(-EINVAL);
-	if (!length)
+	if (!length || length > INT_MAX - PAGE_SIZE)
 		return ERR_PTR(-EINVAL);
 
 	offset = offset_in_page(addr);
@@ -1227,7 +1230,8 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
 	sg_init_table(sg, count);
 	for (i = 0; i < count; i++) {
 		sg_set_page(&sg[i], pages[i],
-				min_t(int, length, PAGE_SIZE - offset), offset);
+				min_t(unsigned, length, PAGE_SIZE - offset),
+				offset);
 		length -= (PAGE_SIZE - offset);
 		offset = 0;
 	}
@@ -1435,7 +1439,7 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
 		nvme_free_iod(dev, iod);
 	}
 
-	if (!status && copy_to_user(&ucmd->result, &cmd.result,
+	if ((status >= 0) && copy_to_user(&ucmd->result, &cmd.result,
 							sizeof(cmd.result)))
 		status = -EFAULT;
 
@@ -1633,7 +1637,8 @@ static int set_queue_count(struct nvme_dev *dev, int count)
 
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
-	int result, cpu, i, nr_io_queues, db_bar_size, q_depth;
+	struct pci_dev *pdev = dev->pci_dev;
+	int result, cpu, i, nr_io_queues, db_bar_size, q_depth, q_count;
 
 	nr_io_queues = num_online_cpus();
 	result = set_queue_count(dev, nr_io_queues);
@@ -1642,14 +1647,14 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	if (result < nr_io_queues)
 		nr_io_queues = result;
 
+	q_count = nr_io_queues;
 	/* Deregister the admin queue's interrupt */
 	free_irq(dev->entry[0].vector, dev->queues[0]);
 
 	db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
 	if (db_bar_size > 8192) {
 		iounmap(dev->bar);
-		dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
-								db_bar_size);
+		dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size);
 		dev->dbs = ((void __iomem *)dev->bar) + 4096;
 		dev->queues[0]->q_db = dev->dbs;
 	}
@@ -1657,19 +1662,36 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	for (i = 0; i < nr_io_queues; i++)
 		dev->entry[i].entry = i;
 	for (;;) {
-		result = pci_enable_msix(dev->pci_dev, dev->entry,
-								nr_io_queues);
+		result = pci_enable_msix(pdev, dev->entry, nr_io_queues);
 		if (result == 0) {
 			break;
 		} else if (result > 0) {
 			nr_io_queues = result;
 			continue;
 		} else {
-			nr_io_queues = 1;
+			nr_io_queues = 0;
 			break;
 		}
 	}
 
+	if (nr_io_queues == 0) {
+		nr_io_queues = q_count;
+		for (;;) {
+			result = pci_enable_msi_block(pdev, nr_io_queues);
+			if (result == 0) {
+				for (i = 0; i < nr_io_queues; i++)
+					dev->entry[i].vector = i + pdev->irq;
+				break;
+			} else if (result > 0) {
+				nr_io_queues = result;
+				continue;
+			} else {
+				nr_io_queues = 1;
+				break;
+			}
+		}
+	}
+
 	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
 	/* XXX: handle failure here */
 
@@ -1850,7 +1872,10 @@ static void nvme_free_dev(struct kref *kref)
 {
 	struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
 	nvme_dev_remove(dev);
-	pci_disable_msix(dev->pci_dev);
+	if (dev->pci_dev->msi_enabled)
+		pci_disable_msi(dev->pci_dev);
+	else if (dev->pci_dev->msix_enabled)
+		pci_disable_msix(dev->pci_dev);
 	iounmap(dev->bar);
 	nvme_release_instance(dev);
 	nvme_release_prp_pools(dev);
@@ -1923,8 +1948,14 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	INIT_LIST_HEAD(&dev->namespaces);
 	dev->pci_dev = pdev;
 	pci_set_drvdata(pdev, dev);
-	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
-	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+	else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
+		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	else
+		goto disable;
+
 	result = nvme_set_instance(dev);
 	if (result)
 		goto disable;
@@ -1977,7 +2008,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  unmap:
 	iounmap(dev->bar);
  disable_msix:
-	pci_disable_msix(pdev);
+	if (dev->pci_dev->msi_enabled)
+		pci_disable_msi(dev->pci_dev);
+	else if (dev->pci_dev->msix_enabled)
+		pci_disable_msix(dev->pci_dev);
 	nvme_release_instance(dev);
 	nvme_release_prp_pools(dev);
 disable:
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index fed54b03989..102de2f52b5 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -44,7 +44,6 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/types.h>
-#include <linux/version.h>
 #include <scsi/sg.h>
 #include <scsi/scsi.h>
 
@@ -1654,7 +1653,7 @@ static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
 	}
 }
 
-static u16 nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 				u8 *mode_page, u8 page_code)
 {
 	int res = SNTI_TRANSLATION_SUCCESS;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 3c08983e600..f5d0ea11d9f 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -83,7 +83,8 @@
 
 #define MAX_SPEED 0xffff
 
-#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1))
+#define ZONE(sector, pd) (((sector) + (pd)->offset) & \
+			~(sector_t)((pd)->settings.size - 1))
 
 static DEFINE_MUTEX(pktcdvd_mutex);
 static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
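The pktcdvd hunk is a truncation fix worth spelling out: settings.size is a 32-bit quantity, so in the old macro ~(size - 1) was computed as a 32-bit mask and then zero-extended, wiping the upper half of a 64-bit sector_t when the two were ANDed. Casting to sector_t before the complement keeps the high bits. A standalone demonstration with illustrative values:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

int main(void)
{
	uint32_t size = 128;			/* packet size in sectors */
	sector_t sector = 0x12345678ABCDULL;	/* needs more than 32 bits */

	/* Old macro: ~ happens in 32 bits, zero-extension then makes
	 * the AND clear the high half of the sector number. */
	sector_t bad = sector & ~(size - 1);

	/* Fixed macro: widen before complementing; high bits survive. */
	sector_t good = sector & ~(sector_t)(size - 1);

	printf("bad:  %#llx\n", (unsigned long long)bad);   /* 0x5678ab80 */
	printf("good: %#llx\n", (unsigned long long)good);  /* 0x12345678ab80 */
	return 0;
}

With the old macro every zone number wrapped once the device crossed the 32-bit sector boundary, which is why the cast, not a wider local variable, is the right fix inside a macro.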
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index d6d314027b5..4ad2ad9a5bb 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -372,7 +372,7 @@ enum rbd_dev_flags {
 	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
 };
 
-static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */
+static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */
 
 static LIST_HEAD(rbd_dev_list);    /* devices */
 static DEFINE_SPINLOCK(rbd_dev_list_lock);
@@ -489,10 +489,8 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
 	if (removing)
 		return -ENOENT;
 
-	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
 	(void) get_device(&rbd_dev->dev);
 	set_device_ro(bdev, rbd_dev->mapping.read_only);
-	mutex_unlock(&ctl_mutex);
 
 	return 0;
 }
@@ -507,9 +505,7 @@ static void rbd_release(struct gendisk *disk, fmode_t mode)
 	spin_unlock_irq(&rbd_dev->lock);
 	rbd_assert(open_count_before > 0);
 
-	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
 	put_device(&rbd_dev->dev);
-	mutex_unlock(&ctl_mutex);
 }
 
 static const struct block_device_operations rbd_bd_ops = {
@@ -519,8 +515,8 @@ static const struct block_device_operations rbd_bd_ops = {
 };
 
 /*
- * Initialize an rbd client instance.
- * We own *ceph_opts.
+ * Initialize an rbd client instance.  Success or not, this function
+ * consumes ceph_opts.  Caller holds client_mutex.
  */
 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
 {
@@ -535,30 +531,25 @@ static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
 	kref_init(&rbdc->kref);
 	INIT_LIST_HEAD(&rbdc->node);
 
-	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
 	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
 	if (IS_ERR(rbdc->client))
-		goto out_mutex;
+		goto out_rbdc;
 	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
 
 	ret = ceph_open_session(rbdc->client);
 	if (ret < 0)
-		goto out_err;
+		goto out_client;
 
 	spin_lock(&rbd_client_list_lock);
 	list_add_tail(&rbdc->node, &rbd_client_list);
 	spin_unlock(&rbd_client_list_lock);
 
-	mutex_unlock(&ctl_mutex);
-
 	dout("%s: rbdc %p\n", __func__, rbdc);
 
 	return rbdc;
-
-out_err:
+out_client:
 	ceph_destroy_client(rbdc->client);
-out_mutex:
-	mutex_unlock(&ctl_mutex);
+out_rbdc:
 	kfree(rbdc);
 out_opt:
 	if (ceph_opts)
@@ -675,17 +666,20 @@ static int parse_rbd_opts_token(char *c, void *private)
 
 /*
  * Get a ceph client with specific addr and configuration, if one does
- * not exist create it.
+ * not exist create it.  Either way, ceph_opts is consumed by this
+ * function.
  */
 static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
 {
 	struct rbd_client *rbdc;
 
+	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
 	rbdc = rbd_client_find(ceph_opts);
 	if (rbdc)	/* using an existing client */
 		ceph_destroy_options(ceph_opts);
 	else
 		rbdc = rbd_client_create(ceph_opts);
+	mutex_unlock(&client_mutex);
 
 	return rbdc;
 }
@@ -839,7 +833,6 @@ static int rbd_header_from_disk(struct rbd_device *rbd_dev,
 
 	/* We won't fail any more, fill in the header */
 
-	down_write(&rbd_dev->header_rwsem);
 	if (first_time) {
 		header->object_prefix = object_prefix;
 		header->obj_order = ondisk->options.order;
@@ -868,8 +861,6 @@ static int rbd_header_from_disk(struct rbd_device *rbd_dev,
 	if (rbd_dev->mapping.size != header->image_size)
 		rbd_dev->mapping.size = header->image_size;
 
-	up_write(&rbd_dev->header_rwsem);
-
 	return 0;
 out_2big:
 	ret = -EIO;
@@ -1035,12 +1026,16 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
 	char *name;
 	u64 segment;
 	int ret;
+	char *name_format;
 
 	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
 	if (!name)
 		return NULL;
 	segment = offset >> rbd_dev->header.obj_order;
-	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
+	name_format = "%s.%012llx";
+	if (rbd_dev->image_format == 2)
+		name_format = "%s.%016llx";
+	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
 			rbd_dev->header.object_prefix, segment);
 	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
 		pr_err("error formatting segment name for #%llu (%d)\n",
@@ -1121,6 +1116,7 @@ static void zero_bio_chain(struct bio *chain, int start_ofs)
 				buf = bvec_kmap_irq(bv, &flags);
 				memset(buf + remainder, 0,
 				       bv->bv_len - remainder);
+				flush_dcache_page(bv->bv_page);
 				bvec_kunmap_irq(buf, &flags);
 			}
 			pos += bv->bv_len;
@@ -1148,11 +1144,12 @@ static void zero_pages(struct page **pages, u64 offset, u64 end)
 		unsigned long flags;
 		void *kaddr;
 
-		page_offset = (size_t)(offset & ~PAGE_MASK);
-		length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
+		page_offset = offset & ~PAGE_MASK;
+		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
 		local_irq_save(flags);
 		kaddr = kmap_atomic(*page);
 		memset(kaddr + page_offset, 0, length);
+		flush_dcache_page(*page);
 		kunmap_atomic(kaddr);
 		local_irq_restore(flags);
 
@@ -2166,9 +2163,9 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
 	struct rbd_obj_request *obj_request = NULL;
 	struct rbd_obj_request *next_obj_request;
 	bool write_request = img_request_write_test(img_request);
-	struct bio *bio_list;
+	struct bio *bio_list = 0;
 	unsigned int bio_offset = 0;
-	struct page **pages;
+	struct page **pages = 0;
 	u64 img_offset;
 	u64 resid;
 	u16 opcode;
@@ -2247,13 +2244,17 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
 					obj_request->pages, length,
 					offset & ~PAGE_MASK, false, false);
 
+		/*
+		 * set obj_request->img_request before formatting
+		 * the osd_request so that it gets the right snapc
+		 */
+		rbd_img_obj_request_add(img_request, obj_request);
 		if (write_request)
 			rbd_osd_req_format_write(obj_request);
 		else
 			rbd_osd_req_format_read(obj_request);
 
 		obj_request->img_offset = img_offset;
-		rbd_img_obj_request_add(img_request, obj_request);
 
 		img_offset += length;
 		resid -= length;
@@ -2526,6 +2527,7 @@ static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
 	 */
 	orig_request = obj_request->obj_request;
 	obj_request->obj_request = NULL;
+	rbd_obj_request_put(orig_request);
 	rbd_assert(orig_request);
 	rbd_assert(orig_request->img_request);
@@ -2546,7 +2548,6 @@ static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
 	if (!rbd_dev->parent_overlap) {
 		struct ceph_osd_client *osdc;
 
-		rbd_obj_request_put(orig_request);
 		osdc = &rbd_dev->rbd_client->client->osdc;
 		result = rbd_obj_request_submit(osdc, orig_request);
 		if (!result)
@@ -2576,7 +2577,6 @@ static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
 out:
 	if (orig_request->result)
 		rbd_obj_request_complete(orig_request);
-	rbd_obj_request_put(orig_request);
 }
 
 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
@@ -2850,7 +2850,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
 			(unsigned int)opcode);
 	ret = rbd_dev_refresh(rbd_dev);
 	if (ret)
-		rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret);
+		rbd_warn(rbd_dev, "header refresh error (%d)\n", ret);
 
 	rbd_obj_notify_ack(rbd_dev, notify_id);
 }
@@ -3330,8 +3330,8 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
 	int ret;
 
 	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+	down_write(&rbd_dev->header_rwsem);
 	mapping_size = rbd_dev->mapping.size;
-	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
 	if (rbd_dev->image_format == 1)
 		ret = rbd_dev_v1_header_info(rbd_dev);
 	else
@@ -3340,7 +3340,8 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
 	/* If it's a mapped snapshot, validate its EXISTS flag */
 
 	rbd_exists_validate(rbd_dev);
-	mutex_unlock(&ctl_mutex);
+	up_write(&rbd_dev->header_rwsem);
+
 	if (mapping_size != rbd_dev->mapping.size) {
 		sector_t size;
 
@@ -3804,6 +3805,7 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
 	void *end;
 	u64 pool_id;
 	char *image_id;
+	u64 snap_id;
 	u64 overlap;
 	int ret;
 
@@ -3863,24 +3865,56 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
 			(unsigned long long)pool_id, U32_MAX);
 		goto out_err;
 	}
-	parent_spec->pool_id = pool_id;
 
 	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
 	if (IS_ERR(image_id)) {
 		ret = PTR_ERR(image_id);
 		goto out_err;
 	}
-	parent_spec->image_id = image_id;
-	ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
+	ceph_decode_64_safe(&p, end, snap_id, out_err);
 	ceph_decode_64_safe(&p, end, overlap, out_err);
 
-	if (overlap) {
-		rbd_spec_put(rbd_dev->parent_spec);
+	/*
+	 * The parent won't change (except when the clone is
+	 * flattened, already handled that).  So we only need to
+	 * record the parent spec we have not already done so.
+	 */
+	if (!rbd_dev->parent_spec) {
+		parent_spec->pool_id = pool_id;
+		parent_spec->image_id = image_id;
+		parent_spec->snap_id = snap_id;
 		rbd_dev->parent_spec = parent_spec;
 		parent_spec = NULL;	/* rbd_dev now owns this */
-		rbd_dev->parent_overlap = overlap;
-	} else {
-		rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0\n");
+	}
+
+	/*
+	 * We always update the parent overlap.  If it's zero we
+	 * treat it specially.
+	 */
+	rbd_dev->parent_overlap = overlap;
+	smp_mb();
+	if (!overlap) {
+
+		/* A null parent_spec indicates it's the initial probe */
+
+		if (parent_spec) {
+			/*
+			 * The overlap has become zero, so the clone
+			 * must have been resized down to 0 at some
+			 * point.  Treat this the same as a flatten.
+			 */
+			rbd_dev_parent_put(rbd_dev);
+			pr_info("%s: clone image now standalone\n",
+				rbd_dev->disk->disk_name);
+		} else {
+			/*
+			 * For the initial probe, if we find the
+			 * overlap is zero we just pretend there was
+			 * no parent image.
+			 */
+			rbd_warn(rbd_dev, "ignoring parent of "
+				"clone with overlap 0\n");
+		}
 	}
 out:
 	ret = 0;
@@ -4236,12 +4270,14 @@ static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
 	bool first_time = rbd_dev->header.object_prefix == NULL;
 	int ret;
 
-	down_write(&rbd_dev->header_rwsem);
+	ret = rbd_dev_v2_image_size(rbd_dev);
+	if (ret)
+		return ret;
 
 	if (first_time) {
 		ret = rbd_dev_v2_header_onetime(rbd_dev);
 		if (ret)
-			goto out;
+			return ret;
 	}
 
 	/*
@@ -4256,7 +4292,7 @@ static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
 
 		ret = rbd_dev_v2_parent_info(rbd_dev);
 		if (ret)
-			goto out;
+			return ret;
 
 		/*
 		 * Print a warning if this is the initial probe and
@@ -4271,18 +4307,12 @@ static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
 				"is EXPERIMENTAL!");
 	}
 
-	ret = rbd_dev_v2_image_size(rbd_dev);
-	if (ret)
-		goto out;
-
 	if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
 		if (rbd_dev->mapping.size != rbd_dev->header.image_size)
 			rbd_dev->mapping.size = rbd_dev->header.image_size;
 
 	ret = rbd_dev_v2_snap_context(rbd_dev);
 	dout("rbd_dev_v2_snap_context returned %d\n", ret);
-out:
-	up_write(&rbd_dev->header_rwsem);
 
 	return ret;
 }
@@ -4292,8 +4322,6 @@ static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
 	struct device *dev;
 	int ret;
 
-	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
 	dev = &rbd_dev->dev;
 	dev->bus = &rbd_bus_type;
 	dev->type = &rbd_device_type;
@@ -4302,8 +4330,6 @@ static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
 	dev_set_name(dev, "%d", rbd_dev->dev_id);
 	ret = device_register(dev);
 
-	mutex_unlock(&ctl_mutex);
-
 	return ret;
 }
@@ -4697,8 +4723,10 @@ out:
 	return ret;
 }
 
-/* Undo whatever state changes are made by v1 or v2 image probe */
-
+/*
+ * Undo whatever state changes are made by v1 or v2 header info
+ * call.
+ */
 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
 {
 	struct rbd_image_header *header;
@@ -4902,9 +4930,10 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
 	int tmp;
 
 	/*
-	 * Get the id from the image id object.  If it's not a
-	 * format 2 image, we'll get ENOENT back, and we'll assume
-	 * it's a format 1 image.
+	 * Get the id from the image id object.  Unless there's an
+	 * error, rbd_dev->spec->image_id will be filled in with
+	 * a dynamically-allocated string, and rbd_dev->image_format
+	 * will be set to either 1 or 2.
 	 */
 	ret = rbd_dev_image_id(rbd_dev);
 	if (ret)
@@ -4992,7 +5021,6 @@ static ssize_t rbd_add(struct bus_type *bus,
 		rc = PTR_ERR(rbdc);
 		goto err_out_args;
 	}
-	ceph_opts = NULL;	/* rbd_dev client now owns this */
 
 	/* pick the pool */
 	osdc = &rbdc->client->osdc;
@@ -5027,18 +5055,18 @@ static ssize_t rbd_add(struct bus_type *bus,
 	rbd_dev->mapping.read_only = read_only;
 
 	rc = rbd_dev_device_setup(rbd_dev);
-	if (!rc)
-		return count;
+	if (rc) {
+		rbd_dev_image_release(rbd_dev);
+		goto err_out_module;
+	}
+
+	return count;
 
-	rbd_dev_image_release(rbd_dev);
 err_out_rbd_dev:
 	rbd_dev_destroy(rbd_dev);
 err_out_client:
 	rbd_put_client(rbdc);
 err_out_args:
-	if (ceph_opts)
-		ceph_destroy_options(ceph_opts);
-
 	kfree(rbd_opts);
 	rbd_spec_put(spec);
 err_out_module:
 	module_put(THIS_MODULE);
@@ -5048,23 +5076,6 @@ err_out_module:
 	return (ssize_t)rc;
 }
 
-static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
-{
-	struct list_head *tmp;
-	struct rbd_device *rbd_dev;
-
-	spin_lock(&rbd_dev_list_lock);
-	list_for_each(tmp, &rbd_dev_list) {
-		rbd_dev = list_entry(tmp, struct rbd_device, node);
-		if (rbd_dev->dev_id == dev_id) {
-			spin_unlock(&rbd_dev_list_lock);
-			return rbd_dev;
-		}
-	}
-	spin_unlock(&rbd_dev_list_lock);
-	return NULL;
-}
-
 static void rbd_dev_device_release(struct device *dev)
 {
 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
@@ -5109,8 +5120,10 @@ static ssize_t rbd_remove(struct bus_type *bus,
 			  size_t count)
 {
 	struct rbd_device *rbd_dev = NULL;
-	int target_id;
+	struct list_head *tmp;
+	int dev_id;
 	unsigned long ul;
+	bool already = false;
 	int ret;
 
 	ret = strict_strtoul(buf, 10, &ul);
@@ -5118,37 +5131,40 @@ static ssize_t rbd_remove(struct bus_type *bus,
 		return ret;
 
 	/* convert to int; abort if we lost anything in the conversion */
-	target_id = (int) ul;
-	if (target_id != ul)
+	dev_id = (int)ul;
+	if (dev_id != ul)
 		return -EINVAL;
 
-	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
-	rbd_dev = __rbd_get_dev(target_id);
-	if (!rbd_dev) {
-		ret = -ENOENT;
-		goto done;
+	ret = -ENOENT;
+	spin_lock(&rbd_dev_list_lock);
+	list_for_each(tmp, &rbd_dev_list) {
+		rbd_dev = list_entry(tmp, struct rbd_device, node);
+		if (rbd_dev->dev_id == dev_id) {
+			ret = 0;
+			break;
+		}
+	}
+	if (!ret) {
+		spin_lock_irq(&rbd_dev->lock);
+		if (rbd_dev->open_count)
+			ret = -EBUSY;
+		else
+			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
+							&rbd_dev->flags);
+		spin_unlock_irq(&rbd_dev->lock);
 	}
+	spin_unlock(&rbd_dev_list_lock);
+	if (ret < 0 || already)
+		return ret;
 
-	spin_lock_irq(&rbd_dev->lock);
-	if (rbd_dev->open_count)
-		ret = -EBUSY;
-	else
-		set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
-	spin_unlock_irq(&rbd_dev->lock);
-	if (ret < 0)
-		goto done;
 	rbd_bus_del_dev(rbd_dev);
 	ret = rbd_dev_header_watch_sync(rbd_dev, false);
 	if (ret)
 		rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
 	rbd_dev_image_release(rbd_dev);
 	module_put(THIS_MODULE);
-	ret = count;
-done:
-	mutex_unlock(&ctl_mutex);
 
-	return ret;
+	return count;
 }
 
 /*
@@ -5256,6 +5272,7 @@ static void __exit rbd_exit(void)
 module_init(rbd_init);
 module_exit(rbd_exit);
 
+MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
 MODULE_DESCRIPTION("rados block device");
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 2f445b7a174..8ed6ccb748c 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -893,7 +893,7 @@ static int swim_probe(struct platform_device *dev)
 
 	swim_base = ioremap(res->start, resource_size(res));
 	if (!swim_base) {
-		return -ENOMEM;
+		ret = -ENOMEM;
 		goto out_release_io;
 	}
 
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 64723953e1c..5cdf88b7ad9 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -20,7 +20,7 @@ module_param(use_bio, bool, S_IRUGO);
 static int major;
 static DEFINE_IDA(vd_index_ida);
 
-struct workqueue_struct *virtblk_wq;
+static struct workqueue_struct *virtblk_wq;
 
 struct virtio_blk
 {
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 8bfd1bcf95e..04608a6502d 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -93,7 +93,7 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
 	}
 	invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);
 
-	blkif->xenblkd = kthread_run(xen_blkif_schedule, blkif, name);
+	blkif->xenblkd = kthread_run(xen_blkif_schedule, blkif, "%s", name);
 	if (IS_ERR(blkif->xenblkd)) {
 		err = PTR_ERR(blkif->xenblkd);
 		blkif->xenblkd = NULL;
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 3fd130fdfbc..1393b8871a2 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -407,7 +407,7 @@ static void ace_dump_regs(struct ace_device *ace)
 		 ace_in32(ace, ACE_CFGLBA), ace_in(ace, ACE_FATSTAT));
 }
 
-void ace_fix_driveid(u16 *id)
+static void ace_fix_driveid(u16 *id)
 {
 #if defined(__BIG_ENDIAN)
 	int i;
@@ -463,7 +463,7 @@ static inline void ace_fsm_yieldirq(struct ace_device *ace)
 }
 
 /* Get the next read/write request; ending requests that we don't handle */
-struct request *ace_get_next_request(struct request_queue * q)
+static struct request *ace_get_next_request(struct request_queue *q)
 {
 	struct request *req;