From 12a057321529df2fb650ac5f34dfd7abcca667df Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Mar 2006 13:38:01 -0500 Subject: [PATCH] keep sync and async cfq_queue separate Signed-off-by: Al Viro --- include/linux/blkdev.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux/blkdev.h') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 860e7a485a5..e19cb631084 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -58,7 +58,7 @@ struct cfq_io_context { * circular list of cfq_io_contexts belonging to a process io context */ struct list_head list; - struct cfq_queue *cfqq; + struct cfq_queue *cfqq[2]; void *key; struct io_context *ioc; -- cgit v1.2.3-70-g09d2 From d9ff41879364cfca7c15abc20ae398e35de3f883 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Mar 2006 13:51:22 -0500 Subject: [PATCH] make cfq_exit_queue() prune the cfq_io_context for that queue Signed-off-by: Al Viro --- block/cfq-iosched.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++++- include/linux/blkdev.h | 2 ++ 2 files changed, 60 insertions(+), 1 deletion(-) (limited to 'include/linux/blkdev.h') diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 3bacf4bb7dd..3fc6e505e9c 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -177,6 +177,8 @@ struct cfq_data { unsigned int cfq_slice_async_rq; unsigned int cfq_slice_idle; unsigned int cfq_max_depth; + + struct list_head cic_list; }; /* @@ -1215,7 +1217,12 @@ static void cfq_free_io_context(struct cfq_io_context *cic) static void cfq_exit_single_io_context(struct cfq_io_context *cic) { struct cfq_data *cfqd = cic->key; - request_queue_t *q = cfqd->queue; + request_queue_t *q; + + if (!cfqd) + return; + + q = cfqd->queue; WARN_ON(!irqs_disabled()); @@ -1236,6 +1243,7 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic) } cic->key = NULL; + list_del_init(&cic->queue_list); spin_unlock(q->queue_lock); } @@ -1254,12 +1262,14 @@ static void cfq_exit_io_context(struct cfq_io_context *cic) /* * put the reference this task is holding to the various queues */ + read_lock(&cfq_exit_lock); list_for_each(entry, &cic->list) { __cic = list_entry(entry, struct cfq_io_context, list); cfq_exit_single_io_context(__cic); } cfq_exit_single_io_context(cic); + read_unlock(&cfq_exit_lock); local_irq_restore(flags); } @@ -1279,6 +1289,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) cic->ttime_mean = 0; cic->dtor = cfq_free_io_context; cic->exit = cfq_exit_io_context; + INIT_LIST_HEAD(&cic->queue_list); } return cic; @@ -1446,6 +1457,7 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) if (!ioc) return NULL; +restart: if ((cic = ioc->cic) == NULL) { cic = cfq_alloc_io_context(cfqd, gfp_mask); @@ -1461,6 +1473,7 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) read_lock(&cfq_exit_lock); ioc->set_ioprio = cfq_ioc_set_ioprio; ioc->cic = cic; + list_add(&cic->queue_list, &cfqd->cic_list); read_unlock(&cfq_exit_lock); } else { struct cfq_io_context *__cic; @@ -1471,6 +1484,19 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) if (cic->key == cfqd) goto out; + if (unlikely(!cic->key)) { + read_lock(&cfq_exit_lock); + if (list_empty(&cic->list)) + ioc->cic = NULL; + else + ioc->cic = list_entry(cic->list.next, + struct cfq_io_context, + list); + read_unlock(&cfq_exit_lock); + kmem_cache_free(cfq_ioc_pool, cic); + goto restart; + } + /* * cic exists, check if we already are there. 
linear search * should be ok here, the list will usually not be more than @@ -1485,6 +1511,13 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) cic = __cic; goto out; } + if (unlikely(!__cic->key)) { + read_lock(&cfq_exit_lock); + list_del(&__cic->list); + read_unlock(&cfq_exit_lock); + kmem_cache_free(cfq_ioc_pool, __cic); + goto restart; + } } /* @@ -1499,6 +1532,7 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) __cic->key = cfqd; read_lock(&cfq_exit_lock); list_add(&__cic->list, &cic->list); + list_add(&__cic->queue_list, &cfqd->cic_list); read_unlock(&cfq_exit_lock); cic = __cic; } @@ -2104,8 +2138,30 @@ static void cfq_put_cfqd(struct cfq_data *cfqd) static void cfq_exit_queue(elevator_t *e) { struct cfq_data *cfqd = e->elevator_data; + request_queue_t *q = cfqd->queue; cfq_shutdown_timer_wq(cfqd); + write_lock(&cfq_exit_lock); + spin_lock_irq(q->queue_lock); + if (cfqd->active_queue) + __cfq_slice_expired(cfqd, cfqd->active_queue, 0); + while(!list_empty(&cfqd->cic_list)) { + struct cfq_io_context *cic = list_entry(cfqd->cic_list.next, + struct cfq_io_context, + queue_list); + if (cic->cfqq[ASYNC]) { + cfq_put_queue(cic->cfqq[ASYNC]); + cic->cfqq[ASYNC] = NULL; + } + if (cic->cfqq[SYNC]) { + cfq_put_queue(cic->cfqq[SYNC]); + cic->cfqq[SYNC] = NULL; + } + cic->key = NULL; + list_del_init(&cic->queue_list); + } + spin_unlock_irq(q->queue_lock); + write_unlock(&cfq_exit_lock); cfq_put_cfqd(cfqd); } @@ -2127,6 +2183,7 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e) INIT_LIST_HEAD(&cfqd->cur_rr); INIT_LIST_HEAD(&cfqd->idle_rr); INIT_LIST_HEAD(&cfqd->empty_list); + INIT_LIST_HEAD(&cfqd->cic_list); cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL); if (!cfqd->crq_hash) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index e19cb631084..80518f70353 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -69,6 +69,8 @@ struct cfq_io_context { unsigned long ttime_samples; unsigned long ttime_mean; + struct list_head queue_list; + void (*dtor)(struct cfq_io_context *); void (*exit)(struct cfq_io_context *); }; -- cgit v1.2.3-70-g09d2 From 483f4afc421435b7cfe5e88f74eea0b73a476d75 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Mar 2006 18:34:37 -0500 Subject: [PATCH] fix sysfs interaction and lifetime rules handling for queues --- block/ll_rw_blk.c | 83 +++++++++++++++++++++++++++++++++++--------------- include/linux/blkdev.h | 6 ++-- 2 files changed, 61 insertions(+), 28 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 6dc76918205..6c793b196aa 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -1740,16 +1740,11 @@ EXPORT_SYMBOL(blk_run_queue); * Hopefully the low level driver will have finished any * outstanding requests first... 
**/ -void blk_cleanup_queue(request_queue_t * q) +static void blk_release_queue(struct kobject *kobj) { + request_queue_t *q = container_of(kobj, struct request_queue, kobj); struct request_list *rl = &q->rq; - if (!atomic_dec_and_test(&q->refcnt)) - return; - - if (q->elevator) - elevator_exit(q->elevator); - blk_sync_queue(q); if (rl->rq_pool) @@ -1761,6 +1756,24 @@ void blk_cleanup_queue(request_queue_t * q) kmem_cache_free(requestq_cachep, q); } +void blk_put_queue(request_queue_t *q) +{ + kobject_put(&q->kobj); +} +EXPORT_SYMBOL(blk_put_queue); + +void blk_cleanup_queue(request_queue_t * q) +{ + mutex_lock(&q->sysfs_lock); + set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); + mutex_unlock(&q->sysfs_lock); + + if (q->elevator) + elevator_exit(q->elevator); + + blk_put_queue(q); +} + EXPORT_SYMBOL(blk_cleanup_queue); static int blk_init_free_list(request_queue_t *q) @@ -1788,6 +1801,8 @@ request_queue_t *blk_alloc_queue(gfp_t gfp_mask) } EXPORT_SYMBOL(blk_alloc_queue); +static struct kobj_type queue_ktype; + request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) { request_queue_t *q; @@ -1798,11 +1813,16 @@ request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) memset(q, 0, sizeof(*q)); init_timer(&q->unplug_timer); - atomic_set(&q->refcnt, 1); + + snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue"); + q->kobj.ktype = &queue_ktype; + kobject_init(&q->kobj); q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug; q->backing_dev_info.unplug_io_data = q; + mutex_init(&q->sysfs_lock); + return q; } EXPORT_SYMBOL(blk_alloc_queue_node); @@ -1901,7 +1921,7 @@ EXPORT_SYMBOL(blk_init_queue_node); int blk_get_queue(request_queue_t *q) { if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { - atomic_inc(&q->refcnt); + kobject_get(&q->kobj); return 0; } @@ -3764,13 +3784,19 @@ static ssize_t queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) { struct queue_sysfs_entry *entry = to_queue(attr); - struct request_queue *q; + request_queue_t *q = container_of(kobj, struct request_queue, kobj); + ssize_t res; - q = container_of(kobj, struct request_queue, kobj); if (!entry->show) return -EIO; - - return entry->show(q, page); + mutex_lock(&q->sysfs_lock); + if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { + mutex_unlock(&q->sysfs_lock); + return -ENOENT; + } + res = entry->show(q, page); + mutex_unlock(&q->sysfs_lock); + return res; } static ssize_t @@ -3778,13 +3804,20 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr, const char *page, size_t length) { struct queue_sysfs_entry *entry = to_queue(attr); - struct request_queue *q; + request_queue_t *q = container_of(kobj, struct request_queue, kobj); + + ssize_t res; - q = container_of(kobj, struct request_queue, kobj); if (!entry->store) return -EIO; - - return entry->store(q, page, length); + mutex_lock(&q->sysfs_lock); + if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { + mutex_unlock(&q->sysfs_lock); + return -ENOENT; + } + res = entry->store(q, page, length); + mutex_unlock(&q->sysfs_lock); + return res; } static struct sysfs_ops queue_sysfs_ops = { @@ -3795,6 +3828,7 @@ static struct sysfs_ops queue_sysfs_ops = { static struct kobj_type queue_ktype = { .sysfs_ops = &queue_sysfs_ops, .default_attrs = default_attrs, + .release = blk_release_queue, }; int blk_register_queue(struct gendisk *disk) @@ -3807,19 +3841,17 @@ int blk_register_queue(struct gendisk *disk) return -ENXIO; q->kobj.parent = kobject_get(&disk->kobj); - if (!q->kobj.parent) - return -EBUSY; - snprintf(q->kobj.name, 
KOBJ_NAME_LEN, "%s", "queue"); - q->kobj.ktype = &queue_ktype; - - ret = kobject_register(&q->kobj); + ret = kobject_add(&q->kobj); if (ret < 0) return ret; + kobject_uevent(&q->kobj, KOBJ_ADD); + ret = elv_register_queue(q); if (ret) { - kobject_unregister(&q->kobj); + kobject_uevent(&q->kobj, KOBJ_REMOVE); + kobject_del(&q->kobj); return ret; } @@ -3833,7 +3865,8 @@ void blk_unregister_queue(struct gendisk *disk) if (q && q->request_fn) { elv_unregister_queue(q); - kobject_unregister(&q->kobj); + kobject_uevent(&q->kobj, KOBJ_REMOVE); + kobject_del(&q->kobj); kobject_put(&disk->kobj); } } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 80518f70353..56bb6a4e15f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -406,8 +406,6 @@ struct request_queue struct blk_queue_tag *queue_tags; - atomic_t refcnt; - unsigned int nr_sorted; unsigned int in_flight; @@ -426,6 +424,8 @@ struct request_queue struct request pre_flush_rq, bar_rq, post_flush_rq; struct request *orig_bar_rq; unsigned int bi_size; + + struct mutex sysfs_lock; }; #define RQ_INACTIVE (-1) @@ -727,7 +727,7 @@ extern long nr_blockdev_pages(void); int blk_get_queue(request_queue_t *); request_queue_t *blk_alloc_queue(gfp_t); request_queue_t *blk_alloc_queue_node(gfp_t, int); -#define blk_put_queue(q) blk_cleanup_queue((q)) +extern void blk_put_queue(request_queue_t *); /* * tag stuff -- cgit v1.2.3-70-g09d2 From 2056a782f8e7e65fd4bfd027506b4ce1c5e9ccd4 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 23 Mar 2006 20:00:26 +0100 Subject: [PATCH] Block queue IO tracing support (blktrace) as of 2006-03-23 Signed-off-by: Jens Axboe --- block/Kconfig | 12 + block/Makefile | 2 + block/blktrace.c | 538 +++++++++++++++++++++++++++++++++++++++++++ block/elevator.c | 4 + block/ioctl.c | 6 + block/ll_rw_blk.c | 44 +++- drivers/block/cciss.c | 2 + drivers/md/dm.c | 13 +- fs/bio.c | 4 + fs/compat_ioctl.c | 1 + include/linux/blkdev.h | 3 + include/linux/blktrace_api.h | 277 ++++++++++++++++++++++ include/linux/compat_ioctl.h | 4 + include/linux/fs.h | 4 + include/linux/sched.h | 1 + kernel/fork.c | 1 + mm/highmem.c | 3 + 17 files changed, 916 insertions(+), 3 deletions(-) create mode 100644 block/blktrace.c create mode 100644 include/linux/blktrace_api.h (limited to 'include/linux/blkdev.h') diff --git a/block/Kconfig b/block/Kconfig index 377f6dd20e1..96783645092 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -11,4 +11,16 @@ config LBD your machine, or if you want to have a raid or loopback device bigger than 2TB. Otherwise say N. +config BLK_DEV_IO_TRACE + bool "Support for tracing block io actions" + select RELAY + select DEBUG_FS + help + Say Y here, if you want to be able to trace the block layer actions + on a given queue. Tracing allows you to see any traffic happening + on a block device queue. 
For more information (and the user space + support tools needed), fetch the blktrace app from: + + git://brick.kernel.dk/data/git/blktrace.git + source block/Kconfig.iosched diff --git a/block/Makefile b/block/Makefile index 7e4f93e2b44..c05de0e0037 100644 --- a/block/Makefile +++ b/block/Makefile @@ -8,3 +8,5 @@ obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o obj-$(CONFIG_IOSCHED_AS) += as-iosched.o obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o + +obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o diff --git a/block/blktrace.c b/block/blktrace.c new file mode 100644 index 00000000000..36f3a172275 --- /dev/null +++ b/block/blktrace.c @@ -0,0 +1,538 @@ +/* + * Copyright (C) 2006 Jens Axboe + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, }; +static unsigned int blktrace_seq __read_mostly = 1; + +/* + * Send out a notify for this process, if we haven't done so since a trace + * started + */ +static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk) +{ + struct blk_io_trace *t; + + t = relay_reserve(bt->rchan, sizeof(*t) + sizeof(tsk->comm)); + if (t) { + t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; + t->device = bt->dev; + t->action = BLK_TC_ACT(BLK_TC_NOTIFY); + t->pid = tsk->pid; + t->cpu = smp_processor_id(); + t->pdu_len = sizeof(tsk->comm); + memcpy((void *) t + sizeof(*t), tsk->comm, t->pdu_len); + tsk->btrace_seq = blktrace_seq; + } +} + +static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector, + pid_t pid) +{ + if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0) + return 1; + if (sector < bt->start_lba || sector > bt->end_lba) + return 1; + if (bt->pid && pid != bt->pid) + return 1; + + return 0; +} + +/* + * Data direction bit lookup + */ +static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE) }; + +/* + * Bio action bits of interest + */ +static u32 bio_act[3] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_ACT(BLK_TC_SYNC) }; + +/* + * More could be added as needed, taking care to increment the decrementer + * to get correct indexing + */ +#define trace_barrier_bit(rw) \ + (((rw) & (1 << BIO_RW_BARRIER)) >> (BIO_RW_BARRIER - 0)) +#define trace_sync_bit(rw) \ + (((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1)) + +/* + * The worker for the various blk_add_trace*() types. Fills out a + * blk_io_trace structure and places it in a per-cpu subbuffer. 
+ */ +void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, + int rw, u32 what, int error, int pdu_len, void *pdu_data) +{ + struct task_struct *tsk = current; + struct blk_io_trace *t; + unsigned long flags; + unsigned long *sequence; + pid_t pid; + int cpu; + + if (unlikely(bt->trace_state != Blktrace_running)) + return; + + what |= ddir_act[rw & WRITE]; + what |= bio_act[trace_barrier_bit(rw)]; + what |= bio_act[trace_sync_bit(rw)]; + + pid = tsk->pid; + if (unlikely(act_log_check(bt, what, sector, pid))) + return; + + /* + * A word about the locking here - we disable interrupts to reserve + * some space in the relay per-cpu buffer, to prevent an irq + * from coming in and stepping on our toes. Once reserved, it's + * enough to get preemption disabled to prevent read of this data + * before we are through filling it. get_cpu()/put_cpu() does this + * for us + */ + local_irq_save(flags); + + if (unlikely(tsk->btrace_seq != blktrace_seq)) + trace_note_tsk(bt, tsk); + + t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len); + if (t) { + cpu = smp_processor_id(); + sequence = per_cpu_ptr(bt->sequence, cpu); + + t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; + t->sequence = ++(*sequence); + t->time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu); + t->sector = sector; + t->bytes = bytes; + t->action = what; + t->pid = pid; + t->device = bt->dev; + t->cpu = cpu; + t->error = error; + t->pdu_len = pdu_len; + + if (pdu_len) + memcpy((void *) t + sizeof(*t), pdu_data, pdu_len); + } + + local_irq_restore(flags); +} + +EXPORT_SYMBOL_GPL(__blk_add_trace); + +static struct dentry *blk_tree_root; +static struct mutex blk_tree_mutex; +static unsigned int root_users; + +static inline void blk_remove_root(void) +{ + if (blk_tree_root) { + debugfs_remove(blk_tree_root); + blk_tree_root = NULL; + } +} + +static void blk_remove_tree(struct dentry *dir) +{ + mutex_lock(&blk_tree_mutex); + debugfs_remove(dir); + if (--root_users == 0) + blk_remove_root(); + mutex_unlock(&blk_tree_mutex); +} + +static struct dentry *blk_create_tree(const char *blk_name) +{ + struct dentry *dir = NULL; + + mutex_lock(&blk_tree_mutex); + + if (!blk_tree_root) { + blk_tree_root = debugfs_create_dir("block", NULL); + if (!blk_tree_root) + goto err; + } + + dir = debugfs_create_dir(blk_name, blk_tree_root); + if (dir) + root_users++; + else + blk_remove_root(); + +err: + mutex_unlock(&blk_tree_mutex); + return dir; +} + +static void blk_trace_cleanup(struct blk_trace *bt) +{ + relay_close(bt->rchan); + debugfs_remove(bt->dropped_file); + blk_remove_tree(bt->dir); + free_percpu(bt->sequence); + kfree(bt); +} + +static int blk_trace_remove(request_queue_t *q) +{ + struct blk_trace *bt; + + bt = xchg(&q->blk_trace, NULL); + if (!bt) + return -EINVAL; + + if (bt->trace_state == Blktrace_setup || + bt->trace_state == Blktrace_stopped) + blk_trace_cleanup(bt); + + return 0; +} + +static int blk_dropped_open(struct inode *inode, struct file *filp) +{ + filp->private_data = inode->u.generic_ip; + + return 0; +} + +static ssize_t blk_dropped_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct blk_trace *bt = filp->private_data; + char buf[16]; + + snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped)); + + return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); +} + +static struct file_operations blk_dropped_fops = { + .owner = THIS_MODULE, + .open = blk_dropped_open, + .read = blk_dropped_read, +}; + +/* + * Keep track of how many times we encountered a 
full subbuffer, to aid + * the user space app in telling how many lost events there were. + */ +static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf, + void *prev_subbuf, size_t prev_padding) +{ + struct blk_trace *bt; + + if (!relay_buf_full(buf)) + return 1; + + bt = buf->chan->private_data; + atomic_inc(&bt->dropped); + return 0; +} + +static int blk_remove_buf_file_callback(struct dentry *dentry) +{ + debugfs_remove(dentry); + return 0; +} + +static struct dentry *blk_create_buf_file_callback(const char *filename, + struct dentry *parent, + int mode, + struct rchan_buf *buf, + int *is_global) +{ + return debugfs_create_file(filename, mode, parent, buf, + &relay_file_operations); +} + +static struct rchan_callbacks blk_relay_callbacks = { + .subbuf_start = blk_subbuf_start_callback, + .create_buf_file = blk_create_buf_file_callback, + .remove_buf_file = blk_remove_buf_file_callback, +}; + +/* + * Setup everything required to start tracing + */ +static int blk_trace_setup(request_queue_t *q, struct block_device *bdev, + char __user *arg) +{ + struct blk_user_trace_setup buts; + struct blk_trace *old_bt, *bt = NULL; + struct dentry *dir = NULL; + char b[BDEVNAME_SIZE]; + int ret, i; + + if (copy_from_user(&buts, arg, sizeof(buts))) + return -EFAULT; + + if (!buts.buf_size || !buts.buf_nr) + return -EINVAL; + + strcpy(buts.name, bdevname(bdev, b)); + + /* + * some device names have larger paths - convert the slashes + * to underscores for this to work as expected + */ + for (i = 0; i < strlen(buts.name); i++) + if (buts.name[i] == '/') + buts.name[i] = '_'; + + if (copy_to_user(arg, &buts, sizeof(buts))) + return -EFAULT; + + ret = -ENOMEM; + bt = kzalloc(sizeof(*bt), GFP_KERNEL); + if (!bt) + goto err; + + bt->sequence = alloc_percpu(unsigned long); + if (!bt->sequence) + goto err; + + ret = -ENOENT; + dir = blk_create_tree(buts.name); + if (!dir) + goto err; + + bt->dir = dir; + bt->dev = bdev->bd_dev; + atomic_set(&bt->dropped, 0); + + ret = -EIO; + bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops); + if (!bt->dropped_file) + goto err; + + bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr, &blk_relay_callbacks); + if (!bt->rchan) + goto err; + bt->rchan->private_data = bt; + + bt->act_mask = buts.act_mask; + if (!bt->act_mask) + bt->act_mask = (u16) -1; + + bt->start_lba = buts.start_lba; + bt->end_lba = buts.end_lba; + if (!bt->end_lba) + bt->end_lba = -1ULL; + + bt->pid = buts.pid; + bt->trace_state = Blktrace_setup; + + ret = -EBUSY; + old_bt = xchg(&q->blk_trace, bt); + if (old_bt) { + (void) xchg(&q->blk_trace, old_bt); + goto err; + } + + return 0; +err: + if (dir) + blk_remove_tree(dir); + if (bt) { + if (bt->dropped_file) + debugfs_remove(bt->dropped_file); + if (bt->sequence) + free_percpu(bt->sequence); + if (bt->rchan) + relay_close(bt->rchan); + kfree(bt); + } + return ret; +} + +static int blk_trace_startstop(request_queue_t *q, int start) +{ + struct blk_trace *bt; + int ret; + + if ((bt = q->blk_trace) == NULL) + return -EINVAL; + + /* + * For starting a trace, we can transition from a setup or stopped + * trace. 
For stopping a trace, the state must be running + */ + ret = -EINVAL; + if (start) { + if (bt->trace_state == Blktrace_setup || + bt->trace_state == Blktrace_stopped) { + blktrace_seq++; + smp_mb(); + bt->trace_state = Blktrace_running; + ret = 0; + } + } else { + if (bt->trace_state == Blktrace_running) { + bt->trace_state = Blktrace_stopped; + relay_flush(bt->rchan); + ret = 0; + } + } + + return ret; +} + +/** + * blk_trace_ioctl: - handle the ioctls associated with tracing + * @bdev: the block device + * @cmd: the ioctl cmd + * @arg: the argument data, if any + * + **/ +int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) +{ + request_queue_t *q; + int ret, start = 0; + + q = bdev_get_queue(bdev); + if (!q) + return -ENXIO; + + mutex_lock(&bdev->bd_mutex); + + switch (cmd) { + case BLKTRACESETUP: + ret = blk_trace_setup(q, bdev, arg); + break; + case BLKTRACESTART: + start = 1; + case BLKTRACESTOP: + ret = blk_trace_startstop(q, start); + break; + case BLKTRACETEARDOWN: + ret = blk_trace_remove(q); + break; + default: + ret = -ENOTTY; + break; + } + + mutex_unlock(&bdev->bd_mutex); + return ret; +} + +/** + * blk_trace_shutdown: - stop and cleanup trace structures + * @q: the request queue associated with the device + * + **/ +void blk_trace_shutdown(request_queue_t *q) +{ + blk_trace_startstop(q, 0); + blk_trace_remove(q); +} + +/* + * Average offset over two calls to sched_clock() with a gettimeofday() + * in the middle + */ +static void blk_check_time(unsigned long long *t) +{ + unsigned long long a, b; + struct timeval tv; + + a = sched_clock(); + do_gettimeofday(&tv); + b = sched_clock(); + + *t = tv.tv_sec * 1000000000 + tv.tv_usec * 1000; + *t -= (a + b) / 2; +} + +static void blk_trace_check_cpu_time(void *data) +{ + unsigned long long *t; + int cpu = get_cpu(); + + t = &per_cpu(blk_trace_cpu_offset, cpu); + + /* + * Just call it twice, hopefully the second call will be cache hot + * and a little more precise + */ + blk_check_time(t); + blk_check_time(t); + + put_cpu(); +} + +/* + * Call blk_trace_check_cpu_time() on each CPU to calibrate our inter-CPU + * timings + */ +static void blk_trace_calibrate_offsets(void) +{ + unsigned long flags; + + smp_call_function(blk_trace_check_cpu_time, NULL, 1, 1); + local_irq_save(flags); + blk_trace_check_cpu_time(NULL); + local_irq_restore(flags); +} + +static void blk_trace_set_ht_offsets(void) +{ +#if defined(CONFIG_SCHED_SMT) + int cpu, i; + + /* + * now make sure HT siblings have the same time offset + */ + preempt_disable(); + for_each_online_cpu(cpu) { + unsigned long long *cpu_off, *sibling_off; + + for_each_cpu_mask(i, cpu_sibling_map[cpu]) { + if (i == cpu) + continue; + + cpu_off = &per_cpu(blk_trace_cpu_offset, cpu); + sibling_off = &per_cpu(blk_trace_cpu_offset, i); + *sibling_off = *cpu_off; + } + } + preempt_enable(); +#endif +} + +static __init int blk_trace_init(void) +{ + mutex_init(&blk_tree_mutex); + blk_trace_calibrate_offsets(); + blk_trace_set_ht_offsets(); + + return 0; +} + +module_init(blk_trace_init); + diff --git a/block/elevator.c b/block/elevator.c index db3d0d8296a..5e558c4689a 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -33,6 +33,7 @@ #include #include #include +#include #include @@ -333,6 +334,8 @@ void elv_insert(request_queue_t *q, struct request *rq, int where) struct list_head *pos; unsigned ordseq; + blk_add_trace_rq(q, rq, BLK_TA_INSERT); + rq->q = q; switch (where) { @@ -499,6 +502,7 @@ struct request *elv_next_request(request_queue_t *q) * not be passed by new 
incoming requests */ rq->flags |= REQ_STARTED; + blk_add_trace_rq(q, rq, BLK_TA_ISSUE); } if (!q->boundary_rq || q->boundary_rq == rq) { diff --git a/block/ioctl.c b/block/ioctl.c index 35fdb7dc651..9cfa2e1ecb2 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -5,6 +5,7 @@ #include #include #include +#include #include static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg) @@ -189,6 +190,11 @@ static int blkdev_locked_ioctl(struct file *file, struct block_device *bdev, return put_ulong(arg, bdev->bd_inode->i_size >> 9); case BLKGETSIZE64: return put_u64(arg, bdev->bd_inode->i_size); + case BLKTRACESTART: + case BLKTRACESTOP: + case BLKTRACESETUP: + case BLKTRACETEARDOWN: + return blk_trace_ioctl(bdev, cmd, (char __user *) arg); } return -ENOIOCTLCMD; } diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 6c793b196aa..062067fa7ea 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -28,6 +28,7 @@ #include #include #include +#include /* * for max sense size @@ -1556,8 +1557,10 @@ void blk_plug_device(request_queue_t *q) if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)) return; - if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) + if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) { mod_timer(&q->unplug_timer, jiffies + q->unplug_delay); + blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG); + } } EXPORT_SYMBOL(blk_plug_device); @@ -1621,14 +1624,21 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi, /* * devices don't necessarily have an ->unplug_fn defined */ - if (q->unplug_fn) + if (q->unplug_fn) { + blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, + q->rq.count[READ] + q->rq.count[WRITE]); + q->unplug_fn(q); + } } static void blk_unplug_work(void *data) { request_queue_t *q = data; + blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, + q->rq.count[READ] + q->rq.count[WRITE]); + q->unplug_fn(q); } @@ -1636,6 +1646,9 @@ static void blk_unplug_timeout(unsigned long data) { request_queue_t *q = (request_queue_t *)data; + blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL, + q->rq.count[READ] + q->rq.count[WRITE]); + kblockd_schedule_work(&q->unplug_work); } @@ -1753,6 +1766,9 @@ static void blk_release_queue(struct kobject *kobj) if (q->queue_tags) __blk_queue_free_tags(q); + if (q->blk_trace) + blk_trace_shutdown(q); + kmem_cache_free(requestq_cachep, q); } @@ -2129,6 +2145,8 @@ rq_starved: rq_init(q, rq); rq->rl = rl; + + blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ); out: return rq; } @@ -2157,6 +2175,8 @@ static struct request *get_request_wait(request_queue_t *q, int rw, if (!rq) { struct io_context *ioc; + blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ); + __generic_unplug_device(q); spin_unlock_irq(q->queue_lock); io_schedule(); @@ -2210,6 +2230,8 @@ EXPORT_SYMBOL(blk_get_request); */ void blk_requeue_request(request_queue_t *q, struct request *rq) { + blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); + if (blk_rq_tagged(rq)) blk_queue_end_tag(q, rq); @@ -2844,6 +2866,8 @@ static int __make_request(request_queue_t *q, struct bio *bio) if (!q->back_merge_fn(q, req, bio)) break; + blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE); + req->biotail->bi_next = bio; req->biotail = bio; req->nr_sectors = req->hard_nr_sectors += nr_sectors; @@ -2859,6 +2883,8 @@ static int __make_request(request_queue_t *q, struct bio *bio) if (!q->front_merge_fn(q, req, bio)) break; + blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE); + bio->bi_next = req->bio; req->bio = bio; @@ -2976,6 +3002,7 @@ void generic_make_request(struct bio *bio) request_queue_t 
*q; sector_t maxsector; int ret, nr_sectors = bio_sectors(bio); + dev_t old_dev; might_sleep(); /* Test device or partition size, when known. */ @@ -3002,6 +3029,8 @@ void generic_make_request(struct bio *bio) * NOTE: we don't repeat the blk_size check for each new device. * Stacking drivers are expected to know what they are doing. */ + maxsector = -1; + old_dev = 0; do { char b[BDEVNAME_SIZE]; @@ -3034,6 +3063,15 @@ end_io: */ blk_partition_remap(bio); + if (maxsector != -1) + blk_add_trace_remap(q, bio, old_dev, bio->bi_sector, + maxsector); + + blk_add_trace_bio(q, bio, BLK_TA_QUEUE); + + maxsector = bio->bi_sector; + old_dev = bio->bi_bdev->bd_dev; + ret = q->make_request_fn(q, bio); } while (ret); } @@ -3153,6 +3191,8 @@ static int __end_that_request_first(struct request *req, int uptodate, int total_bytes, bio_nbytes, error, next_idx = 0; struct bio *bio; + blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE); + /* * extend uptodate bool to allow < 0 value to be direct io error */ diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index e29b8926f80..1f2890989b5 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include @@ -2331,6 +2332,7 @@ static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd, cmd->rq->completion_data = cmd; cmd->rq->errors = status; + blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE); blk_complete_request(cmd->rq); } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 26b08ee425c..8c82373f7ff 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -17,6 +17,7 @@ #include #include #include +#include static const char *_name = DM_NAME; @@ -334,6 +335,8 @@ static void dec_pending(struct dm_io *io, int error) /* nudge anyone waiting on suspend queue */ wake_up(&io->md->wait); + blk_add_trace_bio(io->md->queue, io->bio, BLK_TA_COMPLETE); + bio_endio(io->bio, io->bio->bi_size, io->error); free_io(io->md, io); } @@ -392,6 +395,7 @@ static void __map_bio(struct dm_target *ti, struct bio *clone, struct target_io *tio) { int r; + sector_t sector; /* * Sanity checks. @@ -407,10 +411,17 @@ static void __map_bio(struct dm_target *ti, struct bio *clone, * this io. 
*/ atomic_inc(&tio->io->io_count); + sector = clone->bi_sector; r = ti->type->map(ti, clone, &tio->info); - if (r > 0) + if (r > 0) { /* the bio has been remapped so dispatch it */ + + blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone, + tio->io->bio->bi_bdev->bd_dev, sector, + clone->bi_sector); + generic_make_request(clone); + } else if (r < 0) { /* error the io and bail out */ diff --git a/fs/bio.c b/fs/bio.c index 8f1d2e815c9..0a8c59cb68f 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -25,6 +25,7 @@ #include #include #include +#include #include /* for struct sg_iovec */ #define BIO_POOL_SIZE 256 @@ -1095,6 +1096,9 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors) if (!bp) return bp; + blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi, + bi->bi_sector + first_sectors); + BUG_ON(bi->bi_vcnt != 1); BUG_ON(bi->bi_idx != 0); atomic_set(&bp->cnt, 3); diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index c666769a875..7c031f00fd7 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -72,6 +72,7 @@ #include #include #include +#include #include /* siocdevprivate_ioctl */ #include diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 56bb6a4e15f..c179966f1a2 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -22,6 +22,7 @@ typedef struct request_queue request_queue_t; struct elevator_queue; typedef struct elevator_queue elevator_t; struct request_pm_state; +struct blk_trace; #define BLKDEV_MIN_RQ 4 #define BLKDEV_MAX_RQ 128 /* Default maximum */ @@ -416,6 +417,8 @@ struct request_queue unsigned int sg_reserved_size; int node; + struct blk_trace *blk_trace; + /* * reserved for flush operations */ diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h new file mode 100644 index 00000000000..b34d3e73d5e --- /dev/null +++ b/include/linux/blktrace_api.h @@ -0,0 +1,277 @@ +#ifndef BLKTRACE_H +#define BLKTRACE_H + +#include +#include +#include + +/* + * Trace categories + */ +enum blktrace_cat { + BLK_TC_READ = 1 << 0, /* reads */ + BLK_TC_WRITE = 1 << 1, /* writes */ + BLK_TC_BARRIER = 1 << 2, /* barrier */ + BLK_TC_SYNC = 1 << 3, /* barrier */ + BLK_TC_QUEUE = 1 << 4, /* queueing/merging */ + BLK_TC_REQUEUE = 1 << 5, /* requeueing */ + BLK_TC_ISSUE = 1 << 6, /* issue */ + BLK_TC_COMPLETE = 1 << 7, /* completions */ + BLK_TC_FS = 1 << 8, /* fs requests */ + BLK_TC_PC = 1 << 9, /* pc requests */ + BLK_TC_NOTIFY = 1 << 10, /* special message */ + + BLK_TC_END = 1 << 15, /* only 16-bits, reminder */ +}; + +#define BLK_TC_SHIFT (16) +#define BLK_TC_ACT(act) ((act) << BLK_TC_SHIFT) + +/* + * Basic trace actions + */ +enum blktrace_act { + __BLK_TA_QUEUE = 1, /* queued */ + __BLK_TA_BACKMERGE, /* back merged to existing rq */ + __BLK_TA_FRONTMERGE, /* front merge to existing rq */ + __BLK_TA_GETRQ, /* allocated new request */ + __BLK_TA_SLEEPRQ, /* sleeping on rq allocation */ + __BLK_TA_REQUEUE, /* request requeued */ + __BLK_TA_ISSUE, /* sent to driver */ + __BLK_TA_COMPLETE, /* completed by driver */ + __BLK_TA_PLUG, /* queue was plugged */ + __BLK_TA_UNPLUG_IO, /* queue was unplugged by io */ + __BLK_TA_UNPLUG_TIMER, /* queue was unplugged by timer */ + __BLK_TA_INSERT, /* insert request */ + __BLK_TA_SPLIT, /* bio was split */ + __BLK_TA_BOUNCE, /* bio was bounced */ + __BLK_TA_REMAP, /* bio was remapped */ +}; + +/* + * Trace actions in full. 
Additionally, read or write is masked + */ +#define BLK_TA_QUEUE (__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE)) +#define BLK_TA_BACKMERGE (__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE)) +#define BLK_TA_FRONTMERGE (__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE)) +#define BLK_TA_GETRQ (__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE)) +#define BLK_TA_SLEEPRQ (__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE)) +#define BLK_TA_REQUEUE (__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE)) +#define BLK_TA_ISSUE (__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE)) +#define BLK_TA_COMPLETE (__BLK_TA_COMPLETE| BLK_TC_ACT(BLK_TC_COMPLETE)) +#define BLK_TA_PLUG (__BLK_TA_PLUG | BLK_TC_ACT(BLK_TC_QUEUE)) +#define BLK_TA_UNPLUG_IO (__BLK_TA_UNPLUG_IO | BLK_TC_ACT(BLK_TC_QUEUE)) +#define BLK_TA_UNPLUG_TIMER (__BLK_TA_UNPLUG_TIMER | BLK_TC_ACT(BLK_TC_QUEUE)) +#define BLK_TA_INSERT (__BLK_TA_INSERT | BLK_TC_ACT(BLK_TC_QUEUE)) +#define BLK_TA_SPLIT (__BLK_TA_SPLIT) +#define BLK_TA_BOUNCE (__BLK_TA_BOUNCE) +#define BLK_TA_REMAP (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE)) + +#define BLK_IO_TRACE_MAGIC 0x65617400 +#define BLK_IO_TRACE_VERSION 0x07 + +/* + * The trace itself + */ +struct blk_io_trace { + u32 magic; /* MAGIC << 8 | version */ + u32 sequence; /* event number */ + u64 time; /* in microseconds */ + u64 sector; /* disk offset */ + u32 bytes; /* transfer length */ + u32 action; /* what happened */ + u32 pid; /* who did it */ + u32 device; /* device number */ + u32 cpu; /* on what cpu did it happen */ + u16 error; /* completion error */ + u16 pdu_len; /* length of data after this trace */ +}; + +/* + * The remap event + */ +struct blk_io_trace_remap { + u32 device; + u32 __pad; + u64 sector; +}; + +enum { + Blktrace_setup = 1, + Blktrace_running, + Blktrace_stopped, +}; + +struct blk_trace { + int trace_state; + struct rchan *rchan; + unsigned long *sequence; + u16 act_mask; + u64 start_lba; + u64 end_lba; + u32 pid; + u32 dev; + struct dentry *dir; + struct dentry *dropped_file; + atomic_t dropped; +}; + +/* + * User setup structure passed with BLKTRACESTART + */ +struct blk_user_trace_setup { + char name[BDEVNAME_SIZE]; /* output */ + u16 act_mask; /* input */ + u32 buf_size; /* input */ + u32 buf_nr; /* input */ + u64 start_lba; + u64 end_lba; + u32 pid; +}; + +#if defined(CONFIG_BLK_DEV_IO_TRACE) +extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); +extern void blk_trace_shutdown(request_queue_t *); +extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *); + +/** + * blk_add_trace_rq - Add a trace for a request oriented action + * @q: queue the io is for + * @rq: the source request + * @what: the action + * + * Description: + * Records an action against a request. Will log the bio offset + size. + * + **/ +static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq, + u32 what) +{ + struct blk_trace *bt = q->blk_trace; + int rw = rq->flags & 0x07; + + if (likely(!bt)) + return; + + if (blk_pc_request(rq)) { + what |= BLK_TC_ACT(BLK_TC_PC); + __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd); + } else { + what |= BLK_TC_ACT(BLK_TC_FS); + __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL); + } +} + +/** + * blk_add_trace_bio - Add a trace for a bio oriented action + * @q: queue the io is for + * @bio: the source bio + * @what: the action + * + * Description: + * Records an action against a bio. Will log the bio offset + size. 
+ * + **/ +static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio, + u32 what) +{ + struct blk_trace *bt = q->blk_trace; + + if (likely(!bt)) + return; + + __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL); +} + +/** + * blk_add_trace_generic - Add a trace for a generic action + * @q: queue the io is for + * @bio: the source bio + * @rw: the data direction + * @what: the action + * + * Description: + * Records a simple trace + * + **/ +static inline void blk_add_trace_generic(struct request_queue *q, + struct bio *bio, int rw, u32 what) +{ + struct blk_trace *bt = q->blk_trace; + + if (likely(!bt)) + return; + + if (bio) + blk_add_trace_bio(q, bio, what); + else + __blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL); +} + +/** + * blk_add_trace_pdu_int - Add a trace for a bio with an integer payload + * @q: queue the io is for + * @what: the action + * @bio: the source bio + * @pdu: the integer payload + * + * Description: + * Adds a trace with some integer payload. This might be an unplug + * option given as the action, with the depth at unplug time given + * as the payload + * + **/ +static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what, + struct bio *bio, unsigned int pdu) +{ + struct blk_trace *bt = q->blk_trace; + u64 rpdu = cpu_to_be64(pdu); + + if (likely(!bt)) + return; + + if (bio) + __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu); + else + __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu); +} + +/** + * blk_add_trace_remap - Add a trace for a remap operation + * @q: queue the io is for + * @bio: the source bio + * @dev: target device + * @from: source sector + * @to: target sector + * + * Description: + * Device mapper or raid target sometimes need to split a bio because + * it spans a stripe (or similar). Add a trace for that action. 
+ * + **/ +static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio, + dev_t dev, sector_t from, sector_t to) +{ + struct blk_trace *bt = q->blk_trace; + struct blk_io_trace_remap r; + + if (likely(!bt)) + return; + + r.device = cpu_to_be32(dev); + r.sector = cpu_to_be64(to); + + __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r); +} + +#else /* !CONFIG_BLK_DEV_IO_TRACE */ +#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) +#define blk_trace_shutdown(q) do { } while (0) +#define blk_add_trace_rq(q, rq, what) do { } while (0) +#define blk_add_trace_bio(q, rq, what) do { } while (0) +#define blk_add_trace_generic(q, rq, rw, what) do { } while (0) +#define blk_add_trace_pdu_int(q, what, bio, pdu) do { } while (0) +#define blk_add_trace_remap(q, bio, dev, f, t) do {} while (0) +#endif /* CONFIG_BLK_DEV_IO_TRACE */ + +#endif diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h index ae7dfb790df..efb518f16bb 100644 --- a/include/linux/compat_ioctl.h +++ b/include/linux/compat_ioctl.h @@ -97,6 +97,10 @@ COMPATIBLE_IOCTL(BLKRRPART) COMPATIBLE_IOCTL(BLKFLSBUF) COMPATIBLE_IOCTL(BLKSECTSET) COMPATIBLE_IOCTL(BLKSSZGET) +COMPATIBLE_IOCTL(BLKTRACESTART) +COMPATIBLE_IOCTL(BLKTRACESTOP) +COMPATIBLE_IOCTL(BLKTRACESETUP) +COMPATIBLE_IOCTL(BLKTRACETEARDOWN) ULONG_IOCTL(BLKRASET) ULONG_IOCTL(BLKFRASET) /* RAID */ diff --git a/include/linux/fs.h b/include/linux/fs.h index f9c9dea636d..9b34a1b0345 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -197,6 +197,10 @@ extern int dir_notify_enable; #define BLKBSZGET _IOR(0x12,112,size_t) #define BLKBSZSET _IOW(0x12,113,size_t) #define BLKGETSIZE64 _IOR(0x12,114,size_t) /* return device size in bytes (u64 *arg) */ +#define BLKTRACESETUP _IOWR(0x12,115,struct blk_user_trace_setup) +#define BLKTRACESTART _IO(0x12,116) +#define BLKTRACESTOP _IO(0x12,117) +#define BLKTRACETEARDOWN _IO(0x12,118) #define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ #define FIBMAP _IO(0x00,1) /* bmap access */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 62e6314382f..e60a91d5b36 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -706,6 +706,7 @@ struct task_struct { prio_array_t *array; unsigned short ioprio; + unsigned int btrace_seq; unsigned long sleep_avg; unsigned long long timestamp, last_ran; diff --git a/kernel/fork.c b/kernel/fork.c index c79ae0b19a4..c21bae8c93b 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -181,6 +181,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) /* One for us, one for whoever does the "release_task()" (usually parent) */ atomic_set(&tsk->usage,2); atomic_set(&tsk->fs_excl, 0); + tsk->btrace_seq = 0; return tsk; } diff --git a/mm/highmem.c b/mm/highmem.c index ce2e7e8bbfa..d0ea1eec6a9 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -26,6 +26,7 @@ #include #include #include +#include #include static mempool_t *page_pool, *isa_page_pool; @@ -483,6 +484,8 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig) pool = isa_page_pool; } + blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE); + /* * slow path */ -- cgit v1.2.3-70-g09d2 From e2d74ac0664c89757bde8fb18c98cd7bf53da61c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 28 Mar 2006 08:59:01 +0200 Subject: [PATCH] [BLOCK] cfq-iosched: change cfq io context linking from list to tree On setups with many disks, we spend a considerable amount of time looking up the process-disk mapping on each queue of io. 
Testing with a NULL based block driver, this costs 40-50% reduction in throughput for 1000 disks. Signed-off-by: Jens Axboe --- block/cfq-iosched.c | 205 +++++++++++++++++++++++-------------------------- block/ll_rw_blk.c | 19 +++-- include/linux/blkdev.h | 14 ++-- 3 files changed, 114 insertions(+), 124 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index bde40a6ae66..bb43a167762 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -1190,19 +1190,19 @@ cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio) return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT)); } -static void cfq_free_io_context(struct cfq_io_context *cic) +static void cfq_free_io_context(struct io_context *ioc) { struct cfq_io_context *__cic; - struct list_head *entry, *next; - int freed = 1; + struct rb_node *n; + int freed = 0; - list_for_each_safe(entry, next, &cic->list) { - __cic = list_entry(entry, struct cfq_io_context, list); + while ((n = rb_first(&ioc->cic_root)) != NULL) { + __cic = rb_entry(n, struct cfq_io_context, rb_node); + rb_erase(&__cic->rb_node, &ioc->cic_root); kmem_cache_free(cfq_ioc_pool, __cic); freed++; } - kmem_cache_free(cfq_ioc_pool, cic); if (atomic_sub_and_test(freed, &ioc_count) && ioc_gone) complete(ioc_gone); } @@ -1210,8 +1210,7 @@ static void cfq_free_io_context(struct cfq_io_context *cic) static void cfq_trim(struct io_context *ioc) { ioc->set_ioprio = NULL; - if (ioc->cic) - cfq_free_io_context(ioc->cic); + cfq_free_io_context(ioc); } /* @@ -1250,26 +1249,26 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic) spin_unlock(q->queue_lock); } -static void cfq_exit_io_context(struct cfq_io_context *cic) +static void cfq_exit_io_context(struct io_context *ioc) { struct cfq_io_context *__cic; - struct list_head *entry; unsigned long flags; - - local_irq_save(flags); + struct rb_node *n; /* * put the reference this task is holding to the various queues */ - read_lock(&cfq_exit_lock); - list_for_each(entry, &cic->list) { - __cic = list_entry(entry, struct cfq_io_context, list); + read_lock_irqsave(&cfq_exit_lock, flags); + + n = rb_first(&ioc->cic_root); + while (n != NULL) { + __cic = rb_entry(n, struct cfq_io_context, rb_node); + cfq_exit_single_io_context(__cic); + n = rb_next(n); } - cfq_exit_single_io_context(cic); - read_unlock(&cfq_exit_lock); - local_irq_restore(flags); + read_unlock_irqrestore(&cfq_exit_lock, flags); } static struct cfq_io_context * @@ -1278,10 +1277,10 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask); if (cic) { - INIT_LIST_HEAD(&cic->list); + RB_CLEAR(&cic->rb_node); + cic->key = NULL; cic->cfqq[ASYNC] = NULL; cic->cfqq[SYNC] = NULL; - cic->key = NULL; cic->last_end_request = jiffies; cic->ttime_total = 0; cic->ttime_samples = 0; @@ -1373,15 +1372,17 @@ static inline void changed_ioprio(struct cfq_io_context *cic) static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio) { struct cfq_io_context *cic; + struct rb_node *n; write_lock(&cfq_exit_lock); - cic = ioc->cic; - - changed_ioprio(cic); - - list_for_each_entry(cic, &cic->list, list) + n = rb_first(&ioc->cic_root); + while (n != NULL) { + cic = rb_entry(n, struct cfq_io_context, rb_node); + changed_ioprio(cic); + n = rb_next(n); + } write_unlock(&cfq_exit_lock); @@ -1445,14 +1446,67 @@ out: return cfqq; } +static struct cfq_io_context * +cfq_cic_rb_lookup(struct cfq_data *cfqd, 
struct io_context *ioc) +{ + struct rb_node *n = ioc->cic_root.rb_node; + struct cfq_io_context *cic; + void *key = cfqd; + + while (n) { + cic = rb_entry(n, struct cfq_io_context, rb_node); + + if (key < cic->key) + n = n->rb_left; + else if (key > cic->key) + n = n->rb_right; + else + return cic; + } + + return NULL; +} + +static inline void +cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc, + struct cfq_io_context *cic) +{ + struct rb_node **p = &ioc->cic_root.rb_node; + struct rb_node *parent = NULL; + struct cfq_io_context *__cic; + + read_lock(&cfq_exit_lock); + + cic->ioc = ioc; + cic->key = cfqd; + + ioc->set_ioprio = cfq_ioc_set_ioprio; + + while (*p) { + parent = *p; + __cic = rb_entry(parent, struct cfq_io_context, rb_node); + + if (cic->key < __cic->key) + p = &(*p)->rb_left; + else if (cic->key > __cic->key) + p = &(*p)->rb_right; + else + BUG(); + } + + rb_link_node(&cic->rb_node, parent, p); + rb_insert_color(&cic->rb_node, &ioc->cic_root); + list_add(&cic->queue_list, &cfqd->cic_list); + read_unlock(&cfq_exit_lock); +} + /* * Setup general io context and cfq io context. There can be several cfq * io contexts per general io context, if this process is doing io to more - * than one device managed by cfq. Note that caller is holding a reference to - * cfqq, so we don't need to worry about it disappearing + * than one device managed by cfq. */ static struct cfq_io_context * -cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) +cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) { struct io_context *ioc = NULL; struct cfq_io_context *cic; @@ -1463,88 +1517,15 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) if (!ioc) return NULL; -restart: - if ((cic = ioc->cic) == NULL) { - cic = cfq_alloc_io_context(cfqd, gfp_mask); - - if (cic == NULL) - goto err; - - /* - * manually increment generic io_context usage count, it - * cannot go away since we are already holding one ref to it - */ - cic->ioc = ioc; - cic->key = cfqd; - read_lock(&cfq_exit_lock); - ioc->set_ioprio = cfq_ioc_set_ioprio; - ioc->cic = cic; - list_add(&cic->queue_list, &cfqd->cic_list); - read_unlock(&cfq_exit_lock); - } else { - struct cfq_io_context *__cic; - - /* - * the first cic on the list is actually the head itself - */ - if (cic->key == cfqd) - goto out; - - if (unlikely(!cic->key)) { - read_lock(&cfq_exit_lock); - if (list_empty(&cic->list)) - ioc->cic = NULL; - else - ioc->cic = list_entry(cic->list.next, - struct cfq_io_context, - list); - read_unlock(&cfq_exit_lock); - kmem_cache_free(cfq_ioc_pool, cic); - atomic_dec(&ioc_count); - goto restart; - } - - /* - * cic exists, check if we already are there. linear search - * should be ok here, the list will usually not be more than - * 1 or a few entries long - */ - list_for_each_entry(__cic, &cic->list, list) { - /* - * this process is already holding a reference to - * this queue, so no need to get one more - */ - if (__cic->key == cfqd) { - cic = __cic; - goto out; - } - if (unlikely(!__cic->key)) { - read_lock(&cfq_exit_lock); - list_del(&__cic->list); - read_unlock(&cfq_exit_lock); - kmem_cache_free(cfq_ioc_pool, __cic); - atomic_dec(&ioc_count); - goto restart; - } - } + cic = cfq_cic_rb_lookup(cfqd, ioc); + if (cic) + goto out; - /* - * nope, process doesn't have a cic assoicated with this - * cfqq yet. 
get a new one and add to list - */ - __cic = cfq_alloc_io_context(cfqd, gfp_mask); - if (__cic == NULL) - goto err; - - __cic->ioc = ioc; - __cic->key = cfqd; - read_lock(&cfq_exit_lock); - list_add(&__cic->list, &cic->list); - list_add(&__cic->queue_list, &cfqd->cic_list); - read_unlock(&cfq_exit_lock); - cic = __cic; - } + cic = cfq_alloc_io_context(cfqd, gfp_mask); + if (cic == NULL) + goto err; + cfq_cic_link(cfqd, ioc, cic); out: return cic; err: @@ -1965,7 +1946,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio, might_sleep_if(gfp_mask & __GFP_WAIT); - cic = cfq_get_io_context(cfqd, key, gfp_mask); + cic = cfq_get_io_context(cfqd, gfp_mask); spin_lock_irqsave(q->queue_lock, flags); @@ -2133,11 +2114,14 @@ static void cfq_exit_queue(elevator_t *e) request_queue_t *q = cfqd->queue; cfq_shutdown_timer_wq(cfqd); + write_lock(&cfq_exit_lock); spin_lock_irq(q->queue_lock); + if (cfqd->active_queue) __cfq_slice_expired(cfqd, cfqd->active_queue, 0); - while(!list_empty(&cfqd->cic_list)) { + + while (!list_empty(&cfqd->cic_list)) { struct cfq_io_context *cic = list_entry(cfqd->cic_list.next, struct cfq_io_context, queue_list); @@ -2152,6 +2136,7 @@ static void cfq_exit_queue(elevator_t *e) cic->key = NULL; list_del_init(&cic->queue_list); } + spin_unlock_irq(q->queue_lock); write_unlock(&cfq_exit_lock); diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 82469db2510..cb608768ca3 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -3539,11 +3539,15 @@ void put_io_context(struct io_context *ioc) BUG_ON(atomic_read(&ioc->refcount) == 0); if (atomic_dec_and_test(&ioc->refcount)) { + struct cfq_io_context *cic; + rcu_read_lock(); if (ioc->aic && ioc->aic->dtor) ioc->aic->dtor(ioc->aic); - if (ioc->cic && ioc->cic->dtor) - ioc->cic->dtor(ioc->cic); + if (ioc->cic_root.rb_node != NULL) { + cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node); + cic->dtor(ioc); + } rcu_read_unlock(); kmem_cache_free(iocontext_cachep, ioc); @@ -3556,6 +3560,7 @@ void exit_io_context(void) { unsigned long flags; struct io_context *ioc; + struct cfq_io_context *cic; local_irq_save(flags); task_lock(current); @@ -3567,9 +3572,11 @@ void exit_io_context(void) if (ioc->aic && ioc->aic->exit) ioc->aic->exit(ioc->aic); - if (ioc->cic && ioc->cic->exit) - ioc->cic->exit(ioc->cic); - + if (ioc->cic_root.rb_node != NULL) { + cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node); + cic->exit(ioc); + } + put_io_context(ioc); } @@ -3598,7 +3605,7 @@ struct io_context *current_io_context(gfp_t gfp_flags) ret->last_waited = jiffies; /* doesn't matter... 
*/ ret->nr_batch_requests = 0; /* because this is 0 */ ret->aic = NULL; - ret->cic = NULL; + ret->cic_root.rb_node = NULL; tsk->io_context = ret; } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c179966f1a2..ed0ffa67356 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -55,13 +55,11 @@ struct as_io_context { struct cfq_queue; struct cfq_io_context { - /* - * circular list of cfq_io_contexts belonging to a process io context - */ - struct list_head list; - struct cfq_queue *cfqq[2]; + struct rb_node rb_node; void *key; + struct cfq_queue *cfqq[2]; + struct io_context *ioc; unsigned long last_end_request; @@ -72,8 +70,8 @@ struct cfq_io_context { struct list_head queue_list; - void (*dtor)(struct cfq_io_context *); - void (*exit)(struct cfq_io_context *); + void (*dtor)(struct io_context *); /* destructor */ + void (*exit)(struct io_context *); /* called on task exit */ }; /* @@ -94,7 +92,7 @@ struct io_context { int nr_batch_requests; /* Number of requests left in the batch */ struct as_io_context *aic; - struct cfq_io_context *cic; + struct rb_root cic_root; }; void put_io_context(struct io_context *ioc); -- cgit v1.2.3-70-g09d2 From 206dc69b31ca05baac68c75b8ed2ba7dd857d273 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 28 Mar 2006 13:03:44 +0200 Subject: [BLOCK] cfq-iosched: seek and async performance fixes Detect whether a given process is seeky and if so disable (mostly) the idle window if it is. We still allow just a little idle time, just enough to allow that process to submit a new request. That is needed to maintain fairness across priority groups. In some cases, we could setup several async queues. This is not optimal from a performance POV, since we want all async io in one queue to perform good sorting on it. It also impacted sync queues, as async io got too much slice time. Signed-off-by: Jens Axboe --- block/cfq-iosched.c | 102 +++++++++++++++++++++++++++++++------------------ include/linux/blkdev.h | 8 +++- 2 files changed, 72 insertions(+), 38 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 15152e2da0d..67d446de022 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -26,18 +26,12 @@ static const int cfq_back_penalty = 2; /* penalty of a backwards seek */ static const int cfq_slice_sync = HZ / 10; static int cfq_slice_async = HZ / 25; static const int cfq_slice_async_rq = 2; -static int cfq_slice_idle = HZ / 100; +static int cfq_slice_idle = HZ / 70; #define CFQ_IDLE_GRACE (HZ / 10) #define CFQ_SLICE_SCALE (5) #define CFQ_KEY_ASYNC (0) -#define CFQ_KEY_ANY (0xffff) - -/* - * disable queueing at the driver/hardware level - */ -static const int cfq_max_depth = 2; static DEFINE_RWLOCK(cfq_exit_lock); @@ -102,6 +96,8 @@ static struct completion *ioc_gone; #define cfq_cfqq_sync(cfqq) \ (cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC]) +#define sample_valid(samples) ((samples) > 80) + /* * Per block device queue structure */ @@ -170,7 +166,6 @@ struct cfq_data { unsigned int cfq_slice[2]; unsigned int cfq_slice_async_rq; unsigned int cfq_slice_idle; - unsigned int cfq_max_depth; struct list_head cic_list; }; @@ -343,6 +338,14 @@ static int cfq_queue_empty(request_queue_t *q) return !cfqd->busy_queues; } +static inline pid_t cfq_queue_pid(struct task_struct *task, int rw) +{ + if (rw == READ || process_sync(task)) + return task->pid; + + return CFQ_KEY_ASYNC; +} + /* * Lifted from AS - choose which of crq1 and crq2 that is best served now. 
* We choose the request that is closest to the head right now. Distance @@ -626,15 +629,20 @@ cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq) cfq_add_crq_rb(crq); } -static struct request *cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector) - +static struct request * +cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) { - struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->pid, CFQ_KEY_ANY); + struct task_struct *tsk = current; + pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio)); + struct cfq_queue *cfqq; struct rb_node *n; + sector_t sector; + cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio); if (!cfqq) goto out; + sector = bio->bi_sector + bio_sectors(bio); n = cfqq->sort_list.rb_node; while (n) { struct cfq_rq *crq = rb_entry_crq(n); @@ -688,7 +696,7 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio) goto out; } - __rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio)); + __rq = cfq_find_rq_fmerge(cfqd, bio); if (__rq && elv_rq_merge_ok(__rq, bio)) { ret = ELEVATOR_FRONT_MERGE; goto out; @@ -891,6 +899,7 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd) static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) { + struct cfq_io_context *cic; unsigned long sl; WARN_ON(!RB_EMPTY(&cfqq->sort_list)); @@ -906,13 +915,23 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) /* * task has exited, don't wait */ - if (cfqd->active_cic && !cfqd->active_cic->ioc->task) + cic = cfqd->active_cic; + if (!cic || !cic->ioc->task) return 0; cfq_mark_cfqq_must_dispatch(cfqq); cfq_mark_cfqq_wait_request(cfqq); sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle); + + /* + * we don't want to idle for seeks, but we do want to allow + * fair distribution of slice time for a process doing back-to-back + * seeks. 
so allow a little bit of time for him to submit a new rq + */ + if (sample_valid(cic->seek_samples) && cic->seek_mean > 131072) + sl = 2; + mod_timer(&cfqd->idle_slice_timer, jiffies + sl); return 1; } @@ -1129,13 +1148,6 @@ cfq_dispatch_requests(request_queue_t *q, int force) if (cfqq) { int max_dispatch; - /* - * if idle window is disabled, allow queue buildup - */ - if (!cfq_cfqq_idle_window(cfqq) && - cfqd->rq_in_driver >= cfqd->cfq_max_depth) - return 0; - cfq_clear_cfqq_must_dispatch(cfqq); cfq_clear_cfqq_wait_request(cfqq); del_timer(&cfqd->idle_slice_timer); @@ -1185,13 +1197,13 @@ __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio, const int hashval) { struct hlist_head *hash_list = &cfqd->cfq_hash[hashval]; - struct hlist_node *entry, *next; + struct hlist_node *entry; + struct cfq_queue *__cfqq; - hlist_for_each_safe(entry, next, hash_list) { - struct cfq_queue *__cfqq = list_entry_qhash(entry); + hlist_for_each_entry(__cfqq, entry, hash_list, cfq_hash) { const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio); - if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY)) + if (__cfqq->key == key && (__p == prio || !prio)) return __cfqq; } @@ -1572,7 +1584,33 @@ cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic) cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples; } -#define sample_valid(samples) ((samples) > 80) +static void +cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic, + struct cfq_rq *crq) +{ + sector_t sdist; + u64 total; + + if (cic->last_request_pos < crq->request->sector) + sdist = crq->request->sector - cic->last_request_pos; + else + sdist = cic->last_request_pos - crq->request->sector; + + /* + * Don't allow the seek distance to get too large from the + * odd fragment, pagein, etc + */ + if (cic->seek_samples <= 60) /* second&third seek */ + sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024); + else + sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64); + + cic->seek_samples = (7*cic->seek_samples + 256) / 8; + cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8; + total = cic->seek_total + (cic->seek_samples/2); + do_div(total, cic->seek_samples); + cic->seek_mean = (sector_t)total; +} /* * Disable idle window if the process thinks too long or seeks so much that @@ -1685,9 +1723,11 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, cic = crq->io_context; cfq_update_io_thinktime(cfqd, cic); + cfq_update_io_seektime(cfqd, cic, crq); cfq_update_idle_window(cfqd, cfqq, cic); cic->last_queue = jiffies; + cic->last_request_pos = crq->request->sector + crq->request->nr_sectors; if (cfqq == cfqd->active_queue) { /* @@ -1820,14 +1860,6 @@ static void cfq_prio_boost(struct cfq_queue *cfqq) cfq_resort_rr_list(cfqq, 0); } -static inline pid_t cfq_queue_pid(struct task_struct *task, int rw) -{ - if (rw == READ || process_sync(task)) - return task->pid; - - return CFQ_KEY_ASYNC; -} - static inline int __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq, struct task_struct *task, int rw) @@ -2226,7 +2258,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e) cfqd->cfq_slice[1] = cfq_slice_sync; cfqd->cfq_slice_async_rq = cfq_slice_async_rq; cfqd->cfq_slice_idle = cfq_slice_idle; - cfqd->cfq_max_depth = cfq_max_depth; return 0; out_crqpool: @@ -2309,7 +2340,6 @@ SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); 
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); -SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0); #undef SHOW_FUNCTION #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ @@ -2338,7 +2368,6 @@ STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0); -STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0); #undef STORE_FUNCTION #define CFQ_ATTR(name) \ @@ -2355,7 +2384,6 @@ static struct elv_fs_entry cfq_attrs[] = { CFQ_ATTR(slice_async), CFQ_ATTR(slice_async_rq), CFQ_ATTR(slice_idle), - CFQ_ATTR(max_depth), __ATTR_NULL }; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index ed0ffa67356..d0cac8b58de 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -63,11 +63,17 @@ struct cfq_io_context { struct io_context *ioc; unsigned long last_end_request; - unsigned long last_queue; + sector_t last_request_pos; + unsigned long last_queue; + unsigned long ttime_total; unsigned long ttime_samples; unsigned long ttime_mean; + unsigned int seek_samples; + u64 seek_total; + sector_t seek_mean; + struct list_head queue_list; void (*dtor)(struct io_context *); /* destructor */ -- cgit v1.2.3-70-g09d2
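
Note: the seek/async commit above describes its heuristic only in prose before the hunks. As a rough standalone illustration of what the patch adds to struct cfq_io_context (a userspace sketch with invented names — seek_stats, seek_update, seek_is_seeky are not kernel symbols), the decayed per-process seek statistics work roughly like this:

/*
 * Standalone userspace sketch of the per-process seek heuristic from the
 * patch above.  Field and function names are illustrative only; the kernel
 * keeps the real counters in struct cfq_io_context.
 */
#include <stdint.h>

struct seek_stats {
	unsigned int samples;	/* decayed sample count */
	uint64_t     total;	/* decayed seek-distance sum, scaled by 256 */
	uint64_t     mean;	/* derived mean seek distance, in sectors */
	uint64_t     last_pos;	/* sector just past the previous request */
};

static uint64_t min_u64(uint64_t a, uint64_t b)
{
	return a < b ? a : b;
}

/* feed one request: 'sector' is where it starts, 'nr' how many sectors */
static void seek_update(struct seek_stats *s, uint64_t sector, uint64_t nr)
{
	uint64_t dist = sector > s->last_pos ? sector - s->last_pos
					     : s->last_pos - sector;

	/* clamp outliers so a stray pagein does not dominate the mean */
	if (s->samples <= 60)
		dist = min_u64(dist, s->mean * 4 + 2*1024*1024);
	else
		dist = min_u64(dist, s->mean * 4 + 2*1024*64);

	/* 7/8 exponential decay; distances are scaled by 256 for precision */
	s->samples = (7 * s->samples + 256) / 8;
	s->total   = (7 * s->total + 256 * dist) / 8;
	s->mean    = (s->total + s->samples / 2) / s->samples;

	s->last_pos = sector + nr;
}

/* enough samples and a large mean distance => treat the process as seeky */
static int seek_is_seeky(const struct seek_stats *s)
{
	return s->samples > 80 && s->mean > 131072;
}

With these statistics in place, cfq_arm_slice_timer shrinks the idle window to a couple of jiffies for a process classified as seeky rather than disabling idling outright, which is what keeps fairness across priority groups as the commit message intends.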