Diffstat (limited to 'block')
-rw-r--r--  block/Kconfig          |   22
-rw-r--r--  block/Kconfig.iosched  |   43
-rw-r--r--  block/Makefile         |    2
-rw-r--r--  block/as-iosched.c     | 1520
-rw-r--r--  block/blk-cgroup.c     |  361
-rw-r--r--  block/blk-cgroup.h     |  127
-rw-r--r--  block/blk-core.c       |   19
-rw-r--r--  block/blk-ioc.c        |   12
-rw-r--r--  block/blk-settings.c   |   51
-rw-r--r--  block/blk-sysfs.c      |   33
-rw-r--r--  block/bsg.c            |    3
-rw-r--r--  block/cfq-iosched.c    | 1493
-rw-r--r--  block/compat_ioctl.c   |    2
-rw-r--r--  block/elevator.c       |   10
-rw-r--r--  block/genhd.c          |   12
-rw-r--r--  block/ioctl.c          |    2
-rw-r--r--  block/scsi_ioctl.c     |    6
17 files changed, 1995 insertions(+), 1723 deletions(-)
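The patch below adds the generic blkio cgroup controller (block/blk-cgroup.[ch]) and hooks it into CFQ for proportional-weight group scheduling. Per the added cftype, each group exposes a "weight" file (presented as blkio.weight under the usual cgroup naming convention), bounded by BLKIO_WEIGHT_MIN..BLKIO_WEIGHT_MAX (100..1000). The following userspace C sketch shows one way such a group might be configured; the mount point and group name are assumptions for illustration, not part of this patch.

```c
/* Minimal sketch: create a blkio cgroup and set its proportional weight. */
#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	/* Assumed mount point and group name. */
	const char *group = "/cgroup/blkio/mygroup";
	char path[256];
	int fd;

	/* Create the group directory (ignore it if it already exists). */
	if (mkdir(group, 0755) && errno != EEXIST) {
		perror("mkdir");
		return 1;
	}

	snprintf(path, sizeof(path), "%s/blkio.weight", group);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* 500 matches BLKIO_WEIGHT_DEFAULT in blk-cgroup.h. */
	if (write(fd, "500\n", 4) != 4)
		perror("write");

	close(fd);
	return 0;
}
```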
diff --git a/block/Kconfig b/block/Kconfig index 9be0b56eaee..e20fbde0875 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -77,6 +77,28 @@ config BLK_DEV_INTEGRITY T10/SCSI Data Integrity Field or the T13/ATA External Path Protection. If in doubt, say N. +config BLK_CGROUP + bool + depends on CGROUPS + default n + ---help--- + Generic block IO controller cgroup interface. This is the common + cgroup interface which should be used by various IO controlling + policies. + + Currently, CFQ IO scheduler uses it to recognize task groups and + control disk bandwidth allocation (proportional time slice allocation) + to such task groups. + +config DEBUG_BLK_CGROUP + bool + depends on BLK_CGROUP + default n + ---help--- + Enable some debugging help. Currently it stores the cgroup path + in the blk group which can be used by cfq for tracing various + group related activity. + endif # BLOCK config BLOCK_COMPAT diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched index 7e803fc8877..b71abfb0d72 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -12,24 +12,14 @@ config IOSCHED_NOOP that do their own scheduling and require only minimal assistance from the kernel. -config IOSCHED_AS - tristate "Anticipatory I/O scheduler" - default y - ---help--- - The anticipatory I/O scheduler is generally a good choice for most - environments, but is quite large and complex when compared to the - deadline I/O scheduler, it can also be slower in some cases - especially some database loads. - config IOSCHED_DEADLINE tristate "Deadline I/O scheduler" default y ---help--- - The deadline I/O scheduler is simple and compact, and is often as - good as the anticipatory I/O scheduler, and in some database - workloads, better. In the case of a single process performing I/O to - a disk at any one time, its behaviour is almost identical to the - anticipatory I/O scheduler and so is a good choice. + The deadline I/O scheduler is simple and compact. It will provide + CSCAN service with FIFO expiration of requests, switching to + a new point in the service tree and doing a batch of IO from there + in case of expiry. config IOSCHED_CFQ tristate "CFQ I/O scheduler" @@ -37,9 +27,28 @@ config IOSCHED_CFQ ---help--- The CFQ I/O scheduler tries to distribute bandwidth equally among all processes in the system. It should provide a fair - working environment, suitable for desktop systems. + and low latency working environment, suitable for both desktop + and server systems. + This is the default I/O scheduler. +config CFQ_GROUP_IOSCHED + bool "CFQ Group Scheduling support" + depends on IOSCHED_CFQ && CGROUPS + select BLK_CGROUP + default n + ---help--- + Enable group IO scheduling in CFQ. + +config DEBUG_CFQ_IOSCHED + bool "Debug CFQ Scheduling" + depends on CFQ_GROUP_IOSCHED + select DEBUG_BLK_CGROUP + default n + ---help--- + Enable CFQ IO scheduling debugging in CFQ. Currently it makes + blktrace output more verbose. + choice prompt "Default I/O scheduler" default DEFAULT_CFQ @@ -47,9 +56,6 @@ choice Select the I/O scheduler which will be used by default for all block devices. 
- config DEFAULT_AS - bool "Anticipatory" if IOSCHED_AS=y - config DEFAULT_DEADLINE bool "Deadline" if IOSCHED_DEADLINE=y @@ -63,7 +69,6 @@ endchoice config DEFAULT_IOSCHED string - default "anticipatory" if DEFAULT_AS default "deadline" if DEFAULT_DEADLINE default "cfq" if DEFAULT_CFQ default "noop" if DEFAULT_NOOP diff --git a/block/Makefile b/block/Makefile index ba74ca6bfa1..cb2d515ebd6 100644 --- a/block/Makefile +++ b/block/Makefile @@ -8,8 +8,8 @@ obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \ blk-iopoll.o ioctl.o genhd.o scsi_ioctl.o obj-$(CONFIG_BLK_DEV_BSG) += bsg.o +obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o -obj-$(CONFIG_IOSCHED_AS) += as-iosched.o obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o diff --git a/block/as-iosched.c b/block/as-iosched.c deleted file mode 100644 index ce8ba57c655..00000000000 --- a/block/as-iosched.c +++ /dev/null @@ -1,1520 +0,0 @@ -/* - * Anticipatory & deadline i/o scheduler. - * - * Copyright (C) 2002 Jens Axboe <axboe@kernel.dk> - * Nick Piggin <nickpiggin@yahoo.com.au> - * - */ -#include <linux/kernel.h> -#include <linux/fs.h> -#include <linux/blkdev.h> -#include <linux/elevator.h> -#include <linux/bio.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/init.h> -#include <linux/compiler.h> -#include <linux/rbtree.h> -#include <linux/interrupt.h> - -/* - * See Documentation/block/as-iosched.txt - */ - -/* - * max time before a read is submitted. - */ -#define default_read_expire (HZ / 8) - -/* - * ditto for writes, these limits are not hard, even - * if the disk is capable of satisfying them. - */ -#define default_write_expire (HZ / 4) - -/* - * read_batch_expire describes how long we will allow a stream of reads to - * persist before looking to see whether it is time to switch over to writes. - */ -#define default_read_batch_expire (HZ / 2) - -/* - * write_batch_expire describes how long we want a stream of writes to run for. - * This is not a hard limit, but a target we set for the auto-tuning thingy. - * See, the problem is: we can send a lot of writes to disk cache / TCQ in - * a short amount of time... - */ -#define default_write_batch_expire (HZ / 8) - -/* - * max time we may wait to anticipate a read (default around 6ms) - */ -#define default_antic_expire ((HZ / 150) ? HZ / 150 : 1) - -/* - * Keep track of up to 20ms thinktimes. We can go as big as we like here, - * however huge values tend to interfere and not decay fast enough. A program - * might be in a non-io phase of operation. Waiting on user input for example, - * or doing a lengthy computation. A small penalty can be justified there, and - * will still catch out those processes that constantly have large thinktimes. 
- */ -#define MAX_THINKTIME (HZ/50UL) - -/* Bits in as_io_context.state */ -enum as_io_states { - AS_TASK_RUNNING=0, /* Process has not exited */ - AS_TASK_IOSTARTED, /* Process has started some IO */ - AS_TASK_IORUNNING, /* Process has completed some IO */ -}; - -enum anticipation_status { - ANTIC_OFF=0, /* Not anticipating (normal operation) */ - ANTIC_WAIT_REQ, /* The last read has not yet completed */ - ANTIC_WAIT_NEXT, /* Currently anticipating a request vs - last read (which has completed) */ - ANTIC_FINISHED, /* Anticipating but have found a candidate - * or timed out */ -}; - -struct as_data { - /* - * run time data - */ - - struct request_queue *q; /* the "owner" queue */ - - /* - * requests (as_rq s) are present on both sort_list and fifo_list - */ - struct rb_root sort_list[2]; - struct list_head fifo_list[2]; - - struct request *next_rq[2]; /* next in sort order */ - sector_t last_sector[2]; /* last SYNC & ASYNC sectors */ - - unsigned long exit_prob; /* probability a task will exit while - being waited on */ - unsigned long exit_no_coop; /* probablility an exited task will - not be part of a later cooperating - request */ - unsigned long new_ttime_total; /* mean thinktime on new proc */ - unsigned long new_ttime_mean; - u64 new_seek_total; /* mean seek on new proc */ - sector_t new_seek_mean; - - unsigned long current_batch_expires; - unsigned long last_check_fifo[2]; - int changed_batch; /* 1: waiting for old batch to end */ - int new_batch; /* 1: waiting on first read complete */ - int batch_data_dir; /* current batch SYNC / ASYNC */ - int write_batch_count; /* max # of reqs in a write batch */ - int current_write_count; /* how many requests left this batch */ - int write_batch_idled; /* has the write batch gone idle? */ - - enum anticipation_status antic_status; - unsigned long antic_start; /* jiffies: when it started */ - struct timer_list antic_timer; /* anticipatory scheduling timer */ - struct work_struct antic_work; /* Deferred unplugging */ - struct io_context *io_context; /* Identify the expected process */ - int ioc_finished; /* IO associated with io_context is finished */ - int nr_dispatched; - - /* - * settings that change how the i/o scheduler behaves - */ - unsigned long fifo_expire[2]; - unsigned long batch_expire[2]; - unsigned long antic_expire; -}; - -/* - * per-request data. - */ -enum arq_state { - AS_RQ_NEW=0, /* New - not referenced and not on any lists */ - AS_RQ_QUEUED, /* In the request queue. It belongs to the - scheduler */ - AS_RQ_DISPATCHED, /* On the dispatch list. It belongs to the - driver now */ - AS_RQ_PRESCHED, /* Debug poisoning for requests being used */ - AS_RQ_REMOVED, - AS_RQ_MERGED, - AS_RQ_POSTSCHED, /* when they shouldn't be */ -}; - -#define RQ_IOC(rq) ((struct io_context *) (rq)->elevator_private) -#define RQ_STATE(rq) ((enum arq_state)(rq)->elevator_private2) -#define RQ_SET_STATE(rq, state) ((rq)->elevator_private2 = (void *) state) - -static DEFINE_PER_CPU(unsigned long, as_ioc_count); -static struct completion *ioc_gone; -static DEFINE_SPINLOCK(ioc_gone_lock); - -static void as_move_to_dispatch(struct as_data *ad, struct request *rq); -static void as_antic_stop(struct as_data *ad); - -/* - * IO Context helper functions - */ - -/* Called to deallocate the as_io_context */ -static void free_as_io_context(struct as_io_context *aic) -{ - kfree(aic); - elv_ioc_count_dec(as_ioc_count); - if (ioc_gone) { - /* - * AS scheduler is exiting, grab exit lock and check - * the pending io context count. 
If it hits zero, - * complete ioc_gone and set it back to NULL. - */ - spin_lock(&ioc_gone_lock); - if (ioc_gone && !elv_ioc_count_read(as_ioc_count)) { - complete(ioc_gone); - ioc_gone = NULL; - } - spin_unlock(&ioc_gone_lock); - } -} - -static void as_trim(struct io_context *ioc) -{ - spin_lock_irq(&ioc->lock); - if (ioc->aic) - free_as_io_context(ioc->aic); - ioc->aic = NULL; - spin_unlock_irq(&ioc->lock); -} - -/* Called when the task exits */ -static void exit_as_io_context(struct as_io_context *aic) -{ - WARN_ON(!test_bit(AS_TASK_RUNNING, &aic->state)); - clear_bit(AS_TASK_RUNNING, &aic->state); -} - -static struct as_io_context *alloc_as_io_context(void) -{ - struct as_io_context *ret; - - ret = kmalloc(sizeof(*ret), GFP_ATOMIC); - if (ret) { - ret->dtor = free_as_io_context; - ret->exit = exit_as_io_context; - ret->state = 1 << AS_TASK_RUNNING; - atomic_set(&ret->nr_queued, 0); - atomic_set(&ret->nr_dispatched, 0); - spin_lock_init(&ret->lock); - ret->ttime_total = 0; - ret->ttime_samples = 0; - ret->ttime_mean = 0; - ret->seek_total = 0; - ret->seek_samples = 0; - ret->seek_mean = 0; - elv_ioc_count_inc(as_ioc_count); - } - - return ret; -} - -/* - * If the current task has no AS IO context then create one and initialise it. - * Then take a ref on the task's io context and return it. - */ -static struct io_context *as_get_io_context(int node) -{ - struct io_context *ioc = get_io_context(GFP_ATOMIC, node); - if (ioc && !ioc->aic) { - ioc->aic = alloc_as_io_context(); - if (!ioc->aic) { - put_io_context(ioc); - ioc = NULL; - } - } - return ioc; -} - -static void as_put_io_context(struct request *rq) -{ - struct as_io_context *aic; - - if (unlikely(!RQ_IOC(rq))) - return; - - aic = RQ_IOC(rq)->aic; - - if (rq_is_sync(rq) && aic) { - unsigned long flags; - - spin_lock_irqsave(&aic->lock, flags); - set_bit(AS_TASK_IORUNNING, &aic->state); - aic->last_end_request = jiffies; - spin_unlock_irqrestore(&aic->lock, flags); - } - - put_io_context(RQ_IOC(rq)); -} - -/* - * rb tree support functions - */ -#define RQ_RB_ROOT(ad, rq) (&(ad)->sort_list[rq_is_sync((rq))]) - -static void as_add_rq_rb(struct as_data *ad, struct request *rq) -{ - struct request *alias; - - while ((unlikely(alias = elv_rb_add(RQ_RB_ROOT(ad, rq), rq)))) { - as_move_to_dispatch(ad, alias); - as_antic_stop(ad); - } -} - -static inline void as_del_rq_rb(struct as_data *ad, struct request *rq) -{ - elv_rb_del(RQ_RB_ROOT(ad, rq), rq); -} - -/* - * IO Scheduler proper - */ - -#define MAXBACK (1024 * 1024) /* - * Maximum distance the disk will go backward - * for a request. - */ - -#define BACK_PENALTY 2 - -/* - * as_choose_req selects the preferred one of two requests of the same data_dir - * ignoring time - eg. timeouts, which is the job of as_dispatch_request - */ -static struct request * -as_choose_req(struct as_data *ad, struct request *rq1, struct request *rq2) -{ - int data_dir; - sector_t last, s1, s2, d1, d2; - int r1_wrap=0, r2_wrap=0; /* requests are behind the disk head */ - const sector_t maxback = MAXBACK; - - if (rq1 == NULL || rq1 == rq2) - return rq2; - if (rq2 == NULL) - return rq1; - - data_dir = rq_is_sync(rq1); - - last = ad->last_sector[data_dir]; - s1 = blk_rq_pos(rq1); - s2 = blk_rq_pos(rq2); - - BUG_ON(data_dir != rq_is_sync(rq2)); - - /* - * Strict one way elevator _except_ in the case where we allow - * short backward seeks which are biased as twice the cost of a - * similar forward seek. 
- */ - if (s1 >= last) - d1 = s1 - last; - else if (s1+maxback >= last) - d1 = (last - s1)*BACK_PENALTY; - else { - r1_wrap = 1; - d1 = 0; /* shut up, gcc */ - } - - if (s2 >= last) - d2 = s2 - last; - else if (s2+maxback >= last) - d2 = (last - s2)*BACK_PENALTY; - else { - r2_wrap = 1; - d2 = 0; - } - - /* Found required data */ - if (!r1_wrap && r2_wrap) - return rq1; - else if (!r2_wrap && r1_wrap) - return rq2; - else if (r1_wrap && r2_wrap) { - /* both behind the head */ - if (s1 <= s2) - return rq1; - else - return rq2; - } - - /* Both requests in front of the head */ - if (d1 < d2) - return rq1; - else if (d2 < d1) - return rq2; - else { - if (s1 >= s2) - return rq1; - else - return rq2; - } -} - -/* - * as_find_next_rq finds the next request after @prev in elevator order. - * this with as_choose_req form the basis for how the scheduler chooses - * what request to process next. Anticipation works on top of this. - */ -static struct request * -as_find_next_rq(struct as_data *ad, struct request *last) -{ - struct rb_node *rbnext = rb_next(&last->rb_node); - struct rb_node *rbprev = rb_prev(&last->rb_node); - struct request *next = NULL, *prev = NULL; - - BUG_ON(RB_EMPTY_NODE(&last->rb_node)); - - if (rbprev) - prev = rb_entry_rq(rbprev); - - if (rbnext) - next = rb_entry_rq(rbnext); - else { - const int data_dir = rq_is_sync(last); - - rbnext = rb_first(&ad->sort_list[data_dir]); - if (rbnext && rbnext != &last->rb_node) - next = rb_entry_rq(rbnext); - } - - return as_choose_req(ad, next, prev); -} - -/* - * anticipatory scheduling functions follow - */ - -/* - * as_antic_expired tells us when we have anticipated too long. - * The funny "absolute difference" math on the elapsed time is to handle - * jiffy wraps, and disks which have been idle for 0x80000000 jiffies. - */ -static int as_antic_expired(struct as_data *ad) -{ - long delta_jif; - - delta_jif = jiffies - ad->antic_start; - if (unlikely(delta_jif < 0)) - delta_jif = -delta_jif; - if (delta_jif < ad->antic_expire) - return 0; - - return 1; -} - -/* - * as_antic_waitnext starts anticipating that a nice request will soon be - * submitted. See also as_antic_waitreq - */ -static void as_antic_waitnext(struct as_data *ad) -{ - unsigned long timeout; - - BUG_ON(ad->antic_status != ANTIC_OFF - && ad->antic_status != ANTIC_WAIT_REQ); - - timeout = ad->antic_start + ad->antic_expire; - - mod_timer(&ad->antic_timer, timeout); - - ad->antic_status = ANTIC_WAIT_NEXT; -} - -/* - * as_antic_waitreq starts anticipating. We don't start timing the anticipation - * until the request that we're anticipating on has finished. This means we - * are timing from when the candidate process wakes up hopefully. - */ -static void as_antic_waitreq(struct as_data *ad) -{ - BUG_ON(ad->antic_status == ANTIC_FINISHED); - if (ad->antic_status == ANTIC_OFF) { - if (!ad->io_context || ad->ioc_finished) - as_antic_waitnext(ad); - else - ad->antic_status = ANTIC_WAIT_REQ; - } -} - -/* - * This is called directly by the functions in this file to stop anticipation. - * We kill the timer and schedule a call to the request_fn asap. - */ -static void as_antic_stop(struct as_data *ad) -{ - int status = ad->antic_status; - - if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) { - if (status == ANTIC_WAIT_NEXT) - del_timer(&ad->antic_timer); - ad->antic_status = ANTIC_FINISHED; - /* see as_work_handler */ - kblockd_schedule_work(ad->q, &ad->antic_work); - } -} - -/* - * as_antic_timeout is the timer function set by as_antic_waitnext. 
- */ -static void as_antic_timeout(unsigned long data) -{ - struct request_queue *q = (struct request_queue *)data; - struct as_data *ad = q->elevator->elevator_data; - unsigned long flags; - - spin_lock_irqsave(q->queue_lock, flags); - if (ad->antic_status == ANTIC_WAIT_REQ - || ad->antic_status == ANTIC_WAIT_NEXT) { - struct as_io_context *aic; - spin_lock(&ad->io_context->lock); - aic = ad->io_context->aic; - - ad->antic_status = ANTIC_FINISHED; - kblockd_schedule_work(q, &ad->antic_work); - - if (aic->ttime_samples == 0) { - /* process anticipated on has exited or timed out*/ - ad->exit_prob = (7*ad->exit_prob + 256)/8; - } - if (!test_bit(AS_TASK_RUNNING, &aic->state)) { - /* process not "saved" by a cooperating request */ - ad->exit_no_coop = (7*ad->exit_no_coop + 256)/8; - } - spin_unlock(&ad->io_context->lock); - } - spin_unlock_irqrestore(q->queue_lock, flags); -} - -static void as_update_thinktime(struct as_data *ad, struct as_io_context *aic, - unsigned long ttime) -{ - /* fixed point: 1.0 == 1<<8 */ - if (aic->ttime_samples == 0) { - ad->new_ttime_total = (7*ad->new_ttime_total + 256*ttime) / 8; - ad->new_ttime_mean = ad->new_ttime_total / 256; - - ad->exit_prob = (7*ad->exit_prob)/8; - } - aic->ttime_samples = (7*aic->ttime_samples + 256) / 8; - aic->ttime_total = (7*aic->ttime_total + 256*ttime) / 8; - aic->ttime_mean = (aic->ttime_total + 128) / aic->ttime_samples; -} - -static void as_update_seekdist(struct as_data *ad, struct as_io_context *aic, - sector_t sdist) -{ - u64 total; - - if (aic->seek_samples == 0) { - ad->new_seek_total = (7*ad->new_seek_total + 256*(u64)sdist)/8; - ad->new_seek_mean = ad->new_seek_total / 256; - } - - /* - * Don't allow the seek distance to get too large from the - * odd fragment, pagein, etc - */ - if (aic->seek_samples <= 60) /* second&third seek */ - sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*1024); - else - sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*64); - - aic->seek_samples = (7*aic->seek_samples + 256) / 8; - aic->seek_total = (7*aic->seek_total + (u64)256*sdist) / 8; - total = aic->seek_total + (aic->seek_samples/2); - do_div(total, aic->seek_samples); - aic->seek_mean = (sector_t)total; -} - -/* - * as_update_iohist keeps a decaying histogram of IO thinktimes, and - * updates @aic->ttime_mean based on that. It is called when a new - * request is queued. 
- */ -static void as_update_iohist(struct as_data *ad, struct as_io_context *aic, - struct request *rq) -{ - int data_dir = rq_is_sync(rq); - unsigned long thinktime = 0; - sector_t seek_dist; - - if (aic == NULL) - return; - - if (data_dir == BLK_RW_SYNC) { - unsigned long in_flight = atomic_read(&aic->nr_queued) - + atomic_read(&aic->nr_dispatched); - spin_lock(&aic->lock); - if (test_bit(AS_TASK_IORUNNING, &aic->state) || - test_bit(AS_TASK_IOSTARTED, &aic->state)) { - /* Calculate read -> read thinktime */ - if (test_bit(AS_TASK_IORUNNING, &aic->state) - && in_flight == 0) { - thinktime = jiffies - aic->last_end_request; - thinktime = min(thinktime, MAX_THINKTIME-1); - } - as_update_thinktime(ad, aic, thinktime); - - /* Calculate read -> read seek distance */ - if (aic->last_request_pos < blk_rq_pos(rq)) - seek_dist = blk_rq_pos(rq) - - aic->last_request_pos; - else - seek_dist = aic->last_request_pos - - blk_rq_pos(rq); - as_update_seekdist(ad, aic, seek_dist); - } - aic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); - set_bit(AS_TASK_IOSTARTED, &aic->state); - spin_unlock(&aic->lock); - } -} - -/* - * as_close_req decides if one request is considered "close" to the - * previous one issued. - */ -static int as_close_req(struct as_data *ad, struct as_io_context *aic, - struct request *rq) -{ - unsigned long delay; /* jiffies */ - sector_t last = ad->last_sector[ad->batch_data_dir]; - sector_t next = blk_rq_pos(rq); - sector_t delta; /* acceptable close offset (in sectors) */ - sector_t s; - - if (ad->antic_status == ANTIC_OFF || !ad->ioc_finished) - delay = 0; - else - delay = jiffies - ad->antic_start; - - if (delay == 0) - delta = 8192; - else if (delay <= (20 * HZ / 1000) && delay <= ad->antic_expire) - delta = 8192 << delay; - else - return 1; - - if ((last <= next + (delta>>1)) && (next <= last + delta)) - return 1; - - if (last < next) - s = next - last; - else - s = last - next; - - if (aic->seek_samples == 0) { - /* - * Process has just started IO. Use past statistics to - * gauge success possibility - */ - if (ad->new_seek_mean > s) { - /* this request is better than what we're expecting */ - return 1; - } - - } else { - if (aic->seek_mean > s) { - /* this request is better than what we're expecting */ - return 1; - } - } - - return 0; -} - -/* - * as_can_break_anticipation returns true if we have been anticipating this - * request. - * - * It also returns true if the process against which we are anticipating - * submits a write - that's presumably an fsync, O_SYNC write, etc. We want to - * dispatch it ASAP, because we know that application will not be submitting - * any new reads. - * - * If the task which has submitted the request has exited, break anticipation. - * - * If this task has queued some other IO, do not enter enticipation. - */ -static int as_can_break_anticipation(struct as_data *ad, struct request *rq) -{ - struct io_context *ioc; - struct as_io_context *aic; - - ioc = ad->io_context; - BUG_ON(!ioc); - spin_lock(&ioc->lock); - - if (rq && ioc == RQ_IOC(rq)) { - /* request from same process */ - spin_unlock(&ioc->lock); - return 1; - } - - if (ad->ioc_finished && as_antic_expired(ad)) { - /* - * In this situation status should really be FINISHED, - * however the timer hasn't had the chance to run yet. 
- */ - spin_unlock(&ioc->lock); - return 1; - } - - aic = ioc->aic; - if (!aic) { - spin_unlock(&ioc->lock); - return 0; - } - - if (atomic_read(&aic->nr_queued) > 0) { - /* process has more requests queued */ - spin_unlock(&ioc->lock); - return 1; - } - - if (atomic_read(&aic->nr_dispatched) > 0) { - /* process has more requests dispatched */ - spin_unlock(&ioc->lock); - return 1; - } - - if (rq && rq_is_sync(rq) && as_close_req(ad, aic, rq)) { - /* - * Found a close request that is not one of ours. - * - * This makes close requests from another process update - * our IO history. Is generally useful when there are - * two or more cooperating processes working in the same - * area. - */ - if (!test_bit(AS_TASK_RUNNING, &aic->state)) { - if (aic->ttime_samples == 0) - ad->exit_prob = (7*ad->exit_prob + 256)/8; - - ad->exit_no_coop = (7*ad->exit_no_coop)/8; - } - - as_update_iohist(ad, aic, rq); - spin_unlock(&ioc->lock); - return 1; - } - - if (!test_bit(AS_TASK_RUNNING, &aic->state)) { - /* process anticipated on has exited */ - if (aic->ttime_samples == 0) - ad->exit_prob = (7*ad->exit_prob + 256)/8; - - if (ad->exit_no_coop > 128) { - spin_unlock(&ioc->lock); - return 1; - } - } - - if (aic->ttime_samples == 0) { - if (ad->new_ttime_mean > ad->antic_expire) { - spin_unlock(&ioc->lock); - return 1; - } - if (ad->exit_prob * ad->exit_no_coop > 128*256) { - spin_unlock(&ioc->lock); - return 1; - } - } else if (aic->ttime_mean > ad->antic_expire) { - /* the process thinks too much between requests */ - spin_unlock(&ioc->lock); - return 1; - } - spin_unlock(&ioc->lock); - return 0; -} - -/* - * as_can_anticipate indicates whether we should either run rq - * or keep anticipating a better request. - */ -static int as_can_anticipate(struct as_data *ad, struct request *rq) -{ -#if 0 /* disable for now, we need to check tag level as well */ - /* - * SSD device without seek penalty, disable idling - */ - if (blk_queue_nonrot(ad->q)) axman - return 0; -#endif - - if (!ad->io_context) - /* - * Last request submitted was a write - */ - return 0; - - if (ad->antic_status == ANTIC_FINISHED) - /* - * Don't restart if we have just finished. Run the next request - */ - return 0; - - if (as_can_break_anticipation(ad, rq)) - /* - * This request is a good candidate. Don't keep anticipating, - * run it. - */ - return 0; - - /* - * OK from here, we haven't finished, and don't have a decent request! - * Status is either ANTIC_OFF so start waiting, - * ANTIC_WAIT_REQ so continue waiting for request to finish - * or ANTIC_WAIT_NEXT so continue waiting for an acceptable request. - */ - - return 1; -} - -/* - * as_update_rq must be called whenever a request (rq) is added to - * the sort_list. This function keeps caches up to date, and checks if the - * request might be one we are "anticipating" - */ -static void as_update_rq(struct as_data *ad, struct request *rq) -{ - const int data_dir = rq_is_sync(rq); - - /* keep the next_rq cache up to date */ - ad->next_rq[data_dir] = as_choose_req(ad, rq, ad->next_rq[data_dir]); - - /* - * have we been anticipating this request? - * or does it come from the same process as the one we are anticipating - * for? 
- */ - if (ad->antic_status == ANTIC_WAIT_REQ - || ad->antic_status == ANTIC_WAIT_NEXT) { - if (as_can_break_anticipation(ad, rq)) - as_antic_stop(ad); - } -} - -/* - * Gathers timings and resizes the write batch automatically - */ -static void update_write_batch(struct as_data *ad) -{ - unsigned long batch = ad->batch_expire[BLK_RW_ASYNC]; - long write_time; - - write_time = (jiffies - ad->current_batch_expires) + batch; - if (write_time < 0) - write_time = 0; - - if (write_time > batch && !ad->write_batch_idled) { - if (write_time > batch * 3) - ad->write_batch_count /= 2; - else - ad->write_batch_count--; - } else if (write_time < batch && ad->current_write_count == 0) { - if (batch > write_time * 3) - ad->write_batch_count *= 2; - else - ad->write_batch_count++; - } - - if (ad->write_batch_count < 1) - ad->write_batch_count = 1; -} - -/* - * as_completed_request is to be called when a request has completed and - * returned something to the requesting process, be it an error or data. - */ -static void as_completed_request(struct request_queue *q, struct request *rq) -{ - struct as_data *ad = q->elevator->elevator_data; - - WARN_ON(!list_empty(&rq->queuelist)); - - if (RQ_STATE(rq) != AS_RQ_REMOVED) { - WARN(1, "rq->state %d\n", RQ_STATE(rq)); - goto out; - } - - if (ad->changed_batch && ad->nr_dispatched == 1) { - ad->current_batch_expires = jiffies + - ad->batch_expire[ad->batch_data_dir]; - kblockd_schedule_work(q, &ad->antic_work); - ad->changed_batch = 0; - - if (ad->batch_data_dir == BLK_RW_SYNC) - ad->new_batch = 1; - } - WARN_ON(ad->nr_dispatched == 0); - ad->nr_dispatched--; - - /* - * Start counting the batch from when a request of that direction is - * actually serviced. This should help devices with big TCQ windows - * and writeback caches - */ - if (ad->new_batch && ad->batch_data_dir == rq_is_sync(rq)) { - update_write_batch(ad); - ad->current_batch_expires = jiffies + - ad->batch_expire[BLK_RW_SYNC]; - ad->new_batch = 0; - } - - if (ad->io_context == RQ_IOC(rq) && ad->io_context) { - ad->antic_start = jiffies; - ad->ioc_finished = 1; - if (ad->antic_status == ANTIC_WAIT_REQ) { - /* - * We were waiting on this request, now anticipate - * the next one - */ - as_antic_waitnext(ad); - } - } - - as_put_io_context(rq); -out: - RQ_SET_STATE(rq, AS_RQ_POSTSCHED); -} - -/* - * as_remove_queued_request removes a request from the pre dispatch queue - * without updating refcounts. It is expected the caller will drop the - * reference unless it replaces the request at somepart of the elevator - * (ie. the dispatch queue) - */ -static void as_remove_queued_request(struct request_queue *q, - struct request *rq) -{ - const int data_dir = rq_is_sync(rq); - struct as_data *ad = q->elevator->elevator_data; - struct io_context *ioc; - - WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED); - - ioc = RQ_IOC(rq); - if (ioc && ioc->aic) { - BUG_ON(!atomic_read(&ioc->aic->nr_queued)); - atomic_dec(&ioc->aic->nr_queued); - } - - /* - * Update the "next_rq" cache if we are about to remove its - * entry - */ - if (ad->next_rq[data_dir] == rq) - ad->next_rq[data_dir] = as_find_next_rq(ad, rq); - - rq_fifo_clear(rq); - as_del_rq_rb(ad, rq); -} - -/* - * as_fifo_expired returns 0 if there are no expired requests on the fifo, - * 1 otherwise. It is ratelimited so that we only perform the check once per - * `fifo_expire' interval. Otherwise a large number of expired requests - * would create a hopeless seekstorm. - * - * See as_antic_expired comment. 
- */ -static int as_fifo_expired(struct as_data *ad, int adir) -{ - struct request *rq; - long delta_jif; - - delta_jif = jiffies - ad->last_check_fifo[adir]; - if (unlikely(delta_jif < 0)) - delta_jif = -delta_jif; - if (delta_jif < ad->fifo_expire[adir]) - return 0; - - ad->last_check_fifo[adir] = jiffies; - - if (list_empty(&ad->fifo_list[adir])) - return 0; - - rq = rq_entry_fifo(ad->fifo_list[adir].next); - - return time_after(jiffies, rq_fifo_time(rq)); -} - -/* - * as_batch_expired returns true if the current batch has expired. A batch - * is a set of reads or a set of writes. - */ -static inline int as_batch_expired(struct as_data *ad) -{ - if (ad->changed_batch || ad->new_batch) - return 0; - - if (ad->batch_data_dir == BLK_RW_SYNC) - /* TODO! add a check so a complete fifo gets written? */ - return time_after(jiffies, ad->current_batch_expires); - - return time_after(jiffies, ad->current_batch_expires) - || ad->current_write_count == 0; -} - -/* - * move an entry to dispatch queue - */ -static void as_move_to_dispatch(struct as_data *ad, struct request *rq) -{ - const int data_dir = rq_is_sync(rq); - - BUG_ON(RB_EMPTY_NODE(&rq->rb_node)); - - as_antic_stop(ad); - ad->antic_status = ANTIC_OFF; - - /* - * This has to be set in order to be correctly updated by - * as_find_next_rq - */ - ad->last_sector[data_dir] = blk_rq_pos(rq) + blk_rq_sectors(rq); - - if (data_dir == BLK_RW_SYNC) { - struct io_context *ioc = RQ_IOC(rq); - /* In case we have to anticipate after this */ - copy_io_context(&ad->io_context, &ioc); - } else { - if (ad->io_context) { - put_io_context(ad->io_context); - ad->io_context = NULL; - } - - if (ad->current_write_count != 0) - ad->current_write_count--; - } - ad->ioc_finished = 0; - - ad->next_rq[data_dir] = as_find_next_rq(ad, rq); - - /* - * take it off the sort and fifo list, add to dispatch queue - */ - as_remove_queued_request(ad->q, rq); - WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED); - - elv_dispatch_sort(ad->q, rq); - - RQ_SET_STATE(rq, AS_RQ_DISPATCHED); - if (RQ_IOC(rq) && RQ_IOC(rq)->aic) - atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched); - ad->nr_dispatched++; -} - -/* - * as_dispatch_request selects the best request according to - * read/write expire, batch expire, etc, and moves it to the dispatch - * queue. Returns 1 if a request was found, 0 otherwise. - */ -static int as_dispatch_request(struct request_queue *q, int force) -{ - struct as_data *ad = q->elevator->elevator_data; - const int reads = !list_empty(&ad->fifo_list[BLK_RW_SYNC]); - const int writes = !list_empty(&ad->fifo_list[BLK_RW_ASYNC]); - struct request *rq; - - if (unlikely(force)) { - /* - * Forced dispatch, accounting is useless. Reset - * accounting states and dump fifo_lists. Note that - * batch_data_dir is reset to BLK_RW_SYNC to avoid - * screwing write batch accounting as write batch - * accounting occurs on W->R transition. 
- */ - int dispatched = 0; - - ad->batch_data_dir = BLK_RW_SYNC; - ad->changed_batch = 0; - ad->new_batch = 0; - - while (ad->next_rq[BLK_RW_SYNC]) { - as_move_to_dispatch(ad, ad->next_rq[BLK_RW_SYNC]); - dispatched++; - } - ad->last_check_fifo[BLK_RW_SYNC] = jiffies; - - while (ad->next_rq[BLK_RW_ASYNC]) { - as_move_to_dispatch(ad, ad->next_rq[BLK_RW_ASYNC]); - dispatched++; - } - ad->last_check_fifo[BLK_RW_ASYNC] = jiffies; - - return dispatched; - } - - /* Signal that the write batch was uncontended, so we can't time it */ - if (ad->batch_data_dir == BLK_RW_ASYNC && !reads) { - if (ad->current_write_count == 0 || !writes) - ad->write_batch_idled = 1; - } - - if (!(reads || writes) - || ad->antic_status == ANTIC_WAIT_REQ - || ad->antic_status == ANTIC_WAIT_NEXT - || ad->changed_batch) - return 0; - - if (!(reads && writes && as_batch_expired(ad))) { - /* - * batch is still running or no reads or no writes - */ - rq = ad->next_rq[ad->batch_data_dir]; - - if (ad->batch_data_dir == BLK_RW_SYNC && ad->antic_expire) { - if (as_fifo_expired(ad, BLK_RW_SYNC)) - goto fifo_expired; - - if (as_can_anticipate(ad, rq)) { - as_antic_waitreq(ad); - return 0; - } - } - - if (rq) { - /* we have a "next request" */ - if (reads && !writes) - ad->current_batch_expires = - jiffies + ad->batch_expire[BLK_RW_SYNC]; - goto dispatch_request; - } - } - - /* - * at this point we are not running a batch. select the appropriate - * data direction (read / write) - */ - - if (reads) { - BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[BLK_RW_SYNC])); - - if (writes && ad->batch_data_dir == BLK_RW_SYNC) - /* - * Last batch was a read, switch to writes - */ - goto dispatch_writes; - - if (ad->batch_data_dir == BLK_RW_ASYNC) { - WARN_ON(ad->new_batch); - ad->changed_batch = 1; - } - ad->batch_data_dir = BLK_RW_SYNC; - rq = rq_entry_fifo(ad->fifo_list[BLK_RW_SYNC].next); - ad->last_check_fifo[ad->batch_data_dir] = jiffies; - goto dispatch_request; - } - - /* - * the last batch was a read - */ - - if (writes) { -dispatch_writes: - BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[BLK_RW_ASYNC])); - - if (ad->batch_data_dir == BLK_RW_SYNC) { - ad->changed_batch = 1; - - /* - * new_batch might be 1 when the queue runs out of - * reads. A subsequent submission of a write might - * cause a change of batch before the read is finished. - */ - ad->new_batch = 0; - } - ad->batch_data_dir = BLK_RW_ASYNC; - ad->current_write_count = ad->write_batch_count; - ad->write_batch_idled = 0; - rq = rq_entry_fifo(ad->fifo_list[BLK_RW_ASYNC].next); - ad->last_check_fifo[BLK_RW_ASYNC] = jiffies; - goto dispatch_request; - } - - BUG(); - return 0; - -dispatch_request: - /* - * If a request has expired, service it. - */ - - if (as_fifo_expired(ad, ad->batch_data_dir)) { -fifo_expired: - rq = rq_entry_fifo(ad->fifo_list[ad->batch_data_dir].next); - } - - if (ad->changed_batch) { - WARN_ON(ad->new_batch); - - if (ad->nr_dispatched) - return 0; - - if (ad->batch_data_dir == BLK_RW_ASYNC) - ad->current_batch_expires = jiffies + - ad->batch_expire[BLK_RW_ASYNC]; - else - ad->new_batch = 1; - - ad->changed_batch = 0; - } - - /* - * rq is the selected appropriate request. 
- */ - as_move_to_dispatch(ad, rq); - - return 1; -} - -/* - * add rq to rbtree and fifo - */ -static void as_add_request(struct request_queue *q, struct request *rq) -{ - struct as_data *ad = q->elevator->elevator_data; - int data_dir; - - RQ_SET_STATE(rq, AS_RQ_NEW); - - data_dir = rq_is_sync(rq); - - rq->elevator_private = as_get_io_context(q->node); - - if (RQ_IOC(rq)) { - as_update_iohist(ad, RQ_IOC(rq)->aic, rq); - atomic_inc(&RQ_IOC(rq)->aic->nr_queued); - } - - as_add_rq_rb(ad, rq); - - /* - * set expire time and add to fifo list - */ - rq_set_fifo_time(rq, jiffies + ad->fifo_expire[data_dir]); - list_add_tail(&rq->queuelist, &ad->fifo_list[data_dir]); - - as_update_rq(ad, rq); /* keep state machine up to date */ - RQ_SET_STATE(rq, AS_RQ_QUEUED); -} - -static void as_activate_request(struct request_queue *q, struct request *rq) -{ - WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED); - RQ_SET_STATE(rq, AS_RQ_REMOVED); - if (RQ_IOC(rq) && RQ_IOC(rq)->aic) - atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched); -} - -static void as_deactivate_request(struct request_queue *q, struct request *rq) -{ - WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED); - RQ_SET_STATE(rq, AS_RQ_DISPATCHED); - if (RQ_IOC(rq) && RQ_IOC(rq)->aic) - atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched); -} - -/* - * as_queue_empty tells us if there are requests left in the device. It may - * not be the case that a driver can get the next request even if the queue - * is not empty - it is used in the block layer to check for plugging and - * merging opportunities - */ -static int as_queue_empty(struct request_queue *q) -{ - struct as_data *ad = q->elevator->elevator_data; - - return list_empty(&ad->fifo_list[BLK_RW_ASYNC]) - && list_empty(&ad->fifo_list[BLK_RW_SYNC]); -} - -static int -as_merge(struct request_queue *q, struct request **req, struct bio *bio) -{ - struct as_data *ad = q->elevator->elevator_data; - sector_t rb_key = bio->bi_sector + bio_sectors(bio); - struct request *__rq; - - /* - * check for front merge - */ - __rq = elv_rb_find(&ad->sort_list[bio_data_dir(bio)], rb_key); - if (__rq && elv_rq_merge_ok(__rq, bio)) { - *req = __rq; - return ELEVATOR_FRONT_MERGE; - } - - return ELEVATOR_NO_MERGE; -} - -static void as_merged_request(struct request_queue *q, struct request *req, - int type) -{ - struct as_data *ad = q->elevator->elevator_data; - - /* - * if the merge was a front merge, we need to reposition request - */ - if (type == ELEVATOR_FRONT_MERGE) { - as_del_rq_rb(ad, req); - as_add_rq_rb(ad, req); - /* - * Note! At this stage of this and the next function, our next - * request may not be optimal - eg the request may have "grown" - * behind the disk head. We currently don't bother adjusting. - */ - } -} - -static void as_merged_requests(struct request_queue *q, struct request *req, - struct request *next) -{ - /* - * if next expires before rq, assign its expire time to arq - * and move into next position (next will be deleted) in fifo - */ - if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) { - if (time_before(rq_fifo_time(next), rq_fifo_time(req))) { - list_move(&req->queuelist, &next->queuelist); - rq_set_fifo_time(req, rq_fifo_time(next)); - } - } - - /* - * kill knowledge of next, this one is a goner - */ - as_remove_queued_request(q, next); - as_put_io_context(next); - - RQ_SET_STATE(next, AS_RQ_MERGED); -} - -/* - * This is executed in a "deferred" process context, by kblockd. It calls the - * driver's request_fn so the driver can submit that request. - * - * IMPORTANT! 
This guy will reenter the elevator, so set up all queue global - * state before calling, and don't rely on any state over calls. - * - * FIXME! dispatch queue is not a queue at all! - */ -static void as_work_handler(struct work_struct *work) -{ - struct as_data *ad = container_of(work, struct as_data, antic_work); - - blk_run_queue(ad->q); -} - -static int as_may_queue(struct request_queue *q, int rw) -{ - int ret = ELV_MQUEUE_MAY; - struct as_data *ad = q->elevator->elevator_data; - struct io_context *ioc; - if (ad->antic_status == ANTIC_WAIT_REQ || - ad->antic_status == ANTIC_WAIT_NEXT) { - ioc = as_get_io_context(q->node); - if (ad->io_context == ioc) - ret = ELV_MQUEUE_MUST; - put_io_context(ioc); - } - - return ret; -} - -static void as_exit_queue(struct elevator_queue *e) -{ - struct as_data *ad = e->elevator_data; - - del_timer_sync(&ad->antic_timer); - cancel_work_sync(&ad->antic_work); - - BUG_ON(!list_empty(&ad->fifo_list[BLK_RW_SYNC])); - BUG_ON(!list_empty(&ad->fifo_list[BLK_RW_ASYNC])); - - put_io_context(ad->io_context); - kfree(ad); -} - -/* - * initialize elevator private data (as_data). - */ -static void *as_init_queue(struct request_queue *q) -{ - struct as_data *ad; - - ad = kmalloc_node(sizeof(*ad), GFP_KERNEL | __GFP_ZERO, q->node); - if (!ad) - return NULL; - - ad->q = q; /* Identify what queue the data belongs to */ - - /* anticipatory scheduling helpers */ - ad->antic_timer.function = as_antic_timeout; - ad->antic_timer.data = (unsigned long)q; - init_timer(&ad->antic_timer); - INIT_WORK(&ad->antic_work, as_work_handler); - - INIT_LIST_HEAD(&ad->fifo_list[BLK_RW_SYNC]); - INIT_LIST_HEAD(&ad->fifo_list[BLK_RW_ASYNC]); - ad->sort_list[BLK_RW_SYNC] = RB_ROOT; - ad->sort_list[BLK_RW_ASYNC] = RB_ROOT; - ad->fifo_expire[BLK_RW_SYNC] = default_read_expire; - ad->fifo_expire[BLK_RW_ASYNC] = default_write_expire; - ad->antic_expire = default_antic_expire; - ad->batch_expire[BLK_RW_SYNC] = default_read_batch_expire; - ad->batch_expire[BLK_RW_ASYNC] = default_write_batch_expire; - - ad->current_batch_expires = jiffies + ad->batch_expire[BLK_RW_SYNC]; - ad->write_batch_count = ad->batch_expire[BLK_RW_ASYNC] / 10; - if (ad->write_batch_count < 2) - ad->write_batch_count = 2; - - return ad; -} - -/* - * sysfs parts below - */ - -static ssize_t -as_var_show(unsigned int var, char *page) -{ - return sprintf(page, "%d\n", var); -} - -static ssize_t -as_var_store(unsigned long *var, const char *page, size_t count) -{ - char *p = (char *) page; - - *var = simple_strtoul(p, &p, 10); - return count; -} - -static ssize_t est_time_show(struct elevator_queue *e, char *page) -{ - struct as_data *ad = e->elevator_data; - int pos = 0; - - pos += sprintf(page+pos, "%lu %% exit probability\n", - 100*ad->exit_prob/256); - pos += sprintf(page+pos, "%lu %% probability of exiting without a " - "cooperating process submitting IO\n", - 100*ad->exit_no_coop/256); - pos += sprintf(page+pos, "%lu ms new thinktime\n", ad->new_ttime_mean); - pos += sprintf(page+pos, "%llu sectors new seek distance\n", - (unsigned long long)ad->new_seek_mean); - - return pos; -} - -#define SHOW_FUNCTION(__FUNC, __VAR) \ -static ssize_t __FUNC(struct elevator_queue *e, char *page) \ -{ \ - struct as_data *ad = e->elevator_data; \ - return as_var_show(jiffies_to_msecs((__VAR)), (page)); \ -} -SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[BLK_RW_SYNC]); -SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[BLK_RW_ASYNC]); -SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire); -SHOW_FUNCTION(as_read_batch_expire_show, 
ad->batch_expire[BLK_RW_SYNC]); -SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[BLK_RW_ASYNC]); -#undef SHOW_FUNCTION - -#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ -static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ -{ \ - struct as_data *ad = e->elevator_data; \ - int ret = as_var_store(__PTR, (page), count); \ - if (*(__PTR) < (MIN)) \ - *(__PTR) = (MIN); \ - else if (*(__PTR) > (MAX)) \ - *(__PTR) = (MAX); \ - *(__PTR) = msecs_to_jiffies(*(__PTR)); \ - return ret; \ -} -STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[BLK_RW_SYNC], 0, INT_MAX); -STORE_FUNCTION(as_write_expire_store, - &ad->fifo_expire[BLK_RW_ASYNC], 0, INT_MAX); -STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX); -STORE_FUNCTION(as_read_batch_expire_store, - &ad->batch_expire[BLK_RW_SYNC], 0, INT_MAX); -STORE_FUNCTION(as_write_batch_expire_store, - &ad->batch_expire[BLK_RW_ASYNC], 0, INT_MAX); -#undef STORE_FUNCTION - -#define AS_ATTR(name) \ - __ATTR(name, S_IRUGO|S_IWUSR, as_##name##_show, as_##name##_store) - -static struct elv_fs_entry as_attrs[] = { - __ATTR_RO(est_time), - AS_ATTR(read_expire), - AS_ATTR(write_expire), - AS_ATTR(antic_expire), - AS_ATTR(read_batch_expire), - AS_ATTR(write_batch_expire), - __ATTR_NULL -}; - -static struct elevator_type iosched_as = { - .ops = { - .elevator_merge_fn = as_merge, - .elevator_merged_fn = as_merged_request, - .elevator_merge_req_fn = as_merged_requests, - .elevator_dispatch_fn = as_dispatch_request, - .elevator_add_req_fn = as_add_request, - .elevator_activate_req_fn = as_activate_request, - .elevator_deactivate_req_fn = as_deactivate_request, - .elevator_queue_empty_fn = as_queue_empty, - .elevator_completed_req_fn = as_completed_request, - .elevator_former_req_fn = elv_rb_former_request, - .elevator_latter_req_fn = elv_rb_latter_request, - .elevator_may_queue_fn = as_may_queue, - .elevator_init_fn = as_init_queue, - .elevator_exit_fn = as_exit_queue, - .trim = as_trim, - }, - - .elevator_attrs = as_attrs, - .elevator_name = "anticipatory", - .elevator_owner = THIS_MODULE, -}; - -static int __init as_init(void) -{ - elv_register(&iosched_as); - - return 0; -} - -static void __exit as_exit(void) -{ - DECLARE_COMPLETION_ONSTACK(all_gone); - elv_unregister(&iosched_as); - ioc_gone = &all_gone; - /* ioc_gone's update must be visible before reading ioc_count */ - smp_wmb(); - if (elv_ioc_count_read(as_ioc_count)) - wait_for_completion(&all_gone); - synchronize_rcu(); -} - -module_init(as_init); -module_exit(as_exit); - -MODULE_AUTHOR("Nick Piggin"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("anticipatory IO scheduler"); diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c new file mode 100644 index 00000000000..1fa2654db0a --- /dev/null +++ b/block/blk-cgroup.c @@ -0,0 +1,361 @@ +/* + * Common Block IO controller cgroup interface + * + * Based on ideas and code from CFQ, CFS and BFQ: + * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> + * + * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it> + * Paolo Valente <paolo.valente@unimore.it> + * + * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com> + * Nauman Rafique <nauman@google.com> + */ +#include <linux/ioprio.h> +#include <linux/seq_file.h> +#include <linux/kdev_t.h> +#include <linux/module.h> +#include <linux/err.h> +#include "blk-cgroup.h" + +static DEFINE_SPINLOCK(blkio_list_lock); +static LIST_HEAD(blkio_list); + +struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT }; +EXPORT_SYMBOL_GPL(blkio_root_cgroup); + 
+bool blkiocg_css_tryget(struct blkio_cgroup *blkcg) +{ + if (!css_tryget(&blkcg->css)) + return false; + return true; +} +EXPORT_SYMBOL_GPL(blkiocg_css_tryget); + +void blkiocg_css_put(struct blkio_cgroup *blkcg) +{ + css_put(&blkcg->css); +} +EXPORT_SYMBOL_GPL(blkiocg_css_put); + +struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup) +{ + return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id), + struct blkio_cgroup, css); +} +EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup); + +void blkiocg_update_blkio_group_stats(struct blkio_group *blkg, + unsigned long time, unsigned long sectors) +{ + blkg->time += time; + blkg->sectors += sectors; +} +EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_stats); + +void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, + struct blkio_group *blkg, void *key, dev_t dev) +{ + unsigned long flags; + + spin_lock_irqsave(&blkcg->lock, flags); + rcu_assign_pointer(blkg->key, key); + blkg->blkcg_id = css_id(&blkcg->css); + hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list); + spin_unlock_irqrestore(&blkcg->lock, flags); +#ifdef CONFIG_DEBUG_BLK_CGROUP + /* Need to take css reference ? */ + cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path)); +#endif + blkg->dev = dev; +} +EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group); + +static void __blkiocg_del_blkio_group(struct blkio_group *blkg) +{ + hlist_del_init_rcu(&blkg->blkcg_node); + blkg->blkcg_id = 0; +} + +/* + * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1 + * indicating that blk_group was unhashed by the time we got to it. + */ +int blkiocg_del_blkio_group(struct blkio_group *blkg) +{ + struct blkio_cgroup *blkcg; + unsigned long flags; + struct cgroup_subsys_state *css; + int ret = 1; + + rcu_read_lock(); + css = css_lookup(&blkio_subsys, blkg->blkcg_id); + if (!css) + goto out; + + blkcg = container_of(css, struct blkio_cgroup, css); + spin_lock_irqsave(&blkcg->lock, flags); + if (!hlist_unhashed(&blkg->blkcg_node)) { + __blkiocg_del_blkio_group(blkg); + ret = 0; + } + spin_unlock_irqrestore(&blkcg->lock, flags); +out: + rcu_read_unlock(); + return ret; +} +EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group); + +/* called under rcu_read_lock(). 
*/ +struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) +{ + struct blkio_group *blkg; + struct hlist_node *n; + void *__key; + + hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) { + __key = blkg->key; + if (__key == key) + return blkg; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(blkiocg_lookup_group); + +#define SHOW_FUNCTION(__VAR) \ +static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup, \ + struct cftype *cftype) \ +{ \ + struct blkio_cgroup *blkcg; \ + \ + blkcg = cgroup_to_blkio_cgroup(cgroup); \ + return (u64)blkcg->__VAR; \ +} + +SHOW_FUNCTION(weight); +#undef SHOW_FUNCTION + +static int +blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val) +{ + struct blkio_cgroup *blkcg; + struct blkio_group *blkg; + struct hlist_node *n; + struct blkio_policy_type *blkiop; + + if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX) + return -EINVAL; + + blkcg = cgroup_to_blkio_cgroup(cgroup); + spin_lock_irq(&blkcg->lock); + blkcg->weight = (unsigned int)val; + hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) { + spin_lock(&blkio_list_lock); + list_for_each_entry(blkiop, &blkio_list, list) + blkiop->ops.blkio_update_group_weight_fn(blkg, + blkcg->weight); + spin_unlock(&blkio_list_lock); + } + spin_unlock_irq(&blkcg->lock); + return 0; +} + +#define SHOW_FUNCTION_PER_GROUP(__VAR) \ +static int blkiocg_##__VAR##_read(struct cgroup *cgroup, \ + struct cftype *cftype, struct seq_file *m) \ +{ \ + struct blkio_cgroup *blkcg; \ + struct blkio_group *blkg; \ + struct hlist_node *n; \ + \ + if (!cgroup_lock_live_group(cgroup)) \ + return -ENODEV; \ + \ + blkcg = cgroup_to_blkio_cgroup(cgroup); \ + rcu_read_lock(); \ + hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\ + if (blkg->dev) \ + seq_printf(m, "%u:%u %lu\n", MAJOR(blkg->dev), \ + MINOR(blkg->dev), blkg->__VAR); \ + } \ + rcu_read_unlock(); \ + cgroup_unlock(); \ + return 0; \ +} + +SHOW_FUNCTION_PER_GROUP(time); +SHOW_FUNCTION_PER_GROUP(sectors); +#ifdef CONFIG_DEBUG_BLK_CGROUP +SHOW_FUNCTION_PER_GROUP(dequeue); +#endif +#undef SHOW_FUNCTION_PER_GROUP + +#ifdef CONFIG_DEBUG_BLK_CGROUP +void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg, + unsigned long dequeue) +{ + blkg->dequeue += dequeue; +} +EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_dequeue_stats); +#endif + +struct cftype blkio_files[] = { + { + .name = "weight", + .read_u64 = blkiocg_weight_read, + .write_u64 = blkiocg_weight_write, + }, + { + .name = "time", + .read_seq_string = blkiocg_time_read, + }, + { + .name = "sectors", + .read_seq_string = blkiocg_sectors_read, + }, +#ifdef CONFIG_DEBUG_BLK_CGROUP + { + .name = "dequeue", + .read_seq_string = blkiocg_dequeue_read, + }, +#endif +}; + +static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup) +{ + return cgroup_add_files(cgroup, subsys, blkio_files, + ARRAY_SIZE(blkio_files)); +} + +static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup) +{ + struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup); + unsigned long flags; + struct blkio_group *blkg; + void *key; + struct blkio_policy_type *blkiop; + + rcu_read_lock(); +remove_entry: + spin_lock_irqsave(&blkcg->lock, flags); + + if (hlist_empty(&blkcg->blkg_list)) { + spin_unlock_irqrestore(&blkcg->lock, flags); + goto done; + } + + blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group, + blkcg_node); + key = rcu_dereference(blkg->key); + __blkiocg_del_blkio_group(blkg); + + 
spin_unlock_irqrestore(&blkcg->lock, flags); + + /* + * This blkio_group is being unlinked as associated cgroup is going + * away. Let all the IO controlling policies know about this event. + * + * Currently this is static call to one io controlling policy. Once + * we have more policies in place, we need some dynamic registration + * of callback function. + */ + spin_lock(&blkio_list_lock); + list_for_each_entry(blkiop, &blkio_list, list) + blkiop->ops.blkio_unlink_group_fn(key, blkg); + spin_unlock(&blkio_list_lock); + goto remove_entry; +done: + free_css_id(&blkio_subsys, &blkcg->css); + rcu_read_unlock(); + kfree(blkcg); +} + +static struct cgroup_subsys_state * +blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup) +{ + struct blkio_cgroup *blkcg, *parent_blkcg; + + if (!cgroup->parent) { + blkcg = &blkio_root_cgroup; + goto done; + } + + /* Currently we do not support hierarchy deeper than two level (0,1) */ + parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent); + if (css_depth(&parent_blkcg->css) > 0) + return ERR_PTR(-EINVAL); + + blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL); + if (!blkcg) + return ERR_PTR(-ENOMEM); + + blkcg->weight = BLKIO_WEIGHT_DEFAULT; +done: + spin_lock_init(&blkcg->lock); + INIT_HLIST_HEAD(&blkcg->blkg_list); + + return &blkcg->css; +} + +/* + * We cannot support shared io contexts, as we have no mean to support + * two tasks with the same ioc in two different groups without major rework + * of the main cic data structures. For now we allow a task to change + * its cgroup only if it's the only owner of its ioc. + */ +static int blkiocg_can_attach(struct cgroup_subsys *subsys, + struct cgroup *cgroup, struct task_struct *tsk, + bool threadgroup) +{ + struct io_context *ioc; + int ret = 0; + + /* task_lock() is needed to avoid races with exit_io_context() */ + task_lock(tsk); + ioc = tsk->io_context; + if (ioc && atomic_read(&ioc->nr_tasks) > 1) + ret = -EINVAL; + task_unlock(tsk); + + return ret; +} + +static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup, + struct cgroup *prev, struct task_struct *tsk, + bool threadgroup) +{ + struct io_context *ioc; + + task_lock(tsk); + ioc = tsk->io_context; + if (ioc) + ioc->cgroup_changed = 1; + task_unlock(tsk); +} + +struct cgroup_subsys blkio_subsys = { + .name = "blkio", + .create = blkiocg_create, + .can_attach = blkiocg_can_attach, + .attach = blkiocg_attach, + .destroy = blkiocg_destroy, + .populate = blkiocg_populate, + .subsys_id = blkio_subsys_id, + .use_id = 1, +}; + +void blkio_policy_register(struct blkio_policy_type *blkiop) +{ + spin_lock(&blkio_list_lock); + list_add_tail(&blkiop->list, &blkio_list); + spin_unlock(&blkio_list_lock); +} +EXPORT_SYMBOL_GPL(blkio_policy_register); + +void blkio_policy_unregister(struct blkio_policy_type *blkiop) +{ + spin_lock(&blkio_list_lock); + list_del_init(&blkiop->list); + spin_unlock(&blkio_list_lock); +} +EXPORT_SYMBOL_GPL(blkio_policy_unregister); diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h new file mode 100644 index 00000000000..4d316df863b --- /dev/null +++ b/block/blk-cgroup.h @@ -0,0 +1,127 @@ +#ifndef _BLK_CGROUP_H +#define _BLK_CGROUP_H +/* + * Common Block IO controller cgroup interface + * + * Based on ideas and code from CFQ, CFS and BFQ: + * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> + * + * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it> + * Paolo Valente <paolo.valente@unimore.it> + * + * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com> + * Nauman Rafique <nauman@google.com> + */ + 
+#include <linux/cgroup.h> + +#ifdef CONFIG_BLK_CGROUP + +struct blkio_cgroup { + struct cgroup_subsys_state css; + unsigned int weight; + spinlock_t lock; + struct hlist_head blkg_list; +}; + +struct blkio_group { + /* An rcu protected unique identifier for the group */ + void *key; + struct hlist_node blkcg_node; + unsigned short blkcg_id; +#ifdef CONFIG_DEBUG_BLK_CGROUP + /* Store cgroup path */ + char path[128]; + /* How many times this group has been removed from service tree */ + unsigned long dequeue; +#endif + /* The device MKDEV(major, minor), this group has been created for */ + dev_t dev; + + /* total disk time and nr sectors dispatched by this group */ + unsigned long time; + unsigned long sectors; +}; + +extern bool blkiocg_css_tryget(struct blkio_cgroup *blkcg); +extern void blkiocg_css_put(struct blkio_cgroup *blkcg); + +typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg); +typedef void (blkio_update_group_weight_fn) (struct blkio_group *blkg, + unsigned int weight); + +struct blkio_policy_ops { + blkio_unlink_group_fn *blkio_unlink_group_fn; + blkio_update_group_weight_fn *blkio_update_group_weight_fn; +}; + +struct blkio_policy_type { + struct list_head list; + struct blkio_policy_ops ops; +}; + +/* Blkio controller policy registration */ +extern void blkio_policy_register(struct blkio_policy_type *); +extern void blkio_policy_unregister(struct blkio_policy_type *); + +#else + +struct blkio_group { +}; + +struct blkio_policy_type { +}; + +static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { } +static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { } + +#endif + +#define BLKIO_WEIGHT_MIN 100 +#define BLKIO_WEIGHT_MAX 1000 +#define BLKIO_WEIGHT_DEFAULT 500 + +#ifdef CONFIG_DEBUG_BLK_CGROUP +static inline char *blkg_path(struct blkio_group *blkg) +{ + return blkg->path; +} +void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg, + unsigned long dequeue); +#else +static inline char *blkg_path(struct blkio_group *blkg) { return NULL; } +static inline void blkiocg_update_blkio_group_dequeue_stats( + struct blkio_group *blkg, unsigned long dequeue) {} +#endif + +#ifdef CONFIG_BLK_CGROUP +extern struct blkio_cgroup blkio_root_cgroup; +extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup); +extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, + struct blkio_group *blkg, void *key, dev_t dev); +extern int blkiocg_del_blkio_group(struct blkio_group *blkg); +extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, + void *key); +void blkiocg_update_blkio_group_stats(struct blkio_group *blkg, + unsigned long time, unsigned long sectors); +#else +struct cgroup; +static inline struct blkio_cgroup * +cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; } + +static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, + struct blkio_group *blkg, void *key, dev_t dev) +{ +} + +static inline int +blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; } + +static inline struct blkio_group * +blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; } +static inline void blkiocg_update_blkio_group_stats(struct blkio_group *blkg, + unsigned long time, unsigned long sectors) +{ +} +#endif +#endif /* _BLK_CGROUP_H */ diff --git a/block/blk-core.c b/block/blk-core.c index 71da5111120..718897e6d37 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2358,6 +2358,25 @@ void blk_rq_bio_prep(struct request_queue *q, 
struct request *rq, rq->rq_disk = bio->bi_bdev->bd_disk; } +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +/** + * rq_flush_dcache_pages - Helper function to flush all pages in a request + * @rq: the request to be flushed + * + * Description: + * Flush all pages in @rq. + */ +void rq_flush_dcache_pages(struct request *rq) +{ + struct req_iterator iter; + struct bio_vec *bvec; + + rq_for_each_segment(bvec, rq, iter) + flush_dcache_page(bvec->bv_page); +} +EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); +#endif + /** * blk_lld_busy - Check if underlying low-level drivers of a device are busy * @q : the queue of the device being checked diff --git a/block/blk-ioc.c b/block/blk-ioc.c index d4ed6000147..cbdabb0dd6d 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c @@ -66,22 +66,22 @@ static void cfq_exit(struct io_context *ioc) } /* Called by the exitting task */ -void exit_io_context(void) +void exit_io_context(struct task_struct *task) { struct io_context *ioc; - task_lock(current); - ioc = current->io_context; - current->io_context = NULL; - task_unlock(current); + task_lock(task); + ioc = task->io_context; + task->io_context = NULL; + task_unlock(task); if (atomic_dec_and_test(&ioc->nr_tasks)) { if (ioc->aic && ioc->aic->exit) ioc->aic->exit(ioc->aic); cfq_exit(ioc); - put_io_context(ioc); } + put_io_context(ioc); } struct io_context *alloc_io_context(gfp_t gfp_flags, int node) diff --git a/block/blk-settings.c b/block/blk-settings.c index 66d4aa8799b..dd1f1e0e196 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -8,6 +8,7 @@ #include <linux/blkdev.h> #include <linux/bootmem.h> /* for max_pfn/max_low_pfn */ #include <linux/gcd.h> +#include <linux/jiffies.h> #include "blk.h" @@ -96,7 +97,11 @@ void blk_set_default_limits(struct queue_limits *lim) lim->max_segment_size = MAX_SEGMENT_SIZE; lim->max_sectors = BLK_DEF_MAX_SECTORS; lim->max_hw_sectors = INT_MAX; - lim->max_discard_sectors = SAFE_MAX_SECTORS; + lim->max_discard_sectors = 0; + lim->discard_granularity = 0; + lim->discard_alignment = 0; + lim->discard_misaligned = 0; + lim->discard_zeroes_data = -1; lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); lim->alignment_offset = 0; @@ -141,7 +146,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) q->nr_batching = BLK_BATCH_REQ; q->unplug_thresh = 4; /* hmm */ - q->unplug_delay = (3 * HZ) / 1000; /* 3 milliseconds */ + q->unplug_delay = msecs_to_jiffies(3); /* 3 milliseconds */ if (q->unplug_delay == 0) q->unplug_delay = 1; @@ -488,6 +493,16 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) } EXPORT_SYMBOL(blk_queue_stack_limits); +static unsigned int lcm(unsigned int a, unsigned int b) +{ + if (a && b) + return (a * b) / gcd(a, b); + else if (b) + return b; + + return a; +} + /** * blk_stack_limits - adjust queue_limits for stacked devices * @t: the stacking driver limits (top) @@ -502,6 +517,10 @@ EXPORT_SYMBOL(blk_queue_stack_limits); int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, sector_t offset) { + int ret; + + ret = 0; + t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn); @@ -526,12 +545,19 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, t->io_min = max(t->io_min, b->io_min); t->no_cluster |= b->no_cluster; + t->discard_zeroes_data &= 
b->discard_zeroes_data; /* Bottom device offset aligned? */ if (offset && (offset & (b->physical_block_size - 1)) != b->alignment_offset) { t->misaligned = 1; - return -1; + ret = -1; + } + + if (offset && + (offset & (b->discard_granularity - 1)) != b->discard_alignment) { + t->discard_misaligned = 1; + ret = -1; } /* If top has no alignment offset, inherit from bottom */ @@ -539,23 +565,26 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, t->alignment_offset = b->alignment_offset & (b->physical_block_size - 1); + if (!t->discard_alignment) + t->discard_alignment = + b->discard_alignment & (b->discard_granularity - 1); + /* Top device aligned on logical block boundary? */ if (t->alignment_offset & (t->logical_block_size - 1)) { t->misaligned = 1; - return -1; + ret = -1; } - /* Find lcm() of optimal I/O size */ - if (t->io_opt && b->io_opt) - t->io_opt = (t->io_opt * b->io_opt) / gcd(t->io_opt, b->io_opt); - else if (b->io_opt) - t->io_opt = b->io_opt; + /* Find lcm() of optimal I/O size and granularity */ + t->io_opt = lcm(t->io_opt, b->io_opt); + t->discard_granularity = lcm(t->discard_granularity, + b->discard_granularity); /* Verify that optimal I/O size is a multiple of io_min */ if (t->io_min && t->io_opt % t->io_min) - return -1; + ret = -1; - return 0; + return ret; } EXPORT_SYMBOL(blk_stack_limits); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 8a6d81afb28..8606c9543fd 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -126,6 +126,21 @@ static ssize_t queue_io_opt_show(struct request_queue *q, char *page) return queue_var_show(queue_io_opt(q), page); } +static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page) +{ + return queue_var_show(q->limits.discard_granularity, page); +} + +static ssize_t queue_discard_max_show(struct request_queue *q, char *page) +{ + return queue_var_show(q->limits.max_discard_sectors << 9, page); +} + +static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page) +{ + return queue_var_show(queue_discard_zeroes_data(q), page); +} + static ssize_t queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) { @@ -293,6 +308,21 @@ static struct queue_sysfs_entry queue_io_opt_entry = { .show = queue_io_opt_show, }; +static struct queue_sysfs_entry queue_discard_granularity_entry = { + .attr = {.name = "discard_granularity", .mode = S_IRUGO }, + .show = queue_discard_granularity_show, +}; + +static struct queue_sysfs_entry queue_discard_max_entry = { + .attr = {.name = "discard_max_bytes", .mode = S_IRUGO }, + .show = queue_discard_max_show, +}; + +static struct queue_sysfs_entry queue_discard_zeroes_data_entry = { + .attr = {.name = "discard_zeroes_data", .mode = S_IRUGO }, + .show = queue_discard_zeroes_data_show, +}; + static struct queue_sysfs_entry queue_nonrot_entry = { .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR }, .show = queue_nonrot_show, @@ -328,6 +358,9 @@ static struct attribute *default_attrs[] = { &queue_physical_block_size_entry.attr, &queue_io_min_entry.attr, &queue_io_opt_entry.attr, + &queue_discard_granularity_entry.attr, + &queue_discard_max_entry.attr, + &queue_discard_zeroes_data_entry.attr, &queue_nonrot_entry.attr, &queue_nomerges_entry.attr, &queue_rq_affinity_entry.attr, diff --git a/block/bsg.c b/block/bsg.c index 0676301f16d..a9fd2d84b53 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -15,6 +15,7 @@ #include <linux/blkdev.h> #include <linux/poll.h> #include <linux/cdev.h> +#include <linux/jiffies.h> #include 
<linux/percpu.h> #include <linux/uio.h> #include <linux/idr.h> @@ -197,7 +198,7 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq, rq->cmd_len = hdr->request_len; rq->cmd_type = REQ_TYPE_BLOCK_PC; - rq->timeout = (hdr->timeout * HZ) / 1000; + rq->timeout = msecs_to_jiffies(hdr->timeout); if (!rq->timeout) rq->timeout = q->sg_timeout; if (!rq->timeout) diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index aa1e9535e35..cfb0b2f5f63 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -9,9 +9,11 @@ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/elevator.h> +#include <linux/jiffies.h> #include <linux/rbtree.h> #include <linux/ioprio.h> #include <linux/blktrace_api.h> +#include "blk-cgroup.h" /* * tunables @@ -27,6 +29,8 @@ static const int cfq_slice_sync = HZ / 10; static int cfq_slice_async = HZ / 25; static const int cfq_slice_async_rq = 2; static int cfq_slice_idle = HZ / 125; +static const int cfq_target_latency = HZ * 3/10; /* 300 ms */ +static const int cfq_hist_divisor = 4; /* * offset from end of service tree @@ -38,8 +42,15 @@ static int cfq_slice_idle = HZ / 125; */ #define CFQ_MIN_TT (2) +/* + * Allow merged cfqqs to perform this amount of seeky I/O before + * deciding to break the queues up again. + */ +#define CFQQ_COOP_TOUT (HZ) + #define CFQ_SLICE_SCALE (5) #define CFQ_HW_QUEUE_MIN (5) +#define CFQ_SERVICE_SHIFT 12 #define RQ_CIC(rq) \ ((struct cfq_io_context *) (rq)->elevator_private) @@ -57,6 +68,7 @@ static DEFINE_SPINLOCK(ioc_gone_lock); #define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT) #define sample_valid(samples) ((samples) > 80) +#define rb_entry_cfqg(node) rb_entry((node), struct cfq_group, rb_node) /* * Most of our rbtree usage is for sorting with min extraction, so @@ -67,8 +79,12 @@ static DEFINE_SPINLOCK(ioc_gone_lock); struct cfq_rb_root { struct rb_root rb; struct rb_node *left; + unsigned count; + u64 min_vdisktime; + struct rb_node *active; + unsigned total_weight; }; -#define CFQ_RB_ROOT (struct cfq_rb_root) { RB_ROOT, NULL, } +#define CFQ_RB_ROOT (struct cfq_rb_root) { RB_ROOT, NULL, 0, 0, } /* * Per process-grouping structure @@ -99,6 +115,11 @@ struct cfq_queue { /* fifo list of requests in sort_list */ struct list_head fifo; + /* time when queue got scheduled in to dispatch first request. */ + unsigned long dispatch_start; + unsigned int allocated_slice; + /* time when first request from queue completed and slice started. */ + unsigned long slice_start; unsigned long slice_end; long slice_resid; unsigned int slice_dispatch; @@ -112,7 +133,71 @@ struct cfq_queue { unsigned short ioprio, org_ioprio; unsigned short ioprio_class, org_ioprio_class; + unsigned int seek_samples; + u64 seek_total; + sector_t seek_mean; + sector_t last_request_pos; + unsigned long seeky_start; + pid_t pid; + + struct cfq_rb_root *service_tree; + struct cfq_queue *new_cfqq; + struct cfq_group *cfqg; + struct cfq_group *orig_cfqg; + /* Sectors dispatched in current dispatch round */ + unsigned long nr_sectors; +}; + +/* + * First index in the service_trees. + * IDLE is handled separately, so it has negative index + */ +enum wl_prio_t { + BE_WORKLOAD = 0, + RT_WORKLOAD = 1, + IDLE_WORKLOAD = 2, +}; + +/* + * Second index in the service_trees. 
+ */ +enum wl_type_t { + ASYNC_WORKLOAD = 0, + SYNC_NOIDLE_WORKLOAD = 1, + SYNC_WORKLOAD = 2 +}; + +/* This is per cgroup per device grouping structure */ +struct cfq_group { + /* group service_tree member */ + struct rb_node rb_node; + + /* group service_tree key */ + u64 vdisktime; + unsigned int weight; + bool on_st; + + /* number of cfqq currently on this group */ + int nr_cfqq; + + /* Per group busy queus average. Useful for workload slice calc. */ + unsigned int busy_queues_avg[2]; + /* + * rr lists of queues with requests, onle rr for each priority class. + * Counts are embedded in the cfq_rb_root + */ + struct cfq_rb_root service_trees[2][3]; + struct cfq_rb_root service_tree_idle; + + unsigned long saved_workload_slice; + enum wl_type_t saved_workload; + enum wl_prio_t saved_serving_prio; + struct blkio_group blkg; +#ifdef CONFIG_CFQ_GROUP_IOSCHED + struct hlist_node cfqd_node; + atomic_t ref; +#endif }; /* @@ -120,11 +205,20 @@ struct cfq_queue { */ struct cfq_data { struct request_queue *queue; + /* Root service tree for cfq_groups */ + struct cfq_rb_root grp_service_tree; + struct cfq_group root_group; + /* Number of active cfq groups on group service tree */ + int nr_groups; /* - * rr list of queues with requests and the count of them + * The priority currently being served */ - struct cfq_rb_root service_tree; + enum wl_prio_t serving_prio; + enum wl_type_t serving_type; + unsigned long workload_expires; + struct cfq_group *serving_group; + bool noidle_tree_requires_idle; /* * Each priority tree is sorted by next_request position. These @@ -143,8 +237,14 @@ struct cfq_data { */ int rq_queued; int hw_tag; - int hw_tag_samples; - int rq_in_driver_peak; + /* + * hw_tag can be + * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection) + * 1 => NCQ is present (hw_tag_est_depth is the estimated max depth) + * 0 => no NCQ + */ + int hw_tag_est_depth; + unsigned int hw_tag_samples; /* * idle window management @@ -174,6 +274,7 @@ struct cfq_data { unsigned int cfq_slice_async_rq; unsigned int cfq_slice_idle; unsigned int cfq_latency; + unsigned int cfq_group_isolation; struct list_head cic_list; @@ -183,8 +284,28 @@ struct cfq_data { struct cfq_queue oom_cfqq; unsigned long last_end_sync_rq; + + /* List of cfq groups being managed on this device*/ + struct hlist_head cfqg_list; + struct rcu_head rcu; }; +static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd); + +static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg, + enum wl_prio_t prio, + enum wl_type_t type, + struct cfq_data *cfqd) +{ + if (!cfqg) + return NULL; + + if (prio == IDLE_WORKLOAD) + return &cfqg->service_tree_idle; + + return &cfqg->service_trees[prio][type]; +} + enum cfqq_state_flags { CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */ CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */ @@ -195,8 +316,10 @@ enum cfqq_state_flags { CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */ CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */ CFQ_CFQQ_FLAG_sync, /* synchronous queue */ - CFQ_CFQQ_FLAG_coop, /* has done a coop jump of the queue */ - CFQ_CFQQ_FLAG_coop_preempt, /* coop preempt */ + CFQ_CFQQ_FLAG_coop, /* cfqq is shared */ + CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */ + CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */ + CFQ_CFQQ_FLAG_wait_busy_done, /* Got new request. 
Expire the queue */ }; #define CFQ_CFQQ_FNS(name) \ @@ -223,14 +346,78 @@ CFQ_CFQQ_FNS(prio_changed); CFQ_CFQQ_FNS(slice_new); CFQ_CFQQ_FNS(sync); CFQ_CFQQ_FNS(coop); -CFQ_CFQQ_FNS(coop_preempt); +CFQ_CFQQ_FNS(deep); +CFQ_CFQQ_FNS(wait_busy); +CFQ_CFQQ_FNS(wait_busy_done); #undef CFQ_CFQQ_FNS +#ifdef CONFIG_DEBUG_CFQ_IOSCHED +#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ + blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \ + cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \ + blkg_path(&(cfqq)->cfqg->blkg), ##args); + +#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \ + blk_add_trace_msg((cfqd)->queue, "%s " fmt, \ + blkg_path(&(cfqg)->blkg), ##args); \ + +#else #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args) +#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0); +#endif #define cfq_log(cfqd, fmt, args...) \ blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args) +/* Traverses through cfq group service trees */ +#define for_each_cfqg_st(cfqg, i, j, st) \ + for (i = 0; i <= IDLE_WORKLOAD; i++) \ + for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\ + : &cfqg->service_tree_idle; \ + (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \ + (i == IDLE_WORKLOAD && j == 0); \ + j++, st = i < IDLE_WORKLOAD ? \ + &cfqg->service_trees[i][j]: NULL) \ + + +static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq) +{ + if (cfq_class_idle(cfqq)) + return IDLE_WORKLOAD; + if (cfq_class_rt(cfqq)) + return RT_WORKLOAD; + return BE_WORKLOAD; +} + + +static enum wl_type_t cfqq_type(struct cfq_queue *cfqq) +{ + if (!cfq_cfqq_sync(cfqq)) + return ASYNC_WORKLOAD; + if (!cfq_cfqq_idle_window(cfqq)) + return SYNC_NOIDLE_WORKLOAD; + return SYNC_WORKLOAD; +} + +static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl, + struct cfq_data *cfqd, + struct cfq_group *cfqg) +{ + if (wl == IDLE_WORKLOAD) + return cfqg->service_tree_idle.count; + + return cfqg->service_trees[wl][ASYNC_WORKLOAD].count + + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count + + cfqg->service_trees[wl][SYNC_WORKLOAD].count; +} + +static inline int cfqg_busy_async_queues(struct cfq_data *cfqd, + struct cfq_group *cfqg) +{ + return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count + + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count; +} + static void cfq_dispatch_insert(struct request_queue *, struct request *); static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool, struct io_context *, gfp_t); @@ -279,7 +466,7 @@ static int cfq_queue_empty(struct request_queue *q) { struct cfq_data *cfqd = q->elevator->elevator_data; - return !cfqd->busy_queues; + return !cfqd->rq_queued; } /* @@ -303,10 +490,110 @@ cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio); } +static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg) +{ + u64 d = delta << CFQ_SERVICE_SHIFT; + + d = d * BLKIO_WEIGHT_DEFAULT; + do_div(d, cfqg->weight); + return d; +} + +static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime) +{ + s64 delta = (s64)(vdisktime - min_vdisktime); + if (delta > 0) + min_vdisktime = vdisktime; + + return min_vdisktime; +} + +static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime) +{ + s64 delta = (s64)(vdisktime - min_vdisktime); + if (delta < 0) + min_vdisktime = vdisktime; + + return min_vdisktime; +} + +static void update_min_vdisktime(struct cfq_rb_root *st) +{ + u64 vdisktime = st->min_vdisktime; + struct cfq_group *cfqg; + 
+ if (st->active) { + cfqg = rb_entry_cfqg(st->active); + vdisktime = cfqg->vdisktime; + } + + if (st->left) { + cfqg = rb_entry_cfqg(st->left); + vdisktime = min_vdisktime(vdisktime, cfqg->vdisktime); + } + + st->min_vdisktime = max_vdisktime(st->min_vdisktime, vdisktime); +} + +/* + * get averaged number of queues of RT/BE priority. + * average is updated, with a formula that gives more weight to higher numbers, + * to quickly follows sudden increases and decrease slowly + */ + +static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd, + struct cfq_group *cfqg, bool rt) +{ + unsigned min_q, max_q; + unsigned mult = cfq_hist_divisor - 1; + unsigned round = cfq_hist_divisor / 2; + unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg); + + min_q = min(cfqg->busy_queues_avg[rt], busy); + max_q = max(cfqg->busy_queues_avg[rt], busy); + cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) / + cfq_hist_divisor; + return cfqg->busy_queues_avg[rt]; +} + +static inline unsigned +cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg) +{ + struct cfq_rb_root *st = &cfqd->grp_service_tree; + + return cfq_target_latency * cfqg->weight / st->total_weight; +} + static inline void cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) { - cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies; + unsigned slice = cfq_prio_to_slice(cfqd, cfqq); + if (cfqd->cfq_latency) { + /* + * interested queues (we consider only the ones with the same + * priority class in the cfq group) + */ + unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg, + cfq_class_rt(cfqq)); + unsigned sync_slice = cfqd->cfq_slice[1]; + unsigned expect_latency = sync_slice * iq; + unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg); + + if (expect_latency > group_slice) { + unsigned base_low_slice = 2 * cfqd->cfq_slice_idle; + /* scale low_slice according to IO priority + * and sync vs async */ + unsigned low_slice = + min(slice, base_low_slice * slice / sync_slice); + /* the adapted slice value is scaled to fit all iqs + * into the target latency */ + slice = max(slice * group_slice / expect_latency, + low_slice); + } + } + cfqq->slice_start = jiffies; + cfqq->slice_end = jiffies + slice; + cfqq->allocated_slice = slice; cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies); } @@ -331,9 +618,9 @@ static inline bool cfq_slice_used(struct cfq_queue *cfqq) * behind the head is penalized and only allowed to a certain extent. 
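A rough worked pass through the low-latency scaling above (illustrative numbers, not from the patch; assuming HZ = 1000 and the defaults declared at the top of this file: cfq_slice_sync = 100 ms, cfq_slice_idle = 8 ms, cfq_target_latency = 300 ms, a single group of weight 500 owning the whole tree):

        unsigned slice       = 100;   /* cfq_prio_to_slice() for a default-prio sync queue */
        unsigned sync_slice  = 100;   /* cfqd->cfq_slice[1]                                */
        unsigned group_slice = 300;   /* cfq_group_slice(): 300 * 500 / 500                */
        unsigned iq          = 5;     /* cfq_group_get_avg_queues() for this prio class    */

        unsigned expect_latency = sync_slice * iq;              /* 500 ms > group_slice */
        unsigned base_low_slice = 2 * 8;                        /* 2 * cfq_slice_idle   */
        unsigned low_slice = min(slice, base_low_slice * slice / sync_slice);  /* 16 ms */

        slice = max(slice * group_slice / expect_latency, low_slice);   /* 60 ms, not 100 */

So five equally busy sync queues fit inside the 300 ms latency target instead of stacking up 500 ms worth of full slices.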
*/ static struct request * -cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2) +cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last) { - sector_t last, s1, s2, d1 = 0, d2 = 0; + sector_t s1, s2, d1 = 0, d2 = 0; unsigned long back_max; #define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */ #define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */ @@ -356,8 +643,6 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2) s1 = blk_rq_pos(rq1); s2 = blk_rq_pos(rq2); - last = cfqd->last_position; - /* * by definition, 1KiB is 2 sectors */ @@ -425,6 +710,10 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2) */ static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root) { + /* Service tree is empty */ + if (!root->count) + return NULL; + if (!root->left) root->left = rb_first(&root->rb); @@ -434,6 +723,17 @@ static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root) return NULL; } +static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root) +{ + if (!root->left) + root->left = rb_first(&root->rb); + + if (root->left) + return rb_entry_cfqg(root->left); + + return NULL; +} + static void rb_erase_init(struct rb_node *n, struct rb_root *root) { rb_erase(n, root); @@ -445,6 +745,7 @@ static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root) if (root->left == n) root->left = NULL; rb_erase_init(n, &root->rb); + --root->count; } /* @@ -471,7 +772,7 @@ cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq, next = rb_entry_rq(rbnext); } - return cfq_choose_req(cfqd, next, prev); + return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last)); } static unsigned long cfq_slice_offset(struct cfq_data *cfqd, @@ -480,12 +781,336 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd, /* * just an approximation, should be ok. */ - return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) - + return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) - cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio)); } +static inline s64 +cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg) +{ + return cfqg->vdisktime - st->min_vdisktime; +} + +static void +__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg) +{ + struct rb_node **node = &st->rb.rb_node; + struct rb_node *parent = NULL; + struct cfq_group *__cfqg; + s64 key = cfqg_key(st, cfqg); + int left = 1; + + while (*node != NULL) { + parent = *node; + __cfqg = rb_entry_cfqg(parent); + + if (key < cfqg_key(st, __cfqg)) + node = &parent->rb_left; + else { + node = &parent->rb_right; + left = 0; + } + } + + if (left) + st->left = &cfqg->rb_node; + + rb_link_node(&cfqg->rb_node, parent, node); + rb_insert_color(&cfqg->rb_node, &st->rb); +} + +static void +cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg) +{ + struct cfq_rb_root *st = &cfqd->grp_service_tree; + struct cfq_group *__cfqg; + struct rb_node *n; + + cfqg->nr_cfqq++; + if (cfqg->on_st) + return; + + /* + * Currently put the group at the end. Later implement something + * so that groups get lesser vtime based on their weights, so that + * if group does not loose all if it was not continously backlogged. 
+ */ + n = rb_last(&st->rb); + if (n) { + __cfqg = rb_entry_cfqg(n); + cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY; + } else + cfqg->vdisktime = st->min_vdisktime; + + __cfq_group_service_tree_add(st, cfqg); + cfqg->on_st = true; + cfqd->nr_groups++; + st->total_weight += cfqg->weight; +} + +static void +cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg) +{ + struct cfq_rb_root *st = &cfqd->grp_service_tree; + + if (st->active == &cfqg->rb_node) + st->active = NULL; + + BUG_ON(cfqg->nr_cfqq < 1); + cfqg->nr_cfqq--; + + /* If there are other cfq queues under this group, don't delete it */ + if (cfqg->nr_cfqq) + return; + + cfq_log_cfqg(cfqd, cfqg, "del_from_rr group"); + cfqg->on_st = false; + cfqd->nr_groups--; + st->total_weight -= cfqg->weight; + if (!RB_EMPTY_NODE(&cfqg->rb_node)) + cfq_rb_erase(&cfqg->rb_node, st); + cfqg->saved_workload_slice = 0; + blkiocg_update_blkio_group_dequeue_stats(&cfqg->blkg, 1); +} + +static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq) +{ + unsigned int slice_used; + + /* + * Queue got expired before even a single request completed or + * got expired immediately after first request completion. + */ + if (!cfqq->slice_start || cfqq->slice_start == jiffies) { + /* + * Also charge the seek time incurred to the group, otherwise + * if there are mutiple queues in the group, each can dispatch + * a single request on seeky media and cause lots of seek time + * and group will never know it. + */ + slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start), + 1); + } else { + slice_used = jiffies - cfqq->slice_start; + if (slice_used > cfqq->allocated_slice) + slice_used = cfqq->allocated_slice; + } + + cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u sect=%lu", slice_used, + cfqq->nr_sectors); + return slice_used; +} + +static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, + struct cfq_queue *cfqq) +{ + struct cfq_rb_root *st = &cfqd->grp_service_tree; + unsigned int used_sl, charge_sl; + int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) + - cfqg->service_tree_idle.count; + + BUG_ON(nr_sync < 0); + used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq); + + if (!cfq_cfqq_sync(cfqq) && !nr_sync) + charge_sl = cfqq->allocated_slice; + + /* Can't update vdisktime while group is on service tree */ + cfq_rb_erase(&cfqg->rb_node, st); + cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg); + __cfq_group_service_tree_add(st, cfqg); + + /* This group is being expired. 
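A quick illustration (invented numbers) of how the weight feeds into the charging done by cfq_group_served() via cfq_scale_slice(), with CFQ_SERVICE_SHIFT = 12 and BLKIO_WEIGHT_DEFAULT = 500 as defined earlier:

        u64 charge = 60;                                 /* jiffies of slice used */

        u64 vt_heavy = (charge << 12) * 500 / 1000;      /* weight 1000: 122880   */
        u64 vt_light = (charge << 12) * 500 /  250;      /* weight  250: 491520   */

        /*
         * The weight-250 group's vdisktime grows four times faster for the
         * same amount of service, so the leftmost-node pick on the group
         * service tree lands on the weight-1000 group about four times as
         * often, which is the proportional time allocation the weights buy.
         */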
Save the context */ + if (time_after(cfqd->workload_expires, jiffies)) { + cfqg->saved_workload_slice = cfqd->workload_expires + - jiffies; + cfqg->saved_workload = cfqd->serving_type; + cfqg->saved_serving_prio = cfqd->serving_prio; + } else + cfqg->saved_workload_slice = 0; + + cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, + st->min_vdisktime); + blkiocg_update_blkio_group_stats(&cfqg->blkg, used_sl, + cfqq->nr_sectors); +} + +#ifdef CONFIG_CFQ_GROUP_IOSCHED +static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg) +{ + if (blkg) + return container_of(blkg, struct cfq_group, blkg); + return NULL; +} + +void +cfq_update_blkio_group_weight(struct blkio_group *blkg, unsigned int weight) +{ + cfqg_of_blkg(blkg)->weight = weight; +} + +static struct cfq_group * +cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create) +{ + struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup); + struct cfq_group *cfqg = NULL; + void *key = cfqd; + int i, j; + struct cfq_rb_root *st; + struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info; + unsigned int major, minor; + + /* Do we need to take this reference */ + if (!blkiocg_css_tryget(blkcg)) + return NULL;; + + cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key)); + if (cfqg || !create) + goto done; + + cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node); + if (!cfqg) + goto done; + + cfqg->weight = blkcg->weight; + for_each_cfqg_st(cfqg, i, j, st) + *st = CFQ_RB_ROOT; + RB_CLEAR_NODE(&cfqg->rb_node); + + /* + * Take the initial reference that will be released on destroy + * This can be thought of a joint reference by cgroup and + * elevator which will be dropped by either elevator exit + * or cgroup deletion path depending on who is exiting first. + */ + atomic_set(&cfqg->ref, 1); + + /* Add group onto cgroup list */ + sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor); + blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd, + MKDEV(major, minor)); + + /* Add group on cfqd list */ + hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list); + +done: + blkiocg_css_put(blkcg); + return cfqg; +} + /* - * The cfqd->service_tree holds all pending cfq_queue's that have + * Search for the cfq group current task belongs to. If create = 1, then also + * create the cfq group if it does not exist. request_queue lock must be held. 
+ */ +static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create) +{ + struct cgroup *cgroup; + struct cfq_group *cfqg = NULL; + + rcu_read_lock(); + cgroup = task_cgroup(current, blkio_subsys_id); + cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create); + if (!cfqg && create) + cfqg = &cfqd->root_group; + rcu_read_unlock(); + return cfqg; +} + +static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) +{ + /* Currently, all async queues are mapped to root group */ + if (!cfq_cfqq_sync(cfqq)) + cfqg = &cfqq->cfqd->root_group; + + cfqq->cfqg = cfqg; + /* cfqq reference on cfqg */ + atomic_inc(&cfqq->cfqg->ref); +} + +static void cfq_put_cfqg(struct cfq_group *cfqg) +{ + struct cfq_rb_root *st; + int i, j; + + BUG_ON(atomic_read(&cfqg->ref) <= 0); + if (!atomic_dec_and_test(&cfqg->ref)) + return; + for_each_cfqg_st(cfqg, i, j, st) + BUG_ON(!RB_EMPTY_ROOT(&st->rb) || st->active != NULL); + kfree(cfqg); +} + +static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg) +{ + /* Something wrong if we are trying to remove same group twice */ + BUG_ON(hlist_unhashed(&cfqg->cfqd_node)); + + hlist_del_init(&cfqg->cfqd_node); + + /* + * Put the reference taken at the time of creation so that when all + * queues are gone, group can be destroyed. + */ + cfq_put_cfqg(cfqg); +} + +static void cfq_release_cfq_groups(struct cfq_data *cfqd) +{ + struct hlist_node *pos, *n; + struct cfq_group *cfqg; + + hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) { + /* + * If cgroup removal path got to blk_group first and removed + * it from cgroup list, then it will take care of destroying + * cfqg also. + */ + if (!blkiocg_del_blkio_group(&cfqg->blkg)) + cfq_destroy_cfqg(cfqd, cfqg); + } +} + +/* + * Blk cgroup controller notification saying that blkio_group object is being + * delinked as associated cgroup object is going away. That also means that + * no new IO will come in this group. So get rid of this group as soon as + * any pending IO in the group is finished. + * + * This function is called under rcu_read_lock(). key is the rcu protected + * pointer. That means "key" is a valid cfq_data pointer as long as we are rcu + * read lock. + * + * "key" was fetched from blkio_group under blkio_cgroup->lock. That means + * it should not be NULL as even if elevator was exiting, cgroup deltion + * path got to it first. + */ +void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg) +{ + unsigned long flags; + struct cfq_data *cfqd = key; + + spin_lock_irqsave(cfqd->queue->queue_lock, flags); + cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg)); + spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); +} + +#else /* GROUP_IOSCHED */ +static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create) +{ + return &cfqd->root_group; +} +static inline void +cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) { + cfqq->cfqg = cfqg; +} + +static void cfq_release_cfq_groups(struct cfq_data *cfqd) {} +static inline void cfq_put_cfqg(struct cfq_group *cfqg) {} + +#endif /* GROUP_IOSCHED */ + +/* + * The cfqd->service_trees holds all pending cfq_queue's that have * requests waiting to be processed. It is sorted in the order that * we will service the queues. 
*/ @@ -495,11 +1120,42 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, struct rb_node **p, *parent; struct cfq_queue *__cfqq; unsigned long rb_key; + struct cfq_rb_root *service_tree; int left; + int new_cfqq = 1; + int group_changed = 0; + +#ifdef CONFIG_CFQ_GROUP_IOSCHED + if (!cfqd->cfq_group_isolation + && cfqq_type(cfqq) == SYNC_NOIDLE_WORKLOAD + && cfqq->cfqg && cfqq->cfqg != &cfqd->root_group) { + /* Move this cfq to root group */ + cfq_log_cfqq(cfqd, cfqq, "moving to root group"); + if (!RB_EMPTY_NODE(&cfqq->rb_node)) + cfq_group_service_tree_del(cfqd, cfqq->cfqg); + cfqq->orig_cfqg = cfqq->cfqg; + cfqq->cfqg = &cfqd->root_group; + atomic_inc(&cfqd->root_group.ref); + group_changed = 1; + } else if (!cfqd->cfq_group_isolation + && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) { + /* cfqq is sequential now needs to go to its original group */ + BUG_ON(cfqq->cfqg != &cfqd->root_group); + if (!RB_EMPTY_NODE(&cfqq->rb_node)) + cfq_group_service_tree_del(cfqd, cfqq->cfqg); + cfq_put_cfqg(cfqq->cfqg); + cfqq->cfqg = cfqq->orig_cfqg; + cfqq->orig_cfqg = NULL; + group_changed = 1; + cfq_log_cfqq(cfqd, cfqq, "moved to origin group"); + } +#endif + service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq), + cfqq_type(cfqq), cfqd); if (cfq_class_idle(cfqq)) { rb_key = CFQ_IDLE_DELAY; - parent = rb_last(&cfqd->service_tree.rb); + parent = rb_last(&service_tree->rb); if (parent && parent != &cfqq->rb_node) { __cfqq = rb_entry(parent, struct cfq_queue, rb_node); rb_key += __cfqq->rb_key; @@ -517,23 +1173,27 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfqq->slice_resid = 0; } else { rb_key = -HZ; - __cfqq = cfq_rb_first(&cfqd->service_tree); + __cfqq = cfq_rb_first(service_tree); rb_key += __cfqq ? __cfqq->rb_key : jiffies; } if (!RB_EMPTY_NODE(&cfqq->rb_node)) { + new_cfqq = 0; /* * same position, nothing more to do */ - if (rb_key == cfqq->rb_key) + if (rb_key == cfqq->rb_key && + cfqq->service_tree == service_tree) return; - cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree); + cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree); + cfqq->service_tree = NULL; } left = 1; parent = NULL; - p = &cfqd->service_tree.rb.rb_node; + cfqq->service_tree = service_tree; + p = &service_tree->rb.rb_node; while (*p) { struct rb_node **n; @@ -541,35 +1201,28 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, __cfqq = rb_entry(parent, struct cfq_queue, rb_node); /* - * sort RT queues first, we always want to give - * preference to them. IDLE queues goes to the back. - * after that, sort on the next service time. + * sort by key, that represents service time. 
*/ - if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq)) - n = &(*p)->rb_left; - else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq)) - n = &(*p)->rb_right; - else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq)) - n = &(*p)->rb_left; - else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq)) - n = &(*p)->rb_right; - else if (time_before(rb_key, __cfqq->rb_key)) + if (time_before(rb_key, __cfqq->rb_key)) n = &(*p)->rb_left; - else + else { n = &(*p)->rb_right; - - if (n == &(*p)->rb_right) left = 0; + } p = n; } if (left) - cfqd->service_tree.left = &cfqq->rb_node; + service_tree->left = &cfqq->rb_node; cfqq->rb_key = rb_key; rb_link_node(&cfqq->rb_node, parent, p); - rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb); + rb_insert_color(&cfqq->rb_node, &service_tree->rb); + service_tree->count++; + if ((add_front || !new_cfqq) && !group_changed) + return; + cfq_group_service_tree_add(cfqd, cfqq->cfqg); } static struct cfq_queue * @@ -671,13 +1324,16 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) BUG_ON(!cfq_cfqq_on_rr(cfqq)); cfq_clear_cfqq_on_rr(cfqq); - if (!RB_EMPTY_NODE(&cfqq->rb_node)) - cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree); + if (!RB_EMPTY_NODE(&cfqq->rb_node)) { + cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree); + cfqq->service_tree = NULL; + } if (cfqq->p_root) { rb_erase(&cfqq->p_node, cfqq->p_root); cfqq->p_root = NULL; } + cfq_group_service_tree_del(cfqd, cfqq->cfqg); BUG_ON(!cfqd->busy_queues); cfqd->busy_queues--; } @@ -688,7 +1344,6 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) static void cfq_del_rq_rb(struct request *rq) { struct cfq_queue *cfqq = RQ_CFQQ(rq); - struct cfq_data *cfqd = cfqq->cfqd; const int sync = rq_is_sync(rq); BUG_ON(!cfqq->queued[sync]); @@ -696,8 +1351,17 @@ static void cfq_del_rq_rb(struct request *rq) elv_rb_del(&cfqq->sort_list, rq); - if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) - cfq_del_cfqq_rr(cfqd, cfqq); + if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) { + /* + * Queue will be deleted from service tree when we actually + * expire it later. Right now just remove it from prio tree + * as it is empty. + */ + if (cfqq->p_root) { + rb_erase(&cfqq->p_node, cfqq->p_root); + cfqq->p_root = NULL; + } + } } static void cfq_add_rq_rb(struct request *rq) @@ -722,7 +1386,7 @@ static void cfq_add_rq_rb(struct request *rq) * check if this request is a better next-serve candidate */ prev = cfqq->next_rq; - cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq); + cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position); /* * adjust priority tree position, if ->next_rq changes @@ -829,6 +1493,7 @@ static void cfq_merged_requests(struct request_queue *q, struct request *rq, struct request *next) { + struct cfq_queue *cfqq = RQ_CFQQ(rq); /* * reposition in fifo if next is older than rq */ @@ -838,6 +1503,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq, rq_set_fifo_time(rq, rq_fifo_time(next)); } + if (cfqq->next_rq == next) + cfqq->next_rq = rq; cfq_remove_request(next); } @@ -848,6 +1515,9 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq, struct cfq_io_context *cic; struct cfq_queue *cfqq; + /* Deny merge if bio and rq don't belong to same cfq group */ + if ((RQ_CFQQ(rq))->cfqg != cfq_get_cfqg(cfqd, 0)) + return false; /* * Disallow merge of a sync bio into an async request. 
*/ @@ -871,8 +1541,12 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd, { if (cfqq) { cfq_log_cfqq(cfqd, cfqq, "set_active"); + cfqq->slice_start = 0; + cfqq->dispatch_start = jiffies; + cfqq->allocated_slice = 0; cfqq->slice_end = 0; cfqq->slice_dispatch = 0; + cfqq->nr_sectors = 0; cfq_clear_cfqq_wait_request(cfqq); cfq_clear_cfqq_must_dispatch(cfqq); @@ -899,6 +1573,8 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, del_timer(&cfqd->idle_slice_timer); cfq_clear_cfqq_wait_request(cfqq); + cfq_clear_cfqq_wait_busy(cfqq); + cfq_clear_cfqq_wait_busy_done(cfqq); /* * store what was left of this slice, if the queue idled/timed out @@ -908,11 +1584,19 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid); } + cfq_group_served(cfqd, cfqq->cfqg, cfqq); + + if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) + cfq_del_cfqq_rr(cfqd, cfqq); + cfq_resort_rr_list(cfqd, cfqq); if (cfqq == cfqd->active_queue) cfqd->active_queue = NULL; + if (&cfqq->cfqg->rb_node == cfqd->grp_service_tree.active) + cfqd->grp_service_tree.active = NULL; + if (cfqd->active_cic) { put_io_context(cfqd->active_cic->ioc); cfqd->active_cic = NULL; @@ -933,10 +1617,39 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out) */ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd) { - if (RB_EMPTY_ROOT(&cfqd->service_tree.rb)) + struct cfq_rb_root *service_tree = + service_tree_for(cfqd->serving_group, cfqd->serving_prio, + cfqd->serving_type, cfqd); + + if (!cfqd->rq_queued) return NULL; - return cfq_rb_first(&cfqd->service_tree); + /* There is nothing to dispatch */ + if (!service_tree) + return NULL; + if (RB_EMPTY_ROOT(&service_tree->rb)) + return NULL; + return cfq_rb_first(service_tree); +} + +static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd) +{ + struct cfq_group *cfqg; + struct cfq_queue *cfqq; + int i, j; + struct cfq_rb_root *st; + + if (!cfqd->rq_queued) + return NULL; + + cfqg = cfq_get_next_cfqg(cfqd); + if (!cfqg) + return NULL; + + for_each_cfqg_st(cfqg, i, j, st) + if ((cfqq = cfq_rb_first(st)) != NULL) + return cfqq; + return NULL; } /* @@ -945,14 +1658,8 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd) static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) { - if (!cfqq) { + if (!cfqq) cfqq = cfq_get_next_queue(cfqd); - if (cfqq && !cfq_cfqq_coop_preempt(cfqq)) - cfq_clear_cfqq_coop(cfqq); - } - - if (cfqq) - cfq_clear_cfqq_coop_preempt(cfqq); __cfq_set_active_queue(cfqd, cfqq); return cfqq; @@ -967,16 +1674,16 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd, return cfqd->last_position - blk_rq_pos(rq); } -#define CIC_SEEK_THR 8 * 1024 -#define CIC_SEEKY(cic) ((cic)->seek_mean > CIC_SEEK_THR) +#define CFQQ_SEEK_THR 8 * 1024 +#define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR) -static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq) +static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq, + struct request *rq) { - struct cfq_io_context *cic = cfqd->active_cic; - sector_t sdist = cic->seek_mean; + sector_t sdist = cfqq->seek_mean; - if (!sample_valid(cic->seek_samples)) - sdist = CIC_SEEK_THR; + if (!sample_valid(cfqq->seek_samples)) + sdist = CFQQ_SEEK_THR; return cfq_dist_from_last(cfqd, rq) <= sdist; } @@ -1005,7 +1712,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, * will contain the closest sector. 
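For orientation, a small sketch (invented positions) of the closeness test used above: it is purely a distance check, and until enough seek samples accumulate the CFQQ_SEEK_THR default of 8 * 1024 sectors (4 MiB) is used as the cut-off.

        sector_t last  = 1000000;     /* cfqd->last_position                      */
        sector_t pos   = 1004096;     /* blk_rq_pos() of the candidate's next_rq  */
        sector_t sdist = 8 * 1024;    /* seek_samples not yet valid -> threshold  */

        /*
         * cfq_dist_from_last() is 4096 sectors (2 MiB), which is <= sdist,
         * so the queue found in the prio tree counts as a close cooperator.
         */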
*/ __cfqq = rb_entry(parent, struct cfq_queue, p_node); - if (cfq_rq_close(cfqd, __cfqq->next_rq)) + if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) return __cfqq; if (blk_rq_pos(__cfqq->next_rq) < sector) @@ -1016,7 +1723,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, return NULL; __cfqq = rb_entry(node, struct cfq_queue, p_node); - if (cfq_rq_close(cfqd, __cfqq->next_rq)) + if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) return __cfqq; return NULL; @@ -1033,16 +1740,13 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, * assumption. */ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd, - struct cfq_queue *cur_cfqq, - bool probe) + struct cfq_queue *cur_cfqq) { struct cfq_queue *cfqq; - /* - * A valid cfq_io_context is necessary to compare requests against - * the seek_mean of the current cfqq. - */ - if (!cfqd->active_cic) + if (!cfq_cfqq_sync(cur_cfqq)) + return NULL; + if (CFQQ_SEEKY(cur_cfqq)) return NULL; /* @@ -1054,14 +1758,55 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd, if (!cfqq) return NULL; - if (cfq_cfqq_coop(cfqq)) + /* If new queue belongs to different cfq_group, don't choose it */ + if (cur_cfqq->cfqg != cfqq->cfqg) + return NULL; + + /* + * It only makes sense to merge sync queues. + */ + if (!cfq_cfqq_sync(cfqq)) + return NULL; + if (CFQQ_SEEKY(cfqq)) + return NULL; + + /* + * Do not merge queues of different priority classes + */ + if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq)) return NULL; - if (!probe) - cfq_mark_cfqq_coop(cfqq); return cfqq; } +/* + * Determine whether we should enforce idle window for this queue. + */ + +static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + enum wl_prio_t prio = cfqq_prio(cfqq); + struct cfq_rb_root *service_tree = cfqq->service_tree; + + BUG_ON(!service_tree); + BUG_ON(!service_tree->count); + + /* We never do for idle class queues. */ + if (prio == IDLE_WORKLOAD) + return false; + + /* We do for queues that were marked with idle window flag. */ + if (cfq_cfqq_idle_window(cfqq) && + !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)) + return true; + + /* + * Otherwise, we do only if they are the last ones + * in their service tree. + */ + return service_tree->count == 1; +} + static void cfq_arm_slice_timer(struct cfq_data *cfqd) { struct cfq_queue *cfqq = cfqd->active_queue; @@ -1082,13 +1827,13 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) /* * idle is disabled, either manually or by past process history */ - if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq)) + if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq)) return; /* - * still requests with the driver, don't idle + * still active requests from this queue, don't idle */ - if (rq_in_driver(cfqd)) + if (cfqq->dispatched) return; /* @@ -1109,14 +1854,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) cfq_mark_cfqq_wait_request(cfqq); - /* - * we don't want to idle for seeks, but we do want to allow - * fair distribution of slice time for a process doing back-to-back - * seeks. 
so allow a little bit of time for him to submit a new rq - */ sl = cfqd->cfq_slice_idle; - if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic)) - sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT)); mod_timer(&cfqd->idle_slice_timer, jiffies + sl); cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl); @@ -1139,6 +1877,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq) if (cfq_cfqq_sync(cfqq)) cfqd->sync_flight++; + cfqq->nr_sectors += blk_rq_sectors(rq); } /* @@ -1175,6 +1914,207 @@ cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq) } /* + * Must be called with the queue_lock held. + */ +static int cfqq_process_refs(struct cfq_queue *cfqq) +{ + int process_refs, io_refs; + + io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE]; + process_refs = atomic_read(&cfqq->ref) - io_refs; + BUG_ON(process_refs < 0); + return process_refs; +} + +static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq) +{ + int process_refs, new_process_refs; + struct cfq_queue *__cfqq; + + /* Avoid a circular list and skip interim queue merges */ + while ((__cfqq = new_cfqq->new_cfqq)) { + if (__cfqq == cfqq) + return; + new_cfqq = __cfqq; + } + + process_refs = cfqq_process_refs(cfqq); + /* + * If the process for the cfqq has gone away, there is no + * sense in merging the queues. + */ + if (process_refs == 0) + return; + + /* + * Merge in the direction of the lesser amount of work. + */ + new_process_refs = cfqq_process_refs(new_cfqq); + if (new_process_refs >= process_refs) { + cfqq->new_cfqq = new_cfqq; + atomic_add(process_refs, &new_cfqq->ref); + } else { + new_cfqq->new_cfqq = cfqq; + atomic_add(new_process_refs, &cfqq->ref); + } +} + +static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd, + struct cfq_group *cfqg, enum wl_prio_t prio, + bool prio_changed) +{ + struct cfq_queue *queue; + int i; + bool key_valid = false; + unsigned long lowest_key = 0; + enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD; + + if (prio_changed) { + /* + * When priorities switched, we prefer starting + * from SYNC_NOIDLE (first choice), or just SYNC + * over ASYNC + */ + if (service_tree_for(cfqg, prio, cur_best, cfqd)->count) + return cur_best; + cur_best = SYNC_WORKLOAD; + if (service_tree_for(cfqg, prio, cur_best, cfqd)->count) + return cur_best; + + return ASYNC_WORKLOAD; + } + + for (i = 0; i < 3; ++i) { + /* otherwise, select the one with lowest rb_key */ + queue = cfq_rb_first(service_tree_for(cfqg, prio, i, cfqd)); + if (queue && + (!key_valid || time_before(queue->rb_key, lowest_key))) { + lowest_key = queue->rb_key; + cur_best = i; + key_valid = true; + } + } + + return cur_best; +} + +static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg) +{ + enum wl_prio_t previous_prio = cfqd->serving_prio; + bool prio_changed; + unsigned slice; + unsigned count; + struct cfq_rb_root *st; + unsigned group_slice; + + if (!cfqg) { + cfqd->serving_prio = IDLE_WORKLOAD; + cfqd->workload_expires = jiffies + 1; + return; + } + + /* Choose next priority. 
RT > BE > IDLE */ + if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg)) + cfqd->serving_prio = RT_WORKLOAD; + else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg)) + cfqd->serving_prio = BE_WORKLOAD; + else { + cfqd->serving_prio = IDLE_WORKLOAD; + cfqd->workload_expires = jiffies + 1; + return; + } + + /* + * For RT and BE, we have to choose also the type + * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload + * expiration time + */ + prio_changed = (cfqd->serving_prio != previous_prio); + st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type, + cfqd); + count = st->count; + + /* + * If priority didn't change, check workload expiration, + * and that we still have other queues ready + */ + if (!prio_changed && count && + !time_after(jiffies, cfqd->workload_expires)) + return; + + /* otherwise select new workload type */ + cfqd->serving_type = + cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio, prio_changed); + st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type, + cfqd); + count = st->count; + + /* + * the workload slice is computed as a fraction of target latency + * proportional to the number of queues in that workload, over + * all the queues in the same priority class + */ + group_slice = cfq_group_slice(cfqd, cfqg); + + slice = group_slice * count / + max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio], + cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg)); + + if (cfqd->serving_type == ASYNC_WORKLOAD) { + unsigned int tmp; + + /* + * Async queues are currently system wide. Just taking + * proportion of queues with-in same group will lead to higher + * async ratio system wide as generally root group is going + * to have higher weight. A more accurate thing would be to + * calculate system wide asnc/sync ratio. + */ + tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg); + tmp = tmp/cfqd->busy_queues; + slice = min_t(unsigned, slice, tmp); + + /* async workload slice is scaled down according to + * the sync/async slice ratio. */ + slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1]; + } else + /* sync workload slice is at least 2 * cfq_slice_idle */ + slice = max(slice, 2 * cfqd->cfq_slice_idle); + + slice = max_t(unsigned, slice, CFQ_MIN_TT); + cfqd->workload_expires = jiffies + slice; + cfqd->noidle_tree_requires_idle = false; +} + +static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd) +{ + struct cfq_rb_root *st = &cfqd->grp_service_tree; + struct cfq_group *cfqg; + + if (RB_EMPTY_ROOT(&st->rb)) + return NULL; + cfqg = cfq_rb_first_group(st); + st->active = &cfqg->rb_node; + update_min_vdisktime(st); + return cfqg; +} + +static void cfq_choose_cfqg(struct cfq_data *cfqd) +{ + struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd); + + cfqd->serving_group = cfqg; + + /* Restore the workload type data */ + if (cfqg->saved_workload_slice) { + cfqd->workload_expires = jiffies + cfqg->saved_workload_slice; + cfqd->serving_type = cfqg->saved_workload; + cfqd->serving_prio = cfqg->saved_serving_prio; + } + choose_service_tree(cfqd, cfqg); +} + +/* * Select a queue for service. If we have a current active queue, * check whether to continue servicing it, or retrieve and set a new one. */ @@ -1186,10 +2126,13 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) if (!cfqq) goto new_queue; + if (!cfqd->rq_queued) + return NULL; /* * The active queue has run out of time, expire it and select new. 
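A rough worked example of the workload slice computed above (invented counts; same defaults as before, single group of weight 500): the slice is the group's share of the target latency split across the queues of the chosen workload type.

        unsigned group_slice = 300;   /* cfq_group_slice(): whole target latency      */
        unsigned count       = 3;     /* queues on the chosen BE / SYNC_NOIDLE tree   */
        unsigned busy        = 6;     /* larger of busy_queues_avg[BE] and the busy   */
                                      /* count, per the max_t() in the code above     */

        unsigned slice = group_slice * count / busy;    /* 150 ms                     */
        slice = max(slice, 2 * 8);                      /* sync floor, 2 * slice_idle */

        /* workload_expires = jiffies + 150; an ASYNC workload would additionally be
         * scaled by cfq_slice[0] / cfq_slice[1] (40/100 with the defaults).          */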
*/ - if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) + if ((cfq_slice_used(cfqq) || cfq_cfqq_wait_busy_done(cfqq)) + && !cfq_cfqq_must_dispatch(cfqq)) goto expire; /* @@ -1203,11 +2146,14 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) * If another queue has a request waiting within our mean seek * distance, let it run. The expire code will check for close * cooperators and put the close queue at the front of the service - * tree. + * tree. If possible, merge the expiring queue with the new cfqq. */ - new_cfqq = cfq_close_cooperator(cfqd, cfqq, 0); - if (new_cfqq) + new_cfqq = cfq_close_cooperator(cfqd, cfqq); + if (new_cfqq) { + if (!cfqq->new_cfqq) + cfq_setup_merge(cfqq, new_cfqq); goto expire; + } /* * No requests pending. If the active queue still has requests in @@ -1215,7 +2161,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) * conditions to happen (or time out) before selecting a new queue. */ if (timer_pending(&cfqd->idle_slice_timer) || - (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) { + (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) { cfqq = NULL; goto keep_queue; } @@ -1223,6 +2169,13 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) expire: cfq_slice_expired(cfqd, 0); new_queue: + /* + * Current queue expired. Check if we have to switch to a new + * service tree + */ + if (!new_cfqq) + cfq_choose_cfqg(cfqd); + cfqq = cfq_set_active_queue(cfqd, new_cfqq); keep_queue: return cfqq; @@ -1238,6 +2191,9 @@ static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq) } BUG_ON(!list_empty(&cfqq->fifo)); + + /* By default cfqq is not expired if it is empty. Do it explicitly */ + __cfq_slice_expired(cfqq->cfqd, cfqq, 0); return dispatched; } @@ -1250,11 +2206,10 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd) struct cfq_queue *cfqq; int dispatched = 0; - while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL) + while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) dispatched += __cfq_forced_dispatch_cfqq(cfqq); cfq_slice_expired(cfqd, 0); - BUG_ON(cfqd->busy_queues); cfq_log(cfqd, "forced_dispatch=%d", dispatched); @@ -1268,7 +2223,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq) /* * Drain async requests before we start sync IO */ - if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC]) + if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC]) return false; /* @@ -1298,9 +2253,9 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq) return false; /* - * Sole queue user, allow bigger slice + * Sole queue user, no limit */ - max_dispatch *= 4; + max_dispatch = -1; } /* @@ -1407,11 +2362,13 @@ static int cfq_dispatch_requests(struct request_queue *q, int force) * task holds one reference to the queue, dropped when task exits. each rq * in-flight on this queue also holds a reference, dropped when rq is freed. * + * Each cfq queue took a reference on the parent group. Drop it now. * queue lock must be held here. 
*/ static void cfq_put_queue(struct cfq_queue *cfqq) { struct cfq_data *cfqd = cfqq->cfqd; + struct cfq_group *cfqg, *orig_cfqg; BUG_ON(atomic_read(&cfqq->ref) <= 0); @@ -1421,14 +2378,19 @@ static void cfq_put_queue(struct cfq_queue *cfqq) cfq_log_cfqq(cfqd, cfqq, "put_queue"); BUG_ON(rb_first(&cfqq->sort_list)); BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]); - BUG_ON(cfq_cfqq_on_rr(cfqq)); + cfqg = cfqq->cfqg; + orig_cfqg = cfqq->orig_cfqg; if (unlikely(cfqd->active_queue == cfqq)) { __cfq_slice_expired(cfqd, cfqq, 0); cfq_schedule_dispatch(cfqd); } + BUG_ON(cfq_cfqq_on_rr(cfqq)); kmem_cache_free(cfq_pool, cfqq); + cfq_put_cfqg(cfqg); + if (orig_cfqg) + cfq_put_cfqg(orig_cfqg); } /* @@ -1518,11 +2480,29 @@ static void cfq_free_io_context(struct io_context *ioc) static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) { + struct cfq_queue *__cfqq, *next; + if (unlikely(cfqq == cfqd->active_queue)) { __cfq_slice_expired(cfqd, cfqq, 0); cfq_schedule_dispatch(cfqd); } + /* + * If this queue was scheduled to merge with another queue, be + * sure to drop the reference taken on that queue (and others in + * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs. + */ + __cfqq = cfqq->new_cfqq; + while (__cfqq) { + if (__cfqq == cfqq) { + WARN(1, "cfqq->new_cfqq loop detected\n"); + break; + } + next = __cfqq->new_cfqq; + cfq_put_queue(__cfqq); + __cfqq = next; + } + cfq_put_queue(cfqq); } @@ -1703,14 +2683,51 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfqq->pid = pid; } +#ifdef CONFIG_CFQ_GROUP_IOSCHED +static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic) +{ + struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1); + struct cfq_data *cfqd = cic->key; + unsigned long flags; + struct request_queue *q; + + if (unlikely(!cfqd)) + return; + + q = cfqd->queue; + + spin_lock_irqsave(q->queue_lock, flags); + + if (sync_cfqq) { + /* + * Drop reference to sync queue. A new sync queue will be + * assigned in new group upon arrival of a fresh request. 
+ */ + cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup"); + cic_set_cfqq(cic, NULL, 1); + cfq_put_queue(sync_cfqq); + } + + spin_unlock_irqrestore(q->queue_lock, flags); +} + +static void cfq_ioc_set_cgroup(struct io_context *ioc) +{ + call_for_each_cic(ioc, changed_cgroup); + ioc->cgroup_changed = 0; +} +#endif /* CONFIG_CFQ_GROUP_IOSCHED */ + static struct cfq_queue * cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc, gfp_t gfp_mask) { struct cfq_queue *cfqq, *new_cfqq = NULL; struct cfq_io_context *cic; + struct cfq_group *cfqg; retry: + cfqg = cfq_get_cfqg(cfqd, 1); cic = cfq_cic_lookup(cfqd, ioc); /* cic always exists here */ cfqq = cic_to_cfqq(cic, is_sync); @@ -1741,6 +2758,7 @@ retry: if (cfqq) { cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync); cfq_init_prio_data(cfqq, ioc); + cfq_link_cfqq_cfqg(cfqq, cfqg); cfq_log_cfqq(cfqd, cfqq, "alloced"); } else cfqq = &cfqd->oom_cfqq; @@ -1932,6 +2950,10 @@ out: if (unlikely(ioc->ioprio_changed)) cfq_ioc_set_ioprio(ioc); +#ifdef CONFIG_CFQ_GROUP_IOSCHED + if (unlikely(ioc->cgroup_changed)) + cfq_ioc_set_cgroup(ioc); +#endif return cic; err_free: cfq_cic_free(cic); @@ -1952,33 +2974,46 @@ cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic) } static void -cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic, +cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq, struct request *rq) { sector_t sdist; u64 total; - if (!cic->last_request_pos) + if (!cfqq->last_request_pos) sdist = 0; - else if (cic->last_request_pos < blk_rq_pos(rq)) - sdist = blk_rq_pos(rq) - cic->last_request_pos; + else if (cfqq->last_request_pos < blk_rq_pos(rq)) + sdist = blk_rq_pos(rq) - cfqq->last_request_pos; else - sdist = cic->last_request_pos - blk_rq_pos(rq); + sdist = cfqq->last_request_pos - blk_rq_pos(rq); /* * Don't allow the seek distance to get too large from the * odd fragment, pagein, etc */ - if (cic->seek_samples <= 60) /* second&third seek */ - sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024); + if (cfqq->seek_samples <= 60) /* second&third seek */ + sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*1024); else - sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64); + sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*64); - cic->seek_samples = (7*cic->seek_samples + 256) / 8; - cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8; - total = cic->seek_total + (cic->seek_samples/2); - do_div(total, cic->seek_samples); - cic->seek_mean = (sector_t)total; + cfqq->seek_samples = (7*cfqq->seek_samples + 256) / 8; + cfqq->seek_total = (7*cfqq->seek_total + (u64)256*sdist) / 8; + total = cfqq->seek_total + (cfqq->seek_samples/2); + do_div(total, cfqq->seek_samples); + cfqq->seek_mean = (sector_t)total; + + /* + * If this cfqq is shared between multiple processes, check to + * make sure that those processes are still issuing I/Os within + * the mean seek distance. If not, it may be time to break the + * queues apart again. 
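To see how the per-cfqq seek statistics above behave, a small example with an invented distance: each new request replaces one eighth of the history, so seek_mean tracks recent behaviour while sample_valid() (samples > 80) keeps the estimate from being trusted too early.

        /* Fresh queue (seek_samples = 0, seek_total = 0), one request with
         * sdist = 8000 sectors:                                            */
        unsigned seek_samples = (7 * 0 + 256) / 8;                /* 32     */
        u64      seek_total   = (7 * 0 + (u64)256 * 8000) / 8;    /* 256000 */
        u64      seek_mean    = (seek_total + seek_samples / 2) / seek_samples;
                                                                  /* ~8000  */

        /* Two more identical requests take seek_samples from 32 to 60 to 84,
         * crossing the sample_valid() threshold, while seek_mean stays ~8000. */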
+ */ + if (cfq_cfqq_coop(cfqq)) { + if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start) + cfqq->seeky_start = jiffies; + else if (!CFQQ_SEEKY(cfqq)) + cfqq->seeky_start = 0; + } } /* @@ -1999,14 +3034,15 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, enable_idle = old_idle = cfq_cfqq_idle_window(cfqq); + if (cfqq->queued[0] + cfqq->queued[1] >= 4) + cfq_mark_cfqq_deep(cfqq); + if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle || - (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic))) + (!cfq_cfqq_deep(cfqq) && sample_valid(cfqq->seek_samples) + && CFQQ_SEEKY(cfqq))) enable_idle = 0; else if (sample_valid(cic->ttime_samples)) { - unsigned int slice_idle = cfqd->cfq_slice_idle; - if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic)) - slice_idle = msecs_to_jiffies(CFQ_MIN_TT); - if (cic->ttime_mean > slice_idle) + if (cic->ttime_mean > cfqd->cfq_slice_idle) enable_idle = 0; else enable_idle = 1; @@ -2035,9 +3071,6 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, if (!cfqq) return false; - if (cfq_slice_used(cfqq)) - return true; - if (cfq_class_idle(new_cfqq)) return false; @@ -2051,6 +3084,19 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq)) return true; + if (new_cfqq->cfqg != cfqq->cfqg) + return false; + + if (cfq_slice_used(cfqq)) + return true; + + /* Allow preemption only if we are idling on sync-noidle tree */ + if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD && + cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD && + new_cfqq->service_tree->count == 2 && + RB_EMPTY_ROOT(&cfqq->sort_list)) + return true; + /* * So both queues are sync. Let the new request get disk time if * it's a metadata request and the current queue is doing regular IO. @@ -2071,16 +3117,8 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, * if this request is as-good as one we would expect from the * current cfqq, let it preempt */ - if (cfq_rq_close(cfqd, rq) && (!cfq_cfqq_coop(new_cfqq) || - cfqd->busy_queues == 1)) { - /* - * Mark new queue coop_preempt, so its coop flag will not be - * cleared when new queue gets scheduled at the very first time - */ - cfq_mark_cfqq_coop_preempt(new_cfqq); - cfq_mark_cfqq_coop(new_cfqq); + if (cfq_rq_close(cfqd, cfqq, rq)) return true; - } return false; } @@ -2121,12 +3159,16 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfqq->meta_pending++; cfq_update_io_thinktime(cfqd, cic); - cfq_update_io_seektime(cfqd, cic, rq); + cfq_update_io_seektime(cfqd, cfqq, rq); cfq_update_idle_window(cfqd, cfqq, cic); - cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); + cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); if (cfqq == cfqd->active_queue) { + if (cfq_cfqq_wait_busy(cfqq)) { + cfq_clear_cfqq_wait_busy(cfqq); + cfq_mark_cfqq_wait_busy_done(cfqq); + } /* * Remember that we saw a request from this process, but * don't start queuing just yet. 
Otherwise we risk seeing lots @@ -2141,9 +3183,9 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE || cfqd->busy_queues > 1) { del_timer(&cfqd->idle_slice_timer); - __blk_run_queue(cfqd->queue); - } - cfq_mark_cfqq_must_dispatch(cfqq); + __blk_run_queue(cfqd->queue); + } else + cfq_mark_cfqq_must_dispatch(cfqq); } } else if (cfq_should_preempt(cfqd, cfqq, rq)) { /* @@ -2165,10 +3207,9 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq) cfq_log_cfqq(cfqd, cfqq, "insert_request"); cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc); - cfq_add_rq_rb(rq); - rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]); list_add_tail(&rq->queuelist, &cfqq->fifo); + cfq_add_rq_rb(rq); cfq_rq_enqueued(cfqd, cfqq, rq); } @@ -2179,23 +3220,35 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq) */ static void cfq_update_hw_tag(struct cfq_data *cfqd) { - if (rq_in_driver(cfqd) > cfqd->rq_in_driver_peak) - cfqd->rq_in_driver_peak = rq_in_driver(cfqd); + struct cfq_queue *cfqq = cfqd->active_queue; + + if (rq_in_driver(cfqd) > cfqd->hw_tag_est_depth) + cfqd->hw_tag_est_depth = rq_in_driver(cfqd); + + if (cfqd->hw_tag == 1) + return; if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN && rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN) return; + /* + * If active queue hasn't enough requests and can idle, cfq might not + * dispatch sufficient requests to hardware. Don't zero hw_tag in this + * case + */ + if (cfqq && cfq_cfqq_idle_window(cfqq) && + cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] < + CFQ_HW_QUEUE_MIN && rq_in_driver(cfqd) < CFQ_HW_QUEUE_MIN) + return; + if (cfqd->hw_tag_samples++ < 50) return; - if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN) + if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN) cfqd->hw_tag = 1; else cfqd->hw_tag = 0; - - cfqd->hw_tag_samples = 0; - cfqd->rq_in_driver_peak = 0; } static void cfq_completed_request(struct request_queue *q, struct request *rq) @@ -2206,7 +3259,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) unsigned long now; now = jiffies; - cfq_log_cfqq(cfqd, cfqq, "complete"); + cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", !!rq_noidle(rq)); cfq_update_hw_tag(cfqd); @@ -2234,18 +3287,40 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) cfq_set_prio_slice(cfqd, cfqq); cfq_clear_cfqq_slice_new(cfqq); } + + /* + * If this queue consumed its slice and this is last queue + * in the group, wait for next request before we expire + * the queue + */ + if (cfq_slice_used(cfqq) && cfqq->cfqg->nr_cfqq == 1) { + cfqq->slice_end = jiffies + cfqd->cfq_slice_idle; + cfq_mark_cfqq_wait_busy(cfqq); + } + /* - * If there are no requests waiting in this queue, and - * there are other queues ready to issue requests, AND - * those other queues are issuing requests within our - * mean seek distance, give them a chance to run instead - * of idling. + * Idling is not enabled on: + * - expired queues + * - idle-priority queues + * - async queues + * - queues with still some requests queued + * - when there is a close cooperator */ if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq)) cfq_slice_expired(cfqd, 1); - else if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq, 1) && - sync && !rq_noidle(rq)) - cfq_arm_slice_timer(cfqd); + else if (sync && cfqq_empty && + !cfq_close_cooperator(cfqd, cfqq)) { + cfqd->noidle_tree_requires_idle |= !rq_noidle(rq); + /* + * Idling is enabled for SYNC_WORKLOAD. 
+ * SYNC_NOIDLE_WORKLOAD idles at the end of the tree + * only if we processed at least one !rq_noidle request + */ + if (cfqd->serving_type == SYNC_WORKLOAD + || cfqd->noidle_tree_requires_idle + || cfqq->cfqg->nr_cfqq == 1) + cfq_arm_slice_timer(cfqd); + } } if (!rq_in_driver(cfqd)) @@ -2269,12 +3344,10 @@ static void cfq_prio_boost(struct cfq_queue *cfqq) cfqq->ioprio = IOPRIO_NORM; } else { /* - * check if we need to unboost the queue + * unboost the queue (if needed) */ - if (cfqq->ioprio_class != cfqq->org_ioprio_class) - cfqq->ioprio_class = cfqq->org_ioprio_class; - if (cfqq->ioprio != cfqq->org_ioprio) - cfqq->ioprio = cfqq->org_ioprio; + cfqq->ioprio_class = cfqq->org_ioprio_class; + cfqq->ioprio = cfqq->org_ioprio; } } @@ -2338,6 +3411,43 @@ static void cfq_put_request(struct request *rq) } } +static struct cfq_queue * +cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic, + struct cfq_queue *cfqq) +{ + cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq); + cic_set_cfqq(cic, cfqq->new_cfqq, 1); + cfq_mark_cfqq_coop(cfqq->new_cfqq); + cfq_put_queue(cfqq); + return cic_to_cfqq(cic, 1); +} + +static int should_split_cfqq(struct cfq_queue *cfqq) +{ + if (cfqq->seeky_start && + time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT)) + return 1; + return 0; +} + +/* + * Returns NULL if a new cfqq should be allocated, or the old cfqq if this + * was the last process referring to said cfqq. + */ +static struct cfq_queue * +split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq) +{ + if (cfqq_process_refs(cfqq) == 1) { + cfqq->seeky_start = 0; + cfqq->pid = current->pid; + cfq_clear_cfqq_coop(cfqq); + return cfqq; + } + + cic_set_cfqq(cic, NULL, 1); + cfq_put_queue(cfqq); + return NULL; +} /* * Allocate cfq data structures associated with this request. */ @@ -2360,10 +3470,30 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) if (!cic) goto queue_fail; +new_queue: cfqq = cic_to_cfqq(cic, is_sync); if (!cfqq || cfqq == &cfqd->oom_cfqq) { cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask); cic_set_cfqq(cic, cfqq, is_sync); + } else { + /* + * If the queue was seeky for too long, break it apart. + */ + if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) { + cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq"); + cfqq = split_cfqq(cic, cfqq); + if (!cfqq) + goto new_queue; + } + + /* + * Check to see if this queue is scheduled to merge with + * another, closely cooperating queue. The merging of + * queues happens here as it must be done in process context. + * The reference on new_cfqq was taken in merge_cfqqs. 
+ */ + if (cfqq->new_cfqq) + cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq); } cfqq->allocated[rw]++; @@ -2438,6 +3568,11 @@ static void cfq_idle_slice_timer(unsigned long data) */ if (!RB_EMPTY_ROOT(&cfqq->sort_list)) goto out_kick; + + /* + * Queue depth flag is reset only when the idle didn't succeed + */ + cfq_clear_cfqq_deep(cfqq); } expire: cfq_slice_expired(cfqd, timed_out); @@ -2468,6 +3603,11 @@ static void cfq_put_async_queues(struct cfq_data *cfqd) cfq_put_queue(cfqd->async_idle_cfqq); } +static void cfq_cfqd_free(struct rcu_head *head) +{ + kfree(container_of(head, struct cfq_data, rcu)); +} + static void cfq_exit_queue(struct elevator_queue *e) { struct cfq_data *cfqd = e->elevator_data; @@ -2489,25 +3629,49 @@ static void cfq_exit_queue(struct elevator_queue *e) } cfq_put_async_queues(cfqd); + cfq_release_cfq_groups(cfqd); + blkiocg_del_blkio_group(&cfqd->root_group.blkg); spin_unlock_irq(q->queue_lock); cfq_shutdown_timer_wq(cfqd); - kfree(cfqd); + /* Wait for cfqg->blkg->key accessors to exit their grace periods. */ + call_rcu(&cfqd->rcu, cfq_cfqd_free); } static void *cfq_init_queue(struct request_queue *q) { struct cfq_data *cfqd; - int i; + int i, j; + struct cfq_group *cfqg; + struct cfq_rb_root *st; cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node); if (!cfqd) return NULL; - cfqd->service_tree = CFQ_RB_ROOT; + /* Init root service tree */ + cfqd->grp_service_tree = CFQ_RB_ROOT; + + /* Init root group */ + cfqg = &cfqd->root_group; + for_each_cfqg_st(cfqg, i, j, st) + *st = CFQ_RB_ROOT; + RB_CLEAR_NODE(&cfqg->rb_node); + /* Give preference to root group over other groups */ + cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT; + +#ifdef CONFIG_CFQ_GROUP_IOSCHED + /* + * Take a reference to root group which we never drop. This is just + * to make sure that cfq_put_cfqg() does not try to kfree root group + */ + atomic_set(&cfqg->ref, 1); + blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd, + 0); +#endif /* * Not strictly needed (since RB_ROOT just clears the node and we * zeroed cfqd on alloc), but better be safe in case someone decides @@ -2523,6 +3687,7 @@ static void *cfq_init_queue(struct request_queue *q) */ cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0); atomic_inc(&cfqd->oom_cfqq.ref); + cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group); INIT_LIST_HEAD(&cfqd->cic_list); @@ -2544,8 +3709,10 @@ static void *cfq_init_queue(struct request_queue *q) cfqd->cfq_slice_async_rq = cfq_slice_async_rq; cfqd->cfq_slice_idle = cfq_slice_idle; cfqd->cfq_latency = 1; - cfqd->hw_tag = 1; + cfqd->cfq_group_isolation = 0; + cfqd->hw_tag = -1; cfqd->last_end_sync_rq = jiffies; + INIT_RCU_HEAD(&cfqd->rcu); return cfqd; } @@ -2614,6 +3781,7 @@ SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0); +SHOW_FUNCTION(cfq_group_isolation_show, cfqd->cfq_group_isolation, 0); #undef SHOW_FUNCTION #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ @@ -2646,6 +3814,7 @@ STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0); STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0); +STORE_FUNCTION(cfq_group_isolation_store, &cfqd->cfq_group_isolation, 0, 1, 0); #undef STORE_FUNCTION #define CFQ_ATTR(name) \ @@ -2662,6 +3831,7 @@ static struct elv_fs_entry cfq_attrs[] = 
{ CFQ_ATTR(slice_async_rq), CFQ_ATTR(slice_idle), CFQ_ATTR(low_latency), + CFQ_ATTR(group_isolation), __ATTR_NULL }; @@ -2691,6 +3861,17 @@ static struct elevator_type iosched_cfq = { .elevator_owner = THIS_MODULE, }; +#ifdef CONFIG_CFQ_GROUP_IOSCHED +static struct blkio_policy_type blkio_policy_cfq = { + .ops = { + .blkio_unlink_group_fn = cfq_unlink_blkio_group, + .blkio_update_group_weight_fn = cfq_update_blkio_group_weight, + }, +}; +#else +static struct blkio_policy_type blkio_policy_cfq; +#endif + static int __init cfq_init(void) { /* @@ -2705,6 +3886,7 @@ static int __init cfq_init(void) return -ENOMEM; elv_register(&iosched_cfq); + blkio_policy_register(&blkio_policy_cfq); return 0; } @@ -2712,6 +3894,7 @@ static int __init cfq_init(void) static void __exit cfq_exit(void) { DECLARE_COMPLETION_ONSTACK(all_gone); + blkio_policy_unregister(&blkio_policy_cfq); elv_unregister(&iosched_cfq); ioc_gone = &all_gone; /* ioc_gone's update must be visible before reading ioc_count */ diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c index 9bd086c1a4d..4eb8e9ea4af 100644 --- a/block/compat_ioctl.c +++ b/block/compat_ioctl.c @@ -747,6 +747,8 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) return compat_put_uint(arg, bdev_io_opt(bdev)); case BLKALIGNOFF: return compat_put_int(arg, bdev_alignment_offset(bdev)); + case BLKDISCARDZEROES: + return compat_put_uint(arg, bdev_discard_zeroes_data(bdev)); case BLKFLSBUF: case BLKROSET: case BLKDISCARD: diff --git a/block/elevator.c b/block/elevator.c index a847046c6e5..9ad5ccc4c5e 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -154,10 +154,7 @@ static struct elevator_type *elevator_get(const char *name) spin_unlock(&elv_list_lock); - if (!strcmp(name, "anticipatory")) - sprintf(elv, "as-iosched"); - else - sprintf(elv, "%s-iosched", name); + sprintf(elv, "%s-iosched", name); request_module("%s", elv); spin_lock(&elv_list_lock); @@ -193,10 +190,7 @@ static int __init elevator_setup(char *str) * Be backwards-compatible with previous kernels, so users * won't get the wrong elevator. 
*/ - if (!strcmp(str, "as")) - strcpy(chosen_elevator, "anticipatory"); - else - strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1); + strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1); return 1; } diff --git a/block/genhd.c b/block/genhd.c index 517e4332cb3..b11a4ad7d57 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -861,12 +861,23 @@ static ssize_t disk_alignment_offset_show(struct device *dev, return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue)); } +static ssize_t disk_discard_alignment_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return sprintf(buf, "%u\n", queue_discard_alignment(disk->queue)); +} + static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL); static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL); static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL); static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL); static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL); +static DEVICE_ATTR(discard_alignment, S_IRUGO, disk_discard_alignment_show, + NULL); static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL); static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL); @@ -887,6 +898,7 @@ static struct attribute *disk_attrs[] = { &dev_attr_ro.attr, &dev_attr_size.attr, &dev_attr_alignment_offset.attr, + &dev_attr_discard_alignment.attr, &dev_attr_capability.attr, &dev_attr_stat.attr, &dev_attr_inflight.attr, diff --git a/block/ioctl.c b/block/ioctl.c index 1f4d1de12b0..be48ea51fae 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -280,6 +280,8 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, return put_uint(arg, bdev_io_opt(bdev)); case BLKALIGNOFF: return put_int(arg, bdev_alignment_offset(bdev)); + case BLKDISCARDZEROES: + return put_uint(arg, bdev_discard_zeroes_data(bdev)); case BLKSECTGET: return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev))); case BLKRASET: diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index e5b10017a50..a8b5a10eb5b 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c @@ -35,7 +35,9 @@ struct blk_cmd_filter { unsigned long read_ok[BLK_SCSI_CMD_PER_LONG]; unsigned long write_ok[BLK_SCSI_CMD_PER_LONG]; -} blk_default_cmd_filter; +}; + +static struct blk_cmd_filter blk_default_cmd_filter; /* Command group 3 is reserved and should never be used. */ const unsigned char scsi_command_size_tbl[8] = @@ -675,7 +677,7 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod } EXPORT_SYMBOL(scsi_cmd_ioctl); -int __init blk_scsi_ioctl_init(void) +static int __init blk_scsi_ioctl_init(void) { blk_set_cmd_filter_defaults(&blk_default_cmd_filter); return 0; |
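One detail from the cfq_update_io_seektime() hunk earlier in this patch is worth spelling out: the seek statistics move from the cfq_io_context to the cfq_queue, and the mean seek distance is a fixed-point decaying average (keep 7/8 of the history, fold in the new sample scaled by 256). The following is a standalone userspace sketch of that arithmetic, not kernel code; the struct and function names are illustrative and only mirror the fields the patch adds to struct cfq_queue.

/*
 * Sketch of the per-queue seek statistics the patch keeps in
 * cfq_update_io_seektime(): a decaying average of seek distances,
 * scaled by 256 so integer math keeps some precision.
 */
#include <stdio.h>
#include <stdint.h>

struct seek_stats {
	uint64_t last_pos;	/* sector just past the previous request */
	uint64_t seek_samples;	/* decayed sample count, scaled by 256 */
	uint64_t seek_total;	/* decayed distance sum, scaled by 256 */
	uint64_t seek_mean;	/* derived mean seek distance in sectors */
};

static uint64_t min_u64(uint64_t a, uint64_t b)
{
	return a < b ? a : b;
}

static void update_seek_stats(struct seek_stats *s, uint64_t pos, uint64_t len)
{
	uint64_t sdist, total;

	/* Distance from the previous request; 0 for the first sample. */
	if (!s->last_pos)
		sdist = 0;
	else if (s->last_pos < pos)
		sdist = pos - s->last_pos;
	else
		sdist = s->last_pos - pos;

	/* Clamp odd outliers (fragments, pageins), as the patch does. */
	if (s->seek_samples <= 60)
		sdist = min_u64(sdist, s->seek_mean * 4 + 2*1024*1024);
	else
		sdist = min_u64(sdist, s->seek_mean * 4 + 2*1024*64);

	/* Exponential decay: keep 7/8 of the history, add the new sample. */
	s->seek_samples = (7 * s->seek_samples + 256) / 8;
	s->seek_total   = (7 * s->seek_total + 256 * sdist) / 8;

	/* Rounded division yields the current mean seek distance. */
	total = s->seek_total + s->seek_samples / 2;
	s->seek_mean = total / s->seek_samples;

	s->last_pos = pos + len;
}

int main(void)
{
	struct seek_stats s = { 0 };
	uint64_t pos = 0;
	int i;

	/* Mostly sequential I/O with an occasional long seek. */
	for (i = 0; i < 20; i++) {
		update_seek_stats(&s, pos, 8);
		pos += (i % 5 == 4) ? (uint64_t)1 << 20 : 8;
	}
	printf("mean seek distance: %llu sectors\n",
	       (unsigned long long)s.seek_mean);
	return 0;
}

Because the history decays by 1/8 per request, a queue that goes seeky is noticed within a few tens of requests, which is what the new seeky_start/CFQQ_COOP_TOUT check relies on when deciding whether to break a shared queue apart again.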
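The reworked cfq_update_hw_tag() is similarly compact but easy to misread in diff form: hw_tag now starts at -1 ("unknown"), the peak in-driver count is tracked in hw_tag_est_depth, and once queueing support is detected it is never un-detected. Below is a userspace sketch of that detection loop under the same constants; the per-active-queue idling exception in the patch is omitted for brevity, and everything outside update_hw_tag() is invented purely for illustration.

/*
 * Sketch of the queueing-depth detection done by cfq_update_hw_tag():
 * watch the peak number of requests the driver holds and, after enough
 * samples, decide whether the device does its own command queueing.
 */
#include <stdio.h>

#define CFQ_HW_QUEUE_MIN	5

struct hw_tag_state {
	int hw_tag;		/* -1 = unknown, 0 = no queueing, 1 = queueing */
	int hw_tag_est_depth;	/* deepest in-driver depth seen so far */
	int hw_tag_samples;	/* samples taken while depth was meaningful */
};

static void update_hw_tag(struct hw_tag_state *st, int rq_in_driver,
			  int rq_queued)
{
	if (rq_in_driver > st->hw_tag_est_depth)
		st->hw_tag_est_depth = rq_in_driver;

	/* Once queueing support has been detected, stick with it. */
	if (st->hw_tag == 1)
		return;

	/* Only sample while there is enough outstanding I/O to matter. */
	if (rq_queued <= CFQ_HW_QUEUE_MIN && rq_in_driver <= CFQ_HW_QUEUE_MIN)
		return;

	if (st->hw_tag_samples++ < 50)
		return;

	st->hw_tag = st->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN;
}

int main(void)
{
	struct hw_tag_state st = { .hw_tag = -1 };
	int i;

	/* A device that keeps ~32 requests in flight looks like NCQ/TCQ. */
	for (i = 0; i < 60; i++)
		update_hw_tag(&st, 32, 8);

	printf("hw_tag = %d (estimated depth %d)\n",
	       st.hw_tag, st.hw_tag_est_depth);
	return 0;
}

The extra guard the patch adds (skipping the sample when the active queue is idling with only a handful of requests) exists because CFQ itself throttles dispatch in that case, so a shallow driver queue there says nothing about the hardware.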