author	Divyesh Shah <dpshah@google.com>	2010-04-01 15:01:41 -0700
committer	Jens Axboe <jens.axboe@oracle.com>	2010-04-02 08:44:37 +0200
commit	9195291e5f05e01d67f9a09c756b8aca8f009089 (patch)
tree	e46a151fe39be2bc23d0683bedb2cbefe916fb5b /block/blk-cgroup.c
parent	303a3acb2362f16c7e7f4c53b40c2f4b396dc8d5 (diff)
blkio: Increment the blkio cgroup stats for real now
We also add start_time_ns and io_start_time_ns fields to struct request
to record when a request is created and when it is dispatched to the
device. We use nanosecond-resolution unsigned integers here because
milliseconds and jiffies are too coarse for non-rotational media (a
userspace sketch of this bookkeeping follows the sign-offs).
Signed-off-by: Divyesh Shah <dpshah@google.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
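The two timestamps described above are plain nanosecond counters on struct request, stamped once at creation and once at dispatch. As a rough illustration (a minimal userspace sketch, not the kernel code), the program below mimics that bookkeeping; struct request_sketch and now_ns() are made-up names, and CLOCK_MONOTONIC stands in for the kernel's sched_clock():

```c
/* Minimal userspace sketch of the two-timestamp bookkeeping described
 * in the commit message above. Names are illustrative, not kernel API. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct request_sketch {
	uint64_t start_time_ns;    /* stamped when the request is created */
	uint64_t io_start_time_ns; /* stamped when it is dispatched */
};

/* Stand-in for the kernel's sched_clock(): current time in nanoseconds. */
static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

int main(void)
{
	struct request_sketch rq;

	rq.start_time_ns = now_ns();    /* request created and queued */
	/* ... request waits in the queue ... */
	rq.io_start_time_ns = now_ns(); /* request handed to the driver */

	printf("queued for %llu ns before dispatch\n",
	       (unsigned long long)(rq.io_start_time_ns - rq.start_time_ns));
	return 0;
}
```

Nanosecond granularity matters here: with HZ between 100 and 1000, a jiffy is 1-10 ms, while a request to non-rotational media can complete in tens of microseconds, so jiffy-based accounting would round most samples down to zero.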
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r-- | block/blk-cgroup.c | 60
1 file changed, 58 insertions(+), 2 deletions(-)
```diff
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index ad6843f2e0a..9af7257f429 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -15,6 +15,7 @@
 #include <linux/kdev_t.h>
 #include <linux/module.h>
 #include <linux/err.h>
+#include <linux/blkdev.h>
 #include "blk-cgroup.h"
 
 static DEFINE_SPINLOCK(blkio_list_lock);
@@ -55,6 +56,26 @@ struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
 }
 EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
 
+/*
+ * Add to the appropriate stat variable depending on the request type.
+ * This should be called with the blkg->stats_lock held.
+ */
+void io_add_stat(uint64_t *stat, uint64_t add, unsigned int flags)
+{
+	if (flags & REQ_RW)
+		stat[IO_WRITE] += add;
+	else
+		stat[IO_READ] += add;
+	/*
+	 * Everywhere in the block layer, an IO is treated as sync if it is a
+	 * read or a SYNC write. We follow the same norm.
+	 */
+	if (!(flags & REQ_RW) || flags & REQ_RW_SYNC)
+		stat[IO_SYNC] += add;
+	else
+		stat[IO_ASYNC] += add;
+}
+
 void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
 {
 	unsigned long flags;
@@ -65,6 +86,41 @@ void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
 
+void blkiocg_update_request_dispatch_stats(struct blkio_group *blkg,
+				struct request *rq)
+{
+	struct blkio_group_stats *stats;
+	unsigned long flags;
+
+	spin_lock_irqsave(&blkg->stats_lock, flags);
+	stats = &blkg->stats;
+	stats->sectors += blk_rq_sectors(rq);
+	io_add_stat(stats->io_serviced, 1, rq->cmd_flags);
+	io_add_stat(stats->io_service_bytes, blk_rq_sectors(rq) << 9,
+			rq->cmd_flags);
+	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_request_dispatch_stats);
+
+void blkiocg_update_request_completion_stats(struct blkio_group *blkg,
+				struct request *rq)
+{
+	struct blkio_group_stats *stats;
+	unsigned long flags;
+	unsigned long long now = sched_clock();
+
+	spin_lock_irqsave(&blkg->stats_lock, flags);
+	stats = &blkg->stats;
+	if (time_after64(now, rq->io_start_time_ns))
+		io_add_stat(stats->io_service_time, now - rq->io_start_time_ns,
+				rq->cmd_flags);
+	if (time_after64(rq->io_start_time_ns, rq->start_time_ns))
+		io_add_stat(stats->io_wait_time,
+				rq->io_start_time_ns - rq->start_time_ns,
+				rq->cmd_flags);
+	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_request_completion_stats);
+
 void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
 			struct blkio_group *blkg, void *key, dev_t dev)
 {
@@ -325,12 +381,12 @@ SHOW_FUNCTION_PER_GROUP(dequeue, get_stat, get_dequeue_stat, 0);
 #undef SHOW_FUNCTION_PER_GROUP
 
 #ifdef CONFIG_DEBUG_BLK_CGROUP
-void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
+void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
 			unsigned long dequeue)
 {
 	blkg->stats.dequeue += dequeue;
 }
-EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_dequeue_stats);
+EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
 #endif
 
 struct cftype blkio_files[] = {
```
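The central helper in the diff, io_add_stat(), buckets each sample twice: once by direction (read vs. write) and once by synchrony, where every read counts as sync and a write counts as sync only when REQ_RW_SYNC is set. The self-contained sketch below reproduces that truth table; the flag bit values and the stat indices are made-up stand-ins for the kernel's definitions:

```c
/* Standalone sketch of io_add_stat()'s classification rules.
 * REQ_RW and REQ_RW_SYNC here are illustrative bits, not the
 * kernel's flag values. */
#include <stdint.h>
#include <stdio.h>

#define REQ_RW      (1u << 0) /* write request */
#define REQ_RW_SYNC (1u << 1) /* synchronous write */

enum { IO_READ, IO_WRITE, IO_SYNC, IO_ASYNC, IO_STAT_NR };

static void io_add_stat(uint64_t *stat, uint64_t add, unsigned int flags)
{
	/* Direction: exactly one of READ or WRITE gets the sample. */
	if (flags & REQ_RW)
		stat[IO_WRITE] += add;
	else
		stat[IO_READ] += add;
	/* Reads and SYNC writes count as sync, like the block layer. */
	if (!(flags & REQ_RW) || (flags & REQ_RW_SYNC))
		stat[IO_SYNC] += add;
	else
		stat[IO_ASYNC] += add;
}

int main(void)
{
	uint64_t stat[IO_STAT_NR] = { 0 };

	io_add_stat(stat, 1, 0);                    /* read       -> READ, SYNC   */
	io_add_stat(stat, 1, REQ_RW);               /* write      -> WRITE, ASYNC */
	io_add_stat(stat, 1, REQ_RW | REQ_RW_SYNC); /* sync write -> WRITE, SYNC  */

	/* Prints: read=1 write=2 sync=2 async=1 */
	printf("read=%llu write=%llu sync=%llu async=%llu\n",
	       (unsigned long long)stat[IO_READ],
	       (unsigned long long)stat[IO_WRITE],
	       (unsigned long long)stat[IO_SYNC],
	       (unsigned long long)stat[IO_ASYNC]);
	return 0;
}
```

In the kernel, callers must hold blkg->stats_lock around io_add_stat(); the sketch omits locking because it is single-threaded.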
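On completion, blkiocg_update_request_completion_stats() turns the two timestamps into the exported counters: io_wait_time accumulates io_start_time_ns - start_time_ns (time spent queued before dispatch), and io_service_time accumulates now - io_start_time_ns (time on the device). The time_after64() checks drop a sample rather than record a bogus delta when the timestamps appear out of order, presumably because sched_clock() values taken on different CPUs need not be mutually monotonic. A minimal sketch of that arithmetic, with made-up numbers:

```c
/* Sketch of the completion-path arithmetic, not the kernel code.
 * Timestamps are plain nanosecond values. */
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's time_after64(a, b): true if a is after b. */
static int time_after64(uint64_t a, uint64_t b)
{
	return (int64_t)(b - a) < 0;
}

int main(void)
{
	uint64_t start_time_ns    = 100000; /* created    at 100 us */
	uint64_t io_start_time_ns = 150000; /* dispatched at 150 us */
	uint64_t now              = 900000; /* completed  at 900 us */
	uint64_t io_wait_time = 0, io_service_time = 0;

	/* Guards skip the sample if a timestamp pair reads backwards. */
	if (time_after64(io_start_time_ns, start_time_ns))
		io_wait_time = io_start_time_ns - start_time_ns;
	if (time_after64(now, io_start_time_ns))
		io_service_time = now - io_start_time_ns;

	/* wait = 50000 ns in the queue, service = 750000 ns on the device */
	printf("wait=%llu ns, service=%llu ns\n",
	       (unsigned long long)io_wait_time,
	       (unsigned long long)io_service_time);
	return 0;
}
```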