path: root/block/cfq-iosched.c
author     Tejun Heo <tj@kernel.org>	2012-04-01 14:38:43 -0700
committer  Tejun Heo <tj@kernel.org>	2012-04-01 14:38:43 -0700
commit     2ce4d50f9cfab40831eee5e51e950d5c4724994b
tree       80dec4f311844b6a2af8a3f621ba9965de345d6d /block/cfq-iosched.c
parent     60c2bc2d5a12369deef395cda41638d7e6b6bf19
cfq: collapse cfq.h into cfq-iosched.c
block/cfq.h contains some functions which interact with blkcg; however, they are only part of the picture, and cfq-iosched.c already carries quite a few #ifdef CONFIG_CFQ_GROUP_IOSCHED blocks. With conf/stat handling being moved to the specific policies, keeping these relay functions isolated in cfq.h doesn't make much sense. Collapse cfq.h into cfq-iosched.c for now; blkcg support can be split out properly later if necessary.

Signed-off-by: Tejun Heo <tj@kernel.org>
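For reference, here is a condensed sketch of the relay pattern that the diff below folds into cfq-iosched.c (one representative helper shown; the types and function names are taken from the diff itself): with CONFIG_CFQ_GROUP_IOSCHED enabled the wrapper forwards to the corresponding blkcg stat helper, otherwise it collapses to an empty static inline so call sites need no #ifdef of their own.

#ifdef CONFIG_CFQ_GROUP_IOSCHED

#include "blk-cgroup.h"

/* With group scheduling: relay straight to the blkcg stat helper. */
static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol)
{
	blkiocg_update_idle_time_stats(blkg, pol);
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED */

/* Without group scheduling: empty stub, the call compiles away. */
static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED */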
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c  114
1 file changed, 113 insertions(+), 1 deletion(-)
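On the caller side, the payoff is that the stat hooks can be invoked unconditionally. The sketch below is a hypothetical helper (the name cfq_account_completion and the exact argument plumbing are illustrative, loosely modelled on the completion path in cfq-iosched.c); it shows a call site using the relay wrapper together with blkio_policy_cfq and cfqg_to_blkg() from the diff. When CONFIG_CFQ_GROUP_IOSCHED is off, the empty stub lets the compiler drop the call entirely.

/*
 * Illustrative caller, not taken verbatim from the commit: accounts a
 * completed request against its cgroup via the relay wrapper.
 */
static void cfq_account_completion(struct cfq_data *cfqd, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);	/* per-request cfq queue */

	cfq_blkiocg_update_completion_stats(cfqg_to_blkg(cfqq->cfqg),
					    &blkio_policy_cfq,
					    rq_start_time_ns(rq),
					    rq_io_start_time_ns(rq),
					    rq_data_dir(rq),
					    rq_is_sync(rq));
}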
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 119e061a767..2e13e9e689b 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -15,7 +15,6 @@
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include "blk.h"
-#include "cfq.h"
static struct blkio_policy_type blkio_policy_cfq;
@@ -367,6 +366,9 @@ CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
#ifdef CONFIG_CFQ_GROUP_IOSCHED
+
+#include "blk-cgroup.h"
+
static inline struct cfq_group *blkg_to_cfqg(struct blkio_group *blkg)
{
return blkg_to_pdata(blkg, &blkio_policy_cfq);
@@ -396,6 +398,82 @@ static inline void cfqg_put(struct cfq_group *cfqg)
blk_add_trace_msg((cfqd)->queue, "%s " fmt, \
blkg_path(cfqg_to_blkg((cfqg))), ##args) \
+static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
+ struct blkio_policy_type *pol,
+ struct blkio_group *curr_blkg,
+ bool direction, bool sync)
+{
+ blkiocg_update_io_add_stats(blkg, pol, curr_blkg, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+ struct blkio_policy_type *pol, unsigned long dequeue)
+{
+ blkiocg_update_dequeue_stats(blkg, pol, dequeue);
+}
+
+static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
+ struct blkio_policy_type *pol, unsigned long time,
+ unsigned long unaccounted_time)
+{
+ blkiocg_update_timeslice_used(blkg, pol, time, unaccounted_time);
+}
+
+static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
+ struct blkio_policy_type *pol)
+{
+ blkiocg_set_start_empty_time(blkg, pol);
+}
+
+static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+ struct blkio_policy_type *pol, bool direction,
+ bool sync)
+{
+ blkiocg_update_io_remove_stats(blkg, pol, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
+ struct blkio_policy_type *pol, bool direction,
+ bool sync)
+{
+ blkiocg_update_io_merged_stats(blkg, pol, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
+ struct blkio_policy_type *pol)
+{
+ blkiocg_update_idle_time_stats(blkg, pol);
+}
+
+static inline void
+cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
+ struct blkio_policy_type *pol)
+{
+ blkiocg_update_avg_queue_size_stats(blkg, pol);
+}
+
+static inline void
+cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
+ struct blkio_policy_type *pol)
+{
+ blkiocg_update_set_idle_time_stats(blkg, pol);
+}
+
+static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
+ struct blkio_policy_type *pol, uint64_t bytes,
+ bool direction, bool sync)
+{
+ blkiocg_update_dispatch_stats(blkg, pol, bytes, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
+ struct blkio_policy_type *pol, uint64_t start_time,
+ uint64_t io_start_time, bool direction, bool sync)
+{
+ blkiocg_update_completion_stats(blkg, pol, start_time, io_start_time,
+ direction, sync);
+}
+
#else /* CONFIG_CFQ_GROUP_IOSCHED */
static inline struct cfq_group *blkg_to_cfqg(struct blkio_group *blkg) { return NULL; }
@@ -407,6 +485,40 @@ static inline void cfqg_put(struct cfq_group *cfqg) { }
blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)
+static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
+ struct blkio_policy_type *pol,
+ struct blkio_group *curr_blkg, bool direction,
+ bool sync) { }
+static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+ struct blkio_policy_type *pol, unsigned long dequeue) { }
+static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
+ struct blkio_policy_type *pol, unsigned long time,
+ unsigned long unaccounted_time) { }
+static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
+ struct blkio_policy_type *pol) { }
+static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+ struct blkio_policy_type *pol, bool direction,
+ bool sync) { }
+static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
+ struct blkio_policy_type *pol, bool direction,
+ bool sync) { }
+static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
+ struct blkio_policy_type *pol) { }
+static inline void
+cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
+ struct blkio_policy_type *pol) { }
+
+static inline void
+cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
+ struct blkio_policy_type *pol) { }
+
+static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
+ struct blkio_policy_type *pol, uint64_t bytes,
+ bool direction, bool sync) { }
+static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
+ struct blkio_policy_type *pol, uint64_t start_time,
+ uint64_t io_start_time, bool direction, bool sync) { }
+
#endif /* CONFIG_CFQ_GROUP_IOSCHED */
#define cfq_log(cfqd, fmt, args...) \