-rw-r--r--  block/blk-core.c              15
-rw-r--r--  include/trace/events/block.h  11
-rw-r--r--  kernel/trace/blktrace.c        6
3 files changed, 22 insertions, 10 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index eeaca0998df..d20ce1e849c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2668,12 +2668,19 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
return !(rqa->q <= rqb->q);
}
+static void queue_unplugged(struct request_queue *q, unsigned int depth)
+{
+ trace_block_unplug_io(q, depth);
+ __blk_run_queue(q, false);
+}
+
static void flush_plug_list(struct blk_plug *plug)
{
struct request_queue *q;
unsigned long flags;
struct request *rq;
LIST_HEAD(list);
+ unsigned int depth;
BUG_ON(plug->magic != PLUG_MAGIC);
@@ -2688,6 +2695,7 @@ static void flush_plug_list(struct blk_plug *plug)
}
q = NULL;
+ depth = 0;
local_irq_save(flags);
while (!list_empty(&list)) {
rq = list_entry_rq(list.next);
@@ -2696,10 +2704,11 @@ static void flush_plug_list(struct blk_plug *plug)
BUG_ON(!rq->q);
if (rq->q != q) {
if (q) {
- __blk_run_queue(q, false);
+ queue_unplugged(q, depth);
spin_unlock(q->queue_lock);
}
q = rq->q;
+ depth = 0;
spin_lock(q->queue_lock);
}
rq->cmd_flags &= ~REQ_ON_PLUG;
@@ -2711,10 +2720,12 @@ static void flush_plug_list(struct blk_plug *plug)
__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
else
__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
+
+ depth++;
}
if (q) {
- __blk_run_queue(q, false);
+ queue_unplugged(q, depth);
spin_unlock(q->queue_lock);
}
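
For illustration only (not part of the patch), the counting pattern used above can be sketched in plain C: walk a list that has already been sorted by queue (flush_plug_list sorts its list with plug_rq_cmp() before walking it), count consecutive requests that target the same queue, and report that count whenever the queue changes or the list ends. The names item and flush_batch below are hypothetical stand-ins for struct request and queue_unplugged().

#include <stdio.h>

/* Hypothetical stand-in for a request: it only records which queue it targets. */
struct item {
	int queue_id;
};

/* Stand-in for queue_unplugged(): report how many items were batched up. */
static void flush_batch(int queue_id, unsigned int depth)
{
	printf("queue %d unplugged, depth %u\n", queue_id, depth);
}

int main(void)
{
	/* Items already sorted by queue, as the list_sort() step ensures. */
	struct item list[] = { {1}, {1}, {1}, {2}, {2}, {3} };
	int n = sizeof(list) / sizeof(list[0]);
	int cur = -1;			/* no current queue yet, like q = NULL */
	unsigned int depth = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (list[i].queue_id != cur) {
			if (cur != -1)
				flush_batch(cur, depth);	/* queue changed: flush the batch */
			cur = list[i].queue_id;
			depth = 0;				/* restart the count for the new queue */
		}
		depth++;					/* one more request queued behind this unplug */
	}
	if (cur != -1)
		flush_batch(cur, depth);			/* flush the final batch */

	return 0;
}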
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 43a985390bb..006e60b5830 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -401,9 +401,9 @@ TRACE_EVENT(block_plug,
DECLARE_EVENT_CLASS(block_unplug,
- TP_PROTO(struct request_queue *q),
+ TP_PROTO(struct request_queue *q, unsigned int depth),
- TP_ARGS(q),
+ TP_ARGS(q, depth),
TP_STRUCT__entry(
__field( int, nr_rq )
@@ -411,7 +411,7 @@ DECLARE_EVENT_CLASS(block_unplug,
),
TP_fast_assign(
- __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE];
+ __entry->nr_rq = depth;
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -421,15 +421,16 @@ DECLARE_EVENT_CLASS(block_unplug,
/**
* block_unplug_io - release of operations requests in request queue
* @q: request queue to unplug
+ * @depth: number of requests just added to the queue
*
* Unplug request queue @q because device driver is scheduled to work
* on elements in the request queue.
*/
DEFINE_EVENT(block_unplug, block_unplug_io,
- TP_PROTO(struct request_queue *q),
+ TP_PROTO(struct request_queue *q, unsigned int depth),
- TP_ARGS(q)
+ TP_ARGS(q, depth)
);
/**
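
Because the event class now takes a depth argument, every probe attached to block_unplug_io has to use the widened prototype. The sketch below is not from this patch; attach_unplug_probe, detach_unplug_probe and the pr_info text are made-up names. It shows roughly what a minimal in-kernel consumer could look like; blk_register_tracepoints() in kernel/trace/blktrace.c attaches its probe the same way.

#include <linux/kernel.h>
#include <trace/events/block.h>

/* Probe with the updated prototype: the void *ignore slot carries the
 * private data pointer passed at registration time (NULL here). */
static void probe_block_unplug_io(void *ignore, struct request_queue *q,
				  unsigned int depth)
{
	pr_info("unplug: flushing %u request(s)\n", depth);
}

static int attach_unplug_probe(void)
{
	/* register_trace_block_unplug_io() is generated by DEFINE_EVENT(). */
	return register_trace_block_unplug_io(probe_block_unplug_io, NULL);
}

static void detach_unplug_probe(void)
{
	unregister_trace_block_unplug_io(probe_block_unplug_io, NULL);
	tracepoint_synchronize_unregister();
}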
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 824708cbfb7..3e3970d53d1 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -850,13 +850,13 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}
-static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q)
+static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q,
+ unsigned int depth)
{
struct blk_trace *bt = q->blk_trace;
if (bt) {
- unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
- __be64 rpdu = cpu_to_be64(pdu);
+ __be64 rpdu = cpu_to_be64(depth);
__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
sizeof(rpdu), &rpdu);