Diffstat (limited to 'include/trace')
-rw-r--r--  include/trace/events/rcu.h | 122
1 file changed, 109 insertions(+), 13 deletions(-)
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 669fbd62ec2..d2d88bed891 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -241,24 +241,73 @@ TRACE_EVENT(rcu_fqs,
/*
* Tracepoint for dyntick-idle entry/exit events. These take a string
- * as argument: "Start" for entering dyntick-idle mode and "End" for
- * leaving it.
+ * as argument: "Start" for entering dyntick-idle mode, "End" for
+ * leaving it, "--=" for events moving towards idle, and "++=" for events
+ * moving away from idle. "Error on entry: not idle task" and "Error on
+ * exit: not idle task" indicate that a non-idle task is erroneously
+ * toying with the idle loop.
+ *
+ * These events also take a pair of numbers, which indicate the nesting
+ * depth before and after the event of interest. Note that task-related
+ * events use the upper bits of each number, while interrupt-related
+ * events use the lower bits.
*/
TRACE_EVENT(rcu_dyntick,
- TP_PROTO(char *polarity),
+ TP_PROTO(char *polarity, long long oldnesting, long long newnesting),
- TP_ARGS(polarity),
+ TP_ARGS(polarity, oldnesting, newnesting),
TP_STRUCT__entry(
__field(char *, polarity)
+ __field(long long, oldnesting)
+ __field(long long, newnesting)
),
TP_fast_assign(
__entry->polarity = polarity;
+ __entry->oldnesting = oldnesting;
+ __entry->newnesting = newnesting;
+ ),
+
+ TP_printk("%s %llx %llx", __entry->polarity,
+ __entry->oldnesting, __entry->newnesting)
+);
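/*
 * Illustrative sketch, not part of this patch: a hypothetical call site
 * showing the widened trace_rcu_dyntick() signature.  "oldval" is a
 * placeholder for the pre-transition nesting value; the real callers
 * live in the RCU core and are not shown here.
 */
trace_rcu_dyntick("Start", oldval, 0);          /* task-level entry into dyntick-idle */
trace_rcu_dyntick("++=", oldval, oldval + 1);   /* irq-level event moving away from idle */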
+
+/*
+ * Tracepoint for RCU preparation for idle, the goal being to get RCU
+ * processing done so that the current CPU can shut off its scheduling
+ * clock and enter dyntick-idle mode. One way to accomplish this is
+ * to drain all RCU callbacks from this CPU, and the other is to have
+ * done everything RCU requires for the current grace period. In this
+ * latter case, the CPU will be awakened at the end of the current grace
+ * period in order to process the remainder of its callbacks.
+ *
+ * These tracepoints take a string as argument:
+ *
+ * "No callbacks": Nothing to do, no callbacks on this CPU.
+ * "In holdoff": Nothing to do, holding off after unsuccessful attempt.
+ * "Begin holdoff": Attempt failed, don't retry until next jiffy.
+ * "Dyntick with callbacks": Entering dyntick-idle despite callbacks.
+ * "More callbacks": Still more callbacks, try again to clear them out.
+ * "Callbacks drained": All callbacks processed, off to dyntick idle!
+ * "Timer": Timer fired to cause CPU to continue processing callbacks.
+ */
+TRACE_EVENT(rcu_prep_idle,
+
+ TP_PROTO(char *reason),
+
+ TP_ARGS(reason),
+
+ TP_STRUCT__entry(
+ __field(char *, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->reason = reason;
),
- TP_printk("%s", __entry->polarity)
+ TP_printk("%s", __entry->reason)
);
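/*
 * Illustrative sketch, not part of this patch: "no_cbs" stands in for
 * whatever test the caller uses to detect an empty callback list; the
 * reason string must be one of those listed in the comment above.
 */
if (no_cbs)
	trace_rcu_prep_idle("No callbacks");
else
	trace_rcu_prep_idle("More callbacks");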
/*
@@ -412,27 +461,71 @@ TRACE_EVENT(rcu_invoke_kfree_callback,
/*
* Tracepoint for exiting rcu_do_batch after RCU callbacks have been
- * invoked. The first argument is the name of the RCU flavor and
- * the second argument is number of callbacks actually invoked.
+ * invoked. The first argument is the name of the RCU flavor,
+ * the second argument is the number of callbacks actually invoked,
+ * the third argument (cb) is whether or not any of the callbacks that
+ * were ready to invoke at the beginning of this batch are still
+ * queued, the fourth argument (nr) is the return value of need_resched(),
+ * the fifth argument (iit) is 1 if the current task is the idle task,
+ * and the sixth argument (risk) is the return value from
+ * rcu_is_callbacks_kthread().
*/
TRACE_EVENT(rcu_batch_end,
- TP_PROTO(char *rcuname, int callbacks_invoked),
+ TP_PROTO(char *rcuname, int callbacks_invoked,
+ bool cb, bool nr, bool iit, bool risk),
- TP_ARGS(rcuname, callbacks_invoked),
+ TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
TP_STRUCT__entry(
__field(char *, rcuname)
__field(int, callbacks_invoked)
+ __field(bool, cb)
+ __field(bool, nr)
+ __field(bool, iit)
+ __field(bool, risk)
),
TP_fast_assign(
__entry->rcuname = rcuname;
__entry->callbacks_invoked = callbacks_invoked;
+ __entry->cb = cb;
+ __entry->nr = nr;
+ __entry->iit = iit;
+ __entry->risk = risk;
+ ),
+
+ TP_printk("%s CBs-invoked=%d idle=%c%c%c%c",
+ __entry->rcuname, __entry->callbacks_invoked,
+ __entry->cb ? 'C' : '.',
+ __entry->nr ? 'S' : '.',
+ __entry->iit ? 'I' : '.',
+ __entry->risk ? 'R' : '.')
+);
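/*
 * Illustrative sketch, not part of this patch: "count", "cbs_left",
 * "on_idle_task", and "on_cb_kthread" are hypothetical locals standing
 * in for the values described in the comment above.  With the format
 * string above, a batch that left ready callbacks queued and saw
 * need_resched() set would print something like
 * "rcu_sched CBs-invoked=10 idle=CS..".
 */
trace_rcu_batch_end("rcu_sched", count,
		    cbs_left != 0,		/* cb:   ready callbacks still queued */
		    need_resched() != 0,	/* nr:   reschedule requested */
		    on_idle_task,		/* iit:  running in the idle task */
		    on_cb_kthread);		/* risk: rcu_is_callbacks_kthread() said yes */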
+
+/*
+ * Tracepoint for rcutorture readers. The first argument is the name
+ * of the RCU flavor from rcutorture's viewpoint and the second argument
+ * is the callback address.
+ */
+TRACE_EVENT(rcu_torture_read,
+
+ TP_PROTO(char *rcutorturename, struct rcu_head *rhp),
+
+ TP_ARGS(rcutorturename, rhp),
+
+ TP_STRUCT__entry(
+ __field(char *, rcutorturename)
+ __field(struct rcu_head *, rhp)
+ ),
+
+ TP_fast_assign(
+ __entry->rcutorturename = rcutorturename;
+ __entry->rhp = rhp;
),
- TP_printk("%s CBs-invoked=%d",
- __entry->rcuname, __entry->callbacks_invoked)
+ TP_printk("%s torture read %p",
+ __entry->rcutorturename, __entry->rhp)
);
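/*
 * Illustrative sketch, not part of this patch: a hypothetical reader-side
 * helper reporting the rcu_head it is about to use.  "rcu" is just an
 * example flavor name as rcutorture would see it.
 */
static void example_torture_reader(struct rcu_head *rhp)
{
	trace_rcu_torture_read("rcu", rhp);
}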
#else /* #ifdef CONFIG_RCU_TRACE */
@@ -443,13 +536,16 @@ TRACE_EVENT(rcu_batch_end,
#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks) do { } while (0)
#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
-#define trace_rcu_dyntick(polarity) do { } while (0)
+#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
+#define trace_rcu_prep_idle(reason) do { } while (0)
#define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
#define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)
#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
-#define trace_rcu_batch_end(rcuname, callbacks_invoked) do { } while (0)
+#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
+ do { } while (0)
+#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
#endif /* #else #ifdef CONFIG_RCU_TRACE */
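/*
 * Illustrative note, not part of this patch: with CONFIG_RCU_TRACE=n the
 * stubs above expand to do { } while (0), so a call such as the one below
 * compiles away entirely and callers need no #ifdef guards.
 */
trace_rcu_batch_end("rcu_sched", count, false, false, false, false);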