author    Ingo Molnar <mingo@elte.hu>    2007-07-09 18:51:58 +0200
committer Ingo Molnar <mingo@elte.hu>    2007-07-09 18:51:58 +0200
commit    20d315d42aed95423a7203e1d7e84086004b5a00 (patch)
tree      3649d00c3ed1053783727333de1291a71bdb3ca4 /kernel
parent    6aa645ea5f7a246702e07f29edc7075d487ae4a3 (diff)
sched: add rq_clock()/__rq_clock()
add rq_clock()/__rq_clock(), a robust wrapper around sched_clock(), used by CFS. It protects against common types of sched_clock() problems (caused by hardware): time warping forwards and backwards.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
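The filtering logic is easy to exercise outside the kernel. Below is a minimal user-space sketch of the same technique; the names (filtered_clock, clock_update) and the 1ms TICK_NSEC value are stand-ins invented for this illustration, not kernel code:

#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 1000000ULL	/* stand-in: pretend HZ=1000, so one tick = 1ms */

struct filtered_clock {
	uint64_t prev_raw;	/* last raw clock reading */
	uint64_t clock;		/* filtered, monotonic output */
	uint64_t warps;		/* raw clock went backwards */
	uint64_t overflows;	/* raw clock jumped forward too far */
	uint64_t max_delta;	/* largest forward step accepted */
};

/* Same idea as __rq_clock(): accept deltas up to 2*TICK_NSEC as-is;
 * on a backwards warp or an oversized forward jump, advance by just
 * 1ns so the output still moves monotonically forward. */
static uint64_t clock_update(struct filtered_clock *c, uint64_t raw_now)
{
	int64_t delta = (int64_t)(raw_now - c->prev_raw);

	if (delta < 0) {
		c->clock++;
		c->warps++;
	} else if ((uint64_t)delta > 2 * TICK_NSEC) {
		c->clock++;
		c->overflows++;
	} else {
		if ((uint64_t)delta > c->max_delta)
			c->max_delta = (uint64_t)delta;
		c->clock += (uint64_t)delta;
	}

	c->prev_raw = raw_now;
	return c->clock;
}

int main(void)
{
	struct filtered_clock c = { .prev_raw = 1000 };
	/* a plausible step, a backwards warp, recovery, a huge jump */
	uint64_t raw[] = { 1500, 900, 950, 950 + 5 * TICK_NSEC };

	for (unsigned int i = 0; i < sizeof(raw) / sizeof(raw[0]); i++)
		printf("raw=%llu -> clock=%llu\n",
		       (unsigned long long)raw[i],
		       (unsigned long long)clock_update(&c, raw[i]));

	printf("warps=%llu overflows=%llu max_delta=%llu\n",
	       (unsigned long long)c.warps,
	       (unsigned long long)c.overflows,
	       (unsigned long long)c.max_delta);
	return 0;
}

Feeding it a raw sequence that warps backwards and then jumps far forwards shows the clock creeping ahead by 1ns in both cases instead of mirroring the bad delta.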
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	46
1 file changed, 46 insertions, 0 deletions
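The hunk below relies on five clock bookkeeping fields on struct rq (prev_clock_raw, clock, clock_warps, clock_overflows, clock_max_delta) that are not part of this diff; they are added elsewhere in the CFS series. A minimal sketch of what the patch assumes, with types inferred from their use in __rq_clock() rather than copied from the tree:

	/* clock bookkeeping assumed by this patch (sketch, types inferred) */
	u64 prev_clock_raw;	/* last raw sched_clock() reading */
	u64 clock;		/* filtered, monotonic per-rq clock */
	u64 clock_warps;	/* times sched_clock() went backwards */
	u64 clock_overflows;	/* times it jumped past 2*TICK_NSEC */
	u64 clock_max_delta;	/* largest forward step accepted */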
diff --git a/kernel/sched.c b/kernel/sched.c
index 085418bedcc..29eb227e33f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -389,6 +389,52 @@ static inline int cpu_of(struct rq *rq)
 }
 
 /*
+ * Per-runqueue clock, as finegrained as the platform can give us:
+ */
+static unsigned long long __rq_clock(struct rq *rq)
+{
+	u64 prev_raw = rq->prev_clock_raw;
+	u64 now = sched_clock();
+	s64 delta = now - prev_raw;
+	u64 clock = rq->clock;
+
+	/*
+	 * Protect against sched_clock() occasionally going backwards:
+	 */
+	if (unlikely(delta < 0)) {
+		clock++;
+		rq->clock_warps++;
+	} else {
+		/*
+		 * Catch too large forward jumps too:
+		 */
+		if (unlikely(delta > 2*TICK_NSEC)) {
+			clock++;
+			rq->clock_overflows++;
+		} else {
+			if (unlikely(delta > rq->clock_max_delta))
+				rq->clock_max_delta = delta;
+			clock += delta;
+		}
+	}
+
+	rq->prev_clock_raw = now;
+	rq->clock = clock;
+
+	return clock;
+}
+
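+/*
+ * rq_clock() only advances the clock on the CPU that owns the
+ * runqueue; callers on other CPUs get the last cached rq->clock:
+ */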
+static inline unsigned long long rq_clock(struct rq *rq)
+{
+	int this_cpu = smp_processor_id();
+
+	if (this_cpu == cpu_of(rq))
+		return __rq_clock(rq);
+
+	return rq->clock;
+}
+
+/*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
  * See detach_destroy_domains: synchronize_sched for details.
  *