author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2013-01-10 16:21:07 -0800
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2013-01-28 22:19:54 -0800
commit	0e11c8e8a60f8591556d142c2e1e53eaf86ab528 (patch)
tree	98a824aba939ebd38178e60a3ba4eb97a29fb97c /kernel/rcutorture.c
parent	7e8b1e78ea028cbd32337e2aea574a8466c796bb (diff)
rcu: Make rcutorture's shuffler task shuffle recently added tasks
A number of kthreads have been added to rcutorture, but the shuffler task was not informed of them, and thus did not shuffle them. This commit therefore adds the requisite shuffling and, while in the area, fixes up some whitespace issues. However, the shuffling is intended to keep randomly selected CPUs idle, which means that the RCU priority-boosting kthreads need to avoid waking up every jiffy. This commit also makes that fix.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
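The boost-kthread part of the change amounts to sleeping once for the whole remaining interval rather than polling every jiffy, so that a CPU the shuffler has vacated can actually stay idle. A minimal sketch of that idiom, assuming only standard kernel primitives (the helper name wait_until_jiffies is made up for illustration and is not part of the patch):

#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/*
 * Illustrative only, not from the patch: sleep until a jiffies
 * deadline in one shot.  Waking once per jiffy (the old behavior)
 * would pull a shuffled-off CPU out of idle on every tick.
 */
static void wait_until_jiffies(unsigned long deadline)
{
	while (time_before(jiffies, deadline) && !kthread_should_stop())
		schedule_timeout_interruptible(deadline - jiffies);
}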
Diffstat (limited to 'kernel/rcutorture.c')
-rw-r--r--	kernel/rcutorture.c	24
1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index a583f1ce713..3ebc8bfb552 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -846,7 +846,7 @@ static int rcu_torture_boost(void *arg)
 		/* Wait for the next test interval. */
 		oldstarttime = boost_starttime;
 		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
-			schedule_timeout_uninterruptible(1);
+			schedule_timeout_interruptible(oldstarttime - jiffies);
 			rcu_stutter_wait("rcu_torture_boost");
 			if (kthread_should_stop() ||
 			    fullstop != FULLSTOP_DONTSTOP)
@@ -1318,19 +1318,35 @@ static void rcu_torture_shuffle_tasks(void)
 				set_cpus_allowed_ptr(reader_tasks[i],
 						     shuffle_tmp_mask);
 	}
-
 	if (fakewriter_tasks) {
 		for (i = 0; i < nfakewriters; i++)
 			if (fakewriter_tasks[i])
 				set_cpus_allowed_ptr(fakewriter_tasks[i],
 						     shuffle_tmp_mask);
 	}
-
 	if (writer_task)
 		set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
-
 	if (stats_task)
 		set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
+	if (stutter_task)
+		set_cpus_allowed_ptr(stutter_task, shuffle_tmp_mask);
+	if (fqs_task)
+		set_cpus_allowed_ptr(fqs_task, shuffle_tmp_mask);
+	if (shutdown_task)
+		set_cpus_allowed_ptr(shutdown_task, shuffle_tmp_mask);
+#ifdef CONFIG_HOTPLUG_CPU
+	if (onoff_task)
+		set_cpus_allowed_ptr(onoff_task, shuffle_tmp_mask);
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+	if (stall_task)
+		set_cpus_allowed_ptr(stall_task, shuffle_tmp_mask);
+	if (barrier_cbs_tasks)
+		for (i = 0; i < n_barrier_cbs; i++)
+			if (barrier_cbs_tasks[i])
+				set_cpus_allowed_ptr(barrier_cbs_tasks[i],
+						     shuffle_tmp_mask);
+	if (barrier_task)
+		set_cpus_allowed_ptr(barrier_task, shuffle_tmp_mask);
 
 	if (rcu_idle_cpu == -1)
 		rcu_idle_cpu = num_online_cpus() - 1;
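The second hunk repeats one idiom per kthread: check the pointer for NULL (the task may not have been started for this test run) and, if set, rebind it with set_cpus_allowed_ptr() to the mask that excludes rcu_idle_cpu. A hypothetical helper, shown only to make the repeated pattern explicit (shuffle_one is not in the patch):

#include <linux/cpumask.h>
#include <linux/sched.h>

/*
 * Hypothetical helper, not in the patch: rebind one torture kthread,
 * if it exists, to a mask that leaves the chosen CPU idle.
 */
static void shuffle_one(struct task_struct *tp, const struct cpumask *mask)
{
	if (tp)
		set_cpus_allowed_ptr(tp, mask);
}

The patch itself keeps each check inline (and walks arrays such as barrier_cbs_tasks with a loop), which is why every newly added kthread must be listed here explicitly and why the earlier additions were missed until this commit.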