From b433c3d4549ae74935b585115f076c6fb7bc48fe Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 28 Jun 2010 16:51:01 +0200
Subject: init, sched: Fix race between init and kthreadd

Ilya reported that on a very slow machine he could reliably reproduce
a race between forking init and kthreadd.

We first fork init so that it obtains pid-1, however since the
scheduler is already fully running at this point it can preempt and
run the init thread before we spawn and set kthreadd_task.

The init thread can then attempt spawning kthreads without kthreadd
being present, which results in an OOPS.

Reported-by: Ilya Loginov
Signed-off-by: Peter Zijlstra
Acked-by: Linus Torvalds
LKML-Reference: <1277736661.3561.110.camel@laptop>
Signed-off-by: Ingo Molnar
---
 init/main.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/init/main.c b/init/main.c
index 3bdb152f412..633442f79e9 100644
--- a/init/main.c
+++ b/init/main.c
@@ -422,18 +422,26 @@ static void __init setup_command_line(char *command_line)
  * gcc-3.4 accidentally inlines this function, so use noinline.
  */
 
+static __initdata DECLARE_COMPLETION(kthreadd_done);
+
 static noinline void __init_refok rest_init(void)
 	__releases(kernel_lock)
 {
 	int pid;
 
 	rcu_scheduler_starting();
+	/*
+	 * We need to spawn init first so that it obtains pid-1, however
+	 * the init task will end up wanting to create kthreads, which, if
+	 * we schedule it before we create kthreadd, will OOPS.
+	 */
 	kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
 	numa_default_policy();
 	pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
 	rcu_read_lock();
 	kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
 	rcu_read_unlock();
+	complete(&kthreadd_done);
 	unlock_kernel();
 
 	/*
@@ -855,6 +863,10 @@ static noinline int init_post(void)
 
 static int __init kernel_init(void * unused)
 {
+	/*
+	 * Wait until kthreadd is all set-up.
+	 */
+	wait_for_completion(&kthreadd_done);
 	lock_kernel();
 
 	/*
--
cgit v1.2.3-70-g09d2
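The fix is a completion gate: rest_init() calls complete(&kthreadd_done) only once kthreadd_task has been set, and kernel_init() blocks in wait_for_completion() until then, so even if init is scheduled first it cannot run ahead of kthreadd's setup. For readers unfamiliar with the kernel's completion API, here is a minimal userspace sketch of the same gate pattern, built on pthreads (an illustration under that assumption, not kernel code; all names are stand-ins):

/* build: cc -pthread completion_demo.c -o completion_demo */
#include <pthread.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's struct completion. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t  wait;
	int             done;
};

static struct completion kthreadd_done = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};

/* Analogue of complete(): record the event and wake all waiters. */
static void complete(struct completion *x)
{
	pthread_mutex_lock(&x->lock);
	x->done = 1;
	pthread_cond_broadcast(&x->wait);
	pthread_mutex_unlock(&x->lock);
}

/* Analogue of wait_for_completion(): block until the event has fired. */
static void wait_for_completion(struct completion *x)
{
	pthread_mutex_lock(&x->lock);
	while (!x->done)
		pthread_cond_wait(&x->wait, &x->lock);
	pthread_mutex_unlock(&x->lock);
}

/* Stands in for kernel_init(): it may be scheduled before "kthreadd"
 * exists, so it parks at the gate before spawning anything. */
static void *init_thread(void *unused)
{
	(void)unused;
	wait_for_completion(&kthreadd_done);
	printf("init: kthreadd is up, safe to spawn kthreads\n");
	return NULL;
}

int main(void)
{
	pthread_t init;

	/* "init" is created first (in the kernel it must get pid 1)... */
	pthread_create(&init, NULL, init_thread, NULL);

	/* ...and even if it runs immediately, it blocks until this
	 * thread opens the gate, mirroring rest_init() completing
	 * kthreadd_done only after kthreadd_task is set. */
	printf("main: kthreadd set up\n");
	complete(&kthreadd_done);

	pthread_join(init, NULL);
	return 0;
}

The done flag is what distinguishes a completion from a bare condition variable: a complete() that fires before the waiter arrives is not lost, and the while loop around pthread_cond_wait() absorbs spurious wakeups, just as the kernel's implementation does.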
From 9715856922bf8475f5428c29b6f4a9eebc97d391 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 30 Jun 2010 10:37:11 +0200
Subject: init: Fix comment

Apparently "pid-1" confuses people...

Requested-by: Randy Dunlap
Signed-off-by: Peter Zijlstra
Cc: torvalds@linux-foundation.org
Cc: randy.dunlap@oracle.com
Cc: Ilya Loginov
LKML-Reference: <1277887031.1868.82.camel@laptop>
Signed-off-by: Ingo Molnar
---
 init/main.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/init/main.c b/init/main.c
index 633442f79e9..1692df32244 100644
--- a/init/main.c
+++ b/init/main.c
@@ -431,7 +431,7 @@ static noinline void __init_refok rest_init(void)
 
 	rcu_scheduler_starting();
 	/*
-	 * We need to spawn init first so that it obtains pid-1, however
+	 * We need to spawn init first so that it obtains pid 1, however
 	 * the init task will end up wanting to create kthreads, which, if
 	 * we schedule it before we create kthreadd, will OOPS.
 	 */
--
cgit v1.2.3-70-g09d2

From 8c215bd3890c347dfb6a2db4779755f8b9c298a9 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 1 Jul 2010 09:07:17 +0200
Subject: sched: Cure nr_iowait_cpu() users

Commit 0224cf4c5e (sched: Intoduce get_cpu_iowait_time_us()) broke
things by not making sure preemption was indeed disabled by the
callers of nr_iowait_cpu(), which took the iowait value of the
current cpu. This resulted in a heap of preempt warnings.

Cure this by making nr_iowait_cpu() take a cpu number and fix up the
callers to pass in the right number.

Signed-off-by: Peter Zijlstra
Cc: Arjan van de Ven
Cc: Sergey Senozhatsky
Cc: Rafael J. Wysocki
Cc: Maxim Levitsky
Cc: Len Brown
Cc: Pavel Machek
Cc: Jiri Slaby
Cc: linux-pm@lists.linux-foundation.org
LKML-Reference: <1277968037.1868.120.camel@laptop>
Signed-off-by: Ingo Molnar
---
 drivers/cpuidle/governors/menu.c |  4 ++--
 include/linux/sched.h            |  2 +-
 kernel/sched.c                   |  4 ++--
 kernel/time/tick-sched.c         | 16 ++++++++--------
 4 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 52ff8aa63f8..1b128702d30 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -143,7 +143,7 @@ static inline int which_bucket(unsigned int duration)
 	 * This allows us to calculate
 	 * E(duration)|iowait
 	 */
-	if (nr_iowait_cpu())
+	if (nr_iowait_cpu(smp_processor_id()))
 		bucket = BUCKETS/2;
 
 	if (duration < 10)
@@ -175,7 +175,7 @@ static inline int performance_multiplier(void)
 	mult += 2 * get_loadavg();
 
 	/* for IO wait tasks (per cpu!) we add 5x each */
-	mult += 10 * nr_iowait_cpu();
+	mult += 10 * nr_iowait_cpu(smp_processor_id());
 
 	return mult;
 }
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f118809c953..747fcaedddb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -139,7 +139,7 @@ extern int nr_processes(void);
 extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_iowait(void);
-extern unsigned long nr_iowait_cpu(void);
+extern unsigned long nr_iowait_cpu(int cpu);
 extern unsigned long this_cpu_load(void);
 
 
diff --git a/kernel/sched.c b/kernel/sched.c
index a24d6d5d83f..f87abe3b017 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2864,9 +2864,9 @@ unsigned long nr_iowait(void)
 	return sum;
 }
 
-unsigned long nr_iowait_cpu(void)
+unsigned long nr_iowait_cpu(int cpu)
 {
-	struct rq *this = this_rq();
+	struct rq *this = cpu_rq(cpu);
 
 	return atomic_read(&this->nr_iowait);
 }
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 1d7b9bc1c03..1a6f828e57a 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -154,14 +154,14 @@ static void tick_nohz_update_jiffies(ktime_t now)
  * Updates the per cpu time idle statistics counters
  */
 static void
-update_ts_time_stats(struct tick_sched *ts, ktime_t now, u64 *last_update_time)
+update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
 {
 	ktime_t delta;
 
 	if (ts->idle_active) {
 		delta = ktime_sub(now, ts->idle_entrytime);
 		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
-		if (nr_iowait_cpu() > 0)
+		if (nr_iowait_cpu(cpu) > 0)
 			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
 		ts->idle_entrytime = now;
 	}
@@ -175,19 +175,19 @@ static void tick_nohz_stop_idle(int cpu, ktime_t now)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
-	update_ts_time_stats(ts, now, NULL);
+	update_ts_time_stats(cpu, ts, now, NULL);
 	ts->idle_active = 0;
 
 	sched_clock_idle_wakeup_event(0);
 }
 
-static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
+static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
 {
 	ktime_t now;
 
 	now = ktime_get();
 
-	update_ts_time_stats(ts, now, NULL);
+	update_ts_time_stats(cpu, ts, now, NULL);
 
 	ts->idle_entrytime = now;
 	ts->idle_active = 1;
@@ -216,7 +216,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 	if (!tick_nohz_enabled)
 		return -1;
 
-	update_ts_time_stats(ts, ktime_get(), last_update_time);
+	update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
 
 	return ktime_to_us(ts->idle_sleeptime);
 }
@@ -242,7 +242,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 	if (!tick_nohz_enabled)
 		return -1;
 
-	update_ts_time_stats(ts, ktime_get(), last_update_time);
+	update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
 
 	return ktime_to_us(ts->iowait_sleeptime);
 }
@@ -284,7 +284,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	 */
 	ts->inidle = 1;
 
-	now = tick_nohz_start_idle(ts);
+	now = tick_nohz_start_idle(cpu, ts);
 
 	/*
 	 * If this cpu is offline and it is the one which updates
--
cgit v1.2.3-70-g09d2
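The essence of the cure is moving the choice of cpu from callee to caller: the old nr_iowait_cpu(void) resolved "the current cpu" internally via this_rq(), which is only valid with preemption disabled, and nothing in the signature told callers about that obligation. With an explicit cpu argument, the tick code passes down the cpu number it already tracks, and the cpuidle governor, which runs on the idle path of the cpu in question, uses smp_processor_id(). A small standalone C sketch of that API shape (hypothetical userspace names, an illustration rather than kernel code):

#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

/* Simulated per-cpu iowait counters (stand-in for rq->nr_iowait). */
static atomic_int per_cpu_iowait[NR_CPUS];

/*
 * After the fix: the caller names the cpu whose counter is read, so
 * the "must know which cpu" obligation is visible in the signature
 * instead of hidden inside the callee.
 */
static int nr_iowait_cpu(int cpu)
{
	return atomic_load(&per_cpu_iowait[cpu]);
}

/*
 * Callers that already track a cpu number (like the tick code, which
 * threads 'cpu' through update_ts_time_stats()) simply pass it down.
 */
static void report_iowait(int cpu)
{
	printf("cpu%d: %d tasks in iowait\n", cpu, nr_iowait_cpu(cpu));
}

int main(void)
{
	atomic_store(&per_cpu_iowait[2], 3);
	report_iowait(2);   /* cpu2: 3 tasks in iowait */
	report_iowait(0);   /* cpu0: 0 tasks in iowait */
	return 0;
}

The implicit form was equivalent to nr_iowait_cpu(smp_processor_id()) but buried the per-cpu access where callers could not see its precondition; making the argument explicit is what turned a stream of preempt warnings into a signature that documents itself.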