author    | Heiko Carstens <heiko.carstens@de.ibm.com> | 2007-05-09 02:34:04 -0700
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-09 12:30:51 -0700
commit    | e7407dcc69e077ac34a527842db916abfbc458df (patch)
tree      | 138b24bceee828dc256bb81f2b592926be9f1515 /kernel
parent    | 5be9361cdff17fc76fa0c3e262ead94158555f16 (diff)
call cpu_chain with CPU_DOWN_FAILED if CPU_DOWN_PREPARE failed
This makes CPU hotplug symmetrical: if CPU_UP_PREPARE fails we get
CPU_UP_CANCELED, so we can undo whatever happened during PREPARE. The same
should happen for CPU_DOWN_PREPARE: if it fails, call cpu_chain with
CPU_DOWN_FAILED.
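
(Illustration, not part of the patch: a minimal sketch of a subsystem notifier that relies on this symmetry, handling the rollback event for both hotplug directions in one place. foo_prepare_cpu() and foo_undo_cpu() are hypothetical helpers, not kernel APIs.)

#include <linux/cpu.h>
#include <linux/notifier.h>

/* Hypothetical per-CPU setup/teardown helpers -- placeholders only. */
static int foo_prepare_cpu(unsigned int cpu) { return 0; }
static void foo_undo_cpu(unsigned int cpu) { }

static int foo_cpu_callback(struct notifier_block *nb,
			    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_DOWN_PREPARE:
		if (foo_prepare_cpu(cpu))
			return NOTIFY_BAD;	/* veto the transition */
		break;
	case CPU_UP_CANCELED:
	case CPU_DOWN_FAILED:
		/* Undo whatever PREPARE set up; with this patch the
		 * rollback notification arrives in both directions. */
		foo_undo_cpu(cpu);
		break;
	}
	return NOTIFY_OK;
}
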
[akpm@linux-foundation.org: fix for reduce-size-of-task_struct-on-64-bit-machines]
Cc: Srivatsa Vaddagiri <vatsa@in.ibm.com>
Cc: Gautham Shenoy <ego@in.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/cpu.c | 19 |
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 48810498b35..1a823944e97 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -97,7 +97,7 @@ static inline void check_for_tasks(int cpu)
 		    (!cputime_eq(p->utime, cputime_zero) ||
 		     !cputime_eq(p->stime, cputime_zero)))
 			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
-				(state = %ld, flags = %lx) \n",
+				(state = %ld, flags = %x) \n",
 				 p->comm, p->pid, cpu, p->state, p->flags);
 	}
 	write_unlock_irq(&tasklist_lock);
@@ -122,9 +122,10 @@ static int take_cpu_down(void *unused)
 /* Requires cpu_add_remove_lock to be held */
 static int _cpu_down(unsigned int cpu)
 {
-	int err;
+	int err, nr_calls = 0;
 	struct task_struct *p;
 	cpumask_t old_allowed, tmp;
+	void *hcpu = (void *)(long)cpu;
 
 	if (num_online_cpus() == 1)
 		return -EBUSY;
@@ -132,11 +133,12 @@ static int _cpu_down(unsigned int cpu)
 	if (!cpu_online(cpu))
 		return -EINVAL;
 
-	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE,
-						(void *)(long)cpu);
-	err = raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
-						(void *)(long)cpu);
+	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
+	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
+					hcpu, -1, &nr_calls);
 	if (err == NOTIFY_BAD) {
+		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, hcpu,
+					  nr_calls, NULL);
 		printk("%s: attempt to take down CPU %u failed\n",
 				__FUNCTION__, cpu);
 		err = -EINVAL;
@@ -156,7 +158,7 @@ static int _cpu_down(unsigned int cpu)
 	if (IS_ERR(p) || cpu_online(cpu)) {
 		/* CPU didn't die: tell everyone. Can't complain. */
 		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
-				(void *)(long)cpu) == NOTIFY_BAD)
+				hcpu) == NOTIFY_BAD)
 			BUG();
 
 		if (IS_ERR(p)) {
@@ -178,8 +180,7 @@ static int _cpu_down(unsigned int cpu)
 	put_cpu();
 
 	/* CPU is completely dead: tell everyone. Too late to complain. */
-	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD,
-			(void *)(long)cpu) == NOTIFY_BAD)
+	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD, hcpu) == NOTIFY_BAD)
 		BUG();
 
 	check_for_tasks(cpu);
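
(Context, not part of the patch: __raw_notifier_call_chain() extends raw_notifier_call_chain() with a limit on how many callbacks to invoke (-1 means all) and an optional counter reporting how many were actually called. Feeding that count back in on the failure path, as the hunk above does, confines the CPU_DOWN_FAILED replay to the notifiers that already saw CPU_DOWN_PREPARE. A minimal sketch of the same pattern follows; my_chain, MY_PREPARE and MY_FAILED are made-up names, not kernel constants.)

#include <linux/notifier.h>

#define MY_PREPARE	0x0001	/* hypothetical event values */
#define MY_FAILED	0x0002

static RAW_NOTIFIER_HEAD(my_chain);

static int my_transition(void *hdata)
{
	int nr_calls = 0;
	int err;

	/* Invoke every notifier, recording how many actually ran. */
	err = __raw_notifier_call_chain(&my_chain, MY_PREPARE, hdata,
					-1, &nr_calls);
	if (err == NOTIFY_BAD) {
		/* Replay only the first nr_calls notifiers -- the ones
		 * that were given MY_PREPARE -- just as _cpu_down()
		 * now does with CPU_DOWN_FAILED. */
		__raw_notifier_call_chain(&my_chain, MY_FAILED, hdata,
					  nr_calls, NULL);
		return -EINVAL;
	}
	return 0;
}
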