| author    | Gautham R Shenoy <ego@in.ibm.com>                | 2008-01-25 21:08:02 +0100 |
| committer | Ingo Molnar <mingo@elte.hu>                      | 2008-01-25 21:08:02 +0100 |
| commit    | 95402b3829010fe1e208f44e4a158ccade88969a (patch) |                           |
| tree      | 3b9895b47623b4673e3c11121980e5171af76bbe /kernel |                           |
| parent    | 86ef5c9a8edd78e6bf92879f32329d89b2d55b5a (diff)  |                           |
cpu-hotplug: replace per-subsystem mutexes with get_online_cpus()
This patch converts the known per-subsystem CPU-hotplug mutexes to
get_online_cpus()/put_online_cpus(). It also eliminates the CPU_LOCK_ACQUIRE
and CPU_LOCK_RELEASE hotplug notification events.
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
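As a minimal illustrative sketch (not part of this patch; the my_subsys_* names are hypothetical), the conversion the changelog describes amounts to bracketing CPU-set traversals with get_online_cpus()/put_online_cpus() instead of taking a private hotplug mutex via the now-removed CPU_LOCK_ACQUIRE/CPU_LOCK_RELEASE notifier events:

    #include <linux/cpu.h>          /* get_online_cpus(), put_online_cpus() */
    #include <linux/percpu.h>

    /* Hypothetical per-CPU state owned by an example subsystem. */
    static DEFINE_PER_CPU(int, my_subsys_counter);

    static void my_subsys_refresh(void)
    {
            int cpu;

            /*
             * Pin the set of online CPUs for the duration of the walk;
             * this replaces taking a private "my_subsys_hotcpu_mutex".
             */
            get_online_cpus();
            for_each_online_cpu(cpu)
                    per_cpu(my_subsys_counter, cpu) = 0;
            put_online_cpus();
    }

get_online_cpus() is a refcounted read side: any number of subsystems may hold it concurrently, and a CPU hotplug operation can only proceed once every holder has dropped it with put_online_cpus().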
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/cpu.c       |  4 |
-rw-r--r-- | kernel/sched.c     | 25 |
-rw-r--r-- | kernel/workqueue.c | 35 |
3 files changed, 24 insertions(+), 40 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index b0c4152995f..e0d3a4f56ec 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -218,7 +218,6 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
                 return -EINVAL;
 
         cpu_hotplug_begin();
-        raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
         err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
                                         hcpu, -1, &nr_calls);
         if (err == NOTIFY_BAD) {
@@ -271,7 +270,6 @@ out_thread:
 out_allowed:
         set_cpus_allowed(current, old_allowed);
 out_release:
-        raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
         cpu_hotplug_done();
         return err;
 }
@@ -302,7 +300,6 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
                 return -EINVAL;
 
         cpu_hotplug_begin();
-        raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
         ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
                                         -1, &nr_calls);
         if (ret == NOTIFY_BAD) {
@@ -326,7 +323,6 @@ out_notify:
         if (ret != 0)
                 __raw_notifier_call_chain(&cpu_chain,
                                 CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
-        raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
         cpu_hotplug_done();
 
         return ret;
diff --git a/kernel/sched.c b/kernel/sched.c
index 672aa68bfea..c0e2db683e2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -439,7 +439,6 @@ struct rq {
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-static DEFINE_MUTEX(sched_hotcpu_mutex);
 
 static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
 {
@@ -4546,13 +4545,13 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
         struct task_struct *p;
         int retval;
 
-        mutex_lock(&sched_hotcpu_mutex);
+        get_online_cpus();
         read_lock(&tasklist_lock);
 
         p = find_process_by_pid(pid);
         if (!p) {
                 read_unlock(&tasklist_lock);
-                mutex_unlock(&sched_hotcpu_mutex);
+                put_online_cpus();
                 return -ESRCH;
         }
 
@@ -4592,7 +4591,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
         }
 out_unlock:
         put_task_struct(p);
-        mutex_unlock(&sched_hotcpu_mutex);
+        put_online_cpus();
         return retval;
 }
 
@@ -4649,7 +4648,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
         struct task_struct *p;
         int retval;
 
-        mutex_lock(&sched_hotcpu_mutex);
+        get_online_cpus();
         read_lock(&tasklist_lock);
 
         retval = -ESRCH;
@@ -4665,7 +4664,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
 
 out_unlock:
         read_unlock(&tasklist_lock);
-        mutex_unlock(&sched_hotcpu_mutex);
+        put_online_cpus();
 
         return retval;
 }
@@ -5625,9 +5624,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
         struct rq *rq;
 
         switch (action) {
-        case CPU_LOCK_ACQUIRE:
-                mutex_lock(&sched_hotcpu_mutex);
-                break;
 
         case CPU_UP_PREPARE:
         case CPU_UP_PREPARE_FROZEN:
@@ -5697,9 +5693,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                 spin_unlock_irq(&rq->lock);
                 break;
 #endif
-        case CPU_LOCK_RELEASE:
-                mutex_unlock(&sched_hotcpu_mutex);
-                break;
         }
         return NOTIFY_OK;
 }
@@ -6655,10 +6648,10 @@ static int arch_reinit_sched_domains(void)
 {
         int err;
 
-        mutex_lock(&sched_hotcpu_mutex);
+        get_online_cpus();
         detach_destroy_domains(&cpu_online_map);
         err = arch_init_sched_domains(&cpu_online_map);
-        mutex_unlock(&sched_hotcpu_mutex);
+        put_online_cpus();
 
         return err;
 }
@@ -6769,12 +6762,12 @@ void __init sched_init_smp(void)
 {
         cpumask_t non_isolated_cpus;
 
-        mutex_lock(&sched_hotcpu_mutex);
+        get_online_cpus();
         arch_init_sched_domains(&cpu_online_map);
         cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
         if (cpus_empty(non_isolated_cpus))
                 cpu_set(smp_processor_id(), non_isolated_cpus);
-        mutex_unlock(&sched_hotcpu_mutex);
+        put_online_cpus();
 
         /* XXX: Theoretical race here - CPU may be hotplugged now */
         hotcpu_notifier(update_sched_domains, 0);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8db0b597509..52db48e7f6e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -67,9 +67,8 @@ struct workqueue_struct {
 #endif
 };
 
-/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
-   threads to each one as cpus come/go. */
-static DEFINE_MUTEX(workqueue_mutex);
+/* Serializes the accesses to the list of workqueues. */
+static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
 
 static int singlethread_cpu __read_mostly;
@@ -592,8 +591,6 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
  * Returns zero on success.
  * Returns -ve errno on failure.
  *
- * Appears to be racy against CPU hotplug.
- *
  * schedule_on_each_cpu() is very slow.
  */
 int schedule_on_each_cpu(work_func_t func)
@@ -605,7 +602,7 @@ int schedule_on_each_cpu(work_func_t func)
         if (!works)
                 return -ENOMEM;
 
-        preempt_disable();              /* CPU hotplug */
+        get_online_cpus();
         for_each_online_cpu(cpu) {
                 struct work_struct *work = per_cpu_ptr(works, cpu);
 
@@ -613,8 +610,8 @@ int schedule_on_each_cpu(work_func_t func)
                 set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
                 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
         }
-        preempt_enable();
         flush_workqueue(keventd_wq);
+        put_online_cpus();
         free_percpu(works);
         return 0;
 }
@@ -750,8 +747,10 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
                 err = create_workqueue_thread(cwq, singlethread_cpu);
                 start_workqueue_thread(cwq, -1);
         } else {
-                mutex_lock(&workqueue_mutex);
+                get_online_cpus();
+                spin_lock(&workqueue_lock);
                 list_add(&wq->list, &workqueues);
+                spin_unlock(&workqueue_lock);
 
                 for_each_possible_cpu(cpu) {
                         cwq = init_cpu_workqueue(wq, cpu);
@@ -760,7 +759,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
                         err = create_workqueue_thread(cwq, cpu);
                         start_workqueue_thread(cwq, cpu);
                 }
-                mutex_unlock(&workqueue_mutex);
+                put_online_cpus();
         }
 
         if (err) {
@@ -775,7 +774,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 {
         /*
          * Our caller is either destroy_workqueue() or CPU_DEAD,
-         * workqueue_mutex protects cwq->thread
+         * get_online_cpus() protects cwq->thread.
          */
         if (cwq->thread == NULL)
                 return;
@@ -810,9 +809,11 @@ void destroy_workqueue(struct workqueue_struct *wq)
         struct cpu_workqueue_struct *cwq;
         int cpu;
 
-        mutex_lock(&workqueue_mutex);
+        get_online_cpus();
+        spin_lock(&workqueue_lock);
         list_del(&wq->list);
-        mutex_unlock(&workqueue_mutex);
+        spin_unlock(&workqueue_lock);
+        put_online_cpus();
 
         for_each_cpu_mask(cpu, *cpu_map) {
                 cwq = per_cpu_ptr(wq->cpu_wq, cpu);
@@ -835,13 +836,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
         action &= ~CPU_TASKS_FROZEN;
 
         switch (action) {
-        case CPU_LOCK_ACQUIRE:
-                mutex_lock(&workqueue_mutex);
-                return NOTIFY_OK;
-
-        case CPU_LOCK_RELEASE:
-                mutex_unlock(&workqueue_mutex);
-                return NOTIFY_OK;
 
         case CPU_UP_PREPARE:
                 cpu_set(cpu, cpu_populated_map);
@@ -854,7 +848,8 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                 case CPU_UP_PREPARE:
                         if (!create_workqueue_thread(cwq, cpu))
                                 break;
-                        printk(KERN_ERR "workqueue for %i failed\n", cpu);
+                        printk(KERN_ERR "workqueue [%s] for %i failed\n",
+                                wq->name, cpu);
                         return NOTIFY_BAD;
 
                 case CPU_ONLINE:
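The workqueue portion of the diff also shows the resulting split of responsibilities: a plain spinlock now covers only the list of workqueues, while get_online_cpus()/put_online_cpus() keep the online-CPU set stable across the per-CPU thread setup and teardown. A simplified, hypothetical sketch of that pattern (the my_wq_* names are illustrative, not the kernel's API):

    #include <linux/cpu.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    /* Hypothetical registry of objects that own per-CPU resources. */
    static DEFINE_SPINLOCK(my_wq_lock);     /* protects my_wq_list only */
    static LIST_HEAD(my_wq_list);

    struct my_wq {
            struct list_head list;
    };

    static void my_wq_register(struct my_wq *wq)
    {
            /* Prevent CPUs from coming or going during per-CPU setup. */
            get_online_cpus();

            /* The short list manipulation needs only the spinlock. */
            spin_lock(&my_wq_lock);
            list_add(&wq->list, &my_wq_list);
            spin_unlock(&my_wq_lock);

            /* ... create per-CPU threads for each possible CPU here ... */

            put_online_cpus();
    }

Keeping the list under its own spinlock means readers and writers of the registry never have to depend on the hotplug refcount, while the hotplug read side is held only around the work that actually touches per-CPU state.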