Diffstat (limited to 'arch')
 arch/sh/Kconfig               |   7
 arch/sh/include/asm/irq.h     |   3
 arch/sh/include/asm/smp-ops.h |  16
 arch/sh/include/asm/smp.h     |  19
 arch/sh/kernel/idle.c         |   6
 arch/sh/kernel/irq.c          |  42
 arch/sh/kernel/localtimer.c   |   4
 arch/sh/kernel/smp.c          | 103
 8 files changed, 194 insertions(+), 6 deletions(-)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index ba86bfba95a..d73bd4db5e8 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -706,6 +706,13 @@ config NR_CPUS
 	  This is purely to save memory - each supported CPU adds
 	  approximately eight kilobytes to the kernel image.
 
+config HOTPLUG_CPU
+	bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
+	depends on SMP && HOTPLUG && EXPERIMENTAL
+	help
+	  Say Y here to experiment with turning CPUs off and on.  CPUs
+	  can be controlled through /sys/devices/system/cpu.
+
 source "kernel/Kconfig.preempt"
 
 config GUSA
diff --git a/arch/sh/include/asm/irq.h b/arch/sh/include/asm/irq.h
index 99c593b3a82..02c2f0102cf 100644
--- a/arch/sh/include/asm/irq.h
+++ b/arch/sh/include/asm/irq.h
@@ -1,6 +1,7 @@
 #ifndef __ASM_SH_IRQ_H
 #define __ASM_SH_IRQ_H
 
+#include <linux/cpumask.h>
 #include <asm/machvec.h>
 
 /*
@@ -50,6 +51,8 @@ static inline int generic_irq_demux(int irq)
 #define irq_demux(irq)		sh_mv.mv_irq_demux(irq)
 
 void init_IRQ(void);
+void migrate_irqs(void);
+
 asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs);
 
 #ifdef CONFIG_IRQSTACKS
diff --git a/arch/sh/include/asm/smp-ops.h b/arch/sh/include/asm/smp-ops.h
index 0581b2a4c8c..c590f76856f 100644
--- a/arch/sh/include/asm/smp-ops.h
+++ b/arch/sh/include/asm/smp-ops.h
@@ -7,20 +7,27 @@ struct plat_smp_ops {
 	void (*prepare_cpus)(unsigned int max_cpus);
 	void (*start_cpu)(unsigned int cpu, unsigned long entry_point);
 	void (*send_ipi)(unsigned int cpu, unsigned int message);
+	int (*cpu_disable)(unsigned int cpu);
+	void (*cpu_die)(unsigned int cpu);
+	void (*play_dead)(void);
 };
 
+extern struct plat_smp_ops *mp_ops;
 extern struct plat_smp_ops shx3_smp_ops;
 
 #ifdef CONFIG_SMP
 
 static inline void plat_smp_setup(void)
 {
-	extern struct plat_smp_ops *mp_ops;	/* private */
-
 	BUG_ON(!mp_ops);
 	mp_ops->smp_setup();
 }
 
+static inline void play_dead(void)
+{
+	mp_ops->play_dead();
+}
+
 extern void register_smp_ops(struct plat_smp_ops *ops);
 
 #else
@@ -34,6 +41,11 @@ static inline void register_smp_ops(struct plat_smp_ops *ops)
 {
 }
 
+static inline void play_dead(void)
+{
+	BUG();
+}
+
 #endif /* CONFIG_SMP */
 
 #endif /* __ASM_SH_SMP_OPS_H */
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index da5135b2579..9070d943ddd 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -38,9 +38,26 @@ void smp_timer_broadcast(const struct cpumask *mask);
 
 void local_timer_interrupt(void);
 void local_timer_setup(unsigned int cpu);
+void local_timer_stop(unsigned int cpu);
 
 void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+void native_play_dead(void);
+void native_cpu_die(unsigned int cpu);
+int native_cpu_disable(unsigned int cpu);
+
+#ifdef CONFIG_HOTPLUG_CPU
+void play_dead_common(void);
+extern int __cpu_disable(void);
+
+static inline void __cpu_die(unsigned int cpu)
+{
+	extern struct plat_smp_ops *mp_ops;	/* private */
+
+	mp_ops->cpu_die(cpu);
+}
+#endif
 
 static inline int hard_smp_processor_id(void)
 {
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 204005329fe..425d604e3a2 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -19,6 +19,7 @@
 #include <asm/pgalloc.h>
 #include <asm/system.h>
 #include <asm/atomic.h>
+#include <asm/smp.h>
 
 void (*pm_idle)(void) = NULL;
 
@@ -89,10 +90,13 @@ void cpu_idle(void)
 	while (1) {
 		tick_nohz_stop_sched_tick(1);
 
-		while (!need_resched() && cpu_online(cpu)) {
+		while (!need_resched()) {
 			check_pgt_cache();
 			rmb();
 
+			if (cpu_is_offline(cpu))
+				play_dead();
+
 			local_irq_disable();
 			/* Don't trace irqs off for idle */
 			stop_critical_timings();
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index f6a9319c28e..257de1f0692 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -12,6 +12,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
 #include <linux/ftrace.h>
+#include <linux/delay.h>
 #include <asm/processor.h>
 #include <asm/machvec.h>
 #include <asm/uaccess.h>
@@ -292,3 +293,44 @@ int __init arch_probe_nr_irqs(void)
 	return 0;
 }
 #endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
+{
+	printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n",
+	       irq, desc->node, cpu);
+
+	raw_spin_lock_irq(&desc->lock);
+	desc->chip->set_affinity(irq, cpumask_of(cpu));
+	raw_spin_unlock_irq(&desc->lock);
+}
+
+/*
+ * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
+{
+	struct irq_desc *desc;
+	unsigned int irq, cpu = smp_processor_id();
+
+	for_each_irq_desc(irq, desc) {
+		if (desc->node == cpu) {
+			unsigned int newcpu = cpumask_any_and(desc->affinity,
+							      cpu_online_mask);
+			if (newcpu >= nr_cpu_ids) {
+				if (printk_ratelimit())
+					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
+					       irq, cpu);
+
+				cpumask_setall(desc->affinity);
+				newcpu = cpumask_any_and(desc->affinity,
+							 cpu_online_mask);
+			}
+
+			route_irq(desc, irq, newcpu);
+		}
+	}
+}
+#endif
diff --git a/arch/sh/kernel/localtimer.c b/arch/sh/kernel/localtimer.c
index 865a2f1029b..8bfc6dfa8b9 100644
--- a/arch/sh/kernel/localtimer.c
+++ b/arch/sh/kernel/localtimer.c
@@ -60,3 +60,7 @@ void local_timer_setup(unsigned int cpu)
 
 	clockevents_register_device(clk);
 }
+
+void local_timer_stop(unsigned int cpu)
+{
+}
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 21e7f8a9f3e..86cd6f94b53 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -79,6 +79,105 @@ void __init smp_prepare_boot_cpu(void)
 	per_cpu(cpu_state, cpu) = CPU_ONLINE;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+void native_cpu_die(unsigned int cpu)
+{
+	unsigned int i;
+
+	for (i = 0; i < 10; i++) {
+		smp_rmb();
+		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+			if (system_state == SYSTEM_RUNNING)
+				pr_info("CPU %u is now offline\n", cpu);
+
+			return;
+		}
+
+		msleep(100);
+	}
+
+	pr_err("CPU %u didn't die...\n", cpu);
+}
+
+int native_cpu_disable(unsigned int cpu)
+{
+	return cpu == 0 ? -EPERM : 0;
+}
+
+void play_dead_common(void)
+{
+	idle_task_exit();
+	irq_ctx_exit(raw_smp_processor_id());
+	mb();
+
+	__get_cpu_var(cpu_state) = CPU_DEAD;
+	local_irq_disable();
+}
+
+void native_play_dead(void)
+{
+	play_dead_common();
+}
+
+int __cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct task_struct *p;
+	int ret;
+
+	ret = mp_ops->cpu_disable(cpu);
+	if (ret)
+		return ret;
+
+	/*
+	 * Take this CPU offline.  Once we clear this, we can't return,
+	 * and we must not schedule until we're ready to give up the cpu.
+	 */
+	set_cpu_online(cpu, false);
+
+	/*
+	 * OK - migrate IRQs away from this CPU
+	 */
+	migrate_irqs();
+
+	/*
+	 * Stop the local timer for this CPU.
+	 */
+	local_timer_stop(cpu);
+
+	/*
+	 * Flush user cache and TLB mappings, and then remove this CPU
+	 * from the vm mask set of all processes.
+	 */
+	flush_cache_all();
+	local_flush_tlb_all();
+
+	read_lock(&tasklist_lock);
+	for_each_process(p)
+		if (p->mm)
+			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
+	read_unlock(&tasklist_lock);
+
+	return 0;
+}
+#else /* ... !CONFIG_HOTPLUG_CPU */
+int native_cpu_disable(unsigned int cpu)
+{
+	return -ENOSYS;
+}
+
+void native_cpu_die(unsigned int cpu)
+{
+	/* We said "no" in __cpu_disable */
+	BUG();
+}
+
+void native_play_dead(void)
+{
+	BUG();
+}
+#endif
+
 asmlinkage void __cpuinit start_secondary(void)
 {
 	unsigned int cpu = smp_processor_id();
@@ -88,8 +187,8 @@ asmlinkage void __cpuinit start_secondary(void)
 	atomic_inc(&mm->mm_count);
 	atomic_inc(&mm->mm_users);
 	current->active_mm = mm;
-	BUG_ON(current->mm);
 	enter_lazy_tlb(mm, current);
+	local_flush_tlb_all();
 
 	per_cpu_trap_init();
 
@@ -156,6 +255,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 			break;
 
 		udelay(10);
+		barrier();
 	}
 
 	if (cpu_online(cpu))
@@ -270,7 +370,6 @@ static void flush_tlb_mm_ipi(void *mm)
  * behalf of debugees, kswapd stealing pages from another process etc).
  * Kanoj 07/00.
  */
-
 void flush_tlb_mm(struct mm_struct *mm)
 {
 	preempt_disable();
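For readers wiring this up on a new platform: the hotplug path reaches the platform through mp_ops, so a backend fills in the new cpu_disable/cpu_die/play_dead members of struct plat_smp_ops and hands the whole thing to register_smp_ops(). Below is a hypothetical sketch only; the myplat_* names are invented for illustration, and the smp_setup member's signature is assumed to be void (*)(void) based on the mp_ops->smp_setup() call in plat_smp_setup(). The native_* helpers are the generic defaults this patch introduces.

#include <linux/init.h>
#include <asm/smp.h>		/* native_cpu_disable() and friends */
#include <asm/smp-ops.h>	/* struct plat_smp_ops, register_smp_ops() */

/* Platform-private bring-up hooks; real bodies omitted in this sketch. */
static void myplat_smp_setup(void) { }
static void myplat_prepare_cpus(unsigned int max_cpus) { }
static void myplat_start_cpu(unsigned int cpu, unsigned long entry_point) { }
static void myplat_send_ipi(unsigned int cpu, unsigned int message) { }

static struct plat_smp_ops myplat_smp_ops = {
	.smp_setup	= myplat_smp_setup,
	.prepare_cpus	= myplat_prepare_cpus,
	.start_cpu	= myplat_start_cpu,
	.send_ipi	= myplat_send_ipi,
	/* The new hotplug hooks: defer to the generic helpers added
	 * by this patch in arch/sh/kernel/smp.c. */
	.cpu_disable	= native_cpu_disable,
	.cpu_die	= native_cpu_die,
	.play_dead	= native_play_dead,
};

void __init myplat_register_smp_ops(void)
{
	register_smp_ops(&myplat_smp_ops);
}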
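On the user-facing side, the Kconfig help above says CPUs are controlled through /sys/devices/system/cpu. A minimal userspace sketch of exercising that interface, assuming a machine with a hotpluggable cpu1 (the CPU number and the bare-bones error handling are illustrative only; cpu0 is refused with -EPERM by native_cpu_disable() and cannot be taken down):

#include <stdio.h>

/* Write "0" (offline) or "1" (online) to a CPU's sysfs control file.
 * Kernel-side errors surface when the write is flushed at fclose(). */
static int set_cpu_online(unsigned int cpu, int online)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%u/online", cpu);

	f = fopen(path, "w");
	if (!f)
		return -1;

	fprintf(f, "%d", online);
	return fclose(f);
}

int main(void)
{
	/* Take cpu1 down, then bring it back. */
	if (set_cpu_online(1, 0) == 0)
		puts("cpu1 offline");
	if (set_cpu_online(1, 1) == 0)
		puts("cpu1 online");
	return 0;
}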