From 3366e3585fbf0d40ce6f2382b544851cf4df1654 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Tue, 30 Mar 2010 12:38:01 +0900
Subject: sh: Move platform smp ops in to their own structure.

This cribs the MIPS plat_smp_ops approach for wrapping up the platform
ops. This will allow for mixing and matching different ops on the same
platform in the future.

Signed-off-by: Paul Mundt
---
 arch/sh/include/asm/smp-ops.h | 39 +++++++++++++++++++++++++++++++++++++++
 arch/sh/include/asm/smp.h     | 18 +++++++++++-------
 2 files changed, 50 insertions(+), 7 deletions(-)
 create mode 100644 arch/sh/include/asm/smp-ops.h

(limited to 'arch/sh/include')

diff --git a/arch/sh/include/asm/smp-ops.h b/arch/sh/include/asm/smp-ops.h
new file mode 100644
index 00000000000..0581b2a4c8c
--- /dev/null
+++ b/arch/sh/include/asm/smp-ops.h
@@ -0,0 +1,39 @@
+#ifndef __ASM_SH_SMP_OPS_H
+#define __ASM_SH_SMP_OPS_H
+
+struct plat_smp_ops {
+	void (*smp_setup)(void);
+	unsigned int (*smp_processor_id)(void);
+	void (*prepare_cpus)(unsigned int max_cpus);
+	void (*start_cpu)(unsigned int cpu, unsigned long entry_point);
+	void (*send_ipi)(unsigned int cpu, unsigned int message);
+};
+
+extern struct plat_smp_ops shx3_smp_ops;
+
+#ifdef CONFIG_SMP
+
+static inline void plat_smp_setup(void)
+{
+	extern struct plat_smp_ops *mp_ops;	/* private */
+
+	BUG_ON(!mp_ops);
+	mp_ops->smp_setup();
+}
+
+extern void register_smp_ops(struct plat_smp_ops *ops);
+
+#else
+
+static inline void plat_smp_setup(void)
+{
+	/* UP, nothing to do ... */
+}
+
+static inline void register_smp_ops(struct plat_smp_ops *ops)
+{
+}
+
+#endif /* CONFIG_SMP */
+
+#endif /* __ASM_SH_SMP_OPS_H */
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index 53ef26ced75..7f13d46ec8d 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -3,6 +3,7 @@
 
 #include 
 #include 
+#include 
 
 #ifdef CONFIG_SMP
 
@@ -11,7 +12,6 @@
 #include 
 
 #define raw_smp_processor_id()	(current_thread_info()->cpu)
-#define hard_smp_processor_id()	plat_smp_processor_id()
 
 /* Map from cpu id to sequential logical cpu number. */
 extern int __cpu_number_map[NR_CPUS];
@@ -36,15 +36,19 @@ void smp_timer_broadcast(const struct cpumask *mask);
 void local_timer_interrupt(void);
 void local_timer_setup(unsigned int cpu);
 
-void plat_smp_setup(void);
-void plat_prepare_cpus(unsigned int max_cpus);
-int plat_smp_processor_id(void);
-void plat_start_cpu(unsigned int cpu, unsigned long entry_point);
-void plat_send_ipi(unsigned int cpu, unsigned int message);
-
 void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
+static inline int hard_smp_processor_id(void)
+{
+	extern struct plat_smp_ops *mp_ops;	/* private */
+
+	if (!mp_ops)
+		return 0;	/* boot CPU */
+
+	return mp_ops->smp_processor_id();
+}
+
 #else
 
 #define hard_smp_processor_id()	(0)
-- cgit v1.2.3-70-g09d2

From 9715b8c7d55912fb6f5dd9b1c084d8eefcd0d848 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Mon, 26 Apr 2010 18:49:58 +0900
Subject: sh: provide percpu CPU states for hotplug notifiers.

This provides percpu CPU states in preparation for CPU hotplug and the
associated notifier chains.

Signed-off-by: Paul Mundt
---
 arch/sh/include/asm/smp.h |  3 +++
 arch/sh/kernel/smp.c      | 14 ++++++++++----
 2 files changed, 13 insertions(+), 4 deletions(-)

(limited to 'arch/sh/include')

diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index 7f13d46ec8d..da5135b2579 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -10,6 +10,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #define raw_smp_processor_id()	(current_thread_info()->cpu)
 
@@ -30,6 +31,8 @@ enum {
 	SMP_MSG_NR,	/* must be last */
 };
 
+DECLARE_PER_CPU(int, cpu_state);
+
 void smp_message_recv(unsigned int msg);
 void smp_timer_broadcast(const struct cpumask *mask);
 
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index cc87830ace7..4d27597c5da 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -33,6 +33,9 @@ int __cpu_logical_map[NR_CPUS];	/* Map logical to physical */
 
 struct plat_smp_ops *mp_ops = NULL;
 
+/* State of each CPU */
+DEFINE_PER_CPU(int, cpu_state) = { 0 };
+
 void __cpuinit register_smp_ops(struct plat_smp_ops *ops)
 {
 	if (mp_ops)
@@ -72,11 +75,13 @@ void __devinit smp_prepare_boot_cpu(void)
 
 	set_cpu_online(cpu, true);
 	set_cpu_possible(cpu, true);
+
+	per_cpu(cpu_state, cpu) = CPU_ONLINE;
 }
 
 asmlinkage void __cpuinit start_secondary(void)
 {
-	unsigned int cpu;
+	unsigned int cpu = smp_processor_id();
 	struct mm_struct *mm = &init_mm;
 
 	enable_mmu();
@@ -90,12 +95,10 @@ asmlinkage void __cpuinit start_secondary(void)
 
 	preempt_disable();
 
-	notify_cpu_starting(smp_processor_id());
+	notify_cpu_starting(cpu);
 
 	local_irq_enable();
 
-	cpu = smp_processor_id();
-
 	/* Enable local timers */
 	local_timer_setup(cpu);
 	calibrate_delay();
@@ -103,6 +106,7 @@ asmlinkage void __cpuinit start_secondary(void)
 	smp_store_cpu_info(cpu);
 
 	set_cpu_online(cpu, true);
+	per_cpu(cpu_state, cpu) = CPU_ONLINE;
 
 	cpu_idle();
 }
@@ -127,6 +131,8 @@ int __cpuinit __cpu_up(unsigned int cpu)
 		return PTR_ERR(tsk);
 	}
 
+	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+
 	/* Fill in data in head.S for secondary cpus */
 	stack_start.sp = tsk->thread.sp;
 	stack_start.thread_info = tsk->stack;
-- cgit v1.2.3-70-g09d2

From 8db2bc4559639680a94d4492ae4b7ce71298a74f Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Mon, 26 Apr 2010 18:59:47 +0900
Subject: sh: cache secondary CPUs idle loop.

This provides a cache of the secondary CPUs idle loop for the cases
where hotplug simply enters a low power state instead of resetting or
powering off the core.

Signed-off-by: Paul Mundt
---
 arch/sh/include/asm/processor.h |  4 ++++
 arch/sh/kernel/smp.c            | 13 +++++++++----
 2 files changed, 13 insertions(+), 4 deletions(-)

(limited to 'arch/sh/include')

diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index 26b3f026eec..0a58cb25a65 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -85,6 +85,10 @@ struct sh_cpuinfo {
 	struct tlb_info itlb;
 	struct tlb_info dtlb;
 
+#ifdef CONFIG_SMP
+	struct task_struct *idle;
+#endif
+
 	unsigned long flags;
 } __attribute__ ((aligned(L1_CACHE_BYTES)));
 
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 3711a76a18e..21e7f8a9f3e 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -125,10 +125,15 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	struct task_struct *tsk;
 	unsigned long timeout;
 
-	tsk = fork_idle(cpu);
-	if (IS_ERR(tsk)) {
-		printk(KERN_ERR "Failed forking idle task for cpu %d\n", cpu);
-		return PTR_ERR(tsk);
+	tsk = cpu_data[cpu].idle;
+	if (!tsk) {
+		tsk = fork_idle(cpu);
+		if (IS_ERR(tsk)) {
+			pr_err("Failed forking idle task for cpu %d\n", cpu);
+			return PTR_ERR(tsk);
+		}
+
+		cpu_data[cpu].idle = tsk;
 	}
 
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-- cgit v1.2.3-70-g09d2

From 763142d1efb56effe614d71185781796c4b83c78 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Mon, 26 Apr 2010 19:08:55 +0900
Subject: sh: CPU hotplug support.

This adds preliminary support for CPU hotplug for SH SMP systems.

Signed-off-by: Paul Mundt
---
 arch/sh/Kconfig               |   7 +++
 arch/sh/include/asm/irq.h     |   3 ++
 arch/sh/include/asm/smp-ops.h |  16 ++++++-
 arch/sh/include/asm/smp.h     |  19 +++++++-
 arch/sh/kernel/idle.c         |   6 ++-
 arch/sh/kernel/irq.c          |  42 +++++++++++++++++
 arch/sh/kernel/localtimer.c   |   4 ++
 arch/sh/kernel/smp.c          | 103 +++++++++++++++++++++++++++++++++++++++++-
 8 files changed, 194 insertions(+), 6 deletions(-)

(limited to 'arch/sh/include')

diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index ba86bfba95a..d73bd4db5e8 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -706,6 +706,13 @@ config NR_CPUS
 	  This is purely to save memory - each supported CPU adds
 	  approximately eight kilobytes to the kernel image.
 
+config HOTPLUG_CPU
+	bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
+	depends on SMP && HOTPLUG && EXPERIMENTAL
+	help
+	  Say Y here to experiment with turning CPUs off and on.  CPUs
+	  can be controlled through /sys/devices/system/cpu.
+
 source "kernel/Kconfig.preempt"
 
 config GUSA
diff --git a/arch/sh/include/asm/irq.h b/arch/sh/include/asm/irq.h
index 99c593b3a82..02c2f0102cf 100644
--- a/arch/sh/include/asm/irq.h
+++ b/arch/sh/include/asm/irq.h
@@ -1,6 +1,7 @@
 #ifndef __ASM_SH_IRQ_H
 #define __ASM_SH_IRQ_H
 
+#include 
 #include 
 
 /*
@@ -50,6 +51,8 @@ static inline int generic_irq_demux(int irq)
 #define irq_demux(irq)		sh_mv.mv_irq_demux(irq)
 
 void init_IRQ(void);
+void migrate_irqs(void);
+
 asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs);
 
 #ifdef CONFIG_IRQSTACKS
diff --git a/arch/sh/include/asm/smp-ops.h b/arch/sh/include/asm/smp-ops.h
index 0581b2a4c8c..c590f76856f 100644
--- a/arch/sh/include/asm/smp-ops.h
+++ b/arch/sh/include/asm/smp-ops.h
@@ -7,20 +7,27 @@ struct plat_smp_ops {
 	void (*prepare_cpus)(unsigned int max_cpus);
 	void (*start_cpu)(unsigned int cpu, unsigned long entry_point);
 	void (*send_ipi)(unsigned int cpu, unsigned int message);
+	int (*cpu_disable)(unsigned int cpu);
+	void (*cpu_die)(unsigned int cpu);
+	void (*play_dead)(void);
 };
 
+extern struct plat_smp_ops *mp_ops;
 extern struct plat_smp_ops shx3_smp_ops;
 
 #ifdef CONFIG_SMP
 
 static inline void plat_smp_setup(void)
 {
-	extern struct plat_smp_ops *mp_ops;	/* private */
-
 	BUG_ON(!mp_ops);
 	mp_ops->smp_setup();
 }
 
+static inline void play_dead(void)
+{
+	mp_ops->play_dead();
+}
+
 extern void register_smp_ops(struct plat_smp_ops *ops);
 
 #else
@@ -34,6 +41,11 @@ static inline void register_smp_ops(struct plat_smp_ops *ops)
 {
 }
 
+static inline void play_dead(void)
+{
+	BUG();
+}
+
 #endif /* CONFIG_SMP */
 
 #endif /* __ASM_SH_SMP_OPS_H */
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index da5135b2579..9070d943ddd 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -38,9 +38,26 @@ void smp_timer_broadcast(const struct cpumask *mask);
 void local_timer_interrupt(void);
 void local_timer_setup(unsigned int cpu);
+void local_timer_stop(unsigned int cpu);
 
 void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+void native_play_dead(void);
+void native_cpu_die(unsigned int cpu);
+int native_cpu_disable(unsigned int cpu);
+
+#ifdef CONFIG_HOTPLUG_CPU
+void play_dead_common(void);
+extern int __cpu_disable(void);
+
+static inline void __cpu_die(unsigned int cpu)
+{
+	extern struct plat_smp_ops *mp_ops;	/* private */
+
+	mp_ops->cpu_die(cpu);
+}
+#endif
 
 static inline int hard_smp_processor_id(void)
 {
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 204005329fe..425d604e3a2 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -19,6 +19,7 @@
 #include 
 #include 
 #include 
+#include 
 
 void (*pm_idle)(void) = NULL;
 
@@ -89,10 +90,13 @@ void cpu_idle(void)
 	while (1) {
 		tick_nohz_stop_sched_tick(1);
 
-		while (!need_resched() && cpu_online(cpu)) {
+		while (!need_resched()) {
 			check_pgt_cache();
 			rmb();
 
+			if (cpu_is_offline(cpu))
+				play_dead();
+
 			local_irq_disable();
 			/* Don't trace irqs off for idle */
 			stop_critical_timings();
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index f6a9319c28e..257de1f0692 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -12,6 +12,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -292,3 +293,44 @@ int __init arch_probe_nr_irqs(void)
 	return 0;
 }
 #endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
+{
+	printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n",
+	       irq, desc->node, cpu);
+
+	raw_spin_lock_irq(&desc->lock);
+	desc->chip->set_affinity(irq, cpumask_of(cpu));
+	raw_spin_unlock_irq(&desc->lock);
+}
+
+/*
+ * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
+{
+	struct irq_desc *desc;
+	unsigned int irq, cpu = smp_processor_id();
+
+	for_each_irq_desc(irq, desc) {
+		if (desc->node == cpu) {
+			unsigned int newcpu = cpumask_any_and(desc->affinity,
+							      cpu_online_mask);
+			if (newcpu >= nr_cpu_ids) {
+				if (printk_ratelimit())
+					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
+					       irq, cpu);
+
+				cpumask_setall(desc->affinity);
+				newcpu = cpumask_any_and(desc->affinity,
+							 cpu_online_mask);
+			}
+
+			route_irq(desc, irq, newcpu);
+		}
+	}
+}
+#endif
diff --git a/arch/sh/kernel/localtimer.c b/arch/sh/kernel/localtimer.c
index 865a2f1029b..8bfc6dfa8b9 100644
--- a/arch/sh/kernel/localtimer.c
+++ b/arch/sh/kernel/localtimer.c
@@ -60,3 +60,7 @@ void local_timer_setup(unsigned int cpu)
 
 	clockevents_register_device(clk);
 }
+
+void local_timer_stop(unsigned int cpu)
+{
+}
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 21e7f8a9f3e..86cd6f94b53 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -79,6 +79,105 @@ void __init smp_prepare_boot_cpu(void)
 	per_cpu(cpu_state, cpu) = CPU_ONLINE;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+void native_cpu_die(unsigned int cpu)
+{
+	unsigned int i;
+
+	for (i = 0; i < 10; i++) {
+		smp_rmb();
+		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+			if (system_state == SYSTEM_RUNNING)
+				pr_info("CPU %u is now offline\n", cpu);
+
+			return;
+		}
+
+		msleep(100);
+	}
+
+	pr_err("CPU %u didn't die...\n", cpu);
+}
+
+int native_cpu_disable(unsigned int cpu)
+{
+	return cpu == 0 ? -EPERM : 0;
+}
+
+void play_dead_common(void)
+{
+	idle_task_exit();
+	irq_ctx_exit(raw_smp_processor_id());
+	mb();
+
+	__get_cpu_var(cpu_state) = CPU_DEAD;
+	local_irq_disable();
+}
+
+void native_play_dead(void)
+{
+	play_dead_common();
+}
+
+int __cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct task_struct *p;
+	int ret;
+
+	ret = mp_ops->cpu_disable(cpu);
+	if (ret)
+		return ret;
+
+	/*
+	 * Take this CPU offline.  Once we clear this, we can't return,
+	 * and we must not schedule until we're ready to give up the cpu.
+	 */
+	set_cpu_online(cpu, false);
+
+	/*
+	 * OK - migrate IRQs away from this CPU
+	 */
+	migrate_irqs();
+
+	/*
+	 * Stop the local timer for this CPU.
+	 */
+	local_timer_stop(cpu);
+
+	/*
+	 * Flush user cache and TLB mappings, and then remove this CPU
+	 * from the vm mask set of all processes.
+	 */
+	flush_cache_all();
+	local_flush_tlb_all();
+
+	read_lock(&tasklist_lock);
+	for_each_process(p)
+		if (p->mm)
+			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
+	read_unlock(&tasklist_lock);
+
+	return 0;
+}
+#else /* ... !CONFIG_HOTPLUG_CPU */
+int native_cpu_disable(void)
+{
+	return -ENOSYS;
+}
+
+void native_cpu_die(unsigned int cpu)
+{
+	/* We said "no" in __cpu_disable */
+	BUG();
+}
+
+void native_play_dead(void)
+{
+	BUG();
+}
+#endif
+
 asmlinkage void __cpuinit start_secondary(void)
 {
 	unsigned int cpu = smp_processor_id();
@@ -88,8 +187,8 @@ asmlinkage void __cpuinit start_secondary(void)
 	atomic_inc(&mm->mm_count);
 	atomic_inc(&mm->mm_users);
 	current->active_mm = mm;
-	BUG_ON(current->mm);
 	enter_lazy_tlb(mm, current);
+	local_flush_tlb_all();
 
 	per_cpu_trap_init();
 
@@ -156,6 +255,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 			break;
 
 		udelay(10);
+		barrier();
 	}
 
 	if (cpu_online(cpu))
@@ -270,7 +370,6 @@ static void flush_tlb_mm_ipi(void *mm)
  * behalf of debugees, kswapd stealing pages from another process etc).
  * Kanoj 07/00.
  */
-
 void flush_tlb_mm(struct mm_struct *mm)
 {
 	preempt_disable();
-- cgit v1.2.3-70-g09d2
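
For reference, a platform plugs into the interface introduced by this series by
filling out a struct plat_smp_ops and handing it to register_smp_ops() from its
early setup code; with CPU hotplug enabled it can also wire the cpu_disable,
cpu_die and play_dead hooks to the generic native_* helpers declared in
asm/smp.h. The sketch below is illustrative only: the myplat_* names are
hypothetical placeholders, not the in-tree shx3_smp_ops implementation (which
lives outside the arch/sh/include diffs shown above).

	#include <linux/init.h>
	#include <asm/smp-ops.h>
	#include <asm/smp.h>

	static void myplat_smp_setup(void)
	{
		/* Probe secondary CPUs and mark them possible/present. */
	}

	static unsigned int myplat_smp_processor_id(void)
	{
		return 0;	/* read the hardware CPU ID register here */
	}

	static void myplat_prepare_cpus(unsigned int max_cpus)
	{
		/* Set up IPI vectors and anything else needed before __cpu_up(). */
	}

	static void myplat_start_cpu(unsigned int cpu, unsigned long entry_point)
	{
		/* Point the secondary CPU's reset vector at entry_point and release it. */
	}

	static void myplat_send_ipi(unsigned int cpu, unsigned int message)
	{
		/* Raise the interrupt carrying one of the SMP_MSG_* messages. */
	}

	static struct plat_smp_ops myplat_smp_ops = {
		.smp_setup		= myplat_smp_setup,
		.smp_processor_id	= myplat_smp_processor_id,
		.prepare_cpus		= myplat_prepare_cpus,
		.start_cpu		= myplat_start_cpu,
		.send_ipi		= myplat_send_ipi,
	#ifdef CONFIG_HOTPLUG_CPU
		/* Generic hotplug helpers from arch/sh/kernel/smp.c */
		.cpu_disable		= native_cpu_disable,
		.cpu_die		= native_cpu_die,
		.play_dead		= native_play_dead,
	#endif
	};

	/* Called from the platform's early setup code, before plat_smp_setup(). */
	void __init myplat_register_smp(void)
	{
		register_smp_ops(&myplat_smp_ops);
	}

Once registered, the generic code drives everything through mp_ops: plat_smp_setup()
and hard_smp_processor_id() use it at boot, __cpu_up()/start_secondary() bring
secondaries online, and on hotplug __cpu_disable(), __cpu_die() and play_dead()
call back into the cpu_disable/cpu_die/play_dead hooks shown in the last patch.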