From dc825b17904a06bbd2f79d720b23156e4c01a22f Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Thu, 15 Apr 2010 13:13:52 +0900
Subject: sh: intc: IRQ auto-distribution support.

This implements support for hardware-managed IRQ balancing as provided
by SH-X3 cores (presently only hooked up for SH7786, but can probably
be carried over to other SH-X3 cores, too).

CPUs need to specify their distribution register along with the mask
definitions, as these follow the same format. Peripheral IRQs that
don't opt out of balancing will be automatically distributed at the
whim of the hardware block, while each CPU needs to verify whether it
is handling the IRQ or not, especially before clearing the mask.

Signed-off-by: Paul Mundt
---
 arch/sh/kernel/irq.c | 49 +++++++++++++++++++++++++++++--------------------
 1 file changed, 29 insertions(+), 20 deletions(-)

(limited to 'arch/sh/kernel/irq.c')

diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index d2d41d04665..f6a9319c28e 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -113,19 +113,14 @@ union irq_ctx {
 static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
 static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
-#endif
 
-asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
+static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+
+static inline void handle_one_irq(unsigned int irq)
 {
-	struct pt_regs *old_regs = set_irq_regs(regs);
-#ifdef CONFIG_IRQSTACKS
 	union irq_ctx *curctx, *irqctx;
-#endif
-
-	irq_enter();
-	irq = irq_demux(irq);
 
-#ifdef CONFIG_IRQSTACKS
 	curctx = (union irq_ctx *)current_thread_info();
 	irqctx = hardirq_ctx[smp_processor_id()];
@@ -164,20 +159,9 @@ asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
 			"r5", "r6", "r7", "r8", "t", "pr"
 		);
 	} else
-#endif
 		generic_handle_irq(irq);
-
-	irq_exit();
-
-	set_irq_regs(old_regs);
-	return 1;
 }
 
-#ifdef CONFIG_IRQSTACKS
-static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
-
-static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
-
 /*
  * allocate per-cpu stacks for hardirq and for softirq processing
  */
@@ -257,8 +241,33 @@ asmlinkage void do_softirq(void)
 
 	local_irq_restore(flags);
 }
+#else
+static inline void handle_one_irq(unsigned int irq)
+{
+	generic_handle_irq(irq);
+}
 #endif
 
+asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	irq_enter();
+
+	irq = irq_demux(irq_lookup(irq));
+
+	if (irq != NO_IRQ_IGNORE) {
+		handle_one_irq(irq);
+		irq_finish(irq);
+	}
+
+	irq_exit();
+
+	set_irq_regs(old_regs);
+
+	return IRQ_HANDLED;
+}
+
 void __init init_IRQ(void)
 {
 	plat_irq_setup();
--
cgit v1.2.3-70-g09d2
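The behavioural core of the do_IRQ() rework above is that a CPU may now be handed a vector the balancer actually routed to another core, so it has to confirm ownership via irq_lookup() before handling and acking anything. Below is a minimal standalone C sketch of that flow; the names mirror the patch, but the stub bodies, the NO_IRQ_IGNORE value and the even/odd routing policy are made up for illustration, since on real SH-X3 parts the INTC's distribution register decides the routing.

/*
 * Illustrative userspace sketch (not kernel code) of the reworked
 * do_IRQ() flow: with hardware IRQ auto-distribution, each CPU first
 * asks the interrupt controller whether this vector was actually
 * routed to it, and simply ignores it otherwise.
 */
#include <stdio.h>

#define NO_IRQ_IGNORE ((unsigned int)-1)	/* assumed sentinel value */

/* Stub ownership check: pretend the balancer hands even IRQs to cpu0
 * and odd IRQs to cpu1 (invented policy, purely for demonstration). */
static unsigned int irq_lookup(unsigned int cpu, unsigned int irq)
{
	return (irq % 2 == cpu) ? irq : NO_IRQ_IGNORE;
}

static void handle_one_irq(unsigned int irq)
{
	printf("  handling IRQ%u\n", irq);
}

/* Stub for irq_finish(): the per-CPU ack/mask clear belongs here,
 * only after ownership has been confirmed. */
static void irq_finish(unsigned int irq)
{
	printf("  acked IRQ%u\n", irq);
}

static void do_irq(unsigned int cpu, unsigned int raw_irq)
{
	unsigned int irq = irq_lookup(cpu, raw_irq);

	printf("cpu%u sees IRQ%u:\n", cpu, raw_irq);
	if (irq != NO_IRQ_IGNORE) {
		handle_one_irq(irq);
		irq_finish(irq);
	} else {
		printf("  not ours, ignored\n");
	}
}

int main(void)
{
	/* Both CPUs may observe the same vector; only the owner acts. */
	do_irq(0, 10);
	do_irq(1, 10);
	do_irq(1, 11);
	return 0;
}

The non-owning CPU simply falls through without touching the mask, which is exactly the verification the commit message asks each CPU to perform before clearing anything.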
From 763142d1efb56effe614d71185781796c4b83c78 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Mon, 26 Apr 2010 19:08:55 +0900
Subject: sh: CPU hotplug support.

This adds preliminary support for CPU hotplug for SH SMP systems.

Signed-off-by: Paul Mundt
---
 arch/sh/Kconfig               |   7 +++
 arch/sh/include/asm/irq.h     |   3 ++
 arch/sh/include/asm/smp-ops.h |  16 ++++++-
 arch/sh/include/asm/smp.h     |  19 +++++++-
 arch/sh/kernel/idle.c         |   6 ++-
 arch/sh/kernel/irq.c          |  42 +++++++++++++++++
 arch/sh/kernel/localtimer.c   |   4 ++
 arch/sh/kernel/smp.c          | 103 +++++++++++++++++++++++++++++++++++++++++-
 8 files changed, 194 insertions(+), 6 deletions(-)

(limited to 'arch/sh/kernel/irq.c')

diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index ba86bfba95a..d73bd4db5e8 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -706,6 +706,13 @@ config NR_CPUS
 	  This is purely to save memory - each supported CPU adds
 	  approximately eight kilobytes to the kernel image.
 
+config HOTPLUG_CPU
+	bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
+	depends on SMP && HOTPLUG && EXPERIMENTAL
+	help
+	  Say Y here to experiment with turning CPUs off and on.  CPUs
+	  can be controlled through /sys/devices/system/cpu.
+
 source "kernel/Kconfig.preempt"
 
 config GUSA
diff --git a/arch/sh/include/asm/irq.h b/arch/sh/include/asm/irq.h
index 99c593b3a82..02c2f0102cf 100644
--- a/arch/sh/include/asm/irq.h
+++ b/arch/sh/include/asm/irq.h
@@ -1,6 +1,7 @@
 #ifndef __ASM_SH_IRQ_H
 #define __ASM_SH_IRQ_H
 
+#include
 #include
 
 /*
@@ -50,6 +51,8 @@ static inline int generic_irq_demux(int irq)
 #define irq_demux(irq)	sh_mv.mv_irq_demux(irq)
 
 void init_IRQ(void);
+void migrate_irqs(void);
+
 asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs);
 
 #ifdef CONFIG_IRQSTACKS
diff --git a/arch/sh/include/asm/smp-ops.h b/arch/sh/include/asm/smp-ops.h
index 0581b2a4c8c..c590f76856f 100644
--- a/arch/sh/include/asm/smp-ops.h
+++ b/arch/sh/include/asm/smp-ops.h
@@ -7,20 +7,27 @@ struct plat_smp_ops {
 	void (*prepare_cpus)(unsigned int max_cpus);
 	void (*start_cpu)(unsigned int cpu, unsigned long entry_point);
 	void (*send_ipi)(unsigned int cpu, unsigned int message);
+	int (*cpu_disable)(unsigned int cpu);
+	void (*cpu_die)(unsigned int cpu);
+	void (*play_dead)(void);
 };
 
+extern struct plat_smp_ops *mp_ops;
 extern struct plat_smp_ops shx3_smp_ops;
 
 #ifdef CONFIG_SMP
 
 static inline void plat_smp_setup(void)
 {
-	extern struct plat_smp_ops *mp_ops;	/* private */
-
 	BUG_ON(!mp_ops);
 	mp_ops->smp_setup();
 }
 
+static inline void play_dead(void)
+{
+	mp_ops->play_dead();
+}
+
 extern void register_smp_ops(struct plat_smp_ops *ops);
 
 #else
@@ -34,6 +41,11 @@ static inline void register_smp_ops(struct plat_smp_ops *ops)
 {
 }
 
+static inline void play_dead(void)
+{
+	BUG();
+}
+
 #endif /* CONFIG_SMP */
 
 #endif /* __ASM_SH_SMP_OPS_H */
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index da5135b2579..9070d943ddd 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -38,9 +38,26 @@ void smp_timer_broadcast(const struct cpumask *mask);
 
 void local_timer_interrupt(void);
 void local_timer_setup(unsigned int cpu);
+void local_timer_stop(unsigned int cpu);
 
 void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+void native_play_dead(void);
+void native_cpu_die(unsigned int cpu);
+int native_cpu_disable(unsigned int cpu);
+
+#ifdef CONFIG_HOTPLUG_CPU
+void play_dead_common(void);
+extern int __cpu_disable(void);
+
+static inline void __cpu_die(unsigned int cpu)
+{
+	extern struct plat_smp_ops *mp_ops;	/* private */
+
+	mp_ops->cpu_die(cpu);
+}
+#endif
 
 static inline int hard_smp_processor_id(void)
 {
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 204005329fe..425d604e3a2 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include
 
 void (*pm_idle)(void) = NULL;
 
@@ -89,10 +90,13 @@ void cpu_idle(void)
 	while (1) {
 		tick_nohz_stop_sched_tick(1);
 
-		while (!need_resched() && cpu_online(cpu)) {
+		while (!need_resched()) {
 			check_pgt_cache();
 			rmb();
 
+			if (cpu_is_offline(cpu))
+				play_dead();
+
 			local_irq_disable();
 			/* Don't trace irqs off for idle */
 			stop_critical_timings();
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index f6a9319c28e..257de1f0692 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -292,3 +293,44 @@ int __init arch_probe_nr_irqs(void)
 	return 0;
 }
 #endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
+{
+	printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n",
+	       irq, desc->node, cpu);
+
+	raw_spin_lock_irq(&desc->lock);
+	desc->chip->set_affinity(irq, cpumask_of(cpu));
+	raw_spin_unlock_irq(&desc->lock);
+}
+
+/*
+ * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
+{
+	struct irq_desc *desc;
+	unsigned int irq, cpu = smp_processor_id();
+
+	for_each_irq_desc(irq, desc) {
+		if (desc->node == cpu) {
+			unsigned int newcpu = cpumask_any_and(desc->affinity,
+							      cpu_online_mask);
+			if (newcpu >= nr_cpu_ids) {
+				if (printk_ratelimit())
+					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
+					       irq, cpu);
+
+				cpumask_setall(desc->affinity);
+				newcpu = cpumask_any_and(desc->affinity,
+							 cpu_online_mask);
+			}
+
+			route_irq(desc, irq, newcpu);
+		}
+	}
+}
+#endif
diff --git a/arch/sh/kernel/localtimer.c b/arch/sh/kernel/localtimer.c
index 865a2f1029b..8bfc6dfa8b9 100644
--- a/arch/sh/kernel/localtimer.c
+++ b/arch/sh/kernel/localtimer.c
@@ -60,3 +60,7 @@ void local_timer_setup(unsigned int cpu)
 
 	clockevents_register_device(clk);
 }
+
+void local_timer_stop(unsigned int cpu)
+{
+}
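An aside before the smp.c changes: the fallback rule in migrate_irqs() above is simply "keep the IRQ inside its affinity mask if any CPU there is still online, otherwise widen the affinity to every CPU and pick again". The same logic in a self-contained userspace sketch, with plain bitmasks standing in for struct cpumask and the helpers any_and() and pick_new_cpu() invented for the example:

/*
 * Userspace sketch of the migrate_irqs() fallback above, using plain
 * bitmasks in place of struct cpumask.
 */
#include <stdio.h>

#define NR_CPUS 4

/* First CPU set in both masks, or NR_CPUS if none (cf. cpumask_any_and). */
static unsigned int any_and(unsigned int a, unsigned int b)
{
	unsigned int both = a & b, cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (both & (1u << cpu))
			return cpu;
	return NR_CPUS;
}

static unsigned int pick_new_cpu(unsigned int *affinity, unsigned int online,
				 unsigned int irq, unsigned int dying)
{
	unsigned int newcpu = any_and(*affinity, online);

	if (newcpu >= NR_CPUS) {
		/* Affinity excludes every online CPU: widen it, as
		 * cpumask_setall() does in the patch, then pick again. */
		printf("IRQ%u no longer affine to CPU%u\n", irq, dying);
		*affinity = (1u << NR_CPUS) - 1;
		newcpu = any_and(*affinity, online);
	}
	return newcpu;
}

int main(void)
{
	unsigned int affinity = 1u << 2;	/* IRQ bound to cpu2 only */
	unsigned int online = 0xf & ~(1u << 2);	/* cpu2 is going down */

	printf("IRQ5 -> cpu%u\n", pick_new_cpu(&affinity, online, 5, 2));
	return 0;
}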
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 21e7f8a9f3e..86cd6f94b53 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -79,6 +79,105 @@ void __init smp_prepare_boot_cpu(void)
 	per_cpu(cpu_state, cpu) = CPU_ONLINE;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+void native_cpu_die(unsigned int cpu)
+{
+	unsigned int i;
+
+	for (i = 0; i < 10; i++) {
+		smp_rmb();
+		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+			if (system_state == SYSTEM_RUNNING)
+				pr_info("CPU %u is now offline\n", cpu);
+
+			return;
+		}
+
+		msleep(100);
+	}
+
+	pr_err("CPU %u didn't die...\n", cpu);
+}
+
+int native_cpu_disable(unsigned int cpu)
+{
+	return cpu == 0 ? -EPERM : 0;
+}
+
+void play_dead_common(void)
+{
+	idle_task_exit();
+	irq_ctx_exit(raw_smp_processor_id());
+	mb();
+
+	__get_cpu_var(cpu_state) = CPU_DEAD;
+	local_irq_disable();
+}
+
+void native_play_dead(void)
+{
+	play_dead_common();
+}
+
+int __cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct task_struct *p;
+	int ret;
+
+	ret = mp_ops->cpu_disable(cpu);
+	if (ret)
+		return ret;
+
+	/*
+	 * Take this CPU offline.  Once we clear this, we can't return,
+	 * and we must not schedule until we're ready to give up the cpu.
+	 */
+	set_cpu_online(cpu, false);
+
+	/*
+	 * OK - migrate IRQs away from this CPU
+	 */
+	migrate_irqs();
+
+	/*
+	 * Stop the local timer for this CPU.
+	 */
+	local_timer_stop(cpu);
+
+	/*
+	 * Flush user cache and TLB mappings, and then remove this CPU
+	 * from the vm mask set of all processes.
+	 */
+	flush_cache_all();
+	local_flush_tlb_all();
+
+	read_lock(&tasklist_lock);
+	for_each_process(p)
+		if (p->mm)
+			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
+	read_unlock(&tasklist_lock);
+
+	return 0;
+}
+#else /* ... !CONFIG_HOTPLUG_CPU */
+int native_cpu_disable(unsigned int cpu)
+{
+	return -ENOSYS;
+}
+
+void native_cpu_die(unsigned int cpu)
+{
+	/* We said "no" in __cpu_disable */
+	BUG();
+}
+
+void native_play_dead(void)
+{
+	BUG();
+}
+#endif
+
 asmlinkage void __cpuinit start_secondary(void)
 {
 	unsigned int cpu = smp_processor_id();
@@ -88,8 +187,8 @@ asmlinkage void __cpuinit start_secondary(void)
 	atomic_inc(&mm->mm_count);
 	atomic_inc(&mm->mm_users);
 	current->active_mm = mm;
-	BUG_ON(current->mm);
 	enter_lazy_tlb(mm, current);
+	local_flush_tlb_all();
 
 	per_cpu_trap_init();
 
@@ -156,6 +255,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 			break;
 
 		udelay(10);
+		barrier();
 	}
 
 	if (cpu_online(cpu))
@@ -270,7 +370,6 @@ static void flush_tlb_mm_ipi(void *mm)
  * behalf of debugees, kswapd stealing pages from another process etc).
  * Kanoj 07/00.
  */
-
 void flush_tlb_mm(struct mm_struct *mm)
 {
 	preempt_disable();
--
cgit v1.2.3-70-g09d2
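Taken together, the smp.c half of the patch implements a simple polling handshake: the dying CPU publishes CPU_DEAD from play_dead_common(), and native_cpu_die() on the CPU driving the hotplug operation waits for that state in at most ten 100ms naps. A self-contained userspace approximation follows (build with cc -pthread); pthreads and usleep() stand in for a secondary CPU and msleep(), and the 250ms teardown delay is invented for the demo.

/*
 * Userspace sketch of the native_cpu_die()/play_dead_common()
 * handshake: the dying side publishes CPU_DEAD, the surviving side
 * polls for it with a bounded number of 100ms naps.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

enum { CPU_ONLINE, CPU_DEAD };

static volatile int cpu_state = CPU_ONLINE;

/* The dying CPU: rough equivalent of play_dead_common(). */
static void *dying_cpu(void *arg)
{
	usleep(250 * 1000);		/* pretend teardown takes a while */
	__sync_synchronize();		/* mb() before publishing the state */
	cpu_state = CPU_DEAD;
	return NULL;
}

/* The surviving CPU: rough equivalent of native_cpu_die(). */
static void cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 10; i++) {
		__sync_synchronize();	/* smp_rmb() */
		if (cpu_state == CPU_DEAD) {
			printf("CPU %u is now offline\n", cpu);
			return;
		}
		usleep(100 * 1000);	/* msleep(100) */
	}
	printf("CPU %u didn't die...\n", cpu);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, dying_cpu, NULL);
	cpu_die(1);
	pthread_join(t, NULL);
	return 0;
}

If the dying side ever takes longer than a second to publish CPU_DEAD, the waiter gives up with the same "didn't die" complaint the patch logs.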