From 9dc2d0f55fb693ae6d50c8dd3d934fe3133ca183 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Wed, 2 Nov 2005 11:51:15 +0000
Subject: [ARM] Fix realview machine type for patch 3060/1

Realview was missed in this cleanup...

Signed-off-by: Russell King
---
 include/asm-arm/arch-realview/memory.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/asm-arm/arch-realview/memory.h b/include/asm-arm/arch-realview/memory.h
index 99667d5cc61..ed370abb638 100644
--- a/include/asm-arm/arch-realview/memory.h
+++ b/include/asm-arm/arch-realview/memory.h
@@ -23,7 +23,7 @@
 /*
  * Physical DRAM offset.
  */
-#define PHYS_OFFSET	(0x00000000UL)
+#define PHYS_OFFSET	UL(0x00000000)

 /*
  * Virtual view <-> DMA view memory address translations
--
cgit v1.2.3-70-g09d2
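
The reason this one-character-ish change matters: PHYS_OFFSET is used from
both C and assembly sources, and the (0x00000000UL) form is a syntax error
when the header is pulled into an .S file, since the assembler does not
understand C integer-constant suffixes.  Patch 3060/1 introduced a UL()
helper for exactly this; a minimal sketch of that helper, assuming the
conventional __ASSEMBLY__ guard it used:

    /*
     * UL() keeps the unsigned-long suffix for C but drops it for the
     * assembler, so a single #define works in both kinds of source file.
     * Sketch only -- the real definition came in with patch 3060/1.
     */
    #ifndef __ASSEMBLY__
    #define UL(x)	(x##UL)		/* C:   UL(0x00000000) -> 0x00000000UL */
    #else
    #define UL(x)	(x)		/* asm: UL(0x00000000) -> 0x00000000   */
    #endif

    #define PHYS_OFFSET	UL(0x00000000)
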
From bfca94590bfd3dcd958c542d2fb6406518150fee Mon Sep 17 00:00:00 2001
From: John Bowler
Date: Wed, 2 Nov 2005 11:55:12 +0000
Subject: [ARM] 3083/1: include/asm-arm/arch-ixp4xx/io.h: eliminate warnings
 for pointer passed to integral function argument

Patch from John Bowler

Fix for a compiler warning, this wasn't apparent in 2.6.12, I believe
the compiler options have been changed (somewhere) so that passing a
(void*) to a (u32) argument is now warned.  This accounts for the
majority of the warnings in my builds of the 2.6.14 kernel for NSLU2.

The patch changes pointer parameters declared as u32 to be declared
as either, for read parameters:

	const volatile void __iomem *

and for write parameters:

	volatile void __iomem *

Signed-off-by: John Bowler
Signed-off-by: Deepak Saxena
Signed-off-by: Russell King
---
 include/asm-arm/arch-ixp4xx/io.h | 74 ++++++++++++++++++++++------------------
 1 file changed, 40 insertions(+), 34 deletions(-)

(limited to 'include')

diff --git a/include/asm-arm/arch-ixp4xx/io.h b/include/asm-arm/arch-ixp4xx/io.h
index 80d05ecad2f..688f7f90d93 100644
--- a/include/asm-arm/arch-ixp4xx/io.h
+++ b/include/asm-arm/arch-ixp4xx/io.h
@@ -80,9 +80,9 @@ __ixp4xx_iounmap(void __iomem *addr)
 #define __arch_ioremap(a, s, f, x)	__ixp4xx_ioremap(a, s, f, x)
 #define __arch_iounmap(a)		__ixp4xx_iounmap(a)

-#define writeb(p, v)			__ixp4xx_writeb(p, v)
-#define writew(p, v)			__ixp4xx_writew(p, v)
-#define writel(p, v)			__ixp4xx_writel(p, v)
+#define writeb(v, p)			__ixp4xx_writeb(v, p)
+#define writew(v, p)			__ixp4xx_writew(v, p)
+#define writel(v, p)			__ixp4xx_writel(v, p)

 #define writesb(p, v, l)		__ixp4xx_writesb(p, v, l)
 #define writesw(p, v, l)		__ixp4xx_writesw(p, v, l)
@@ -97,8 +97,9 @@ __ixp4xx_iounmap(void __iomem *addr)
 #define readsl(p, v, l)			__ixp4xx_readsl(p, v, l)

 static inline void
-__ixp4xx_writeb(u8 value, u32 addr)
+__ixp4xx_writeb(u8 value, volatile void __iomem *p)
 {
+	u32 addr = (u32)p;
 	u32 n, byte_enables, data;

 	if (addr >= VMALLOC_START) {
@@ -113,15 +114,16 @@ __ixp4xx_writeb(u8 value, u32 addr)
 }

 static inline void
-__ixp4xx_writesb(u32 bus_addr, const u8 *vaddr, int count)
+__ixp4xx_writesb(volatile void __iomem *bus_addr, const u8 *vaddr, int count)
 {
 	while (count--)
 		writeb(*vaddr++, bus_addr);
 }

 static inline void
-__ixp4xx_writew(u16 value, u32 addr)
+__ixp4xx_writew(u16 value, volatile void __iomem *p)
 {
+	u32 addr = (u32)p;
 	u32 n, byte_enables, data;

 	if (addr >= VMALLOC_START) {
@@ -136,15 +138,16 @@ __ixp4xx_writew(u16 value, u32 addr)
 }

 static inline void
-__ixp4xx_writesw(u32 bus_addr, const u16 *vaddr, int count)
+__ixp4xx_writesw(volatile void __iomem *bus_addr, const u16 *vaddr, int count)
 {
 	while (count--)
 		writew(*vaddr++, bus_addr);
 }

 static inline void
-__ixp4xx_writel(u32 value, u32 addr)
+__ixp4xx_writel(u32 value, volatile void __iomem *p)
 {
+	u32 addr = (u32)p;
 	if (addr >= VMALLOC_START) {
 		__raw_writel(value, addr);
 		return;
@@ -154,15 +157,16 @@ __ixp4xx_writel(u32 value, u32 addr)
 }

 static inline void
-__ixp4xx_writesl(u32 bus_addr, const u32 *vaddr, int count)
+__ixp4xx_writesl(volatile void __iomem *bus_addr, const u32 *vaddr, int count)
 {
 	while (count--)
 		writel(*vaddr++, bus_addr);
 }

 static inline unsigned char
-__ixp4xx_readb(u32 addr)
+__ixp4xx_readb(const volatile void __iomem *p)
 {
+	u32 addr = (u32)p;
 	u32 n, byte_enables, data;

 	if (addr >= VMALLOC_START)
@@ -177,15 +181,16 @@ __ixp4xx_readb(u32 addr)
 }

 static inline void
-__ixp4xx_readsb(u32 bus_addr, u8 *vaddr, u32 count)
+__ixp4xx_readsb(const volatile void __iomem *bus_addr, u8 *vaddr, u32 count)
 {
 	while (count--)
 		*vaddr++ = readb(bus_addr);
 }

 static inline unsigned short
-__ixp4xx_readw(u32 addr)
+__ixp4xx_readw(const volatile void __iomem *p)
 {
+	u32 addr = (u32)p;
 	u32 n, byte_enables, data;

 	if (addr >= VMALLOC_START)
@@ -200,15 +205,16 @@ __ixp4xx_readw(u32 addr)
 }

 static inline void
-__ixp4xx_readsw(u32 bus_addr, u16 *vaddr, u32 count)
+__ixp4xx_readsw(const volatile void __iomem *bus_addr, u16 *vaddr, u32 count)
 {
 	while (count--)
 		*vaddr++ = readw(bus_addr);
 }

 static inline unsigned long
-__ixp4xx_readl(u32 addr)
+__ixp4xx_readl(const volatile void __iomem *p)
 {
+	u32 addr = (u32)p;
 	u32 data;

 	if (addr >= VMALLOC_START)
@@ -221,7 +227,7 @@ __ixp4xx_readl(u32 addr)
 }

 static inline void
-__ixp4xx_readsl(u32 bus_addr, u32 *vaddr, u32 count)
+__ixp4xx_readsl(const volatile void __iomem *bus_addr, u32 *vaddr, u32 count)
 {
 	while (count--)
 		*vaddr++ = readl(bus_addr);
@@ -239,7 +245,7 @@ __ixp4xx_readsl(u32 bus_addr, u32 *vaddr, u32 count)
 	eth_copy_and_sum((s),__mem_pci(c),(l),(b))

 static inline int
-check_signature(unsigned long bus_addr, const unsigned char *signature,
+check_signature(const unsigned char __iomem *bus_addr, const unsigned char *signature,
 		int length)
 {
 	int retval = 0;
@@ -389,7 +395,7 @@ __ixp4xx_insl(u32 io_addr, u32 *vaddr, u32 count)
 #define	__is_io_address(p)	(((unsigned long)p >= PIO_OFFSET) && \
 					((unsigned long)p <= (PIO_MASK + PIO_OFFSET)))
 static inline unsigned int
-__ixp4xx_ioread8(void __iomem *addr)
+__ixp4xx_ioread8(const void __iomem *addr)
 {
 	unsigned long port = (unsigned long __force)addr;
 	if (__is_io_address(port))
@@ -398,12 +404,12 @@ __ixp4xx_ioread8(void __iomem *addr)
 #ifndef CONFIG_IXP4XX_INDIRECT_PCI
 		return (unsigned int)__raw_readb(port);
 #else
-		return (unsigned int)__ixp4xx_readb(port);
+		return (unsigned int)__ixp4xx_readb(addr);
 #endif
 }

 static inline void
-__ixp4xx_ioread8_rep(void __iomem *addr, void *vaddr, u32 count)
+__ixp4xx_ioread8_rep(const void __iomem *addr, void *vaddr, u32 count)
 {
 	unsigned long port = (unsigned long __force)addr;
 	if (__is_io_address(port))
@@ -412,12 +418,12 @@ __ixp4xx_ioread8_rep(void __iomem *addr, void *vaddr, u32 count)
 #ifndef CONFIG_IXP4XX_INDIRECT_PCI
 		__raw_readsb(addr, vaddr, count);
 #else
-		__ixp4xx_readsb(port, vaddr, count);
+		__ixp4xx_readsb(addr, vaddr, count);
 #endif
 }

 static inline unsigned int
-__ixp4xx_ioread16(void __iomem *addr)
+__ixp4xx_ioread16(const void __iomem *addr)
 {
 	unsigned long port = (unsigned long __force)addr;
 	if (__is_io_address(port))
@@ -426,12 +432,12 @@ __ixp4xx_ioread16(void __iomem *addr)
 #ifndef CONFIG_IXP4XX_INDIRECT_PCI
 		return	le16_to_cpu(__raw_readw((u32)port));
 #else
-		return (unsigned int)__ixp4xx_readw((u32)port);
+		return (unsigned int)__ixp4xx_readw(addr);
 #endif
 }

 static inline void
-__ixp4xx_ioread16_rep(void __iomem *addr, void *vaddr, u32 count)
+__ixp4xx_ioread16_rep(const void __iomem *addr, void *vaddr, u32 count)
 {
 	unsigned long port = (unsigned long __force)addr;
 	if (__is_io_address(port))
@@ -440,12 +446,12 @@ __ixp4xx_ioread16_rep(void __iomem *addr, void *vaddr, u32 count)
 #ifndef CONFIG_IXP4XX_INDIRECT_PCI
 		__raw_readsw(addr, vaddr, count);
 #else
-		__ixp4xx_readsw(port, vaddr, count);
+		__ixp4xx_readsw(addr, vaddr, count);
 #endif
 }

 static inline unsigned int
-__ixp4xx_ioread32(void __iomem *addr)
+__ixp4xx_ioread32(const void __iomem *addr)
 {
 	unsigned long port = (unsigned long __force)addr;
 	if (__is_io_address(port))
@@ -454,13 +460,13 @@ __ixp4xx_ioread32(void __iomem *addr)
 #ifndef CONFIG_IXP4XX_INDIRECT_PCI
 		return le32_to_cpu(__raw_readl((u32)port));
 #else
-		return (unsigned int)__ixp4xx_readl((u32)port);
+		return (unsigned int)__ixp4xx_readl(addr);
 #endif
 	}
 }

 static inline void
-__ixp4xx_ioread32_rep(void __iomem *addr, void *vaddr, u32 count)
+__ixp4xx_ioread32_rep(const void __iomem *addr, void *vaddr, u32 count)
 {
 	unsigned long port = (unsigned long __force)addr;
 	if (__is_io_address(port))
@@ -469,7 +475,7 @@ __ixp4xx_ioread32_rep(void __iomem *addr, void *vaddr, u32 count)
 #ifndef CONFIG_IXP4XX_INDIRECT_PCI
 		__raw_readsl(addr, vaddr, count);
 #else
-		__ixp4xx_readsl(port, vaddr, count);
+		__ixp4xx_readsl(addr, vaddr, count);
 #endif
 }

@@ -483,7 +489,7 @@ __ixp4xx_iowrite8(u8 value, void __iomem *addr)
 #ifndef CONFIG_IXP4XX_INDIRECT_PCI
 		__raw_writeb(value, port);
 #else
-		__ixp4xx_writeb(value, port);
+		__ixp4xx_writeb(value, addr);
 #endif
 }

@@ -497,7 +503,7 @@ __ixp4xx_iowrite8_rep(void __iomem *addr, const void *vaddr, u32 count)
 #ifndef CONFIG_IXP4XX_INDIRECT_PCI
 		__raw_writesb(addr, vaddr, count);
 #else
-		__ixp4xx_writesb(port, vaddr, count);
+		__ixp4xx_writesb(addr, vaddr, count);
 #endif
 }

@@ -511,7 +517,7 @@ __ixp4xx_iowrite16(u16 value, void __iomem *addr)
 #ifndef CONFIG_IXP4XX_INDIRECT_PCI
 		__raw_writew(cpu_to_le16(value), addr);
 #else
-		__ixp4xx_writew(value, port);
+		__ixp4xx_writew(value, addr);
 #endif
 }

@@ -525,7 +531,7 @@ __ixp4xx_iowrite16_rep(void __iomem *addr, const void *vaddr, u32 count)
 #ifndef CONFIG_IXP4XX_INDIRECT_PCI
 		__raw_writesw(addr, vaddr, count);
 #else
-		__ixp4xx_writesw(port, vaddr, count);
+		__ixp4xx_writesw(addr, vaddr, count);
 #endif
 }

@@ -539,7 +545,7 @@ __ixp4xx_iowrite32(u32 value, void __iomem *addr)
 #ifndef CONFIG_IXP4XX_INDIRECT_PCI
 		__raw_writel(cpu_to_le32(value), port);
 #else
-		__ixp4xx_writel(value, port);
+		__ixp4xx_writel(value, addr);
 #endif
 }

@@ -553,7 +559,7 @@ __ixp4xx_iowrite32_rep(void __iomem *addr, const void *vaddr, u32 count)
 #ifndef CONFIG_IXP4XX_INDIRECT_PCI
 		__raw_writesl(addr, vaddr, count);
 #else
-		__ixp4xx_writesl(port, vaddr, count);
+		__ixp4xx_writesl(addr, vaddr, count);
 #endif
 }
--
cgit v1.2.3-70-g09d2
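
The practical effect of the new prototypes: the cookie returned by
ioremap() has type void __iomem *, so it can now be passed to the
readl()/writel() family without a cast, and gcc (and sparse's address
space checking) can catch accidental pointer/integer mixing.  A
hypothetical caller, just to show the types lining up -- the device
address, register offsets and values below are made up for illustration:

    #include <asm/io.h>

    #define EXAMPLE_CTRL	0x04	/* made-up register offsets */
    #define EXAMPLE_STATUS	0x08

    static u32 example_read_status(void)
    {
    	void __iomem *regs = ioremap(0x48000000, 0x100); /* made-up address */
    	u32 status;

    	if (!regs)
    		return 0;

    	writel(1, regs + EXAMPLE_CTRL);		/* __iomem pointer, not a */
    	status = readl(regs + EXAMPLE_STATUS);	/* bare u32: type-checks  */

    	iounmap(regs);
    	return status;
    }
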
From 7e86df273292b3a88c14b9aed1006cddac2b4c23 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Wed, 2 Nov 2005 15:09:31 +0000
Subject: [ARM] Fix ARM rwlock implementations

fb1c8f93d869b34cacb8b8932e2b83d96a19d720 broke the ARM rwlock code
since it only partially updated the rwlock implementation.  Properly
update it.

Signed-off-by: Russell King
---
 include/asm-arm/spinlock.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'include')

diff --git a/include/asm-arm/spinlock.h b/include/asm-arm/spinlock.h
index cb4906b4555..6ed4f916b16 100644
--- a/include/asm-arm/spinlock.h
+++ b/include/asm-arm/spinlock.h
@@ -80,7 +80,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  */
 #define rwlock_is_locked(x)	(*((volatile unsigned int *)(x)) != 0)

-static inline void __raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
 	unsigned long tmp;

@@ -97,7 +97,7 @@ static inline void __raw_write_lock(rwlock_t *rw)
 	smp_mb();
 }

-static inline int __raw_write_trylock(rwlock_t *rw)
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
 {
 	unsigned long tmp;

@@ -157,7 +157,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 	smp_mb();
 }

-static inline void __raw_read_unlock(rwlock_t *rw)
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;
--
cgit v1.2.3-70-g09d2
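
Background for the rename: fb1c8f93 split the lock types so that the
architecture supplies raw_rwlock_t (the bare lock word) and the generic
layer wraps it in rwlock_t along with optional debugging fields.  The
arch-level __raw_* operations must therefore take raw_rwlock_t; three of
the ARM functions had been left taking the old type.  A sketch of the
relationship, plus the write-trylock as it reads with the corrected type
-- field names and asm follow the 2.6.14-era ARM code from memory, so
treat the details as approximate rather than verbatim:

    /* Type split introduced by fb1c8f93 (names approximate): */
    typedef struct {
    	volatile unsigned int lock;
    } raw_rwlock_t;			/* arch-owned lock word */

    typedef struct {
    	raw_rwlock_t	raw_lock;	/* generic rwlock_t wraps it... */
    	/* ...plus optional CONFIG_DEBUG_SPINLOCK fields */
    } rwlock_t;

    /* The arch op operates on the raw type only: */
    static inline int __raw_write_trylock(raw_rwlock_t *rw)
    {
    	unsigned long tmp;

    	__asm__ __volatile__(
    "	ldrex	%0, [%1]\n"	/* load lock word, mark exclusive  */
    "	teq	%0, #0\n"	/* only claim the lock if free     */
    "	strexeq	%0, %2, [%1]"	/* try to store the writer bit     */
    	: "=&r" (tmp)
    	: "r" (&rw->lock), "r" (0x80000000)
    	: "cc");

    	if (tmp == 0) {		/* exclusive store succeeded */
    		smp_mb();
    		return 1;
    	}
    	return 0;
    }
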
From 71f512e89704f5aa6fc0b97e4a719184080b8938 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Wed, 2 Nov 2005 21:51:40 +0000
Subject: [ARM SMP] Track CPU idle threads

Track the idle thread task_struct for each CPU.

Signed-off-by: Russell King
---
 arch/arm/kernel/smp.c | 20 +++++++++++++-------
 include/asm-arm/cpu.h |  1 +
 2 files changed, 14 insertions(+), 7 deletions(-)

(limited to 'include')

diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 82616494574..45877f5d571 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -80,19 +80,23 @@ static DEFINE_SPINLOCK(smp_call_function_lock);

 int __cpuinit __cpu_up(unsigned int cpu)
 {
-	struct task_struct *idle;
+	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
+	struct task_struct *idle = ci->idle;
 	pgd_t *pgd;
 	pmd_t *pmd;
 	int ret;

 	/*
-	 * Spawn a new process manually. Grab a pointer to
-	 * its task struct so we can mess with it
+	 * Spawn a new process manually, if not already done.
+	 * Grab a pointer to its task struct so we can mess with it
 	 */
-	idle = fork_idle(cpu);
-	if (IS_ERR(idle)) {
-		printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
-		return PTR_ERR(idle);
+	if (!idle) {
+		idle = fork_idle(cpu);
+		if (IS_ERR(idle)) {
+			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
+			return PTR_ERR(idle);
+		}
+		ci->idle = idle;
 	}

 	/*
@@ -236,6 +240,8 @@ void __init smp_prepare_boot_cpu(void)
 {
 	unsigned int cpu = smp_processor_id();

+	per_cpu(cpu_data, cpu).idle = current;
+
 	cpu_set(cpu, cpu_possible_map);
 	cpu_set(cpu, cpu_present_map);
 	cpu_set(cpu, cpu_online_map);
diff --git a/include/asm-arm/cpu.h b/include/asm-arm/cpu.h
index fcbdd40cb66..751bc746207 100644
--- a/include/asm-arm/cpu.h
+++ b/include/asm-arm/cpu.h
@@ -16,6 +16,7 @@
 struct cpuinfo_arm {
 	struct cpu	cpu;
 #ifdef CONFIG_SMP
+	struct task_struct *idle;
 	unsigned int	loops_per_jiffy;
 #endif
 };
--
cgit v1.2.3-70-g09d2
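
The point of caching the idle task: fork_idle() may only be called once
per CPU, but with CPU hotplug (added by the next patch) __cpu_up() can run
again every time a CPU is brought back online.  Stashing the task_struct
in cpuinfo_arm lets the re-online path reuse the existing idle thread.
The logic above, distilled into a stand-alone helper -- hypothetical, for
illustration only, not a function the patch itself adds:

    #include <linux/err.h>	/* IS_ERR() */
    #include <linux/sched.h>	/* struct task_struct, fork_idle() */
    #include <asm/cpu.h>	/* struct cpuinfo_arm, cpu_data */

    static struct task_struct *arm_get_idle_task(unsigned int cpu)
    {
    	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);

    	if (!ci->idle) {
    		/* first bring-up of this CPU: create its idle thread */
    		struct task_struct *idle = fork_idle(cpu);

    		if (IS_ERR(idle))
    			return idle;	/* caller checks with IS_ERR() */
    		ci->idle = idle;	/* cache for later re-onlining */
    	}
    	return ci->idle;		/* later onlines reuse the thread */
    }
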
From a054a811597a17ffbe92bc4db04a4dc2f1b1ea55 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Wed, 2 Nov 2005 22:24:33 +0000
Subject: [ARM SMP] Add hotplug CPU infrastructure

This patch adds the infrastructure to support hotplug CPU on ARM
platforms.

Signed-off-by: Russell King
---
 arch/arm/Kconfig          |  7 ++++
 arch/arm/kernel/irq.c     | 31 +++++++++++++++++
 arch/arm/kernel/process.c |  9 +++++
 arch/arm/kernel/smp.c     | 85 +++++++++++++++++++++++++++++++++++++++++++++++
 include/asm-arm/irq.h     |  1 +
 include/asm-arm/smp.h     | 10 ++++++
 6 files changed, 143 insertions(+)

(limited to 'include')

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index dc6d8342e5e..6b12d71978d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -349,6 +349,13 @@ config NR_CPUS
 	depends on SMP
 	default "4"

+config HOTPLUG_CPU
+	bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
+	depends on SMP && HOTPLUG && EXPERIMENTAL
+	help
+	  Say Y here to experiment with turning CPUs off and on.  CPUs
+	  can be controlled through /sys/devices/system/cpu.
+
 config PREEMPT
 	bool "Preemptible Kernel (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 3284118f356..9def4404e1f 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -1050,3 +1050,34 @@ static int __init noirqdebug_setup(char *str)
 }

 __setup("noirqdebug", noirqdebug_setup);
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
+{
+	unsigned int i, cpu = smp_processor_id();
+
+	for (i = 0; i < NR_IRQS; i++) {
+		struct irqdesc *desc = irq_desc + i;
+
+		if (desc->cpu == cpu) {
+			unsigned int newcpu = any_online_cpu(desc->affinity);
+
+			if (newcpu == NR_CPUS) {
+				if (printk_ratelimit())
+					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
+					       i, cpu);
+
+				cpus_setall(desc->affinity);
+				newcpu = any_online_cpu(desc->affinity);
+			}
+
+			route_irq(desc, i, newcpu);
+		}
+	}
+}
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 409db6d5ec9..ba298277bec 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -26,6 +26,7 @@
 #include
 #include
 #include
+#include

 #include
 #include
@@ -105,6 +106,14 @@ void cpu_idle(void)
 	/* endless idle loop with no priority at all */
 	while (1) {
 		void (*idle)(void) = pm_idle;
+
+#ifdef CONFIG_HOTPLUG_CPU
+		if (cpu_is_offline(smp_processor_id())) {
+			leds_event(led_idle_start);
+			cpu_die();
+		}
+#endif
+
 		if (!idle)
 			idle = default_idle;
 		preempt_disable();
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index b40915dcd53..edb5a406922 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -159,6 +159,91 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	return ret;
 }

+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpuexit __cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct task_struct *p;
+	int ret;
+
+	ret = mach_cpu_disable(cpu);
+	if (ret)
+		return ret;
+
+	/*
+	 * Take this CPU offline.  Once we clear this, we can't return,
+	 * and we must not schedule until we're ready to give up the cpu.
+	 */
+	cpu_clear(cpu, cpu_online_map);
+
+	/*
+	 * OK - migrate IRQs away from this CPU
+	 */
+	migrate_irqs();
+
+	/*
+	 * Flush user cache and TLB mappings, and then remove this CPU
+	 * from the vm mask set of all processes.
+	 */
+	flush_cache_all();
+	local_flush_tlb_all();
+
+	read_lock(&tasklist_lock);
+	for_each_process(p) {
+		if (p->mm)
+			cpu_clear(cpu, p->mm->cpu_vm_mask);
+	}
+	read_unlock(&tasklist_lock);
+
+	return 0;
+}
+
+/*
+ * called on the thread which is asking for a CPU to be shutdown -
+ * waits until shutdown has completed, or it is timed out.
+ */
+void __cpuexit __cpu_die(unsigned int cpu)
+{
+	if (!platform_cpu_kill(cpu))
+		printk("CPU%u: unable to kill\n", cpu);
+}
+
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller.  This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void __cpuexit cpu_die(void)
+{
+	unsigned int cpu = smp_processor_id();
+
+	local_irq_disable();
+	idle_task_exit();
+
+	/*
+	 * actual CPU shutdown procedure is at least platform (if not
+	 * CPU) specific
+	 */
+	platform_cpu_die(cpu);
+
+	/*
+	 * Do not return to the idle loop - jump back to the secondary
+	 * cpu initialisation.  There's some initialisation which needs
+	 * to be repeated to undo the effects of taking the CPU offline.
+	 */
+	__asm__("mov	sp, %0\n"
+	"	b	secondary_start_kernel"
+		:
+		: "r" ((void *)current->thread_info + THREAD_SIZE - 8));
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
 /*
  * This is the secondary CPU boot entry. We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
diff --git a/include/asm-arm/irq.h b/include/asm-arm/irq.h
index f97912fbb10..59975ee43cf 100644
--- a/include/asm-arm/irq.h
+++ b/include/asm-arm/irq.h
@@ -47,5 +47,6 @@ struct irqaction;
 struct pt_regs;
 int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);

+extern void migrate_irqs(void);
 #endif
diff --git a/include/asm-arm/smp.h b/include/asm-arm/smp.h
index dbb4d859c58..551cd3c3093 100644
--- a/include/asm-arm/smp.h
+++ b/include/asm-arm/smp.h
@@ -66,4 +66,14 @@ struct secondary_data {
 };
 extern struct secondary_data secondary_data;

+extern int __cpu_disable(void);
+extern int mach_cpu_disable(unsigned int cpu);
+
+extern void __cpu_die(unsigned int cpu);
+extern void cpu_die(void);
+
+extern void platform_cpu_die(unsigned int cpu);
+extern int platform_cpu_kill(unsigned int cpu);
+extern void platform_cpu_enable(unsigned int cpu);
+
 #endif /* ifndef __ASM_ARM_SMP_H */
--
cgit v1.2.3-70-g09d2
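
The smp.h declarations above are the contract between this generic code
and each platform port: mach_cpu_disable() can veto the unplug,
platform_cpu_die() parks or powers off the dying CPU, and
platform_cpu_kill() is the survivor-side confirmation checked by
__cpu_die().  A hypothetical minimal port -- the names follow the
declarations above, but the policy, the release flag and the
wait-for-interrupt parking are illustrative inventions, not taken from
any real machine:

    #include <linux/errno.h>
    #include <asm/smp.h>
    #include <asm/proc-fns.h>	/* cpu_do_idle() */

    static volatile unsigned int example_release_flags;	/* hypothetical */

    static int example_cpu_released(unsigned int cpu)
    {
    	return example_release_flags & (1 << cpu);	/* hypothetical wake test */
    }

    int mach_cpu_disable(unsigned int cpu)
    {
    	/* e.g. keep CPU0 online because it owns the board's IRQs */
    	return cpu == 0 ? -EPERM : 0;
    }

    int platform_cpu_kill(unsigned int cpu)
    {
    	return 1;	/* nothing to do on the survivor: report success */
    }

    void platform_cpu_die(unsigned int cpu)
    {
    	/*
    	 * Park the dying CPU in low-power wait-for-interrupt until it
    	 * is released; when this returns, cpu_die() jumps back into
    	 * secondary_start_kernel() to re-initialise the CPU.
    	 */
    	while (!example_cpu_released(cpu))
    		cpu_do_idle();
    }
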