Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile        |   2
-rw-r--r--  arch/arm/kernel/entry-armv.S    |   3
-rw-r--r--  arch/arm/kernel/entry-common.S  |   3
-rw-r--r--  arch/arm/kernel/process.c       |   2
-rw-r--r--  arch/arm/kernel/signal.c        |   8
-rw-r--r--  arch/arm/kernel/smp.c           | 131
-rw-r--r--  arch/arm/kernel/smp_scu.c       |  48
-rw-r--r--  arch/arm/kernel/smp_twd.c       | 175
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S   |   2
9 files changed, 334 insertions, 40 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 11a5197a221..ff89d0b3abc 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -22,6 +22,8 @@ obj-$(CONFIG_ARTHUR)		+= arthur.o
 obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
 obj-$(CONFIG_PCI)		+= bios32.o isa.o
 obj-$(CONFIG_SMP)		+= smp.o
+obj-$(CONFIG_HAVE_ARM_SCU)	+= smp_scu.o
+obj-$(CONFIG_HAVE_ARM_TWD)	+= smp_twd.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o kprobes-decode.o
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 83b1da6b7ba..fc8af43c500 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -482,6 +482,9 @@ __und_usr:
 	subeq	r4, r2, #4		@ ARM instr at LR - 4
 	subne	r4, r2, #2		@ Thumb instr at LR - 2
 1:	ldreqt	r0, [r4]
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	reveq	r0, r0			@ little endian instruction
+#endif
 	beq	call_fpe
 	@ Thumb instruction
 #if __LINUX_ARM_ARCH__ >= 7
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index b55cb033180..366e5097a41 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -210,6 +210,9 @@ ENTRY(vector_swi)
  A710(	teq	ip, #0x0f000000				)
  A710(	bne	.Larm710bug				)
 #endif
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	rev	r10, r10		@ little endian instruction
+#endif
 
 #elif defined(CONFIG_AEABI)
 
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index c3265a2e7cd..1585423699e 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -365,7 +365,7 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 	regs.ARM_r2 = (unsigned long)fn;
 	regs.ARM_r3 = (unsigned long)do_exit;
 	regs.ARM_pc = (unsigned long)kernel_thread_helper;
-	regs.ARM_cpsr = SVC_MODE;
+	regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE;
 
 	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
 }
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 80b8b5c7e07..442b87476f9 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -426,9 +426,13 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
 	 */
 	thumb = handler & 1;
 
-	if (thumb)
+	if (thumb) {
 		cpsr |= PSR_T_BIT;
-	else
+#if __LINUX_ARM_ARCH__ >= 7
+		/* clear the If-Then Thumb-2 execution state */
+		cpsr &= ~PSR_IT_MASK;
+#endif
+	} else
 		cpsr &= ~PSR_T_BIT;
 	}
 #endif
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 6014dfd22af..de885fd256c 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -22,16 +22,20 @@
 #include <linux/smp.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
+#include <linux/percpu.h>
+#include <linux/clockchips.h>
 
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
+#include <asm/cputype.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
+#include <asm/localtimer.h>
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -163,7 +167,7 @@ int __cpuexit __cpu_disable(void)
 	 * Take this CPU offline.  Once we clear this, we can't return,
 	 * and we must not schedule until we're ready to give up the cpu.
 	 */
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 
 	/*
 	 * OK - migrate IRQs away from this CPU
@@ -274,9 +278,9 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	local_fiq_enable();
 
 	/*
-	 * Setup local timer for this CPU.
+	 * Setup the percpu timer for this CPU.
 	 */
-	local_timer_setup();
+	percpu_timer_setup();
 
 	calibrate_delay();
 
@@ -285,7 +289,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	/*
 	 * OK, now it's safe to let the boot CPU continue
 	 */
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
 
 	/*
 	 * OK, it's off to the idle thread for us
@@ -383,10 +387,16 @@ void show_local_irqs(struct seq_file *p)
 	seq_putc(p, '\n');
 }
 
+/*
+ * Timer (local or broadcast) support
+ */
+static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
+
 static void ipi_timer(void)
 {
+	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
 	irq_enter();
-	local_timer_interrupt();
+	evt->event_handler(evt);
 	irq_exit();
 }
 
@@ -405,6 +415,42 @@ asmlinkage void __exception do_local_timer(struct pt_regs *regs)
 }
 #endif
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+static void smp_timer_broadcast(const struct cpumask *mask)
+{
+	send_ipi_message(mask, IPI_TIMER);
+}
+
+static void broadcast_timer_set_mode(enum clock_event_mode mode,
+	struct clock_event_device *evt)
+{
+}
+
+static void local_timer_setup(struct clock_event_device *evt)
+{
+	evt->name	= "dummy_timer";
+	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
+			  CLOCK_EVT_FEAT_PERIODIC |
+			  CLOCK_EVT_FEAT_DUMMY;
+	evt->rating	= 400;
+	evt->mult	= 1;
+	evt->set_mode	= broadcast_timer_set_mode;
+	evt->broadcast	= smp_timer_broadcast;
+
+	clockevents_register_device(evt);
+}
+#endif
+
+void __cpuinit percpu_timer_setup(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
+
+	evt->cpumask = cpumask_of(cpu);
+
+	local_timer_setup(evt);
+}
+
 static DEFINE_SPINLOCK(stop_lock);
 
@@ -417,7 +463,7 @@ static void ipi_cpu_stop(unsigned int cpu)
 	dump_stack();
 	spin_unlock(&stop_lock);
 
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 
 	local_fiq_disable();
 	local_irq_disable();
@@ -501,11 +547,6 @@ void smp_send_reschedule(int cpu)
 	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
-void smp_timer_broadcast(const struct cpumask *mask)
-{
-	send_ipi_message(mask, IPI_TIMER);
-}
-
 void smp_send_stop(void)
 {
 	cpumask_t mask = cpu_online_map;
@@ -545,6 +586,12 @@ struct tlb_args {
 	unsigned long ta_end;
 };
 
+/* all SMP configurations have the extended CPUID registers */
+static inline int tlb_ops_need_broadcast(void)
+{
+	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
+}
+
 static inline void ipi_flush_tlb_all(void *ignored)
 {
 	local_flush_tlb_all();
@@ -587,51 +634,61 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+	if (tlb_ops_need_broadcast())
+		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+	else
+		local_flush_tlb_all();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+	if (tlb_ops_need_broadcast())
+		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+	else
+		local_flush_tlb_mm(mm);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 {
-	struct tlb_args ta;
-
-	ta.ta_vma = vma;
-	ta.ta_start = uaddr;
-
-	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_vma = vma;
+		ta.ta_start = uaddr;
+		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	} else
+		local_flush_tlb_page(vma, uaddr);
 }
 
 void flush_tlb_kernel_page(unsigned long kaddr)
 {
-	struct tlb_args ta;
-
-	ta.ta_start = kaddr;
-
-	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_start = kaddr;
+		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
+	} else
+		local_flush_tlb_kernel_page(kaddr);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
                      unsigned long start, unsigned long end)
 {
-	struct tlb_args ta;
-
-	ta.ta_vma = vma;
-	ta.ta_start = start;
-	ta.ta_end = end;
-
-	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_vma = vma;
+		ta.ta_start = start;
+		ta.ta_end = end;
+		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
	} else
+		local_flush_tlb_range(vma, start, end);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	struct tlb_args ta;
-
-	ta.ta_start = start;
-	ta.ta_end = end;
-
-	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_start = start;
+		ta.ta_end = end;
+		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+	} else
+		local_flush_tlb_kernel_range(start, end);
 }
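Note (not part of the patch): the new tlb_ops_need_broadcast() helper keys off bits [15:12] of ID_MMFR3, the maintenance-broadcast field; only when that field is below 2 does the kernel keep using IPI-based TLB maintenance, otherwise the hardware broadcasts the operations and a local flush is enough. Below is a minimal user-space sketch of the same bit test; the sample register value and the main() harness are made up for illustration, since the real code reads the register with read_cpuid_ext(CPUID_EXT_MMFR3) on the CPU itself.

#include <stdio.h>
#include <stdint.h>

/*
 * Same test as tlb_ops_need_broadcast() in the patch: ID_MMFR3[15:12]
 * describes maintenance-operation broadcasting.  A value below 2 means
 * TLB maintenance is not broadcast in hardware, so IPIs are required.
 */
static int tlb_ops_need_broadcast(uint32_t id_mmfr3)
{
	return ((id_mmfr3 >> 12) & 0xf) < 2;
}

int main(void)
{
	uint32_t sample = 0x00002211;	/* hypothetical ID_MMFR3 value */

	printf("maintenance broadcast field: %u\n",
	       (unsigned)((sample >> 12) & 0xf));
	printf("TLB IPI broadcast needed: %s\n",
	       tlb_ops_need_broadcast(sample) ? "yes" : "no");
	return 0;
}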
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
new file mode 100644
index 00000000000..d3831f616ee
--- /dev/null
+++ b/arch/arm/kernel/smp_scu.c
@@ -0,0 +1,48 @@
+/*
+ * linux/arch/arm/kernel/smp_scu.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include <asm/smp_scu.h>
+#include <asm/cacheflush.h>
+
+#define SCU_CTRL		0x00
+#define SCU_CONFIG		0x04
+#define SCU_CPU_STATUS		0x08
+#define SCU_INVALIDATE		0x0c
+#define SCU_FPGA_REVISION	0x10
+
+/*
+ * Get the number of CPU cores from the SCU configuration
+ */
+unsigned int __init scu_get_core_count(void __iomem *scu_base)
+{
+	unsigned int ncores = __raw_readl(scu_base + SCU_CONFIG);
+	return (ncores & 0x03) + 1;
+}
+
+/*
+ * Enable the SCU
+ */
+void __init scu_enable(void __iomem *scu_base)
+{
+	u32 scu_ctrl;
+
+	scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
+	scu_ctrl |= 1;
+	__raw_writel(scu_ctrl, scu_base + SCU_CTRL);
+
+	/*
+	 * Ensure that the data accessed by CPU0 before the SCU was
+	 * initialised is visible to the other CPUs.
+	 */
+	flush_cache_all();
+}
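Note (illustration only): a rough sketch of how a platform's SMP bring-up code might consume the new SCU helpers. PLAT_SCU_PHYS_BASE, scu_base and the two plat_* functions are hypothetical names; only scu_get_core_count() and scu_enable() come from this patch, and the patch leaves mapping the SCU and calling these helpers to the platform code.

#include <linux/init.h>
#include <linux/io.h>
#include <linux/smp.h>

#include <asm/smp_scu.h>

#define PLAT_SCU_PHYS_BASE	0x1f000000	/* made-up SCU address */

static void __iomem *scu_base;

/* discover how many cores this SoC has and mark them possible */
void __init plat_smp_init_cpus(void)
{
	unsigned int i, ncores;

	scu_base = ioremap(PLAT_SCU_PHYS_BASE, 0x100);
	ncores = scu_base ? scu_get_core_count(scu_base) : 1;

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
}

/* enable coherency before any secondary CPU is brought up */
void __init plat_smp_prepare_cpus(unsigned int max_cpus)
{
	scu_enable(scu_base);
}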
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
new file mode 100644
index 00000000000..d8c88c633c6
--- /dev/null
+++ b/arch/arm/kernel/smp_twd.c
@@ -0,0 +1,175 @@
+/*
+ * linux/arch/arm/kernel/smp_twd.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/smp.h>
+#include <linux/jiffies.h>
+#include <linux/clockchips.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <asm/smp_twd.h>
+#include <asm/hardware/gic.h>
+
+#define TWD_TIMER_LOAD			0x00
+#define TWD_TIMER_COUNTER		0x04
+#define TWD_TIMER_CONTROL		0x08
+#define TWD_TIMER_INTSTAT		0x0C
+
+#define TWD_WDOG_LOAD			0x20
+#define TWD_WDOG_COUNTER		0x24
+#define TWD_WDOG_CONTROL		0x28
+#define TWD_WDOG_INTSTAT		0x2C
+#define TWD_WDOG_RESETSTAT		0x30
+#define TWD_WDOG_DISABLE		0x34
+
+#define TWD_TIMER_CONTROL_ENABLE	(1 << 0)
+#define TWD_TIMER_CONTROL_ONESHOT	(0 << 1)
+#define TWD_TIMER_CONTROL_PERIODIC	(1 << 1)
+#define TWD_TIMER_CONTROL_IT_ENABLE	(1 << 2)
+
+/* set up by the platform code */
+void __iomem *twd_base;
+
+static unsigned long twd_timer_rate;
+
+static void twd_set_mode(enum clock_event_mode mode,
+			struct clock_event_device *clk)
+{
+	unsigned long ctrl;
+
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		/* timer load already set up */
+		ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
+			| TWD_TIMER_CONTROL_PERIODIC;
+		break;
+	case CLOCK_EVT_MODE_ONESHOT:
+		/* period set, and timer enabled in 'next_event' hook */
+		ctrl = TWD_TIMER_CONTROL_IT_ENABLE | TWD_TIMER_CONTROL_ONESHOT;
+		break;
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+	default:
+		ctrl = 0;
+	}
+
+	__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+}
+
+static int twd_set_next_event(unsigned long evt,
+			struct clock_event_device *unused)
+{
+	unsigned long ctrl = __raw_readl(twd_base + TWD_TIMER_CONTROL);
+
+	ctrl |= TWD_TIMER_CONTROL_ENABLE;
+
+	__raw_writel(evt, twd_base + TWD_TIMER_COUNTER);
+	__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+
+	return 0;
+}
+
+/*
+ * local_timer_ack: checks for a local timer interrupt.
+ *
+ * If a local timer interrupt has occurred, acknowledge and return 1.
+ * Otherwise, return 0.
+ */
+int twd_timer_ack(void)
+{
+	if (__raw_readl(twd_base + TWD_TIMER_INTSTAT)) {
+		__raw_writel(1, twd_base + TWD_TIMER_INTSTAT);
+		return 1;
+	}
+
+	return 0;
+}
+
+static void __cpuinit twd_calibrate_rate(void)
+{
+	unsigned long load, count;
+	u64 waitjiffies;
+
+	/*
+	 * If this is the first time round, we need to work out how fast
+	 * the timer ticks
+	 */
+	if (twd_timer_rate == 0) {
+		printk(KERN_INFO "Calibrating local timer... ");
+
+		/* Wait for a tick to start */
+		waitjiffies = get_jiffies_64() + 1;
+
+		while (get_jiffies_64() < waitjiffies)
+			udelay(10);
+
+		/* OK, now the tick has started, let's get the timer going */
+		waitjiffies += 5;
+
+		/* enable, no interrupt or reload */
+		__raw_writel(0x1, twd_base + TWD_TIMER_CONTROL);
+
+		/* maximum value */
+		__raw_writel(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);
+
+		while (get_jiffies_64() < waitjiffies)
+			udelay(10);
+
+		count = __raw_readl(twd_base + TWD_TIMER_COUNTER);
+
+		twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
+
+		printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
+			(twd_timer_rate / 100000) % 100);
+	}
+
+	load = twd_timer_rate / HZ;
+
+	__raw_writel(load, twd_base + TWD_TIMER_LOAD);
+}
+
+/*
+ * Setup the local clock events for a CPU.
+ */
+void __cpuinit twd_timer_setup(struct clock_event_device *clk)
+{
+	unsigned long flags;
+
+	twd_calibrate_rate();
+
+	clk->name = "local_timer";
+	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+	clk->rating = 350;
+	clk->set_mode = twd_set_mode;
+	clk->set_next_event = twd_set_next_event;
+	clk->shift = 20;
+	clk->mult = div_sc(twd_timer_rate, NSEC_PER_SEC, clk->shift);
+	clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
+	clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
+
+	/* Make sure our local interrupt controller has this enabled */
+	local_irq_save(flags);
+	get_irq_chip(clk->irq)->unmask(clk->irq);
+	local_irq_restore(flags);
+
+	clockevents_register_device(clk);
+}
+
+/*
+ * take a local timer down
+ */
+void __cpuexit twd_timer_stop(void)
+{
+	__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+}
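Note (illustration only): smp_twd.c expects the platform to provide twd_base (see the "set up by the platform code" comment) and to register the TWD as each CPU's clock event device from its local_timer_setup() hook, which percpu_timer_setup() in smp.c calls. Below is a sketch of that glue, loosely modelled on the RealView code of the same era; TWD_PHYS_BASE, IRQ_LOCALTIMER and plat_twd_init() are placeholder names, while twd_base and twd_timer_setup() are the interfaces added by this patch.

#include <linux/init.h>
#include <linux/io.h>
#include <linux/clockchips.h>

#include <asm/smp_twd.h>
#include <asm/localtimer.h>

#define TWD_PHYS_BASE	0x1f000600	/* made-up private timer address */
#define IRQ_LOCALTIMER	29		/* TWD PPI; platform specific */

/* map the TWD registers once, early, e.g. from the platform timer init */
void __init plat_twd_init(void)
{
	twd_base = ioremap(TWD_PHYS_BASE, 0x100);
}

/*
 * Called via percpu_timer_setup() on each CPU when local timers are in
 * use: register the TWD as this CPU's clock event device.
 */
void __cpuinit local_timer_setup(struct clock_event_device *evt)
{
	evt->irq = IRQ_LOCALTIMER;
	twd_timer_setup(evt);
}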
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index c90f27250ea..6c077979254 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -141,6 +141,7 @@ SECTIONS
 
 	.data : AT(__data_loc) {
 		_data = .;		/* address in memory */
+		_sdata = .;
 
 		/*
 		 * first, the init task union, aligned
@@ -192,6 +193,7 @@ SECTIONS
 		__bss_start = .;	/* BSS */
 		*(.bss)
 		*(COMMON)
+		__bss_stop = .;
 		_end = .;
 	}
 					/* Stabs debugging sections.	*/