Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile            |   1
-rw-r--r--  arch/arm/kernel/arch_timer.c        | 505
-rw-r--r--  arch/arm/kernel/asm-offsets.c       |  43
-rw-r--r--  arch/arm/kernel/bios32.c            |   9
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c     |  61
-rw-r--r--  arch/arm/kernel/irq.c               |  10
-rw-r--r--  arch/arm/kernel/perf_event.c        |  16
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c    |  51
-rw-r--r--  arch/arm/kernel/perf_event_v6.c     |   4
-rw-r--r--  arch/arm/kernel/perf_event_v7.c     |  18
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c |   2
-rw-r--r--  arch/arm/kernel/process.c           |  13
-rw-r--r--  arch/arm/kernel/psci.c              | 211
-rw-r--r--  arch/arm/kernel/sched_clock.c       |   4
-rw-r--r--  arch/arm/kernel/smp.c               |  47
-rw-r--r--  arch/arm/kernel/smp_scu.c           |   2
-rw-r--r--  arch/arm/kernel/smp_twd.c           |  54
-rw-r--r--  arch/arm/kernel/time.c              |  53
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S       |   6
19 files changed, 433 insertions, 677 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 5bbec7b8183..5f3338eacad 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -82,5 +82,6 @@ obj-$(CONFIG_DEBUG_LL) += debug.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o +obj-$(CONFIG_ARM_PSCI) += psci.o extra-y := $(head-y) vmlinux.lds diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c index c8ef20747ee..d957a51435d 100644 --- a/arch/arm/kernel/arch_timer.c +++ b/arch/arm/kernel/arch_timer.c @@ -9,516 +9,53 @@ * published by the Free Software Foundation. */ #include <linux/init.h> -#include <linux/kernel.h> -#include <linux/delay.h> -#include <linux/device.h> -#include <linux/smp.h> -#include <linux/cpu.h> -#include <linux/jiffies.h> -#include <linux/clockchips.h> -#include <linux/interrupt.h> -#include <linux/of_irq.h> -#include <linux/io.h> +#include <linux/types.h> +#include <linux/errno.h> -#include <asm/cputype.h> #include <asm/delay.h> -#include <asm/localtimer.h> -#include <asm/arch_timer.h> -#include <asm/system_info.h> #include <asm/sched_clock.h> -static unsigned long arch_timer_rate; +#include <clocksource/arm_arch_timer.h> -enum ppi_nr { - PHYS_SECURE_PPI, - PHYS_NONSECURE_PPI, - VIRT_PPI, - HYP_PPI, - MAX_TIMER_PPI -}; - -static int arch_timer_ppi[MAX_TIMER_PPI]; - -static struct clock_event_device __percpu **arch_timer_evt; -static struct delay_timer arch_delay_timer; - -static bool arch_timer_use_virtual = true; - -/* - * Architected system timer support. - */ - -#define ARCH_TIMER_CTRL_ENABLE (1 << 0) -#define ARCH_TIMER_CTRL_IT_MASK (1 << 1) -#define ARCH_TIMER_CTRL_IT_STAT (1 << 2) - -#define ARCH_TIMER_REG_CTRL 0 -#define ARCH_TIMER_REG_FREQ 1 -#define ARCH_TIMER_REG_TVAL 2 - -#define ARCH_TIMER_PHYS_ACCESS 0 -#define ARCH_TIMER_VIRT_ACCESS 1 - -/* - * These register accessors are marked inline so the compiler can - * nicely work out which register we want, and chuck away the rest of - * the code. At least it does so with a recent GCC (4.6.3). 
- */ -static inline void arch_timer_reg_write(const int access, const int reg, u32 val) -{ - if (access == ARCH_TIMER_PHYS_ACCESS) { - switch (reg) { - case ARCH_TIMER_REG_CTRL: - asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val)); - break; - case ARCH_TIMER_REG_TVAL: - asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val)); - break; - } - } - - if (access == ARCH_TIMER_VIRT_ACCESS) { - switch (reg) { - case ARCH_TIMER_REG_CTRL: - asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val)); - break; - case ARCH_TIMER_REG_TVAL: - asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val)); - break; - } - } - - isb(); -} - -static inline u32 arch_timer_reg_read(const int access, const int reg) -{ - u32 val = 0; - - if (access == ARCH_TIMER_PHYS_ACCESS) { - switch (reg) { - case ARCH_TIMER_REG_CTRL: - asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val)); - break; - case ARCH_TIMER_REG_TVAL: - asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val)); - break; - case ARCH_TIMER_REG_FREQ: - asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val)); - break; - } - } - - if (access == ARCH_TIMER_VIRT_ACCESS) { - switch (reg) { - case ARCH_TIMER_REG_CTRL: - asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val)); - break; - case ARCH_TIMER_REG_TVAL: - asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val)); - break; - } - } - - return val; -} - -static inline cycle_t arch_timer_counter_read(const int access) -{ - cycle_t cval = 0; - - if (access == ARCH_TIMER_PHYS_ACCESS) - asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval)); - - if (access == ARCH_TIMER_VIRT_ACCESS) - asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval)); - - return cval; -} - -static inline cycle_t arch_counter_get_cntpct(void) -{ - return arch_timer_counter_read(ARCH_TIMER_PHYS_ACCESS); -} - -static inline cycle_t arch_counter_get_cntvct(void) -{ - return arch_timer_counter_read(ARCH_TIMER_VIRT_ACCESS); -} - -static irqreturn_t inline timer_handler(const int access, - struct clock_event_device *evt) -{ - unsigned long ctrl; - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL); - if (ctrl & ARCH_TIMER_CTRL_IT_STAT) { - ctrl |= ARCH_TIMER_CTRL_IT_MASK; - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl); - evt->event_handler(evt); - return IRQ_HANDLED; - } - - return IRQ_NONE; -} - -static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id) -{ - struct clock_event_device *evt = *(struct clock_event_device **)dev_id; - - return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt); -} - -static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id) -{ - struct clock_event_device *evt = *(struct clock_event_device **)dev_id; - - return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt); -} - -static inline void timer_set_mode(const int access, int mode) -{ - unsigned long ctrl; - switch (mode) { - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL); - ctrl &= ~ARCH_TIMER_CTRL_ENABLE; - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl); - break; - default: - break; - } -} - -static void arch_timer_set_mode_virt(enum clock_event_mode mode, - struct clock_event_device *clk) -{ - timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode); -} - -static void arch_timer_set_mode_phys(enum clock_event_mode mode, - struct clock_event_device *clk) -{ - timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode); -} - -static inline void set_next_event(const int access, unsigned long evt) -{ - unsigned long ctrl; - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL); - ctrl |= 
ARCH_TIMER_CTRL_ENABLE; - ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; - arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt); - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl); -} - -static int arch_timer_set_next_event_virt(unsigned long evt, - struct clock_event_device *unused) -{ - set_next_event(ARCH_TIMER_VIRT_ACCESS, evt); - return 0; -} - -static int arch_timer_set_next_event_phys(unsigned long evt, - struct clock_event_device *unused) -{ - set_next_event(ARCH_TIMER_PHYS_ACCESS, evt); - return 0; -} - -static int __cpuinit arch_timer_setup(struct clock_event_device *clk) -{ - clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP; - clk->name = "arch_sys_timer"; - clk->rating = 450; - if (arch_timer_use_virtual) { - clk->irq = arch_timer_ppi[VIRT_PPI]; - clk->set_mode = arch_timer_set_mode_virt; - clk->set_next_event = arch_timer_set_next_event_virt; - } else { - clk->irq = arch_timer_ppi[PHYS_SECURE_PPI]; - clk->set_mode = arch_timer_set_mode_phys; - clk->set_next_event = arch_timer_set_next_event_phys; - } - - clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL); - - clockevents_config_and_register(clk, arch_timer_rate, - 0xf, 0x7fffffff); - - *__this_cpu_ptr(arch_timer_evt) = clk; - - if (arch_timer_use_virtual) - enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0); - else { - enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0); - if (arch_timer_ppi[PHYS_NONSECURE_PPI]) - enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0); - } - - return 0; -} - -/* Is the optional system timer available? */ -static int local_timer_is_architected(void) -{ - return (cpu_architecture() >= CPU_ARCH_ARMv7) && - ((read_cpuid_ext(CPUID_EXT_PFR1) >> 16) & 0xf) == 1; -} - -static int arch_timer_available(void) -{ - unsigned long freq; - - if (!local_timer_is_architected()) - return -ENXIO; - - if (arch_timer_rate == 0) { - freq = arch_timer_reg_read(ARCH_TIMER_PHYS_ACCESS, - ARCH_TIMER_REG_FREQ); - - /* Check the timer frequency. */ - if (freq == 0) { - pr_warn("Architected timer frequency not available\n"); - return -EINVAL; - } - - arch_timer_rate = freq; - } - - pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n", - arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100, - arch_timer_use_virtual ? "virt" : "phys"); - return 0; -} - -static u32 notrace arch_counter_get_cntpct32(void) -{ - cycle_t cnt = arch_counter_get_cntpct(); - - /* - * The sched_clock infrastructure only knows about counters - * with at most 32bits. Forget about the upper 24 bits for the - * time being... - */ - return (u32)cnt; -} - -static u32 notrace arch_counter_get_cntvct32(void) -{ - cycle_t cnt = arch_counter_get_cntvct(); - - /* - * The sched_clock infrastructure only knows about counters - * with at most 32bits. Forget about the upper 24 bits for the - * time being... - */ - return (u32)cnt; -} - -static cycle_t arch_counter_read(struct clocksource *cs) -{ - /* - * Always use the physical counter for the clocksource. - * CNTHCTL.PL1PCTEN must be set to 1. - */ - return arch_counter_get_cntpct(); -} - -static unsigned long arch_timer_read_current_timer(void) +static unsigned long arch_timer_read_counter_long(void) { - return arch_counter_get_cntpct(); + return arch_timer_read_counter(); } -static cycle_t arch_counter_read_cc(const struct cyclecounter *cc) +static u32 arch_timer_read_counter_u32(void) { - /* - * Always use the physical counter for the clocksource. - * CNTHCTL.PL1PCTEN must be set to 1. 
- */ - return arch_counter_get_cntpct(); + return arch_timer_read_counter(); } -static struct clocksource clocksource_counter = { - .name = "arch_sys_counter", - .rating = 400, - .read = arch_counter_read, - .mask = CLOCKSOURCE_MASK(56), - .flags = CLOCK_SOURCE_IS_CONTINUOUS, -}; - -static struct cyclecounter cyclecounter = { - .read = arch_counter_read_cc, - .mask = CLOCKSOURCE_MASK(56), -}; - -static struct timecounter timecounter; - -struct timecounter *arch_timer_get_timecounter(void) -{ - return &timecounter; -} - -static void __cpuinit arch_timer_stop(struct clock_event_device *clk) -{ - pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n", - clk->irq, smp_processor_id()); - - if (arch_timer_use_virtual) - disable_percpu_irq(arch_timer_ppi[VIRT_PPI]); - else { - disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]); - if (arch_timer_ppi[PHYS_NONSECURE_PPI]) - disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]); - } - - clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk); -} - -static struct local_timer_ops arch_timer_ops __cpuinitdata = { - .setup = arch_timer_setup, - .stop = arch_timer_stop, -}; - -static struct clock_event_device arch_timer_global_evt; +static struct delay_timer arch_delay_timer; -static int __init arch_timer_register(void) +static void __init arch_timer_delay_timer_register(void) { - int err; - int ppi; - - err = arch_timer_available(); - if (err) - goto out; - - arch_timer_evt = alloc_percpu(struct clock_event_device *); - if (!arch_timer_evt) { - err = -ENOMEM; - goto out; - } - - clocksource_register_hz(&clocksource_counter, arch_timer_rate); - cyclecounter.mult = clocksource_counter.mult; - cyclecounter.shift = clocksource_counter.shift; - timecounter_init(&timecounter, &cyclecounter, - arch_counter_get_cntpct()); - - if (arch_timer_use_virtual) { - ppi = arch_timer_ppi[VIRT_PPI]; - err = request_percpu_irq(ppi, arch_timer_handler_virt, - "arch_timer", arch_timer_evt); - } else { - ppi = arch_timer_ppi[PHYS_SECURE_PPI]; - err = request_percpu_irq(ppi, arch_timer_handler_phys, - "arch_timer", arch_timer_evt); - if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) { - ppi = arch_timer_ppi[PHYS_NONSECURE_PPI]; - err = request_percpu_irq(ppi, arch_timer_handler_phys, - "arch_timer", arch_timer_evt); - if (err) - free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], - arch_timer_evt); - } - } - - if (err) { - pr_err("arch_timer: can't register interrupt %d (%d)\n", - ppi, err); - goto out_free; - } - - err = local_timer_register(&arch_timer_ops); - if (err) { - /* - * We couldn't register as a local timer (could be - * because we're on a UP platform, or because some - * other local timer is already present...). Try as a - * global timer instead. - */ - arch_timer_global_evt.cpumask = cpumask_of(0); - err = arch_timer_setup(&arch_timer_global_evt); - } - if (err) - goto out_free_irq; - /* Use the architected timer for the delay loop. 
*/ - arch_delay_timer.read_current_timer = &arch_timer_read_current_timer; - arch_delay_timer.freq = arch_timer_rate; + arch_delay_timer.read_current_timer = arch_timer_read_counter_long; + arch_delay_timer.freq = arch_timer_get_rate(); register_current_timer_delay(&arch_delay_timer); - return 0; - -out_free_irq: - if (arch_timer_use_virtual) - free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt); - else { - free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], - arch_timer_evt); - if (arch_timer_ppi[PHYS_NONSECURE_PPI]) - free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], - arch_timer_evt); - } - -out_free: - free_percpu(arch_timer_evt); -out: - return err; } -static const struct of_device_id arch_timer_of_match[] __initconst = { - { .compatible = "arm,armv7-timer", }, - {}, -}; - int __init arch_timer_of_register(void) { - struct device_node *np; - u32 freq; - int i; - - np = of_find_matching_node(NULL, arch_timer_of_match); - if (!np) { - pr_err("arch_timer: can't find DT node\n"); - return -ENODEV; - } - - /* Try to determine the frequency from the device tree or CNTFRQ */ - if (!of_property_read_u32(np, "clock-frequency", &freq)) - arch_timer_rate = freq; - - for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++) - arch_timer_ppi[i] = irq_of_parse_and_map(np, i); + int ret; - /* - * If no interrupt provided for virtual timer, we'll have to - * stick to the physical timer. It'd better be accessible... - */ - if (!arch_timer_ppi[VIRT_PPI]) { - arch_timer_use_virtual = false; + ret = arch_timer_init(); + if (ret) + return ret; - if (!arch_timer_ppi[PHYS_SECURE_PPI] || - !arch_timer_ppi[PHYS_NONSECURE_PPI]) { - pr_warn("arch_timer: No interrupt available, giving up\n"); - return -EINVAL; - } - } + arch_timer_delay_timer_register(); - return arch_timer_register(); + return 0; } int __init arch_timer_sched_clock_init(void) { - u32 (*cnt32)(void); - int err; - - err = arch_timer_available(); - if (err) - return err; - - if (arch_timer_use_virtual) - cnt32 = arch_counter_get_cntvct32; - else - cnt32 = arch_counter_get_cntpct32; + if (arch_timer_get_rate() == 0) + return -ENXIO; - setup_sched_clock(cnt32, 32, arch_timer_rate); + setup_sched_clock(arch_timer_read_counter_u32, + 32, arch_timer_get_rate()); return 0; } diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index c985b481192..5ce738b4350 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -13,6 +13,9 @@ #include <linux/sched.h> #include <linux/mm.h> #include <linux/dma-mapping.h> +#ifdef CONFIG_KVM_ARM_HOST +#include <linux/kvm_host.h> +#endif #include <asm/cacheflush.h> #include <asm/glue-df.h> #include <asm/glue-pf.h> @@ -146,5 +149,45 @@ int main(void) DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); +#ifdef CONFIG_KVM_ARM_HOST + DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); + DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr)); + DEFINE(VCPU_CP15, offsetof(struct kvm_vcpu, arch.cp15)); + DEFINE(VCPU_VFP_GUEST, offsetof(struct kvm_vcpu, arch.vfp_guest)); + DEFINE(VCPU_VFP_HOST, offsetof(struct kvm_vcpu, arch.vfp_host)); + DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs)); + DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs)); + DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs)); + DEFINE(VCPU_ABT_REGS, offsetof(struct kvm_vcpu, arch.regs.abt_regs)); + DEFINE(VCPU_UND_REGS, offsetof(struct kvm_vcpu, arch.regs.und_regs)); + DEFINE(VCPU_IRQ_REGS, offsetof(struct 
kvm_vcpu, arch.regs.irq_regs)); + DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs)); + DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc)); + DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr)); + DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); + DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.hsr)); + DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.hxfar)); + DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.hpfar)); + DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.hyp_pc)); +#ifdef CONFIG_KVM_ARM_VGIC + DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); + DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr)); + DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr)); + DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr)); + DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr)); + DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr)); + DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr)); + DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr)); + DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr)); +#ifdef CONFIG_KVM_ARM_TIMER + DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl)); + DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval)); + DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff)); + DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled)); +#endif + DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); +#endif + DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); +#endif return 0; } diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index 379cf329239..a1f73b502ef 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -413,7 +413,7 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) return irq; } -static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys) +static int pcibios_init_resources(int busnr, struct pci_sys_data *sys) { int ret; struct pci_host_bridge_window *window; @@ -445,7 +445,7 @@ static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys) return 0; } -static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head) +static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head) { struct pci_sys_data *sys = NULL; int ret; @@ -464,6 +464,9 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head) sys->map_irq = hw->map_irq; INIT_LIST_HEAD(&sys->resources); + if (hw->private_data) + sys->private_data = hw->private_data[nr]; + ret = hw->setup(nr, sys); if (ret > 0) { @@ -493,7 +496,7 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head) } } -void __init pci_common_init(struct hw_pci *hw) +void pci_common_init(struct hw_pci *hw) { struct pci_sys_data *sys; LIST_HEAD(head); diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 5ff2e77782b..5eae53e7a2e 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -28,6 +28,7 @@ #include <linux/perf_event.h> #include <linux/hw_breakpoint.h> #include <linux/smp.h> +#include <linux/cpu_pm.h> #include <asm/cacheflush.h> #include <asm/cputype.h> @@ -35,6 +36,7 @@ #include <asm/hw_breakpoint.h> #include <asm/kdebug.h> #include <asm/traps.h> +#include <asm/hardware/coresight.h> /* Breakpoint currently in use for each BRP. 
*/ static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]); @@ -49,6 +51,9 @@ static int core_num_wrps; /* Debug architecture version. */ static u8 debug_arch; +/* Does debug architecture support OS Save and Restore? */ +static bool has_ossr; + /* Maximum supported watchpoint length. */ static u8 max_watchpoint_len; @@ -903,6 +908,23 @@ static struct undef_hook debug_reg_hook = { .fn = debug_reg_trap, }; +/* Does this core support OS Save and Restore? */ +static bool core_has_os_save_restore(void) +{ + u32 oslsr; + + switch (get_debug_arch()) { + case ARM_DEBUG_ARCH_V7_1: + return true; + case ARM_DEBUG_ARCH_V7_ECP14: + ARM_DBG_READ(c1, c1, 4, oslsr); + if (oslsr & ARM_OSLSR_OSLM0) + return true; + default: + return false; + } +} + static void reset_ctrl_regs(void *unused) { int i, raw_num_brps, err = 0, cpu = smp_processor_id(); @@ -930,11 +952,7 @@ static void reset_ctrl_regs(void *unused) if ((val & 0x1) == 0) err = -EPERM; - /* - * Check whether we implement OS save and restore. - */ - ARM_DBG_READ(c1, c1, 4, val); - if ((val & 0x9) == 0) + if (!has_ossr) goto clear_vcr; break; case ARM_DEBUG_ARCH_V7_1: @@ -955,9 +973,9 @@ static void reset_ctrl_regs(void *unused) /* * Unconditionally clear the OS lock by writing a value - * other than 0xC5ACCE55 to the access register. + * other than CS_LAR_KEY to the access register. */ - ARM_DBG_WRITE(c1, c0, 4, 0); + ARM_DBG_WRITE(c1, c0, 4, ~CS_LAR_KEY); isb(); /* @@ -1015,6 +1033,30 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = { .notifier_call = dbg_reset_notify, }; +#ifdef CONFIG_CPU_PM +static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action, + void *v) +{ + if (action == CPU_PM_EXIT) + reset_ctrl_regs(NULL); + + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = { + .notifier_call = dbg_cpu_pm_notify, +}; + +static void __init pm_init(void) +{ + cpu_pm_register_notifier(&dbg_cpu_pm_nb); +} +#else +static inline void pm_init(void) +{ +} +#endif + static int __init arch_hw_breakpoint_init(void) { debug_arch = get_debug_arch(); @@ -1024,6 +1066,8 @@ static int __init arch_hw_breakpoint_init(void) return 0; } + has_ossr = core_has_os_save_restore(); + /* Determine how many BRPs/WRPs are available. */ core_num_brps = get_num_brps(); core_num_wrps = get_num_wrps(); @@ -1062,8 +1106,9 @@ static int __init arch_hw_breakpoint_init(void) hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT, "breakpoint debug exception"); - /* Register hotplug notifier. */ + /* Register hotplug and PM notifiers. */ register_cpu_notifier(&dbg_reset_nb); + pm_init(); return 0; } arch_initcall(arch_hw_breakpoint_init); diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 896165096d6..8e4ef4c83a7 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -117,6 +117,16 @@ void __init init_IRQ(void) machine_desc->init_irq(); } +#ifdef CONFIG_MULTI_IRQ_HANDLER +void __init set_handle_irq(void (*handle_irq)(struct pt_regs *)) +{ + if (handle_arch_irq) + return; + + handle_arch_irq = handle_irq; +} +#endif + #ifdef CONFIG_SPARSE_IRQ int __init arch_probe_nr_irqs(void) { diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index f9e8657dd24..31e0eb353cd 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -149,12 +149,6 @@ again: static void armpmu_read(struct perf_event *event) { - struct hw_perf_event *hwc = &event->hw; - - /* Don't read disabled counters! 
*/ - if (hwc->idx < 0) - return; - armpmu_event_update(event); } @@ -207,8 +201,6 @@ armpmu_del(struct perf_event *event, int flags) struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; - WARN_ON(idx < 0); - armpmu_stop(event, PERF_EF_UPDATE); hw_events->events[idx] = NULL; clear_bit(idx, hw_events->used_mask); @@ -358,7 +350,7 @@ __hw_perf_event_init(struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; - int mapping, err; + int mapping; mapping = armpmu->map_event(event); @@ -407,14 +399,12 @@ __hw_perf_event_init(struct perf_event *event) local64_set(&hwc->period_left, hwc->sample_period); } - err = 0; if (event->group_leader != event) { - err = validate_group(event); - if (err) + if (validate_group(event) != 0) return -EINVAL; } - return err; + return 0; } static int armpmu_event_init(struct perf_event *event) diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index 5f6620684e2..1f2740e3dbc 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -147,7 +147,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu) cpu_pmu->free_irq = cpu_pmu_free_irq; /* Ensure the PMU has sane values out of reset. */ - if (cpu_pmu && cpu_pmu->reset) + if (cpu_pmu->reset) on_each_cpu(cpu_pmu->reset, cpu_pmu, 1); } @@ -201,48 +201,46 @@ static struct platform_device_id cpu_pmu_plat_device_ids[] = { static int probe_current_pmu(struct arm_pmu *pmu) { int cpu = get_cpu(); - unsigned long cpuid = read_cpuid_id(); - unsigned long implementor = (cpuid & 0xFF000000) >> 24; - unsigned long part_number = (cpuid & 0xFFF0); + unsigned long implementor = read_cpuid_implementor(); + unsigned long part_number = read_cpuid_part_number(); int ret = -ENODEV; pr_info("probing PMU on CPU %d\n", cpu); /* ARM Ltd CPUs. */ - if (0x41 == implementor) { + if (implementor == ARM_CPU_IMP_ARM) { switch (part_number) { - case 0xB360: /* ARM1136 */ - case 0xB560: /* ARM1156 */ - case 0xB760: /* ARM1176 */ + case ARM_CPU_PART_ARM1136: + case ARM_CPU_PART_ARM1156: + case ARM_CPU_PART_ARM1176: ret = armv6pmu_init(pmu); break; - case 0xB020: /* ARM11mpcore */ + case ARM_CPU_PART_ARM11MPCORE: ret = armv6mpcore_pmu_init(pmu); break; - case 0xC080: /* Cortex-A8 */ + case ARM_CPU_PART_CORTEX_A8: ret = armv7_a8_pmu_init(pmu); break; - case 0xC090: /* Cortex-A9 */ + case ARM_CPU_PART_CORTEX_A9: ret = armv7_a9_pmu_init(pmu); break; - case 0xC050: /* Cortex-A5 */ + case ARM_CPU_PART_CORTEX_A5: ret = armv7_a5_pmu_init(pmu); break; - case 0xC0F0: /* Cortex-A15 */ + case ARM_CPU_PART_CORTEX_A15: ret = armv7_a15_pmu_init(pmu); break; - case 0xC070: /* Cortex-A7 */ + case ARM_CPU_PART_CORTEX_A7: ret = armv7_a7_pmu_init(pmu); break; } /* Intel CPUs [xscale]. 
*/ - } else if (0x69 == implementor) { - part_number = (cpuid >> 13) & 0x7; - switch (part_number) { - case 1: + } else if (implementor == ARM_CPU_IMP_INTEL) { + switch (xscale_cpu_arch_version()) { + case ARM_CPU_XSCALE_ARCH_V1: ret = xscale1pmu_init(pmu); break; - case 2: + case ARM_CPU_XSCALE_ARCH_V2: ret = xscale2pmu_init(pmu); break; } @@ -279,17 +277,22 @@ static int cpu_pmu_device_probe(struct platform_device *pdev) } if (ret) { - pr_info("failed to register PMU devices!"); - kfree(pmu); - return ret; + pr_info("failed to probe PMU!"); + goto out_free; } cpu_pmu = pmu; cpu_pmu->plat_device = pdev; cpu_pmu_init(cpu_pmu); - armpmu_register(cpu_pmu, PERF_TYPE_RAW); + ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW); - return 0; + if (!ret) + return 0; + +out_free: + pr_info("failed to register PMU devices!"); + kfree(pmu); + return ret; } static struct platform_driver cpu_pmu_driver = { diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c index 041d0526a28..03664b0e8fa 100644 --- a/arch/arm/kernel/perf_event_v6.c +++ b/arch/arm/kernel/perf_event_v6.c @@ -106,7 +106,7 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -259,7 +259,7 @@ static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 4fbc757d9cf..8c79a9e70b8 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -157,8 +157,8 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS, - [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -282,7 +282,7 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -399,8 +399,8 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, - [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, /* * The prefetch counters don't differentiate between the I @@ -527,8 +527,8 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, - [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -651,8 +651,8 @@ static const 
unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, - [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c index 2b0fe30ec12..63990c42fac 100644 --- a/arch/arm/kernel/perf_event_xscale.c +++ b/arch/arm/kernel/perf_event_xscale.c @@ -83,7 +83,7 @@ static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index c6dec5fc20a..047d3e40e47 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -172,14 +172,9 @@ static void default_idle(void) local_irq_enable(); } -void (*pm_idle)(void) = default_idle; -EXPORT_SYMBOL(pm_idle); - /* - * The idle thread, has rather strange semantics for calling pm_idle, - * but this is what x86 does and we need to do the same, so that - * things like cpuidle get called in the same way. The only difference - * is that we always respect 'hlt_counter' to prevent low power idle. + * The idle thread. + * We always respect 'hlt_counter' to prevent low power idle. */ void cpu_idle(void) { @@ -210,10 +205,10 @@ void cpu_idle(void) } else if (!need_resched()) { stop_critical_timings(); if (cpuidle_idle_call()) - pm_idle(); + default_idle(); start_critical_timings(); /* - * pm_idle functions must always + * default_idle functions must always * return with IRQs enabled. */ WARN_ON(irqs_disabled()); diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c new file mode 100644 index 00000000000..36531643cc2 --- /dev/null +++ b/arch/arm/kernel/psci.c @@ -0,0 +1,211 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2012 ARM Limited + * + * Author: Will Deacon <will.deacon@arm.com> + */ + +#define pr_fmt(fmt) "psci: " fmt + +#include <linux/init.h> +#include <linux/of.h> + +#include <asm/compiler.h> +#include <asm/errno.h> +#include <asm/opcodes-sec.h> +#include <asm/opcodes-virt.h> +#include <asm/psci.h> + +struct psci_operations psci_ops; + +static int (*invoke_psci_fn)(u32, u32, u32, u32); + +enum psci_function { + PSCI_FN_CPU_SUSPEND, + PSCI_FN_CPU_ON, + PSCI_FN_CPU_OFF, + PSCI_FN_MIGRATE, + PSCI_FN_MAX, +}; + +static u32 psci_function_id[PSCI_FN_MAX]; + +#define PSCI_RET_SUCCESS 0 +#define PSCI_RET_EOPNOTSUPP -1 +#define PSCI_RET_EINVAL -2 +#define PSCI_RET_EPERM -3 + +static int psci_to_linux_errno(int errno) +{ + switch (errno) { + case PSCI_RET_SUCCESS: + return 0; + case PSCI_RET_EOPNOTSUPP: + return -EOPNOTSUPP; + case PSCI_RET_EINVAL: + return -EINVAL; + case PSCI_RET_EPERM: + return -EPERM; + }; + + return -EINVAL; +} + +#define PSCI_POWER_STATE_ID_MASK 0xffff +#define PSCI_POWER_STATE_ID_SHIFT 0 +#define PSCI_POWER_STATE_TYPE_MASK 0x1 +#define PSCI_POWER_STATE_TYPE_SHIFT 16 +#define PSCI_POWER_STATE_AFFL_MASK 0x3 +#define PSCI_POWER_STATE_AFFL_SHIFT 24 + +static u32 psci_power_state_pack(struct psci_power_state state) +{ + return ((state.id & PSCI_POWER_STATE_ID_MASK) + << PSCI_POWER_STATE_ID_SHIFT) | + ((state.type & PSCI_POWER_STATE_TYPE_MASK) + << PSCI_POWER_STATE_TYPE_SHIFT) | + ((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK) + << PSCI_POWER_STATE_AFFL_SHIFT); +} + +/* + * The following two functions are invoked via the invoke_psci_fn pointer + * and will not be inlined, allowing us to piggyback on the AAPCS. + */ +static noinline int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1, + u32 arg2) +{ + asm volatile( + __asmeq("%0", "r0") + __asmeq("%1", "r1") + __asmeq("%2", "r2") + __asmeq("%3", "r3") + __HVC(0) + : "+r" (function_id) + : "r" (arg0), "r" (arg1), "r" (arg2)); + + return function_id; +} + +static noinline int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1, + u32 arg2) +{ + asm volatile( + __asmeq("%0", "r0") + __asmeq("%1", "r1") + __asmeq("%2", "r2") + __asmeq("%3", "r3") + __SMC(0) + : "+r" (function_id) + : "r" (arg0), "r" (arg1), "r" (arg2)); + + return function_id; +} + +static int psci_cpu_suspend(struct psci_power_state state, + unsigned long entry_point) +{ + int err; + u32 fn, power_state; + + fn = psci_function_id[PSCI_FN_CPU_SUSPEND]; + power_state = psci_power_state_pack(state); + err = invoke_psci_fn(fn, power_state, entry_point, 0); + return psci_to_linux_errno(err); +} + +static int psci_cpu_off(struct psci_power_state state) +{ + int err; + u32 fn, power_state; + + fn = psci_function_id[PSCI_FN_CPU_OFF]; + power_state = psci_power_state_pack(state); + err = invoke_psci_fn(fn, power_state, 0, 0); + return psci_to_linux_errno(err); +} + +static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point) +{ + int err; + u32 fn; + + fn = psci_function_id[PSCI_FN_CPU_ON]; + err = invoke_psci_fn(fn, cpuid, entry_point, 0); + return psci_to_linux_errno(err); +} + +static int psci_migrate(unsigned long cpuid) +{ + int err; + u32 fn; + + fn = psci_function_id[PSCI_FN_MIGRATE]; + err = invoke_psci_fn(fn, cpuid, 0, 0); + return psci_to_linux_errno(err); +} + +static const struct of_device_id psci_of_match[] __initconst = { + { .compatible = "arm,psci", }, + {}, +}; + +static int __init psci_init(void) +{ + struct device_node *np; + const char *method; + u32 id; + + np = of_find_matching_node(NULL, psci_of_match); + if 
(!np) + return 0; + + pr_info("probing function IDs from device-tree\n"); + + if (of_property_read_string(np, "method", &method)) { + pr_warning("missing \"method\" property\n"); + goto out_put_node; + } + + if (!strcmp("hvc", method)) { + invoke_psci_fn = __invoke_psci_fn_hvc; + } else if (!strcmp("smc", method)) { + invoke_psci_fn = __invoke_psci_fn_smc; + } else { + pr_warning("invalid \"method\" property: %s\n", method); + goto out_put_node; + } + + if (!of_property_read_u32(np, "cpu_suspend", &id)) { + psci_function_id[PSCI_FN_CPU_SUSPEND] = id; + psci_ops.cpu_suspend = psci_cpu_suspend; + } + + if (!of_property_read_u32(np, "cpu_off", &id)) { + psci_function_id[PSCI_FN_CPU_OFF] = id; + psci_ops.cpu_off = psci_cpu_off; + } + + if (!of_property_read_u32(np, "cpu_on", &id)) { + psci_function_id[PSCI_FN_CPU_ON] = id; + psci_ops.cpu_on = psci_cpu_on; + } + + if (!of_property_read_u32(np, "migrate", &id)) { + psci_function_id[PSCI_FN_MIGRATE] = id; + psci_ops.migrate = psci_migrate; + } + +out_put_node: + of_node_put(np); + return 0; +} +early_initcall(psci_init); diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c index fc6692e2b60..bd6f56b9ec2 100644 --- a/arch/arm/kernel/sched_clock.c +++ b/arch/arm/kernel/sched_clock.c @@ -93,11 +93,11 @@ static void notrace update_sched_clock(void) * detectable in cyc_to_fixed_sched_clock(). */ raw_local_irq_save(flags); - cd.epoch_cyc = cyc; + cd.epoch_cyc_copy = cyc; smp_wmb(); cd.epoch_ns = ns; smp_wmb(); - cd.epoch_cyc_copy = cyc; + cd.epoch_cyc = cyc; raw_local_irq_restore(flags); } diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 84f4cbf652e..5f73f7018f5 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -125,18 +125,6 @@ void __init smp_init_cpus(void) smp_ops.smp_init_cpus(); } -static void __init platform_smp_prepare_cpus(unsigned int max_cpus) -{ - if (smp_ops.smp_prepare_cpus) - smp_ops.smp_prepare_cpus(max_cpus); -} - -static void __cpuinit platform_secondary_init(unsigned int cpu) -{ - if (smp_ops.smp_secondary_init) - smp_ops.smp_secondary_init(cpu); -} - int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) { if (smp_ops.smp_boot_secondary) @@ -154,12 +142,6 @@ static int platform_cpu_kill(unsigned int cpu) return 1; } -static void platform_cpu_die(unsigned int cpu) -{ - if (smp_ops.cpu_die) - smp_ops.cpu_die(cpu); -} - static int platform_cpu_disable(unsigned int cpu) { if (smp_ops.cpu_disable) @@ -257,7 +239,8 @@ void __ref cpu_die(void) * actual CPU shutdown procedure is at least platform (if not * CPU) specific. */ - platform_cpu_die(cpu); + if (smp_ops.cpu_die) + smp_ops.cpu_die(cpu); /* * Do not return to the idle loop - jump back to the secondary @@ -324,7 +307,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void) /* * Give the platform a chance to do its own initialisation. */ - platform_secondary_init(cpu); + if (smp_ops.smp_secondary_init) + smp_ops.smp_secondary_init(cpu); notify_cpu_starting(cpu); @@ -399,8 +383,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus) /* * Initialise the present map, which describes the set of CPUs * actually populated at the present time. A platform should - * re-initialize the map in platform_smp_prepare_cpus() if - * present != possible (e.g. physical hotplug). + * re-initialize the map in the platforms smp_prepare_cpus() + * if present != possible (e.g. physical hotplug). 
*/ init_cpu_present(cpu_possible_mask); @@ -408,7 +392,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus) * Initialise the SCU if there are more than one CPU * and let them know where to start. */ - platform_smp_prepare_cpus(max_cpus); + if (smp_ops.smp_prepare_cpus) + smp_ops.smp_prepare_cpus(max_cpus); } } @@ -416,7 +401,8 @@ static void (*smp_cross_call)(const struct cpumask *, unsigned int); void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int)) { - smp_cross_call = fn; + if (!smp_cross_call) + smp_cross_call = fn; } void arch_send_call_function_ipi_mask(const struct cpumask *mask) @@ -475,14 +461,8 @@ u64 smp_irq_stat_cpu(unsigned int cpu) */ static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent); -static void ipi_timer(void) -{ - struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent); - evt->event_handler(evt); -} - #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST -static void smp_timer_broadcast(const struct cpumask *mask) +void tick_broadcast(const struct cpumask *mask) { smp_cross_call(mask, IPI_TIMER); } @@ -530,7 +510,6 @@ static void __cpuinit percpu_timer_setup(void) struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu); evt->cpumask = cpumask_of(cpu); - evt->broadcast = smp_timer_broadcast; if (!lt_ops || lt_ops->setup(evt)) broadcast_timer_setup(evt); @@ -596,11 +575,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs) case IPI_WAKEUP: break; +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST case IPI_TIMER: irq_enter(); - ipi_timer(); + tick_receive_broadcast(); irq_exit(); break; +#endif case IPI_RESCHEDULE: scheduler_ipi(); diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c index b9f015e843d..45eac87ed66 100644 --- a/arch/arm/kernel/smp_scu.c +++ b/arch/arm/kernel/smp_scu.c @@ -75,7 +75,7 @@ void scu_enable(void __iomem *scu_base) int scu_power_mode(void __iomem *scu_base, unsigned int mode) { unsigned int val; - int cpu = cpu_logical_map(smp_processor_id()); + int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0); if (mode > 3 || mode == 1 || cpu > 3) return -EINVAL; diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index 49f335d301b..c092115d903 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -24,14 +24,12 @@ #include <asm/smp_twd.h> #include <asm/localtimer.h> -#include <asm/hardware/gic.h> /* set up by the platform code */ static void __iomem *twd_base; static struct clk *twd_clk; static unsigned long twd_timer_rate; -static bool common_setup_called; static DEFINE_PER_CPU(bool, percpu_setup_called); static struct clock_event_device __percpu **twd_evt; @@ -239,25 +237,28 @@ static irqreturn_t twd_handler(int irq, void *dev_id) return IRQ_NONE; } -static struct clk *twd_get_clock(void) +static void twd_get_clock(struct device_node *np) { - struct clk *clk; int err; - clk = clk_get_sys("smp_twd", NULL); - if (IS_ERR(clk)) { - pr_err("smp_twd: clock not found: %d\n", (int)PTR_ERR(clk)); - return clk; + if (np) + twd_clk = of_clk_get(np, 0); + else + twd_clk = clk_get_sys("smp_twd", NULL); + + if (IS_ERR(twd_clk)) { + pr_err("smp_twd: clock not found %d\n", (int) PTR_ERR(twd_clk)); + return; } - err = clk_prepare_enable(clk); + err = clk_prepare_enable(twd_clk); if (err) { pr_err("smp_twd: clock failed to prepare+enable: %d\n", err); - clk_put(clk); - return ERR_PTR(err); + clk_put(twd_clk); + return; } - return clk; + twd_timer_rate = clk_get_rate(twd_clk); } /* @@ -280,26 +281,7 @@ static int __cpuinit twd_timer_setup(struct 
clock_event_device *clk) } per_cpu(percpu_setup_called, cpu) = true; - /* - * This stuff only need to be done once for the entire TWD cluster - * during the runtime of the system. - */ - if (!common_setup_called) { - twd_clk = twd_get_clock(); - - /* - * We use IS_ERR_OR_NULL() here, because if the clock stubs - * are active we will get a valid clk reference which is - * however NULL and will return the rate 0. In that case we - * need to calibrate the rate instead. - */ - if (!IS_ERR_OR_NULL(twd_clk)) - twd_timer_rate = clk_get_rate(twd_clk); - else - twd_calibrate_rate(); - - common_setup_called = true; - } + twd_calibrate_rate(); /* * The following is done once per CPU the first time .setup() is @@ -330,7 +312,7 @@ static struct local_timer_ops twd_lt_ops __cpuinitdata = { .stop = twd_timer_stop, }; -static int __init twd_local_timer_common_register(void) +static int __init twd_local_timer_common_register(struct device_node *np) { int err; @@ -350,6 +332,8 @@ static int __init twd_local_timer_common_register(void) if (err) goto out_irq; + twd_get_clock(np); + return 0; out_irq: @@ -373,7 +357,7 @@ int __init twd_local_timer_register(struct twd_local_timer *tlt) if (!twd_base) return -ENOMEM; - return twd_local_timer_common_register(); + return twd_local_timer_common_register(NULL); } #ifdef CONFIG_OF @@ -405,7 +389,7 @@ void __init twd_local_timer_of_register(void) goto out; } - err = twd_local_timer_common_register(); + err = twd_local_timer_common_register(np); out: WARN(err, "twd_local_timer_of_register failed (%d)\n", err); diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index 09be0c3c906..955d92d265e 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c @@ -21,7 +21,6 @@ #include <linux/timex.h> #include <linux/errno.h> #include <linux/profile.h> -#include <linux/syscore_ops.h> #include <linux/timer.h> #include <linux/irq.h> @@ -31,11 +30,6 @@ #include <asm/mach/arch.h> #include <asm/mach/time.h> -/* - * Our system timer. - */ -static struct sys_timer *system_timer; - #if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || \ defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE) /* this needs a better home */ @@ -69,16 +63,6 @@ unsigned long profile_pc(struct pt_regs *regs) EXPORT_SYMBOL(profile_pc); #endif -#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET -u32 arch_gettimeoffset(void) -{ - if (system_timer->offset != NULL) - return system_timer->offset() * 1000; - - return 0; -} -#endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */ - #ifndef CONFIG_GENERIC_CLOCKEVENTS /* * Kernel system timer support. 
@@ -129,43 +113,8 @@ int __init register_persistent_clock(clock_access_fn read_boot, return -EINVAL; } -#if defined(CONFIG_PM) && !defined(CONFIG_GENERIC_CLOCKEVENTS) -static int timer_suspend(void) -{ - if (system_timer->suspend) - system_timer->suspend(); - - return 0; -} - -static void timer_resume(void) -{ - if (system_timer->resume) - system_timer->resume(); -} -#else -#define timer_suspend NULL -#define timer_resume NULL -#endif - -static struct syscore_ops timer_syscore_ops = { - .suspend = timer_suspend, - .resume = timer_resume, -}; - -static int __init timer_init_syscore_ops(void) -{ - register_syscore_ops(&timer_syscore_ops); - - return 0; -} - -device_initcall(timer_init_syscore_ops); - void __init time_init(void) { - system_timer = machine_desc->timer; - system_timer->init(); + machine_desc->init_time(); sched_clock_postinit(); } - diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 11c1785bf63..b571484e9f0 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -19,7 +19,11 @@ ALIGN_FUNCTION(); \ VMLINUX_SYMBOL(__idmap_text_start) = .; \ *(.idmap.text) \ - VMLINUX_SYMBOL(__idmap_text_end) = .; + VMLINUX_SYMBOL(__idmap_text_end) = .; \ + ALIGN_FUNCTION(); \ + VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \ + *(.hyp.idmap.text) \ + VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; #ifdef CONFIG_HOTPLUG_CPU #define ARM_CPU_DISCARD(x)
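
Note on the new psci.c: it installs a psci_ops hook only when the matching function ID is found in the device tree, so callers are expected to test each hook before invoking it. A minimal, hypothetical caller sketch follows (not part of the patch: smp_boot_via_psci(), hwid and entry are invented names; only psci_ops and the cpu_on signature are taken from the code above):

#include <linux/errno.h>
#include <asm/psci.h>

/* Hypothetical SMP glue: ask PSCI firmware to power on a secondary core.
 * Returns 0 on success; firmware error codes come back already mapped to
 * Linux errnos by psci_to_linux_errno(). */
static int smp_boot_via_psci(unsigned long hwid, unsigned long entry)
{
	if (!psci_ops.cpu_on)		/* DT carried no "cpu_on" function ID */
		return -ENODEV;
	return psci_ops.cpu_on(hwid, entry);
}

For reference, psci_power_state_pack() packs its fields as id | (type << 16) | (affinity_level << 24), so a state of { .id = 0, .type = 1, .affinity_level = 1 } packs to 0x01010000.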
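
Note on the sched_clock.c hunk: it is an ordering fix. update_sched_clock() now writes epoch_cyc_copy first and epoch_cyc last, because the lockless reader detects an in-progress update by comparing those two fields. A sketch of that reader, modelled on the cyc_to_sched_clock() retry loop already present in sched_clock.c (field names are from the hunk; the loop itself is reconstructed and therefore an assumption here):

	u32 epoch_cyc;
	u64 epoch_ns;

	do {
		epoch_cyc = cd.epoch_cyc;	/* written last by the updater */
		smp_rmb();
		epoch_ns = cd.epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd.epoch_cyc_copy);	/* written first */

With the old write order a reader could see epoch_cyc == epoch_cyc_copy while epoch_ns still held the previous epoch, yielding a sched_clock() jump; writing the copy, then epoch_ns, then epoch_cyc guarantees that a matching pair implies a consistent (epoch_cyc, epoch_ns) snapshot.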