Diffstat (limited to 'arch/arm/kernel/smp.c')
 arch/arm/kernel/smp.c | 100 +--
 1 file changed, 2 insertions(+), 98 deletions(-)
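This commit removes the ARM local timer API from the SMP core: local_timer_register() and the percpu_timer_setup()/percpu_timer_stop() helpers go away, along with the dummy broadcast clockevent they fell back to. Per-CPU timer drivers are instead expected to register their clock_event_device directly with the generic clockevents core. Below is a minimal sketch of that replacement pattern, using the set_mode-based clockevents API of this era; the driver name, rating, frequency and hardware hooks are hypothetical, not taken from this commit:

#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct clock_event_device, hyp_timer_evt);

/* Hypothetical hardware hooks -- a real driver programs its own registers. */
static int hyp_timer_set_next_event(unsigned long cycles,
				    struct clock_event_device *evt)
{
	/* program this CPU's comparator here */
	return 0;
}

static void hyp_timer_set_mode(enum clock_event_mode mode,
			       struct clock_event_device *evt)
{
	/* enable/disable this CPU's timer here */
}

/* Runs on the CPU being brought up. */
static void hyp_timer_setup_this_cpu(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(hyp_timer_evt, cpu);

	evt->name = "hyp-timer";
	evt->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
	evt->rating = 300;
	evt->cpumask = cpumask_of(cpu);
	evt->set_next_event = hyp_timer_set_next_event;
	evt->set_mode = hyp_timer_set_mode;

	/* clockevents core derives mult/shift and registers the device. */
	clockevents_config_and_register(evt, 1000000, 0xf, 0x7fffffff);
}

clockevents_config_and_register() computes the mult/shift pair from the given frequency, so a real driver only fills in its hardware callbacks.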
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 2dc19349eb1..72024ea8a3a 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -41,7 +41,6 @@
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
-#include <asm/localtimer.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
@@ -156,8 +155,6 @@ int platform_can_cpu_hotplug(void)
}
#ifdef CONFIG_HOTPLUG_CPU
-static void percpu_timer_stop(void);
-
static int platform_cpu_kill(unsigned int cpu)
{
if (smp_ops.cpu_kill)
@@ -201,11 +198,6 @@ int __cpu_disable(void)
migrate_irqs();
/*
- * Stop the local timer for this CPU.
- */
- percpu_timer_stop();
-
- /*
* Flush user cache and TLB mappings, and then remove this CPU
* from the vm mask set of all processes.
*
@@ -326,8 +318,6 @@ static void smp_store_cpu_info(unsigned int cpuid)
store_cpu_topology(cpuid);
}
-static void percpu_timer_setup(void);
-
/*
* This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables.
@@ -382,11 +372,6 @@ asmlinkage void secondary_start_kernel(void)
set_cpu_online(cpu, true);
complete(&cpu_running);
- /*
- * Setup the percpu timer for this CPU.
- */
- percpu_timer_setup();
-
local_irq_enable();
local_fiq_enable();
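With percpu_timer_setup() no longer called from secondary_start_kernel() here (and percpu_timer_stop() no longer called from __cpu_disable() in the earlier hunk), the SMP core no longer starts or stops per-CPU timers across hotplug. A driver that needs this behaviour can watch CPU hotplug notifications itself. A hedged sketch under that assumption, reusing the hypothetical hyp_timer_setup_this_cpu() from the sketch above:

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/notifier.h>

static int hyp_timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		/* runs on the incoming CPU, interrupts still disabled */
		hyp_timer_setup_this_cpu();
		break;
	case CPU_DYING:
		/* quiesce this CPU's timer hardware here */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block hyp_timer_cpu_nb = {
	.notifier_call = hyp_timer_cpu_notify,
};

/* Called from the driver's own init path (hypothetical). */
static int __init hyp_timer_init(void)
{
	register_cpu_notifier(&hyp_timer_cpu_nb);
	hyp_timer_setup_this_cpu();	/* boot CPU */
	return 0;
}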
@@ -398,17 +383,8 @@ asmlinkage void secondary_start_kernel(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
- int cpu;
- unsigned long bogosum = 0;
-
- for_each_online_cpu(cpu)
- bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
-
- printk(KERN_INFO "SMP: Total of %d processors activated "
- "(%lu.%02lu BogoMIPS).\n",
- num_online_cpus(),
- bogosum / (500000/HZ),
- (bogosum / (5000/HZ)) % 100);
+ printk(KERN_INFO "SMP: Total of %d processors activated.\n",
+ num_online_cpus());
hyp_mode_check();
}
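For reference, the summary deleted above computed a system-wide BogoMIPS figure from each online CPU's loops_per_jiffy. One BogoMIPS is 500000 delay-loop iterations per second, i.e. 500000/HZ iterations per jiffy, so bogosum / (500000/HZ) gives the integer part and (bogosum / (5000/HZ)) % 100 the two fractional digits. A standalone worked example of that arithmetic, assuming HZ=100 and hypothetical calibration values:

#include <stdio.h>

#define HZ 100	/* assumed tick rate */

int main(void)
{
	/* Hypothetical: two CPUs, each calibrated to 4980736 loops/jiffy. */
	unsigned long bogosum = 2UL * 4980736UL;

	printf("SMP: Total of 2 processors activated (%lu.%02lu BogoMIPS).\n",
	       bogosum / (500000 / HZ),		/* integer part -> 1992 */
	       (bogosum / (5000 / HZ)) % 100);	/* fraction -> 29 */
	return 0;
}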
@@ -433,12 +409,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
max_cpus = ncores;
if (ncores > 1 && max_cpus) {
/*
- * Enable the local timer or broadcast device for the
- * boot CPU, but only if we have more than one CPU.
- */
- percpu_timer_setup();
-
- /*
* Initialise the present map, which describes the set of CPUs
* actually populated at the present time. A platform should
* re-initialize the map in the platforms smp_prepare_cpus()
@@ -514,11 +484,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
return sum;
}
-/*
- * Timer (local or broadcast) support
- */
-static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
-
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
@@ -526,67 +491,6 @@ void tick_broadcast(const struct cpumask *mask)
}
#endif
-static void broadcast_timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
-}
-
-static void broadcast_timer_setup(struct clock_event_device *evt)
-{
- evt->name = "dummy_timer";
- evt->features = CLOCK_EVT_FEAT_ONESHOT |
- CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_DUMMY;
- evt->rating = 100;
- evt->mult = 1;
- evt->set_mode = broadcast_timer_set_mode;
-
- clockevents_register_device(evt);
-}
-
-static struct local_timer_ops *lt_ops;
-
-#ifdef CONFIG_LOCAL_TIMERS
-int local_timer_register(struct local_timer_ops *ops)
-{
- if (!is_smp() || !setup_max_cpus)
- return -ENXIO;
-
- if (lt_ops)
- return -EBUSY;
-
- lt_ops = ops;
- return 0;
-}
-#endif
-
-static void percpu_timer_setup(void)
-{
- unsigned int cpu = smp_processor_id();
- struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
-
- evt->cpumask = cpumask_of(cpu);
-
- if (!lt_ops || lt_ops->setup(evt))
- broadcast_timer_setup(evt);
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * The generic clock events code purposely does not stop the local timer
- * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
- * manually here.
- */
-static void percpu_timer_stop(void)
-{
- unsigned int cpu = smp_processor_id();
- struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
-
- if (lt_ops)
- lt_ops->stop(evt);
-}
-#endif
-
static DEFINE_RAW_SPINLOCK(stop_lock);
/*