From 1292211058aaf872eeb2a0e2677d237916b4501f Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Sat, 7 Feb 2009 22:16:12 +0100
Subject: tracing/power: move the power trace headers to a dedicated file

Impact: cleanup

Move the power tracer headers to trace/power.h to keep ftrace.h and the
power bits easier to maintain as separate topics.

Signed-off-by: Frederic Weisbecker
Cc: Arjan van de Ven
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/process.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch/x86/kernel/process.c')

diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index e68bb9e3086..026819ffcb0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -8,7 +8,7 @@
 #include
 #include
 #include
-#include <linux/ftrace.h>
+#include <trace/power.h>
 #include
 #include
--
cgit v1.2.3-70-g09d2
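
For reference, the API that this move gives a dedicated home is not visible
above, because this log is limited to arch/x86/kernel/process.c. Its shape at
this point in the series can be reconstructed from the removal hunk in the
next patch, so treat the exact layout here as approximate:

    /* include/trace/power.h, pre-tracepoint version (abridged) */
    #ifdef CONFIG_POWER_TRACER
    extern void trace_power_start(struct power_trace *it, unsigned int type,
                                  unsigned int state);
    extern void trace_power_mark(struct power_trace *it, unsigned int type,
                                 unsigned int state);
    extern void trace_power_end(struct power_trace *it);
    #else
    static inline void trace_power_start(struct power_trace *it,
                                         unsigned int type,
                                         unsigned int state) { }
    static inline void trace_power_mark(struct power_trace *it,
                                        unsigned int type,
                                        unsigned int state) { }
    static inline void trace_power_end(struct power_trace *it) { }
    #endif

The #else stubs are what keep call sites compiling when the tracer is
configured out; the next patch replaces the whole block with tracepoint
declarations.
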
From b5f9fd0f8a05c9bafb91a9a85b9110938d8e585b Mon Sep 17 00:00:00 2001
From: Jason Baron
Date: Wed, 11 Feb 2009 13:57:25 -0500
Subject: tracing: convert c/p state power tracer to use tracepoints

Convert the c/p state "power" tracer to use tracepoints. This avoids a
function call when the tracer is disabled.

Signed-off-by: Jason Baron
Acked-by: Ingo Molnar
Signed-off-by: Steven Rostedt
---
 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c |   2 +
 arch/x86/kernel/process.c                  |   3 +
 include/trace/power.h                      |  25 ++-
 kernel/trace/trace_power.c                 | 173 +++++++++++++++++------
 4 files changed, 119 insertions(+), 84 deletions(-)
(limited to 'arch/x86/kernel/process.c')

diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 7ed925edf4d..c5d737cdb36 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -70,6 +70,8 @@ struct acpi_cpufreq_data {
 
 static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
 
+DEFINE_TRACE(power_mark);
+
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance *acpi_perf_data;
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 026819ffcb0..e0d0fd7ab51 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -19,6 +19,9 @@ EXPORT_SYMBOL(idle_nomwait);
 
 struct kmem_cache *task_xstate_cachep;
 
+DEFINE_TRACE(power_start);
+DEFINE_TRACE(power_end);
+
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
 	*dst = *src;
diff --git a/include/trace/power.h b/include/trace/power.h
index c7cefbcdaea..2c733e58e89 100644
--- a/include/trace/power.h
+++ b/include/trace/power.h
@@ -2,6 +2,7 @@
 #define _TRACE_POWER_H
 
 #include <linux/ktime.h>
+#include <linux/tracepoint.h>
 
 enum {
 	POWER_NONE = 0,
@@ -18,18 +19,16 @@ struct power_trace {
 #endif
 };
 
-#ifdef CONFIG_POWER_TRACER
-extern void trace_power_start(struct power_trace *it, unsigned int type,
-						unsigned int state);
-extern void trace_power_mark(struct power_trace *it, unsigned int type,
-						unsigned int state);
-extern void trace_power_end(struct power_trace *it);
-#else
-static inline void trace_power_start(struct power_trace *it, unsigned int type,
-						unsigned int state) { }
-static inline void trace_power_mark(struct power_trace *it, unsigned int type,
-						unsigned int state) { }
-static inline void trace_power_end(struct power_trace *it) { }
-#endif
+DECLARE_TRACE(power_start,
+	TPPROTO(struct power_trace *it, unsigned int type, unsigned int state),
+	TPARGS(it, type, state));
+
+DECLARE_TRACE(power_mark,
+	TPPROTO(struct power_trace *it, unsigned int type, unsigned int state),
+	TPARGS(it, type, state));
+
+DECLARE_TRACE(power_end,
+	TPPROTO(struct power_trace *it),
+	TPARGS(it));
 
 #endif /* _TRACE_POWER_H */
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index b1d0d087d3a..91ce672fb03 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -21,15 +21,116 @@
 static struct trace_array *power_trace;
 static int __read_mostly trace_power_enabled;
 
+static void probe_power_start(struct power_trace *it, unsigned int type,
+					unsigned int level)
+{
+	if (!trace_power_enabled)
+		return;
+
+	memset(it, 0, sizeof(struct power_trace));
+	it->state = level;
+	it->type = type;
+	it->stamp = ktime_get();
+}
+
+
+static void probe_power_end(struct power_trace *it)
+{
+	struct ring_buffer_event *event;
+	struct trace_power *entry;
+	struct trace_array_cpu *data;
+	struct trace_array *tr = power_trace;
+
+	if (!trace_power_enabled)
+		return;
+
+	preempt_disable();
+	it->end = ktime_get();
+	data = tr->data[smp_processor_id()];
+
+	event = trace_buffer_lock_reserve(tr, TRACE_POWER,
+					  sizeof(*entry), 0, 0);
+	if (!event)
+		goto out;
+	entry = ring_buffer_event_data(event);
+	entry->state_data = *it;
+	trace_buffer_unlock_commit(tr, event, 0, 0);
+ out:
+	preempt_enable();
+}
+
+static void probe_power_mark(struct power_trace *it, unsigned int type,
+					unsigned int level)
+{
+	struct ring_buffer_event *event;
+	struct trace_power *entry;
+	struct trace_array_cpu *data;
+	struct trace_array *tr = power_trace;
+
+	if (!trace_power_enabled)
+		return;
+
+	memset(it, 0, sizeof(struct power_trace));
+	it->state = level;
+	it->type = type;
+	it->stamp = ktime_get();
+	preempt_disable();
+	it->end = it->stamp;
+	data = tr->data[smp_processor_id()];
+
+	event = trace_buffer_lock_reserve(tr, TRACE_POWER,
+					  sizeof(*entry), 0, 0);
+	if (!event)
+		goto out;
+	entry = ring_buffer_event_data(event);
+	entry->state_data = *it;
+	trace_buffer_unlock_commit(tr, event, 0, 0);
+ out:
+	preempt_enable();
+}
+
+static int
+tracing_power_register(void)
+{
+	int ret;
+
+	ret = register_trace_power_start(probe_power_start);
+	if (ret) {
+		pr_info("power trace: Couldn't activate tracepoint"
+			" probe to trace_power_start\n");
+		return ret;
+	}
+	ret = register_trace_power_end(probe_power_end);
+	if (ret) {
+		pr_info("power trace: Couldn't activate tracepoint"
+			" probe to trace_power_end\n");
+		goto fail_start;
+	}
+	ret = register_trace_power_mark(probe_power_mark);
+	if (ret) {
+		pr_info("power trace: Couldn't activate tracepoint"
+			" probe to trace_power_mark\n");
+		goto fail_end;
+	}
+	return ret;
+fail_end:
+	unregister_trace_power_end(probe_power_end);
+fail_start:
+	unregister_trace_power_start(probe_power_start);
+	return ret;
+}
 
 static void start_power_trace(struct trace_array *tr)
 {
 	trace_power_enabled = 1;
+	tracing_power_register();
 }
 
 static void stop_power_trace(struct trace_array *tr)
 {
 	trace_power_enabled = 0;
+	unregister_trace_power_start(probe_power_start);
+	unregister_trace_power_end(probe_power_end);
+	unregister_trace_power_mark(probe_power_mark);
 }
 
@@ -39,6 +140,7 @@ static int power_trace_init(struct trace_array *tr)
 
 	power_trace = tr;
 	trace_power_enabled = 1;
+	tracing_power_register();
 
 	for_each_cpu(cpu, cpu_possible_mask)
 		tracing_reset(tr, cpu);
@@ -95,74 +197,3 @@ static int init_power_trace(void)
 	return register_tracer(&power_tracer);
 }
 device_initcall(init_power_trace);
-
-void trace_power_start(struct power_trace *it, unsigned int type,
-			unsigned int level)
-{
-	if (!trace_power_enabled)
-		return;
-
-	memset(it, 0, sizeof(struct power_trace));
-	it->state = level;
-	it->type = type;
-	it->stamp = ktime_get();
-}
-EXPORT_SYMBOL_GPL(trace_power_start);
-
-
-void trace_power_end(struct power_trace *it)
-{
-	struct ring_buffer_event *event;
-	struct trace_power *entry;
-	struct trace_array_cpu *data;
-	struct trace_array *tr = power_trace;
-
-	if (!trace_power_enabled)
-		return;
-
-	preempt_disable();
-	it->end = ktime_get();
-	data = tr->data[smp_processor_id()];
-
-	event = trace_buffer_lock_reserve(tr, TRACE_POWER,
-					  sizeof(*entry), 0, 0);
-	if (!event)
-		goto out;
-	entry = ring_buffer_event_data(event);
-	entry->state_data = *it;
-	trace_buffer_unlock_commit(tr, event, 0, 0);
- out:
-	preempt_enable();
-}
-EXPORT_SYMBOL_GPL(trace_power_end);
-
-void trace_power_mark(struct power_trace *it, unsigned int type,
-			unsigned int level)
-{
-	struct ring_buffer_event *event;
-	struct trace_power *entry;
-	struct trace_array_cpu *data;
-	struct trace_array *tr = power_trace;
-
-	if (!trace_power_enabled)
-		return;
-
-	memset(it, 0, sizeof(struct power_trace));
-	it->state = level;
-	it->type = type;
-	it->stamp = ktime_get();
-	preempt_disable();
-	it->end = it->stamp;
-	data = tr->data[smp_processor_id()];
-
-	event = trace_buffer_lock_reserve(tr, TRACE_POWER,
-					  sizeof(*entry), 0, 0);
-	if (!event)
-		goto out;
-	entry = ring_buffer_event_data(event);
-	entry->state_data = *it;
-	trace_buffer_unlock_commit(tr, event, 0, 0);
- out:
-	preempt_enable();
-}
-EXPORT_SYMBOL_GPL(trace_power_mark);
--
cgit v1.2.3-70-g09d2
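
Taken together, the hunks above are the standard tracepoint recipe of this
era: declare in a header, define in exactly one translation unit, attach
probes at runtime. A condensed sketch of the moving parts; the probe body is
elided, and the final call site lives in code outside this patch, so that
step is inferred rather than quoted:

    /* 1. Header: declare the tracepoint and its signature. */
    DECLARE_TRACE(power_start,
            TPPROTO(struct power_trace *it, unsigned int type,
                    unsigned int state),
            TPARGS(it, type, state));

    /* 2. Exactly one .c file: instantiate the tracepoint. */
    DEFINE_TRACE(power_start);

    /* 3. Tracer side: a probe with a matching signature, attached and
     *    detached at runtime. */
    static void probe_power_start(struct power_trace *it, unsigned int type,
                                  unsigned int state)
    {
            /* record the event into the trace ring buffer */
    }

    static int power_tracer_attach(void)
    {
            return register_trace_power_start(probe_power_start);
    }

    static void power_tracer_detach(void)
    {
            unregister_trace_power_start(probe_power_start);
    }

Instrumented code then calls trace_power_start(it, type, state); with no
probe registered this expands to a cheap enabled-check rather than a real
function call, which is exactly the saving the changelog cites.
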
From bc9b83dd1f66402b870301c3c7117b9c1484abb4 Mon Sep 17 00:00:00 2001
From: Rusty Russell
Date: Fri, 13 Mar 2009 14:49:49 +1030
Subject: cpumask: convert c1e_mask in arch/x86/kernel/process.c to cpumask_var_t.

Impact: reduce kernel size when CONFIG_CPUMASK_OFFSTACK=y

Simple conversion.

Signed-off-by: Rusty Russell
---
 arch/x86/kernel/process.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
(limited to 'arch/x86/kernel/process.c')

diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 6afa5232dbb..cad5431951a 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -474,12 +474,12 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
 	return 1;
 }
 
-static cpumask_t c1e_mask = CPU_MASK_NONE;
+static cpumask_var_t c1e_mask;
 static int c1e_detected;
 
 void c1e_remove_cpu(int cpu)
 {
-	cpu_clear(cpu, c1e_mask);
+	cpumask_clear_cpu(cpu, c1e_mask);
 }
 
 /*
@@ -508,8 +508,8 @@ static void c1e_idle(void)
 		if (c1e_detected) {
 			int cpu = smp_processor_id();
 
-			if (!cpu_isset(cpu, c1e_mask)) {
-				cpu_set(cpu, c1e_mask);
+			if (!cpumask_test_cpu(cpu, c1e_mask)) {
+				cpumask_set_cpu(cpu, c1e_mask);
 				/*
 				 * Force broadcast so ACPI can not interfere. Needs
 				 * to run with interrupts enabled as it uses
@@ -556,6 +556,8 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 		pm_idle = mwait_idle;
 	} else if (check_c1e_idle(c)) {
 		printk(KERN_INFO "using C1E aware idle routine\n");
+		alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
+		cpumask_clear(c1e_mask);
 		pm_idle = c1e_idle;
 	} else
 		pm_idle = default_idle;
--
cgit v1.2.3-70-g09d2
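
The cpumask_var_t idiom used here deserves a word. With
CONFIG_CPUMASK_OFFSTACK=n, cpumask_var_t is an ordinary one-element cpumask
array, so alloc_cpumask_var() is effectively free and cannot fail; with =y it
is a pointer whose bits are allocated for nr_cpu_ids CPUs instead of a static
NR_CPUS-sized object, which is where the size win comes from. A minimal
sketch with hypothetical names (my_mask, my_subsys_init); unlike the patch
above, it checks the allocation result, which matters once the mask really is
heap-allocated:

    static cpumask_var_t my_mask;	/* hypothetical example mask */

    static int __init my_subsys_init(void)
    {
            if (!alloc_cpumask_var(&my_mask, GFP_KERNEL))
                    return -ENOMEM;
            cpumask_clear(my_mask);
            cpumask_set_cpu(0, my_mask);	/* mark CPU 0, say */
            return 0;
    }

    static void my_subsys_exit(void)
    {
            free_cpumask_var(my_mask);
    }

A related hazard is code touching a mask that was never allocated at all;
the final patch in this series fixes exactly that for c1e_mask.
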
From 4f0628963c86d2f97b8cb9acc024a7fe288a6a57 Mon Sep 17 00:00:00 2001
From: Rusty Russell
Date: Fri, 13 Mar 2009 14:49:54 +1030
Subject: cpumask: use new cpumask functions throughout x86

Impact: cleanup

1) &cpu_online_map -> cpu_online_mask
2) first_cpu/next_cpu_nr -> cpumask_first/cpumask_next
3) cpu_*_map manipulation -> init_cpu_*/set_cpu_*

Signed-off-by: Rusty Russell
---
 arch/x86/include/asm/topology.h           |  4 ++--
 arch/x86/kernel/apic/bigsmp_32.c          | 16 ++++++++--------
 arch/x86/kernel/apic/es7000_32.c          |  6 +++---
 arch/x86/kernel/apic/summit_32.c          |  2 +-
 arch/x86/kernel/cpu/mcheck/mce_intel_64.c |  2 +-
 arch/x86/kernel/cpu/proc.c                |  4 ++--
 arch/x86/kernel/process.c                 |  2 +-
 arch/x86/kernel/smpboot.c                 | 11 +++++------
 arch/x86/xen/smp.c                        |  6 +++---
 9 files changed, 26 insertions(+), 27 deletions(-)
(limited to 'arch/x86/kernel/process.c')

diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index f8b833e1257..1ce1e1afa80 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -174,11 +174,11 @@ static inline int early_cpu_to_node(int cpu)
 
 static inline const cpumask_t *cpumask_of_node(int node)
 {
-	return &cpu_online_map;
+	return cpu_online_mask;
 }
 
 static inline int node_to_first_cpu(int node)
 {
-	return first_cpu(cpu_online_map);
+	return cpumask_first(cpu_online_mask);
 }
 
 static inline void setup_node_to_cpumask_map(void) { }
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index d806ecaa948..676cdac385c 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -26,12 +26,12 @@ static int bigsmp_apic_id_registered(void)
 	return 1;
 }
 
-static const cpumask_t *bigsmp_target_cpus(void)
+static const struct cpumask *bigsmp_target_cpus(void)
 {
 #ifdef CONFIG_SMP
-	return &cpu_online_map;
+	return cpu_online_mask;
 #else
-	return &cpumask_of_cpu(0);
+	return cpumask_of(0);
 #endif
 }
 
@@ -118,9 +118,9 @@ static int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
 }
 
 /* As we are using single CPU as destination, pick only one CPU here */
-static unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-	return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask));
+	return bigsmp_cpu_to_logical_apicid(cpumask_first(cpumask));
 }
 
 static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
@@ -188,10 +188,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
 	{ } /* NULL entry stops DMI scanning */
 };
 
-static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	cpus_clear(*retmask);
-	cpu_set(cpu, *retmask);
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
 }
 
 static int probe_bigsmp(void)
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 1322f5409e2..26d3a3eba75 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -460,9 +460,9 @@ static const cpumask_t *target_cpus_cluster(void)
 	return cpu_all_mask;
 }
 
-static const cpumask_t *es7000_target_cpus(void)
+static const struct cpumask *es7000_target_cpus(void)
 {
-	return &cpumask_of_cpu(smp_processor_id());
+	return cpumask_of(smp_processor_id());
 }
 
 static unsigned long
@@ -517,7 +517,7 @@ static void es7000_setup_apic_routing(void)
 		"Enabling APIC mode:  %s. Using %d I/O APICs, target cpus %lx\n",
 		(apic_version[apic] == 0x14) ?
 			"Physical Cluster" : "Logical Cluster",
-		nr_ioapics, cpus_addr(*es7000_target_cpus())[0]);
+		nr_ioapics, cpumask_bits(es7000_target_cpus())[0]);
 }
 
 static int es7000_apicid_to_node(int logical_apicid)
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index aac52fa873f..7f6bd908da4 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -192,7 +192,7 @@ static const cpumask_t *summit_target_cpus(void)
 	 * dest_LowestPrio mode logical clustered apic interrupt routing
 	 * Just start on cpu 0.  IRQ balancing will spread load
 	 */
-	return &cpumask_of_cpu(0);
+	return cpumask_of(0);
 }
 
 static unsigned long summit_check_apicid_used(physid_mask_t bitmap, int apicid)
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index aaa7d973093..96b2a85545a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -249,7 +249,7 @@ void cmci_rediscover(int dying)
 	for_each_online_cpu (cpu) {
 		if (cpu == dying)
 			continue;
-		if (set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)))
+		if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
 			continue;
 		/* Recheck banks in case CPUs don't all have the same */
 		if (cmci_supported(&banks))
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 4dd610e226e..f93047fed79 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -143,9 +143,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
 	if (*pos == 0)	/* just in case, cpu 0 is not the first */
-		*pos = first_cpu(cpu_online_map);
+		*pos = cpumask_first(cpu_online_mask);
 	else
-		*pos = next_cpu_nr(*pos - 1, cpu_online_map);
+		*pos = cpumask_next(*pos - 1, cpu_online_mask);
 	if ((*pos) < nr_cpu_ids)
 		return &cpu_data(*pos);
 	return NULL;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index cad5431951a..6638294cec8 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -324,7 +324,7 @@ void stop_this_cpu(void *dummy)
 	/*
 	 * Remove this CPU:
 	 */
-	cpu_clear(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), false);
 	disable_local_APIC();
 
 	for (;;) {
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index d6427aa5696..58d24ef917d 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -296,7 +296,7 @@ notrace static void __cpuinit start_secondary(void *unused)
 	__flush_tlb_all();
 #endif
 
-	/* This must be done before setting cpu_online_map */
+	/* This must be done before setting cpu_online_mask */
 	set_cpu_sibling_map(raw_smp_processor_id());
 	wmb();
 
@@ -904,9 +904,8 @@ int __cpuinit native_cpu_up(unsigned int cpu)
  */
 static __init void disable_smp(void)
 {
-	/* use the read/write pointers to the present and possible maps */
-	cpumask_copy(&cpu_present_map, cpumask_of(0));
-	cpumask_copy(&cpu_possible_map, cpumask_of(0));
+	init_cpu_present(cpumask_of(0));
+	init_cpu_possible(cpumask_of(0));
 	smpboot_clear_io_apic_irqs();
 
 	if (smp_found_config)
@@ -1149,11 +1148,11 @@ early_param("possible_cpus", _setup_possible_cpus);
 
 /*
- * cpu_possible_map should be static, it cannot change as cpu's
+ * cpu_possible_mask should be static, it cannot change as cpu's
  * are onlined, or offlined. The reason is per-cpu data-structures
  * are allocated by some modules at init time, and dont expect to
  * do this dynamically on cpu arrival/departure.
- * cpu_present_map on the other hand can change dynamically.
+ * cpu_present_mask on the other hand can change dynamically.
  * In case when cpu_hotplug is not compiled, then we resort to current
  * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 8d470562ffc..585a6e33083 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -158,7 +158,7 @@ static void __init xen_fill_possible_map(void)
 		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
 		if (rc >= 0) {
 			num_processors++;
-			cpu_set(i, cpu_possible_map);
+			set_cpu_possible(i, true);
 		}
 	}
 }
@@ -197,7 +197,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
 		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
 			continue;
-		cpu_clear(cpu, cpu_possible_map);
+		set_cpu_possible(cpu, false);
 	}
 
 	for_each_possible_cpu (cpu) {
@@ -210,7 +210,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 		if (IS_ERR(idle))
 			panic("failed fork for CPU %d", cpu);
 
-		cpu_set(cpu, cpu_present_map);
+		set_cpu_present(cpu, true);
 	}
 }
--
cgit v1.2.3-70-g09d2
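
The conversions above all follow one mapping from the old value-style
cpumask API to the pointer-style struct cpumask API. Collected as a quick
reference; every pair below is taken from the hunks in this patch or the
previous one:

    /* old (cpumask_t by value)             new (struct cpumask by pointer)
     *
     * cpu_set(cpu, mask)                   cpumask_set_cpu(cpu, &mask)
     * cpu_clear(cpu, mask)                 cpumask_clear_cpu(cpu, &mask)
     * cpus_clear(mask)                     cpumask_clear(&mask)
     * cpu_isset(cpu, mask)                 cpumask_test_cpu(cpu, &mask)
     * first_cpu(cpu_online_map)            cpumask_first(cpu_online_mask)
     * next_cpu_nr(cpu, cpu_online_map)     cpumask_next(cpu, cpu_online_mask)
     * &cpumask_of_cpu(cpu)                 cpumask_of(cpu)
     * cpus_addr(mask)[0]                   cpumask_bits(&mask)[0]
     *
     * writes to the global maps go through accessors instead:
     * cpu_set(cpu, cpu_possible_map)       set_cpu_possible(cpu, true)
     * cpu_clear(cpu, cpu_online_map)       set_cpu_online(cpu, false)
     * cpumask_copy(&cpu_present_map, m)    init_cpu_present(m)  /* boot only */
     */

The pointer-based forms work with masks whose storage may live off-stack,
which is what makes the cpumask_var_t conversion in the previous patch (and
CONFIG_CPUMASK_OFFSTACK in general) possible.
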
From 30e1e6d1af2b67558bccf322af2b3e0676b209ae Mon Sep 17 00:00:00 2001
From: Rusty Russell
Date: Tue, 17 Mar 2009 14:50:34 +1030
Subject: cpumask: fix CONFIG_CPUMASK_OFFSTACK=y cpu hotunplug crash

Impact: Fix cpu offline when CONFIG_MAXSMP=y

Changeset bc9b83dd1f66402b870301c3c7117b9c1484abb4 "cpumask: convert
c1e_mask in arch/x86/kernel/process.c to cpumask_var_t" contained a bug:
c1e_mask is manipulated even if C1E isn't detected (and hence not
allocated).

This is simply fixed by checking for NULL (which gcc optimizes out
anyway for CONFIG_CPUMASK_OFFSTACK=n, since it knows c1e_mask can never
be NULL).

In addition, fix a leak where select_idle_routine() re-allocates (and
re-clears) c1e_mask on every cpu init.

Reported-by: Ingo Molnar
Signed-off-by: Rusty Russell
Cc: Mike Travis
LKML-Reference: <200903171450.34549.rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/processor.h |  1 +
 arch/x86/kernel/cpu/common.c     |  1 +
 arch/x86/kernel/process.c        | 14 +++++++++++---
 3 files changed, 13 insertions(+), 3 deletions(-)
(limited to 'arch/x86/kernel/process.c')

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index d794d9483c5..9874dd98a29 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -733,6 +733,7 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
 
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
+extern void init_c1e_mask(void);
 
 extern unsigned long boot_option_idle_override;
 extern unsigned long idle_halt;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 82f6cc045ad..d7dd3c294e2 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -812,6 +812,7 @@ static void vgetcpu_set_mode(void)
 void __init identify_boot_cpu(void)
 {
 	identify_cpu(&boot_cpu_data);
+	init_c1e_mask();
 #ifdef CONFIG_X86_32
 	sysenter_setup();
 	enable_sep_cpu();
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 6638294cec8..78533a519d8 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -479,7 +479,8 @@ static int c1e_detected;
 
 void c1e_remove_cpu(int cpu)
 {
-	cpumask_clear_cpu(cpu, c1e_mask);
+	if (c1e_mask != NULL)
+		cpumask_clear_cpu(cpu, c1e_mask);
 }
 
 /*
@@ -556,13 +557,20 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 		pm_idle = mwait_idle;
 	} else if (check_c1e_idle(c)) {
 		printk(KERN_INFO "using C1E aware idle routine\n");
-		alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
-		cpumask_clear(c1e_mask);
 		pm_idle = c1e_idle;
 	} else
 		pm_idle = default_idle;
 }
 
+void __init init_c1e_mask(void)
+{
+	/* If we're using c1e_idle, we need to allocate c1e_mask. */
+	if (pm_idle == c1e_idle) {
+		alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
+		cpumask_clear(c1e_mask);
+	}
+}
+
 static int __init idle_setup(char *str)
 {
 	if (!str)
--
cgit v1.2.3-70-g09d2
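
One footnote on the shape of the fix: the allocate-then-clear pair in
init_c1e_mask() became foldable into a single call once zalloc_cpumask_var()
existed. That helper postdates this patch, so the following is an
illustration of the later idiom, not the committed code:

    void __init init_c1e_mask(void)
    {
            /* Allocate only if the C1E-aware idle routine was selected;
             * zalloc_cpumask_var() hands back the mask already cleared. */
            if (pm_idle == c1e_idle)
                    zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
    }

Either way the invariant is the same: c1e_mask is allocated at most once,
only when c1e_idle is in use, and every other accessor (such as
c1e_remove_cpu() above) must tolerate a NULL mask when C1E was never
detected. With CONFIG_CPUMASK_OFFSTACK=n the NULL check is compile-time
false and costs nothing, as the changelog notes.
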