author     Li Shaohua <shaohua.li@intel.com>         2005-06-25 14:54:53 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org> 2005-06-25 16:24:29 -0700
commit     6fe940d6c300886de4ff1454d8ffd363172af433 (patch)
tree       58c34aed66a85ff72bdba1d5e3a3e3c967621a04
parent     67664c8f7e74def5adf66298a1245d82af72db2c (diff)
[PATCH] sep initializing rework
Make SEP init per-cpu, so it is hotplug safe.

Signed-off-by: Li Shaohua <shaohua.li@intel.com>
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
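In effect, programming the SYSENTER MSRs becomes a per-cpu operation, while allocating and populating the vsyscall page stays a one-time job done on the boot cpu. A condensed sketch of the resulting flow, pieced together from the hunks below (not a standalone compilable unit; it assumes the usual i386 bring-up path in which identify_cpu() runs on each cpu as it comes online, including hotplugged ones):

/* arch/i386/kernel/sysenter.c -- after this patch enable_sep_cpu() can be
 * called on any cpu and quietly does nothing on processors without SEP. */
void enable_sep_cpu(void)
{
	int cpu = get_cpu();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	if (!boot_cpu_has(X86_FEATURE_SEP)) {
		put_cpu();
		return;
	}

	tss->ss1 = __KERNEL_CS;
	tss->esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
	wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
	/* ... the SYSENTER_ESP/SYSENTER_EIP MSRs are written as before ... */
	put_cpu();
}

/* arch/i386/kernel/cpu/common.c -- identify_cpu() now drives both pieces:
 * the vsyscall page is set up once, on the boot cpu, and every cpu
 * (including one hotplugged later) programs its own MSRs. */
void __init identify_cpu(struct cpuinfo_x86 *c)
{
	/* ... existing identification code ... */
	if (c == &boot_cpu_data)
		sysenter_setup();	/* one-time vsyscall page setup */
	enable_sep_cpu();		/* per-cpu SYSENTER MSR setup */
}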
-rw-r--r--  arch/i386/kernel/cpu/common.c  |  3
-rw-r--r--  arch/i386/kernel/smp.c         | 10
-rw-r--r--  arch/i386/kernel/smpboot.c     | 11
-rw-r--r--  arch/i386/kernel/sysenter.c    | 12
-rw-r--r--  arch/i386/power/cpu.c          |  6
-rw-r--r--  include/asm-i386/processor.h   |  2
-rw-r--r--  include/asm-i386/smp.h         |  2
7 files changed, 38 insertions, 8 deletions
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index b9954248d0a..d58e169fbdb 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -432,6 +432,9 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
#ifdef CONFIG_X86_MCE
mcheck_init(c);
#endif
+ if (c == &boot_cpu_data)
+ sysenter_setup();
+ enable_sep_cpu();
}
#ifdef CONFIG_X86_HT
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 35f521612b2..cec4bde6716 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -495,6 +495,16 @@ struct call_data_struct {
int wait;
};
+void lock_ipi_call_lock(void)
+{
+ spin_lock_irq(&call_lock);
+}
+
+void unlock_ipi_call_lock(void)
+{
+ spin_unlock_irq(&call_lock);
+}
+
static struct call_data_struct * call_data;
/*
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index ad74a46e9ef..c5517f33230 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -449,7 +449,18 @@ static void __init start_secondary(void *unused)
* the local TLBs too.
*/
local_flush_tlb();
+
+ /*
+ * We need to hold call_lock, so there is no inconsistency
+ * between the time smp_call_function() determines the number of
+ * IPI recipients and the time when the determination is made
+ * as to which cpus actually receive the IPI. Holding this lock
+ * also keeps this cpu out of any currently in-progress
+ * smp_call_function().
+ */
+ lock_ipi_call_lock();
cpu_set(smp_processor_id(), cpu_online_map);
+ unlock_ipi_call_lock();
/* We can take interrupts now: we're officially "up". */
local_irq_enable();
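For context on the comment above: smp_call_function() takes the same call_lock (now exported via lock_ipi_call_lock()/unlock_ipi_call_lock()) while it decides which cpus will be sent the IPI and how many acknowledgements to wait for. A rough, non-verbatim illustration of the two critical sections that the lock serializes:

/* Sender side -- roughly what smp_call_function() does in
 * arch/i386/kernel/smp.c (simplified, not the verbatim kernel code): */
	spin_lock(&call_lock);
	/* decide, from cpu_online_map, who gets the IPI and how many
	 * acks to expect, then send CALL_FUNCTION_VECTOR and wait */
	spin_unlock(&call_lock);

/* Newly booted cpu -- start_secondary(), as patched above: */
	lock_ipi_call_lock();
	cpu_set(smp_processor_id(), cpu_online_map);
	unlock_ipi_call_lock();

With both sides under call_lock, a cpu cannot flip itself online in the middle of that decision, so the sender's expected-ack count and the set of cpus that actually see the IPI stay in agreement.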
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index 960d8bd137d..0bada1870bd 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -21,11 +21,16 @@
extern asmlinkage void sysenter_entry(void);
-void enable_sep_cpu(void *info)
+void enable_sep_cpu(void)
{
int cpu = get_cpu();
struct tss_struct *tss = &per_cpu(init_tss, cpu);
+ if (!boot_cpu_has(X86_FEATURE_SEP)) {
+ put_cpu();
+ return;
+ }
+
tss->ss1 = __KERNEL_CS;
tss->esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
@@ -41,7 +46,7 @@ void enable_sep_cpu(void *info)
extern const char vsyscall_int80_start, vsyscall_int80_end;
extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
-static int __init sysenter_setup(void)
+int __init sysenter_setup(void)
{
void *page = (void *)get_zeroed_page(GFP_ATOMIC);
@@ -58,8 +63,5 @@ static int __init sysenter_setup(void)
&vsyscall_sysenter_start,
&vsyscall_sysenter_end - &vsyscall_sysenter_start);
- on_each_cpu(enable_sep_cpu, NULL, 1, 1);
return 0;
}
-
-__initcall(sysenter_setup);
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c
index 6f521cf19a1..d099d01461f 100644
--- a/arch/i386/power/cpu.c
+++ b/arch/i386/power/cpu.c
@@ -22,9 +22,11 @@
#include <linux/device.h>
#include <linux/suspend.h>
#include <linux/acpi.h>
+
#include <asm/uaccess.h>
#include <asm/acpi.h>
#include <asm/tlbflush.h>
+#include <asm/processor.h>
static struct saved_context saved_context;
@@ -33,8 +35,6 @@ unsigned long saved_context_esp, saved_context_ebp;
unsigned long saved_context_esi, saved_context_edi;
unsigned long saved_context_eflags;
-extern void enable_sep_cpu(void *);
-
void __save_processor_state(struct saved_context *ctxt)
{
kernel_fpu_begin();
@@ -136,7 +136,7 @@ void __restore_processor_state(struct saved_context *ctxt)
* sysenter MSRs
*/
if (boot_cpu_has(X86_FEATURE_SEP))
- enable_sep_cpu(NULL);
+ enable_sep_cpu();
fix_processor_context();
do_fpu_end();
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index c76c50e9622..6f0f93d0d41 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -691,5 +691,7 @@ extern void select_idle_routine(const struct cpuinfo_x86 *c);
#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
extern unsigned long boot_option_idle_override;
+extern void enable_sep_cpu(void);
+extern int sysenter_setup(void);
#endif /* __ASM_I386_PROCESSOR_H */
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 507f2fd39a6..2451ead0ca5 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -42,6 +42,8 @@ extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
extern void smp_invalidate_rcv(void); /* Process an NMI */
extern void (*mtrr_hook) (void);
extern void zap_low_mappings (void);
+extern void lock_ipi_call_lock(void);
+extern void unlock_ipi_call_lock(void);
#define MAX_APICID 256
extern u8 x86_cpu_to_apicid[];