Diffstat (limited to 'arch/i386')
 arch/i386/Makefile               |  2 +-
 arch/i386/kernel/apic.c          |  4 ----
 arch/i386/kernel/cpu/mcheck/p4.c |  2 --
 arch/i386/kernel/io_apic.c       | 10 +++++-----
 arch/i386/kernel/irq.c           |  3 ---
 arch/i386/kernel/process.c       | 53 +-----------------------------
 arch/i386/kernel/smp.c           |  2 --
 7 files changed, 7 insertions(+), 69 deletions(-)
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index f7ac1aea1d8..bd28f9f9b4b 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -31,7 +31,7 @@ LDFLAGS_vmlinux := --emit-relocs
endif
CHECKFLAGS += -D__i386__
-CFLAGS += -pipe -msoft-float -mregparm=3
+CFLAGS += -pipe -msoft-float -mregparm=3 -freg-struct-return
# prevent gcc from keeping the stack 16 byte aligned
CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
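The newly added -freg-struct-return flag makes gcc return small struct and union values in registers (on i386, up to 8 bytes in %eax/%edx) instead of through a hidden pointer to caller-allocated memory, which matches the register-based argument passing already selected by -mregparm=3. A minimal sketch of the kind of code this affects, using made-up names purely for illustration:

struct two_words {
	unsigned long lo;
	unsigned long hi;
};

/* With -freg-struct-return an 8-byte aggregate like this is returned in
 * %eax:%edx; without the flag the caller passes a hidden pointer and the
 * result is written back through memory. */
static struct two_words make_pair(unsigned long lo, unsigned long hi)
{
	struct two_words w = { .lo = lo, .hi = hi };
	return w;
}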
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 9655c233e6f..7a2c9cbdb51 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -38,7 +38,6 @@
#include <asm/hpet.h>
#include <asm/i8253.h>
#include <asm/nmi.h>
-#include <asm/idle.h>
#include <mach_apic.h>
#include <mach_apicdef.h>
@@ -561,7 +560,6 @@ void fastcall smp_apic_timer_interrupt(struct pt_regs *regs)
* Besides, if we don't timer interrupts ignore the global
* interrupt lock, which is the WrongThing (tm) to do.
*/
- exit_idle();
irq_enter();
local_apic_timer_interrupt();
irq_exit();
@@ -1221,7 +1219,6 @@ void smp_spurious_interrupt(struct pt_regs *regs)
{
unsigned long v;
- exit_idle();
irq_enter();
/*
* Check if this really is a spurious interrupt and ACK it
@@ -1245,7 +1242,6 @@ void smp_error_interrupt(struct pt_regs *regs)
{
unsigned long v, v1;
- exit_idle();
irq_enter();
/* First tickle the hardware, only then report what went on. -- REW */
v = apic_read(APIC_ESR);
diff --git a/arch/i386/kernel/cpu/mcheck/p4.c b/arch/i386/kernel/cpu/mcheck/p4.c
index 8359c19d3a2..504434a4601 100644
--- a/arch/i386/kernel/cpu/mcheck/p4.c
+++ b/arch/i386/kernel/cpu/mcheck/p4.c
@@ -12,7 +12,6 @@
#include <asm/system.h>
#include <asm/msr.h>
#include <asm/apic.h>
-#include <asm/idle.h>
#include <asm/therm_throt.h>
@@ -60,7 +59,6 @@ static void (*vendor_thermal_interrupt)(struct pt_regs *regs) = unexpected_therm
fastcall void smp_thermal_interrupt(struct pt_regs *regs)
{
- exit_idle();
irq_enter();
vendor_thermal_interrupt(regs);
irq_exit();
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 4ccebd454e2..6fec4dab70b 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -343,7 +343,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
break;
entry = irq_2_pin + entry->next;
}
- set_native_irq_info(irq, cpumask);
+ irq_desc[irq].affinity = cpumask;
spin_unlock_irqrestore(&ioapic_lock, flags);
}
@@ -1354,7 +1354,7 @@ static void __init setup_IO_APIC_irqs(void)
}
spin_lock_irqsave(&ioapic_lock, flags);
__ioapic_write_entry(apic, pin, entry);
- set_native_irq_info(irq, TARGET_CPUS);
+ irq_desc[irq].affinity = TARGET_CPUS;
spin_unlock_irqrestore(&ioapic_lock, flags);
}
}
@@ -2585,7 +2585,7 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
write_msi_msg(irq, &msg);
- set_native_irq_info(irq, mask);
+ irq_desc[irq].affinity = mask;
}
#endif /* CONFIG_SMP */
@@ -2669,7 +2669,7 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
dest = cpu_mask_to_apicid(mask);
target_ht_irq(irq, dest);
- set_native_irq_info(irq, mask);
+ irq_desc[irq].affinity = mask;
}
#endif
@@ -2875,7 +2875,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
spin_lock_irqsave(&ioapic_lock, flags);
__ioapic_write_entry(ioapic, pin, entry);
- set_native_irq_info(irq, TARGET_CPUS);
+ irq_desc[irq].affinity = TARGET_CPUS;
spin_unlock_irqrestore(&ioapic_lock, flags);
return 0;
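For context on the substitution repeated throughout this file: set_native_irq_info() in kernels of this era appears to have been a trivial inline helper that simply recorded the mask in the generic IRQ descriptor, roughly as in the sketch below (a reconstruction for illustration, not code from this patch; the real helper may carry extra CONFIG_SMP guards). Assigning irq_desc[irq].affinity directly is the same bookkeeping stated more plainly.

/* Rough reconstruction of the helper being open-coded above. */
static inline void set_native_irq_info(int irq, cpumask_t mask)
{
	irq_desc[irq].affinity = mask;
}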
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 0f2ca590bf2..8db8d514c9c 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -18,8 +18,6 @@
#include <linux/cpu.h>
#include <linux/delay.h>
-#include <asm/idle.h>
-
#include <asm/apic.h>
#include <asm/uaccess.h>
@@ -77,7 +75,6 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
union irq_ctx *curctx, *irqctx;
u32 *isp;
#endif
- exit_idle();
if (unlikely((unsigned)irq >= NR_IRQS)) {
printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index bea304d48cd..393a67d5d94 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -49,7 +49,6 @@
#include <asm/i387.h>
#include <asm/desc.h>
#include <asm/vm86.h>
-#include <asm/idle.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif
@@ -82,42 +81,6 @@ void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
-static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-
-void idle_notifier_register(struct notifier_block *n)
-{
- atomic_notifier_chain_register(&idle_notifier, n);
-}
-
-void idle_notifier_unregister(struct notifier_block *n)
-{
- atomic_notifier_chain_unregister(&idle_notifier, n);
-}
-
-static DEFINE_PER_CPU(volatile unsigned long, idle_state);
-
-void enter_idle(void)
-{
- /* needs to be atomic w.r.t. interrupts, not against other CPUs */
- __set_bit(0, &__get_cpu_var(idle_state));
- atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
-}
-
-static void __exit_idle(void)
-{
- /* needs to be atomic w.r.t. interrupts, not against other CPUs */
- if (__test_and_clear_bit(0, &__get_cpu_var(idle_state)) == 0)
- return;
- atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
-}
-
-void exit_idle(void)
-{
- if (current->pid)
- return;
- __exit_idle();
-}
-
void disable_hlt(void)
{
hlt_counter++;
@@ -168,7 +131,6 @@ EXPORT_SYMBOL(default_idle);
*/
static void poll_idle (void)
{
- local_irq_enable();
cpu_relax();
}
@@ -229,16 +191,7 @@ void cpu_idle(void)
play_dead();
__get_cpu_var(irq_stat).idle_timestamp = jiffies;
-
- /*
- * Idle routines should keep interrupts disabled
- * from here on, until they go to idle.
- * Otherwise, idle callbacks can misfire.
- */
- local_irq_disable();
- enter_idle();
idle();
- __exit_idle();
}
tick_nohz_restart_sched_tick();
preempt_enable_no_resched();
@@ -293,11 +246,7 @@ void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
__monitor((void *)&current_thread_info()->flags, 0, 0);
smp_mb();
if (!need_resched())
- __sti_mwait(eax, ecx);
- else
- local_irq_enable();
- } else {
- local_irq_enable();
+ __mwait(eax, ecx);
}
}
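The final hunk above swaps __sti_mwait() for plain __mwait(). With the idle-notifier bookkeeping removed, cpu_idle() no longer disables interrupts before calling the idle routine, so mwait_idle_with_hints() is entered with interrupts enabled and the variant that issues sti immediately before the MWAIT (relying on the one-instruction sti shadow to avoid a missed wakeup) is no longer needed. A rough sketch of the two helpers, reconstructed for illustration (the in-tree versions emit raw opcode bytes for old assemblers and may differ in detail):

/* Wait for a write to the address set up by __monitor(); eax/ecx carry
 * hints for the CPU. */
static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	asm volatile("mwait" : : "a" (eax), "c" (ecx));
}

/* Same, but re-enables interrupts with sti right before mwait so a
 * pending interrupt can terminate the wait; only useful when the caller
 * arrives with interrupts disabled. */
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	asm volatile("sti; mwait" : : "a" (eax), "c" (ecx));
}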
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 9bd9637ae69..0e8977871b1 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -23,7 +23,6 @@
#include <asm/mtrr.h>
#include <asm/tlbflush.h>
-#include <asm/idle.h>
#include <mach_apic.h>
/*
@@ -624,7 +623,6 @@ fastcall void smp_call_function_interrupt(struct pt_regs *regs)
/*
* At this point the info structure may be out of scope unless wait==1
*/
- exit_idle();
irq_enter();
(*func)(info);
irq_exit();