Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/apic.h                     11
-rw-r--r--  include/asm-i386/bugs.h                      2
-rw-r--r--  include/asm-i386/desc.h                      2
-rw-r--r--  include/asm-i386/elf.h                       4
-rw-r--r--  include/asm-i386/hpet.h                     16
-rw-r--r--  include/asm-i386/i8253.h                    15
-rw-r--r--  include/asm-i386/idle.h                     14
-rw-r--r--  include/asm-i386/mach-default/do_timer.h    78
-rw-r--r--  include/asm-i386/mach-voyager/do_timer.h    27
-rw-r--r--  include/asm-i386/mce.h                       2
-rw-r--r--  include/asm-i386/mmu_context.h               2
-rw-r--r--  include/asm-i386/mpspec.h                    1
-rw-r--r--  include/asm-i386/msr.h                       3
-rw-r--r--  include/asm-i386/paravirt.h                168
-rw-r--r--  include/asm-i386/pda.h                      12
-rw-r--r--  include/asm-i386/pgalloc.h                  30
-rw-r--r--  include/asm-i386/processor.h                14
-rw-r--r--  include/asm-i386/ptrace.h                    8
-rw-r--r--  include/asm-i386/segment.h                  19
-rw-r--r--  include/asm-i386/smp.h                       5
-rw-r--r--  include/asm-i386/time.h                      1
-rw-r--r--  include/asm-i386/timer.h                     3
-rw-r--r--  include/asm-i386/tsc.h                      49
-rw-r--r--  include/asm-i386/vmi.h                     262
-rw-r--r--  include/asm-i386/vmi_time.h                103
26 files changed, 616 insertions, 237 deletions
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index 41a44319905..cc6b1652249 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -43,6 +43,8 @@ extern void generic_apic_probe(void);
#define apic_write native_apic_write
#define apic_write_atomic native_apic_write_atomic
#define apic_read native_apic_read
+#define setup_boot_clock setup_boot_APIC_clock
+#define setup_secondary_clock setup_secondary_APIC_clock
#endif
static __inline fastcall void native_apic_write(unsigned long reg,
@@ -93,9 +95,7 @@ static inline void ack_APIC_irq(void)
apic_write_around(APIC_EOI, 0);
}
-extern void (*wait_timer_tick)(void);
-
-extern int get_maxlvt(void);
+extern int lapic_get_maxlvt(void);
extern void clear_local_APIC(void);
extern void connect_bsp_APIC (void);
extern void disconnect_bsp_APIC (int virt_wire_setup);
@@ -111,14 +111,9 @@ extern void smp_local_timer_interrupt (void);
extern void setup_boot_APIC_clock (void);
extern void setup_secondary_APIC_clock (void);
extern int APIC_init_uniprocessor (void);
-extern void disable_APIC_timer(void);
-extern void enable_APIC_timer(void);
extern void enable_NMI_through_LVT0 (void * dummy);
-void smp_send_timer_broadcast_ipi(void);
-void switch_APIC_timer_to_ipi(void *cpumask);
-void switch_ipi_to_APIC_timer(void *cpumask);
#define ARCH_APICTIMER_STOPS_ON_C3 1
extern int timer_over_8254;
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
index 38f1aebbbdb..c90c7c49930 100644
--- a/include/asm-i386/bugs.h
+++ b/include/asm-i386/bugs.h
@@ -160,7 +160,7 @@ static void __init check_config(void)
* If we configured ourselves for a TSC, we'd better have one!
*/
#ifdef CONFIG_X86_TSC
- if (!cpu_has_tsc)
+ if (!cpu_has_tsc && !tsc_disable)
panic("Kernel compiled for Pentium+, requires TSC feature!");
#endif
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index f398cc45644..050831f34f7 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -22,7 +22,7 @@ struct Xgt_desc_struct {
extern struct Xgt_desc_struct idt_descr;
DECLARE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
-
+extern struct Xgt_desc_struct early_gdt_descr;
static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
{
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index 369035dfe4b..8d33c9bb7c1 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -90,8 +90,8 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
pr_reg[6] = regs->eax; \
pr_reg[7] = regs->xds; \
pr_reg[8] = regs->xes; \
- savesegment(fs,pr_reg[9]); \
- pr_reg[10] = regs->xgs; \
+ pr_reg[9] = regs->xfs; \
+ savesegment(gs,pr_reg[10]); \
pr_reg[11] = regs->orig_eax; \
pr_reg[12] = regs->eip; \
pr_reg[13] = regs->xcs; \
diff --git a/include/asm-i386/hpet.h b/include/asm-i386/hpet.h
index e47be9a56cc..fc03cf9de5c 100644
--- a/include/asm-i386/hpet.h
+++ b/include/asm-i386/hpet.h
@@ -90,16 +90,19 @@
#define HPET_MIN_PERIOD (100000UL)
#define HPET_TICK_RATE (HZ * 100000UL)
-extern unsigned long hpet_tick; /* hpet clks count per tick */
extern unsigned long hpet_address; /* hpet memory map physical address */
-extern int hpet_use_timer;
+extern int is_hpet_enabled(void);
+#ifdef CONFIG_X86_64
+extern unsigned long hpet_tick; /* hpet clks count per tick */
+extern int hpet_use_timer;
extern int hpet_rtc_timer_init(void);
extern int hpet_enable(void);
-extern int hpet_reenable(void);
-extern int is_hpet_enabled(void);
extern int is_hpet_capable(void);
extern int hpet_readl(unsigned long a);
+#else
+extern int hpet_enable(void);
+#endif
#ifdef CONFIG_HPET_EMULATE_RTC
extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask);
@@ -110,5 +113,10 @@ extern int hpet_rtc_dropped_irq(void);
extern int hpet_rtc_timer_init(void);
extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
#endif /* CONFIG_HPET_EMULATE_RTC */
+
+#else
+
+static inline int hpet_enable(void) { return 0; }
+
#endif /* CONFIG_HPET_TIMER */
#endif /* _I386_HPET_H */
diff --git a/include/asm-i386/i8253.h b/include/asm-i386/i8253.h
index 015d8df0769..6cb0dd4dcdd 100644
--- a/include/asm-i386/i8253.h
+++ b/include/asm-i386/i8253.h
@@ -1,6 +1,21 @@
#ifndef __ASM_I8253_H__
#define __ASM_I8253_H__
+#include <linux/clockchips.h>
+
extern spinlock_t i8253_lock;
+extern struct clock_event_device *global_clock_event;
+
+/**
+ * pit_interrupt_hook - hook into timer tick
+ * @regs: standard registers from interrupt
+ *
+ * Call the global clock event handler.
+ **/
+static inline void pit_interrupt_hook(void)
+{
+ global_clock_event->event_handler(global_clock_event);
+}
+
#endif /* __ASM_I8253_H__ */
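With global_clock_event exported, the PIT interrupt path only has to forward each tick into the clockevents layer. A minimal sketch of a caller, assuming the usual IRQ0 wiring (the handler name below is illustrative, not part of this patch):

/* Illustrative only: an IRQ0 handler forwarding the tick via
 * pit_interrupt_hook(); the real handler lives in the arch timer code. */
#include <linux/interrupt.h>
#include <asm/i8253.h>

static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	/* Dispatch to whichever clock_event_device is registered as the
	 * global one, normally the PIT clockevent set up at boot. */
	pit_interrupt_hook();
	return IRQ_HANDLED;
}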
diff --git a/include/asm-i386/idle.h b/include/asm-i386/idle.h
new file mode 100644
index 00000000000..87ab9391119
--- /dev/null
+++ b/include/asm-i386/idle.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_I386_IDLE_H
+#define _ASM_I386_IDLE_H 1
+
+#define IDLE_START 1
+#define IDLE_END 2
+
+struct notifier_block;
+void idle_notifier_register(struct notifier_block *n);
+void idle_notifier_unregister(struct notifier_block *n);
+
+void exit_idle(void);
+void enter_idle(void);
+
+#endif
diff --git a/include/asm-i386/mach-default/do_timer.h b/include/asm-i386/mach-default/do_timer.h
index 7d606e3364a..56e5689863a 100644
--- a/include/asm-i386/mach-default/do_timer.h
+++ b/include/asm-i386/mach-default/do_timer.h
@@ -1,86 +1,16 @@
/* defines for inline arch setup functions */
+#include <linux/clockchips.h>
-#include <asm/apic.h>
#include <asm/i8259.h>
+#include <asm/i8253.h>
/**
* do_timer_interrupt_hook - hook into timer tick
- * @regs: standard registers from interrupt
*
- * Description:
- * This hook is called immediately after the timer interrupt is ack'd.
- * It's primary purpose is to allow architectures that don't possess
- * individual per CPU clocks (like the CPU APICs supply) to broadcast the
- * timer interrupt as a means of triggering reschedules etc.
+ * Call the pit clock event handler. see asm/i8253.h
**/
static inline void do_timer_interrupt_hook(void)
{
- do_timer(1);
-#ifndef CONFIG_SMP
- update_process_times(user_mode_vm(get_irq_regs()));
-#endif
-/*
- * In the SMP case we use the local APIC timer interrupt to do the
- * profiling, except when we simulate SMP mode on a uniprocessor
- * system, in that case we have to call the local interrupt handler.
- */
-#ifndef CONFIG_X86_LOCAL_APIC
- profile_tick(CPU_PROFILING);
-#else
- if (!using_apic_timer)
- smp_local_timer_interrupt();
-#endif
-}
-
-
-/* you can safely undefine this if you don't have the Neptune chipset */
-
-#define BUGGY_NEPTUN_TIMER
-
-/**
- * do_timer_overflow - process a detected timer overflow condition
- * @count: hardware timer interrupt count on overflow
- *
- * Description:
- * This call is invoked when the jiffies count has not incremented but
- * the hardware timer interrupt has. It means that a timer tick interrupt
- * came along while the previous one was pending, thus a tick was missed
- **/
-static inline int do_timer_overflow(int count)
-{
- int i;
-
- spin_lock(&i8259A_lock);
- /*
- * This is tricky when I/O APICs are used;
- * see do_timer_interrupt().
- */
- i = inb(0x20);
- spin_unlock(&i8259A_lock);
-
- /* assumption about timer being IRQ0 */
- if (i & 0x01) {
- /*
- * We cannot detect lost timer interrupts ...
- * well, that's why we call them lost, don't we? :)
- * [hmm, on the Pentium and Alpha we can ... sort of]
- */
- count -= LATCH;
- } else {
-#ifdef BUGGY_NEPTUN_TIMER
- /*
- * for the Neptun bug we know that the 'latch'
- * command doesn't latch the high and low value
- * of the counter atomically. Thus we have to
- * substract 256 from the counter
- * ... funny, isnt it? :)
- */
-
- count -= 256;
-#else
- printk("do_slow_gettimeoffset(): hardware timer problem?\n");
-#endif
- }
- return count;
+ pit_interrupt_hook();
}
diff --git a/include/asm-i386/mach-voyager/do_timer.h b/include/asm-i386/mach-voyager/do_timer.h
index 04e69c104a7..60f9dcc15d5 100644
--- a/include/asm-i386/mach-voyager/do_timer.h
+++ b/include/asm-i386/mach-voyager/do_timer.h
@@ -1,25 +1,18 @@
/* defines for inline arch setup functions */
+#include <linux/clockchips.h>
+
#include <asm/voyager.h>
+#include <asm/i8253.h>
+/**
+ * do_timer_interrupt_hook - hook into timer tick
+ * @regs: standard registers from interrupt
+ *
+ * Call the pit clock event handler. see asm/i8253.h
+ **/
static inline void do_timer_interrupt_hook(void)
{
- do_timer(1);
-#ifndef CONFIG_SMP
- update_process_times(user_mode_vm(irq_regs));
-#endif
-
+ pit_interrupt_hook();
voyager_timer_interrupt();
}
-static inline int do_timer_overflow(int count)
-{
- /* can't read the ISR, just assume 1 tick
- overflow */
- if(count > LATCH || count < 0) {
- printk(KERN_ERR "VOYAGER PROBLEM: count is %d, latch is %d\n", count, LATCH);
- count = LATCH;
- }
- count -= LATCH;
-
- return count;
-}
diff --git a/include/asm-i386/mce.h b/include/asm-i386/mce.h
index 7cc1a973bf0..b0a02ee34ff 100644
--- a/include/asm-i386/mce.h
+++ b/include/asm-i386/mce.h
@@ -3,3 +3,5 @@ extern void mcheck_init(struct cpuinfo_x86 *c);
#else
#define mcheck_init(c) do {} while(0)
#endif
+
+extern int mce_disabled;
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index 68ff102d6f5..e6aa30f8de5 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -63,7 +63,7 @@ static inline void switch_mm(struct mm_struct *prev,
}
#define deactivate_mm(tsk, mm) \
- asm("movl %0,%%fs": :"r" (0));
+ asm("movl %0,%%gs": :"r" (0));
#define activate_mm(prev, next) \
switch_mm((prev),(next),NULL)
diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h
index 770bf6da8c3..f21349399d1 100644
--- a/include/asm-i386/mpspec.h
+++ b/include/asm-i386/mpspec.h
@@ -23,7 +23,6 @@ extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES];
extern int mpc_default_type;
extern unsigned long mp_lapic_addr;
extern int pic_mode;
-extern int using_apic_timer;
#ifdef CONFIG_ACPI
extern void mp_register_lapic (u8 id, u8 enabled);
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
index 609a3899475..6db40d0583f 100644
--- a/include/asm-i386/msr.h
+++ b/include/asm-i386/msr.h
@@ -307,4 +307,7 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390
+/* Geode defined MSRs */
+#define MSR_GEODE_BUSCONT_CONF0 0x1900
+
#endif /* __ASM_MSR_H */
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index 9f06265065f..6317e0a4d73 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -59,90 +59,102 @@ struct paravirt_ops
convention. This makes it easier to implement inline
assembler replacements. */
- void (fastcall *cpuid)(unsigned int *eax, unsigned int *ebx,
+ void (*cpuid)(unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx);
- unsigned long (fastcall *get_debugreg)(int regno);
- void (fastcall *set_debugreg)(int regno, unsigned long value);
+ unsigned long (*get_debugreg)(int regno);
+ void (*set_debugreg)(int regno, unsigned long value);
- void (fastcall *clts)(void);
+ void (*clts)(void);
- unsigned long (fastcall *read_cr0)(void);
- void (fastcall *write_cr0)(unsigned long);
+ unsigned long (*read_cr0)(void);
+ void (*write_cr0)(unsigned long);
- unsigned long (fastcall *read_cr2)(void);
- void (fastcall *write_cr2)(unsigned long);
+ unsigned long (*read_cr2)(void);
+ void (*write_cr2)(unsigned long);
- unsigned long (fastcall *read_cr3)(void);
- void (fastcall *write_cr3)(unsigned long);
+ unsigned long (*read_cr3)(void);
+ void (*write_cr3)(unsigned long);
- unsigned long (fastcall *read_cr4_safe)(void);
- unsigned long (fastcall *read_cr4)(void);
- void (fastcall *write_cr4)(unsigned long);
+ unsigned long (*read_cr4_safe)(void);
+ unsigned long (*read_cr4)(void);
+ void (*write_cr4)(unsigned long);
- unsigned long (fastcall *save_fl)(void);
- void (fastcall *restore_fl)(unsigned long);
- void (fastcall *irq_disable)(void);
- void (fastcall *irq_enable)(void);
- void (fastcall *safe_halt)(void);
- void (fastcall *halt)(void);
- void (fastcall *wbinvd)(void);
+ unsigned long (*save_fl)(void);
+ void (*restore_fl)(unsigned long);
+ void (*irq_disable)(void);
+ void (*irq_enable)(void);
+ void (*safe_halt)(void);
+ void (*halt)(void);
+ void (*wbinvd)(void);
/* err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
- u64 (fastcall *read_msr)(unsigned int msr, int *err);
- int (fastcall *write_msr)(unsigned int msr, u64 val);
-
- u64 (fastcall *read_tsc)(void);
- u64 (fastcall *read_pmc)(void);
-
- void (fastcall *load_tr_desc)(void);
- void (fastcall *load_gdt)(const struct Xgt_desc_struct *);
- void (fastcall *load_idt)(const struct Xgt_desc_struct *);
- void (fastcall *store_gdt)(struct Xgt_desc_struct *);
- void (fastcall *store_idt)(struct Xgt_desc_struct *);
- void (fastcall *set_ldt)(const void *desc, unsigned entries);
- unsigned long (fastcall *store_tr)(void);
- void (fastcall *load_tls)(struct thread_struct *t, unsigned int cpu);
- void (fastcall *write_ldt_entry)(void *dt, int entrynum,
+ u64 (*read_msr)(unsigned int msr, int *err);
+ int (*write_msr)(unsigned int msr, u64 val);
+
+ u64 (*read_tsc)(void);
+ u64 (*read_pmc)(void);
+
+ void (*load_tr_desc)(void);
+ void (*load_gdt)(const struct Xgt_desc_struct *);
+ void (*load_idt)(const struct Xgt_desc_struct *);
+ void (*store_gdt)(struct Xgt_desc_struct *);
+ void (*store_idt)(struct Xgt_desc_struct *);
+ void (*set_ldt)(const void *desc, unsigned entries);
+ unsigned long (*store_tr)(void);
+ void (*load_tls)(struct thread_struct *t, unsigned int cpu);
+ void (*write_ldt_entry)(void *dt, int entrynum,
u32 low, u32 high);
- void (fastcall *write_gdt_entry)(void *dt, int entrynum,
+ void (*write_gdt_entry)(void *dt, int entrynum,
u32 low, u32 high);
- void (fastcall *write_idt_entry)(void *dt, int entrynum,
+ void (*write_idt_entry)(void *dt, int entrynum,
u32 low, u32 high);
- void (fastcall *load_esp0)(struct tss_struct *tss,
+ void (*load_esp0)(struct tss_struct *tss,
struct thread_struct *thread);
- void (fastcall *set_iopl_mask)(unsigned mask);
+ void (*set_iopl_mask)(unsigned mask);
- void (fastcall *io_delay)(void);
+ void (*io_delay)(void);
void (*const_udelay)(unsigned long loops);
#ifdef CONFIG_X86_LOCAL_APIC
- void (fastcall *apic_write)(unsigned long reg, unsigned long v);
- void (fastcall *apic_write_atomic)(unsigned long reg, unsigned long v);
- unsigned long (fastcall *apic_read)(unsigned long reg);
+ void (*apic_write)(unsigned long reg, unsigned long v);
+ void (*apic_write_atomic)(unsigned long reg, unsigned long v);
+ unsigned long (*apic_read)(unsigned long reg);
+ void (*setup_boot_clock)(void);
+ void (*setup_secondary_clock)(void);
#endif
- void (fastcall *flush_tlb_user)(void);
- void (fastcall *flush_tlb_kernel)(void);
- void (fastcall *flush_tlb_single)(u32 addr);
-
- void (fastcall *set_pte)(pte_t *ptep, pte_t pteval);
- void (fastcall *set_pte_at)(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval);
- void (fastcall *set_pmd)(pmd_t *pmdp, pmd_t pmdval);
- void (fastcall *pte_update)(struct mm_struct *mm, u32 addr, pte_t *ptep);
- void (fastcall *pte_update_defer)(struct mm_struct *mm, u32 addr, pte_t *ptep);
+ void (*flush_tlb_user)(void);
+ void (*flush_tlb_kernel)(void);
+ void (*flush_tlb_single)(u32 addr);
+
+ void (*alloc_pt)(u32 pfn);
+ void (*alloc_pd)(u32 pfn);
+ void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
+ void (*release_pt)(u32 pfn);
+ void (*release_pd)(u32 pfn);
+
+ void (*set_pte)(pte_t *ptep, pte_t pteval);
+ void (*set_pte_at)(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval);
+ void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
+ void (*pte_update)(struct mm_struct *mm, u32 addr, pte_t *ptep);
+ void (*pte_update_defer)(struct mm_struct *mm, u32 addr, pte_t *ptep);
#ifdef CONFIG_X86_PAE
- void (fastcall *set_pte_atomic)(pte_t *ptep, pte_t pteval);
- void (fastcall *set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte);
- void (fastcall *set_pud)(pud_t *pudp, pud_t pudval);
- void (fastcall *pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
- void (fastcall *pmd_clear)(pmd_t *pmdp);
+ void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
+ void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte);
+ void (*set_pud)(pud_t *pudp, pud_t pudval);
+ void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+ void (*pmd_clear)(pmd_t *pmdp);
#endif
+ void (*set_lazy_mode)(int mode);
+
/* These two are jmp to, not actually called. */
- void (fastcall *irq_enable_sysexit)(void);
- void (fastcall *iret)(void);
+ void (*irq_enable_sysexit)(void);
+ void (*iret)(void);
+
+ void (*startup_ipi_hook)(int phys_apicid, unsigned long start_eip, unsigned long start_esp);
};
/* Mark a paravirt probe function. */
@@ -313,13 +325,38 @@ static inline unsigned long apic_read(unsigned long reg)
{
return paravirt_ops.apic_read(reg);
}
+
+static inline void setup_boot_clock(void)
+{
+ paravirt_ops.setup_boot_clock();
+}
+
+static inline void setup_secondary_clock(void)
+{
+ paravirt_ops.setup_secondary_clock();
+}
#endif
+#ifdef CONFIG_SMP
+static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
+ unsigned long start_esp)
+{
+ return paravirt_ops.startup_ipi_hook(phys_apicid, start_eip, start_esp);
+}
+#endif
#define __flush_tlb() paravirt_ops.flush_tlb_user()
#define __flush_tlb_global() paravirt_ops.flush_tlb_kernel()
#define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr)
+#define paravirt_alloc_pt(pfn) paravirt_ops.alloc_pt(pfn)
+#define paravirt_release_pt(pfn) paravirt_ops.release_pt(pfn)
+
+#define paravirt_alloc_pd(pfn) paravirt_ops.alloc_pd(pfn)
+#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) \
+ paravirt_ops.alloc_pd_clone(pfn, clonepfn, start, count)
+#define paravirt_release_pd(pfn) paravirt_ops.release_pd(pfn)
+
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
paravirt_ops.set_pte(ptep, pteval);
@@ -372,6 +409,19 @@ static inline void pmd_clear(pmd_t *pmdp)
}
#endif
+/* Lazy mode for batching updates / context switch */
+#define PARAVIRT_LAZY_NONE 0
+#define PARAVIRT_LAZY_MMU 1
+#define PARAVIRT_LAZY_CPU 2
+
+#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
+#define arch_enter_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_CPU)
+#define arch_leave_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_NONE)
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_MMU)
+#define arch_leave_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_NONE)
+
/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch {
u8 *instr; /* original instructions */
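The new set_lazy_mode hook and the arch_enter/leave_lazy_* macros let a hypervisor backend queue a burst of page-table or CPU-state updates and apply them in one go. A minimal usage sketch for the MMU side, assuming the caller already holds the page-table lock (function and variable names are illustrative):

/* Illustrative only: bracketing a run of PTE writes with lazy MMU mode
 * so a paravirt backend may defer them and flush once on leave. */
static void set_ptes_batched(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, const pte_t *vals, int count)
{
	int i;

	arch_enter_lazy_mmu_mode();	/* set_lazy_mode(PARAVIRT_LAZY_MMU) */
	for (i = 0; i < count; i++, addr += PAGE_SIZE)
		set_pte_at(mm, addr, ptep + i, vals[i]);
	arch_leave_lazy_mmu_mode();	/* set_lazy_mode(PARAVIRT_LAZY_NONE) */
}

On bare hardware the hook is expected to be a no-op, so the bracketing costs only two indirect calls.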
diff --git a/include/asm-i386/pda.h b/include/asm-i386/pda.h
index 2ba2736aa10..b12d59a318b 100644
--- a/include/asm-i386/pda.h
+++ b/include/asm-i386/pda.h
@@ -39,19 +39,19 @@ extern struct i386_pda _proxy_pda;
if (0) { T__ tmp__; tmp__ = (val); } \
switch (sizeof(_proxy_pda.field)) { \
case 1: \
- asm(op "b %1,%%gs:%c2" \
+ asm(op "b %1,%%fs:%c2" \
: "+m" (_proxy_pda.field) \
:"ri" ((T__)val), \
"i"(pda_offset(field))); \
break; \
case 2: \
- asm(op "w %1,%%gs:%c2" \
+ asm(op "w %1,%%fs:%c2" \
: "+m" (_proxy_pda.field) \
:"ri" ((T__)val), \
"i"(pda_offset(field))); \
break; \
case 4: \
- asm(op "l %1,%%gs:%c2" \
+ asm(op "l %1,%%fs:%c2" \
: "+m" (_proxy_pda.field) \
:"ri" ((T__)val), \
"i"(pda_offset(field))); \
@@ -65,19 +65,19 @@ extern struct i386_pda _proxy_pda;
typeof(_proxy_pda.field) ret__; \
switch (sizeof(_proxy_pda.field)) { \
case 1: \
- asm(op "b %%gs:%c1,%0" \
+ asm(op "b %%fs:%c1,%0" \
: "=r" (ret__) \
: "i" (pda_offset(field)), \
"m" (_proxy_pda.field)); \
break; \
case 2: \
- asm(op "w %%gs:%c1,%0" \
+ asm(op "w %%fs:%c1,%0" \
: "=r" (ret__) \
: "i" (pda_offset(field)), \
"m" (_proxy_pda.field)); \
break; \
case 4: \
- asm(op "l %%gs:%c1,%0" \
+ asm(op "l %%fs:%c1,%0" \
: "=r" (ret__) \
: "i" (pda_offset(field)), \
"m" (_proxy_pda.field)); \
diff --git a/include/asm-i386/pgalloc.h b/include/asm-i386/pgalloc.h
index 4b1e61359f8..c8dc2d0141a 100644
--- a/include/asm-i386/pgalloc.h
+++ b/include/asm-i386/pgalloc.h
@@ -5,13 +5,31 @@
#include <linux/threads.h>
#include <linux/mm.h> /* for struct page */
-#define pmd_populate_kernel(mm, pmd, pte) \
- set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define paravirt_alloc_pt(pfn) do { } while (0)
+#define paravirt_alloc_pd(pfn) do { } while (0)
+#define paravirt_alloc_pd(pfn) do { } while (0)
+#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
+#define paravirt_release_pt(pfn) do { } while (0)
+#define paravirt_release_pd(pfn) do { } while (0)
+#endif
+
+#define pmd_populate_kernel(mm, pmd, pte) \
+do { \
+ paravirt_alloc_pt(__pa(pte) >> PAGE_SHIFT); \
+ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))); \
+} while (0)
#define pmd_populate(mm, pmd, pte) \
+do { \
+ paravirt_alloc_pt(page_to_pfn(pte)); \
set_pmd(pmd, __pmd(_PAGE_TABLE + \
((unsigned long long)page_to_pfn(pte) << \
- (unsigned long long) PAGE_SHIFT)))
+ (unsigned long long) PAGE_SHIFT))); \
+} while (0)
+
/*
* Allocate and free page tables.
*/
@@ -32,7 +50,11 @@ static inline void pte_free(struct page *pte)
}
-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+#define __pte_free_tlb(tlb,pte) \
+do { \
+ paravirt_release_pt(page_to_pfn(pte)); \
+ tlb_remove_page((tlb),(pte)); \
+} while (0)
#ifdef CONFIG_X86_PAE
/*
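The ordering inside these macros is the point of the change: the hypervisor is told about a page-table page (paravirt_alloc_pt) before set_pmd() makes it reachable, and told again (paravirt_release_pt) before the page returns to the allocator. Spelled out as plain code, under the same assumptions the macros make:

/* Illustrative only: the expansion of pmd_populate(), written as a
 * function.  Notify the hypervisor about the pte page's pfn first,
 * then publish it through the pmd entry. */
static void install_pte_page(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
{
	unsigned long pfn = page_to_pfn(pte);

	paravirt_alloc_pt(pfn);
	set_pmd(pmd, __pmd(_PAGE_TABLE +
			   ((unsigned long long)pfn << PAGE_SHIFT)));
}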
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 359f10b54f5..edfbe46a5e1 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -257,6 +257,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
: :"a" (eax), "c" (ecx));
}
+static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+{
+ /* "mwait %eax,%ecx;" */
+ asm volatile(
+ "sti; .byte 0x0f,0x01,0xc9;"
+ : :"a" (eax), "c" (ecx));
+}
+
extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
/* from system description table in BIOS. Mostly for MCA use, but
@@ -424,7 +432,7 @@ struct thread_struct {
.vm86_info = NULL, \
.sysenter_cs = __KERNEL_CS, \
.io_bitmap_ptr = NULL, \
- .gs = __KERNEL_PDA, \
+ .fs = __KERNEL_PDA, \
}
/*
@@ -442,8 +450,8 @@ struct thread_struct {
}
#define start_thread(regs, new_eip, new_esp) do { \
- __asm__("movl %0,%%fs": :"r" (0)); \
- regs->xgs = 0; \
+ __asm__("movl %0,%%gs": :"r" (0)); \
+ regs->xfs = 0; \
set_fs(USER_DS); \
regs->xds = __USER_DS; \
regs->xes = __USER_DS; \
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h
index bdbc894339b..6002597b9e1 100644
--- a/include/asm-i386/ptrace.h
+++ b/include/asm-i386/ptrace.h
@@ -16,8 +16,8 @@ struct pt_regs {
long eax;
int xds;
int xes;
- /* int xfs; */
- int xgs;
+ int xfs;
+ /* int xgs; */
long orig_eax;
long eip;
int xcs;
@@ -49,6 +49,10 @@ static inline int user_mode_vm(struct pt_regs *regs)
{
return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL;
}
+static inline int v8086_mode(struct pt_regs *regs)
+{
+ return (regs->eflags & VM_MASK);
+}
#define instruction_pointer(regs) ((regs)->eip)
#define regs_return_value(regs) ((regs)->eax)
diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h
index 3c796af3377..065f10bfa48 100644
--- a/include/asm-i386/segment.h
+++ b/include/asm-i386/segment.h
@@ -83,14 +83,8 @@
* The GDT has 32 entries
*/
#define GDT_ENTRIES 32
-
#define GDT_SIZE (GDT_ENTRIES * 8)
-/* Matches __KERNEL_CS and __USER_CS (they must be 2 entries apart) */
-#define SEGMENT_IS_FLAT_CODE(x) (((x) & 0xec) == GDT_ENTRY_KERNEL_CS * 8)
-/* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
-
/* Simple and small GDT entries for booting only */
#define GDT_ENTRY_BOOT_CS 2
@@ -134,4 +128,17 @@
#ifndef CONFIG_PARAVIRT
#define get_kernel_rpl() 0
#endif
+/*
+ * Matching rules for certain types of segments.
+ */
+
+/* Matches only __KERNEL_CS, ignoring PnP / USER / APM segments */
+#define SEGMENT_IS_KERNEL_CODE(x) (((x) & 0xfc) == GDT_ENTRY_KERNEL_CS * 8)
+
+/* Matches __KERNEL_CS and __USER_CS (they must be 2 entries apart) */
+#define SEGMENT_IS_FLAT_CODE(x) (((x) & 0xec) == GDT_ENTRY_KERNEL_CS * 8)
+
+/* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
+
#endif
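These masks operate on a selector value, where bits 0-1 hold the RPL: 0xfc compares the exact descriptor index, 0xec also ignores the 0x10 that separates __KERNEL_CS from __USER_CS (two GDT entries apart), and 0xf4 ignores the single-entry gap between the consecutive PnP selectors. A small sketch of the intended use on a trap frame (hedged; the real callers sit in the fault and trap paths):

/* Illustrative only: classifying the code segment saved in a trap frame. */
static int trapped_in_kernel_code(struct pt_regs *regs)
{
	/* Only the kernel CS descriptor matches here; SEGMENT_IS_FLAT_CODE()
	 * would also accept __USER_CS. */
	return SEGMENT_IS_KERNEL_CODE(regs->xcs);
}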
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
index 76316275d6f..0e8077cbfda 100644
--- a/include/asm-i386/setup.h
+++ b/include/asm-i386/setup.h
@@ -77,6 +77,8 @@ int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map);
void __init add_memory_region(unsigned long long start,
unsigned long long size, int type);
+extern unsigned long init_pg_tables_end;
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 64fe624c02c..6bf0033a301 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -52,6 +52,11 @@ extern void cpu_exit_clear(void);
extern void cpu_uninit(void);
#endif
+#ifndef CONFIG_PARAVIRT
+#define startup_ipi_hook(phys_apicid, start_eip, start_esp) \
+do { } while (0)
+#endif
+
/*
* This function is needed by all SMP systems. It must _always_ be valid
* from the initial startup. We map APIC_BASE very early in page_setup(),
diff --git a/include/asm-i386/time.h b/include/asm-i386/time.h
index ea8065af825..571b4294dc2 100644
--- a/include/asm-i386/time.h
+++ b/include/asm-i386/time.h
@@ -30,6 +30,7 @@ static inline int native_set_wallclock(unsigned long nowtime)
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
+extern unsigned long long native_sched_clock(void);
#else /* !CONFIG_PARAVIRT */
#define get_wallclock() native_get_wallclock()
diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h
index d0ebd05f851..4752c3a6a70 100644
--- a/include/asm-i386/timer.h
+++ b/include/asm-i386/timer.h
@@ -8,6 +8,9 @@ void setup_pit_timer(void);
/* Modifiers for buggy PIT handling */
extern int pit_latch_buggy;
extern int timer_ack;
+extern int no_timer_check;
+extern unsigned long long (*custom_sched_clock)(void);
+extern int no_sync_cmos_clock;
extern int recalibrate_cpu_khz(void);
#endif
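custom_sched_clock is a hook a paravirt backend can install, so the scheduler clock reads hypervisor cycle counters instead of the raw TSC; vmi_time.h later in this diff declares vmi_sched_clock for that purpose. A hedged sketch of the dispatch, assuming the native fallback declared in asm/time.h; the surrounding sched_clock() body is not part of this diff:

/* Illustrative only: routing sched_clock() through the optional hook. */
unsigned long long sched_clock(void)
{
	if (custom_sched_clock)
		return custom_sched_clock();	/* e.g. vmi_sched_clock() */
	return native_sched_clock();
}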
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h
index c13933185c1..e997891cc7c 100644
--- a/include/asm-i386/tsc.h
+++ b/include/asm-i386/tsc.h
@@ -1,48 +1 @@
-/*
- * linux/include/asm-i386/tsc.h
- *
- * i386 TSC related functions
- */
-#ifndef _ASM_i386_TSC_H
-#define _ASM_i386_TSC_H
-
-#include <asm/processor.h>
-
-/*
- * Standard way to access the cycle counter on i586+ CPUs.
- * Currently only used on SMP.
- *
- * If you really have a SMP machine with i486 chips or older,
- * compile for that, and this will just always return zero.
- * That's ok, it just means that the nicer scheduling heuristics
- * won't work for you.
- *
- * We only use the low 32 bits, and we'd simply better make sure
- * that we reschedule before that wraps. Scheduling at least every
- * four billion cycles just basically sounds like a good idea,
- * regardless of how fast the machine is.
- */
-typedef unsigned long long cycles_t;
-
-extern unsigned int cpu_khz;
-extern unsigned int tsc_khz;
-
-static inline cycles_t get_cycles(void)
-{
- unsigned long long ret = 0;
-
-#ifndef CONFIG_X86_TSC
- if (!cpu_has_tsc)
- return 0;
-#endif
-
-#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
- rdtscll(ret);
-#endif
- return ret;
-}
-
-extern void tsc_init(void);
-extern void mark_tsc_unstable(void);
-
-#endif
+#include <asm-x86_64/tsc.h>
diff --git a/include/asm-i386/vmi.h b/include/asm-i386/vmi.h
new file mode 100644
index 00000000000..43c89333037
--- /dev/null
+++ b/include/asm-i386/vmi.h
@@ -0,0 +1,262 @@
+/*
+ * VMI interface definition
+ *
+ * Copyright (C) 2005, VMware, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Maintained by: Zachary Amsden zach@vmware.com
+ *
+ */
+#include <linux/types.h>
+
+/*
+ *---------------------------------------------------------------------
+ *
+ * VMI Option ROM API
+ *
+ *---------------------------------------------------------------------
+ */
+#define VMI_SIGNATURE 0x696d5663 /* "cVmi" */
+
+#define PCI_VENDOR_ID_VMWARE 0x15AD
+#define PCI_DEVICE_ID_VMWARE_VMI 0x0801
+
+/*
+ * We use two version numbers for compatibility, with the major
+ * number signifying interface breakages, and the minor number
+ * interface extensions.
+ */
+#define VMI_API_REV_MAJOR 3
+#define VMI_API_REV_MINOR 0
+
+#define VMI_CALL_CPUID 0
+#define VMI_CALL_WRMSR 1
+#define VMI_CALL_RDMSR 2
+#define VMI_CALL_SetGDT 3
+#define VMI_CALL_SetLDT 4
+#define VMI_CALL_SetIDT 5
+#define VMI_CALL_SetTR 6
+#define VMI_CALL_GetGDT 7
+#define VMI_CALL_GetLDT 8
+#define VMI_CALL_GetIDT 9
+#define VMI_CALL_GetTR 10
+#define VMI_CALL_WriteGDTEntry 11
+#define VMI_CALL_WriteLDTEntry 12
+#define VMI_CALL_WriteIDTEntry 13
+#define VMI_CALL_UpdateKernelStack 14
+#define VMI_CALL_SetCR0 15
+#define VMI_CALL_SetCR2 16
+#define VMI_CALL_SetCR3 17
+#define VMI_CALL_SetCR4 18
+#define VMI_CALL_GetCR0 19
+#define VMI_CALL_GetCR2 20
+#define VMI_CALL_GetCR3 21
+#define VMI_CALL_GetCR4 22
+#define VMI_CALL_WBINVD 23
+#define VMI_CALL_SetDR 24
+#define VMI_CALL_GetDR 25
+#define VMI_CALL_RDPMC 26
+#define VMI_CALL_RDTSC 27
+#define VMI_CALL_CLTS 28
+#define VMI_CALL_EnableInterrupts 29
+#define VMI_CALL_DisableInterrupts 30
+#define VMI_CALL_GetInterruptMask 31
+#define VMI_CALL_SetInterruptMask 32
+#define VMI_CALL_IRET 33
+#define VMI_CALL_SYSEXIT 34
+#define VMI_CALL_Halt 35
+#define VMI_CALL_Reboot 36
+#define VMI_CALL_Shutdown 37
+#define VMI_CALL_SetPxE 38
+#define VMI_CALL_SetPxELong 39
+#define VMI_CALL_UpdatePxE 40
+#define VMI_CALL_UpdatePxELong 41
+#define VMI_CALL_MachineToPhysical 42
+#define VMI_CALL_PhysicalToMachine 43
+#define VMI_CALL_AllocatePage 44
+#define VMI_CALL_ReleasePage 45
+#define VMI_CALL_InvalPage 46
+#define VMI_CALL_FlushTLB 47
+#define VMI_CALL_SetLinearMapping 48
+
+#define VMI_CALL_SetIOPLMask 61
+#define VMI_CALL_SetInitialAPState 62
+#define VMI_CALL_APICWrite 63
+#define VMI_CALL_APICRead 64
+#define VMI_CALL_SetLazyMode 73
+
+/*
+ *---------------------------------------------------------------------
+ *
+ * MMU operation flags
+ *
+ *---------------------------------------------------------------------
+ */
+
+/* Flags used by VMI_{Allocate|Release}Page call */
+#define VMI_PAGE_PAE 0x10 /* Allocate PAE shadow */
+#define VMI_PAGE_CLONE 0x20 /* Clone from another shadow */
+#define VMI_PAGE_ZEROED 0x40 /* Page is pre-zeroed */
+
+
+/* Flags shared by Allocate|Release Page and PTE updates */
+#define VMI_PAGE_PT 0x01
+#define VMI_PAGE_PD 0x02
+#define VMI_PAGE_PDP 0x04
+#define VMI_PAGE_PML4 0x08
+
+#define VMI_PAGE_NORMAL 0x00 /* for debugging */
+
+/* Flags used by PTE updates */
+#define VMI_PAGE_CURRENT_AS 0x10 /* implies VMI_PAGE_VA_MASK is valid */
+#define VMI_PAGE_DEFER 0x20 /* may queue update until TLB inval */
+#define VMI_PAGE_VA_MASK 0xfffff000
+
+#ifdef CONFIG_X86_PAE
+#define VMI_PAGE_L1 (VMI_PAGE_PT | VMI_PAGE_PAE | VMI_PAGE_ZEROED)
+#define VMI_PAGE_L2 (VMI_PAGE_PD | VMI_PAGE_PAE | VMI_PAGE_ZEROED)
+#else
+#define VMI_PAGE_L1 (VMI_PAGE_PT | VMI_PAGE_ZEROED)
+#define VMI_PAGE_L2 (VMI_PAGE_PD | VMI_PAGE_ZEROED)
+#endif
+
+/* Flags used by VMI_FlushTLB call */
+#define VMI_FLUSH_TLB 0x01
+#define VMI_FLUSH_GLOBAL 0x02
+
+/*
+ *---------------------------------------------------------------------
+ *
+ * VMI relocation definitions for ROM call get_reloc
+ *
+ *---------------------------------------------------------------------
+ */
+
+/* VMI Relocation types */
+#define VMI_RELOCATION_NONE 0
+#define VMI_RELOCATION_CALL_REL 1
+#define VMI_RELOCATION_JUMP_REL 2
+#define VMI_RELOCATION_NOP 3
+
+#ifndef __ASSEMBLY__
+struct vmi_relocation_info {
+ unsigned char *eip;
+ unsigned char type;
+ unsigned char reserved[3];
+};
+#endif
+
+
+/*
+ *---------------------------------------------------------------------
+ *
+ * Generic ROM structures and definitions
+ *
+ *---------------------------------------------------------------------
+ */
+
+#ifndef __ASSEMBLY__
+
+struct vrom_header {
+ u16 rom_signature; // option ROM signature
+ u8 rom_length; // ROM length in 512 byte chunks
+ u8 rom_entry[4]; // 16-bit code entry point
+ u8 rom_pad0; // 4-byte align pad
+ u32 vrom_signature; // VROM identification signature
+ u8 api_version_min;// Minor version of API
+ u8 api_version_maj;// Major version of API
+ u8 jump_slots; // Number of jump slots
+ u8 reserved1; // Reserved for expansion
+ u32 virtual_top; // Hypervisor virtual address start
+ u16 reserved2; // Reserved for expansion
+ u16 license_offs; // Offset to License string
+ u16 pci_header_offs;// Offset to PCI OPROM header
+ u16 pnp_header_offs;// Offset to PnP OPROM header
+ u32 rom_pad3; // PnP reserverd / VMI reserved
+ u8 reserved[96]; // Reserved for headers
+ char vmi_init[8]; // VMI_Init jump point
+ char get_reloc[8]; // VMI_GetRelocationInfo jump point
+} __attribute__((packed));
+
+struct pnp_header {
+ char sig[4];
+ char rev;
+ char size;
+ short next;
+ short res;
+ long devID;
+ unsigned short manufacturer_offset;
+ unsigned short product_offset;
+} __attribute__((packed));
+
+struct pci_header {
+ char sig[4];
+ short vendorID;
+ short deviceID;
+ short vpdData;
+ short size;
+ char rev;
+ char class;
+ char subclass;
+ char interface;
+ short chunks;
+ char rom_version_min;
+ char rom_version_maj;
+ char codetype;
+ char lastRom;
+ short reserved;
+} __attribute__((packed));
+
+/* Function prototypes for bootstrapping */
+extern void vmi_init(void);
+extern void vmi_bringup(void);
+extern void vmi_apply_boot_page_allocations(void);
+
+/* State needed to start an application processor in an SMP system. */
+struct vmi_ap_state {
+ u32 cr0;
+ u32 cr2;
+ u32 cr3;
+ u32 cr4;
+
+ u64 efer;
+
+ u32 eip;
+ u32 eflags;
+ u32 eax;
+ u32 ebx;
+ u32 ecx;
+ u32 edx;
+ u32 esp;
+ u32 ebp;
+ u32 esi;
+ u32 edi;
+ u16 cs;
+ u16 ss;
+ u16 ds;
+ u16 es;
+ u16 fs;
+ u16 gs;
+ u16 ldtr;
+
+ u16 gdtr_limit;
+ u32 gdtr_base;
+ u32 idtr_base;
+ u16 idtr_limit;
+};
+
+#endif
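Detection of the interface works by locating an option ROM whose vrom_header carries the VMI signature and a compatible major API revision; the probe code itself lives in the arch VMI support, not in this header. A minimal validation sketch under those assumptions (0xaa55 is the standard PC option-ROM signature):

/* Illustrative only: sanity-checking a candidate VMI option ROM header. */
static int vmi_rom_looks_valid(const struct vrom_header *rom)
{
	if (rom->rom_signature != 0xaa55)
		return 0;
	if (rom->vrom_signature != VMI_SIGNATURE)
		return 0;
	/* A different major revision means an incompatible interface. */
	if (rom->api_version_maj != VMI_API_REV_MAJOR)
		return 0;
	return 1;
}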
diff --git a/include/asm-i386/vmi_time.h b/include/asm-i386/vmi_time.h
new file mode 100644
index 00000000000..c1293121100
--- /dev/null
+++ b/include/asm-i386/vmi_time.h
@@ -0,0 +1,103 @@
+/*
+ * VMI Time wrappers
+ *
+ * Copyright (C) 2006, VMware, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to dhecht@vmware.com
+ *
+ */
+
+#ifndef __VMI_TIME_H
+#define __VMI_TIME_H
+
+/*
+ * Raw VMI call indices for timer functions
+ */
+#define VMI_CALL_GetCycleFrequency 66
+#define VMI_CALL_GetCycleCounter 67
+#define VMI_CALL_SetAlarm 68
+#define VMI_CALL_CancelAlarm 69
+#define VMI_CALL_GetWallclockTime 70
+#define VMI_CALL_WallclockUpdated 71
+
+/* Cached VMI timer operations */
+extern struct vmi_timer_ops {
+ u64 (*get_cycle_frequency)(void);
+ u64 (*get_cycle_counter)(int);
+ u64 (*get_wallclock)(void);
+ int (*wallclock_updated)(void);
+ void (*set_alarm)(u32 flags, u64 expiry, u64 period);
+ void (*cancel_alarm)(u32 flags);
+} vmi_timer_ops;
+
+/* Prototypes */
+extern void __init vmi_time_init(void);
+extern unsigned long vmi_get_wallclock(void);
+extern int vmi_set_wallclock(unsigned long now);
+extern unsigned long long vmi_sched_clock(void);
+
+#ifdef CONFIG_X86_LOCAL_APIC
+extern void __init vmi_timer_setup_boot_alarm(void);
+extern void __init vmi_timer_setup_secondary_alarm(void);
+extern void apic_vmi_timer_interrupt(void);
+#endif
+
+#ifdef CONFIG_NO_IDLE_HZ
+extern int vmi_stop_hz_timer(void);
+extern void vmi_account_time_restart_hz_timer(void);
+#endif
+
+/*
+ * When run under a hypervisor, a vcpu is always in one of three states:
+ * running, halted, or ready. The vcpu is in the 'running' state if it
+ * is executing. When the vcpu executes the halt interface, the vcpu
+ * enters the 'halted' state and remains halted until there is some work
+ * pending for the vcpu (e.g. an alarm expires, host I/O completes on
+ * behalf of virtual I/O). At this point, the vcpu enters the 'ready'
+ * state (waiting for the hypervisor to reschedule it). Finally, at any
+ * time when the vcpu is not in the 'running' state nor the 'halted'
+ * state, it is in the 'ready' state.
+ *
+ * Real time is advances while the vcpu is 'running', 'ready', or
+ * 'halted'. Stolen time is the time in which the vcpu is in the
+ * 'ready' state. Available time is the remaining time -- the vcpu is
+ * either 'running' or 'halted'.
+ *
+ * All three views of time are accessible through the VMI cycle
+ * counters.
+ */
+
+/* The cycle counters. */
+#define VMI_CYCLES_REAL 0
+#define VMI_CYCLES_AVAILABLE 1
+#define VMI_CYCLES_STOLEN 2
+
+/* The alarm interface 'flags' bits */
+#define VMI_ALARM_COUNTERS 2
+
+#define VMI_ALARM_COUNTER_MASK 0x000000ff
+
+#define VMI_ALARM_WIRED_IRQ0 0x00000000
+#define VMI_ALARM_WIRED_LVTT 0x00010000
+
+#define VMI_ALARM_IS_ONESHOT 0x00000000
+#define VMI_ALARM_IS_PERIODIC 0x00000100
+
+#define CONFIG_VMI_ALARM_HZ 100
+
+#endif
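Stolen-time accounting follows directly from the counters described above: sample VMI_CYCLES_STOLEN across an interval and convert cycles to nanoseconds with get_cycle_frequency(). A hedged sketch (the real accounting lives in the VMI time code; the helper below is illustrative):

/* Illustrative only: nanoseconds of stolen time since the last sample. */
static u64 stolen_ns_since(u64 *last_stolen)
{
	u64 now = vmi_timer_ops.get_cycle_counter(VMI_CYCLES_STOLEN);
	u64 delta = now - *last_stolen;
	u64 hz = vmi_timer_ops.get_cycle_frequency();

	*last_stolen = now;
	/* cycles -> ns; real i386 kernel code would use do_div() and
	 * guard against multiplication overflow. */
	return delta * 1000000000ULL / hz;
}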