Diffstat (limited to 'arch/ia64/include')
-rw-r--r--  arch/ia64/include/asm/break.h                 |   9
-rw-r--r--  arch/ia64/include/asm/cacheflush.h            |   2
-rw-r--r--  arch/ia64/include/asm/device.h                |   3
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h           |  50
-rw-r--r--  arch/ia64/include/asm/iommu.h                 |  16
-rw-r--r--  arch/ia64/include/asm/kregs.h                 |   2
-rw-r--r--  arch/ia64/include/asm/machvec.h               |   4
-rw-r--r--  arch/ia64/include/asm/machvec_dig_vtd.h       |  38
-rw-r--r--  arch/ia64/include/asm/machvec_init.h          |   1
-rw-r--r--  arch/ia64/include/asm/machvec_xen.h           |  22
-rw-r--r--  arch/ia64/include/asm/meminit.h               |   3
-rw-r--r--  arch/ia64/include/asm/native/inst.h           |  10
-rw-r--r--  arch/ia64/include/asm/native/pvchk_inst.h     | 263
-rw-r--r--  arch/ia64/include/asm/paravirt.h              |   4
-rw-r--r--  arch/ia64/include/asm/pci.h                   |  15
-rw-r--r--  arch/ia64/include/asm/ptrace.h                |   8
-rw-r--r--  arch/ia64/include/asm/pvclock-abi.h           |  48
-rw-r--r--  arch/ia64/include/asm/swiotlb.h               |  56
-rw-r--r--  arch/ia64/include/asm/sync_bitops.h           |  51
-rw-r--r--  arch/ia64/include/asm/syscall.h               | 163
-rw-r--r--  arch/ia64/include/asm/thread_info.h           |   3
-rw-r--r--  arch/ia64/include/asm/timex.h                 |   2
-rw-r--r--  arch/ia64/include/asm/unistd.h                |   1
-rw-r--r--  arch/ia64/include/asm/xen/events.h            |  50
-rw-r--r--  arch/ia64/include/asm/xen/grant_table.h       |  29
-rw-r--r--  arch/ia64/include/asm/xen/hypercall.h         | 265
-rw-r--r--  arch/ia64/include/asm/xen/hypervisor.h        |  89
-rw-r--r--  arch/ia64/include/asm/xen/inst.h              | 458
-rw-r--r--  arch/ia64/include/asm/xen/interface.h         | 346
-rw-r--r--  arch/ia64/include/asm/xen/irq.h               |  44
-rw-r--r--  arch/ia64/include/asm/xen/minstate.h          | 134
-rw-r--r--  arch/ia64/include/asm/xen/page.h              |  65
-rw-r--r--  arch/ia64/include/asm/xen/privop.h            | 129
-rw-r--r--  arch/ia64/include/asm/xen/xcom_hcall.h        |  51
-rw-r--r--  arch/ia64/include/asm/xen/xencomm.h           |  42
35 files changed, 2457 insertions(+), 19 deletions(-)
diff --git a/arch/ia64/include/asm/break.h b/arch/ia64/include/asm/break.h
index f0340203989..e90c40ec9ed 100644
--- a/arch/ia64/include/asm/break.h
+++ b/arch/ia64/include/asm/break.h
@@ -20,4 +20,13 @@
*/
#define __IA64_BREAK_SYSCALL 0x100000
+/*
+ * Xen specific break numbers:
+ */
+#define __IA64_XEN_HYPERCALL 0x1000
+/*
+ * [__IA64_XEN_HYPERPRIVOP_START, __IA64_XEN_HYPERPRIVOP_MAX] is used
+ * for xen hyperprivops.
+ */
+#define __IA64_XEN_HYPERPRIVOP_START 0x1
+#define __IA64_XEN_HYPERPRIVOP_MAX 0x1a
+
#endif /* _ASM_IA64_BREAK_H */
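The new break immediates partition the break-number space between native syscalls and Xen. As a rough illustration (the handler below is hypothetical, not part of this patch), a break-fault path could classify the immediate like so:

static int classify_break_imm(unsigned long iim)
{
	if (iim == __IA64_BREAK_SYSCALL)
		return 1;			/* native system call */
	if (iim == __IA64_XEN_HYPERCALL)
		return 2;			/* Xen hypercall */
	if (iim >= __IA64_XEN_HYPERPRIVOP_START &&
	    iim <= __IA64_XEN_HYPERPRIVOP_MAX)
		return 3;			/* Xen hyperprivop */
	return 0;				/* unrecognised break */
}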
diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h
index afcfbda76e2..c8ce2719fee 100644
--- a/arch/ia64/include/asm/cacheflush.h
+++ b/arch/ia64/include/asm/cacheflush.h
@@ -34,6 +34,8 @@ do { \
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
extern void flush_icache_range (unsigned long start, unsigned long end);
+extern void clflush_cache_range(void *addr, int size);
+
#define flush_icache_user_range(vma, page, user_addr, len) \
do { \
diff --git a/arch/ia64/include/asm/device.h b/arch/ia64/include/asm/device.h
index 3db6daf7f25..41ab85d66f3 100644
--- a/arch/ia64/include/asm/device.h
+++ b/arch/ia64/include/asm/device.h
@@ -10,6 +10,9 @@ struct dev_archdata {
#ifdef CONFIG_ACPI
void *acpi_handle;
#endif
+#ifdef CONFIG_DMAR
+ void *iommu; /* hook for IOMMU specific extension */
+#endif
};
#endif /* _ASM_IA64_DEVICE_H */
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 06ff1ba2146..bbab7e2b0fc 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -7,6 +7,49 @@
*/
#include <asm/machvec.h>
#include <linux/scatterlist.h>
+#include <asm/swiotlb.h>
+
+struct dma_mapping_ops {
+ int (*mapping_error)(struct device *dev,
+ dma_addr_t dma_addr);
+ void* (*alloc_coherent)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+ void (*free_coherent)(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+ dma_addr_t (*map_single)(struct device *hwdev, unsigned long ptr,
+ size_t size, int direction);
+ void (*unmap_single)(struct device *dev, dma_addr_t addr,
+ size_t size, int direction);
+ void (*sync_single_for_cpu)(struct device *hwdev,
+ dma_addr_t dma_handle, size_t size,
+ int direction);
+ void (*sync_single_for_device)(struct device *hwdev,
+ dma_addr_t dma_handle, size_t size,
+ int direction);
+ void (*sync_single_range_for_cpu)(struct device *hwdev,
+ dma_addr_t dma_handle, unsigned long offset,
+ size_t size, int direction);
+ void (*sync_single_range_for_device)(struct device *hwdev,
+ dma_addr_t dma_handle, unsigned long offset,
+ size_t size, int direction);
+ void (*sync_sg_for_cpu)(struct device *hwdev,
+ struct scatterlist *sg, int nelems,
+ int direction);
+ void (*sync_sg_for_device)(struct device *hwdev,
+ struct scatterlist *sg, int nelems,
+ int direction);
+ int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
+ int nents, int direction);
+ void (*unmap_sg)(struct device *hwdev,
+ struct scatterlist *sg, int nents,
+ int direction);
+ int (*dma_supported_op)(struct device *hwdev, u64 mask);
+ int is_phys;
+};
+
+extern struct dma_mapping_ops *dma_ops;
+extern struct ia64_machine_vector ia64_mv;
+extern void set_iommu_machvec(void);
#define dma_alloc_coherent(dev, size, handle, gfp) \
platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)
@@ -96,4 +139,11 @@ dma_cache_sync (struct device *dev, void *vaddr, size_t size,
#define dma_is_consistent(d, h) (1) /* all we do is coherent memory... */
+static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
+{
+ return dma_ops;
+}
+
+
+
#endif /* _ASM_IA64_DMA_MAPPING_H */
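The dma_mapping_ops table lets the swiotlb, VT-d, and machvec implementations be selected at runtime through the single dma_ops pointer. A minimal dispatch sketch, with an assumed helper name (the real callers live elsewhere in the tree):

static inline dma_addr_t
example_dma_map_single(struct device *dev, void *ptr, size_t size, int dir)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	/* dispatches to the swiotlb, VT-d, or platform implementation */
	return ops->map_single(dev, (unsigned long)ptr, size, dir);
}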
diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h
new file mode 100644
index 00000000000..5fb2bb93de3
--- /dev/null
+++ b/arch/ia64/include/asm/iommu.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_IA64_IOMMU_H
+#define _ASM_IA64_IOMMU_H 1
+
+#define cpu_has_x2apic 0
+/* 10 seconds */
+#define DMAR_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
+
+extern void pci_iommu_shutdown(void);
+extern void no_iommu_init(void);
+extern int force_iommu, no_iommu;
+extern int iommu_detected;
+extern void iommu_dma_init(void);
+extern void machvec_init(const char *name);
+extern int forbid_dac;
+
+#endif
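DMAR_OPERATION_TIMEOUT expresses ten seconds in ITC cycles, so it can bound a cycle-counter poll loop. A sketch under stated assumptions (the status word and its completion bit are stand-ins for a real DMAR register read):

static int example_wait_for_dmar(volatile u32 *status)
{
	cycles_t start = get_cycles();

	while (!(*status & 1)) {	/* hypothetical completion bit */
		if (get_cycles() - start > DMAR_OPERATION_TIMEOUT)
			return -1;	/* timed out after ~10 seconds */
		cpu_relax();
	}
	return 0;
}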
diff --git a/arch/ia64/include/asm/kregs.h b/arch/ia64/include/asm/kregs.h
index aefcdfee7f2..39e65f6639f 100644
--- a/arch/ia64/include/asm/kregs.h
+++ b/arch/ia64/include/asm/kregs.h
@@ -32,7 +32,7 @@
#define IA64_TR_CURRENT_STACK 1 /* dtr1: maps kernel's memory- & register-stacks */
#define IA64_TR_ALLOC_BASE 2 /* itr&dtr: Base of dynamic TR resource*/
-#define IA64_TR_ALLOC_MAX 32 /* Max number for dynamic use*/
+#define IA64_TR_ALLOC_MAX 64 /* Max number for dynamic use*/
/* Processor status register bits: */
#define IA64_PSR_BE_BIT 1
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 2b850ccafef..1ea28bcee33 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -120,6 +120,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
# include <asm/machvec_hpsim.h>
# elif defined (CONFIG_IA64_DIG)
# include <asm/machvec_dig.h>
+# elif defined(CONFIG_IA64_DIG_VTD)
+# include <asm/machvec_dig_vtd.h>
# elif defined (CONFIG_IA64_HP_ZX1)
# include <asm/machvec_hpzx1.h>
# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
@@ -128,6 +130,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
# include <asm/machvec_sn2.h>
# elif defined (CONFIG_IA64_SGI_UV)
# include <asm/machvec_uv.h>
+# elif defined (CONFIG_IA64_XEN_GUEST)
+# include <asm/machvec_xen.h>
# elif defined (CONFIG_IA64_GENERIC)
# ifdef MACHVEC_PLATFORM_HEADER
diff --git a/arch/ia64/include/asm/machvec_dig_vtd.h b/arch/ia64/include/asm/machvec_dig_vtd.h
new file mode 100644
index 00000000000..3400b561e71
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_dig_vtd.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_IA64_MACHVEC_DIG_VTD_h
+#define _ASM_IA64_MACHVEC_DIG_VTD_h
+
+extern ia64_mv_setup_t dig_setup;
+extern ia64_mv_dma_alloc_coherent vtd_alloc_coherent;
+extern ia64_mv_dma_free_coherent vtd_free_coherent;
+extern ia64_mv_dma_map_single_attrs vtd_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs vtd_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs vtd_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs vtd_unmap_sg_attrs;
+extern ia64_mv_dma_supported iommu_dma_supported;
+extern ia64_mv_dma_mapping_error vtd_dma_mapping_error;
+extern ia64_mv_dma_init pci_iommu_alloc;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure. When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define platform_name "dig_vtd"
+#define platform_setup dig_setup
+#define platform_dma_init pci_iommu_alloc
+#define platform_dma_alloc_coherent vtd_alloc_coherent
+#define platform_dma_free_coherent vtd_free_coherent
+#define platform_dma_map_single_attrs vtd_map_single_attrs
+#define platform_dma_unmap_single_attrs vtd_unmap_single_attrs
+#define platform_dma_map_sg_attrs vtd_map_sg_attrs
+#define platform_dma_unmap_sg_attrs vtd_unmap_sg_attrs
+#define platform_dma_sync_single_for_cpu machvec_dma_sync_single
+#define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg
+#define platform_dma_sync_single_for_device machvec_dma_sync_single
+#define platform_dma_sync_sg_for_device machvec_dma_sync_sg
+#define platform_dma_supported iommu_dma_supported
+#define platform_dma_mapping_error vtd_dma_mapping_error
+
+#endif /* _ASM_IA64_MACHVEC_DIG_VTD_h */
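To make the "dual use" concrete: in a generic kernel the platform_* macros seed a machine-vector entry, while a dig_vtd-only kernel expands them inline at the call site. A sketch with assumed field names (the real initializer is generated by the machvec machinery):

#ifdef CONFIG_IA64_GENERIC
/* generic kernel: the macros fill in a machine-vector structure */
static struct ia64_machine_vector example_mv = {
	.name     = platform_name,	/* "dig_vtd" */
	.dma_init = platform_dma_init,	/* pci_iommu_alloc */
};
#else
/* non-generic kernel: call sites expand the macro directly */
void example_init(void) { platform_dma_init(); }
#endif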
diff --git a/arch/ia64/include/asm/machvec_init.h b/arch/ia64/include/asm/machvec_init.h
index 7f21249fba3..ef964b28684 100644
--- a/arch/ia64/include/asm/machvec_init.h
+++ b/arch/ia64/include/asm/machvec_init.h
@@ -1,3 +1,4 @@
+#include <asm/iommu.h>
#include <asm/machvec.h>
extern ia64_mv_send_ipi_t ia64_send_ipi;
diff --git a/arch/ia64/include/asm/machvec_xen.h b/arch/ia64/include/asm/machvec_xen.h
new file mode 100644
index 00000000000..55f9228056c
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_xen.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_IA64_MACHVEC_XEN_h
+#define _ASM_IA64_MACHVEC_XEN_h
+
+extern ia64_mv_setup_t dig_setup;
+extern ia64_mv_cpu_init_t xen_cpu_init;
+extern ia64_mv_irq_init_t xen_irq_init;
+extern ia64_mv_send_ipi_t xen_platform_send_ipi;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure. When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define platform_name "xen"
+#define platform_setup dig_setup
+#define platform_cpu_init xen_cpu_init
+#define platform_irq_init xen_irq_init
+#define platform_send_ipi xen_platform_send_ipi
+
+#endif /* _ASM_IA64_MACHVEC_XEN_h */
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 7245a578159..6bc96ee5432 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -18,10 +18,11 @@
* - crash dumping code reserved region
* - Kernel memory map built from EFI memory map
* - ELF core header
+ * - xen start info if CONFIG_XEN
*
* More could be added if necessary
*/
-#define IA64_MAX_RSVD_REGIONS 8
+#define IA64_MAX_RSVD_REGIONS 9
struct rsvd_region {
unsigned long start; /* virtual address of beginning of element */
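Bumping IA64_MAX_RSVD_REGIONS to 9 leaves room for one extra entry when running on Xen. A hypothetical sketch of the extra reservation (the xen_start_info_pfn symbol and the .end field are assumptions for illustration only):

static void __init example_reserve_xen_start_info(struct rsvd_region *rsvd,
						  int *n)
{
	BUG_ON(*n >= IA64_MAX_RSVD_REGIONS);
	rsvd[*n].start = (unsigned long)__va(xen_start_info_pfn << PAGE_SHIFT);
	rsvd[*n].end   = rsvd[*n].start + PAGE_SIZE;
	(*n)++;
}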
diff --git a/arch/ia64/include/asm/native/inst.h b/arch/ia64/include/asm/native/inst.h
index c8efbf7b849..0a1026cca4f 100644
--- a/arch/ia64/include/asm/native/inst.h
+++ b/arch/ia64/include/asm/native/inst.h
@@ -36,8 +36,13 @@
;; \
movl clob = PARAVIRT_POISON; \
;;
+# define CLOBBER_PRED(pred_clob) \
+ ;; \
+ cmp.eq pred_clob, p0 = r0, r0 \
+ ;;
#else
-# define CLOBBER(clob) /* nothing */
+# define CLOBBER(clob) /* nothing */
+# define CLOBBER_PRED(pred_clob) /* nothing */
#endif
#define MOV_FROM_IFA(reg) \
@@ -136,7 +141,8 @@
#define SSM_PSR_I(pred, pred_clob, clob) \
(pred) ssm psr.i \
- CLOBBER(clob)
+ CLOBBER(clob) \
+ CLOBBER_PRED(pred_clob)
#define RSM_PSR_I(pred, clob0, clob1) \
(pred) rsm psr.i \
diff --git a/arch/ia64/include/asm/native/pvchk_inst.h b/arch/ia64/include/asm/native/pvchk_inst.h
new file mode 100644
index 00000000000..b8e6eb1090d
--- /dev/null
+++ b/arch/ia64/include/asm/native/pvchk_inst.h
@@ -0,0 +1,263 @@
+#ifndef _ASM_NATIVE_PVCHK_INST_H
+#define _ASM_NATIVE_PVCHK_INST_H
+
+/******************************************************************************
+ * arch/ia64/include/asm/native/pvchk_inst.h
+ * Checker for paravirtualizations of privileged operations.
+ *
+ * Copyright (C) 2005 Hewlett-Packard Co
+ * Dan Magenheimer <dan.magenheimer@hp.com>
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+/**********************************************
+ * Instructions paravirtualized for correctness
+ **********************************************/
+
+/* "fc" and "thash" are privilege-sensitive instructions, meaning they
+ * may have different semantics depending on whether they are executed
+ * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
+ * be allowed to execute directly, lest incorrect semantics result.
+ */
+
+#define fc .error "fc should not be used directly."
+#define thash .error "thash should not be used directly."
+
+/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
+ * is not currently used (though it may be in a long-format VHPT system!)
+ * and the semantics of cover only change if psr.ic is off, which is very
+ * rare (and currently non-existent outside of assembly code).
+ */
+#define ttag .error "ttag should not be used directly."
+#define cover .error "cover should not be used directly."
+
+/* There are also privilege-sensitive registers. These registers are
+ * readable at any privilege level but only writable at PL0.
+ */
+#define cpuid .error "cpuid should not be used directly."
+#define pmd .error "pmd should not be used directly."
+
+/*
+ * mov ar.eflag =
+ * mov = ar.eflag
+ */
+
+/**********************************************
+ * Instructions paravirtualized for performance
+ **********************************************/
+/*
+ * Those instructions include '.' which can't be handled by cpp.
+ * or can't be handled by cpp easily.
+ * They are handled by sed instead of cpp.
+ */
+
+/* for .S
+ * itc.i
+ * itc.d
+ *
+ * bsw.0
+ * bsw.1
+ *
+ * ssm psr.ic | PSR_DEFAULT_BITS
+ * ssm psr.ic
+ * rsm psr.ic
+ * ssm psr.i
+ * rsm psr.i
+ * rsm psr.i | psr.ic
+ * rsm psr.dt
+ * ssm psr.dt
+ *
+ * mov = cr.ifa
+ * mov = cr.itir
+ * mov = cr.isr
+ * mov = cr.iha
+ * mov = cr.ipsr
+ * mov = cr.iim
+ * mov = cr.iip
+ * mov = cr.ivr
+ * mov = psr
+ *
+ * mov cr.ifa =
+ * mov cr.itir =
+ * mov cr.iha =
+ * mov cr.ipsr =
+ * mov cr.ifs =
+ * mov cr.iip =
+ * mov cr.kr =
+ */
+
+/* for intrinsics
+ * ssm psr.i
+ * rsm psr.i
+ * mov = psr
+ * mov = ivr
+ * mov = tpr
+ * mov cr.itm =
+ * mov eoi =
+ * mov rr[] =
+ * mov = rr[]
+ * mov = kr
+ * mov kr =
+ * ptc.ga
+ */
+
+/*************************************************************
+ * Define the paravirtualized instruction macros as nops to ignore them,
+ * and check whether their arguments are appropriate.
+ *************************************************************/
+
+/* check whether reg is a regular register */
+.macro is_rreg_in reg
+ .ifc "\reg", "r0"
+ nop 0
+ .exitm
+ .endif
+ ;;
+ mov \reg = r0
+ ;;
+.endm
+#define IS_RREG_IN(reg) is_rreg_in reg ;
+
+#define IS_RREG_OUT(reg) \
+ ;; \
+ mov reg = r0 \
+ ;;
+
+#define IS_RREG_CLOB(reg) IS_RREG_OUT(reg)
+
+/* check whether pred is a predicate register */
+#define IS_PRED_IN(pred) \
+ ;; \
+ (pred) nop 0 \
+ ;;
+
+#define IS_PRED_OUT(pred) \
+ ;; \
+ cmp.eq pred, p0 = r0, r0 \
+ ;;
+
+#define IS_PRED_CLOB(pred) IS_PRED_OUT(pred)
+
+
+#define DO_SAVE_MIN(__COVER, SAVE_IFS, EXTRA, WORKAROUND) \
+ nop 0
+#define MOV_FROM_IFA(reg) \
+ IS_RREG_OUT(reg)
+#define MOV_FROM_ITIR(reg) \
+ IS_RREG_OUT(reg)
+#define MOV_FROM_ISR(reg) \
+ IS_RREG_OUT(reg)
+#define MOV_FROM_IHA(reg) \
+ IS_RREG_OUT(reg)
+#define MOV_FROM_IPSR(pred, reg) \
+ IS_PRED_IN(pred) \
+ IS_RREG_OUT(reg)
+#define MOV_FROM_IIM(reg) \
+ IS_RREG_OUT(reg)
+#define MOV_FROM_IIP(reg) \
+ IS_RREG_OUT(reg)
+#define MOV_FROM_IVR(reg, clob) \
+ IS_RREG_OUT(reg) \
+ IS_RREG_CLOB(clob)
+#define MOV_FROM_PSR(pred, reg, clob) \
+ IS_PRED_IN(pred) \
+ IS_RREG_OUT(reg) \
+ IS_RREG_CLOB(clob)
+#define MOV_TO_IFA(reg, clob) \
+ IS_RREG_IN(reg) \
+ IS_RREG_CLOB(clob)
+#define MOV_TO_ITIR(pred, reg, clob) \
+ IS_PRED_IN(pred) \
+ IS_RREG_IN(reg) \
+ IS_RREG_CLOB(clob)
+#define MOV_TO_IHA(pred, reg, clob) \
+ IS_PRED_IN(pred) \
+ IS_RREG_IN(reg) \
+ IS_RREG_CLOB(clob)
+#define MOV_TO_IPSR(pred, reg, clob) \
+ IS_PRED_IN(pred) \
+ IS_RREG_IN(reg) \
+ IS_RREG_CLOB(clob)
+#define MOV_TO_IFS(pred, reg, clob) \
+ IS_PRED_IN(pred) \
+ IS_RREG_IN(reg) \
+ IS_RREG_CLOB(clob)
+#define MOV_TO_IIP(reg, clob) \
+ IS_RREG_IN(reg) \
+ IS_RREG_CLOB(clob)
+#define MOV_TO_KR(kr, reg, clob0, clob1) \
+ IS_RREG_IN(reg) \
+ IS_RREG_CLOB(clob0) \
+ IS_RREG_CLOB(clob1)
+#define ITC_I(pred, reg, clob) \
+ IS_PRED_IN(pred) \
+ IS_RREG_IN(reg) \
+ IS_RREG_CLOB(clob)
+#define ITC_D(pred, reg, clob) \
+ IS_PRED_IN(pred) \
+ IS_RREG_IN(reg) \
+ IS_RREG_CLOB(clob)
+#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
+ IS_PRED_IN(pred_i) \
+ IS_PRED_IN(pred_d) \
+ IS_RREG_IN(reg) \
+ IS_RREG_CLOB(clob)
+#define THASH(pred, reg0, reg1, clob) \
+ IS_PRED_IN(pred) \
+ IS_RREG_OUT(reg0) \
+ IS_RREG_IN(reg1) \
+ IS_RREG_CLOB(clob)
+#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
+ IS_RREG_CLOB(clob0) \
+ IS_RREG_CLOB(clob1)
+#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
+ IS_RREG_CLOB(clob0) \
+ IS_RREG_CLOB(clob1)
+#define RSM_PSR_IC(clob) \
+ IS_RREG_CLOB(clob)
+#define SSM_PSR_I(pred, pred_clob, clob) \
+ IS_PRED_IN(pred) \
+ IS_PRED_CLOB(pred_clob) \
+ IS_RREG_CLOB(clob)
+#define RSM_PSR_I(pred, clob0, clob1) \
+ IS_PRED_IN(pred) \
+ IS_RREG_CLOB(clob0) \
+ IS_RREG_CLOB(clob1)
+#define RSM_PSR_I_IC(clob0, clob1, clob2) \
+ IS_RREG_CLOB(clob0) \
+ IS_RREG_CLOB(clob1) \
+ IS_RREG_CLOB(clob2)
+#define RSM_PSR_DT \
+ nop 0
+#define SSM_PSR_DT_AND_SRLZ_I \
+ nop 0
+#define BSW_0(clob0, clob1, clob2) \
+ IS_RREG_CLOB(clob0) \
+ IS_RREG_CLOB(clob1) \
+ IS_RREG_CLOB(clob2)
+#define BSW_1(clob0, clob1) \
+ IS_RREG_CLOB(clob0) \
+ IS_RREG_CLOB(clob1)
+#define COVER \
+ nop 0
+#define RFI \
+ br.ret.sptk.many rp /* defining nop causes dependency error */
+
+#endif /* _ASM_NATIVE_PVCHK_INST_H */
diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h
index 660cab04483..2bf3636473f 100644
--- a/arch/ia64/include/asm/paravirt.h
+++ b/arch/ia64/include/asm/paravirt.h
@@ -117,7 +117,7 @@ static inline void paravirt_post_smp_prepare_boot_cpu(void)
struct pv_iosapic_ops {
void (*pcat_compat_init)(void);
- struct irq_chip *(*get_irq_chip)(unsigned long trigger);
+ struct irq_chip *(*__get_irq_chip)(unsigned long trigger);
unsigned int (*__read)(char __iomem *iosapic, unsigned int reg);
void (*__write)(char __iomem *iosapic, unsigned int reg, u32 val);
@@ -135,7 +135,7 @@ iosapic_pcat_compat_init(void)
static inline struct irq_chip*
iosapic_get_irq_chip(unsigned long trigger)
{
- return pv_iosapic_ops.get_irq_chip(trigger);
+ return pv_iosapic_ops.__get_irq_chip(trigger);
}
static inline unsigned int
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index 0149097b736..1d660d89db0 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -95,16 +95,8 @@ extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
#define HAVE_PCI_LEGACY
extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
- struct vm_area_struct *vma);
-extern ssize_t pci_read_legacy_io(struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count);
-extern ssize_t pci_write_legacy_io(struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count);
-extern int pci_mmap_legacy_mem(struct kobject *kobj,
- struct bin_attribute *attr,
- struct vm_area_struct *vma);
+ struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state);
#define pci_get_legacy_mem platform_pci_get_legacy_mem
#define pci_legacy_read platform_pci_legacy_read
@@ -164,4 +156,7 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
return channel ? isa_irq_to_vector(15) : isa_irq_to_vector(14);
}
+#ifdef CONFIG_DMAR
+extern void pci_iommu_alloc(void);
+#endif
#endif /* _ASM_IA64_PCI_H */
diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h
index 15f8dcfe6ee..6417c1ecb44 100644
--- a/arch/ia64/include/asm/ptrace.h
+++ b/arch/ia64/include/asm/ptrace.h
@@ -240,6 +240,12 @@ struct switch_stack {
*/
# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+ /* FIXME: should this be bspstore + nr_dirty regs? */
+ return regs->ar_bspstore;
+}
+
#define regs_return_value(regs) ((regs)->r8)
/* Conserve space in histogram by encoding slot bits in address
@@ -319,6 +325,8 @@ struct switch_stack {
#define arch_has_block_step() (1)
extern void user_enable_block_step(struct task_struct *);
+#define __ARCH_WANT_COMPAT_SYS_PTRACE
+
#endif /* !__KERNEL__ */
/* pt_all_user_regs is used for PTRACE_GETREGS PTRACE_SETREGS */
diff --git a/arch/ia64/include/asm/pvclock-abi.h b/arch/ia64/include/asm/pvclock-abi.h
new file mode 100644
index 00000000000..44ef9ef8f5b
--- /dev/null
+++ b/arch/ia64/include/asm/pvclock-abi.h
@@ -0,0 +1,48 @@
+/*
+ * Same structure as x86's.
+ * Hopefully asm-x86/pvclock-abi.h will be moved somewhere more generic.
+ * For now, duplicate the same definitions.
+ */
+
+#ifndef _ASM_IA64__PVCLOCK_ABI_H
+#define _ASM_IA64__PVCLOCK_ABI_H
+#ifndef __ASSEMBLY__
+
+/*
+ * These structs MUST NOT be changed.
+ * They are the ABI between hypervisor and guest OS.
+ * Both Xen and KVM are using this.
+ *
+ * pvclock_vcpu_time_info holds the system time and the tsc timestamp
+ * of the last update. So the guest can use the tsc delta to get a
+ * more precise system time. There is one per virtual cpu.
+ *
+ * pvclock_wall_clock references the point in time when the system
+ * time was zero (usually boot time), thus the guest calculates the
+ * current wall clock by adding the system time.
+ *
+ * Protocol for the "version" fields is: hypervisor raises it (making
+ * it uneven) before it starts updating the fields and raises it again
+ * (making it even) when it is done. Thus the guest can make sure the
+ * time values it got are consistent by checking the version before
+ * and after reading them.
+ */
+
+struct pvclock_vcpu_time_info {
+ u32 version;
+ u32 pad0;
+ u64 tsc_timestamp;
+ u64 system_time;
+ u32 tsc_to_system_mul;
+ s8 tsc_shift;
+ u8 pad[3];
+} __attribute__((__packed__)); /* 32 bytes */
+
+struct pvclock_wall_clock {
+ u32 version;
+ u32 sec;
+ u32 nsec;
+} __attribute__((__packed__));
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_IA64__PVCLOCK_ABI_H */
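The version protocol described above is a seqcount-style scheme, so a reader retries until it observes an even, unchanged version. A minimal reader sketch (the helper name is assumed; the real consumers live in the pvclock code):

static inline u64 example_read_system_time(struct pvclock_vcpu_time_info *src)
{
	u32 version;
	u64 system_time;

	do {
		version = src->version;
		rmb();			/* read the data after the version */
		system_time = src->system_time;
		rmb();			/* re-check the version after the data */
	} while ((version & 1) || version != src->version);

	return system_time;
}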
diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h
new file mode 100644
index 00000000000..fb79423834d
--- /dev/null
+++ b/arch/ia64/include/asm/swiotlb.h
@@ -0,0 +1,56 @@
+#ifndef ASM_IA64__SWIOTLB_H
+#define ASM_IA64__SWIOTLB_H
+
+#include <linux/dma-mapping.h>
+
+/* SWIOTLB interface */
+
+extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr,
+ size_t size, int dir);
+extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags);
+extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, int dir);
+extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t dev_addr,
+ size_t size, int dir);
+extern void swiotlb_sync_single_for_device(struct device *hwdev,
+ dma_addr_t dev_addr,
+ size_t size, int dir);
+extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev,
+ dma_addr_t dev_addr,
+ unsigned long offset,
+ size_t size, int dir);
+extern void swiotlb_sync_single_range_for_device(struct device *hwdev,
+ dma_addr_t dev_addr,
+ unsigned long offset,
+ size_t size, int dir);
+extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
+ struct scatterlist *sg, int nelems,
+ int dir);
+extern void swiotlb_sync_sg_for_device(struct device *hwdev,
+ struct scatterlist *sg, int nelems,
+ int dir);
+extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
+ int nents, int direction);
+extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
+ int nents, int direction);
+extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
+extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
+extern void swiotlb_init(void);
+
+extern int swiotlb_force;
+
+#ifdef CONFIG_SWIOTLB
+extern int swiotlb;
+extern void pci_swiotlb_init(void);
+#else
+#define swiotlb 0
+static inline void pci_swiotlb_init(void)
+{
+}
+#endif
+
+#endif /* ASM_IA64__SWIOTLB_H */
diff --git a/arch/ia64/include/asm/sync_bitops.h b/arch/ia64/include/asm/sync_bitops.h
new file mode 100644
index 00000000000..593c12eeb27
--- /dev/null
+++ b/arch/ia64/include/asm/sync_bitops.h
@@ -0,0 +1,51 @@
+#ifndef _ASM_IA64_SYNC_BITOPS_H
+#define _ASM_IA64_SYNC_BITOPS_H
+
+/*
+ * Copyright (C) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *
+ * Based on synch_bitops.h, which Dan Magenheimer wrote.
+ *
+ * Bit operations that provide guaranteed strong synchronisation
+ * when communicating with Xen or other guest OSes running on other CPUs.
+ */
+
+static inline void sync_set_bit(int nr, volatile void *addr)
+{
+ set_bit(nr, addr);
+}
+
+static inline void sync_clear_bit(int nr, volatile void *addr)
+{
+ clear_bit(nr, addr);
+}
+
+static inline void sync_change_bit(int nr, volatile void *addr)
+{
+ change_bit(nr, addr);
+}
+
+static inline int sync_test_and_set_bit(int nr, volatile void *addr)
+{
+ return test_and_set_bit(nr, addr);
+}
+
+static inline int sync_test_and_clear_bit(int nr, volatile void *addr)
+{
+ return test_and_clear_bit(nr, addr);
+}
+
+static inline int sync_test_and_change_bit(int nr, volatile void *addr)
+{
+ return test_and_change_bit(nr, addr);
+}
+
+static inline int sync_test_bit(int nr, const volatile void *addr)
+{
+ return test_bit(nr, addr);
+}
+
+#define sync_cmpxchg(ptr, old, new) \
+ ((__typeof__(*(ptr)))cmpxchg_acq((ptr), (old), (new)))
+
+#endif /* _ASM_IA64_SYNC_BITOPS_H */
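Because ia64's ordinary bitops are already fully serialising atomic operations, the sync_* variants can map 1:1 onto them. A hypothetical consumer taking ownership of a pending bit in a page shared with Xen:

static int example_take_pending(volatile void *shared_pending, int nr)
{
	/* atomic with respect to the hypervisor and all other vcpus */
	return sync_test_and_clear_bit(nr, shared_pending);
}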
diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h
new file mode 100644
index 00000000000..2f758a42f94
--- /dev/null
+++ b/arch/ia64/include/asm/syscall.h
@@ -0,0 +1,163 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * Copyright (C) 2008 Intel Corp. Shaohua Li <shaohua.li@intel.com>
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H 1
+
+#include <linux/sched.h>
+#include <linux/err.h>
+
+static inline long syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ if ((long)regs->cr_ifs < 0) /* Not a syscall */
+ return -1;
+
+#ifdef CONFIG_IA32_SUPPORT
+ if (IS_IA32_PROCESS(regs))
+ return regs->r1;
+#endif
+
+ return regs->r15;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+#ifdef CONFIG_IA32_SUPPORT
+ if (IS_IA32_PROCESS(regs))
+ regs->r8 = regs->r1;
+#endif
+
+ /* do nothing */
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+#ifdef CONFIG_IA32_SUPPORT
+ if (IS_IA32_PROCESS(regs))
+ return regs->r8;
+#endif
+
+ return regs->r10 == -1 ? regs->r8 : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->r8;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+#ifdef CONFIG_IA32_SUPPORT
+ if (IS_IA32_PROCESS(regs)) {
+ regs->r8 = (long) error ? error : val;
+ return;
+ }
+#endif
+
+ if (error) {
+ /* error < 0, but ia64 uses > 0 return value */
+ regs->r8 = -error;
+ regs->r10 = -1;
+ } else {
+ regs->r8 = val;
+ regs->r10 = 0;
+ }
+}
+
+extern void ia64_syscall_get_set_arguments(struct task_struct *task,
+ struct pt_regs *regs, unsigned int i, unsigned int n,
+ unsigned long *args, int rw);
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ unsigned long *args)
+{
+ BUG_ON(i + n > 6);
+
+#ifdef CONFIG_IA32_SUPPORT
+ if (IS_IA32_PROCESS(regs)) {
+ switch (i + n) {
+ case 6:
+ if (!n--) break;
+ *args++ = regs->r13;
+ case 5:
+ if (!n--) break;
+ *args++ = regs->r15;
+ case 4:
+ if (!n--) break;
+ *args++ = regs->r14;
+ case 3:
+ if (!n--) break;
+ *args++ = regs->r10;
+ case 2:
+ if (!n--) break;
+ *args++ = regs->r9;
+ case 1:
+ if (!n--) break;
+ *args++ = regs->r11;
+ case 0:
+ if (!n--) break;
+ default:
+ BUG();
+ break;
+ }
+
+ return;
+ }
+#endif
+ ia64_syscall_get_set_arguments(task, regs, i, n, args, 0);
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ unsigned long *args)
+{
+ BUG_ON(i + n > 6);
+
+#ifdef CONFIG_IA32_SUPPORT
+ if (IS_IA32_PROCESS(regs)) {
+ switch (i + n) {
+ case 6:
+ if (!n--) break;
+ regs->r13 = *args++;
+ case 5:
+ if (!n--) break;
+ regs->r15 = *args++;
+ case 4:
+ if (!n--) break;
+ regs->r14 = *args++;
+ case 3:
+ if (!n--) break;
+ regs->r10 = *args++;
+ case 2:
+ if (!n--) break;
+ regs->r9 = *args++;
+ case 1:
+ if (!n--) break;
+ regs->r11 = *args++;
+ case 0:
+ if (!n--) break;
+ }
+
+ return;
+ }
+#endif
+ ia64_syscall_get_set_arguments(task, regs, i, n, args, 1);
+}
+#endif /* _ASM_SYSCALL_H */
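A minimal usage sketch of the accessors above (the caller is hypothetical): a tracer fetching the syscall number and all six possible arguments of a stopped task.

static void example_dump_syscall(struct task_struct *task,
				 struct pt_regs *regs)
{
	unsigned long args[6];
	long nr = syscall_get_nr(task, regs);

	if (nr == -1)
		return;			/* not inside a syscall */

	syscall_get_arguments(task, regs, 0, 6, args);
	printk(KERN_DEBUG "sys_%ld(%lx, %lx, %lx, ...)\n",
	       nr, args[0], args[1], args[2]);
}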
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index 7c60fcdd2ef..ae6922626bf 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -87,9 +87,6 @@ struct thread_info {
#define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER))
#define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
-#define tsk_set_notify_resume(tsk) \
- set_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME)
-extern void tsk_clear_notify_resume(struct task_struct *tsk);
#endif /* !__ASSEMBLY */
/*
diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h
index 05a6baf8a47..4e03cfe74a0 100644
--- a/arch/ia64/include/asm/timex.h
+++ b/arch/ia64/include/asm/timex.h
@@ -39,4 +39,6 @@ get_cycles (void)
return ret;
}
+extern void ia64_cpu_local_tick (void);
+
#endif /* _ASM_IA64_TIMEX_H */
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index d535833aab5..f791576355a 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -337,6 +337,7 @@
# define __ARCH_WANT_SYS_NICE
# define __ARCH_WANT_SYS_OLD_GETRLIMIT
# define __ARCH_WANT_SYS_OLDUMOUNT
+# define __ARCH_WANT_SYS_PAUSE
# define __ARCH_WANT_SYS_SIGPENDING
# define __ARCH_WANT_SYS_SIGPROCMASK
# define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
diff --git a/arch/ia64/include/asm/xen/events.h b/arch/ia64/include/asm/xen/events.h
new file mode 100644
index 00000000000..73248781fba
--- /dev/null
+++ b/arch/ia64/include/asm/xen/events.h
@@ -0,0 +1,50 @@
+/******************************************************************************
+ * arch/ia64/include/asm/xen/events.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef _ASM_IA64_XEN_EVENTS_H
+#define _ASM_IA64_XEN_EVENTS_H
+
+enum ipi_vector {
+ XEN_RESCHEDULE_VECTOR,
+ XEN_IPI_VECTOR,
+ XEN_CMCP_VECTOR,
+ XEN_CPEP_VECTOR,
+
+ XEN_NR_IPIS,
+};
+
+static inline int xen_irqs_disabled(struct pt_regs *regs)
+{
+ return !(ia64_psr(regs)->i);
+}
+
+static inline void xen_do_IRQ(int irq, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs;
+ old_regs = set_irq_regs(regs);
+ irq_enter();
+ __do_IRQ(irq);
+ irq_exit();
+ set_irq_regs(old_regs);
+}
+#define irq_ctx_init(cpu) do { } while (0)
+
+#endif /* _ASM_IA64_XEN_EVENTS_H */
diff --git a/arch/ia64/include/asm/xen/grant_table.h b/arch/ia64/include/asm/xen/grant_table.h
new file mode 100644
index 00000000000..2b1fae0e2d1
--- /dev/null
+++ b/arch/ia64/include/asm/xen/grant_table.h
@@ -0,0 +1,29 @@
+/******************************************************************************
+ * arch/ia64/include/asm/xen/grant_table.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _ASM_IA64_XEN_GRANT_TABLE_H
+#define _ASM_IA64_XEN_GRANT_TABLE_H
+
+struct vm_struct *xen_alloc_vm_area(unsigned long size);
+void xen_free_vm_area(struct vm_struct *area);
+
+#endif /* _ASM_IA64_XEN_GRANT_TABLE_H */
diff --git a/arch/ia64/include/asm/xen/hypercall.h b/arch/ia64/include/asm/xen/hypercall.h
new file mode 100644
index 00000000000..96fc62366aa
--- /dev/null
+++ b/arch/ia64/include/asm/xen/hypercall.h
@@ -0,0 +1,265 @@
+/******************************************************************************
+ * hypercall.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _ASM_IA64_XEN_HYPERCALL_H
+#define _ASM_IA64_XEN_HYPERCALL_H
+
+#include <xen/interface/xen.h>
+#include <xen/interface/physdev.h>
+#include <xen/interface/sched.h>
+#include <asm/xen/xcom_hcall.h>
+struct xencomm_handle;
+extern unsigned long __hypercall(unsigned long a1, unsigned long a2,
+ unsigned long a3, unsigned long a4,
+ unsigned long a5, unsigned long cmd);
+
+/*
+ * Assembler stubs for hyper-calls.
+ */
+
+#define _hypercall0(type, name) \
+({ \
+ long __res; \
+ __res = __hypercall(0, 0, 0, 0, 0, __HYPERVISOR_##name);\
+ (type)__res; \
+})
+
+#define _hypercall1(type, name, a1) \
+({ \
+ long __res; \
+ __res = __hypercall((unsigned long)a1, \
+ 0, 0, 0, 0, __HYPERVISOR_##name); \
+ (type)__res; \
+})
+
+#define _hypercall2(type, name, a1, a2) \
+({ \
+ long __res; \
+ __res = __hypercall((unsigned long)a1, \
+ (unsigned long)a2, \
+ 0, 0, 0, __HYPERVISOR_##name); \
+ (type)__res; \
+})
+
+#define _hypercall3(type, name, a1, a2, a3) \
+({ \
+ long __res; \
+ __res = __hypercall((unsigned long)a1, \
+ (unsigned long)a2, \
+ (unsigned long)a3, \
+ 0, 0, __HYPERVISOR_##name); \
+ (type)__res; \
+})
+
+#define _hypercall4(type, name, a1, a2, a3, a4) \
+({ \
+ long __res; \
+ __res = __hypercall((unsigned long)a1, \
+ (unsigned long)a2, \
+ (unsigned long)a3, \
+ (unsigned long)a4, \
+ 0, __HYPERVISOR_##name); \
+ (type)__res; \
+})
+
+#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
+({ \
+ long __res; \
+ __res = __hypercall((unsigned long)a1, \
+ (unsigned long)a2, \
+ (unsigned long)a3, \
+ (unsigned long)a4, \
+ (unsigned long)a5, \
+ __HYPERVISOR_##name); \
+ (type)__res; \
+})
+
+
+static inline int
+xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg)
+{
+ return _hypercall2(int, sched_op_new, cmd, arg);
+}
+
+static inline long
+HYPERVISOR_set_timer_op(u64 timeout)
+{
+ unsigned long timeout_hi = (unsigned long)(timeout >> 32);
+ unsigned long timeout_lo = (unsigned long)timeout;
+ return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
+}
+
+static inline int
+xencomm_arch_hypercall_multicall(struct xencomm_handle *call_list,
+ int nr_calls)
+{
+ return _hypercall2(int, multicall, call_list, nr_calls);
+}
+
+static inline int
+xencomm_arch_hypercall_memory_op(unsigned int cmd, struct xencomm_handle *arg)
+{
+ return _hypercall2(int, memory_op, cmd, arg);
+}
+
+static inline int
+xencomm_arch_hypercall_event_channel_op(int cmd, struct xencomm_handle *arg)
+{
+ return _hypercall2(int, event_channel_op, cmd, arg);
+}
+
+static inline int
+xencomm_arch_hypercall_xen_version(int cmd, struct xencomm_handle *arg)
+{
+ return _hypercall2(int, xen_version, cmd, arg);
+}
+
+static inline int
+xencomm_arch_hypercall_console_io(int cmd, int count,
+ struct xencomm_handle *str)
+{
+ return _hypercall3(int, console_io, cmd, count, str);
+}
+
+static inline int
+xencomm_arch_hypercall_physdev_op(int cmd, struct xencomm_handle *arg)
+{
+ return _hypercall2(int, physdev_op, cmd, arg);
+}
+
+static inline int
+xencomm_arch_hypercall_grant_table_op(unsigned int cmd,
+ struct xencomm_handle *uop,
+ unsigned int count)
+{
+ return _hypercall3(int, grant_table_op, cmd, uop, count);
+}
+
+int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
+
+extern int xencomm_arch_hypercall_suspend(struct xencomm_handle *arg);
+
+static inline int
+xencomm_arch_hypercall_callback_op(int cmd, struct xencomm_handle *arg)
+{
+ return _hypercall2(int, callback_op, cmd, arg);
+}
+
+static inline long
+xencomm_arch_hypercall_vcpu_op(int cmd, int cpu, void *arg)
+{
+ return _hypercall3(long, vcpu_op, cmd, cpu, arg);
+}
+
+static inline int
+HYPERVISOR_physdev_op(int cmd, void *arg)
+{
+ switch (cmd) {
+ case PHYSDEVOP_eoi:
+ return _hypercall1(int, ia64_fast_eoi,
+ ((struct physdev_eoi *)arg)->irq);
+ default:
+ return xencomm_hypercall_physdev_op(cmd, arg);
+ }
+}
+
+static inline long
+xencomm_arch_hypercall_opt_feature(struct xencomm_handle *arg)
+{
+ return _hypercall1(long, opt_feature, arg);
+}
+
+/* for balloon driver */
+#define HYPERVISOR_update_va_mapping(va, new_val, flags) (0)
+
+/* Use xencomm to do hypercalls. */
+#define HYPERVISOR_sched_op xencomm_hypercall_sched_op
+#define HYPERVISOR_event_channel_op xencomm_hypercall_event_channel_op
+#define HYPERVISOR_callback_op xencomm_hypercall_callback_op
+#define HYPERVISOR_multicall xencomm_hypercall_multicall
+#define HYPERVISOR_xen_version xencomm_hypercall_xen_version
+#define HYPERVISOR_console_io xencomm_hypercall_console_io
+#define HYPERVISOR_memory_op xencomm_hypercall_memory_op
+#define HYPERVISOR_suspend xencomm_hypercall_suspend
+#define HYPERVISOR_vcpu_op xencomm_hypercall_vcpu_op
+#define HYPERVISOR_opt_feature xencomm_hypercall_opt_feature
+
+/* to compile gnttab_copy_grant_page() in drivers/xen/core/gnttab.c */
+#define HYPERVISOR_mmu_update(req, count, success_count, domid) ({ BUG(); 0; })
+
+static inline int
+HYPERVISOR_shutdown(
+ unsigned int reason)
+{
+ struct sched_shutdown sched_shutdown = {
+ .reason = reason
+ };
+
+ int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
+
+ return rc;
+}
+
+/* for netfront.c, netback.c */
+#define MULTI_UVMFLAGS_INDEX 0 /* XXX any value */
+
+static inline void
+MULTI_update_va_mapping(
+ struct multicall_entry *mcl, unsigned long va,
+ pte_t new_val, unsigned long flags)
+{
+ mcl->op = __HYPERVISOR_update_va_mapping;
+ mcl->result = 0;
+}
+
+static inline void
+MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
+ void *uop, unsigned int count)
+{
+ mcl->op = __HYPERVISOR_grant_table_op;
+ mcl->args[0] = cmd;
+ mcl->args[1] = (unsigned long)uop;
+ mcl->args[2] = count;
+}
+
+static inline void
+MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
+ int count, int *success_count, domid_t domid)
+{
+ mcl->op = __HYPERVISOR_mmu_update;
+ mcl->args[0] = (unsigned long)req;
+ mcl->args[1] = count;
+ mcl->args[2] = (unsigned long)success_count;
+ mcl->args[3] = domid;
+}
+
+#endif /* _ASM_IA64_XEN_HYPERCALL_H */
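For reference, the _hypercallN macros funnel everything through the single __hypercall() assembler stub, zero-filling the unused argument slots. xencomm_arch_hypercall_xen_version() above expands to approximately:

static inline int expanded_xen_version(int cmd, struct xencomm_handle *arg)
{
	long __res = __hypercall((unsigned long)cmd, (unsigned long)arg,
				 0, 0, 0, __HYPERVISOR_xen_version);
	return (int)__res;
}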
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h
new file mode 100644
index 00000000000..7a804e80fc6
--- /dev/null
+++ b/arch/ia64/include/asm/xen/hypervisor.h
@@ -0,0 +1,89 @@
+/******************************************************************************
+ * hypervisor.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _ASM_IA64_XEN_HYPERVISOR_H
+#define _ASM_IA64_XEN_HYPERVISOR_H
+
+#ifdef CONFIG_XEN
+
+#include <linux/init.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/version.h> /* to compile feature.c */
+#include <xen/features.h> /* to compile xen-netfront.c */
+#include <asm/xen/hypercall.h>
+
+/* xen_domain_type is set before executing any C code by early_xen_setup */
+enum xen_domain_type {
+ XEN_NATIVE,
+ XEN_PV_DOMAIN,
+ XEN_HVM_DOMAIN,
+};
+
+extern enum xen_domain_type xen_domain_type;
+
+#define xen_domain() (xen_domain_type != XEN_NATIVE)
+#define xen_pv_domain() (xen_domain_type == XEN_PV_DOMAIN)
+#define xen_initial_domain() (xen_pv_domain() && \
+ (xen_start_info->flags & SIF_INITDOMAIN))
+#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN)
+
+/* deprecated. remove this */
+#define is_running_on_xen() (xen_domain_type == XEN_PV_DOMAIN)
+
+extern struct shared_info *HYPERVISOR_shared_info;
+extern struct start_info *xen_start_info;
+
+void __init xen_setup_vcpu_info_placement(void);
+void force_evtchn_callback(void);
+
+/* for drivers/xen/balloon/balloon.c */
+#ifdef CONFIG_XEN_SCRUB_PAGES
+#define scrub_pages(_p, _n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
+#else
+#define scrub_pages(_p, _n) ((void)0)
+#endif
+
+/* For setup_arch() in arch/ia64/kernel/setup.c */
+void xen_ia64_enable_opt_feature(void);
+
+#else /* CONFIG_XEN */
+
+#define xen_domain() (0)
+#define xen_pv_domain() (0)
+#define xen_initial_domain() (0)
+#define xen_hvm_domain() (0)
+#define is_running_on_xen() (0) /* deprecated. remove this */
+#endif
+
+#define is_initial_xendomain() (0) /* deprecated. remove this */
+
+#endif /* _ASM_IA64_XEN_HYPERVISOR_H */
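Code shared between native and Xen builds can key off the new predicates; on native kernels they compile away to constants. A hypothetical guard:

static void example_late_setup(void)
{
	if (xen_pv_domain())
		xen_ia64_enable_opt_feature();	/* declared above */
	if (xen_initial_domain())
		printk(KERN_INFO "running as the initial domain\n");
}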
diff --git a/arch/ia64/include/asm/xen/inst.h b/arch/ia64/include/asm/xen/inst.h
new file mode 100644
index 00000000000..19c2ae1d878
--- /dev/null
+++ b/arch/ia64/include/asm/xen/inst.h
@@ -0,0 +1,458 @@
+/******************************************************************************
+ * arch/ia64/include/asm/xen/inst.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <asm/xen/privop.h>
+
+#define ia64_ivt xen_ivt
+#define DO_SAVE_MIN XEN_DO_SAVE_MIN
+
+#define __paravirt_switch_to xen_switch_to
+#define __paravirt_leave_syscall xen_leave_syscall
+#define __paravirt_work_processed_syscall xen_work_processed_syscall
+#define __paravirt_leave_kernel xen_leave_kernel
+#define __paravirt_pending_syscall_end xen_work_pending_syscall_end
+#define __paravirt_work_processed_syscall_target \
+ xen_work_processed_syscall
+
+#define MOV_FROM_IFA(reg) \
+ movl reg = XSI_IFA; \
+ ;; \
+ ld8 reg = [reg]
+
+#define MOV_FROM_ITIR(reg) \
+ movl reg = XSI_ITIR; \
+ ;; \
+ ld8 reg = [reg]
+
+#define MOV_FROM_ISR(reg) \
+ movl reg = XSI_ISR; \
+ ;; \
+ ld8 reg = [reg]
+
+#define MOV_FROM_IHA(reg) \
+ movl reg = XSI_IHA; \
+ ;; \
+ ld8 reg = [reg]
+
+#define MOV_FROM_IPSR(pred, reg) \
+(pred) movl reg = XSI_IPSR; \
+ ;; \
+(pred) ld8 reg = [reg]
+
+#define MOV_FROM_IIM(reg) \
+ movl reg = XSI_IIM; \
+ ;; \
+ ld8 reg = [reg]
+
+#define MOV_FROM_IIP(reg) \
+ movl reg = XSI_IIP; \
+ ;; \
+ ld8 reg = [reg]
+
+.macro __MOV_FROM_IVR reg, clob
+ .ifc "\reg", "r8"
+ XEN_HYPER_GET_IVR
+ .exitm
+ .endif
+ .ifc "\clob", "r8"
+ XEN_HYPER_GET_IVR
+ ;;
+ mov \reg = r8
+ .exitm
+ .endif
+
+ mov \clob = r8
+ ;;
+ XEN_HYPER_GET_IVR
+ ;;
+ mov \reg = r8
+ ;;
+ mov r8 = \clob
+.endm
+#define MOV_FROM_IVR(reg, clob) __MOV_FROM_IVR reg, clob
+
+.macro __MOV_FROM_PSR pred, reg, clob
+ .ifc "\reg", "r8"
+ (\pred) XEN_HYPER_GET_PSR;
+ .exitm
+ .endif
+ .ifc "\clob", "r8"
+ (\pred) XEN_HYPER_GET_PSR
+ ;;
+ (\pred) mov \reg = r8
+ .exitm
+ .endif
+
+ (\pred) mov \clob = r8
+ (\pred) XEN_HYPER_GET_PSR
+ ;;
+ (\pred) mov \reg = r8
+ (\pred) mov r8 = \clob
+.endm
+#define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob
+
+
+#define MOV_TO_IFA(reg, clob) \
+ movl clob = XSI_IFA; \
+ ;; \
+ st8 [clob] = reg \
+
+#define MOV_TO_ITIR(pred, reg, clob) \
+(pred) movl clob = XSI_ITIR; \
+ ;; \
+(pred) st8 [clob] = reg
+
+#define MOV_TO_IHA(pred, reg, clob) \
+(pred) movl clob = XSI_IHA; \
+ ;; \
+(pred) st8 [clob] = reg
+
+#define MOV_TO_IPSR(pred, reg, clob) \
+(pred) movl clob = XSI_IPSR; \
+ ;; \
+(pred) st8 [clob] = reg; \
+ ;;
+
+#define MOV_TO_IFS(pred, reg, clob) \
+(pred) movl clob = XSI_IFS; \
+ ;; \
+(pred) st8 [clob] = reg; \
+ ;;
+
+#define MOV_TO_IIP(reg, clob) \
+ movl clob = XSI_IIP; \
+ ;; \
+ st8 [clob] = reg
+
+.macro ____MOV_TO_KR kr, reg, clob0, clob1
+ .ifc "\clob0", "r9"
+ .error "clob0 \clob0 must not be r9"
+ .endif
+ .ifc "\clob1", "r8"
+ .error "clob1 \clob1 must not be r8"
+ .endif
+
+ .ifnc "\reg", "r9"
+ .ifnc "\clob1", "r9"
+ mov \clob1 = r9
+ .endif
+ mov r9 = \reg
+ .endif
+ .ifnc "\clob0", "r8"
+ mov \clob0 = r8
+ .endif
+ mov r8 = \kr
+ ;;
+ XEN_HYPER_SET_KR
+
+ .ifnc "\reg", "r9"
+ .ifnc "\clob1", "r9"
+ mov r9 = \clob1
+ .endif
+ .endif
+ .ifnc "\clob0", "r8"
+ mov r8 = \clob0
+ .endif
+.endm
+
+.macro __MOV_TO_KR kr, reg, clob0, clob1
+ .ifc "\clob0", "r9"
+ ____MOV_TO_KR \kr, \reg, \clob1, \clob0
+ .exitm
+ .endif
+ .ifc "\clob1", "r8"
+ ____MOV_TO_KR \kr, \reg, \clob1, \clob0
+ .exitm
+ .endif
+
+ ____MOV_TO_KR \kr, \reg, \clob0, \clob1
+.endm
+
+#define MOV_TO_KR(kr, reg, clob0, clob1) \
+ __MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1
+
+
+.macro __ITC_I pred, reg, clob
+ .ifc "\reg", "r8"
+ (\pred) XEN_HYPER_ITC_I
+ .exitm
+ .endif
+ .ifc "\clob", "r8"
+ (\pred) mov r8 = \reg
+ ;;
+ (\pred) XEN_HYPER_ITC_I
+ .exitm
+ .endif
+
+ (\pred) mov \clob = r8
+ (\pred) mov r8 = \reg
+ ;;
+ (\pred) XEN_HYPER_ITC_I
+ ;;
+ (\pred) mov r8 = \clob
+ ;;
+.endm
+#define ITC_I(pred, reg, clob) __ITC_I pred, reg, clob
+
+.macro __ITC_D pred, reg, clob
+ .ifc "\reg", "r8"
+ (\pred) XEN_HYPER_ITC_D
+ ;;
+ .exitm
+ .endif
+ .ifc "\clob", "r8"
+ (\pred) mov r8 = \reg
+ ;;
+ (\pred) XEN_HYPER_ITC_D
+ ;;
+ .exitm
+ .endif
+
+ (\pred) mov \clob = r8
+ (\pred) mov r8 = \reg
+ ;;
+ (\pred) XEN_HYPER_ITC_D
+ ;;
+ (\pred) mov r8 = \clob
+ ;;
+.endm
+#define ITC_D(pred, reg, clob) __ITC_D pred, reg, clob
+
+.macro __ITC_I_AND_D pred_i, pred_d, reg, clob
+ .ifc "\reg", "r8"
+ (\pred_i)XEN_HYPER_ITC_I
+ ;;
+ (\pred_d)XEN_HYPER_ITC_D
+ ;;
+ .exitm
+ .endif
+ .ifc "\clob", "r8"
+ mov r8 = \reg
+ ;;
+ (\pred_i)XEN_HYPER_ITC_I
+ ;;
+ (\pred_d)XEN_HYPER_ITC_D
+ ;;
+ .exitm
+ .endif
+
+ mov \clob = r8
+ mov r8 = \reg
+ ;;
+ (\pred_i)XEN_HYPER_ITC_I
+ ;;
+ (\pred_d)XEN_HYPER_ITC_D
+ ;;
+ mov r8 = \clob
+ ;;
+.endm
+#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
+ __ITC_I_AND_D pred_i, pred_d, reg, clob
+
+.macro __THASH pred, reg0, reg1, clob
+ .ifc "\reg0", "r8"
+ (\pred) mov r8 = \reg1
+ (\pred) XEN_HYPER_THASH
+ .exitm
+ .endif
+ .ifc "\reg1", "r8"
+ (\pred) XEN_HYPER_THASH
+ ;;
+ (\pred) mov \reg0 = r8
+ ;;
+ .exitm
+ .endif
+ .ifc "\clob", "r8"
+ (\pred) mov r8 = \reg1
+ (\pred) XEN_HYPER_THASH
+ ;;
+ (\pred) mov \reg0 = r8
+ ;;
+ .exitm
+ .endif
+
+ (\pred) mov \clob = r8
+ (\pred) mov r8 = \reg1
+ (\pred) XEN_HYPER_THASH
+ ;;
+ (\pred) mov \reg0 = r8
+ (\pred) mov r8 = \clob
+ ;;
+.endm
+#define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob
+
+#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
+ mov clob0 = 1; \
+ movl clob1 = XSI_PSR_IC; \
+ ;; \
+ st4 [clob1] = clob0 \
+ ;;
+
+#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
+ ;; \
+ srlz.d; \
+ mov clob1 = 1; \
+ movl clob0 = XSI_PSR_IC; \
+ ;; \
+ st4 [clob0] = clob1
+
+#define RSM_PSR_IC(clob) \
+ movl clob = XSI_PSR_IC; \
+ ;; \
+ st4 [clob] = r0; \
+ ;;
+
+/* pred will be clobbered */
+#define MASK_TO_PEND_OFS (-1)
+#define SSM_PSR_I(pred, pred_clob, clob) \
+(pred) movl clob = XSI_PSR_I_ADDR \
+ ;; \
+(pred) ld8 clob = [clob] \
+ ;; \
+ /* if (pred) vpsr.i = 1 */ \
+ /* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */ \
+(pred) st1 [clob] = r0, MASK_TO_PEND_OFS \
+ ;; \
+ /* if (vcpu->vcpu_info->evtchn_upcall_pending) */ \
+(pred) ld1 clob = [clob] \
+ ;; \
+(pred) cmp.ne.unc pred_clob, p0 = clob, r0 \
+ ;; \
+(pred_clob)XEN_HYPER_SSM_I /* do a real ssm psr.i */
+
+#define RSM_PSR_I(pred, clob0, clob1) \
+ movl clob0 = XSI_PSR_I_ADDR; \
+ mov clob1 = 1; \
+ ;; \
+ ld8 clob0 = [clob0]; \
+ ;; \
+(pred) st1 [clob0] = clob1
+
+#define RSM_PSR_I_IC(clob0, clob1, clob2) \
+ movl clob0 = XSI_PSR_I_ADDR; \
+ movl clob1 = XSI_PSR_IC; \
+ ;; \
+ ld8 clob0 = [clob0]; \
+ mov clob2 = 1; \
+ ;; \
+ /* note: clears both vpsr.i and vpsr.ic! */ \
+ st1 [clob0] = clob2; \
+ st4 [clob1] = r0; \
+ ;;
+
+#define RSM_PSR_DT \
+ XEN_HYPER_RSM_PSR_DT
+
+#define SSM_PSR_DT_AND_SRLZ_I \
+ XEN_HYPER_SSM_PSR_DT
+
+#define BSW_0(clob0, clob1, clob2) \
+ ;; \
+ /* r16-r31 all now hold bank1 values */ \
+ mov clob2 = ar.unat; \
+ movl clob0 = XSI_BANK1_R16; \
+ movl clob1 = XSI_BANK1_R16 + 8; \
+ ;; \
+.mem.offset 0, 0; st8.spill [clob0] = r16, 16; \
+.mem.offset 8, 0; st8.spill [clob1] = r17, 16; \
+ ;; \
+.mem.offset 0, 0; st8.spill [clob0] = r18, 16; \
+.mem.offset 8, 0; st8.spill [clob1] = r19, 16; \
+ ;; \
+.mem.offset 0, 0; st8.spill [clob0] = r20, 16; \
+.mem.offset 8, 0; st8.spill [clob1] = r21, 16; \
+ ;; \
+.mem.offset 0, 0; st8.spill [clob0] = r22, 16; \
+.mem.offset 8, 0; st8.spill [clob1] = r23, 16; \
+ ;; \
+.mem.offset 0, 0; st8.spill [clob0] = r24, 16; \
+.mem.offset 8, 0; st8.spill [clob1] = r25, 16; \
+ ;; \
+.mem.offset 0, 0; st8.spill [clob0] = r26, 16; \
+.mem.offset 8, 0; st8.spill [clob1] = r27, 16; \
+ ;; \
+.mem.offset 0, 0; st8.spill [clob0] = r28, 16; \
+.mem.offset 8, 0; st8.spill [clob1] = r29, 16; \
+ ;; \
+.mem.offset 0, 0; st8.spill [clob0] = r30, 16; \
+.mem.offset 8, 0; st8.spill [clob1] = r31, 16; \
+ ;; \
+ mov clob1 = ar.unat; \
+ movl clob0 = XSI_B1NAT; \
+ ;; \
+ st8 [clob0] = clob1; \
+ mov ar.unat = clob2; \
+ movl clob0 = XSI_BANKNUM; \
+ ;; \
+ st4 [clob0] = r0
+
+
+ /* FIXME: THIS CODE IS NOT NaT SAFE! */
+#define XEN_BSW_1(clob) \
+ mov clob = ar.unat; \
+ movl r30 = XSI_B1NAT; \
+ ;; \
+ ld8 r30 = [r30]; \
+ mov r31 = 1; \
+ ;; \
+ mov ar.unat = r30; \
+ movl r30 = XSI_BANKNUM; \
+ ;; \
+ st4 [r30] = r31; \
+ movl r30 = XSI_BANK1_R16; \
+ movl r31 = XSI_BANK1_R16+8; \
+ ;; \
+ ld8.fill r16 = [r30], 16; \
+ ld8.fill r17 = [r31], 16; \
+ ;; \
+ ld8.fill r18 = [r30], 16; \
+ ld8.fill r19 = [r31], 16; \
+ ;; \
+ ld8.fill r20 = [r30], 16; \
+ ld8.fill r21 = [r31], 16; \
+ ;; \
+ ld8.fill r22 = [r30], 16; \
+ ld8.fill r23 = [r31], 16; \
+ ;; \
+ ld8.fill r24 = [r30], 16; \
+ ld8.fill r25 = [r31], 16; \
+ ;; \
+ ld8.fill r26 = [r30], 16; \
+ ld8.fill r27 = [r31], 16; \
+ ;; \
+ ld8.fill r28 = [r30], 16; \
+ ld8.fill r29 = [r31], 16; \
+ ;; \
+ ld8.fill r30 = [r30]; \
+ ld8.fill r31 = [r31]; \
+ ;; \
+ mov ar.unat = clob
+
+#define BSW_1(clob0, clob1) XEN_BSW_1(clob1)
+
+
+#define COVER \
+ XEN_HYPER_COVER
+
+#define RFI \
+ XEN_HYPER_RFI; \
+ dv_serialize_data
diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h
new file mode 100644
index 00000000000..f00fab40854
--- /dev/null
+++ b/arch/ia64/include/asm/xen/interface.h
@@ -0,0 +1,346 @@
+/******************************************************************************
+ * arch-ia64/hypervisor-if.h
+ *
+ * Guest OS interface to IA64 Xen.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright by those who contributed. (in alphabetical order)
+ *
+ * Anthony Xu <anthony.xu@intel.com>
+ * Eddie Dong <eddie.dong@intel.com>
+ * Fred Yang <fred.yang@intel.com>
+ * Kevin Tian <kevin.tian@intel.com>
+ * Alex Williamson <alex.williamson@hp.com>
+ * Chris Wright <chrisw@sous-sol.org>
+ * Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
+ * Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com>
+ * Hollis Blanchard <hollisb@us.ibm.com>
+ * Isaku Yamahata <yamahata@valinux.co.jp>
+ * Jan Beulich <jbeulich@novell.com>
+ * John Levon <john.levon@sun.com>
+ * Kazuhiro Suzuki <kaz@jp.fujitsu.com>
+ * Keir Fraser <keir.fraser@citrix.com>
+ * Kouya Shimura <kouya@jp.fujitsu.com>
+ * Masaki Kanno <kanno.masaki@jp.fujitsu.com>
+ * Matt Chapman <matthewc@hp.com>
+ * Matthew Chapman <matthewc@hp.com>
+ * Samuel Thibault <samuel.thibault@eu.citrix.com>
+ * Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>
+ * Tristan Gingold <tgingold@free.fr>
+ * Tsunehisa Doi <Doi.Tsunehisa@jp.fujitsu.com>
+ * Yutaka Ezaki <yutaka.ezaki@jp.fujitsu.com>
+ * Zhang Xin <xing.z.zhang@intel.com>
+ * Zhang xiantao <xiantao.zhang@intel.com>
+ * dan.magenheimer@hp.com
+ * ian.pratt@cl.cam.ac.uk
+ * michael.fetterman@cl.cam.ac.uk
+ */
+
+#ifndef _ASM_IA64_XEN_INTERFACE_H
+#define _ASM_IA64_XEN_INTERFACE_H
+
+#define __DEFINE_GUEST_HANDLE(name, type) \
+ typedef struct { type *p; } __guest_handle_ ## name
+
+#define DEFINE_GUEST_HANDLE_STRUCT(name) \
+ __DEFINE_GUEST_HANDLE(name, struct name)
+#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
+#define GUEST_HANDLE(name) __guest_handle_ ## name
+#define GUEST_HANDLE_64(name) GUEST_HANDLE(name)
+#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
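+
+/* Editorial usage sketch (hypothetical variables): a guest handle wraps a
+ * guest pointer so the hypercall ABI stays stable across guest types, e.g.
+ *
+ *	GUEST_HANDLE(xen_pfn_t) extent_start;
+ *	xen_pfn_t *frames = frame_list;
+ *	set_xen_guest_handle(extent_start, frames);
+ */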
+
+#ifndef __ASSEMBLY__
+/* Guest handles for primitive C types. */
+__DEFINE_GUEST_HANDLE(uchar, unsigned char);
+__DEFINE_GUEST_HANDLE(uint, unsigned int);
+__DEFINE_GUEST_HANDLE(ulong, unsigned long);
+__DEFINE_GUEST_HANDLE(u64, unsigned long);
+DEFINE_GUEST_HANDLE(char);
+DEFINE_GUEST_HANDLE(int);
+DEFINE_GUEST_HANDLE(long);
+DEFINE_GUEST_HANDLE(void);
+
+typedef unsigned long xen_pfn_t;
+DEFINE_GUEST_HANDLE(xen_pfn_t);
+#define PRI_xen_pfn "lx"
+#endif
+
+/* Arch specific VIRQs definition */
+#define VIRQ_ITC VIRQ_ARCH_0 /* V. Virtual itc timer */
+#define VIRQ_MCA_CMC VIRQ_ARCH_1 /* MCA cmc interrupt */
+#define VIRQ_MCA_CPE VIRQ_ARCH_2 /* MCA cpe interrupt */
+
+/* Maximum number of virtual CPUs in multi-processor guests. */
+/* Keep sizeof(struct shared_page) <= PAGE_SIZE;
+ * this is checked in arch/ia64/xen/hypervisor.c. */
+#define MAX_VIRT_CPUS 64
+
+#ifndef __ASSEMBLY__
+
+#define INVALID_MFN (~0UL)
+
+union vac {
+ unsigned long value;
+ struct {
+ int a_int:1;
+ int a_from_int_cr:1;
+ int a_to_int_cr:1;
+ int a_from_psr:1;
+ int a_from_cpuid:1;
+ int a_cover:1;
+ int a_bsw:1;
+ long reserved:57;
+ };
+};
+
+union vdc {
+ unsigned long value;
+ struct {
+ int d_vmsw:1;
+ int d_extint:1;
+ int d_ibr_dbr:1;
+ int d_pmc:1;
+ int d_to_pmd:1;
+ int d_itm:1;
+ long reserved:58;
+ };
+};
+
+struct mapped_regs {
+ union vac vac;
+ union vdc vdc;
+ unsigned long virt_env_vaddr;
+ unsigned long reserved1[29];
+ unsigned long vhpi;
+ unsigned long reserved2[95];
+ union {
+ unsigned long vgr[16];
+ unsigned long bank1_regs[16]; /* bank1 regs (r16-r31)
+ when bank0 active */
+ };
+ union {
+ unsigned long vbgr[16];
+ unsigned long bank0_regs[16]; /* bank0 regs (r16-r31)
+ when bank1 active */
+ };
+ unsigned long vnat;
+ unsigned long vbnat;
+ unsigned long vcpuid[5];
+ unsigned long reserved3[11];
+ unsigned long vpsr;
+ unsigned long vpr;
+ unsigned long reserved4[76];
+ union {
+ unsigned long vcr[128];
+ struct {
+ unsigned long dcr; /* CR0 */
+ unsigned long itm;
+ unsigned long iva;
+ unsigned long rsv1[5];
+ unsigned long pta; /* CR8 */
+ unsigned long rsv2[7];
+ unsigned long ipsr; /* CR16 */
+ unsigned long isr;
+ unsigned long rsv3;
+ unsigned long iip;
+ unsigned long ifa;
+ unsigned long itir;
+ unsigned long iipa;
+ unsigned long ifs;
+ unsigned long iim; /* CR24 */
+ unsigned long iha;
+ unsigned long rsv4[38];
+ unsigned long lid; /* CR64 */
+ unsigned long ivr;
+ unsigned long tpr;
+ unsigned long eoi;
+ unsigned long irr[4];
+ unsigned long itv; /* CR72 */
+ unsigned long pmv;
+ unsigned long cmcv;
+ unsigned long rsv5[5];
+ unsigned long lrr0; /* CR80 */
+ unsigned long lrr1;
+ unsigned long rsv6[46];
+ };
+ };
+ union {
+ unsigned long reserved5[128];
+ struct {
+ unsigned long precover_ifs;
+ unsigned long unat; /* not sure if this is needed
+ until NaT arch is done */
+ int interrupt_collection_enabled; /* virtual psr.ic */
+
+ /* The virtual interrupt deliverable flag is
+ * now evtchn_upcall_mask in the shared info area.
+ * interrupt_mask_addr is the address
+ * of evtchn_upcall_mask for the current vcpu.
+ */
+ unsigned char *interrupt_mask_addr;
+ int pending_interruption;
+ unsigned char vpsr_pp;
+ unsigned char vpsr_dfh;
+ unsigned char hpsr_dfh;
+ unsigned char hpsr_mfh;
+ unsigned long reserved5_1[4];
+ int metaphysical_mode; /* 1 = use metaphys mapping
+ 0 = use virtual */
+ int banknum; /* 0 or 1, which virtual
+ register bank is active */
+ unsigned long rrs[8]; /* region registers */
+ unsigned long krs[8]; /* kernel registers */
+ unsigned long tmp[16]; /* temp registers
+ (e.g. for hyperprivops) */
+ };
+ };
+};
+
+struct arch_vcpu_info {
+ /* nothing */
+};
+
+/*
+ * This structure is used for the magic page in the domain pseudo-physical
+ * address space and for the result of XENMEM_machine_memory_map.
+ * In the XENMEM_machine_memory_map result,
+ * xen_memory_map::nr_entries indicates the size in bytes,
+ * including struct xen_ia64_memmap_info, not the number of entries.
+ */
+struct xen_ia64_memmap_info {
+ uint64_t efi_memmap_size; /* size of EFI memory map */
+ uint64_t efi_memdesc_size; /* size of an EFI memory map
+ * descriptor */
+ uint32_t efi_memdesc_version; /* memory descriptor version */
+ void *memdesc[0]; /* array of efi_memory_desc_t */
+};
+
+struct arch_shared_info {
+ /* PFN of the start_info page. */
+ unsigned long start_info_pfn;
+
+ /* Interrupt vector for event channel. */
+ int evtchn_vector;
+
+ /* PFN of memmap_info page */
+ unsigned int memmap_info_num_pages; /* currently only the = 1 case
+ is supported. */
+ unsigned long memmap_info_pfn;
+
+ uint64_t pad[31];
+};
+
+struct xen_callback {
+ unsigned long ip;
+};
+typedef struct xen_callback xen_callback_t;
+
+#endif /* !__ASSEMBLY__ */
+
+/* Size of the shared_info area (this is not related to page size). */
+#define XSI_SHIFT 14
+#define XSI_SIZE (1 << XSI_SHIFT)
+/* Log size of mapped_regs area (64 KB - only 4KB is used). */
+#define XMAPPEDREGS_SHIFT 12
+#define XMAPPEDREGS_SIZE (1 << XMAPPEDREGS_SHIFT)
+/* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */
+#define XMAPPEDREGS_OFS XSI_SIZE
+
+/* Hyperprivops. */
+#define HYPERPRIVOP_START 0x1
+#define HYPERPRIVOP_RFI (HYPERPRIVOP_START + 0x0)
+#define HYPERPRIVOP_RSM_DT (HYPERPRIVOP_START + 0x1)
+#define HYPERPRIVOP_SSM_DT (HYPERPRIVOP_START + 0x2)
+#define HYPERPRIVOP_COVER (HYPERPRIVOP_START + 0x3)
+#define HYPERPRIVOP_ITC_D (HYPERPRIVOP_START + 0x4)
+#define HYPERPRIVOP_ITC_I (HYPERPRIVOP_START + 0x5)
+#define HYPERPRIVOP_SSM_I (HYPERPRIVOP_START + 0x6)
+#define HYPERPRIVOP_GET_IVR (HYPERPRIVOP_START + 0x7)
+#define HYPERPRIVOP_GET_TPR (HYPERPRIVOP_START + 0x8)
+#define HYPERPRIVOP_SET_TPR (HYPERPRIVOP_START + 0x9)
+#define HYPERPRIVOP_EOI (HYPERPRIVOP_START + 0xa)
+#define HYPERPRIVOP_SET_ITM (HYPERPRIVOP_START + 0xb)
+#define HYPERPRIVOP_THASH (HYPERPRIVOP_START + 0xc)
+#define HYPERPRIVOP_PTC_GA (HYPERPRIVOP_START + 0xd)
+#define HYPERPRIVOP_ITR_D (HYPERPRIVOP_START + 0xe)
+#define HYPERPRIVOP_GET_RR (HYPERPRIVOP_START + 0xf)
+#define HYPERPRIVOP_SET_RR (HYPERPRIVOP_START + 0x10)
+#define HYPERPRIVOP_SET_KR (HYPERPRIVOP_START + 0x11)
+#define HYPERPRIVOP_FC (HYPERPRIVOP_START + 0x12)
+#define HYPERPRIVOP_GET_CPUID (HYPERPRIVOP_START + 0x13)
+#define HYPERPRIVOP_GET_PMD (HYPERPRIVOP_START + 0x14)
+#define HYPERPRIVOP_GET_EFLAG (HYPERPRIVOP_START + 0x15)
+#define HYPERPRIVOP_SET_EFLAG (HYPERPRIVOP_START + 0x16)
+#define HYPERPRIVOP_RSM_BE (HYPERPRIVOP_START + 0x17)
+#define HYPERPRIVOP_GET_PSR (HYPERPRIVOP_START + 0x18)
+#define HYPERPRIVOP_SET_RR0_TO_RR4 (HYPERPRIVOP_START + 0x19)
+#define HYPERPRIVOP_MAX (0x1a)
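+
+/* Editorial note: a hyperprivop is issued from the guest with the ia64
+ * "break" instruction, using one of the numbers above as the immediate;
+ * Xen decodes the immediate rather than reflecting the fault back. */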
+
+/* Fast and light hypercalls. */
+#define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1
+
+/* Xencomm macros. */
+#define XENCOMM_INLINE_MASK 0xf800000000000000UL
+#define XENCOMM_INLINE_FLAG 0x8000000000000000UL
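+/* Editorial sketch: an "inline" xencomm handle encodes a physical address
+ * directly, roughly
+ *
+ *	handle = paddr | XENCOMM_INLINE_FLAG;
+ *	paddr  = handle & ~XENCOMM_INLINE_MASK;
+ *
+ * a handle without XENCOMM_INLINE_FLAG set refers to a descriptor page. */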
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Optimization features.
+ * The hypervisor may do some special optimizations for guests. This hypercall
+ * can be used to switch these special optimizations on and off.
+ */
+#define __HYPERVISOR_opt_feature 0x700UL
+
+#define XEN_IA64_OPTF_OFF 0x0
+#define XEN_IA64_OPTF_ON 0x1
+
+/*
+ * If this feature is switched on, the hypervisor inserts the
+ * TLB entries without calling the guest's trap handler.
+ * This is useful for guests that use region 7 for identity mapping,
+ * as the Linux kernel does.
+ */
+#define XEN_IA64_OPTF_IDENT_MAP_REG7 1
+
+/* Identity mapping of region 4 addresses in HVM. */
+#define XEN_IA64_OPTF_IDENT_MAP_REG4 2
+
+/* Identity mapping of region 5 addresses in HVM. */
+#define XEN_IA64_OPTF_IDENT_MAP_REG5 3
+
+#define XEN_IA64_OPTF_IDENT_MAP_NOT_SET (0)
+
+struct xen_ia64_opt_feature {
+ unsigned long cmd; /* Which feature */
+ unsigned char on; /* Switch feature on/off */
+ union {
+ struct {
+ /* The page protection bit mask of the pte.
+ * This will be or'ed with the pte. */
+ unsigned long pgprot;
+ unsigned long key; /* A protection key for itir.*/
+ };
+ };
+};
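+
+/* Editorial usage sketch (error handling omitted; the field values are
+ * illustrative assumptions, not prescribed by this header):
+ *
+ *	struct xen_ia64_opt_feature optf = {
+ *		.cmd	= XEN_IA64_OPTF_IDENT_MAP_REG7,
+ *		.on	= XEN_IA64_OPTF_ON,
+ *		.pgprot	= pgprot_val(PAGE_KERNEL),
+ *		.key	= 0,
+ *	};
+ *	xencomm_hypercall_opt_feature(&optf);
+ */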
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IA64_XEN_INTERFACE_H */
diff --git a/arch/ia64/include/asm/xen/irq.h b/arch/ia64/include/asm/xen/irq.h
new file mode 100644
index 00000000000..a9045098300
--- /dev/null
+++ b/arch/ia64/include/asm/xen/irq.h
@@ -0,0 +1,44 @@
+/******************************************************************************
+ * arch/ia64/include/asm/xen/irq.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _ASM_IA64_XEN_IRQ_H
+#define _ASM_IA64_XEN_IRQ_H
+
+/*
+ * The flat IRQ space is divided into two regions:
+ * 1. A one-to-one mapping of real physical IRQs. This space is only used
+ * if we have physical device-access privilege. This region is at the
+ * start of the IRQ space so that existing device drivers do not need
+ * to be modified to translate physical IRQ numbers into our IRQ space.
+ * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
+ * are bound using the provided bind/unbind functions.
+ */
+
+#define XEN_PIRQ_BASE 0
+#define XEN_NR_PIRQS 256
+
+#define XEN_DYNIRQ_BASE (XEN_PIRQ_BASE + XEN_NR_PIRQS)
+#define XEN_NR_DYNIRQS (NR_CPUS * 8)
+
+#define XEN_NR_IRQS (XEN_NR_PIRQS + XEN_NR_DYNIRQS)
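+
+/* Editorial note: physical IRQ i maps to (XEN_PIRQ_BASE + i) and the n-th
+ * dynamically bound event channel to (XEN_DYNIRQ_BASE + n), so every Xen
+ * IRQ falls in [0, XEN_NR_IRQS). */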
+
+#endif /* _ASM_IA64_XEN_IRQ_H */
diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h
new file mode 100644
index 00000000000..4d92d9bbda7
--- /dev/null
+++ b/arch/ia64/include/asm/xen/minstate.h
@@ -0,0 +1,134 @@
+/*
+ * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
+ * the minimum state necessary that allows us to turn psr.ic back
+ * on.
+ *
+ * Assumed state upon entry:
+ * psr.ic: off
+ * r31: contains saved predicates (pr)
+ *
+ * Upon exit, the state is as follows:
+ * psr.ic: off
+ * r2 = points to &pt_regs.r16
+ * r8 = contents of ar.ccv
+ * r9 = contents of ar.csd
+ * r10 = contents of ar.ssd
+ * r11 = FPSR_DEFAULT
+ * r12 = kernel sp (kernel virtual address)
+ * r13 = points to current task_struct (kernel virtual address)
+ * p15 = TRUE if psr.i is set in cr.ipsr
+ * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
+ * preserved
+ * CONFIG_XEN note: p6/p7 are not preserved
+ *
+ * Note that psr.ic is NOT turned on by this macro. This is so that
+ * we can pass interruption state as arguments to a handler.
+ */
+#define XEN_DO_SAVE_MIN(__COVER,SAVE_IFS,EXTRA,WORKAROUND) \
+ mov r16=IA64_KR(CURRENT); /* M */ \
+ mov r27=ar.rsc; /* M */ \
+ mov r20=r1; /* A */ \
+ mov r25=ar.unat; /* M */ \
+ MOV_FROM_IPSR(p0,r29); /* M */ \
+ MOV_FROM_IIP(r28); /* M */ \
+ mov r21=ar.fpsr; /* M */ \
+ mov r26=ar.pfs; /* I */ \
+ __COVER; /* B;; (or nothing) */ \
+ adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
+ ;; \
+ ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \
+ st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \
+ adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
+ /* switch from user to kernel RBS: */ \
+ ;; \
+ invala; /* M */ \
+ /* SAVE_IFS;*/ /* see xen special handling below */ \
+ cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \
+ ;; \
+(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
+ ;; \
+(pUStk) mov.m r24=ar.rnat; \
+(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
+(pKStk) mov r1=sp; /* get sp */ \
+ ;; \
+(pUStk) lfetch.fault.excl.nt1 [r22]; \
+(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
+(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
+ ;; \
+(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
+(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
+ ;; \
+(pUStk) mov r18=ar.bsp; \
+(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
+ adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
+ adds r16=PT(CR_IPSR),r1; \
+ ;; \
+ lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
+ st8 [r16]=r29; /* save cr.ipsr */ \
+ ;; \
+ lfetch.fault.excl.nt1 [r17]; \
+ tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
+ mov r29=b0 \
+ ;; \
+ WORKAROUND; \
+ adds r16=PT(R8),r1; /* initialize first base pointer */ \
+ adds r17=PT(R9),r1; /* initialize second base pointer */ \
+(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
+ ;; \
+.mem.offset 0,0; st8.spill [r16]=r8,16; \
+.mem.offset 8,0; st8.spill [r17]=r9,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r16]=r10,24; \
+ movl r8=XSI_PRECOVER_IFS; \
+.mem.offset 8,0; st8.spill [r17]=r11,24; \
+ ;; \
+ /* xen special handling for possibly lazy cover */ \
+ /* SAVE_MIN case in dispatch_ia32_handler: mov r30=r0 */ \
+ ld8 r30=[r8]; \
+(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
+ st8 [r16]=r28,16; /* save cr.iip */ \
+ ;; \
+ st8 [r17]=r30,16; /* save cr.ifs */ \
+ mov r8=ar.ccv; \
+ mov r9=ar.csd; \
+ mov r10=ar.ssd; \
+ movl r11=FPSR_DEFAULT; /* L-unit */ \
+ ;; \
+ st8 [r16]=r25,16; /* save ar.unat */ \
+ st8 [r17]=r26,16; /* save ar.pfs */ \
+ shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
+ ;; \
+ st8 [r16]=r27,16; /* save ar.rsc */ \
+(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \
+(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
+ ;; /* avoid RAW on r16 & r17 */ \
+(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
+ st8 [r17]=r31,16; /* save predicates */ \
+(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
+ ;; \
+ st8 [r16]=r29,16; /* save b0 */ \
+ st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
+ cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
+ ;; \
+.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
+.mem.offset 8,0; st8.spill [r17]=r12,16; \
+ adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
+ ;; \
+.mem.offset 0,0; st8.spill [r16]=r13,16; \
+.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \
+ mov r13=IA64_KR(CURRENT); /* establish `current' */ \
+ ;; \
+.mem.offset 0,0; st8.spill [r16]=r15,16; \
+.mem.offset 8,0; st8.spill [r17]=r14,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r16]=r2,16; \
+.mem.offset 8,0; st8.spill [r17]=r3,16; \
+ ACCOUNT_GET_STAMP \
+ adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
+ ;; \
+ EXTRA; \
+ movl r1=__gp; /* establish kernel global pointer */ \
+ ;; \
+ ACCOUNT_SYS_ENTER \
+ BSW_1(r3,r14); /* switch back to bank 1 (must be last in insn group) */ \
+ ;;
diff --git a/arch/ia64/include/asm/xen/page.h b/arch/ia64/include/asm/xen/page.h
new file mode 100644
index 00000000000..03441a780b5
--- /dev/null
+++ b/arch/ia64/include/asm/xen/page.h
@@ -0,0 +1,65 @@
+/******************************************************************************
+ * arch/ia64/include/asm/xen/page.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _ASM_IA64_XEN_PAGE_H
+#define _ASM_IA64_XEN_PAGE_H
+
+#define INVALID_P2M_ENTRY (~0UL)
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+ return mfn;
+}
+
+static inline unsigned long pfn_to_mfn(unsigned long pfn)
+{
+ return pfn;
+}
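+
+/* Editorial note: ia64 Xen guests run with an auto-translated physmap,
+ * so pseudo-physical and machine frame numbers coincide; hence the
+ * identity conversions above. */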
+
+#define phys_to_machine_mapping_valid(_x) (1)
+
+static inline void *mfn_to_virt(unsigned long mfn)
+{
+ return __va(mfn << PAGE_SHIFT);
+}
+
+static inline unsigned long virt_to_mfn(void *virt)
+{
+ return __pa(virt) >> PAGE_SHIFT;
+}
+
+/* for tpmfront.c */
+static inline unsigned long virt_to_machine(void *virt)
+{
+ return __pa(virt);
+}
+
+static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+ /* nothing */
+}
+
+#define pte_mfn(_x) pte_pfn(_x)
+#define mfn_pte(_x, _y) __pte_ma(0) /* unmodified use */
+#define __pte_ma(_x) ((pte_t) {(_x)}) /* unmodified use */
+
+#endif /* _ASM_IA64_XEN_PAGE_H */
diff --git a/arch/ia64/include/asm/xen/privop.h b/arch/ia64/include/asm/xen/privop.h
new file mode 100644
index 00000000000..71ec7546e10
--- /dev/null
+++ b/arch/ia64/include/asm/xen/privop.h
@@ -0,0 +1,129 @@
+#ifndef _ASM_IA64_XEN_PRIVOP_H
+#define _ASM_IA64_XEN_PRIVOP_H
+
+/*
+ * Copyright (C) 2005 Hewlett-Packard Co
+ * Dan Magenheimer <dan.magenheimer@hp.com>
+ *
+ * Paravirtualizations of privileged operations for Xen/ia64
+ *
+ *
+ * inline privop and paravirt_alt support
+ * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ */
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h> /* arch-ia64.h requires uint64_t */
+#endif
+#include <asm/xen/interface.h>
+
+/* At 1 MB, before per-cpu space but still addressable using addl instead
+ of movl. */
+#define XSI_BASE 0xfffffffffff00000
+
+/* Address of mapped regs. */
+#define XMAPPEDREGS_BASE (XSI_BASE + XSI_SIZE)
+
+#ifdef __ASSEMBLY__
+#define XEN_HYPER_RFI break HYPERPRIVOP_RFI
+#define XEN_HYPER_RSM_PSR_DT break HYPERPRIVOP_RSM_DT
+#define XEN_HYPER_SSM_PSR_DT break HYPERPRIVOP_SSM_DT
+#define XEN_HYPER_COVER break HYPERPRIVOP_COVER
+#define XEN_HYPER_ITC_D break HYPERPRIVOP_ITC_D
+#define XEN_HYPER_ITC_I break HYPERPRIVOP_ITC_I
+#define XEN_HYPER_SSM_I break HYPERPRIVOP_SSM_I
+#define XEN_HYPER_GET_IVR break HYPERPRIVOP_GET_IVR
+#define XEN_HYPER_THASH break HYPERPRIVOP_THASH
+#define XEN_HYPER_ITR_D break HYPERPRIVOP_ITR_D
+#define XEN_HYPER_SET_KR break HYPERPRIVOP_SET_KR
+#define XEN_HYPER_GET_PSR break HYPERPRIVOP_GET_PSR
+#define XEN_HYPER_SET_RR0_TO_RR4 break HYPERPRIVOP_SET_RR0_TO_RR4
+
+#define XSI_IFS (XSI_BASE + XSI_IFS_OFS)
+#define XSI_PRECOVER_IFS (XSI_BASE + XSI_PRECOVER_IFS_OFS)
+#define XSI_IFA (XSI_BASE + XSI_IFA_OFS)
+#define XSI_ISR (XSI_BASE + XSI_ISR_OFS)
+#define XSI_IIM (XSI_BASE + XSI_IIM_OFS)
+#define XSI_ITIR (XSI_BASE + XSI_ITIR_OFS)
+#define XSI_PSR_I_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS)
+#define XSI_PSR_IC (XSI_BASE + XSI_PSR_IC_OFS)
+#define XSI_IPSR (XSI_BASE + XSI_IPSR_OFS)
+#define XSI_IIP (XSI_BASE + XSI_IIP_OFS)
+#define XSI_B1NAT (XSI_BASE + XSI_B1NATS_OFS)
+#define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS)
+#define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS)
+#define XSI_IHA (XSI_BASE + XSI_IHA_OFS)
+#endif
+
+#ifndef __ASSEMBLY__
+
+/************************************************/
+/* Instructions paravirtualized for correctness */
+/************************************************/
+
+/* "fc" and "thash" are privilege-sensitive instructions, meaning they
+ * may have different semantics depending on whether they are executed
+ * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
+ * be allowed to execute directly, lest incorrect semantics result. */
+extern void xen_fc(unsigned long addr);
+extern unsigned long xen_thash(unsigned long addr);
+
+/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
+ * is not currently used (though it may be in a long-format VHPT system!),
+ * and the semantics of "cover" only change if psr.ic is off, which is very
+ * rare (and currently non-existent outside of assembly code). */
+
+/* There are also privilege-sensitive registers. These registers are
+ * readable at any privilege level but only writable at PL0. */
+extern unsigned long xen_get_cpuid(int index);
+extern unsigned long xen_get_pmd(int index);
+
+extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */
+extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */
+
+/************************************************/
+/* Instructions paravirtualized for performance */
+/************************************************/
+
+/* Xen uses memory-mapped virtual privileged registers for access to many
+ * performance-sensitive privileged registers. Some, like the processor
+ * status register (psr), are broken up into multiple memory locations.
+ * Others, like "pend", are abstractions based on privileged registers.
+ * "Pend" is guaranteed to be set if reading cr.ivr would return a
+ * (non-spurious) interrupt. */
+#define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE)
+
+#define XSI_PSR_I \
+ (*XEN_MAPPEDREGS->interrupt_mask_addr)
+#define xen_get_virtual_psr_i() \
+ (!XSI_PSR_I)
+#define xen_set_virtual_psr_i(_val) \
+ ({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; })
+#define xen_set_virtual_psr_ic(_val) \
+ ({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; })
+#define xen_get_virtual_pend() \
+ (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
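+
+/* Editorial sketch of an irq-save/restore sequence built on these
+ * accessors (hypothetical flow, not the kernel's actual helpers):
+ *
+ *	unsigned long flags = xen_get_virtual_psr_i();	(save vpsr.i)
+ *	xen_set_virtual_psr_i(0);			(disable)
+ *	...
+ *	xen_set_virtual_psr_i(flags);			(restore)
+ *	if (flags && xen_get_virtual_pend())
+ *		xen_hyper_ssm_i();	(deliver what arrived while masked)
+ */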
+
+/* Although all privileged operations can be left to trap and will
+ * be properly handled by Xen, some are frequent enough that we use
+ * hyperprivops for performance. */
+extern unsigned long xen_get_psr(void);
+extern unsigned long xen_get_ivr(void);
+extern unsigned long xen_get_tpr(void);
+extern void xen_hyper_ssm_i(void);
+extern void xen_set_itm(unsigned long);
+extern void xen_set_tpr(unsigned long);
+extern void xen_eoi(unsigned long);
+extern unsigned long xen_get_rr(unsigned long index);
+extern void xen_set_rr(unsigned long index, unsigned long val);
+extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
+ unsigned long val2, unsigned long val3,
+ unsigned long val4);
+extern void xen_set_kr(unsigned long index, unsigned long val);
+extern void xen_ptcga(unsigned long addr, unsigned long size);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_XEN_PRIVOP_H */
diff --git a/arch/ia64/include/asm/xen/xcom_hcall.h b/arch/ia64/include/asm/xen/xcom_hcall.h
new file mode 100644
index 00000000000..20b2950c71b
--- /dev/null
+++ b/arch/ia64/include/asm/xen/xcom_hcall.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2006 Tristan Gingold <tristan.gingold@bull.net>, Bull SAS
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_IA64_XEN_XCOM_HCALL_H
+#define _ASM_IA64_XEN_XCOM_HCALL_H
+
+/* These functions create an inline or mini descriptor for the parameters
+   and call the corresponding xencomm_arch_hypercall_X.
+   Architectures should define HYPERVISOR_xxx as xencomm_hypercall_xxx unless
+   they want to use their own wrapper. */
+extern int xencomm_hypercall_console_io(int cmd, int count, char *str);
+
+extern int xencomm_hypercall_event_channel_op(int cmd, void *op);
+
+extern int xencomm_hypercall_xen_version(int cmd, void *arg);
+
+extern int xencomm_hypercall_physdev_op(int cmd, void *op);
+
+extern int xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
+ unsigned int count);
+
+extern int xencomm_hypercall_sched_op(int cmd, void *arg);
+
+extern int xencomm_hypercall_multicall(void *call_list, int nr_calls);
+
+extern int xencomm_hypercall_callback_op(int cmd, void *arg);
+
+extern int xencomm_hypercall_memory_op(unsigned int cmd, void *arg);
+
+extern int xencomm_hypercall_suspend(unsigned long srec);
+
+extern long xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg);
+
+extern long xencomm_hypercall_opt_feature(void *arg);
+
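+/* Editorial sketch: a port typically wires the generic names to these
+ * wrappers, e.g. (hypothetical placement in its hypercall header):
+ *
+ *	#define HYPERVISOR_sched_op(cmd, arg) \
+ *		xencomm_hypercall_sched_op(cmd, arg)
+ */
+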
+#endif /* _ASM_IA64_XEN_XCOM_HCALL_H */
diff --git a/arch/ia64/include/asm/xen/xencomm.h b/arch/ia64/include/asm/xen/xencomm.h
new file mode 100644
index 00000000000..cded677bebf
--- /dev/null
+++ b/arch/ia64/include/asm/xen/xencomm.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_IA64_XEN_XENCOMM_H
+#define _ASM_IA64_XEN_XENCOMM_H
+
+#include <xen/xencomm.h>
+#include <asm/pgtable.h>
+
+/* Must be called before any hypercall. */
+extern void xencomm_initialize(void);
+extern int xencomm_is_initialized(void);
+
+/* Check whether virtual contiguity implies physical contiguity for the
+ * passed virtual address. On ia64 this holds for the identity-mapped
+ * area in region 7 and for the piece of region 5 that is mapped by
+ * itr[IA64_TR_KERNEL]/dtr[IA64_TR_KERNEL].
+ */
+static inline int xencomm_is_phys_contiguous(unsigned long addr)
+{
+ return (PAGE_OFFSET <= addr &&
+ addr < (PAGE_OFFSET + (1UL << IA64_MAX_PHYS_BITS))) ||
+ (KERNEL_START <= addr &&
+ addr < KERNEL_START + KERNEL_TR_PAGE_SIZE);
+}
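+
+/* Editorial sketch: callers typically use this to choose between the cheap
+ * inline encoding and an allocated descriptor (hypothetical helper names):
+ *
+ *	if (xencomm_is_phys_contiguous((unsigned long)ptr))
+ *		desc = xencomm_create_inline(ptr);
+ *	else
+ *		desc = xencomm_create(ptr, size);
+ */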
+
+#endif /* _ASM_IA64_XEN_XENCOMM_H */