| role | identity | date |
|---|---|---|
| author | Ingo Molnar <mingo@elte.hu> | 2008-10-12 12:39:30 +0200 |
| committer | Ingo Molnar <mingo@elte.hu> | 2008-10-12 12:39:50 +0200 |
| commit | 4c7145a1ec1bb789d5f07e47510e8bda546a7c4a (patch) | |
| tree | e2767b77e5413473a3bba302237f4669a203f183 /include/asm-x86 | |
| parent | 74e91604b2452c15bbe72d77b37cf47ed0310d13 (diff) | |
| parent | fd048088306656824958e7783ffcee27e241b361 (diff) | |
Merge branch 'linus' into x86/spinlocks
Done to prevent this failure of an Octopus merge:

```
Added arch/arm/include/asm/byteorder.h in both, but differently.
ERROR: Merge conflict in arch/arm/include/asm/byteorder.h
Auto-merging include/asm-x86/spinlock.h
ERROR: Merge conflict in include/asm-x86/spinlock.h
fatal: merge program failed
```
Diffstat (limited to 'include/asm-x86')
36 files changed, 731 insertions, 155 deletions
```diff
diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h
index bd76299586b..392e17336be 100644
--- a/include/asm-x86/acpi.h
+++ b/include/asm-x86/acpi.h
@@ -140,6 +140,8 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
         boot_cpu_data.x86_model <= 0x05 &&
         boot_cpu_data.x86_mask < 0x0A)
         return 1;
+    else if (boot_cpu_has(X86_FEATURE_AMDC1E))
+        return 1;
     else
         return max_cstate;
 }
diff --git a/include/asm-x86/amd_iommu.h b/include/asm-x86/amd_iommu.h
index 783f43e5805..041d0db7da2 100644
--- a/include/asm-x86/amd_iommu.h
+++ b/include/asm-x86/amd_iommu.h
@@ -20,10 +20,13 @@
 #ifndef ASM_X86__AMD_IOMMU_H
 #define ASM_X86__AMD_IOMMU_H
 
+#include <linux/irqreturn.h>
+
 #ifdef CONFIG_AMD_IOMMU
 extern int amd_iommu_init(void);
 extern int amd_iommu_init_dma_ops(void);
 extern void amd_iommu_detect(void);
+extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
 #else
 static inline int amd_iommu_init(void) { return -ENODEV; }
 static inline void amd_iommu_detect(void) { }
diff --git a/include/asm-x86/amd_iommu_types.h b/include/asm-x86/amd_iommu_types.h
index 1ffa4e53c98..b3085869a17 100644
--- a/include/asm-x86/amd_iommu_types.h
+++ b/include/asm-x86/amd_iommu_types.h
@@ -37,6 +37,7 @@
 /* Capability offsets used by the driver */
 #define MMIO_CAP_HDR_OFFSET 0x00
 #define MMIO_RANGE_OFFSET 0x0c
+#define MMIO_MISC_OFFSET 0x10
 
 /* Masks, shifts and macros to parse the device range capability */
 #define MMIO_RANGE_LD_MASK 0xff000000
@@ -48,6 +49,7 @@
 #define MMIO_GET_LD(x)  (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
 #define MMIO_GET_FD(x)  (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
 #define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
+#define MMIO_MSI_NUM(x) ((x) & 0x1f)
 
 /* Flag masks for the AMD IOMMU exclusion range */
 #define MMIO_EXCL_ENABLE_MASK 0x01ULL
@@ -69,6 +71,25 @@
 /* MMIO status bits */
 #define MMIO_STATUS_COM_WAIT_INT_MASK 0x04
 
+/* event logging constants */
+#define EVENT_ENTRY_SIZE 0x10
+#define EVENT_TYPE_SHIFT 28
+#define EVENT_TYPE_MASK 0xf
+#define EVENT_TYPE_ILL_DEV 0x1
+#define EVENT_TYPE_IO_FAULT 0x2
+#define EVENT_TYPE_DEV_TAB_ERR 0x3
+#define EVENT_TYPE_PAGE_TAB_ERR 0x4
+#define EVENT_TYPE_ILL_CMD 0x5
+#define EVENT_TYPE_CMD_HARD_ERR 0x6
+#define EVENT_TYPE_IOTLB_INV_TO 0x7
+#define EVENT_TYPE_INV_DEV_REQ 0x8
+#define EVENT_DEVID_MASK 0xffff
+#define EVENT_DEVID_SHIFT 0
+#define EVENT_DOMID_MASK 0xffff
+#define EVENT_DOMID_SHIFT 0
+#define EVENT_FLAGS_MASK 0xfff
+#define EVENT_FLAGS_SHIFT 0x10
+
 /* feature control bits */
 #define CONTROL_IOMMU_EN 0x00ULL
 #define CONTROL_HT_TUN_EN 0x01ULL
@@ -109,6 +130,8 @@
 #define DEV_ENTRY_NMI_PASS 0xba
 #define DEV_ENTRY_LINT0_PASS 0xbe
 #define DEV_ENTRY_LINT1_PASS 0xbf
+#define DEV_ENTRY_MODE_MASK 0x07
+#define DEV_ENTRY_MODE_SHIFT 0x09
 
 /* constants to configure the command buffer */
 #define CMD_BUFFER_SIZE 8192
@@ -116,6 +139,10 @@
 #define MMIO_CMD_SIZE_SHIFT 56
 #define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
 
+/* constants for event buffer handling */
+#define EVT_BUFFER_SIZE 8192 /* 512 entries */
+#define EVT_LEN_MASK (0x9ULL << 56)
+
 #define PAGE_MODE_1_LEVEL 0x01
 #define PAGE_MODE_2_LEVEL 0x02
 #define PAGE_MODE_3_LEVEL 0x03
@@ -134,6 +161,7 @@
 #define IOMMU_MAP_SIZE_L3 (1ULL << 39)
 
 #define IOMMU_PTE_P  (1ULL << 0)
+#define IOMMU_PTE_TV (1ULL << 1)
 #define IOMMU_PTE_U  (1ULL << 59)
 #define IOMMU_PTE_FC (1ULL << 60)
 #define IOMMU_PTE_IR (1ULL << 61)
@@ -159,6 +187,9 @@
 
 #define MAX_DOMAIN_ID 65536
 
+/* FIXME: move this macro to <linux/pci.h> */
+#define PCI_BUS(x) (((x) >> 8) & 0xff)
+
 /*
  * This structure contains generic data for  IOMMU protection domains
  * independent of their use.
@@ -196,6 +227,15 @@ struct dma_ops_domain {
      * just calculate its address in constant time.
      */
     u64 **pte_pages;
+
+    /* This will be set to true when TLB needs to be flushed */
+    bool need_flush;
+
+    /*
+     * if this is a preallocated domain, keep the device for which it was
+     * preallocated in this variable
+     */
+    u16 target_dev;
 };
 
 /*
@@ -208,8 +248,9 @@ struct amd_iommu {
     /* locks the accesses to the hardware */
     spinlock_t lock;
 
-    /* device id of this IOMMU */
-    u16 devid;
+    /* Pointer to PCI device of this IOMMU */
+    struct pci_dev *dev;
+
     /*
      * Capability pointer. There could be more than one IOMMU per PCI
      * device function if there are more than one AMD IOMMU capability
@@ -225,6 +266,9 @@ struct amd_iommu {
     /* capabilities of that IOMMU read from ACPI */
     u32 cap;
 
+    /* pci domain of this IOMMU */
+    u16 pci_seg;
+
     /* first device this IOMMU handles. read from PCI */
     u16 first_device;
     /* last device this IOMMU handles. read from PCI */
@@ -240,9 +284,19 @@ struct amd_iommu {
     /* size of command buffer */
     u32 cmd_buf_size;
 
+    /* event buffer virtual address */
+    u8 *evt_buf;
+    /* size of event buffer */
+    u32 evt_buf_size;
+    /* MSI number for event interrupt */
+    u16 evt_msi_num;
+
     /* if one, we need to send a completion wait command */
     int need_sync;
 
+    /* true if interrupts for this IOMMU are already enabled */
+    bool int_enabled;
+
     /* default dma_ops domain for that IOMMU */
     struct dma_ops_domain *default_dom;
 };
@@ -322,6 +376,12 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
 /* will be 1 if device isolation is enabled */
 extern int amd_iommu_isolate;
 
+/*
+ * If true, the addresses will be flushed on unmap time, not when
+ * they are reused
+ */
+extern bool amd_iommu_unmap_flush;
+
 /* takes a PCI device id and prints it out in a readable form */
 static inline void print_devid(u16 devid, int nl)
 {
```
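The EVENT_* constants above describe the 16-byte entries the IOMMU writes into its new event log, four 32-bit words per entry. A minimal sketch of decoding one entry with these masks and shifts — the function name is made up, and the field-to-word mapping (devid in word 0, type/domid/flags in word 1) is stated as an assumption about how the driver consumes the log:

```c
#include <linux/kernel.h>
#include <linux/pci.h>

/* Decode one 16-byte event-log entry; 'event' points at four u32s.
 * Assumes devid lives in word 0 and type/flags in word 1. */
static void dump_iommu_event(u32 *event)
{
    int type  = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
    u16 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
    u16 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;

    if (type == EVENT_TYPE_IO_FAULT)
        printk(KERN_ERR "IO_PAGE_FAULT device=%02x:%02x.%x flags=%#x\n",
               PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), flags);
}
```

PCI_BUS() is the helper added (with a FIXME) in this very hunk; PCI_SLOT()/PCI_FUNC() already live in <linux/pci.h>.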
```diff
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index 1311c82b165..d76a0839abe 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -134,9 +134,7 @@ static inline void ack_x2APIC_irq(void)
 static inline void ack_APIC_irq(void)
 {
     /*
-     * ack_APIC_irq() actually gets compiled as a single instruction:
-     * - a single rmw on Pentium/82489DX
-     * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC)
+     * ack_APIC_irq() actually gets compiled as a single instruction
      * ... yummie.
      */
diff --git a/include/asm-x86/asm.h b/include/asm-x86/asm.h
index 2439ae49e8a..e1355f44d7c 100644
--- a/include/asm-x86/asm.h
+++ b/include/asm-x86/asm.h
@@ -20,17 +20,22 @@
 #define _ASM_PTR __ASM_SEL(.long, .quad)
 #define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
 
-#define _ASM_MOV_UL __ASM_SIZE(mov)
+#define _ASM_MOV __ASM_SIZE(mov)
 #define _ASM_INC __ASM_SIZE(inc)
 #define _ASM_DEC __ASM_SIZE(dec)
 #define _ASM_ADD __ASM_SIZE(add)
 #define _ASM_SUB __ASM_SIZE(sub)
 #define _ASM_XADD __ASM_SIZE(xadd)
+
 #define _ASM_AX __ASM_REG(ax)
 #define _ASM_BX __ASM_REG(bx)
 #define _ASM_CX __ASM_REG(cx)
 #define _ASM_DX __ASM_REG(dx)
+#define _ASM_SP __ASM_REG(sp)
+#define _ASM_BP __ASM_REG(bp)
+#define _ASM_SI __ASM_REG(si)
+#define _ASM_DI __ASM_REG(di)
 
 /* Exception table entry */
 # define _ASM_EXTABLE(from,to) \
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index 61989b93b47..451a74762bd 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -424,16 +424,6 @@ static inline int fls(int x)
 
 #undef ADDR
 
-static inline void set_bit_string(unsigned long *bitmap,
-        unsigned long i, int len)
-{
-    unsigned long end = i + len;
-    while (i < end) {
-        __set_bit(i, bitmap);
-        i++;
-    }
-}
-
 #ifdef __KERNEL__
 
 #include <asm-generic/bitops/sched.h>
diff --git a/include/asm-x86/bugs.h b/include/asm-x86/bugs.h
index ae514c76a96..dc604985f2a 100644
--- a/include/asm-x86/bugs.h
+++ b/include/asm-x86/bugs.h
@@ -3,7 +3,7 @@
 
 extern void check_bugs(void);
 
-#ifdef CONFIG_CPU_SUP_INTEL_32
+#if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32)
 int ppro_with_ram_bug(void);
 #else
 static inline int ppro_with_ram_bug(void) { return 0; }
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index 59859cb28a3..68840ef1b35 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -24,6 +24,8 @@
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
     memcpy((dst), (src), (len))
 
+#define PG_non_WB PG_arch_1
+PAGEFLAG(NonWB, non_WB)
 
 /*
  * The set_memory_* API can be used to change various attributes of a virtual
@@ -66,6 +68,9 @@ int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_np(unsigned long addr, int numpages);
 int set_memory_4k(unsigned long addr, int numpages);
 
+int set_memory_array_uc(unsigned long *addr, int addrinarray);
+int set_memory_array_wb(unsigned long *addr, int addrinarray);
+
 /*
  * For legacy compatibility with the old APIs, a few functions
  * are provided that work on a "struct page".
@@ -96,8 +101,6 @@ int set_pages_rw(struct page *page, int numpages);
 
 void clflush_cache_range(void *addr, unsigned int size);
 
-void cpa_init(void);
-
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
 extern const int rodata_test_data;
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 7ac4d93d20e..adfeae6586e 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -6,7 +6,7 @@
 
 #include <asm/required-features.h>
 
-#define NCAPINTS 8 /* N 32-bit words worth of info */
+#define NCAPINTS 9 /* N 32-bit words worth of info */
 
 /*
  * Note: If the comment begins with a quoted string, that string is used
@@ -80,6 +80,7 @@
 #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */
 #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */
 #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
 #define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
 #define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
 #define X86_FEATURE_SYSCALL32 (3*32+14) /* "" syscall in ia32 userspace */
@@ -89,6 +90,7 @@
 #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */
 #define X86_FEATURE_11AP (3*32+19) /* "" Bad local APIC aka 11AP */
 #define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
+#define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */
 #define X86_FEATURE_XTOPOLOGY (3*32+21) /* cpu topology enum extensions */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
@@ -150,6 +152,13 @@
  */
 #define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */
 
+/* Virtualization flags: Linux defined */
+#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
+#define X86_FEATURE_VNMI (8*32+ 1) /* Intel Virtual NMI */
+#define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */
+#define X86_FEATURE_EPT (8*32+ 3) /* Intel Extended Page Table */
+#define X86_FEATURE_VPID (8*32+ 4) /* Intel Virtual Processor ID */
+
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 
 #include <linux/bitops.h>
```
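Two hunks tie together here: cpufeature.h defines the synthetic X86_FEATURE_AMDC1E flag in the Linux-internal word 3, and acpi.h starts honoring it. The same test pattern in caller code, assuming only that CPU setup has already populated the flag:

```c
#include <asm/cpufeature.h>

/* Mirror of the acpi.h change: on AMD C1E systems, deeper ACPI
 * C-states are pointless, so cap the reported C-state at C1. */
static unsigned int cap_cstate(unsigned int max_cstate)
{
    if (boot_cpu_has(X86_FEATURE_AMDC1E))
        return 1;
    return max_cstate;
}
```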
```diff
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
index 5d200e78bd8..219c33d6361 100644
--- a/include/asm-x86/dma-mapping.h
+++ b/include/asm-x86/dma-mapping.h
@@ -9,12 +9,12 @@
 #include <linux/scatterlist.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
+#include <asm-generic/dma-coherent.h>
 
 extern dma_addr_t bad_dma_address;
 extern int iommu_merge;
-extern struct device fallback_dev;
+extern struct device x86_dma_fallback_dev;
 extern int panic_on_overflow;
-extern int force_iommu;
 
 struct dma_mapping_ops {
     int             (*mapping_error)(struct device *dev,
@@ -25,9 +25,6 @@ struct dma_mapping_ops {
                             void *vaddr, dma_addr_t dma_handle);
     dma_addr_t      (*map_single)(struct device *hwdev, phys_addr_t ptr,
                             size_t size, int direction);
-    /* like map_single, but doesn't check the device mask */
-    dma_addr_t      (*map_simple)(struct device *hwdev, phys_addr_t ptr,
-                            size_t size, int direction);
     void            (*unmap_single)(struct device *dev, dma_addr_t addr,
                             size_t size, int direction);
     void            (*sync_single_for_cpu)(struct device *hwdev,
@@ -68,7 +65,7 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
         return dma_ops;
     else
         return dev->archdata.dma_ops;
-#endif
+#endif /* ASM_X86__DMA_MAPPING_H */
 }
 
 /* Make sure we keep the same behaviour */
@@ -87,17 +84,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-             dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
-               void *vaddr, dma_addr_t dma_handle);
-
+#define dma_is_consistent(d, h) (1)
 
 extern int dma_supported(struct device *hwdev, u64 mask);
 extern int dma_set_mask(struct device *dev, u64 mask);
 
+extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+                    dma_addr_t *dma_addr, gfp_t flag);
+
 static inline dma_addr_t
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
            int direction)
@@ -247,7 +241,68 @@ static inline int dma_get_cache_alignment(void)
     return boot_cpu_data.x86_clflush_size;
 }
 
-#define dma_is_consistent(d, h) (1)
+static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
+                            gfp_t gfp)
+{
+    unsigned long dma_mask = 0;
 
-#include <asm-generic/dma-coherent.h>
-#endif /* ASM_X86__DMA_MAPPING_H */
+    dma_mask = dev->coherent_dma_mask;
+    if (!dma_mask)
+        dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
+
+    return dma_mask;
+}
+
+static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
+{
+#ifdef CONFIG_X86_64
+    unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
+
+    if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
+        gfp |= GFP_DMA32;
+#endif
+    return gfp;
+}
+
+static inline void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+           gfp_t gfp)
+{
+    struct dma_mapping_ops *ops = get_dma_ops(dev);
+    void *memory;
+
+    gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+
+    if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
+        return memory;
+
+    if (!dev) {
+        dev = &x86_dma_fallback_dev;
+        gfp |= GFP_DMA;
+    }
+
+    if (!is_device_dma_capable(dev))
+        return NULL;
+
+    if (!ops->alloc_coherent)
+        return NULL;
+
+    return ops->alloc_coherent(dev, size, dma_handle,
+                   dma_alloc_coherent_gfp_flags(dev, gfp));
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+                     void *vaddr, dma_addr_t bus)
+{
+    struct dma_mapping_ops *ops = get_dma_ops(dev);
+
+    WARN_ON(irqs_disabled());       /* for portability */
+
+    if (dma_release_from_coherent(dev, get_order(size), vaddr))
+        return;
+
+    if (ops->free_coherent)
+        ops->free_coherent(dev, size, vaddr, bus);
+}
+
+#endif
```
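dma_alloc_coherent() is now an inline wrapper that normalizes the gfp flags, substitutes the x86 fallback device for a NULL dev, and dispatches through the per-bus dma_mapping_ops. A hypothetical driver still calls it the usual way (the ring names and size are illustrative):

```c
#include <linux/dma-mapping.h>

#define RING_BYTES 4096 /* illustrative ring size */

/* Allocate a coherent descriptor ring for a hypothetical device.
 * The inline dma_alloc_coherent() above strips the zone flags from
 * 'gfp' and re-derives them from the device's coherent DMA mask. */
static void *alloc_ring(struct device *dev, dma_addr_t *ring_dma)
{
    return dma_alloc_coherent(dev, RING_BYTES, ring_dma, GFP_KERNEL);
}

/* Must not be called with interrupts disabled: the new
 * dma_free_coherent() WARNs on that, for portability. */
static void free_ring(struct device *dev, void *ring, dma_addr_t ring_dma)
{
    dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
}
```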
```diff
diff --git a/include/asm-x86/ds.h b/include/asm-x86/ds.h
index 6b27c686fa1..c3c953a45b2 100644
--- a/include/asm-x86/ds.h
+++ b/include/asm-x86/ds.h
@@ -2,71 +2,237 @@
  * Debug Store (DS) support
  *
  * This provides a low-level interface to the hardware's Debug Store
- * feature that is used for last branch recording (LBR) and
+ * feature that is used for branch trace store (BTS) and
  * precise-event based sampling (PEBS).
  *
- * Different architectures use a different DS layout/pointer size.
- * The below functions therefore work on a void*.
+ * It manages:
+ * - per-thread and per-cpu allocation of BTS and PEBS
+ * - buffer memory allocation (optional)
+ * - buffer overflow handling
+ * - buffer access
  *
+ * It assumes:
+ * - get_task_struct on all parameter tasks
+ * - current is allowed to trace parameter tasks
  *
- * Since there is no user for PEBS, yet, only LBR (or branch
- * trace store, BTS) is supported.
- *
- *
- * Copyright (C) 2007 Intel Corporation.
- * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
+ * Copyright (C) 2007-2008 Intel Corporation.
+ * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
  */
 
 #ifndef ASM_X86__DS_H
 #define ASM_X86__DS_H
 
+#ifdef CONFIG_X86_DS
+
 #include <linux/types.h>
 #include <linux/init.h>
 
-struct cpuinfo_x86;
+struct task_struct;
 
-/* a branch trace record entry
+/*
+ * Request BTS or PEBS
+ *
+ * Due to alignement constraints, the actual buffer may be slightly
+ * smaller than the requested or provided buffer.
  *
- * In order to unify the interface between various processor versions,
- * we use the below data structure for all processors.
+ * Returns 0 on success; -Eerrno otherwise
+ *
+ * task: the task to request recording for;
+ *       NULL for per-cpu recording on the current cpu
+ * base: the base pointer for the (non-pageable) buffer;
+ *       NULL if buffer allocation requested
+ * size: the size of the requested or provided buffer
+ * ovfl: pointer to a function to be called on buffer overflow;
+ *       NULL if cyclic buffer requested
  */
-enum bts_qualifier {
-    BTS_INVALID = 0,
-    BTS_BRANCH,
-    BTS_TASK_ARRIVES,
-    BTS_TASK_DEPARTS
-};
+typedef void (*ds_ovfl_callback_t)(struct task_struct *);
+extern int ds_request_bts(struct task_struct *task, void *base, size_t size,
+              ds_ovfl_callback_t ovfl);
+extern int ds_request_pebs(struct task_struct *task, void *base, size_t size,
+               ds_ovfl_callback_t ovfl);
+
+/*
+ * Release BTS or PEBS resources
+ *
+ * Frees buffers allocated on ds_request.
+ *
+ * Returns 0 on success; -Eerrno otherwise
+ *
+ * task: the task to release resources for;
+ *       NULL to release resources for the current cpu
+ */
+extern int ds_release_bts(struct task_struct *task);
+extern int ds_release_pebs(struct task_struct *task);
+
+/*
+ * Return the (array) index of the write pointer.
+ * (assuming an array of BTS/PEBS records)
+ *
+ * Returns -Eerrno on error
+ *
+ * task: the task to access;
+ *       NULL to access the current cpu
+ * pos (out): if not NULL, will hold the result
+ */
+extern int ds_get_bts_index(struct task_struct *task, size_t *pos);
+extern int ds_get_pebs_index(struct task_struct *task, size_t *pos);
+
+/*
+ * Return the (array) index one record beyond the end of the array.
+ * (assuming an array of BTS/PEBS records)
+ *
+ * Returns -Eerrno on error
+ *
+ * task: the task to access;
+ *       NULL to access the current cpu
+ * pos (out): if not NULL, will hold the result
+ */
+extern int ds_get_bts_end(struct task_struct *task, size_t *pos);
+extern int ds_get_pebs_end(struct task_struct *task, size_t *pos);
+
+/*
+ * Provide a pointer to the BTS/PEBS record at parameter index.
+ * (assuming an array of BTS/PEBS records)
+ *
+ * The pointer points directly into the buffer. The user is
+ * responsible for copying the record.
+ *
+ * Returns the size of a single record on success; -Eerrno on error
+ *
+ * task: the task to access;
+ *       NULL to access the current cpu
+ * index: the index of the requested record
+ * record (out): pointer to the requested record
+ */
+extern int ds_access_bts(struct task_struct *task,
+             size_t index, const void **record);
+extern int ds_access_pebs(struct task_struct *task,
+              size_t index, const void **record);
+
+/*
+ * Write one or more BTS/PEBS records at the write pointer index and
+ * advance the write pointer.
+ *
+ * If size is not a multiple of the record size, trailing bytes are
+ * zeroed out.
+ *
+ * May result in one or more overflow notifications.
+ *
+ * If called during overflow handling, that is, with index >=
+ * interrupt threshold, the write will wrap around.
+ *
+ * An overflow notification is given if and when the interrupt
+ * threshold is reached during or after the write.
+ *
+ * Returns the number of bytes written or -Eerrno.
+ *
+ * task: the task to access;
+ *       NULL to access the current cpu
+ * buffer: the buffer to write
+ * size: the size of the buffer
+ */
+extern int ds_write_bts(struct task_struct *task,
+            const void *buffer, size_t size);
+extern int ds_write_pebs(struct task_struct *task,
+             const void *buffer, size_t size);
+
+/*
+ * Same as ds_write_bts/pebs, but omit ownership checks.
+ *
+ * This is needed to have some other task than the owner of the
+ * BTS/PEBS buffer or the parameter task itself write into the
+ * respective buffer.
+ */
+extern int ds_unchecked_write_bts(struct task_struct *task,
+                  const void *buffer, size_t size);
+extern int ds_unchecked_write_pebs(struct task_struct *task,
+                   const void *buffer, size_t size);
+
+/*
+ * Reset the write pointer of the BTS/PEBS buffer.
+ *
+ * Returns 0 on success; -Eerrno on error
+ *
+ * task: the task to access;
+ *       NULL to access the current cpu
+ */
+extern int ds_reset_bts(struct task_struct *task);
+extern int ds_reset_pebs(struct task_struct *task);
+
+/*
+ * Clear the BTS/PEBS buffer and reset the write pointer.
+ * The entire buffer will be zeroed out.
+ *
+ * Returns 0 on success; -Eerrno on error
+ *
+ * task: the task to access;
+ *       NULL to access the current cpu
+ */
+extern int ds_clear_bts(struct task_struct *task);
+extern int ds_clear_pebs(struct task_struct *task);
+
+/*
+ * Provide the PEBS counter reset value.
+ *
+ * Returns 0 on success; -Eerrno on error
+ *
+ * task: the task to access;
+ *       NULL to access the current cpu
+ * value (out): the counter reset value
+ */
+extern int ds_get_pebs_reset(struct task_struct *task, u64 *value);
+
+/*
+ * Set the PEBS counter reset value.
+ *
+ * Returns 0 on success; -Eerrno on error
+ *
+ * task: the task to access;
+ *       NULL to access the current cpu
+ * value: the new counter reset value
+ */
+extern int ds_set_pebs_reset(struct task_struct *task, u64 value);
+
+/*
+ * Initialization
+ */
+struct cpuinfo_x86;
+extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *);
+
+
 
-struct bts_struct {
-    u64 qualifier;
-    union {
-        /* BTS_BRANCH */
-        struct {
-            u64 from_ip;
-            u64 to_ip;
-        } lbr;
-        /* BTS_TASK_ARRIVES or
-           BTS_TASK_DEPARTS */
-        u64 jiffies;
-    } variant;
+/*
+ * The DS context - part of struct thread_struct.
+ */
+struct ds_context {
+    /* pointer to the DS configuration; goes into MSR_IA32_DS_AREA */
+    unsigned char *ds;
+    /* the owner of the BTS and PEBS configuration, respectively */
+    struct task_struct *owner[2];
+    /* buffer overflow notification function for BTS and PEBS */
+    ds_ovfl_callback_t callback[2];
+    /* the original buffer address */
+    void *buffer[2];
+    /* the number of allocated pages for on-request allocated buffers */
+    unsigned int pages[2];
+    /* use count */
+    unsigned long count;
+    /* a pointer to the context location inside the thread_struct
+     * or the per_cpu context array */
+    struct ds_context **this;
+    /* a pointer to the task owning this context, or NULL, if the
+     * context is owned by a cpu */
+    struct task_struct *task;
 };
 
-/* Overflow handling mechanisms */
-#define DS_O_SIGNAL 1 /* send overflow signal */
-#define DS_O_WRAP 2 /* wrap around */
-
-extern int ds_allocate(void **, size_t);
-extern int ds_free(void **);
-extern int ds_get_bts_size(void *);
-extern int ds_get_bts_end(void *);
-extern int ds_get_bts_index(void *);
-extern int ds_set_overflow(void *, int);
-extern int ds_get_overflow(void *);
-extern int ds_clear(void *);
-extern int ds_read_bts(void *, int, struct bts_struct *);
-extern int ds_write_bts(void *, const struct bts_struct *);
-extern unsigned long ds_debugctl_mask(void);
-extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *c);
+/* called by exit_thread() to free leftover contexts */
+extern void ds_free(struct ds_context *context);
+
+#else /* CONFIG_X86_DS */
+
+#define ds_init_intel(config) do {} while (0)
+
+#endif /* CONFIG_X86_DS */
 
 #endif /* ASM_X86__DS_H */
```
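The reworked DS interface is request/release based and callback driven. A sketch of per-CPU BTS tracing under CONFIG_X86_DS, assuming a caller context that satisfies the preconditions listed in the header comment (the buffer size and handler body are illustrative):

```c
#include <asm/ds.h>

static void bts_overflow(struct task_struct *task)
{
    /* drain or rotate the trace buffer here */
}

/* Start per-cpu branch tracing on the current cpu: a NULL task selects
 * per-cpu mode, a NULL base asks the DS layer to allocate the buffer. */
static int start_cpu_bts(void)
{
    return ds_request_bts(NULL, NULL, 64 * 1024, bts_overflow);
}

static void stop_cpu_bts(void)
{
    ds_release_bts(NULL); /* also frees the buffer allocated above */
}
```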
```diff
diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h
index cd678b2d6a7..5c4745bec90 100644
--- a/include/asm-x86/elf.h
+++ b/include/asm-x86/elf.h
@@ -148,8 +148,9 @@ do { \
 
 static inline void start_ia32_thread(struct pt_regs *regs, u32 ip, u32 sp)
 {
-    asm volatile("movl %0,%%fs" :: "r" (0));
-    asm volatile("movl %0,%%es; movl %0,%%ds" : : "r" (__USER32_DS));
+    loadsegment(fs, 0);
+    loadsegment(ds, __USER32_DS);
+    loadsegment(es, __USER32_DS);
     load_gs_index(0);
     regs->ip = ip;
     regs->sp = sp;
diff --git a/include/asm-x86/gart.h b/include/asm-x86/gart.h
index 07f44584414..605edb39ef9 100644
--- a/include/asm-x86/gart.h
+++ b/include/asm-x86/gart.h
@@ -29,6 +29,8 @@ extern int fix_aperture;
 #define AMD64_GARTCACHECTL 0x9c
 #define AMD64_GARTEN (1<<0)
 
+extern int agp_amd64_init(void);
+
 static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
 {
     u32 tmp, ctl;
@@ -52,15 +54,15 @@ static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
         return 0;
 
     if (aper_base + aper_size > 0x100000000ULL) {
-        printk(KERN_ERR "Aperture beyond 4GB. Ignoring.\n");
+        printk(KERN_INFO "Aperture beyond 4GB. Ignoring.\n");
         return 0;
     }
     if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
-        printk(KERN_ERR "Aperture pointing to e820 RAM. Ignoring.\n");
+        printk(KERN_INFO "Aperture pointing to e820 RAM. Ignoring.\n");
         return 0;
     }
     if (aper_size < min_size) {
-        printk(KERN_ERR "Aperture too small (%d MB) than (%d MB)\n",
+        printk(KERN_INFO "Aperture too small (%d MB) than (%d MB)\n",
                aper_size>>20, min_size>>20);
         return 0;
     }
diff --git a/include/asm-x86/idle.h b/include/asm-x86/idle.h
index dc9c7944847..baa3f783d27 100644
--- a/include/asm-x86/idle.h
+++ b/include/asm-x86/idle.h
@@ -10,4 +10,6 @@ void idle_notifier_register(struct notifier_block *n);
 void enter_idle(void);
 void exit_idle(void);
 
+void c1e_remove_cpu(int cpu);
+
 #endif /* ASM_X86__IDLE_H */
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index e86f44148c6..546ad3110fe 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
@@ -6,6 +6,7 @@ extern void no_iommu_init(void);
 extern struct dma_mapping_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
+extern int dmar_disabled;
 
 extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len);
diff --git a/include/asm-x86/kgdb.h b/include/asm-x86/kgdb.h
index 83a7ee228ab..d283863354d 100644
--- a/include/asm-x86/kgdb.h
+++ b/include/asm-x86/kgdb.h
@@ -39,12 +39,13 @@ enum regnames {
     GDB_FS,         /* 14 */
     GDB_GS,         /* 15 */
 };
+#define NUMREGBYTES ((GDB_GS+1)*4)
 #else /* ! CONFIG_X86_32 */
-enum regnames {
+enum regnames64 {
     GDB_AX,         /* 0 */
-    GDB_DX,         /* 1 */
+    GDB_BX,         /* 1 */
     GDB_CX,         /* 2 */
-    GDB_BX,         /* 3 */
+    GDB_DX,         /* 3 */
     GDB_SI,         /* 4 */
     GDB_DI,         /* 5 */
     GDB_BP,         /* 6 */
@@ -58,18 +59,15 @@ enum regnames {
     GDB_R14,        /* 14 */
     GDB_R15,        /* 15 */
     GDB_PC,         /* 16 */
-    GDB_PS,         /* 17 */
 };
-#endif /* CONFIG_X86_32 */
 
-/*
- * Number of bytes of registers:
- */
-#ifdef CONFIG_X86_32
-# define NUMREGBYTES 64
-#else
-# define NUMREGBYTES ((GDB_PS+1)*8)
-#endif
+enum regnames32 {
+    GDB_PS = 34,
+    GDB_CS,
+    GDB_SS,
+};
+#define NUMREGBYTES ((GDB_SS+1)*4)
+#endif /* CONFIG_X86_32 */
 
 static inline void arch_kgdb_breakpoint(void)
 {
diff --git a/include/asm-x86/mach-rdc321x/gpio.h b/include/asm-x86/mach-rdc321x/gpio.h
index 6184561980f..94b6cdf532e 100644
--- a/include/asm-x86/mach-rdc321x/gpio.h
+++ b/include/asm-x86/mach-rdc321x/gpio.h
@@ -1,6 +1,8 @@
 #ifndef ASM_X86__MACH_RDC321X__GPIO_H
 #define ASM_X86__MACH_RDC321X__GPIO_H
 
+#include <linux/kernel.h>
+
 extern int rdc_gpio_get_value(unsigned gpio);
 extern void rdc_gpio_set_value(unsigned gpio, int value);
 extern int rdc_gpio_direction_input(unsigned gpio);
@@ -18,6 +20,7 @@ static inline int gpio_request(unsigned gpio, const char *label)
 
 static inline void gpio_free(unsigned gpio)
 {
+    might_sleep();
     rdc_gpio_free(gpio);
 }
```
```diff
diff --git a/include/asm-x86/mmu.h b/include/asm-x86/mmu.h
index a30d7a9c829..9d5aff14334 100644
--- a/include/asm-x86/mmu.h
+++ b/include/asm-x86/mmu.h
@@ -7,14 +7,9 @@
 /*
  * The x86 doesn't have a mmu context, but
  * we put the segment information here.
- *
- * cpu_vm_mask is used to optimize ldt flushing.
  */
 typedef struct {
     void *ldt;
-#ifdef CONFIG_X86_64
-    rwlock_t ldtlock;
-#endif
     int size;
     struct mutex lock;
     void *vdso;
diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h
index 118da365e37..be2241a818f 100644
--- a/include/asm-x86/mpspec.h
+++ b/include/asm-x86/mpspec.h
@@ -5,11 +5,12 @@
 
 #include <asm/mpspec_def.h>
 
+extern int apic_version[MAX_APICS];
+
 #ifdef CONFIG_X86_32
 #include <mach_mpspec.h>
 
 extern unsigned int def_to_bigsmp;
-extern int apic_version[MAX_APICS];
 extern u8 apicid_2_node[];
 extern int pic_mode;
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h
index 3052f058ab0..0bb43301a20 100644
--- a/include/asm-x86/msr-index.h
+++ b/include/asm-x86/msr-index.h
@@ -176,6 +176,7 @@
 #define MSR_IA32_TSC 0x00000010
 #define MSR_IA32_PLATFORM_ID 0x00000017
 #define MSR_IA32_EBL_CR_POWERON 0x0000002a
+#define MSR_IA32_FEATURE_CONTROL 0x0000003a
 
 #define MSR_IA32_APICBASE 0x0000001b
 #define MSR_IA32_APICBASE_BSP (1<<8)
@@ -310,4 +311,19 @@
 /* Geode defined MSRs */
 #define MSR_GEODE_BUSCONT_CONF0 0x00001900
 
+/* Intel VT MSRs */
+#define MSR_IA32_VMX_BASIC 0x00000480
+#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
+#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482
+#define MSR_IA32_VMX_EXIT_CTLS 0x00000483
+#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484
+#define MSR_IA32_VMX_MISC 0x00000485
+#define MSR_IA32_VMX_CR0_FIXED0 0x00000486
+#define MSR_IA32_VMX_CR0_FIXED1 0x00000487
+#define MSR_IA32_VMX_CR4_FIXED0 0x00000488
+#define MSR_IA32_VMX_CR4_FIXED1 0x00000489
+#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a
+#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
+#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
+
 #endif /* ASM_X86__MSR_INDEX_H */
```
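The new MSR_IA32_VMX_* indices expose the VMX capability MSRs by name. A minimal sketch of reading one of them with the kernel's rdmsrl() helper — the function is hypothetical, and it assumes the CPU actually supports VMX (the read faults otherwise):

```c
#include <asm/msr.h>

/* The VMCS revision identifier lives in the low 31 bits of
 * MSR_IA32_VMX_BASIC (an architectural fact, not from this patch). */
static u32 vmcs_revision_id(void)
{
    u64 basic;

    rdmsrl(MSR_IA32_VMX_BASIC, basic);
    return (u32)basic & 0x7fffffff;
}
```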
```diff
diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h
index f8b76f38390..d5e715f024d 100644
--- a/include/asm-x86/nmi.h
+++ b/include/asm-x86/nmi.h
@@ -34,6 +34,7 @@ extern void stop_apic_nmi_watchdog(void *);
 extern void disable_timer_nmi_watchdog(void);
 extern void enable_timer_nmi_watchdog(void);
 extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason);
+extern void cpu_nmi_set_wd_enabled(void);
 
 extern atomic_t nmi_active;
 extern unsigned int nmi_watchdog;
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h
index 79544e6ffb8..c9157477675 100644
--- a/include/asm-x86/page.h
+++ b/include/asm-x86/page.h
@@ -57,6 +57,7 @@ typedef struct { pgdval_t pgd; } pgd_t;
 typedef struct { pgprotval_t pgprot; } pgprot_t;
 
 extern int page_is_ram(unsigned long pagenr);
+extern int pagerange_is_ram(unsigned long start, unsigned long end);
 extern int devmem_is_allowed(unsigned long pagenr);
 extern void map_devmem(unsigned long pfn, unsigned long size,
                pgprot_t vma_prot);
diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h
index f32062a821c..72f7305682c 100644
--- a/include/asm-x86/page_32.h
+++ b/include/asm-x86/page_32.h
@@ -89,9 +89,6 @@ extern int nx_enabled;
 extern unsigned int __VMALLOC_RESERVE;
 extern int sysctl_legacy_va_layout;
 
-#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
-#define MAXMEM (-__PAGE_OFFSET - __VMALLOC_RESERVE)
-
 extern void find_low_pfn_range(void);
 extern unsigned long init_memory_mapping(unsigned long start,
                      unsigned long end);
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index cba612c89e0..d7d358a4399 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -252,13 +252,13 @@ struct pv_mmu_ops {
      * Hooks for allocating/releasing pagetable pages when they're
      * attached to a pagetable
      */
-    void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
-    void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
-    void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
-    void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
-    void (*release_pte)(u32 pfn);
-    void (*release_pmd)(u32 pfn);
-    void (*release_pud)(u32 pfn);
+    void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
+    void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
+    void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
+    void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
+    void (*release_pte)(unsigned long pfn);
+    void (*release_pmd)(unsigned long pfn);
+    void (*release_pud)(unsigned long pfn);
 
     /* Pagetable manipulation functions */
     void (*set_pte)(pte_t *ptep, pte_t pteval);
@@ -986,35 +986,35 @@ static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
     PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
 }
 
-static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
 {
     PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
 }
-static inline void paravirt_release_pte(unsigned pfn)
+static inline void paravirt_release_pte(unsigned long pfn)
 {
     PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
 }
 
-static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
 {
     PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
 }
 
-static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
-                        unsigned start, unsigned count)
+static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
+                        unsigned long start, unsigned long count)
 {
     PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
 }
-static inline void paravirt_release_pmd(unsigned pfn)
+static inline void paravirt_release_pmd(unsigned long pfn)
 {
     PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
 }
 
-static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
 {
     PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
 }
-static inline void paravirt_release_pud(unsigned pfn)
+static inline void paravirt_release_pud(unsigned long pfn)
 {
     PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
 }
diff --git a/include/asm-x86/pgtable-2level.h b/include/asm-x86/pgtable-2level.h
index 60440b19162..81762081dcd 100644
--- a/include/asm-x86/pgtable-2level.h
+++ b/include/asm-x86/pgtable-2level.h
@@ -53,9 +53,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
 #endif
 
-#define pte_page(x) pfn_to_page(pte_pfn(x))
 #define pte_none(x) (!(x).pte_low)
-#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
 
 /*
  * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h
index e713bd5f39a..75f4276b5dd 100644
--- a/include/asm-x86/pgtable-3level.h
+++ b/include/asm-x86/pgtable-3level.h
@@ -151,18 +151,11 @@ static inline int pte_same(pte_t a, pte_t b)
     return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
 }
 
-#define pte_page(x) pfn_to_page(pte_pfn(x))
-
 static inline int pte_none(pte_t pte)
 {
     return !pte.pte_low && !pte.pte_high;
 }
 
-static inline unsigned long pte_pfn(pte_t pte)
-{
-    return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
-}
-
 /*
  * Bits 0, 6 and 7 are taken in the low part of the pte,
  * put the 32 bits of offset into the high part.
```
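The paravirt hooks now take the page frame number as unsigned long rather than u32. A toy standalone illustration of the truncation the old prototypes risked on x86-64, where physical addresses can exceed what a 32-bit pfn encodes — the address value is made up:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t phys = 1ULL << 45;              /* a made-up 32 TB address */
    uint64_t pfn  = phys >> 12;              /* 0x200000000: needs > 32 bits */
    uint32_t old  = (uint32_t)(phys >> 12);  /* a u32 pfn silently wraps to 0 */

    printf("pfn=%#llx truncated=%#x\n", (unsigned long long)pfn, old);
    return 0;
}
```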
```diff
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 57d919a2d79..ed932453ef2 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -19,6 +19,7 @@
 #define _PAGE_BIT_UNUSED3 11
 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
 #define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
 
 #define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
@@ -36,6 +37,7 @@
 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
 #define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
+#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
 #define __HAVE_ARCH_PTE_SPECIAL
 
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
@@ -130,6 +132,17 @@
 #define __S110 PAGE_SHARED_EXEC
 #define __S111 PAGE_SHARED_EXEC
 
+/*
+ * early identity mapping pte attrib macros.
+ */
+#ifdef CONFIG_X86_64
+#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
+#else
+#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
+#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
+#endif
+
 #ifndef __ASSEMBLY__
 
 /*
@@ -186,6 +199,13 @@ static inline int pte_special(pte_t pte)
     return pte_val(pte) & _PAGE_SPECIAL;
 }
 
+static inline unsigned long pte_pfn(pte_t pte)
+{
+    return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
+#define pte_page(pte) pfn_to_page(pte_pfn(pte))
+
 static inline int pmd_large(pmd_t pte)
 {
     return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 45c8235400f..8de702dc7d6 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -57,8 +57,7 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
  * area for the same reason. ;)
  */
 #define VMALLOC_OFFSET (8 * 1024 * 1024)
-#define VMALLOC_START (((unsigned long)high_memory + 2 * VMALLOC_OFFSET - 1) \
-            & ~(VMALLOC_OFFSET - 1))
+#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET)
 #ifdef CONFIG_X86_PAE
 #define LAST_PKMAP 512
 #else
@@ -74,6 +73,8 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
 #endif
 
+#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
+
 /*
  * Define this if things work differently on an i386 and an i486:
  * it will (on an i486) warn about kernel memory accesses that are
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index e3dcf7a08a0..fde9770e53d 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -175,8 +175,6 @@ static inline int pmd_bad(pmd_t pmd)
 #define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))
 
 #define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */
-#define pte_page(x) pfn_to_page(pte_pfn((x)))
-#define pte_pfn(x) ((pte_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 
 /*
  * Macro to mark a page protection value as "uncacheable".
```
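pte_pfn() and pte_page() are now defined once in pgtable.h instead of per pagetable flavor; callers are unaffected. A short sketch of the usual pattern, with a hypothetical present pte in hand:

```c
#include <linux/mm.h>
#include <asm/pgtable.h>

/* For a present pte, recover the pfn and its struct page; pte_pfn()
 * masks the flag bits and shifts by PAGE_SHIFT, as defined above. */
static struct page *page_of_pte(pte_t pte)
{
    unsigned long pfn = pte_pfn(pte);

    return pfn_valid(pfn) ? pte_page(pte) : NULL;
}
```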
```diff
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index bbbbe1fc5ce..c7d35464a4b 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -20,6 +20,7 @@ struct mm_struct;
 #include <asm/msr.h>
 #include <asm/desc_defs.h>
 #include <asm/nops.h>
+#include <asm/ds.h>
 
 #include <linux/personality.h>
 #include <linux/cpumask.h>
@@ -75,9 +76,9 @@ struct cpuinfo_x86 {
     int x86_tlbsize;
     __u8 x86_virt_bits;
     __u8 x86_phys_bits;
+#endif
     /* CPUID returned core id bits: */
     __u8 x86_coreid_bits;
-#endif
     /* Max extended CPUID function supported: */
     __u32 extended_cpuid_level;
     /* Maximum supported CPUID level, -1=no CPUID: */
@@ -166,11 +167,7 @@ extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern unsigned short num_cache_leaves;
 
 extern void detect_extended_topology(struct cpuinfo_x86 *c);
-#if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64)
 extern void detect_ht(struct cpuinfo_x86 *c);
-#else
-static inline void detect_ht(struct cpuinfo_x86 *c) {}
-#endif
 
 static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
                 unsigned int *ecx, unsigned int *edx)
@@ -434,9 +431,14 @@ struct thread_struct {
     unsigned io_bitmap_max;
 /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
     unsigned long debugctlmsr;
-/* Debug Store - if not 0 points to a DS Save Area configuration;
- * goes into MSR_IA32_DS_AREA */
-    unsigned long ds_area_msr;
+#ifdef CONFIG_X86_DS
+/* Debug Store context; see include/asm-x86/ds.h; goes into MSR_IA32_DS_AREA */
+    struct ds_context *ds_ctx;
+#endif /* CONFIG_X86_DS */
+#ifdef CONFIG_X86_PTRACE_BTS
+/* the signal to send on a bts buffer overflow */
+    unsigned int bts_ovfl_signal;
+#endif /* CONFIG_X86_PTRACE_BTS */
 };
 
 static inline unsigned long native_get_debugreg(int regno)
```
```diff
diff --git a/include/asm-x86/ptrace-abi.h b/include/asm-x86/ptrace-abi.h
index d0cf3344a58..4298b8882a7 100644
--- a/include/asm-x86/ptrace-abi.h
+++ b/include/asm-x86/ptrace-abi.h
@@ -80,8 +80,9 @@
 
 #define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */
 
-#ifndef __ASSEMBLY__
+#ifdef CONFIG_X86_PTRACE_BTS
 
+#ifndef __ASSEMBLY__
 #include <asm/types.h>
 
 /* configuration/status structure used in PTRACE_BTS_CONFIG and
@@ -97,20 +98,20 @@ struct ptrace_bts_config {
     /* actual size of bts_struct in bytes */
     __u32 bts_size;
 };
-#endif
+#endif /* __ASSEMBLY__ */
 
 #define PTRACE_BTS_O_TRACE 0x1 /* branch trace */
 #define PTRACE_BTS_O_SCHED 0x2 /* scheduling events w/ jiffies */
 #define PTRACE_BTS_O_SIGNAL 0x4 /* send SIG<signal> on buffer overflow
                     instead of wrapping around */
-#define PTRACE_BTS_O_CUT_SIZE 0x8 /* cut requested size to max available
-                    instead of failing */
+#define PTRACE_BTS_O_ALLOC 0x8 /* (re)allocate buffer */
 
 #define PTRACE_BTS_CONFIG 40
 /* Configure branch trace recording.
    ADDR points to a struct ptrace_bts_config.
    DATA gives the size of that buffer.
-   A new buffer is allocated, iff the size changes.
+   A new buffer is allocated, if requested in the flags.
+   An overflow signal may only be requested for new buffers.
    Returns the number of bytes read. */
 
 #define PTRACE_BTS_STATUS 41
@@ -119,7 +120,7 @@ struct ptrace_bts_config {
    Returns the number of bytes written. */
 
 #define PTRACE_BTS_SIZE 42
-/* Return the number of available BTS records.
+/* Return the number of available BTS records for draining.
    DATA and ADDR are ignored. */
 
 #define PTRACE_BTS_GET 43
@@ -139,5 +140,6 @@ struct ptrace_bts_config {
    BTS records are read from oldest to newest.
    Returns number of BTS records drained.
 */
+#endif /* CONFIG_X86_PTRACE_BTS */
 
 #endif /* ASM_X86__PTRACE_ABI_H */
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h
index 66ff7bd4737..d64a6109716 100644
--- a/include/asm-x86/ptrace.h
+++ b/include/asm-x86/ptrace.h
@@ -127,14 +127,48 @@ struct pt_regs {
 #endif /* __KERNEL__ */
 #endif /* !__i386__ */
 
+
+#ifdef CONFIG_X86_PTRACE_BTS
+/* a branch trace record entry
+ *
+ * In order to unify the interface between various processor versions,
+ * we use the below data structure for all processors.
+ */
+enum bts_qualifier {
+    BTS_INVALID = 0,
+    BTS_BRANCH,
+    BTS_TASK_ARRIVES,
+    BTS_TASK_DEPARTS
+};
+
+struct bts_struct {
+    __u64 qualifier;
+    union {
+        /* BTS_BRANCH */
+        struct {
+            __u64 from_ip;
+            __u64 to_ip;
+        } lbr;
+        /* BTS_TASK_ARRIVES or
+           BTS_TASK_DEPARTS */
+        __u64 jiffies;
+    } variant;
+};
+#endif /* CONFIG_X86_PTRACE_BTS */
+
 #ifdef __KERNEL__
 
-/* the DS BTS struct is used for ptrace as well */
-#include <asm/ds.h>
+#include <linux/init.h>
 
+struct cpuinfo_x86;
 struct task_struct;
 
+#ifdef CONFIG_X86_PTRACE_BTS
+extern void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *);
 extern void ptrace_bts_take_timestamp(struct task_struct *, enum bts_qualifier);
+#else
+#define ptrace_bts_init_intel(config) do {} while (0)
+#endif /* CONFIG_X86_PTRACE_BTS */
 
 extern unsigned long profile_pc(struct pt_regs *regs);
@@ -216,6 +250,11 @@ static inline unsigned long frame_pointer(struct pt_regs *regs)
     return regs->bp;
 }
 
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+    return regs->sp;
+}
+
 /*
  * These are defined as per linux/ptrace.h, which see.
  */
```
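Under the revised ABI, a tracer must now request buffer allocation explicitly with PTRACE_BTS_O_ALLOC. A userspace sketch of enabling branch tracing on a stopped tracee — the mirror struct's first three fields are an assumption (only bts_size is visible in the hunk above), and the request number and flag values come straight from the header:

```c
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Userspace mirror of struct ptrace_bts_config (fields assumed). */
struct bts_config {
    uint32_t size;     /* requested buffer size in bytes */
    uint32_t flags;    /* PTRACE_BTS_O_* bits */
    uint32_t signal;   /* overflow signal, 0 if unused */
    uint32_t bts_size; /* filled in: size of one bts_struct */
};

#define PTRACE_BTS_CONFIG  40
#define PTRACE_BTS_O_TRACE 0x1
#define PTRACE_BTS_O_ALLOC 0x8 /* (re)allocate buffer, per the hunk */

/* Ask the kernel to allocate a 4 KiB branch-trace buffer for 'pid';
 * ADDR points at the config, DATA gives its size. */
static long enable_bts(pid_t pid)
{
    struct bts_config cfg = {
        .size  = 4096,
        .flags = PTRACE_BTS_O_TRACE | PTRACE_BTS_O_ALLOC,
    };

    return ptrace(PTRACE_BTS_CONFIG, pid, &cfg, (void *)sizeof(cfg));
}
```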
```diff
diff --git a/include/asm-x86/resume-trace.h b/include/asm-x86/resume-trace.h
index 519a8ecbfc9..e39376d7de5 100644
--- a/include/asm-x86/resume-trace.h
+++ b/include/asm-x86/resume-trace.h
@@ -7,7 +7,7 @@
 do {                                \
     if (pm_trace_enabled) {                 \
         const void *tracedata;              \
-        asm volatile(_ASM_MOV_UL " $1f,%0\n"        \
+        asm volatile(_ASM_MOV " $1f,%0\n"       \
                  ".section .tracedata,\"a\"\n"  \
                  "1:\t.word %c1\n\t"        \
                  _ASM_PTR " %c2\n"          \
diff --git a/include/asm-x86/syscall.h b/include/asm-x86/syscall.h
new file mode 100644
index 00000000000..04c47dc5597
--- /dev/null
+++ b/include/asm-x86/syscall.h
@@ -0,0 +1,211 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * Copyright (C) 2008 Red Hat, Inc.  All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H 1
+
+#include <linux/sched.h>
+#include <linux/err.h>
+
+static inline long syscall_get_nr(struct task_struct *task,
+                  struct pt_regs *regs)
+{
+    /*
+     * We always sign-extend a -1 value being set here,
+     * so this is always either -1L or a syscall number.
+     */
+    return regs->orig_ax;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+                    struct pt_regs *regs)
+{
+    regs->ax = regs->orig_ax;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+                     struct pt_regs *regs)
+{
+    unsigned long error = regs->ax;
+#ifdef CONFIG_IA32_EMULATION
+    /*
+     * TS_COMPAT is set for 32-bit syscall entries and then
+     * remains set until we return to user mode.
+     */
+    if (task_thread_info(task)->status & TS_COMPAT)
+        /*
+         * Sign-extend the value so (int)-EFOO becomes (long)-EFOO
+         * and will match correctly in comparisons.
+         */
+        error = (long) (int) error;
+#endif
+    return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+                        struct pt_regs *regs)
+{
+    return regs->ax;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+                        struct pt_regs *regs,
+                        int error, long val)
+{
+    regs->ax = (long) error ?: val;
+}
+
+#ifdef CONFIG_X86_32
+
+static inline void syscall_get_arguments(struct task_struct *task,
+                     struct pt_regs *regs,
+                     unsigned int i, unsigned int n,
+                     unsigned long *args)
+{
+    BUG_ON(i + n > 6);
+    memcpy(args, &regs->bx + i, n * sizeof(args[0]));
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+                     struct pt_regs *regs,
+                     unsigned int i, unsigned int n,
+                     const unsigned long *args)
+{
+    BUG_ON(i + n > 6);
+    memcpy(&regs->bx + i, args, n * sizeof(args[0]));
+}
+
+#else   /* CONFIG_X86_64 */
+
+static inline void syscall_get_arguments(struct task_struct *task,
+                     struct pt_regs *regs,
+                     unsigned int i, unsigned int n,
+                     unsigned long *args)
+{
+# ifdef CONFIG_IA32_EMULATION
+    if (task_thread_info(task)->status & TS_COMPAT)
+        switch (i + n) {
+        case 6:
+            if (!n--) break;
+            *args++ = regs->bp;
+        case 5:
+            if (!n--) break;
+            *args++ = regs->di;
+        case 4:
+            if (!n--) break;
+            *args++ = regs->si;
+        case 3:
+            if (!n--) break;
+            *args++ = regs->dx;
+        case 2:
+            if (!n--) break;
+            *args++ = regs->cx;
+        case 1:
+            if (!n--) break;
+            *args++ = regs->bx;
+        case 0:
+            if (!n--) break;
+        default:
+            BUG();
+            break;
+        }
+    else
+# endif
+        switch (i + n) {
+        case 6:
+            if (!n--) break;
+            *args++ = regs->r9;
+        case 5:
+            if (!n--) break;
+            *args++ = regs->r8;
+        case 4:
+            if (!n--) break;
+            *args++ = regs->r10;
+        case 3:
+            if (!n--) break;
+            *args++ = regs->dx;
+        case 2:
+            if (!n--) break;
+            *args++ = regs->si;
+        case 1:
+            if (!n--) break;
+            *args++ = regs->di;
+        case 0:
+            if (!n--) break;
+        default:
+            BUG();
+            break;
+        }
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+                     struct pt_regs *regs,
+                     unsigned int i, unsigned int n,
+                     const unsigned long *args)
+{
+# ifdef CONFIG_IA32_EMULATION
+    if (task_thread_info(task)->status & TS_COMPAT)
+        switch (i + n) {
+        case 6:
+            if (!n--) break;
+            regs->bp = *args++;
+        case 5:
+            if (!n--) break;
+            regs->di = *args++;
+        case 4:
+            if (!n--) break;
+            regs->si = *args++;
+        case 3:
+            if (!n--) break;
+            regs->dx = *args++;
+        case 2:
+            if (!n--) break;
+            regs->cx = *args++;
+        case 1:
+            if (!n--) break;
+            regs->bx = *args++;
+        case 0:
+            if (!n--) break;
+        default:
+            BUG();
+        }
+    else
+# endif
+        switch (i + n) {
+        case 6:
+            if (!n--) break;
+            regs->r9 = *args++;
+        case 5:
+            if (!n--) break;
+            regs->r8 = *args++;
+        case 4:
+            if (!n--) break;
+            regs->r10 = *args++;
+        case 3:
+            if (!n--) break;
+            regs->dx = *args++;
+        case 2:
+            if (!n--) break;
+            regs->si = *args++;
+        case 1:
+            if (!n--) break;
+            regs->di = *args++;
+        case 0:
+            if (!n--) break;
+        default:
+            BUG();
+        }
+}
+
+#endif  /* CONFIG_X86_32 */
+
+#endif  /* _ASM_SYSCALL_H */
```
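The new asm-x86/syscall.h implements the asm-generic/syscall.h contract for tracers. A sketch of how a hypothetical tracing hook might consume it, for a task stopped at syscall entry:

```c
#include <linux/kernel.h>
#include <asm/syscall.h>

/* Log the number and first three arguments of the syscall 'task'
 * is stopped in; 'regs' must be the task's saved user registers. */
static void log_syscall(struct task_struct *task, struct pt_regs *regs)
{
    unsigned long args[3];

    syscall_get_arguments(task, regs, 0, 3, args);
    printk(KERN_DEBUG "syscall %ld (%lx, %lx, %lx)\n",
           syscall_get_nr(task, regs), args[0], args[1], args[2]);
}
```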
```diff
diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h
index 30586f2ee55..3f4e52bb77f 100644
--- a/include/asm-x86/thread_info.h
+++ b/include/asm-x86/thread_info.h
@@ -71,6 +71,7 @@ struct thread_info {
  * Warning: layout of LSW is hardcoded in entry.S
  */
 #define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
 #define TIF_SIGPENDING 2 /* signal pending */
 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
 #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
@@ -93,6 +94,7 @@ struct thread_info {
 #define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */
 
 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
 #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
 #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -133,7 +135,7 @@ struct thread_info {
 
 /* Only used for 64 bit */
 #define _TIF_DO_NOTIFY_MASK \
-    (_TIF_SIGPENDING|_TIF_MCE_NOTIFY)
+    (_TIF_SIGPENDING|_TIF_MCE_NOTIFY|_TIF_NOTIFY_RESUME)
 
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW \
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index 5cfd2951c9e..c96c1f5d07a 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -7,6 +7,7 @@
 #include <linux/compiler.h>
 #include <linux/errno.h>
 #include <linux/prefetch.h>
+#include <linux/lockdep.h>
 #include <asm/page.h>
 
 /*
```
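With TIF_NOTIFY_RESUME now part of _TIF_DO_NOTIFY_MASK, setting the flag on a task forces a callback on its way back to user mode. A minimal sketch, assuming a caller that already holds a valid task reference:

```c
#include <linux/sched.h>

/* Force 'task' through the notify-resume path on its next return to
 * user mode; the exit code checks _TIF_DO_NOTIFY_MASK as shown above. */
static void kick_notify_resume(struct task_struct *task)
{
    set_tsk_thread_flag(task, TIF_NOTIFY_RESUME);
}
```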