From 7974891db234467eaf1fec613ec0129cb4ac2332 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 28 Jun 2010 14:15:54 +0200 Subject: x86: Always use irq stacks IRQ stacks provide much better safety against unexpected stack use from interrupts, at the minimal downside of slightly higher memory usage. Enable irq stacks also for the default 8k stack on 32-bit kernels to minimize the problem of stack overflows through interrupt activity. This is what the 64-bit kernel and various other architectures already do. Signed-off-by: Christoph Hellwig LKML-Reference: <20100628121554.GA6605@lst.de> Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/irq.h | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index 5458380b6ef..0bf5b008365 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h @@ -19,18 +19,16 @@ static inline int irq_canonicalize(int irq) # define ARCH_HAS_NMI_WATCHDOG #endif -#ifdef CONFIG_4KSTACKS - extern void irq_ctx_init(int cpu); - extern void irq_ctx_exit(int cpu); -# define __ARCH_HAS_DO_SOFTIRQ +#ifdef CONFIG_X86_32 +extern void irq_ctx_init(int cpu); +extern void irq_ctx_exit(int cpu); #else # define irq_ctx_init(cpu) do { } while (0) # define irq_ctx_exit(cpu) do { } while (0) -# ifdef CONFIG_X86_64 -# define __ARCH_HAS_DO_SOFTIRQ -# endif #endif +#define __ARCH_HAS_DO_SOFTIRQ + #ifdef CONFIG_HOTPLUG_CPU #include extern void fixup_irqs(void); -- cgit v1.2.3-70-g09d2 From dcfa726280116dd31adad37da940f542663567d0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 28 Jun 2010 14:16:14 +0200 Subject: x86: Remove CONFIG_4KSTACKS These days 4 kilobytes of stack just aren't enough for reliable operation, and people using lots of threads have long switched to 64-bit kernels, so remove the CONFIG_4KSTACKS option. Signed-off-by: Christoph Hellwig LKML-Reference: <20100628121614.GB6605@lst.de> Signed-off-by: Thomas Gleixner --- arch/x86/Kconfig.debug | 9 --------- arch/x86/include/asm/module.h | 7 +------ arch/x86/include/asm/page_32_types.h | 4 ---- 3 files changed, 1 insertion(+), 19 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index badda8e20e7..7f1530838bc 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -121,15 +121,6 @@ config DEBUG_NX_TEST and the software setup of this feature. If in doubt, say "N" -config 4KSTACKS - bool "Use 4Kb for kernel stacks instead of 8Kb" - depends on X86_32 - ---help--- - If you say Y here the kernel will use a 4Kb stacksize for the - kernel stack attached to each process/thread. This facilitates - running more threads on a system and also reduces the pressure - on the VM subsystem for higher order allocations.
- config DOUBLEFAULT default y bool "Enable doublefault exception handler" if EMBEDDED diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h index 3e2ce58a31a..67763c5d8b4 100644 --- a/arch/x86/include/asm/module.h +++ b/arch/x86/include/asm/module.h @@ -60,12 +60,7 @@ #endif #ifdef CONFIG_X86_32 -# ifdef CONFIG_4KSTACKS -# define MODULE_STACKSIZE "4KSTACKS " -# else -# define MODULE_STACKSIZE "" -# endif -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE +# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY #endif #endif /* _ASM_X86_MODULE_H */ diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h index 6f1b7331313..ade619ff9e2 100644 --- a/arch/x86/include/asm/page_32_types.h +++ b/arch/x86/include/asm/page_32_types.h @@ -15,11 +15,7 @@ */ #define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) -#ifdef CONFIG_4KSTACKS -#define THREAD_ORDER 0 -#else #define THREAD_ORDER 1 -#endif #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) #define STACKFAULT_STACK 0 -- cgit v1.2.3-70-g09d2 From f6e9456c9272bb570df6e217cdbe007e270b1c4e Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 21 Jul 2010 19:03:58 +0200 Subject: x86, cleanup: Remove obsolete boot_cpu_id variable boot_cpu_id is there for historical reasons and was renamed to boot_cpu_physical_apicid in patch: c70dcb7 x86: change boot_cpu_id to boot_cpu_physical_apicid However, there are some remaining occurrences of boot_cpu_id that are never touched in the kernel and thus its value is always 0. This patch removes boot_cpu_id completely. Signed-off-by: Robert Richter LKML-Reference: <1279731838-1522-8-git-send-email-robert.richter@amd.com> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/apb_timer.h | 1 - arch/x86/include/asm/cpu.h | 1 - arch/x86/kernel/apb_timer.c | 2 +- arch/x86/kernel/apic/io_apic.c | 12 ++++++------ arch/x86/kernel/cpu/amd.c | 2 +- arch/x86/kernel/cpu/common.c | 2 +- arch/x86/kernel/cpu/intel.c | 2 +- arch/x86/kernel/reboot.c | 2 +- arch/x86/kernel/setup.c | 1 - arch/x86/kernel/setup_percpu.c | 2 +- arch/x86/mm/k8topology_64.c | 6 +++--- 11 files changed, 15 insertions(+), 18 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h index a69b1ac9eaf..2fefa501d3b 100644 --- a/arch/x86/include/asm/apb_timer.h +++ b/arch/x86/include/asm/apb_timer.h @@ -54,7 +54,6 @@ extern struct clock_event_device *global_clock_event; extern unsigned long apbt_quick_calibrate(void); extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu); extern void apbt_setup_secondary_clock(void); -extern unsigned int boot_cpu_id; extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint); extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr); diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index b185091bf19..4fab24de26b 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -32,6 +32,5 @@ extern void arch_unregister_cpu(int); DECLARE_PER_CPU(int, cpu_state); -extern unsigned int boot_cpu_id; #endif /* _ASM_X86_CPU_H */ diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c index 8dd77800ff5..08f75fb4f50 100644 --- a/arch/x86/kernel/apb_timer.c +++ b/arch/x86/kernel/apb_timer.c @@ -343,7 +343,7 @@ void apbt_setup_secondary_clock(void) /* Don't register boot CPU clockevent */ cpu = smp_processor_id(); - if (cpu == boot_cpu_id) + if (!cpu) return; /* * We need to calculate the scaled math multiplication factor for diff --git 
a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 4dc0084ec1b..8884928d7bc 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -162,7 +162,7 @@ int __init arch_early_irq_init(void) cfg = irq_cfgx; count = ARRAY_SIZE(irq_cfgx); - node= cpu_to_node(boot_cpu_id); + node = cpu_to_node(0); for (i = 0; i < count; i++) { desc = irq_to_desc(i); @@ -1483,7 +1483,7 @@ static void __init setup_IO_APIC_irqs(void) int notcon = 0; struct irq_desc *desc; struct irq_cfg *cfg; - int node = cpu_to_node(boot_cpu_id); + int node = cpu_to_node(0); apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); @@ -1548,7 +1548,7 @@ static void __init setup_IO_APIC_irqs(void) void setup_IO_APIC_irq_extra(u32 gsi) { int apic_id = 0, pin, idx, irq; - int node = cpu_to_node(boot_cpu_id); + int node = cpu_to_node(0); struct irq_desc *desc; struct irq_cfg *cfg; @@ -2925,7 +2925,7 @@ static inline void __init check_timer(void) { struct irq_desc *desc = irq_to_desc(0); struct irq_cfg *cfg = desc->chip_data; - int node = cpu_to_node(boot_cpu_id); + int node = cpu_to_node(0); int apic1, pin1, apic2, pin2; unsigned long flags; int no_pin1 = 0; @@ -3279,7 +3279,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node) int create_irq(void) { - int node = cpu_to_node(boot_cpu_id); + int node = cpu_to_node(0); unsigned int irq_want; int irq; @@ -3901,7 +3901,7 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq, if (dev) node = dev_to_node(dev); else - node = cpu_to_node(boot_cpu_id); + node = cpu_to_node(0); desc = irq_to_desc_alloc_node(irq, node); if (!desc) { diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 60a57b13082..3a7c852f021 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -148,7 +148,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP /* calling is from identify_secondary_cpu() ? */ - if (c->cpu_index == boot_cpu_id) + if (!c->cpu_index) return; /* diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 490dac63c2d..787b3c7c662 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -665,7 +665,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) this_cpu->c_early_init(c); #ifdef CONFIG_SMP - c->cpu_index = boot_cpu_id; + c->cpu_index = 0; #endif filter_cpuid_features(c, false); } diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 85f69cdeae1..3a683ea5267 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -169,7 +169,7 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP /* calling is from identify_secondary_cpu() ? 
*/ - if (c->cpu_index == boot_cpu_id) + if (!c->cpu_index) return; /* diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index e3af342fe83..7a4cf14223b 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -84,7 +84,7 @@ static int __init reboot_setup(char *str) } /* we will leave sorting out the final value when we are ready to reboot, since we might not - have set up boot_cpu_id or smp_num_cpu */ + have detected BSP APIC ID or smp_num_cpu */ break; #endif /* CONFIG_SMP */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index b008e788320..dede5c4bae4 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -125,7 +125,6 @@ unsigned long max_pfn_mapped; RESERVE_BRK(dmi_alloc, 65536); #endif -unsigned int boot_cpu_id __read_mostly; static __initdata unsigned long _brk_start = (unsigned long)__brk_base; unsigned long _brk_end = (unsigned long)__brk_base; diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index a60df9ae645..2335c15c93a 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -253,7 +253,7 @@ void __init setup_per_cpu_areas(void) * Up to this point, the boot CPU has been using .init.data * area. Reload any changed state for the boot CPU. */ - if (cpu == boot_cpu_id) + if (!cpu) switch_to_new_gdt(cpu); } diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/k8topology_64.c index 970ed579d4e..240f86462a8 100644 --- a/arch/x86/mm/k8topology_64.c +++ b/arch/x86/mm/k8topology_64.c @@ -54,8 +54,8 @@ static __init int find_northbridge(void) static __init void early_get_boot_cpu_id(void) { /* - * need to get boot_cpu_id so can use that to create apicid_to_node - * in k8_scan_nodes() + * need to get the APIC ID of the BSP so can use that to + * create apicid_to_node in k8_scan_nodes() */ #ifdef CONFIG_X86_MPPARSE /* @@ -212,7 +212,7 @@ int __init k8_scan_nodes(void) bits = boot_cpu_data.x86_coreid_bits; cores = (1< 0) { pr_info("BSP APIC ID: %02x\n", boot_cpu_physical_apicid); -- cgit v1.2.3-70-g09d2 From 61c77326d1df079f202fa79403c3ccd8c5966a81 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Mon, 16 Aug 2010 09:16:55 +0800 Subject: x86, mm: Avoid unnecessary TLB flush In x86, access and dirty bits are set automatically by CPU when CPU accesses memory. When we go into the code path of below flush_tlb_fix_spurious_fault(), we already set dirty bit for pte and don't need flush tlb. This might mean tlb entry in some CPUs hasn't dirty bit set, but this doesn't matter. When the CPUs do page write, they will automatically check the bit and no software involved. On the other hand, flush tlb in below position is harmful. Test creates CPU number of threads, each thread writes to a same but random address in same vma range and we measure the total time. Under a 4 socket system, original time is 1.96s, while with the patch, the time is 0.8s. Under a 2 socket system, there is 20% time cut too. perf shows a lot of time are taking to send ipi/handle ipi for tlb flush. Signed-off-by: Shaohua Li LKML-Reference: <20100816011655.GA362@sli10-desk.sh.intel.com> Acked-by: Suresh Siddha Cc: Andrea Archangeli Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/pgtable.h | 2 ++ include/asm-generic/pgtable.h | 4 ++++ mm/memory.c | 2 +- 3 files changed, 7 insertions(+), 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index a34c785c5a6..2d0a33bd297 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -603,6 +603,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, pte_update(mm, addr, ptep); } +#define flush_tlb_fix_spurious_fault(vma, address) + /* * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); * diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index e2bd73e8f9c..f4d4120e512 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -129,6 +129,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres #define move_pte(pte, prot, old_addr, new_addr) (pte) #endif +#ifndef flush_tlb_fix_spurious_fault +#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address) +#endif + #ifndef pgprot_noncached #define pgprot_noncached(prot) (prot) #endif diff --git a/mm/memory.c b/mm/memory.c index 2ed2267439d..a40da698396 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3147,7 +3147,7 @@ static inline int handle_pte_fault(struct mm_struct *mm, * with threads. */ if (flags & FAULT_FLAG_WRITE) - flush_tlb_page(vma, address); + flush_tlb_fix_spurious_fault(vma, address); } unlock: pte_unmap_unlock(pte, ptl); -- cgit v1.2.3-70-g09d2 From 9863c90f682fba34cdc26c3437e8c00da6c83fa4 Mon Sep 17 00:00:00 2001 From: Alok Kataria Date: Mon, 23 Aug 2010 14:49:11 -0700 Subject: x86, vmware: Remove deprecated VMI kernel support With the recent innovations in CPU hardware acceleration technologies from Intel and AMD, VMware ran a few experiments to compare these techniques to guest paravirtualization technique on VMware's platform. These hardware assisted virtualization techniques have outperformed the performance benefits provided by VMI in most of the workloads. VMware expects that these hardware features will be ubiquitous in a couple of years, as a result, VMware has started a phased retirement of this feature from the hypervisor. Please note that VMI has always been an optimization and non-VMI kernels still work fine on VMware's platform. Latest versions of VMware's product which support VMI are, Workstation 7.0 and VSphere 4.0 on ESX side, future maintainence releases for these products will continue supporting VMI. For more details about VMI retirement take a look at this, http://blogs.vmware.com/guestosguide/2009/09/vmi-retirement.html This feature removal was scheduled for 2.6.37 back in September 2009. Signed-off-by: Alok N Kataria LKML-Reference: <1282600151.19396.22.camel@ank32.eng.vmware.com> Signed-off-by: H. 
Peter Anvin --- Documentation/feature-removal-schedule.txt | 28 - Documentation/kernel-parameters.txt | 2 +- arch/x86/Kconfig | 19 - arch/x86/include/asm/vmi.h | 269 --------- arch/x86/include/asm/vmi_time.h | 98 ---- arch/x86/kernel/Makefile | 1 - arch/x86/kernel/setup.c | 12 +- arch/x86/kernel/smpboot.c | 2 - arch/x86/kernel/vmi_32.c | 893 ----------------------------- arch/x86/kernel/vmiclock_32.c | 317 ---------- 10 files changed, 5 insertions(+), 1636 deletions(-) delete mode 100644 arch/x86/include/asm/vmi.h delete mode 100644 arch/x86/include/asm/vmi_time.h delete mode 100644 arch/x86/kernel/vmi_32.c delete mode 100644 arch/x86/kernel/vmiclock_32.c (limited to 'arch/x86/include') diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 842aa9de84a..5e2bc4ab897 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -386,34 +386,6 @@ Who: Tejun Heo ---------------------------- -What: Support for VMware's guest paravirtuliazation technique [VMI] will be - dropped. -When: 2.6.37 or earlier. -Why: With the recent innovations in CPU hardware acceleration technologies - from Intel and AMD, VMware ran a few experiments to compare these - techniques to guest paravirtualization technique on VMware's platform. - These hardware assisted virtualization techniques have outperformed the - performance benefits provided by VMI in most of the workloads. VMware - expects that these hardware features will be ubiquitous in a couple of - years, as a result, VMware has started a phased retirement of this - feature from the hypervisor. We will be removing this feature from the - Kernel too. Right now we are targeting 2.6.37 but can retire earlier if - technical reasons (read opportunity to remove major chunk of pvops) - arise. - - Please note that VMI has always been an optimization and non-VMI kernels - still work fine on VMware's platform. - Latest versions of VMware's product which support VMI are, - Workstation 7.0 and VSphere 4.0 on ESX side, future maintainence - releases for these products will continue supporting VMI. - - For more details about VMI retirement take a look at this, - http://blogs.vmware.com/guestosguide/2009/09/vmi-retirement.html - -Who: Alok N Kataria - ----------------------------- - What: Support for lcd_switch and display_get in asus-laptop driver When: March 2010 Why: These two features use non-standard interfaces. There are the diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 2c85c0692b0..582409801a4 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -455,7 +455,7 @@ and is between 256 and 4096 characters. It is defined in the file [ARM] imx_timer1,OSTS,netx_timer,mpu_timer2, pxa_timer,timer3,32k_counter,timer0_1 [AVR32] avr32 - [X86-32] pit,hpet,tsc,vmi-timer; + [X86-32] pit,hpet,tsc; scx200_hrt on Geode; cyclone on IBM x440 [MIPS] MIPS [PARISC] cr16 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index cea0cd9a316..f0ee331feea 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -517,25 +517,6 @@ if PARAVIRT_GUEST source "arch/x86/xen/Kconfig" -config VMI - bool "VMI Guest support (DEPRECATED)" - select PARAVIRT - depends on X86_32 - ---help--- - VMI provides a paravirtualized interface to the VMware ESX server - (it could be used by other hypervisors in theory too, but is not - at the moment), by linking the kernel to a GPL-ed ROM module - provided by the hypervisor. 
- - As of September 2009, VMware has started a phased retirement - of this feature from VMware's products. Please see - feature-removal-schedule.txt for details. If you are - planning to enable this option, please note that you cannot - live migrate a VMI enabled VM to a future VMware product, - which doesn't support VMI. So if you expect your kernel to - seamlessly migrate to newer VMware products, keep this - disabled. - config KVM_CLOCK bool "KVM paravirtualized clock" select PARAVIRT diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h deleted file mode 100644 index 61e08c0a290..00000000000 --- a/arch/x86/include/asm/vmi.h +++ /dev/null @@ -1,269 +0,0 @@ -/* - * VMI interface definition - * - * Copyright (C) 2005, VMware, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - * Maintained by: Zachary Amsden zach@vmware.com - * - */ -#include - -/* - *--------------------------------------------------------------------- - * - * VMI Option ROM API - * - *--------------------------------------------------------------------- - */ -#define VMI_SIGNATURE 0x696d5663 /* "cVmi" */ - -#define PCI_VENDOR_ID_VMWARE 0x15AD -#define PCI_DEVICE_ID_VMWARE_VMI 0x0801 - -/* - * We use two version numbers for compatibility, with the major - * number signifying interface breakages, and the minor number - * interface extensions. 
- */ -#define VMI_API_REV_MAJOR 3 -#define VMI_API_REV_MINOR 0 - -#define VMI_CALL_CPUID 0 -#define VMI_CALL_WRMSR 1 -#define VMI_CALL_RDMSR 2 -#define VMI_CALL_SetGDT 3 -#define VMI_CALL_SetLDT 4 -#define VMI_CALL_SetIDT 5 -#define VMI_CALL_SetTR 6 -#define VMI_CALL_GetGDT 7 -#define VMI_CALL_GetLDT 8 -#define VMI_CALL_GetIDT 9 -#define VMI_CALL_GetTR 10 -#define VMI_CALL_WriteGDTEntry 11 -#define VMI_CALL_WriteLDTEntry 12 -#define VMI_CALL_WriteIDTEntry 13 -#define VMI_CALL_UpdateKernelStack 14 -#define VMI_CALL_SetCR0 15 -#define VMI_CALL_SetCR2 16 -#define VMI_CALL_SetCR3 17 -#define VMI_CALL_SetCR4 18 -#define VMI_CALL_GetCR0 19 -#define VMI_CALL_GetCR2 20 -#define VMI_CALL_GetCR3 21 -#define VMI_CALL_GetCR4 22 -#define VMI_CALL_WBINVD 23 -#define VMI_CALL_SetDR 24 -#define VMI_CALL_GetDR 25 -#define VMI_CALL_RDPMC 26 -#define VMI_CALL_RDTSC 27 -#define VMI_CALL_CLTS 28 -#define VMI_CALL_EnableInterrupts 29 -#define VMI_CALL_DisableInterrupts 30 -#define VMI_CALL_GetInterruptMask 31 -#define VMI_CALL_SetInterruptMask 32 -#define VMI_CALL_IRET 33 -#define VMI_CALL_SYSEXIT 34 -#define VMI_CALL_Halt 35 -#define VMI_CALL_Reboot 36 -#define VMI_CALL_Shutdown 37 -#define VMI_CALL_SetPxE 38 -#define VMI_CALL_SetPxELong 39 -#define VMI_CALL_UpdatePxE 40 -#define VMI_CALL_UpdatePxELong 41 -#define VMI_CALL_MachineToPhysical 42 -#define VMI_CALL_PhysicalToMachine 43 -#define VMI_CALL_AllocatePage 44 -#define VMI_CALL_ReleasePage 45 -#define VMI_CALL_InvalPage 46 -#define VMI_CALL_FlushTLB 47 -#define VMI_CALL_SetLinearMapping 48 - -#define VMI_CALL_SetIOPLMask 61 -#define VMI_CALL_SetInitialAPState 62 -#define VMI_CALL_APICWrite 63 -#define VMI_CALL_APICRead 64 -#define VMI_CALL_IODelay 65 -#define VMI_CALL_SetLazyMode 73 - -/* - *--------------------------------------------------------------------- - * - * MMU operation flags - * - *--------------------------------------------------------------------- - */ - -/* Flags used by VMI_{Allocate|Release}Page call */ -#define VMI_PAGE_PAE 0x10 /* Allocate PAE shadow */ -#define VMI_PAGE_CLONE 0x20 /* Clone from another shadow */ -#define VMI_PAGE_ZEROED 0x40 /* Page is pre-zeroed */ - - -/* Flags shared by Allocate|Release Page and PTE updates */ -#define VMI_PAGE_PT 0x01 -#define VMI_PAGE_PD 0x02 -#define VMI_PAGE_PDP 0x04 -#define VMI_PAGE_PML4 0x08 - -#define VMI_PAGE_NORMAL 0x00 /* for debugging */ - -/* Flags used by PTE updates */ -#define VMI_PAGE_CURRENT_AS 0x10 /* implies VMI_PAGE_VA_MASK is valid */ -#define VMI_PAGE_DEFER 0x20 /* may queue update until TLB inval */ -#define VMI_PAGE_VA_MASK 0xfffff000 - -#ifdef CONFIG_X86_PAE -#define VMI_PAGE_L1 (VMI_PAGE_PT | VMI_PAGE_PAE | VMI_PAGE_ZEROED) -#define VMI_PAGE_L2 (VMI_PAGE_PD | VMI_PAGE_PAE | VMI_PAGE_ZEROED) -#else -#define VMI_PAGE_L1 (VMI_PAGE_PT | VMI_PAGE_ZEROED) -#define VMI_PAGE_L2 (VMI_PAGE_PD | VMI_PAGE_ZEROED) -#endif - -/* Flags used by VMI_FlushTLB call */ -#define VMI_FLUSH_TLB 0x01 -#define VMI_FLUSH_GLOBAL 0x02 - -/* - *--------------------------------------------------------------------- - * - * VMI relocation definitions for ROM call get_reloc - * - *--------------------------------------------------------------------- - */ - -/* VMI Relocation types */ -#define VMI_RELOCATION_NONE 0 -#define VMI_RELOCATION_CALL_REL 1 -#define VMI_RELOCATION_JUMP_REL 2 -#define VMI_RELOCATION_NOP 3 - -#ifndef __ASSEMBLY__ -struct vmi_relocation_info { - unsigned char *eip; - unsigned char type; - unsigned char reserved[3]; -}; -#endif - - -/* - 
*--------------------------------------------------------------------- - * - * Generic ROM structures and definitions - * - *--------------------------------------------------------------------- - */ - -#ifndef __ASSEMBLY__ - -struct vrom_header { - u16 rom_signature; /* option ROM signature */ - u8 rom_length; /* ROM length in 512 byte chunks */ - u8 rom_entry[4]; /* 16-bit code entry point */ - u8 rom_pad0; /* 4-byte align pad */ - u32 vrom_signature; /* VROM identification signature */ - u8 api_version_min;/* Minor version of API */ - u8 api_version_maj;/* Major version of API */ - u8 jump_slots; /* Number of jump slots */ - u8 reserved1; /* Reserved for expansion */ - u32 virtual_top; /* Hypervisor virtual address start */ - u16 reserved2; /* Reserved for expansion */ - u16 license_offs; /* Offset to License string */ - u16 pci_header_offs;/* Offset to PCI OPROM header */ - u16 pnp_header_offs;/* Offset to PnP OPROM header */ - u32 rom_pad3; /* PnP reserverd / VMI reserved */ - u8 reserved[96]; /* Reserved for headers */ - char vmi_init[8]; /* VMI_Init jump point */ - char get_reloc[8]; /* VMI_GetRelocationInfo jump point */ -} __attribute__((packed)); - -struct pnp_header { - char sig[4]; - char rev; - char size; - short next; - short res; - long devID; - unsigned short manufacturer_offset; - unsigned short product_offset; -} __attribute__((packed)); - -struct pci_header { - char sig[4]; - short vendorID; - short deviceID; - short vpdData; - short size; - char rev; - char class; - char subclass; - char interface; - short chunks; - char rom_version_min; - char rom_version_maj; - char codetype; - char lastRom; - short reserved; -} __attribute__((packed)); - -/* Function prototypes for bootstrapping */ -#ifdef CONFIG_VMI -extern void vmi_init(void); -extern void vmi_activate(void); -extern void vmi_bringup(void); -#else -static inline void vmi_init(void) {} -static inline void vmi_activate(void) {} -static inline void vmi_bringup(void) {} -#endif - -/* State needed to start an application processor in an SMP system. */ -struct vmi_ap_state { - u32 cr0; - u32 cr2; - u32 cr3; - u32 cr4; - - u64 efer; - - u32 eip; - u32 eflags; - u32 eax; - u32 ebx; - u32 ecx; - u32 edx; - u32 esp; - u32 ebp; - u32 esi; - u32 edi; - u16 cs; - u16 ss; - u16 ds; - u16 es; - u16 fs; - u16 gs; - u16 ldtr; - - u16 gdtr_limit; - u32 gdtr_base; - u32 idtr_base; - u16 idtr_limit; -}; - -#endif diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h deleted file mode 100644 index c6e0bee93e3..00000000000 --- a/arch/x86/include/asm/vmi_time.h +++ /dev/null @@ -1,98 +0,0 @@ -/* - * VMI Time wrappers - * - * Copyright (C) 2006, VMware, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- * - * Send feedback to dhecht@vmware.com - * - */ - -#ifndef _ASM_X86_VMI_TIME_H -#define _ASM_X86_VMI_TIME_H - -/* - * Raw VMI call indices for timer functions - */ -#define VMI_CALL_GetCycleFrequency 66 -#define VMI_CALL_GetCycleCounter 67 -#define VMI_CALL_SetAlarm 68 -#define VMI_CALL_CancelAlarm 69 -#define VMI_CALL_GetWallclockTime 70 -#define VMI_CALL_WallclockUpdated 71 - -/* Cached VMI timer operations */ -extern struct vmi_timer_ops { - u64 (*get_cycle_frequency)(void); - u64 (*get_cycle_counter)(int); - u64 (*get_wallclock)(void); - int (*wallclock_updated)(void); - void (*set_alarm)(u32 flags, u64 expiry, u64 period); - void (*cancel_alarm)(u32 flags); -} vmi_timer_ops; - -/* Prototypes */ -extern void __init vmi_time_init(void); -extern unsigned long vmi_get_wallclock(void); -extern int vmi_set_wallclock(unsigned long now); -extern unsigned long long vmi_sched_clock(void); -extern unsigned long vmi_tsc_khz(void); - -#ifdef CONFIG_X86_LOCAL_APIC -extern void __devinit vmi_time_bsp_init(void); -extern void __devinit vmi_time_ap_init(void); -#endif - -/* - * When run under a hypervisor, a vcpu is always in one of three states: - * running, halted, or ready. The vcpu is in the 'running' state if it - * is executing. When the vcpu executes the halt interface, the vcpu - * enters the 'halted' state and remains halted until there is some work - * pending for the vcpu (e.g. an alarm expires, host I/O completes on - * behalf of virtual I/O). At this point, the vcpu enters the 'ready' - * state (waiting for the hypervisor to reschedule it). Finally, at any - * time when the vcpu is not in the 'running' state nor the 'halted' - * state, it is in the 'ready' state. - * - * Real time is advances while the vcpu is 'running', 'ready', or - * 'halted'. Stolen time is the time in which the vcpu is in the - * 'ready' state. Available time is the remaining time -- the vcpu is - * either 'running' or 'halted'. - * - * All three views of time are accessible through the VMI cycle - * counters. - */ - -/* The cycle counters. 
*/ -#define VMI_CYCLES_REAL 0 -#define VMI_CYCLES_AVAILABLE 1 -#define VMI_CYCLES_STOLEN 2 - -/* The alarm interface 'flags' bits */ -#define VMI_ALARM_COUNTERS 2 - -#define VMI_ALARM_COUNTER_MASK 0x000000ff - -#define VMI_ALARM_WIRED_IRQ0 0x00000000 -#define VMI_ALARM_WIRED_LVTT 0x00010000 - -#define VMI_ALARM_IS_ONESHOT 0x00000000 -#define VMI_ALARM_IS_PERIODIC 0x00000100 - -#define CONFIG_VMI_ALARM_HZ 100 - -#endif /* _ASM_X86_VMI_TIME_H */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 0925676266b..801127cd975 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -91,7 +91,6 @@ obj-$(CONFIG_K8_NB) += k8.o obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o -obj-$(CONFIG_VMI) += vmi_32.o vmiclock_32.o obj-$(CONFIG_KVM_GUEST) += kvm.o obj-$(CONFIG_KVM_CLOCK) += kvmclock.o obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index c3a4fbb2b99..feb4c21e499 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -83,7 +83,6 @@ #include #include #include -#include #include #include #include @@ -734,10 +733,10 @@ void __init setup_arch(char **cmdline_p) printk(KERN_INFO "Command line: %s\n", boot_command_line); #endif - /* VMI may relocate the fixmap; do this before touching ioremap area */ - vmi_init(); - - /* OFW also may relocate the fixmap */ + /* + * If we have OLPC OFW, we might end up relocating the fixmap due to + * reserve_top(), so do this before touching the ioremap area. + */ olpc_ofw_detect(); early_trap_init(); @@ -838,9 +837,6 @@ void __init setup_arch(char **cmdline_p) x86_report_nx(); - /* Must be before kernel pagetables are setup */ - vmi_activate(); - /* after early param, so could get panic from serial */ reserve_early_setup_data(); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 8b3bfc4dd70..63a1a5596ac 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -62,7 +62,6 @@ #include #include #include -#include #include #include #include @@ -311,7 +310,6 @@ notrace static void __cpuinit start_secondary(void *unused) __flush_tlb_all(); #endif - vmi_bringup(); cpu_init(); preempt_disable(); smp_callin(); diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c deleted file mode 100644 index ce9fbacb752..00000000000 --- a/arch/x86/kernel/vmi_32.c +++ /dev/null @@ -1,893 +0,0 @@ -/* - * VMI specific paravirt-ops implementation - * - * Copyright (C) 2005, VMware, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- * - * Send feedback to zach@vmware.com - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* Convenient for calling VMI functions indirectly in the ROM */ -typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void); -typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int); - -#define call_vrom_func(rom,func) \ - (((VROMFUNC *)(rom->func))()) - -#define call_vrom_long_func(rom,func,arg) \ - (((VROMLONGFUNC *)(rom->func)) (arg)) - -static struct vrom_header *vmi_rom; -static int disable_pge; -static int disable_pse; -static int disable_sep; -static int disable_tsc; -static int disable_mtrr; -static int disable_noidle; -static int disable_vmi_timer; - -/* Cached VMI operations */ -static struct { - void (*cpuid)(void /* non-c */); - void (*_set_ldt)(u32 selector); - void (*set_tr)(u32 selector); - void (*write_idt_entry)(struct desc_struct *, int, u32, u32); - void (*write_gdt_entry)(struct desc_struct *, int, u32, u32); - void (*write_ldt_entry)(struct desc_struct *, int, u32, u32); - void (*set_kernel_stack)(u32 selector, u32 sp0); - void (*allocate_page)(u32, u32, u32, u32, u32); - void (*release_page)(u32, u32); - void (*set_pte)(pte_t, pte_t *, unsigned); - void (*update_pte)(pte_t *, unsigned); - void (*set_linear_mapping)(int, void *, u32, u32); - void (*_flush_tlb)(int); - void (*set_initial_ap_state)(int, int); - void (*halt)(void); - void (*set_lazy_mode)(int mode); -} vmi_ops; - -/* Cached VMI operations */ -struct vmi_timer_ops vmi_timer_ops; - -/* - * VMI patching routines. - */ -#define MNEM_CALL 0xe8 -#define MNEM_JMP 0xe9 -#define MNEM_RET 0xc3 - -#define IRQ_PATCH_INT_MASK 0 -#define IRQ_PATCH_DISABLE 5 - -static inline void patch_offset(void *insnbuf, - unsigned long ip, unsigned long dest) -{ - *(unsigned long *)(insnbuf+1) = dest-ip-5; -} - -static unsigned patch_internal(int call, unsigned len, void *insnbuf, - unsigned long ip) -{ - u64 reloc; - struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc; - reloc = call_vrom_long_func(vmi_rom, get_reloc, call); - switch(rel->type) { - case VMI_RELOCATION_CALL_REL: - BUG_ON(len < 5); - *(char *)insnbuf = MNEM_CALL; - patch_offset(insnbuf, ip, (unsigned long)rel->eip); - return 5; - - case VMI_RELOCATION_JUMP_REL: - BUG_ON(len < 5); - *(char *)insnbuf = MNEM_JMP; - patch_offset(insnbuf, ip, (unsigned long)rel->eip); - return 5; - - case VMI_RELOCATION_NOP: - /* obliterate the whole thing */ - return 0; - - case VMI_RELOCATION_NONE: - /* leave native code in place */ - break; - - default: - BUG(); - } - return len; -} - -/* - * Apply patch if appropriate, return length of new instruction - * sequence. The callee does nop padding for us. 
- */ -static unsigned vmi_patch(u8 type, u16 clobbers, void *insns, - unsigned long ip, unsigned len) -{ - switch (type) { - case PARAVIRT_PATCH(pv_irq_ops.irq_disable): - return patch_internal(VMI_CALL_DisableInterrupts, len, - insns, ip); - case PARAVIRT_PATCH(pv_irq_ops.irq_enable): - return patch_internal(VMI_CALL_EnableInterrupts, len, - insns, ip); - case PARAVIRT_PATCH(pv_irq_ops.restore_fl): - return patch_internal(VMI_CALL_SetInterruptMask, len, - insns, ip); - case PARAVIRT_PATCH(pv_irq_ops.save_fl): - return patch_internal(VMI_CALL_GetInterruptMask, len, - insns, ip); - case PARAVIRT_PATCH(pv_cpu_ops.iret): - return patch_internal(VMI_CALL_IRET, len, insns, ip); - case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit): - return patch_internal(VMI_CALL_SYSEXIT, len, insns, ip); - default: - break; - } - return len; -} - -/* CPUID has non-C semantics, and paravirt-ops API doesn't match hardware ISA */ -static void vmi_cpuid(unsigned int *ax, unsigned int *bx, - unsigned int *cx, unsigned int *dx) -{ - int override = 0; - if (*ax == 1) - override = 1; - asm volatile ("call *%6" - : "=a" (*ax), - "=b" (*bx), - "=c" (*cx), - "=d" (*dx) - : "0" (*ax), "2" (*cx), "r" (vmi_ops.cpuid)); - if (override) { - if (disable_pse) - *dx &= ~X86_FEATURE_PSE; - if (disable_pge) - *dx &= ~X86_FEATURE_PGE; - if (disable_sep) - *dx &= ~X86_FEATURE_SEP; - if (disable_tsc) - *dx &= ~X86_FEATURE_TSC; - if (disable_mtrr) - *dx &= ~X86_FEATURE_MTRR; - } -} - -static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new) -{ - if (gdt[nr].a != new->a || gdt[nr].b != new->b) - write_gdt_entry(gdt, nr, new, 0); -} - -static void vmi_load_tls(struct thread_struct *t, unsigned int cpu) -{ - struct desc_struct *gdt = get_cpu_gdt_table(cpu); - vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0]); - vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1]); - vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2]); -} - -static void vmi_set_ldt(const void *addr, unsigned entries) -{ - unsigned cpu = smp_processor_id(); - struct desc_struct desc; - - pack_descriptor(&desc, (unsigned long)addr, - entries * sizeof(struct desc_struct) - 1, - DESC_LDT, 0); - write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, &desc, DESC_LDT); - vmi_ops._set_ldt(entries ? 
GDT_ENTRY_LDT*sizeof(struct desc_struct) : 0); -} - -static void vmi_set_tr(void) -{ - vmi_ops.set_tr(GDT_ENTRY_TSS*sizeof(struct desc_struct)); -} - -static void vmi_write_idt_entry(gate_desc *dt, int entry, const gate_desc *g) -{ - u32 *idt_entry = (u32 *)g; - vmi_ops.write_idt_entry(dt, entry, idt_entry[0], idt_entry[1]); -} - -static void vmi_write_gdt_entry(struct desc_struct *dt, int entry, - const void *desc, int type) -{ - u32 *gdt_entry = (u32 *)desc; - vmi_ops.write_gdt_entry(dt, entry, gdt_entry[0], gdt_entry[1]); -} - -static void vmi_write_ldt_entry(struct desc_struct *dt, int entry, - const void *desc) -{ - u32 *ldt_entry = (u32 *)desc; - vmi_ops.write_ldt_entry(dt, entry, ldt_entry[0], ldt_entry[1]); -} - -static void vmi_load_sp0(struct tss_struct *tss, - struct thread_struct *thread) -{ - tss->x86_tss.sp0 = thread->sp0; - - /* This can only happen when SEP is enabled, no need to test "SEP"arately */ - if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { - tss->x86_tss.ss1 = thread->sysenter_cs; - wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); - } - vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.sp0); -} - -static void vmi_flush_tlb_user(void) -{ - vmi_ops._flush_tlb(VMI_FLUSH_TLB); -} - -static void vmi_flush_tlb_kernel(void) -{ - vmi_ops._flush_tlb(VMI_FLUSH_TLB | VMI_FLUSH_GLOBAL); -} - -/* Stub to do nothing at all; used for delays and unimplemented calls */ -static void vmi_nop(void) -{ -} - -static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn) -{ - vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0); -} - -static void vmi_allocate_pmd(struct mm_struct *mm, unsigned long pfn) -{ - /* - * This call comes in very early, before mem_map is setup. - * It is called only for swapper_pg_dir, which already has - * data on it. - */ - vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0); -} - -static void vmi_allocate_pmd_clone(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count) -{ - vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count); -} - -static void vmi_release_pte(unsigned long pfn) -{ - vmi_ops.release_page(pfn, VMI_PAGE_L1); -} - -static void vmi_release_pmd(unsigned long pfn) -{ - vmi_ops.release_page(pfn, VMI_PAGE_L2); -} - -/* - * We use the pgd_free hook for releasing the pgd page: - */ -static void vmi_pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ - unsigned long pfn = __pa(pgd) >> PAGE_SHIFT; - - vmi_ops.release_page(pfn, VMI_PAGE_L2); -} - -/* - * Helper macros for MMU update flags. We can defer updates until a flush - * or page invalidation only if the update is to the current address space - * (otherwise, there is no flush). We must check against init_mm, since - * this could be a kernel update, which usually passes init_mm, although - * sometimes this check can be skipped if we know the particular function - * is only called on user mode PTEs. We could change the kernel to pass - * current->active_mm here, but in particular, I was unsure if changing - * mm/highmem.c to do this would still be correct on other architectures. - */ -#define is_current_as(mm, mustbeuser) ((mm) == current->active_mm || \ - (!mustbeuser && (mm) == &init_mm)) -#define vmi_flags_addr(mm, addr, level, user) \ - ((level) | (is_current_as(mm, user) ? \ - (VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0)) -#define vmi_flags_addr_defer(mm, addr, level, user) \ - ((level) | (is_current_as(mm, user) ? 
\ - (VMI_PAGE_DEFER | VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0)) - -static void vmi_update_pte(struct mm_struct *mm, unsigned long addr, pte_t *ptep) -{ - vmi_ops.update_pte(ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0)); -} - -static void vmi_update_pte_defer(struct mm_struct *mm, unsigned long addr, pte_t *ptep) -{ - vmi_ops.update_pte(ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 0)); -} - -static void vmi_set_pte(pte_t *ptep, pte_t pte) -{ - /* XXX because of set_pmd_pte, this can be called on PT or PD layers */ - vmi_ops.set_pte(pte, ptep, VMI_PAGE_PT); -} - -static void vmi_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) -{ - vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0)); -} - -static void vmi_set_pmd(pmd_t *pmdp, pmd_t pmdval) -{ -#ifdef CONFIG_X86_PAE - const pte_t pte = { .pte = pmdval.pmd }; -#else - const pte_t pte = { pmdval.pud.pgd.pgd }; -#endif - vmi_ops.set_pte(pte, (pte_t *)pmdp, VMI_PAGE_PD); -} - -#ifdef CONFIG_X86_PAE - -static void vmi_set_pte_atomic(pte_t *ptep, pte_t pteval) -{ - /* - * XXX This is called from set_pmd_pte, but at both PT - * and PD layers so the VMI_PAGE_PT flag is wrong. But - * it is only called for large page mapping changes, - * the Xen backend, doesn't support large pages, and the - * ESX backend doesn't depend on the flag. - */ - set_64bit((unsigned long long *)ptep,pte_val(pteval)); - vmi_ops.update_pte(ptep, VMI_PAGE_PT); -} - -static void vmi_set_pud(pud_t *pudp, pud_t pudval) -{ - /* Um, eww */ - const pte_t pte = { .pte = pudval.pgd.pgd }; - vmi_ops.set_pte(pte, (pte_t *)pudp, VMI_PAGE_PDP); -} - -static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) -{ - const pte_t pte = { .pte = 0 }; - vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0)); -} - -static void vmi_pmd_clear(pmd_t *pmd) -{ - const pte_t pte = { .pte = 0 }; - vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD); -} -#endif - -#ifdef CONFIG_SMP -static void __devinit -vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip, - unsigned long start_esp) -{ - struct vmi_ap_state ap; - - /* Default everything to zero. This is fine for most GPRs. */ - memset(&ap, 0, sizeof(struct vmi_ap_state)); - - ap.gdtr_limit = GDT_SIZE - 1; - ap.gdtr_base = (unsigned long) get_cpu_gdt_table(phys_apicid); - - ap.idtr_limit = IDT_ENTRIES * 8 - 1; - ap.idtr_base = (unsigned long) idt_table; - - ap.ldtr = 0; - - ap.cs = __KERNEL_CS; - ap.eip = (unsigned long) start_eip; - ap.ss = __KERNEL_DS; - ap.esp = (unsigned long) start_esp; - - ap.ds = __USER_DS; - ap.es = __USER_DS; - ap.fs = __KERNEL_PERCPU; - ap.gs = __KERNEL_STACK_CANARY; - - ap.eflags = 0; - -#ifdef CONFIG_X86_PAE - /* efer should match BSP efer. */ - if (cpu_has_nx) { - unsigned l, h; - rdmsr(MSR_EFER, l, h); - ap.efer = (unsigned long long) h << 32 | l; - } -#endif - - ap.cr3 = __pa(swapper_pg_dir); - /* Protected mode, paging, AM, WP, NE, MP. 
*/ - ap.cr0 = 0x80050023; - ap.cr4 = mmu_cr4_features; - vmi_ops.set_initial_ap_state((u32)&ap, phys_apicid); -} -#endif - -static void vmi_start_context_switch(struct task_struct *prev) -{ - paravirt_start_context_switch(prev); - vmi_ops.set_lazy_mode(2); -} - -static void vmi_end_context_switch(struct task_struct *next) -{ - vmi_ops.set_lazy_mode(0); - paravirt_end_context_switch(next); -} - -static void vmi_enter_lazy_mmu(void) -{ - paravirt_enter_lazy_mmu(); - vmi_ops.set_lazy_mode(1); -} - -static void vmi_leave_lazy_mmu(void) -{ - vmi_ops.set_lazy_mode(0); - paravirt_leave_lazy_mmu(); -} - -static inline int __init check_vmi_rom(struct vrom_header *rom) -{ - struct pci_header *pci; - struct pnp_header *pnp; - const char *manufacturer = "UNKNOWN"; - const char *product = "UNKNOWN"; - const char *license = "unspecified"; - - if (rom->rom_signature != 0xaa55) - return 0; - if (rom->vrom_signature != VMI_SIGNATURE) - return 0; - if (rom->api_version_maj != VMI_API_REV_MAJOR || - rom->api_version_min+1 < VMI_API_REV_MINOR+1) { - printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n", - rom->api_version_maj, - rom->api_version_min); - return 0; - } - - /* - * Relying on the VMI_SIGNATURE field is not 100% safe, so check - * the PCI header and device type to make sure this is really a - * VMI device. - */ - if (!rom->pci_header_offs) { - printk(KERN_WARNING "VMI: ROM does not contain PCI header.\n"); - return 0; - } - - pci = (struct pci_header *)((char *)rom+rom->pci_header_offs); - if (pci->vendorID != PCI_VENDOR_ID_VMWARE || - pci->deviceID != PCI_DEVICE_ID_VMWARE_VMI) { - /* Allow it to run... anyways, but warn */ - printk(KERN_WARNING "VMI: ROM from unknown manufacturer\n"); - } - - if (rom->pnp_header_offs) { - pnp = (struct pnp_header *)((char *)rom+rom->pnp_header_offs); - if (pnp->manufacturer_offset) - manufacturer = (const char *)rom+pnp->manufacturer_offset; - if (pnp->product_offset) - product = (const char *)rom+pnp->product_offset; - } - - if (rom->license_offs) - license = (char *)rom+rom->license_offs; - - printk(KERN_INFO "VMI: Found %s %s, API version %d.%d, ROM version %d.%d\n", - manufacturer, product, - rom->api_version_maj, rom->api_version_min, - pci->rom_version_maj, pci->rom_version_min); - - /* Don't allow BSD/MIT here for now because we don't want to end up - with any binary only shim layers */ - if (strcmp(license, "GPL") && strcmp(license, "GPL v2")) { - printk(KERN_WARNING "VMI: Non GPL license `%s' found for ROM. 
Not used.\n", - license); - return 0; - } - - return 1; -} - -/* - * Probe for the VMI option ROM - */ -static inline int __init probe_vmi_rom(void) -{ - unsigned long base; - - /* VMI ROM is in option ROM area, check signature */ - for (base = 0xC0000; base < 0xE0000; base += 2048) { - struct vrom_header *romstart; - romstart = (struct vrom_header *)isa_bus_to_virt(base); - if (check_vmi_rom(romstart)) { - vmi_rom = romstart; - return 1; - } - } - return 0; -} - -/* - * VMI setup common to all processors - */ -void vmi_bringup(void) -{ - /* We must establish the lowmem mapping for MMU ops to work */ - if (vmi_ops.set_linear_mapping) - vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, MAXMEM_PFN, 0); -} - -/* - * Return a pointer to a VMI function or NULL if unimplemented - */ -static void *vmi_get_function(int vmicall) -{ - u64 reloc; - const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc; - reloc = call_vrom_long_func(vmi_rom, get_reloc, vmicall); - BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL); - if (rel->type == VMI_RELOCATION_CALL_REL) - return (void *)rel->eip; - else - return NULL; -} - -/* - * Helper macro for making the VMI paravirt-ops fill code readable. - * For unimplemented operations, fall back to default, unless nop - * is returned by the ROM. - */ -#define para_fill(opname, vmicall) \ -do { \ - reloc = call_vrom_long_func(vmi_rom, get_reloc, \ - VMI_CALL_##vmicall); \ - if (rel->type == VMI_RELOCATION_CALL_REL) \ - opname = (void *)rel->eip; \ - else if (rel->type == VMI_RELOCATION_NOP) \ - opname = (void *)vmi_nop; \ - else if (rel->type != VMI_RELOCATION_NONE) \ - printk(KERN_WARNING "VMI: Unknown relocation " \ - "type %d for " #vmicall"\n",\ - rel->type); \ -} while (0) - -/* - * Helper macro for making the VMI paravirt-ops fill code readable. - * For cached operations which do not match the VMI ROM ABI and must - * go through a tranlation stub. Ignore NOPs, since it is not clear - * a NOP * VMI function corresponds to a NOP paravirt-op when the - * functions are not in 1-1 correspondence. - */ -#define para_wrap(opname, wrapper, cache, vmicall) \ -do { \ - reloc = call_vrom_long_func(vmi_rom, get_reloc, \ - VMI_CALL_##vmicall); \ - BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL); \ - if (rel->type == VMI_RELOCATION_CALL_REL) { \ - opname = wrapper; \ - vmi_ops.cache = (void *)rel->eip; \ - } \ -} while (0) - -/* - * Activate the VMI interface and switch into paravirtualized mode - */ -static inline int __init activate_vmi(void) -{ - short kernel_cs; - u64 reloc; - const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc; - - /* - * Prevent page tables from being allocated in highmem, even if - * CONFIG_HIGHPTE is enabled. - */ - __userpte_alloc_gfp &= ~__GFP_HIGHMEM; - - if (call_vrom_func(vmi_rom, vmi_init) != 0) { - printk(KERN_ERR "VMI ROM failed to initialize!"); - return 0; - } - savesegment(cs, kernel_cs); - - pv_info.paravirt_enabled = 1; - pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK; - pv_info.name = "vmi [deprecated]"; - - pv_init_ops.patch = vmi_patch; - - /* - * Many of these operations are ABI compatible with VMI. - * This means we can fill in the paravirt-ops with direct - * pointers into the VMI ROM. If the calling convention for - * these operations changes, this code needs to be updated. - * - * Exceptions - * CPUID paravirt-op uses pointers, not the native ISA - * halt has no VMI equivalent; all VMI halts are "safe" - * no MSR support yet - just trap and emulate. 
VMI uses the - * same ABI as the native ISA, but Linux wants exceptions - * from bogus MSR read / write handled - * rdpmc is not yet used in Linux - */ - - /* CPUID is special, so very special it gets wrapped like a present */ - para_wrap(pv_cpu_ops.cpuid, vmi_cpuid, cpuid, CPUID); - - para_fill(pv_cpu_ops.clts, CLTS); - para_fill(pv_cpu_ops.get_debugreg, GetDR); - para_fill(pv_cpu_ops.set_debugreg, SetDR); - para_fill(pv_cpu_ops.read_cr0, GetCR0); - para_fill(pv_mmu_ops.read_cr2, GetCR2); - para_fill(pv_mmu_ops.read_cr3, GetCR3); - para_fill(pv_cpu_ops.read_cr4, GetCR4); - para_fill(pv_cpu_ops.write_cr0, SetCR0); - para_fill(pv_mmu_ops.write_cr2, SetCR2); - para_fill(pv_mmu_ops.write_cr3, SetCR3); - para_fill(pv_cpu_ops.write_cr4, SetCR4); - - para_fill(pv_irq_ops.save_fl.func, GetInterruptMask); - para_fill(pv_irq_ops.restore_fl.func, SetInterruptMask); - para_fill(pv_irq_ops.irq_disable.func, DisableInterrupts); - para_fill(pv_irq_ops.irq_enable.func, EnableInterrupts); - - para_fill(pv_cpu_ops.wbinvd, WBINVD); - para_fill(pv_cpu_ops.read_tsc, RDTSC); - - /* The following we emulate with trap and emulate for now */ - /* paravirt_ops.read_msr = vmi_rdmsr */ - /* paravirt_ops.write_msr = vmi_wrmsr */ - /* paravirt_ops.rdpmc = vmi_rdpmc */ - - /* TR interface doesn't pass TR value, wrap */ - para_wrap(pv_cpu_ops.load_tr_desc, vmi_set_tr, set_tr, SetTR); - - /* LDT is special, too */ - para_wrap(pv_cpu_ops.set_ldt, vmi_set_ldt, _set_ldt, SetLDT); - - para_fill(pv_cpu_ops.load_gdt, SetGDT); - para_fill(pv_cpu_ops.load_idt, SetIDT); - para_fill(pv_cpu_ops.store_gdt, GetGDT); - para_fill(pv_cpu_ops.store_idt, GetIDT); - para_fill(pv_cpu_ops.store_tr, GetTR); - pv_cpu_ops.load_tls = vmi_load_tls; - para_wrap(pv_cpu_ops.write_ldt_entry, vmi_write_ldt_entry, - write_ldt_entry, WriteLDTEntry); - para_wrap(pv_cpu_ops.write_gdt_entry, vmi_write_gdt_entry, - write_gdt_entry, WriteGDTEntry); - para_wrap(pv_cpu_ops.write_idt_entry, vmi_write_idt_entry, - write_idt_entry, WriteIDTEntry); - para_wrap(pv_cpu_ops.load_sp0, vmi_load_sp0, set_kernel_stack, UpdateKernelStack); - para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask); - para_fill(pv_cpu_ops.io_delay, IODelay); - - para_wrap(pv_cpu_ops.start_context_switch, vmi_start_context_switch, - set_lazy_mode, SetLazyMode); - para_wrap(pv_cpu_ops.end_context_switch, vmi_end_context_switch, - set_lazy_mode, SetLazyMode); - - para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu, - set_lazy_mode, SetLazyMode); - para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy_mmu, - set_lazy_mode, SetLazyMode); - - /* user and kernel flush are just handled with different flags to FlushTLB */ - para_wrap(pv_mmu_ops.flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB); - para_wrap(pv_mmu_ops.flush_tlb_kernel, vmi_flush_tlb_kernel, _flush_tlb, FlushTLB); - para_fill(pv_mmu_ops.flush_tlb_single, InvalPage); - - /* - * Until a standard flag format can be agreed on, we need to - * implement these as wrappers in Linux. Get the VMI ROM - * function pointers for the two backend calls. 
- */ -#ifdef CONFIG_X86_PAE - vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxELong); - vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxELong); -#else - vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE); - vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE); -#endif - - if (vmi_ops.set_pte) { - pv_mmu_ops.set_pte = vmi_set_pte; - pv_mmu_ops.set_pte_at = vmi_set_pte_at; - pv_mmu_ops.set_pmd = vmi_set_pmd; -#ifdef CONFIG_X86_PAE - pv_mmu_ops.set_pte_atomic = vmi_set_pte_atomic; - pv_mmu_ops.set_pud = vmi_set_pud; - pv_mmu_ops.pte_clear = vmi_pte_clear; - pv_mmu_ops.pmd_clear = vmi_pmd_clear; -#endif - } - - if (vmi_ops.update_pte) { - pv_mmu_ops.pte_update = vmi_update_pte; - pv_mmu_ops.pte_update_defer = vmi_update_pte_defer; - } - - vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage); - if (vmi_ops.allocate_page) { - pv_mmu_ops.alloc_pte = vmi_allocate_pte; - pv_mmu_ops.alloc_pmd = vmi_allocate_pmd; - pv_mmu_ops.alloc_pmd_clone = vmi_allocate_pmd_clone; - } - - vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage); - if (vmi_ops.release_page) { - pv_mmu_ops.release_pte = vmi_release_pte; - pv_mmu_ops.release_pmd = vmi_release_pmd; - pv_mmu_ops.pgd_free = vmi_pgd_free; - } - - /* Set linear is needed in all cases */ - vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping); - - /* - * These MUST always be patched. Don't support indirect jumps - * through these operations, as the VMI interface may use either - * a jump or a call to get to these operations, depending on - * the backend. They are performance critical anyway, so requiring - * a patch is not a big problem. - */ - pv_cpu_ops.irq_enable_sysexit = (void *)0xfeedbab0; - pv_cpu_ops.iret = (void *)0xbadbab0; - -#ifdef CONFIG_SMP - para_wrap(pv_apic_ops.startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState); -#endif - -#ifdef CONFIG_X86_LOCAL_APIC - para_fill(apic->read, APICRead); - para_fill(apic->write, APICWrite); -#endif - - /* - * Check for VMI timer functionality by probing for a cycle frequency method - */ - reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency); - if (!disable_vmi_timer && rel->type != VMI_RELOCATION_NONE) { - vmi_timer_ops.get_cycle_frequency = (void *)rel->eip; - vmi_timer_ops.get_cycle_counter = - vmi_get_function(VMI_CALL_GetCycleCounter); - vmi_timer_ops.get_wallclock = - vmi_get_function(VMI_CALL_GetWallclockTime); - vmi_timer_ops.wallclock_updated = - vmi_get_function(VMI_CALL_WallclockUpdated); - vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm); - vmi_timer_ops.cancel_alarm = - vmi_get_function(VMI_CALL_CancelAlarm); - x86_init.timers.timer_init = vmi_time_init; -#ifdef CONFIG_X86_LOCAL_APIC - x86_init.timers.setup_percpu_clockev = vmi_time_bsp_init; - x86_cpuinit.setup_percpu_clockev = vmi_time_ap_init; -#endif - pv_time_ops.sched_clock = vmi_sched_clock; - x86_platform.calibrate_tsc = vmi_tsc_khz; - x86_platform.get_wallclock = vmi_get_wallclock; - x86_platform.set_wallclock = vmi_set_wallclock; - - /* We have true wallclock functions; disable CMOS clock sync */ - no_sync_cmos_clock = 1; - } else { - disable_noidle = 1; - disable_vmi_timer = 1; - } - - para_fill(pv_irq_ops.safe_halt, Halt); - - /* - * Alternative instruction rewriting doesn't happen soon enough - * to convert VMI_IRET to a call instead of a jump; so we have - * to do this before IRQs get reenabled. Fortunately, it is - * idempotent. 
- */ - apply_paravirt(__parainstructions, __parainstructions_end); - - vmi_bringup(); - - return 1; -} - -#undef para_fill - -void __init vmi_init(void) -{ - if (!vmi_rom) - probe_vmi_rom(); - else - check_vmi_rom(vmi_rom); - - /* In case probing for or validating the ROM failed, basil */ - if (!vmi_rom) - return; - - reserve_top_address(-vmi_rom->virtual_top); - -#ifdef CONFIG_X86_IO_APIC - /* This is virtual hardware; timer routing is wired correctly */ - no_timer_check = 1; -#endif -} - -void __init vmi_activate(void) -{ - unsigned long flags; - - if (!vmi_rom) - return; - - local_irq_save(flags); - activate_vmi(); - local_irq_restore(flags & X86_EFLAGS_IF); -} - -static int __init parse_vmi(char *arg) -{ - if (!arg) - return -EINVAL; - - if (!strcmp(arg, "disable_pge")) { - clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE); - disable_pge = 1; - } else if (!strcmp(arg, "disable_pse")) { - clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PSE); - disable_pse = 1; - } else if (!strcmp(arg, "disable_sep")) { - clear_cpu_cap(&boot_cpu_data, X86_FEATURE_SEP); - disable_sep = 1; - } else if (!strcmp(arg, "disable_tsc")) { - clear_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC); - disable_tsc = 1; - } else if (!strcmp(arg, "disable_mtrr")) { - clear_cpu_cap(&boot_cpu_data, X86_FEATURE_MTRR); - disable_mtrr = 1; - } else if (!strcmp(arg, "disable_timer")) { - disable_vmi_timer = 1; - disable_noidle = 1; - } else if (!strcmp(arg, "disable_noidle")) - disable_noidle = 1; - return 0; -} - -early_param("vmi", parse_vmi); diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c deleted file mode 100644 index 5e1ff66ecd7..00000000000 --- a/arch/x86/kernel/vmiclock_32.c +++ /dev/null @@ -1,317 +0,0 @@ -/* - * VMI paravirtual timer support routines. - * - * Copyright (C) 2007, VMware, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - */ - -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#define VMI_ONESHOT (VMI_ALARM_IS_ONESHOT | VMI_CYCLES_REAL | vmi_get_alarm_wiring()) -#define VMI_PERIODIC (VMI_ALARM_IS_PERIODIC | VMI_CYCLES_REAL | vmi_get_alarm_wiring()) - -static DEFINE_PER_CPU(struct clock_event_device, local_events); - -static inline u32 vmi_counter(u32 flags) -{ - /* Given VMI_ONESHOT or VMI_PERIODIC, return the corresponding - * cycle counter. 
*/ - return flags & VMI_ALARM_COUNTER_MASK; -} - -/* paravirt_ops.get_wallclock = vmi_get_wallclock */ -unsigned long vmi_get_wallclock(void) -{ - unsigned long long wallclock; - wallclock = vmi_timer_ops.get_wallclock(); // nsec - (void)do_div(wallclock, 1000000000); // sec - - return wallclock; -} - -/* paravirt_ops.set_wallclock = vmi_set_wallclock */ -int vmi_set_wallclock(unsigned long now) -{ - return 0; -} - -/* paravirt_ops.sched_clock = vmi_sched_clock */ -unsigned long long vmi_sched_clock(void) -{ - return cycles_2_ns(vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE)); -} - -/* x86_platform.calibrate_tsc = vmi_tsc_khz */ -unsigned long vmi_tsc_khz(void) -{ - unsigned long long khz; - khz = vmi_timer_ops.get_cycle_frequency(); - (void)do_div(khz, 1000); - return khz; -} - -static inline unsigned int vmi_get_timer_vector(void) -{ - return IRQ0_VECTOR; -} - -/** vmi clockchip */ -#ifdef CONFIG_X86_LOCAL_APIC -static unsigned int startup_timer_irq(unsigned int irq) -{ - unsigned long val = apic_read(APIC_LVTT); - apic_write(APIC_LVTT, vmi_get_timer_vector()); - - return (val & APIC_SEND_PENDING); -} - -static void mask_timer_irq(unsigned int irq) -{ - unsigned long val = apic_read(APIC_LVTT); - apic_write(APIC_LVTT, val | APIC_LVT_MASKED); -} - -static void unmask_timer_irq(unsigned int irq) -{ - unsigned long val = apic_read(APIC_LVTT); - apic_write(APIC_LVTT, val & ~APIC_LVT_MASKED); -} - -static void ack_timer_irq(unsigned int irq) -{ - ack_APIC_irq(); -} - -static struct irq_chip vmi_chip __read_mostly = { - .name = "VMI-LOCAL", - .startup = startup_timer_irq, - .mask = mask_timer_irq, - .unmask = unmask_timer_irq, - .ack = ack_timer_irq -}; -#endif - -/** vmi clockevent */ -#define VMI_ALARM_WIRED_IRQ0 0x00000000 -#define VMI_ALARM_WIRED_LVTT 0x00010000 -static int vmi_wiring = VMI_ALARM_WIRED_IRQ0; - -static inline int vmi_get_alarm_wiring(void) -{ - return vmi_wiring; -} - -static void vmi_timer_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) -{ - cycle_t now, cycles_per_hz; - BUG_ON(!irqs_disabled()); - - switch (mode) { - case CLOCK_EVT_MODE_ONESHOT: - case CLOCK_EVT_MODE_RESUME: - break; - case CLOCK_EVT_MODE_PERIODIC: - cycles_per_hz = vmi_timer_ops.get_cycle_frequency(); - (void)do_div(cycles_per_hz, HZ); - now = vmi_timer_ops.get_cycle_counter(vmi_counter(VMI_PERIODIC)); - vmi_timer_ops.set_alarm(VMI_PERIODIC, now, cycles_per_hz); - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - switch (evt->mode) { - case CLOCK_EVT_MODE_ONESHOT: - vmi_timer_ops.cancel_alarm(VMI_ONESHOT); - break; - case CLOCK_EVT_MODE_PERIODIC: - vmi_timer_ops.cancel_alarm(VMI_PERIODIC); - break; - default: - break; - } - break; - default: - break; - } -} - -static int vmi_timer_next_event(unsigned long delta, - struct clock_event_device *evt) -{ - /* Unfortunately, set_next_event interface only passes relative - * expiry, but we want absolute expiry. It'd be better if were - * were passed an absolute expiry, since a bunch of time may - * have been stolen between the time the delta is computed and - * when we set the alarm below. 
*/ - cycle_t now = vmi_timer_ops.get_cycle_counter(vmi_counter(VMI_ONESHOT)); - - BUG_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT); - vmi_timer_ops.set_alarm(VMI_ONESHOT, now + delta, 0); - return 0; -} - -static struct clock_event_device vmi_clockevent = { - .name = "vmi-timer", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .shift = 22, - .set_mode = vmi_timer_set_mode, - .set_next_event = vmi_timer_next_event, - .rating = 1000, - .irq = 0, -}; - -static irqreturn_t vmi_timer_interrupt(int irq, void *dev_id) -{ - struct clock_event_device *evt = &__get_cpu_var(local_events); - evt->event_handler(evt); - return IRQ_HANDLED; -} - -static struct irqaction vmi_clock_action = { - .name = "vmi-timer", - .handler = vmi_timer_interrupt, - .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER, -}; - -static void __devinit vmi_time_init_clockevent(void) -{ - cycle_t cycles_per_msec; - struct clock_event_device *evt; - - int cpu = smp_processor_id(); - evt = &__get_cpu_var(local_events); - - /* Use cycles_per_msec since div_sc params are 32-bits. */ - cycles_per_msec = vmi_timer_ops.get_cycle_frequency(); - (void)do_div(cycles_per_msec, 1000); - - memcpy(evt, &vmi_clockevent, sizeof(*evt)); - /* Must pick .shift such that .mult fits in 32-bits. Choosing - * .shift to be 22 allows 2^(32-22) cycles per nano-seconds - * before overflow. */ - evt->mult = div_sc(cycles_per_msec, NSEC_PER_MSEC, evt->shift); - /* Upper bound is clockevent's use of ulong for cycle deltas. */ - evt->max_delta_ns = clockevent_delta2ns(ULONG_MAX, evt); - evt->min_delta_ns = clockevent_delta2ns(1, evt); - evt->cpumask = cpumask_of(cpu); - - printk(KERN_WARNING "vmi: registering clock event %s. mult=%u shift=%u\n", - evt->name, evt->mult, evt->shift); - clockevents_register_device(evt); -} - -void __init vmi_time_init(void) -{ - unsigned int cpu; - /* Disable PIT: BIOSes start PIT CH0 with 18.2hz peridic. */ - outb_pit(0x3a, PIT_MODE); /* binary, mode 5, LSB/MSB, ch 0 */ - - vmi_time_init_clockevent(); - setup_irq(0, &vmi_clock_action); - for_each_possible_cpu(cpu) - per_cpu(vector_irq, cpu)[vmi_get_timer_vector()] = 0; -} - -#ifdef CONFIG_X86_LOCAL_APIC -void __devinit vmi_time_bsp_init(void) -{ - /* - * On APIC systems, we want local timers to fire on each cpu. We do - * this by programming LVTT to deliver timer events to the IRQ handler - * for IRQ-0, since we can't re-use the APIC local timer handler - * without interfering with that code. - */ - clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); - local_irq_disable(); -#ifdef CONFIG_SMP - /* - * XXX handle_percpu_irq only defined for SMP; we need to switch over - * to using it, since this is a local interrupt, which each CPU must - * handle individually without locking out or dropping simultaneous - * local timers on other CPUs. We also don't want to trigger the - * quirk workaround code for interrupts which gets invoked from - * handle_percpu_irq via eoi, so we use our own IRQ chip. 
- */ - set_irq_chip_and_handler_name(0, &vmi_chip, handle_percpu_irq, "lvtt"); -#else - set_irq_chip_and_handler_name(0, &vmi_chip, handle_edge_irq, "lvtt"); -#endif - vmi_wiring = VMI_ALARM_WIRED_LVTT; - apic_write(APIC_LVTT, vmi_get_timer_vector()); - local_irq_enable(); - clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL); -} - -void __devinit vmi_time_ap_init(void) -{ - vmi_time_init_clockevent(); - apic_write(APIC_LVTT, vmi_get_timer_vector()); -} -#endif - -/** vmi clocksource */ -static struct clocksource clocksource_vmi; - -static cycle_t read_real_cycles(struct clocksource *cs) -{ - cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL); - return max(ret, clocksource_vmi.cycle_last); -} - -static struct clocksource clocksource_vmi = { - .name = "vmi-timer", - .rating = 450, - .read = read_real_cycles, - .mask = CLOCKSOURCE_MASK(64), - .mult = 0, /* to be set */ - .shift = 22, - .flags = CLOCK_SOURCE_IS_CONTINUOUS, -}; - -static int __init init_vmi_clocksource(void) -{ - cycle_t cycles_per_msec; - - if (!vmi_timer_ops.get_cycle_frequency) - return 0; - /* Use khz2mult rather than hz2mult since hz arg is only 32-bits. */ - cycles_per_msec = vmi_timer_ops.get_cycle_frequency(); - (void)do_div(cycles_per_msec, 1000); - - /* Note that clocksource.{mult, shift} converts in the opposite direction - * as clockevents. */ - clocksource_vmi.mult = clocksource_khz2mult(cycles_per_msec, - clocksource_vmi.shift); - - printk(KERN_WARNING "vmi: registering clock source khz=%lld\n", cycles_per_msec); - return clocksource_register(&clocksource_vmi); - -} -module_init(init_vmi_clocksource); -- cgit v1.2.3-70-g09d2 From b0f4c062fb6dd4c02b1fe6de73319ed50a09b27d Mon Sep 17 00:00:00 2001 From: Alok Kataria Date: Mon, 23 Aug 2010 17:05:57 -0700 Subject: x86, paravirt: Remove alloc_pmd_clone hook, only used by VMI VMI was the only user of the alloc_pmd_clone hook, given that VMI is now removed we can also remove this hook. Signed-off-by: Alok N Kataria LKML-Reference: <1282608357.19396.36.camel@ank32.eng.vmware.com> Cc: Jeremy Fitzhardinge Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/paravirt.h | 5 ----- arch/x86/include/asm/paravirt_types.h | 1 - arch/x86/kernel/paravirt.c | 1 - arch/x86/mm/pgtable.c | 4 ---- arch/x86/xen/mmu.c | 1 - 5 files changed, 12 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 5653f43d90e..edecb4ed221 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -416,11 +416,6 @@ static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn); } -static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn, - unsigned long start, unsigned long count) -{ - PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count); -} static inline void paravirt_release_pmd(unsigned long pfn) { PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn); diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index db9ef553234..b82bac97525 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -255,7 +255,6 @@ struct pv_mmu_ops { */ void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn); void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn); - void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count); void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn); void (*release_pte)(unsigned long pfn); void (*release_pmd)(unsigned long pfn); diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 1db183ed7c0..c5b250011fd 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -413,7 +413,6 @@ struct pv_mmu_ops pv_mmu_ops = { .alloc_pte = paravirt_nop, .alloc_pmd = paravirt_nop, - .alloc_pmd_clone = paravirt_nop, .alloc_pud = paravirt_nop, .release_pte = paravirt_nop, .release_pmd = paravirt_nop, diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 5c4ee422590..a96023e872a 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -98,10 +98,6 @@ static void pgd_ctor(pgd_t *pgd) clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY, swapper_pg_dir + KERNEL_PGD_BOUNDARY, KERNEL_PGD_PTRS); - paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT, - __pa(swapper_pg_dir) >> PAGE_SHIFT, - KERNEL_PGD_BOUNDARY, - KERNEL_PGD_PTRS); } /* list required to sync kernel mapping updates */ diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 42086ac406a..b2363fcbcd0 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1969,7 +1969,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = { .alloc_pte = xen_alloc_pte_init, .release_pte = xen_release_pte_init, .alloc_pmd = xen_alloc_pmd_init, - .alloc_pmd_clone = paravirt_nop, .release_pmd = xen_release_pmd_init, #ifdef CONFIG_X86_64 -- cgit v1.2.3-70-g09d2 From 6afb5157b9eba4092e2f0f54d24a3806409bdde5 Mon Sep 17 00:00:00 2001 From: Haicheng Li Date: Wed, 19 May 2010 17:42:14 +0800 Subject: x86, mm: Separate x86_64 vmalloc_sync_all() into separate functions No behavior change. Move some of vmalloc_sync_all() code into a new function sync_global_pgds() that will be useful for memory hotplug. Signed-off-by: Haicheng Li LKML-Reference: <4C6E4ECD.1090607@linux.intel.com> Reviewed-by: Wu Fengguang Reviewed-by: Andi Kleen Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/pgtable_64.h | 2 ++ arch/x86/mm/fault.c | 24 +----------------------- arch/x86/mm/init_64.c | 30 ++++++++++++++++++++++++++++++ 3 files changed, 33 insertions(+), 23 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 076052cd62b..f96ac9bedf7 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -102,6 +102,8 @@ static inline void native_pgd_clear(pgd_t *pgd) native_set_pgd(pgd, native_make_pgd(0)); } +extern void sync_global_pgds(unsigned long start, unsigned long end); + /* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 4c4508e8a20..51f7ee71d6c 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -326,29 +326,7 @@ out: void vmalloc_sync_all(void) { - unsigned long address; - - for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END; - address += PGDIR_SIZE) { - - const pgd_t *pgd_ref = pgd_offset_k(address); - unsigned long flags; - struct page *page; - - if (pgd_none(*pgd_ref)) - continue; - - spin_lock_irqsave(&pgd_lock, flags); - list_for_each_entry(page, &pgd_list, lru) { - pgd_t *pgd; - pgd = (pgd_t *)page_address(page) + pgd_index(address); - if (pgd_none(*pgd)) - set_pgd(pgd, *pgd_ref); - else - BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); - } - spin_unlock_irqrestore(&pgd_lock, flags); - } + sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END); } /* diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 9a6674689a2..61a1b4fdecb 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -97,6 +97,36 @@ static int __init nonx32_setup(char *str) } __setup("noexec32=", nonx32_setup); +/* + * When memory was added/removed make sure all the processes MM have + * suitable PGD entries in the local PGD level page. + */ +void sync_global_pgds(unsigned long start, unsigned long end) +{ + unsigned long address; + + for (address = start; address <= end; address += PGDIR_SIZE) { + const pgd_t *pgd_ref = pgd_offset_k(address); + unsigned long flags; + struct page *page; + + if (pgd_none(*pgd_ref)) + continue; + + spin_lock_irqsave(&pgd_lock, flags); + list_for_each_entry(page, &pgd_list, lru) { + pgd_t *pgd; + pgd = (pgd_t *)page_address(page) + pgd_index(address); + if (pgd_none(*pgd)) + set_pgd(pgd, *pgd_ref); + else + BUG_ON(pgd_page_vaddr(*pgd) + != pgd_page_vaddr(*pgd_ref)); + } + spin_unlock_irqrestore(&pgd_lock, flags); + } +} + /* * NOTE: This function is marked __ref because it calls __init function * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0. -- cgit v1.2.3-70-g09d2 From 0444ad93ea2449963132d68753020a6a24d69895 Mon Sep 17 00:00:00 2001 From: Konrad Rzeszutek Wilk Date: Thu, 26 Aug 2010 13:57:56 -0400 Subject: x86, iommu: Add IOMMU_INIT macros, .iommu_table section, and iommu_table_entry structure This patch set adds a mechanism to "modularize" the IOMMUs we have on X86. Currently the count of IOMMUs is up to six and they have a complex relationship that requires careful execution order. 'pci_iommu_alloc' does that today, but most folks are unhappy with how it does it. This patch set addresses this and also paves a mechanism to jettison unused IOMMUs during run-time. 
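(To make the ordering problem concrete: below is a minimal, standalone C sketch of the idea, a table of detection routines where each entry may name the routine that has to run before it, swept repeatedly until every entry has run. The struct and function names here, demo_entry, swiotlb_detect and so on, are invented purely for illustration; the real interface is the struct iommu_table_entry and the IOMMU_INIT* macros added by the patch below.)

/* demo_iommu_order.c - standalone illustration, not kernel code */
#include <stdio.h>
#include <stddef.h>

static int swiotlb_detect(void) { puts("swiotlb detect"); return 1; }
static int gart_detect(void)    { puts("gart detect");    return 0; }
static int amd_vi_detect(void)  { puts("amd-vi detect");  return 0; }

struct demo_entry {
	int (*detect)(void);	/* detection routine */
	int (*depend)(void);	/* routine that must run first, or NULL */
	int done;
};

/* Deliberately listed out of dependency order. */
static struct demo_entry table[] = {
	{ amd_vi_detect,  gart_detect,    0 },
	{ gart_detect,    swiotlb_detect, 0 },
	{ swiotlb_detect, NULL,           0 },
};

#define TABLE_LEN (sizeof(table) / sizeof(table[0]))

static int has_run(int (*fn)(void))
{
	size_t i;

	if (!fn)
		return 1;	/* no dependency at all */
	for (i = 0; i < TABLE_LEN; i++)
		if (table[i].detect == fn)
			return table[i].done;
	return 1;		/* dependency not in the table: do not block */
}

int main(void)
{
	size_t i, ran;

	do {			/* sweep until no entry makes progress */
		ran = 0;
		for (i = 0; i < TABLE_LEN; i++) {
			if (table[i].done || !has_run(table[i].depend))
				continue;
			table[i].detect();	/* runs only after its dependency */
			table[i].done = 1;
			ran++;
		}
	} while (ran);
	return 0;	/* prints: swiotlb, gart, amd-vi, in that order */
}

The kernel variant reaches the same result differently: the section-placed array is sorted once by dependency at boot and then walked linearly, as the following patches show.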
For details that sparked this, please refer to: http://lkml.org/lkml/2010/8/2/282 The first solution that comes to mind is to convert wholesale the IOMMU detection routines to be called during initcall time frame. Unfortunately that misses the dependency relationship that some of the IOMMUs have (for example: for AMD-Vi IOMMU to work, GART detection MUST run first, and before all of that SWIOTLB MUST run). The second solution would be to introduce a registration call wherein the IOMMU would provide its detection/init routines and as well on what MUST run before it. That would work, except that the 'pci_iommu_alloc' which would run through this list, is called during mem_init. This means we don't have any memory allocator, and it is so early that we haven't yet started running through the initcall_t list. This solution borrows concepts from the 2nd idea and from how MODULE_INIT works. A macro is provided that each IOMMU uses to define it's detect function and early_init (before the memory allocate is active), and as well what other IOMMU MUST run before us. Since most IOMMUs depend on having SWIOTLB run first ("pci_swiotlb_detect") a convenience macro to depends on that is also provided. This macro is similar in design to MODULE_PARAM macro wherein we setup a .iommu_table section in which we populate it with the values that match a struct iommu_table_entry. During bootup we will sort through the array so that the IOMMUs that MUST run before us are first elements in the array. And then we just iterate through them calling the detection routine and if appropiate, the init routines. Signed-off-by: Konrad Rzeszutek Wilk LKML-Reference: <1282845485-8991-2-git-send-email-konrad.wilk@oracle.com> CC: H. Peter Anvin CC: Fujita Tomonori CC: Thomas Gleixner CC: Ingo Molnar Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/iommu_table.h | 95 ++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/vmlinux.lds.S | 7 +++ 2 files changed, 102 insertions(+) create mode 100644 arch/x86/include/asm/iommu_table.h (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/iommu_table.h b/arch/x86/include/asm/iommu_table.h new file mode 100644 index 00000000000..435176f96a5 --- /dev/null +++ b/arch/x86/include/asm/iommu_table.h @@ -0,0 +1,95 @@ + +#ifndef _ASM_X86_IOMMU_TABLE_H +#define _ASM_X86_IOMMU_TABLE_H + +#include + +/* + * History lesson: + * The execution chain of IOMMUs in 2.6.36 looks as so: + * + * [xen-swiotlb] + * | + * +----[swiotlb *]--+ + * / | \ + * / | \ + * [GART] [Calgary] [Intel VT-d] + * / + * / + * [AMD-Vi] + * + * *: if SWIOTLB detected 'iommu=soft'/'swiotlb=force' it would skip + * over the rest of IOMMUs and unconditionally initialize the SWIOTLB. + * Also it would surreptitiously initialize set the swiotlb=1 if there were + * more than 4GB and if the user did not pass in 'iommu=off'. The swiotlb + * flag would be turned off by all IOMMUs except the Calgary one. + * + * The IOMMU_INIT* macros allow a similar tree (or more complex if desired) + * to be built by defining who we depend on. + * + * And all that needs to be done is to use one of the macros in the IOMMU + * and the pci-dma.c will take care of the rest. + */ + +struct iommu_table_entry { + initcall_t detect; + initcall_t depend; + void (*early_init)(void); /* No memory allocate available. */ + void (*late_init)(void); /* Yes, can allocate memory. 
*/ +#define IOMMU_FINISH_IF_DETECTED (1<<0) +#define IOMMU_DETECTED (1<<1) + int flags; +}; +/* + * Macro fills out an entry in the .iommu_table that is equivalent + * to the fields that 'struct iommu_table_entry' has. The entries + * that are put in the .iommu_table section are not put in any order + * hence during boot-time we will have to resort them based on + * dependency. */ + + +#define __IOMMU_INIT(_detect, _depend, _early_init, _late_init, _finish)\ + static const struct iommu_table_entry const \ + __iommu_entry_##_detect __used \ + __attribute__ ((unused, __section__(".iommu_table"), \ + aligned((sizeof(void *))))) \ + = {_detect, _depend, _early_init, _late_init, \ + _finish ? IOMMU_FINISH_IF_DETECTED : 0} +/* + * The simplest IOMMU definition. Provide the detection routine + * and it will be run after the SWIOTLB and the other IOMMUs + * that utilize this macro. If the IOMMU is detected (ie, the + * detect routine returns a positive value), the other IOMMUs + * are also checked. You can use IOMMU_INIT_FINISH if you prefer + * to stop detecting the other IOMMUs after yours has been detected. + */ +#define IOMMU_INIT_POST(_detect) \ + __IOMMU_INIT(_detect, pci_swiotlb_detect, 0, 0, 0) + +#define IOMMU_INIT_POST_FINISH(detect) \ + __IOMMU_INIT(_detect, pci_swiotlb_detect, 0, 0, 1) + +/* + * A more sophisticated version of IOMMU_INIT. This variant requires: + * a). A detection routine function. + * b). The name of the detection routine we depend on to get called + * before us. + * c). The init routine which gets called if the detection routine + * returns a positive value from the pci_iommu_alloc. This means + * no presence of a memory allocator. + * d). Similar to the 'init', except that this gets called from pci_iommu_init + * where we do have a memory allocator. + * + * The _CONT vs the _EXIT differs in that the _CONT variant will + * continue detecting other IOMMUs in the call list after the + * the detection routine returns a positive number. The _EXIT will + * stop the execution chain. Both will still call the 'init' and + * 'late_init' functions if they are set. + */ +#define IOMMU_INIT_FINISH(_detect, _depend, _init, _late_init) \ + __IOMMU_INIT(_detect, _depend, _init, _late_init, 1) + +#define IOMMU_INIT(_detect, _depend, _init, _late_init) \ + __IOMMU_INIT(_detect, _depend, _init, _late_init, 0) + +#endif /* _ASM_X86_IOMMU_TABLE_H */ diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index d0bb52296fa..b92e040466c 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -260,6 +260,13 @@ SECTIONS *(.altinstr_replacement) } + .iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) { + __iommu_table = .; + *(.iommu_table) + . = ALIGN(8); + __iommu_table_end = .; + } + /* * .exit.text is discard at runtime, not link time, to deal with * references from .altinstructions and .eh_frame -- cgit v1.2.3-70-g09d2 From 480125ba49ba62be93beea37770f266846e077ab Mon Sep 17 00:00:00 2001 From: Konrad Rzeszutek Wilk Date: Thu, 26 Aug 2010 13:57:57 -0400 Subject: x86, iommu: Make all IOMMU's detection routines return a value. We return 1 if the IOMMU has been detected. Zero or an error number if we failed to find it. This is in preperation of using the IOMMU_INIT so that we can detect whether an IOMMU is present. I have not tested this for regression on Calgary, nor on AMD Vi chipsets as I don't have that hardware. CC: Muli Ben-Yehuda CC: "Jon D. Mason" CC: "Darrick J. 
Wong" CC: Jesse Barnes CC: David Woodhouse CC: Chris Wright CC: Yinghai Lu CC: Joerg Roedel CC: H. Peter Anvin CC: Fujita Tomonori Signed-off-by: Konrad Rzeszutek Wilk LKML-Reference: <1282845485-8991-3-git-send-email-konrad.wilk@oracle.com> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/amd_iommu.h | 4 ++-- arch/x86/include/asm/calgary.h | 4 ++-- arch/x86/include/asm/gart.h | 5 +++-- arch/x86/kernel/amd_iommu_init.c | 8 +++++--- arch/x86/kernel/aperture_64.c | 11 +++++++---- arch/x86/kernel/pci-calgary_64.c | 15 ++++++++------- drivers/pci/dmar.c | 4 +++- include/linux/dmar.h | 6 +++--- 8 files changed, 33 insertions(+), 24 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h index 5af2982133b..2798142cdb4 100644 --- a/arch/x86/include/asm/amd_iommu.h +++ b/arch/x86/include/asm/amd_iommu.h @@ -24,11 +24,11 @@ #ifdef CONFIG_AMD_IOMMU -extern void amd_iommu_detect(void); +extern int amd_iommu_detect(void); #else -static inline void amd_iommu_detect(void) { } +static inline int amd_iommu_detect(void) { return -ENODEV; } #endif diff --git a/arch/x86/include/asm/calgary.h b/arch/x86/include/asm/calgary.h index 0918654305a..0d467b33883 100644 --- a/arch/x86/include/asm/calgary.h +++ b/arch/x86/include/asm/calgary.h @@ -62,9 +62,9 @@ struct cal_chipset_ops { extern int use_calgary; #ifdef CONFIG_CALGARY_IOMMU -extern void detect_calgary(void); +extern int detect_calgary(void); #else -static inline void detect_calgary(void) { return; } +static inline int detect_calgary(void) { return -ENODEV; } #endif #endif /* _ASM_X86_CALGARY_H */ diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h index 4ac5b0f33fc..d7d1d4c438a 100644 --- a/arch/x86/include/asm/gart.h +++ b/arch/x86/include/asm/gart.h @@ -37,7 +37,7 @@ extern int gart_iommu_aperture_disabled; extern void early_gart_iommu_check(void); extern int gart_iommu_init(void); extern void __init gart_parse_options(char *); -extern void gart_iommu_hole_init(void); +extern int gart_iommu_hole_init(void); #else #define gart_iommu_aperture 0 @@ -50,8 +50,9 @@ static inline void early_gart_iommu_check(void) static inline void gart_parse_options(char *options) { } -static inline void gart_iommu_hole_init(void) +static inline int gart_iommu_hole_init(void) { + return -ENODEV; } #endif diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index 3cc63e2b8dd..0b9e2dc4fc9 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c @@ -1382,13 +1382,13 @@ static int __init early_amd_iommu_detect(struct acpi_table_header *table) return 0; } -void __init amd_iommu_detect(void) +int __init amd_iommu_detect(void) { if (no_iommu || (iommu_detected && !gart_iommu_aperture)) - return; + return -ENODEV; if (amd_iommu_disabled) - return; + return -ENODEV; if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) { iommu_detected = 1; @@ -1397,7 +1397,9 @@ void __init amd_iommu_detect(void) /* Make sure ACS will be enabled */ pci_request_acs(); + return 1; } + return -ENODEV; } /**************************************************************************** diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index a2e0caf26e1..afa0dab3302 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -371,7 +371,7 @@ void __init early_gart_iommu_check(void) static int __initdata printed_gart_size_msg; -void __init gart_iommu_hole_init(void) +int __init gart_iommu_hole_init(void) { u32 
agp_aper_base = 0, agp_aper_order = 0; u32 aper_size, aper_alloc = 0, aper_order = 0, last_aper_order = 0; @@ -381,7 +381,7 @@ void __init gart_iommu_hole_init(void) if (gart_iommu_aperture_disabled || !fix_aperture || !early_pci_allowed()) - return; + return -ENODEV; printk(KERN_INFO "Checking aperture...\n"); @@ -463,8 +463,9 @@ out: unsigned long n = (32 * 1024 * 1024) << last_aper_order; insert_aperture_resource((u32)last_aper_base, n); + return 1; } - return; + return 0; } if (!fallback_aper_force) { @@ -500,7 +501,7 @@ out: panic("Not enough memory for aperture"); } } else { - return; + return 0; } /* Fix up the north bridges */ @@ -524,4 +525,6 @@ out: } set_up_gart_resume(aper_order, aper_alloc); + + return 1; } diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index 078d4ec1a9d..28c6b389fee 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c @@ -1364,7 +1364,7 @@ static int __init calgary_iommu_init(void) return 0; } -void __init detect_calgary(void) +int __init detect_calgary(void) { int bus; void *tbl; @@ -1378,13 +1378,13 @@ void __init detect_calgary(void) * another HW IOMMU already, bail out. */ if (no_iommu || iommu_detected) - return; + return -ENODEV; if (!use_calgary) - return; + return -ENODEV; if (!early_pci_allowed()) - return; + return -ENODEV; printk(KERN_DEBUG "Calgary: detecting Calgary via BIOS EBDA area\n"); @@ -1410,13 +1410,13 @@ void __init detect_calgary(void) if (!rio_table_hdr) { printk(KERN_DEBUG "Calgary: Unable to locate Rio Grande table " "in EBDA - bailing!\n"); - return; + return -ENODEV; } ret = build_detail_arrays(); if (ret) { printk(KERN_DEBUG "Calgary: build_detail_arrays ret %d\n", ret); - return; + return -ENOMEM; } specified_table_size = determine_tce_table_size((is_kdump_kernel() ? @@ -1464,7 +1464,7 @@ void __init detect_calgary(void) x86_init.iommu.iommu_init = calgary_iommu_init; } - return; + return calgary_found; cleanup: for (--bus; bus >= 0; --bus) { @@ -1473,6 +1473,7 @@ cleanup: if (info->tce_space) free_tce_table(info->tce_space); } + return -ENOMEM; } static int __init calgary_parse_options(char *p) diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 0a19708074c..5fa64ea5416 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c @@ -687,7 +687,7 @@ failed: return 0; } -void __init detect_intel_iommu(void) +int __init detect_intel_iommu(void) { int ret; @@ -723,6 +723,8 @@ void __init detect_intel_iommu(void) } early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size); dmar_tbl = NULL; + + return (ret ? 1 : -ENODEV); } diff --git a/include/linux/dmar.h b/include/linux/dmar.h index d7cecc90ed3..a2060204151 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -57,15 +57,15 @@ extern int dmar_table_init(void); extern int dmar_dev_scope_init(void); /* Intel IOMMU detection */ -extern void detect_intel_iommu(void); +extern int detect_intel_iommu(void); extern int enable_drhd_fault_handling(void); extern int parse_ioapics_under_ir(void); extern int alloc_iommu(struct dmar_drhd_unit *); #else -static inline void detect_intel_iommu(void) +static inline int detect_intel_iommu(void) { - return; + return -ENODEV; } static inline int dmar_table_init(void) -- cgit v1.2.3-70-g09d2 From 5bef80a4b826b9cee1c6aec7ecc371ec395260cc Mon Sep 17 00:00:00 2001 From: Konrad Rzeszutek Wilk Date: Thu, 26 Aug 2010 13:57:58 -0400 Subject: x86, iommu: Add proper dependency sort routine (and sanity check). 
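(Before the commit description and diff that follow, a standalone sketch of the scheme may help: the array is swept in place, and whenever an entry's dependency is found at a later position, the two entries are swapped and the current slot is re-examined. The struct demo_ent and the integer ids standing in for the ->detect function pointers are invented for this sketch only; the real routine is sort_iommu_table() in pci-iommu_table.c below.)

/* demo_dep_sort.c - standalone sketch of an in-place dependency sort */
#include <stdio.h>

struct demo_ent {
	int id;		/* stands in for the ->detect function pointer */
	int depend;	/* id that must come first, 0 = no dependency  */
};

static struct demo_ent *find_dep(struct demo_ent *start,
				 struct demo_ent *finish,
				 struct demo_ent *q)
{
	struct demo_ent *p;

	for (p = start; p < finish; p++)
		if (q->depend && p->id == q->depend)
			return p;
	return NULL;
}

static void dep_sort(struct demo_ent *start, struct demo_ent *finish)
{
	struct demo_ent *p, *q, tmp;

	for (p = start; p < finish; p++) {
again:
		q = find_dep(start, finish, p);
		if (q && q > p) {	/* dependency sits after us: swap */
			tmp = *p;
			*p = *q;
			*q = tmp;
			goto again;	/* re-check the new occupant of p */
		}
	}
}

int main(void)
{
	/* id 3 depends on 2, 2 depends on 1, 1 depends on nothing */
	struct demo_ent t[] = { { 3, 2 }, { 2, 1 }, { 1, 0 } };
	int i;

	dep_sort(t, t + 3);
	for (i = 0; i < 3; i++)
		printf("%d ", t[i].id);	/* prints: 1 2 3 */
	printf("\n");
	return 0;
}

The pass is quadratic in the worst case, which is acceptable for a handful of IOMMU entries; the sanity checker added by the patch additionally detects and breaks simple dependency cycles.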
We are using a very simple sort routine which sorts the .iommu_table array in the order of dependencies. Specifically, each struct iommu_table_entry has a field 'depend' which contains the function pointer to the IOMMU that MUST be run before us. We sort the array of structures so that the entries with no 'depend' field come first, and the subsequent ones are those for which the 'depend' function has already been invoked (in other words, the ones that precede us). Using the kernel's own 'sort', which is a heapsort, is feasible, but it would require making the comparison operator scan the array recursively to satisfy the "heapify" process: setting the levels properly. The end result would be much more complex than it should be, and it is just much simpler to use this simple sort routine. Signed-off-by: Konrad Rzeszutek Wilk LKML-Reference: <1282845485-8991-4-git-send-email-konrad.wilk@oracle.com> CC: H. Peter Anvin CC: Fujita Tomonori Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/iommu_table.h | 6 +++ arch/x86/kernel/Makefile | 1 + arch/x86/kernel/pci-iommu_table.c | 89 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 96 insertions(+) create mode 100644 arch/x86/kernel/pci-iommu_table.c (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/iommu_table.h b/arch/x86/include/asm/iommu_table.h index 435176f96a5..2124e3ef6f9 100644 --- a/arch/x86/include/asm/iommu_table.h +++ b/arch/x86/include/asm/iommu_table.h @@ -92,4 +92,10 @@ struct iommu_table_entry { #define IOMMU_INIT(_detect, _depend, _init, _late_init) \ __IOMMU_INIT(_detect, _depend, _init, _late_init, 0) +void sort_iommu_table(struct iommu_table_entry *start, + struct iommu_table_entry *finish); + +void check_iommu_entries(struct iommu_table_entry *start, + struct iommu_table_entry *finish); + #endif /* _ASM_X86_IOMMU_TABLE_H */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 0925676266b..6817546595e 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -42,6 +42,7 @@ obj-y += bootflag.o e820.o obj-y += pci-dma.o quirks.o i8237.o topology.o kdebugfs.o obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o obj-y += tsc.o io_delay.o rtc.o +obj-y += pci-iommu_table.o obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o obj-y += process.o diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c new file mode 100644 index 00000000000..55d745ec118 --- /dev/null +++ b/arch/x86/kernel/pci-iommu_table.c @@ -0,0 +1,89 @@ +#include +#include +#include +#include + + +#define DEBUG 1 + +static struct iommu_table_entry * __init +find_dependents_of(struct iommu_table_entry *start, + struct iommu_table_entry *finish, + struct iommu_table_entry *q) +{ + struct iommu_table_entry *p; + + if (!q) + return NULL; + + for (p = start; p < finish; p++) + if (p->detect == q->depend) + return p; + + return NULL; +} + + +void __init sort_iommu_table(struct iommu_table_entry *start, + struct iommu_table_entry *finish) { + + struct iommu_table_entry *p, *q, tmp; + + for (p = start; p < finish; p++) { +again: + q = find_dependents_of(start, finish, p); + /* We are bit sneaky here. We use the memory address to figure + * out if the node we depend on is past our point, if so, swap.
+ */ + if (q > p) { + tmp = *p; + memmove(p, q, sizeof(*p)); + *q = tmp; + goto again; + } + } + +} + +#ifdef DEBUG +void __init check_iommu_entries(struct iommu_table_entry *start, + struct iommu_table_entry *finish) +{ + struct iommu_table_entry *p, *q, *x; + char sym_p[KSYM_SYMBOL_LEN]; + char sym_q[KSYM_SYMBOL_LEN]; + + /* Simple cyclic dependency checker. */ + for (p = start; p < finish; p++) { + q = find_dependents_of(start, finish, p); + x = find_dependents_of(start, finish, q); + if (p == x) { + sprint_symbol(sym_p, (unsigned long)p->detect); + sprint_symbol(sym_q, (unsigned long)q->detect); + + printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %s depends" \ + " on %s and vice-versa. BREAKING IT.\n", + sym_p, sym_q); + /* Heavy handed way..*/ + x->depend = 0; + } + } + + for (p = start; p < finish; p++) { + q = find_dependents_of(p, finish, p); + if (q && q > p) { + sprint_symbol(sym_p, (unsigned long)p->detect); + sprint_symbol(sym_q, (unsigned long)q->detect); + + printk(KERN_ERR "EXECUTION ORDER INVALID! %s "\ + "should be called before %s!\n", + sym_p, sym_q); + } + } +} +#else +inline void check_iommu_entries(struct iommu_table_entry *start, + struct iommu_table_entry *finish) +{ +} +#endif -- cgit v1.2.3-70-g09d2 From efa631c26d3bb1162b8f95008801db602217f52b Mon Sep 17 00:00:00 2001 From: Konrad Rzeszutek Wilk Date: Thu, 26 Aug 2010 13:57:59 -0400 Subject: x86, swiotlb: Simplify SWIOTLB pci_swiotlb_detect routine. In 'pci_swiotlb_detect' we used to do two different things: a). If user provided 'iommu=soft' or 'swiotlb=force' we would set swiotlb=1 and return 1 (and forcing pci-dma.c to call pci_swiotlb_init() immediately). b). If 4GB or more would be detected and if user did not specify iommu=off, we would set 'swiotlb=1' and return whatever 'a)' figured out. We simplify this by splitting a) and b) in two different routines. CC: Fujita Tomonori Signed-off-by: Konrad Rzeszutek Wilk LKML-Reference: <1282845485-8991-5-git-send-email-konrad.wilk@oracle.com> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/iommu_table.h | 4 ++-- arch/x86/include/asm/swiotlb.h | 13 +++++++++++-- arch/x86/kernel/pci-dma.c | 4 +++- arch/x86/kernel/pci-swiotlb.c | 33 +++++++++++++++++++++++++++------ 4 files changed, 43 insertions(+), 11 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/iommu_table.h b/arch/x86/include/asm/iommu_table.h index 2124e3ef6f9..df55a78888e 100644 --- a/arch/x86/include/asm/iommu_table.h +++ b/arch/x86/include/asm/iommu_table.h @@ -64,10 +64,10 @@ struct iommu_table_entry { * to stop detecting the other IOMMUs after yours has been detected. */ #define IOMMU_INIT_POST(_detect) \ - __IOMMU_INIT(_detect, pci_swiotlb_detect, 0, 0, 0) + __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, 0, 0, 0) #define IOMMU_INIT_POST_FINISH(detect) \ - __IOMMU_INIT(_detect, pci_swiotlb_detect, 0, 0, 1) + __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, 0, 0, 1) /* * A more sophisticated version of IOMMU_INIT. 
This variant requires: diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h index 8085277e1b8..977f1761a25 100644 --- a/arch/x86/include/asm/swiotlb.h +++ b/arch/x86/include/asm/swiotlb.h @@ -5,17 +5,26 @@ #ifdef CONFIG_SWIOTLB extern int swiotlb; -extern int __init pci_swiotlb_detect(void); +extern int __init pci_swiotlb_detect_override(void); +extern int __init pci_swiotlb_detect_4gb(void); extern void __init pci_swiotlb_init(void); +extern void __init pci_swiotlb_late_init(void); #else #define swiotlb 0 -static inline int pci_swiotlb_detect(void) +static inline int pci_swiotlb_detect_override(void) +{ + return 0; +} +static inline int pci_swiotlb_detect_4gb(void) { return 0; } static inline void pci_swiotlb_init(void) { } +static inline void pci_swiotlb_late_init(void) +{ +} #endif static inline void dma_mark_clean(void *addr, size_t size) {} diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 9f07cfcbd3a..1b3beb5075e 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -133,9 +133,11 @@ void __init pci_iommu_alloc(void) /* free the range so iommu could get some range less than 4G */ dma32_free_bootmem(); - if (pci_xen_swiotlb_detect() || pci_swiotlb_detect()) + if (pci_xen_swiotlb_detect() || pci_swiotlb_detect_override()) goto out; + pci_swiotlb_detect_4gb(); + gart_iommu_hole_init(); detect_calgary(); diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index a5bc528d432..c7a72faeb14 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c @@ -41,24 +41,33 @@ static struct dma_map_ops swiotlb_dma_ops = { }; /* - * pci_swiotlb_detect - set swiotlb to 1 if necessary + * pci_swiotlb_detect_override - set swiotlb to 1 if necessary * * This returns non-zero if we are forced to use swiotlb (by the boot * option). */ -int __init pci_swiotlb_detect(void) +int __init pci_swiotlb_detect_override(void) { int use_swiotlb = swiotlb | swiotlb_force; + if (swiotlb_force) + swiotlb = 1; + + return use_swiotlb; +} + +/* + * if 4GB or more detected (and iommu=off not set) return 1 + * and set swiotlb to 1. + */ +int __init pci_swiotlb_detect_4gb(void) +{ /* don't initialize swiotlb if iommu=off (no_iommu=1) */ #ifdef CONFIG_X86_64 if (!no_iommu && max_pfn > MAX_DMA32_PFN) swiotlb = 1; #endif - if (swiotlb_force) - swiotlb = 1; - - return use_swiotlb; + return swiotlb; } void __init pci_swiotlb_init(void) @@ -68,3 +77,15 @@ void __init pci_swiotlb_init(void) dma_ops = &swiotlb_dma_ops; } } + +void __init pci_swiotlb_late_init(void) +{ + /* An IOMMU turned us off. */ + if (!swiotlb) + swiotlb_free(); + else { + printk(KERN_INFO "PCI-DMA: " + "Using software bounce buffering for IO (SWIOTLB)\n"); + swiotlb_print_info(); + } +} -- cgit v1.2.3-70-g09d2 From fb74fb6db91abc3c1ceeb9d2c17b44866a12c63e Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:15 -0700 Subject: x86, memblock: Add memblock_x86_find_in_range_size() size is returned according free range. Will be used to find free ranges for early_memtest and memory corruption check Do not mess it up with lib/memblock.c yet. Signed-off-by: Yinghai Lu Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/memblock.h | 8 ++++ arch/x86/mm/Makefile | 2 + arch/x86/mm/memblock.c | 87 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 97 insertions(+) create mode 100644 arch/x86/include/asm/memblock.h create mode 100644 arch/x86/mm/memblock.c (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h new file mode 100644 index 00000000000..c14219a2616 --- /dev/null +++ b/arch/x86/include/asm/memblock.h @@ -0,0 +1,8 @@ +#ifndef _X86_MEMBLOCK_H +#define _X86_MEMBLOCK_H + +#define ARCH_DISCARD_MEMBLOCK + +u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align); + +#endif diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index a4c768397ba..55543397a8a 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -26,4 +26,6 @@ obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o obj-$(CONFIG_K8_NUMA) += k8topology_64.o obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o +obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o + obj-$(CONFIG_MEMTEST) += memtest.o diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c new file mode 100644 index 00000000000..26ba46234cb --- /dev/null +++ b/arch/x86/mm/memblock.c @@ -0,0 +1,87 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +/* Check for already reserved areas */ +static inline bool __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align) +{ + struct memblock_region *r; + u64 addr = *addrp, last; + u64 size = *sizep; + bool changed = false; + +again: + last = addr + size; + for_each_memblock(reserved, r) { + if (last > r->base && addr < r->base) { + size = r->base - addr; + changed = true; + goto again; + } + if (last > (r->base + r->size) && addr < (r->base + r->size)) { + addr = round_up(r->base + r->size, align); + size = last - addr; + changed = true; + goto again; + } + if (last <= (r->base + r->size) && addr >= r->base) { + (*sizep)++; + return false; + } + } + if (changed) { + *addrp = addr; + *sizep = size; + } + return changed; +} + +static u64 __init __memblock_x86_find_in_range_size(u64 ei_start, u64 ei_last, u64 start, + u64 *sizep, u64 align) +{ + u64 addr, last; + + addr = round_up(ei_start, align); + if (addr < start) + addr = round_up(start, align); + if (addr >= ei_last) + goto out; + *sizep = ei_last - addr; + while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last) + ; + last = addr + *sizep; + if (last > ei_last) + goto out; + + return addr; + +out: + return MEMBLOCK_ERROR; +} + +/* + * Find next free range after start, and size is returned in *sizep + */ +u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align) +{ + struct memblock_region *r; + + for_each_memblock(memory, r) { + u64 ei_start = r->base; + u64 ei_last = ei_start + r->size; + u64 addr; + + addr = __memblock_x86_find_in_range_size(ei_start, ei_last, start, + sizep, align); + + if (addr != MEMBLOCK_ERROR) + return addr; + } + + return MEMBLOCK_ERROR; +} -- cgit v1.2.3-70-g09d2 From 27de794365786b4cdc3461ed4e23af2a33f40612 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:15 -0700 Subject: x86, memblock: Add memblock_x86_to_bootmem() memblock_x86_to_bootmem() will reserve memblock.reserved.region in bootmem after bootmem is set up. We can use it to with all arches that support memblock later. Signed-off-by: Yinghai Lu Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/memblock.h | 1 + arch/x86/mm/memblock.c | 29 +++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h index c14219a2616..69cf853e931 100644 --- a/arch/x86/include/asm/memblock.h +++ b/arch/x86/include/asm/memblock.h @@ -4,5 +4,6 @@ #define ARCH_DISCARD_MEMBLOCK u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align); +void memblock_x86_to_bootmem(u64 start, u64 end); #endif diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c index 26ba46234cb..8101084d452 100644 --- a/arch/x86/mm/memblock.c +++ b/arch/x86/mm/memblock.c @@ -85,3 +85,32 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align) return MEMBLOCK_ERROR; } + +#ifndef CONFIG_NO_BOOTMEM +void __init memblock_x86_to_bootmem(u64 start, u64 end) +{ + int count; + u64 final_start, final_end; + struct memblock_region *r; + + /* Take out region array itself */ + memblock_free_reserved_regions(); + + count = memblock.reserved.cnt; + pr_info("(%d early reservations) ==> bootmem [%010llx-%010llx]\n", count, start, end - 1); + for_each_memblock(reserved, r) { + pr_info(" [%010llx-%010llx] ", (u64)r->base, (u64)r->base + r->size - 1); + final_start = max(start, r->base); + final_end = min(end, r->base + r->size); + if (final_start >= final_end) { + pr_cont("\n"); + continue; + } + pr_cont(" ==> [%010llx-%010llx]\n", final_start, final_end - 1); + reserve_bootmem_generic(final_start, final_end - final_start, BOOTMEM_DEFAULT); + } + + /* Put region array back ? */ + memblock_reserve_reserved_regions(); +} +#endif -- cgit v1.2.3-70-g09d2 From 9dc5d569c133819c1ce069ebb1d771c62de32580 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:15 -0700 Subject: x86, memblock: Add memblock_x86_reserve_range/memblock_x86_free_range They are wrappers for the core versions, which take start/end/name instead of base/size. This will make the x86 conversion easier. We could add more debug printouts later. -v2: change get_max_mapped() to memblock.default_alloc_limit according to Michael Ellerman and Ben; change to memblock_x86_reserve_range and memblock_x86_free_range according to Michael Ellerman -v3: call check_and_double after reserve/free, so we can avoid using find_memblock_area. Suggested by Michael Ellerman Signed-off-by: Yinghai Lu Cc: Benjamin Herrenschmidt Cc: Michael Ellerman Signed-off-by: H.
Peter Anvin --- arch/x86/include/asm/memblock.h | 3 +++ arch/x86/mm/memblock.c | 22 ++++++++++++++++++++++ 2 files changed, 25 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h index 69cf853e931..e11ddf059fa 100644 --- a/arch/x86/include/asm/memblock.h +++ b/arch/x86/include/asm/memblock.h @@ -6,4 +6,7 @@ u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align); void memblock_x86_to_bootmem(u64 start, u64 end); +void memblock_x86_reserve_range(u64 start, u64 end, char *name); +void memblock_x86_free_range(u64 start, u64 end); + #endif diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c index 8101084d452..9829eaf1dbd 100644 --- a/arch/x86/mm/memblock.c +++ b/arch/x86/mm/memblock.c @@ -114,3 +114,25 @@ void __init memblock_x86_to_bootmem(u64 start, u64 end) memblock_reserve_reserved_regions(); } #endif + +void __init memblock_x86_reserve_range(u64 start, u64 end, char *name) +{ + if (start == end) + return; + + if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx]\n", start, end)) + return; + + memblock_reserve(start, end - start); +} + +void __init memblock_x86_free_range(u64 start, u64 end) +{ + if (start == end) + return; + + if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx]\n", start, end)) + return; + + memblock_free(start, end - start); +} -- cgit v1.2.3-70-g09d2 From 4d5cf86ce187c0d3a4cdf233ab0cc6526ccbe01f Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:16 -0700 Subject: x86, memblock: Add get_free_all_memory_range() get_free_all_memory_range is for CONFIG_NO_BOOTMEM=y, and will be called by free_all_memory_core_early(). It will use early_node_map aka active ranges subtract memblock.reserved to get all free range, and those ranges will convert to slab pages. -v4: increase range size Signed-off-by: Yinghai Lu Cc: Jan Beulich Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/memblock.h | 2 + arch/x86/mm/memblock.c | 98 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 99 insertions(+), 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h index e11ddf059fa..72639ce65e8 100644 --- a/arch/x86/include/asm/memblock.h +++ b/arch/x86/include/asm/memblock.h @@ -8,5 +8,7 @@ void memblock_x86_to_bootmem(u64 start, u64 end); void memblock_x86_reserve_range(u64 start, u64 end, char *name); void memblock_x86_free_range(u64 start, u64 end); +struct range; +int get_free_all_memory_range(struct range **rangep, int nodeid); #endif diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c index 9829eaf1dbd..b4500604ab3 100644 --- a/arch/x86/mm/memblock.c +++ b/arch/x86/mm/memblock.c @@ -86,7 +86,103 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align) return MEMBLOCK_ERROR; } -#ifndef CONFIG_NO_BOOTMEM +static __init struct range *find_range_array(int count) +{ + u64 end, size, mem; + struct range *range; + + size = sizeof(struct range) * count; + end = memblock.current_limit; + + mem = memblock_find_in_range(0, end, size, sizeof(struct range)); + if (mem == MEMBLOCK_ERROR) + panic("can not find more space for range array"); + + /* + * This range is tempoaray, so don't reserve it, it will not be + * overlapped because We will not alloccate new buffer before + * We discard this one + */ + range = __va(mem); + memset(range, 0, size); + + return range; +} + +#ifdef CONFIG_NO_BOOTMEM +static void __init memblock_x86_subtract_reserved(struct range *range, int az) +{ + u64 final_start, final_end; + struct memblock_region *r; + + /* Take out region array itself at first*/ + memblock_free_reserved_regions(); + + pr_info("Subtract (%ld early reservations)\n", memblock.reserved.cnt); + + for_each_memblock(reserved, r) { + pr_info(" [%010llx-%010llx]\n", (u64)r->base, (u64)r->base + r->size - 1); + final_start = PFN_DOWN(r->base); + final_end = PFN_UP(r->base + r->size); + if (final_start >= final_end) + continue; + subtract_range(range, az, final_start, final_end); + } + + /* Put region array back ? 
*/ + memblock_reserve_reserved_regions(); +} + +struct count_data { + int nr; +}; + +static int __init count_work_fn(unsigned long start_pfn, + unsigned long end_pfn, void *datax) +{ + struct count_data *data = datax; + + data->nr++; + + return 0; +} + +static int __init count_early_node_map(int nodeid) +{ + struct count_data data; + + data.nr = 0; + work_with_active_regions(nodeid, count_work_fn, &data); + + return data.nr; +} + +int __init get_free_all_memory_range(struct range **rangep, int nodeid) +{ + int count; + struct range *range; + int nr_range; + + count = (memblock.reserved.cnt + count_early_node_map(nodeid)) * 2; + + range = find_range_array(count); + nr_range = 0; + + /* + * Use early_node_map[] and memblock.reserved.region to get range array + * at first + */ + nr_range = add_from_early_node_map(range, count, nr_range, nodeid); +#ifdef CONFIG_X86_32 + subtract_range(range, count, max_low_pfn, -1ULL); +#endif + memblock_x86_subtract_reserved(range, count); + nr_range = clean_sort_range(range, count); + + *rangep = range; + return nr_range; +} +#else void __init memblock_x86_to_bootmem(u64 start, u64 end) { int count; -- cgit v1.2.3-70-g09d2 From 88ba088c18457caaf8d2e5f8d36becc731a3d4f6 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:16 -0700 Subject: x86, memblock: Add memblock_x86_register_active_regions() and memblock_x86_hole_size() memblock_x86_register_active_regions() will be used to fill early_node_map, the result will be memblock.memory.region AND numa data memblock_x86_hole_size will be used to find hole size on memblock.memory.region with specified range. Signed-off-by: Yinghai Lu Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/memblock.h | 4 +++ arch/x86/mm/memblock.c | 66 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 70 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h index 72639ce65e8..16af28d3607 100644 --- a/arch/x86/include/asm/memblock.h +++ b/arch/x86/include/asm/memblock.h @@ -11,4 +11,8 @@ void memblock_x86_free_range(u64 start, u64 end); struct range; int get_free_all_memory_range(struct range **rangep, int nodeid); +void memblock_x86_register_active_regions(int nid, unsigned long start_pfn, + unsigned long last_pfn); +u64 memblock_x86_hole_size(u64 start, u64 end); + #endif diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c index b4500604ab3..53a7a5aebd6 100644 --- a/arch/x86/mm/memblock.c +++ b/arch/x86/mm/memblock.c @@ -232,3 +232,69 @@ void __init memblock_x86_free_range(u64 start, u64 end) memblock_free(start, end - start); } + +/* + * Finds an active region in the address range from start_pfn to last_pfn and + * returns its range in ei_startpfn and ei_endpfn for the memblock entry. 
+ */ +static int __init memblock_x86_find_active_region(const struct memblock_region *ei, + unsigned long start_pfn, + unsigned long last_pfn, + unsigned long *ei_startpfn, + unsigned long *ei_endpfn) +{ + u64 align = PAGE_SIZE; + + *ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT; + *ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT; + + /* Skip map entries smaller than a page */ + if (*ei_startpfn >= *ei_endpfn) + return 0; + + /* Skip if map is outside the node */ + if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn) + return 0; + + /* Check for overlaps */ + if (*ei_startpfn < start_pfn) + *ei_startpfn = start_pfn; + if (*ei_endpfn > last_pfn) + *ei_endpfn = last_pfn; + + return 1; +} + +/* Walk the memblock.memory map and register active regions within a node */ +void __init memblock_x86_register_active_regions(int nid, unsigned long start_pfn, + unsigned long last_pfn) +{ + unsigned long ei_startpfn; + unsigned long ei_endpfn; + struct memblock_region *r; + + for_each_memblock(memory, r) + if (memblock_x86_find_active_region(r, start_pfn, last_pfn, + &ei_startpfn, &ei_endpfn)) + add_active_range(nid, ei_startpfn, ei_endpfn); +} + +/* + * Find the hole size (in bytes) in the memory range. + * @start: starting address of the memory range to scan + * @end: ending address of the memory range to scan + */ +u64 __init memblock_x86_hole_size(u64 start, u64 end) +{ + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long last_pfn = end >> PAGE_SHIFT; + unsigned long ei_startpfn, ei_endpfn, ram = 0; + struct memblock_region *r; + + for_each_memblock(memory, r) + if (memblock_x86_find_active_region(r, start_pfn, last_pfn, + &ei_startpfn, &ei_endpfn)) + ram += ei_endpfn - ei_startpfn; + + return end - start - ((u64)ram << PAGE_SHIFT); +} -- cgit v1.2.3-70-g09d2 From 6bcc8176d07f108da3b1af17fb2c0e82c80e948e Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:16 -0700 Subject: x86, memblock: Add memblock_x86_find_in_range_node() It can be used to find NODE_DATA for numa. Need to make sure early_node_map[] is filled before it is called, otherwise it will fallback to memblock_find_in_range(), with node range. Signed-off-by: Yinghai Lu Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/memblock.h | 1 + arch/x86/mm/memblock.c | 15 +++++++++++++++ 2 files changed, 16 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h index 16af28d3607..3a86b10380f 100644 --- a/arch/x86/include/asm/memblock.h +++ b/arch/x86/include/asm/memblock.h @@ -14,5 +14,6 @@ int get_free_all_memory_range(struct range **rangep, int nodeid); void memblock_x86_register_active_regions(int nid, unsigned long start_pfn, unsigned long last_pfn); u64 memblock_x86_hole_size(u64 start, u64 end); +u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align); #endif diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c index 53a7a5aebd6..22ff0a39b22 100644 --- a/arch/x86/mm/memblock.c +++ b/arch/x86/mm/memblock.c @@ -233,6 +233,21 @@ void __init memblock_x86_free_range(u64 start, u64 end) memblock_free(start, end - start); } +/* + * Need to call this function after memblock_x86_register_active_regions, + * so early_node_map[] is filled already. 
+ */ +u64 __init memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align) +{ + u64 addr; + addr = find_memory_core_early(nid, size, align, start, end); + if (addr != MEMBLOCK_ERROR) + return addr; + + /* Fallback, should already have start end within node range */ + return memblock_find_in_range(start, end, size, align); +} + /* * Finds an active region in the address range from start_pfn to last_pfn and * returns its range in ei_startpfn and ei_endpfn for the memblock entry. -- cgit v1.2.3-70-g09d2 From b52c17ce854125700c4e19d4427d39bf2504ff63 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:16 -0700 Subject: x86, memblock: Add memblock_x86_free_memory_in_range() It will return free memory size in specified range. We can not use memory_size - reserved_size here, because some reserved area may not be in the scope of memblock.memory.region. Use memblock.memory.region subtracting memblock.reserved.region to get free range array. then count size of all free ranges. -v2: Ben insist on using _in_range Signed-off-by: Yinghai Lu Cc: Benjamin Herrenschmidt Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/memblock.h | 1 + arch/x86/mm/memblock.c | 48 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h index 3a86b10380f..fc3c230812e 100644 --- a/arch/x86/include/asm/memblock.h +++ b/arch/x86/include/asm/memblock.h @@ -15,5 +15,6 @@ void memblock_x86_register_active_regions(int nid, unsigned long start_pfn, unsigned long last_pfn); u64 memblock_x86_hole_size(u64 start, u64 end); u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align); +u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit); #endif diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c index 22ff0a39b22..30d60cf29ce 100644 --- a/arch/x86/mm/memblock.c +++ b/arch/x86/mm/memblock.c @@ -211,6 +211,54 @@ void __init memblock_x86_to_bootmem(u64 start, u64 end) } #endif +u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit) +{ + int i, count; + struct range *range; + int nr_range; + u64 final_start, final_end; + u64 free_size; + struct memblock_region *r; + + count = (memblock.reserved.cnt + memblock.memory.cnt) * 2; + + range = find_range_array(count); + nr_range = 0; + + addr = PFN_UP(addr); + limit = PFN_DOWN(limit); + + for_each_memblock(memory, r) { + final_start = PFN_UP(r->base); + final_end = PFN_DOWN(r->base + r->size); + if (final_start >= final_end) + continue; + if (final_start >= limit || final_end <= addr) + continue; + + nr_range = add_range(range, count, nr_range, final_start, final_end); + } + subtract_range(range, count, 0, addr); + subtract_range(range, count, limit, -1ULL); + for_each_memblock(reserved, r) { + final_start = PFN_DOWN(r->base); + final_end = PFN_UP(r->base + r->size); + if (final_start >= final_end) + continue; + if (final_start >= limit || final_end <= addr) + continue; + + subtract_range(range, count, final_start, final_end); + } + nr_range = clean_sort_range(range, count); + + free_size = 0; + for (i = 0; i < nr_range; i++) + free_size += range[i].end - range[i].start; + + return free_size << PAGE_SHIFT; +} + void __init memblock_x86_reserve_range(u64 start, u64 end, char *name) { if (start == end) -- cgit v1.2.3-70-g09d2 From e82d42be24bd5d75bf6f81045636e6ca95ab55f2 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:17 -0700 Subject: x86, memblock: 
Add memblock_x86_memory_in_range() It will return memory size in specified range according to memblock.memory.region Try to share some code with memblock_x86_free_memory_in_range() by passing get_free to __memblock_x86_memory_in_range(). -v2: Ben want _in_range in the name instead of size Signed-off-by: Yinghai Lu Cc: Benjamin Herrenschmidt Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/memblock.h | 1 + arch/x86/mm/memblock.c | 18 +++++++++++++++++- 2 files changed, 18 insertions(+), 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h index fc3c230812e..2c304bb6e07 100644 --- a/arch/x86/include/asm/memblock.h +++ b/arch/x86/include/asm/memblock.h @@ -16,5 +16,6 @@ void memblock_x86_register_active_regions(int nid, unsigned long start_pfn, u64 memblock_x86_hole_size(u64 start, u64 end); u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align); u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit); +u64 memblock_x86_memory_in_range(u64 addr, u64 limit); #endif diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c index 30d60cf29ce..32ddad5dc93 100644 --- a/arch/x86/mm/memblock.c +++ b/arch/x86/mm/memblock.c @@ -211,7 +211,7 @@ void __init memblock_x86_to_bootmem(u64 start, u64 end) } #endif -u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit) +static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free) { int i, count; struct range *range; @@ -240,6 +240,10 @@ u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit) } subtract_range(range, count, 0, addr); subtract_range(range, count, limit, -1ULL); + + /* Subtract memblock.reserved.region in range ? */ + if (!get_free) + goto sort_and_count_them; for_each_memblock(reserved, r) { final_start = PFN_DOWN(r->base); final_end = PFN_UP(r->base + r->size); @@ -250,6 +254,8 @@ u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit) subtract_range(range, count, final_start, final_end); } + +sort_and_count_them: nr_range = clean_sort_range(range, count); free_size = 0; @@ -259,6 +265,16 @@ u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit) return free_size << PAGE_SHIFT; } +u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit) +{ + return __memblock_x86_memory_in_range(addr, limit, true); +} + +u64 __init memblock_x86_memory_in_range(u64 addr, u64 limit) +{ + return __memblock_x86_memory_in_range(addr, limit, false); +} + void __init memblock_x86_reserve_range(u64 start, u64 end, char *name) { if (start == end) -- cgit v1.2.3-70-g09d2 From 72d7c3b33c980843e756681fb4867dc1efd62a76 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:17 -0700 Subject: x86: Use memblock to replace early_res 1. replace find_e820_area with memblock_find_in_range 2. replace reserve_early with memblock_x86_reserve_range 3. replace free_early with memblock_x86_free_range. 4. NO_BOOTMEM will switch to use memblock too. 5. use _e820, _early wrap in the patch, in following patch, will replace them all 6. because memblock_x86_free_range support partial free, we can remove some special care 7. 
Need to make sure that memblock_find_in_range() is called after memblock_x86_fill() so adjust some calling later in setup.c::setup_arch() -- corruption_check and mptable_update -v2: Move reserve_brk() early Before fill_memblock_area, to avoid overlap between brk and memblock_find_in_range() that could happen We have more then 128 RAM entry in E820 tables, and memblock_x86_fill() could use memblock_find_in_range() to find a new place for memblock.memory.region array. and We don't need to use extend_brk() after fill_memblock_area() So move reserve_brk() early before fill_memblock_area(). -v3: Move find_smp_config early To make sure memblock_find_in_range not find wrong place, if BIOS doesn't put mptable in right place. -v4: Treat RESERVED_KERN as RAM in memblock.memory. and they are already in memblock.reserved already.. use __NOT_KEEP_MEMBLOCK to make sure memblock related code could be freed later. -v5: Generic version __memblock_find_in_range() is going from high to low, and for 32bit active_region for 32bit does include high pages need to replace the limit with memblock.default_alloc_limit, aka get_max_mapped() -v6: Use current_limit instead -v7: check with MEMBLOCK_ERROR instead of -1ULL or -1L -v8: Set memblock_can_resize early to handle EFI with more RAM entries -v9: update after kmemleak changes in mainline Suggested-by: David S. Miller Suggested-by: Benjamin Herrenschmidt Suggested-by: Thomas Gleixner Signed-off-by: Yinghai Lu Signed-off-by: H. Peter Anvin --- arch/x86/Kconfig | 9 +-- arch/x86/include/asm/e820.h | 14 ++-- arch/x86/kernel/check.c | 16 +++-- arch/x86/kernel/e820.c | 159 ++++++++++++++--------------------------- arch/x86/kernel/head.c | 3 +- arch/x86/kernel/head32.c | 6 +- arch/x86/kernel/head64.c | 3 + arch/x86/kernel/mpparse.c | 5 +- arch/x86/kernel/setup.c | 46 ++++++++---- arch/x86/kernel/setup_percpu.c | 6 -- arch/x86/mm/numa_64.c | 9 +-- mm/bootmem.c | 3 + mm/page_alloc.c | 50 ++++--------- mm/sparse-vmemmap.c | 11 --- 14 files changed, 141 insertions(+), 199 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index dcb0593b4a6..542bb2610cb 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -27,6 +27,7 @@ config X86 select HAVE_PERF_EVENTS if (!M386 && !M486) select HAVE_IOREMAP_PROT select HAVE_KPROBES + select HAVE_MEMBLOCK select ARCH_WANT_OPTIONAL_GPIOLIB select ARCH_WANT_FRAME_POINTERS select HAVE_DMA_ATTRS @@ -195,9 +196,6 @@ config ARCH_SUPPORTS_OPTIMIZED_INLINING config ARCH_SUPPORTS_DEBUG_PAGEALLOC def_bool y -config HAVE_EARLY_RES - def_bool y - config HAVE_INTEL_TXT def_bool y depends on EXPERIMENTAL && DMAR && ACPI @@ -590,14 +588,13 @@ config NO_BOOTMEM default y bool "Disable Bootmem code" ---help--- - Use early_res directly instead of bootmem before slab is ready. + Use memblock directly instead of bootmem before slab is ready. 
- allocator (buddy) [generic] - early allocator (bootmem) [generic] - - very early allocator (reserve_early*()) [x86] + - very early allocator (memblock) [some generic] - very very early allocator (early brk model) [x86] So reduce one layer between early allocator to final allocator - config MEMTEST bool "Memtest" ---help--- diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h index ec8a52d14ab..388fed29146 100644 --- a/arch/x86/include/asm/e820.h +++ b/arch/x86/include/asm/e820.h @@ -117,24 +117,26 @@ extern unsigned long end_user_pfn; extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align); extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align); extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align); -#include extern unsigned long e820_end_of_ram_pfn(void); extern unsigned long e820_end_of_low_ram_pfn(void); -extern int e820_find_active_region(const struct e820entry *ei, - unsigned long start_pfn, - unsigned long last_pfn, - unsigned long *ei_startpfn, - unsigned long *ei_endpfn); extern void e820_register_active_regions(int nid, unsigned long start_pfn, unsigned long end_pfn); extern u64 e820_hole_size(u64 start, u64 end); + +extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align); + +void memblock_x86_fill(void); + extern void finish_e820_parsing(void); extern void e820_reserve_resources(void); extern void e820_reserve_resources_late(void); extern void setup_memory_map(void); extern char *default_machine_specific_memory_setup(void); +void reserve_early(u64 start, u64 end, char *name); +void free_early(u64 start, u64 end); + /* * Returns true iff the specified range [s,e) is completely contained inside * the ISA region. diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c index fc999e6fc46..13a38917951 100644 --- a/arch/x86/kernel/check.c +++ b/arch/x86/kernel/check.c @@ -2,7 +2,8 @@ #include #include #include -#include +#include + #include /* @@ -18,10 +19,12 @@ static int __read_mostly memory_corruption_check = -1; static unsigned __read_mostly corruption_check_size = 64*1024; static unsigned __read_mostly corruption_check_period = 60; /* seconds */ -static struct e820entry scan_areas[MAX_SCAN_AREAS]; +static struct scan_area { + u64 addr; + u64 size; +} scan_areas[MAX_SCAN_AREAS]; static int num_scan_areas; - static __init int set_corruption_check(char *arg) { char *end; @@ -81,9 +84,9 @@ void __init setup_bios_corruption_check(void) while (addr < corruption_check_size && num_scan_areas < MAX_SCAN_AREAS) { u64 size; - addr = find_e820_area_size(addr, &size, PAGE_SIZE); + addr = memblock_x86_find_in_range_size(addr, &size, PAGE_SIZE); - if (!(addr + 1)) + if (addr == MEMBLOCK_ERROR) break; if (addr >= corruption_check_size) @@ -92,7 +95,7 @@ void __init setup_bios_corruption_check(void) if ((addr + size) > corruption_check_size) size = corruption_check_size - addr; - e820_update_range(addr, size, E820_RAM, E820_RESERVED); + memblock_x86_reserve_range(addr, addr + size, "SCAN RAM"); scan_areas[num_scan_areas].addr = addr; scan_areas[num_scan_areas].size = size; num_scan_areas++; @@ -105,7 +108,6 @@ void __init setup_bios_corruption_check(void) printk(KERN_INFO "Scanning %d areas for low memory corruption\n", num_scan_areas); - update_e820(); } diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 0d6fc71bedb..a9221d18a5e 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -742,69 +743,29 @@ 
core_initcall(e820_mark_nvs_memory); */ u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align) { - int i; - - for (i = 0; i < e820.nr_map; i++) { - struct e820entry *ei = &e820.map[i]; - u64 addr; - u64 ei_start, ei_last; + u64 mem = memblock_find_in_range(start, end, size, align); - if (ei->type != E820_RAM) - continue; - - ei_last = ei->addr + ei->size; - ei_start = ei->addr; - addr = find_early_area(ei_start, ei_last, start, end, - size, align); - - if (addr != -1ULL) - return addr; - } - return -1ULL; -} + if (mem == MEMBLOCK_ERROR) + return -1ULL; -u64 __init find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align) -{ - return find_e820_area(start, end, size, align); + return mem; } -u64 __init get_max_mapped(void) -{ - u64 end = max_pfn_mapped; - - end <<= PAGE_SHIFT; - - return end; -} /* * Find next free range after *start */ u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align) { - int i; + u64 mem = memblock_x86_find_in_range_size(start, sizep, align); - for (i = 0; i < e820.nr_map; i++) { - struct e820entry *ei = &e820.map[i]; - u64 addr; - u64 ei_start, ei_last; - - if (ei->type != E820_RAM) - continue; - - ei_last = ei->addr + ei->size; - ei_start = ei->addr; - addr = find_early_area_size(ei_start, ei_last, start, - sizep, align); + if (mem == MEMBLOCK_ERROR) + return -1ULL - if (addr != -1ULL) - return addr; - } - - return -1ULL; + return mem; } /* - * pre allocated 4k and reserved it in e820 + * pre allocated 4k and reserved it in memblock and e820_saved */ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align) { @@ -813,8 +774,8 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align) u64 start; for (start = startt; ; start += size) { - start = find_e820_area_size(start, &size, align); - if (!(start + 1)) + start = memblock_x86_find_in_range_size(start, &size, align); + if (start == MEMBLOCK_ERROR) return 0; if (size >= sizet) break; @@ -830,10 +791,9 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align) addr = round_down(start + size - sizet, align); if (addr < start) return 0; - e820_update_range(addr, sizet, E820_RAM, E820_RESERVED); + memblock_x86_reserve_range(addr, addr + sizet, "new next"); e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED); - printk(KERN_INFO "update e820 for early_reserve_e820\n"); - update_e820(); + printk(KERN_INFO "update e820_saved for early_reserve_e820\n"); update_e820_saved(); return addr; @@ -895,52 +855,12 @@ unsigned long __init e820_end_of_low_ram_pfn(void) { return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM); } -/* - * Finds an active region in the address range from start_pfn to last_pfn and - * returns its range in ei_startpfn and ei_endpfn for the e820 entry. 
- */ -int __init e820_find_active_region(const struct e820entry *ei, - unsigned long start_pfn, - unsigned long last_pfn, - unsigned long *ei_startpfn, - unsigned long *ei_endpfn) -{ - u64 align = PAGE_SIZE; - - *ei_startpfn = round_up(ei->addr, align) >> PAGE_SHIFT; - *ei_endpfn = round_down(ei->addr + ei->size, align) >> PAGE_SHIFT; - - /* Skip map entries smaller than a page */ - if (*ei_startpfn >= *ei_endpfn) - return 0; - - /* Skip if map is outside the node */ - if (ei->type != E820_RAM || *ei_endpfn <= start_pfn || - *ei_startpfn >= last_pfn) - return 0; - - /* Check for overlaps */ - if (*ei_startpfn < start_pfn) - *ei_startpfn = start_pfn; - if (*ei_endpfn > last_pfn) - *ei_endpfn = last_pfn; - - return 1; -} /* Walk the e820 map and register active regions within a node */ void __init e820_register_active_regions(int nid, unsigned long start_pfn, unsigned long last_pfn) { - unsigned long ei_startpfn; - unsigned long ei_endpfn; - int i; - - for (i = 0; i < e820.nr_map; i++) - if (e820_find_active_region(&e820.map[i], - start_pfn, last_pfn, - &ei_startpfn, &ei_endpfn)) - add_active_range(nid, ei_startpfn, ei_endpfn); + memblock_x86_register_active_regions(nid, start_pfn, last_pfn); } /* @@ -950,18 +870,16 @@ void __init e820_register_active_regions(int nid, unsigned long start_pfn, */ u64 __init e820_hole_size(u64 start, u64 end) { - unsigned long start_pfn = start >> PAGE_SHIFT; - unsigned long last_pfn = end >> PAGE_SHIFT; - unsigned long ei_startpfn, ei_endpfn, ram = 0; - int i; + return memblock_x86_hole_size(start, end); +} - for (i = 0; i < e820.nr_map; i++) { - if (e820_find_active_region(&e820.map[i], - start_pfn, last_pfn, - &ei_startpfn, &ei_endpfn)) - ram += ei_endpfn - ei_startpfn; - } - return end - start - ((u64)ram << PAGE_SHIFT); +void reserve_early(u64 start, u64 end, char *name) +{ + memblock_x86_reserve_range(start, end, name); +} +void free_early(u64 start, u64 end) +{ + memblock_x86_free_range(start, end); } static void early_panic(char *msg) @@ -1210,3 +1128,32 @@ void __init setup_memory_map(void) printk(KERN_INFO "BIOS-provided physical RAM map:\n"); e820_print_map(who); } + +void __init memblock_x86_fill(void) +{ + int i; + u64 end; + + /* + * EFI may have more than 128 entries + * We are safe to enable resizing, beause memblock_x86_fill() + * is rather later for x86 + */ + memblock_can_resize = 1; + + for (i = 0; i < e820.nr_map; i++) { + struct e820entry *ei = &e820.map[i]; + + end = ei->addr + ei->size; + if (end != (resource_size_t)end) + continue; + + if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN) + continue; + + memblock_add(ei->addr, ei->size); + } + + memblock_analyze(); + memblock_dump_all(); +} diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c index 3e66bd364a9..af0699ba48c 100644 --- a/arch/x86/kernel/head.c +++ b/arch/x86/kernel/head.c @@ -1,5 +1,6 @@ #include #include +#include #include #include @@ -51,5 +52,5 @@ void __init reserve_ebda_region(void) lowmem = 0x9f000; /* reserve all memory between lowmem and the 1MB mark */ - reserve_early_overlap_ok(lowmem, 0x100000, "BIOS reserved"); + memblock_x86_reserve_range(lowmem, 0x100000, "* BIOS reserved"); } diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index b2e24603739..da60aa8a850 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include @@ -30,14 +31,15 @@ static void __init i386_default_early_setup(void) void __init i386_start_kernel(void) { + memblock_init(); + #ifdef 
CONFIG_X86_TRAMPOLINE /* * But first pinch a few for the stack/trampoline stuff * FIXME: Don't need the extra page at 4K, but need to fix * trampoline before removing it. (see the GDT stuff) */ - reserve_early_overlap_ok(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, - "EX TRAMPOLINE"); + memblock_x86_reserve_range(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE"); #endif reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 7147143fd61..8ee930fdeeb 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -98,6 +99,8 @@ void __init x86_64_start_reservations(char *real_mode_data) { copy_bootdata(__va(real_mode_data)); + memblock_init(); + reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); #ifdef CONFIG_BLK_DEV_INITRD diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index d86dbf7e54b..8252545ae6f 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -641,7 +642,7 @@ static void __init smp_reserve_memory(struct mpf_intel *mpf) { unsigned long size = get_mpc_size(mpf->physptr); - reserve_early_overlap_ok(mpf->physptr, mpf->physptr+size, "MP-table mpc"); + memblock_x86_reserve_range(mpf->physptr, mpf->physptr+size, "* MP-table mpc"); } static int __init smp_scan_config(unsigned long base, unsigned long length) @@ -670,7 +671,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length) mpf, (u64)virt_to_phys(mpf)); mem = virt_to_phys(mpf); - reserve_early_overlap_ok(mem, mem + sizeof(*mpf), "MP-table mpf"); + memblock_x86_reserve_range(mem, mem + sizeof(*mpf), "* MP-table mpf"); if (mpf->physptr) smp_reserve_memory(mpf); diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index b4ae4acbd03..bbe0aaf7749 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -614,7 +615,7 @@ static __init void reserve_ibft_region(void) addr = find_ibft_region(&size); if (size) - reserve_early_overlap_ok(addr, addr + size, "ibft"); + memblock_x86_reserve_range(addr, addr + size, "* ibft"); } #ifdef CONFIG_X86_RESERVE_LOW_64K @@ -708,6 +709,15 @@ static void __init trim_bios_range(void) sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); } +static u64 __init get_max_mapped(void) +{ + u64 end = max_pfn_mapped; + + end <<= PAGE_SHIFT; + + return end; +} + /* * Determine if we were loaded by an EFI loader. 
If so, then we have also been * passed the efi memmap, systab, etc., so we should use these data structures @@ -891,8 +901,6 @@ void __init setup_arch(char **cmdline_p) */ max_pfn = e820_end_of_ram_pfn(); - /* preallocate 4k for mptable mpc */ - early_reserve_e820_mpc_new(); /* update e820 for memory not covered by WB MTRRs */ mtrr_bp_init(); if (mtrr_trim_uncached_memory(max_pfn)) @@ -917,15 +925,6 @@ void __init setup_arch(char **cmdline_p) max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT; #endif -#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION - setup_bios_corruption_check(); -#endif - - printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n", - max_pfn_mapped< #include #include +#include #include #include #include @@ -171,8 +172,8 @@ static void * __init early_node_mem(int nodeid, unsigned long start, if (start < (MAX_DMA32_PFN< (MAX_DMA32_PFN< #include #include +#include #include #include @@ -434,6 +435,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, unsigned long size) { #ifdef CONFIG_NO_BOOTMEM + kmemleak_free_part(__va(physaddr), size); free_early(physaddr, physaddr + size); #else unsigned long start, end; @@ -459,6 +461,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, void __init free_bootmem(unsigned long addr, unsigned long size) { #ifdef CONFIG_NO_BOOTMEM + kmemleak_free_part(__va(addr), size); free_early(addr, addr + size); #else unsigned long start, end; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 8c9b34674d8..f2cd7450fa7 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3667,46 +3667,26 @@ int __init add_from_early_node_map(struct range *range, int az, void * __init __alloc_memory_core_early(int nid, u64 size, u64 align, u64 goal, u64 limit) { - int i; void *ptr; + u64 addr; - if (limit > get_max_mapped()) - limit = get_max_mapped(); + if (limit > memblock.current_limit) + limit = memblock.current_limit; - /* need to go over early_node_map to find out good range for node */ - for_each_active_range_index_in_nid(i, nid) { - u64 addr; - u64 ei_start, ei_last; + addr = find_memory_core_early(nid, size, align, goal, limit); - ei_last = early_node_map[i].end_pfn; - ei_last <<= PAGE_SHIFT; - ei_start = early_node_map[i].start_pfn; - ei_start <<= PAGE_SHIFT; - addr = find_early_area(ei_start, ei_last, - goal, limit, size, align); - - if (addr == -1ULL) - continue; - -#if 0 - printk(KERN_DEBUG "alloc (nid=%d %llx - %llx) (%llx - %llx) %llx %llx => %llx\n", - nid, - ei_start, ei_last, goal, limit, size, - align, addr); -#endif - - ptr = phys_to_virt(addr); - memset(ptr, 0, size); - reserve_early_without_check(addr, addr + size, "BOOTMEM"); - /* - * The min_count is set to 0 so that bootmem allocated blocks - * are never reported as leaks. - */ - kmemleak_alloc(ptr, size, 0, 0); - return ptr; - } + if (addr == MEMBLOCK_ERROR) + return NULL; - return NULL; + ptr = phys_to_virt(addr); + memset(ptr, 0, size); + memblock_x86_reserve_range(addr, addr + size, "BOOTMEM"); + /* + * The min_count is set to 0 so that bootmem allocated blocks + * are never reported as leaks. 
+ */ + kmemleak_alloc(ptr, size, 0, 0); + return ptr; } #endif diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index aa33fd67fa4..29d6cbffb28 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -220,18 +220,7 @@ void __init sparse_mem_maps_populate_node(struct page **map_map, if (vmemmap_buf_start) { /* need to free left buf */ -#ifdef CONFIG_NO_BOOTMEM - free_early(__pa(vmemmap_buf_start), __pa(vmemmap_buf_end)); - if (vmemmap_buf_start < vmemmap_buf) { - char name[15]; - - snprintf(name, sizeof(name), "MEMMAP %d", nodeid); - reserve_early_without_check(__pa(vmemmap_buf_start), - __pa(vmemmap_buf), name); - } -#else free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf); -#endif vmemmap_buf = NULL; vmemmap_buf_end = NULL; } -- cgit v1.2.3-70-g09d2 From a9ce6bc15100023b411f8117e53a016d61889800 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:17 -0700 Subject: x86, memblock: Replace e820_/_early string with memblock_ 1.include linux/memblock.h directly. so later could reduce e820.h reference. 2 this patch is done by sed scripts mainly -v2: use MEMBLOCK_ERROR instead of -1ULL or -1UL Signed-off-by: Yinghai Lu Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/efi.h | 2 +- arch/x86/kernel/acpi/sleep.c | 9 +++++---- arch/x86/kernel/apic/numaq_32.c | 3 ++- arch/x86/kernel/efi.c | 5 +++-- arch/x86/kernel/head32.c | 4 ++-- arch/x86/kernel/head64.c | 4 ++-- arch/x86/kernel/setup.c | 29 ++++++++++++++--------------- arch/x86/kernel/trampoline.c | 10 +++++----- arch/x86/mm/init.c | 10 ++++++---- arch/x86/mm/init_32.c | 14 ++++++++------ arch/x86/mm/init_64.c | 11 ++++++----- arch/x86/mm/k8topology_64.c | 4 +++- arch/x86/mm/memtest.c | 7 +++---- arch/x86/mm/numa_32.c | 25 +++++++++++++------------ arch/x86/mm/numa_64.c | 34 +++++++++++++++++----------------- arch/x86/mm/srat_32.c | 3 ++- arch/x86/mm/srat_64.c | 11 ++++++----- arch/x86/xen/mmu.c | 5 +++-- arch/x86/xen/setup.c | 3 ++- mm/bootmem.c | 4 ++-- 20 files changed, 105 insertions(+), 92 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 8406ed7f992..8e4a16508d4 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -90,7 +90,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, #endif /* CONFIG_X86_32 */ extern int add_efi_memmap; -extern void efi_reserve_early(void); +extern void efi_memblock_x86_reserve_range(void); extern void efi_call_phys_prelog(void); extern void efi_call_phys_epilog(void); diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index fcc3c61fdec..d829e75f968 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -7,6 +7,7 @@ #include #include +#include #include #include #include @@ -125,7 +126,7 @@ void acpi_restore_state_mem(void) */ void __init acpi_reserve_wakeup_memory(void) { - unsigned long mem; + phys_addr_t mem; if ((&wakeup_code_end - &wakeup_code_start) > WAKEUP_SIZE) { printk(KERN_ERR @@ -133,15 +134,15 @@ void __init acpi_reserve_wakeup_memory(void) return; } - mem = find_e820_area(0, 1<<20, WAKEUP_SIZE, PAGE_SIZE); + mem = memblock_find_in_range(0, 1<<20, WAKEUP_SIZE, PAGE_SIZE); - if (mem == -1L) { + if (mem == MEMBLOCK_ERROR) { printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n"); return; } acpi_realmode = (unsigned long) phys_to_virt(mem); acpi_wakeup_address = mem; - reserve_early(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP"); + memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP"); } 
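[Editorial note] The sleep.c hunk above shows the conversion pattern repeated throughout this series: probe for a block with memblock_find_in_range(), test the result against MEMBLOCK_ERROR rather than -1ULL/-1L, then pin it with memblock_x86_reserve_range(). A minimal sketch of that pattern follows; the helper name, its size parameter and the "EXAMPLE BUFFER" label are illustrative only and do not appear in these patches, and the include lines assume the headers this series uses for these declarations.

#include <linux/memblock.h>	/* memblock_find_in_range(), MEMBLOCK_ERROR (assumed location) */
#include <asm/memblock.h>	/* memblock_x86_reserve_range() */

/* Illustrative helper, not part of the series: find and reserve 'size'
 * bytes of free RAM below 1MB, the way the converted callers above do. */
static phys_addr_t __init reserve_low_buffer(u64 size)
{
	phys_addr_t mem;

	/* probe for a page-aligned free range in [0, 1MB) */
	mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
	if (mem == MEMBLOCK_ERROR)	/* failure marker, not -1ULL as with find_e820_area() */
		return mem;

	/* pin it so later memblock allocations cannot hand it out again */
	memblock_x86_reserve_range(mem, mem + size, "EXAMPLE BUFFER");

	return mem;
}

The matching teardown is memblock_x86_free_range(), which replaces free_early() in the same way elsewhere in this series.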
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c index 3e28401f161..960f26ab5c9 100644 --- a/arch/x86/kernel/apic/numaq_32.c +++ b/arch/x86/kernel/apic/numaq_32.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -88,7 +89,7 @@ static inline void numaq_register_node(int node, struct sys_cfg_data *scd) node_end_pfn[node] = MB_TO_PAGES(eq->hi_shrd_mem_start + eq->hi_shrd_mem_size); - e820_register_active_regions(node, node_start_pfn[node], + memblock_x86_register_active_regions(node, node_start_pfn[node], node_end_pfn[node]); memory_present(node, node_start_pfn[node], node_end_pfn[node]); diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c index c2fa9b8b497..0fe27d7c625 100644 --- a/arch/x86/kernel/efi.c +++ b/arch/x86/kernel/efi.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -275,7 +276,7 @@ static void __init do_add_efi_memmap(void) sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); } -void __init efi_reserve_early(void) +void __init efi_memblock_x86_reserve_range(void) { unsigned long pmap; @@ -290,7 +291,7 @@ void __init efi_reserve_early(void) boot_params.efi_info.efi_memdesc_size; memmap.desc_version = boot_params.efi_info.efi_memdesc_version; memmap.desc_size = boot_params.efi_info.efi_memdesc_size; - reserve_early(pmap, pmap + memmap.nr_map * memmap.desc_size, + memblock_x86_reserve_range(pmap, pmap + memmap.nr_map * memmap.desc_size, "EFI memmap"); } diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index da60aa8a850..74e4cf65043 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -42,7 +42,7 @@ void __init i386_start_kernel(void) memblock_x86_reserve_range(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE"); #endif - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); + memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); #ifdef CONFIG_BLK_DEV_INITRD /* Reserve INITRD */ @@ -51,7 +51,7 @@ void __init i386_start_kernel(void) u64 ramdisk_image = boot_params.hdr.ramdisk_image; u64 ramdisk_size = boot_params.hdr.ramdisk_size; u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); - reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); + memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK"); } #endif diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 8ee930fdeeb..97adf9828b9 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -101,7 +101,7 @@ void __init x86_64_start_reservations(char *real_mode_data) memblock_init(); - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); + memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); #ifdef CONFIG_BLK_DEV_INITRD /* Reserve INITRD */ @@ -110,7 +110,7 @@ void __init x86_64_start_reservations(char *real_mode_data) unsigned long ramdisk_image = boot_params.hdr.ramdisk_image; unsigned long ramdisk_size = boot_params.hdr.ramdisk_size; unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); - reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); + memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK"); } #endif diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index bbe0aaf7749..a4f01733e87 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -302,7 +302,7 @@ static inline void init_gbpages(void) static void __init reserve_brk(void) { if (_brk_end > 
_brk_start) - reserve_early(__pa(_brk_start), __pa(_brk_end), "BRK"); + memblock_x86_reserve_range(__pa(_brk_start), __pa(_brk_end), "BRK"); /* Mark brk area as locked down and no longer taking any new allocations */ @@ -324,17 +324,16 @@ static void __init relocate_initrd(void) char *p, *q; /* We need to move the initrd down into lowmem */ - ramdisk_here = find_e820_area(0, end_of_lowmem, area_size, + ramdisk_here = memblock_find_in_range(0, end_of_lowmem, area_size, PAGE_SIZE); - if (ramdisk_here == -1ULL) + if (ramdisk_here == MEMBLOCK_ERROR) panic("Cannot find place for new RAMDISK of size %lld\n", ramdisk_size); /* Note: this includes all the lowmem currently occupied by the initrd, we rely on that fact to keep the data intact. */ - reserve_early(ramdisk_here, ramdisk_here + area_size, - "NEW RAMDISK"); + memblock_x86_reserve_range(ramdisk_here, ramdisk_here + area_size, "NEW RAMDISK"); initrd_start = ramdisk_here + PAGE_OFFSET; initrd_end = initrd_start + ramdisk_size; printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n", @@ -390,7 +389,7 @@ static void __init reserve_initrd(void) initrd_start = 0; if (ramdisk_size >= (end_of_lowmem>>1)) { - free_early(ramdisk_image, ramdisk_end); + memblock_x86_free_range(ramdisk_image, ramdisk_end); printk(KERN_ERR "initrd too large to handle, " "disabling initrd\n"); return; @@ -413,7 +412,7 @@ static void __init reserve_initrd(void) relocate_initrd(); - free_early(ramdisk_image, ramdisk_end); + memblock_x86_free_range(ramdisk_image, ramdisk_end); } #else static void __init reserve_initrd(void) @@ -469,7 +468,7 @@ static void __init e820_reserve_setup_data(void) e820_print_map("reserve setup_data"); } -static void __init reserve_early_setup_data(void) +static void __init memblock_x86_reserve_range_setup_data(void) { struct setup_data *data; u64 pa_data; @@ -481,7 +480,7 @@ static void __init reserve_early_setup_data(void) while (pa_data) { data = early_memremap(pa_data, sizeof(*data)); sprintf(buf, "setup data %x", data->type); - reserve_early(pa_data, pa_data+sizeof(*data)+data->len, buf); + memblock_x86_reserve_range(pa_data, pa_data+sizeof(*data)+data->len, buf); pa_data = data->next; early_iounmap(data, sizeof(*data)); } @@ -519,23 +518,23 @@ static void __init reserve_crashkernel(void) if (crash_base <= 0) { const unsigned long long alignment = 16<<20; /* 16M */ - crash_base = find_e820_area(alignment, ULONG_MAX, crash_size, + crash_base = memblock_find_in_range(alignment, ULONG_MAX, crash_size, alignment); - if (crash_base == -1ULL) { + if (crash_base == MEMBLOCK_ERROR) { pr_info("crashkernel reservation failed - No suitable area found.\n"); return; } } else { unsigned long long start; - start = find_e820_area(crash_base, ULONG_MAX, crash_size, + start = memblock_find_in_range(crash_base, ULONG_MAX, crash_size, 1<<20); if (start != crash_base) { pr_info("crashkernel reservation failed - memory is in use.\n"); return; } } - reserve_early(crash_base, crash_base + crash_size, "CRASH KERNEL"); + memblock_x86_reserve_range(crash_base, crash_base + crash_size, "CRASH KERNEL"); printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " "for crashkernel (System RAM: %ldMB)\n", @@ -786,7 +785,7 @@ void __init setup_arch(char **cmdline_p) #endif 4)) { efi_enabled = 1; - efi_reserve_early(); + efi_memblock_x86_reserve_range(); } #endif @@ -846,7 +845,7 @@ void __init setup_arch(char **cmdline_p) vmi_activate(); /* after early param, so could get panic from serial */ - reserve_early_setup_data(); + memblock_x86_reserve_range_setup_data(); if 
(acpi_mps_check()) { #ifdef CONFIG_X86_LOCAL_APIC diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c index c652ef62742..7c2102c2aad 100644 --- a/arch/x86/kernel/trampoline.c +++ b/arch/x86/kernel/trampoline.c @@ -1,7 +1,7 @@ #include +#include #include -#include #if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP) #define __trampinit @@ -16,15 +16,15 @@ unsigned char *__trampinitdata trampoline_base; void __init reserve_trampoline_memory(void) { - unsigned long mem; + phys_addr_t mem; /* Has to be in very low memory so we can execute real-mode AP code. */ - mem = find_e820_area(0, 1<<20, TRAMPOLINE_SIZE, PAGE_SIZE); - if (mem == -1L) + mem = memblock_find_in_range(0, 1<<20, TRAMPOLINE_SIZE, PAGE_SIZE); + if (mem == MEMBLOCK_ERROR) panic("Cannot allocate trampoline\n"); trampoline_base = __va(mem); - reserve_early(mem, mem + TRAMPOLINE_SIZE, "TRAMPOLINE"); + memblock_x86_reserve_range(mem, mem + TRAMPOLINE_SIZE, "TRAMPOLINE"); } /* diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index b278535b14a..c0e28a13de7 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -2,6 +2,7 @@ #include #include #include +#include #include #include @@ -33,6 +34,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse, int use_gbpages) { unsigned long puds, pmds, ptes, tables, start; + phys_addr_t base; puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); @@ -75,12 +77,12 @@ static void __init find_early_table_space(unsigned long end, int use_pse, #else start = 0x8000; #endif - e820_table_start = find_e820_area(start, max_pfn_mapped<>= PAGE_SHIFT; + e820_table_start = base >> PAGE_SHIFT; e820_table_end = e820_table_start; e820_table_top = e820_table_start + (tables >> PAGE_SHIFT); @@ -299,7 +301,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, __flush_tlb_all(); if (!after_bootmem && e820_table_end > e820_table_start) - reserve_early(e820_table_start << PAGE_SHIFT, + memblock_x86_reserve_range(e820_table_start << PAGE_SHIFT, e820_table_end << PAGE_SHIFT, "PGTABLE"); if (!after_bootmem) diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 90e054589aa..63b09bae250 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -712,14 +713,14 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, highstart_pfn = highend_pfn = max_pfn; if (max_pfn > max_low_pfn) highstart_pfn = max_low_pfn; - e820_register_active_regions(0, 0, highend_pfn); + memblock_x86_register_active_regions(0, 0, highend_pfn); sparse_memory_present_with_active_regions(0); printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", pages_to_mb(highend_pfn - highstart_pfn)); num_physpages = highend_pfn; high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; #else - e820_register_active_regions(0, 0, max_low_pfn); + memblock_x86_register_active_regions(0, 0, max_low_pfn); sparse_memory_present_with_active_regions(0); num_physpages = max_low_pfn; high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; @@ -776,16 +777,16 @@ void __init setup_bootmem_allocator(void) { #ifndef CONFIG_NO_BOOTMEM int nodeid; - unsigned long bootmap_size, bootmap; + phys_addr_t bootmap_size, bootmap; /* * Initialize the boot-time allocator (with low memory only): */ bootmap_size = bootmem_bootmap_pages(max_low_pfn)< #include #include +#include #include #include #include @@ -577,18 +578,18 @@ void __init initmem_init(unsigned long 
start_pfn, unsigned long end_pfn, unsigned long bootmap_size, bootmap; bootmap_size = bootmem_bootmap_pages(end_pfn)<> PAGE_SHIFT, 0, end_pfn); - e820_register_active_regions(0, start_pfn, end_pfn); + memblock_x86_register_active_regions(0, start_pfn, end_pfn); free_bootmem_with_active_regions(0, end_pfn); #else - e820_register_active_regions(0, start_pfn, end_pfn); + memblock_x86_register_active_regions(0, start_pfn, end_pfn); #endif } #endif diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/k8topology_64.c index 970ed579d4e..966de9372e8 100644 --- a/arch/x86/mm/k8topology_64.c +++ b/arch/x86/mm/k8topology_64.c @@ -11,6 +11,8 @@ #include #include #include +#include + #include #include #include @@ -222,7 +224,7 @@ int __init k8_scan_nodes(void) for_each_node_mask(i, node_possible_map) { int j; - e820_register_active_regions(i, + memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, nodes[i].end >> PAGE_SHIFT); for (j = apicid_base; j < cores + apicid_base; j++) diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c index 18d244f7020..92faf3a1c53 100644 --- a/arch/x86/mm/memtest.c +++ b/arch/x86/mm/memtest.c @@ -6,8 +6,7 @@ #include #include #include - -#include +#include static u64 patterns[] __initdata = { 0, @@ -35,7 +34,7 @@ static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad) (unsigned long long) pattern, (unsigned long long) start_bad, (unsigned long long) end_bad); - reserve_early(start_bad, end_bad, "BAD RAM"); + memblock_x86_reserve_range(start_bad, end_bad, "BAD RAM"); } static void __init memtest(u64 pattern, u64 start_phys, u64 size) @@ -74,7 +73,7 @@ static void __init do_one_pass(u64 pattern, u64 start, u64 end) u64 size = 0; while (start < end) { - start = find_e820_area_size(start, &size, 1); + start = memblock_x86_find_in_range_size(start, &size, 1); /* done ? 
*/ if (start >= end) diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 809baaaf48b..ddf9730b206 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -24,6 +24,7 @@ #include #include +#include #include #include #include @@ -120,7 +121,7 @@ int __init get_memcfg_numa_flat(void) node_start_pfn[0] = 0; node_end_pfn[0] = max_pfn; - e820_register_active_regions(0, 0, max_pfn); + memblock_x86_register_active_regions(0, 0, max_pfn); memory_present(0, 0, max_pfn); node_remap_size[0] = node_memmap_size_bytes(0, 0, max_pfn); @@ -161,14 +162,14 @@ static void __init allocate_pgdat(int nid) NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid]; else { unsigned long pgdat_phys; - pgdat_phys = find_e820_area(min_low_pfn<>PAGE_SHIFT)); memset(buf, 0, sizeof(buf)); sprintf(buf, "NODE_DATA %d", nid); - reserve_early(pgdat_phys, pgdat_phys + sizeof(pg_data_t), buf); + memblock_x86_reserve_range(pgdat_phys, pgdat_phys + sizeof(pg_data_t), buf); } printk(KERN_DEBUG "allocate_pgdat: node %d NODE_DATA %08lx\n", nid, (unsigned long)NODE_DATA(nid)); @@ -291,15 +292,15 @@ static __init unsigned long calculate_numa_remap_pages(void) PTRS_PER_PTE); node_kva_target <<= PAGE_SHIFT; do { - node_kva_final = find_e820_area(node_kva_target, + node_kva_final = memblock_find_in_range(node_kva_target, ((u64)node_end_pfn[nid])<>PAGE_SHIFT) > (node_start_pfn[nid])); - if (node_kva_final == -1ULL) + if (node_kva_final == MEMBLOCK_ERROR) panic("Can not get kva ram\n"); node_remap_size[nid] = size; @@ -318,9 +319,9 @@ static __init unsigned long calculate_numa_remap_pages(void) * but we could have some hole in high memory, and it will only * check page_is_ram(pfn) && !page_is_reserved_early(pfn) to decide * to use it as free. - * So reserve_early here, hope we don't run out of that array + * So memblock_x86_reserve_range here, hope we don't run out of that array */ - reserve_early(node_kva_final, + memblock_x86_reserve_range(node_kva_final, node_kva_final+(((u64)size)<> PAGE_SHIFT; kva_target_pfn -= PTRS_PER_PTE; - } while (kva_start_pfn == -1UL && kva_target_pfn > min_low_pfn); + } while (kva_start_pfn == MEMBLOCK_ERROR && kva_target_pfn > min_low_pfn); - if (kva_start_pfn == -1UL) + if (kva_start_pfn == MEMBLOCK_ERROR) panic("Can not get kva space\n"); printk(KERN_INFO "kva_start_pfn ~ %lx max_low_pfn ~ %lx\n", @@ -382,7 +383,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, printk(KERN_INFO "max_pfn = %lx\n", max_pfn); /* avoid clash with initrd */ - reserve_early(kva_start_pfn< physnodes[i].end) { end = physnodes[i].end; @@ -467,7 +467,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr, * this one must extend to the boundary. */ if (end < dma32_end && dma32_end - end - - e820_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) + memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) end = dma32_end; /* @@ -476,7 +476,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr, * physical node. 
*/ if (physnodes[i].end - end - - e820_hole_size(end, physnodes[i].end) < size) + memblock_x86_hole_size(end, physnodes[i].end) < size) end = physnodes[i].end; /* @@ -504,7 +504,7 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size) { u64 end = start + size; - while (end - start - e820_hole_size(start, end) < size) { + while (end - start - memblock_x86_hole_size(start, end) < size) { end += FAKE_NODE_MIN_SIZE; if (end > max_addr) { end = max_addr; @@ -533,7 +533,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size) * creates a uniform distribution of node sizes across the entire * machine (but not necessarily over physical nodes). */ - min_size = (max_addr - addr - e820_hole_size(addr, max_addr)) / + min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / MAX_NUMNODES; min_size = max(min_size, FAKE_NODE_MIN_SIZE); if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size) @@ -566,7 +566,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size) * this one must extend to the boundary. */ if (end < dma32_end && dma32_end - end - - e820_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) + memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) end = dma32_end; /* @@ -575,7 +575,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size) * physical node. */ if (physnodes[i].end - end - - e820_hole_size(end, physnodes[i].end) < size) + memblock_x86_hole_size(end, physnodes[i].end) < size) end = physnodes[i].end; /* @@ -639,7 +639,7 @@ static int __init numa_emulation(unsigned long start_pfn, */ remove_all_active_ranges(); for_each_node_mask(i, node_possible_map) { - e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, + memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, nodes[i].end >> PAGE_SHIFT); setup_node_bootmem(i, nodes[i].start, nodes[i].end); } @@ -692,7 +692,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn, node_set(0, node_possible_map); for (i = 0; i < nr_cpu_ids; i++) numa_set_node(i, 0); - e820_register_active_regions(0, start_pfn, last_pfn); + memblock_x86_register_active_regions(0, start_pfn, last_pfn); setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT); } diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c index 9324f13492d..a17dffd136c 100644 --- a/arch/x86/mm/srat_32.c +++ b/arch/x86/mm/srat_32.c @@ -25,6 +25,7 @@ */ #include #include +#include #include #include #include @@ -264,7 +265,7 @@ int __init get_memcfg_from_srat(void) if (node_read_chunk(chunk->nid, chunk)) continue; - e820_register_active_regions(chunk->nid, chunk->start_pfn, + memblock_x86_register_active_regions(chunk->nid, chunk->start_pfn, min(chunk->end_pfn, max_pfn)); } /* for out of order entries in SRAT */ diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index f9897f7a9ef..7f44eb62a5e 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -98,15 +99,15 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit) unsigned long phys; length = slit->header.length; - phys = find_e820_area(0, max_pfn_mapped< x2APIC mapping */ @@ -324,7 +325,7 @@ static int __init nodes_cover_memory(const struct bootnode *nodes) pxmram = 0; } - e820ram = max_pfn - (e820_hole_size(0, max_pfn<>PAGE_SHIFT); + e820ram = max_pfn - (memblock_x86_hole_size(0, max_pfn<>PAGE_SHIFT); /* We seem to lose 3 pages somewhere. Allow 1M of slack. 
*/ if ((long)(e820ram - pxmram) >= (1<<(20 - PAGE_SHIFT))) { printk(KERN_ERR @@ -421,7 +422,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end) } for_each_node_mask(i, nodes_parsed) - e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, + memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, nodes[i].end >> PAGE_SHIFT); /* for out of order entries in SRAT */ sort_node_map(); diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 914f04695ce..b511f198691 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include @@ -1735,7 +1736,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, __xen_write_cr3(true, __pa(pgd)); xen_mc_issue(PARAVIRT_LAZY_CPU); - reserve_early(__pa(xen_start_info->pt_base), + memblock_x86_reserve_range(__pa(xen_start_info->pt_base), __pa(xen_start_info->pt_base + xen_start_info->nr_pt_frames * PAGE_SIZE), "XEN PAGETABLES"); @@ -1773,7 +1774,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir))); - reserve_early(__pa(xen_start_info->pt_base), + memblock_x86_reserve_range(__pa(xen_start_info->pt_base), __pa(xen_start_info->pt_base + xen_start_info->nr_pt_frames * PAGE_SIZE), "XEN PAGETABLES"); diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index ad0047f47cd..2ac8f29f89c 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include @@ -61,7 +62,7 @@ char * __init xen_memory_setup(void) * - xen_start_info * See comment above "struct start_info" in */ - reserve_early(__pa(xen_start_info->mfn_list), + memblock_x86_reserve_range(__pa(xen_start_info->mfn_list), __pa(xen_start_info->pt_base), "XEN START INFO"); diff --git a/mm/bootmem.c b/mm/bootmem.c index fda01a2c31a..13b0caa9793 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -436,7 +436,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, { #ifdef CONFIG_NO_BOOTMEM kmemleak_free_part(__va(physaddr), size); - free_early(physaddr, physaddr + size); + memblock_x86_free_range(physaddr, physaddr + size); #else unsigned long start, end; @@ -462,7 +462,7 @@ void __init free_bootmem(unsigned long addr, unsigned long size) { #ifdef CONFIG_NO_BOOTMEM kmemleak_free_part(__va(addr), size); - free_early(addr, addr + size); + memblock_x86_free_range(addr, addr + size); #else unsigned long start, end; -- cgit v1.2.3-70-g09d2 From a587d2daebcd2bc159d4348b6a7b028950a6d803 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:18 -0700 Subject: x86: Remove not used early_res code and some functions in e820.c that are not used anymore Signed-off-by: Yinghai Lu Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/e820.h | 14 -- arch/x86/kernel/e820.c | 52 ---- include/linux/early_res.h | 23 -- kernel/Makefile | 1 - kernel/early_res.c | 590 -------------------------------------------- 5 files changed, 680 deletions(-) delete mode 100644 include/linux/early_res.h delete mode 100644 kernel/early_res.c (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h index 388fed29146..718646384e0 100644 --- a/arch/x86/include/asm/e820.h +++ b/arch/x86/include/asm/e820.h @@ -112,31 +112,17 @@ static inline void early_memtest(unsigned long start, unsigned long end) } #endif -extern unsigned long end_user_pfn; - -extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align); -extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align); -extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align); - extern unsigned long e820_end_of_ram_pfn(void); extern unsigned long e820_end_of_low_ram_pfn(void); -extern void e820_register_active_regions(int nid, unsigned long start_pfn, - unsigned long end_pfn); -extern u64 e820_hole_size(u64 start, u64 end); - extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align); void memblock_x86_fill(void); - extern void finish_e820_parsing(void); extern void e820_reserve_resources(void); extern void e820_reserve_resources_late(void); extern void setup_memory_map(void); extern char *default_machine_specific_memory_setup(void); -void reserve_early(u64 start, u64 end, char *name); -void free_early(u64 start, u64 end); - /* * Returns true iff the specified range [s,e) is completely contained inside * the ISA region. diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index a9221d18a5e..d5fd89462d7 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -738,32 +738,6 @@ static int __init e820_mark_nvs_memory(void) core_initcall(e820_mark_nvs_memory); #endif -/* - * Find a free area with specified alignment in a specific range. - */ -u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align) -{ - u64 mem = memblock_find_in_range(start, end, size, align); - - if (mem == MEMBLOCK_ERROR) - return -1ULL; - - return mem; -} - -/* - * Find next free range after *start - */ -u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align) -{ - u64 mem = memblock_x86_find_in_range_size(start, sizep, align); - - if (mem == MEMBLOCK_ERROR) - return -1ULL - - return mem; -} - /* * pre allocated 4k and reserved it in memblock and e820_saved */ @@ -856,32 +830,6 @@ unsigned long __init e820_end_of_low_ram_pfn(void) return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM); } -/* Walk the e820 map and register active regions within a node */ -void __init e820_register_active_regions(int nid, unsigned long start_pfn, - unsigned long last_pfn) -{ - memblock_x86_register_active_regions(nid, start_pfn, last_pfn); -} - -/* - * Find the hole size (in bytes) in the memory range. 
- * @start: starting address of the memory range to scan - * @end: ending address of the memory range to scan - */ -u64 __init e820_hole_size(u64 start, u64 end) -{ - return memblock_x86_hole_size(start, end); -} - -void reserve_early(u64 start, u64 end, char *name) -{ - memblock_x86_reserve_range(start, end, name); -} -void free_early(u64 start, u64 end) -{ - memblock_x86_free_range(start, end); -} - static void early_panic(char *msg) { early_printk(msg); diff --git a/include/linux/early_res.h b/include/linux/early_res.h deleted file mode 100644 index 29c09f57a13..00000000000 --- a/include/linux/early_res.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef _LINUX_EARLY_RES_H -#define _LINUX_EARLY_RES_H -#ifdef __KERNEL__ - -extern void reserve_early(u64 start, u64 end, char *name); -extern void reserve_early_overlap_ok(u64 start, u64 end, char *name); -extern void free_early(u64 start, u64 end); -void free_early_partial(u64 start, u64 end); -extern void early_res_to_bootmem(u64 start, u64 end); - -void reserve_early_without_check(u64 start, u64 end, char *name); -u64 find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end, - u64 size, u64 align); -u64 find_early_area_size(u64 ei_start, u64 ei_last, u64 start, - u64 *sizep, u64 align); -u64 find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align); -u64 get_max_mapped(void); -#include -int get_free_all_memory_range(struct range **rangep, int nodeid); - -#endif /* __KERNEL__ */ - -#endif /* _LINUX_EARLY_RES_H */ diff --git a/kernel/Makefile b/kernel/Makefile index 057472fbc27..80e61c3e44a 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -11,7 +11,6 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \ hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \ async.o range.o -obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o obj-y += groups.o ifdef CONFIG_FUNCTION_TRACER diff --git a/kernel/early_res.c b/kernel/early_res.c deleted file mode 100644 index 7bfae887f21..00000000000 --- a/kernel/early_res.c +++ /dev/null @@ -1,590 +0,0 @@ -/* - * early_res, could be used to replace bootmem - */ -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * Early reserved memory areas. - */ -/* - * need to make sure this one is bigger enough before - * find_fw_memmap_area could be used - */ -#define MAX_EARLY_RES_X 32 - -struct early_res { - u64 start, end; - char name[15]; - char overlap_ok; -}; -static struct early_res early_res_x[MAX_EARLY_RES_X] __initdata; - -static int max_early_res __initdata = MAX_EARLY_RES_X; -static struct early_res *early_res __initdata = &early_res_x[0]; -static int early_res_count __initdata; - -static int __init find_overlapped_early(u64 start, u64 end) -{ - int i; - struct early_res *r; - - for (i = 0; i < max_early_res && early_res[i].end; i++) { - r = &early_res[i]; - if (end > r->start && start < r->end) - break; - } - - return i; -} - -/* - * Drop the i-th range from the early reservation map, - * by copying any higher ranges down one over it, and - * clearing what had been the last slot. 
- */ -static void __init drop_range(int i) -{ - int j; - - for (j = i + 1; j < max_early_res && early_res[j].end; j++) - ; - - memmove(&early_res[i], &early_res[i + 1], - (j - 1 - i) * sizeof(struct early_res)); - - early_res[j - 1].end = 0; - early_res_count--; -} - -static void __init drop_range_partial(int i, u64 start, u64 end) -{ - u64 common_start, common_end; - u64 old_start, old_end; - - old_start = early_res[i].start; - old_end = early_res[i].end; - common_start = max(old_start, start); - common_end = min(old_end, end); - - /* no overlap ? */ - if (common_start >= common_end) - return; - - if (old_start < common_start) { - /* make head segment */ - early_res[i].end = common_start; - if (old_end > common_end) { - char name[15]; - - /* - * Save a local copy of the name, since the - * early_res array could get resized inside - * reserve_early_without_check() -> - * __check_and_double_early_res(), which would - * make the current name pointer invalid. - */ - strncpy(name, early_res[i].name, - sizeof(early_res[i].name) - 1); - /* add another for left over on tail */ - reserve_early_without_check(common_end, old_end, name); - } - return; - } else { - if (old_end > common_end) { - /* reuse the entry for tail left */ - early_res[i].start = common_end; - return; - } - /* all covered */ - drop_range(i); - } -} - -/* - * Split any existing ranges that: - * 1) are marked 'overlap_ok', and - * 2) overlap with the stated range [start, end) - * into whatever portion (if any) of the existing range is entirely - * below or entirely above the stated range. Drop the portion - * of the existing range that overlaps with the stated range, - * which will allow the caller of this routine to then add that - * stated range without conflicting with any existing range. - */ -static void __init drop_overlaps_that_are_ok(u64 start, u64 end) -{ - int i; - struct early_res *r; - u64 lower_start, lower_end; - u64 upper_start, upper_end; - char name[15]; - - for (i = 0; i < max_early_res && early_res[i].end; i++) { - r = &early_res[i]; - - /* Continue past non-overlapping ranges */ - if (end <= r->start || start >= r->end) - continue; - - /* - * Leave non-ok overlaps as is; let caller - * panic "Overlapping early reservations" - * when it hits this overlap. - */ - if (!r->overlap_ok) - return; - - /* - * We have an ok overlap. We will drop it from the early - * reservation map, and add back in any non-overlapping - * portions (lower or upper) as separate, overlap_ok, - * non-overlapping ranges. - */ - - /* 1. Note any non-overlapping (lower or upper) ranges. */ - strncpy(name, r->name, sizeof(name) - 1); - - lower_start = lower_end = 0; - upper_start = upper_end = 0; - if (r->start < start) { - lower_start = r->start; - lower_end = start; - } - if (r->end > end) { - upper_start = end; - upper_end = r->end; - } - - /* 2. Drop the original ok overlapping range */ - drop_range(i); - - i--; /* resume for-loop on copied down entry */ - - /* 3. Add back in any non-overlapping ranges. */ - if (lower_end) - reserve_early_overlap_ok(lower_start, lower_end, name); - if (upper_end) - reserve_early_overlap_ok(upper_start, upper_end, name); - } -} - -static void __init __reserve_early(u64 start, u64 end, char *name, - int overlap_ok) -{ - int i; - struct early_res *r; - - i = find_overlapped_early(start, end); - if (i >= max_early_res) - panic("Too many early reservations"); - r = &early_res[i]; - if (r->end) - panic("Overlapping early reservations " - "%llx-%llx %s to %llx-%llx %s\n", - start, end - 1, name ? 
name : "", r->start, - r->end - 1, r->name); - r->start = start; - r->end = end; - r->overlap_ok = overlap_ok; - if (name) - strncpy(r->name, name, sizeof(r->name) - 1); - early_res_count++; -} - -/* - * A few early reservtations come here. - * - * The 'overlap_ok' in the name of this routine does -not- mean it - * is ok for these reservations to overlap an earlier reservation. - * Rather it means that it is ok for subsequent reservations to - * overlap this one. - * - * Use this entry point to reserve early ranges when you are doing - * so out of "Paranoia", reserving perhaps more memory than you need, - * just in case, and don't mind a subsequent overlapping reservation - * that is known to be needed. - * - * The drop_overlaps_that_are_ok() call here isn't really needed. - * It would be needed if we had two colliding 'overlap_ok' - * reservations, so that the second such would not panic on the - * overlap with the first. We don't have any such as of this - * writing, but might as well tolerate such if it happens in - * the future. - */ -void __init reserve_early_overlap_ok(u64 start, u64 end, char *name) -{ - drop_overlaps_that_are_ok(start, end); - __reserve_early(start, end, name, 1); -} - -static void __init __check_and_double_early_res(u64 ex_start, u64 ex_end) -{ - u64 start, end, size, mem; - struct early_res *new; - - /* do we have enough slots left ? */ - if ((max_early_res - early_res_count) > max(max_early_res/8, 2)) - return; - - /* double it */ - mem = -1ULL; - size = sizeof(struct early_res) * max_early_res * 2; - if (early_res == early_res_x) - start = 0; - else - start = early_res[0].end; - end = ex_start; - if (start + size < end) - mem = find_fw_memmap_area(start, end, size, - sizeof(struct early_res)); - if (mem == -1ULL) { - start = ex_end; - end = get_max_mapped(); - if (start + size < end) - mem = find_fw_memmap_area(start, end, size, - sizeof(struct early_res)); - } - if (mem == -1ULL) - panic("can not find more space for early_res array"); - - new = __va(mem); - /* save the first one for own */ - new[0].start = mem; - new[0].end = mem + size; - new[0].overlap_ok = 0; - /* copy old to new */ - if (early_res == early_res_x) { - memcpy(&new[1], &early_res[0], - sizeof(struct early_res) * max_early_res); - memset(&new[max_early_res+1], 0, - sizeof(struct early_res) * (max_early_res - 1)); - early_res_count++; - } else { - memcpy(&new[1], &early_res[1], - sizeof(struct early_res) * (max_early_res - 1)); - memset(&new[max_early_res], 0, - sizeof(struct early_res) * max_early_res); - } - memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res); - early_res = new; - max_early_res *= 2; - printk(KERN_DEBUG "early_res array is doubled to %d at [%llx - %llx]\n", - max_early_res, mem, mem + size - 1); -} - -/* - * Most early reservations come here. - * - * We first have drop_overlaps_that_are_ok() drop any pre-existing - * 'overlap_ok' ranges, so that we can then reserve this memory - * range without risk of panic'ing on an overlapping overlap_ok - * early reservation. 
- */ -void __init reserve_early(u64 start, u64 end, char *name) -{ - if (start >= end) - return; - - __check_and_double_early_res(start, end); - - drop_overlaps_that_are_ok(start, end); - __reserve_early(start, end, name, 0); -} - -void __init reserve_early_without_check(u64 start, u64 end, char *name) -{ - struct early_res *r; - - if (start >= end) - return; - - __check_and_double_early_res(start, end); - - r = &early_res[early_res_count]; - - r->start = start; - r->end = end; - r->overlap_ok = 0; - if (name) - strncpy(r->name, name, sizeof(r->name) - 1); - early_res_count++; -} - -void __init free_early(u64 start, u64 end) -{ - struct early_res *r; - int i; - - kmemleak_free_part(__va(start), end - start); - - i = find_overlapped_early(start, end); - r = &early_res[i]; - if (i >= max_early_res || r->end != end || r->start != start) - panic("free_early on not reserved area: %llx-%llx!", - start, end - 1); - - drop_range(i); -} - -void __init free_early_partial(u64 start, u64 end) -{ - struct early_res *r; - int i; - - kmemleak_free_part(__va(start), end - start); - - if (start == end) - return; - - if (WARN_ONCE(start > end, " wrong range [%#llx, %#llx]\n", start, end)) - return; - -try_next: - i = find_overlapped_early(start, end); - if (i >= max_early_res) - return; - - r = &early_res[i]; - /* hole ? */ - if (r->end >= end && r->start <= start) { - drop_range_partial(i, start, end); - return; - } - - drop_range_partial(i, start, end); - goto try_next; -} - -#ifdef CONFIG_NO_BOOTMEM -static void __init subtract_early_res(struct range *range, int az) -{ - int i, count; - u64 final_start, final_end; - int idx = 0; - - count = 0; - for (i = 0; i < max_early_res && early_res[i].end; i++) - count++; - - /* need to skip first one ?*/ - if (early_res != early_res_x) - idx = 1; - -#define DEBUG_PRINT_EARLY_RES 1 - -#if DEBUG_PRINT_EARLY_RES - printk(KERN_INFO "Subtract (%d early reservations)\n", count); -#endif - for (i = idx; i < count; i++) { - struct early_res *r = &early_res[i]; -#if DEBUG_PRINT_EARLY_RES - printk(KERN_INFO " #%d [%010llx - %010llx] %15s\n", i, - r->start, r->end, r->name); -#endif - final_start = PFN_DOWN(r->start); - final_end = PFN_UP(r->end); - if (final_start >= final_end) - continue; - subtract_range(range, az, final_start, final_end); - } - -} - -int __init get_free_all_memory_range(struct range **rangep, int nodeid) -{ - int i, count; - u64 start = 0, end; - u64 size; - u64 mem; - struct range *range; - int nr_range; - - count = 0; - for (i = 0; i < max_early_res && early_res[i].end; i++) - count++; - - count *= 2; - - size = sizeof(struct range) * count; - end = get_max_mapped(); -#ifdef MAX_DMA32_PFN - if (end > (MAX_DMA32_PFN << PAGE_SHIFT)) - start = MAX_DMA32_PFN << PAGE_SHIFT; -#endif - mem = find_fw_memmap_area(start, end, size, sizeof(struct range)); - if (mem == -1ULL) - panic("can not find more space for range free"); - - range = __va(mem); - /* use early_node_map[] and early_res to get range array at first */ - memset(range, 0, size); - nr_range = 0; - - /* need to go over early_node_map to find out good range for node */ - nr_range = add_from_early_node_map(range, count, nr_range, nodeid); -#ifdef CONFIG_X86_32 - subtract_range(range, count, max_low_pfn, -1ULL); -#endif - subtract_early_res(range, count); - nr_range = clean_sort_range(range, count); - - /* need to clear it ? 
*/ - if (nodeid == MAX_NUMNODES) { - memset(&early_res[0], 0, - sizeof(struct early_res) * max_early_res); - early_res = NULL; - max_early_res = 0; - } - - *rangep = range; - return nr_range; -} -#else -void __init early_res_to_bootmem(u64 start, u64 end) -{ - int i, count; - u64 final_start, final_end; - int idx = 0; - - count = 0; - for (i = 0; i < max_early_res && early_res[i].end; i++) - count++; - - /* need to skip first one ?*/ - if (early_res != early_res_x) - idx = 1; - - printk(KERN_INFO "(%d/%d early reservations) ==> bootmem [%010llx - %010llx]\n", - count - idx, max_early_res, start, end); - for (i = idx; i < count; i++) { - struct early_res *r = &early_res[i]; - printk(KERN_INFO " #%d [%010llx - %010llx] %16s", i, - r->start, r->end, r->name); - final_start = max(start, r->start); - final_end = min(end, r->end); - if (final_start >= final_end) { - printk(KERN_CONT "\n"); - continue; - } - printk(KERN_CONT " ==> [%010llx - %010llx]\n", - final_start, final_end); - reserve_bootmem_generic(final_start, final_end - final_start, - BOOTMEM_DEFAULT); - } - /* clear them */ - memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res); - early_res = NULL; - max_early_res = 0; - early_res_count = 0; -} -#endif - -/* Check for already reserved areas */ -static inline int __init bad_addr(u64 *addrp, u64 size, u64 align) -{ - int i; - u64 addr = *addrp; - int changed = 0; - struct early_res *r; -again: - i = find_overlapped_early(addr, addr + size); - r = &early_res[i]; - if (i < max_early_res && r->end) { - *addrp = addr = round_up(r->end, align); - changed = 1; - goto again; - } - return changed; -} - -/* Check for already reserved areas */ -static inline int __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align) -{ - int i; - u64 addr = *addrp, last; - u64 size = *sizep; - int changed = 0; -again: - last = addr + size; - for (i = 0; i < max_early_res && early_res[i].end; i++) { - struct early_res *r = &early_res[i]; - if (last > r->start && addr < r->start) { - size = r->start - addr; - changed = 1; - goto again; - } - if (last > r->end && addr < r->end) { - addr = round_up(r->end, align); - size = last - addr; - changed = 1; - goto again; - } - if (last <= r->end && addr >= r->start) { - (*sizep)++; - return 0; - } - } - if (changed) { - *addrp = addr; - *sizep = size; - } - return changed; -} - -/* - * Find a free area with specified alignment in a specific range. 
- * only with the area.between start to end is active range from early_node_map - * so they are good as RAM - */ -u64 __init find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end, - u64 size, u64 align) -{ - u64 addr, last; - - addr = round_up(ei_start, align); - if (addr < start) - addr = round_up(start, align); - if (addr >= ei_last) - goto out; - while (bad_addr(&addr, size, align) && addr+size <= ei_last) - ; - last = addr + size; - if (last > ei_last) - goto out; - if (last > end) - goto out; - - return addr; - -out: - return -1ULL; -} - -u64 __init find_early_area_size(u64 ei_start, u64 ei_last, u64 start, - u64 *sizep, u64 align) -{ - u64 addr, last; - - addr = round_up(ei_start, align); - if (addr < start) - addr = round_up(start, align); - if (addr >= ei_last) - goto out; - *sizep = ei_last - addr; - while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last) - ; - last = addr + *sizep; - if (last > ei_last) - goto out; - - return addr; - -out: - return -1ULL; -} -- cgit v1.2.3-70-g09d2 From 6f2a75369e7561e800d86927ecd83c970996b21f Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:18 -0700 Subject: x86, memblock: Use memblock_memory_size()/memblock_free_memory_size() to get correct dma_reserve memblock_memory_size() will return memory size in memblock.memory.region. memblock_free_memory_size() will return free memory size in memblock.memory.region. So We can get exact reseved size in specified range. Set the size right after initmem_init(), because later bootmem API will get area above 16M. (except some fallback). Later after we remove the bootmem, We could call that just before paging_init(). Signed-off-by: Yinghai Lu Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/e820.h | 2 ++ arch/x86/kernel/e820.c | 16 ++++++++++++++++ arch/x86/kernel/setup.c | 1 + arch/x86/mm/init_64.c | 7 ------- 4 files changed, 19 insertions(+), 7 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h index 718646384e0..5be1542fbfa 100644 --- a/arch/x86/include/asm/e820.h +++ b/arch/x86/include/asm/e820.h @@ -117,6 +117,8 @@ extern unsigned long e820_end_of_low_ram_pfn(void); extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align); void memblock_x86_fill(void); +void memblock_find_dma_reserve(void); + extern void finish_e820_parsing(void); extern void e820_reserve_resources(void); extern void e820_reserve_resources_late(void); diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index d5fd89462d7..0c2b7ef7a34 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -1105,3 +1105,19 @@ void __init memblock_x86_fill(void) memblock_analyze(); memblock_dump_all(); } + +void __init memblock_find_dma_reserve(void) +{ +#ifdef CONFIG_X86_64 + u64 free_size_pfn; + u64 mem_size_pfn; + /* + * need to find out used area below MAX_DMA_PFN + * need to use memblock to get free size in [0, MAX_DMA_PFN] + * at first, and assume boot_mem will not take below MAX_DMA_PFN + */ + mem_size_pfn = memblock_x86_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT; + free_size_pfn = memblock_x86_free_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT; + set_dma_reserve(mem_size_pfn - free_size_pfn); +#endif +} diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index a4f01733e87..924c8f78e98 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1013,6 +1013,7 @@ void __init setup_arch(char **cmdline_p) #endif initmem_init(0, max_pfn, acpi, k8); + 
memblock_find_dma_reserve(); #ifndef CONFIG_NO_BOOTMEM memblock_x86_to_bootmem(0, max_low_pfn< #include -static unsigned long dma_reserve __initdata; - static int __init parse_direct_gbpages_off(char *arg) { direct_gbpages = 0; @@ -821,11 +819,6 @@ int __init reserve_bootmem_generic(unsigned long phys, unsigned long len, reserve_bootmem(phys, len, flags); - if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) { - dma_reserve += len / PAGE_SIZE; - set_dma_reserve(dma_reserve); - } - return 0; } #endif -- cgit v1.2.3-70-g09d2 From c9cf4a019cff198ee5638323e3b0ee18886467e8 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Wed, 25 Aug 2010 22:23:34 +0400 Subject: perf, x86, Pentium4: Add RAW events verification Implements verification of - Bits of ESCR EventMask field (meaningful bits in field are hardware predefined and others bits should be set to zero) - INSTR_COMPLETED event (it is available on predefined cpu model only) - Thread shared events (they should be guarded by "perf_event_paranoid" sysctl due to security reason). The side effect of this action is that PERF_COUNT_HW_BUS_CYCLES become a "paranoid" general event. Signed-off-by: Cyrill Gorcunov Tested-by: Lin Ming Cc: Frederic Weisbecker Cc: Peter Zijlstra LKML-Reference: <20100825182334.GB14874@lenovo> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/perf_event_p4.h | 52 +++---- arch/x86/kernel/cpu/perf_event_p4.c | 292 ++++++++++++++++++++++++++++++++--- 2 files changed, 290 insertions(+), 54 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h index def500776b1..a70cd216be5 100644 --- a/arch/x86/include/asm/perf_event_p4.h +++ b/arch/x86/include/asm/perf_event_p4.h @@ -36,19 +36,6 @@ #define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT) #define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT) -/* Non HT mask */ -#define P4_ESCR_MASK \ - (P4_ESCR_EVENT_MASK | \ - P4_ESCR_EVENTMASK_MASK | \ - P4_ESCR_TAG_MASK | \ - P4_ESCR_TAG_ENABLE | \ - P4_ESCR_T0_OS | \ - P4_ESCR_T0_USR) - -/* HT mask */ -#define P4_ESCR_MASK_HT \ - (P4_ESCR_MASK | P4_ESCR_T1_OS | P4_ESCR_T1_USR) - #define P4_CCCR_OVF 0x80000000U #define P4_CCCR_CASCADE 0x40000000U #define P4_CCCR_OVF_PMI_T0 0x04000000U @@ -70,23 +57,6 @@ #define P4_CCCR_THRESHOLD(v) ((v) << P4_CCCR_THRESHOLD_SHIFT) #define P4_CCCR_ESEL(v) ((v) << P4_CCCR_ESCR_SELECT_SHIFT) -/* Non HT mask */ -#define P4_CCCR_MASK \ - (P4_CCCR_OVF | \ - P4_CCCR_CASCADE | \ - P4_CCCR_OVF_PMI_T0 | \ - P4_CCCR_FORCE_OVF | \ - P4_CCCR_EDGE | \ - P4_CCCR_THRESHOLD_MASK | \ - P4_CCCR_COMPLEMENT | \ - P4_CCCR_COMPARE | \ - P4_CCCR_ESCR_SELECT_MASK | \ - P4_CCCR_ENABLE) - -/* HT mask */ -#define P4_CCCR_MASK_HT \ - (P4_CCCR_MASK | P4_CCCR_OVF_PMI_T1 | P4_CCCR_THREAD_ANY) - #define P4_GEN_ESCR_EMASK(class, name, bit) \ class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT) #define P4_ESCR_EMASK_BIT(class, name) class##__##name @@ -127,6 +97,28 @@ #define P4_CONFIG_HT_SHIFT 63 #define P4_CONFIG_HT (1ULL << P4_CONFIG_HT_SHIFT) +/* + * The bits we allow to pass for RAW events + */ +#define P4_CONFIG_MASK_ESCR \ + P4_ESCR_EVENT_MASK | \ + P4_ESCR_EVENTMASK_MASK | \ + P4_ESCR_TAG_MASK | \ + P4_ESCR_TAG_ENABLE + +#define P4_CONFIG_MASK_CCCR \ + P4_CCCR_EDGE | \ + P4_CCCR_THRESHOLD_MASK | \ + P4_CCCR_COMPLEMENT | \ + P4_CCCR_COMPARE | \ + P4_CCCR_THREAD_ANY | \ + P4_CCCR_RESERVED + +/* some dangerous bits are reserved for kernel internals */ +#define P4_CONFIG_MASK \ + (p4_config_pack_escr(P4_CONFIG_MASK_ESCR)) | \ + 
(p4_config_pack_cccr(P4_CONFIG_MASK_CCCR)) + static inline bool p4_is_event_cascaded(u64 config) { u32 cccr = p4_config_unpack_cccr(config); diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index 7e578e9cc58..4fe62c7165c 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c @@ -18,6 +18,8 @@ struct p4_event_bind { unsigned int opcode; /* Event code and ESCR selector */ unsigned int escr_msr[2]; /* ESCR MSR for this event */ + unsigned int escr_emask; /* valid ESCR EventMask bits */ + unsigned int shared; /* event is shared across threads */ char cntr[2][P4_CNTR_LIMIT]; /* counter index (offset), -1 on abscence */ }; @@ -66,231 +68,435 @@ static struct p4_event_bind p4_event_bind_map[] = { [P4_EVENT_TC_DELIVER_MODE] = { .opcode = P4_OPCODE(P4_EVENT_TC_DELIVER_MODE), .escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DD) | + P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DB) | + P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DI) | + P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BD) | + P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BB) | + P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BI) | + P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, ID), + .shared = 1, .cntr = { {4, 5, -1}, {6, 7, -1} }, }, [P4_EVENT_BPU_FETCH_REQUEST] = { .opcode = P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST), .escr_msr = { MSR_P4_BPU_ESCR0, MSR_P4_BPU_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_BPU_FETCH_REQUEST, TCMISS), .cntr = { {0, -1, -1}, {2, -1, -1} }, }, [P4_EVENT_ITLB_REFERENCE] = { .opcode = P4_OPCODE(P4_EVENT_ITLB_REFERENCE), .escr_msr = { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, HIT) | + P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, MISS) | + P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, HIT_UK), .cntr = { {0, -1, -1}, {2, -1, -1} }, }, [P4_EVENT_MEMORY_CANCEL] = { .opcode = P4_OPCODE(P4_EVENT_MEMORY_CANCEL), .escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_CANCEL, ST_RB_FULL) | + P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_CANCEL, 64K_CONF), .cntr = { {8, 9, -1}, {10, 11, -1} }, }, [P4_EVENT_MEMORY_COMPLETE] = { .opcode = P4_OPCODE(P4_EVENT_MEMORY_COMPLETE), .escr_msr = { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, LSC) | + P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, SSC), .cntr = { {8, 9, -1}, {10, 11, -1} }, }, [P4_EVENT_LOAD_PORT_REPLAY] = { .opcode = P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY), .escr_msr = { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_LOAD_PORT_REPLAY, SPLIT_LD), .cntr = { {8, 9, -1}, {10, 11, -1} }, }, [P4_EVENT_STORE_PORT_REPLAY] = { .opcode = P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY), .escr_msr = { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_STORE_PORT_REPLAY, SPLIT_ST), .cntr = { {8, 9, -1}, {10, 11, -1} }, }, [P4_EVENT_MOB_LOAD_REPLAY] = { .opcode = P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY), .escr_msr = { MSR_P4_MOB_ESCR0, MSR_P4_MOB_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, NO_STA) | + P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, NO_STD) | + P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, PARTIAL_DATA) | + P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, UNALGN_ADDR), .cntr = { {0, -1, -1}, {2, -1, -1} }, }, [P4_EVENT_PAGE_WALK_TYPE] = { .opcode = P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE), .escr_msr = { MSR_P4_PMH_ESCR0, 
MSR_P4_PMH_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_PAGE_WALK_TYPE, DTMISS) | + P4_ESCR_EMASK_BIT(P4_EVENT_PAGE_WALK_TYPE, ITMISS), + .shared = 1, .cntr = { {0, -1, -1}, {2, -1, -1} }, }, [P4_EVENT_BSQ_CACHE_REFERENCE] = { .opcode = P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE), .escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS), .cntr = { {0, -1, -1}, {2, -1, -1} }, }, [P4_EVENT_IOQ_ALLOCATION] = { .opcode = P4_OPCODE(P4_EVENT_IOQ_ALLOCATION), .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, DEFAULT) | + P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, ALL_READ) | + P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, ALL_WRITE) | + P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_UC) | + P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WC) | + P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WT) | + P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WP) | + P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WB) | + P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, OWN) | + P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, OTHER) | + P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, PREFETCH), .cntr = { {0, -1, -1}, {2, -1, -1} }, }, [P4_EVENT_IOQ_ACTIVE_ENTRIES] = { /* shared ESCR */ .opcode = P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES), .escr_msr = { MSR_P4_FSB_ESCR1, MSR_P4_FSB_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, DEFAULT) | + P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_READ) | + P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_WRITE) | + P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_UC) | + P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WC) | + P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WT) | + P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WP) | + P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WB) | + P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, OWN) | + P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, OTHER) | + P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, PREFETCH), .cntr = { {2, -1, -1}, {3, -1, -1} }, }, [P4_EVENT_FSB_DATA_ACTIVITY] = { .opcode = P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY), .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV) | + P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN) | + P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OTHER) | + P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_DRV) | + P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OWN) | + P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OTHER), + .shared = 1, .cntr = { {0, -1, -1}, {2, -1, -1} }, }, [P4_EVENT_BSQ_ALLOCATION] = { /* shared ESCR, broken CCCR1 */ .opcode = P4_OPCODE(P4_EVENT_BSQ_ALLOCATION), .escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR0 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE0) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE1) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, 
REQ_LEN0) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LEN1) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_IO_TYPE) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LOCK_TYPE) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_CACHE_TYPE) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_SPLIT_TYPE) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_DEM_TYPE) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_ORD_TYPE) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE0) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE1) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE2), .cntr = { {0, -1, -1}, {1, -1, -1} }, }, [P4_EVENT_BSQ_ACTIVE_ENTRIES] = { /* shared ESCR */ .opcode = P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES), .escr_msr = { MSR_P4_BSU_ESCR1 , MSR_P4_BSU_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE0) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE1) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN0) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN1) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_IO_TYPE) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LOCK_TYPE) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_CACHE_TYPE) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_SPLIT_TYPE) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_DEM_TYPE) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_ORD_TYPE) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE0) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE1) | + P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE2), .cntr = { {2, -1, -1}, {3, -1, -1} }, }, [P4_EVENT_SSE_INPUT_ASSIST] = { .opcode = P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST), .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_SSE_INPUT_ASSIST, ALL), + .shared = 1, .cntr = { {8, 9, -1}, {10, 11, -1} }, }, [P4_EVENT_PACKED_SP_UOP] = { .opcode = P4_OPCODE(P4_EVENT_PACKED_SP_UOP), .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_PACKED_SP_UOP, ALL), + .shared = 1, .cntr = { {8, 9, -1}, {10, 11, -1} }, }, [P4_EVENT_PACKED_DP_UOP] = { .opcode = P4_OPCODE(P4_EVENT_PACKED_DP_UOP), .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_PACKED_DP_UOP, ALL), + .shared = 1, .cntr = { {8, 9, -1}, {10, 11, -1} }, }, [P4_EVENT_SCALAR_SP_UOP] = { .opcode = P4_OPCODE(P4_EVENT_SCALAR_SP_UOP), .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_SCALAR_SP_UOP, ALL), + .shared = 1, .cntr = { {8, 9, -1}, {10, 11, -1} }, }, [P4_EVENT_SCALAR_DP_UOP] = { .opcode = P4_OPCODE(P4_EVENT_SCALAR_DP_UOP), .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_SCALAR_DP_UOP, ALL), + .shared = 1, .cntr = { {8, 9, -1}, {10, 11, -1} }, }, [P4_EVENT_64BIT_MMX_UOP] = { .opcode = P4_OPCODE(P4_EVENT_64BIT_MMX_UOP), .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_64BIT_MMX_UOP, ALL), + .shared = 1, .cntr = { {8, 9, -1}, {10, 11, -1} }, }, [P4_EVENT_128BIT_MMX_UOP] = { .opcode = P4_OPCODE(P4_EVENT_128BIT_MMX_UOP), .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_128BIT_MMX_UOP, ALL), + .shared = 1, .cntr = { {8, 9, -1}, {10, 11, -1} }, }, [P4_EVENT_X87_FP_UOP] = { .opcode = P4_OPCODE(P4_EVENT_X87_FP_UOP), .escr_msr = { MSR_P4_FIRM_ESCR0, 
MSR_P4_FIRM_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_X87_FP_UOP, ALL), + .shared = 1, .cntr = { {8, 9, -1}, {10, 11, -1} }, }, [P4_EVENT_TC_MISC] = { .opcode = P4_OPCODE(P4_EVENT_TC_MISC), .escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_TC_MISC, FLUSH), .cntr = { {4, 5, -1}, {6, 7, -1} }, }, [P4_EVENT_GLOBAL_POWER_EVENTS] = { .opcode = P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS), .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING), .cntr = { {0, -1, -1}, {2, -1, -1} }, }, [P4_EVENT_TC_MS_XFER] = { .opcode = P4_OPCODE(P4_EVENT_TC_MS_XFER), .escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_TC_MS_XFER, CISC), .cntr = { {4, 5, -1}, {6, 7, -1} }, }, [P4_EVENT_UOP_QUEUE_WRITES] = { .opcode = P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES), .escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_BUILD) | + P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_DELIVER) | + P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_ROM), .cntr = { {4, 5, -1}, {6, 7, -1} }, }, [P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE] = { .opcode = P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE), .escr_msr = { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR0 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CONDITIONAL) | + P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CALL) | + P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, RETURN) | + P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, INDIRECT), .cntr = { {4, 5, -1}, {6, 7, -1} }, }, [P4_EVENT_RETIRED_BRANCH_TYPE] = { .opcode = P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE), .escr_msr = { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL) | + P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL) | + P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN) | + P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT), .cntr = { {4, 5, -1}, {6, 7, -1} }, }, [P4_EVENT_RESOURCE_STALL] = { .opcode = P4_OPCODE(P4_EVENT_RESOURCE_STALL), .escr_msr = { MSR_P4_ALF_ESCR0, MSR_P4_ALF_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_RESOURCE_STALL, SBFULL), .cntr = { {12, 13, 16}, {14, 15, 17} }, }, [P4_EVENT_WC_BUFFER] = { .opcode = P4_OPCODE(P4_EVENT_WC_BUFFER), .escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_WC_BUFFER, WCB_EVICTS) | + P4_ESCR_EMASK_BIT(P4_EVENT_WC_BUFFER, WCB_FULL_EVICTS), + .shared = 1, .cntr = { {8, 9, -1}, {10, 11, -1} }, }, [P4_EVENT_B2B_CYCLES] = { .opcode = P4_OPCODE(P4_EVENT_B2B_CYCLES), .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, + .escr_emask = 0, .cntr = { {0, -1, -1}, {2, -1, -1} }, }, [P4_EVENT_BNR] = { .opcode = P4_OPCODE(P4_EVENT_BNR), .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, + .escr_emask = 0, .cntr = { {0, -1, -1}, {2, -1, -1} }, }, [P4_EVENT_SNOOP] = { .opcode = P4_OPCODE(P4_EVENT_SNOOP), .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, + .escr_emask = 0, .cntr = { {0, -1, -1}, {2, -1, -1} }, }, [P4_EVENT_RESPONSE] = { .opcode = P4_OPCODE(P4_EVENT_RESPONSE), .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, + .escr_emask = 0, .cntr = { {0, -1, -1}, {2, -1, -1} }, }, [P4_EVENT_FRONT_END_EVENT] = { .opcode = P4_OPCODE(P4_EVENT_FRONT_END_EVENT), .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, + .escr_emask = + 
P4_ESCR_EMASK_BIT(P4_EVENT_FRONT_END_EVENT, NBOGUS) | + P4_ESCR_EMASK_BIT(P4_EVENT_FRONT_END_EVENT, BOGUS), .cntr = { {12, 13, 16}, {14, 15, 17} }, }, [P4_EVENT_EXECUTION_EVENT] = { .opcode = P4_OPCODE(P4_EVENT_EXECUTION_EVENT), .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0) | + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1) | + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2) | + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3) | + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0) | + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1) | + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2) | + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3), .cntr = { {12, 13, 16}, {14, 15, 17} }, }, [P4_EVENT_REPLAY_EVENT] = { .opcode = P4_OPCODE(P4_EVENT_REPLAY_EVENT), .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, NBOGUS) | + P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, BOGUS), .cntr = { {12, 13, 16}, {14, 15, 17} }, }, [P4_EVENT_INSTR_RETIRED] = { .opcode = P4_OPCODE(P4_EVENT_INSTR_RETIRED), .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG) | + P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSTAG) | + P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG) | + P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSTAG), .cntr = { {12, 13, 16}, {14, 15, 17} }, }, [P4_EVENT_UOPS_RETIRED] = { .opcode = P4_OPCODE(P4_EVENT_UOPS_RETIRED), .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_UOPS_RETIRED, NBOGUS) | + P4_ESCR_EMASK_BIT(P4_EVENT_UOPS_RETIRED, BOGUS), .cntr = { {12, 13, 16}, {14, 15, 17} }, }, [P4_EVENT_UOP_TYPE] = { .opcode = P4_OPCODE(P4_EVENT_UOP_TYPE), .escr_msr = { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_UOP_TYPE, TAGLOADS) | + P4_ESCR_EMASK_BIT(P4_EVENT_UOP_TYPE, TAGSTORES), .cntr = { {12, 13, 16}, {14, 15, 17} }, }, [P4_EVENT_BRANCH_RETIRED] = { .opcode = P4_OPCODE(P4_EVENT_BRANCH_RETIRED), .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMNP) | + P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMNM) | + P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMTP) | + P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMTM), .cntr = { {12, 13, 16}, {14, 15, 17} }, }, [P4_EVENT_MISPRED_BRANCH_RETIRED] = { .opcode = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED), .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS), .cntr = { {12, 13, 16}, {14, 15, 17} }, }, [P4_EVENT_X87_ASSIST] = { .opcode = P4_OPCODE(P4_EVENT_X87_ASSIST), .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, FPSU) | + P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, FPSO) | + P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, POAO) | + P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, POAU) | + P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, PREA), .cntr = { {12, 13, 16}, {14, 15, 17} }, }, [P4_EVENT_MACHINE_CLEAR] = { .opcode = P4_OPCODE(P4_EVENT_MACHINE_CLEAR), .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, CLEAR) | + P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, MOCLEAR) | + P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, SMCLEAR), .cntr = { {12, 13, 16}, {14, 15, 17} }, }, [P4_EVENT_INSTR_COMPLETED] = { .opcode = 
P4_OPCODE(P4_EVENT_INSTR_COMPLETED), .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, + .escr_emask = + P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_COMPLETED, NBOGUS) | + P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_COMPLETED, BOGUS), .cntr = { {12, 13, 16}, {14, 15, 17} }, }, }; @@ -428,29 +634,73 @@ static u64 p4_pmu_event_map(int hw_event) return config; } +/* check cpu model specifics */ +static bool p4_event_match_cpu_model(unsigned int event_idx) +{ + /* INSTR_COMPLETED event only exist for model 3, 4, 6 (Prescott) */ + if (event_idx == P4_EVENT_INSTR_COMPLETED) { + if (boot_cpu_data.x86_model != 3 && + boot_cpu_data.x86_model != 4 && + boot_cpu_data.x86_model != 6) + return false; + } + + /* + * For info + * - IQ_ESCR0, IQ_ESCR1 only for models 1 and 2 + */ + + return true; +} + static int p4_validate_raw_event(struct perf_event *event) { - unsigned int v; + unsigned int v, emask; - /* user data may have out-of-bound event index */ + /* User data may have out-of-bound event index */ v = p4_config_unpack_event(event->attr.config); - if (v >= ARRAY_SIZE(p4_event_bind_map)) { - pr_warning("P4 PMU: Unknown event code: %d\n", v); + if (v >= ARRAY_SIZE(p4_event_bind_map)) + return -EINVAL; + + /* It may be unsupported: */ + if (!p4_event_match_cpu_model(v)) return -EINVAL; + + /* + * NOTE: P4_CCCR_THREAD_ANY has not the same meaning as + * in Architectural Performance Monitoring, it means not + * on _which_ logical cpu to count but rather _when_, ie it + * depends on logical cpu state -- count event if one cpu active, + * none, both or any, so we just allow user to pass any value + * desired. + * + * In turn we always set Tx_OS/Tx_USR bits bound to logical + * cpu without their propagation to another cpu + */ + + /* + * if an event is shared accross the logical threads + * the user needs special permissions to be able to use it + */ + if (p4_event_bind_map[v].shared) { + if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) + return -EACCES; } + /* ESCR EventMask bits may be invalid */ + emask = p4_config_unpack_escr(event->attr.config) & P4_ESCR_EVENTMASK_MASK; + if (emask & ~p4_event_bind_map[v].escr_emask) + return -EINVAL; + /* - * it may have some screwed PEBS bits + * it may have some invalid PEBS bits */ - if (p4_config_pebs_has(event->attr.config, P4_PEBS_CONFIG_ENABLE)) { - pr_warning("P4 PMU: PEBS are not supported yet\n"); + if (p4_config_pebs_has(event->attr.config, P4_PEBS_CONFIG_ENABLE)) return -EINVAL; - } + v = p4_config_unpack_metric(event->attr.config); - if (v >= ARRAY_SIZE(p4_pebs_bind_map)) { - pr_warning("P4 PMU: Unknown metric code: %d\n", v); + if (v >= ARRAY_SIZE(p4_pebs_bind_map)) return -EINVAL; - } return 0; } @@ -478,27 +728,21 @@ static int p4_hw_config(struct perf_event *event) if (event->attr.type == PERF_TYPE_RAW) { + /* + * Clear bits we reserve to be managed by kernel itself + * and never allowed from a user space + */ + event->attr.config &= P4_CONFIG_MASK; + rc = p4_validate_raw_event(event); if (rc) goto out; /* - * We don't control raw events so it's up to the caller - * to pass sane values (and we don't count the thread number - * on HT machine but allow HT-compatible specifics to be - * passed on) - * * Note that for RAW events we allow user to use P4_CCCR_RESERVED * bits since we keep additional info here (for cache events and etc) - * - * XXX: HT wide things should check perf_paranoid_cpu() && - * CAP_SYS_ADMIN */ - event->hw.config |= event->attr.config & - (p4_config_pack_escr(P4_ESCR_MASK_HT) | - p4_config_pack_cccr(P4_CCCR_MASK_HT | P4_CCCR_RESERVED)); - - 
event->hw.config &= ~P4_CCCR_FORCE_OVF; + event->hw.config |= event->attr.config; } rc = x86_setup_perfctr(event); -- cgit v1.2.3-70-g09d2 From df5d1874ce1a1f0e0eceff4fa3a9d45620243a68 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Thu, 2 Sep 2010 14:07:16 +0100 Subject: x86: Use {push,pop}{l,q}_cfi in more places ... plus additionally introduce {push,pop}f{l,q}_cfi. All in the hope that the code becomes better readable this way (it gets quite a bit smaller in any case). Signed-off-by: Jan Beulich Acked-by: Alexander van Heukelum LKML-Reference: <4C7FBDA40200007800013FAF@vpn.id2.novell.com> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/dwarf2.h | 20 +++ arch/x86/kernel/entry_32.S | 294 ++++++++++++++---------------------------- arch/x86/kernel/entry_64.S | 65 ++++------ 3 files changed, 141 insertions(+), 238 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h index 733f7e91e7a..32609919931 100644 --- a/arch/x86/include/asm/dwarf2.h +++ b/arch/x86/include/asm/dwarf2.h @@ -89,6 +89,16 @@ CFI_ADJUST_CFA_OFFSET -8 .endm + .macro pushfq_cfi + pushfq + CFI_ADJUST_CFA_OFFSET 8 + .endm + + .macro popfq_cfi + popfq + CFI_ADJUST_CFA_OFFSET -8 + .endm + .macro movq_cfi reg offset=0 movq %\reg, \offset(%rsp) CFI_REL_OFFSET \reg, \offset @@ -109,6 +119,16 @@ CFI_ADJUST_CFA_OFFSET -4 .endm + .macro pushfl_cfi + pushfl + CFI_ADJUST_CFA_OFFSET 4 + .endm + + .macro popfl_cfi + popfl + CFI_ADJUST_CFA_OFFSET -4 + .endm + .macro movl_cfi reg offset=0 movl %\reg, \offset(%esp) CFI_REL_OFFSET \reg, \offset diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index d9b950ee559..9fb188d7bc7 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -115,8 +115,7 @@ /* unfortunately push/pop can't be no-op */ .macro PUSH_GS - pushl $0 - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $0 .endm .macro POP_GS pop=0 addl $(4 + \pop), %esp @@ -140,14 +139,12 @@ #else /* CONFIG_X86_32_LAZY_GS */ .macro PUSH_GS - pushl %gs - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %gs /*CFI_REL_OFFSET gs, 0*/ .endm .macro POP_GS pop=0 -98: popl %gs - CFI_ADJUST_CFA_OFFSET -4 +98: popl_cfi %gs /*CFI_RESTORE gs*/ .if \pop <> 0 add $\pop, %esp @@ -195,35 +192,25 @@ .macro SAVE_ALL cld PUSH_GS - pushl %fs - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %fs /*CFI_REL_OFFSET fs, 0;*/ - pushl %es - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %es /*CFI_REL_OFFSET es, 0;*/ - pushl %ds - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %ds /*CFI_REL_OFFSET ds, 0;*/ - pushl %eax - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %eax CFI_REL_OFFSET eax, 0 - pushl %ebp - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %ebp CFI_REL_OFFSET ebp, 0 - pushl %edi - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %edi CFI_REL_OFFSET edi, 0 - pushl %esi - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %esi CFI_REL_OFFSET esi, 0 - pushl %edx - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %edx CFI_REL_OFFSET edx, 0 - pushl %ecx - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %ecx CFI_REL_OFFSET ecx, 0 - pushl %ebx - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %ebx CFI_REL_OFFSET ebx, 0 movl $(__USER_DS), %edx movl %edx, %ds @@ -234,39 +221,29 @@ .endm .macro RESTORE_INT_REGS - popl %ebx - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %ebx CFI_RESTORE ebx - popl %ecx - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %ecx CFI_RESTORE ecx - popl %edx - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %edx CFI_RESTORE edx - popl %esi - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %esi CFI_RESTORE esi - popl %edi - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %edi CFI_RESTORE edi - popl %ebp - CFI_ADJUST_CFA_OFFSET -4 + 
popl_cfi %ebp CFI_RESTORE ebp - popl %eax - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %eax CFI_RESTORE eax .endm .macro RESTORE_REGS pop=0 RESTORE_INT_REGS -1: popl %ds - CFI_ADJUST_CFA_OFFSET -4 +1: popl_cfi %ds /*CFI_RESTORE ds;*/ -2: popl %es - CFI_ADJUST_CFA_OFFSET -4 +2: popl_cfi %es /*CFI_RESTORE es;*/ -3: popl %fs - CFI_ADJUST_CFA_OFFSET -4 +3: popl_cfi %fs /*CFI_RESTORE fs;*/ POP_GS \pop .pushsection .fixup, "ax" @@ -320,16 +297,12 @@ ENTRY(ret_from_fork) CFI_STARTPROC - pushl %eax - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %eax call schedule_tail GET_THREAD_INFO(%ebp) - popl %eax - CFI_ADJUST_CFA_OFFSET -4 - pushl $0x0202 # Reset kernel eflags - CFI_ADJUST_CFA_OFFSET 4 - popfl - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %eax + pushl_cfi $0x0202 # Reset kernel eflags + popfl_cfi jmp syscall_exit CFI_ENDPROC END(ret_from_fork) @@ -409,29 +382,23 @@ sysenter_past_esp: * enough kernel state to call TRACE_IRQS_OFF can be called - but * we immediately enable interrupts at that point anyway. */ - pushl $(__USER_DS) - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $(__USER_DS) /*CFI_REL_OFFSET ss, 0*/ - pushl %ebp - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %ebp CFI_REL_OFFSET esp, 0 - pushfl + pushfl_cfi orl $X86_EFLAGS_IF, (%esp) - CFI_ADJUST_CFA_OFFSET 4 - pushl $(__USER_CS) - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $(__USER_CS) /*CFI_REL_OFFSET cs, 0*/ /* * Push current_thread_info()->sysenter_return to the stack. * A tiny bit of offset fixup is necessary - 4*4 means the 4 words * pushed above; +8 corresponds to copy_thread's esp0 setting. */ - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp) - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp) CFI_REL_OFFSET eip, 0 - pushl %eax - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %eax SAVE_ALL ENABLE_INTERRUPTS(CLBR_NONE) @@ -486,8 +453,7 @@ sysenter_audit: movl %eax,%edx /* 2nd arg: syscall number */ movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */ call audit_syscall_entry - pushl %ebx - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %ebx movl PT_EAX(%esp),%eax /* reload syscall number */ jmp sysenter_do_call @@ -529,8 +495,7 @@ ENDPROC(ia32_sysenter_target) # system call handler stub ENTRY(system_call) RING0_INT_FRAME # can't unwind into user space anyway - pushl %eax # save orig_eax - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %eax # save orig_eax SAVE_ALL GET_THREAD_INFO(%ebp) # system call tracing in operation / emulation @@ -566,7 +531,6 @@ restore_all_notrace: je ldt_ss # returning to user-space with LDT SS restore_nocheck: RESTORE_REGS 4 # skip orig_eax/error_code - CFI_ADJUST_CFA_OFFSET -4 irq_return: INTERRUPT_RETURN .section .fixup,"ax" @@ -619,10 +583,8 @@ ldt_ss: shr $16, %edx mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ - pushl $__ESPFIX_SS - CFI_ADJUST_CFA_OFFSET 4 - push %eax /* new kernel esp */ - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $__ESPFIX_SS + pushl_cfi %eax /* new kernel esp */ /* Disable interrupts, but do not irqtrace this section: we * will soon execute iret and the tracer was already set to * the irqstate after the iret */ @@ -666,11 +628,9 @@ work_notifysig: # deal with pending signals and ALIGN work_notifysig_v86: - pushl %ecx # save ti_flags for do_notify_resume - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %ecx # save ti_flags for do_notify_resume call save_v86_state # %eax contains pt_regs pointer - popl %ecx - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %ecx movl %eax, %esp #else movl %esp, %eax @@ -803,10 +763,8 @@ ENDPROC(ptregs_clone) mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ shl $16, %eax addl %esp, 
%eax /* the adjusted stack pointer */ - pushl $__KERNEL_DS - CFI_ADJUST_CFA_OFFSET 4 - pushl %eax - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $__KERNEL_DS + pushl_cfi %eax lss (%esp), %esp /* switch to the normal stack segment */ CFI_ADJUST_CFA_OFFSET -8 .endm @@ -843,8 +801,7 @@ vector=FIRST_EXTERNAL_VECTOR .if vector <> FIRST_EXTERNAL_VECTOR CFI_ADJUST_CFA_OFFSET -4 .endif -1: pushl $(~vector+0x80) /* Note: always in signed byte range */ - CFI_ADJUST_CFA_OFFSET 4 +1: pushl_cfi $(~vector+0x80) /* Note: always in signed byte range */ .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6 jmp 2f .endif @@ -884,8 +841,7 @@ ENDPROC(common_interrupt) #define BUILD_INTERRUPT3(name, nr, fn) \ ENTRY(name) \ RING0_INT_FRAME; \ - pushl $~(nr); \ - CFI_ADJUST_CFA_OFFSET 4; \ + pushl_cfi $~(nr); \ SAVE_ALL; \ TRACE_IRQS_OFF \ movl %esp,%eax; \ @@ -901,21 +857,18 @@ ENDPROC(name) ENTRY(coprocessor_error) RING0_INT_FRAME - pushl $0 - CFI_ADJUST_CFA_OFFSET 4 - pushl $do_coprocessor_error - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $0 + pushl_cfi $do_coprocessor_error jmp error_code CFI_ENDPROC END(coprocessor_error) ENTRY(simd_coprocessor_error) RING0_INT_FRAME - pushl $0 - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $0 #ifdef CONFIG_X86_INVD_BUG /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */ -661: pushl $do_general_protection +661: pushl_cfi $do_general_protection 662: .section .altinstructions,"a" .balign 4 @@ -930,19 +883,16 @@ ENTRY(simd_coprocessor_error) 664: .previous #else - pushl $do_simd_coprocessor_error + pushl_cfi $do_simd_coprocessor_error #endif - CFI_ADJUST_CFA_OFFSET 4 jmp error_code CFI_ENDPROC END(simd_coprocessor_error) ENTRY(device_not_available) RING0_INT_FRAME - pushl $-1 # mark this as an int - CFI_ADJUST_CFA_OFFSET 4 - pushl $do_device_not_available - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $-1 # mark this as an int + pushl_cfi $do_device_not_available jmp error_code CFI_ENDPROC END(device_not_available) @@ -964,82 +914,68 @@ END(native_irq_enable_sysexit) ENTRY(overflow) RING0_INT_FRAME - pushl $0 - CFI_ADJUST_CFA_OFFSET 4 - pushl $do_overflow - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $0 + pushl_cfi $do_overflow jmp error_code CFI_ENDPROC END(overflow) ENTRY(bounds) RING0_INT_FRAME - pushl $0 - CFI_ADJUST_CFA_OFFSET 4 - pushl $do_bounds - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $0 + pushl_cfi $do_bounds jmp error_code CFI_ENDPROC END(bounds) ENTRY(invalid_op) RING0_INT_FRAME - pushl $0 - CFI_ADJUST_CFA_OFFSET 4 - pushl $do_invalid_op - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $0 + pushl_cfi $do_invalid_op jmp error_code CFI_ENDPROC END(invalid_op) ENTRY(coprocessor_segment_overrun) RING0_INT_FRAME - pushl $0 - CFI_ADJUST_CFA_OFFSET 4 - pushl $do_coprocessor_segment_overrun - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $0 + pushl_cfi $do_coprocessor_segment_overrun jmp error_code CFI_ENDPROC END(coprocessor_segment_overrun) ENTRY(invalid_TSS) RING0_EC_FRAME - pushl $do_invalid_TSS - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $do_invalid_TSS jmp error_code CFI_ENDPROC END(invalid_TSS) ENTRY(segment_not_present) RING0_EC_FRAME - pushl $do_segment_not_present - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $do_segment_not_present jmp error_code CFI_ENDPROC END(segment_not_present) ENTRY(stack_segment) RING0_EC_FRAME - pushl $do_stack_segment - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $do_stack_segment jmp error_code CFI_ENDPROC END(stack_segment) ENTRY(alignment_check) RING0_EC_FRAME - pushl $do_alignment_check - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $do_alignment_check jmp error_code CFI_ENDPROC END(alignment_check) 
ENTRY(divide_error) RING0_INT_FRAME - pushl $0 # no error code - CFI_ADJUST_CFA_OFFSET 4 - pushl $do_divide_error - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $0 # no error code + pushl_cfi $do_divide_error jmp error_code CFI_ENDPROC END(divide_error) @@ -1047,10 +983,8 @@ END(divide_error) #ifdef CONFIG_X86_MCE ENTRY(machine_check) RING0_INT_FRAME - pushl $0 - CFI_ADJUST_CFA_OFFSET 4 - pushl machine_check_vector - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $0 + pushl_cfi machine_check_vector jmp error_code CFI_ENDPROC END(machine_check) @@ -1058,10 +992,8 @@ END(machine_check) ENTRY(spurious_interrupt_bug) RING0_INT_FRAME - pushl $0 - CFI_ADJUST_CFA_OFFSET 4 - pushl $do_spurious_interrupt_bug - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $0 + pushl_cfi $do_spurious_interrupt_bug jmp error_code CFI_ENDPROC END(spurious_interrupt_bug) @@ -1092,8 +1024,7 @@ ENTRY(xen_sysenter_target) ENTRY(xen_hypervisor_callback) CFI_STARTPROC - pushl $0 - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $0 SAVE_ALL TRACE_IRQS_OFF @@ -1129,23 +1060,20 @@ ENDPROC(xen_hypervisor_callback) # We distinguish between categories by maintaining a status value in EAX. ENTRY(xen_failsafe_callback) CFI_STARTPROC - pushl %eax - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %eax movl $1,%eax 1: mov 4(%esp),%ds 2: mov 8(%esp),%es 3: mov 12(%esp),%fs 4: mov 16(%esp),%gs testl %eax,%eax - popl %eax - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %eax lea 16(%esp),%esp CFI_ADJUST_CFA_OFFSET -16 jz 5f addl $16,%esp jmp iret_exc # EAX != 0 => Category 2 (Bad IRET) -5: pushl $0 # EAX == 0 => Category 1 (Bad segment) - CFI_ADJUST_CFA_OFFSET 4 +5: pushl_cfi $0 # EAX == 0 => Category 1 (Bad segment) SAVE_ALL jmp ret_from_exception CFI_ENDPROC @@ -1295,40 +1223,29 @@ syscall_table_size=(.-sys_call_table) ENTRY(page_fault) RING0_EC_FRAME - pushl $do_page_fault - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $do_page_fault ALIGN error_code: /* the function address is in %gs's slot on the stack */ - pushl %fs - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %fs /*CFI_REL_OFFSET fs, 0*/ - pushl %es - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %es /*CFI_REL_OFFSET es, 0*/ - pushl %ds - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %ds /*CFI_REL_OFFSET ds, 0*/ - pushl %eax - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %eax CFI_REL_OFFSET eax, 0 - pushl %ebp - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %ebp CFI_REL_OFFSET ebp, 0 - pushl %edi - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %edi CFI_REL_OFFSET edi, 0 - pushl %esi - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %esi CFI_REL_OFFSET esi, 0 - pushl %edx - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %edx CFI_REL_OFFSET edx, 0 - pushl %ecx - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %ecx CFI_REL_OFFSET ecx, 0 - pushl %ebx - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %ebx CFI_REL_OFFSET ebx, 0 cld movl $(__KERNEL_PERCPU), %ecx @@ -1370,12 +1287,9 @@ END(page_fault) movl TSS_sysenter_sp0 + \offset(%esp), %esp CFI_DEF_CFA esp, 0 CFI_UNDEFINED eip - pushfl - CFI_ADJUST_CFA_OFFSET 4 - pushl $__KERNEL_CS - CFI_ADJUST_CFA_OFFSET 4 - pushl $sysenter_past_esp - CFI_ADJUST_CFA_OFFSET 4 + pushfl_cfi + pushl_cfi $__KERNEL_CS + pushl_cfi $sysenter_past_esp CFI_REL_OFFSET eip, 0 .endm @@ -1385,8 +1299,7 @@ ENTRY(debug) jne debug_stack_correct FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn debug_stack_correct: - pushl $-1 # mark this as an int - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $-1 # mark this as an int SAVE_ALL TRACE_IRQS_OFF xorl %edx,%edx # error code 0 @@ -1406,32 +1319,27 @@ END(debug) */ ENTRY(nmi) RING0_INT_FRAME - pushl %eax - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %eax movl %ss, %eax cmpw $__ESPFIX_SS, %ax - popl %eax - 
CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %eax je nmi_espfix_stack cmpl $ia32_sysenter_target,(%esp) je nmi_stack_fixup - pushl %eax - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %eax movl %esp,%eax /* Do not access memory above the end of our stack page, * it might not exist. */ andl $(THREAD_SIZE-1),%eax cmpl $(THREAD_SIZE-20),%eax - popl %eax - CFI_ADJUST_CFA_OFFSET -4 + popl_cfi %eax jae nmi_stack_correct cmpl $ia32_sysenter_target,12(%esp) je nmi_debug_stack_check nmi_stack_correct: /* We have a RING0_INT_FRAME here */ - pushl %eax - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %eax SAVE_ALL xorl %edx,%edx # zero error code movl %esp,%eax # pt_regs pointer @@ -1460,18 +1368,14 @@ nmi_espfix_stack: * * create the pointer to lss back */ - pushl %ss - CFI_ADJUST_CFA_OFFSET 4 - pushl %esp - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %ss + pushl_cfi %esp addl $4, (%esp) /* copy the iret frame of 12 bytes */ .rept 3 - pushl 16(%esp) - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi 16(%esp) .endr - pushl %eax - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %eax SAVE_ALL FIXUP_ESPFIX_STACK # %eax == %esp xorl %edx,%edx # zero error code @@ -1485,8 +1389,7 @@ END(nmi) ENTRY(int3) RING0_INT_FRAME - pushl $-1 # mark this as an int - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $-1 # mark this as an int SAVE_ALL TRACE_IRQS_OFF xorl %edx,%edx # zero error code @@ -1498,8 +1401,7 @@ END(int3) ENTRY(general_protection) RING0_EC_FRAME - pushl $do_general_protection - CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi $do_general_protection jmp error_code CFI_ENDPROC END(general_protection) diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 6f305830c80..8851a2bb8c0 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -213,23 +213,17 @@ ENDPROC(native_usergs_sysret64) .macro FAKE_STACK_FRAME child_rip /* push in order ss, rsp, eflags, cs, rip */ xorl %eax, %eax - pushq $__KERNEL_DS /* ss */ - CFI_ADJUST_CFA_OFFSET 8 + pushq_cfi $__KERNEL_DS /* ss */ /*CFI_REL_OFFSET ss,0*/ - pushq %rax /* rsp */ - CFI_ADJUST_CFA_OFFSET 8 + pushq_cfi %rax /* rsp */ CFI_REL_OFFSET rsp,0 - pushq $X86_EFLAGS_IF /* eflags - interrupts on */ - CFI_ADJUST_CFA_OFFSET 8 + pushq_cfi $X86_EFLAGS_IF /* eflags - interrupts on */ /*CFI_REL_OFFSET rflags,0*/ - pushq $__KERNEL_CS /* cs */ - CFI_ADJUST_CFA_OFFSET 8 + pushq_cfi $__KERNEL_CS /* cs */ /*CFI_REL_OFFSET cs,0*/ - pushq \child_rip /* rip */ - CFI_ADJUST_CFA_OFFSET 8 + pushq_cfi \child_rip /* rip */ CFI_REL_OFFSET rip,0 - pushq %rax /* orig rax */ - CFI_ADJUST_CFA_OFFSET 8 + pushq_cfi %rax /* orig rax */ .endm .macro UNFAKE_STACK_FRAME @@ -398,10 +392,8 @@ ENTRY(ret_from_fork) LOCK ; btr $TIF_FORK,TI_flags(%r8) - push kernel_eflags(%rip) - CFI_ADJUST_CFA_OFFSET 8 - popf # reset kernel eflags - CFI_ADJUST_CFA_OFFSET -8 + pushq_cfi kernel_eflags(%rip) + popfq_cfi # reset kernel eflags call schedule_tail # rdi: 'prev' task parameter @@ -521,11 +513,9 @@ sysret_careful: jnc sysret_signal TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) - pushq %rdi - CFI_ADJUST_CFA_OFFSET 8 + pushq_cfi %rdi call schedule - popq %rdi - CFI_ADJUST_CFA_OFFSET -8 + popq_cfi %rdi jmp sysret_check /* Handle a signal */ @@ -634,11 +624,9 @@ int_careful: jnc int_very_careful TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) - pushq %rdi - CFI_ADJUST_CFA_OFFSET 8 + pushq_cfi %rdi call schedule - popq %rdi - CFI_ADJUST_CFA_OFFSET -8 + popq_cfi %rdi DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF jmp int_with_check @@ -652,12 +640,10 @@ int_check_syscall_exit_work: /* Check for syscall exit trace */ testl $_TIF_WORK_SYSCALL_EXIT,%edx jz int_signal - 
pushq %rdi - CFI_ADJUST_CFA_OFFSET 8 + pushq_cfi %rdi leaq 8(%rsp),%rdi # &ptregs -> arg1 call syscall_trace_leave - popq %rdi - CFI_ADJUST_CFA_OFFSET -8 + popq_cfi %rdi andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi jmp int_restore_rest @@ -765,8 +751,7 @@ vector=FIRST_EXTERNAL_VECTOR .if vector <> FIRST_EXTERNAL_VECTOR CFI_ADJUST_CFA_OFFSET -8 .endif -1: pushq $(~vector+0x80) /* Note: always in signed byte range */ - CFI_ADJUST_CFA_OFFSET 8 +1: pushq_cfi $(~vector+0x80) /* Note: always in signed byte range */ .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6 jmp 2f .endif @@ -821,6 +806,7 @@ ret_from_intr: TRACE_IRQS_OFF decl PER_CPU_VAR(irq_count) leaveq + CFI_RESTORE rbp CFI_DEF_CFA_REGISTER rsp CFI_ADJUST_CFA_OFFSET -8 exit_intr: @@ -902,11 +888,9 @@ retint_careful: jnc retint_signal TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) - pushq %rdi - CFI_ADJUST_CFA_OFFSET 8 + pushq_cfi %rdi call schedule - popq %rdi - CFI_ADJUST_CFA_OFFSET -8 + popq_cfi %rdi GET_THREAD_INFO(%rcx) DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF @@ -955,8 +939,7 @@ END(common_interrupt) .macro apicinterrupt num sym do_sym ENTRY(\sym) INTR_FRAME - pushq $~(\num) - CFI_ADJUST_CFA_OFFSET 8 + pushq_cfi $~(\num) interrupt \do_sym jmp ret_from_intr CFI_ENDPROC @@ -1138,16 +1121,14 @@ zeroentry simd_coprocessor_error do_simd_coprocessor_error /* edi: new selector */ ENTRY(native_load_gs_index) CFI_STARTPROC - pushf - CFI_ADJUST_CFA_OFFSET 8 + pushfq_cfi DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) SWAPGS gs_change: movl %edi,%gs 2: mfence /* workaround */ SWAPGS - popf - CFI_ADJUST_CFA_OFFSET -8 + popfq_cfi ret CFI_ENDPROC END(native_load_gs_index) @@ -1214,8 +1195,7 @@ END(kernel_execve) /* Call softirq on interrupt stack. Interrupts are off. */ ENTRY(call_softirq) CFI_STARTPROC - push %rbp - CFI_ADJUST_CFA_OFFSET 8 + pushq_cfi %rbp CFI_REL_OFFSET rbp,0 mov %rsp,%rbp CFI_DEF_CFA_REGISTER rbp @@ -1224,6 +1204,7 @@ ENTRY(call_softirq) push %rbp # backlink for old unwinder call __do_softirq leaveq + CFI_RESTORE rbp CFI_DEF_CFA_REGISTER rsp CFI_ADJUST_CFA_OFFSET -8 decl PER_CPU_VAR(irq_count) -- cgit v1.2.3-70-g09d2 From 57ab43e33122ffdc2eebca5d6de035699f0a8c06 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Fri, 3 Sep 2010 18:39:39 +0200 Subject: x86, GART: Remove superfluous AMD64_GARTEN There is a GARTEN so use that and drop the duplicate. 
Signed-off-by: Borislav Petkov Cc: Dave Airlie Cc: FUJITA Tomonori LKML-Reference: <1283531981-7495-2-git-send-email-bp@amd64.org> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/gart.h | 1 - arch/x86/kernel/aperture_64.c | 4 ++-- drivers/char/agp/amd64-agp.c | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h index 4ac5b0f33fc..fba0a72c4cc 100644 --- a/arch/x86/include/asm/gart.h +++ b/arch/x86/include/asm/gart.h @@ -27,7 +27,6 @@ extern int fix_aperture; #define AMD64_GARTAPERTUREBASE 0x94 #define AMD64_GARTTABLEBASE 0x98 #define AMD64_GARTCACHECTL 0x9c -#define AMD64_GARTEN (1<<0) #ifdef CONFIG_GART_IOMMU extern int gart_iommu_aperture; diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index a2e0caf26e1..6fabd406aa7 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -307,7 +307,7 @@ void __init early_gart_iommu_check(void) continue; ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL); - aper_enabled = ctl & AMD64_GARTEN; + aper_enabled = ctl & GARTEN; aper_order = (ctl >> 1) & 7; aper_size = (32 * 1024 * 1024) << aper_order; aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff; @@ -362,7 +362,7 @@ void __init early_gart_iommu_check(void) continue; ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL); - ctl &= ~AMD64_GARTEN; + ctl &= ~GARTEN; write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl); } } diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 70312da4c96..bfe372b3d9d 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -199,7 +199,7 @@ static void amd64_cleanup(void) struct pci_dev *dev = k8_northbridges[i]; /* disable gart translation */ pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp); - tmp &= ~AMD64_GARTEN; + tmp &= ~GARTEN; pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, tmp); } } -- cgit v1.2.3-70-g09d2 From 260133ab658bd2b80e07832a878e00405e19ff43 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Fri, 3 Sep 2010 18:39:40 +0200 Subject: x86, GART: Disable GART table walk probes Current code tramples over bit F3x90[6] which can be used to disable GART table walk probes. However, this bit should be set for performance reasons (speed up GART table walks). We are allowed to do that since we put GART tables in UC memory later anyway. Make it so. Signed-off-by: Borislav Petkov Cc: Dave Airlie Cc: FUJITA Tomonori LKML-Reference: <1283531981-7495-3-git-send-email-bp@amd64.org> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/gart.h | 14 ++++++++++++++ arch/x86/kernel/aperture_64.c | 14 ++++++++------ arch/x86/kernel/pci-gart_64.c | 2 +- drivers/char/agp/amd64-agp.c | 2 +- 4 files changed, 24 insertions(+), 8 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h index fba0a72c4cc..bf357f9b25f 100644 --- a/arch/x86/include/asm/gart.h +++ b/arch/x86/include/asm/gart.h @@ -17,6 +17,7 @@ extern int fix_aperture; #define GARTEN (1<<0) #define DISGARTCPU (1<<4) #define DISGARTIO (1<<5) +#define DISTLBWALKPRB (1<<6) /* GART cache control register bits. */ #define INVGART (1<<0) @@ -56,6 +57,19 @@ static inline void gart_iommu_hole_init(void) extern int agp_amd64_init(void); +static inline void gart_set_size_and_enable(struct pci_dev *dev, u32 order) +{ + u32 ctl; + + /* + * Don't enable translation but enable GART IO and CPU accesses. 
+ * Also, set DISTLBWALKPRB since GART tables memory is UC. + */ + ctl = DISTLBWALKPRB | order << 1; + + pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl); +} + static inline void enable_gart_translation(struct pci_dev *dev, u64 addr) { u32 tmp, ctl; diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 6fabd406aa7..c9cb1736844 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -505,8 +505,13 @@ out: /* Fix up the north bridges */ for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) { - int bus; - int dev_base, dev_limit; + int bus, dev_base, dev_limit; + + /* + * Don't enable translation yet but enable GART IO and CPU + * accesses and set DISTLBWALKPRB since GART table memory is UC. + */ + u32 ctl = DISTLBWALKPRB | aper_order << 1; bus = bus_dev_ranges[i].bus; dev_base = bus_dev_ranges[i].dev_base; @@ -515,10 +520,7 @@ out: if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00))) continue; - /* Don't enable translation yet. That is done later. - Assume this BIOS didn't initialise the GART so - just overwrite all previous bits */ - write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, aper_order << 1); + write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl); write_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE, aper_alloc >> 25); } } diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index 0f7f130caa6..6015ee13e22 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c @@ -601,7 +601,7 @@ static void gart_fixup_northbridges(struct sys_device *dev) * Don't enable translations just yet. That is the next * step. Restore the pre-suspend aperture settings. */ - pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, aperture_order << 1); + gart_set_size_and_enable(dev, aperture_order); pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25); } } diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index bfe372b3d9d..564808a5c3c 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -313,7 +313,7 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<> 25); return 0; -- cgit v1.2.3-70-g09d2 From 7ef8aa72ab176e0288f363d1247079732c5d5792 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Mon, 6 Sep 2010 15:14:17 +0200 Subject: x86, cpu: Fix renamed, not-yet-shipping AMD CPUID feature bit The AMD SSE5 feature set as-it has been replaced by some extensions to the AVX instruction set. Thus the bit formerly advertised as SSE5 is re-used for one of these extensions (XOP). Although this changes the /proc/cpuinfo output, it is not user visible, as there are no CPUs (yet) having this feature. To avoid confusion this should be added to the stable series, too. Cc: stable@kernel.org [.32.x .34.x, .35.x] Signed-off-by: Andre Przywara LKML-Reference: <1283778860-26843-2-git-send-email-andre.przywara@amd.com> Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/cpufeature.h | 2 +- arch/x86/kvm/x86.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 781a50b29a4..c9c73d8dedb 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -152,7 +152,7 @@ #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */ #define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */ #define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */ -#define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */ +#define X86_FEATURE_XOP (6*32+11) /* extended AVX instructions */ #define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */ #define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */ #define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3a09c625d52..dd54779ccbe 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1996,7 +1996,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, const u32 kvm_supported_word6_x86_features = F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ | F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) | - F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) | + F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) | 0 /* SKINIT */ | 0 /* WDT */; /* all calls to cpuid_count() should be made on the same cpu */ -- cgit v1.2.3-70-g09d2 From 33ed82fb6c5f032151f7e9f1ac7b667f78f426b8 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Mon, 6 Sep 2010 15:14:18 +0200 Subject: x86, cpu: Update AMD CPUID feature bits AMD's public CPUID specification has been updated and some bits have got names. Add them to properly describe new CPU features. Signed-off-by: Andre Przywara LKML-Reference: <1283778860-26843-3-git-send-email-andre.przywara@amd.com> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/cpufeature.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index c9c73d8dedb..341835df789 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -155,7 +155,11 @@ #define X86_FEATURE_XOP (6*32+11) /* extended AVX instructions */ #define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */ #define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */ +#define X86_FEATURE_LWP (6*32+15) /* Light Weight Profiling */ +#define X86_FEATURE_FMA4 (6*32+16) /* 4 operands MAC instructions */ #define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */ +#define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */ +#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */ /* * Auxiliary flags: Linux defined - For features scattered in various -- cgit v1.2.3-70-g09d2 From aeb9c7d618264dcf6eea39142fefee096c3b09e2 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Mon, 6 Sep 2010 15:14:20 +0200 Subject: x86, kvm: add new AMD SVM feature bits The recently updated CPUID specification names new SVM feature bits. Add them to the list of reported features. Signed-off-by: Andre Przywara LKML-Reference: <1283778860-26843-5-git-send-email-andre.przywara@amd.com> Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/cpufeature.h | 7 +++++++ arch/x86/kernel/cpu/scattered.c | 6 ++++++ 2 files changed, 13 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 341835df789..bffeab7eab9 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -183,6 +183,13 @@ #define X86_FEATURE_LBRV (8*32+ 6) /* AMD LBR Virtualization support */ #define X86_FEATURE_SVML (8*32+ 7) /* "svm_lock" AMD SVM locking MSR */ #define X86_FEATURE_NRIPS (8*32+ 8) /* "nrip_save" AMD SVM next_rip save */ +#define X86_FEATURE_TSCRATEMSR (8*32+ 9) /* "tsc_scale" AMD TSC scaling support */ +#define X86_FEATURE_VMCBCLEAN (8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */ +#define X86_FEATURE_FLUSHBYASID (8*32+11) /* AMD flush-by-ASID support */ +#define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */ +#define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */ +#define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */ + /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 34b4dad6f0b..2c77931473f 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -43,6 +43,12 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a, 0 }, { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a, 0 }, { X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a, 0 }, + { X86_FEATURE_TSCRATEMSR, CR_EDX, 4, 0x8000000a, 0 }, + { X86_FEATURE_VMCBCLEAN, CR_EDX, 5, 0x8000000a, 0 }, + { X86_FEATURE_FLUSHBYASID, CR_EDX, 6, 0x8000000a, 0 }, + { X86_FEATURE_DECODEASSISTS, CR_EDX, 7, 0x8000000a, 0 }, + { X86_FEATURE_PAUSEFILTER, CR_EDX,10, 0x8000000a, 0 }, + { X86_FEATURE_PFTHRESHOLD, CR_EDX,12, 0x8000000a, 0 }, { 0, 0, 0, 0, 0 } }; -- cgit v1.2.3-70-g09d2 From 2df7a6e9e8e67c19e5fe2eac3f2d2223b7bb4a7b Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Fri, 3 Sep 2010 21:17:08 -0400 Subject: x86: Use correct type for %cr4 %cr4 is 64-bit in 64-bit mode (although the upper 32-bits are currently reserved). Use unsigned long for the temporary variable to get the right size. Signed-off-by: Brian Gerst Acked-by: Pekka Enberg Cc: Suresh Siddha LKML-Reference: <1283563039-3466-2-git-send-email-brgerst@gmail.com> Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/processor.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 325b7bdbeba..396b80f2d87 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -602,7 +602,7 @@ extern unsigned long mmu_cr4_features; static inline void set_in_cr4(unsigned long mask) { - unsigned cr4; + unsigned long cr4; mmu_cr4_features |= mask; cr4 = read_cr4(); @@ -612,7 +612,7 @@ static inline void set_in_cr4(unsigned long mask) static inline void clear_in_cr4(unsigned long mask) { - unsigned cr4; + unsigned long cr4; mmu_cr4_features &= ~mask; cr4 = read_cr4(); -- cgit v1.2.3-70-g09d2 From 51115d4d45700fc7c08306f7ba6e68551f526ae5 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Fri, 3 Sep 2010 21:17:10 -0400 Subject: x86, fpu: Merge tolerant_fwait() Commit e2e75c91 merged the math exception handler, allowing both 32-bit and 64-bit to handle math exceptions from kernel mode. Switch to using the 64-bit version of tolerant_fwait() without fnclex, which simply ignores the exception if one is still pending from userspace. Signed-off-by: Brian Gerst Acked-by: Pekka Enberg Cc: Suresh Siddha LKML-Reference: <1283563039-3466-4-git-send-email-brgerst@gmail.com> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/i387.h | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index a73a8d5a5e6..5d8f9a79fa5 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h @@ -77,15 +77,6 @@ static inline void sanitize_i387_state(struct task_struct *tsk) } #ifdef CONFIG_X86_64 - -/* Ignore delayed exceptions from user space */ -static inline void tolerant_fwait(void) -{ - asm volatile("1: fwait\n" - "2:\n" - _ASM_EXTABLE(1b, 2b)); -} - static inline int fxrstor_checking(struct i387_fxsave_struct *fx) { int err; @@ -220,11 +211,6 @@ extern void finit_soft_fpu(struct i387_soft_struct *soft); static inline void finit_soft_fpu(struct i387_soft_struct *soft) {} #endif -static inline void tolerant_fwait(void) -{ - asm volatile("fnclex ; fwait"); -} - /* perform fxrstor iff the processor has extended states, otherwise frstor */ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) { @@ -344,7 +330,10 @@ static inline void __unlazy_fpu(struct task_struct *tsk) static inline void __clear_fpu(struct task_struct *tsk) { if (task_thread_info(tsk)->status & TS_USEDFPU) { - tolerant_fwait(); + /* Ignore delayed exceptions from user space */ + asm volatile("1: fwait\n" + "2:\n" + _ASM_EXTABLE(1b, 2b)); task_thread_info(tsk)->status &= ~TS_USEDFPU; stts(); } -- cgit v1.2.3-70-g09d2 From bfd946cb891800d408decaae268a3480775178a3 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Fri, 3 Sep 2010 21:17:11 -0400 Subject: x86, fpu: Merge __save_init_fpu() __save_init_fpu() is identical for 32-bit and 64-bit. Signed-off-by: Brian Gerst Acked-by: Pekka Enberg Cc: Suresh Siddha LKML-Reference: <1283563039-3466-5-git-send-email-brgerst@gmail.com> Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/i387.h | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index 5d8f9a79fa5..88065e3a03c 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h @@ -197,12 +197,6 @@ static inline void fpu_save_init(struct fpu *fpu) fpu_clear(fpu); } -static inline void __save_init_fpu(struct task_struct *tsk) -{ - fpu_save_init(&tsk->thread.fpu); - task_thread_info(tsk)->status &= ~TS_USEDFPU; -} - #else /* CONFIG_X86_32 */ #ifdef CONFIG_MATH_EMULATION @@ -285,15 +279,14 @@ end: ; } +#endif /* CONFIG_X86_64 */ + static inline void __save_init_fpu(struct task_struct *tsk) { fpu_save_init(&tsk->thread.fpu); task_thread_info(tsk)->status &= ~TS_USEDFPU; } - -#endif /* CONFIG_X86_64 */ - static inline int fpu_fxrstor_checking(struct fpu *fpu) { return fxrstor_checking(&fpu->state->fxsave); -- cgit v1.2.3-70-g09d2 From a4d4fbc7735bba6654b20f859135f9d3f8fe7f76 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Fri, 3 Sep 2010 21:17:12 -0400 Subject: x86-64, fpu: Disable preemption when using TS_USEDFPU Consolidates code and fixes the below race for 64-bit. commit 9fa2f37bfeb798728241cc4a19578ce6e4258f25 Author: torvalds Date: Tue Sep 2 07:37:25 2003 +0000 Be a lot more careful about TS_USEDFPU and preemption We had some races where we testecd (or set) TS_USEDFPU together with sequences that depended on the setting (like clearing or setting the TS flag in %cr0) and we could be preempted in between, which screws up the FPU state, since preemption will itself change USEDFPU and the TS flag. This makes it a lot more explicit: the "internal" low-level FPU functions ("__xxxx_fpu()") all require preemption to be disabled, and the exported "real" functions will make sure that is the case. One case - in __switch_to() - was switched to the non-preempt-safe internal version, since the scheduler itself has already disabled preemption. BKrev: 3f5448b5WRiQuyzAlbajs3qoQjSobw Signed-off-by: Brian Gerst Acked-by: Pekka Enberg Cc: Suresh Siddha LKML-Reference: <1283563039-3466-6-git-send-email-brgerst@gmail.com> Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/i387.h | 15 --------------- arch/x86/kernel/process_64.c | 2 +- 2 files changed, 1 insertion(+), 16 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index 88065e3a03c..8b40a8379cc 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h @@ -387,19 +387,6 @@ static inline void irq_ts_restore(int TS_state) stts(); } -#ifdef CONFIG_X86_64 - -static inline void save_init_fpu(struct task_struct *tsk) -{ - __save_init_fpu(tsk); - stts(); -} - -#define unlazy_fpu __unlazy_fpu -#define clear_fpu __clear_fpu - -#else /* CONFIG_X86_32 */ - /* * These disable preemption on their own and are safe */ @@ -425,8 +412,6 @@ static inline void clear_fpu(struct task_struct *tsk) preempt_enable(); } -#endif /* CONFIG_X86_64 */ - /* * i387 state interaction */ diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 3d9ea531ddd..b3d7a3a04f3 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -424,7 +424,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) load_TLS(next, cpu); /* Must be after DS reload */ - unlazy_fpu(prev_p); + __unlazy_fpu(prev_p); /* Make sure cpu is ready for new context */ if (preload_fpu) -- cgit v1.2.3-70-g09d2 From 820241356d6aa9a895fc10def15794a5a5bfcd98 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Fri, 3 Sep 2010 21:17:14 -0400 Subject: x86-64, fpu: Simplify constraints for fxsave/fxtstor Use the "R" constraint (legacy register) instead of listing all the possible registers. Clean up the comments as well. Signed-off-by: Brian Gerst Acked-by: Pekka Enberg Cc: Suresh Siddha LKML-Reference: <1283563039-3466-8-git-send-email-brgerst@gmail.com> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/i387.h | 44 +++++++++++++++++--------------------------- 1 file changed, 17 insertions(+), 27 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index 8b40a8379cc..768fcb25900 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h @@ -81,6 +81,7 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) { int err; + /* See comment in fxsave() below. */ asm volatile("1: rex64/fxrstor (%[fx])\n\t" "2:\n" ".section .fixup,\"ax\"\n" @@ -89,11 +90,7 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) ".previous\n" _ASM_EXTABLE(1b, 3b) : [err] "=r" (err) -#if 0 /* See comment in fxsave() below. */ - : [fx] "r" (fx), "m" (*fx), "0" (0)); -#else - : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0)); -#endif + : [fx] "R" (fx), "m" (*fx), "0" (0)); return err; } @@ -140,6 +137,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx) if (unlikely(err)) return -EFAULT; + /* See comment in fxsave() below. */ asm volatile("1: rex64/fxsave (%[fx])\n\t" "2:\n" ".section .fixup,\"ax\"\n" @@ -148,11 +146,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx) ".previous\n" _ASM_EXTABLE(1b, 3b) : [err] "=r" (err), "=m" (*fx) -#if 0 /* See comment in fxsave() below. 
*/ - : [fx] "r" (fx), "0" (0)); -#else - : [fx] "cdaSDb" (fx), "0" (0)); -#endif + : [fx] "R" (fx), "0" (0)); if (unlikely(err) && __clear_user(fx, sizeof(struct i387_fxsave_struct))) err = -EFAULT; @@ -165,26 +159,22 @@ static inline void fpu_fxsave(struct fpu *fpu) /* Using "rex64; fxsave %0" is broken because, if the memory operand uses any extended registers for addressing, a second REX prefix will be generated (to the assembler, rex64 followed by semicolon - is a separate instruction), and hence the 64-bitness is lost. */ -#if 0 - /* Using "fxsaveq %0" would be the ideal choice, but is only supported - starting with gas 2.16. */ - __asm__ __volatile__("fxsaveq %0" - : "=m" (fpu->state->fxsave)); -#elif 0 - /* Using, as a workaround, the properly prefixed form below isn't + is a separate instruction), and hence the 64-bitness is lost. + Using "fxsaveq %0" would be the ideal choice, but is only supported + starting with gas 2.16. + asm volatile("fxsaveq %0" + : "=m" (fpu->state->fxsave)); + Using, as a workaround, the properly prefixed form below isn't accepted by any binutils version so far released, complaining that the same type of prefix is used twice if an extended register is - needed for addressing (fix submitted to mainline 2005-11-21). */ - __asm__ __volatile__("rex64/fxsave %0" - : "=m" (fpu->state->fxsave)); -#else - /* This, however, we can work around by forcing the compiler to select + needed for addressing (fix submitted to mainline 2005-11-21). + asm volatile("rex64/fxsave %0" + : "=m" (fpu->state->fxsave)); + This, however, we can work around by forcing the compiler to select an addressing mode that doesn't require extended registers. */ - __asm__ __volatile__("rex64/fxsave (%1)" - : "=m" (fpu->state->fxsave) - : "cdaSDb" (&fpu->state->fxsave)); -#endif + asm volatile("rex64/fxsave (%[fx])" + : "=m" (fpu->state->fxsave) + : [fx] "R" (&fpu->state->fxsave)); } static inline void fpu_save_init(struct fpu *fpu) -- cgit v1.2.3-70-g09d2 From 8eb91a577d7763d21628f6761045328784b1911c Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Fri, 3 Sep 2010 21:17:16 -0400 Subject: x86, fpu: Remove unnecessary ifdefs from i387 code. Remove ifdefs for code that the compiler can optimize away on 64-bit. Signed-off-by: Brian Gerst Acked-by: Pekka Enberg Cc: Suresh Siddha LKML-Reference: <1283563039-3466-10-git-send-email-brgerst@gmail.com> Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/i387.h | 12 ++++++------ arch/x86/kernel/i387.c | 4 ---- 2 files changed, 6 insertions(+), 10 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index 768fcb25900..42b507e16d1 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h @@ -55,6 +55,12 @@ extern int save_i387_xstate_ia32(void __user *buf); extern int restore_i387_xstate_ia32(void __user *buf); #endif +#ifdef CONFIG_MATH_EMULATION +extern void finit_soft_fpu(struct i387_soft_struct *soft); +#else +static inline void finit_soft_fpu(struct i387_soft_struct *soft) {} +#endif + #define X87_FSW_ES (1 << 7) /* Exception Summary */ static __always_inline __pure bool use_xsaveopt(void) @@ -189,12 +195,6 @@ static inline void fpu_save_init(struct fpu *fpu) #else /* CONFIG_X86_32 */ -#ifdef CONFIG_MATH_EMULATION -extern void finit_soft_fpu(struct i387_soft_struct *soft); -#else -static inline void finit_soft_fpu(struct i387_soft_struct *soft) {} -#endif - /* perform fxrstor iff the processor has extended states, otherwise frstor */ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) { diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index f3775f5ef4a..e795e360bd5 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c @@ -79,10 +79,8 @@ static void __cpuinit init_thread_xstate(void) if (cpu_has_fxsr) xstate_size = sizeof(struct i387_fxsave_struct); -#ifdef CONFIG_X86_32 else xstate_size = sizeof(struct i387_fsave_struct); -#endif } /* @@ -119,12 +117,10 @@ void __cpuinit fpu_init(void) void fpu_finit(struct fpu *fpu) { -#ifdef CONFIG_X86_32 if (!HAVE_HWFP) { finit_soft_fpu(&fpu->state->soft); return; } -#endif if (cpu_has_fxsr) { struct i387_fxsave_struct *fx = &fpu->state->fxsave; -- cgit v1.2.3-70-g09d2 From eec73f813ab0954253e5e2168119c4555f83f07d Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Fri, 3 Sep 2010 21:17:17 -0400 Subject: x86, fpu: Remove PSHUFB_XMM5_* macros The PSHUFB_XMM5_* macros are no longer used. Signed-off-by: Brian Gerst Acked-by: Pekka Enberg Cc: Suresh Siddha LKML-Reference: <1283563039-3466-11-git-send-email-brgerst@gmail.com> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/i387.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index 42b507e16d1..907967e4fa8 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h @@ -465,7 +465,4 @@ extern void fpu_finit(struct fpu *fpu); #endif /* __ASSEMBLY__ */ -#define PSHUFB_XMM5_XMM0 .byte 0x66, 0x0f, 0x38, 0x00, 0xc5 -#define PSHUFB_XMM5_XMM6 .byte 0x66, 0x0f, 0x38, 0x00, 0xf5 - #endif /* _ASM_X86_I387_H */ -- cgit v1.2.3-70-g09d2 From 58a992b9cbaf449aeebd3575c3695a9eb5d95b5e Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Fri, 3 Sep 2010 21:17:18 -0400 Subject: x86-32, fpu: Rewrite fpu_save_init() Rewrite fpu_save_init() to prepare for merging with 64-bit. Signed-off-by: Brian Gerst Acked-by: Pekka Enberg Cc: Suresh Siddha LKML-Reference: <1283563039-3466-12-git-send-email-brgerst@gmail.com> Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/i387.h | 47 +++++++++++++++++++++------------------------ 1 file changed, 22 insertions(+), 25 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index 907967e4fa8..b45abefb89f 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h @@ -73,6 +73,11 @@ static __always_inline __pure bool use_xsave(void) return static_cpu_has(X86_FEATURE_XSAVE); } +static __always_inline __pure bool use_fxsr(void) +{ + return static_cpu_has(X86_FEATURE_FXSR); +} + extern void __sanitize_i387_state(struct task_struct *); static inline void sanitize_i387_state(struct task_struct *tsk) @@ -211,6 +216,12 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) return 0; } +static inline void fpu_fxsave(struct fpu *fpu) +{ + asm volatile("fxsave %[fx]" + : [fx] "=m" (fpu->state->fxsave)); +} + /* We need a safe address that is cheap to find and that is already in L1 during context switch. The best choices are unfortunately different for UP and SMP */ @@ -226,36 +237,24 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) static inline void fpu_save_init(struct fpu *fpu) { if (use_xsave()) { - struct xsave_struct *xstate = &fpu->state->xsave; - struct i387_fxsave_struct *fx = &fpu->state->fxsave; - fpu_xsave(fpu); /* * xsave header may indicate the init state of the FP. */ - if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP)) - goto end; - - if (unlikely(fx->swd & X87_FSW_ES)) - asm volatile("fnclex"); - - /* - * we can do a simple return here or be paranoid :) - */ - goto clear_state; + if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP)) + return; + } else if (use_fxsr()) { + fpu_fxsave(fpu); + } else { + asm volatile("fsave %[fx]; fwait" + : [fx] "=m" (fpu->state->fsave)); + return; } - /* Use more nops than strictly needed in case the compiler - varies code */ - alternative_input( - "fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4, - "fxsave %[fx]\n" - "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:", - X86_FEATURE_FXSR, - [fx] "m" (fpu->state->fxsave), - [fsw] "m" (fpu->state->fxsave.swd) : "memory"); -clear_state: + if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) + asm volatile("fnclex"); + /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is pending. Clear the x87 state here by setting it to fixed values. safe_address is a random variable that should be in L1 */ @@ -265,8 +264,6 @@ clear_state: "fildl %[addr]", /* set F?P to defined value */ X86_FEATURE_FXSAVE_LEAK, [addr] "m" (safe_address)); -end: - ; } #endif /* CONFIG_X86_64 */ -- cgit v1.2.3-70-g09d2 From b2b57fe053c9cf8b8af5a0e826a465996afed0ff Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Fri, 3 Sep 2010 21:17:19 -0400 Subject: x86, fpu: Merge fpu_save_init() Make 64-bit use the 32-bit version of fpu_save_init(). Remove unused clear_fpu_state(). Signed-off-by: Brian Gerst Acked-by: Pekka Enberg Cc: Suresh Siddha LKML-Reference: <1283563039-3466-13-git-send-email-brgerst@gmail.com> Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/i387.h | 48 ++++----------------------------------------- 1 file changed, 4 insertions(+), 44 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index b45abefb89f..70626ed96cb 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h @@ -105,36 +105,6 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) return err; } -/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception - is pending. Clear the x87 state here by setting it to fixed - values. The kernel data segment can be sometimes 0 and sometimes - new user value. Both should be ok. - Use the PDA as safe address because it should be already in L1. */ -static inline void fpu_clear(struct fpu *fpu) -{ - struct xsave_struct *xstate = &fpu->state->xsave; - struct i387_fxsave_struct *fx = &fpu->state->fxsave; - - /* - * xsave header may indicate the init state of the FP. - */ - if (use_xsave() && - !(xstate->xsave_hdr.xstate_bv & XSTATE_FP)) - return; - - if (unlikely(fx->swd & X87_FSW_ES)) - asm volatile("fnclex"); - alternative_input(ASM_NOP8 ASM_NOP2, - " emms\n" /* clear stack tags */ - " fildl %%gs:0", /* load to clear state */ - X86_FEATURE_FXSAVE_LEAK); -} - -static inline void clear_fpu_state(struct task_struct *tsk) -{ - fpu_clear(&tsk->thread.fpu); -} - static inline int fxsave_user(struct i387_fxsave_struct __user *fx) { int err; @@ -188,16 +158,6 @@ static inline void fpu_fxsave(struct fpu *fpu) : [fx] "R" (&fpu->state->fxsave)); } -static inline void fpu_save_init(struct fpu *fpu) -{ - if (use_xsave()) - fpu_xsave(fpu); - else - fpu_fxsave(fpu); - - fpu_clear(fpu); -} - #else /* CONFIG_X86_32 */ /* perform fxrstor iff the processor has extended states, otherwise frstor */ @@ -222,6 +182,8 @@ static inline void fpu_fxsave(struct fpu *fpu) : [fx] "=m" (fpu->state->fxsave)); } +#endif /* CONFIG_X86_64 */ + /* We need a safe address that is cheap to find and that is already in L1 during context switch. The best choices are unfortunately different for UP and SMP */ @@ -259,15 +221,13 @@ static inline void fpu_save_init(struct fpu *fpu) is pending. Clear the x87 state here by setting it to fixed values. safe_address is a random variable that should be in L1 */ alternative_input( - GENERIC_NOP8 GENERIC_NOP2, + ASM_NOP8 ASM_NOP2, "emms\n\t" /* clear stack tags */ - "fildl %[addr]", /* set F?P to defined value */ + "fildl %P[addr]", /* set F?P to defined value */ X86_FEATURE_FXSAVE_LEAK, [addr] "m" (safe_address)); } -#endif /* CONFIG_X86_64 */ - static inline void __save_init_fpu(struct task_struct *tsk) { fpu_save_init(&tsk->thread.fpu); -- cgit v1.2.3-70-g09d2 From db7829c6cc32f3c0c9a324118d743acb1abff081 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Thu, 9 Sep 2010 18:17:26 +0200 Subject: x86, percpu: Optimize this_cpu_ptr Allow arches to implement __this_cpu_ptr, and provide an x86 version. Before: movq $foo, %rax movq %gs:this_cpu_off, %rdx addq %rdx, %rax After: movq $foo, %rax addq %gs:this_cpu_off, %rax The benefit is doing it in one less instruction and not clobbering a temporary register. tj: * Beefed up the comment a bit and renamed in-macro temp variable to match neighboring macros. * Folded fix for const pointer case found in linux-next. * Fixed sparse notation. 
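For illustration only (the per-cpu structure and helper below are hypothetical, not part of the patch), the saving applies to any caller that turns a per-cpu symbol into a pointer for the current CPU:

#include <linux/percpu.h>

/* Hypothetical per-cpu data used only to show the call pattern. */
struct demo_stats {
	unsigned long hits;
};
static DEFINE_PER_CPU(struct demo_stats, demo_stats);

/* Caller is assumed to run with preemption already disabled. */
static void demo_count_hit(void)
{
	struct demo_stats *s = __this_cpu_ptr(&demo_stats);

	s->hits++;
}

With the x86 __this_cpu_ptr() added below, the pointer in demo_count_hit() is formed by a single addq against %gs:this_cpu_off instead of a separate load of the offset followed by an add.
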
Signed-off-by: Brian Gerst Signed-off-by: Tejun Heo --- arch/x86/include/asm/percpu.h | 14 ++++++++++++++ include/asm-generic/percpu.h | 9 +++++++-- 2 files changed, 21 insertions(+), 2 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index cd28f9ad910..f899e01a8ac 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -47,6 +47,20 @@ #ifdef CONFIG_SMP #define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x #define __my_cpu_offset percpu_read(this_cpu_off) + +/* + * Compared to the generic __my_cpu_offset version, the following + * saves one instruction and avoids clobbering a temp register. + */ +#define __this_cpu_ptr(ptr) \ +({ \ + unsigned long tcp_ptr__; \ + __verify_pcpu_ptr(ptr); \ + asm volatile("add " __percpu_arg(1) ", %0" \ + : "=r" (tcp_ptr__) \ + : "m" (this_cpu_off), "0" (ptr)); \ + (typeof(*(ptr)) __kernel __force *)tcp_ptr__; \ +}) #else #define __percpu_arg(x) "%P" #x #endif diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index 08923b68476..ec643115bb5 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -60,9 +60,14 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; #define __raw_get_cpu_var(var) \ (*SHIFT_PERCPU_PTR(&(var), __my_cpu_offset)) -#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset) +#ifndef __this_cpu_ptr #define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset) - +#endif +#ifdef CONFIG_DEBUG_PREEMPT +#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset) +#else +#define this_cpu_ptr(ptr) __this_cpu_ptr(ptr) +#endif #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA extern void setup_per_cpu_areas(void); -- cgit v1.2.3-70-g09d2 From 62a92f4c69cd1d9361ad8c16be1dd16e6821bc15 Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Fri, 27 Aug 2010 11:09:49 -0700 Subject: x86, intr-remap: Remove IRTE setup duplicate code Remove IRTE setup duplicate code with prepare_irte(). Signed-off-by: Suresh Siddha LKML-Reference: <20100827181049.095067319@sbsiddha-MOBL3.sc.intel.com> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/irq_remapping.h | 27 +++++++++++++++++++++++++++ arch/x86/kernel/apic/io_apic.c | 27 ++------------------------- 2 files changed, 29 insertions(+), 25 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h index f275e224450..8d841505344 100644 --- a/arch/x86/include/asm/irq_remapping.h +++ b/arch/x86/include/asm/irq_remapping.h @@ -3,4 +3,31 @@ #define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8) +#ifdef CONFIG_INTR_REMAP +static inline void prepare_irte(struct irte *irte, int vector, + unsigned int dest) +{ + memset(irte, 0, sizeof(*irte)); + + irte->present = 1; + irte->dst_mode = apic->irq_dest_mode; + /* + * Trigger mode in the IRTE will always be edge, and for IO-APIC, the + * actual level or edge trigger will be setup in the IO-APIC + * RTE. This will help simplify level triggered irq migration. + * For more details, see the comments (in io_apic.c) explainig IO-APIC + * irq migration in the presence of interrupt-remapping. 
+ */ + irte->trigger_mode = 0; + irte->dlvry_mode = apic->irq_delivery_mode; + irte->vector = vector; + irte->dest_id = IRTE_DEST(dest); + irte->redir_hint = 1; +} +#else +static void prepare_irte(struct irte *irte, int vector, unsigned int dest) +{ +} +#endif + #endif /* _ASM_X86_IRQ_REMAPPING_H */ diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 90f8a75f548..e8c95a22614 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1377,22 +1377,7 @@ int setup_ioapic_entry(int apic_id, int irq, if (index < 0) panic("Failed to allocate IRTE for ioapic %d\n", apic_id); - memset(&irte, 0, sizeof(irte)); - - irte.present = 1; - irte.dst_mode = apic->irq_dest_mode; - /* - * Trigger mode in the IRTE will always be edge, and the - * actual level or edge trigger will be setup in the IO-APIC - * RTE. This will help simplify level triggered irq migration. - * For more details, see the comments above explainig IO-APIC - * irq migration in the presence of interrupt-remapping. - */ - irte.trigger_mode = 0; - irte.dlvry_mode = apic->irq_delivery_mode; - irte.vector = vector; - irte.dest_id = IRTE_DEST(destination); - irte.redir_hint = 1; + prepare_irte(&irte, vector, destination); /* Set source-id of interrupt request */ set_ioapic_sid(&irte, apic_id); @@ -3336,15 +3321,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, ir_index = map_irq_to_irte_handle(irq, &sub_handle); BUG_ON(ir_index == -1); - memset (&irte, 0, sizeof(irte)); - - irte.present = 1; - irte.dst_mode = apic->irq_dest_mode; - irte.trigger_mode = 0; /* edge */ - irte.dlvry_mode = apic->irq_delivery_mode; - irte.vector = cfg->vector; - irte.dest_id = IRTE_DEST(dest); - irte.redir_hint = 1; + prepare_irte(&irte, cfg->vector, dest); /* Set source-id of interrupt request */ if (pdev) -- cgit v1.2.3-70-g09d2 From 3ee48b6af49cf534ca2f481ecc484b156a41451d Mon Sep 17 00:00:00 2001 From: Cliff Wickman Date: Thu, 16 Sep 2010 11:44:02 -0500 Subject: mm, x86: Saving vmcore with non-lazy freeing of vmas During the reading of /proc/vmcore the kernel is doing ioremap()/iounmap() repeatedly. And the buildup of un-flushed vm_area_struct's is causing a great deal of overhead. (rb_next() is chewing up most of that time). This solution is to provide function set_iounmap_nonlazy(). It causes a subsequent call to iounmap() to immediately purge the vma area (with try_purge_vmap_area_lazy()). With this patch we have seen the time for writing a 250MB compressed dump drop from 71 seconds to 44 seconds. 
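For illustration only, a hedged sketch of the access pattern being optimized; the helper name and the simplified error handling are hypothetical, and the real caller is copy_oldmem_page() in the hunk below:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/io.h>

/* Illustrative sketch: read one page of the old kernel's memory the way
 * /proc/vmcore does, asking the following iounmap() to purge the mapping
 * immediately instead of parking it on the lazy vmap purge list. */
static ssize_t demo_read_oldmem(unsigned long pfn, char *buf,
				size_t csize, unsigned long offset)
{
	void *vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);

	if (!vaddr)
		return -ENOMEM;

	memcpy(buf, vaddr + offset, csize);

	set_iounmap_nonlazy();	/* next iounmap() frees the vmap area right away */
	iounmap(vaddr);

	return csize;
}
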
Signed-off-by: Cliff Wickman Cc: Andrew Morton Cc: kexec@lists.infradead.org Cc: LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/include/asm/io.h | 1 + arch/x86/kernel/crash_dump_64.c | 1 + mm/vmalloc.c | 9 +++++++++ 3 files changed, 11 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 30a3e977612..6a45ec41ec2 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -206,6 +206,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) extern void iounmap(volatile void __iomem *addr); +extern void set_iounmap_nonlazy(void); #ifdef __KERNEL__ diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c index bf43188ca65..994828899e0 100644 --- a/arch/x86/kernel/crash_dump_64.c +++ b/arch/x86/kernel/crash_dump_64.c @@ -46,6 +46,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, } else memcpy(buf, vaddr + offset, csize); + set_iounmap_nonlazy(); iounmap(vaddr); return csize; } diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 6b8889da69a..d8087f0db50 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -516,6 +516,15 @@ static atomic_t vmap_lazy_nr = ATOMIC_INIT(0); /* for per-CPU blocks */ static void purge_fragmented_blocks_allcpus(void); +/* + * called before a call to iounmap() if the caller wants vm_area_struct's + * immediately freed. + */ +void set_iounmap_nonlazy(void) +{ + atomic_set(&vmap_lazy_nr, lazy_max_pages()+1); +} + /* * Purges all lazily-freed vmap areas. * -- cgit v1.2.3-70-g09d2 From 900f9ac9f12dc3dd6fc8e33e16df172eafcaead6 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Fri, 17 Sep 2010 18:02:54 +0200 Subject: x86, k8-gart: Decouple handling of garts and northbridges So far we only provide num_k8_northbridges. This is required in different areas (e.g. L3 cache index disable, GART). But not all AMD CPUs provide a GART. Thus it is useful to split off the GART handling from the generic caching of AMD northbridge misc devices. Signed-off-by: Andreas Herrmann LKML-Reference: <20100917160254.GC4958@loge.amd.com> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/k8.h | 13 +++++---- arch/x86/kernel/cpu/intel_cacheinfo.c | 4 +-- arch/x86/kernel/k8.c | 52 ++++++++++++++++++++--------------- arch/x86/kernel/pci-gart_64.c | 27 ++++++++++++------ drivers/char/agp/amd64-agp.c | 33 +++++++++++++++------- drivers/edac/amd64_edac.c | 2 +- 6 files changed, 82 insertions(+), 49 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/k8.h b/arch/x86/include/asm/k8.h index af00bd1d208..9cee145dcac 100644 --- a/arch/x86/include/asm/k8.h +++ b/arch/x86/include/asm/k8.h @@ -7,24 +7,27 @@ extern struct pci_device_id k8_nb_ids[]; struct bootnode; extern int early_is_k8_nb(u32 value); -extern struct pci_dev **k8_northbridges; -extern int num_k8_northbridges; extern int cache_k8_northbridges(void); extern void k8_flush_garts(void); extern int k8_get_nodes(struct bootnode *nodes); extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn); extern int k8_scan_nodes(void); +struct k8_northbridge_info { + u16 num; + u8 gart_supported; + struct pci_dev **nb_misc; +}; +extern struct k8_northbridge_info k8_northbridges; + #ifdef CONFIG_K8_NB -extern int num_k8_northbridges; static inline struct pci_dev *node_to_k8_nb_misc(int node) { - return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL; + return (node < k8_northbridges.num) ? 
k8_northbridges.nb_misc[node] : NULL; } #else -#define num_k8_northbridges 0 static inline struct pci_dev *node_to_k8_nb_misc(int node) { diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 2521cdcb877..6fdfb0b20f8 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -369,7 +369,7 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf, return; /* not in virtualized environments */ - if (num_k8_northbridges == 0) + if (k8_northbridges.num == 0) return; /* @@ -377,7 +377,7 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf, * never freed but this is done only on shutdown so it doesn't matter. */ if (!l3_caches) { - int size = num_k8_northbridges * sizeof(struct amd_l3_cache *); + int size = k8_northbridges.num * sizeof(struct amd_l3_cache *); l3_caches = kzalloc(size, GFP_ATOMIC); if (!l3_caches) diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/k8.c index 0f7bc20cfcd..5de1b6b3963 100644 --- a/arch/x86/kernel/k8.c +++ b/arch/x86/kernel/k8.c @@ -10,9 +10,6 @@ #include #include -int num_k8_northbridges; -EXPORT_SYMBOL(num_k8_northbridges); - static u32 *flush_words; struct pci_device_id k8_nb_ids[] = { @@ -22,7 +19,7 @@ struct pci_device_id k8_nb_ids[] = { }; EXPORT_SYMBOL(k8_nb_ids); -struct pci_dev **k8_northbridges; +struct k8_northbridge_info k8_northbridges; EXPORT_SYMBOL(k8_northbridges); static struct pci_dev *next_k8_northbridge(struct pci_dev *dev) @@ -40,36 +37,44 @@ int cache_k8_northbridges(void) int i; struct pci_dev *dev; - if (num_k8_northbridges) + if (k8_northbridges.num) return 0; dev = NULL; while ((dev = next_k8_northbridge(dev)) != NULL) - num_k8_northbridges++; + k8_northbridges.num++; + + /* some CPU families (e.g. family 0x11) do not support GART */ + if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10) + k8_northbridges.gart_supported = 1; - k8_northbridges = kmalloc((num_k8_northbridges + 1) * sizeof(void *), - GFP_KERNEL); - if (!k8_northbridges) + k8_northbridges.nb_misc = kmalloc((k8_northbridges.num + 1) * + sizeof(void *), GFP_KERNEL); + if (!k8_northbridges.nb_misc) return -ENOMEM; - if (!num_k8_northbridges) { - k8_northbridges[0] = NULL; + if (!k8_northbridges.num) { + k8_northbridges.nb_misc[0] = NULL; return 0; } - flush_words = kmalloc(num_k8_northbridges * sizeof(u32), GFP_KERNEL); - if (!flush_words) { - kfree(k8_northbridges); - return -ENOMEM; + if (k8_northbridges.gart_supported) { + flush_words = kmalloc(k8_northbridges.num * sizeof(u32), + GFP_KERNEL); + if (!flush_words) { + kfree(k8_northbridges.nb_misc); + return -ENOMEM; + } } dev = NULL; i = 0; while ((dev = next_k8_northbridge(dev)) != NULL) { - k8_northbridges[i] = dev; - pci_read_config_dword(dev, 0x9c, &flush_words[i++]); + k8_northbridges.nb_misc[i] = dev; + if (k8_northbridges.gart_supported) + pci_read_config_dword(dev, 0x9c, &flush_words[i++]); } - k8_northbridges[i] = NULL; + k8_northbridges.nb_misc[i] = NULL; return 0; } EXPORT_SYMBOL_GPL(cache_k8_northbridges); @@ -93,22 +98,25 @@ void k8_flush_garts(void) unsigned long flags; static DEFINE_SPINLOCK(gart_lock); + if (!k8_northbridges.gart_supported) + return; + /* Avoid races between AGP and IOMMU. In theory it's not needed but I'm not sure if the hardware won't lose flush requests when another is pending. This whole thing is so expensive anyways that it doesn't matter to serialize more. 
-AK */ spin_lock_irqsave(&gart_lock, flags); flushed = 0; - for (i = 0; i < num_k8_northbridges; i++) { - pci_write_config_dword(k8_northbridges[i], 0x9c, + for (i = 0; i < k8_northbridges.num; i++) { + pci_write_config_dword(k8_northbridges.nb_misc[i], 0x9c, flush_words[i]|1); flushed++; } - for (i = 0; i < num_k8_northbridges; i++) { + for (i = 0; i < k8_northbridges.num; i++) { u32 w; /* Make sure the hardware actually executed the flush*/ for (;;) { - pci_read_config_dword(k8_northbridges[i], + pci_read_config_dword(k8_northbridges.nb_misc[i], 0x9c, &w); if (!(w & 1)) break; diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index 0f7f130caa6..8f214a2643f 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c @@ -560,8 +560,11 @@ static void enable_gart_translations(void) { int i; - for (i = 0; i < num_k8_northbridges; i++) { - struct pci_dev *dev = k8_northbridges[i]; + if (!k8_northbridges.gart_supported) + return; + + for (i = 0; i < k8_northbridges.num; i++) { + struct pci_dev *dev = k8_northbridges.nb_misc[i]; enable_gart_translation(dev, __pa(agp_gatt_table)); } @@ -592,10 +595,13 @@ static void gart_fixup_northbridges(struct sys_device *dev) if (!fix_up_north_bridges) return; + if (!k8_northbridges.gart_supported) + return; + pr_info("PCI-DMA: Restoring GART aperture settings\n"); - for (i = 0; i < num_k8_northbridges; i++) { - struct pci_dev *dev = k8_northbridges[i]; + for (i = 0; i < k8_northbridges.num; i++) { + struct pci_dev *dev = k8_northbridges.nb_misc[i]; /* * Don't enable translations just yet. That is the next @@ -649,8 +655,8 @@ static __init int init_k8_gatt(struct agp_kern_info *info) aper_size = aper_base = info->aper_size = 0; dev = NULL; - for (i = 0; i < num_k8_northbridges; i++) { - dev = k8_northbridges[i]; + for (i = 0; i < k8_northbridges.num; i++) { + dev = k8_northbridges.nb_misc[i]; new_aper_base = read_aperture(dev, &new_aper_size); if (!new_aper_base) goto nommu; @@ -718,10 +724,13 @@ static void gart_iommu_shutdown(void) if (!no_agp) return; - for (i = 0; i < num_k8_northbridges; i++) { + if (!k8_northbridges.gart_supported) + return; + + for (i = 0; i < k8_northbridges.num; i++) { u32 ctl; - dev = k8_northbridges[i]; + dev = k8_northbridges.nb_misc[i]; pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl); ctl &= ~GARTEN; @@ -739,7 +748,7 @@ int __init gart_iommu_init(void) unsigned long scratch; long i; - if (num_k8_northbridges == 0) + if (!k8_northbridges.gart_supported) return 0; #ifndef CONFIG_AGP_AMD64 diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 70312da4c96..bdf00d583d7 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -124,7 +124,7 @@ static int amd64_fetch_size(void) u32 temp; struct aper_size_info_32 *values; - dev = k8_northbridges[0]; + dev = k8_northbridges.nb_misc[0]; if (dev==NULL) return 0; @@ -181,10 +181,14 @@ static int amd_8151_configure(void) unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real); int i; + if (!k8_northbridges.gart_supported) + return 0; + /* Configure AGP regs in each x86-64 host bridge. 
*/ - for (i = 0; i < num_k8_northbridges; i++) { + for (i = 0; i < k8_northbridges.num; i++) { agp_bridge->gart_bus_addr = - amd64_configure(k8_northbridges[i], gatt_bus); + amd64_configure(k8_northbridges.nb_misc[i], + gatt_bus); } k8_flush_garts(); return 0; @@ -195,8 +199,12 @@ static void amd64_cleanup(void) { u32 tmp; int i; - for (i = 0; i < num_k8_northbridges; i++) { - struct pci_dev *dev = k8_northbridges[i]; + + if (!k8_northbridges.gart_supported) + return; + + for (i = 0; i < k8_northbridges.num; i++) { + struct pci_dev *dev = k8_northbridges.nb_misc[i]; /* disable gart translation */ pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp); tmp &= ~AMD64_GARTEN; @@ -319,16 +327,19 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, return 0; } -static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr) +static __devinit int cache_nbs(struct pci_dev *pdev, u32 cap_ptr) { int i; if (cache_k8_northbridges() < 0) return -ENODEV; + if (!k8_northbridges.gart_supported) + return -ENODEV; + i = 0; - for (i = 0; i < num_k8_northbridges; i++) { - struct pci_dev *dev = k8_northbridges[i]; + for (i = 0; i < k8_northbridges.num; i++) { + struct pci_dev *dev = k8_northbridges.nb_misc[i]; if (fix_northbridge(dev, pdev, cap_ptr) < 0) { dev_err(&dev->dev, "no usable aperture found\n"); #ifdef __x86_64__ @@ -405,7 +416,8 @@ static int __devinit uli_agp_init(struct pci_dev *pdev) } /* shadow x86-64 registers into ULi registers */ - pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea); + pci_read_config_dword (k8_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE, + &httfea); /* if x86-64 aperture base is beyond 4G, exit here */ if ((httfea & 0x7fff) >> (32 - 25)) { @@ -472,7 +484,8 @@ static int nforce3_agp_init(struct pci_dev *pdev) pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp); /* shadow x86-64 registers into NVIDIA registers */ - pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &apbase); + pci_read_config_dword (k8_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE, + &apbase); /* if x86-64 aperture base is beyond 4G, exit here */ if ( (apbase & 0x7fff) >> (32 - 25) ) { diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index e7d5d6b5dcf..5babf6f4805 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -2927,7 +2927,7 @@ static int __init amd64_edac_init(void) * to finish initialization of the MC instances. */ err = -ENODEV; - for (nb = 0; nb < num_k8_northbridges; nb++) { + for (nb = 0; nb < k8_northbridges.num; nb++) { if (!pvt_lookup[nb]) continue; -- cgit v1.2.3-70-g09d2 From bc83cccc761953f878088cdfa682de0970b5561f Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Fri, 17 Sep 2010 15:36:40 -0700 Subject: x86, mwait: Move mwait constants to a common header file We have MWAIT constants spread across three different .c files, for no good reason. Move them all into a common header file. Signed-off-by: H. 
Peter Anvin Reviewed-by: Arjan van de Ven Cc: Len Brown LKML-Reference: --- arch/x86/include/asm/mwait.h | 15 +++++++++++++++ arch/x86/kernel/acpi/cstate.c | 11 +---------- drivers/acpi/acpi_pad.c | 7 +------ drivers/idle/intel_idle.c | 9 +-------- 4 files changed, 18 insertions(+), 24 deletions(-) create mode 100644 arch/x86/include/asm/mwait.h (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h new file mode 100644 index 00000000000..bcdff997668 --- /dev/null +++ b/arch/x86/include/asm/mwait.h @@ -0,0 +1,15 @@ +#ifndef _ASM_X86_MWAIT_H +#define _ASM_X86_MWAIT_H + +#define MWAIT_SUBSTATE_MASK 0xf +#define MWAIT_CSTATE_MASK 0xf +#define MWAIT_SUBSTATE_SIZE 4 +#define MWAIT_MAX_NUM_CSTATES 8 + +#define CPUID_MWAIT_LEAF 5 +#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1 +#define CPUID5_ECX_INTERRUPT_BREAK 0x2 + +#define MWAIT_ECX_INTERRUPT_BREAK 0x1 + +#endif /* _ASM_X86_MWAIT_H */ diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index fb7a5f052e2..bcc4adda7b9 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -13,6 +13,7 @@ #include #include +#include /* * Initialize bm_flags based on the CPU cache properties @@ -65,16 +66,6 @@ static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */ static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; -#define MWAIT_SUBSTATE_MASK (0xf) -#define MWAIT_CSTATE_MASK (0xf) -#define MWAIT_SUBSTATE_SIZE (4) - -#define CPUID_MWAIT_LEAF (5) -#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1) -#define CPUID5_ECX_INTERRUPT_BREAK (0x2) - -#define MWAIT_ECX_INTERRUPT_BREAK (0x1) - #define NATIVE_CSTATE_BEYOND_HALT (2) static long acpi_processor_ffh_cstate_probe_cpu(void *_cx) diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index b76848c80be..ca2694d365e 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -30,18 +30,13 @@ #include #include #include +#include #define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad" #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator" #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80 static DEFINE_MUTEX(isolated_cpus_lock); -#define MWAIT_SUBSTATE_MASK (0xf) -#define MWAIT_CSTATE_MASK (0xf) -#define MWAIT_SUBSTATE_SIZE (4) -#define CPUID_MWAIT_LEAF (5) -#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1) -#define CPUID5_ECX_INTERRUPT_BREAK (0x2) static unsigned long power_saving_mwait_eax; static unsigned char tsc_detected_unstable; diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index a10152bb142..065c8041c91 100755 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -59,18 +59,11 @@ #include /* ktime_get_real() */ #include #include +#include #define INTEL_IDLE_VERSION "0.4" #define PREFIX "intel_idle: " -#define MWAIT_SUBSTATE_MASK (0xf) -#define MWAIT_CSTATE_MASK (0xf) -#define MWAIT_SUBSTATE_SIZE (4) -#define MWAIT_MAX_NUM_CSTATES 8 -#define CPUID_MWAIT_LEAF (5) -#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1) -#define CPUID5_ECX_INTERRUPT_BREAK (0x2) - static struct cpuidle_driver intel_idle_driver = { .name = "intel_idle", .owner = THIS_MODULE, -- cgit v1.2.3-70-g09d2 From ea53069231f9317062910d6e772cca4ce93de8c8 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Fri, 17 Sep 2010 15:39:11 -0700 Subject: x86, hotplug: Use mwait to offline a processor, fix the legacy case The code in native_play_dead() has a number of problems: 1. We should use MWAIT when available, to put ourselves into a deeper sleep state. 2. 
We use the existence of CLFLUSH to determine if WBINVD is safe, but that is totally bogus -- WBINVD is 486+, whereas CLFLUSH is a much later addition. 3. We should do WBINVD inside the loop, just in case of something like setting an A bit on page tables. Pointed out by Arjan van de Ven. This code is based in part of a previous patch by Venki Pallipadi, but unlike that patch this one keeps all the detection code local instead of pre-caching a bunch of information. We're shutting down the CPU; there is absolutely no hurry. This patch moves all the code to C and deletes the global wbinvd_halt() which is broken anyway. Originally-by: Venkatesh Pallipadi Signed-off-by: H. Peter Anvin Reviewed-by: Arjan van de Ven Cc: Len Brown Cc: Venkatesh Pallipadi Cc: Peter Zijlstra LKML-Reference: <20090522232230.162239000@intel.com> --- arch/x86/include/asm/processor.h | 23 --------------- arch/x86/kernel/smpboot.c | 63 +++++++++++++++++++++++++++++++++++++++- 2 files changed, 62 insertions(+), 24 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 325b7bdbeba..f358241e412 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -764,29 +764,6 @@ extern unsigned long idle_halt; extern unsigned long idle_nomwait; extern bool c1e_detected; -/* - * on systems with caches, caches must be flashed as the absolute - * last instruction before going into a suspended halt. Otherwise, - * dirty data can linger in the cache and become stale on resume, - * leading to strange errors. - * - * perform a variety of operations to guarantee that the compiler - * will not reorder instructions. wbinvd itself is serializing - * so the processor will not reorder. - * - * Systems without cache can just go into halt. - */ -static inline void wbinvd_halt(void) -{ - mb(); - /* check for clflush to determine if wbinvd is legal */ - if (cpu_has_clflush) - asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory"); - else - while (1) - halt(); -} - extern void enable_sep_cpu(void); extern int sysenter_setup(void); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 8b3bfc4dd70..07bf4233441 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -62,6 +62,7 @@ #include #include #include +#include #include #include #include @@ -1383,11 +1384,71 @@ void play_dead_common(void) local_irq_disable(); } +/* + * We need to flush the caches before going to sleep, lest we have + * dirty data in our caches when we come back up. + */ +static inline void mwait_play_dead(void) +{ + unsigned int eax, ebx, ecx, edx; + unsigned int highest_cstate = 0; + unsigned int highest_subcstate = 0; + int i; + + if (!cpu_has(¤t_cpu_data, X86_FEATURE_MWAIT)) + return; + if (current_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) + return; + + eax = CPUID_MWAIT_LEAF; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + + /* + * eax will be 0 if EDX enumeration is not valid. + * Initialized below to cstate, sub_cstate value when EDX is valid. 
+ */ + if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) { + eax = 0; + } else { + edx >>= MWAIT_SUBSTATE_SIZE; + for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) { + if (edx & MWAIT_SUBSTATE_MASK) { + highest_cstate = i; + highest_subcstate = edx & MWAIT_SUBSTATE_MASK; + } + } + eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) | + (highest_subcstate - 1); + } + + while (1) { + mb(); + wbinvd(); + __monitor(¤t_thread_info()->flags, 0, 0); + mb(); + __mwait(eax, 0); + } +} + +static inline void hlt_play_dead(void) +{ + while (1) { + mb(); + if (current_cpu_data.x86 >= 4) + wbinvd(); + mb(); + native_halt(); + } +} + void native_play_dead(void) { play_dead_common(); tboot_shutdown(TB_SHUTDOWN_WFS); - wbinvd_halt(); + + mwait_play_dead(); /* Only returns on failure */ + hlt_play_dead(); } #else /* ... !CONFIG_HOTPLUG_CPU */ -- cgit v1.2.3-70-g09d2 From 23ac4ae827e6264e21b898f2cd3f601450aa02a6 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Fri, 17 Sep 2010 18:03:43 +0200 Subject: x86, k8: Rename k8.[ch] to amd_nb.[ch] and CONFIG_K8_NB to CONFIG_AMD_NB The file names are somehow misleading as the code is not specific to AMD K8 CPUs anymore. The files accomodate code for other AMD CPU northbridges as well. Same is true for the config option which is valid for AMD CPU northbridges in general and not specific to K8. Signed-off-by: Andreas Herrmann LKML-Reference: <20100917160343.GD4958@loge.amd.com> Signed-off-by: H. Peter Anvin --- arch/x86/Kconfig | 4 +- arch/x86/include/asm/amd_nb.h | 39 +++++++++ arch/x86/include/asm/k8.h | 39 --------- arch/x86/kernel/Makefile | 2 +- arch/x86/kernel/amd_nb.c | 145 ++++++++++++++++++++++++++++++++++ arch/x86/kernel/aperture_64.c | 2 +- arch/x86/kernel/cpu/intel_cacheinfo.c | 10 +-- arch/x86/kernel/k8.c | 145 ---------------------------------- arch/x86/kernel/pci-gart_64.c | 2 +- arch/x86/kernel/setup.c | 2 +- arch/x86/mm/k8topology_64.c | 2 +- arch/x86/mm/numa_64.c | 2 +- drivers/char/agp/Kconfig | 2 +- drivers/char/agp/amd64-agp.c | 2 +- drivers/edac/Kconfig | 2 +- drivers/edac/amd64_edac.c | 2 +- 16 files changed, 201 insertions(+), 201 deletions(-) create mode 100644 arch/x86/include/asm/amd_nb.h delete mode 100644 arch/x86/include/asm/k8.h create mode 100644 arch/x86/kernel/amd_nb.c delete mode 100644 arch/x86/kernel/k8.c (limited to 'arch/x86/include') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index cea0cd9a316..7fd41f0d754 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -670,7 +670,7 @@ config GART_IOMMU bool "GART IOMMU support" if EMBEDDED default y select SWIOTLB - depends on X86_64 && PCI && K8_NB + depends on X86_64 && PCI && AMD_NB ---help--- Support for full DMA access of devices with 32bit memory access only on systems with more than 3GB. 
This is usually needed for USB, @@ -2076,7 +2076,7 @@ config OLPC_OPENFIRMWARE endif # X86_32 -config K8_NB +config AMD_NB def_bool y depends on CPU_SUP_AMD && PCI diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h new file mode 100644 index 00000000000..c8517f81b21 --- /dev/null +++ b/arch/x86/include/asm/amd_nb.h @@ -0,0 +1,39 @@ +#ifndef _ASM_X86_AMD_NB_H +#define _ASM_X86_AMD_NB_H + +#include + +extern struct pci_device_id k8_nb_ids[]; +struct bootnode; + +extern int early_is_k8_nb(u32 value); +extern int cache_k8_northbridges(void); +extern void k8_flush_garts(void); +extern int k8_get_nodes(struct bootnode *nodes); +extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn); +extern int k8_scan_nodes(void); + +struct k8_northbridge_info { + u16 num; + u8 gart_supported; + struct pci_dev **nb_misc; +}; +extern struct k8_northbridge_info k8_northbridges; + +#ifdef CONFIG_AMD_NB + +static inline struct pci_dev *node_to_k8_nb_misc(int node) +{ + return (node < k8_northbridges.num) ? k8_northbridges.nb_misc[node] : NULL; +} + +#else + +static inline struct pci_dev *node_to_k8_nb_misc(int node) +{ + return NULL; +} +#endif + + +#endif /* _ASM_X86_AMD_NB_H */ diff --git a/arch/x86/include/asm/k8.h b/arch/x86/include/asm/k8.h deleted file mode 100644 index 9cee145dcac..00000000000 --- a/arch/x86/include/asm/k8.h +++ /dev/null @@ -1,39 +0,0 @@ -#ifndef _ASM_X86_K8_H -#define _ASM_X86_K8_H - -#include - -extern struct pci_device_id k8_nb_ids[]; -struct bootnode; - -extern int early_is_k8_nb(u32 value); -extern int cache_k8_northbridges(void); -extern void k8_flush_garts(void); -extern int k8_get_nodes(struct bootnode *nodes); -extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn); -extern int k8_scan_nodes(void); - -struct k8_northbridge_info { - u16 num; - u8 gart_supported; - struct pci_dev **nb_misc; -}; -extern struct k8_northbridge_info k8_northbridges; - -#ifdef CONFIG_K8_NB - -static inline struct pci_dev *node_to_k8_nb_misc(int node) -{ - return (node < k8_northbridges.num) ? k8_northbridges.nb_misc[node] : NULL; -} - -#else - -static inline struct pci_dev *node_to_k8_nb_misc(int node) -{ - return NULL; -} -#endif - - -#endif /* _ASM_X86_K8_H */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 0925676266b..25dc82d3e76 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -87,7 +87,7 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_HPET_TIMER) += hpet.o obj-$(CONFIG_APB_TIMER) += apb_timer.o -obj-$(CONFIG_K8_NB) += k8.o +obj-$(CONFIG_AMD_NB) += amd_nb.o obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c new file mode 100644 index 00000000000..4ffc38df7ac --- /dev/null +++ b/arch/x86/kernel/amd_nb.c @@ -0,0 +1,145 @@ +/* + * Shared support code for AMD K8 northbridges and derivates. + * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2. 
+ */ +#include +#include +#include +#include +#include +#include +#include + +static u32 *flush_words; + +struct pci_device_id k8_nb_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, + {} +}; +EXPORT_SYMBOL(k8_nb_ids); + +struct k8_northbridge_info k8_northbridges; +EXPORT_SYMBOL(k8_northbridges); + +static struct pci_dev *next_k8_northbridge(struct pci_dev *dev) +{ + do { + dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); + if (!dev) + break; + } while (!pci_match_id(&k8_nb_ids[0], dev)); + return dev; +} + +int cache_k8_northbridges(void) +{ + int i; + struct pci_dev *dev; + + if (k8_northbridges.num) + return 0; + + dev = NULL; + while ((dev = next_k8_northbridge(dev)) != NULL) + k8_northbridges.num++; + + /* some CPU families (e.g. family 0x11) do not support GART */ + if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10) + k8_northbridges.gart_supported = 1; + + k8_northbridges.nb_misc = kmalloc((k8_northbridges.num + 1) * + sizeof(void *), GFP_KERNEL); + if (!k8_northbridges.nb_misc) + return -ENOMEM; + + if (!k8_northbridges.num) { + k8_northbridges.nb_misc[0] = NULL; + return 0; + } + + if (k8_northbridges.gart_supported) { + flush_words = kmalloc(k8_northbridges.num * sizeof(u32), + GFP_KERNEL); + if (!flush_words) { + kfree(k8_northbridges.nb_misc); + return -ENOMEM; + } + } + + dev = NULL; + i = 0; + while ((dev = next_k8_northbridge(dev)) != NULL) { + k8_northbridges.nb_misc[i] = dev; + if (k8_northbridges.gart_supported) + pci_read_config_dword(dev, 0x9c, &flush_words[i++]); + } + k8_northbridges.nb_misc[i] = NULL; + return 0; +} +EXPORT_SYMBOL_GPL(cache_k8_northbridges); + +/* Ignores subdevice/subvendor but as far as I can figure out + they're useless anyways */ +int __init early_is_k8_nb(u32 device) +{ + struct pci_device_id *id; + u32 vendor = device & 0xffff; + device >>= 16; + for (id = k8_nb_ids; id->vendor; id++) + if (vendor == id->vendor && device == id->device) + return 1; + return 0; +} + +void k8_flush_garts(void) +{ + int flushed, i; + unsigned long flags; + static DEFINE_SPINLOCK(gart_lock); + + if (!k8_northbridges.gart_supported) + return; + + /* Avoid races between AGP and IOMMU. In theory it's not needed + but I'm not sure if the hardware won't lose flush requests + when another is pending. This whole thing is so expensive anyways + that it doesn't matter to serialize more. 
-AK */ + spin_lock_irqsave(&gart_lock, flags); + flushed = 0; + for (i = 0; i < k8_northbridges.num; i++) { + pci_write_config_dword(k8_northbridges.nb_misc[i], 0x9c, + flush_words[i]|1); + flushed++; + } + for (i = 0; i < k8_northbridges.num; i++) { + u32 w; + /* Make sure the hardware actually executed the flush*/ + for (;;) { + pci_read_config_dword(k8_northbridges.nb_misc[i], + 0x9c, &w); + if (!(w & 1)) + break; + cpu_relax(); + } + } + spin_unlock_irqrestore(&gart_lock, flags); + if (!flushed) + printk("nothing to flush?\n"); +} +EXPORT_SYMBOL_GPL(k8_flush_garts); + +static __init int init_k8_nbs(void) +{ + int err = 0; + + err = cache_k8_northbridges(); + + if (err < 0) + printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n"); + + return err; +} + +/* This has to go after the PCI subsystem */ +fs_initcall(init_k8_nbs); diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index a2e0caf26e1..e91e042a5c8 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include int gart_iommu_aperture; diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 6fdfb0b20f8..12cd823c8d0 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -17,7 +17,7 @@ #include #include -#include +#include #include #define LVL_1_INST 1 @@ -306,7 +306,7 @@ struct _cache_attr { ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count); }; -#ifdef CONFIG_K8_NB +#ifdef CONFIG_AMD_NB /* * L3 cache descriptors @@ -556,12 +556,12 @@ static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644, static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, show_cache_disable_1, store_cache_disable_1); -#else /* CONFIG_K8_NB */ +#else /* CONFIG_AMD_NB */ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf, int index) { }; -#endif /* CONFIG_K8_NB */ +#endif /* CONFIG_AMD_NB */ static int __cpuinit cpuid4_cache_lookup_regs(int index, @@ -1000,7 +1000,7 @@ static struct attribute *default_attrs[] = { static struct attribute *default_l3_attrs[] = { DEFAULT_SYSFS_CACHE_ATTRS, -#ifdef CONFIG_K8_NB +#ifdef CONFIG_AMD_NB &cache_disable_0.attr, &cache_disable_1.attr, #endif diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/k8.c deleted file mode 100644 index 5de1b6b3963..00000000000 --- a/arch/x86/kernel/k8.c +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Shared support code for AMD K8 northbridges and derivates. - * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2. - */ -#include -#include -#include -#include -#include -#include -#include - -static u32 *flush_words; - -struct pci_device_id k8_nb_ids[] = { - { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, - { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, - {} -}; -EXPORT_SYMBOL(k8_nb_ids); - -struct k8_northbridge_info k8_northbridges; -EXPORT_SYMBOL(k8_northbridges); - -static struct pci_dev *next_k8_northbridge(struct pci_dev *dev) -{ - do { - dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); - if (!dev) - break; - } while (!pci_match_id(&k8_nb_ids[0], dev)); - return dev; -} - -int cache_k8_northbridges(void) -{ - int i; - struct pci_dev *dev; - - if (k8_northbridges.num) - return 0; - - dev = NULL; - while ((dev = next_k8_northbridge(dev)) != NULL) - k8_northbridges.num++; - - /* some CPU families (e.g. 
family 0x11) do not support GART */ - if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10) - k8_northbridges.gart_supported = 1; - - k8_northbridges.nb_misc = kmalloc((k8_northbridges.num + 1) * - sizeof(void *), GFP_KERNEL); - if (!k8_northbridges.nb_misc) - return -ENOMEM; - - if (!k8_northbridges.num) { - k8_northbridges.nb_misc[0] = NULL; - return 0; - } - - if (k8_northbridges.gart_supported) { - flush_words = kmalloc(k8_northbridges.num * sizeof(u32), - GFP_KERNEL); - if (!flush_words) { - kfree(k8_northbridges.nb_misc); - return -ENOMEM; - } - } - - dev = NULL; - i = 0; - while ((dev = next_k8_northbridge(dev)) != NULL) { - k8_northbridges.nb_misc[i] = dev; - if (k8_northbridges.gart_supported) - pci_read_config_dword(dev, 0x9c, &flush_words[i++]); - } - k8_northbridges.nb_misc[i] = NULL; - return 0; -} -EXPORT_SYMBOL_GPL(cache_k8_northbridges); - -/* Ignores subdevice/subvendor but as far as I can figure out - they're useless anyways */ -int __init early_is_k8_nb(u32 device) -{ - struct pci_device_id *id; - u32 vendor = device & 0xffff; - device >>= 16; - for (id = k8_nb_ids; id->vendor; id++) - if (vendor == id->vendor && device == id->device) - return 1; - return 0; -} - -void k8_flush_garts(void) -{ - int flushed, i; - unsigned long flags; - static DEFINE_SPINLOCK(gart_lock); - - if (!k8_northbridges.gart_supported) - return; - - /* Avoid races between AGP and IOMMU. In theory it's not needed - but I'm not sure if the hardware won't lose flush requests - when another is pending. This whole thing is so expensive anyways - that it doesn't matter to serialize more. -AK */ - spin_lock_irqsave(&gart_lock, flags); - flushed = 0; - for (i = 0; i < k8_northbridges.num; i++) { - pci_write_config_dword(k8_northbridges.nb_misc[i], 0x9c, - flush_words[i]|1); - flushed++; - } - for (i = 0; i < k8_northbridges.num; i++) { - u32 w; - /* Make sure the hardware actually executed the flush*/ - for (;;) { - pci_read_config_dword(k8_northbridges.nb_misc[i], - 0x9c, &w); - if (!(w & 1)) - break; - cpu_relax(); - } - } - spin_unlock_irqrestore(&gart_lock, flags); - if (!flushed) - printk("nothing to flush?\n"); -} -EXPORT_SYMBOL_GPL(k8_flush_garts); - -static __init int init_k8_nbs(void) -{ - int err = 0; - - err = cache_k8_northbridges(); - - if (err < 0) - printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n"); - - return err; -} - -/* This has to go after the PCI subsystem */ -fs_initcall(init_k8_nbs); diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index 8f214a2643f..67e5665ce42 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c @@ -39,7 +39,7 @@ #include #include #include -#include +#include #include static unsigned long iommu_bus_base; /* GART remapping area (physical) */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index c3a4fbb2b99..77eea0362a4 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -107,7 +107,7 @@ #include #include #include -#include +#include #ifdef CONFIG_X86_64 #include #endif diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/k8topology_64.c index 970ed579d4e..ab75b181812 100644 --- a/arch/x86/mm/k8topology_64.c +++ b/arch/x86/mm/k8topology_64.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include static struct bootnode __initdata nodes[8]; static nodemask_t __initdata nodes_parsed = NODE_MASK_NONE; diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index a7bcc23ef96..4962f1aeda6 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ 
-18,7 +18,7 @@ #include #include #include -#include +#include struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; EXPORT_SYMBOL(node_data); diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig index 4b66c69eaf5..5ddf67e76f8 100644 --- a/drivers/char/agp/Kconfig +++ b/drivers/char/agp/Kconfig @@ -57,7 +57,7 @@ config AGP_AMD config AGP_AMD64 tristate "AMD Opteron/Athlon64 on-CPU GART support" - depends on AGP && X86 && K8_NB + depends on AGP && X86 && AMD_NB help This option gives you AGP support for the GLX component of X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs. diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index bdf00d583d7..4d6087c9ec9 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -15,7 +15,7 @@ #include #include /* PAGE_SIZE */ #include -#include +#include #include #include "agp.h" diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 70bb350de99..734e2e06237 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -66,7 +66,7 @@ config EDAC_MCE config EDAC_AMD64 tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h" - depends on EDAC_MM_EDAC && K8_NB && X86_64 && PCI && EDAC_DECODE_MCE + depends on EDAC_MM_EDAC && AMD_NB && X86_64 && PCI && EDAC_DECODE_MCE help Support for error detection and correction on the AMD 64 Families of Memory Controllers (K8, F10h and F11h) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 5babf6f4805..09fcc528232 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -1,5 +1,5 @@ #include "amd64_edac.h" -#include +#include static struct edac_pci_ctl_info *amd64_ctl_pci; -- cgit v1.2.3-70-g09d2 From f49aa448561fe9215f43405cac6f31eb86317792 Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Fri, 17 Sep 2010 11:08:51 -0400 Subject: jump label: Make dynamic no-op selection available outside of ftrace Move Steve's code for finding the best 5-byte no-op from ftrace.c to alternative.c. The idea is that other consumers (in this case jump label) want to make use of that code. 
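The intent is that any consumer copies the boot-time selected no-op over its own 5-byte call sites; a minimal sketch of such a consumer is below (patch_site() is illustrative only, not part of this patch, and the usual text_poke/stop_machine synchronization and write-protection handling are omitted):

	#include <linux/string.h>
	#include <asm/alternative.h>	/* ideal_nop5, IDEAL_NOP_SIZE_5 */

	/* Sketch only: overwrite a known 5-byte call/jmp site with the
	 * no-op that arch_init_ideal_nop5() selected at boot. */
	static void patch_site(void *ip)
	{
		memcpy(ip, ideal_nop5, IDEAL_NOP_SIZE_5);
	}
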
Signed-off-by: Jason Baron LKML-Reference: <96259ae74172dcac99c0020c249743c523a92e18.1284733808.git.jbaron@redhat.com> Signed-off-by: Steven Rostedt --- arch/x86/include/asm/alternative.h | 8 +++++ arch/x86/kernel/alternative.c | 64 ++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/ftrace.c | 63 +------------------------------------ arch/x86/kernel/setup.c | 6 ++++ 4 files changed, 79 insertions(+), 62 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index bc6abb7bc7e..27a35b68918 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -180,4 +180,12 @@ static inline void apply_paravirt(struct paravirt_patch_site *start, extern void *text_poke(void *addr, const void *opcode, size_t len); extern void *text_poke_smp(void *addr, const void *opcode, size_t len); +#if defined(CONFIG_DYNAMIC_FTRACE) +#define IDEAL_NOP_SIZE_5 5 +extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5]; +extern void arch_init_ideal_nop5(void); +#else +static inline void arch_init_ideal_nop5(void) {} +#endif + #endif /* _ASM_X86_ALTERNATIVE_H */ diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index f65ab8b014c..1849d8036ee 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -641,3 +641,67 @@ void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len) return addr; } +#if defined(CONFIG_DYNAMIC_FTRACE) + +unsigned char ideal_nop5[IDEAL_NOP_SIZE_5]; + +void __init arch_init_ideal_nop5(void) +{ + extern const unsigned char ftrace_test_p6nop[]; + extern const unsigned char ftrace_test_nop5[]; + extern const unsigned char ftrace_test_jmp[]; + int faulted = 0; + + /* + * There is no good nop for all x86 archs. + * We will default to using the P6_NOP5, but first we + * will test to make sure that the nop will actually + * work on this CPU. If it faults, we will then + * go to a lesser efficient 5 byte nop. If that fails + * we then just use a jmp as our nop. This isn't the most + * efficient nop, but we can not use a multi part nop + * since we would then risk being preempted in the middle + * of that nop, and if we enabled tracing then, it might + * cause a system crash. + * + * TODO: check the cpuid to determine the best nop. + */ + asm volatile ( + "ftrace_test_jmp:" + "jmp ftrace_test_p6nop\n" + "nop\n" + "nop\n" + "nop\n" /* 2 byte jmp + 3 bytes */ + "ftrace_test_p6nop:" + P6_NOP5 + "jmp 1f\n" + "ftrace_test_nop5:" + ".byte 0x66,0x66,0x66,0x66,0x90\n" + "1:" + ".section .fixup, \"ax\"\n" + "2: movl $1, %0\n" + " jmp ftrace_test_nop5\n" + "3: movl $2, %0\n" + " jmp 1b\n" + ".previous\n" + _ASM_EXTABLE(ftrace_test_p6nop, 2b) + _ASM_EXTABLE(ftrace_test_nop5, 3b) + : "=r"(faulted) : "0" (faulted)); + + switch (faulted) { + case 0: + pr_info("converting mcount calls to 0f 1f 44 00 00\n"); + memcpy(ideal_nop5, ftrace_test_p6nop, IDEAL_NOP_SIZE_5); + break; + case 1: + pr_info("converting mcount calls to 66 66 66 66 90\n"); + memcpy(ideal_nop5, ftrace_test_nop5, IDEAL_NOP_SIZE_5); + break; + case 2: + pr_info("converting mcount calls to jmp . 
+ 5\n"); + memcpy(ideal_nop5, ftrace_test_jmp, IDEAL_NOP_SIZE_5); + break; + } + +} +#endif diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index cd37469b54e..3afb33f14d2 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -257,14 +257,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code) return mod_code_status; } - - - -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE]; - static unsigned char *ftrace_nop_replace(void) { - return ftrace_nop; + return ideal_nop5; } static int @@ -338,62 +333,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func) int __init ftrace_dyn_arch_init(void *data) { - extern const unsigned char ftrace_test_p6nop[]; - extern const unsigned char ftrace_test_nop5[]; - extern const unsigned char ftrace_test_jmp[]; - int faulted = 0; - - /* - * There is no good nop for all x86 archs. - * We will default to using the P6_NOP5, but first we - * will test to make sure that the nop will actually - * work on this CPU. If it faults, we will then - * go to a lesser efficient 5 byte nop. If that fails - * we then just use a jmp as our nop. This isn't the most - * efficient nop, but we can not use a multi part nop - * since we would then risk being preempted in the middle - * of that nop, and if we enabled tracing then, it might - * cause a system crash. - * - * TODO: check the cpuid to determine the best nop. - */ - asm volatile ( - "ftrace_test_jmp:" - "jmp ftrace_test_p6nop\n" - "nop\n" - "nop\n" - "nop\n" /* 2 byte jmp + 3 bytes */ - "ftrace_test_p6nop:" - P6_NOP5 - "jmp 1f\n" - "ftrace_test_nop5:" - ".byte 0x66,0x66,0x66,0x66,0x90\n" - "1:" - ".section .fixup, \"ax\"\n" - "2: movl $1, %0\n" - " jmp ftrace_test_nop5\n" - "3: movl $2, %0\n" - " jmp 1b\n" - ".previous\n" - _ASM_EXTABLE(ftrace_test_p6nop, 2b) - _ASM_EXTABLE(ftrace_test_nop5, 3b) - : "=r"(faulted) : "0" (faulted)); - - switch (faulted) { - case 0: - pr_info("converting mcount calls to 0f 1f 44 00 00\n"); - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE); - break; - case 1: - pr_info("converting mcount calls to 66 66 66 66 90\n"); - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE); - break; - case 2: - pr_info("converting mcount calls to jmp . + 5\n"); - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE); - break; - } - /* The return code is retured via data */ *(unsigned long *)data = 0; diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index c3a4fbb2b99..00e167870f7 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -112,6 +112,7 @@ #include #endif #include +#include /* * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries. @@ -726,6 +727,7 @@ void __init setup_arch(char **cmdline_p) { int acpi = 0; int k8 = 0; + unsigned long flags; #ifdef CONFIG_X86_32 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); @@ -1071,6 +1073,10 @@ void __init setup_arch(char **cmdline_p) x86_init.oem.banner(); mcheck_init(); + + local_irq_save(flags); + arch_init_ideal_nop5(); + local_irq_restore(flags); } #ifdef CONFIG_X86_32 -- cgit v1.2.3-70-g09d2 From fa6f2cc77081792e4edca9168420a3422299ef15 Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Fri, 17 Sep 2010 11:08:56 -0400 Subject: jump label: Make text_poke_early() globally visible Make text_poke_early available outside of alternative.c. The jump label patchset wants to make use of it in order to set up the optimal no-op sequences at run-time. 
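The caller this is aimed at shows up two patches later: at early boot, before SMP comes up, the x86 jump label code pokes the ideal no-op directly over each jump-table entry:

	void arch_jump_label_text_poke_early(jump_label_t addr)
	{
		text_poke_early((void *)addr, ideal_nop5, JUMP_LABEL_NOP_SIZE);
	}

Because text_poke_early() runs before SMP is initialized, no cross-CPU synchronization is needed at that point; later runtime transitions go through text_poke_smp() instead.
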
Signed-off-by: Jason Baron LKML-Reference: <04cfddf2ba77bcabfc3e524f1849d871d6a1cf9d.1284733808.git.jbaron@redhat.com> Signed-off-by: Steven Rostedt --- arch/x86/include/asm/alternative.h | 2 ++ arch/x86/kernel/alternative.c | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 27a35b68918..634bf782dca 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -160,6 +160,8 @@ static inline void apply_paravirt(struct paravirt_patch_site *start, #define __parainstructions_end NULL #endif +extern void *text_poke_early(void *addr, const void *opcode, size_t len); + /* * Clear and restore the kernel write-protection flag on the local CPU. * Allows the kernel to edit read-only pages. diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 1849d8036ee..083bd010d92 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -195,7 +195,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len) extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; extern s32 __smp_locks[], __smp_locks_end[]; -static void *text_poke_early(void *addr, const void *opcode, size_t len); +void *text_poke_early(void *addr, const void *opcode, size_t len); /* Replace instructions with better alternatives for this CPU type. This runs before SMP is initialized to avoid SMP problems with @@ -522,7 +522,7 @@ void __init alternative_instructions(void) * instructions. And on the local CPU you need to be protected again NMI or MCE * handlers seeing an inconsistent instruction while you patch. */ -static void *__init_or_module text_poke_early(void *addr, const void *opcode, +void *__init_or_module text_poke_early(void *addr, const void *opcode, size_t len) { unsigned long flags; -- cgit v1.2.3-70-g09d2 From bf5438fca2950b03c21ad868090cc1a8fcd49536 Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Fri, 17 Sep 2010 11:09:00 -0400 Subject: jump label: Base patch for jump label base patch to implement 'jump labeling'. Based on a new 'asm goto' inline assembly gcc mechanism, we can now branch to labels from an 'asm goto' statment. This allows us to create a 'no-op' fastpath, which can subsequently be patched with a jump to the slowpath code. This is useful for code which might be rarely used, but which we'd like to be able to call, if needed. Tracepoints are the current usecase that these are being implemented for. Acked-by: David S. 
Miller Signed-off-by: Jason Baron LKML-Reference: [ cleaned up some formating ] Signed-off-by: Steven Rostedt --- Makefile | 5 + arch/Kconfig | 3 + arch/x86/include/asm/alternative.h | 3 +- arch/x86/kernel/alternative.c | 2 +- include/asm-generic/vmlinux.lds.h | 10 ++ include/linux/jump_label.h | 58 +++++++ include/linux/module.h | 5 +- kernel/Makefile | 2 +- kernel/jump_label.c | 346 +++++++++++++++++++++++++++++++++++++ kernel/kprobes.c | 1 + kernel/module.c | 6 + scripts/gcc-goto.sh | 5 + 12 files changed, 442 insertions(+), 4 deletions(-) create mode 100644 include/linux/jump_label.h create mode 100644 kernel/jump_label.c create mode 100644 scripts/gcc-goto.sh (limited to 'arch/x86/include') diff --git a/Makefile b/Makefile index 92ab33f16cf..a906378d505 100644 --- a/Makefile +++ b/Makefile @@ -591,6 +591,11 @@ KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) # conserve stack if available KBUILD_CFLAGS += $(call cc-option,-fconserve-stack) +# check for 'asm goto' +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y) + KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO +endif + # Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments # But warn user when we do so warn-assign = \ diff --git a/arch/Kconfig b/arch/Kconfig index 4877a8c8ee1..1462d8492d8 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -158,4 +158,7 @@ config HAVE_PERF_EVENTS_NMI subsystem. Also has support for calculating CPU cycle events to determine how many clock cycles in a given period. +config HAVE_ARCH_JUMP_LABEL + bool + source "kernel/gcov/Kconfig" diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 634bf782dca..76561d20ea2 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -4,6 +4,7 @@ #include #include #include +#include #include /* @@ -182,7 +183,7 @@ extern void *text_poke_early(void *addr, const void *opcode, size_t len); extern void *text_poke(void *addr, const void *opcode, size_t len); extern void *text_poke_smp(void *addr, const void *opcode, size_t len); -#if defined(CONFIG_DYNAMIC_FTRACE) +#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) #define IDEAL_NOP_SIZE_5 5 extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5]; extern void arch_init_ideal_nop5(void); diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 083bd010d92..cb0e6d385f6 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -641,7 +641,7 @@ void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len) return addr; } -#if defined(CONFIG_DYNAMIC_FTRACE) +#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) unsigned char ideal_nop5[IDEAL_NOP_SIZE_5]; diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 8a92a170fb7..ef2af9948ea 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -220,6 +220,8 @@ \ BUG_TABLE \ \ + JUMP_TABLE \ + \ /* PCI quirks */ \ .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \ @@ -563,6 +565,14 @@ #define BUG_TABLE #endif +#define JUMP_TABLE \ + . = ALIGN(8); \ + __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___jump_table) = .; \ + *(__jump_table) \ + VMLINUX_SYMBOL(__stop___jump_table) = .; \ + } + #ifdef CONFIG_PM_TRACE #define TRACEDATA \ . 
= ALIGN(4); \ diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h new file mode 100644 index 00000000000..de58656d28e --- /dev/null +++ b/include/linux/jump_label.h @@ -0,0 +1,58 @@ +#ifndef _LINUX_JUMP_LABEL_H +#define _LINUX_JUMP_LABEL_H + +#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_HAVE_ARCH_JUMP_LABEL) +# include +# define HAVE_JUMP_LABEL +#endif + +enum jump_label_type { + JUMP_LABEL_ENABLE, + JUMP_LABEL_DISABLE +}; + +struct module; + +#ifdef HAVE_JUMP_LABEL + +extern struct jump_entry __start___jump_table[]; +extern struct jump_entry __stop___jump_table[]; + +extern void arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type); +extern void jump_label_update(unsigned long key, enum jump_label_type type); +extern void jump_label_apply_nops(struct module *mod); +extern void arch_jump_label_text_poke_early(jump_label_t addr); + +#define enable_jump_label(key) \ + jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE); + +#define disable_jump_label(key) \ + jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE); + +#else + +#define JUMP_LABEL(key, label) \ +do { \ + if (unlikely(*key)) \ + goto label; \ +} while (0) + +#define enable_jump_label(cond_var) \ +do { \ + *(cond_var) = 1; \ +} while (0) + +#define disable_jump_label(cond_var) \ +do { \ + *(cond_var) = 0; \ +} while (0) + +static inline int jump_label_apply_nops(struct module *mod) +{ + return 0; +} + +#endif + +#endif diff --git a/include/linux/module.h b/include/linux/module.h index 8a6b9fdc7ff..403ac26023c 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -350,7 +350,10 @@ struct module struct tracepoint *tracepoints; unsigned int num_tracepoints; #endif - +#ifdef HAVE_JUMP_LABEL + struct jump_entry *jump_entries; + unsigned int num_jump_entries; +#endif #ifdef CONFIG_TRACING const char **trace_bprintk_fmt_start; unsigned int num_trace_bprintk_fmt; diff --git a/kernel/Makefile b/kernel/Makefile index 0b72d1a74be..d52b473c99a 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -10,7 +10,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \ kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \ - async.o range.o + async.o range.o jump_label.o obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o obj-y += groups.o diff --git a/kernel/jump_label.c b/kernel/jump_label.c new file mode 100644 index 00000000000..460fd40112b --- /dev/null +++ b/kernel/jump_label.c @@ -0,0 +1,346 @@ +/* + * jump label support + * + * Copyright (C) 2009 Jason Baron + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef HAVE_JUMP_LABEL + +#define JUMP_LABEL_HASH_BITS 6 +#define JUMP_LABEL_TABLE_SIZE (1 << JUMP_LABEL_HASH_BITS) +static struct hlist_head jump_label_table[JUMP_LABEL_TABLE_SIZE]; + +/* mutex to protect coming/going of the the jump_label table */ +static DEFINE_MUTEX(jump_label_mutex); + +struct jump_label_entry { + struct hlist_node hlist; + struct jump_entry *table; + int nr_entries; + /* hang modules off here */ + struct hlist_head modules; + unsigned long key; +}; + +struct jump_label_module_entry { + struct hlist_node hlist; + struct jump_entry *table; + int nr_entries; + struct module *mod; +}; + +static int jump_label_cmp(const void *a, const void *b) +{ + const struct jump_entry *jea = a; + const struct jump_entry *jeb = b; + + if (jea->key < jeb->key) + return -1; + + if (jea->key > jeb->key) + 
return 1; + + return 0; +} + +static void +sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop) +{ + unsigned long size; + + size = (((unsigned long)stop - (unsigned long)start) + / sizeof(struct jump_entry)); + sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL); +} + +static struct jump_label_entry *get_jump_label_entry(jump_label_t key) +{ + struct hlist_head *head; + struct hlist_node *node; + struct jump_label_entry *e; + u32 hash = jhash((void *)&key, sizeof(jump_label_t), 0); + + head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)]; + hlist_for_each_entry(e, node, head, hlist) { + if (key == e->key) + return e; + } + return NULL; +} + +static struct jump_label_entry * +add_jump_label_entry(jump_label_t key, int nr_entries, struct jump_entry *table) +{ + struct hlist_head *head; + struct jump_label_entry *e; + u32 hash; + + e = get_jump_label_entry(key); + if (e) + return ERR_PTR(-EEXIST); + + e = kmalloc(sizeof(struct jump_label_entry), GFP_KERNEL); + if (!e) + return ERR_PTR(-ENOMEM); + + hash = jhash((void *)&key, sizeof(jump_label_t), 0); + head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)]; + e->key = key; + e->table = table; + e->nr_entries = nr_entries; + INIT_HLIST_HEAD(&(e->modules)); + hlist_add_head(&e->hlist, head); + return e; +} + +static int +build_jump_label_hashtable(struct jump_entry *start, struct jump_entry *stop) +{ + struct jump_entry *iter, *iter_begin; + struct jump_label_entry *entry; + int count; + + sort_jump_label_entries(start, stop); + iter = start; + while (iter < stop) { + entry = get_jump_label_entry(iter->key); + if (!entry) { + iter_begin = iter; + count = 0; + while ((iter < stop) && + (iter->key == iter_begin->key)) { + iter++; + count++; + } + entry = add_jump_label_entry(iter_begin->key, + count, iter_begin); + if (IS_ERR(entry)) + return PTR_ERR(entry); + } else { + WARN_ONCE(1, KERN_ERR "build_jump_hashtable: unexpected entry!\n"); + return -1; + } + } + return 0; +} + +/*** + * jump_label_update - update jump label text + * @key - key value associated with a a jump label + * @type - enum set to JUMP_LABEL_ENABLE or JUMP_LABEL_DISABLE + * + * Will enable/disable the jump for jump label @key, depending on the + * value of @type. 
+ * + */ + +void jump_label_update(unsigned long key, enum jump_label_type type) +{ + struct jump_entry *iter; + struct jump_label_entry *entry; + struct hlist_node *module_node; + struct jump_label_module_entry *e_module; + int count; + + mutex_lock(&jump_label_mutex); + entry = get_jump_label_entry((jump_label_t)key); + if (entry) { + count = entry->nr_entries; + iter = entry->table; + while (count--) { + if (kernel_text_address(iter->code)) + arch_jump_label_transform(iter, type); + iter++; + } + /* eanble/disable jump labels in modules */ + hlist_for_each_entry(e_module, module_node, &(entry->modules), + hlist) { + count = e_module->nr_entries; + iter = e_module->table; + while (count--) { + if (kernel_text_address(iter->code)) + arch_jump_label_transform(iter, type); + iter++; + } + } + } + mutex_unlock(&jump_label_mutex); +} + +static __init int init_jump_label(void) +{ + int ret; + struct jump_entry *iter_start = __start___jump_table; + struct jump_entry *iter_stop = __stop___jump_table; + struct jump_entry *iter; + + mutex_lock(&jump_label_mutex); + ret = build_jump_label_hashtable(__start___jump_table, + __stop___jump_table); + iter = iter_start; + while (iter < iter_stop) { + arch_jump_label_text_poke_early(iter->code); + iter++; + } + mutex_unlock(&jump_label_mutex); + return ret; +} +early_initcall(init_jump_label); + +#ifdef CONFIG_MODULES + +static struct jump_label_module_entry * +add_jump_label_module_entry(struct jump_label_entry *entry, + struct jump_entry *iter_begin, + int count, struct module *mod) +{ + struct jump_label_module_entry *e; + + e = kmalloc(sizeof(struct jump_label_module_entry), GFP_KERNEL); + if (!e) + return ERR_PTR(-ENOMEM); + e->mod = mod; + e->nr_entries = count; + e->table = iter_begin; + hlist_add_head(&e->hlist, &entry->modules); + return e; +} + +static int add_jump_label_module(struct module *mod) +{ + struct jump_entry *iter, *iter_begin; + struct jump_label_entry *entry; + struct jump_label_module_entry *module_entry; + int count; + + /* if the module doesn't have jump label entries, just return */ + if (!mod->num_jump_entries) + return 0; + + sort_jump_label_entries(mod->jump_entries, + mod->jump_entries + mod->num_jump_entries); + iter = mod->jump_entries; + while (iter < mod->jump_entries + mod->num_jump_entries) { + entry = get_jump_label_entry(iter->key); + iter_begin = iter; + count = 0; + while ((iter < mod->jump_entries + mod->num_jump_entries) && + (iter->key == iter_begin->key)) { + iter++; + count++; + } + if (!entry) { + entry = add_jump_label_entry(iter_begin->key, 0, NULL); + if (IS_ERR(entry)) + return PTR_ERR(entry); + } + module_entry = add_jump_label_module_entry(entry, iter_begin, + count, mod); + if (IS_ERR(module_entry)) + return PTR_ERR(module_entry); + } + return 0; +} + +static void remove_jump_label_module(struct module *mod) +{ + struct hlist_head *head; + struct hlist_node *node, *node_next, *module_node, *module_node_next; + struct jump_label_entry *e; + struct jump_label_module_entry *e_module; + int i; + + /* if the module doesn't have jump label entries, just return */ + if (!mod->num_jump_entries) + return; + + for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { + head = &jump_label_table[i]; + hlist_for_each_entry_safe(e, node, node_next, head, hlist) { + hlist_for_each_entry_safe(e_module, module_node, + module_node_next, + &(e->modules), hlist) { + if (e_module->mod == mod) { + hlist_del(&e_module->hlist); + kfree(e_module); + } + } + if (hlist_empty(&e->modules) && (e->nr_entries == 0)) { + hlist_del(&e->hlist); + 
kfree(e); + } + } + } +} + +static int +jump_label_module_notify(struct notifier_block *self, unsigned long val, + void *data) +{ + struct module *mod = data; + int ret = 0; + + switch (val) { + case MODULE_STATE_COMING: + mutex_lock(&jump_label_mutex); + ret = add_jump_label_module(mod); + if (ret) + remove_jump_label_module(mod); + mutex_unlock(&jump_label_mutex); + break; + case MODULE_STATE_GOING: + mutex_lock(&jump_label_mutex); + remove_jump_label_module(mod); + mutex_unlock(&jump_label_mutex); + break; + } + return ret; +} + +/*** + * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop() + * @mod: module to patch + * + * Allow for run-time selection of the optimal nops. Before the module + * loads patch these with arch_get_jump_label_nop(), which is specified by + * the arch specific jump label code. + */ +void jump_label_apply_nops(struct module *mod) +{ + struct jump_entry *iter; + + /* if the module doesn't have jump label entries, just return */ + if (!mod->num_jump_entries) + return; + + iter = mod->jump_entries; + while (iter < mod->jump_entries + mod->num_jump_entries) { + arch_jump_label_text_poke_early(iter->code); + iter++; + } +} + +struct notifier_block jump_label_module_nb = { + .notifier_call = jump_label_module_notify, + .priority = 0, +}; + +static __init int init_jump_label_module(void) +{ + return register_module_notifier(&jump_label_module_nb); +} +early_initcall(init_jump_label_module); + +#endif /* CONFIG_MODULES */ + +#endif diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 6dd5359e1f0..18904e42a91 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include diff --git a/kernel/module.c b/kernel/module.c index d0b5f8db11b..eba134157ef 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -55,6 +55,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -2308,6 +2309,11 @@ static void find_module_sections(struct module *mod, struct load_info *info) sizeof(*mod->tracepoints), &mod->num_tracepoints); #endif +#ifdef HAVE_JUMP_LABEL + mod->jump_entries = section_objs(info, "__jump_table", + sizeof(*mod->jump_entries), + &mod->num_jump_entries); +#endif #ifdef CONFIG_EVENT_TRACING mod->trace_events = section_objs(info, "_ftrace_events", sizeof(*mod->trace_events), diff --git a/scripts/gcc-goto.sh b/scripts/gcc-goto.sh new file mode 100644 index 00000000000..8e82424be7a --- /dev/null +++ b/scripts/gcc-goto.sh @@ -0,0 +1,5 @@ +#!/bin/sh +# Test for gcc 'asm goto' suport +# Copyright (C) 2010, Jason Baron + +echo "int main(void) { entry: asm goto (\"\"::::entry); return 0; }" | $1 -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y" -- cgit v1.2.3-70-g09d2 From d9f5ab7b1c0a520867af389bab5d5fcdbd0e407e Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Fri, 17 Sep 2010 11:09:22 -0400 Subject: jump label: x86 support add x86 support for jump label. I'm keeping this patch separate so its clear to arch maintainers what was required for x86 support this new feature. Hopefully, it wouldn't be too painful for other archs. 
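With the generic bits from the base patch, a call site built on this ends up looking roughly as follows; trace_key, do_something() and set_tracing() are purely illustrative and not symbols added by the series:

	#include <linux/kernel.h>
	#include <linux/jump_label.h>

	static int trace_key;	/* its address doubles as the jump label key */

	static void do_something(void)
	{
		JUMP_LABEL(&trace_key, trace);	/* 5-byte nop when disabled */
		return;
	trace:
		printk(KERN_DEBUG "slow path hit\n");
	}

	static void set_tracing(int on)
	{
		if (on)
			enable_jump_label(&trace_key);
		else
			disable_jump_label(&trace_key);
	}

When the compiler supports asm goto, the test compiles down to the 5-byte no-op that gets live-patched into a jump; otherwise it degrades to the unlikely(*key) fallback defined in linux/jump_label.h.
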
Signed-off-by: Jason Baron LKML-Reference: [ cleaned up some formatting ] Signed-off-by: Steven Rostedt --- arch/x86/Kconfig | 1 + arch/x86/include/asm/jump_label.h | 47 ++++++++++++++++++++++++++++++++++++ arch/x86/kernel/Makefile | 2 +- arch/x86/kernel/jump_label.c | 50 +++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/module.c | 3 +++ 5 files changed, 102 insertions(+), 1 deletion(-) create mode 100644 arch/x86/include/asm/jump_label.h create mode 100644 arch/x86/kernel/jump_label.c (limited to 'arch/x86/include') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index cea0cd9a316..afcd6632c94 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -59,6 +59,7 @@ config X86 select ANON_INODES select HAVE_ARCH_KMEMCHECK select HAVE_USER_RETURN_NOTIFIER + select HAVE_ARCH_JUMP_LABEL if !CC_OPTIMIZE_FOR_SIZE config INSTRUCTION_DECODER def_bool (KPROBES || PERF_EVENTS) diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h new file mode 100644 index 00000000000..b4a2cb40337 --- /dev/null +++ b/arch/x86/include/asm/jump_label.h @@ -0,0 +1,47 @@ +#ifndef _ASM_X86_JUMP_LABEL_H +#define _ASM_X86_JUMP_LABEL_H + +#ifdef __KERNEL__ + +#include +#include + +#define JUMP_LABEL_NOP_SIZE 5 + +# define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" + +# define JUMP_LABEL(key, label) \ + do { \ + asm goto("1:" \ + JUMP_LABEL_INITIAL_NOP \ + ".pushsection __jump_table, \"a\" \n\t"\ + _ASM_PTR "1b, %l[" #label "], %c0 \n\t" \ + ".popsection \n\t" \ + : : "i" (key) : : label); \ + } while (0) + +#endif /* __KERNEL__ */ + +#ifdef CONFIG_X86_64 + +typedef u64 jump_label_t; + +struct jump_entry { + jump_label_t code; + jump_label_t target; + jump_label_t key; +}; + +#else + +typedef u32 jump_label_t; + +struct jump_entry { + jump_label_t code; + jump_label_t target; + jump_label_t key; +}; + +#endif + +#endif diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 0925676266b..24fa1718ddb 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -32,7 +32,7 @@ GCOV_PROFILE_paravirt.o := n obj-y := process_$(BITS).o signal.o entry_$(BITS).o obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o obj-y += time.o ioport.o ldt.o dumpstack.o -obj-y += setup.o x86_init.o i8259.o irqinit.o +obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o obj-$(CONFIG_X86_VISWS) += visws_quirks.o obj-$(CONFIG_X86_32) += probe_roms_32.o obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c new file mode 100644 index 00000000000..961b6b30ba9 --- /dev/null +++ b/arch/x86/kernel/jump_label.c @@ -0,0 +1,50 @@ +/* + * jump label x86 support + * + * Copyright (C) 2009 Jason Baron + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef HAVE_JUMP_LABEL + +union jump_code_union { + char code[JUMP_LABEL_NOP_SIZE]; + struct { + char jump; + int offset; + } __attribute__((packed)); +}; + +void arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type) +{ + union jump_code_union code; + + if (type == JUMP_LABEL_ENABLE) { + code.jump = 0xe9; + code.offset = entry->target - + (entry->code + JUMP_LABEL_NOP_SIZE); + } else + memcpy(&code, ideal_nop5, JUMP_LABEL_NOP_SIZE); + get_online_cpus(); + mutex_lock(&text_mutex); + text_poke_smp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE); + mutex_unlock(&text_mutex); + put_online_cpus(); +} + +void arch_jump_label_text_poke_early(jump_label_t addr) +{ + text_poke_early((void *)addr, 
ideal_nop5, JUMP_LABEL_NOP_SIZE); +} + +#endif diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index e0bc186d750..5399f58de7e 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -239,6 +239,9 @@ int module_finalize(const Elf_Ehdr *hdr, apply_paravirt(pseg, pseg + para->sh_size); } + /* make jump label nops */ + jump_label_apply_nops(me); + return module_bug_finalize(hdr, sechdrs, me); } -- cgit v1.2.3-70-g09d2 From 95fccd465eefb3d6bf80dae0496607b534d38313 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 22 Sep 2010 17:37:43 -0400 Subject: jump label: Remove duplicate structure for x86 The structure in the x86 jump label code uses the typedef jump_label_t, which is defined by the #ifdef arch type. The structure does not need to be duplicated there. Signed-off-by: Steven Rostedt --- arch/x86/include/asm/jump_label.h | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h index b4a2cb40337..f52d42e8058 100644 --- a/arch/x86/include/asm/jump_label.h +++ b/arch/x86/include/asm/jump_label.h @@ -23,18 +23,10 @@ #endif /* __KERNEL__ */ #ifdef CONFIG_X86_64 - typedef u64 jump_label_t; - -struct jump_entry { - jump_label_t code; - jump_label_t target; - jump_label_t key; -}; - #else - typedef u32 jump_label_t; +#endif struct jump_entry { jump_label_t code; @@ -43,5 +35,3 @@ struct jump_entry { }; #endif - -#endif -- cgit v1.2.3-70-g09d2 From 3e3c486012a3dbb113c0ca15ee265d309d77aea9 Mon Sep 17 00:00:00 2001 From: Daniel Drake Date: Thu, 23 Sep 2010 17:28:46 +0100 Subject: x86, olpc: Rework BIOS signature check The XO-1.5 laptop is not currently detected as an OLPC machine because it fails this XO-1-centric check. Now that we have OLPC OFW support in the kernel, a more sensible check is to see if we found OFW during boot and check the architecture property. Also remove a now-meaningless codepath, as we're always going to have OFW support with OLPC. Signed-off-by: Daniel Drake LKML-Reference: <20100923162846.D8D409D401B@zog.reactivated.net> Cc: Andres Salomon Cc: Grant Likely Signed-off-by: H. Peter Anvin --- arch/x86/Kconfig | 3 ++- arch/x86/include/asm/olpc_ofw.h | 4 +++ arch/x86/kernel/olpc.c | 58 ++++++++++++++++++----------------------- arch/x86/kernel/olpc_ofw.c | 6 +++++ 4 files changed, 37 insertions(+), 34 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 0ed4c9bfcd1..c2552557134 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2061,6 +2061,7 @@ config SCx200HR_TIMER config OLPC bool "One Laptop Per Child support" select GPIOLIB + select OLPC_OPENFIRMWARE ---help--- Add support for detecting the unique features of the OLPC XO hardware. @@ -2068,7 +2069,7 @@ config OLPC config OLPC_OPENFIRMWARE bool "Support for OLPC's Open Firmware" depends on !X86_64 && !X86_PAE - default y if OLPC + default n help This option adds support for the implementation of Open Firmware that is used on the OLPC XO-1 Children's Machine. 
diff --git a/arch/x86/include/asm/olpc_ofw.h b/arch/x86/include/asm/olpc_ofw.h index 08fde475cb3..2a8478140bb 100644 --- a/arch/x86/include/asm/olpc_ofw.h +++ b/arch/x86/include/asm/olpc_ofw.h @@ -21,10 +21,14 @@ extern void olpc_ofw_detect(void); /* install OFW's pde permanently into the kernel's pgtable */ extern void setup_olpc_ofw_pgd(void); +/* check if OFW was detected during boot */ +extern bool olpc_ofw_present(void); + #else /* !CONFIG_OLPC_OPENFIRMWARE */ static inline void olpc_ofw_detect(void) { } static inline void setup_olpc_ofw_pgd(void) { } +static inline bool olpc_ofw_present(void) { return false; } #endif /* !CONFIG_OLPC_OPENFIRMWARE */ diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c index 635888cf050..37c49934c78 100644 --- a/arch/x86/kernel/olpc.c +++ b/arch/x86/kernel/olpc.c @@ -183,8 +183,21 @@ err: } EXPORT_SYMBOL_GPL(olpc_ec_cmd); -#ifdef CONFIG_OLPC_OPENFIRMWARE -static void __init platform_detect(void) +static bool __init check_ofw_architecture(void) +{ + size_t propsize; + char olpc_arch[5]; + const void *args[] = { NULL, "architecture", olpc_arch, (void *)5 }; + void *res[] = { &propsize }; + + if (olpc_ofw("getprop", args, res)) { + printk(KERN_ERR "ofw: getprop call failed!\n"); + return false; + } + return propsize == 5 && strncmp("OLPC", olpc_arch, 5) == 0; +} + +static u32 __init get_board_revision(void) { size_t propsize; __be32 rev; @@ -193,46 +206,27 @@ static void __init platform_detect(void) if (olpc_ofw("getprop", args, res) || propsize != 4) { printk(KERN_ERR "ofw: getprop call failed!\n"); - rev = cpu_to_be32(0); + return cpu_to_be32(0); } - olpc_platform_info.boardrev = be32_to_cpu(rev); + return be32_to_cpu(rev); } -#else -static void __init platform_detect(void) + +static bool __init platform_detect(void) { - /* stopgap until OFW support is added to the kernel */ - olpc_platform_info.boardrev = olpc_board(0xc2); + if (!check_ofw_architecture()) + return false; + olpc_platform_info.flags |= OLPC_F_PRESENT; + olpc_platform_info.boardrev = get_board_revision(); + return true; } -#endif static int __init olpc_init(void) { - unsigned char *romsig; - - /* The ioremap check is dangerous; limit what we run it on */ - if (!is_geode() || cs5535_has_vsa2()) + if (!olpc_ofw_present() || !platform_detect()) return 0; spin_lock_init(&ec_lock); - romsig = ioremap(0xffffffc0, 16); - if (!romsig) - return 0; - - if (strncmp(romsig, "CL1 Q", 7)) - goto unmap; - if (strncmp(romsig+6, romsig+13, 3)) { - printk(KERN_INFO "OLPC BIOS signature looks invalid. 
" - "Assuming not OLPC\n"); - goto unmap; - } - - printk(KERN_INFO "OLPC board with OpenFirmware %.16s\n", romsig); - olpc_platform_info.flags |= OLPC_F_PRESENT; - - /* get the platform revision */ - platform_detect(); - /* assume B1 and above models always have a DCON */ if (olpc_board_at_least(olpc_board(0xb1))) olpc_platform_info.flags |= OLPC_F_DCON; @@ -254,8 +248,6 @@ static int __init olpc_init(void) olpc_platform_info.boardrev >> 4, olpc_platform_info.ecver); -unmap: - iounmap(romsig); return 0; } diff --git a/arch/x86/kernel/olpc_ofw.c b/arch/x86/kernel/olpc_ofw.c index 3218aa71ab5..78732046437 100644 --- a/arch/x86/kernel/olpc_ofw.c +++ b/arch/x86/kernel/olpc_ofw.c @@ -74,6 +74,12 @@ int __olpc_ofw(const char *name, int nr_args, const void **args, int nr_res, } EXPORT_SYMBOL_GPL(__olpc_ofw); +bool olpc_ofw_present(void) +{ + return olpc_ofw_cif != NULL; +} +EXPORT_SYMBOL_GPL(olpc_ofw_present); + /* OFW cif _should_ be above this address */ #define OFW_MIN 0xff000000 -- cgit v1.2.3-70-g09d2 From 6057b4d331f19a3ea51aec463ea7839c128b3227 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Thu, 30 Sep 2010 14:38:57 +0200 Subject: x86, amd: Extract compute unit information for AMD CPUs Get compute unit information from CPUID Fn8000_001E_EBX. (See AMD CPUID Specification - publication # 25481, revision 2.34, September 2010.) Note that each core on a compute unit still has a core_id of its own. Signed-off-by: Andreas Herrmann LKML-Reference: <20100930123857.GE20545@loge.amd.com> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/processor.h | 2 ++ arch/x86/kernel/cpu/amd.c | 20 +++++++++++++++----- 2 files changed, 17 insertions(+), 5 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 325b7bdbeba..69e80c2ec6c 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -110,6 +110,8 @@ struct cpuinfo_x86 { u16 phys_proc_id; /* Core id: */ u16 cpu_core_id; + /* Compute unit id */ + u8 compute_unit_id; /* Index into per_cpu list: */ u16 cpu_index; #endif diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 7e6a37d2425..70168ab88b7 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -256,21 +256,29 @@ static int __cpuinit nearby_node(int apicid) * Fixup core topology information for * (1) AMD multi-node processors * Assumption: Number of cores in each internal node is the same. 
+ * (2) AMD processors supporting compute units */ #ifdef CONFIG_X86_HT static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) { - u32 nodes, cores_per_node; + u32 nodes; u8 node_id; - unsigned long long value; int cpu = smp_processor_id(); /* get information required for multi-node processors */ if (cpu_has(c, X86_FEATURE_TOPOEXT)) { - value = cpuid_ecx(0x8000001e); - nodes = ((value >> 8) & 7) + 1; - node_id = value & 7; + u32 eax, ebx, ecx, edx; + + cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); + nodes = ((ecx >> 8) & 7) + 1; + node_id = ecx & 7; + + /* get compute unit information */ + smp_num_siblings = ((ebx >> 8) & 3) + 1; + c->compute_unit_id = ebx & 0xff; } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { + u64 value; + rdmsrl(MSR_FAM10H_NODE_ID, value); nodes = ((value >> 3) & 7) + 1; node_id = value & 7; @@ -279,6 +287,8 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) /* fixup multi-node processor information */ if (nodes > 1) { + u32 cores_per_node; + set_cpu_cap(c, X86_FEATURE_AMD_DCM); cores_per_node = c->x86_max_cores / nodes; -- cgit v1.2.3-70-g09d2 From 1d931264af0f10649b35afa8fbd2e169da51ac08 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Tue, 5 Oct 2010 16:15:15 -0700 Subject: x86-32, memblock: Make add_highpages honor early reserved ranges Originally the only early reserved range that is overlapped with high pages is "KVA RAM", but we already do remove that from the active ranges. However, It turns out Xen could have that kind of overlapping to support memory ballooning.x So we need to make add_highpage_with_active_regions() to subtract memblock reserved just like low ram; this is the proper design anyway. In this patch, refactering get_freel_all_memory_range() to make it can be used by add_highpage_with_active_regions(). Also we don't need to remove "KVA RAM" from active ranges. Signed-off-by: Yinghai Lu LKML-Reference: <4CABB183.1040607@kernel.org> Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/memblock.h | 2 ++ arch/x86/mm/init_32.c | 53 +++++++++++++---------------------------- arch/x86/mm/memblock.c | 19 +++++++++++---- arch/x86/mm/numa_32.c | 2 -- 4 files changed, 33 insertions(+), 43 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h index 2c304bb6e07..19ae14ba697 100644 --- a/arch/x86/include/asm/memblock.h +++ b/arch/x86/include/asm/memblock.h @@ -9,6 +9,8 @@ void memblock_x86_to_bootmem(u64 start, u64 end); void memblock_x86_reserve_range(u64 start, u64 end, char *name); void memblock_x86_free_range(u64 start, u64 end); struct range; +int __get_free_all_memory_range(struct range **range, int nodeid, + unsigned long start_pfn, unsigned long end_pfn); int get_free_all_memory_range(struct range **rangep, int nodeid); void memblock_x86_register_active_regions(int nid, unsigned long start_pfn, diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index c2385d7ae31..85467099d6d 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -423,49 +423,28 @@ static void __init add_one_highpage_init(struct page *page) totalhigh_pages++; } -struct add_highpages_data { - unsigned long start_pfn; - unsigned long end_pfn; -}; - -static int __init add_highpages_work_fn(unsigned long start_pfn, - unsigned long end_pfn, void *datax) +void __init add_highpages_with_active_regions(int nid, + unsigned long start_pfn, unsigned long end_pfn) { - int node_pfn; - struct page *page; - unsigned long final_start_pfn, final_end_pfn; - struct add_highpages_data *data; + struct range *range; + int nr_range; + int i; - data = (struct add_highpages_data *)datax; + nr_range = __get_free_all_memory_range(&range, nid, start_pfn, end_pfn); - final_start_pfn = max(start_pfn, data->start_pfn); - final_end_pfn = min(end_pfn, data->end_pfn); - if (final_start_pfn >= final_end_pfn) - return 0; + for (i = 0; i < nr_range; i++) { + struct page *page; + int node_pfn; - for (node_pfn = final_start_pfn; node_pfn < final_end_pfn; - node_pfn++) { - if (!pfn_valid(node_pfn)) - continue; - page = pfn_to_page(node_pfn); - add_one_highpage_init(page); + for (node_pfn = range[i].start; node_pfn < range[i].end; + node_pfn++) { + if (!pfn_valid(node_pfn)) + continue; + page = pfn_to_page(node_pfn); + add_one_highpage_init(page); + } } - - return 0; - } - -void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn, - unsigned long end_pfn) -{ - struct add_highpages_data data; - - data.start_pfn = start_pfn; - data.end_pfn = end_pfn; - - work_with_active_regions(nid, add_highpages_work_fn, &data); -} - #else static inline void permanent_kmaps_init(pgd_t *pgd_base) { diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c index 50ecbc59757..fd7a0404945 100644 --- a/arch/x86/mm/memblock.c +++ b/arch/x86/mm/memblock.c @@ -156,7 +156,8 @@ static int __init count_early_node_map(int nodeid) return data.nr; } -int __init get_free_all_memory_range(struct range **rangep, int nodeid) +int __init __get_free_all_memory_range(struct range **rangep, int nodeid, + unsigned long start_pfn, unsigned long end_pfn) { int count; struct range *range; @@ -172,9 +173,9 @@ int __init get_free_all_memory_range(struct range **rangep, int nodeid) * at first */ nr_range = add_from_early_node_map(range, count, nr_range, nodeid); -#ifdef CONFIG_X86_32 - subtract_range(range, count, max_low_pfn, -1ULL); -#endif + subtract_range(range, count, 0, start_pfn); + subtract_range(range, count, end_pfn, -1ULL); + 
memblock_x86_subtract_reserved(range, count); nr_range = clean_sort_range(range, count); @@ -182,6 +183,16 @@ int __init get_free_all_memory_range(struct range **rangep, int nodeid) return nr_range; } +int __init get_free_all_memory_range(struct range **rangep, int nodeid) +{ + unsigned long end_pfn = -1UL; + +#ifdef CONFIG_X86_32 + end_pfn = max_low_pfn; +#endif + return __get_free_all_memory_range(rangep, nodeid, 0, end_pfn); +} + static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free) { int i, count; diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 70ddeb75ba2..84a3e4c9f27 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -326,8 +326,6 @@ static __init unsigned long calculate_numa_remap_pages(void) "KVA RAM"); node_remap_start_pfn[nid] = node_kva_final>>PAGE_SHIFT; - remove_active_range(nid, node_remap_start_pfn[nid], - node_remap_start_pfn[nid] + size); } printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n", reserve_pages); -- cgit v1.2.3-70-g09d2 From 161b0275e2311b8bd9609d5f32e2b703cf5d70a8 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 2 Sep 2010 19:35:22 -0700 Subject: x86, mm: Add RESERVE_BRK_ARRAY() helper This is useful when converting static arrays into boot-time brk allocated objects. Signed-off-by: Jeremy Fitzhardinge LKML-Reference: <4C805EEA.1080205@goop.org> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/setup.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index ef292c792d7..d6763b139a8 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -93,6 +93,11 @@ void *extend_brk(size_t size, size_t align); : : "i" (sz)); \ } +/* Helper for reserving space for arrays of things */ +#define RESERVE_BRK_ARRAY(type, name, entries) \ + type *name; \ + RESERVE_BRK(name, sizeof(type) * entries) + #ifdef __i386__ void __init i386_start_kernel(void); -- cgit v1.2.3-70-g09d2 From df9ee29270c11dba7d0fe0b83ce47a4d8e8d2101 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 7 Oct 2010 14:08:55 +0100 Subject: Fix IRQ flag handling naming Fix the IRQ flag handling naming. In linux/irqflags.h under one configuration, it maps: local_irq_enable() -> raw_local_irq_enable() local_irq_disable() -> raw_local_irq_disable() local_irq_save() -> raw_local_irq_save() ... and under the other configuration, it maps: raw_local_irq_enable() -> local_irq_enable() raw_local_irq_disable() -> local_irq_disable() raw_local_irq_save() -> local_irq_save() ... This is quite confusing. There should be one set of names expected of the arch, and this should be wrapped to give another set of names that are expected by users of this facility. Change this to have the arch provide: flags = arch_local_save_flags() flags = arch_local_irq_save() arch_local_irq_restore(flags) arch_local_irq_disable() arch_local_irq_enable() arch_irqs_disabled_flags(flags) arch_irqs_disabled() arch_safe_halt() Then linux/irqflags.h wraps these to provide: raw_local_save_flags(flags) raw_local_irq_save(flags) raw_local_irq_restore(flags) raw_local_irq_disable() raw_local_irq_enable() raw_irqs_disabled_flags(flags) raw_irqs_disabled() raw_safe_halt() with type checking on the flags 'arguments', and then wraps those to provide: local_save_flags(flags) local_irq_save(flags) local_irq_restore(flags) local_irq_disable() local_irq_enable() irqs_disabled_flags(flags) irqs_disabled() safe_halt() with tracing included if enabled. 
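(Illustration only, not part of the patch: a minimal, self-contained sketch of the intended layering, compilable as ordinary userspace C. The arch_local_irq_save()/arch_local_irq_restore() pair, the fake_irq_state variable, and the simplified typecheck() macro are stand-ins invented for this sketch; the real linux/irqflags.h wrappers cover more entry points and add interrupt tracing.)

#include <stdio.h>

static unsigned long fake_irq_state = 1;	/* 1 = "interrupts enabled" stand-in */

/* arch layer: what each architecture is expected to provide */
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = fake_irq_state;
	fake_irq_state = 0;			/* "disable interrupts" */
	return flags;
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	fake_irq_state = flags;
}

/* raw layer: wraps the arch functions and type-checks the flags argument
 * (GCC-style statement expression, modelled loosely on the kernel's typecheck) */
#define typecheck(type, x) \
	({ type __dummy; typeof(x) __dummy2; (void)(&__dummy == &__dummy2); 1; })

#define raw_local_irq_save(flags) \
	do { typecheck(unsigned long, flags); (flags) = arch_local_irq_save(); } while (0)

#define raw_local_irq_restore(flags) \
	do { typecheck(unsigned long, flags); arch_local_irq_restore(flags); } while (0)

/* outer layer: what generic code calls; a real kernel adds irq tracing here */
#define local_irq_save(flags)		raw_local_irq_save(flags)
#define local_irq_restore(flags)	raw_local_irq_restore(flags)

int main(void)
{
	unsigned long flags;

	local_irq_save(flags);
	printf("state while saved/disabled: %lu\n", fake_irq_state);
	local_irq_restore(flags);
	printf("state after restore:        %lu\n", fake_irq_state);
	return 0;
}
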
The arch functions can now all be inline functions rather than some of them having to be macros. Signed-off-by: David Howells [X86, FRV, MN10300] Signed-off-by: Chris Metcalf [Tile] Signed-off-by: Michal Simek [Microblaze] Tested-by: Catalin Marinas [ARM] Acked-by: Thomas Gleixner Acked-by: Haavard Skinnemoen [AVR] Acked-by: Tony Luck [IA-64] Acked-by: Hirokazu Takata [M32R] Acked-by: Greg Ungerer [M68K/M68KNOMMU] Acked-by: Ralf Baechle [MIPS] Acked-by: Kyle McMartin [PA-RISC] Acked-by: Paul Mackerras [PowerPC] Acked-by: Martin Schwidefsky [S390] Acked-by: Chen Liqin [Score] Acked-by: Matt Fleming [SH] Acked-by: David S. Miller [Sparc] Acked-by: Chris Zankel [Xtensa] Reviewed-by: Richard Henderson [Alpha] Reviewed-by: Yoshinori Sato [H8300] Cc: starvik@axis.com [CRIS] Cc: jesper.nilsson@axis.com [CRIS] Cc: linux-cris-kernel@axis.com --- arch/alpha/include/asm/irqflags.h | 67 ++++++++++ arch/alpha/include/asm/system.h | 28 ----- arch/arm/include/asm/irqflags.h | 145 +++++++++++++--------- arch/avr32/include/asm/irqflags.h | 29 ++--- arch/blackfin/include/asm/irqflags.h | 12 -- arch/blackfin/kernel/trace.c | 1 + arch/cris/include/arch-v10/arch/irqflags.h | 45 +++++++ arch/cris/include/arch-v10/arch/system.h | 16 --- arch/cris/include/arch-v32/arch/irqflags.h | 46 +++++++ arch/cris/include/arch-v32/arch/system.h | 22 ---- arch/cris/include/asm/irqflags.h | 1 + arch/cris/include/asm/system.h | 1 + arch/frv/include/asm/irqflags.h | 158 +++++++++++++++++++++++ arch/frv/include/asm/system.h | 136 -------------------- arch/h8300/include/asm/irqflags.h | 43 +++++++ arch/h8300/include/asm/system.h | 24 +--- arch/ia64/include/asm/irqflags.h | 94 ++++++++++++++ arch/ia64/include/asm/system.h | 76 ------------ arch/m32r/include/asm/irqflags.h | 104 ++++++++++++++++ arch/m32r/include/asm/system.h | 66 +--------- arch/m68k/include/asm/entry_no.h | 2 +- arch/m68k/include/asm/irqflags.h | 76 ++++++++++++ arch/m68k/include/asm/system_mm.h | 25 +--- arch/m68k/include/asm/system_no.h | 57 +-------- arch/m68knommu/kernel/asm-offsets.c | 2 - arch/m68knommu/platform/coldfire/head.S | 1 + arch/microblaze/include/asm/irqflags.h | 193 +++++++++++++++-------------- arch/mips/include/asm/irqflags.h | 53 ++++---- arch/mips/kernel/smtc.c | 4 +- arch/mn10300/include/asm/irqflags.h | 123 ++++++++++++++++++ arch/mn10300/include/asm/system.h | 109 +--------------- arch/mn10300/kernel/entry.S | 1 + arch/parisc/include/asm/irqflags.h | 46 +++++++ arch/parisc/include/asm/system.h | 19 +-- arch/powerpc/include/asm/hw_irq.h | 113 ++++++++++------- arch/powerpc/include/asm/irqflags.h | 2 +- arch/powerpc/kernel/exceptions-64s.S | 4 +- arch/powerpc/kernel/irq.c | 4 +- arch/s390/include/asm/irqflags.h | 51 ++++---- arch/s390/include/asm/system.h | 2 +- arch/s390/kernel/mem_detect.c | 4 +- arch/s390/mm/init.c | 3 +- arch/s390/mm/maccess.c | 4 +- arch/score/include/asm/irqflags.h | 187 +++++++++++++++------------- arch/sh/include/asm/irqflags.h | 4 +- arch/sh/kernel/irq_32.c | 12 +- arch/sparc/include/asm/irqflags_32.h | 35 +++--- arch/sparc/include/asm/irqflags_64.h | 29 ++--- arch/sparc/kernel/irq_32.c | 13 +- arch/sparc/prom/p1275.c | 2 +- arch/tile/include/asm/irqflags.h | 36 +++--- arch/x86/include/asm/irqflags.h | 32 ++--- arch/x86/include/asm/paravirt.h | 16 +-- arch/x86/xen/spinlock.c | 2 +- arch/xtensa/include/asm/irqflags.h | 58 +++++++++ arch/xtensa/include/asm/system.h | 33 +---- drivers/s390/char/sclp.c | 2 +- include/asm-generic/atomic.h | 5 +- include/asm-generic/cmpxchg-local.h | 1 + include/asm-generic/hardirq.h | 1 
- include/asm-generic/irqflags.h | 52 ++++---- include/linux/irqflags.h | 107 +++++++++------- include/linux/spinlock.h | 1 + 63 files changed, 1482 insertions(+), 1158 deletions(-) create mode 100644 arch/alpha/include/asm/irqflags.h create mode 100644 arch/cris/include/arch-v10/arch/irqflags.h create mode 100644 arch/cris/include/arch-v32/arch/irqflags.h create mode 100644 arch/cris/include/asm/irqflags.h create mode 100644 arch/frv/include/asm/irqflags.h create mode 100644 arch/h8300/include/asm/irqflags.h create mode 100644 arch/ia64/include/asm/irqflags.h create mode 100644 arch/m32r/include/asm/irqflags.h create mode 100644 arch/m68k/include/asm/irqflags.h create mode 100644 arch/mn10300/include/asm/irqflags.h create mode 100644 arch/parisc/include/asm/irqflags.h create mode 100644 arch/xtensa/include/asm/irqflags.h (limited to 'arch/x86/include') diff --git a/arch/alpha/include/asm/irqflags.h b/arch/alpha/include/asm/irqflags.h new file mode 100644 index 00000000000..299bbc7e9d7 --- /dev/null +++ b/arch/alpha/include/asm/irqflags.h @@ -0,0 +1,67 @@ +#ifndef __ALPHA_IRQFLAGS_H +#define __ALPHA_IRQFLAGS_H + +#include + +#define IPL_MIN 0 +#define IPL_SW0 1 +#define IPL_SW1 2 +#define IPL_DEV0 3 +#define IPL_DEV1 4 +#define IPL_TIMER 5 +#define IPL_PERF 6 +#define IPL_POWERFAIL 6 +#define IPL_MCHECK 7 +#define IPL_MAX 7 + +#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK +#undef IPL_MIN +#define IPL_MIN __min_ipl +extern int __min_ipl; +#endif + +#define getipl() (rdps() & 7) +#define setipl(ipl) ((void) swpipl(ipl)) + +static inline unsigned long arch_local_save_flags(void) +{ + return rdps(); +} + +static inline void arch_local_irq_disable(void) +{ + setipl(IPL_MAX); + barrier(); +} + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags = swpipl(IPL_MAX); + barrier(); + return flags; +} + +static inline void arch_local_irq_enable(void) +{ + barrier(); + setipl(IPL_MIN); +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ + barrier(); + setipl(flags); + barrier(); +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return flags == IPL_MAX; +} + +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(getipl()); +} + +#endif /* __ALPHA_IRQFLAGS_H */ diff --git a/arch/alpha/include/asm/system.h b/arch/alpha/include/asm/system.h index 5aa40cca4f2..9f78e693463 100644 --- a/arch/alpha/include/asm/system.h +++ b/arch/alpha/include/asm/system.h @@ -259,34 +259,6 @@ __CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long); __CALL_PAL_W1(wrusp, unsigned long); __CALL_PAL_W1(wrvptptr, unsigned long); -#define IPL_MIN 0 -#define IPL_SW0 1 -#define IPL_SW1 2 -#define IPL_DEV0 3 -#define IPL_DEV1 4 -#define IPL_TIMER 5 -#define IPL_PERF 6 -#define IPL_POWERFAIL 6 -#define IPL_MCHECK 7 -#define IPL_MAX 7 - -#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK -#undef IPL_MIN -#define IPL_MIN __min_ipl -extern int __min_ipl; -#endif - -#define getipl() (rdps() & 7) -#define setipl(ipl) ((void) swpipl(ipl)) - -#define local_irq_disable() do { setipl(IPL_MAX); barrier(); } while(0) -#define local_irq_enable() do { barrier(); setipl(IPL_MIN); } while(0) -#define local_save_flags(flags) ((flags) = rdps()) -#define local_irq_save(flags) do { (flags) = swpipl(IPL_MAX); barrier(); } while(0) -#define local_irq_restore(flags) do { barrier(); setipl(flags); barrier(); } while(0) - -#define irqs_disabled() (getipl() == IPL_MAX) - /* * TB routines.. 
*/ diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h index 6d09974e664..1e6cca55c75 100644 --- a/arch/arm/include/asm/irqflags.h +++ b/arch/arm/include/asm/irqflags.h @@ -10,66 +10,85 @@ */ #if __LINUX_ARM_ARCH__ >= 6 -#define raw_local_irq_save(x) \ - ({ \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ local_irq_save\n" \ - "cpsid i" \ - : "=r" (x) : : "memory", "cc"); \ - }) +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + + asm volatile( + " mrs %0, cpsr @ arch_local_irq_save\n" + " cpsid i" + : "=r" (flags) : : "memory", "cc"); + return flags; +} + +static inline void arch_local_irq_enable(void) +{ + asm volatile( + " cpsie i @ arch_local_irq_enable" + : + : + : "memory", "cc"); +} + +static inline void arch_local_irq_disable(void) +{ + asm volatile( + " cpsid i @ arch_local_irq_disable" + : + : + : "memory", "cc"); +} -#define raw_local_irq_enable() __asm__("cpsie i @ __sti" : : : "memory", "cc") -#define raw_local_irq_disable() __asm__("cpsid i @ __cli" : : : "memory", "cc") #define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc") #define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc") - #else /* * Save the current interrupt enable state & disable IRQs */ -#define raw_local_irq_save(x) \ - ({ \ - unsigned long temp; \ - (void) (&temp == &x); \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ local_irq_save\n" \ -" orr %1, %0, #128\n" \ -" msr cpsr_c, %1" \ - : "=r" (x), "=r" (temp) \ - : \ - : "memory", "cc"); \ - }) - +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags, temp; + + asm volatile( + " mrs %0, cpsr @ arch_local_irq_save\n" + " orr %1, %0, #128\n" + " msr cpsr_c, %1" + : "=r" (flags), "=r" (temp) + : + : "memory", "cc"); + return flags; +} + /* * Enable IRQs */ -#define raw_local_irq_enable() \ - ({ \ - unsigned long temp; \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ local_irq_enable\n" \ -" bic %0, %0, #128\n" \ -" msr cpsr_c, %0" \ - : "=r" (temp) \ - : \ - : "memory", "cc"); \ - }) +static inline void arch_local_irq_enable(void) +{ + unsigned long temp; + asm volatile( + " mrs %0, cpsr @ arch_local_irq_enable\n" + " bic %0, %0, #128\n" + " msr cpsr_c, %0" + : "=r" (temp) + : + : "memory", "cc"); +} /* * Disable IRQs */ -#define raw_local_irq_disable() \ - ({ \ - unsigned long temp; \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ local_irq_disable\n" \ -" orr %0, %0, #128\n" \ -" msr cpsr_c, %0" \ - : "=r" (temp) \ - : \ - : "memory", "cc"); \ - }) +static inline void arch_local_irq_disable(void) +{ + unsigned long temp; + asm volatile( + " mrs %0, cpsr @ arch_local_irq_disable\n" + " orr %0, %0, #128\n" + " msr cpsr_c, %0" + : "=r" (temp) + : + : "memory", "cc"); +} /* * Enable FIQs @@ -106,27 +125,31 @@ /* * Save the current interrupt enable state. 
*/ -#define raw_local_save_flags(x) \ - ({ \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ local_save_flags" \ - : "=r" (x) : : "memory", "cc"); \ - }) +static inline unsigned long arch_local_save_flags(void) +{ + unsigned long flags; + asm volatile( + " mrs %0, cpsr @ local_save_flags" + : "=r" (flags) : : "memory", "cc"); + return flags; +} /* * restore saved IRQ & FIQ state */ -#define raw_local_irq_restore(x) \ - __asm__ __volatile__( \ - "msr cpsr_c, %0 @ local_irq_restore\n" \ - : \ - : "r" (x) \ - : "memory", "cc") +static inline void arch_local_irq_restore(unsigned long flags) +{ + asm volatile( + " msr cpsr_c, %0 @ local_irq_restore" + : + : "r" (flags) + : "memory", "cc"); +} -#define raw_irqs_disabled_flags(flags) \ -({ \ - (int)((flags) & PSR_I_BIT); \ -}) +static inline int arch_irqs_disabled_flags(unsigned long flags) +{ + return flags & PSR_I_BIT; +} #endif #endif diff --git a/arch/avr32/include/asm/irqflags.h b/arch/avr32/include/asm/irqflags.h index 93570daac38..006e9487372 100644 --- a/arch/avr32/include/asm/irqflags.h +++ b/arch/avr32/include/asm/irqflags.h @@ -8,16 +8,14 @@ #ifndef __ASM_AVR32_IRQFLAGS_H #define __ASM_AVR32_IRQFLAGS_H +#include #include -static inline unsigned long __raw_local_save_flags(void) +static inline unsigned long arch_local_save_flags(void) { return sysreg_read(SR); } -#define raw_local_save_flags(x) \ - do { (x) = __raw_local_save_flags(); } while (0) - /* * This will restore ALL status register flags, not only the interrupt * mask flag. @@ -25,44 +23,39 @@ static inline unsigned long __raw_local_save_flags(void) * The empty asm statement informs the compiler of this fact while * also serving as a barrier. */ -static inline void raw_local_irq_restore(unsigned long flags) +static inline void arch_local_irq_restore(unsigned long flags) { sysreg_write(SR, flags); asm volatile("" : : : "memory", "cc"); } -static inline void raw_local_irq_disable(void) +static inline void arch_local_irq_disable(void) { asm volatile("ssrf %0" : : "n"(SYSREG_GM_OFFSET) : "memory"); } -static inline void raw_local_irq_enable(void) +static inline void arch_local_irq_enable(void) { asm volatile("csrf %0" : : "n"(SYSREG_GM_OFFSET) : "memory"); } -static inline int raw_irqs_disabled_flags(unsigned long flags) +static inline bool arch_irqs_disabled_flags(unsigned long flags) { return (flags & SYSREG_BIT(GM)) != 0; } -static inline int raw_irqs_disabled(void) +static inline bool arch_irqs_disabled(void) { - unsigned long flags = __raw_local_save_flags(); - - return raw_irqs_disabled_flags(flags); + return arch_irqs_disabled_flags(arch_local_save_flags()); } -static inline unsigned long __raw_local_irq_save(void) +static inline unsigned long arch_local_irq_save(void) { - unsigned long flags = __raw_local_save_flags(); + unsigned long flags = arch_local_save_flags(); - raw_local_irq_disable(); + arch_local_irq_disable(); return flags; } -#define raw_local_irq_save(flags) \ - do { (flags) = __raw_local_irq_save(); } while (0) - #endif /* __ASM_AVR32_IRQFLAGS_H */ diff --git a/arch/blackfin/include/asm/irqflags.h b/arch/blackfin/include/asm/irqflags.h index 994d7679101..41c4d70544e 100644 --- a/arch/blackfin/include/asm/irqflags.h +++ b/arch/blackfin/include/asm/irqflags.h @@ -218,16 +218,4 @@ static inline void hard_local_irq_restore(unsigned long flags) #endif /* !CONFIG_IPIPE */ - -/* - * Raw interface to linux/irqflags.h. 
- */ -#define raw_local_save_flags(flags) do { (flags) = arch_local_save_flags(); } while (0) -#define raw_local_irq_save(flags) do { (flags) = arch_local_irq_save(); } while (0) -#define raw_local_irq_restore(flags) arch_local_irq_restore(flags) -#define raw_local_irq_enable() arch_local_irq_enable() -#define raw_local_irq_disable() arch_local_irq_disable() -#define raw_irqs_disabled_flags(flags) arch_irqs_disabled_flags(flags) -#define raw_irqs_disabled() arch_irqs_disabled() - #endif diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c index 59fcdf6b013..05b550891ce 100644 --- a/arch/blackfin/kernel/trace.c +++ b/arch/blackfin/kernel/trace.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/cris/include/arch-v10/arch/irqflags.h b/arch/cris/include/arch-v10/arch/irqflags.h new file mode 100644 index 00000000000..75ef1899124 --- /dev/null +++ b/arch/cris/include/arch-v10/arch/irqflags.h @@ -0,0 +1,45 @@ +#ifndef __ASM_CRIS_ARCH_IRQFLAGS_H +#define __ASM_CRIS_ARCH_IRQFLAGS_H + +#include + +static inline unsigned long arch_local_save_flags(void) +{ + unsigned long flags; + asm volatile("move $ccr,%0" : "=rm" (flags) : : "memory"); + return flags; +} + +static inline void arch_local_irq_disable(void) +{ + asm volatile("di" : : : "memory"); +} + +static inline void arch_local_irq_enable(void) +{ + asm volatile("ei" : : : "memory"); +} + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags = arch_local_save_flags(); + arch_local_irq_disable(); + return flags; +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ + asm volatile("move %0,$ccr" : : "rm" (flags) : "memory"); +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return !(flags & (1 << 5)); +} + +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} + +#endif /* __ASM_CRIS_ARCH_IRQFLAGS_H */ diff --git a/arch/cris/include/arch-v10/arch/system.h b/arch/cris/include/arch-v10/arch/system.h index 4a9cd36c9e1..935fde34aa1 100644 --- a/arch/cris/include/arch-v10/arch/system.h +++ b/arch/cris/include/arch-v10/arch/system.h @@ -44,20 +44,4 @@ static inline unsigned long _get_base(char * addr) struct __xchg_dummy { unsigned long a[100]; }; #define __xg(x) ((struct __xchg_dummy *)(x)) -/* interrupt control.. 
*/ -#define local_save_flags(x) __asm__ __volatile__ ("move $ccr,%0" : "=rm" (x) : : "memory"); -#define local_irq_restore(x) __asm__ __volatile__ ("move %0,$ccr" : : "rm" (x) : "memory"); -#define local_irq_disable() __asm__ __volatile__ ( "di" : : :"memory"); -#define local_irq_enable() __asm__ __volatile__ ( "ei" : : :"memory"); - -#define irqs_disabled() \ -({ \ - unsigned long flags; \ - local_save_flags(flags); \ - !(flags & (1<<5)); \ -}) - -/* For spinlocks etc */ -#define local_irq_save(x) __asm__ __volatile__ ("move $ccr,%0\n\tdi" : "=rm" (x) : : "memory"); - #endif diff --git a/arch/cris/include/arch-v32/arch/irqflags.h b/arch/cris/include/arch-v32/arch/irqflags.h new file mode 100644 index 00000000000..041851f8ec6 --- /dev/null +++ b/arch/cris/include/arch-v32/arch/irqflags.h @@ -0,0 +1,46 @@ +#ifndef __ASM_CRIS_ARCH_IRQFLAGS_H +#define __ASM_CRIS_ARCH_IRQFLAGS_H + +#include +#include + +static inline unsigned long arch_local_save_flags(void) +{ + unsigned long flags; + asm volatile("move $ccs,%0" : "=rm" (flags) : : "memory"); + return flags; +} + +static inline void arch_local_irq_disable(void) +{ + asm volatile("di" : : : "memory"); +} + +static inline void arch_local_irq_enable(void) +{ + asm volatile("ei" : : : "memory"); +} + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags = arch_local_save_flags(); + arch_local_irq_disable(); + return flags; +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ + asm volatile("move %0,$ccs" : : "rm" (flags) : "memory"); +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return !(flags & (1 << I_CCS_BITNR)); +} + +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} + +#endif /* __ASM_CRIS_ARCH_IRQFLAGS_H */ diff --git a/arch/cris/include/arch-v32/arch/system.h b/arch/cris/include/arch-v32/arch/system.h index 6ca90f1f110..76cea99eaa6 100644 --- a/arch/cris/include/arch-v32/arch/system.h +++ b/arch/cris/include/arch-v32/arch/system.h @@ -44,26 +44,4 @@ static inline unsigned long rdsp(void) struct __xchg_dummy { unsigned long a[100]; }; #define __xg(x) ((struct __xchg_dummy *)(x)) -/* Used for interrupt control. */ -#define local_save_flags(x) \ - __asm__ __volatile__ ("move $ccs, %0" : "=rm" (x) : : "memory"); - -#define local_irq_restore(x) \ - __asm__ __volatile__ ("move %0, $ccs" : : "rm" (x) : "memory"); - -#define local_irq_disable() __asm__ __volatile__ ("di" : : : "memory"); -#define local_irq_enable() __asm__ __volatile__ ("ei" : : : "memory"); - -#define irqs_disabled() \ -({ \ - unsigned long flags; \ - \ - local_save_flags(flags);\ - !(flags & (1 << I_CCS_BITNR)); \ -}) - -/* Used for spinlocks, etc. 
*/ -#define local_irq_save(x) \ - __asm__ __volatile__ ("move $ccs, %0\n\tdi" : "=rm" (x) : : "memory"); - #endif /* _ASM_CRIS_ARCH_SYSTEM_H */ diff --git a/arch/cris/include/asm/irqflags.h b/arch/cris/include/asm/irqflags.h new file mode 100644 index 00000000000..943ba5ca6d2 --- /dev/null +++ b/arch/cris/include/asm/irqflags.h @@ -0,0 +1 @@ +#include diff --git a/arch/cris/include/asm/system.h b/arch/cris/include/asm/system.h index 8657b084a92..ea10592f7d7 100644 --- a/arch/cris/include/asm/system.h +++ b/arch/cris/include/asm/system.h @@ -1,6 +1,7 @@ #ifndef __ASM_CRIS_SYSTEM_H #define __ASM_CRIS_SYSTEM_H +#include #include /* the switch_to macro calls resume, an asm function in entry.S which does the actual diff --git a/arch/frv/include/asm/irqflags.h b/arch/frv/include/asm/irqflags.h new file mode 100644 index 00000000000..82f0b5363f4 --- /dev/null +++ b/arch/frv/include/asm/irqflags.h @@ -0,0 +1,158 @@ +/* FR-V interrupt handling + * + * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _ASM_IRQFLAGS_H +#define _ASM_IRQFLAGS_H + +/* + * interrupt flag manipulation + * - use virtual interrupt management since touching the PSR is slow + * - ICC2.Z: T if interrupts virtually disabled + * - ICC2.C: F if interrupts really disabled + * - if Z==1 upon interrupt: + * - C is set to 0 + * - interrupts are really disabled + * - entry.S returns immediately + * - uses TIHI (TRAP if Z==0 && C==0) #2 to really reenable interrupts + * - if taken, the trap: + * - sets ICC2.C + * - enables interrupts + */ +static inline void arch_local_irq_disable(void) +{ + /* set Z flag, but don't change the C flag */ + asm volatile(" andcc gr0,gr0,gr0,icc2 \n" + : + : + : "memory", "icc2" + ); +} + +static inline void arch_local_irq_enable(void) +{ + /* clear Z flag and then test the C flag */ + asm volatile(" oricc gr0,#1,gr0,icc2 \n" + " tihi icc2,gr0,#2 \n" + : + : + : "memory", "icc2" + ); +} + +static inline unsigned long arch_local_save_flags(void) +{ + unsigned long flags; + + asm volatile("movsg ccr,%0" + : "=r"(flags) + : + : "memory"); + + /* shift ICC2.Z to bit 0 */ + flags >>= 26; + + /* make flags 1 if interrupts disabled, 0 otherwise */ + return flags & 1UL; + +} + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags = arch_local_save_flags(); + arch_local_irq_disable(); + return flags; +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ + /* load the Z flag by turning 1 if disabled into 0 if disabled + * and thus setting the Z flag but not the C flag */ + asm volatile(" xoricc %0,#1,gr0,icc2 \n" + /* then trap if Z=0 and C=0 */ + " tihi icc2,gr0,#2 \n" + : + : "r"(flags) + : "memory", "icc2" + ); + +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return flags; +} + +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} + +/* + * real interrupt flag manipulation + */ +#define __arch_local_irq_disable() \ +do { \ + unsigned long psr; \ + asm volatile(" movsg psr,%0 \n" \ + " andi %0,%2,%0 \n" \ + " ori %0,%1,%0 \n" \ + " movgs %0,psr \n" \ + : "=r"(psr) \ + : "i" (PSR_PIL_14), "i" (~PSR_PIL) \ + : "memory"); \ +} while (0) + +#define 
__arch_local_irq_enable() \ +do { \ + unsigned long psr; \ + asm volatile(" movsg psr,%0 \n" \ + " andi %0,%1,%0 \n" \ + " movgs %0,psr \n" \ + : "=r"(psr) \ + : "i" (~PSR_PIL) \ + : "memory"); \ +} while (0) + +#define __arch_local_save_flags(flags) \ +do { \ + typecheck(unsigned long, flags); \ + asm("movsg psr,%0" \ + : "=r"(flags) \ + : \ + : "memory"); \ +} while (0) + +#define __arch_local_irq_save(flags) \ +do { \ + unsigned long npsr; \ + typecheck(unsigned long, flags); \ + asm volatile(" movsg psr,%0 \n" \ + " andi %0,%3,%1 \n" \ + " ori %1,%2,%1 \n" \ + " movgs %1,psr \n" \ + : "=r"(flags), "=r"(npsr) \ + : "i" (PSR_PIL_14), "i" (~PSR_PIL) \ + : "memory"); \ +} while (0) + +#define __arch_local_irq_restore(flags) \ +do { \ + typecheck(unsigned long, flags); \ + asm volatile(" movgs %0,psr \n" \ + : \ + : "r" (flags) \ + : "memory"); \ +} while (0) + +#define __arch_irqs_disabled() \ + ((__get_PSR() & PSR_PIL) >= PSR_PIL_14) + +#endif /* _ASM_IRQFLAGS_H */ diff --git a/arch/frv/include/asm/system.h b/arch/frv/include/asm/system.h index efd22d9077a..0a6d8d9ca45 100644 --- a/arch/frv/include/asm/system.h +++ b/arch/frv/include/asm/system.h @@ -36,142 +36,6 @@ do { \ mb(); \ } while(0) -/* - * interrupt flag manipulation - * - use virtual interrupt management since touching the PSR is slow - * - ICC2.Z: T if interrupts virtually disabled - * - ICC2.C: F if interrupts really disabled - * - if Z==1 upon interrupt: - * - C is set to 0 - * - interrupts are really disabled - * - entry.S returns immediately - * - uses TIHI (TRAP if Z==0 && C==0) #2 to really reenable interrupts - * - if taken, the trap: - * - sets ICC2.C - * - enables interrupts - */ -#define local_irq_disable() \ -do { \ - /* set Z flag, but don't change the C flag */ \ - asm volatile(" andcc gr0,gr0,gr0,icc2 \n" \ - : \ - : \ - : "memory", "icc2" \ - ); \ -} while(0) - -#define local_irq_enable() \ -do { \ - /* clear Z flag and then test the C flag */ \ - asm volatile(" oricc gr0,#1,gr0,icc2 \n" \ - " tihi icc2,gr0,#2 \n" \ - : \ - : \ - : "memory", "icc2" \ - ); \ -} while(0) - -#define local_save_flags(flags) \ -do { \ - typecheck(unsigned long, flags); \ - asm volatile("movsg ccr,%0" \ - : "=r"(flags) \ - : \ - : "memory"); \ - \ - /* shift ICC2.Z to bit 0 */ \ - flags >>= 26; \ - \ - /* make flags 1 if interrupts disabled, 0 otherwise */ \ - flags &= 1UL; \ -} while(0) - -#define irqs_disabled() \ - ({unsigned long flags; local_save_flags(flags); !!flags; }) - -#define local_irq_save(flags) \ -do { \ - typecheck(unsigned long, flags); \ - local_save_flags(flags); \ - local_irq_disable(); \ -} while(0) - -#define local_irq_restore(flags) \ -do { \ - typecheck(unsigned long, flags); \ - \ - /* load the Z flag by turning 1 if disabled into 0 if disabled \ - * and thus setting the Z flag but not the C flag */ \ - asm volatile(" xoricc %0,#1,gr0,icc2 \n" \ - /* then test Z=0 and C=0 */ \ - " tihi icc2,gr0,#2 \n" \ - : \ - : "r"(flags) \ - : "memory", "icc2" \ - ); \ - \ -} while(0) - -/* - * real interrupt flag manipulation - */ -#define __local_irq_disable() \ -do { \ - unsigned long psr; \ - asm volatile(" movsg psr,%0 \n" \ - " andi %0,%2,%0 \n" \ - " ori %0,%1,%0 \n" \ - " movgs %0,psr \n" \ - : "=r"(psr) \ - : "i" (PSR_PIL_14), "i" (~PSR_PIL) \ - : "memory"); \ -} while(0) - -#define __local_irq_enable() \ -do { \ - unsigned long psr; \ - asm volatile(" movsg psr,%0 \n" \ - " andi %0,%1,%0 \n" \ - " movgs %0,psr \n" \ - : "=r"(psr) \ - : "i" (~PSR_PIL) \ - : "memory"); \ -} while(0) - -#define 
__local_save_flags(flags) \ -do { \ - typecheck(unsigned long, flags); \ - asm("movsg psr,%0" \ - : "=r"(flags) \ - : \ - : "memory"); \ -} while(0) - -#define __local_irq_save(flags) \ -do { \ - unsigned long npsr; \ - typecheck(unsigned long, flags); \ - asm volatile(" movsg psr,%0 \n" \ - " andi %0,%3,%1 \n" \ - " ori %1,%2,%1 \n" \ - " movgs %1,psr \n" \ - : "=r"(flags), "=r"(npsr) \ - : "i" (PSR_PIL_14), "i" (~PSR_PIL) \ - : "memory"); \ -} while(0) - -#define __local_irq_restore(flags) \ -do { \ - typecheck(unsigned long, flags); \ - asm volatile(" movgs %0,psr \n" \ - : \ - : "r" (flags) \ - : "memory"); \ -} while(0) - -#define __irqs_disabled() \ - ((__get_PSR() & PSR_PIL) >= PSR_PIL_14) - /* * Force strict CPU ordering. */ diff --git a/arch/h8300/include/asm/irqflags.h b/arch/h8300/include/asm/irqflags.h new file mode 100644 index 00000000000..9617cd57aeb --- /dev/null +++ b/arch/h8300/include/asm/irqflags.h @@ -0,0 +1,43 @@ +#ifndef _H8300_IRQFLAGS_H +#define _H8300_IRQFLAGS_H + +static inline unsigned long arch_local_save_flags(void) +{ + unsigned long flags; + asm volatile ("stc ccr,%w0" : "=r" (flags)); + return flags; +} + +static inline void arch_local_irq_disable(void) +{ + asm volatile ("orc #0x80,ccr" : : : "memory"); +} + +static inline void arch_local_irq_enable(void) +{ + asm volatile ("andc #0x7f,ccr" : : : "memory"); +} + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags = arch_local_save_flags(); + arch_local_irq_disable(); + return flags; +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ + asm volatile ("ldc %w0,ccr" : : "r" (flags) : "memory"); +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return (flags & 0x80) == 0x80; +} + +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} + +#endif /* _H8300_IRQFLAGS_H */ diff --git a/arch/h8300/include/asm/system.h b/arch/h8300/include/asm/system.h index 16bf1560ff6..2c2382e50d9 100644 --- a/arch/h8300/include/asm/system.h +++ b/arch/h8300/include/asm/system.h @@ -2,6 +2,7 @@ #define _H8300_SYSTEM_H #include +#include struct pt_regs; @@ -51,31 +52,8 @@ asmlinkage void resume(void); (last) = _last; \ } -#define __sti() asm volatile ("andc #0x7f,ccr") -#define __cli() asm volatile ("orc #0x80,ccr") - -#define __save_flags(x) \ - asm volatile ("stc ccr,%w0":"=r" (x)) - -#define __restore_flags(x) \ - asm volatile ("ldc %w0,ccr": :"r" (x)) - -#define irqs_disabled() \ -({ \ - unsigned char flags; \ - __save_flags(flags); \ - ((flags & 0x80) == 0x80); \ -}) - #define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc") -/* For spinlocks etc */ -#define local_irq_disable() __cli() -#define local_irq_enable() __sti() -#define local_irq_save(x) ({ __save_flags(x); local_irq_disable(); }) -#define local_irq_restore(x) __restore_flags(x) -#define local_save_flags(x) __save_flags(x) - /* * Force strict CPU ordering. * Not really required on H8... diff --git a/arch/ia64/include/asm/irqflags.h b/arch/ia64/include/asm/irqflags.h new file mode 100644 index 00000000000..f82d6be2ecd --- /dev/null +++ b/arch/ia64/include/asm/irqflags.h @@ -0,0 +1,94 @@ +/* + * IRQ flags defines. 
+ * + * Copyright (C) 1998-2003 Hewlett-Packard Co + * David Mosberger-Tang + * Copyright (C) 1999 Asit Mallick + * Copyright (C) 1999 Don Dugger + */ + +#ifndef _ASM_IA64_IRQFLAGS_H +#define _ASM_IA64_IRQFLAGS_H + +#ifdef CONFIG_IA64_DEBUG_IRQ +extern unsigned long last_cli_ip; +static inline void arch_maybe_save_ip(unsigned long flags) +{ + if (flags & IA64_PSR_I) + last_cli_ip = ia64_getreg(_IA64_REG_IP); +} +#else +#define arch_maybe_save_ip(flags) do {} while (0) +#endif + +/* + * - clearing psr.i is implicitly serialized (visible by next insn) + * - setting psr.i requires data serialization + * - we need a stop-bit before reading PSR because we sometimes + * write a floating-point register right before reading the PSR + * and that writes to PSR.mfl + */ + +static inline unsigned long arch_local_save_flags(void) +{ + ia64_stop(); +#ifdef CONFIG_PARAVIRT + return ia64_get_psr_i(); +#else + return ia64_getreg(_IA64_REG_PSR); +#endif +} + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags = arch_local_save_flags(); + + ia64_stop(); + ia64_rsm(IA64_PSR_I); + arch_maybe_save_ip(flags); + return flags; +} + +static inline void arch_local_irq_disable(void) +{ +#ifdef CONFIG_IA64_DEBUG_IRQ + arch_local_irq_save(); +#else + ia64_stop(); + ia64_rsm(IA64_PSR_I); +#endif +} + +static inline void arch_local_irq_enable(void) +{ + ia64_stop(); + ia64_ssm(IA64_PSR_I); + ia64_srlz_d(); +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ +#ifdef CONFIG_IA64_DEBUG_IRQ + unsigned long old_psr = arch_local_save_flags(); +#endif + ia64_intrin_local_irq_restore(flags & IA64_PSR_I); + arch_maybe_save_ip(old_psr & ~flags); +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return (flags & IA64_PSR_I) == 0; +} + +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} + +static inline void arch_safe_halt(void) +{ + ia64_pal_halt_light(); /* PAL_HALT_LIGHT */ +} + + +#endif /* _ASM_IA64_IRQFLAGS_H */ diff --git a/arch/ia64/include/asm/system.h b/arch/ia64/include/asm/system.h index 9f342a574ce..2feb7f64c03 100644 --- a/arch/ia64/include/asm/system.h +++ b/arch/ia64/include/asm/system.h @@ -107,87 +107,11 @@ extern struct ia64_boot_param { */ #define set_mb(var, value) do { (var) = (value); mb(); } while (0) -#define safe_halt() ia64_pal_halt_light() /* PAL_HALT_LIGHT */ - /* * The group barrier in front of the rsm & ssm are necessary to ensure * that none of the previous instructions in the same group are * affected by the rsm/ssm. 
*/ -/* For spinlocks etc */ - -/* - * - clearing psr.i is implicitly serialized (visible by next insn) - * - setting psr.i requires data serialization - * - we need a stop-bit before reading PSR because we sometimes - * write a floating-point register right before reading the PSR - * and that writes to PSR.mfl - */ -#ifdef CONFIG_PARAVIRT -#define __local_save_flags() ia64_get_psr_i() -#else -#define __local_save_flags() ia64_getreg(_IA64_REG_PSR) -#endif - -#define __local_irq_save(x) \ -do { \ - ia64_stop(); \ - (x) = __local_save_flags(); \ - ia64_stop(); \ - ia64_rsm(IA64_PSR_I); \ -} while (0) - -#define __local_irq_disable() \ -do { \ - ia64_stop(); \ - ia64_rsm(IA64_PSR_I); \ -} while (0) - -#define __local_irq_restore(x) ia64_intrin_local_irq_restore((x) & IA64_PSR_I) - -#ifdef CONFIG_IA64_DEBUG_IRQ - - extern unsigned long last_cli_ip; - -# define __save_ip() last_cli_ip = ia64_getreg(_IA64_REG_IP) - -# define local_irq_save(x) \ -do { \ - unsigned long __psr; \ - \ - __local_irq_save(__psr); \ - if (__psr & IA64_PSR_I) \ - __save_ip(); \ - (x) = __psr; \ -} while (0) - -# define local_irq_disable() do { unsigned long __x; local_irq_save(__x); } while (0) - -# define local_irq_restore(x) \ -do { \ - unsigned long __old_psr, __psr = (x); \ - \ - local_save_flags(__old_psr); \ - __local_irq_restore(__psr); \ - if ((__old_psr & IA64_PSR_I) && !(__psr & IA64_PSR_I)) \ - __save_ip(); \ -} while (0) - -#else /* !CONFIG_IA64_DEBUG_IRQ */ -# define local_irq_save(x) __local_irq_save(x) -# define local_irq_disable() __local_irq_disable() -# define local_irq_restore(x) __local_irq_restore(x) -#endif /* !CONFIG_IA64_DEBUG_IRQ */ - -#define local_irq_enable() ({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); }) -#define local_save_flags(flags) ({ ia64_stop(); (flags) = __local_save_flags(); }) - -#define irqs_disabled() \ -({ \ - unsigned long __ia64_id_flags; \ - local_save_flags(__ia64_id_flags); \ - (__ia64_id_flags & IA64_PSR_I) == 0; \ -}) #ifdef __KERNEL__ diff --git a/arch/m32r/include/asm/irqflags.h b/arch/m32r/include/asm/irqflags.h new file mode 100644 index 00000000000..1f92d29982a --- /dev/null +++ b/arch/m32r/include/asm/irqflags.h @@ -0,0 +1,104 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto + * Copyright (C) 2004, 2006 Hirokazu Takata + */ + +#ifndef _ASM_M32R_IRQFLAGS_H +#define _ASM_M32R_IRQFLAGS_H + +#include + +static inline unsigned long arch_local_save_flags(void) +{ + unsigned long flags; + asm volatile("mvfc %0,psw" : "=r"(flags)); + return flags; +} + +static inline void arch_local_irq_disable(void) +{ +#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104) + asm volatile ( + "clrpsw #0x40 -> nop" + : : : "memory"); +#else + unsigned long tmpreg0, tmpreg1; + asm volatile ( + "ld24 %0, #0 ; Use 32-bit insn. \n\t" + "mvfc %1, psw ; No interrupt can be accepted here. 
\n\t" + "mvtc %0, psw \n\t" + "and3 %0, %1, #0xffbf \n\t" + "mvtc %0, psw \n\t" + : "=&r" (tmpreg0), "=&r" (tmpreg1) + : + : "cbit", "memory"); +#endif +} + +static inline void arch_local_irq_enable(void) +{ +#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104) + asm volatile ( + "setpsw #0x40 -> nop" + : : : "memory"); +#else + unsigned long tmpreg; + asm volatile ( + "mvfc %0, psw; \n\t" + "or3 %0, %0, #0x0040; \n\t" + "mvtc %0, psw; \n\t" + : "=&r" (tmpreg) + : + : "cbit", "memory"); +#endif +} + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + +#if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104)) + asm volatile ( + "mvfc %0, psw; \n\t" + "clrpsw #0x40 -> nop; \n\t" + : "=r" (flags) + : + : "memory"); +#else + unsigned long tmpreg; + asm volatile ( + "ld24 %1, #0 \n\t" + "mvfc %0, psw \n\t" + "mvtc %1, psw \n\t" + "and3 %1, %0, #0xffbf \n\t" + "mvtc %1, psw \n\t" + : "=r" (flags), "=&r" (tmpreg) + : + : "cbit", "memory"); +#endif + return flags; +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ + asm volatile("mvtc %0,psw" + : + : "r" (flags) + : "cbit", "memory"); +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return !(flags & 0x40); +} + +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} + +#endif /* _ASM_M32R_IRQFLAGS_H */ diff --git a/arch/m32r/include/asm/system.h b/arch/m32r/include/asm/system.h index c980f5ba8de..13c46794ccb 100644 --- a/arch/m32r/include/asm/system.h +++ b/arch/m32r/include/asm/system.h @@ -11,6 +11,7 @@ */ #include +#include #include #ifdef __KERNEL__ @@ -54,71 +55,6 @@ ); \ } while(0) -/* Interrupt Control */ -#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104) -#define local_irq_enable() \ - __asm__ __volatile__ ("setpsw #0x40 -> nop": : :"memory") -#define local_irq_disable() \ - __asm__ __volatile__ ("clrpsw #0x40 -> nop": : :"memory") -#else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ -static inline void local_irq_enable(void) -{ - unsigned long tmpreg; - __asm__ __volatile__( - "mvfc %0, psw; \n\t" - "or3 %0, %0, #0x0040; \n\t" - "mvtc %0, psw; \n\t" - : "=&r" (tmpreg) : : "cbit", "memory"); -} - -static inline void local_irq_disable(void) -{ - unsigned long tmpreg0, tmpreg1; - __asm__ __volatile__( - "ld24 %0, #0 ; Use 32-bit insn. \n\t" - "mvfc %1, psw ; No interrupt can be accepted here. 
\n\t" - "mvtc %0, psw \n\t" - "and3 %0, %1, #0xffbf \n\t" - "mvtc %0, psw \n\t" - : "=&r" (tmpreg0), "=&r" (tmpreg1) : : "cbit", "memory"); -} -#endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ - -#define local_save_flags(x) \ - __asm__ __volatile__("mvfc %0,psw" : "=r"(x) : /* no input */) - -#define local_irq_restore(x) \ - __asm__ __volatile__("mvtc %0,psw" : /* no outputs */ \ - : "r" (x) : "cbit", "memory") - -#if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104)) -#define local_irq_save(x) \ - __asm__ __volatile__( \ - "mvfc %0, psw; \n\t" \ - "clrpsw #0x40 -> nop; \n\t" \ - : "=r" (x) : /* no input */ : "memory") -#else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ -#define local_irq_save(x) \ - ({ \ - unsigned long tmpreg; \ - __asm__ __volatile__( \ - "ld24 %1, #0 \n\t" \ - "mvfc %0, psw \n\t" \ - "mvtc %1, psw \n\t" \ - "and3 %1, %0, #0xffbf \n\t" \ - "mvtc %1, psw \n\t" \ - : "=r" (x), "=&r" (tmpreg) \ - : : "cbit", "memory"); \ - }) -#endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ - -#define irqs_disabled() \ - ({ \ - unsigned long flags; \ - local_save_flags(flags); \ - !(flags & 0x40); \ - }) - #define nop() __asm__ __volatile__ ("nop" : : ) #define xchg(ptr, x) \ diff --git a/arch/m68k/include/asm/entry_no.h b/arch/m68k/include/asm/entry_no.h index 907ed03d792..80e41492aa2 100644 --- a/arch/m68k/include/asm/entry_no.h +++ b/arch/m68k/include/asm/entry_no.h @@ -28,7 +28,7 @@ * M68K COLDFIRE */ -#define ALLOWINT 0xf8ff +#define ALLOWINT (~0x700) #ifdef __ASSEMBLY__ diff --git a/arch/m68k/include/asm/irqflags.h b/arch/m68k/include/asm/irqflags.h new file mode 100644 index 00000000000..4a5b284a155 --- /dev/null +++ b/arch/m68k/include/asm/irqflags.h @@ -0,0 +1,76 @@ +#ifndef _M68K_IRQFLAGS_H +#define _M68K_IRQFLAGS_H + +#include +#include +#include +#include +#include + +static inline unsigned long arch_local_save_flags(void) +{ + unsigned long flags; + asm volatile ("movew %%sr,%0" : "=d" (flags) : : "memory"); + return flags; +} + +static inline void arch_local_irq_disable(void) +{ +#ifdef CONFIG_COLDFIRE + asm volatile ( + "move %/sr,%%d0 \n\t" + "ori.l #0x0700,%%d0 \n\t" + "move %%d0,%/sr \n" + : /* no outputs */ + : + : "cc", "%d0", "memory"); +#else + asm volatile ("oriw #0x0700,%%sr" : : : "memory"); +#endif +} + +static inline void arch_local_irq_enable(void) +{ +#if defined(CONFIG_COLDFIRE) + asm volatile ( + "move %/sr,%%d0 \n\t" + "andi.l #0xf8ff,%%d0 \n\t" + "move %%d0,%/sr \n" + : /* no outputs */ + : + : "cc", "%d0", "memory"); +#else +# if defined(CONFIG_MMU) + if (MACH_IS_Q40 || !hardirq_count()) +# endif + asm volatile ( + "andiw %0,%%sr" + : + : "i" (ALLOWINT) + : "memory"); +#endif +} + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags = arch_local_save_flags(); + arch_local_irq_disable(); + return flags; +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ + asm volatile ("movew %0,%%sr" : : "d" (flags) : "memory"); +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return (flags & ~ALLOWINT) != 0; +} + +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} + +#endif /* _M68K_IRQFLAGS_H */ diff --git a/arch/m68k/include/asm/system_mm.h b/arch/m68k/include/asm/system_mm.h index dbb6515ffd5..12053c44ccc 100644 --- a/arch/m68k/include/asm/system_mm.h +++ b/arch/m68k/include/asm/system_mm.h @@ -3,6 +3,7 @@ #include #include +#include #include #include @@ -62,30 +63,6 @@ asmlinkage void resume(void); #define 
smp_wmb() barrier() #define smp_read_barrier_depends() ((void)0) -/* interrupt control.. */ -#if 0 -#define local_irq_enable() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory") -#else -#include -#define local_irq_enable() ({ \ - if (MACH_IS_Q40 || !hardirq_count()) \ - asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory"); \ -}) -#endif -#define local_irq_disable() asm volatile ("oriw #0x0700,%%sr": : : "memory") -#define local_save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory") -#define local_irq_restore(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory") - -static inline int irqs_disabled(void) -{ - unsigned long flags; - local_save_flags(flags); - return flags & ~ALLOWINT; -} - -/* For spinlocks etc */ -#define local_irq_save(x) ({ local_save_flags(x); local_irq_disable(); }) - #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) struct __xchg_dummy { unsigned long a[100]; }; diff --git a/arch/m68k/include/asm/system_no.h b/arch/m68k/include/asm/system_no.h index 3c0718d7439..20126c09794 100644 --- a/arch/m68k/include/asm/system_no.h +++ b/arch/m68k/include/asm/system_no.h @@ -2,6 +2,7 @@ #define _M68KNOMMU_SYSTEM_H #include +#include #include #include @@ -46,54 +47,6 @@ asmlinkage void resume(void); (last) = _last; \ } -#ifdef CONFIG_COLDFIRE -#define local_irq_enable() __asm__ __volatile__ ( \ - "move %/sr,%%d0\n\t" \ - "andi.l #0xf8ff,%%d0\n\t" \ - "move %%d0,%/sr\n" \ - : /* no outputs */ \ - : \ - : "cc", "%d0", "memory") -#define local_irq_disable() __asm__ __volatile__ ( \ - "move %/sr,%%d0\n\t" \ - "ori.l #0x0700,%%d0\n\t" \ - "move %%d0,%/sr\n" \ - : /* no outputs */ \ - : \ - : "cc", "%d0", "memory") -/* For spinlocks etc */ -#define local_irq_save(x) __asm__ __volatile__ ( \ - "movew %%sr,%0\n\t" \ - "movew #0x0700,%%d0\n\t" \ - "or.l %0,%%d0\n\t" \ - "movew %%d0,%/sr" \ - : "=d" (x) \ - : \ - : "cc", "%d0", "memory") -#else - -/* portable version */ /* FIXME - see entry.h*/ -#define ALLOWINT 0xf8ff - -#define local_irq_enable() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory") -#define local_irq_disable() asm volatile ("oriw #0x0700,%%sr": : : "memory") -#endif - -#define local_save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory") -#define local_irq_restore(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory") - -/* For spinlocks etc */ -#ifndef local_irq_save -#define local_irq_save(x) do { local_save_flags(x); local_irq_disable(); } while (0) -#endif - -#define irqs_disabled() \ -({ \ - unsigned long flags; \ - local_save_flags(flags); \ - ((flags & 0x0700) == 0x0700); \ -}) - #define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc") /* @@ -206,12 +159,4 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz #define arch_align_stack(x) (x) -static inline int irqs_disabled_flags(unsigned long flags) -{ - if (flags & 0x0700) - return 0; - else - return 1; -} - #endif /* _M68KNOMMU_SYSTEM_H */ diff --git a/arch/m68knommu/kernel/asm-offsets.c b/arch/m68knommu/kernel/asm-offsets.c index 9a8876f715d..24335022fa2 100644 --- a/arch/m68knommu/kernel/asm-offsets.c +++ b/arch/m68knommu/kernel/asm-offsets.c @@ -74,8 +74,6 @@ int main(void) DEFINE(PT_PTRACED, PT_PTRACED); - DEFINE(THREAD_SIZE, THREAD_SIZE); - /* Offsets in thread_info structure */ DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain)); diff --git a/arch/m68knommu/platform/coldfire/head.S 
b/arch/m68knommu/platform/coldfire/head.S index 4b91aa24eb0..0b2d7c7adf7 100644 --- a/arch/m68knommu/platform/coldfire/head.S +++ b/arch/m68knommu/platform/coldfire/head.S @@ -15,6 +15,7 @@ #include #include #include +#include /*****************************************************************************/ diff --git a/arch/microblaze/include/asm/irqflags.h b/arch/microblaze/include/asm/irqflags.h index 2c38c6d8017..5fd31905775 100644 --- a/arch/microblaze/include/asm/irqflags.h +++ b/arch/microblaze/include/asm/irqflags.h @@ -9,103 +9,114 @@ #ifndef _ASM_MICROBLAZE_IRQFLAGS_H #define _ASM_MICROBLAZE_IRQFLAGS_H -#include +#include #include -# if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR - -# define raw_local_irq_save(flags) \ - do { \ - asm volatile (" msrclr %0, %1; \ - nop;" \ - : "=r"(flags) \ - : "i"(MSR_IE) \ - : "memory"); \ - } while (0) - -# define raw_local_irq_disable() \ - do { \ - asm volatile (" msrclr r0, %0; \ - nop;" \ - : \ - : "i"(MSR_IE) \ - : "memory"); \ - } while (0) - -# define raw_local_irq_enable() \ - do { \ - asm volatile (" msrset r0, %0; \ - nop;" \ - : \ - : "i"(MSR_IE) \ - : "memory"); \ - } while (0) - -# else /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR == 0 */ - -# define raw_local_irq_save(flags) \ - do { \ - register unsigned tmp; \ - asm volatile (" mfs %0, rmsr; \ - nop; \ - andi %1, %0, %2; \ - mts rmsr, %1; \ - nop;" \ - : "=r"(flags), "=r" (tmp) \ - : "i"(~MSR_IE) \ - : "memory"); \ - } while (0) - -# define raw_local_irq_disable() \ - do { \ - register unsigned tmp; \ - asm volatile (" mfs %0, rmsr; \ - nop; \ - andi %0, %0, %1; \ - mts rmsr, %0; \ - nop;" \ - : "=r"(tmp) \ - : "i"(~MSR_IE) \ - : "memory"); \ - } while (0) - -# define raw_local_irq_enable() \ - do { \ - register unsigned tmp; \ - asm volatile (" mfs %0, rmsr; \ - nop; \ - ori %0, %0, %1; \ - mts rmsr, %0; \ - nop;" \ - : "=r"(tmp) \ - : "i"(MSR_IE) \ - : "memory"); \ - } while (0) - -# endif /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR */ - -#define raw_local_irq_restore(flags) \ - do { \ - asm volatile (" mts rmsr, %0; \ - nop;" \ - : \ - : "r"(flags) \ - : "memory"); \ - } while (0) - -static inline unsigned long get_msr(void) +#ifdef CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + asm volatile(" msrclr %0, %1 \n" + " nop \n" + : "=r"(flags) + : "i"(MSR_IE) + : "memory"); + return flags; +} + +static inline void arch_local_irq_disable(void) +{ + /* this uses r0 without declaring it - is that correct? */ + asm volatile(" msrclr r0, %0 \n" + " nop \n" + : + : "i"(MSR_IE) + : "memory"); +} + +static inline void arch_local_irq_enable(void) +{ + /* this uses r0 without declaring it - is that correct? 
*/ + asm volatile(" msrset r0, %0 \n" + " nop \n" + : + : "i"(MSR_IE) + : "memory"); +} + +#else /* !CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR */ + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags, tmp; + asm volatile (" mfs %0, rmsr \n" + " nop \n" + " andi %1, %0, %2 \n" + " mts rmsr, %1 \n" + " nop \n" + : "=r"(flags), "=r"(tmp) + : "i"(~MSR_IE) + : "memory"); + return flags; +} + +static inline void arch_local_irq_disable(void) +{ + unsigned long tmp; + asm volatile(" mfs %0, rmsr \n" + " nop \n" + " andi %0, %0, %1 \n" + " mts rmsr, %0 \n" + " nop \n" + : "=r"(tmp) + : "i"(~MSR_IE) + : "memory"); +} + +static inline void arch_local_irq_enable(void) +{ + unsigned long tmp; + asm volatile(" mfs %0, rmsr \n" + " nop \n" + " ori %0, %0, %1 \n" + " mts rmsr, %0 \n" + " nop \n" + : "=r"(tmp) + : "i"(MSR_IE) + : "memory"); +} + +#endif /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR */ + +static inline unsigned long arch_local_save_flags(void) { unsigned long flags; - asm volatile (" mfs %0, rmsr; \ - nop;" \ - : "=r"(flags) \ - : \ - : "memory"); \ + asm volatile(" mfs %0, rmsr \n" + " nop \n" + : "=r"(flags) + : + : "memory"); return flags; } -#define raw_local_save_flags(flags) ((flags) = get_msr()) -#define raw_irqs_disabled() ((get_msr() & MSR_IE) == 0) -#define raw_irqs_disabled_flags(flags) ((flags & MSR_IE) == 0) +static inline void arch_local_irq_restore(unsigned long flags) +{ + asm volatile(" mts rmsr, %0 \n" + " nop \n" + : + : "r"(flags) + : "memory"); +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return (flags & MSR_IE) == 0; +} + +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} #endif /* _ASM_MICROBLAZE_IRQFLAGS_H */ diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h index 701ec0ba8fa..9ef3b0d1789 100644 --- a/arch/mips/include/asm/irqflags.h +++ b/arch/mips/include/asm/irqflags.h @@ -17,7 +17,7 @@ #include __asm__( - " .macro raw_local_irq_enable \n" + " .macro arch_local_irq_enable \n" " .set push \n" " .set reorder \n" " .set noat \n" @@ -40,7 +40,7 @@ __asm__( extern void smtc_ipi_replay(void); -static inline void raw_local_irq_enable(void) +static inline void arch_local_irq_enable(void) { #ifdef CONFIG_MIPS_MT_SMTC /* @@ -50,7 +50,7 @@ static inline void raw_local_irq_enable(void) smtc_ipi_replay(); #endif __asm__ __volatile__( - "raw_local_irq_enable" + "arch_local_irq_enable" : /* no outputs */ : /* no inputs */ : "memory"); @@ -76,7 +76,7 @@ static inline void raw_local_irq_enable(void) * Workaround: mask EXL bit of the result or place a nop before mfc0. 
*/ __asm__( - " .macro raw_local_irq_disable\n" + " .macro arch_local_irq_disable\n" " .set push \n" " .set noat \n" #ifdef CONFIG_MIPS_MT_SMTC @@ -97,17 +97,17 @@ __asm__( " .set pop \n" " .endm \n"); -static inline void raw_local_irq_disable(void) +static inline void arch_local_irq_disable(void) { __asm__ __volatile__( - "raw_local_irq_disable" + "arch_local_irq_disable" : /* no outputs */ : /* no inputs */ : "memory"); } __asm__( - " .macro raw_local_save_flags flags \n" + " .macro arch_local_save_flags flags \n" " .set push \n" " .set reorder \n" #ifdef CONFIG_MIPS_MT_SMTC @@ -118,13 +118,15 @@ __asm__( " .set pop \n" " .endm \n"); -#define raw_local_save_flags(x) \ -__asm__ __volatile__( \ - "raw_local_save_flags %0" \ - : "=r" (x)) +static inline unsigned long arch_local_save_flags(void) +{ + unsigned long flags; + asm volatile("arch_local_save_flags %0" : "=r" (flags)); + return flags; +} __asm__( - " .macro raw_local_irq_save result \n" + " .macro arch_local_irq_save result \n" " .set push \n" " .set reorder \n" " .set noat \n" @@ -148,15 +150,18 @@ __asm__( " .set pop \n" " .endm \n"); -#define raw_local_irq_save(x) \ -__asm__ __volatile__( \ - "raw_local_irq_save\t%0" \ - : "=r" (x) \ - : /* no inputs */ \ - : "memory") +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + asm volatile("arch_local_irq_save\t%0" + : "=r" (flags) + : /* no inputs */ + : "memory"); + return flags; +} __asm__( - " .macro raw_local_irq_restore flags \n" + " .macro arch_local_irq_restore flags \n" " .set push \n" " .set noreorder \n" " .set noat \n" @@ -196,7 +201,7 @@ __asm__( " .endm \n"); -static inline void raw_local_irq_restore(unsigned long flags) +static inline void arch_local_irq_restore(unsigned long flags) { unsigned long __tmp1; @@ -211,24 +216,24 @@ static inline void raw_local_irq_restore(unsigned long flags) #endif __asm__ __volatile__( - "raw_local_irq_restore\t%0" + "arch_local_irq_restore\t%0" : "=r" (__tmp1) : "0" (flags) : "memory"); } -static inline void __raw_local_irq_restore(unsigned long flags) +static inline void __arch_local_irq_restore(unsigned long flags) { unsigned long __tmp1; __asm__ __volatile__( - "raw_local_irq_restore\t%0" + "arch_local_irq_restore\t%0" : "=r" (__tmp1) : "0" (flags) : "memory"); } -static inline int raw_irqs_disabled_flags(unsigned long flags) +static inline int arch_irqs_disabled_flags(unsigned long flags) { #ifdef CONFIG_MIPS_MT_SMTC /* diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index cfeb2c15589..39c08254b0f 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -1038,7 +1038,7 @@ void deferred_smtc_ipi(void) * but it's more efficient, given that we're already * running down the IPI queue. */ - __raw_local_irq_restore(flags); + __arch_local_irq_restore(flags); } } @@ -1190,7 +1190,7 @@ void smtc_ipi_replay(void) /* ** But use a raw restore here to avoid recursion. */ - __raw_local_irq_restore(flags); + __arch_local_irq_restore(flags); if (pipi) { self_ipi(pipi); diff --git a/arch/mn10300/include/asm/irqflags.h b/arch/mn10300/include/asm/irqflags.h new file mode 100644 index 00000000000..5e529a117cb --- /dev/null +++ b/arch/mn10300/include/asm/irqflags.h @@ -0,0 +1,123 @@ +/* MN10300 IRQ flag handling + * + * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _ASM_IRQFLAGS_H +#define _ASM_IRQFLAGS_H + +#include + +/* + * interrupt control + * - "disabled": run in IM1/2 + * - level 0 - GDB stub + * - level 1 - virtual serial DMA (if present) + * - level 5 - normal interrupt priority + * - level 6 - timer interrupt + * - "enabled": run in IM7 + */ +#ifdef CONFIG_MN10300_TTYSM +#define MN10300_CLI_LEVEL EPSW_IM_2 +#else +#define MN10300_CLI_LEVEL EPSW_IM_1 +#endif + +#ifndef __ASSEMBLY__ + +static inline unsigned long arch_local_save_flags(void) +{ + unsigned long flags; + + asm volatile("mov epsw,%0" : "=d"(flags)); + return flags; +} + +static inline void arch_local_irq_disable(void) +{ + asm volatile( + " and %0,epsw \n" + " or %1,epsw \n" + " nop \n" + " nop \n" + " nop \n" + : + : "i"(~EPSW_IM), "i"(EPSW_IE | MN10300_CLI_LEVEL) + : "memory"); +} + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + + flags = arch_local_save_flags(); + arch_local_irq_disable(); + return flags; +} + +/* + * we make sure arch_irq_enable() doesn't cause priority inversion + */ +extern unsigned long __mn10300_irq_enabled_epsw; + +static inline void arch_local_irq_enable(void) +{ + unsigned long tmp; + + asm volatile( + " mov epsw,%0 \n" + " and %1,%0 \n" + " or %2,%0 \n" + " mov %0,epsw \n" + : "=&d"(tmp) + : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw) + : "memory"); +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ + asm volatile( + " mov %0,epsw \n" + " nop \n" + " nop \n" + " nop \n" + : + : "d"(flags) + : "memory", "cc"); +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return (flags & EPSW_IM) <= MN10300_CLI_LEVEL; +} + +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} + +/* + * Hook to save power by halting the CPU + * - called from the idle loop + * - must reenable interrupts (which takes three instruction cycles to complete) + */ +static inline void arch_safe_halt(void) +{ + asm volatile( + " or %0,epsw \n" + " nop \n" + " nop \n" + " bset %2,(%1) \n" + : + : "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP) + : "cc"); +} + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_IRQFLAGS_H */ diff --git a/arch/mn10300/include/asm/system.h b/arch/mn10300/include/asm/system.h index 3636c054dcd..9f7c7e17c01 100644 --- a/arch/mn10300/include/asm/system.h +++ b/arch/mn10300/include/asm/system.h @@ -17,6 +17,7 @@ #ifndef __ASSEMBLY__ #include +#include struct task_struct; struct thread_struct; @@ -79,114 +80,6 @@ do { \ #define read_barrier_depends() do {} while (0) #define smp_read_barrier_depends() do {} while (0) -/*****************************************************************************/ -/* - * interrupt control - * - "disabled": run in IM1/2 - * - level 0 - GDB stub - * - level 1 - virtual serial DMA (if present) - * - level 5 - normal interrupt priority - * - level 6 - timer interrupt - * - "enabled": run in IM7 - */ -#ifdef CONFIG_MN10300_TTYSM -#define MN10300_CLI_LEVEL EPSW_IM_2 -#else -#define MN10300_CLI_LEVEL EPSW_IM_1 -#endif - -#define local_save_flags(x) \ -do { \ - typecheck(unsigned long, x); \ - asm volatile( \ - " mov epsw,%0 \n" \ - : "=d"(x) \ - ); \ -} while (0) - -#define 
local_irq_disable() \ -do { \ - asm volatile( \ - " and %0,epsw \n" \ - " or %1,epsw \n" \ - " nop \n" \ - " nop \n" \ - " nop \n" \ - : \ - : "i"(~EPSW_IM), "i"(EPSW_IE | MN10300_CLI_LEVEL) \ - ); \ -} while (0) - -#define local_irq_save(x) \ -do { \ - local_save_flags(x); \ - local_irq_disable(); \ -} while (0) - -/* - * we make sure local_irq_enable() doesn't cause priority inversion - */ -#ifndef __ASSEMBLY__ - -extern unsigned long __mn10300_irq_enabled_epsw; - -#endif - -#define local_irq_enable() \ -do { \ - unsigned long tmp; \ - \ - asm volatile( \ - " mov epsw,%0 \n" \ - " and %1,%0 \n" \ - " or %2,%0 \n" \ - " mov %0,epsw \n" \ - : "=&d"(tmp) \ - : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw) \ - : "cc" \ - ); \ -} while (0) - -#define local_irq_restore(x) \ -do { \ - typecheck(unsigned long, x); \ - asm volatile( \ - " mov %0,epsw \n" \ - " nop \n" \ - " nop \n" \ - " nop \n" \ - : \ - : "d"(x) \ - : "memory", "cc" \ - ); \ -} while (0) - -#define irqs_disabled() \ -({ \ - unsigned long flags; \ - local_save_flags(flags); \ - (flags & EPSW_IM) <= MN10300_CLI_LEVEL; \ -}) - -/* hook to save power by halting the CPU - * - called from the idle loop - * - must reenable interrupts (which takes three instruction cycles to complete) - */ -#define safe_halt() \ -do { \ - asm volatile(" or %0,epsw \n" \ - " nop \n" \ - " nop \n" \ - " bset %2,(%1) \n" \ - : \ - : "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP)\ - : "cc" \ - ); \ -} while (0) - -#define STI or EPSW_IE|EPSW_IM,epsw -#define CLI and ~EPSW_IM,epsw; or EPSW_IE|MN10300_CLI_LEVEL,epsw; nop; nop; nop - /*****************************************************************************/ /* * MN10300 doesn't actually have an exchange instruction diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S index d9ed5a15c54..3d394b4eefb 100644 --- a/arch/mn10300/kernel/entry.S +++ b/arch/mn10300/kernel/entry.S @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/parisc/include/asm/irqflags.h b/arch/parisc/include/asm/irqflags.h new file mode 100644 index 00000000000..34f9cb9b475 --- /dev/null +++ b/arch/parisc/include/asm/irqflags.h @@ -0,0 +1,46 @@ +#ifndef __PARISC_IRQFLAGS_H +#define __PARISC_IRQFLAGS_H + +#include +#include + +static inline unsigned long arch_local_save_flags(void) +{ + unsigned long flags; + asm volatile("ssm 0, %0" : "=r" (flags) : : "memory"); + return flags; +} + +static inline void arch_local_irq_disable(void) +{ + asm volatile("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory"); +} + +static inline void arch_local_irq_enable(void) +{ + asm volatile("ssm %0,%%r0\n" : : "i" (PSW_I) : "memory"); +} + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + asm volatile("rsm %1,%0" : "=r" (flags) : "i" (PSW_I) : "memory"); + return flags; +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ + asm volatile("mtsm %0" : : "r" (flags) : "memory"); +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return (flags & PSW_I) == 0; +} + +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} + +#endif /* __PARISC_IRQFLAGS_H */ diff --git a/arch/parisc/include/asm/system.h b/arch/parisc/include/asm/system.h index 2ab4af58ecb..b19e63a8e84 100644 --- a/arch/parisc/include/asm/system.h +++ b/arch/parisc/include/asm/system.h @@ -1,7 +1,7 @@ #ifndef __PARISC_SYSTEM_H #define __PARISC_SYSTEM_H -#include +#include /* The program status word as bitfields. 
*/ struct pa_psw { @@ -48,23 +48,6 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct * (last) = _switch_to(prev, next); \ } while(0) -/* interrupt control */ -#define local_save_flags(x) __asm__ __volatile__("ssm 0, %0" : "=r" (x) : : "memory") -#define local_irq_disable() __asm__ __volatile__("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory" ) -#define local_irq_enable() __asm__ __volatile__("ssm %0,%%r0\n" : : "i" (PSW_I) : "memory" ) - -#define local_irq_save(x) \ - __asm__ __volatile__("rsm %1,%0" : "=r" (x) :"i" (PSW_I) : "memory" ) -#define local_irq_restore(x) \ - __asm__ __volatile__("mtsm %0" : : "r" (x) : "memory" ) - -#define irqs_disabled() \ -({ \ - unsigned long flags; \ - local_save_flags(flags); \ - (flags & PSW_I) == 0; \ -}) - #define mfctl(reg) ({ \ unsigned long cr; \ __asm__ __volatile__( \ diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index bd100fcf40d..ff08b70b36d 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -16,42 +16,57 @@ extern void timer_interrupt(struct pt_regs *); #ifdef CONFIG_PPC64 #include -static inline unsigned long local_get_flags(void) +static inline unsigned long arch_local_save_flags(void) { unsigned long flags; - __asm__ __volatile__("lbz %0,%1(13)" - : "=r" (flags) - : "i" (offsetof(struct paca_struct, soft_enabled))); + asm volatile( + "lbz %0,%1(13)" + : "=r" (flags) + : "i" (offsetof(struct paca_struct, soft_enabled))); return flags; } -static inline unsigned long raw_local_irq_disable(void) +static inline unsigned long arch_local_irq_disable(void) { unsigned long flags, zero; - __asm__ __volatile__("li %1,0; lbz %0,%2(13); stb %1,%2(13)" - : "=r" (flags), "=&r" (zero) - : "i" (offsetof(struct paca_struct, soft_enabled)) - : "memory"); + asm volatile( + "li %1,0; lbz %0,%2(13); stb %1,%2(13)" + : "=r" (flags), "=&r" (zero) + : "i" (offsetof(struct paca_struct, soft_enabled)) + : "memory"); return flags; } -extern void raw_local_irq_restore(unsigned long); +extern void arch_local_irq_restore(unsigned long); extern void iseries_handle_interrupts(void); -#define raw_local_irq_enable() raw_local_irq_restore(1) -#define raw_local_save_flags(flags) ((flags) = local_get_flags()) -#define raw_local_irq_save(flags) ((flags) = raw_local_irq_disable()) +static inline void arch_local_irq_enable(void) +{ + arch_local_irq_restore(1); +} + +static inline unsigned long arch_local_irq_save(void) +{ + return arch_local_irq_disable(); +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return flags == 0; +} -#define raw_irqs_disabled() (local_get_flags() == 0) -#define raw_irqs_disabled_flags(flags) ((flags) == 0) +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} #ifdef CONFIG_PPC_BOOK3E -#define __hard_irq_enable() __asm__ __volatile__("wrteei 1": : :"memory"); -#define __hard_irq_disable() __asm__ __volatile__("wrteei 0": : :"memory"); +#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory"); +#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory"); #else #define __hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1) #define __hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1) @@ -64,64 +79,66 @@ extern void iseries_handle_interrupts(void); get_paca()->hard_enabled = 0; \ } while(0) -#else +#else /* CONFIG_PPC64 */ -#if defined(CONFIG_BOOKE) #define SET_MSR_EE(x) mtmsr(x) -#define raw_local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) 
: "memory") + +static inline unsigned long arch_local_save_flags(void) +{ + return mfmsr(); +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ +#if defined(CONFIG_BOOKE) + asm volatile("wrtee %0" : : "r" (flags) : "memory"); #else -#define SET_MSR_EE(x) mtmsr(x) -#define raw_local_irq_restore(flags) mtmsr(flags) + mtmsr(flags); #endif +} -static inline void raw_local_irq_disable(void) +static inline unsigned long arch_local_irq_save(void) { + unsigned long flags = arch_local_save_flags(); #ifdef CONFIG_BOOKE - __asm__ __volatile__("wrteei 0": : :"memory"); + asm volatile("wrteei 0" : : : "memory"); #else - unsigned long msr; - - msr = mfmsr(); - SET_MSR_EE(msr & ~MSR_EE); + SET_MSR_EE(flags & ~MSR_EE); #endif + return flags; } -static inline void raw_local_irq_enable(void) +static inline void arch_local_irq_disable(void) { #ifdef CONFIG_BOOKE - __asm__ __volatile__("wrteei 1": : :"memory"); + asm volatile("wrteei 0" : : : "memory"); #else - unsigned long msr; - - msr = mfmsr(); - SET_MSR_EE(msr | MSR_EE); + arch_local_irq_save(); #endif } -static inline void raw_local_irq_save_ptr(unsigned long *flags) +static inline void arch_local_irq_enable(void) { - unsigned long msr; - msr = mfmsr(); - *flags = msr; #ifdef CONFIG_BOOKE - __asm__ __volatile__("wrteei 0": : :"memory"); + asm volatile("wrteei 1" : : : "memory"); #else - SET_MSR_EE(msr & ~MSR_EE); + unsigned long msr = mfmsr(); + SET_MSR_EE(msr | MSR_EE); #endif } -#define raw_local_save_flags(flags) ((flags) = mfmsr()) -#define raw_local_irq_save(flags) raw_local_irq_save_ptr(&flags) -#define raw_irqs_disabled() ((mfmsr() & MSR_EE) == 0) -#define raw_irqs_disabled_flags(flags) (((flags) & MSR_EE) == 0) - -#define hard_irq_disable() raw_local_irq_disable() - -static inline int irqs_disabled_flags(unsigned long flags) +static inline bool arch_irqs_disabled_flags(unsigned long flags) { return (flags & MSR_EE) == 0; } +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} + +#define hard_irq_disable() arch_local_irq_disable() + #endif /* CONFIG_PPC64 */ /* diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h index 5f68ecfdf51..b85d8ddbb66 100644 --- a/arch/powerpc/include/asm/irqflags.h +++ b/arch/powerpc/include/asm/irqflags.h @@ -6,7 +6,7 @@ #ifndef __ASSEMBLY__ /* - * Get definitions for raw_local_save_flags(x), etc. + * Get definitions for arch_local_save_flags(x), etc. */ #include diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index f53029a0155..39b0c48872d 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -818,12 +818,12 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) /* * hash_page couldn't handle it, set soft interrupt enable back - * to what it was before the trap. Note that .raw_local_irq_restore + * to what it was before the trap. Note that .arch_local_irq_restore * handles any interrupts pending at this point. 
*/ ld r3,SOFTE(r1) TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f) - bl .raw_local_irq_restore + bl .arch_local_irq_restore b 11f /* We have a data breakpoint exception - handle it */ diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 4a65386995d..1903290f546 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -116,7 +116,7 @@ static inline notrace void set_soft_enabled(unsigned long enable) : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); } -notrace void raw_local_irq_restore(unsigned long en) +notrace void arch_local_irq_restore(unsigned long en) { /* * get_paca()->soft_enabled = en; @@ -192,7 +192,7 @@ notrace void raw_local_irq_restore(unsigned long en) __hard_irq_enable(); } -EXPORT_SYMBOL(raw_local_irq_restore); +EXPORT_SYMBOL(arch_local_irq_restore); #endif /* CONFIG_PPC64 */ static int show_other_interrupts(struct seq_file *p, int prec) diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h index 15b3ac25389..865d6d891ac 100644 --- a/arch/s390/include/asm/irqflags.h +++ b/arch/s390/include/asm/irqflags.h @@ -8,8 +8,8 @@ #include -/* store then or system mask. */ -#define __raw_local_irq_stosm(__or) \ +/* store then OR system mask. */ +#define __arch_local_irq_stosm(__or) \ ({ \ unsigned long __mask; \ asm volatile( \ @@ -18,8 +18,8 @@ __mask; \ }) -/* store then and system mask. */ -#define __raw_local_irq_stnsm(__and) \ +/* store then AND system mask. */ +#define __arch_local_irq_stnsm(__and) \ ({ \ unsigned long __mask; \ asm volatile( \ @@ -29,39 +29,44 @@ }) /* set system mask. */ -#define __raw_local_irq_ssm(__mask) \ -({ \ - asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \ -}) +static inline void __arch_local_irq_ssm(unsigned long flags) +{ + asm volatile("ssm %0" : : "Q" (flags) : "memory"); +} -/* interrupt control.. 
*/ -static inline unsigned long raw_local_irq_enable(void) +static inline unsigned long arch_local_save_flags(void) { - return __raw_local_irq_stosm(0x03); + return __arch_local_irq_stosm(0x00); } -static inline unsigned long raw_local_irq_disable(void) +static inline unsigned long arch_local_irq_save(void) { - return __raw_local_irq_stnsm(0xfc); + return __arch_local_irq_stnsm(0xfc); } -#define raw_local_save_flags(x) \ -do { \ - typecheck(unsigned long, x); \ - (x) = __raw_local_irq_stosm(0x00); \ -} while (0) +static inline void arch_local_irq_disable(void) +{ + arch_local_irq_save(); +} -static inline void raw_local_irq_restore(unsigned long flags) +static inline void arch_local_irq_enable(void) { - __raw_local_irq_ssm(flags); + __arch_local_irq_stosm(0x03); } -static inline int raw_irqs_disabled_flags(unsigned long flags) +static inline void arch_local_irq_restore(unsigned long flags) +{ + __arch_local_irq_ssm(flags); +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) { return !(flags & (3UL << (BITS_PER_LONG - 8))); } -/* For spinlocks etc */ -#define raw_local_irq_save(x) ((x) = raw_local_irq_disable()) +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} #endif /* __ASM_IRQFLAGS_H */ diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h index cef66210c84..8e8a50eeed9 100644 --- a/arch/s390/include/asm/system.h +++ b/arch/s390/include/asm/system.h @@ -399,7 +399,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, static inline void __set_psw_mask(unsigned long mask) { - __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8))); + __load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8))); } #define local_mcck_enable() __set_psw_mask(psw_kernel_bits) diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c index 559af0d0787..0fbe4e32f7b 100644 --- a/arch/s390/kernel/mem_detect.c +++ b/arch/s390/kernel/mem_detect.c @@ -54,11 +54,11 @@ void detect_memory_layout(struct mem_chunk chunk[]) * right thing and we don't get scheduled away with low address * protection disabled. 
*/ - flags = __raw_local_irq_stnsm(0xf8); + flags = __arch_local_irq_stnsm(0xf8); __ctl_store(cr0, 0, 0); __ctl_clear_bit(0, 28); find_memory_chunks(chunk); __ctl_load(cr0, 0, 0); - __raw_local_irq_ssm(flags); + arch_local_irq_restore(flags); } EXPORT_SYMBOL(detect_memory_layout); diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 30eb6d02ddb..94b8ba2ec85 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -50,7 +50,6 @@ EXPORT_SYMBOL(empty_zero_page); */ void __init paging_init(void) { - static const int ssm_mask = 0x04000000L; unsigned long max_zone_pfns[MAX_NR_ZONES]; unsigned long pgd_type; @@ -72,7 +71,7 @@ void __init paging_init(void) __ctl_load(S390_lowcore.kernel_asce, 1, 1); __ctl_load(S390_lowcore.kernel_asce, 7, 7); __ctl_load(S390_lowcore.kernel_asce, 13, 13); - __raw_local_irq_ssm(ssm_mask); + arch_local_irq_restore(4UL << (BITS_PER_LONG - 8)); atomic_set(&init_mm.context.attach_count, 1); diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index a8c2af8c650..71a4b0d34be 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c @@ -71,7 +71,7 @@ int memcpy_real(void *dest, void *src, size_t count) if (!count) return 0; - flags = __raw_local_irq_stnsm(0xf8UL); + flags = __arch_local_irq_stnsm(0xf8UL); asm volatile ( "0: mvcle %1,%2,0x0\n" "1: jo 0b\n" @@ -82,6 +82,6 @@ int memcpy_real(void *dest, void *src, size_t count) "+d" (_len2), "=m" (*((long *) dest)) : "m" (*((long *) src)) : "cc", "memory"); - __raw_local_irq_ssm(flags); + arch_local_irq_restore(flags); return rc; } diff --git a/arch/score/include/asm/irqflags.h b/arch/score/include/asm/irqflags.h index 690a6cae729..5c7563891e2 100644 --- a/arch/score/include/asm/irqflags.h +++ b/arch/score/include/asm/irqflags.h @@ -3,107 +3,118 @@ #ifndef __ASSEMBLY__ -#define raw_local_irq_save(x) \ -{ \ - __asm__ __volatile__( \ - "mfcr r8, cr0;" \ - "li r9, 0xfffffffe;" \ - "nop;" \ - "mv %0, r8;" \ - "and r8, r8, r9;" \ - "mtcr r8, cr0;" \ - "nop;" \ - "nop;" \ - "nop;" \ - "nop;" \ - "nop;" \ - : "=r" (x) \ - : \ - : "r8", "r9" \ - ); \ +#include + +static inline unsigned long arch_local_save_flags(void) +{ + unsigned long flags; + + asm volatile( + " mfcr r8, cr0 \n" + " nop \n" + " nop \n" + " mv %0, r8 \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + " ldi r9, 0x1 \n" + " and %0, %0, r9 \n" + : "=r" (flags) + : + : "r8", "r9"); + return flags; } -#define raw_local_irq_restore(x) \ -{ \ - __asm__ __volatile__( \ - "mfcr r8, cr0;" \ - "ldi r9, 0x1;" \ - "and %0, %0, r9;" \ - "or r8, r8, %0;" \ - "mtcr r8, cr0;" \ - "nop;" \ - "nop;" \ - "nop;" \ - "nop;" \ - "nop;" \ - : \ - : "r"(x) \ - : "r8", "r9" \ - ); \ +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags + + asm volatile( + " mfcr r8, cr0 \n" + " li r9, 0xfffffffe \n" + " nop \n" + " mv %0, r8 \n" + " and r8, r8, r9 \n" + " mtcr r8, cr0 \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + : "=r" (flags) + : + : "r8", "r9", "memory"); + + return flags; } -#define raw_local_irq_enable(void) \ -{ \ - __asm__ __volatile__( \ - "mfcr\tr8,cr0;" \ - "nop;" \ - "nop;" \ - "ori\tr8,0x1;" \ - "mtcr\tr8,cr0;" \ - "nop;" \ - "nop;" \ - "nop;" \ - "nop;" \ - "nop;" \ - : \ - : \ - : "r8"); \ +static inline void arch_local_irq_restore(unsigned long flags) +{ + asm volatile( + " mfcr r8, cr0 \n" + " ldi r9, 0x1 \n" + " and %0, %0, r9 \n" + " or r8, r8, %0 \n" + " mtcr r8, cr0 \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + : + : "r"(flags) + : "r8", "r9", "memory"); } 
-#define raw_local_irq_disable(void) \ -{ \ - __asm__ __volatile__( \ - "mfcr\tr8,cr0;" \ - "nop;" \ - "nop;" \ - "srli\tr8,r8,1;" \ - "slli\tr8,r8,1;" \ - "mtcr\tr8,cr0;" \ - "nop;" \ - "nop;" \ - "nop;" \ - "nop;" \ - "nop;" \ - : \ - : \ - : "r8"); \ +static inline void arch_local_irq_enable(void) +{ + asm volatile( + " mfcr r8,cr0 \n" + " nop \n" + " nop \n" + " ori r8,0x1 \n" + " mtcr r8,cr0 \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + : + : + : "r8", "memory"); } -#define raw_local_save_flags(x) \ -{ \ - __asm__ __volatile__( \ - "mfcr r8, cr0;" \ - "nop;" \ - "nop;" \ - "mv %0, r8;" \ - "nop;" \ - "nop;" \ - "nop;" \ - "nop;" \ - "nop;" \ - "ldi r9, 0x1;" \ - "and %0, %0, r9;" \ - : "=r" (x) \ - : \ - : "r8", "r9" \ - ); \ +static inline void arch_local_irq_disable(void) +{ + asm volatile( + " mfcr r8,cr0 \n" + " nop \n" + " nop \n" + " srli r8,r8,1 \n" + " slli r8,r8,1 \n" + " mtcr r8,cr0 \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + : + : + : "r8", "memory"); } -static inline int raw_irqs_disabled_flags(unsigned long flags) +static inline bool arch_irqs_disabled_flags(unsigned long flags) { return !(flags & 1); } -#endif +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} + +#endif /* __ASSEMBLY__ */ #endif /* _ASM_SCORE_IRQFLAGS_H */ diff --git a/arch/sh/include/asm/irqflags.h b/arch/sh/include/asm/irqflags.h index a741153b41c..43b7608606c 100644 --- a/arch/sh/include/asm/irqflags.h +++ b/arch/sh/include/asm/irqflags.h @@ -1,8 +1,8 @@ #ifndef __ASM_SH_IRQFLAGS_H #define __ASM_SH_IRQFLAGS_H -#define RAW_IRQ_DISABLED 0xf0 -#define RAW_IRQ_ENABLED 0x00 +#define ARCH_IRQ_DISABLED 0xf0 +#define ARCH_IRQ_ENABLED 0x00 #include diff --git a/arch/sh/kernel/irq_32.c b/arch/sh/kernel/irq_32.c index e33ab15831f..e5a755be912 100644 --- a/arch/sh/kernel/irq_32.c +++ b/arch/sh/kernel/irq_32.c @@ -10,11 +10,11 @@ #include #include -void notrace raw_local_irq_restore(unsigned long flags) +void notrace arch_local_irq_restore(unsigned long flags) { unsigned long __dummy0, __dummy1; - if (flags == RAW_IRQ_DISABLED) { + if (flags == ARCH_IRQ_DISABLED) { __asm__ __volatile__ ( "stc sr, %0\n\t" "or #0xf0, %0\n\t" @@ -33,14 +33,14 @@ void notrace raw_local_irq_restore(unsigned long flags) #endif "ldc %0, sr\n\t" : "=&r" (__dummy0), "=r" (__dummy1) - : "1" (~RAW_IRQ_DISABLED) + : "1" (~ARCH_IRQ_DISABLED) : "memory" ); } } -EXPORT_SYMBOL(raw_local_irq_restore); +EXPORT_SYMBOL(arch_local_irq_restore); -unsigned long notrace __raw_local_save_flags(void) +unsigned long notrace arch_local_save_flags(void) { unsigned long flags; @@ -54,4 +54,4 @@ unsigned long notrace __raw_local_save_flags(void) return flags; } -EXPORT_SYMBOL(__raw_local_save_flags); +EXPORT_SYMBOL(arch_local_save_flags); diff --git a/arch/sparc/include/asm/irqflags_32.h b/arch/sparc/include/asm/irqflags_32.h index 0fca9d97d44..d4d0711de0f 100644 --- a/arch/sparc/include/asm/irqflags_32.h +++ b/arch/sparc/include/asm/irqflags_32.h @@ -5,33 +5,40 @@ * * This file gets included from lowlevel asm headers too, to provide * wrapped versions of the local_irq_*() APIs, based on the - * raw_local_irq_*() functions from the lowlevel headers. + * arch_local_irq_*() functions from the lowlevel headers. 
*/ #ifndef _ASM_IRQFLAGS_H #define _ASM_IRQFLAGS_H #ifndef __ASSEMBLY__ -extern void raw_local_irq_restore(unsigned long); -extern unsigned long __raw_local_irq_save(void); -extern void raw_local_irq_enable(void); +#include -static inline unsigned long getipl(void) +extern void arch_local_irq_restore(unsigned long); +extern unsigned long arch_local_irq_save(void); +extern void arch_local_irq_enable(void); + +static inline unsigned long arch_local_save_flags(void) { - unsigned long retval; + unsigned long flags; + + asm volatile("rd %%psr, %0" : "=r" (flags)); + return flags; +} - __asm__ __volatile__("rd %%psr, %0" : "=r" (retval)); - return retval; +static inline void arch_local_irq_disable(void) +{ + arch_local_irq_save(); } -#define raw_local_save_flags(flags) ((flags) = getipl()) -#define raw_local_irq_save(flags) ((flags) = __raw_local_irq_save()) -#define raw_local_irq_disable() ((void) __raw_local_irq_save()) -#define raw_irqs_disabled() ((getipl() & PSR_PIL) != 0) +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return (flags & PSR_PIL) != 0; +} -static inline int raw_irqs_disabled_flags(unsigned long flags) +static inline bool arch_irqs_disabled(void) { - return ((flags & PSR_PIL) != 0); + return arch_irqs_disabled_flags(arch_local_save_flags()); } #endif /* (__ASSEMBLY__) */ diff --git a/arch/sparc/include/asm/irqflags_64.h b/arch/sparc/include/asm/irqflags_64.h index bfa1ea45b4c..aab969c82c2 100644 --- a/arch/sparc/include/asm/irqflags_64.h +++ b/arch/sparc/include/asm/irqflags_64.h @@ -5,7 +5,7 @@ * * This file gets included from lowlevel asm headers too, to provide * wrapped versions of the local_irq_*() APIs, based on the - * raw_local_irq_*() functions from the lowlevel headers. + * arch_local_irq_*() functions from the lowlevel headers. 
*/ #ifndef _ASM_IRQFLAGS_H #define _ASM_IRQFLAGS_H @@ -14,7 +14,7 @@ #ifndef __ASSEMBLY__ -static inline unsigned long __raw_local_save_flags(void) +static inline unsigned long arch_local_save_flags(void) { unsigned long flags; @@ -26,10 +26,7 @@ static inline unsigned long __raw_local_save_flags(void) return flags; } -#define raw_local_save_flags(flags) \ - do { (flags) = __raw_local_save_flags(); } while (0) - -static inline void raw_local_irq_restore(unsigned long flags) +static inline void arch_local_irq_restore(unsigned long flags) { __asm__ __volatile__( "wrpr %0, %%pil" @@ -39,7 +36,7 @@ static inline void raw_local_irq_restore(unsigned long flags) ); } -static inline void raw_local_irq_disable(void) +static inline void arch_local_irq_disable(void) { __asm__ __volatile__( "wrpr %0, %%pil" @@ -49,7 +46,7 @@ static inline void raw_local_irq_disable(void) ); } -static inline void raw_local_irq_enable(void) +static inline void arch_local_irq_enable(void) { __asm__ __volatile__( "wrpr 0, %%pil" @@ -59,22 +56,17 @@ static inline void raw_local_irq_enable(void) ); } -static inline int raw_irqs_disabled_flags(unsigned long flags) +static inline int arch_irqs_disabled_flags(unsigned long flags) { return (flags > 0); } -static inline int raw_irqs_disabled(void) +static inline int arch_irqs_disabled(void) { - unsigned long flags = __raw_local_save_flags(); - - return raw_irqs_disabled_flags(flags); + return arch_irqs_disabled_flags(arch_local_save_flags()); } -/* - * For spinlocks, etc: - */ -static inline unsigned long __raw_local_irq_save(void) +static inline unsigned long arch_local_irq_save(void) { unsigned long flags, tmp; @@ -100,9 +92,6 @@ static inline unsigned long __raw_local_irq_save(void) return flags; } -#define raw_local_irq_save(flags) \ - do { (flags) = __raw_local_irq_save(); } while (0) - #endif /* (__ASSEMBLY__) */ #endif /* !(_ASM_IRQFLAGS_H) */ diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c index e1af4372832..0116d8d10de 100644 --- a/arch/sparc/kernel/irq_32.c +++ b/arch/sparc/kernel/irq_32.c @@ -57,7 +57,7 @@ #define SMP_NOP2 #define SMP_NOP3 #endif /* SMP */ -unsigned long __raw_local_irq_save(void) +unsigned long arch_local_irq_save(void) { unsigned long retval; unsigned long tmp; @@ -74,8 +74,9 @@ unsigned long __raw_local_irq_save(void) return retval; } +EXPORT_SYMBOL(arch_local_irq_save); -void raw_local_irq_enable(void) +void arch_local_irq_enable(void) { unsigned long tmp; @@ -89,8 +90,9 @@ void raw_local_irq_enable(void) : "i" (PSR_PIL) : "memory"); } +EXPORT_SYMBOL(arch_local_irq_enable); -void raw_local_irq_restore(unsigned long old_psr) +void arch_local_irq_restore(unsigned long old_psr) { unsigned long tmp; @@ -105,10 +107,7 @@ void raw_local_irq_restore(unsigned long old_psr) : "i" (PSR_PIL), "r" (old_psr) : "memory"); } - -EXPORT_SYMBOL(__raw_local_irq_save); -EXPORT_SYMBOL(raw_local_irq_enable); -EXPORT_SYMBOL(raw_local_irq_restore); +EXPORT_SYMBOL(arch_local_irq_restore); /* * Dave Redman (djhr@tadpole.co.uk) diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c index fa6e4e219b9..d9850c2b9bf 100644 --- a/arch/sparc/prom/p1275.c +++ b/arch/sparc/prom/p1275.c @@ -39,7 +39,7 @@ void p1275_cmd_direct(unsigned long *args) unsigned long flags; raw_local_save_flags(flags); - raw_local_irq_restore(PIL_NMI); + raw_local_irq_restore((unsigned long)PIL_NMI); raw_spin_lock(&prom_entry_lock); prom_world(1); diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h index 45cf67c2f28..a11d4837ee4 100644 --- 
a/arch/tile/include/asm/irqflags.h +++ b/arch/tile/include/asm/irqflags.h @@ -103,55 +103,57 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); #define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR) /* Disable interrupts. */ -#define raw_local_irq_disable() \ +#define arch_local_irq_disable() \ interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS) /* Disable all interrupts, including NMIs. */ -#define raw_local_irq_disable_all() \ +#define arch_local_irq_disable_all() \ interrupt_mask_set_mask(-1UL) /* Re-enable all maskable interrupts. */ -#define raw_local_irq_enable() \ +#define arch_local_irq_enable() \ interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask)) /* Disable or enable interrupts based on flag argument. */ -#define raw_local_irq_restore(disabled) do { \ +#define arch_local_irq_restore(disabled) do { \ if (disabled) \ - raw_local_irq_disable(); \ + arch_local_irq_disable(); \ else \ - raw_local_irq_enable(); \ + arch_local_irq_enable(); \ } while (0) /* Return true if "flags" argument means interrupts are disabled. */ -#define raw_irqs_disabled_flags(flags) ((flags) != 0) +#define arch_irqs_disabled_flags(flags) ((flags) != 0) /* Return true if interrupts are currently disabled. */ -#define raw_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR) +#define arch_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR) /* Save whether interrupts are currently disabled. */ -#define raw_local_save_flags(flags) ((flags) = raw_irqs_disabled()) +#define arch_local_save_flags() arch_irqs_disabled() /* Save whether interrupts are currently disabled, then disable them. */ -#define raw_local_irq_save(flags) \ - do { raw_local_save_flags(flags); raw_local_irq_disable(); } while (0) +#define arch_local_irq_save() ({ \ + unsigned long __flags = arch_local_save_flags(); \ + arch_local_irq_disable(); \ + __flags; }) /* Prevent the given interrupt from being enabled next time we enable irqs. */ -#define raw_local_irq_mask(interrupt) \ +#define arch_local_irq_mask(interrupt) \ (__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt)) /* Prevent the given interrupt from being enabled immediately. */ -#define raw_local_irq_mask_now(interrupt) do { \ - raw_local_irq_mask(interrupt); \ +#define arch_local_irq_mask_now(interrupt) do { \ + arch_local_irq_mask(interrupt); \ interrupt_mask_set(interrupt); \ } while (0) /* Allow the given interrupt to be enabled next time we enable irqs. */ -#define raw_local_irq_unmask(interrupt) \ +#define arch_local_irq_unmask(interrupt) \ (__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt)) /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. 
*/ -#define raw_local_irq_unmask_now(interrupt) do { \ - raw_local_irq_unmask(interrupt); \ +#define arch_local_irq_unmask_now(interrupt) do { \ + arch_local_irq_unmask(interrupt); \ if (!irqs_disabled()) \ interrupt_mask_reset(interrupt); \ } while (0) diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index 9e2b952f810..5745ce8bf10 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h @@ -61,22 +61,22 @@ static inline void native_halt(void) #else #ifndef __ASSEMBLY__ -static inline unsigned long __raw_local_save_flags(void) +static inline unsigned long arch_local_save_flags(void) { return native_save_fl(); } -static inline void raw_local_irq_restore(unsigned long flags) +static inline void arch_local_irq_restore(unsigned long flags) { native_restore_fl(flags); } -static inline void raw_local_irq_disable(void) +static inline void arch_local_irq_disable(void) { native_irq_disable(); } -static inline void raw_local_irq_enable(void) +static inline void arch_local_irq_enable(void) { native_irq_enable(); } @@ -85,7 +85,7 @@ static inline void raw_local_irq_enable(void) * Used in the idle loop; sti takes one instruction cycle * to complete: */ -static inline void raw_safe_halt(void) +static inline void arch_safe_halt(void) { native_safe_halt(); } @@ -102,12 +102,10 @@ static inline void halt(void) /* * For spinlocks, etc: */ -static inline unsigned long __raw_local_irq_save(void) +static inline unsigned long arch_local_irq_save(void) { - unsigned long flags = __raw_local_save_flags(); - - raw_local_irq_disable(); - + unsigned long flags = arch_local_save_flags(); + arch_local_irq_disable(); return flags; } #else @@ -153,22 +151,16 @@ static inline unsigned long __raw_local_irq_save(void) #endif /* CONFIG_PARAVIRT */ #ifndef __ASSEMBLY__ -#define raw_local_save_flags(flags) \ - do { (flags) = __raw_local_save_flags(); } while (0) - -#define raw_local_irq_save(flags) \ - do { (flags) = __raw_local_irq_save(); } while (0) - -static inline int raw_irqs_disabled_flags(unsigned long flags) +static inline int arch_irqs_disabled_flags(unsigned long flags) { return !(flags & X86_EFLAGS_IF); } -static inline int raw_irqs_disabled(void) +static inline int arch_irqs_disabled(void) { - unsigned long flags = __raw_local_save_flags(); + unsigned long flags = arch_local_save_flags(); - return raw_irqs_disabled_flags(flags); + return arch_irqs_disabled_flags(flags); } #else diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 5653f43d90e..499954c530d 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -105,7 +105,7 @@ static inline void write_cr8(unsigned long x) } #endif -static inline void raw_safe_halt(void) +static inline void arch_safe_halt(void) { PVOP_VCALL0(pv_irq_ops.safe_halt); } @@ -829,32 +829,32 @@ static __always_inline void arch_spin_unlock(struct arch_spinlock *lock) #define __PV_IS_CALLEE_SAVE(func) \ ((struct paravirt_callee_save) { func }) -static inline unsigned long __raw_local_save_flags(void) +static inline unsigned long arch_local_save_flags(void) { return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl); } -static inline void raw_local_irq_restore(unsigned long f) +static inline void arch_local_irq_restore(unsigned long f) { PVOP_VCALLEE1(pv_irq_ops.restore_fl, f); } -static inline void raw_local_irq_disable(void) +static inline void arch_local_irq_disable(void) { PVOP_VCALLEE0(pv_irq_ops.irq_disable); } -static inline void raw_local_irq_enable(void) +static inline 
void arch_local_irq_enable(void) { PVOP_VCALLEE0(pv_irq_ops.irq_enable); } -static inline unsigned long __raw_local_irq_save(void) +static inline unsigned long arch_local_irq_save(void) { unsigned long f; - f = __raw_local_save_flags(); - raw_local_irq_disable(); + f = arch_local_save_flags(); + arch_local_irq_disable(); return f; } diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index e0500646585..23e061b9327 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -224,7 +224,7 @@ static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enab goto out; } - flags = __raw_local_save_flags(); + flags = arch_local_save_flags(); if (irq_enable) { ADD_STATS(taken_slow_irqenable, 1); raw_local_irq_enable(); diff --git a/arch/xtensa/include/asm/irqflags.h b/arch/xtensa/include/asm/irqflags.h new file mode 100644 index 00000000000..dae9a8bdcb1 --- /dev/null +++ b/arch/xtensa/include/asm/irqflags.h @@ -0,0 +1,58 @@ +/* + * Xtensa IRQ flags handling functions + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_IRQFLAGS_H +#define _XTENSA_IRQFLAGS_H + +#include + +static inline unsigned long arch_local_save_flags(void) +{ + unsigned long flags; + asm volatile("rsr %0,"__stringify(PS) : "=a" (flags)); + return flags; +} + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + asm volatile("rsil %0, "__stringify(LOCKLEVEL) + : "=a" (flags) :: "memory"); + return flags; +} + +static inline void arch_local_irq_disable(void) +{ + arch_local_irq_save(); +} + +static inline void arch_local_irq_enable(void) +{ + unsigned long flags; + asm volatile("rsil %0, 0" : "=a" (flags) :: "memory"); +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ + asm volatile("wsr %0, "__stringify(PS)" ; rsync" + :: "a" (flags) : "memory"); +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return (flags & 0xf) != 0; +} + +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} + +#endif /* _XTENSA_IRQFLAGS_H */ diff --git a/arch/xtensa/include/asm/system.h b/arch/xtensa/include/asm/system.h index 62b1e8f3c13..1e7e09ab6cd 100644 --- a/arch/xtensa/include/asm/system.h +++ b/arch/xtensa/include/asm/system.h @@ -12,41 +12,10 @@ #define _XTENSA_SYSTEM_H #include +#include #include -/* interrupt control */ - -#define local_save_flags(x) \ - __asm__ __volatile__ ("rsr %0,"__stringify(PS) : "=a" (x)); -#define local_irq_restore(x) do { \ - __asm__ __volatile__ ("wsr %0, "__stringify(PS)" ; rsync" \ - :: "a" (x) : "memory"); } while(0); -#define local_irq_save(x) do { \ - __asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL) \ - : "=a" (x) :: "memory");} while(0); - -static inline void local_irq_disable(void) -{ - unsigned long flags; - __asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL) - : "=a" (flags) :: "memory"); -} -static inline void local_irq_enable(void) -{ - unsigned long flags; - __asm__ __volatile__ ("rsil %0, 0" : "=a" (flags) :: "memory"); - -} - -static inline int irqs_disabled(void) -{ - unsigned long flags; - local_save_flags(flags); - return flags & 0xf; -} - - #define smp_read_barrier_depends() do { } while(0) #define read_barrier_depends() do { } while(0) diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index 
f6d72e1f2a3..5707a80b96b 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -468,7 +468,7 @@ sclp_sync_wait(void) cr0_sync &= 0xffff00a0; cr0_sync |= 0x00000200; __ctl_load(cr0_sync, 0, 0); - __raw_local_irq_stosm(0x01); + __arch_local_irq_stosm(0x01); /* Loop until driver state indicates finished request */ while (sclp_running_state != sclp_running_state_idle) { /* Check for expired request timer */ diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index e53347fbf1d..fd57b8477fa 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -43,6 +43,7 @@ */ #define atomic_set(v, i) (((v)->counter) = (i)) +#include #include /** @@ -57,7 +58,7 @@ static inline int atomic_add_return(int i, atomic_t *v) unsigned long flags; int temp; - raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */ + raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */ temp = v->counter; temp += i; v->counter = temp; @@ -78,7 +79,7 @@ static inline int atomic_sub_return(int i, atomic_t *v) unsigned long flags; int temp; - raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */ + raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */ temp = v->counter; temp -= i; v->counter = temp; diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h index b2ba2fc8829..2533fddd34a 100644 --- a/include/asm-generic/cmpxchg-local.h +++ b/include/asm-generic/cmpxchg-local.h @@ -2,6 +2,7 @@ #define __ASM_GENERIC_CMPXCHG_LOCAL_H #include +#include extern unsigned long wrong_size_cmpxchg(volatile void *ptr); diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h index 62f59080e5c..c0771aa248c 100644 --- a/include/asm-generic/hardirq.h +++ b/include/asm-generic/hardirq.h @@ -3,7 +3,6 @@ #include #include -#include typedef struct { unsigned int __softirq_pending; diff --git a/include/asm-generic/irqflags.h b/include/asm-generic/irqflags.h index 9aebf618275..1f40d0024cf 100644 --- a/include/asm-generic/irqflags.h +++ b/include/asm-generic/irqflags.h @@ -5,68 +5,62 @@ * All architectures should implement at least the first two functions, * usually inline assembly will be the best way. 
*/ -#ifndef RAW_IRQ_DISABLED -#define RAW_IRQ_DISABLED 0 -#define RAW_IRQ_ENABLED 1 +#ifndef ARCH_IRQ_DISABLED +#define ARCH_IRQ_DISABLED 0 +#define ARCH_IRQ_ENABLED 1 #endif /* read interrupt enabled status */ -#ifndef __raw_local_save_flags -unsigned long __raw_local_save_flags(void); +#ifndef arch_local_save_flags +unsigned long arch_local_save_flags(void); #endif /* set interrupt enabled status */ -#ifndef raw_local_irq_restore -void raw_local_irq_restore(unsigned long flags); +#ifndef arch_local_irq_restore +void arch_local_irq_restore(unsigned long flags); #endif /* get status and disable interrupts */ -#ifndef __raw_local_irq_save -static inline unsigned long __raw_local_irq_save(void) +#ifndef arch_local_irq_save +static inline unsigned long arch_local_irq_save(void) { unsigned long flags; - flags = __raw_local_save_flags(); - raw_local_irq_restore(RAW_IRQ_DISABLED); + flags = arch_local_save_flags(); + arch_local_irq_restore(ARCH_IRQ_DISABLED); return flags; } #endif /* test flags */ -#ifndef raw_irqs_disabled_flags -static inline int raw_irqs_disabled_flags(unsigned long flags) +#ifndef arch_irqs_disabled_flags +static inline int arch_irqs_disabled_flags(unsigned long flags) { - return flags == RAW_IRQ_DISABLED; + return flags == ARCH_IRQ_DISABLED; } #endif /* unconditionally enable interrupts */ -#ifndef raw_local_irq_enable -static inline void raw_local_irq_enable(void) +#ifndef arch_local_irq_enable +static inline void arch_local_irq_enable(void) { - raw_local_irq_restore(RAW_IRQ_ENABLED); + arch_local_irq_restore(ARCH_IRQ_ENABLED); } #endif /* unconditionally disable interrupts */ -#ifndef raw_local_irq_disable -static inline void raw_local_irq_disable(void) +#ifndef arch_local_irq_disable +static inline void arch_local_irq_disable(void) { - raw_local_irq_restore(RAW_IRQ_DISABLED); + arch_local_irq_restore(ARCH_IRQ_DISABLED); } #endif /* test hardware interrupt enable bit */ -#ifndef raw_irqs_disabled -static inline int raw_irqs_disabled(void) +#ifndef arch_irqs_disabled +static inline int arch_irqs_disabled(void) { - return raw_irqs_disabled_flags(__raw_local_save_flags()); + return arch_irqs_disabled_flags(arch_local_save_flags()); } #endif -#define raw_local_save_flags(flags) \ - do { (flags) = __raw_local_save_flags(); } while (0) - -#define raw_local_irq_save(flags) \ - do { (flags) = __raw_local_irq_save(); } while (0) - #endif /* __ASM_GENERIC_IRQFLAGS_H */ diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 006bf45eae3..d176d658fe2 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -12,6 +12,7 @@ #define _LINUX_TRACE_IRQFLAGS_H #include +#include #ifdef CONFIG_TRACE_IRQFLAGS extern void trace_softirqs_on(unsigned long ip); @@ -52,17 +53,45 @@ # define start_critical_timings() do { } while (0) #endif -#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT - -#include +/* + * Wrap the arch provided IRQ routines to provide appropriate checks. 
+ */ +#define raw_local_irq_disable() arch_local_irq_disable() +#define raw_local_irq_enable() arch_local_irq_enable() +#define raw_local_irq_save(flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = arch_local_irq_save(); \ + } while (0) +#define raw_local_irq_restore(flags) \ + do { \ + typecheck(unsigned long, flags); \ + arch_local_irq_restore(flags); \ + } while (0) +#define raw_local_save_flags(flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = arch_local_save_flags(); \ + } while (0) +#define raw_irqs_disabled_flags(flags) \ + ({ \ + typecheck(unsigned long, flags); \ + arch_irqs_disabled_flags(flags); \ + }) +#define raw_irqs_disabled() (arch_irqs_disabled()) +#define raw_safe_halt() arch_safe_halt() +/* + * The local_irq_*() APIs are equal to the raw_local_irq*() + * if !TRACE_IRQFLAGS. + */ +#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT #define local_irq_enable() \ do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) #define local_irq_disable() \ do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) #define local_irq_save(flags) \ do { \ - typecheck(unsigned long, flags); \ raw_local_irq_save(flags); \ trace_hardirqs_off(); \ } while (0) @@ -70,7 +99,6 @@ #define local_irq_restore(flags) \ do { \ - typecheck(unsigned long, flags); \ if (raw_irqs_disabled_flags(flags)) { \ raw_local_irq_restore(flags); \ trace_hardirqs_off(); \ @@ -79,51 +107,44 @@ raw_local_irq_restore(flags); \ } \ } while (0) -#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ -/* - * The local_irq_*() APIs are equal to the raw_local_irq*() - * if !TRACE_IRQFLAGS. - */ -# define raw_local_irq_disable() local_irq_disable() -# define raw_local_irq_enable() local_irq_enable() -# define raw_local_irq_save(flags) \ - do { \ - typecheck(unsigned long, flags); \ - local_irq_save(flags); \ - } while (0) -# define raw_local_irq_restore(flags) \ +#define local_save_flags(flags) \ do { \ - typecheck(unsigned long, flags); \ - local_irq_restore(flags); \ + raw_local_save_flags(flags); \ } while (0) -#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ -#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT -#define safe_halt() \ - do { \ - trace_hardirqs_on(); \ - raw_safe_halt(); \ - } while (0) +#define irqs_disabled_flags(flags) \ + ({ \ + raw_irqs_disabled_flags(flags); \ + }) -#define local_save_flags(flags) \ - do { \ - typecheck(unsigned long, flags); \ - raw_local_save_flags(flags); \ +#define irqs_disabled() \ + ({ \ + unsigned long _flags; \ + raw_local_save_flags(_flags); \ + raw_irqs_disabled_flags(_flags); \ + }) + +#define safe_halt() \ + do { \ + trace_hardirqs_on(); \ + raw_safe_halt(); \ } while (0) -#define irqs_disabled() \ -({ \ - unsigned long _flags; \ - \ - raw_local_save_flags(_flags); \ - raw_irqs_disabled_flags(_flags); \ -}) -#define irqs_disabled_flags(flags) \ -({ \ - typecheck(unsigned long, flags); \ - raw_irqs_disabled_flags(flags); \ -}) +#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ + +#define local_irq_enable() do { raw_local_irq_enable(); } while (0) +#define local_irq_disable() do { raw_local_irq_disable(); } while (0) +#define local_irq_save(flags) \ + do { \ + raw_local_irq_save(flags); \ + } while (0) +#define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0) +#define local_save_flags(flags) do { raw_local_save_flags(flags); } while (0) +#define irqs_disabled() (raw_irqs_disabled()) +#define irqs_disabled_flags(flags) (raw_irqs_disabled_flags(flags)) +#define safe_halt() do { raw_safe_halt(); } while (0) + #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ #endif diff 
--git a/include/linux/spinlock.h b/include/linux/spinlock.h index f8854655860..80e535897de 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -50,6 +50,7 @@ #include #include #include +#include #include #include #include -- cgit v1.2.3-70-g09d2 From a416e9e1dde0fbcf20cda59df284cc0dcf2aadc4 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 29 Sep 2010 23:29:48 +0900 Subject: x86-32: Fix sparse warning for the __PHYSICAL_MASK calculation On 32-bit non-PAE system, cast to 'phys_addr_t' truncates value before subtraction. Subtracting before cast produce same result but remove following warnings from sparse: arch/x86/include/asm/pgtable_types.h:255:38: warning: cast truncates bits from constant value (100000000 becomes 0) arch/x86/include/asm/pgtable_types.h:270:38: warning: cast truncates bits from constant value (100000000 becomes 0) arch/x86/include/asm/pgtable.h:127:32: warning: cast truncates bits from constant value (100000000 becomes 0) arch/x86/include/asm/pgtable.h:132:32: warning: cast truncates bits from constant value (100000000 becomes 0) arch/x86/include/asm/pgtable.h:344:31: warning: cast truncates bits from constant value (100000000 becomes 0) 64-bit or PAE machines will not be affected by this change. Signed-off-by: Namhyung Kim LKML-Reference: <1285770588-14065-1-git-send-email-namhyung@gmail.com> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/page_types.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index a667f24c725..1df66211fd1 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -8,7 +8,7 @@ #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) -#define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1) +#define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1)) #define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) /* Cast PAGE_MASK to a signed type so that it is sign-extended if -- cgit v1.2.3-70-g09d2 From 55572b293b3a5929e8c54bc91d14ae6264186bf6 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Thu, 7 Oct 2010 16:42:54 -0700 Subject: x86, mrst: A function in a header file needs to be marked "inline" A function in a header file needs to be explicitly marked "inline", or gcc will complain if it is not used. Signed-off-by: H. Peter Anvin Cc: Jacob Pan Cc: v2.6.36 LKML-Reference: <1274295685-6774-3-git-send-email-jacob.jun.pan@linux.intel.com> --- arch/x86/include/asm/mrst.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h index 16350740edf..33fc2966beb 100644 --- a/arch/x86/include/asm/mrst.h +++ b/arch/x86/include/asm/mrst.h @@ -26,7 +26,7 @@ enum mrst_cpu_type { }; extern enum mrst_cpu_type __mrst_cpu_chip; -static enum mrst_cpu_type mrst_identify_cpu(void) +static inline enum mrst_cpu_type mrst_identify_cpu(void) { return __mrst_cpu_chip; } -- cgit v1.2.3-70-g09d2 From 5a47c7dae861c3ca3edf178546641909851bf715 Mon Sep 17 00:00:00 2001 From: Feng Tang Date: Mon, 13 Sep 2010 15:08:54 +0800 Subject: x86: Add two helper macros for fixed address mapping Sometimes fixmap will be used to map an physical address which is not PAGE align, so to use it we need first map it and then add the address offset to the mapped fixed address. These 2 new helpers are suggested by Ingo Molnar to make the process simpler. 
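Before these helpers, callers had to open-code both steps themselves. A minimal sketch of that pattern (FIX_EXAMPLE is a hypothetical fixmap slot used only for illustration, and phys a possibly non-page-aligned physical address):

        /* map the page containing phys, then add back the sub-page offset */
        set_fixmap_nocache(FIX_EXAMPLE, phys);
        virt = (void *)(fix_to_virt(FIX_EXAMPLE) + (phys & (PAGE_SIZE - 1)));

The new macros fold both steps into a single expression.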
For a physicall address like "phys", a directly usable virtual address can be get by virt = (void *)set_fixmap_offset(fixed_idx, phys); or virt = (void *)set_fixmap_offset_nocache(fixed_idx, phys); (depends on whether the physical address is cachable or not). Signed-off-by: Feng Tang Cc: alan@linux.intel.com Cc: greg@kroah.com Cc: x86@kernel.org LKML-Reference: <1284361736-23011-3-git-send-email-feng.tang@intel.com> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/fixmap.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index d07b44f7d1d..4d293dced62 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -214,5 +214,20 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr) BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); return __virt_to_fix(vaddr); } + +/* Return an pointer with offset calculated */ +static inline unsigned long __set_fixmap_offset(enum fixed_addresses idx, + phys_addr_t phys, pgprot_t flags) +{ + __set_fixmap(idx, phys, flags); + return fix_to_virt(idx) + (phys & (PAGE_SIZE - 1)); +} + +#define set_fixmap_offset(idx, phys) \ + __set_fixmap_offset(idx, phys, PAGE_KERNEL) + +#define set_fixmap_offset_nocache(idx, phys) \ + __set_fixmap_offset(idx, phys, PAGE_KERNEL_NOCACHE) + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_X86_FIXMAP_H */ -- cgit v1.2.3-70-g09d2 From c20b5c3318fe45e4f33f01a91ccead645dfdf619 Mon Sep 17 00:00:00 2001 From: Feng Tang Date: Mon, 13 Sep 2010 15:08:55 +0800 Subject: x86, earlyprintk: Add earlyprintk for Intel Moorestown platform Intel Moorestown platform has a spi-uart device(Maxim3110), which connects to a Designware spi core controller. This patch will add early console function based on it. As it will be used long before Linux spi subsystem get initialised, we simply directly manipulate the spi controller's register to acheive the early console func. This is safe as it will be disabled when devices subsytem get initialised. To use it, user need enable CONFIG_X86_MRST_EARLY_PRINTK in kenrel config and add "earlyprintk=mrst" in kernel command line. Signed-off-by: Feng Tang Acked-by: Alan Cox Cc: greg@kroah.com LKML-Reference: <1284361736-23011-4-git-send-email-feng.tang@intel.com> Signed-off-by: Ingo Molnar --- arch/x86/Kconfig.debug | 4 + arch/x86/include/asm/mrst.h | 5 + arch/x86/kernel/Makefile | 1 + arch/x86/kernel/early_printk.c | 7 ++ arch/x86/kernel/early_printk_mrst.c | 232 ++++++++++++++++++++++++++++++++++++ 5 files changed, 249 insertions(+) create mode 100644 arch/x86/kernel/early_printk_mrst.c (limited to 'arch/x86/include') diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 75085080b63..e5bb96b10f1 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -43,6 +43,10 @@ config EARLY_PRINTK with klogd/syslogd or the X server. You should normally N here, unless you want to debug such a crash. 
+config EARLY_PRINTK_MRST + bool "Early printk for MRST platform support" + depends on EARLY_PRINTK && X86_MRST + config EARLY_PRINTK_DBGP bool "Early printk via EHCI debug port" depends on EARLY_PRINTK && PCI diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h index 33fc2966beb..d0ea5bc505a 100644 --- a/arch/x86/include/asm/mrst.h +++ b/arch/x86/include/asm/mrst.h @@ -10,6 +10,9 @@ */ #ifndef _ASM_X86_MRST_H #define _ASM_X86_MRST_H + +#include + extern int pci_mrst_init(void); int __init sfi_parse_mrtc(struct sfi_table_header *table); @@ -42,4 +45,6 @@ extern enum mrst_timer_options mrst_timer_options; #define SFI_MTMR_MAX_NUM 8 #define SFI_MRTC_MAX 8 +extern struct console early_mrst_console; +extern void mrst_early_console_init(void); #endif /* _ASM_X86_MRST_H */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index fedf32a8c3e..3cd01d04613 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -85,6 +85,7 @@ obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_VM86) += vm86_32.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o +obj-$(CONFIG_EARLY_PRINTK_MRST) += early_printk_mrst.o obj-$(CONFIG_HPET_TIMER) += hpet.o obj-$(CONFIG_APB_TIMER) += apb_timer.o diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index fa99bae75ac..6082463768a 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -238,6 +239,12 @@ static int __init setup_early_printk(char *buf) #ifdef CONFIG_HVC_XEN if (!strncmp(buf, "xen", 3)) early_console_register(&xenboot_console, keep); +#endif +#ifdef CONFIG_X86_MRST_EARLY_PRINTK + if (!strncmp(buf, "mrst", 4)) { + mrst_early_console_init(); + early_console_register(&early_mrst_console, keep); + } #endif buf++; } diff --git a/arch/x86/kernel/early_printk_mrst.c b/arch/x86/kernel/early_printk_mrst.c new file mode 100644 index 00000000000..05d27e1e2b6 --- /dev/null +++ b/arch/x86/kernel/early_printk_mrst.c @@ -0,0 +1,232 @@ +/* + * early_printk_mrst.c - spi-uart early printk for Intel Moorestown platform + * + * Copyright (c) 2008-2010, Intel Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. 
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#define MRST_SPI_TIMEOUT 0x200000 +#define MRST_REGBASE_SPI0 0xff128000 +#define MRST_REGBASE_SPI1 0xff128400 +#define MRST_CLK_SPI0_REG 0xff11d86c + +/* Bit fields in CTRLR0 */ +#define SPI_DFS_OFFSET 0 + +#define SPI_FRF_OFFSET 4 +#define SPI_FRF_SPI 0x0 +#define SPI_FRF_SSP 0x1 +#define SPI_FRF_MICROWIRE 0x2 +#define SPI_FRF_RESV 0x3 + +#define SPI_MODE_OFFSET 6 +#define SPI_SCPH_OFFSET 6 +#define SPI_SCOL_OFFSET 7 +#define SPI_TMOD_OFFSET 8 +#define SPI_TMOD_TR 0x0 /* xmit & recv */ +#define SPI_TMOD_TO 0x1 /* xmit only */ +#define SPI_TMOD_RO 0x2 /* recv only */ +#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */ + +#define SPI_SLVOE_OFFSET 10 +#define SPI_SRL_OFFSET 11 +#define SPI_CFS_OFFSET 12 + +/* Bit fields in SR, 7 bits */ +#define SR_MASK 0x7f /* cover 7 bits */ +#define SR_BUSY (1 << 0) +#define SR_TF_NOT_FULL (1 << 1) +#define SR_TF_EMPT (1 << 2) +#define SR_RF_NOT_EMPT (1 << 3) +#define SR_RF_FULL (1 << 4) +#define SR_TX_ERR (1 << 5) +#define SR_DCOL (1 << 6) + +struct dw_spi_reg { + u32 ctrl0; + u32 ctrl1; + u32 ssienr; + u32 mwcr; + u32 ser; + u32 baudr; + u32 txfltr; + u32 rxfltr; + u32 txflr; + u32 rxflr; + u32 sr; + u32 imr; + u32 isr; + u32 risr; + u32 txoicr; + u32 rxoicr; + u32 rxuicr; + u32 msticr; + u32 icr; + u32 dmacr; + u32 dmatdlr; + u32 dmardlr; + u32 idr; + u32 version; + + /* Currently operates as 32 bits, though only the low 16 bits matter */ + u32 dr; +} __packed; + +#define dw_readl(dw, name) __raw_readl(&(dw)->name) +#define dw_writel(dw, name, val) __raw_writel((val), &(dw)->name) + +/* Default use SPI0 register for mrst, we will detect Penwell and use SPI1 */ +static unsigned long mrst_spi_paddr = MRST_REGBASE_SPI0; + +static u32 *pclk_spi0; +/* Always contains an accessable address, start with 0 */ +static struct dw_spi_reg *pspi; + +static struct kmsg_dumper dw_dumper; +static int dumper_registered; + +static void dw_kmsg_dump(struct kmsg_dumper *dumper, + enum kmsg_dump_reason reason, + const char *s1, unsigned long l1, + const char *s2, unsigned long l2) +{ + int i; + + /* When run to this, we'd better re-init the HW */ + mrst_early_console_init(); + + for (i = 0; i < l1; i++) + early_mrst_console.write(&early_mrst_console, s1 + i, 1); + for (i = 0; i < l2; i++) + early_mrst_console.write(&early_mrst_console, s2 + i, 1); +} + +/* Set the ratio rate to 115200, 8n1, IRQ disabled */ +static void max3110_write_config(void) +{ + u16 config; + + config = 0xc001; + dw_writel(pspi, dr, config); +} + +/* Translate char to a eligible word and send to max3110 */ +static void max3110_write_data(char c) +{ + u16 data; + + data = 0x8000 | c; + dw_writel(pspi, dr, data); +} + +void mrst_early_console_init(void) +{ + u32 ctrlr0 = 0; + u32 spi0_cdiv; + u32 freq; /* Freqency info only need be searched once */ + + /* Base clk is 100 MHz, the actual clk = 100M / (clk_divider + 1) */ + pclk_spi0 = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE, + MRST_CLK_SPI0_REG); + spi0_cdiv = ((*pclk_spi0) & 0xe00) >> 9; + freq = 100000000 / (spi0_cdiv + 1); + + if (mrst_identify_cpu() == MRST_CPU_CHIP_PENWELL) + mrst_spi_paddr = MRST_REGBASE_SPI1; + + pspi = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE, + mrst_spi_paddr); + + /* Disable SPI controller */ + dw_writel(pspi, ssienr, 0); + + /* Set control param, 8 bits, transmit only mode */ + ctrlr0 = dw_readl(pspi, ctrl0); + + ctrlr0 &= 0xfcc0; + ctrlr0 |= 0xf | (SPI_FRF_SPI << SPI_FRF_OFFSET) + | (SPI_TMOD_TO << 
SPI_TMOD_OFFSET); + dw_writel(pspi, ctrl0, ctrlr0); + + /* + * Change the spi0 clk to comply with 115200 bps, use 100000 to + * calculate the clk dividor to make the clock a little slower + * than real baud rate. + */ + dw_writel(pspi, baudr, freq/100000); + + /* Disable all INT for early phase */ + dw_writel(pspi, imr, 0x0); + + /* Set the cs to spi-uart */ + dw_writel(pspi, ser, 0x2); + + /* Enable the HW, the last step for HW init */ + dw_writel(pspi, ssienr, 0x1); + + /* Set the default configuration */ + max3110_write_config(); + + /* Register the kmsg dumper */ + if (!dumper_registered) { + dw_dumper.dump = dw_kmsg_dump; + kmsg_dump_register(&dw_dumper); + dumper_registered = 1; + } +} + +/* Slave select should be called in the read/write function */ +static void early_mrst_spi_putc(char c) +{ + unsigned int timeout; + u32 sr; + + timeout = MRST_SPI_TIMEOUT; + /* Early putc needs to make sure the TX FIFO is not full */ + while (--timeout) { + sr = dw_readl(pspi, sr); + if (!(sr & SR_TF_NOT_FULL)) + cpu_relax(); + else + break; + } + + if (!timeout) + pr_warning("MRST earlycon: timed out\n"); + else + max3110_write_data(c); +} + +/* Early SPI only uses polling mode */ +static void early_mrst_spi_write(struct console *con, const char *str, unsigned n) +{ + int i; + + for (i = 0; i < n && *str; i++) { + if (*str == '\n') + early_mrst_spi_putc('\r'); + early_mrst_spi_putc(*str); + str++; + } +} + +struct console early_mrst_console = { + .name = "earlymrst", + .write = early_mrst_spi_write, + .flags = CON_PRINTBUFFER, + .index = -1, +}; -- cgit v1.2.3-70-g09d2 From 4d033556f1bfaa7604b951bfadd17aaf1b74e4c5 Mon Sep 17 00:00:00 2001 From: Feng Tang Date: Mon, 13 Sep 2010 15:08:56 +0800 Subject: x86, earlyprintk: Add hsu early console for Intel Medfield platform The Intel Medfield platform has a high speed UART device which can act as an early console. To enable early printk via the HSU console, simply add "earlyprintk=hsu" to the kernel command line.
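For example, assuming the usual earlyprintk= option syntax (where an optional ",keep" suffix keeps the early console registered after the regular console comes up), a Medfield board would typically boot with: earlyprintk=hsu,keep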
Currently we put the code in early_printk_mrst.c, as that file also covers early consoles for Intel MID platforms like the mrst early console. Signed-off-by: Feng Tang Acked-by: Alan Cox Cc: greg@kroah.com LKML-Reference: <1284361736-23011-5-git-send-email-feng.tang@intel.com> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mrst.h | 3 ++ arch/x86/kernel/early_printk.c | 6 +++ arch/x86/kernel/early_printk_mrst.c | 89 ++++++++++++++++++++++++++++++++++++- 3 files changed, 97 insertions(+), 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h index d0ea5bc505a..4a711a684b1 100644 --- a/arch/x86/include/asm/mrst.h +++ b/arch/x86/include/asm/mrst.h @@ -47,4 +47,7 @@ extern enum mrst_timer_options mrst_timer_options; extern struct console early_mrst_console; extern void mrst_early_console_init(void); + +extern struct console early_hsu_console; +extern void hsu_early_console_init(void); #endif /* _ASM_X86_MRST_H */ diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 6082463768a..4572f25f932 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -245,6 +245,12 @@ static int __init setup_early_printk(char *buf) mrst_early_console_init(); early_console_register(&early_mrst_console, keep); } + + if (!strncmp(buf, "hsu", 3)) { + hsu_early_console_init(); + early_console_register(&early_hsu_console, keep); + } + #endif buf++; } diff --git a/arch/x86/kernel/early_printk_mrst.c b/arch/x86/kernel/early_printk_mrst.c index 05d27e1e2b6..65df603622b 100644 --- a/arch/x86/kernel/early_printk_mrst.c +++ b/arch/x86/kernel/early_printk_mrst.c @@ -1,5 +1,5 @@ /* - * early_printk_mrst.c - spi-uart early printk for Intel Moorestown platform + * early_printk_mrst.c - early consoles for Intel MID platforms * * Copyright (c) 2008-2010, Intel Corporation * @@ -9,9 +9,19 @@ * of the License. */ +/* + * This file implements two early consoles named mrst and hsu. + * mrst is based on Maxim3110 spi-uart device, it exists in both + * Moorestown and Medfield platforms, while hsu is based on a High + * Speed UART device which only exists in the Medfield platform + */ + +#include +#include #include #include #include +#include #include #include @@ -230,3 +240,80 @@ struct console early_mrst_console = { .flags = CON_PRINTBUFFER, .index = -1, }; + +/* + * Following is the early console based on Medfield HSU (High + * Speed UART) device.
+ */ +#define HSU_PORT2_PADDR 0xffa28180 + +static void __iomem *phsu; + +void hsu_early_console_init(void) +{ + u8 lcr; + + phsu = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE, + HSU_PORT2_PADDR); + + /* Disable FIFO */ + writeb(0x0, phsu + UART_FCR); + + /* Set to default 115200 bps, 8n1 */ + lcr = readb(phsu + UART_LCR); + writeb((0x80 | lcr), phsu + UART_LCR); + writeb(0x18, phsu + UART_DLL); + writeb(lcr, phsu + UART_LCR); + writel(0x3600, phsu + UART_MUL*4); + + writeb(0x8, phsu + UART_MCR); + writeb(0x7, phsu + UART_FCR); + writeb(0x3, phsu + UART_LCR); + + /* Clear IRQ status */ + readb(phsu + UART_LSR); + readb(phsu + UART_RX); + readb(phsu + UART_IIR); + readb(phsu + UART_MSR); + + /* Enable FIFO */ + writeb(0x7, phsu + UART_FCR); +} + +#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) + +static void early_hsu_putc(char ch) +{ + unsigned int timeout = 10000; /* 10ms */ + u8 status; + + while (--timeout) { + status = readb(phsu + UART_LSR); + if (status & BOTH_EMPTY) + break; + udelay(1); + } + + /* Only write the char when there was no timeout */ + if (timeout) + writeb(ch, phsu + UART_TX); +} + +static void early_hsu_write(struct console *con, const char *str, unsigned n) +{ + int i; + + for (i = 0; i < n && *str; i++) { + if (*str == '\n') + early_hsu_putc('\r'); + early_hsu_putc(*str); + str++; + } +} + +struct console early_hsu_console = { + .name = "earlyhsu", + .write = early_hsu_write, + .flags = CON_PRINTBUFFER, + .index = -1, +}; -- cgit v1.2.3-70-g09d2 From 6e9636693373d938aa3b13427be3d212f172ac06 Mon Sep 17 00:00:00 2001 From: Konrad Rzeszutek Wilk Date: Fri, 8 Oct 2010 14:53:48 -0400 Subject: x86, iommu: Update header comments with appropriate naming The header comments diverged a bit from the implementation. Lets re-sync them. Signed-off-by: Konrad Rzeszutek Wilk LKML-Reference: <1286564028-2352-3-git-send-email-konrad.wilk@oracle.com> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/iommu_table.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/iommu_table.h b/arch/x86/include/asm/iommu_table.h index df55a78888e..f229b13a5f3 100644 --- a/arch/x86/include/asm/iommu_table.h +++ b/arch/x86/include/asm/iommu_table.h @@ -1,4 +1,3 @@ - #ifndef _ASM_X86_IOMMU_TABLE_H #define _ASM_X86_IOMMU_TABLE_H @@ -60,7 +59,7 @@ struct iommu_table_entry { * and it will be run after the SWIOTLB and the other IOMMUs * that utilize this macro. If the IOMMU is detected (ie, the * detect routine returns a positive value), the other IOMMUs - * are also checked. You can use IOMMU_INIT_FINISH if you prefer + * are also checked. You can use IOMMU_INIT_POST_FINISH if you prefer * to stop detecting the other IOMMUs after yours has been detected. */ #define IOMMU_INIT_POST(_detect) \ @@ -80,9 +79,9 @@ struct iommu_table_entry { * d). Similar to the 'init', except that this gets called from pci_iommu_init * where we do have a memory allocator. * - * The _CONT vs the _EXIT differs in that the _CONT variant will + * The standard vs the _FINISH differs in that the _FINISH variant will * continue detecting other IOMMUs in the call list after the - * the detection routine returns a positive number. The _EXIT will + * the detection routine returns a positive number. The _FINISH will * stop the execution chain. Both will still call the 'init' and * 'late_init' functions if they are set. 
 */ -- cgit v1.2.3-70-g09d2 From 708ff2a0097b02d32d375b66996661f36cd4d6d1 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Wed, 29 Sep 2010 18:08:50 +0900 Subject: bitops: make asm-generic/bitops/find.h more generic asm-generic/bitops/find.h has the extern declarations of find_next_bit() and find_next_zero_bit() and the macro definitions of find_first_bit() and find_first_zero_bit(). It is only usable by architectures which enable CONFIG_GENERIC_FIND_NEXT_BIT and disable CONFIG_GENERIC_FIND_FIRST_BIT. x86 and tile enable both CONFIG_GENERIC_FIND_NEXT_BIT and CONFIG_GENERIC_FIND_FIRST_BIT. These architectures cannot include asm-generic/bitops/find.h in their asm/bitops.h. So ifdefed extern declarations of find_first_bit() and find_first_zero_bit() are put in linux/bitops.h. This makes asm-generic/bitops/find.h usable by these architectures, and they now use it. This change is also needed for the forthcoming cleanup of duplicated extern declarations. Signed-off-by: Akinobu Mita Signed-off-by: Arnd Bergmann Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: x86@kernel.org Cc: Chris Metcalf --- arch/tile/include/asm/bitops.h | 1 + arch/x86/include/asm/bitops.h | 2 ++ include/asm-generic/bitops/find.h | 25 +++++++++++++++++++++++++ include/linux/bitops.h | 22 ---------------------- 4 files changed, 28 insertions(+), 22 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h index 6832b4be899..6d4f0ff2c68 100644 --- a/arch/tile/include/asm/bitops.h +++ b/arch/tile/include/asm/bitops.h @@ -120,6 +120,7 @@ static inline unsigned long __arch_hweight64(__u64 w) #include #include +#include #include #include #include diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index bafd80defa4..903683b07e4 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -440,6 +440,8 @@ static inline int fls(int x) #ifdef __KERNEL__ +#include + #include #define ARCH_HAS_FAST_MULTIPLIER 1 diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h index 1914e974251..30afec0db7d 100644 --- a/include/asm-generic/bitops/find.h +++ b/include/asm-generic/bitops/find.h @@ -9,7 +9,32 @@ extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset); #endif +#ifdef CONFIG_GENERIC_FIND_FIRST_BIT + +/** + * find_first_bit - find the first set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first set bit. + */ +extern unsigned long find_first_bit(const unsigned long *addr, + unsigned long size); + +/** + * find_first_zero_bit - find the first cleared bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first cleared bit.
+ */ +extern unsigned long find_first_zero_bit(const unsigned long *addr, + unsigned long size); +#else /* CONFIG_GENERIC_FIND_FIRST_BIT */ + #define find_first_bit(addr, size) find_next_bit((addr), (size), 0) #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) +#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ + #endif /*_ASM_GENERIC_BITOPS_FIND_H_ */ diff --git a/include/linux/bitops.h b/include/linux/bitops.h index fc68053378c..adb0f113f57 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -136,28 +136,6 @@ static inline unsigned long __ffs64(u64 word) } #ifdef __KERNEL__ -#ifdef CONFIG_GENERIC_FIND_FIRST_BIT - -/** - * find_first_bit - find the first set bit in a memory region - * @addr: The address to start the search at - * @size: The maximum size to search - * - * Returns the bit number of the first set bit. - */ -extern unsigned long find_first_bit(const unsigned long *addr, - unsigned long size); - -/** - * find_first_zero_bit - find the first cleared bit in a memory region - * @addr: The address to start the search at - * @size: The maximum size to search - * - * Returns the bit number of the first cleared bit. - */ -extern unsigned long find_first_zero_bit(const unsigned long *addr, - unsigned long size); -#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ #ifdef CONFIG_GENERIC_FIND_LAST_BIT /** -- cgit v1.2.3-70-g09d2 From 4305df947ca1fd52867c8d56837a4e6b1e33167c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 28 Sep 2010 15:01:33 +0200 Subject: x86: i8259: Convert to new irq_chip functions Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- arch/x86/include/asm/i8259.h | 2 ++ arch/x86/kernel/apic/io_apic.c | 20 +++++++------- arch/x86/kernel/apic/nmi.c | 2 +- arch/x86/kernel/i8259.c | 63 +++++++++++++++++++++--------------------- arch/x86/kernel/smpboot.c | 4 +-- 5 files changed, 47 insertions(+), 44 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h index 1655147646a..a20365953bf 100644 --- a/arch/x86/include/asm/i8259.h +++ b/arch/x86/include/asm/i8259.h @@ -55,6 +55,8 @@ extern struct irq_chip i8259A_chip; struct legacy_pic { int nr_legacy_irqs; struct irq_chip *chip; + void (*mask)(unsigned int irq); + void (*unmask)(unsigned int irq); void (*mask_all)(void); void (*restore_mask)(void); void (*init)(int auto_eoi); diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 74bb027b517..e5ae2a22262 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1459,7 +1459,7 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq ioapic_register_intr(irq, desc, trigger); if (irq < legacy_pic->nr_legacy_irqs) - legacy_pic->chip->mask(irq); + legacy_pic->mask(irq); ioapic_write_entry(apic_id, pin, entry); } @@ -2233,7 +2233,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq) raw_spin_lock_irqsave(&ioapic_lock, flags); if (irq < legacy_pic->nr_legacy_irqs) { - legacy_pic->chip->mask(irq); + legacy_pic->mask(irq); if (legacy_pic->irq_pending(irq)) was_pending = 1; } @@ -2928,7 +2928,7 @@ static inline void __init check_timer(void) /* * get/set the timer IRQ vector: */ - legacy_pic->chip->mask(0); + legacy_pic->mask(0); assign_irq_vector(0, cfg, apic->target_cpus()); /* @@ -3000,7 +3000,7 @@ static inline void __init check_timer(void) if (timer_irq_works()) { if (nmi_watchdog == NMI_IO_APIC) { setup_nmi(); - legacy_pic->chip->unmask(0); + legacy_pic->unmask(0); } if (disable_timer_pin_1 
> 0) clear_IO_APIC_pin(0, pin1); @@ -3023,14 +3023,14 @@ static inline void __init check_timer(void) */ replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); - legacy_pic->chip->unmask(0); + legacy_pic->unmask(0); if (timer_irq_works()) { apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); timer_through_8259 = 1; if (nmi_watchdog == NMI_IO_APIC) { - legacy_pic->chip->mask(0); + legacy_pic->mask(0); setup_nmi(); - legacy_pic->chip->unmask(0); + legacy_pic->unmask(0); } goto out; } @@ -3038,7 +3038,7 @@ static inline void __init check_timer(void) * Cleanup, just in case ... */ local_irq_disable(); - legacy_pic->chip->mask(0); + legacy_pic->mask(0); clear_IO_APIC_pin(apic2, pin2); apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); } @@ -3057,14 +3057,14 @@ static inline void __init check_timer(void) lapic_register_intr(0, desc); apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ - legacy_pic->chip->unmask(0); + legacy_pic->unmask(0); if (timer_irq_works()) { apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); goto out; } local_irq_disable(); - legacy_pic->chip->mask(0); + legacy_pic->mask(0); apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c index a43f71cb30f..c90041ccb74 100644 --- a/arch/x86/kernel/apic/nmi.c +++ b/arch/x86/kernel/apic/nmi.c @@ -178,7 +178,7 @@ int __init check_nmi_watchdog(void) error: if (nmi_watchdog == NMI_IO_APIC) { if (!timer_through_8259) - legacy_pic->chip->mask(0); + legacy_pic->mask(0); on_each_cpu(__acpi_nmi_disable, NULL, 1); } diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index cafa7c80ac9..20757cb2efa 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -29,24 +29,10 @@ * plus some generic x86 specific things if generic specifics makes * any sense at all. 
*/ +static void init_8259A(int auto_eoi); static int i8259A_auto_eoi; DEFINE_RAW_SPINLOCK(i8259A_lock); -static void mask_and_ack_8259A(unsigned int); -static void mask_8259A(void); -static void unmask_8259A(void); -static void disable_8259A_irq(unsigned int irq); -static void enable_8259A_irq(unsigned int irq); -static void init_8259A(int auto_eoi); -static int i8259A_irq_pending(unsigned int irq); - -struct irq_chip i8259A_chip = { - .name = "XT-PIC", - .mask = disable_8259A_irq, - .disable = disable_8259A_irq, - .unmask = enable_8259A_irq, - .mask_ack = mask_and_ack_8259A, -}; /* * 8259A PIC functions to handle ISA devices: @@ -68,7 +54,7 @@ unsigned int cached_irq_mask = 0xffff; */ unsigned long io_apic_irqs; -static void disable_8259A_irq(unsigned int irq) +static void mask_8259A_irq(unsigned int irq) { unsigned int mask = 1 << irq; unsigned long flags; @@ -82,7 +68,12 @@ static void disable_8259A_irq(unsigned int irq) raw_spin_unlock_irqrestore(&i8259A_lock, flags); } -static void enable_8259A_irq(unsigned int irq) +static void disable_8259A_irq(struct irq_data *data) +{ + mask_8259A_irq(data->irq); +} + +static void unmask_8259A_irq(unsigned int irq) { unsigned int mask = ~(1 << irq); unsigned long flags; @@ -96,6 +87,11 @@ static void enable_8259A_irq(unsigned int irq) raw_spin_unlock_irqrestore(&i8259A_lock, flags); } +static void enable_8259A_irq(struct irq_data *data) +{ + unmask_8259A_irq(data->irq); +} + static int i8259A_irq_pending(unsigned int irq) { unsigned int mask = 1<irq; unsigned int irqmask = 1 << irq; unsigned long flags; @@ -223,6 +220,14 @@ spurious_8259A_irq: } } +struct irq_chip i8259A_chip = { + .name = "XT-PIC", + .irq_mask = disable_8259A_irq, + .irq_disable = disable_8259A_irq, + .irq_unmask = enable_8259A_irq, + .irq_mask_ack = mask_and_ack_8259A, +}; + static char irq_trigger[2]; /** * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ @@ -342,9 +347,9 @@ static void init_8259A(int auto_eoi) * In AEOI mode we just have to mask the interrupt * when acking. 
*/ - i8259A_chip.mask_ack = disable_8259A_irq; + i8259A_chip.irq_mask_ack = disable_8259A_irq; else - i8259A_chip.mask_ack = mask_and_ack_8259A; + i8259A_chip.irq_mask_ack = mask_and_ack_8259A; udelay(100); /* wait for 8259A to initialize */ @@ -363,14 +368,6 @@ static void init_8259A(int auto_eoi) static void legacy_pic_noop(void) { }; static void legacy_pic_uint_noop(unsigned int unused) { }; static void legacy_pic_int_noop(int unused) { }; - -static struct irq_chip dummy_pic_chip = { - .name = "dummy pic", - .mask = legacy_pic_uint_noop, - .unmask = legacy_pic_uint_noop, - .disable = legacy_pic_uint_noop, - .mask_ack = legacy_pic_uint_noop, -}; static int legacy_pic_irq_pending_noop(unsigned int irq) { return 0; @@ -378,7 +375,9 @@ static int legacy_pic_irq_pending_noop(unsigned int irq) struct legacy_pic null_legacy_pic = { .nr_legacy_irqs = 0, - .chip = &dummy_pic_chip, + .chip = &dummy_irq_chip, + .mask = legacy_pic_uint_noop, + .unmask = legacy_pic_uint_noop, .mask_all = legacy_pic_noop, .restore_mask = legacy_pic_noop, .init = legacy_pic_int_noop, @@ -389,7 +388,9 @@ struct legacy_pic null_legacy_pic = { struct legacy_pic default_legacy_pic = { .nr_legacy_irqs = NR_IRQS_LEGACY, .chip = &i8259A_chip, - .mask_all = mask_8259A, + .mask = mask_8259A_irq, + .unmask = unmask_8259A_irq, + .mask_all = mask_8259A, .restore_mask = unmask_8259A, .init = init_8259A, .irq_pending = i8259A_irq_pending, diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 87a8c6b00f8..864b386f6c0 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -324,9 +324,9 @@ notrace static void __cpuinit start_secondary(void *unused) check_tsc_sync_target(); if (nmi_watchdog == NMI_IO_APIC) { - legacy_pic->chip->mask(0); + legacy_pic->mask(0); enable_NMI_through_LVT0(); - legacy_pic->chip->unmask(0); + legacy_pic->unmask(0); } /* This must be done before setting cpu_online_mask */ -- cgit v1.2.3-70-g09d2 From d0fbca8f9304d1760fdc906b35b06e89fbdbb51f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 28 Sep 2010 16:18:39 +0200 Subject: x86: ioapic/hpet: Convert to new chip functions Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- arch/x86/include/asm/hpet.h | 10 ++++++---- arch/x86/kernel/apic/io_apic.c | 30 ++++++++++++++---------------- arch/x86/kernel/hpet.c | 16 ++++++---------- 3 files changed, 26 insertions(+), 30 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h index 1d5c08a1bdf..2c392d663dc 100644 --- a/arch/x86/include/asm/hpet.h +++ b/arch/x86/include/asm/hpet.h @@ -74,10 +74,12 @@ extern void hpet_disable(void); extern unsigned int hpet_readl(unsigned int a); extern void force_hpet_resume(void); -extern void hpet_msi_unmask(unsigned int irq); -extern void hpet_msi_mask(unsigned int irq); -extern void hpet_msi_write(unsigned int irq, struct msi_msg *msg); -extern void hpet_msi_read(unsigned int irq, struct msi_msg *msg); +struct irq_data; +extern void hpet_msi_unmask(struct irq_data *data); +extern void hpet_msi_mask(struct irq_data *data); +struct hpet_dev; +extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg); +extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg); #ifdef CONFIG_PCI_MSI extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id); diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index b8b013f0cfd..49aa857ff00 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -3605,26 
+3605,25 @@ int arch_setup_dmar_msi(unsigned int irq) #ifdef CONFIG_HPET_TIMER #ifdef CONFIG_SMP -static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) +static int hpet_msi_set_affinity(struct irq_data *data, + const struct cpumask *mask, bool force) { - struct irq_desc *desc = irq_to_desc(irq); - struct irq_cfg *cfg; + struct irq_desc *desc = irq_to_desc(data->irq); + struct irq_cfg *cfg = data->chip_data; struct msi_msg msg; unsigned int dest; if (set_desc_affinity(desc, mask, &dest)) return -1; - cfg = get_irq_desc_chip_data(desc); - - hpet_msi_read(irq, &msg); + hpet_msi_read(data->handler_data, &msg); msg.data &= ~MSI_DATA_VECTOR_MASK; msg.data |= MSI_DATA_VECTOR(cfg->vector); msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; msg.address_lo |= MSI_ADDR_DEST_ID(dest); - hpet_msi_write(irq, &msg); + hpet_msi_write(data->handler_data, &msg); return 0; } @@ -3633,8 +3632,8 @@ static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) static struct irq_chip ir_hpet_msi_type = { .name = "IR-HPET_MSI", - .unmask = hpet_msi_unmask, - .mask = hpet_msi_mask, + .irq_unmask = hpet_msi_unmask, + .irq_mask = hpet_msi_mask, #ifdef CONFIG_INTR_REMAP .irq_ack = ir_ack_apic_edge, #ifdef CONFIG_SMP @@ -3646,20 +3645,19 @@ static struct irq_chip ir_hpet_msi_type = { static struct irq_chip hpet_msi_type = { .name = "HPET_MSI", - .unmask = hpet_msi_unmask, - .mask = hpet_msi_mask, + .irq_unmask = hpet_msi_unmask, + .irq_mask = hpet_msi_mask, .irq_ack = ack_apic_edge, #ifdef CONFIG_SMP - .set_affinity = hpet_msi_set_affinity, + .irq_set_affinity = hpet_msi_set_affinity, #endif .irq_retrigger = ioapic_retrigger_irq, }; int arch_setup_hpet_msi(unsigned int irq, unsigned int id) { - int ret; struct msi_msg msg; - struct irq_desc *desc = irq_to_desc(irq); + int ret; if (intr_remapping_enabled) { struct intel_iommu *iommu = map_hpet_to_ir(id); @@ -3677,8 +3675,8 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id) if (ret < 0) return ret; - hpet_msi_write(irq, &msg); - desc->status |= IRQ_MOVE_PCNTXT; + hpet_msi_write(get_irq_data(irq), &msg); + irq_set_status_flags(irq,IRQ_MOVE_PCNTXT); if (irq_remapped(irq)) set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type, handle_edge_irq, "edge"); diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 7494999141b..efaf906daf9 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -440,9 +440,9 @@ static int hpet_legacy_next_event(unsigned long delta, static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev); static struct hpet_dev *hpet_devs; -void hpet_msi_unmask(unsigned int irq) +void hpet_msi_unmask(struct irq_data *data) { - struct hpet_dev *hdev = get_irq_data(irq); + struct hpet_dev *hdev = data->handler_data; unsigned int cfg; /* unmask it */ @@ -451,10 +451,10 @@ void hpet_msi_unmask(unsigned int irq) hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); } -void hpet_msi_mask(unsigned int irq) +void hpet_msi_mask(struct irq_data *data) { + struct hpet_dev *hdev = data->handler_data; unsigned int cfg; - struct hpet_dev *hdev = get_irq_data(irq); /* mask it */ cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); @@ -462,18 +462,14 @@ void hpet_msi_mask(unsigned int irq) hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); } -void hpet_msi_write(unsigned int irq, struct msi_msg *msg) +void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg) { - struct hpet_dev *hdev = get_irq_data(irq); - hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num)); hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4); } -void 
hpet_msi_read(unsigned int irq, struct msi_msg *msg) +void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg) { - struct hpet_dev *hdev = get_irq_data(irq); - msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num)); msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4); msg->address_hi = 0; -- cgit v1.2.3-70-g09d2 From f7e909eae444ff733ecc5628af76d89c363ab480 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 8 Oct 2010 21:40:23 +0200 Subject: x86: Prepare the affinity common functions for taking struct irq_data * While at it rename it to sensible function names and fix the return value from unsigned to int for __ioapic_set_affinity (set_desc_affinity). Returning -1 in a function returning unsigned int is somewhat strange. Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- arch/x86/include/asm/hw_irq.h | 6 ++-- arch/x86/kernel/apic/io_apic.c | 77 ++++++++++++++++-------------------------- arch/x86/kernel/uv_irq.c | 2 +- 3 files changed, 33 insertions(+), 52 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 46c0fe05f23..76848f27b1a 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -95,9 +95,9 @@ extern struct irq_cfg *irq_cfg(unsigned int); extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *); extern void send_cleanup_vector(struct irq_cfg *); -struct irq_desc; -extern unsigned int set_desc_affinity(struct irq_desc *, const struct cpumask *, - unsigned int *dest_id); +struct irq_data; +int __ioapic_set_affinity(struct irq_data *, const struct cpumask *, + unsigned int *dest_id); extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr); extern void setup_ioapic_dest(void); diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 82c3c66e333..60ca9a47087 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2279,65 +2279,46 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq } /* - * Either sets desc->affinity to a valid value, and returns + * Either sets data->affinity to a valid value, and returns * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and - * leaves desc->affinity untouched. + * leaves data->affinity untouched. 
*/ -unsigned int -set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask, - unsigned int *dest_id) +int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, + unsigned int *dest_id) { - struct irq_cfg *cfg; - unsigned int irq; + struct irq_cfg *cfg = data->chip_data; if (!cpumask_intersects(mask, cpu_online_mask)) return -1; - irq = desc->irq; - cfg = get_irq_desc_chip_data(desc); - if (assign_irq_vector(irq, cfg, mask)) + if (assign_irq_vector(data->irq, data->chip_data, mask)) return -1; - cpumask_copy(desc->affinity, mask); + cpumask_copy(data->affinity, mask); - *dest_id = apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain); + *dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain); return 0; } static int -set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) +ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, + bool force) { - struct irq_cfg *cfg; + unsigned int dest, irq = data->irq; unsigned long flags; - unsigned int dest; - unsigned int irq; - int ret = -1; - - irq = desc->irq; - cfg = get_irq_desc_chip_data(desc); + int ret; raw_spin_lock_irqsave(&ioapic_lock, flags); - ret = set_desc_affinity(desc, mask, &dest); + ret = __ioapic_set_affinity(data, mask, &dest); if (!ret) { /* Only the high 8 bits are valid. */ dest = SET_APIC_LOGICAL_ID(dest); - __target_IO_APIC_irq(irq, dest, cfg); + __target_IO_APIC_irq(irq, dest, data->chip_data); } raw_spin_unlock_irqrestore(&ioapic_lock, flags); - return ret; } -static int -set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) -{ - struct irq_desc *desc; - - desc = irq_to_desc(irq); - - return set_ioapic_affinity_irq_desc(desc, mask); -} - #ifdef CONFIG_INTR_REMAP /* @@ -2668,16 +2649,16 @@ static void ir_ack_apic_level(struct irq_data *data) #endif /* CONFIG_INTR_REMAP */ static struct irq_chip ioapic_chip __read_mostly = { - .name = "IO-APIC", - .irq_startup = startup_ioapic_irq, - .irq_mask = mask_ioapic_irq, - .irq_unmask = unmask_ioapic_irq, - .irq_ack = ack_apic_edge, - .irq_eoi = ack_apic_level, + .name = "IO-APIC", + .irq_startup = startup_ioapic_irq, + .irq_mask = mask_ioapic_irq, + .irq_unmask = unmask_ioapic_irq, + .irq_ack = ack_apic_edge, + .irq_eoi = ack_apic_level, #ifdef CONFIG_SMP - .set_affinity = set_ioapic_affinity_irq, + .irq_set_affinity = ioapic_set_affinity, #endif - .irq_retrigger = ioapic_retrigger_irq, + .irq_retrigger = ioapic_retrigger_irq, }; static struct irq_chip ir_ioapic_chip __read_mostly = { @@ -3327,7 +3308,7 @@ static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) struct msi_msg msg; unsigned int dest; - if (set_desc_affinity(desc, mask, &dest)) + if (__ioapic_set_affinity(&desc->irq_data, mask, &dest)) return -1; cfg = get_irq_desc_chip_data(desc); @@ -3359,7 +3340,7 @@ ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) if (get_irte(irq, &irte)) return -1; - if (set_desc_affinity(desc, mask, &dest)) + if (__ioapic_set_affinity(&desc->irq_data, mask, &dest)) return -1; irte.vector = cfg->vector; @@ -3534,7 +3515,7 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) struct msi_msg msg; unsigned int dest; - if (set_desc_affinity(desc, mask, &dest)) + if (__ioapic_set_affinity(&desc->irq_data, mask, &dest)) return -1; cfg = get_irq_desc_chip_data(desc); @@ -3590,7 +3571,7 @@ static int hpet_msi_set_affinity(struct irq_data *data, struct msi_msg msg; unsigned int dest; - if (set_desc_affinity(desc, mask, &dest)) + if 
(__ioapic_set_affinity(&desc->irq_data, mask, &dest)) return -1; hpet_msi_read(data->handler_data, &msg); @@ -3693,7 +3674,7 @@ static int set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) struct irq_cfg *cfg; unsigned int dest; - if (set_desc_affinity(desc, mask, &dest)) + if (__ioapic_set_affinity(&desc->irq_data, mask, &dest)) return -1; cfg = get_irq_desc_chip_data(desc); @@ -4045,14 +4026,14 @@ void __init setup_ioapic_dest(void) */ if (desc->status & (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) - mask = desc->affinity; + mask = desc->irq_data.affinity; else mask = apic->target_cpus(); if (intr_remapping_enabled) set_ir_ioapic_affinity_irq_desc(desc, mask); else - set_ioapic_affinity_irq_desc(desc, mask); + ioapic_set_affinity(&desc->irq_data, mask, false); } } diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c index 2233a42fb90..56a42e11ae0 100644 --- a/arch/x86/kernel/uv_irq.c +++ b/arch/x86/kernel/uv_irq.c @@ -216,7 +216,7 @@ static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask) unsigned long mmr_offset; int mmr_pnode; - if (set_desc_affinity(desc, mask, &dest)) + if (__ioapic_set_affinity(&desc->irq_data, mask, &dest)) return -1; mmr_value = 0; -- cgit v1.2.3-70-g09d2 From 423f085952fd7253407cb92984cc2d495a564481 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 10 Oct 2010 11:39:09 +0200 Subject: x86: Embedd irq_2_iommu into irq_cfg That interrupt remapping code is x86 specific and tied to the io_apic code. No need for separate allocator functions in the interrupt remapping code. This allows to simplify the code and irq_2_iommu is small (13 bytes on 64bit) so it's not a real problem even if interrupt remapping is runtime disabled. If it's compile time disabled the impact is zero. Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar Acked-by: Suresh Siddha Cc: David Woodhouse Cc: Jesse Barnes --- arch/x86/include/asm/hw_irq.h | 10 ++++++++++ drivers/pci/intr_remapping.c | 7 ------- include/linux/dmar.h | 1 + 3 files changed, 11 insertions(+), 7 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 76848f27b1a..e756c4bfed9 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -78,6 +78,13 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr, irq_attr->polarity = polarity; } +struct irq_2_iommu { + struct intel_iommu *iommu; + u16 irte_index; + u16 sub_handle; + u8 irte_mask; +}; + /* * This is performance-critical, we want to do it O(1) * @@ -89,6 +96,9 @@ struct irq_cfg { cpumask_var_t old_domain; u8 vector; u8 move_in_progress : 1; +#ifdef CONFIG_INTR_REMAP + struct irq_2_iommu irq_2_iommu; +#endif }; extern struct irq_cfg *irq_cfg(unsigned int); diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index 343f7299c78..0f4691c5fab 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c @@ -46,13 +46,6 @@ static __init int setup_intremap(char *str) } early_param("intremap", setup_intremap); -struct irq_2_iommu { - struct intel_iommu *iommu; - u16 irte_index; - u16 sub_handle; - u8 irte_mask; -}; - #ifdef CONFIG_GENERIC_HARDIRQS static struct irq_2_iommu *irq_2_iommu(unsigned int irq) { diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 200439ec7c4..4475f8cf7a6 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -106,6 +106,7 @@ struct irte { __u64 high; }; }; + #ifdef CONFIG_INTR_REMAP extern int intr_remapping_enabled; extern int 
intr_remapping_supported(void); -- cgit v1.2.3-70-g09d2 From 1a0730d6649113c820217387a011a17dd4aff3ad Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 11 Oct 2010 11:55:37 +0200 Subject: x86: Speed up the irq_remapped check in hot pathes irq_2_iommu is in struct irq_cfg, so we can do the irq_remapped check based on irq_cfg instead of going through a lookup function. That's especially interesting in the eoi_ioapic_irq() hotpath. Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar Acked-by: Suresh Siddha Cc: David Woodhouse Cc: Jesse Barnes --- arch/x86/include/asm/irq_remapping.h | 8 ++++++++ arch/x86/kernel/apic/io_apic.c | 12 ++++++------ drivers/pci/intr_remapping.c | 7 ------- include/linux/dmar.h | 2 -- 4 files changed, 14 insertions(+), 15 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h index 8d841505344..1c23360fb2d 100644 --- a/arch/x86/include/asm/irq_remapping.h +++ b/arch/x86/include/asm/irq_remapping.h @@ -24,10 +24,18 @@ static inline void prepare_irte(struct irte *irte, int vector, irte->dest_id = IRTE_DEST(dest); irte->redir_hint = 1; } +static inline bool irq_remapped(struct irq_cfg *cfg) +{ + return cfg->irq_2_iommu.iommu != NULL; +} #else static void prepare_irte(struct irte *irte, int vector, unsigned int dest) { } +static inline bool irq_remapped(struct irq_cfg *cfg) +{ + return false; +} #endif #endif /* _ASM_X86_IRQ_REMAPPING_H */ diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 6ff6bb883c5..1b8e8a10612 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1237,7 +1237,7 @@ static void ioapic_register_intr(unsigned int irq, unsigned long trigger) else irq_clear_status_flags(irq, IRQ_LEVEL); - if (irq_remapped(irq)) { + if (irq_remapped(get_irq_chip_data(irq))) { irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); if (trigger) set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, @@ -2183,7 +2183,7 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq * With interrupt-remapping, destination information comes * from interrupt-remapping table entry. */ - if (!irq_remapped(irq)) + if (!irq_remapped(cfg)) io_apic_write(apic, 0x11 + pin*2, dest); reg = io_apic_read(apic, 0x10 + pin*2); reg &= ~IO_APIC_REDIR_VECTOR_MASK; @@ -2415,7 +2415,7 @@ static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) * intr-remapping table entry. Hence for the io-apic * EOI we use the pin number. 
*/ - if (irq_remapped(irq)) + if (irq_remapped(cfg)) io_apic_eoi(entry->apic, entry->pin); else io_apic_eoi(entry->apic, cfg->vector); @@ -3139,7 +3139,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); - if (irq_remapped(irq)) { + if (irq_remapped(get_irq_chip_data(irq))) { struct irte irte; int ir_index; u16 sub_handle; @@ -3321,7 +3321,7 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) set_irq_msi(irq, msidesc); write_msi_msg(irq, &msg); - if (irq_remapped(irq)) { + if (irq_remapped(get_irq_chip_data(irq))) { irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); } else @@ -3522,7 +3522,7 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id) hpet_msi_write(get_irq_data(irq), &msg); irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); - if (irq_remapped(irq)) + if (irq_remapped(get_irq_chip_data(irq))) set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type, handle_edge_irq, "edge"); else diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index a620b8bd8f4..ec87cd66f3e 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c @@ -54,13 +54,6 @@ static struct irq_2_iommu *irq_2_iommu(unsigned int irq) return cfg ? &cfg->irq_2_iommu : NULL; } -int irq_remapped(int irq) -{ - struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); - - return irq_iommu ? irq_iommu->iommu != NULL : 0; -} - int get_irte(int irq, struct irte *entry) { struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 4475f8cf7a6..51651b76d40 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -122,7 +122,6 @@ extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, extern int map_irq_to_irte_handle(int irq, u16 *sub_handle); extern int free_irte(int irq); -extern int irq_remapped(int irq); extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); extern struct intel_iommu *map_ioapic_to_ir(int apic); extern struct intel_iommu *map_hpet_to_ir(u8 id); @@ -176,7 +175,6 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev) return 0; } -#define irq_remapped(irq) (0) #define enable_intr_remapping(mode) (-1) #define disable_intr_remapping() (0) #define reenable_intr_remapping(mode) (0) -- cgit v1.2.3-70-g09d2 From 1a8ce7ff68d777195da2d340561bda610e533a64 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 4 Oct 2010 21:08:56 +0200 Subject: x86: Make io_apic.c local functions static No users outside of io_apic.c Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- arch/x86/include/asm/io_apic.h | 6 ------ arch/x86/kernel/apic/io_apic.c | 10 +++++----- 2 files changed, 5 insertions(+), 11 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 9cb2edb87c2..c8be4566c3d 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h @@ -170,12 +170,6 @@ extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); extern void probe_nr_irqs_gsi(void); -extern int setup_ioapic_entry(int apic, int irq, - struct IO_APIC_route_entry *entry, - unsigned int destination, int trigger, - int polarity, int vector, int pin); -extern void ioapic_write_entry(int apic, int pin, - struct IO_APIC_route_entry e); extern void setup_ioapic_ids_from_mpc(void); struct mp_ioapic_gsi{ diff --git 
a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 1b8e8a10612..0102543b564 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -364,7 +364,7 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) io_apic_write(apic, 0x10 + 2*pin, eu.w1); } -void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) +static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) { unsigned long flags; raw_spin_lock_irqsave(&ioapic_lock, flags); @@ -1259,10 +1259,10 @@ static void ioapic_register_intr(unsigned int irq, unsigned long trigger) handle_edge_irq, "edge"); } -int setup_ioapic_entry(int apic_id, int irq, - struct IO_APIC_route_entry *entry, - unsigned int destination, int trigger, - int polarity, int vector, int pin) +static int setup_ioapic_entry(int apic_id, int irq, + struct IO_APIC_route_entry *entry, + unsigned int destination, int trigger, + int polarity, int vector, int pin) { /* * add it to the IO-APIC irq-routing table: -- cgit v1.2.3-70-g09d2 From 48b2650196364e4ef124efb841b63c2326e4ccb2 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 30 Sep 2010 11:43:08 +0200 Subject: x86: uv: Clean up the direct access to irq_desc Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- arch/x86/include/asm/hw_irq.h | 1 - arch/x86/kernel/apic/io_apic.c | 2 +- arch/x86/kernel/uv_irq.c | 55 +++++++++++++++--------------------------- 3 files changed, 20 insertions(+), 38 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index e756c4bfed9..d5905fd8ba4 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -101,7 +101,6 @@ struct irq_cfg { #endif }; -extern struct irq_cfg *irq_cfg(unsigned int); extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *); extern void send_cleanup_vector(struct irq_cfg *); diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 0102543b564..5193f201b91 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -178,7 +178,7 @@ int __init arch_early_irq_init(void) } #ifdef CONFIG_SPARSE_IRQ -struct irq_cfg *irq_cfg(unsigned int irq) +static struct irq_cfg *irq_cfg(unsigned int irq) { return get_irq_chip_data(irq); } diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c index 56a42e11ae0..7b24460917d 100644 --- a/arch/x86/kernel/uv_irq.c +++ b/arch/x86/kernel/uv_irq.c @@ -28,34 +28,21 @@ struct uv_irq_2_mmr_pnode{ static spinlock_t uv_irq_lock; static struct rb_root uv_irq_root; -static int uv_set_irq_affinity(unsigned int, const struct cpumask *); +static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool); -static void uv_noop(unsigned int irq) -{ -} - -static unsigned int uv_noop_ret(unsigned int irq) -{ - return 0; -} +static void uv_noop(struct irq_data *data) { } -static void uv_ack_apic(unsigned int irq) +static void uv_ack_apic(struct irq_data *data) { ack_APIC_irq(); } static struct irq_chip uv_irq_chip = { - .name = "UV-CORE", - .startup = uv_noop_ret, - .shutdown = uv_noop, - .enable = uv_noop, - .disable = uv_noop, - .ack = uv_noop, - .mask = uv_noop, - .unmask = uv_noop, - .eoi = uv_ack_apic, - .end = uv_noop, - .set_affinity = uv_set_irq_affinity, + .name = "UV-CORE", + .irq_mask = uv_noop, + .irq_unmask = uv_noop, + .irq_eoi = uv_ack_apic, + .irq_set_affinity = uv_set_irq_affinity, }; /* @@ -144,26 +131,22 @@ arch_enable_uv_irq(char *irq_name, unsigned 
int irq, int cpu, int mmr_blade, unsigned long mmr_offset, int limit) { const struct cpumask *eligible_cpu = cpumask_of(cpu); - struct irq_desc *desc = irq_to_desc(irq); - struct irq_cfg *cfg; - int mmr_pnode; + struct irq_cfg *cfg = get_irq_chip_data(irq); unsigned long mmr_value; struct uv_IO_APIC_route_entry *entry; - int err; + int mmr_pnode, err; BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); - cfg = irq_cfg(irq); - err = assign_irq_vector(irq, cfg, eligible_cpu); if (err != 0) return err; if (limit == UV_AFFINITY_CPU) - desc->status |= IRQ_NO_BALANCING; + irq_set_status_flags(irq, IRQ_NO_BALANCING); else - desc->status |= IRQ_MOVE_PCNTXT; + irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, irq_name); @@ -206,17 +189,17 @@ static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset) uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); } -static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask) +static int +uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask, + bool force) { - struct irq_desc *desc = irq_to_desc(irq); - struct irq_cfg *cfg = get_irq_desc_chip_data(desc); + struct irq_cfg *cfg = data->chip_data; unsigned int dest; - unsigned long mmr_value; + unsigned long mmr_value, mmr_offset; struct uv_IO_APIC_route_entry *entry; - unsigned long mmr_offset; int mmr_pnode; - if (__ioapic_set_affinity(&desc->irq_data, mask, &dest)) + if (__ioapic_set_affinity(data, mask, &dest)) return -1; mmr_value = 0; @@ -231,7 +214,7 @@ static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask) entry->dest = dest; /* Get previously stored MMR and pnode of hub sourcing interrupts */ - if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode)) + if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode)) return -1; uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); -- cgit v1.2.3-70-g09d2 From 5bcd757f93cc713cf61bbeefceda7539d9afca55 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Mon, 4 Oct 2010 14:59:31 -0400 Subject: x86/amd-iommu: Reenable AMD IOMMU if it's mysteriously vanished over suspend AMD's reference BIOS code had a bug that could result in the firmware failing to reenable the iommu on resume. It transpires that this causes certain less than desirable behaviour when it comes to PCI accesses, to whit them ending up somewhere near Bristol when the more desirable outcome was Edinburgh. Sadness ensues, perhaps along with filesystem corruption. Let's make sure that it gets turned back on, and that we restore its configuration so decisions it makes bear some resemblance to those made by reasonable people rather than crack-addled lemurs who spent all your DMA on Thunderbird. Signed-off-by: Matthew Garrett Signed-off-by: Joerg Roedel --- arch/x86/include/asm/amd_iommu_types.h | 21 ++++-- arch/x86/kernel/amd_iommu_init.c | 122 +++++++++++++++++++++++++++++---- 2 files changed, 123 insertions(+), 20 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index 08616180dea..bdd20c8891e 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h @@ -416,13 +416,22 @@ struct amd_iommu { struct dma_ops_domain *default_dom; /* - * This array is required to work around a potential BIOS bug. - * The BIOS may miss to restore parts of the PCI configuration - * space when the system resumes from S3. 
The result is that the - * IOMMU does not execute commands anymore which leads to system - * failure. + * We can't rely on the BIOS to restore all values on reinit, so we + * need to stash them */ - u32 cache_cfg[4]; + + /* The iommu BAR */ + u32 stored_addr_lo; + u32 stored_addr_hi; + + /* + * Each iommu has 6 l1s, each of which is documented as having 0x12 + * registers + */ + u32 stored_l1[6][0x12]; + + /* The l2 indirect registers */ + u32 stored_l2[0x83]; }; /* diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index 5a170cbbbed..44710d86a43 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c @@ -194,6 +194,39 @@ static inline unsigned long tbl_size(int entry_size) return 1UL << shift; } +/* Access to l1 and l2 indexed register spaces */ + +static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) +{ + u32 val; + + pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); + pci_read_config_dword(iommu->dev, 0xfc, &val); + return val; +} + +static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val) +{ + pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31)); + pci_write_config_dword(iommu->dev, 0xfc, val); + pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); +} + +static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address) +{ + u32 val; + + pci_write_config_dword(iommu->dev, 0xf0, address); + pci_read_config_dword(iommu->dev, 0xf4, &val); + return val; +} + +static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val) +{ + pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8)); + pci_write_config_dword(iommu->dev, 0xf4, val); +} + /**************************************************************************** * * AMD IOMMU MMIO register space handling functions @@ -619,6 +652,7 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu) { int cap_ptr = iommu->cap_ptr; u32 range, misc; + int i, j; pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, &iommu->cap); @@ -633,12 +667,29 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu) MMIO_GET_LD(range)); iommu->evt_msi_num = MMIO_MSI_NUM(misc); - if (is_rd890_iommu(iommu->dev)) { - pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]); - pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]); - pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]); - pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]); - } + if (!is_rd890_iommu(iommu->dev)) + return; + + /* + * Some rd890 systems may not be fully reconfigured by the BIOS, so + * it's necessary for us to store this information so it can be + * reprogrammed on resume + */ + + pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4, + &iommu->stored_addr_lo); + pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8, + &iommu->stored_addr_hi); + + /* Low bit locks writes to configuration space */ + iommu->stored_addr_lo &= ~1; + + for (i = 0; i < 6; i++) + for (j = 0; j < 0x12; j++) + iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j); + + for (i = 0; i < 0x83; i++) + iommu->stored_l2[i] = iommu_read_l2(iommu, i); } /* @@ -1127,14 +1178,53 @@ static void iommu_init_flags(struct amd_iommu *iommu) iommu_feature_enable(iommu, CONTROL_COHERENT_EN); } -static void iommu_apply_quirks(struct amd_iommu *iommu) +static void iommu_apply_resume_quirks(struct amd_iommu *iommu) { - if (is_rd890_iommu(iommu->dev)) { - pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]); - 
pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]); - pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]); - pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]); - } + int i, j; + u32 ioc_feature_control; + struct pci_dev *pdev = NULL; + + /* RD890 BIOSes may not have completely reconfigured the iommu */ + if (!is_rd890_iommu(iommu->dev)) + return; + + /* + * First, we need to ensure that the iommu is enabled. This is + * controlled by a register in the northbridge + */ + pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0)); + + if (!pdev) + return; + + /* Select Northbridge indirect register 0x75 and enable writing */ + pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); + pci_read_config_dword(pdev, 0x64, &ioc_feature_control); + + /* Enable the iommu */ + if (!(ioc_feature_control & 0x1)) + pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); + + pci_dev_put(pdev); + + /* Restore the iommu BAR */ + pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, + iommu->stored_addr_lo); + pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8, + iommu->stored_addr_hi); + + /* Restore the l1 indirect regs for each of the 6 l1s */ + for (i = 0; i < 6; i++) + for (j = 0; j < 0x12; j++) + iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]); + + /* Restore the l2 indirect regs */ + for (i = 0; i < 0x83; i++) + iommu_write_l2(iommu, i, iommu->stored_l2[i]); + + /* Lock PCI setup registers */ + pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, + iommu->stored_addr_lo | 1); } /* @@ -1147,7 +1237,6 @@ static void enable_iommus(void) for_each_iommu(iommu) { iommu_disable(iommu); - iommu_apply_quirks(iommu); iommu_init_flags(iommu); iommu_set_device_table(iommu); iommu_enable_command_buffer(iommu); @@ -1173,6 +1262,11 @@ static void disable_iommus(void) static int amd_iommu_resume(struct sys_device *dev) { + struct amd_iommu *iommu; + + for_each_iommu(iommu) + iommu_apply_resume_quirks(iommu); + /* re-load the hardware */ enable_iommus(); -- cgit v1.2.3-70-g09d2 From 5d0d71569e671239ae0d905ced9b65cd843f99ee Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 13 Oct 2010 11:13:21 +0200 Subject: x86/amd-iommu: Update copyright headers This patch updates the copyright headers in all source files of the AMD IOMMU driver. Signed-off-by: Joerg Roedel --- arch/x86/include/asm/amd_iommu.h | 2 +- arch/x86/include/asm/amd_iommu_proto.h | 2 +- arch/x86/include/asm/amd_iommu_types.h | 2 +- arch/x86/kernel/amd_iommu.c | 2 +- arch/x86/kernel/amd_iommu_init.c | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h index 5af2982133b..f16a2caca1e 100644 --- a/arch/x86/include/asm/amd_iommu.h +++ b/arch/x86/include/asm/amd_iommu.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2007-2009 Advanced Micro Devices, Inc. + * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. * Author: Joerg Roedel * Leo Duran * diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h index cb030374b90..916bc8111a0 100644 --- a/arch/x86/include/asm/amd_iommu_proto.h +++ b/arch/x86/include/asm/amd_iommu_proto.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009 Advanced Micro Devices, Inc. + * Copyright (C) 2009-2010 Advanced Micro Devices, Inc. 
* Author: Joerg Roedel * * This program is free software; you can redistribute it and/or modify it diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index bdd20c8891e..e3509fc303b 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2007-2009 Advanced Micro Devices, Inc. + * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. * Author: Joerg Roedel * Leo Duran * diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 679b6450382..d2fdb0826df 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2007-2009 Advanced Micro Devices, Inc. + * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. * Author: Joerg Roedel * Leo Duran * diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index 44710d86a43..3cb482e123d 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2007-2009 Advanced Micro Devices, Inc. + * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. * Author: Joerg Roedel * Leo Duran * -- cgit v1.2.3-70-g09d2 From d7acb92fea932ad2e7846480aeacddc2c03c8485 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Wed, 13 Oct 2010 16:00:29 -0700 Subject: x86-64, asm: If the assembler supports fxsave64, use it Kbuild allows for us to probe for the existence of specific constructs in the assembler, use them to find out if we can use fxsave64 and permit the compiler to generate better code. Signed-off-by: H. Peter Anvin --- arch/x86/Makefile | 8 ++++++-- arch/x86/include/asm/i387.h | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 8aa1b59b907..40668a96ca0 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -96,8 +96,12 @@ cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_en # is .cfi_signal_frame supported too? cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1) cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1) -KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) -KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) + +# does binutils support specific instructions? +asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1) + +KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) +KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) LDFLAGS := -m elf_$(UTS_MACHINE) diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index a73a8d5a5e6..70f105b352e 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h @@ -175,7 +175,7 @@ static inline void fpu_fxsave(struct fpu *fpu) uses any extended registers for addressing, a second REX prefix will be generated (to the assembler, rex64 followed by semicolon is a separate instruction), and hence the 64-bitness is lost. */ -#if 0 +#ifdef CONFIG_AS_FXSAVEQ /* Using "fxsaveq %0" would be the ideal choice, but is only supported starting with gas 2.16. 
*/ __asm__ __volatile__("fxsaveq %0" -- cgit v1.2.3-70-g09d2 From fef5ba797991f9335bcfc295942b684f9bf613a1 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 13 Oct 2010 16:02:24 -0700 Subject: xen: Cope with unmapped pages when initializing kernel pagetable Xen requires that all pages containing pagetable entries to be mapped read-only. If pages used for the initial pagetable are already mapped then we can change the mapping to RO. However, if they are initially unmapped, we need to make sure that when they are later mapped, they are also mapped RO. We do this by knowing that the kernel pagetable memory is pre-allocated in the range e820_table_start - e820_table_end, so any pfn within this range should be mapped read-only. However, the pagetable setup code early_ioremaps the pages to write their entries, so we must make sure that mappings created in the early_ioremap fixmap area are mapped RW. (Those mappings are removed before the pages are presented to Xen as pagetable pages.) Signed-off-by: Jeremy Fitzhardinge LKML-Reference: <4CB63A80.8060702@goop.org> Cc: Yinghai Lu Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/io.h | 1 + arch/x86/mm/ioremap.c | 5 +++++ arch/x86/xen/mmu.c | 26 ++++++++++++++++++-------- 3 files changed, 24 insertions(+), 8 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 30a3e977612..66aee6c4123 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -348,6 +348,7 @@ extern void __iomem *early_memremap(resource_size_t phys_addr, unsigned long size); extern void early_iounmap(void __iomem *addr, unsigned long size); extern void fixup_early_ioremap(void); +extern bool is_early_ioremap_ptep(pte_t *ptep); #define IO_SPACE_LIMIT 0xffff diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 3ba6e0608c5..0369843511d 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -362,6 +362,11 @@ static inline pte_t * __init early_ioremap_pte(unsigned long addr) return &bm_pte[pte_index(addr)]; } +bool __init is_early_ioremap_ptep(pte_t *ptep) +{ + return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)]; +} + static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata; void __init early_ioremap_init(void) diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 4fe04ac0bae..7d55e9ee3a7 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -56,6 +56,7 @@ #include #include #include +#include #include #include @@ -360,7 +361,8 @@ void make_lowmem_page_readonly(void *vaddr) unsigned int level; pte = lookup_address(address, &level); - BUG_ON(pte == NULL); + if (pte == NULL) + return; /* vaddr missing */ ptev = pte_wrprotect(*pte); @@ -375,7 +377,8 @@ void make_lowmem_page_readwrite(void *vaddr) unsigned int level; pte = lookup_address(address, &level); - BUG_ON(pte == NULL); + if (pte == NULL) + return; /* vaddr missing */ ptev = pte_mkwrite(*pte); @@ -1509,13 +1512,25 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) #endif } -#ifdef CONFIG_X86_32 static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) { + unsigned long pfn = pte_pfn(pte); + +#ifdef CONFIG_X86_32 /* If there's an existing pte, then don't allow _PAGE_RW to be set */ if (pte_val_ma(*ptep) & _PAGE_PRESENT) pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & pte_val_ma(pte)); +#endif + + /* + * If the new pfn is within the range of the newly allocated + * kernel pagetable, and it isn't being mapped into an + * early_ioremap fixmap slot, make sure it is RO. 
+ */ + if (!is_early_ioremap_ptep(ptep) && + pfn >= e820_table_start && pfn < e820_table_end) + pte = pte_wrprotect(pte); return pte; } @@ -1528,7 +1543,6 @@ static __init void xen_set_pte_init(pte_t *ptep, pte_t pte) xen_set_pte(ptep, pte); } -#endif static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) { @@ -1973,11 +1987,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = { .alloc_pmd_clone = paravirt_nop, .release_pmd = xen_release_pmd_init, -#ifdef CONFIG_X86_64 - .set_pte = xen_set_pte, -#else .set_pte = xen_set_pte_init, -#endif .set_pte_at = xen_set_pte_at, .set_pmd = xen_set_pmd_hyper, -- cgit v1.2.3-70-g09d2 From e360adbe29241a0194e10e20595360dd7b98a2b3 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 14 Oct 2010 14:01:34 +0800 Subject: irq_work: Add generic hardirq context callbacks Provide a mechanism that allows running code in IRQ context. It is most useful for NMI code that needs to interact with the rest of the system -- like wakeup a task to drain buffers. Perf currently has such a mechanism, so extract that and provide it as a generic feature, independent of perf so that others may also benefit. The IRQ context callback is generated through self-IPIs where possible, or on architectures like powerpc the decrementer (the built-in timer facility) is set to generate an interrupt immediately. Architectures that don't have anything like this get to do with a callback from the timer tick. These architectures can call irq_work_run() at the tail of any IRQ handlers that might enqueue such work (like the perf IRQ handler) to avoid undue latencies in processing the work. Signed-off-by: Peter Zijlstra Acked-by: Kyle McMartin Acked-by: Martin Schwidefsky [ various fixes ] Signed-off-by: Huang Ying LKML-Reference: <1287036094.7768.291.camel@yhuang-dev> Signed-off-by: Ingo Molnar --- arch/alpha/Kconfig | 1 + arch/alpha/include/asm/perf_event.h | 5 -- arch/alpha/kernel/time.c | 30 +++---- arch/arm/Kconfig | 1 + arch/arm/include/asm/perf_event.h | 12 --- arch/arm/kernel/perf_event.c | 8 +- arch/frv/Kconfig | 1 + arch/frv/lib/Makefile | 2 +- arch/frv/lib/perf_event.c | 19 ---- arch/parisc/Kconfig | 1 + arch/parisc/include/asm/perf_event.h | 3 +- arch/powerpc/Kconfig | 1 + arch/powerpc/include/asm/paca.h | 2 +- arch/powerpc/kernel/time.c | 42 ++++----- arch/s390/Kconfig | 1 + arch/s390/include/asm/perf_event.h | 3 +- arch/sh/Kconfig | 1 + arch/sh/include/asm/perf_event.h | 7 -- arch/sparc/Kconfig | 2 + arch/sparc/include/asm/perf_event.h | 4 - arch/sparc/kernel/pcr.c | 8 +- arch/x86/Kconfig | 1 + arch/x86/include/asm/entry_arch.h | 4 +- arch/x86/include/asm/hardirq.h | 2 +- arch/x86/include/asm/hw_irq.h | 2 +- arch/x86/include/asm/irq_vectors.h | 4 +- arch/x86/kernel/Makefile | 1 + arch/x86/kernel/cpu/perf_event.c | 19 ---- arch/x86/kernel/entry_64.S | 6 +- arch/x86/kernel/irq.c | 8 +- arch/x86/kernel/irq_work.c | 30 +++++++ arch/x86/kernel/irqinit.c | 6 +- include/linux/irq_work.h | 20 +++++ include/linux/perf_event.h | 11 +-- init/Kconfig | 8 ++ kernel/Makefile | 2 + kernel/irq_work.c | 164 +++++++++++++++++++++++++++++++++++ kernel/perf_event.c | 104 ++-------------------- kernel/timer.c | 7 +- 39 files changed, 311 insertions(+), 242 deletions(-) delete mode 100644 arch/frv/lib/perf_event.c create mode 100644 arch/x86/kernel/irq_work.c create mode 100644 include/linux/irq_work.h create mode 100644 kernel/irq_work.c (limited to 'arch/x86/include') diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index b9647bb66d1..d04ccd73af4 100644 --- a/arch/alpha/Kconfig 
+++ b/arch/alpha/Kconfig @@ -9,6 +9,7 @@ config ALPHA select HAVE_IDE select HAVE_OPROFILE select HAVE_SYSCALL_WRAPPERS + select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select HAVE_DMA_ATTRS help diff --git a/arch/alpha/include/asm/perf_event.h b/arch/alpha/include/asm/perf_event.h index 4157cd3c44a..fe792ca818f 100644 --- a/arch/alpha/include/asm/perf_event.h +++ b/arch/alpha/include/asm/perf_event.h @@ -1,11 +1,6 @@ #ifndef __ASM_ALPHA_PERF_EVENT_H #define __ASM_ALPHA_PERF_EVENT_H -/* Alpha only supports software events through this interface. */ -extern void set_perf_event_pending(void); - -#define PERF_EVENT_INDEX_OFFSET 0 - #ifdef CONFIG_PERF_EVENTS extern void init_hw_perf_events(void); #else diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c index 396af1799ea..0f1d8493cfc 100644 --- a/arch/alpha/kernel/time.c +++ b/arch/alpha/kernel/time.c @@ -41,7 +41,7 @@ #include #include #include -#include +#include #include #include @@ -83,25 +83,25 @@ static struct { unsigned long est_cycle_freq; -#ifdef CONFIG_PERF_EVENTS +#ifdef CONFIG_IRQ_WORK -DEFINE_PER_CPU(u8, perf_event_pending); +DEFINE_PER_CPU(u8, irq_work_pending); -#define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1 -#define test_perf_event_pending() __get_cpu_var(perf_event_pending) -#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0 +#define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1 +#define test_irq_work_pending() __get_cpu_var(irq_work_pending) +#define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0 -void set_perf_event_pending(void) +void set_irq_work_pending(void) { - set_perf_event_pending_flag(); + set_irq_work_pending_flag(); } -#else /* CONFIG_PERF_EVENTS */ +#else /* CONFIG_IRQ_WORK */ -#define test_perf_event_pending() 0 -#define clear_perf_event_pending() +#define test_irq_work_pending() 0 +#define clear_irq_work_pending() -#endif /* CONFIG_PERF_EVENTS */ +#endif /* CONFIG_IRQ_WORK */ static inline __u32 rpcc(void) @@ -191,9 +191,9 @@ irqreturn_t timer_interrupt(int irq, void *dev) write_sequnlock(&xtime_lock); - if (test_perf_event_pending()) { - clear_perf_event_pending(); - perf_event_do_pending(); + if (test_irq_work_pending()) { + clear_irq_work_pending(); + irq_work_run(); } #ifndef CONFIG_SMP diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 88c97bc7a6f..7c0dfccd05b 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -23,6 +23,7 @@ config ARM select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZO select HAVE_KERNEL_LZMA + select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select PERF_USE_VMALLOC select HAVE_REGS_AND_STACK_ACCESS_API diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index b5799a3b711..c4aa4e8c6af 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h @@ -12,18 +12,6 @@ #ifndef __ARM_PERF_EVENT_H__ #define __ARM_PERF_EVENT_H__ -/* - * NOP: on *most* (read: all supported) ARM platforms, the performance - * counter interrupts are regular interrupts and not an NMI. This - * means that when we receive the interrupt we can call - * perf_event_do_pending() that handles all of the work with - * interrupts disabled. - */ -static inline void -set_perf_event_pending(void) -{ -} - /* ARM performance counters start from 1 (in the cp15 accesses) so use the * same indexes here for consistency. 
*/ #define PERF_EVENT_INDEX_OFFSET 1 diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 6cc6521881a..49643b1467e 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -1092,7 +1092,7 @@ armv6pmu_handle_irq(int irq_num, * platforms that can have the PMU interrupts raised as an NMI, this * will not work. */ - perf_event_do_pending(); + irq_work_run(); return IRQ_HANDLED; } @@ -2068,7 +2068,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) * platforms that can have the PMU interrupts raised as an NMI, this * will not work. */ - perf_event_do_pending(); + irq_work_run(); return IRQ_HANDLED; } @@ -2436,7 +2436,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev) armpmu->disable(hwc, idx); } - perf_event_do_pending(); + irq_work_run(); /* * Re-enable the PMU. @@ -2763,7 +2763,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev) armpmu->disable(hwc, idx); } - perf_event_do_pending(); + irq_work_run(); /* * Re-enable the PMU. diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index 16399bd2499..0f2417df632 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig @@ -7,6 +7,7 @@ config FRV default y select HAVE_IDE select HAVE_ARCH_TRACEHOOK + select HAVE_IRQ_WORK select HAVE_PERF_EVENTS config ZONE_DMA diff --git a/arch/frv/lib/Makefile b/arch/frv/lib/Makefile index f4709756d0d..4ff2fb1e6b1 100644 --- a/arch/frv/lib/Makefile +++ b/arch/frv/lib/Makefile @@ -5,4 +5,4 @@ lib-y := \ __ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \ checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \ - outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_event.o + outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o diff --git a/arch/frv/lib/perf_event.c b/arch/frv/lib/perf_event.c deleted file mode 100644 index 9ac5acfd2e9..00000000000 --- a/arch/frv/lib/perf_event.c +++ /dev/null @@ -1,19 +0,0 @@ -/* Performance event handling - * - * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ - -#include - -/* - * mark the performance event as pending - */ -void set_perf_event_pending(void) -{ -} diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 907417d187e..79a04a9394d 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -16,6 +16,7 @@ config PARISC select RTC_DRV_GENERIC select INIT_ALL_POSSIBLE select BUG + select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select GENERIC_ATOMIC64 if !64BIT help diff --git a/arch/parisc/include/asm/perf_event.h b/arch/parisc/include/asm/perf_event.h index cc146427d8f..1e0fd8ba6c0 100644 --- a/arch/parisc/include/asm/perf_event.h +++ b/arch/parisc/include/asm/perf_event.h @@ -1,7 +1,6 @@ #ifndef __ASM_PARISC_PERF_EVENT_H #define __ASM_PARISC_PERF_EVENT_H -/* parisc only supports software events through this interface. 
*/ -static inline void set_perf_event_pending(void) { } +/* Empty, just to avoid compiling error */ #endif /* __ASM_PARISC_PERF_EVENT_H */ diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 631e5a0fb6a..4b1e521d966 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -138,6 +138,7 @@ config PPC select HAVE_OPROFILE select HAVE_SYSCALL_WRAPPERS if PPC64 select GENERIC_ATOMIC64 if PPC32 + select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64 diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 1ff6662f7fa..9b287fdd8ea 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -129,7 +129,7 @@ struct paca_struct { u8 soft_enabled; /* irq soft-enable flag */ u8 hard_enabled; /* set if irqs are enabled in MSR */ u8 io_sync; /* writel() needs spin_unlock sync */ - u8 perf_event_pending; /* PM interrupt while soft-disabled */ + u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */ /* Stuff for accurate time accounting */ u64 user_time; /* accumulated usermode TB ticks */ diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 8533b3b83f5..54888eb10c3 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -53,7 +53,7 @@ #include #include #include -#include +#include #include #include @@ -493,60 +493,60 @@ void __init iSeries_time_init_early(void) } #endif /* CONFIG_PPC_ISERIES */ -#ifdef CONFIG_PERF_EVENTS +#ifdef CONFIG_IRQ_WORK /* * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable... */ #ifdef CONFIG_PPC64 -static inline unsigned long test_perf_event_pending(void) +static inline unsigned long test_irq_work_pending(void) { unsigned long x; asm volatile("lbz %0,%1(13)" : "=r" (x) - : "i" (offsetof(struct paca_struct, perf_event_pending))); + : "i" (offsetof(struct paca_struct, irq_work_pending))); return x; } -static inline void set_perf_event_pending_flag(void) +static inline void set_irq_work_pending_flag(void) { asm volatile("stb %0,%1(13)" : : "r" (1), - "i" (offsetof(struct paca_struct, perf_event_pending))); + "i" (offsetof(struct paca_struct, irq_work_pending))); } -static inline void clear_perf_event_pending(void) +static inline void clear_irq_work_pending(void) { asm volatile("stb %0,%1(13)" : : "r" (0), - "i" (offsetof(struct paca_struct, perf_event_pending))); + "i" (offsetof(struct paca_struct, irq_work_pending))); } #else /* 32-bit */ -DEFINE_PER_CPU(u8, perf_event_pending); +DEFINE_PER_CPU(u8, irq_work_pending); -#define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1 -#define test_perf_event_pending() __get_cpu_var(perf_event_pending) -#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0 +#define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1 +#define test_irq_work_pending() __get_cpu_var(irq_work_pending) +#define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0 #endif /* 32 vs 64 bit */ -void set_perf_event_pending(void) +void set_irq_work_pending(void) { preempt_disable(); - set_perf_event_pending_flag(); + set_irq_work_pending_flag(); set_dec(1); preempt_enable(); } -#else /* CONFIG_PERF_EVENTS */ +#else /* CONFIG_IRQ_WORK */ -#define test_perf_event_pending() 0 -#define clear_perf_event_pending() +#define test_irq_work_pending() 0 +#define clear_irq_work_pending() -#endif /* CONFIG_PERF_EVENTS */ +#endif /* CONFIG_IRQ_WORK */ /* * For iSeries shared processors, we have to let the 
hypervisor @@ -587,9 +587,9 @@ void timer_interrupt(struct pt_regs * regs) calculate_steal_time(); - if (test_perf_event_pending()) { - clear_perf_event_pending(); - perf_event_do_pending(); + if (test_irq_work_pending()) { + clear_irq_work_pending(); + irq_work_run(); } #ifdef CONFIG_PPC_ISERIES diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index f0777a47e3a..958f0dadead 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -95,6 +95,7 @@ config S390 select HAVE_KVM if 64BIT select HAVE_ARCH_TRACEHOOK select INIT_ALL_POSSIBLE + select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select HAVE_KERNEL_GZIP select HAVE_KERNEL_BZIP2 diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h index 3840cbe7763..a75f168d271 100644 --- a/arch/s390/include/asm/perf_event.h +++ b/arch/s390/include/asm/perf_event.h @@ -4,7 +4,6 @@ * Copyright 2009 Martin Schwidefsky, IBM Corporation. */ -static inline void set_perf_event_pending(void) {} -static inline void clear_perf_event_pending(void) {} +/* Empty, just to avoid compiling error */ #define PERF_EVENT_INDEX_OFFSET 0 diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 35b6c3f8517..35b6879628a 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -16,6 +16,7 @@ config SUPERH select HAVE_ARCH_TRACEHOOK select HAVE_DMA_API_DEBUG select HAVE_DMA_ATTRS + select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select PERF_USE_VMALLOC select HAVE_KERNEL_GZIP diff --git a/arch/sh/include/asm/perf_event.h b/arch/sh/include/asm/perf_event.h index 3d0c9f36d15..14308bed7ea 100644 --- a/arch/sh/include/asm/perf_event.h +++ b/arch/sh/include/asm/perf_event.h @@ -26,11 +26,4 @@ extern int register_sh_pmu(struct sh_pmu *); extern int reserve_pmc_hardware(void); extern void release_pmc_hardware(void); -static inline void set_perf_event_pending(void) -{ - /* Nothing to see here, move along. 
*/ -} - -#define PERF_EVENT_INDEX_OFFSET 0 - #endif /* __ASM_SH_PERF_EVENT_H */ diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 9212cd42a83..3e9d31401fb 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -26,6 +26,7 @@ config SPARC select ARCH_WANT_OPTIONAL_GPIOLIB select RTC_CLASS select RTC_DRV_M48T59 + select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select PERF_USE_VMALLOC select HAVE_DMA_ATTRS @@ -54,6 +55,7 @@ config SPARC64 select RTC_DRV_BQ4802 select RTC_DRV_SUN4V select RTC_DRV_STARFIRE + select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select PERF_USE_VMALLOC diff --git a/arch/sparc/include/asm/perf_event.h b/arch/sparc/include/asm/perf_event.h index 727af70646c..6e8bfa1786d 100644 --- a/arch/sparc/include/asm/perf_event.h +++ b/arch/sparc/include/asm/perf_event.h @@ -1,10 +1,6 @@ #ifndef __ASM_SPARC_PERF_EVENT_H #define __ASM_SPARC_PERF_EVENT_H -extern void set_perf_event_pending(void); - -#define PERF_EVENT_INDEX_OFFSET 0 - #ifdef CONFIG_PERF_EVENTS #include diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index c4a6a50b484..b87873c0e8e 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c @@ -7,7 +7,7 @@ #include #include -#include +#include #include #include @@ -43,14 +43,14 @@ void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs) old_regs = set_irq_regs(regs); irq_enter(); -#ifdef CONFIG_PERF_EVENTS - perf_event_do_pending(); +#ifdef CONFIG_IRQ_WORK + irq_work_run(); #endif irq_exit(); set_irq_regs(old_regs); } -void set_perf_event_pending(void) +void arch_irq_work_raise(void) { set_softint(1 << PIL_DEFERRED_PCR_WORK); } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9815221976a..fd227d6b8d9 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -25,6 +25,7 @@ config X86 select HAVE_IDE select HAVE_OPROFILE select HAVE_PERF_EVENTS if (!M386 && !M486) + select HAVE_IRQ_WORK select HAVE_IOREMAP_PROT select HAVE_KPROBES select ARCH_WANT_OPTIONAL_GPIOLIB diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h index 8e8ec663a98..b8e96a18676 100644 --- a/arch/x86/include/asm/entry_arch.h +++ b/arch/x86/include/asm/entry_arch.h @@ -49,8 +49,8 @@ BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR) BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) -#ifdef CONFIG_PERF_EVENTS -BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR) +#ifdef CONFIG_IRQ_WORK +BUILD_INTERRUPT(irq_work_interrupt, IRQ_WORK_VECTOR) #endif #ifdef CONFIG_X86_THERMAL_VECTOR diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index aeab29aee61..55e4de613f0 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h @@ -14,7 +14,7 @@ typedef struct { #endif unsigned int x86_platform_ipis; /* arch dependent */ unsigned int apic_perf_irqs; - unsigned int apic_pending_irqs; + unsigned int apic_irq_work_irqs; #ifdef CONFIG_SMP unsigned int irq_resched_count; unsigned int irq_call_count; diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 46c0fe05f23..3a54a1ca1a0 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -29,7 +29,7 @@ extern void apic_timer_interrupt(void); extern void x86_platform_ipi(void); extern void error_interrupt(void); -extern void perf_pending_interrupt(void); +extern void irq_work_interrupt(void); extern void spurious_interrupt(void); extern void thermal_interrupt(void); diff --git a/arch/x86/include/asm/irq_vectors.h 
b/arch/x86/include/asm/irq_vectors.h index e2ca3009255..6af0894dafb 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -114,9 +114,9 @@ #define X86_PLATFORM_IPI_VECTOR 0xed /* - * Performance monitoring pending work vector: + * IRQ work vector: */ -#define LOCAL_PENDING_VECTOR 0xec +#define IRQ_WORK_VECTOR 0xec #define UV_BAU_MESSAGE 0xea diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 9d3f485e5dd..7490bf8d145 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -35,6 +35,7 @@ obj-y := process_$(BITS).o signal.o entry_$(BITS).o obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o obj-y += time.o ioport.o ldt.o dumpstack.o obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o +obj-$(CONFIG_IRQ_WORK) += irq_work.o obj-$(CONFIG_X86_VISWS) += visws_quirks.o obj-$(CONFIG_X86_32) += probe_roms_32.o obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index e2513f26ba8..fe73c1844a9 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1196,25 +1196,6 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) return handled; } -void smp_perf_pending_interrupt(struct pt_regs *regs) -{ - irq_enter(); - ack_APIC_irq(); - inc_irq_stat(apic_pending_irqs); - perf_event_do_pending(); - irq_exit(); -} - -void set_perf_event_pending(void) -{ -#ifdef CONFIG_X86_LOCAL_APIC - if (!x86_pmu.apic || !x86_pmu_initialized()) - return; - - apic->send_IPI_self(LOCAL_PENDING_VECTOR); -#endif -} - void perf_events_lapic_init(void) { if (!x86_pmu.apic || !x86_pmu_initialized()) diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 17be5ec7cbb..c375c79065f 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -1023,9 +1023,9 @@ apicinterrupt ERROR_APIC_VECTOR \ apicinterrupt SPURIOUS_APIC_VECTOR \ spurious_interrupt smp_spurious_interrupt -#ifdef CONFIG_PERF_EVENTS -apicinterrupt LOCAL_PENDING_VECTOR \ - perf_pending_interrupt smp_perf_pending_interrupt +#ifdef CONFIG_IRQ_WORK +apicinterrupt IRQ_WORK_VECTOR \ + irq_work_interrupt smp_irq_work_interrupt #endif /* diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 91fd0c70a18..44edb03fc9e 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -67,10 +67,10 @@ static int show_other_interrupts(struct seq_file *p, int prec) for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs); seq_printf(p, " Performance monitoring interrupts\n"); - seq_printf(p, "%*s: ", prec, "PND"); + seq_printf(p, "%*s: ", prec, "IWI"); for_each_online_cpu(j) - seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs); - seq_printf(p, " Performance pending work\n"); + seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs); + seq_printf(p, " IRQ work interrupts\n"); #endif if (x86_platform_ipi_callback) { seq_printf(p, "%*s: ", prec, "PLT"); @@ -185,7 +185,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu) sum += irq_stats(cpu)->apic_timer_irqs; sum += irq_stats(cpu)->irq_spurious_count; sum += irq_stats(cpu)->apic_perf_irqs; - sum += irq_stats(cpu)->apic_pending_irqs; + sum += irq_stats(cpu)->apic_irq_work_irqs; #endif if (x86_platform_ipi_callback) sum += irq_stats(cpu)->x86_platform_ipis; diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c new file mode 100644 index 00000000000..ca8f703a1e7 --- /dev/null +++ b/arch/x86/kernel/irq_work.c @@ -0,0 +1,30 @@ +/* + * x86 specific code for irq_work + * + * 
Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra + */ + +#include +#include +#include +#include + +void smp_irq_work_interrupt(struct pt_regs *regs) +{ + irq_enter(); + ack_APIC_irq(); + inc_irq_stat(apic_irq_work_irqs); + irq_work_run(); + irq_exit(); +} + +void arch_irq_work_raise(void) +{ +#ifdef CONFIG_X86_LOCAL_APIC + if (!cpu_has_apic) + return; + + apic->send_IPI_self(IRQ_WORK_VECTOR); + apic_wait_icr_idle(); +#endif +} diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 990ae7cfc57..713969b9266 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -224,9 +224,9 @@ static void __init apic_intr_init(void) alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); - /* Performance monitoring interrupts: */ -# ifdef CONFIG_PERF_EVENTS - alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt); + /* IRQ work interrupts: */ +# ifdef CONFIG_IRQ_WORK + alloc_intr_gate(IRQ_WORK_VECTOR, irq_work_interrupt); # endif #endif diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h new file mode 100644 index 00000000000..4fa09d4d0b7 --- /dev/null +++ b/include/linux/irq_work.h @@ -0,0 +1,20 @@ +#ifndef _LINUX_IRQ_WORK_H +#define _LINUX_IRQ_WORK_H + +struct irq_work { + struct irq_work *next; + void (*func)(struct irq_work *); +}; + +static inline +void init_irq_work(struct irq_work *entry, void (*func)(struct irq_work *)) +{ + entry->next = NULL; + entry->func = func; +} + +bool irq_work_queue(struct irq_work *entry); +void irq_work_run(void); +void irq_work_sync(struct irq_work *entry); + +#endif /* _LINUX_IRQ_WORK_H */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index a9227e98520..2ebfc9ae475 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -486,6 +486,7 @@ struct perf_guest_info_callbacks { #include #include #include +#include #include #include @@ -672,11 +673,6 @@ struct perf_buffer { void *data_pages[0]; }; -struct perf_pending_entry { - struct perf_pending_entry *next; - void (*func)(struct perf_pending_entry *); -}; - struct perf_sample_data; typedef void (*perf_overflow_handler_t)(struct perf_event *, int, @@ -784,7 +780,7 @@ struct perf_event { int pending_wakeup; int pending_kill; int pending_disable; - struct perf_pending_entry pending; + struct irq_work pending; atomic_t event_limit; @@ -898,8 +894,6 @@ extern int perf_event_init_task(struct task_struct *child); extern void perf_event_exit_task(struct task_struct *child); extern void perf_event_free_task(struct task_struct *task); extern void perf_event_delayed_put(struct task_struct *task); -extern void set_perf_event_pending(void); -extern void perf_event_do_pending(void); extern void perf_event_print_debug(void); extern void perf_pmu_disable(struct pmu *pmu); extern void perf_pmu_enable(struct pmu *pmu); @@ -1078,7 +1072,6 @@ static inline int perf_event_init_task(struct task_struct *child) { return 0; } static inline void perf_event_exit_task(struct task_struct *child) { } static inline void perf_event_free_task(struct task_struct *task) { } static inline void perf_event_delayed_put(struct task_struct *task) { } -static inline void perf_event_do_pending(void) { } static inline void perf_event_print_debug(void) { } static inline int perf_event_task_disable(void) { return -EINVAL; } static inline int perf_event_task_enable(void) { return -EINVAL; } diff --git a/init/Kconfig b/init/Kconfig index 2de5b1cbadd..1ef0b439908 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -21,6 
+21,13 @@ config CONSTRUCTORS depends on !UML default y +config HAVE_IRQ_WORK + bool + +config IRQ_WORK + bool + depends on HAVE_IRQ_WORK + menu "General setup" config EXPERIMENTAL @@ -987,6 +994,7 @@ config PERF_EVENTS default y if (PROFILING || PERF_COUNTERS) depends on HAVE_PERF_EVENTS select ANON_INODES + select IRQ_WORK help Enable kernel support for various performance events provided by software and hardware. diff --git a/kernel/Makefile b/kernel/Makefile index d52b473c99a..4d9bf5f8531 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -23,6 +23,7 @@ CFLAGS_REMOVE_rtmutex-debug.o = -pg CFLAGS_REMOVE_cgroup-debug.o = -pg CFLAGS_REMOVE_sched_clock.o = -pg CFLAGS_REMOVE_perf_event.o = -pg +CFLAGS_REMOVE_irq_work.o = -pg endif obj-$(CONFIG_FREEZER) += freezer.o @@ -100,6 +101,7 @@ obj-$(CONFIG_TRACING) += trace/ obj-$(CONFIG_X86_DS) += trace/ obj-$(CONFIG_RING_BUFFER) += trace/ obj-$(CONFIG_SMP) += sched_cpupri.o +obj-$(CONFIG_IRQ_WORK) += irq_work.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o diff --git a/kernel/irq_work.c b/kernel/irq_work.c new file mode 100644 index 00000000000..f16763ff848 --- /dev/null +++ b/kernel/irq_work.c @@ -0,0 +1,164 @@ +/* + * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra + * + * Provides a framework for enqueueing and running callbacks from hardirq + * context. The enqueueing is NMI-safe. + */ + +#include +#include +#include +#include + +/* + * An entry can be in one of four states: + * + * free NULL, 0 -> {claimed} : free to be used + * claimed NULL, 3 -> {pending} : claimed to be enqueued + * pending next, 3 -> {busy} : queued, pending callback + * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed + * + * We use the lower two bits of the next pointer to keep PENDING and BUSY + * flags. + */ + +#define IRQ_WORK_PENDING 1UL +#define IRQ_WORK_BUSY 2UL +#define IRQ_WORK_FLAGS 3UL + +static inline bool irq_work_is_set(struct irq_work *entry, int flags) +{ + return (unsigned long)entry->next & flags; +} + +static inline struct irq_work *irq_work_next(struct irq_work *entry) +{ + unsigned long next = (unsigned long)entry->next; + next &= ~IRQ_WORK_FLAGS; + return (struct irq_work *)next; +} + +static inline struct irq_work *next_flags(struct irq_work *entry, int flags) +{ + unsigned long next = (unsigned long)entry; + next |= flags; + return (struct irq_work *)next; +} + +static DEFINE_PER_CPU(struct irq_work *, irq_work_list); + +/* + * Claim the entry so that no one else will poke at it. + */ +static bool irq_work_claim(struct irq_work *entry) +{ + struct irq_work *next, *nflags; + + do { + next = entry->next; + if ((unsigned long)next & IRQ_WORK_PENDING) + return false; + nflags = next_flags(next, IRQ_WORK_FLAGS); + } while (cmpxchg(&entry->next, next, nflags) != next); + + return true; +} + + +void __weak arch_irq_work_raise(void) +{ + /* + * Lame architectures will get the timer tick callback + */ +} + +/* + * Queue the entry and raise the IPI if needed. + */ +static void __irq_work_queue(struct irq_work *entry) +{ + struct irq_work **head, *next; + + head = &get_cpu_var(irq_work_list); + + do { + next = *head; + /* Can assign non-atomic because we keep the flags set. */ + entry->next = next_flags(next, IRQ_WORK_FLAGS); + } while (cmpxchg(head, next, entry) != next); + + /* The list was empty, raise self-interrupt to start processing. 
*/ + if (!irq_work_next(entry)) + arch_irq_work_raise(); + + put_cpu_var(irq_work_list); +} + +/* + * Enqueue the irq_work @entry, returns true on success, failure when the + * @entry was already enqueued by someone else. + * + * Can be re-enqueued while the callback is still in progress. + */ +bool irq_work_queue(struct irq_work *entry) +{ + if (!irq_work_claim(entry)) { + /* + * Already enqueued, can't do! + */ + return false; + } + + __irq_work_queue(entry); + return true; +} +EXPORT_SYMBOL_GPL(irq_work_queue); + +/* + * Run the irq_work entries on this cpu. Requires to be ran from hardirq + * context with local IRQs disabled. + */ +void irq_work_run(void) +{ + struct irq_work *list, **head; + + head = &__get_cpu_var(irq_work_list); + if (*head == NULL) + return; + + BUG_ON(!in_irq()); + BUG_ON(!irqs_disabled()); + + list = xchg(head, NULL); + while (list != NULL) { + struct irq_work *entry = list; + + list = irq_work_next(list); + + /* + * Clear the PENDING bit, after this point the @entry + * can be re-used. + */ + entry->next = next_flags(NULL, IRQ_WORK_BUSY); + entry->func(entry); + /* + * Clear the BUSY bit and return to the free state if + * no-one else claimed it meanwhile. + */ + cmpxchg(&entry->next, next_flags(NULL, IRQ_WORK_BUSY), NULL); + } +} +EXPORT_SYMBOL_GPL(irq_work_run); + +/* + * Synchronize against the irq_work @entry, ensures the entry is not + * currently in use. + */ +void irq_work_sync(struct irq_work *entry) +{ + WARN_ON_ONCE(irqs_disabled()); + + while (irq_work_is_set(entry, IRQ_WORK_BUSY)) + cpu_relax(); +} +EXPORT_SYMBOL_GPL(irq_work_sync); diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 634f86a4b2f..99b9700e74d 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2206,12 +2206,11 @@ static void free_event_rcu(struct rcu_head *head) kfree(event); } -static void perf_pending_sync(struct perf_event *event); static void perf_buffer_put(struct perf_buffer *buffer); static void free_event(struct perf_event *event) { - perf_pending_sync(event); + irq_work_sync(&event->pending); if (!event->parent) { atomic_dec(&nr_events); @@ -3162,16 +3161,7 @@ void perf_event_wakeup(struct perf_event *event) } } -/* - * Pending wakeups - * - * Handle the case where we need to wakeup up from NMI (or rq->lock) context. - * - * The NMI bit means we cannot possibly take locks. Therefore, maintain a - * single linked list and use cmpxchg() to add entries lockless. 
- */ - -static void perf_pending_event(struct perf_pending_entry *entry) +static void perf_pending_event(struct irq_work *entry) { struct perf_event *event = container_of(entry, struct perf_event, pending); @@ -3187,89 +3177,6 @@ static void perf_pending_event(struct perf_pending_entry *entry) } } -#define PENDING_TAIL ((struct perf_pending_entry *)-1UL) - -static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = { - PENDING_TAIL, -}; - -static void perf_pending_queue(struct perf_pending_entry *entry, - void (*func)(struct perf_pending_entry *)) -{ - struct perf_pending_entry **head; - - if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL) - return; - - entry->func = func; - - head = &get_cpu_var(perf_pending_head); - - do { - entry->next = *head; - } while (cmpxchg(head, entry->next, entry) != entry->next); - - set_perf_event_pending(); - - put_cpu_var(perf_pending_head); -} - -static int __perf_pending_run(void) -{ - struct perf_pending_entry *list; - int nr = 0; - - list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL); - while (list != PENDING_TAIL) { - void (*func)(struct perf_pending_entry *); - struct perf_pending_entry *entry = list; - - list = list->next; - - func = entry->func; - entry->next = NULL; - /* - * Ensure we observe the unqueue before we issue the wakeup, - * so that we won't be waiting forever. - * -- see perf_not_pending(). - */ - smp_wmb(); - - func(entry); - nr++; - } - - return nr; -} - -static inline int perf_not_pending(struct perf_event *event) -{ - /* - * If we flush on whatever cpu we run, there is a chance we don't - * need to wait. - */ - get_cpu(); - __perf_pending_run(); - put_cpu(); - - /* - * Ensure we see the proper queue state before going to sleep - * so that we do not miss the wakeup. -- see perf_pending_handle() - */ - smp_rmb(); - return event->pending.next == NULL; -} - -static void perf_pending_sync(struct perf_event *event) -{ - wait_event(event->waitq, perf_not_pending(event)); -} - -void perf_event_do_pending(void) -{ - __perf_pending_run(); -} - /* * We assume there is only KVM supporting the callbacks. 
* Later on, we might change it to a list if there is @@ -3319,8 +3226,7 @@ static void perf_output_wakeup(struct perf_output_handle *handle) if (handle->nmi) { handle->event->pending_wakeup = 1; - perf_pending_queue(&handle->event->pending, - perf_pending_event); + irq_work_queue(&handle->event->pending); } else perf_event_wakeup(handle->event); } @@ -4356,8 +4262,7 @@ static int __perf_event_overflow(struct perf_event *event, int nmi, event->pending_kill = POLL_HUP; if (nmi) { event->pending_disable = 1; - perf_pending_queue(&event->pending, - perf_pending_event); + irq_work_queue(&event->pending); } else perf_event_disable(event); } @@ -5374,6 +5279,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, INIT_LIST_HEAD(&event->event_entry); INIT_LIST_HEAD(&event->sibling_list); init_waitqueue_head(&event->waitq); + init_irq_work(&event->pending, perf_pending_event); mutex_init(&event->mmap_mutex); diff --git a/kernel/timer.c b/kernel/timer.c index 97bf05baade..68a9ae7679b 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -37,7 +37,7 @@ #include #include #include -#include +#include #include #include @@ -1279,7 +1279,10 @@ void update_process_times(int user_tick) run_local_timers(); rcu_check_callbacks(cpu, user_tick); printk_tick(); - perf_event_do_pending(); +#ifdef CONFIG_IRQ_WORK + if (in_irq()) + irq_work_run(); +#endif scheduler_tick(); run_posix_cpu_timers(p); } -- cgit v1.2.3-70-g09d2 From 9581d442b9058d3699b4be568b6e5eae38a41493 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Tue, 19 Oct 2010 16:46:55 +0200 Subject: KVM: Fix fs/gs reload oops with invalid ldt kvm reloads the host's fs and gs blindly, however the underlying segment descriptors may be invalid due to the user modifying the ldt after loading them. Fix by using the safe accessors (loadsegment() and load_gs_index()) instead of home grown unsafe versions. This is CVE-2010-3698. KVM-Stable-Tag. 
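The before/after gs reload pattern is easier to see without the diff markers. A minimal sketch follows, using the names that appear in the patch below; host_reload_gs() is just an illustrative wrapper invented for this note, and the fragment is not a buildable unit on its own. The point is that loadsegment()/load_gs_index() carry an exception-table fixup and so recover from a selector whose LDT descriptor has been invalidated, whereas the removed open-coded mov does not.

	/* Removed pattern: a raw segment load faults (#GP) in kernel context
	 * if userspace invalidated the LDT descriptor behind the selector. */
	static inline void kvm_load_gs(u16 sel)
	{
		asm("mov %0, %%gs" : : "rm"(sel));
	}

	/* Adopted pattern (sketch): the safe helpers fall back to the null
	 * selector on a faulting load, and on 64-bit the gs base is
	 * re-established explicitly afterwards. */
	static inline void host_reload_gs(u16 sel)
	{
	#ifdef CONFIG_X86_64
		load_gs_index(sel);
		wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
	#else
		loadsegment(gs, sel);
	#endif
	}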
Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti --- arch/x86/include/asm/kvm_host.h | 24 ------------------------ arch/x86/kvm/svm.c | 15 ++++++++++----- arch/x86/kvm/vmx.c | 24 +++++++++--------------- 3 files changed, 19 insertions(+), 44 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 502e53f999c..c52e2eb40a1 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -652,20 +652,6 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) return (struct kvm_mmu_page *)page_private(page); } -static inline u16 kvm_read_fs(void) -{ - u16 seg; - asm("mov %%fs, %0" : "=g"(seg)); - return seg; -} - -static inline u16 kvm_read_gs(void) -{ - u16 seg; - asm("mov %%gs, %0" : "=g"(seg)); - return seg; -} - static inline u16 kvm_read_ldt(void) { u16 ldt; @@ -673,16 +659,6 @@ static inline u16 kvm_read_ldt(void) return ldt; } -static inline void kvm_load_fs(u16 sel) -{ - asm("mov %0, %%fs" : : "rm"(sel)); -} - -static inline void kvm_load_gs(u16 sel) -{ - asm("mov %0, %%gs" : : "rm"(sel)); -} - static inline void kvm_load_ldt(u16 sel) { asm("lldt %0" : : "rm"(sel)); diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 81ed28cb36e..8a3f9f64f86 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -3163,8 +3163,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) sync_lapic_to_cr8(vcpu); save_host_msrs(vcpu); - fs_selector = kvm_read_fs(); - gs_selector = kvm_read_gs(); + savesegment(fs, fs_selector); + savesegment(gs, gs_selector); ldt_selector = kvm_read_ldt(); svm->vmcb->save.cr2 = vcpu->arch.cr2; /* required for live migration with NPT */ @@ -3251,10 +3251,15 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; - kvm_load_fs(fs_selector); - kvm_load_gs(gs_selector); - kvm_load_ldt(ldt_selector); load_host_msrs(vcpu); + loadsegment(fs, fs_selector); +#ifdef CONFIG_X86_64 + load_gs_index(gs_selector); + wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs); +#else + loadsegment(gs, gs_selector); +#endif + kvm_load_ldt(ldt_selector); reload_tss(vcpu); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 49b25eee25a..7bddfab1201 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -803,7 +803,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) */ vmx->host_state.ldt_sel = kvm_read_ldt(); vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; - vmx->host_state.fs_sel = kvm_read_fs(); + savesegment(fs, vmx->host_state.fs_sel); if (!(vmx->host_state.fs_sel & 7)) { vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); vmx->host_state.fs_reload_needed = 0; @@ -811,7 +811,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) vmcs_write16(HOST_FS_SELECTOR, 0); vmx->host_state.fs_reload_needed = 1; } - vmx->host_state.gs_sel = kvm_read_gs(); + savesegment(gs, vmx->host_state.gs_sel); if (!(vmx->host_state.gs_sel & 7)) vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); else { @@ -841,27 +841,21 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) static void __vmx_load_host_state(struct vcpu_vmx *vmx) { - unsigned long flags; - if (!vmx->host_state.loaded) return; ++vmx->vcpu.stat.host_state_reload; vmx->host_state.loaded = 0; if (vmx->host_state.fs_reload_needed) - kvm_load_fs(vmx->host_state.fs_sel); + loadsegment(fs, vmx->host_state.fs_sel); if (vmx->host_state.gs_ldt_reload_needed) { kvm_load_ldt(vmx->host_state.ldt_sel); - 
/* - * If we have to reload gs, we must take care to - * preserve our gs base. - */ - local_irq_save(flags); - kvm_load_gs(vmx->host_state.gs_sel); #ifdef CONFIG_X86_64 - wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE)); + load_gs_index(vmx->host_state.gs_sel); + wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs); +#else + loadsegment(gs, vmx->host_state.gs_sel); #endif - local_irq_restore(flags); } reload_tss(); #ifdef CONFIG_X86_64 @@ -2589,8 +2583,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ - vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs()); /* 22.2.4 */ - vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs()); /* 22.2.4 */ + vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */ + vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */ vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ #ifdef CONFIG_X86_64 rdmsrl(MSR_FS_BASE, a); -- cgit v1.2.3-70-g09d2 From 617d34d9e5d8326ec8f188c616aa06ac59d083fe Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Tue, 21 Sep 2010 12:01:51 -0700 Subject: x86, mm: Hold mm->page_table_lock while doing vmalloc_sync Take mm->page_table_lock while syncing the vmalloc region. This prevents a race with the Xen pagetable pin/unpin code, which expects that the page_table_lock is already held. If this race occurs, then Xen can see an inconsistent page type (a page can either be read/write or a pagetable page, and pin/unpin converts it between them), which will cause either the pin or the set_p[gm]d to fail; either will crash the kernel. vmalloc_sync_all() should be called rarely, so this extra use of page_table_lock should not interfere with its normal users. The mm pointer is stashed in the pgd page's index field, as that won't be otherwise used for pgds. Reported-by: Ian Campbell Originally-by: Jan Beulich LKML-Reference: <4CB88A4C.1080305@goop.org> Signed-off-by: Jeremy Fitzhardinge Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/pgtable.h | 2 ++ arch/x86/mm/fault.c | 11 ++++++++++- arch/x86/mm/init_64.c | 7 +++++++ arch/x86/mm/pgtable.c | 20 +++++++++++++++++--- 4 files changed, 36 insertions(+), 4 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 2d0a33bd297..ada823a13c7 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -28,6 +28,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; extern spinlock_t pgd_lock; extern struct list_head pgd_list; +extern struct mm_struct *pgd_page_get_mm(struct page *page); + #ifdef CONFIG_PARAVIRT #include #else /* !CONFIG_PARAVIRT */ diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index caec22906d7..6c27c39f8a3 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -229,7 +229,16 @@ void vmalloc_sync_all(void) spin_lock_irqsave(&pgd_lock, flags); list_for_each_entry(page, &pgd_list, lru) { - if (!vmalloc_sync_one(page_address(page), address)) + spinlock_t *pgt_lock; + int ret; + + pgt_lock = &pgd_page_get_mm(page)->page_table_lock; + + spin_lock(pgt_lock); + ret = vmalloc_sync_one(page_address(page), address); + spin_unlock(pgt_lock); + + if (!ret) break; } spin_unlock_irqrestore(&pgd_lock, flags); diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 1ad7c0ff5d2..4d323fb770c 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -116,12 +116,19 @@ void sync_global_pgds(unsigned long start, unsigned long end) spin_lock_irqsave(&pgd_lock, flags); list_for_each_entry(page, &pgd_list, lru) { pgd_t *pgd; + spinlock_t *pgt_lock; + pgd = (pgd_t *)page_address(page) + pgd_index(address); + pgt_lock = &pgd_page_get_mm(page)->page_table_lock; + spin_lock(pgt_lock); + if (pgd_none(*pgd)) set_pgd(pgd, *pgd_ref); else BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); + + spin_unlock(pgt_lock); } spin_unlock_irqrestore(&pgd_lock, flags); } diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 5c4ee422590..c70e57dbb49 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -87,7 +87,19 @@ static inline void pgd_list_del(pgd_t *pgd) #define UNSHARED_PTRS_PER_PGD \ (SHARED_KERNEL_PMD ? 
KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) -static void pgd_ctor(pgd_t *pgd) + +static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm) +{ + BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm)); + virt_to_page(pgd)->index = (pgoff_t)mm; +} + +struct mm_struct *pgd_page_get_mm(struct page *page) +{ + return (struct mm_struct *)page->index; +} + +static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) { /* If the pgd points to a shared pagetable level (either the ptes in non-PAE, or shared PMD in PAE), then just copy the @@ -105,8 +117,10 @@ static void pgd_ctor(pgd_t *pgd) } /* list required to sync kernel mapping updates */ - if (!SHARED_KERNEL_PMD) + if (!SHARED_KERNEL_PMD) { + pgd_set_mm(pgd, mm); pgd_list_add(pgd); + } } static void pgd_dtor(pgd_t *pgd) @@ -272,7 +286,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) */ spin_lock_irqsave(&pgd_lock, flags); - pgd_ctor(pgd); + pgd_ctor(mm, pgd); pgd_prepopulate_pmd(mm, pgd, pmds); spin_unlock_irqrestore(&pgd_lock, flags); -- cgit v1.2.3-70-g09d2 From 3234282f33b29d349bcada40204fc7c8fda7fe72 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Tue, 19 Oct 2010 14:52:26 +0100 Subject: x86, asm: Fix CFI macro invocations to deal with shortcomings in gas gas prior to (perhaps) 2.16.90 has problems with passing non- parenthesized expressions containing spaces to macros. Spaces, however, get inserted by cpp between any macro expanding to a number and a subsequent + or -. For the +, current x86 gas then removes the space again (future gas may not do so), but for the - the space gets retained and is then considered a separator between macro arguments. Fix the respective definitions for both the - and + cases, so that they neither contain spaces nor make cpp insert any (the latter by adding seemingly redundant parentheses). Signed-off-by: Jan Beulich LKML-Reference: <4CBDBEBA020000780001E05A@vpn.id2.novell.com> Cc: Alexander van Heukelum Signed-off-by: H. 
Peter Anvin --- Kbuild | 2 +- arch/x86/include/asm/calling.h | 52 ++++++++++++++++++++------------------- arch/x86/include/asm/entry_arch.h | 19 +++----------- arch/x86/include/asm/segment.h | 32 ++++++++++++------------ arch/x86/kernel/asm-offsets_32.c | 4 +-- arch/x86/kernel/entry_32.S | 6 ++--- arch/x86/kernel/entry_64.S | 20 +++------------ 7 files changed, 56 insertions(+), 79 deletions(-) (limited to 'arch/x86/include') diff --git a/Kbuild b/Kbuild index e3737ad72b5..399593942a9 100644 --- a/Kbuild +++ b/Kbuild @@ -53,7 +53,7 @@ targets += arch/$(SRCARCH)/kernel/asm-offsets.s # Default sed regexp - multiline due to syntax constraints define sed-y "/^->/{s:->#\(.*\):/* \1 */:; \ - s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \ + s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 (\2) /* \3 */:; \ s:->::; p;}" endef diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h index 0e63c9a2a8d..30af5a83216 100644 --- a/arch/x86/include/asm/calling.h +++ b/arch/x86/include/asm/calling.h @@ -48,36 +48,38 @@ For 32-bit we have the following conventions - kernel is built with /* - * 64-bit system call stack frame layout defines and helpers, - * for assembly code: + * 64-bit system call stack frame layout defines and helpers, for + * assembly code (note that the seemingly unnecessary parentheses + * are to prevent cpp from inserting spaces in expressions that get + * passed to macros): */ -#define R15 0 -#define R14 8 -#define R13 16 -#define R12 24 -#define RBP 32 -#define RBX 40 +#define R15 (0) +#define R14 (8) +#define R13 (16) +#define R12 (24) +#define RBP (32) +#define RBX (40) /* arguments: interrupts/non tracing syscalls only save up to here: */ -#define R11 48 -#define R10 56 -#define R9 64 -#define R8 72 -#define RAX 80 -#define RCX 88 -#define RDX 96 -#define RSI 104 -#define RDI 112 -#define ORIG_RAX 120 /* + error_code */ +#define R11 (48) +#define R10 (56) +#define R9 (64) +#define R8 (72) +#define RAX (80) +#define RCX (88) +#define RDX (96) +#define RSI (104) +#define RDI (112) +#define ORIG_RAX (120) /* + error_code */ /* end of arguments */ /* cpu exception frame or undefined in case of fast syscall: */ -#define RIP 128 -#define CS 136 -#define EFLAGS 144 -#define RSP 152 -#define SS 160 +#define RIP (128) +#define CS (136) +#define EFLAGS (144) +#define RSP (152) +#define SS (160) #define ARGOFFSET R11 #define SWFRAME ORIG_RAX @@ -111,7 +113,7 @@ For 32-bit we have the following conventions - kernel is built with .endif .endm -#define ARG_SKIP 9*8 +#define ARG_SKIP (9*8) .macro RESTORE_ARGS skiprax=0, addskip=0, skiprcx=0, skipr11=0, \ skipr8910=0, skiprdx=0 @@ -169,7 +171,7 @@ For 32-bit we have the following conventions - kernel is built with .endif .endm -#define REST_SKIP 6*8 +#define REST_SKIP (6*8) .macro SAVE_REST subq $REST_SKIP, %rsp diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h index 8e8ec663a98..4d2966e7e85 100644 --- a/arch/x86/include/asm/entry_arch.h +++ b/arch/x86/include/asm/entry_arch.h @@ -16,22 +16,11 @@ BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR) BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR) BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR) -BUILD_INTERRUPT3(invalidate_interrupt0,INVALIDATE_TLB_VECTOR_START+0, - smp_invalidate_interrupt) -BUILD_INTERRUPT3(invalidate_interrupt1,INVALIDATE_TLB_VECTOR_START+1, - smp_invalidate_interrupt) -BUILD_INTERRUPT3(invalidate_interrupt2,INVALIDATE_TLB_VECTOR_START+2, - 
smp_invalidate_interrupt) -BUILD_INTERRUPT3(invalidate_interrupt3,INVALIDATE_TLB_VECTOR_START+3, - smp_invalidate_interrupt) -BUILD_INTERRUPT3(invalidate_interrupt4,INVALIDATE_TLB_VECTOR_START+4, - smp_invalidate_interrupt) -BUILD_INTERRUPT3(invalidate_interrupt5,INVALIDATE_TLB_VECTOR_START+5, - smp_invalidate_interrupt) -BUILD_INTERRUPT3(invalidate_interrupt6,INVALIDATE_TLB_VECTOR_START+6, - smp_invalidate_interrupt) -BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7, +.irpc idx, "01234567" +BUILD_INTERRUPT3(invalidate_interrupt\idx, + (INVALIDATE_TLB_VECTOR_START)+\idx, smp_invalidate_interrupt) +.endr #endif BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR) diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index 14e0ed86a6f..231f1c1d660 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -73,31 +73,31 @@ #define GDT_ENTRY_DEFAULT_USER_DS 15 -#define GDT_ENTRY_KERNEL_BASE 12 +#define GDT_ENTRY_KERNEL_BASE (12) -#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0) +#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0) -#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1) +#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1) -#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4) -#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5) +#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4) +#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE+5) -#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6) -#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11) +#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE+6) +#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE+11) -#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14) -#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8) +#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE+14) +#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS*8) -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15) +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE+15) #ifdef CONFIG_SMP #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8) #else #define __KERNEL_PERCPU 0 #endif -#define GDT_ENTRY_STACK_CANARY (GDT_ENTRY_KERNEL_BASE + 16) +#define GDT_ENTRY_STACK_CANARY (GDT_ENTRY_KERNEL_BASE+16) #ifdef CONFIG_CC_STACKPROTECTOR -#define __KERNEL_STACK_CANARY (GDT_ENTRY_STACK_CANARY * 8) +#define __KERNEL_STACK_CANARY (GDT_ENTRY_STACK_CANARY*8) #else #define __KERNEL_STACK_CANARY 0 #endif @@ -182,10 +182,10 @@ #endif -#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8) -#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8) -#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3) -#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3) +#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8) +#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8) +#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3) +#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3) #ifndef CONFIG_PARAVIRT #define get_kernel_rpl() 0 #endif diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index dfdbf640389..1a4088dda37 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c @@ -99,9 +99,7 @@ void foo(void) DEFINE(PAGE_SIZE_asm, PAGE_SIZE); DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT); - DEFINE(PTRS_PER_PTE, PTRS_PER_PTE); - DEFINE(PTRS_PER_PMD, PTRS_PER_PMD); - DEFINE(PTRS_PER_PGD, PTRS_PER_PGD); + DEFINE(THREAD_SIZE_asm, THREAD_SIZE); OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 9fb188d7bc7..f73a4b881aa 100644 --- a/arch/x86/kernel/entry_32.S +++ 
b/arch/x86/kernel/entry_32.S @@ -382,20 +382,20 @@ sysenter_past_esp: * enough kernel state to call TRACE_IRQS_OFF can be called - but * we immediately enable interrupts at that point anyway. */ - pushl_cfi $(__USER_DS) + pushl_cfi $__USER_DS /*CFI_REL_OFFSET ss, 0*/ pushl_cfi %ebp CFI_REL_OFFSET esp, 0 pushfl_cfi orl $X86_EFLAGS_IF, (%esp) - pushl_cfi $(__USER_CS) + pushl_cfi $__USER_CS /*CFI_REL_OFFSET cs, 0*/ /* * Push current_thread_info()->sysenter_return to the stack. * A tiny bit of offset fixup is necessary - 4*4 means the 4 words * pushed above; +8 corresponds to copy_thread's esp0 setting. */ - pushl_cfi (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp) + pushl_cfi TI_sysenter_return-THREAD_SIZE_asm+8+4*4(%esp) CFI_REL_OFFSET eip, 0 pushl_cfi %eax diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 8851a2bb8c0..9cc9a71f8fb 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -963,22 +963,10 @@ apicinterrupt X86_PLATFORM_IPI_VECTOR \ x86_platform_ipi smp_x86_platform_ipi #ifdef CONFIG_SMP -apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \ - invalidate_interrupt0 smp_invalidate_interrupt -apicinterrupt INVALIDATE_TLB_VECTOR_START+1 \ - invalidate_interrupt1 smp_invalidate_interrupt -apicinterrupt INVALIDATE_TLB_VECTOR_START+2 \ - invalidate_interrupt2 smp_invalidate_interrupt -apicinterrupt INVALIDATE_TLB_VECTOR_START+3 \ - invalidate_interrupt3 smp_invalidate_interrupt -apicinterrupt INVALIDATE_TLB_VECTOR_START+4 \ - invalidate_interrupt4 smp_invalidate_interrupt -apicinterrupt INVALIDATE_TLB_VECTOR_START+5 \ - invalidate_interrupt5 smp_invalidate_interrupt -apicinterrupt INVALIDATE_TLB_VECTOR_START+6 \ - invalidate_interrupt6 smp_invalidate_interrupt -apicinterrupt INVALIDATE_TLB_VECTOR_START+7 \ - invalidate_interrupt7 smp_invalidate_interrupt +.irpc idx, "01234567" +apicinterrupt (INVALIDATE_TLB_VECTOR_START)+\idx \ + invalidate_interrupt\idx smp_invalidate_interrupt +.endr #endif apicinterrupt THRESHOLD_APIC_VECTOR \ -- cgit v1.2.3-70-g09d2 From a68c439b1966c91f0ef474e2bf275d6792312726 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 6 Oct 2010 12:27:53 +0200 Subject: apic, x86: Check if EILVT APIC registers are available (AMD only) This patch implements checks for the availability of LVT entries (APIC500-530) and reserves it if used. The check becomes necessary since we want to let the BIOS provide the LVT offsets. The offsets should be determined by the subsystems using it like those for MCE threshold or IBS. On K8 only offset 0 (APIC500) and MCE interrupts are supported. Beginning with family 10h at least 4 offsets are available. Since offsets must be consistent for all cores, we keep track of the LVT offsets in software and reserve the offset for the same vector also to be used on other cores. An offset is freed by setting the entry to APIC_EILVT_MASKED. If the BIOS is right, there should be no conflicts. Otherwise a "[Firmware Bug]: ..." error message is generated. However, if software does not properly determines the offsets, it is not necessarily a BIOS bug. 
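The reservation scheme described above reduces to a small lock-free protocol. A condensed sketch of the idea, simplified from the apic.c hunk below: function names are shortened for the sketch, and the offset bounds check, the FW_BUG error reporting and the actual APIC register write are omitted.

	static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX];	/* one slot per LVT offset */

	/* An entry may be (re)written only if it was masked, is being masked,
	 * or keeps exactly the same vector/message-type bits. */
	static int entry_is_changeable(unsigned int old, unsigned int new)
	{
		return (old & APIC_EILVT_MASKED) ||
		       (new == APIC_EILVT_MASKED) ||
		       ((new & ~APIC_EILVT_MASKED) == old);
	}

	/* Try to claim 'offset' for 'new'; the return value equals 'new' only
	 * if this core's setting is now the one recorded for all cores. */
	static unsigned int reserve_offset(int offset, unsigned int new)
	{
		unsigned int rsvd = atomic_read(&eilvt_offsets[offset]) & ~APIC_EILVT_MASKED;

		do {
			if (rsvd && !entry_is_changeable(rsvd, new))
				return rsvd;	/* claimed by another core, different vector */
			rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
		} while (rsvd != new);

		return new;
	}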
Signed-off-by: Robert Richter LKML-Reference: <1286360874-1471-2-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/apicdef.h | 1 + arch/x86/kernel/apic/apic.c | 83 +++++++++++++++++++++++++++++++++++++----- 2 files changed, 75 insertions(+), 9 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h index 7fe3b3060f0..a859ca461fb 100644 --- a/arch/x86/include/asm/apicdef.h +++ b/arch/x86/include/asm/apicdef.h @@ -131,6 +131,7 @@ #define APIC_EILVTn(n) (0x500 + 0x10 * n) #define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */ #define APIC_EILVT_NR_AMD_10H 4 +#define APIC_EILVT_NR_MAX APIC_EILVT_NR_AMD_10H #define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF) #define APIC_EILVT_MSG_FIX 0x0 #define APIC_EILVT_MSG_SMI 0x2 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 8cf86fb3b4e..2bfeafd24f5 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -52,6 +52,7 @@ #include #include #include +#include unsigned int num_processors; @@ -370,24 +371,88 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) } /* - * Setup extended LVT, AMD specific (K8, family 10h) + * Setup extended LVT, AMD specific * - * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and - * MCE interrupts are supported. Thus MCE offset must be set to 0. + * Software should use the LVT offsets the BIOS provides. The offsets + * are determined by the subsystems using it like those for MCE + * threshold or IBS. On K8 only offset 0 (APIC500) and MCE interrupts + * are supported. Beginning with family 10h at least 4 offsets are + * available. * - * If mask=1, the LVT entry does not generate interrupts while mask=0 - * enables the vector. See also the BKDGs. + * Since the offsets must be consistent for all cores, we keep track + * of the LVT offsets in software and reserve the offset for the same + * vector also to be used on other cores. An offset is freed by + * setting the entry to APIC_EILVT_MASKED. + * + * If the BIOS is right, there should be no conflicts. Otherwise a + * "[Firmware Bug]: ..." error message is generated. However, if + * software does not properly determines the offsets, it is not + * necessarily a BIOS bug. */ #define APIC_EILVT_LVTOFF_MCE 0 #define APIC_EILVT_LVTOFF_IBS 1 -static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask) +static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX]; + +static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new) +{ + return (old & APIC_EILVT_MASKED) + || (new == APIC_EILVT_MASKED) + || ((new & ~APIC_EILVT_MASKED) == old); +} + +static unsigned int reserve_eilvt_offset(int offset, unsigned int new) +{ + unsigned int rsvd; /* 0: uninitialized */ + + if (offset >= APIC_EILVT_NR_MAX) + return ~0; + + rsvd = atomic_read(&eilvt_offsets[offset]) & ~APIC_EILVT_MASKED; + do { + if (rsvd && + !eilvt_entry_is_changeable(rsvd, new)) + /* may not change if vectors are different */ + return rsvd; + rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new); + } while (rsvd != new); + + return new; +} + +/* + * If mask=1, the LVT entry does not generate interrupts while mask=0 + * enables the vector. See also the BKDGs. 
+ */ + +static int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask) { - unsigned long reg = (lvt_off << 4) + APIC_EILVTn(0); - unsigned int v = (mask << 16) | (msg_type << 8) | vector; + unsigned long reg = APIC_EILVTn(offset); + unsigned int new, old, reserved; + + new = (mask << 16) | (msg_type << 8) | vector; + old = apic_read(reg); + reserved = reserve_eilvt_offset(offset, new); - apic_write(reg, v); + if (reserved != new) { + pr_err(FW_BUG "cpu %d, try to setup vector 0x%x, but " + "vector 0x%x was already reserved by another core, " + "APIC%lX=0x%x\n", + smp_processor_id(), new, reserved, reg, old); + return -EINVAL; + } + + if (!eilvt_entry_is_changeable(old, new)) { + pr_err(FW_BUG "cpu %d, try to setup vector 0x%x but " + "register already in use, APIC%lX=0x%x\n", + smp_processor_id(), new, reg, old); + return -EBUSY; + } + + apic_write(reg, new); + + return 0; } u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask) -- cgit v1.2.3-70-g09d2 From 27afdf2008da0b8878a73e32e4eb12381b84e224 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 6 Oct 2010 12:27:54 +0200 Subject: apic, x86: Use BIOS settings for IBS and MCE threshold interrupt LVT offsets We want the BIOS to set up the EILVT APIC registers. The offsets were hardcoded and BIOS settings were overwritten by the OS. Now, the subsystems for MCE threshold and IBS determine the LVT offset from the registers the BIOS has set up. If the BIOS setup is buggy on a family 10h system, a workaround enables IBS. If the OS determines an invalid register setup, a "[Firmware Bug]: " error message is reported. We also need this change for upcoming CPU families. Signed-off-by: Robert Richter LKML-Reference: <1286360874-1471-3-git-send-email-robert.richter@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/apic.h | 4 +- arch/x86/kernel/apic/apic.c | 19 +---- arch/x86/kernel/cpu/mcheck/mce_amd.c | 27 ++++++- arch/x86/oprofile/op_model_amd.c | 145 ++++++++++++++++++++++++++++++----- 4 files changed, 154 insertions(+), 41 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 1fa03e04ae4..286de34b0ed 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -252,9 +252,7 @@ static inline int apic_is_clustered_box(void) } #endif -extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask); -extern u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask); - +extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask); #else /* !CONFIG_X86_LOCAL_APIC */ static inline void lapic_shutdown(void) { } diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 2bfeafd24f5..850657d1b0e 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -390,9 +390,6 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) * necessarily a BIOS bug. */ -#define APIC_EILVT_LVTOFF_MCE 0 -#define APIC_EILVT_LVTOFF_IBS 1 - static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX]; static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new) @@ -426,7 +423,7 @@ static unsigned int reserve_eilvt_offset(int offset, unsigned int new) * enables the vector. See also the BKDGs. 
*/ -static int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask) +int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask) { unsigned long reg = APIC_EILVTn(offset); unsigned int new, old, reserved; @@ -454,19 +451,7 @@ static int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask) return 0; } - -u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask) -{ - setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask); - return APIC_EILVT_LVTOFF_MCE; -} - -u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask) -{ - setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask); - return APIC_EILVT_LVTOFF_IBS; -} -EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs); +EXPORT_SYMBOL_GPL(setup_APIC_eilvt); /* * Program the next event, relative to now diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 39aaee5c1ab..80c482382d5 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -131,7 +131,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) u32 low = 0, high = 0, address = 0; unsigned int bank, block; struct thresh_restart tr; - u8 lvt_off; + int lvt_off = -1; + u8 offset; for (bank = 0; bank < NR_BANKS; ++bank) { for (block = 0; block < NR_BLOCKS; ++block) { @@ -162,8 +163,28 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) if (shared_bank[bank] && c->cpu_core_id) break; #endif - lvt_off = setup_APIC_eilvt_mce(THRESHOLD_APIC_VECTOR, - APIC_EILVT_MSG_FIX, 0); + offset = (high & MASK_LVTOFF_HI) >> 20; + if (lvt_off < 0) { + if (setup_APIC_eilvt(offset, + THRESHOLD_APIC_VECTOR, + APIC_EILVT_MSG_FIX, 0)) { + pr_err(FW_BUG "cpu %d, failed to " + "setup threshold interrupt " + "for bank %d, block %d " + "(MSR%08X=0x%x%08x)", + smp_processor_id(), bank, block, + address, high, low); + continue; + } + lvt_off = offset; + } else if (lvt_off != offset) { + pr_err(FW_BUG "cpu %d, invalid threshold " + "interrupt offset %d for bank %d," + "block %d (MSR%08X=0x%x%08x)", + smp_processor_id(), lvt_off, bank, + block, address, high, low); + continue; + } high &= ~MASK_LVTOFF_HI; high |= lvt_off << 20; diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index b67a6b5aa8d..42fb46f8388 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -64,15 +64,22 @@ static u64 ibs_op_ctl; * IBS cpuid feature detection */ -#define IBS_CPUID_FEATURES 0x8000001b +#define IBS_CPUID_FEATURES 0x8000001b /* * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but * bit 0 is used to indicate the existence of IBS. 
*/ -#define IBS_CAPS_AVAIL (1LL<<0) -#define IBS_CAPS_RDWROPCNT (1LL<<3) -#define IBS_CAPS_OPCNT (1LL<<4) +#define IBS_CAPS_AVAIL (1U<<0) +#define IBS_CAPS_RDWROPCNT (1U<<3) +#define IBS_CAPS_OPCNT (1U<<4) + +/* + * IBS APIC setup + */ +#define IBSCTL 0x1cc +#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8) +#define IBSCTL_LVT_OFFSET_MASK 0x0F /* * IBS randomization macros @@ -266,6 +273,74 @@ static void op_amd_stop_ibs(void) wrmsrl(MSR_AMD64_IBSOPCTL, 0); } +static inline int eilvt_is_available(int offset) +{ + /* check if we may assign a vector */ + return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1); +} + +static inline int ibs_eilvt_valid(void) +{ + u64 val; + int offset; + + rdmsrl(MSR_AMD64_IBSCTL, val); + if (!(val & IBSCTL_LVT_OFFSET_VALID)) { + pr_err(FW_BUG "cpu %d, invalid IBS " + "interrupt offset %d (MSR%08X=0x%016llx)", + smp_processor_id(), offset, + MSR_AMD64_IBSCTL, val); + return 0; + } + + offset = val & IBSCTL_LVT_OFFSET_MASK; + + if (eilvt_is_available(offset)) + return !0; + + pr_err(FW_BUG "cpu %d, IBS interrupt offset %d " + "not available (MSR%08X=0x%016llx)", + smp_processor_id(), offset, + MSR_AMD64_IBSCTL, val); + + return 0; +} + +static inline int get_ibs_offset(void) +{ + u64 val; + + rdmsrl(MSR_AMD64_IBSCTL, val); + if (!(val & IBSCTL_LVT_OFFSET_VALID)) + return -EINVAL; + + return val & IBSCTL_LVT_OFFSET_MASK; +} + +static void setup_APIC_ibs(void) +{ + int offset; + + offset = get_ibs_offset(); + if (offset < 0) + goto failed; + + if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0)) + return; +failed: + pr_warn("oprofile: IBS APIC setup failed on cpu #%d\n", + smp_processor_id()); +} + +static void clear_APIC_ibs(void) +{ + int offset; + + offset = get_ibs_offset(); + if (offset >= 0) + setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); +} + #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX static void op_mux_switch_ctrl(struct op_x86_model_spec const *model, @@ -376,13 +451,13 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, } if (ibs_caps) - setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0); + setup_APIC_ibs(); } static void op_amd_cpu_shutdown(void) { if (ibs_caps) - setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1); + clear_APIC_ibs(); } static int op_amd_check_ctrs(struct pt_regs * const regs, @@ -445,16 +520,11 @@ static void op_amd_stop(struct op_msrs const * const msrs) op_amd_stop_ibs(); } -static int __init_ibs_nmi(void) +static int setup_ibs_ctl(int ibs_eilvt_off) { -#define IBSCTL_LVTOFFSETVAL (1 << 8) -#define IBSCTL 0x1cc struct pci_dev *cpu_cfg; int nodes; u32 value = 0; - u8 ibs_eilvt_off; - - ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1); nodes = 0; cpu_cfg = NULL; @@ -466,21 +536,60 @@ static int __init_ibs_nmi(void) break; ++nodes; pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off - | IBSCTL_LVTOFFSETVAL); + | IBSCTL_LVT_OFFSET_VALID); pci_read_config_dword(cpu_cfg, IBSCTL, &value); - if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) { + if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) { pci_dev_put(cpu_cfg); printk(KERN_DEBUG "Failed to setup IBS LVT offset, " - "IBSCTL = 0x%08x", value); - return 1; + "IBSCTL = 0x%08x\n", value); + return -EINVAL; } } while (1); if (!nodes) { - printk(KERN_DEBUG "No CPU node configured for IBS"); - return 1; + printk(KERN_DEBUG "No CPU node configured for IBS\n"); + return -ENODEV; + } + + return 0; +} + +static int force_ibs_eilvt_setup(void) +{ + int i; + int ret; + + /* find the next free available EILVT entry */ + for (i = 1; i < 4; i++) { + if 
(!eilvt_is_available(i)) + continue; + ret = setup_ibs_ctl(i); + if (ret) + return ret; + return 0; } + printk(KERN_DEBUG "No EILVT entry available\n"); + + return -EBUSY; +} + +static int __init_ibs_nmi(void) +{ + int ret; + + if (ibs_eilvt_valid()) + return 0; + + ret = force_ibs_eilvt_setup(); + if (ret) + return ret; + + if (!ibs_eilvt_valid()) + return -EFAULT; + + pr_err(FW_BUG "workaround enabled for IBS LVT offset\n"); + return 0; } -- cgit v1.2.3-70-g09d2 From b40827fa7268fda8a62490728a61c2856f33830b Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Sat, 28 Aug 2010 15:58:33 +0200 Subject: x86-32, mm: Add an initial page table for core bootstrapping This patch adds an initial page table with low mappings used exclusively for booting APs/resuming after ACPI suspend/machine restart. After this, there's no need to add low mappings to swapper_pg_dir and zap them later or create own swsusp PGD page solely for ACPI sleep needs - we have initial_page_table for that. Signed-off-by: Borislav Petkov LKML-Reference: <20101020070526.GA9588@liondog.tnic> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/pgtable_32.h | 2 +- arch/x86/include/asm/tlbflush.h | 2 -- arch/x86/include/asm/trampoline.h | 3 --- arch/x86/kernel/acpi/sleep.c | 7 ++++- arch/x86/kernel/head32.c | 1 + arch/x86/kernel/head_32.S | 55 ++++++++++++++++++--------------------- arch/x86/kernel/reboot.c | 10 ++----- arch/x86/kernel/setup.c | 18 ++++++++++++- arch/x86/kernel/smpboot.c | 16 +++--------- arch/x86/kernel/trampoline.c | 16 ------------ arch/x86/mm/init_32.c | 45 -------------------------------- 11 files changed, 56 insertions(+), 119 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index f686f49e8b7..8abde9ec90b 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -26,7 +26,7 @@ struct mm_struct; struct vm_area_struct; extern pgd_t swapper_pg_dir[1024]; -extern pgd_t trampoline_pg_dir[1024]; +extern pgd_t initial_page_table[1024]; static inline void pgtable_cache_init(void) { } static inline void check_pgt_cache(void) { } diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 7f3eba08e7d..169be8938b9 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -172,6 +172,4 @@ static inline void flush_tlb_kernel_range(unsigned long start, flush_tlb_all(); } -extern void zap_low_mappings(bool early); - #endif /* _ASM_X86_TLBFLUSH_H */ diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h index 4dde797c057..f4500fb3b48 100644 --- a/arch/x86/include/asm/trampoline.h +++ b/arch/x86/include/asm/trampoline.h @@ -13,16 +13,13 @@ extern unsigned char *trampoline_base; extern unsigned long init_rsp; extern unsigned long initial_code; -extern unsigned long initial_page_table; extern unsigned long initial_gs; #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE) extern unsigned long setup_trampoline(void); -extern void __init setup_trampoline_page_table(void); extern void __init reserve_trampoline_memory(void); #else -static inline void setup_trampoline_page_table(void) {} static inline void reserve_trampoline_memory(void) {} #endif /* CONFIG_X86_TRAMPOLINE */ diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 33cec152070..b35e1ab8ba0 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -12,6 +12,11 @@ #include #include +#ifdef CONFIG_X86_32 
+#include +#include +#endif + #include "realmode/wakeup.h" #include "sleep.h" @@ -90,7 +95,7 @@ int acpi_save_state_mem(void) #ifndef CONFIG_64BIT header->pmode_entry = (u32)&wakeup_pmode_return; - header->pmode_cr3 = (u32)(swsusp_pg_dir - __PAGE_OFFSET); + header->pmode_cr3 = (u32)__pa(&initial_page_table); saved_magic = 0x12345678; #else /* CONFIG_64BIT */ header->trampoline_segment = setup_trampoline() >> 4; diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index 784360c0625..8b9c2019fc0 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -17,6 +17,7 @@ #include #include #include +#include static void __init i386_default_early_setup(void) { diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index fa8c1b8e09f..bcece91dd31 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -183,13 +183,12 @@ default_entry: #ifdef CONFIG_X86_PAE /* - * In PAE mode swapper_pg_dir is statically defined to contain enough - * entries to cover the VMSPLIT option (that is the top 1, 2 or 3 - * entries). The identity mapping is handled by pointing two PGD - * entries to the first kernel PMD. + * In PAE mode initial_page_table is statically defined to contain + * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3 + * entries). The identity mapping is handled by pointing two PGD entries + * to the first kernel PMD. * - * Note the upper half of each PMD or PTE are always zero at - * this stage. + * Note the upper half of each PMD or PTE are always zero at this stage. */ #define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */ @@ -197,7 +196,7 @@ default_entry: xorl %ebx,%ebx /* %ebx is kept at zero */ movl $pa(__brk_base), %edi - movl $pa(swapper_pg_pmd), %edx + movl $pa(initial_pg_pmd), %edx movl $PTE_IDENT_ATTR, %eax 10: leal PDE_IDENT_ATTR(%edi),%ecx /* Create PMD entry */ @@ -226,14 +225,14 @@ default_entry: movl %eax, pa(max_pfn_mapped) /* Do early initialization of the fixmap area */ - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8) + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax + movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8) #else /* Not PAE */ page_pde_offset = (__PAGE_OFFSET >> 20); movl $pa(__brk_base), %edi - movl $pa(swapper_pg_dir), %edx + movl $pa(initial_page_table), %edx movl $PTE_IDENT_ATTR, %eax 10: leal PDE_IDENT_ATTR(%edi),%ecx /* Create PDE entry */ @@ -257,8 +256,8 @@ page_pde_offset = (__PAGE_OFFSET >> 20); movl %eax, pa(max_pfn_mapped) /* Do early initialization of the fixmap area */ - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax - movl %eax,pa(swapper_pg_dir+0xffc) + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax + movl %eax,pa(initial_page_table+0xffc) #endif jmp 3f /* @@ -334,7 +333,7 @@ ENTRY(startup_32_smp) /* * Enable paging */ - movl pa(initial_page_table), %eax + movl $pa(initial_page_table), %eax movl %eax,%cr3 /* set the page table pointer.. 
*/ movl %cr0,%eax orl $X86_CR0_PG,%eax @@ -614,8 +613,6 @@ ignore_int: .align 4 ENTRY(initial_code) .long i386_start_kernel -ENTRY(initial_page_table) - .long pa(swapper_pg_dir) /* * BSS section @@ -623,20 +620,18 @@ ENTRY(initial_page_table) __PAGE_ALIGNED_BSS .align PAGE_SIZE_asm #ifdef CONFIG_X86_PAE -swapper_pg_pmd: +initial_pg_pmd: .fill 1024*KPMDS,4,0 #else -ENTRY(swapper_pg_dir) +ENTRY(initial_page_table) .fill 1024,4,0 #endif -swapper_pg_fixmap: +initial_pg_fixmap: .fill 1024,4,0 -#ifdef CONFIG_X86_TRAMPOLINE -ENTRY(trampoline_pg_dir) - .fill 1024,4,0 -#endif ENTRY(empty_zero_page) .fill 4096,1,0 +ENTRY(swapper_pg_dir) + .fill 1024,4,0 /* * This starts the data section. @@ -645,20 +640,20 @@ ENTRY(empty_zero_page) __PAGE_ALIGNED_DATA /* Page-aligned for the benefit of paravirt? */ .align PAGE_SIZE_asm -ENTRY(swapper_pg_dir) - .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */ +ENTRY(initial_page_table) + .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */ # if KPMDS == 3 - .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 - .long pa(swapper_pg_pmd+PGD_IDENT_ATTR+0x1000),0 - .long pa(swapper_pg_pmd+PGD_IDENT_ATTR+0x2000),0 + .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 + .long pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0 + .long pa(initial_pg_pmd+PGD_IDENT_ATTR+0x2000),0 # elif KPMDS == 2 .long 0,0 - .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 - .long pa(swapper_pg_pmd+PGD_IDENT_ATTR+0x1000),0 + .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 + .long pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0 # elif KPMDS == 1 .long 0,0 .long 0,0 - .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 + .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 # else # error "Kernel PMDs should be 1, 2 or 3" # endif diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 7a4cf14223b..f7f53dcd3e0 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -371,16 +371,10 @@ void machine_real_restart(const unsigned char *code, int length) CMOS_WRITE(0x00, 0x8f); spin_unlock(&rtc_lock); - /* Remap the kernel at virtual address zero, as well as offset zero - from the kernel segment. This assumes the kernel segment starts at - virtual address PAGE_OFFSET. */ - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY, - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS); - /* - * Use `swapper_pg_dir' as our page directory. + * Switch back to the initial page table. */ - load_cr3(swapper_pg_dir); + load_cr3(initial_page_table); /* Write 0x1234 to absolute memory location 0x472. 
The BIOS reads this on booting to tell it to "Bypass memory test (also warm diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 322b24fbeaf..af6cf2bfcee 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -728,6 +728,17 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_X86_32 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); visws_early_detect(); + + /* + * copy kernel address range established so far and switch + * to the proper swapper page table + */ + clone_pgd_range(swapper_pg_dir + KERNEL_PGD_BOUNDARY, + initial_page_table + KERNEL_PGD_BOUNDARY, + KERNEL_PGD_PTRS); + + load_cr3(swapper_pg_dir); + __flush_tlb_all(); #else printk(KERN_INFO "Command line: %s\n", boot_command_line); #endif @@ -1009,7 +1020,12 @@ void __init setup_arch(char **cmdline_p) paging_init(); x86_init.paging.pagetable_setup_done(swapper_pg_dir); - setup_trampoline_page_table(); +#ifdef CONFIG_X86_32 + /* sync back kernel address range */ + clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY, + swapper_pg_dir + KERNEL_PGD_BOUNDARY, + KERNEL_PGD_PTRS); +#endif tboot_probe(); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 63a1a5596ac..e63bb518585 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -298,22 +298,16 @@ notrace static void __cpuinit start_secondary(void *unused) * fragile that we want to limit the things done here to the * most necessary things. */ + cpu_init(); + preempt_disable(); + smp_callin(); #ifdef CONFIG_X86_32 - /* - * Switch away from the trampoline page-table - * - * Do this before cpu_init() because it needs to access per-cpu - * data which may not be mapped in the trampoline page-table. - */ + /* switch away from the initial page table */ load_cr3(swapper_pg_dir); __flush_tlb_all(); #endif - cpu_init(); - preempt_disable(); - smp_callin(); - /* otherwise gcc will move up smp_processor_id before the cpu_init */ barrier(); /* @@ -772,7 +766,6 @@ do_rest: #ifdef CONFIG_X86_32 /* Stack for startup_32 can be just as for start_secondary onwards */ irq_ctx_init(cpu); - initial_page_table = __pa(&trampoline_pg_dir); #else clear_tsk_thread_flag(c_idle.idle, TIF_FORK); initial_gs = per_cpu_offset(cpu); @@ -921,7 +914,6 @@ int __cpuinit native_cpu_up(unsigned int cpu) per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; err = do_boot_cpu(apicid, cpu); - if (err) { pr_debug("do_boot_cpu failed %d\n", err); return -EIO; diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c index e2a59525739..f1488a344cd 100644 --- a/arch/x86/kernel/trampoline.c +++ b/arch/x86/kernel/trampoline.c @@ -38,19 +38,3 @@ unsigned long __trampinit setup_trampoline(void) memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE); return virt_to_phys(trampoline_base); } - -void __init setup_trampoline_page_table(void) -{ -#ifdef CONFIG_X86_32 - /* Copy kernel address range */ - clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY, - swapper_pg_dir + KERNEL_PGD_BOUNDARY, - KERNEL_PGD_PTRS); - - /* Initialize low mappings */ - clone_pgd_range(trampoline_pg_dir, - swapper_pg_dir + KERNEL_PGD_BOUNDARY, - min_t(unsigned long, KERNEL_PGD_PTRS, - KERNEL_PGD_BOUNDARY)); -#endif -} diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 558f2d33207..1aeac2d9df8 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -548,48 +548,6 @@ static void __init pagetable_init(void) permanent_kmaps_init(pgd_base); } -#ifdef CONFIG_ACPI_SLEEP -/* - * ACPI suspend needs this for resume, because things like the 
intel-agp - * driver might have split up a kernel 4MB mapping. - */ -char swsusp_pg_dir[PAGE_SIZE] - __attribute__ ((aligned(PAGE_SIZE))); - -static inline void save_pg_dir(void) -{ - copy_page(swsusp_pg_dir, swapper_pg_dir); -} -#else /* !CONFIG_ACPI_SLEEP */ -static inline void save_pg_dir(void) -{ -} -#endif /* !CONFIG_ACPI_SLEEP */ - -void zap_low_mappings(bool early) -{ - int i; - - /* - * Zap initial low-memory mappings. - * - * Note that "pgd_clear()" doesn't do it for - * us, because pgd_clear() is a no-op on i386. - */ - for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) { -#ifdef CONFIG_X86_PAE - set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page))); -#else - set_pgd(swapper_pg_dir+i, __pgd(0)); -#endif - } - - if (early) - __flush_tlb(); - else - flush_tlb_all(); -} - pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP); EXPORT_SYMBOL_GPL(__supported_pte_mask); @@ -958,9 +916,6 @@ void __init mem_init(void) if (boot_cpu_data.wp_works_ok < 0) test_wp_bit(); - - save_pg_dir(); - zap_low_mappings(true); } #ifdef CONFIG_MEMORY_HOTPLUG -- cgit v1.2.3-70-g09d2
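
The .irpc construct used in the entry_32.S and entry_64.S hunks earlier in this series deserves a short note: GNU as repeats the block up to the matching .endr once for every character of the quoted string, substituting that character wherever \idx appears, so the single three-line stanza expands into the same eight invalidate_interrupt0..7 entries that were previously written out by hand. Below is a minimal, self-contained sketch of the same idiom; the macro, label, and vector names are illustrative only and are not taken from the kernel tree.

        .set    VECTOR_BASE, 0x20               # illustrative base vector, not a kernel constant

        .macro  make_stub name, vector
        .globl  \name
\name:
        pushl   $\vector                        # record which vector fired
        jmp     common_stub
        .endm

        # The .irpc below expands to make_stub stub0,(VECTOR_BASE+0) ...
        # make_stub stub3,(VECTOR_BASE+3), i.e. four numbered stubs
        # generated from a single template.
        .irpc   idx, "0123"
        make_stub       stub\idx, (VECTOR_BASE+\idx)
        .endr

common_stub:
        addl    $4, %esp                        # drop the pushed vector number
        iret

Assembling this sketch with "as --32 -o stubs.o stubs.s" and disassembling the object should show stub0..stub3 each pushing 0x20..0x23 before jumping to the shared handler, which is the same effect the .irpc hunks achieve for the TLB-invalidate vectors.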